Apply clang-format 15/280715/3
author    Kwanghoon Son <k.son@samsung.com>
Fri, 2 Sep 2022 09:25:40 +0000 (05:25 -0400)
committer Kwanghoon Son <k.son@samsung.com>
Mon, 5 Sep 2022 01:33:51 +0000 (21:33 -0400)
[Issue type] Clean code
[Version] 0.23.23

This patch only changes the code convention.

Change-Id: I5f1d3dd56ecd5d962c8a9a52087ed8315db156a6
Signed-off-by: Kwanghoon Son <k.son@samsung.com>
237 files changed:
.clang-format
doc/mediavision_doc.h
include/mv_barcode_detect.h
include/mv_barcode_generate.h
include/mv_barcode_type.h
include/mv_common.h
include/mv_face.h
include/mv_face_type.h
include/mv_image.h
include/mv_inference.h
include/mv_inference_private.h
include/mv_inference_type.h
include/mv_private.h
include/mv_roi_tracker.h
include/mv_roi_tracker_type.h
include/mv_surveillance.h
include/mv_surveillance_private.h
mv_barcode/barcode_detector/include/Barcode.h
mv_barcode/barcode_detector/include/BarcodeUtils.h
mv_barcode/barcode_detector/include/mv_barcode_detect_open.h
mv_barcode/barcode_detector/src/Barcode.cpp
mv_barcode/barcode_detector/src/BarcodeUtils.cpp
mv_barcode/barcode_detector/src/mv_barcode_detect.c
mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp
mv_barcode/barcode_generator/include/BarcodeGenerator.h
mv_barcode/barcode_generator/include/BarcodeOptions.h
mv_barcode/barcode_generator/include/mv_barcode_generate_open.h
mv_barcode/barcode_generator/src/BarcodeGenerator.cpp
mv_barcode/barcode_generator/src/mv_barcode_generate.c
mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp
mv_common/include/CommonUtils.h
mv_common/include/EngineConfig.h
mv_common/include/MediaSource.h
mv_common/include/mv_common_c.h
mv_common/include/util.h
mv_common/src/CommonUtils.cpp
mv_common/src/EngineConfig.cpp
mv_common/src/MediaSource.cpp
mv_common/src/mv_common.c
mv_common/src/mv_common_c.cpp
mv_common/src/mv_private.c
mv_face/face/include/FaceDetector.h
mv_face/face/include/FaceExpressionRecognizer.h
mv_face/face/include/FaceEyeCondition.h
mv_face/face/include/FaceRecognitionModel.h
mv_face/face/include/FaceTracker.h
mv_face/face/include/FaceTrackingModel.h
mv_face/face/include/FaceUtil.h
mv_face/face/include/mv_face_open.h
mv_face/face/src/FaceDetector.cpp
mv_face/face/src/FaceExpressionRecognizer.cpp
mv_face/face/src/FaceEyeCondition.cpp
mv_face/face/src/FaceRecognitionModel.cpp
mv_face/face/src/FaceTracker.cpp
mv_face/face/src/FaceTrackingModel.cpp
mv_face/face/src/FaceUtil.cpp
mv_face/face/src/mv_face.c
mv_face/face/src/mv_face_open.cpp
mv_image/image/include/Features/BasicExtractorFactory.h
mv_image/image/include/Features/FeatureExtractor.h
mv_image/image/include/Features/FeatureExtractorFactory.h
mv_image/image/include/Features/FeatureMatcher.h
mv_image/image/include/Features/FeaturePack.h
mv_image/image/include/Features/ORBExtractorFactory.h
mv_image/image/include/ImageConfig.h
mv_image/image/include/ImageMathUtil.h
mv_image/image/include/Recognition/ImageObject.h
mv_image/image/include/Recognition/ImageRecognizer.h
mv_image/image/include/Tracking/AsyncTracker.h
mv_image/image/include/Tracking/CascadeTracker.h
mv_image/image/include/Tracking/FeatureSubstitutionTracker.h
mv_image/image/include/Tracking/ImageContourStabilizator.h
mv_image/image/include/Tracking/ImageTrackingModel.h
mv_image/image/include/Tracking/MFTracker.h
mv_image/image/include/Tracking/ObjectTracker.h
mv_image/image/include/Tracking/RecognitionBasedTracker.h
mv_image/image/include/mv_image_open.h
mv_image/image/src/Features/BasicExtractorFactory.cpp
mv_image/image/src/Features/FeatureExtractor.cpp
mv_image/image/src/Features/FeatureExtractorFactory.cpp
mv_image/image/src/Features/FeatureMatcher.cpp
mv_image/image/src/Features/FeaturePack.cpp
mv_image/image/src/Features/ORBExtractorFactory.cpp
mv_image/image/src/ImageConfig.cpp
mv_image/image/src/ImageMathUtil.cpp
mv_image/image/src/Recognition/ImageObject.cpp
mv_image/image/src/Recognition/ImageRecognizer.cpp
mv_image/image/src/Tracking/AsyncTracker.cpp
mv_image/image/src/Tracking/CascadeTracker.cpp
mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp
mv_image/image/src/Tracking/ImageContourStabilizator.cpp
mv_image/image/src/Tracking/ImageTrackingModel.cpp
mv_image/image/src/Tracking/MFTracker.cpp
mv_image/image/src/Tracking/ObjectTracker.cpp
mv_image/image/src/Tracking/RecognitionBasedTracker.cpp
mv_image/image/src/mv_image.c
mv_image/image/src/mv_image_open.cpp
mv_machine_learning/common/include/context.h
mv_machine_learning/common/include/itask.h
mv_machine_learning/common/include/machine_learning_exception.h
mv_machine_learning/face_recognition/include/backbone_model_info.h
mv_machine_learning/face_recognition/include/face_net_info.h
mv_machine_learning/face_recognition/include/face_recognition.h
mv_machine_learning/face_recognition/include/face_recognition_adapter.h
mv_machine_learning/face_recognition/include/mv_face_recognition_open.h
mv_machine_learning/face_recognition/include/nntrainer_dsm.h
mv_machine_learning/face_recognition/include/nntrainer_fvm.h
mv_machine_learning/face_recognition/include/simple_shot.h
mv_machine_learning/face_recognition/src/face_net_info.cpp
mv_machine_learning/face_recognition/src/face_recognition.cpp
mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp
mv_machine_learning/face_recognition/src/mv_face_recognition.c
mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp
mv_machine_learning/face_recognition/src/nntrainer_dsm.cpp
mv_machine_learning/face_recognition/src/nntrainer_fvm.cpp
mv_machine_learning/face_recognition/src/simple_shot.cpp
mv_machine_learning/inference/include/BoxInfo.h
mv_machine_learning/inference/include/Bvh.h
mv_machine_learning/inference/include/BvhParser.h
mv_machine_learning/inference/include/BvhUtils.h
mv_machine_learning/inference/include/DecodeInfo.h
mv_machine_learning/inference/include/DimInfo.h
mv_machine_learning/inference/include/DispVec.h
mv_machine_learning/inference/include/Edge.h
mv_machine_learning/inference/include/Inference.h
mv_machine_learning/inference/include/InferenceIni.h
mv_machine_learning/inference/include/InputMetadata.h
mv_machine_learning/inference/include/Joint.h
mv_machine_learning/inference/include/Landmark.h
mv_machine_learning/inference/include/Metadata.h
mv_machine_learning/inference/include/ObjectDecoder.h [changed mode: 0755->0644]
mv_machine_learning/inference/include/OffsetVec.h
mv_machine_learning/inference/include/OutputMetadata.h
mv_machine_learning/inference/include/OutputMetadataTypes.h
mv_machine_learning/inference/include/PoseDecoder.h
mv_machine_learning/inference/include/PostProcess.h
mv_machine_learning/inference/include/Posture.h
mv_machine_learning/inference/include/PreProcess.h
mv_machine_learning/inference/include/ScoreInfo.h
mv_machine_learning/inference/include/TensorBuffer.h
mv_machine_learning/inference/include/Utils.h
mv_machine_learning/inference/include/mv_inference_open.h
mv_machine_learning/inference/src/BoxInfo.cpp
mv_machine_learning/inference/src/Bvh.cpp
mv_machine_learning/inference/src/BvhParser.cpp
mv_machine_learning/inference/src/BvhUtils.cpp
mv_machine_learning/inference/src/Inference.cpp [changed mode: 0755->0644]
mv_machine_learning/inference/src/InferenceIni.cpp
mv_machine_learning/inference/src/InputMetadata.cpp
mv_machine_learning/inference/src/Metadata.cpp
mv_machine_learning/inference/src/ObjectDecoder.cpp [changed mode: 0755->0644]
mv_machine_learning/inference/src/OutputMetadata.cpp [changed mode: 0755->0644]
mv_machine_learning/inference/src/PoseDecoder.cpp
mv_machine_learning/inference/src/PostProcess.cpp [changed mode: 0755->0644]
mv_machine_learning/inference/src/Posture.cpp
mv_machine_learning/inference/src/PreProcess.cpp
mv_machine_learning/inference/src/TensorBuffer.cpp
mv_machine_learning/inference/src/mv_inference.c
mv_machine_learning/inference/src/mv_inference_open.cpp
mv_machine_learning/training/include/data_augment.h
mv_machine_learning/training/include/data_augment_default.h
mv_machine_learning/training/include/data_augment_flip.h
mv_machine_learning/training/include/data_augment_rotate.h
mv_machine_learning/training/include/data_set_manager.h
mv_machine_learning/training/include/feature_vector_manager.h
mv_machine_learning/training/include/file_util.h
mv_machine_learning/training/include/label_manager.h
mv_machine_learning/training/include/training_model.h
mv_machine_learning/training/src/data_augment.cpp
mv_machine_learning/training/src/data_augment_default.cpp
mv_machine_learning/training/src/data_augment_flip.cpp
mv_machine_learning/training/src/data_augment_rotate.cpp
mv_machine_learning/training/src/data_set_manager.cpp
mv_machine_learning/training/src/feature_vector_manager.cpp
mv_machine_learning/training/src/file_util.cpp
mv_machine_learning/training/src/label_manager.cpp
mv_machine_learning/training/src/training_model.cpp
mv_roi_tracker/roi_tracker/include/ROITracker.h
mv_roi_tracker/roi_tracker/include/ROITrackerUtil.h
mv_roi_tracker/roi_tracker/include/mv_roi_tracker_open.h
mv_roi_tracker/roi_tracker/src/ROITracker.cpp
mv_roi_tracker/roi_tracker/src/ROITrackerUtil.cpp
mv_roi_tracker/roi_tracker/src/mv_roi_tracker.c
mv_roi_tracker/roi_tracker/src/mv_roi_tracker_open.cpp
mv_surveillance/surveillance/include/EventDefs.h
mv_surveillance/surveillance/include/EventManager.h
mv_surveillance/surveillance/include/EventResult.h
mv_surveillance/surveillance/include/EventTrigger.h
mv_surveillance/surveillance/include/EventTriggerMovementDetection.h
mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h
mv_surveillance/surveillance/include/EventTriggerPersonRecognition.h
mv_surveillance/surveillance/include/MFTracker.h
mv_surveillance/surveillance/include/SurveillanceHelper.h
mv_surveillance/surveillance/include/mv_absdiff.h
mv_surveillance/surveillance/include/mv_apply_mask.h
mv_surveillance/surveillance/include/mv_mask_buffer.h
mv_surveillance/surveillance/include/mv_surveillance_open.h
mv_surveillance/surveillance/src/EventManager.cpp
mv_surveillance/surveillance/src/EventTrigger.cpp
mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp
mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp
mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp
mv_surveillance/surveillance/src/MFTracker.cpp
mv_surveillance/surveillance/src/SurveillanceHelper.cpp
mv_surveillance/surveillance/src/mv_absdiff.c
mv_surveillance/surveillance/src/mv_apply_mask.c
mv_surveillance/surveillance/src/mv_mask_buffer.c
mv_surveillance/surveillance/src/mv_surveillance.c
mv_surveillance/surveillance/src/mv_surveillance_open.cpp
packaging/capi-media-vision.spec
test/assessment/barcode/assessment_barcode.cpp
test/assessment/face/assessment_face.cpp
test/assessment/surveillance/assessment_surveillance.cpp
test/testsuites/barcode/barcode_test_suite.c
test/testsuites/barcode/test_barcode.cpp
test/testsuites/common/image_helper/include/ImageHelper.h
test/testsuites/common/image_helper/include/image_helper.h
test/testsuites/common/image_helper/src/ImageHelper.cpp
test/testsuites/common/image_helper/src/image_helper.cpp
test/testsuites/common/testsuite_common/mv_testsuite_common.c
test/testsuites/common/testsuite_common/mv_testsuite_common.h
test/testsuites/common/video_helper/mv_log_cfg.h
test/testsuites/common/video_helper/mv_video_helper.c
test/testsuites/common/video_helper/mv_video_helper.h
test/testsuites/face/face_test_suite.c
test/testsuites/image/image_test_suite.c
test/testsuites/machine_learning/face_recognition/measure_face_recognition.cpp
test/testsuites/machine_learning/face_recognition/test_face_recognition.cpp
test/testsuites/machine_learning/inference/inference_test_suite.c
test/testsuites/machine_learning/inference/test_face_detection.cpp
test/testsuites/machine_learning/inference/test_face_landmark_detection.cpp
test/testsuites/machine_learning/inference/test_image_classification.cpp
test/testsuites/machine_learning/inference/test_inference_helper.cpp
test/testsuites/machine_learning/inference/test_object_detection.cpp
test/testsuites/machine_learning/inference/test_pose_landmark_detection.cpp
test/testsuites/surveillance/surveillance_test_suite.c
test/testsuites/tracker/test_tracker.cpp

diff --git a/.clang-format b/.clang-format
index 3409848..aa20f06 100644
@@ -46,14 +46,14 @@ BraceWrapping:
   AfterClass:      true
   AfterNamespace:  true
   AfterObjCDeclaration: true
-  AfterExternBlock: true
+  AfterExternBlock: false # mediavision want false
   IndentBraces:    false
   SplitEmptyFunction: false
   SplitEmptyRecord: false
   SplitEmptyNamespace: false
 
 # from pptx
-ColumnLimit:     80
+ColumnLimit:     120 # mediavision want 120
 
 # M11
 SpaceAfterCStyleCast: true
@@ -87,7 +87,7 @@ ConstructorInitializerIndentWidth: 8
 ContinuationIndentWidth: 8
 Cpp11BracedListStyle: false
 KeepEmptyLinesAtTheStartOfBlocks: false
-NamespaceIndentation: Inner
+NamespaceIndentation: None # mediavision want None
 PenaltyBreakAssignment: 10
 PenaltyBreakBeforeFirstCallParameter: 30
 PenaltyBreakComment: 10
@@ -96,7 +96,6 @@ PenaltyBreakString: 10
 PenaltyExcessCharacter: 100
 SpacesInAngles:  false
 SpacesInContainerLiterals: false
-NamespaceIndentation: None
 
 ForEachMacros:
   - 'apei_estatus_for_each_section'
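
For illustration only (not part of the patch): a minimal, self-contained C sketch of the style the updated .clang-format enforces, as seen in the hunks below: the extern "C" brace stays on its line (AfterExternBlock: false), enum braces move to their own line, trailing comments sit one space after each member, and parameters pack up to the 120-column limit instead of one per line. All names in this sketch are hypothetical.

#include <stddef.h>

#ifdef __cplusplus
extern "C" { /* AfterExternBlock: false keeps this brace on the extern line */
#endif

/* Enum braces break onto their own line; comments follow members after a single space. */
typedef enum
{
	MV_EXAMPLE_FIRST, /**< First value */
	MV_EXAMPLE_SECOND /**< Second value */
} mv_example_e;

/* Parameters pack onto one line up to ColumnLimit: 120 rather than wrapping after each one. */
int mv_example_call(void *source, const char *name, mv_example_e kind, size_t length, void *user_data);

#ifdef __cplusplus
}
#endif
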
diff --git a/doc/mediavision_doc.h b/doc/mediavision_doc.h
index 2fa14a9..bf43202 100644
  * and it sets newly generated coordinates to result parameter.
  */
 
-#endif  /* __TIZEN_MEDIAVISION_DOC_H__ */
+#endif /* __TIZEN_MEDIAVISION_DOC_H__ */
diff --git a/include/mv_barcode_detect.h b/include/mv_barcode_detect.h
index c0e7ca7..caed05d 100644
@@ -41,7 +41,7 @@ extern "C" {
  *
  * @see mv_barcode_detect_attr_target_e
  */
-#define MV_BARCODE_DETECT_ATTR_TARGET "MV_BARCODE_DETECT_ATTR_TARGET"  /**< Target: 0-all, 1-1D, 2-2D*/
+#define MV_BARCODE_DETECT_ATTR_TARGET "MV_BARCODE_DETECT_ATTR_TARGET" /**< Target: 0-all, 1-1D, 2-2D*/
 
 /**
  * @brief Use #MV_BARCODE_DETECT_ATTR_ROTATION_DEGREES
@@ -94,10 +94,11 @@ extern "C" {
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-typedef enum {
-       MV_BARCODE_DETECT_ATTR_TARGET_ALL,          /**< 1D and 2D */
-       MV_BARCODE_DETECT_ATTR_TARGET_1D_BARCODE,   /**< 1D barcode only */
-       MV_BARCODE_DETECT_ATTR_TARGET_2D_BARCODE,   /**< 2D barcode only */
+typedef enum
+{
+       MV_BARCODE_DETECT_ATTR_TARGET_ALL, /**< 1D and 2D */
+       MV_BARCODE_DETECT_ATTR_TARGET_1D_BARCODE, /**< 1D barcode only */
+       MV_BARCODE_DETECT_ATTR_TARGET_2D_BARCODE, /**< 2D barcode only */
 } mv_barcode_detect_attr_target_e;
 
 /**
@@ -111,13 +112,13 @@ typedef enum {
  * @see #MV_BARCODE_DETECT_ATTR_ROTATION_DIRECTION
  * @see #MV_BARCODE_DETECT_ATTR_ROTATION_COUNT
  */
-typedef enum {
-       MV_BARCODE_DETECT_ATTR_ROTATION_CLOCKWISE,           /**< Clockwise */
-       MV_BARCODE_DETECT_ATTR_ROTATION_COUNTER_CLOCKWISE,   /**< Counter clockwise */
-       MV_BARCODE_DETECT_ATTR_ROTATION_ALL,   /**< Clockwise and counter clockwise */
+typedef enum
+{
+       MV_BARCODE_DETECT_ATTR_ROTATION_CLOCKWISE, /**< Clockwise */
+       MV_BARCODE_DETECT_ATTR_ROTATION_COUNTER_CLOCKWISE, /**< Counter clockwise */
+       MV_BARCODE_DETECT_ATTR_ROTATION_ALL, /**< Clockwise and counter clockwise */
 } mv_barcode_detect_attr_rotation_direction_e;
 
-
 /**
  * @brief Called when barcode detection is completed.
  * @details If no barcode is detected then the method will be called, barcodes
@@ -137,14 +138,9 @@ typedef enum {
  *
  * @see mv_barcode_detect()
  */
-typedef void (*mv_barcode_detected_cb)(
-       mv_source_h source,
-       mv_engine_config_h engine_cfg,
-       const mv_quadrangle_s *barcode_locations,
-       const char* messages[],
-       const mv_barcode_type_e *types,
-       int number_of_barcodes,
-       void *user_data);
+typedef void (*mv_barcode_detected_cb)(mv_source_h source, mv_engine_config_h engine_cfg,
+                                                                          const mv_quadrangle_s *barcode_locations, const char *messages[],
+                                                                          const mv_barcode_type_e *types, int number_of_barcodes, void *user_data);
 
 /**
  * @brief Detects barcode(s) on source and reads message from it.
@@ -171,12 +167,8 @@ typedef void (*mv_barcode_detected_cb)(
  *
  * @see mv_barcode_detected_cb()
  */
-int mv_barcode_detect(
-       mv_source_h source,
-       mv_engine_config_h engine_cfg,
-       mv_rectangle_s roi,
-       mv_barcode_detected_cb detect_cb,
-       void *user_data);
+int mv_barcode_detect(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s roi,
+                                         mv_barcode_detected_cb detect_cb, void *user_data);
 
 /**
  * @}
diff --git a/include/mv_barcode_generate.h b/include/mv_barcode_generate.h
index aaaa2b4..0654fdc 100644
@@ -43,7 +43,8 @@ extern "C" {
  *
  * @see mv_barcode_generate_attr_text_e
  */
-#define MV_BARCODE_GENERATE_ATTR_TEXT "MV_BARCODE_GENERATE_ATTR_TEXT" /**< Text:
+#define MV_BARCODE_GENERATE_ATTR_TEXT \
+       "MV_BARCODE_GENERATE_ATTR_TEXT" /**< Text:
                                                                0-invisible,
                                                                1-visible */
 
@@ -70,12 +71,12 @@ extern "C" {
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-typedef enum {
-       MV_BARCODE_GENERATE_ATTR_TEXT_INVISIBLE,   /**< Invisible */
-       MV_BARCODE_GENERATE_ATTR_TEXT_VISIBLE,     /**< Visible */
+typedef enum
+{
+       MV_BARCODE_GENERATE_ATTR_TEXT_INVISIBLE, /**< Invisible */
+       MV_BARCODE_GENERATE_ATTR_TEXT_VISIBLE, /**< Visible */
 } mv_barcode_generate_attr_text_e;
 
-
 /**
  * @brief Generates #mv_source_h with barcode image.
  * @details Pay attention that for EAN-8 and EAN-13 barcode types the barcode
@@ -116,14 +117,9 @@ typedef enum {
  *
  * @see mv_barcode_generate_image()
  */
-int mv_barcode_generate_source(
-       mv_engine_config_h engine_cfg,
-       const char *message,
-       mv_barcode_type_e type,
-       mv_barcode_qr_mode_e qr_enc_mode,
-       mv_barcode_qr_ecc_e qr_ecc,
-       int qr_version,
-       mv_source_h image);
+int mv_barcode_generate_source(mv_engine_config_h engine_cfg, const char *message, mv_barcode_type_e type,
+                                                          mv_barcode_qr_mode_e qr_enc_mode, mv_barcode_qr_ecc_e qr_ecc, int qr_version,
+                                                          mv_source_h image);
 
 /**
  * @brief Generates image file with barcode.
@@ -173,17 +169,9 @@ int mv_barcode_generate_source(
  *
  * @see mv_barcode_generate_source()
  */
-int mv_barcode_generate_image(
-       mv_engine_config_h engine_cfg,
-       const char *message,
-       int image_width,
-       int image_height,
-       mv_barcode_type_e type,
-       mv_barcode_qr_mode_e qr_enc_mode,
-       mv_barcode_qr_ecc_e qr_ecc,
-       int qr_version,
-       const char *image_path,
-       mv_barcode_image_format_e image_format);
+int mv_barcode_generate_image(mv_engine_config_h engine_cfg, const char *message, int image_width, int image_height,
+                                                         mv_barcode_type_e type, mv_barcode_qr_mode_e qr_enc_mode, mv_barcode_qr_ecc_e qr_ecc,
+                                                         int qr_version, const char *image_path, mv_barcode_image_format_e image_format);
 
 /**
  * @}
diff --git a/include/mv_barcode_type.h b/include/mv_barcode_type.h
index 89080b2..1f033a2 100644
@@ -38,23 +38,24 @@ extern "C" {
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  * @remarks #MV_BARCODE_UNDEFINED is deprecated. Use #MV_BARCODE_UNKNOWN instead
  */
-typedef enum {
-       MV_BARCODE_QR,          /**< 2D barcode - Quick Response code */
-       MV_BARCODE_UPC_A,       /**< 1D barcode - Universal Product Code with 12-digit */
-       MV_BARCODE_UPC_E,       /**< 1D barcode - Universal Product Code with 6-digit */
-       MV_BARCODE_EAN_8,       /**< 1D barcode - International Article Number with 8-digit */
-       MV_BARCODE_EAN_13,      /**< 1D barcode - International Article Number with 13-digit */
-       MV_BARCODE_CODE128,     /**< 1D barcode - Code 128 */
-       MV_BARCODE_CODE39,      /**< 1D barcode - Code 39 */
-       MV_BARCODE_I2_5,        /**< 1D barcode - Interleaved Two of Five */
-       MV_BARCODE_UNDEFINED,   /**< @deprecated Undefined (Deprecated since 6.0) */
-       MV_BARCODE_EAN_2,       /**< 1D barcode - International Article Number with 2-digit(add-on) (since 6.0) */
-       MV_BARCODE_EAN_5,       /**< 1D barcode - International Article Number with 5-digit(add-on) (since 6.0) */
-       MV_BARCODE_CODE93,      /**< 1D barcode - Code 93 (since 6.0)  */
-       MV_BARCODE_CODABAR,     /**< 1D barcode - CODABAR (since 6.0)  */
-       MV_BARCODE_DATABAR,     /**< 1D barcode - GS1 DATABAR (since 6.0)  */
-       MV_BARCODE_DATABAR_EXPAND,     /**< 1D barcode - GS1 DATABAR EXPAND(since 6.0)  */
-       MV_BARCODE_UNKNOWN  = 100   /**< Unknown (since 6.0) */
+typedef enum
+{
+       MV_BARCODE_QR, /**< 2D barcode - Quick Response code */
+       MV_BARCODE_UPC_A, /**< 1D barcode - Universal Product Code with 12-digit */
+       MV_BARCODE_UPC_E, /**< 1D barcode - Universal Product Code with 6-digit */
+       MV_BARCODE_EAN_8, /**< 1D barcode - International Article Number with 8-digit */
+       MV_BARCODE_EAN_13, /**< 1D barcode - International Article Number with 13-digit */
+       MV_BARCODE_CODE128, /**< 1D barcode - Code 128 */
+       MV_BARCODE_CODE39, /**< 1D barcode - Code 39 */
+       MV_BARCODE_I2_5, /**< 1D barcode - Interleaved Two of Five */
+       MV_BARCODE_UNDEFINED, /**< @deprecated Undefined (Deprecated since 6.0) */
+       MV_BARCODE_EAN_2, /**< 1D barcode - International Article Number with 2-digit(add-on) (since 6.0) */
+       MV_BARCODE_EAN_5, /**< 1D barcode - International Article Number with 5-digit(add-on) (since 6.0) */
+       MV_BARCODE_CODE93, /**< 1D barcode - Code 93 (since 6.0)  */
+       MV_BARCODE_CODABAR, /**< 1D barcode - CODABAR (since 6.0)  */
+       MV_BARCODE_DATABAR, /**< 1D barcode - GS1 DATABAR (since 6.0)  */
+       MV_BARCODE_DATABAR_EXPAND, /**< 1D barcode - GS1 DATABAR EXPAND(since 6.0)  */
+       MV_BARCODE_UNKNOWN = 100 /**< Unknown (since 6.0) */
 } mv_barcode_type_e;
 
 /**
@@ -63,12 +64,13 @@ typedef enum {
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  * @remarks This is unavailable for 1D barcodes
  */
-typedef enum {
-       MV_BARCODE_QR_ECC_LOW,           /**< Recovery up to  7% losses */
-       MV_BARCODE_QR_ECC_MEDIUM,        /**< Recovery up to 15% losses */
-       MV_BARCODE_QR_ECC_QUARTILE,      /**< Recovery up to 25% losses */
-       MV_BARCODE_QR_ECC_HIGH,          /**< Recovery up to 30% losses */
-       MV_BARCODE_QR_ECC_UNAVAILABLE    /**< Unavailable  */
+typedef enum
+{
+       MV_BARCODE_QR_ECC_LOW, /**< Recovery up to  7% losses */
+       MV_BARCODE_QR_ECC_MEDIUM, /**< Recovery up to 15% losses */
+       MV_BARCODE_QR_ECC_QUARTILE, /**< Recovery up to 25% losses */
+       MV_BARCODE_QR_ECC_HIGH, /**< Recovery up to 30% losses */
+       MV_BARCODE_QR_ECC_UNAVAILABLE /**< Unavailable  */
 } mv_barcode_qr_ecc_e;
 
 /**
@@ -77,12 +79,13 @@ typedef enum {
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  * @remarks This is unavailable for 1D barcodes
  */
-typedef enum {
-       MV_BARCODE_QR_MODE_NUMERIC,         /**< Numeric digits */
-       MV_BARCODE_QR_MODE_ALPHANUMERIC,    /**< Alphanumeric characters */
-       MV_BARCODE_QR_MODE_BYTE,            /**< Raw 8-bit bytes */
-       MV_BARCODE_QR_MODE_UTF8,            /**< UTF-8 character encoding */
-       MV_BARCODE_QR_MODE_UNAVAILABLE      /**< Unavailable */
+typedef enum
+{
+       MV_BARCODE_QR_MODE_NUMERIC, /**< Numeric digits */
+       MV_BARCODE_QR_MODE_ALPHANUMERIC, /**< Alphanumeric characters */
+       MV_BARCODE_QR_MODE_BYTE, /**< Raw 8-bit bytes */
+       MV_BARCODE_QR_MODE_UTF8, /**< UTF-8 character encoding */
+       MV_BARCODE_QR_MODE_UNAVAILABLE /**< Unavailable */
 } mv_barcode_qr_mode_e;
 
 /**
@@ -90,12 +93,13 @@ typedef enum {
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-typedef enum {
+typedef enum
+{
        MV_BARCODE_IMAGE_FORMAT_UNAVAILABLE = -1, /**< Unavailable image format */
-       MV_BARCODE_IMAGE_FORMAT_BMP,    /**< BMP image format */
-       MV_BARCODE_IMAGE_FORMAT_JPG,    /**< JPEG image format */
-       MV_BARCODE_IMAGE_FORMAT_PNG,    /**< PNG image format */
-       MV_BARCODE_IMAGE_FORMAT_NUM,    /**< The number of supported image format */
+       MV_BARCODE_IMAGE_FORMAT_BMP, /**< BMP image format */
+       MV_BARCODE_IMAGE_FORMAT_JPG, /**< JPEG image format */
+       MV_BARCODE_IMAGE_FORMAT_PNG, /**< PNG image format */
+       MV_BARCODE_IMAGE_FORMAT_NUM, /**< The number of supported image format */
 } mv_barcode_image_format_e;
 
 /**
diff --git a/include/mv_common.h b/include/mv_common.h
index b678894..fe06802 100644
@@ -38,7 +38,8 @@ extern "C" {
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-typedef struct {
+typedef struct
+{
        int x; /**< X-axis coordinate of the point in 2D space */
        int y; /**< Y-axis coordinate of the point in 2D space */
 } mv_point_s;
@@ -48,8 +49,9 @@ typedef struct {
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-typedef struct {
-       mv_point_s points[4];    /**< Four points that define object bounding
+typedef struct
+{
+       mv_point_s points[4]; /**< Four points that define object bounding
                                                                quadrangle */
 } mv_quadrangle_s;
 
@@ -59,10 +61,11 @@ typedef struct {
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-typedef struct {
-       mv_point_s point;     /**< Top left corner of rectangle coordinates */
-       int width;            /**< Width of the bounding rectangle */
-       int height;           /**< Height of the bounding rectangle */
+typedef struct
+{
+       mv_point_s point; /**< Top left corner of rectangle coordinates */
+       int width; /**< Width of the bounding rectangle */
+       int height; /**< Height of the bounding rectangle */
 } mv_rectangle_s;
 
 /**
@@ -70,35 +73,22 @@ typedef struct {
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-typedef enum {
-       MEDIA_VISION_ERROR_NONE
-                       = TIZEN_ERROR_NONE,                /**< Successful */
-       MEDIA_VISION_ERROR_NOT_SUPPORTED
-                       = TIZEN_ERROR_NOT_SUPPORTED,       /**< Not supported */
-       MEDIA_VISION_ERROR_MSG_TOO_LONG
-                       = TIZEN_ERROR_MSG_TOO_LONG,        /**< Message too long */
-       MEDIA_VISION_ERROR_NO_DATA
-                       = TIZEN_ERROR_NO_DATA,             /**< No data */
-       MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE
-                       = TIZEN_ERROR_KEY_NOT_AVAILABLE,   /**< Key not available */
-       MEDIA_VISION_ERROR_OUT_OF_MEMORY
-                       = TIZEN_ERROR_OUT_OF_MEMORY,       /**< Out of memory */
-       MEDIA_VISION_ERROR_INVALID_PARAMETER
-                       = TIZEN_ERROR_INVALID_PARAMETER,   /**< Invalid parameter */
-       MEDIA_VISION_ERROR_INVALID_OPERATION
-                       = TIZEN_ERROR_INVALID_OPERATION,   /**< Invalid operation */
-       MEDIA_VISION_ERROR_PERMISSION_DENIED
-                       = TIZEN_ERROR_NOT_PERMITTED,       /**< Not permitted */
-       MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT
-                       = TIZEN_ERROR_MEDIA_VISION | 0x01, /**< Not supported format */
-       MEDIA_VISION_ERROR_INTERNAL
-                       = TIZEN_ERROR_MEDIA_VISION | 0x02, /**< Internal error */
-       MEDIA_VISION_ERROR_INVALID_DATA
-                       = TIZEN_ERROR_MEDIA_VISION | 0x03, /**< Invalid data */
-       MEDIA_VISION_ERROR_INVALID_PATH
-                       = TIZEN_ERROR_MEDIA_VISION | 0x04, /**< Invalid path (Since 3.0) */
-       MEDIA_VISION_ERROR_NOT_SUPPORTED_ENGINE
-                       = TIZEN_ERROR_MEDIA_VISION | 0x05  /**< Not supported engine (Since 5.5.) */
+typedef enum
+{
+       MEDIA_VISION_ERROR_NONE = TIZEN_ERROR_NONE, /**< Successful */
+       MEDIA_VISION_ERROR_NOT_SUPPORTED = TIZEN_ERROR_NOT_SUPPORTED, /**< Not supported */
+       MEDIA_VISION_ERROR_MSG_TOO_LONG = TIZEN_ERROR_MSG_TOO_LONG, /**< Message too long */
+       MEDIA_VISION_ERROR_NO_DATA = TIZEN_ERROR_NO_DATA, /**< No data */
+       MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE = TIZEN_ERROR_KEY_NOT_AVAILABLE, /**< Key not available */
+       MEDIA_VISION_ERROR_OUT_OF_MEMORY = TIZEN_ERROR_OUT_OF_MEMORY, /**< Out of memory */
+       MEDIA_VISION_ERROR_INVALID_PARAMETER = TIZEN_ERROR_INVALID_PARAMETER, /**< Invalid parameter */
+       MEDIA_VISION_ERROR_INVALID_OPERATION = TIZEN_ERROR_INVALID_OPERATION, /**< Invalid operation */
+       MEDIA_VISION_ERROR_PERMISSION_DENIED = TIZEN_ERROR_NOT_PERMITTED, /**< Not permitted */
+       MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT = TIZEN_ERROR_MEDIA_VISION | 0x01, /**< Not supported format */
+       MEDIA_VISION_ERROR_INTERNAL = TIZEN_ERROR_MEDIA_VISION | 0x02, /**< Internal error */
+       MEDIA_VISION_ERROR_INVALID_DATA = TIZEN_ERROR_MEDIA_VISION | 0x03, /**< Invalid data */
+       MEDIA_VISION_ERROR_INVALID_PATH = TIZEN_ERROR_MEDIA_VISION | 0x04, /**< Invalid path (Since 3.0) */
+       MEDIA_VISION_ERROR_NOT_SUPPORTED_ENGINE = TIZEN_ERROR_MEDIA_VISION | 0x05 /**< Not supported engine (Since 5.5.) */
 } mv_error_e;
 
 /**
@@ -107,11 +97,12 @@ typedef enum {
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-typedef enum {
-       MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE,     /**< Double attribute type */
-       MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER,    /**< Integer attribute type */
-       MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN,    /**< Boolean attribute type */
-       MV_ENGINE_CONFIG_ATTR_TYPE_STRING,      /**< String attribute type */
+typedef enum
+{
+       MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE, /**< Double attribute type */
+       MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER, /**< Integer attribute type */
+       MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN, /**< Boolean attribute type */
+       MV_ENGINE_CONFIG_ATTR_TYPE_STRING, /**< String attribute type */
        MV_ENGINE_CONFIG_ATTR_TYPE_ARRAY_STRING /**< Array of string attribute type (Since 5.5) */
 } mv_config_attribute_type_e;
 
@@ -120,19 +111,20 @@ typedef enum {
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-typedef enum {
+typedef enum
+{
        MEDIA_VISION_COLORSPACE_INVALID, /**< The colorspace type is invalid */
-       MEDIA_VISION_COLORSPACE_Y800,    /**< The colorspace type is Y800 */
-       MEDIA_VISION_COLORSPACE_I420,    /**< The colorspace type is I420 */
-       MEDIA_VISION_COLORSPACE_NV12,    /**< The colorspace type is NV12 */
-       MEDIA_VISION_COLORSPACE_YV12,    /**< The colorspace type is YV12 */
-       MEDIA_VISION_COLORSPACE_NV21,    /**< The colorspace type is NV21 */
-       MEDIA_VISION_COLORSPACE_YUYV,    /**< The colorspace type is YUYV */
-       MEDIA_VISION_COLORSPACE_UYVY,    /**< The colorspace type is UYVY */
-       MEDIA_VISION_COLORSPACE_422P,    /**< The colorspace type is 422P */
-       MEDIA_VISION_COLORSPACE_RGB565,  /**< The colorspace type is RGB565 */
-       MEDIA_VISION_COLORSPACE_RGB888,  /**< The colorspace type is RGB888 */
-       MEDIA_VISION_COLORSPACE_RGBA,    /**< The colorspace type is RGBA */
+       MEDIA_VISION_COLORSPACE_Y800, /**< The colorspace type is Y800 */
+       MEDIA_VISION_COLORSPACE_I420, /**< The colorspace type is I420 */
+       MEDIA_VISION_COLORSPACE_NV12, /**< The colorspace type is NV12 */
+       MEDIA_VISION_COLORSPACE_YV12, /**< The colorspace type is YV12 */
+       MEDIA_VISION_COLORSPACE_NV21, /**< The colorspace type is NV21 */
+       MEDIA_VISION_COLORSPACE_YUYV, /**< The colorspace type is YUYV */
+       MEDIA_VISION_COLORSPACE_UYVY, /**< The colorspace type is UYVY */
+       MEDIA_VISION_COLORSPACE_422P, /**< The colorspace type is 422P */
+       MEDIA_VISION_COLORSPACE_RGB565, /**< The colorspace type is RGB565 */
+       MEDIA_VISION_COLORSPACE_RGB888, /**< The colorspace type is RGB888 */
+       MEDIA_VISION_COLORSPACE_RGBA, /**< The colorspace type is RGBA */
 } mv_colorspace_e;
 
 /**
@@ -175,8 +167,7 @@ typedef void *mv_source_h;
  *
  * @see mv_destroy_source()
  */
-int mv_create_source(
-               mv_source_h *source);
+int mv_create_source(mv_source_h *source);
 
 /**
  * @brief Destroys the source handle and releases all its resources.
@@ -190,8 +181,7 @@ int mv_create_source(
  *
  * @see mv_create_source()
  */
-int mv_destroy_source(
-               mv_source_h source);
+int mv_destroy_source(mv_source_h source);
 
 /**
  * @brief Fills the media source based on the media packet.
@@ -213,9 +203,7 @@ int mv_destroy_source(
  * @see mv_create_source()
  * @see mv_destroy_source()
  */
-int mv_source_fill_by_media_packet(
-               mv_source_h source,
-               media_packet_h media_packet);
+int mv_source_fill_by_media_packet(mv_source_h source, media_packet_h media_packet);
 
 /**
  * @brief Fills the media source based on the buffer and metadata.
@@ -237,13 +225,8 @@ int mv_source_fill_by_media_packet(
  *
  * @see mv_source_clear()
  */
-int mv_source_fill_by_buffer(
-               mv_source_h source,
-               unsigned char *data_buffer,
-               unsigned int buffer_size,
-               unsigned int image_width,
-               unsigned int image_height,
-               mv_colorspace_e image_colorspace);
+int mv_source_fill_by_buffer(mv_source_h source, unsigned char *data_buffer, unsigned int buffer_size,
+                                                        unsigned int image_width, unsigned int image_height, mv_colorspace_e image_colorspace);
 
 /**
  * @brief Clears the buffer of the media source.
@@ -257,8 +240,7 @@ int mv_source_fill_by_buffer(
  *
  * @see mv_source_fill_by_buffer()
  */
-int mv_source_clear(
-               mv_source_h source);
+int mv_source_clear(mv_source_h source);
 
 /**
  * @brief Gets buffer of the media source.
@@ -280,10 +262,7 @@ int mv_source_clear(
  * @see mv_source_get_height()
  * @see mv_source_get_colorspace()
  */
-int mv_source_get_buffer(
-               mv_source_h source,
-               unsigned char **data_buffer,
-               unsigned int *buffer_size);
+int mv_source_get_buffer(mv_source_h source, unsigned char **data_buffer, unsigned int *buffer_size);
 
 /**
  * @brief Gets height of the media source.
@@ -300,9 +279,7 @@ int mv_source_get_buffer(
  * @see mv_source_get_colorspace()
  * @see mv_source_get_buffer()
  */
-int mv_source_get_height(
-               mv_source_h source,
-               unsigned int *image_height);
+int mv_source_get_height(mv_source_h source, unsigned int *image_height);
 
 /**
  * @brief Gets width of the media source.
@@ -319,9 +296,7 @@ int mv_source_get_height(
  * @see mv_source_get_colorspace()
  * @see mv_source_get_buffer()
  */
-int mv_source_get_width(
-               mv_source_h source,
-               unsigned int *image_width);
+int mv_source_get_width(mv_source_h source, unsigned int *image_width);
 
 /**
  * @brief Gets colorspace of the media source.
@@ -338,9 +313,7 @@ int mv_source_get_width(
  * @see mv_source_get_height()
  * @see mv_source_get_buffer()
  */
-int mv_source_get_colorspace(
-               mv_source_h source,
-               mv_colorspace_e *image_colorspace);
+int mv_source_get_colorspace(mv_source_h source, mv_colorspace_e *image_colorspace);
 
 /**
  * @brief Creates the handle to the configuration of engine.
@@ -369,8 +342,7 @@ int mv_source_get_colorspace(
  * @see mv_engine_config_get_bool_attribute()
  * @see mv_engine_config_get_string_attribute()
  */
-int mv_create_engine_config(
-               mv_engine_config_h *engine_cfg);
+int mv_create_engine_config(mv_engine_config_h *engine_cfg);
 
 /**
  * @brief Destroys the engine configuration handle and releases all its
@@ -387,8 +359,7 @@ int mv_create_engine_config(
  * @see #mv_engine_config_h
  * @see mv_create_engine_config()
  */
-int mv_destroy_engine_config(
-               mv_engine_config_h engine_cfg);
+int mv_destroy_engine_config(mv_engine_config_h engine_cfg);
 
 /**
  * @brief Sets the double attribute to the configuration.
@@ -411,10 +382,7 @@ int mv_destroy_engine_config(
  * @see mv_engine_config_set_bool_attribute()
  * @see mv_engine_config_set_string_attribute()
  */
-int mv_engine_config_set_double_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               double value);
+int mv_engine_config_set_double_attribute(mv_engine_config_h engine_cfg, const char *name, double value);
 
 /**
  * @brief Sets the integer attribute to the configuration.
@@ -439,10 +407,7 @@ int mv_engine_config_set_double_attribute(
  * @see mv_barcode_detect_attr_target_e
  * @see mv_barcode_generate_attr_text_e
  */
-int mv_engine_config_set_int_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               int value);
+int mv_engine_config_set_int_attribute(mv_engine_config_h engine_cfg, const char *name, int value);
 
 /**
  * @brief Sets the boolean attribute to the configuration.
@@ -465,10 +430,7 @@ int mv_engine_config_set_int_attribute(
  * @see mv_engine_config_set_int_attribute()
  * @see mv_engine_config_set_string_attribute()
  */
-int mv_engine_config_set_bool_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               bool value);
+int mv_engine_config_set_bool_attribute(mv_engine_config_h engine_cfg, const char *name, bool value);
 
 /**
  * @brief Sets the string attribute to the configuration.
@@ -491,10 +453,7 @@ int mv_engine_config_set_bool_attribute(
  * @see mv_engine_config_set_int_attribute()
  * @see mv_engine_config_set_bool_attribute()
  */
-int mv_engine_config_set_string_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               const char *value);
+int mv_engine_config_set_string_attribute(mv_engine_config_h engine_cfg, const char *name, const char *value);
 
 /**
  * @brief Sets the array of string attribute to the configuration.
@@ -515,11 +474,8 @@ int mv_engine_config_set_string_attribute(
  *
  * @see mv_engine_config_get_array_string_attribute()
  */
-int mv_engine_config_set_array_string_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               const char **values,
-               unsigned int size);
+int mv_engine_config_set_array_string_attribute(mv_engine_config_h engine_cfg, const char *name, const char **values,
+                                                                                               unsigned int size);
 
 /**
  * @brief Gets the double attribute from the configuration dictionary.
@@ -543,10 +499,7 @@ int mv_engine_config_set_array_string_attribute(
  * @see mv_engine_config_get_bool_attribute()
  * @see mv_engine_config_get_string_attribute()
  */
-int mv_engine_config_get_double_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               double *value);
+int mv_engine_config_get_double_attribute(mv_engine_config_h engine_cfg, const char *name, double *value);
 
 /**
  * @brief Gets the integer attribute from the configuration dictionary.
@@ -572,10 +525,7 @@ int mv_engine_config_get_double_attribute(
  * @see mv_barcode_detect_attr_target_e
  * @see mv_barcode_generate_attr_text_e
  */
-int mv_engine_config_get_int_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               int *value);
+int mv_engine_config_get_int_attribute(mv_engine_config_h engine_cfg, const char *name, int *value);
 
 /**
  * @brief Gets the boolean attribute from the configuration dictionary.
@@ -599,10 +549,7 @@ int mv_engine_config_get_int_attribute(
  * @see mv_engine_config_get_int_attribute()
  * @see mv_engine_config_get_string_attribute()
  */
-int mv_engine_config_get_bool_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               bool *value);
+int mv_engine_config_get_bool_attribute(mv_engine_config_h engine_cfg, const char *name, bool *value);
 
 /**
  * @brief Gets the string attribute from the configuration dictionary.
@@ -628,10 +575,7 @@ int mv_engine_config_get_bool_attribute(
  * @see mv_engine_config_get_int_attribute()
  * @see mv_engine_config_get_bool_attribute()
  */
-int mv_engine_config_get_string_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               char **value);
+int mv_engine_config_get_string_attribute(mv_engine_config_h engine_cfg, const char *name, char **value);
 
 /**
  * @brief Gets the array of string attribute from the configuration dictionary.
@@ -655,11 +599,8 @@ int mv_engine_config_get_string_attribute(
  *
  * @see mv_engine_config_set_array_string_attribute()
  */
-int mv_engine_config_get_array_string_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               char ***values,
-               int *size);
+int mv_engine_config_get_array_string_attribute(mv_engine_config_h engine_cfg, const char *name, char ***values,
+                                                                                               int *size);
 
 /**
  * @brief Called to get information (type and name) once for each supported
@@ -677,10 +618,8 @@ int mv_engine_config_get_array_string_attribute(
  * @pre mv_engine_config_foreach_supported_attribute() will invoke this callback
  * @see mv_engine_config_foreach_supported_attribute()
  */
-typedef bool (*mv_supported_attribute_cb)(
-               mv_config_attribute_type_e attribute_type,
-               const char *attribute_name,
-               void *user_data);
+typedef bool (*mv_supported_attribute_cb)(mv_config_attribute_type_e attribute_type, const char *attribute_name,
+                                                                                 void *user_data);
 
 /**
  * @brief Traverses the list of supported attribute names and types.
@@ -719,9 +658,7 @@ typedef bool (*mv_supported_attribute_cb)(
  * @see mv_engine_config_get_bool_attribute()
  * @see mv_engine_config_get_string_attribute()
  */
-int mv_engine_config_foreach_supported_attribute(
-               mv_supported_attribute_cb callback,
-               void *user_data);
+int mv_engine_config_foreach_supported_attribute(mv_supported_attribute_cb callback, void *user_data);
 
 /**
  * @}
diff --git a/include/mv_face.h b/include/mv_face.h
index e3c90f6..bcb58fe 100644
@@ -163,12 +163,8 @@ extern "C" {
  *
  * @see mv_face_detect()
  */
-typedef void (*mv_face_detected_cb)(
-       mv_source_h source,
-       mv_engine_config_h engine_cfg,
-       mv_rectangle_s *faces_locations,
-       int number_of_faces,
-       void *user_data);
+typedef void (*mv_face_detected_cb)(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s *faces_locations,
+                                                                       int number_of_faces, void *user_data);
 
 /**
  * @brief Performs face detection on the @a source for the @a engine_cfg.
@@ -202,12 +198,7 @@ typedef void (*mv_face_detected_cb)(
  *
  * @see mv_face_detected_cb()
  */
-int mv_face_detect(
-       mv_source_h source,
-       mv_engine_config_h engine_cfg,
-       mv_face_detected_cb detected_cb,
-       void *user_data);
-
+int mv_face_detect(mv_source_h source, mv_engine_config_h engine_cfg, mv_face_detected_cb detected_cb, void *user_data);
 
 /********************/
 /* Face recognition */
@@ -249,14 +240,9 @@ int mv_face_detect(
  *
  * @see mv_face_recognize()
  */
-typedef void (*mv_face_recognized_cb)(
-       mv_source_h source,
-       mv_face_recognition_model_h recognition_model,
-       mv_engine_config_h engine_cfg,
-       mv_rectangle_s *face_location,
-       const int *face_label,
-       double confidence,
-       void *user_data);
+typedef void (*mv_face_recognized_cb)(mv_source_h source, mv_face_recognition_model_h recognition_model,
+                                                                         mv_engine_config_h engine_cfg, mv_rectangle_s *face_location,
+                                                                         const int *face_label, double confidence, void *user_data);
 
 /**
  * @brief Performs face recognition on the @a source image.
@@ -308,14 +294,8 @@ typedef void (*mv_face_recognized_cb)(
  *
  * @see mv_face_recognized_cb()
  */
-int mv_face_recognize(
-       mv_source_h source,
-       mv_face_recognition_model_h recognition_model,
-       mv_engine_config_h engine_cfg,
-       mv_rectangle_s *face_location,
-       mv_face_recognized_cb recognized_cb,
-       void *user_data);
-
+int mv_face_recognize(mv_source_h source, mv_face_recognition_model_h recognition_model, mv_engine_config_h engine_cfg,
+                                         mv_rectangle_s *face_location, mv_face_recognized_cb recognized_cb, void *user_data);
 
 /*****************/
 /* Face tracking */
@@ -352,13 +332,9 @@ int mv_face_recognize(
  *
  * @see mv_face_track()
  */
-typedef void (*mv_face_tracked_cb)(
-       mv_source_h source,
-       mv_face_tracking_model_h tracking_model,
-       mv_engine_config_h engine_cfg,
-       mv_quadrangle_s *location,
-       double confidence,
-       void *user_data);
+typedef void (*mv_face_tracked_cb)(mv_source_h source, mv_face_tracking_model_h tracking_model,
+                                                                  mv_engine_config_h engine_cfg, mv_quadrangle_s *location, double confidence,
+                                                                  void *user_data);
 
 /**
  * @brief Performs face tracking on the @a source for the @a tracking_model.
@@ -411,14 +387,8 @@ typedef void (*mv_face_tracked_cb)(
  *
  * @see mv_face_tracked_cb()
  */
-int mv_face_track(
-       mv_source_h source,
-       mv_face_tracking_model_h tracking_model,
-       mv_engine_config_h engine_cfg,
-       mv_face_tracked_cb tracked_cb,
-       bool do_learn,
-       void *user_data);
-
+int mv_face_track(mv_source_h source, mv_face_tracking_model_h tracking_model, mv_engine_config_h engine_cfg,
+                                 mv_face_tracked_cb tracked_cb, bool do_learn, void *user_data);
 
 /********************************/
 /* Recognition of eye condition */
@@ -449,12 +419,9 @@ int mv_face_track(
  *
  * @see mv_face_eye_condition_recognize()
  */
-typedef void (*mv_face_eye_condition_recognized_cb)(
-       mv_source_h source,
-       mv_engine_config_h engine_cfg,
-       mv_rectangle_s face_location,
-       mv_face_eye_condition_e eye_condition,
-       void *user_data);
+typedef void (*mv_face_eye_condition_recognized_cb)(mv_source_h source, mv_engine_config_h engine_cfg,
+                                                                                                       mv_rectangle_s face_location, mv_face_eye_condition_e eye_condition,
+                                                                                                       void *user_data);
 
 /**
  * @brief Determines eye-blink condition for @a face_location on media @a source.
@@ -486,13 +453,8 @@ typedef void (*mv_face_eye_condition_recognized_cb)(
  *
  * @see mv_face_eye_condition_recognized_cb()
  */
-int mv_face_eye_condition_recognize(
-       mv_source_h source,
-       mv_engine_config_h engine_cfg,
-       mv_rectangle_s face_location,
-       mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
-       void *user_data);
-
+int mv_face_eye_condition_recognize(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s face_location,
+                                                                       mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, void *user_data);
 
 /************************************/
 /* Recognition of facial expression */
@@ -521,12 +483,9 @@ int mv_face_eye_condition_recognize(
  *
  * @see mv_face_facial_expression_recognize()
  */
-typedef void (*mv_face_facial_expression_recognized_cb)(
-       mv_source_h source,
-       mv_engine_config_h engine_cfg,
-       mv_rectangle_s face_location,
-       mv_face_facial_expression_e facial_expression,
-       void *user_data);
+typedef void (*mv_face_facial_expression_recognized_cb)(mv_source_h source, mv_engine_config_h engine_cfg,
+                                                                                                               mv_rectangle_s face_location,
+                                                                                                               mv_face_facial_expression_e facial_expression, void *user_data);
 
 /**
  * @brief Determines facial expression for @a face_location on media @a source.
@@ -557,12 +516,9 @@ typedef void (*mv_face_facial_expression_recognized_cb)(
  *
  * @see mv_face_facial_expression_recognized_cb()
  */
-int mv_face_facial_expression_recognize(
-       mv_source_h source,
-       mv_engine_config_h engine_cfg,
-       mv_rectangle_s face_location,
-       mv_face_facial_expression_recognized_cb expression_recognized_cb,
-       void *user_data);
+int mv_face_facial_expression_recognize(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s face_location,
+                                                                               mv_face_facial_expression_recognized_cb expression_recognized_cb,
+                                                                               void *user_data);
 
 /*******************************/
 /* Recognition model behavior */
@@ -604,8 +560,7 @@ int mv_face_facial_expression_recognize(
  *
  * @see mv_face_recognition_model_destroy()
  */
-int mv_face_recognition_model_create(
-       mv_face_recognition_model_h *recognition_model);
+int mv_face_recognition_model_create(mv_face_recognition_model_h *recognition_model);
 
 /**
  * @brief Destroys the face recognition model handle and releases all its
@@ -623,8 +578,7 @@ int mv_face_recognition_model_create(
  *
  * @see mv_face_recognition_model_create()
  */
-int mv_face_recognition_model_destroy(
-       mv_face_recognition_model_h recognition_model);
+int mv_face_recognition_model_destroy(mv_face_recognition_model_h recognition_model);
 
 /**
  * @brief Creates a copy of existed recognition model handle and clones all its
@@ -648,9 +602,7 @@ int mv_face_recognition_model_destroy(
  *
  * @see mv_face_recognition_model_create()
  */
-int mv_face_recognition_model_clone(
-       mv_face_recognition_model_h src,
-       mv_face_recognition_model_h *dst);
+int mv_face_recognition_model_clone(mv_face_recognition_model_h src, mv_face_recognition_model_h *dst);
 
 /**
  * @brief Saves recognition model to the file.
@@ -686,9 +638,7 @@ int mv_face_recognition_model_clone(
  * @see mv_face_recognition_model_create()
  * @see app_get_data_path()
  */
-int mv_face_recognition_model_save(
-       const char *file_name,
-       mv_face_recognition_model_h recognition_model);
+int mv_face_recognition_model_save(const char *file_name, mv_face_recognition_model_h recognition_model);
 
 /**
  * @brief Loads recognition model from file.
@@ -724,9 +674,7 @@ int mv_face_recognition_model_save(
  * @see mv_face_recognition_model_destroy()
  * @see app_get_data_path()
  */
-int mv_face_recognition_model_load(
-       const char *file_name,
-       mv_face_recognition_model_h *recognition_model);
+int mv_face_recognition_model_load(const char *file_name, mv_face_recognition_model_h *recognition_model);
 
 /**
  * @brief Adds face image example to be used for face recognition model learning
@@ -768,11 +716,8 @@ int mv_face_recognition_model_load(
  * @see mv_face_recognition_model_reset()
  * @see mv_face_recognition_model_learn()
  */
-int mv_face_recognition_model_add(
-       const mv_source_h source,
-       mv_face_recognition_model_h recognition_model,
-       const mv_rectangle_s *example_location,
-       int face_label);
+int mv_face_recognition_model_add(const mv_source_h source, mv_face_recognition_model_h recognition_model,
+                                                                 const mv_rectangle_s *example_location, int face_label);
 
 /**
  * @brief Removes from @a recognition_model all collected with
@@ -805,9 +750,7 @@ int mv_face_recognition_model_add(
  * @see mv_face_recognition_model_add()
  * @see mv_face_recognition_model_learn()
  */
-int mv_face_recognition_model_reset(
-       mv_face_recognition_model_h recognition_model,
-       int *face_label);
+int mv_face_recognition_model_reset(mv_face_recognition_model_h recognition_model, int *face_label);
 
 /**
  * @brief Learns face recognition model.
@@ -865,9 +808,7 @@ int mv_face_recognition_model_reset(
  * @see mv_face_recognition_model_reset()
  * @see mv_face_recognize()
  */
-int mv_face_recognition_model_learn(
-       mv_engine_config_h engine_cfg,
-       mv_face_recognition_model_h recognition_model);
+int mv_face_recognition_model_learn(mv_engine_config_h engine_cfg, mv_face_recognition_model_h recognition_model);
 
 /**
  * @brief Queries labels list and number of labels had been learned by the model.
@@ -895,10 +836,8 @@ int mv_face_recognition_model_learn(
  * @see mv_face_recognition_model_reset()
  * @see mv_face_recognition_model_learn()
  */
-int mv_face_recognition_model_query_labels(
-       mv_face_recognition_model_h recognition_model,
-       int **labels,
-       unsigned int *number_of_labels);
+int mv_face_recognition_model_query_labels(mv_face_recognition_model_h recognition_model, int **labels,
+                                                                                  unsigned int *number_of_labels);
 
 /***************************/
 /* Tracking model behavior */
@@ -937,8 +876,7 @@ int mv_face_recognition_model_query_labels(
  * @see mv_face_tracking_model_prepare()
  * @see mv_face_tracking_model_load()
  */
-int mv_face_tracking_model_create(
-       mv_face_tracking_model_h *tracking_model);
+int mv_face_tracking_model_create(mv_face_tracking_model_h *tracking_model);
 
 /**
  * @brief Calls this function to destroy the face tracking model handle and
@@ -956,8 +894,7 @@ int mv_face_tracking_model_create(
  *
  * @see mv_face_tracking_model_create()
  */
-int mv_face_tracking_model_destroy(
-       mv_face_tracking_model_h tracking_model);
+int mv_face_tracking_model_destroy(mv_face_tracking_model_h tracking_model);
 
 /**
  * @brief Calls this function to initialize tracking model by the location of the
@@ -1001,11 +938,8 @@ int mv_face_tracking_model_destroy(
  * @see mv_face_tracking_model_create()
  * @see mv_face_track()
  */
-int mv_face_tracking_model_prepare(
-       mv_face_tracking_model_h tracking_model,
-       mv_engine_config_h engine_cfg,
-       mv_source_h source,
-       mv_quadrangle_s *location);
+int mv_face_tracking_model_prepare(mv_face_tracking_model_h tracking_model, mv_engine_config_h engine_cfg,
+                                                                  mv_source_h source, mv_quadrangle_s *location);
 
 /**
  * @brief Calls this function to make a copy of existed tracking model handle and
@@ -1029,9 +963,7 @@ int mv_face_tracking_model_prepare(
  *
  * @see mv_face_tracking_model_create()
  */
-int mv_face_tracking_model_clone(
-       mv_face_tracking_model_h src,
-       mv_face_tracking_model_h *dst);
+int mv_face_tracking_model_clone(mv_face_tracking_model_h src, mv_face_tracking_model_h *dst);
 
 /**
  * @brief Calls this method to save tracking model to the file.
@@ -1062,9 +994,7 @@ int mv_face_tracking_model_clone(
  * @see mv_face_tracking_model_create()
  * @see app_get_data_path()
  */
-int mv_face_tracking_model_save(
-       const char *file_name,
-       mv_face_tracking_model_h tracking_model);
+int mv_face_tracking_model_save(const char *file_name, mv_face_tracking_model_h tracking_model);
 
 /**
  * @brief Calls this method to load a tracking model from file.
@@ -1098,9 +1028,7 @@ int mv_face_tracking_model_save(
  * @see mv_face_tracking_model_destroy()
  * @see app_get_data_path()
  */
-int mv_face_tracking_model_load(
-       const char *file_name,
-       mv_face_tracking_model_h *tracking_model);
+int mv_face_tracking_model_load(const char *file_name, mv_face_tracking_model_h *tracking_model);
 
 /**
  * @}
diff --git a/include/mv_face_type.h b/include/mv_face_type.h
index f26b5f7..170bd4d 100644
@@ -39,10 +39,11 @@ extern "C" {
  *
  * @see mv_face_eye_condition_recognize()
  */
-typedef enum {
-       MV_FACE_EYES_OPEN,      /**< Eyes are open */
-       MV_FACE_EYES_CLOSED,    /**< Eyes are closed */
-       MV_FACE_EYES_NOT_FOUND  /**< The eyes condition wasn't determined */
+typedef enum
+{
+       MV_FACE_EYES_OPEN, /**< Eyes are open */
+       MV_FACE_EYES_CLOSED, /**< Eyes are closed */
+       MV_FACE_EYES_NOT_FOUND /**< The eyes condition wasn't determined */
 } mv_face_eye_condition_e;
 
 /**
@@ -52,15 +53,16 @@ typedef enum {
  *
  * @see mv_face_facial_expression_recognize()
  */
-typedef enum {
-       MV_FACE_UNKNOWN,    /**< Unknown face expression */
-       MV_FACE_NEUTRAL,    /**< Face expression is neutral */
-       MV_FACE_SMILE,      /**< Face expression is smiling */
-       MV_FACE_SADNESS,    /**< Face expression is sadness */
-       MV_FACE_SURPRISE,   /**< Face expression is surprise */
-       MV_FACE_ANGER,      /**< Face expression is anger */
-       MV_FACE_FEAR,       /**< Face expression is fear */
-       MV_FACE_DISGUST,    /**< Face expression is disgust */
+typedef enum
+{
+       MV_FACE_UNKNOWN, /**< Unknown face expression */
+       MV_FACE_NEUTRAL, /**< Face expression is neutral */
+       MV_FACE_SMILE, /**< Face expression is smiling */
+       MV_FACE_SADNESS, /**< Face expression is sadness */
+       MV_FACE_SURPRISE, /**< Face expression is surprise */
+       MV_FACE_ANGER, /**< Face expression is anger */
+       MV_FACE_FEAR, /**< Face expression is fear */
+       MV_FACE_DISGUST, /**< Face expression is disgust */
 } mv_face_facial_expression_e;
 
 /**
index 1ad1a99..0145398 100644 (file)
@@ -253,13 +253,9 @@ extern "C" {
  * @see mv_engine_config_h
  * @see mv_quadrangle_s
  */
-typedef void (*mv_image_recognized_cb)(
-       mv_source_h source,
-       mv_engine_config_h engine_cfg,
-       const mv_image_object_h *image_objects,
-       mv_quadrangle_s **locations,
-       unsigned int number_of_objects,
-       void *user_data);
+typedef void (*mv_image_recognized_cb)(mv_source_h source, mv_engine_config_h engine_cfg,
+                                                                          const mv_image_object_h *image_objects, mv_quadrangle_s **locations,
+                                                                          unsigned int number_of_objects, void *user_data);
 
 /**
  * @brief Recognizes the given image objects on the source image.
@@ -305,13 +301,8 @@ typedef void (*mv_image_recognized_cb)(
  * @see mv_image_object_destroy()
  * @see mv_engine_config_h
  */
-int mv_image_recognize(
-       mv_source_h source,
-       const mv_image_object_h *image_objects,
-       int number_of_objects,
-       mv_engine_config_h engine_cfg,
-       mv_image_recognized_cb recognized_cb,
-       void *user_data);
+int mv_image_recognize(mv_source_h source, const mv_image_object_h *image_objects, int number_of_objects,
+                                          mv_engine_config_h engine_cfg, mv_image_recognized_cb recognized_cb, void *user_data);
 
 /*************************/
 /* Image object tracking */
@@ -352,12 +343,8 @@ int mv_image_recognize(
  * @see mv_engine_config_h
  * @see mv_quadrangle_s
  */
-typedef void (*mv_image_tracked_cb)(
-       mv_source_h source,
-       mv_image_tracking_model_h image_tracking_model,
-       mv_engine_config_h engine_cfg,
-       mv_quadrangle_s *location,
-       void *user_data);
+typedef void (*mv_image_tracked_cb)(mv_source_h source, mv_image_tracking_model_h image_tracking_model,
+                                                                       mv_engine_config_h engine_cfg, mv_quadrangle_s *location, void *user_data);
 
 /**
  * @brief Tracks the given image tracking model on the current frame
@@ -411,12 +398,8 @@ typedef void (*mv_image_tracked_cb)(
  * @see mv_image_tracking_model_set_target()
  * @see mv_image_tracking_model_destroy()
  */
-int mv_image_track(
-       mv_source_h source,
-       mv_image_tracking_model_h image_tracking_model,
-       mv_engine_config_h engine_cfg,
-       mv_image_tracked_cb tracked_cb,
-       void *user_data);
+int mv_image_track(mv_source_h source, mv_image_tracking_model_h image_tracking_model, mv_engine_config_h engine_cfg,
+                                  mv_image_tracked_cb tracked_cb, void *user_data);
 
 /**************************/
 /* Image object behaviour */
@@ -438,8 +421,7 @@ int mv_image_track(
  * @see mv_image_object_destroy()
  * @see mv_image_object_h
  */
-int mv_image_object_create(
-       mv_image_object_h *image_object);
+int mv_image_object_create(mv_image_object_h *image_object);
 
 /**
  * @brief Destroys the image object.
@@ -456,8 +438,7 @@ int mv_image_object_create(
  * @see mv_image_object_create()
  * @see mv_image_object_h
  */
-int mv_image_object_destroy(
-       mv_image_object_h image_object);
+int mv_image_object_destroy(mv_image_object_h image_object);
 
 /**
  * @brief Fills the image object.
@@ -497,11 +478,8 @@ int mv_image_object_destroy(
  * @see mv_image_object_destroy()
  * @see mv_engine_config_h
  */
-int mv_image_object_fill(
-       mv_image_object_h image_object,
-       mv_engine_config_h engine_cfg,
-       mv_source_h source,
-       mv_rectangle_s *location);
+int mv_image_object_fill(mv_image_object_h image_object, mv_engine_config_h engine_cfg, mv_source_h source,
+                                                mv_rectangle_s *location);
 
 /**
  * @brief Gets a value that determines how well an image object can be recognized.
@@ -533,9 +511,7 @@ int mv_image_object_fill(
  * @see mv_image_object_destroy()
  * @see mv_engine_config_h
  */
-int mv_image_object_get_recognition_rate(
-       mv_image_object_h image_object,
-       double *recognition_rate);
+int mv_image_object_get_recognition_rate(mv_image_object_h image_object, double *recognition_rate);
 
 /**
  * @brief Sets a label for the image object.
@@ -559,9 +535,7 @@ int mv_image_object_get_recognition_rate(
  * @see mv_image_object_create()
  * @see mv_image_object_destroy()
  */
-int mv_image_object_set_label(
-       mv_image_object_h image_object,
-       int label);
+int mv_image_object_set_label(mv_image_object_h image_object, int label);
 
 /**
  * @brief Gets a label of image object.
@@ -587,9 +561,7 @@ int mv_image_object_set_label(
  * @see mv_image_object_create()
  * @see mv_image_object_destroy()
  */
-int mv_image_object_get_label(
-       mv_image_object_h image_object,
-       int *label);
+int mv_image_object_get_label(mv_image_object_h image_object, int *label);
 
 /**
  * @brief Clones the image object.
@@ -609,9 +581,7 @@ int mv_image_object_get_label(
  * @see mv_image_object_create()
  * @see mv_image_object_destroy()
  */
-int mv_image_object_clone(
-       mv_image_object_h src,
-       mv_image_object_h *dst);
+int mv_image_object_clone(mv_image_object_h src, mv_image_object_h *dst);
 
 /**
  * @brief Saves the image object.
@@ -636,8 +606,7 @@ int mv_image_object_clone(
  * @see mv_image_object_load()
  * @see mv_image_object_destroy()
  */
-int mv_image_object_save(
-       const char *file_name, mv_image_object_h image_object);
+int mv_image_object_save(const char *file_name, mv_image_object_h image_object);
 
 /**
  * @brief Loads an image object from the file.
@@ -664,8 +633,7 @@ int mv_image_object_save(
  * @see mv_image_object_save()
  * @see mv_image_object_destroy()
  */
-int mv_image_object_load(
-       const char *file_name, mv_image_object_h *image_object);
+int mv_image_object_load(const char *file_name, mv_image_object_h *image_object);
 
 /**********************************/
 /* Image tracking model behaviour */
@@ -685,8 +653,7 @@ int mv_image_object_load(
  *
  * @see mv_image_tracking_model_destroy()
  */
-int mv_image_tracking_model_create(
-       mv_image_tracking_model_h *image_tracking_model);
+int mv_image_tracking_model_create(mv_image_tracking_model_h *image_tracking_model);
 
 /**
  * @brief Sets target of image tracking model.
@@ -720,9 +687,7 @@ int mv_image_tracking_model_create(
  * @see mv_image_track()
  * @see mv_image_tracking_model_destroy()
  */
-int mv_image_tracking_model_set_target(
-       mv_image_object_h image_object,
-       mv_image_tracking_model_h image_tracking_model);
+int mv_image_tracking_model_set_target(mv_image_object_h image_object, mv_image_tracking_model_h image_tracking_model);
 
 /**
  * @brief Destroys the image tracking model.
@@ -739,8 +704,7 @@ int mv_image_tracking_model_set_target(
  *
  * @see mv_image_tracking_model_create()
  */
-int mv_image_tracking_model_destroy(
-       mv_image_tracking_model_h image_tracking_model);
+int mv_image_tracking_model_destroy(mv_image_tracking_model_h image_tracking_model);
 
 /**
  * @brief Refreshes the state of image tracking model.
@@ -771,9 +735,7 @@ int mv_image_tracking_model_destroy(
  * @see mv_image_track()
  * @see mv_image_tracking_model_destroy()
  */
-int mv_image_tracking_model_refresh(
-       mv_image_tracking_model_h image_tracking_model,
-       mv_engine_config_h engine_cfg);
+int mv_image_tracking_model_refresh(mv_image_tracking_model_h image_tracking_model, mv_engine_config_h engine_cfg);
 
 /**
  * @brief Clones the image tracking model.
@@ -794,9 +756,7 @@ int mv_image_tracking_model_refresh(
  * @see mv_image_tracking_model_create()
  * @see mv_image_tracking_model_destroy()
  */
-int mv_image_tracking_model_clone(
-       mv_image_tracking_model_h src,
-       mv_image_tracking_model_h *dst);
+int mv_image_tracking_model_clone(mv_image_tracking_model_h src, mv_image_tracking_model_h *dst);
 
 /**
  * @brief Saves the image tracking model.
@@ -823,8 +783,7 @@ int mv_image_tracking_model_clone(
  * @see mv_image_tracking_model_load()
  * @see mv_image_tracking_model_destroy()
  */
-int mv_image_tracking_model_save(
-       const char *file_name, mv_image_tracking_model_h image_tracking_model);
+int mv_image_tracking_model_save(const char *file_name, mv_image_tracking_model_h image_tracking_model);
 
 /**
  * @brief Loads an image tracking model from the file.
@@ -851,8 +810,7 @@ int mv_image_tracking_model_save(
  * @see mv_image_tracking_model_save()
  * @see mv_image_tracking_model_destroy()
  */
-int mv_image_tracking_model_load(
-       const char *file_name, mv_image_tracking_model_h *image_tracking_model);
+int mv_image_tracking_model_load(const char *file_name, mv_image_tracking_model_h *image_tracking_model);
 
 /**
  * @}
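To see how the image-object calls compose with mv_image_recognize(), here is a rough sketch; the ROI values are placeholders, error checks are elided, and the locations array is assumed to carry NULL for objects that were not found:

#include <mv_image.h>

static void on_recognized(mv_source_h source, mv_engine_config_h engine_cfg,
			  const mv_image_object_h *image_objects, mv_quadrangle_s **locations,
			  unsigned int number_of_objects, void *user_data)
{
	for (unsigned int i = 0; i < number_of_objects; ++i) {
		if (locations[i] != NULL) {
			/* image_objects[i] was found; *locations[i] is its quadrangle */
		}
	}
}

static int recognize_in_frame(mv_source_h frame, mv_source_h target_image)
{
	mv_image_object_h object = NULL;
	mv_rectangle_s roi = { { 0, 0 }, 100, 100 }; /* placeholder region in target_image */

	mv_image_object_create(&object);
	mv_image_object_fill(object, NULL, target_image, &roi); /* learn features from the ROI */

	int err = mv_image_recognize(frame, &object, 1, NULL, on_recognized, NULL);
	mv_image_object_destroy(object);
	return err;
}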
index 2b8e054..0032481 100644 (file)
@@ -247,7 +247,6 @@ extern "C" {
  */
 #define MV_INFERENCE_CONFIDENCE_THRESHOLD "MV_INFERENCE_CONFIDENCE_THRESHOLD"
 
-
 /*************/
 /* Inference */
 /*************/
@@ -321,8 +320,7 @@ int mv_inference_destroy(mv_inference_h infer);
  * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
  *                                          in @a engine_config
  */
-int mv_inference_configure(mv_inference_h infer,
-            mv_engine_config_h engine_config);
+int mv_inference_configure(mv_inference_h infer, mv_engine_config_h engine_config);
 
 /**
  * @brief Prepares inference.
@@ -362,10 +360,7 @@ int mv_inference_prepare(mv_inference_h infer);
 *
 * @pre mv_inference_foreach_supported_engine()
 */
-typedef bool(*mv_inference_supported_engine_cb) (
-       const char *engine,
-       bool supported,
-       void *user_data);
+typedef bool (*mv_inference_supported_engine_cb)(const char *engine, bool supported, void *user_data);
 
 /**
 * @brief Traverses the list of supported engines for inference.
@@ -385,10 +380,8 @@ typedef bool(*mv_inference_supported_engine_cb) (
 *
 * @see mv_inference_supported_engine_cb()
 */
-int mv_inference_foreach_supported_engine(
-       mv_inference_h infer,
-       mv_inference_supported_engine_cb callback,
-       void *user_data);
+int mv_inference_foreach_supported_engine(mv_inference_h infer, mv_inference_supported_engine_cb callback,
+                                                                                 void *user_data);
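A short sketch of the traversal above; the inference handle is assumed to come from mv_inference_create(), which is declared outside this hunk:

#include <stdio.h>
#include <mv_inference.h>

static bool print_engine(const char *engine, bool supported, void *user_data)
{
	printf("engine %s: %s\n", engine, supported ? "supported" : "not supported");
	return true; /* returning false would stop the traversal */
}

/* usage: mv_inference_foreach_supported_engine(infer, print_engine, NULL); */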
 
 /************************/
 /* Image classification */
@@ -420,13 +413,8 @@ int mv_inference_foreach_supported_engine(
  *
  * @see mv_inference_image_classify()
  */
-typedef void (*mv_inference_image_classified_cb)(
-       mv_source_h source,
-       int number_of_classes,
-       const int *indices,
-       const char **names,
-       const float *confidences,
-       void *user_data);
+typedef void (*mv_inference_image_classified_cb)(mv_source_h source, int number_of_classes, const int *indices,
+                                                                                                const char **names, const float *confidences, void *user_data);
 
 /**
  * @brief Performs image classification on the @a source.
@@ -465,13 +453,8 @@ typedef void (*mv_inference_image_classified_cb)(
  *
  * @see mv_inference_image_classified_cb()
  */
-int mv_inference_image_classify(
-       mv_source_h source,
-       mv_inference_h infer,
-       mv_rectangle_s *roi,
-       mv_inference_image_classified_cb classified_cb,
-       void *user_data);
-
+int mv_inference_image_classify(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+                                                               mv_inference_image_classified_cb classified_cb, void *user_data);
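Object detection, face detection, and landmark detection below follow the same synchronous call-plus-callback pattern, so one classification sketch covers the shape of all of them. Error handling is elided, and a NULL roi is assumed to select the whole source image:

#include <stdio.h>
#include <mv_inference.h>

static void on_classified(mv_source_h source, int number_of_classes, const int *indices,
			  const char **names, const float *confidences, void *user_data)
{
	for (int i = 0; i < number_of_classes; ++i)
		printf("class %d (%s): %.3f\n", indices[i], names[i], confidences[i]);
}

static int classify(mv_source_h source, mv_inference_h infer)
{
	/* NULL roi: classify the whole source (assumption; pass an mv_rectangle_s to restrict it) */
	return mv_inference_image_classify(source, infer, NULL, on_classified, NULL);
}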
 
 /************************/
 /* Object Detection     */
@@ -504,14 +487,9 @@ int mv_inference_image_classify(
  *
  * @see mv_inference_object_detect()
  */
-typedef void (*mv_inference_object_detected_cb)(
-       mv_source_h source,
-       int number_of_objects,
-       const int *indices,
-       const char **names,
-       const float *confidences,
-       const mv_rectangle_s *locations,
-       void *user_data);
+typedef void (*mv_inference_object_detected_cb)(mv_source_h source, int number_of_objects, const int *indices,
+                                                                                               const char **names, const float *confidences,
+                                                                                               const mv_rectangle_s *locations, void *user_data);
 
 /**
  * @brief Performs object detection on the @a source.
@@ -547,11 +525,8 @@ typedef void (*mv_inference_object_detected_cb)(
  *
  * @see mv_inference_object_detected_cb()
  */
-int mv_inference_object_detect(
-       mv_source_h source,
-       mv_inference_h infer,
-       mv_inference_object_detected_cb detected_cb,
-       void *user_data);
+int mv_inference_object_detect(mv_source_h source, mv_inference_h infer, mv_inference_object_detected_cb detected_cb,
+                                                          void *user_data);
 
 /*************************************/
 /* Face and its landmark detection   */
@@ -582,12 +557,8 @@ int mv_inference_object_detect(
  *
  * @see mv_inference_face_detect()
  */
-typedef void (*mv_inference_face_detected_cb)(
-       mv_source_h source,
-       int number_of_faces,
-       const float *confidences,
-       const mv_rectangle_s *locations,
-       void *user_data);
+typedef void (*mv_inference_face_detected_cb)(mv_source_h source, int number_of_faces, const float *confidences,
+                                                                                         const mv_rectangle_s *locations, void *user_data);
 
 /**
  * @brief Performs face detection on the @a source.
@@ -623,11 +594,8 @@ typedef void (*mv_inference_face_detected_cb)(
  *
  * @see mv_inference_face_detected_cb()
  */
-int mv_inference_face_detect(
-       mv_source_h source,
-       mv_inference_h infer,
-       mv_inference_face_detected_cb detected_cb,
-       void *user_data);
+int mv_inference_face_detect(mv_source_h source, mv_inference_h infer, mv_inference_face_detected_cb detected_cb,
+                                                        void *user_data);
 
 /**
  * @brief Called when facial landmarks in @a source are detected.
@@ -653,11 +621,8 @@ int mv_inference_face_detect(
  *
  * @see mv_inference_face_detect()
  */
-typedef void (*mv_inference_facial_landmark_detected_cb)(
-       mv_source_h source,
-       int number_of_landmarks,
-       const mv_point_s *locations,
-       void *user_data);
+typedef void (*mv_inference_facial_landmark_detected_cb)(mv_source_h source, int number_of_landmarks,
+                                                                                                                const mv_point_s *locations, void *user_data);
 
 /**
  * @brief Performs facial landmarks detection on the @a source.
@@ -694,12 +659,8 @@ typedef void (*mv_inference_facial_landmark_detected_cb)(
  *
  * @see mv_inference_facial_landmark_detected_cb()
  */
-int mv_inference_facial_landmark_detect(
-       mv_source_h source,
-       mv_inference_h infer,
-       mv_rectangle_s *roi,
-       mv_inference_facial_landmark_detected_cb detected_cb,
-       void *user_data);
+int mv_inference_facial_landmark_detect(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+                                                                               mv_inference_facial_landmark_detected_cb detected_cb, void *user_data);
 
 /**
  * @brief Called when poses in @a source are detected.
@@ -720,10 +681,8 @@ int mv_inference_facial_landmark_detect(
  *
  * @see mv_inference_pose_landmark_detect()
  */
-typedef void (*mv_inference_pose_landmark_detected_cb)(
-       mv_source_h source,
-       mv_inference_pose_result_h locations,
-       void *user_data);
+typedef void (*mv_inference_pose_landmark_detected_cb)(mv_source_h source, mv_inference_pose_result_h locations,
+                                                                                                          void *user_data);
 
 /**
  * @brief Performs pose landmarks detection on the @a source.
@@ -759,12 +718,8 @@ typedef void (*mv_inference_pose_landmark_detected_cb)(
  *
  * @see mv_inference_pose_landmark_detected_cb()
  */
-int mv_inference_pose_landmark_detect(
-       mv_source_h source,
-       mv_inference_h infer,
-       mv_rectangle_s *roi,
-       mv_inference_pose_landmark_detected_cb detected_cb,
-       void *user_data);
+int mv_inference_pose_landmark_detect(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+                                                                         mv_inference_pose_landmark_detected_cb detected_cb, void *user_data);
 
 /**
  * @brief Gets the number of poses.
@@ -782,8 +737,7 @@ int mv_inference_pose_landmark_detect(
  * @see mv_inference_pose_landmark_detected_cb()
  * @see mv_inference_pose_result_h
  */
-int mv_inference_pose_get_number_of_poses(
-       mv_inference_pose_result_h result, int *number_of_poses);
+int mv_inference_pose_get_number_of_poses(mv_inference_pose_result_h result, int *number_of_poses);
 
 /**
  * @brief Gets the number of landmarks per a pose.
@@ -801,8 +755,7 @@ int mv_inference_pose_get_number_of_poses(
  * @see mv_inference_pose_landmark_detected_cb()
  * @see mv_inference_pose_result_h
  */
-int mv_inference_pose_get_number_of_landmarks(
-       mv_inference_pose_result_h result, int *number_of_landmarks);
+int mv_inference_pose_get_number_of_landmarks(mv_inference_pose_result_h result, int *number_of_landmarks);
 
 /**
  * @brief Gets landmark location of a part of a pose.
@@ -829,8 +782,8 @@ int mv_inference_pose_get_number_of_landmarks(
  * @see mv_inference_pose_landmark_detected_cb()
  * @see mv_inference_pose_result_h
  */
-int mv_inference_pose_get_landmark(
-       mv_inference_pose_result_h result, int pose_index, int pose_part, mv_point_s *location, float *score);
+int mv_inference_pose_get_landmark(mv_inference_pose_result_h result, int pose_index, int pose_part,
+                                                                  mv_point_s *location, float *score);
 
 /**
  * @brief Gets a label of a pose.
@@ -853,8 +806,7 @@ int mv_inference_pose_get_landmark(
  * @see mv_inference_pose_landmark_detected_cb()
  * @see mv_inference_pose_result_h
  */
-int mv_inference_pose_get_label(
-       mv_inference_pose_result_h result, int pose_index, int *label);
+int mv_inference_pose_get_label(mv_inference_pose_result_h result, int pose_index, int *label);
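The pose getters above are normally combined inside the landmark callback; a sketch, assuming 0-based part indices for mv_inference_pose_get_landmark() (error checks elided):

#include <mv_inference.h>

static void on_pose(mv_source_h source, mv_inference_pose_result_h result, void *user_data)
{
	int poses = 0, parts = 0;

	mv_inference_pose_get_number_of_poses(result, &poses);
	mv_inference_pose_get_number_of_landmarks(result, &parts);

	for (int p = 0; p < poses; ++p) {
		for (int part = 0; part < parts; ++part) {
			mv_point_s location;
			float score = 0.0f;

			mv_inference_pose_get_landmark(result, p, part, &location, &score);
			/* location/score describe landmark 'part' of pose 'p' */
		}
	}
}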
 
 /**
  * @brief Creates pose handle.
index 7bcd315..604810d 100644 (file)
@@ -19,8 +19,8 @@
 
 #include <mv_common.h>
 
-#define MAX_NUMBER_OF_POSE     5
-#define MAX_NUMBER_OF_LANDMARKS_PER_POSE       500
+#define MAX_NUMBER_OF_POSE 5
+#define MAX_NUMBER_OF_LANDMARKS_PER_POSE 500
 
 #ifdef __cplusplus
 extern "C" {
@@ -37,19 +37,21 @@ extern "C" {
   * @since_tizen 6.0
   *
   */
-typedef struct mv_inference_landmark_s{
-    bool isAvailable;   /**< Availability of landmark */
-    mv_point_s point;   /**< 2D position of landmark */
-    int label;    /**< Label of landmark */
-    float score;        /**< Score of landmark */
+typedef struct mv_inference_landmark_s
+{
+       bool isAvailable; /**< Availability of landmark */
+       mv_point_s point; /**< 2D position of landmark */
+       int label; /**< Label of landmark */
+       float score; /**< Score of landmark */
 
-    mv_inference_landmark_s() {
-      isAvailable = false;
-      point.x = -1;
-      point.y = -1;
-      label = -1;
-      score = -1.0f;
-    }
+       mv_inference_landmark_s()
+       {
+               isAvailable = false;
+               point.x = -1;
+               point.y = -1;
+               label = -1;
+               score = -1.0f;
+       }
 } mv_inference_landmark_s;
 
 /**
@@ -58,10 +60,12 @@ typedef struct mv_inference_landmark_s{
   * @since_tizen 6.0
   *
   */
-typedef struct {
-    int number_of_poses;
-    int number_of_landmarks_per_pose;            /**< The number of landmarks*/
-    mv_inference_landmark_s landmarks[MAX_NUMBER_OF_POSE][MAX_NUMBER_OF_LANDMARKS_PER_POSE]; /**< A set of landmarks describing pose */
+typedef struct
+{
+       int number_of_poses;
+       int number_of_landmarks_per_pose; /**< The number of landmarks*/
+       mv_inference_landmark_s landmarks[MAX_NUMBER_OF_POSE]
+                                                                        [MAX_NUMBER_OF_LANDMARKS_PER_POSE]; /**< A set of landmarks describing pose */
 } mv_inference_pose_s;
 
 /**
index 1794b83..1aaff69 100644 (file)
@@ -58,16 +58,17 @@ extern "C" {
  *
  * @see mv_inference_prepare()
  */
-typedef enum {
-    MV_INFERENCE_BACKEND_NONE = -1, /**< None */
-    MV_INFERENCE_BACKEND_OPENCV,    /**< OpenCV */
-    MV_INFERENCE_BACKEND_TFLITE,    /**< TensorFlow-Lite */
-    MV_INFERENCE_BACKEND_ARMNN,     /**< ARMNN (Since 6.0) */
-    MV_INFERENCE_BACKEND_MLAPI,     /**< @deprecated ML Single API of NNStreamer (Deprecated since 7.0) */
-    MV_INFERENCE_BACKEND_ONE,       /**< On-device Neural Engine (Since 6.0) */
-    MV_INFERENCE_BACKEND_NNTRAINER, /**< NNTrainer (Since 7.0) */
-    MV_INFERENCE_BACKEND_SNPE,      /**< SNPE Engine (Since 7.0) */
-    MV_INFERENCE_BACKEND_MAX        /**< @deprecated Backend MAX (Deprecated since 7.0) */
+typedef enum
+{
+       MV_INFERENCE_BACKEND_NONE = -1, /**< None */
+       MV_INFERENCE_BACKEND_OPENCV, /**< OpenCV */
+       MV_INFERENCE_BACKEND_TFLITE, /**< TensorFlow-Lite */
+       MV_INFERENCE_BACKEND_ARMNN, /**< ARMNN (Since 6.0) */
+       MV_INFERENCE_BACKEND_MLAPI, /**< @deprecated ML Single API of NNStreamer (Deprecated since 7.0) */
+       MV_INFERENCE_BACKEND_ONE, /**< On-device Neural Engine (Since 6.0) */
+       MV_INFERENCE_BACKEND_NNTRAINER, /**< NNTrainer (Since 7.0) */
+       MV_INFERENCE_BACKEND_SNPE, /**< SNPE Engine (Since 7.0) */
+       MV_INFERENCE_BACKEND_MAX /**< @deprecated Backend MAX (Deprecated since 7.0) */
 } mv_inference_backend_type_e;
 
 /**
@@ -77,12 +78,13 @@ typedef enum {
  * @since_tizen 5.5
  *
  */
-typedef enum {
-    MV_INFERENCE_TARGET_NONE = -1,     /**< None */
-    MV_INFERENCE_TARGET_CPU,           /**< CPU */
-    MV_INFERENCE_TARGET_GPU,           /**< GPU*/
-    MV_INFERENCE_TARGET_CUSTOM,                /**< CUSTOM*/
-    MV_INFERENCE_TARGET_MAX                    /**< Target MAX */
+typedef enum
+{
+       MV_INFERENCE_TARGET_NONE = -1, /**< None */
+       MV_INFERENCE_TARGET_CPU, /**< CPU */
+       MV_INFERENCE_TARGET_GPU, /**< GPU*/
+       MV_INFERENCE_TARGET_CUSTOM, /**< CUSTOM*/
+       MV_INFERENCE_TARGET_MAX /**< Target MAX */
 } mv_inference_target_type_e;
 
 /**
@@ -91,12 +93,13 @@ typedef enum {
  * @since_tizen 6.0
  *
  */
-typedef enum {
-    MV_INFERENCE_TARGET_DEVICE_NONE = 0,               /**< None */
-    MV_INFERENCE_TARGET_DEVICE_CPU = 1 << 0,   /**< CPU */
-    MV_INFERENCE_TARGET_DEVICE_GPU = 1 << 1,   /**< GPU*/
-    MV_INFERENCE_TARGET_DEVICE_CUSTOM = 1 << 2,        /**< CUSTOM*/
-    MV_INFERENCE_TARGET_DEVICE_MAX = 1 << 3            /**< Target MAX */
+typedef enum
+{
+       MV_INFERENCE_TARGET_DEVICE_NONE = 0, /**< None */
+       MV_INFERENCE_TARGET_DEVICE_CPU = 1 << 0, /**< CPU */
+       MV_INFERENCE_TARGET_DEVICE_GPU = 1 << 1, /**< GPU*/
+       MV_INFERENCE_TARGET_DEVICE_CUSTOM = 1 << 2, /**< CUSTOM*/
+       MV_INFERENCE_TARGET_DEVICE_MAX = 1 << 3 /**< Target MAX */
 } mv_inference_target_device_e;
 
 /**
@@ -105,9 +108,10 @@ typedef enum {
  * @since_tizen 6.0
  *
  */
-typedef enum {
-       MV_INFERENCE_DATA_FLOAT32 = 0,  /**< Data type of a given pre-trained model is float. */
-       MV_INFERENCE_DATA_UINT8                 /**< Data type of a given pre-trained model is unsigned char. */
+typedef enum
+{
+       MV_INFERENCE_DATA_FLOAT32 = 0, /**< Data type of a given pre-trained model is float. */
+       MV_INFERENCE_DATA_UINT8 /**< Data type of a given pre-trained model is unsigned char. */
 } mv_inference_data_type_e;
 
 /**
@@ -116,23 +120,24 @@ typedef enum {
   * @since_tizen 6.0
   *
   */
-typedef enum {
-    MV_INFERENCE_HUMAN_POSE_HEAD = 1,       /**< Head of human pose */
-    MV_INFERENCE_HUMAN_POSE_NECK,           /**< Neck of human pose */
-    MV_INFERENCE_HUMAN_POSE_THORAX,         /**< Thorax of human pose */
-    MV_INFERENCE_HUMAN_POSE_RIGHT_SHOULDER, /**< Right shoulder of human pose */
-    MV_INFERENCE_HUMAN_POSE_RIGHT_ELBOW,    /**< Right elbow of human pose */
-    MV_INFERENCE_HUMAN_POSE_RIGHT_WRIST,    /**< Right wrist of human pose */
-    MV_INFERENCE_HUMAN_POSE_LEFT_SHOULDER,  /**< Left shoulder of human pose */
-    MV_INFERENCE_HUMAN_POSE_LEFT_ELBOW,     /**< Left elbow of human pose */
-    MV_INFERENCE_HUMAN_POSE_LEFT_WRIST,     /**< Left wrist of human pose */
-    MV_INFERENCE_HUMAN_POSE_PELVIS,         /**< Pelvis of human pose */
-    MV_INFERENCE_HUMAN_POSE_RIGHT_HIP,      /**< Right hip of human pose */
-    MV_INFERENCE_HUMAN_POSE_RIGHT_KNEE,     /**< Right knee of human pose */
-    MV_INFERENCE_HUMAN_POSE_RIGHT_ANKLE,    /**< Right ankle of human pose */
-    MV_INFERENCE_HUMAN_POSE_LEFT_HIP,       /**< Left hip of human pose */
-    MV_INFERENCE_HUMAN_POSE_LEFT_KNEE,      /**< Left knee of human pose */
-    MV_INFERENCE_HUMAN_POSE_LEFT_ANKLE      /**< Left ankle of human pose */
+typedef enum
+{
+       MV_INFERENCE_HUMAN_POSE_HEAD = 1, /**< Head of human pose */
+       MV_INFERENCE_HUMAN_POSE_NECK, /**< Neck of human pose */
+       MV_INFERENCE_HUMAN_POSE_THORAX, /**< Thorax of human pose */
+       MV_INFERENCE_HUMAN_POSE_RIGHT_SHOULDER, /**< Right shoulder of human pose */
+       MV_INFERENCE_HUMAN_POSE_RIGHT_ELBOW, /**< Right elbow of human pose */
+       MV_INFERENCE_HUMAN_POSE_RIGHT_WRIST, /**< Right wrist of human pose */
+       MV_INFERENCE_HUMAN_POSE_LEFT_SHOULDER, /**< Left shoulder of human pose */
+       MV_INFERENCE_HUMAN_POSE_LEFT_ELBOW, /**< Left elbow of human pose */
+       MV_INFERENCE_HUMAN_POSE_LEFT_WRIST, /**< Left wrist of human pose */
+       MV_INFERENCE_HUMAN_POSE_PELVIS, /**< Pelvis of human pose */
+       MV_INFERENCE_HUMAN_POSE_RIGHT_HIP, /**< Right hip of human pose */
+       MV_INFERENCE_HUMAN_POSE_RIGHT_KNEE, /**< Right knee of human pose */
+       MV_INFERENCE_HUMAN_POSE_RIGHT_ANKLE, /**< Right ankle of human pose */
+       MV_INFERENCE_HUMAN_POSE_LEFT_HIP, /**< Left hip of human pose */
+       MV_INFERENCE_HUMAN_POSE_LEFT_KNEE, /**< Left knee of human pose */
+       MV_INFERENCE_HUMAN_POSE_LEFT_ANKLE /**< Left ankle of human pose */
 } mv_inference_human_pose_landmark_e;
 
 /**
@@ -141,13 +146,14 @@ typedef enum {
   * @since_tizen 6.0
   *
   */
-typedef enum {
-    MV_INFERENCE_HUMAN_BODY_PART_HEAD = 1 << 0,         /**< HEAD, NECK, and THORAX */
-    MV_INFERENCE_HUMAN_BODY_PART_ARM_RIGHT = 1 << 1,    /**< RIGHT SHOULDER, ELBOW, and WRIST */
-    MV_INFERENCE_HUMAN_BODY_PART_ARM_LEFT = 1 << 2,     /**< LEFT SHOULDER, ELBOW, and WRIST */
-    MV_INFERENCE_HUMAN_BODY_PART_BODY = 1 << 3,         /**< THORAX, PELVIS, RIGHT HIP, and LEFT HIP */
-    MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT = 1 << 4,    /**< RIGHT HIP, KNEE, and ANKLE */
-    MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT = 1 << 5      /**< LEFT HIP, KNEE, and ANKLE */
+typedef enum
+{
+       MV_INFERENCE_HUMAN_BODY_PART_HEAD = 1 << 0, /**< HEAD, NECK, and THORAX */
+       MV_INFERENCE_HUMAN_BODY_PART_ARM_RIGHT = 1 << 1, /**< RIGHT SHOULDER, ELBOW, and WRIST */
+       MV_INFERENCE_HUMAN_BODY_PART_ARM_LEFT = 1 << 2, /**< LEFT SHOULDER, ELBOW, and WRIST */
+       MV_INFERENCE_HUMAN_BODY_PART_BODY = 1 << 3, /**< THORAX, PELVIS, RIGHT HIP, and LEFT HIP */
+       MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT = 1 << 4, /**< RIGHT HIP, KNEE, and ANKLE */
+       MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT = 1 << 5 /**< LEFT HIP, KNEE, and ANKLE */
 } mv_inference_human_body_part_e;
 
 /**
index 94aa31c..49eb3aa 100644 (file)
@@ -30,44 +30,35 @@ extern "C" {
 
 #define LOG_TAG "TIZEN_MEDIA_VISION"
 
-#define MEDIA_VISION_FUNCTION_ENTER() \
-               LOGI("[%s] <ENTER>", __FUNCTION__)
+#define MEDIA_VISION_FUNCTION_ENTER() LOGI("[%s] <ENTER>", __FUNCTION__)
 
-#define MEDIA_VISION_FUNCTION_LEAVE() \
-               LOGI("[%s] <LEAVE>", __FUNCTION__)
+#define MEDIA_VISION_FUNCTION_LEAVE() LOGI("[%s] <LEAVE>", __FUNCTION__)
 
-#define MEDIA_VISION_ASSERT(function, msg) \
-               do { \
-                       int error_code = function; \
-                       if (error_code != MEDIA_VISION_ERROR_NONE) { \
-                               LOGE("%s(0x%08x)", msg, error_code); \
-                               return error_code; \
-                       } \
-               } \
-               while (0)
+#define MEDIA_VISION_ASSERT(function, msg)           \
+       do {                                             \
+               int error_code = function;                   \
+               if (error_code != MEDIA_VISION_ERROR_NONE) { \
+                       LOGE("%s(0x%08x)", msg, error_code);     \
+                       return error_code;                       \
+               }                                            \
+       } while (0)
 
-#define MEDIA_VISION_CHECK_CONDITION(condition, error, msg) \
-               do { \
-                       if (!(condition)) { \
-                               LOGE("[%s] %s(0x%08x)", __FUNCTION__, msg, error); \
-                               return error; \
-                       } \
-               } \
-               while (0)
+#define MEDIA_VISION_CHECK_CONDITION(condition, error, msg)    \
+       do {                                                       \
+               if (!(condition)) {                                    \
+                       LOGE("[%s] %s(0x%08x)", __FUNCTION__, msg, error); \
+                       return error;                                      \
+               }                                                      \
+       } while (0)
 
 #define MEDIA_VISION_INSTANCE_CHECK(arg) \
-               MEDIA_VISION_CHECK_CONDITION((arg), \
-                                                                       MEDIA_VISION_ERROR_INVALID_PARAMETER, \
-                                                                       "MEDIA_VISION_ERROR_INVALID_PARAMETER")
+       MEDIA_VISION_CHECK_CONDITION((arg), MEDIA_VISION_ERROR_INVALID_PARAMETER, "MEDIA_VISION_ERROR_INVALID_PARAMETER")
 
 #define MEDIA_VISION_NULL_ARG_CHECK(arg) \
-               MEDIA_VISION_CHECK_CONDITION((arg), \
-                                                                       MEDIA_VISION_ERROR_INVALID_PARAMETER, \
-                                                                       "MEDIA_VISION_ERROR_INVALID_PARAMETER")
+       MEDIA_VISION_CHECK_CONDITION((arg), MEDIA_VISION_ERROR_INVALID_PARAMETER, "MEDIA_VISION_ERROR_INVALID_PARAMETER")
 
 #define MEDIA_VISION_SUPPORT_CHECK(arg) \
-               MEDIA_VISION_CHECK_CONDITION((arg), MEDIA_VISION_ERROR_NOT_SUPPORTED, \
-                                                                       "MEDIA_VISION_ERROR_NOT_SUPPORTED")
+       MEDIA_VISION_CHECK_CONDITION((arg), MEDIA_VISION_ERROR_NOT_SUPPORTED, "MEDIA_VISION_ERROR_NOT_SUPPORTED")
 
 bool _mv_check_system_info_feature_supported(void);
 bool _mv_barcode_detect_check_system_info_feature_supported(void);
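These guards are what the C entry points in this patch open with; a hypothetical function to show the intended order (the do { ... } while (0) wrapper keeps each macro statement-safe after a bare if):

#include <mv_common.h>
#include <mv_private.h>

/* Hypothetical entry point; mv_example_api is not a real Media Vision call. */
int mv_example_api(mv_source_h source)
{
	MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
	MEDIA_VISION_INSTANCE_CHECK(source); /* NULL -> MEDIA_VISION_ERROR_INVALID_PARAMETER */

	MEDIA_VISION_FUNCTION_ENTER();
	/* ... actual work ... */
	MEDIA_VISION_FUNCTION_LEAVE();

	return MEDIA_VISION_ERROR_NONE;
}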
index 180f848..b13d31c 100644 (file)
@@ -102,8 +102,7 @@ int mv_roi_tracker_destroy(mv_roi_tracker_h handle);
  * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
  *                                               in @a engine_config
  */
-int mv_roi_tracker_configure(mv_roi_tracker_h handle,
-            mv_engine_config_h engine_config);
+int mv_roi_tracker_configure(mv_roi_tracker_h handle, mv_engine_config_h engine_config);
 
 /**
  * @brief Prepares roi tracker.
@@ -143,11 +142,7 @@ int mv_roi_tracker_prepare(mv_roi_tracker_h handle, int x, int y, int width, int
  *
  * @see mv_roi_tracker_perform()
  */
-typedef void (*mv_roi_tracker_tracked_cb)(
-       mv_source_h source,
-       mv_rectangle_s roi,
-       void *user_data);
-
+typedef void (*mv_roi_tracker_tracked_cb)(mv_source_h source, mv_rectangle_s roi, void *user_data);
 
 /**
  * @brief Tracks with a given tracker on the @a source.
@@ -173,11 +168,8 @@ typedef void (*mv_roi_tracker_tracked_cb)(
  *
  * @pre Create a new tracker handle by calling @ref mv_roi_tracker_create()
  */
-int mv_roi_tracker_perform(
-       mv_roi_tracker_h handle,
-       mv_source_h source,
-       mv_roi_tracker_tracked_cb tracked_cb,
-       void *user_data);
+int mv_roi_tracker_perform(mv_roi_tracker_h handle, mv_source_h source, mv_roi_tracker_tracked_cb tracked_cb,
+                                                  void *user_data);
 
 /**
  * @}
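A sketch of the ROI tracker flow; mv_roi_tracker_create() is declared outside this excerpt, so its use here is an assumption, and error handling is elided:

#include <mv_roi_tracker.h>

static void on_tracked(mv_source_h source, mv_rectangle_s roi, void *user_data)
{
	/* roi is the region found in this frame */
}

static int track(mv_roi_tracker_h tracker, mv_source_h frame)
{
	return mv_roi_tracker_perform(tracker, frame, on_tracked, NULL);
}

/* setup, once per target (see the hunks above):
 *   mv_roi_tracker_create(&tracker);
 *   mv_roi_tracker_prepare(tracker, x, y, width, height);
 */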
index f5d8c54..11c09e2 100644 (file)
@@ -20,8 +20,8 @@
 
 #include <mv_common.h>
 
-#define MAX_LABEL_LEN  128
-#define MAX_LABEL_CNT  100
+#define MAX_LABEL_LEN 128
+#define MAX_LABEL_CNT 100
 
 #ifdef __cplusplus
 extern "C" {
@@ -43,12 +43,13 @@ extern "C" {
  * @details Contains roi tracker result such as coordinates.
  * @since_tizen 7.0
  */
-typedef struct {
-       int x;                          /**< Left-top x coordinate of tracked region */
-       int y;                          /**< Left-top y coordinate of tracked region */
-       int width;                      /**< Width of tracked region */
-       int height;                     /**< Height of tracked region */
-       bool initialized;       /**< flag that struct is initialized or not */
+typedef struct
+{
+       int x; /**< Left-top x coordinate of tracked region */
+       int y; /**< Left-top y coordinate of tracked region */
+       int width; /**< Width of tracked region */
+       int height; /**< Height of tracked region */
+       bool initialized; /**< flag that struct is initialized or not */
 } mv_roi_tracker_result_s;
 
 /**
@@ -57,16 +58,16 @@ typedef struct {
  */
 typedef void *mv_roi_tracker_h;
 
-
 /**
  * @brief Enumeration for ROI tracker type.
  * @since_tizen 7.0
  */
-typedef enum {
-       MV_ROI_TRACKER_TYPE_NONE = 0,   /**< None */
-       MV_ROI_TRACKER_TYPE_ACCURACY,   /**< Tracker type focused on accuracy */
-       MV_ROI_TRACKER_TYPE_BALANCE,    /**< Tracker type focused on balance */
-       MV_ROI_TRACKER_TYPE_SPEED               /**< Tracker type focused on speed */
+typedef enum
+{
+       MV_ROI_TRACKER_TYPE_NONE = 0, /**< None */
+       MV_ROI_TRACKER_TYPE_ACCURACY, /**< Tracker type focused on accuracy */
+       MV_ROI_TRACKER_TYPE_BALANCE, /**< Tracker type focused on balance */
+       MV_ROI_TRACKER_TYPE_SPEED /**< Tracker type focused on speed */
 } mv_roi_tracker_type_e;
 /**
  * @}
index 90b4f7d..3deb6e9 100644 (file)
@@ -48,8 +48,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED \
-               "MV_SURVEILLANCE_EVENT_MOVEMENT_DETECTED"
+#define MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED "MV_SURVEILLANCE_EVENT_MOVEMENT_DETECTED"
 
 /**
  * @brief Name of the event result value that contains number of regions where
@@ -90,8 +89,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS \
-               "NUMBER_OF_MOVEMENT_REGIONS"
+#define MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS "NUMBER_OF_MOVEMENT_REGIONS"
 
 /**
  * @brief Name of the event result value that contains rectangular regions where
@@ -175,8 +173,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED \
-               "MV_SURVEILLANCE_EVENT_PERSON_APPEARED_DISAPEARED"
+#define MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED "MV_SURVEILLANCE_EVENT_PERSON_APPEARED_DISAPEARED"
 
 /**
  * @brief Name of the event result value that contains number
@@ -216,8 +213,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER \
-               "NUMBER_OF_APPEARED_PERSONS"
+#define MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER "NUMBER_OF_APPEARED_PERSONS"
 
 /**
  * @brief Name of the event result value that contains number
@@ -257,8 +253,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER \
-               "NUMBER_OF_DISAPPEARED_PERSONS"
+#define MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER "NUMBER_OF_DISAPPEARED_PERSONS"
 
 /**
  * @brief Name of the event result value that contains number
@@ -298,8 +293,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER \
-               "NUMBER_OF_TRACKED_PERSONS"
+#define MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER "NUMBER_OF_TRACKED_PERSONS"
 
 /**
  * @brief Name of the event result value that contains a set of rectangular
@@ -351,8 +345,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS \
-               "APPEARED_PERSONS_LOCATIONS"
+#define MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS "APPEARED_PERSONS_LOCATIONS"
 
 /**
  * @brief Name of the event result value that contains a set of rectangular
@@ -404,8 +397,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS \
-               "DISAPPEARED_PERSONS_LOCATIONS"
+#define MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS "DISAPPEARED_PERSONS_LOCATIONS"
 
 /**
  * @brief Name of the event result value that contains a set of rectangular
@@ -456,8 +448,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS \
-               "TRACKED_PERSONS_LOCATIONS"
+#define MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS "TRACKED_PERSONS_LOCATIONS"
 
 /**
  * @brief Name of the person recognition event type.
@@ -487,8 +478,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED \
-               "MV_SURVEILLANCE_EVENT_PERSON_RECOGNIZED"
+#define MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED "MV_SURVEILLANCE_EVENT_PERSON_RECOGNIZED"
 
 /**
  * @brief Name of the event result value that contains number of locations where
@@ -528,8 +518,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER \
-               "NUMBER_OF_PERSONS"
+#define MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER "NUMBER_OF_PERSONS"
 
 /**
  * @brief Name of the event result value that contains a set of rectangular
@@ -579,8 +568,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS \
-               "PERSONS_LOCATIONS"
+#define MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS "PERSONS_LOCATIONS"
 
 /**
  * @brief Name of the event result value that contains a set of labels that
@@ -629,8 +617,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS \
-               "PERSONS_LABELS"
+#define MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS "PERSONS_LABELS"
 
 /**
  * @brief Name of the event result value that contains a set of confidence
@@ -679,8 +666,7 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-#define MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES \
-               "PERSONS_CONFIDENCES"
+#define MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES "PERSONS_CONFIDENCES"
 
 /**
  * @}
@@ -702,8 +688,7 @@ extern "C" {
  * @see mv_engine_config_set_string_attribute()
  * @see mv_engine_config_get_string_attribute()
  */
-#define MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH \
-               "MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH"
+#define MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH "MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH"
 
 /**
  * @brief Defines MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESOLD to set movement
@@ -719,8 +704,7 @@ extern "C" {
  * @see mv_engine_config_set_int_attribute()
  * @see mv_engine_config_get_int_attribute()
  */
-#define MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD \
-               "MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD"
+#define MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD "MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD"
 
 /**
  * @brief Defines MV_SURVEILLANCE_SKIP_FRAMES_COUNT to set how many frames
@@ -789,12 +773,9 @@ typedef void *mv_surveillance_result_h;
  * @see mv_surveillance_subscribe_event_trigger()
  * @see mv_surveillance_unsubscribe_event_trigger()
  */
-typedef void (*mv_surveillance_event_occurred_cb)(
-               mv_surveillance_event_trigger_h trigger,
-               mv_source_h source,
-               int video_stream_id,
-               mv_surveillance_result_h event_result,
-               void *user_data);
+typedef void (*mv_surveillance_event_occurred_cb)(mv_surveillance_event_trigger_h trigger, mv_source_h source,
+                                                                                                 int video_stream_id, mv_surveillance_result_h event_result,
+                                                                                                 void *user_data);
 
 /**
  * @brief Called to get the information once for each supported event type.
@@ -811,9 +792,7 @@ typedef void (*mv_surveillance_event_occurred_cb)(
  * @pre mv_surveillance_foreach_supported_event_type() will invoke this callback
  * @see mv_surveillance_foreach_supported_event_type()
  */
-typedef bool (*mv_surveillance_event_type_cb)(
-               const char *event_type,
-               void *user_data);
+typedef bool (*mv_surveillance_event_type_cb)(const char *event_type, void *user_data);
 
 /**
  * @brief Called to get the result name from the triggered event.
@@ -834,9 +813,7 @@ typedef bool (*mv_surveillance_event_type_cb)(
  *      callback
  * @see mv_surveillance_foreach_event_result_name()
  */
-typedef bool (*mv_surveillance_event_result_name_cb)(
-               const char *name,
-               void *user_data);
+typedef bool (*mv_surveillance_event_result_name_cb)(const char *name, void *user_data);
 
 /**
  * @brief Creates surveillance event trigger handle.
@@ -858,9 +835,7 @@ typedef bool (*mv_surveillance_event_result_name_cb)(
  * @see mv_surveillance_event_trigger_destroy()
  * @see mv_surveillance_foreach_supported_event_type()
  */
-int mv_surveillance_event_trigger_create(
-               const char *event_type,
-               mv_surveillance_event_trigger_h *trigger);
+int mv_surveillance_event_trigger_create(const char *event_type, mv_surveillance_event_trigger_h *trigger);
 
 /**
  * @brief Destroys the surveillance event trigger handle and releases all its
@@ -875,8 +850,7 @@ int mv_surveillance_event_trigger_create(
  *
  * @see mv_surveillance_event_trigger_create()
  */
-int mv_surveillance_event_trigger_destroy(
-               mv_surveillance_event_trigger_h trigger);
+int mv_surveillance_event_trigger_destroy(mv_surveillance_event_trigger_h trigger);
 
 /**
  * @brief Gets the surveillance event trigger type as character string.
@@ -894,9 +868,7 @@ int mv_surveillance_event_trigger_destroy(
  * @pre Event trigger has to be created by
  *      mv_surveillance_event_trigger_create() function
  */
-int mv_surveillance_get_event_trigger_type(
-               mv_surveillance_event_trigger_h trigger,
-               char **event_type);
+int mv_surveillance_get_event_trigger_type(mv_surveillance_event_trigger_h trigger, char **event_type);
 
 /**
  * @brief Sets ROI (Region Of Interest) to the event trigger.
@@ -924,10 +896,8 @@ int mv_surveillance_get_event_trigger_type(
  * @see mv_surveillance_event_trigger_h
  * @see mv_surveillance_get_event_trigger_roi()
  */
-int mv_surveillance_set_event_trigger_roi(
-               mv_surveillance_event_trigger_h trigger,
-               int number_of_points,
-               mv_point_s *roi);
+int mv_surveillance_set_event_trigger_roi(mv_surveillance_event_trigger_h trigger, int number_of_points,
+                                                                                 mv_point_s *roi);
 
 /**
  * @brief Gets ROI (Region Of Interest) from the event trigger.
@@ -952,10 +922,8 @@ int mv_surveillance_set_event_trigger_roi(
  * @see mv_surveillance_event_trigger_h
  * @see mv_surveillance_set_event_trigger_roi()
  */
-int mv_surveillance_get_event_trigger_roi(
-               mv_surveillance_event_trigger_h trigger,
-               int *number_of_points,
-               mv_point_s **roi);
+int mv_surveillance_get_event_trigger_roi(mv_surveillance_event_trigger_h trigger, int *number_of_points,
+                                                                                 mv_point_s **roi);
 
 /**
  * @brief Subscribes @a trigger to process sources pushed from video identified
@@ -991,12 +959,9 @@ int mv_surveillance_get_event_trigger_roi(
  * @see mv_surveillance_unsubscribe_event_trigger()
  * @see mv_surveillance_push_source()
  */
-int mv_surveillance_subscribe_event_trigger(
-               mv_surveillance_event_trigger_h trigger,
-               int video_stream_id,
-               mv_engine_config_h engine_cfg,
-               mv_surveillance_event_occurred_cb callback,
-               void *user_data);
+int mv_surveillance_subscribe_event_trigger(mv_surveillance_event_trigger_h trigger, int video_stream_id,
+                                                                                       mv_engine_config_h engine_cfg, mv_surveillance_event_occurred_cb callback,
+                                                                                       void *user_data);
 
 /**
  * @brief Unsubscribes @a trigger from the event and stop calling @a video_stream_id.
@@ -1019,9 +984,7 @@ int mv_surveillance_subscribe_event_trigger(
  * @see mv_surveillance_event_trigger_h
  * @see mv_surveillance_subscribe_event_trigger()
  */
-int mv_surveillance_unsubscribe_event_trigger(
-               mv_surveillance_event_trigger_h trigger,
-               int video_stream_id);
+int mv_surveillance_unsubscribe_event_trigger(mv_surveillance_event_trigger_h trigger, int video_stream_id);
 
 /**
  * @brief Pushes source to the surveillance system to detect events.
@@ -1051,9 +1014,7 @@ int mv_surveillance_unsubscribe_event_trigger(
  * @see mv_surveillance_subscribe_event_trigger()
  * @see mv_surveillance_unsubscribe_event_trigger()
  */
-int mv_surveillance_push_source(
-               mv_source_h source,
-               int video_stream_id);
+int mv_surveillance_push_source(mv_source_h source, int video_stream_id);
 
 /**
  * @brief Starts traversing through list of supported event types.
@@ -1074,9 +1035,7 @@ int mv_surveillance_push_source(
  * @see mv_surveillance_event_type_cb()
  * @see mv_surveillance_foreach_event_result_name()
  */
-int mv_surveillance_foreach_supported_event_type(
-               mv_surveillance_event_type_cb callback,
-               void *user_data);
+int mv_surveillance_foreach_supported_event_type(mv_surveillance_event_type_cb callback, void *user_data);
 
 /**
  * @brief Starts traversing through list of supported event result value names.
@@ -1103,10 +1062,8 @@ int mv_surveillance_foreach_supported_event_type(
  * @see mv_surveillance_foreach_supported_event_type()
  * @see mv_surveillance_get_result_value()
  */
-int mv_surveillance_foreach_event_result_name(
-               const char *event_type,
-               mv_surveillance_event_result_name_cb callback,
-               void *user_data);
+int mv_surveillance_foreach_event_result_name(const char *event_type, mv_surveillance_event_result_name_cb callback,
+                                                                                         void *user_data);
 
 /**
  * @brief Gets result value.
@@ -1138,10 +1095,7 @@ int mv_surveillance_foreach_event_result_name(
  * @see mv_surveillance_foreach_supported_event_type()
  * @see mv_surveillance_foreach_event_result_name()
  */
-int mv_surveillance_get_result_value(
-               mv_surveillance_result_h result,
-               const char *name,
-               void *value);
+int mv_surveillance_get_result_value(mv_surveillance_result_h result, const char *name, void *value);
 
 /**
  * @}
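End to end, the trigger API reads as: create a trigger for an event type, subscribe it to a stream, then push frames. A movement-detection sketch, assuming the NUMBER_OF_MOVEMENT_REGIONS result is an int (error checks elided):

#include <mv_surveillance.h>

static void on_event(mv_surveillance_event_trigger_h trigger, mv_source_h source,
		     int video_stream_id, mv_surveillance_result_h event_result, void *user_data)
{
	int regions = 0;

	mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS, &regions);
	/* 'regions' movement areas were detected in this frame */
}

static int watch(int video_stream_id)
{
	mv_surveillance_event_trigger_h trigger = NULL;

	mv_surveillance_event_trigger_create(MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, &trigger);
	return mv_surveillance_subscribe_event_trigger(trigger, video_stream_id, NULL, on_event, NULL);
}

/* per frame: mv_surveillance_push_source(source, video_stream_id); */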
index 1097370..c64e1b5 100644 (file)
@@ -33,11 +33,12 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-typedef struct {
-       unsigned int trigger_id;     /**< Unique event trigger identifier */
-       char *event_type;            /**< Type of the event */
-       int number_of_roi_points;    /**< Number of ROI (Region of interest) points */
-       mv_point_s *roi;             /**< ROI points array */
+typedef struct
+{
+       unsigned int trigger_id; /**< Unique event trigger identifier */
+       char *event_type; /**< Type of the event */
+       int number_of_roi_points; /**< Number of ROI (Region of interest) points */
+       mv_point_s *roi; /**< ROI points array */
 } mv_surveillance_event_trigger_s;
 
 #ifdef __cplusplus
index 79c76c5..46c24a5 100644 (file)
 #include <stddef.h>
 #include <zbar.h>
 
-namespace MediaVision {
-namespace Barcode {
+namespace MediaVision
+{
+namespace Barcode
+{
 /**
  * @class    Barcode
  * @brief    Handle to barcode object.
  */
-class Barcode {
+class Barcode
+{
 public:
        /**
         * @brief   Barcode constructor.
@@ -38,7 +41,7 @@ public:
         * @param   [in] barObj zbar barcode handle
         *
         */
-       Barcode(const zbar::Symbol& barObj);
+       Barcode(const zbar::Symbol &barObj);
 
        /**
         * @brief  Barcode destructor.
@@ -72,7 +75,7 @@ public:
         * @return @c MEDIA_VISION_ERROR_NONE on success,
         *         otherwise a negative error value
         */
-       int calculateLocation(mv_quadrangle_s& location) const;
+       int calculateLocation(mv_quadrangle_s &location) const;
 
 private:
        const zbar::Symbol *__pBarcodeObj; ///< Pointer to zbar barcode handle
index 2d1392b..72fb8d0 100644 (file)
 #include <opencv2/core/core.hpp>
 #include <opencv2/imgproc/imgproc.hpp>
 
-namespace zbar {
-       class Image;
+namespace zbar
+{
+class Image;
 }
 
-namespace MediaVision {
-namespace Barcode {
-
+namespace MediaVision
+{
+namespace Barcode
+{
 /**
  * @brief    This function converts media vision image handle to zbar image handle.
  *
@@ -37,7 +39,7 @@ namespace Barcode {
  * @return @c MEDIA_VISION_ERROR_NONE on success,
            otherwise a negative error value
  */
-int convertSourceMV2Zbar(mv_source_h mvSource, zbar::Image& zbarSource);
+int convertSourceMV2Zbar(mv_source_h mvSource, zbar::Image &zbarSource);
 
 } /* Barcode */
 } /* MediaVision */
index 8631b36..413a6da 100644 (file)
@@ -52,12 +52,8 @@ extern "C" {
  *
  * @see mv_barcode_detected_cb()
  */
-int mv_barcode_detect_open(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s roi,
-               mv_barcode_detected_cb detect_cb,
-               void *user_data);
+int mv_barcode_detect_open(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s roi,
+                                                  mv_barcode_detected_cb detect_cb, void *user_data);
 
 #ifdef __cplusplus
 }
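For comparison with the open-layer signature above, a caller sketch; the mv_barcode_detected_cb parameter list is taken from mv_barcode_detect.h, which is not part of this excerpt, so treat it as an assumption:

#include <mv_barcode.h>

static void on_barcodes(mv_source_h source, mv_engine_config_h engine_cfg,
			const mv_quadrangle_s *barcode_locations, const char *messages[],
			const mv_barcode_type_e *types, int number_of_barcodes, void *user_data)
{
	/* messages[i] is the decoded payload of barcode i */
}

static int detect_whole_frame(mv_source_h source, int width, int height)
{
	mv_rectangle_s roi = { { 0, 0 }, width, height }; /* whole frame as ROI */

	/* NULL engine_cfg falls back to MV_BARCODE_DETECT_ATTR_TARGET_ALL (see the warning path below) */
	return mv_barcode_detect(source, NULL, roi, on_barcodes, NULL);
}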
index b6ef76c..7e61d70 100644 (file)
 
 #include <mv_private.h>
 
-namespace MediaVision {
-namespace Barcode {
-
-Barcode::Barcode(const zbar::Symbol& barObj):
-                               __pBarcodeObj(new zbar::Symbol(barObj))
+namespace MediaVision
+{
+namespace Barcode
+{
+Barcode::Barcode(const zbar::Symbol &barObj) : __pBarcodeObj(new zbar::Symbol(barObj))
 {
        ; /* NULL */
 }
@@ -92,7 +92,7 @@ mv_barcode_type_e Barcode::getType(void) const
        }
 }
 
-int Barcode::calculateLocation(mv_quadrangle_s& location) const
+int Barcode::calculateLocation(mv_quadrangle_s &location) const
 {
        const int numberOfVertexes = 4;
 
@@ -101,13 +101,12 @@ int Barcode::calculateLocation(mv_quadrangle_s& location) const
        /* polygon location should contain at least 4 points */
        if (locationPolygonSize < numberOfVertexes) {
                LOGE("Can't compute location of the barcode by %i"
-                               " points (less then %i).", locationPolygonSize,
-                               numberOfVertexes);
+                        " points (less then %i).",
+                        locationPolygonSize, numberOfVertexes);
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
        /* bounding quadrangle is computing by 4 marginal points */
-       mv_point_s first = {__pBarcodeObj->get_location_x(0),
-               __pBarcodeObj->get_location_y(0)};
+       mv_point_s first = { __pBarcodeObj->get_location_x(0), __pBarcodeObj->get_location_y(0) };
 
        int minX = first.x;
        int maxX = first.x;
@@ -115,8 +114,7 @@ int Barcode::calculateLocation(mv_quadrangle_s& location) const
        int maxY = first.y;
 
        for (int i = 0; i < locationPolygonSize; ++i) {
-               mv_point_s current = {__pBarcodeObj->get_location_x(i),
-                       __pBarcodeObj->get_location_y(i)};
+               mv_point_s current = { __pBarcodeObj->get_location_x(i), __pBarcodeObj->get_location_y(i) };
                if (current.x < minX) {
                        minX = current.x;
                } else if (current.x > maxX) {
@@ -141,10 +139,10 @@ int Barcode::calculateLocation(mv_quadrangle_s& location) const
        if (abs(minX - maxX) < 5 || abs(minY - maxY) < 5)
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
 
-       mv_point_s bottomLeft = {minX, maxY};
-       mv_point_s bottomRight = {maxX, maxY};
-       mv_point_s topRight = {maxX, minY};
-       mv_point_s topLeft = {minX, minY};
+       mv_point_s bottomLeft = { minX, maxY };
+       mv_point_s bottomRight = { maxX, maxY };
+       mv_point_s topRight = { maxX, minY };
+       mv_point_s topLeft = { minX, minY };
 
        location.points[0] = topLeft;
        location.points[1] = topRight;
index 807a92b..f002dcd 100644 (file)
 
 #include <zbar.h>
 
-namespace MediaVision {
-namespace Barcode {
-
+namespace MediaVision
+{
+namespace Barcode
+{
 // LCOV_EXCL_START
-int convertSourceMV2Zbar(mv_source_h mvSource, zbar::Image& zbarSource)
+int convertSourceMV2Zbar(mv_source_h mvSource, zbar::Image &zbarSource)
 {
        int err = MEDIA_VISION_ERROR_NONE;
        unsigned char *buffer = NULL;
@@ -37,11 +38,11 @@ int convertSourceMV2Zbar(mv_source_h mvSource, zbar::Image& zbarSource)
        err = mv_source_get_colorspace_c(mvSource, &colorspace);
        if (err != MEDIA_VISION_ERROR_NONE) {
                LOGE("Can't determine mv_source_h colorspace to convert"
-                               " to ZBar colorspace. Conversion failed");
+                        " to ZBar colorspace. Conversion failed");
                return err;
        }
 
-       switch(colorspace) {
+       switch (colorspace) {
        case MEDIA_VISION_COLORSPACE_Y800:
                zbarSource.set_format("Y800");
                break;
index c46d12a..aa36a95 100644 (file)
  * @brief This file contains the porting layer for Media Vision barcode module.
  */
 
-int mv_barcode_detect(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s roi,
-               mv_barcode_detected_cb detect_cb,
-               void *user_data)
+int mv_barcode_detect(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s roi,
+                                         mv_barcode_detected_cb detect_cb, void *user_data)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_barcode_detect_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -56,16 +52,14 @@ int mv_barcode_detect(
                return ret;
        }
 
-       if (roi.point.x < 0 || roi.point.y < 0 ||
-               (unsigned int)(roi.point.x + roi.width) > src_w ||
-               (unsigned int)(roi.point.y + roi.height) > src_h) {
+       if (roi.point.x < 0 || roi.point.y < 0 || (unsigned int) (roi.point.x + roi.width) > src_w ||
+               (unsigned int) (roi.point.y + roi.height) > src_h) {
                LOGE("roi is out of area on source");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
        /* Use open barcode detect functionality here. */
-       ret = mv_barcode_detect_open(
-                                       source, engine_cfg, roi, detect_cb, user_data);
+       ret = mv_barcode_detect_open(source, engine_cfg, roi, detect_cb, user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
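For reference, a minimal caller sketch for the entry point above; the 640x480 ROI and the handler body are illustrative, not part of this patch:

#include <stdio.h>
#include <mv_barcode_detect.h>

static void on_detected(mv_source_h source, mv_engine_config_h engine_cfg, const mv_quadrangle_s *locations,
                        const char *messages[], const mv_barcode_type_e *types, int number_of_barcodes,
                        void *user_data)
{
        for (int i = 0; i < number_of_barcodes; ++i)
                printf("barcode[%d]: %s (type %d)\n", i, messages[i], types[i]);
}

static int detect_whole_frame(mv_source_h source)
{
        /* The ROI must pass the bounds check above: non-negative origin, fully inside the source. */
        mv_rectangle_s roi = { { 0, 0 }, 640, 480 };
        return mv_barcode_detect(source, NULL, roi, on_detected, NULL);
}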
diff --git a/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp b/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp
index c2a7e83..d375e53 100644
 
 using namespace MediaVision::Barcode;
 
-int mv_barcode_detect_open(
-       mv_source_h source,
-       mv_engine_config_h engine_cfg,
-       mv_rectangle_s roi,
-       mv_barcode_detected_cb detect_cb,
-       void *user_data)
+int mv_barcode_detect_open(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s roi,
+                                                  mv_barcode_detected_cb detect_cb, void *user_data)
 {
        if (!source || !detect_cb)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -41,13 +37,10 @@ int mv_barcode_detect_open(
        zbar::ImageScanner scanner;
 
        int target_val;
-       int err = mv_engine_config_get_int_attribute(
-                       engine_cfg,
-                       "MV_BARCODE_DETECT_ATTR_TARGET",
-               &target_val);
+       int err = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_DETECT_ATTR_TARGET", &target_val);
        if (err != MEDIA_VISION_ERROR_NONE || engine_cfg == NULL) {
                LOGW("mv_engine_config_get_int_attribute failed."
-                       "MV_BARCODE_DETECT_ATTR_TARGET_ALL is used as default");
+                        "MV_BARCODE_DETECT_ATTR_TARGET_ALL is used as default");
                /* Default value */
                target_val = 0;
        }
@@ -78,7 +71,7 @@ int mv_barcode_detect_open(
                scanner.set_config(zbar::ZBAR_DATABAR_EXP, zbar::ZBAR_CFG_ENABLE, 1);
                scanner.set_config(zbar::ZBAR_QRCODE, zbar::ZBAR_CFG_ENABLE, 1);
                break;
-// LCOV_EXCL_START
+               // LCOV_EXCL_START
        case 1:
                scanner.set_config(zbar::ZBAR_UPCA, zbar::ZBAR_CFG_ENABLE, 1);
                scanner.set_config(zbar::ZBAR_UPCE, zbar::ZBAR_CFG_ENABLE, 1);
@@ -97,7 +90,7 @@ int mv_barcode_detect_open(
        case 2:
                scanner.set_config(zbar::ZBAR_QRCODE, zbar::ZBAR_CFG_ENABLE, 1);
                break;
-// LCOV_EXCL_STOP
+               // LCOV_EXCL_STOP
        default:
                LOGW("Unavailable target value %d", target_val);
        }
@@ -107,37 +100,27 @@ int mv_barcode_detect_open(
        int rotateDirection = 0;
        bool isEnhancementMode = false;
        if (engine_cfg != NULL) {
-               err = mv_engine_config_get_int_attribute(
-                               engine_cfg,
-                               MV_BARCODE_DETECT_ATTR_ROTATION_DEGREES,
-                               &rotateDegree);
+               err = mv_engine_config_get_int_attribute(engine_cfg, MV_BARCODE_DETECT_ATTR_ROTATION_DEGREES, &rotateDegree);
                if (err != MEDIA_VISION_ERROR_NONE) {
                        LOGE("mv_engine_config_get_int_attribute failed to get MV_BARCODE_DETECT_ATTR_ROTATE_DEGREES");
                        return err;
                }
 
-               err = mv_engine_config_get_int_attribute(
-                               engine_cfg,
-                               MV_BARCODE_DETECT_ATTR_ROTATION_COUNT,
-                               &rotateNumber);
+               err = mv_engine_config_get_int_attribute(engine_cfg, MV_BARCODE_DETECT_ATTR_ROTATION_COUNT, &rotateNumber);
                if (err != MEDIA_VISION_ERROR_NONE) {
                        LOGE("mv_engine_config_get_int_attribute failed to get MV_BARCODE_DETECT_ATTR_ROTATE_COUNT");
                        return err;
                }
 
-               err = mv_engine_config_get_int_attribute(
-                               engine_cfg,
-                               MV_BARCODE_DETECT_ATTR_ROTATION_DIRECTION,
-                               &rotateDirection);
+               err = mv_engine_config_get_int_attribute(engine_cfg, MV_BARCODE_DETECT_ATTR_ROTATION_DIRECTION,
+                                                                                                &rotateDirection);
                if (err != MEDIA_VISION_ERROR_NONE) {
                        LOGE("mv_engine_config_get_int_attribute failed to get MV_BARCODE_DETECT_ATTR_ROTATE_DIRECTION");
                        return err;
                }
 
-               err = mv_engine_config_get_bool_attribute(
-                               engine_cfg,
-                               MV_BARCODE_DETECT_ATTR_USE_ENHANCEMENT,
-                               &isEnhancementMode);
+               err = mv_engine_config_get_bool_attribute(engine_cfg, MV_BARCODE_DETECT_ATTR_USE_ENHANCEMENT,
+                                                                                                 &isEnhancementMode);
                if (err != MEDIA_VISION_ERROR_NONE) {
                        LOGE("mv_engine_config_get_bool_attribute failed to get MV_BARCODE_DETECT_ATTR_USE_ENHANCEMENT");
                        return err;
@@ -182,14 +165,14 @@ int mv_barcode_detect_open(
                } else if (rotateDirection == MV_BARCODE_DETECT_ATTR_ROTATION_COUNTER_CLOCKWISE) {
                        degree = rotateDegree * i;
                } else { // MV_BARCODE_DETECT_ATTR_ROTATE_ALL
-                       if (i%2) {
-                               degree = -1 * rotateDegree * ((i+1)/2);
+                       if (i % 2) {
+                               degree = -1 * rotateDegree * ((i + 1) / 2);
                        } else {
-                               degree =  rotateDegree * (i/2);
+                               degree = rotateDegree * (i / 2);
                        }
                }
 
-               rotMat = cv::getRotationMatrix2D(cv::Point((roi.width/2), (roi.height/2)), degree, 1.0);
+               rotMat = cv::getRotationMatrix2D(cv::Point((roi.width / 2), (roi.height / 2)), degree, 1.0);
                warpAffine(rawBuffer, rotBuffer, rotMat, rawBuffer.size());
 
                _image.set_format("Y800");
@@ -211,9 +194,7 @@ int mv_barcode_detect_open(
                barcodeTypes.clear();
                barcodeLocations.clear();
                bool isDetected = true;
-               for (zbar::SymbolIterator symbol = _image.symbol_begin();
-                       symbol != _image.symbol_end(); ++symbol) {
-
+               for (zbar::SymbolIterator symbol = _image.symbol_begin(); symbol != _image.symbol_end(); ++symbol) {
                        Barcode curBarcode(*symbol);
                        mv_quadrangle_s location;
                        int err = curBarcode.calculateLocation(location);
@@ -229,12 +210,11 @@ int mv_barcode_detect_open(
 
                if (isDetected) {
                        LOGI("Call the detect callback for %d detected barcodes", numberOfBarcodes);
-                       const char **messagesArray = new const char*[numberOfBarcodes];
+                       const char **messagesArray = new const char *[numberOfBarcodes];
                        mv_barcode_type_e *types = new mv_barcode_type_e[numberOfBarcodes];
                        mv_quadrangle_s *locations = new mv_quadrangle_s[numberOfBarcodes];
 
                        for (int i = 0; i < numberOfBarcodes; ++i) {
-
                                size_t messageLength = barcodeMessages[i].size();
                                char *curMessage = new char[messageLength + 1];
                                barcodeMessages[i].copy(curMessage, messageLength);
@@ -245,8 +225,7 @@ int mv_barcode_detect_open(
                                locations[i] = barcodeLocations[i];
                                LOGI("%d: barcode with %s with type %d", i, messagesArray[i], types[i]);
                        }
-                       detect_cb(source, engine_cfg, locations, messagesArray, types,
-                                       numberOfBarcodes, user_data);
+                       detect_cb(source, engine_cfg, locations, messagesArray, types, numberOfBarcodes, user_data);
                        LOGI("Clean the memory from barcodes messages and types");
                        for (int j = 0; j < numberOfBarcodes; ++j)
                                delete[] messagesArray[j];
@@ -258,8 +237,7 @@ int mv_barcode_detect_open(
                }
        }
        LOGI("Call the detect callback for 0 detected barcodes");
-       detect_cb(source, engine_cfg, NULL, NULL,
-                       NULL, 0, user_data);
+       detect_cb(source, engine_cfg, NULL, NULL, NULL, 0, user_data);
 
        return MEDIA_VISION_ERROR_NONE;
 }
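A hedged configuration sketch for the rotation and enhancement attributes read above, using the public engine-config setters (values illustrative, error checks elided; the attribute macros are assumed to be exposed by mv_barcode_detect.h):

#include <mv_common.h>
#include <mv_barcode_detect.h>

static int make_rotation_config(mv_engine_config_h *cfg)
{
        mv_create_engine_config(cfg);
        mv_engine_config_set_int_attribute(*cfg, MV_BARCODE_DETECT_ATTR_ROTATION_DEGREES, 15);
        mv_engine_config_set_int_attribute(*cfg, MV_BARCODE_DETECT_ATTR_ROTATION_COUNT, 6);
        mv_engine_config_set_int_attribute(*cfg, MV_BARCODE_DETECT_ATTR_ROTATION_DIRECTION,
                                           MV_BARCODE_DETECT_ATTR_ROTATION_COUNTER_CLOCKWISE);
        /* Read back by the block above together with the three rotation attributes. */
        return mv_engine_config_set_bool_attribute(*cfg, MV_BARCODE_DETECT_ATTR_USE_ENHANCEMENT, true);
}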
diff --git a/mv_barcode/barcode_generator/include/BarcodeGenerator.h b/mv_barcode/barcode_generator/include/BarcodeGenerator.h
index 288e169..9f7075d 100644
  * @brief This file contains the BarcodeGenerator class.
  */
 
-namespace MediaVision {
-namespace Barcode {
+namespace MediaVision
+{
+namespace Barcode
+{
 /**
  * @brief This class implements barcode generation.
  * @details 1D Barcodes and 2D QR codes are supported.
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-class BarcodeGenerator {
+class BarcodeGenerator
+{
 public:
        /**
         * @brief This method generates Barcodes image according to options.
@@ -55,19 +58,12 @@ public:
         * @return BARCODE_ERROR_NONE from BarcodeError which is 0 if success,
         *         BarcodeError value otherwise
         */
-       static int generateBarcodeToImage(
-               const std::string& imageFileName,
-               BarcodeImageFormat imageFormat,
-               const int imageWidth,
-               const int imageHeight,
-               const std::string& message,
-               BarcodeType type,
-               BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_UNAVAILABLE,
-               BarcodeQRErrorCorrectionLevel correctionLevel = BARCODE_QR_ECC_UNAVAILABLE,
-               int qrVersion = 0,
-               int showText = 0,
-               char *fgcolour = NULL,
-               char *bgcolur = NULL);
+       static int generateBarcodeToImage(const std::string &imageFileName, BarcodeImageFormat imageFormat,
+                                                                         const int imageWidth, const int imageHeight, const std::string &message,
+                                                                         BarcodeType type,
+                                                                         BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_UNAVAILABLE,
+                                                                         BarcodeQRErrorCorrectionLevel correctionLevel = BARCODE_QR_ECC_UNAVAILABLE,
+                                                                         int qrVersion = 0, int showText = 0, char *fgcolour = NULL, char *bgcolur = NULL);
 
        /**
         * @brief This method generates Barcodes image buffer according to options.
@@ -88,23 +84,15 @@ public:
         * @return BARCODE_ERROR_NONE from BarcodeError which is 0 if success,
         *         BarcodeError value otherwise
         */
-       static int generateBarcodeToBuffer(
-               unsigned char **imageBuffer,
-               unsigned int *imageWidth,
-               unsigned int *imageHeight,
-               unsigned int *imageChannels,
-               const std::string& message,
-               BarcodeType type,
-               BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_UNAVAILABLE,
-               BarcodeQRErrorCorrectionLevel correctionLevel = BARCODE_QR_ECC_UNAVAILABLE,
-               int qrVersion = 0,
-               int showText = 0,
-               char *fgcolour = NULL,
-               char *bgcolur = NULL);
+       static int generateBarcodeToBuffer(unsigned char **imageBuffer, unsigned int *imageWidth, unsigned int *imageHeight,
+                                                                          unsigned int *imageChannels, const std::string &message, BarcodeType type,
+                                                                          BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_UNAVAILABLE,
+                                                                          BarcodeQRErrorCorrectionLevel correctionLevel = BARCODE_QR_ECC_UNAVAILABLE,
+                                                                          int qrVersion = 0, int showText = 0, char *fgcolour = NULL,
+                                                                          char *bgcolur = NULL);
 };
 
 } /* Barcode */
 } /* MediaVision */
 
 #endif /* __MEDIA_VISION_BARCODE_GENERATOR_H__ */
-
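A usage sketch for the reflowed signature, leaning on the defaulted trailing parameters; the output path and message are illustrative:

#include "BarcodeGenerator.h"

using namespace MediaVision::Barcode;

int makeQrPng()
{
        /* showText, fgcolour and bgcolour fall back to the defaults declared above */
        return BarcodeGenerator::generateBarcodeToImage("/tmp/qr.png", BARCODE_IMAGE_PNG, 300, 300, "HELLO WORLD",
                                                        BARCODE_QR, BARCODE_QR_MODE_ALPHANUMERIC,
                                                        BARCODE_QR_ECC_MEDIUM, 1 /* qrVersion */);
}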
diff --git a/mv_barcode/barcode_generator/include/BarcodeOptions.h b/mv_barcode/barcode_generator/include/BarcodeOptions.h
index 5399b8a..1a2cbfd 100644
  * @brief This file contains the Barcode options.
  */
 
-namespace MediaVision {
-namespace Barcode {
+namespace MediaVision
+{
+namespace Barcode
+{
 /**
  * @brief The Barcode type enumeration.
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-enum BarcodeType {
+enum BarcodeType
+{
        BARCODE_QR = 58,
        BARCODE_UPCA = 34,
        BARCODE_UPCE = 37,
@@ -52,7 +55,8 @@ enum BarcodeType {
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  * @remarks This is unavailable for 1D barcodes.
  */
-enum BarcodeQRErrorCorrectionLevel {
+enum BarcodeQRErrorCorrectionLevel
+{
        BARCODE_QR_ECC_UNAVAILABLE = 0,
        BARCODE_QR_ECC_LOW = 1,
        BARCODE_QR_ECC_MEDIUM = 2,
@@ -66,7 +70,8 @@ enum BarcodeQRErrorCorrectionLevel {
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  * @remarks This is unavailable for 1D barcodes.
  */
-enum BarcodeQREncodingMode {
+enum BarcodeQREncodingMode
+{
        BARCODE_QR_MODE_NUMERIC = 1,
        BARCODE_QR_MODE_ALPHANUMERIC = 1,
        BARCODE_QR_MODE_BYTE = 0,
@@ -80,7 +85,8 @@ enum BarcodeQREncodingMode {
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-enum BarcodeImageFormat {
+enum BarcodeImageFormat
+{
        BARCODE_IMAGE_JPG,
        BARCODE_IMAGE_PNG,
        BARCODE_IMAGE_BMP
@@ -91,7 +97,8 @@ enum BarcodeImageFormat {
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-enum BarcodeGenTextOpt {
+enum BarcodeGenTextOpt
+{
        BARCODE_GEN_TEXT_INVISIBLE,
        BARCODE_GEN_TEXT_VISIBLE
 };
@@ -101,7 +108,8 @@ enum BarcodeGenTextOpt {
  *
  * @since_tizen @if MOBILE 2.4 @else 3.0 @endif
  */
-enum BarcodeError {
+enum BarcodeError
+{
        BARCODE_ERROR_NONE = 0,
        BARCODE_WARNING_INVALID_OPTION = 2,
        BARCODE_ERROR_TOO_LONG = 5,
@@ -118,4 +126,3 @@ enum BarcodeError {
 } /* MediaVision */
 
 #endif /* __MEDIA_VISION_BARCODE_OPTIONS_H__ */
-
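The explicit values on BarcodeType above are not arbitrary: they mirror zint symbology IDs (zint's QR Code ID is 58), which is why BarcodeGenerator.cpp can assign them straight into zint_symbol::symbology. A minimal sketch, assuming the zint headers are available:

#include <zint.h>
#include "BarcodeOptions.h"

void probeSymbology()
{
        zint_symbol *symbol = ZBarcode_Create();
        symbol->symbology = MediaVision::Barcode::BARCODE_QR; /* 58, same as zint's QR Code ID */
        ZBarcode_Delete(symbol);
}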
diff --git a/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h b/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h
index 612a681..772668d 100644
@@ -54,13 +54,9 @@ extern "C" {
  *
  * @see mv_barcode_generate_image_open()
  */
-int mv_barcode_generate_source_open(mv_engine_config_h engine_cfg,
-               const char *message,
-               mv_barcode_type_e type,
-               mv_barcode_qr_mode_e qr_enc_mode,
-               mv_barcode_qr_ecc_e qr_ecc,
-               int qr_version,
-               mv_source_h image);
+int mv_barcode_generate_source_open(mv_engine_config_h engine_cfg, const char *message, mv_barcode_type_e type,
+                                                                       mv_barcode_qr_mode_e qr_enc_mode, mv_barcode_qr_ecc_e qr_ecc, int qr_version,
+                                                                       mv_source_h image);
 
 /**
  * @brief Generates image file with barcode.
@@ -90,17 +86,10 @@ int mv_barcode_generate_source_open(mv_engine_config_h engine_cfg,
  *
  * @see mv_barcode_generate_source_open()
  */
-int mv_barcode_generate_image_open(
-               mv_engine_config_h engine_cfg,
-               const char *message,
-               int image_width,
-               int image_height,
-               mv_barcode_type_e type,
-               mv_barcode_qr_mode_e qr_enc_mode,
-               mv_barcode_qr_ecc_e qr_ecc,
-               int qr_version,
-               const char *image_path,
-               mv_barcode_image_format_e image_format);
+int mv_barcode_generate_image_open(mv_engine_config_h engine_cfg, const char *message, int image_width,
+                                                                  int image_height, mv_barcode_type_e type, mv_barcode_qr_mode_e qr_enc_mode,
+                                                                  mv_barcode_qr_ecc_e qr_ecc, int qr_version, const char *image_path,
+                                                                  mv_barcode_image_format_e image_format);
 
 #ifdef __cplusplus
 }
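A hedged call sketch for the source-based generator declared above; the handle comes from the public mv_create_source(), and the message is illustrative:

static int generate_qr_source(mv_source_h *image)
{
        mv_create_source(image); /* error checks elided */
        return mv_barcode_generate_source_open(NULL /* engine_cfg */, "0123456789", MV_BARCODE_QR,
                                               MV_BARCODE_QR_MODE_NUMERIC, MV_BARCODE_QR_ECC_LOW,
                                               1 /* qr_version */, *image);
}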
diff --git a/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp b/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp
index c54bfe1..9798da0 100644
 
 #define ZINT_COLOUR_SIZE 10
 
-namespace MediaVision {
-namespace Barcode {
-
-namespace {
-
-int getFormatEncodingInfo(
-               BarcodeImageFormat imageFormat,
-               std::vector<std::string>& extensions,
-               std::vector<int>& compressionParams)
+namespace MediaVision
+{
+namespace Barcode
+{
+namespace
+{
+int getFormatEncodingInfo(BarcodeImageFormat imageFormat, std::vector<std::string> &extensions,
+                                                 std::vector<int> &compressionParams)
 {
        static const int PNG_COMPRESSION_LEVEL = 3;
 
@@ -67,16 +66,9 @@ int getFormatEncodingInfo(
        return BARCODE_ERROR_NONE;
 }
 
-int createBarcode(
-               const std::string& message,
-               BarcodeType type,
-               BarcodeQREncodingMode encodingMode,
-               BarcodeQRErrorCorrectionLevel correctionLevel,
-               int qrVersion,
-               int showText,
-               char *fgcolour,
-               char *bgcolour,
-               zint_symbol *symbol)
+int createBarcode(const std::string &message, BarcodeType type, BarcodeQREncodingMode encodingMode,
+                                 BarcodeQRErrorCorrectionLevel correctionLevel, int qrVersion, int showText, char *fgcolour,
+                                 char *bgcolour, zint_symbol *symbol)
 {
        /* set input values */
        symbol->symbology = type;
@@ -88,21 +80,21 @@ int createBarcode(
 
        /* set default values */
        if (fgcolour) {
-               std::strncpy(symbol->fgcolour, fgcolour, ZINT_COLOUR_SIZE-1);
+               std::strncpy(symbol->fgcolour, fgcolour, ZINT_COLOUR_SIZE - 1);
                if (strlen(fgcolour) > 9) {
                        symbol->fgcolour[9] = '\0';
                }
        } else {
-               std::strncpy(symbol->fgcolour, "000000", ZINT_COLOUR_SIZE-1);
+               std::strncpy(symbol->fgcolour, "000000", ZINT_COLOUR_SIZE - 1);
        }
 
        if (bgcolour) {
-               std::strncpy(symbol->bgcolour, bgcolour, ZINT_COLOUR_SIZE-1);
+               std::strncpy(symbol->bgcolour, bgcolour, ZINT_COLOUR_SIZE - 1);
                if (strlen(bgcolour) > 9) {
                        symbol->bgcolour[9] = '\0';
                }
        } else {
-               std::strncpy(symbol->bgcolour, "ffffff", ZINT_COLOUR_SIZE-1);
+               std::strncpy(symbol->bgcolour, "ffffff", ZINT_COLOUR_SIZE - 1);
        }
 
        LOGI("Check colors: front %s, back %s", symbol->fgcolour, symbol->bgcolour);
@@ -118,25 +110,19 @@ int createBarcode(
 
        /* create barcode */
        const int rotationAngle = 0;
-       int error = ZBarcode_Encode_and_Buffer(
-                                       symbol,
-                                       (unsigned char*)(message.c_str()),
-                                       message.length(),
-                                       rotationAngle);
+       int error =
+                       ZBarcode_Encode_and_Buffer(symbol, (unsigned char *) (message.c_str()), message.length(), rotationAngle);
 
        return error;
 }
 
-int writeBufferToImageFile(
-               zint_symbol *symbol,
-               const std::string& imageFileName,
-               BarcodeImageFormat imageFormat,
-               const int imageWidth,
-               const int imageHeight)
+int writeBufferToImageFile(zint_symbol *symbol, const std::string &imageFileName, BarcodeImageFormat imageFormat,
+                                                  const int imageWidth, const int imageHeight)
 {
        if (imageWidth <= 0 || imageHeight <= 0) {
                LOGE("Barcode image size is invalid: %i x %i. Terminate write to "
-                       "the image operation", imageWidth, imageHeight);
+                        "the image operation",
+                        imageWidth, imageHeight);
                return BARCODE_ERROR_INVALID_DATA;
        }
 
@@ -154,8 +140,7 @@ int writeBufferToImageFile(
        std::vector<std::string> expectedExtensions;
        std::vector<int> compressionParams;
 
-       int error = getFormatEncodingInfo(imageFormat,
-                               expectedExtensions, compressionParams);
+       int error = getFormatEncodingInfo(imageFormat, expectedExtensions, compressionParams);
 
        if (BARCODE_ERROR_NONE != error || expectedExtensions.empty()) {
                LOGE("Image format is incorrectly specified or not supported");
@@ -169,12 +154,9 @@ int writeBufferToImageFile(
        for (size_t extNum = 0; extNum < expectedExtensions.size(); ++extNum) {
                if (resultFilePath.size() >= expectedExtensions[extNum].size()) {
                        std::string givenExtension = resultFilePath.substr(
-                               resultFilePath.length() - expectedExtensions[extNum].size(),
-                               expectedExtensions[extNum].size());
+                                       resultFilePath.length() - expectedExtensions[extNum].size(), expectedExtensions[extNum].size());
 
-                       std::transform(
-                               givenExtension.begin(), givenExtension.end(),
-                               givenExtension.begin(), ::tolower);
+                       std::transform(givenExtension.begin(), givenExtension.end(), givenExtension.begin(), ::tolower);
 
                        if (givenExtension == expectedExtensions[extNum]) {
                                rightExtensionFlag = true;
@@ -189,12 +171,10 @@ int writeBufferToImageFile(
        cv::Mat image(symbol->bitmap_height, symbol->bitmap_width, CV_8UC3, symbol->bitmap);
        cv::resize(image, image, cv::Size(imageWidth, imageHeight), 0, 0, cv::INTER_AREA);
 
-       error = cv::imwrite(resultFilePath, image, compressionParams) ?
-                       BARCODE_ERROR_NONE : BARCODE_ERROR_INVALID_DATA;
+       error = cv::imwrite(resultFilePath, image, compressionParams) ? BARCODE_ERROR_NONE : BARCODE_ERROR_INVALID_DATA;
 
        if (BARCODE_ERROR_NONE != error) {
-               LOGE("Write barcode image to file %s operation failed.",
-                               resultFilePath.c_str());
+               LOGE("Write barcode image to file %s operation failed.", resultFilePath.c_str());
                return error;
        }
 
@@ -203,37 +183,21 @@ int writeBufferToImageFile(
 
 } /* anonymous namespace */
 
-int BarcodeGenerator::generateBarcodeToImage(
-               const std::string& imageFileName,
-               BarcodeImageFormat imageFormat,
-               const int imageWidth,
-               const int imageHeight,
-               const std::string& message,
-               BarcodeType type,
-               BarcodeQREncodingMode encodingMode,
-               BarcodeQRErrorCorrectionLevel correctionLevel,
-               int qrVersion,
-               int showText,
-               char *fgcolour,
-               char *bgcolour)
+int BarcodeGenerator::generateBarcodeToImage(const std::string &imageFileName, BarcodeImageFormat imageFormat,
+                                                                                        const int imageWidth, const int imageHeight, const std::string &message,
+                                                                                        BarcodeType type, BarcodeQREncodingMode encodingMode,
+                                                                                        BarcodeQRErrorCorrectionLevel correctionLevel, int qrVersion, int showText,
+                                                                                        char *fgcolour, char *bgcolour)
 {
        zint_symbol *symbol = ZBarcode_Create();
 
-       if(symbol == NULL) {
+       if (symbol == NULL) {
                LOGE("ZBarcode creation failed");
                return BARCODE_ERROR_ENCODING_PROBLEM;
        }
 
-       int error = createBarcode(
-                                       message,
-                                       type,
-                                       encodingMode,
-                                       correctionLevel,
-                                       qrVersion,
-                                       showText,
-                                       fgcolour,
-                                       bgcolour,
-                                       symbol);
+       int error = createBarcode(message, type, encodingMode, correctionLevel, qrVersion, showText, fgcolour, bgcolour,
+                                                         symbol);
 
        if (error != BARCODE_ERROR_NONE) {
                LOGE("Barcode creation failed, clean memory");
@@ -241,55 +205,34 @@ int BarcodeGenerator::generateBarcodeToImage(
                return error;
        }
 
-       error = writeBufferToImageFile(
-                               symbol,
-                               imageFileName,
-                               imageFormat,
-                               imageWidth,
-                               imageHeight);
+       error = writeBufferToImageFile(symbol, imageFileName, imageFormat, imageWidth, imageHeight);
        if (error != BARCODE_ERROR_NONE)
                LOGE("Barcode [%s] file write fail, clean memory", imageFileName.c_str());
        else
-               LOGI("Barcode image [%s] is successfully generated, clean memory",
-                               imageFileName.c_str());
+               LOGI("Barcode image [%s] is successfully generated, clean memory", imageFileName.c_str());
 
        ZBarcode_Delete(symbol);
 
        return error;
 }
 
-int BarcodeGenerator::generateBarcodeToBuffer(
-               unsigned char **imageBuffer,
-               unsigned int *imageWidth,
-               unsigned int *imageHeight,
-               unsigned int *imageChannels,
-               const std::string& message,
-               BarcodeType type,
-               BarcodeQREncodingMode encodingMode,
-               BarcodeQRErrorCorrectionLevel correctionLevel,
-               int qrVersion,
-               int showText,
-               char *fgcolour,
-               char *bgcolour)
+int BarcodeGenerator::generateBarcodeToBuffer(unsigned char **imageBuffer, unsigned int *imageWidth,
+                                                                                         unsigned int *imageHeight, unsigned int *imageChannels,
+                                                                                         const std::string &message, BarcodeType type,
+                                                                                         BarcodeQREncodingMode encodingMode,
+                                                                                         BarcodeQRErrorCorrectionLevel correctionLevel, int qrVersion,
+                                                                                         int showText, char *fgcolour, char *bgcolour)
 {
        zint_symbol *symbol = ZBarcode_Create();
 
-       if(symbol == NULL) {
+       if (symbol == NULL) {
                LOGE("ZBarcode creation failed");
 
                return BARCODE_ERROR_ENCODING_PROBLEM;
        }
 
-       int error = createBarcode(
-                                       message,
-                                       type,
-                                       encodingMode,
-                                       correctionLevel,
-                                       qrVersion,
-                                       showText,
-                                       fgcolour,
-                                       bgcolour,
-                                       symbol);
+       int error = createBarcode(message, type, encodingMode, correctionLevel, qrVersion, showText, fgcolour, bgcolour,
+                                                         symbol);
 
        if (error != BARCODE_ERROR_NONE) {
                LOGE("Barcode creation failed, clean memory");
@@ -301,8 +244,7 @@ int BarcodeGenerator::generateBarcodeToBuffer(
        *imageWidth = symbol->bitmap_width;
        *imageHeight = symbol->bitmap_height;
        *imageChannels = 3;
-       const unsigned int imageBufferSize = (*imageWidth) * (*imageHeight) *
-               (*imageChannels);
+       const unsigned int imageBufferSize = (*imageWidth) * (*imageHeight) * (*imageChannels);
        *imageBuffer = new unsigned char[imageBufferSize];
        memmove(*imageBuffer, symbol->bitmap, imageBufferSize);
 
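The buffer handed back by generateBarcodeToBuffer() is allocated with new[] above, so ownership passes to the caller. A minimal sketch of that contract (message and options illustrative):

using namespace MediaVision::Barcode;

int renderToBuffer()
{
        unsigned char *buf = NULL;
        unsigned int width = 0, height = 0, channels = 0;

        int err = BarcodeGenerator::generateBarcodeToBuffer(&buf, &width, &height, &channels, "HELLO", BARCODE_QR,
                                                            BARCODE_QR_MODE_BYTE, BARCODE_QR_ECC_MEDIUM, 1);
        if (err == BARCODE_ERROR_NONE)
                delete[] buf; /* caller releases the width x height x channels bitmap */
        return err;
}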
diff --git a/mv_barcode/barcode_generator/src/mv_barcode_generate.c b/mv_barcode/barcode_generator/src/mv_barcode_generate.c
index a1340a1..caa3f03 100644
  * @brief This file contains the porting layer for Media Vision barcode module.
  */
 
-
-int mv_barcode_generate_source(
-               mv_engine_config_h engine_cfg,
-               const char *message,
-               mv_barcode_type_e type,
-               mv_barcode_qr_mode_e qr_enc_mode,
-               mv_barcode_qr_ecc_e qr_ecc,
-               int qr_version,
-               mv_source_h image)
+int mv_barcode_generate_source(mv_engine_config_h engine_cfg, const char *message, mv_barcode_type_e type,
+                                                          mv_barcode_qr_mode_e qr_enc_mode, mv_barcode_qr_ecc_e qr_ecc, int qr_version,
+                                                          mv_source_h image)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_barcode_generate_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(message);
@@ -43,21 +37,18 @@ int mv_barcode_generate_source(
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       if (type < MV_BARCODE_QR ||
-               type >= MV_BARCODE_UNKNOWN || type == MV_BARCODE_UNDEFINED) {
+       if (type < MV_BARCODE_QR || type >= MV_BARCODE_UNKNOWN || type == MV_BARCODE_UNDEFINED) {
                LOGE("Not supported barcode type [%d]", type);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
        if (type == MV_BARCODE_QR) {
-               if (qr_enc_mode < MV_BARCODE_QR_MODE_NUMERIC ||
-                       qr_enc_mode >= MV_BARCODE_QR_MODE_UNAVAILABLE) {
+               if (qr_enc_mode < MV_BARCODE_QR_MODE_NUMERIC || qr_enc_mode >= MV_BARCODE_QR_MODE_UNAVAILABLE) {
                        LOGE("Not supported QR encoding mode[%d]", qr_enc_mode);
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
                }
 
-               if (qr_ecc < MV_BARCODE_QR_ECC_LOW ||
-                       qr_ecc >= MV_BARCODE_QR_ECC_UNAVAILABLE) {
+               if (qr_ecc < MV_BARCODE_QR_ECC_LOW || qr_ecc >= MV_BARCODE_QR_ECC_UNAVAILABLE) {
                        LOGE("Not supported QR ECC level [%d]", qr_ecc);
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
                }
@@ -69,25 +60,15 @@ int mv_barcode_generate_source(
        }
 
        /* Use open barcode generate functionality here. */
-       int ret = mv_barcode_generate_source_open(
-                                       engine_cfg, message, type, qr_enc_mode, qr_ecc, qr_version,
-                                       image);
+       int ret = mv_barcode_generate_source_open(engine_cfg, message, type, qr_enc_mode, qr_ecc, qr_version, image);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_barcode_generate_image(
-               mv_engine_config_h engine_cfg,
-               const char *message,
-               int image_width,
-               int image_height,
-               mv_barcode_type_e type,
-               mv_barcode_qr_mode_e qr_enc_mode,
-               mv_barcode_qr_ecc_e qr_ecc,
-               int qr_version,
-               const char *image_path,
-               mv_barcode_image_format_e image_format)
+int mv_barcode_generate_image(mv_engine_config_h engine_cfg, const char *message, int image_width, int image_height,
+                                                         mv_barcode_type_e type, mv_barcode_qr_mode_e qr_enc_mode, mv_barcode_qr_ecc_e qr_ecc,
+                                                         int qr_version, const char *image_path, mv_barcode_image_format_e image_format)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_barcode_generate_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(message);
@@ -99,21 +80,18 @@ int mv_barcode_generate_image(
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
 
-       if (type < MV_BARCODE_QR ||
-               type >= MV_BARCODE_UNKNOWN || type == MV_BARCODE_UNDEFINED) {
+       if (type < MV_BARCODE_QR || type >= MV_BARCODE_UNKNOWN || type == MV_BARCODE_UNDEFINED) {
                LOGE("Not supported barcode type [%d]", type);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
        if (type == MV_BARCODE_QR) {
-               if (qr_enc_mode < MV_BARCODE_QR_MODE_NUMERIC ||
-                       qr_enc_mode >= MV_BARCODE_QR_MODE_UNAVAILABLE) {
+               if (qr_enc_mode < MV_BARCODE_QR_MODE_NUMERIC || qr_enc_mode >= MV_BARCODE_QR_MODE_UNAVAILABLE) {
                        LOGE("Not supported QR encoding mode[%d]", qr_enc_mode);
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
                }
 
-               if (qr_ecc < MV_BARCODE_QR_ECC_LOW ||
-                       qr_ecc >= MV_BARCODE_QR_ECC_UNAVAILABLE) {
+               if (qr_ecc < MV_BARCODE_QR_ECC_LOW || qr_ecc >= MV_BARCODE_QR_ECC_UNAVAILABLE) {
                        LOGE("Not supported QR ECC level [%d]", qr_ecc);
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
                }
@@ -124,16 +102,14 @@ int mv_barcode_generate_image(
                }
        }
 
-       if (image_format < MV_BARCODE_IMAGE_FORMAT_BMP ||
-               image_format >= MV_BARCODE_IMAGE_FORMAT_NUM) {
+       if (image_format < MV_BARCODE_IMAGE_FORMAT_BMP || image_format >= MV_BARCODE_IMAGE_FORMAT_NUM) {
                LOGE("Not supported image format [%d]", image_format);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
        /* Use open barcode generate functionality here. */
-       int ret = mv_barcode_generate_image_open(
-                                       engine_cfg, message, image_width, image_height, type,
-                                       qr_enc_mode, qr_ecc, qr_version, image_path, image_format);
+       int ret = mv_barcode_generate_image_open(engine_cfg, message, image_width, image_height, type, qr_enc_mode, qr_ecc,
+                                                                                        qr_version, image_path, image_format);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
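A call that satisfies every check above (supported type, matching QR mode and ECC, known image format); the path is illustrative and MV_BARCODE_IMAGE_FORMAT_PNG is assumed from the public enum:

static int generate_qr_png(void)
{
        return mv_barcode_generate_image(NULL, "0123456789", 300, 300, MV_BARCODE_QR, MV_BARCODE_QR_MODE_NUMERIC,
                                         MV_BARCODE_QR_ECC_LOW, 1 /* qr_version */, "/tmp/barcode.png",
                                         MV_BARCODE_IMAGE_FORMAT_PNG);
}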
diff --git a/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp b/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp
index c689f9f..17da765 100644
 
 using namespace MediaVision::Barcode;
 
-namespace {
-
-int alphanumToUpper(std::string& strToTransform)
+namespace
+{
+int alphanumToUpper(std::string &strToTransform)
 {
        std::string tempString = strToTransform;
-       std::transform(tempString.begin(), tempString.end(),
-                       tempString.begin(), ::toupper);
+       std::transform(tempString.begin(), tempString.end(), tempString.begin(), ::toupper);
 
-       if (std::string::npos != tempString.find_first_not_of(
-                       "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:")) {
+       if (std::string::npos != tempString.find_first_not_of("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:")) {
                LOGE("Barcode message can't be converted according to support "
-                               "alphanumeric (0..9, A..Z, space, $, %%, *, +, -, ., /, :) "
-                               "mode: %s", strToTransform.c_str());
+                        "alphanumeric (0..9, A..Z, space, $, %%, *, +, -, ., /, :) "
+                        "mode: %s",
+                        strToTransform.c_str());
                return BARCODE_ERROR_INVALID_DATA;
        }
 
        LOGI("Barcode message was converted according to support alphanumeric "
-                       "mode: %s -> %s", strToTransform.c_str(), tempString.c_str());
+                "mode: %s -> %s",
+                strToTransform.c_str(), tempString.c_str());
        strToTransform = tempString;
        return BARCODE_ERROR_NONE;
 }
@@ -96,7 +96,8 @@ BarcodeType convertBarcodeType(mv_barcode_type_e type)
        }
 
        LOGI("Media vision barcode type has been converted to ZInt barcode type "
-                       "(%i -> %i)", type, barcodeType);
+                "(%i -> %i)",
+                type, barcodeType);
        return barcodeType;
 }
 
@@ -119,7 +120,8 @@ BarcodeQREncodingMode convertEncodingMode(mv_barcode_qr_mode_e mode)
        }
 
        LOGI("Media vision QRCode encoding mode has been converted to "
-                       "ZInt encoding mode (%i -> %i)", mode, encodingMode);
+                "ZInt encoding mode (%i -> %i)",
+                mode, encodingMode);
        return encodingMode;
 }
 
@@ -142,7 +144,8 @@ BarcodeQRErrorCorrectionLevel convertECC(mv_barcode_qr_ecc_e ecc)
        }
 
        LOGI("Media vision ECC level has been converted to "
-                       "ZInt ECC level (%i -> %i)", ecc, ecclevel);
+                "ZInt ECC level (%i -> %i)",
+                ecc, ecclevel);
        return ecclevel;
 }
 
@@ -182,7 +185,8 @@ int convertBarcodeError(int barcodeError)
        }
 
        LOGI("ZInt error code has been converted to the media vision error code "
-                       "(%i -> (0x%08x))", barcodeError, mvError);
+                "(%i -> (0x%08x))",
+                barcodeError, mvError);
        return mvError;
 }
 
@@ -202,33 +206,28 @@ BarcodeImageFormat convertImageFormat(mv_barcode_image_format_e format)
        }
 
        LOGI("Media vision image format has been converted to "
-                       "internal image format (%i -> %i)", format, imageFormat);
+                "internal image format (%i -> %i)",
+                format, imageFormat);
        return imageFormat;
 }
 
 } /* anonymous namespace */
 
-int mv_barcode_generate_source_open(
-               mv_engine_config_h engine_cfg,
-               const char *message,
-               mv_barcode_type_e type,
-               mv_barcode_qr_mode_e qr_enc_mode,
-               mv_barcode_qr_ecc_e qr_ecc,
-               int qr_version,
-               mv_source_h image)
+int mv_barcode_generate_source_open(mv_engine_config_h engine_cfg, const char *message, mv_barcode_type_e type,
+                                                                       mv_barcode_qr_mode_e qr_enc_mode, mv_barcode_qr_ecc_e qr_ecc, int qr_version,
+                                                                       mv_source_h image)
 {
        std::string messageStr = std::string(message);
 
-       if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC &&
-               messageStr.find_first_not_of("0123456789") != std::string::npos) {
+       if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC && messageStr.find_first_not_of("0123456789") != std::string::npos) {
                LOGE("Barcode message can't be used according to support "
-                       "numeric (0..9) mode: %s", messageStr.c_str());
+                        "numeric (0..9) mode: %s",
+                        messageStr.c_str());
                return MEDIA_VISION_ERROR_INVALID_DATA;
        }
 
        int error = BARCODE_ERROR_NONE;
-       if (MV_BARCODE_QR == type &&
-               MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode) {
+       if (MV_BARCODE_QR == type && MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode) {
                error = alphanumToUpper(messageStr);
                if (BARCODE_ERROR_NONE != error)
                        return convertBarcodeError(error);
@@ -305,19 +304,10 @@ int mv_barcode_generate_source_open(
                bgcolour[5] = value;
        }
 
-       error = BarcodeGenerator::generateBarcodeToBuffer(
-                                       &imageBuffer,
-                                       &imageWidth,
-                                       &imageHeight,
-                                       &imageChannels,
-                                       messageStr,
-                                       convertBarcodeType(type),
-                                       convertEncodingMode(qr_enc_mode),
-                                       convertECC(qr_ecc),
-                                       qr_version,
-                                       showText,
-                                       fgcolour,
-                                       bgcolour);
+       error = BarcodeGenerator::generateBarcodeToBuffer(&imageBuffer, &imageWidth, &imageHeight, &imageChannels,
+                                                                                                         messageStr, convertBarcodeType(type),
+                                                                                                         convertEncodingMode(qr_enc_mode), convertECC(qr_ecc), qr_version,
+                                                                                                         showText, fgcolour, bgcolour);
 
        if (fgcolour != NULL) {
                free(fgcolour);
@@ -341,16 +331,11 @@ int mv_barcode_generate_source_open(
        const unsigned int imageBufferSize = imageWidth * imageHeight * imageChannels;
 
        LOGI("Barcode has been generated to the buffer: "
-               "Buffer size = %ui x %ui; Channels = %ui; Message = %s",
-               imageWidth, imageHeight, imageChannels, messageStr.c_str());
+                "Buffer size = %ui x %ui; Channels = %ui; Message = %s",
+                imageWidth, imageHeight, imageChannels, messageStr.c_str());
 
-       error = mv_source_fill_by_buffer_c(
-                                       image,
-                                       imageBuffer,
-                                       imageBufferSize,
-                                       imageWidth,
-                                       imageHeight,
-                                       MEDIA_VISION_COLORSPACE_RGB888);
+       error = mv_source_fill_by_buffer_c(image, imageBuffer, imageBufferSize, imageWidth, imageHeight,
+                                                                          MEDIA_VISION_COLORSPACE_RGB888);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                LOGE("Meidiavision source fill by generated buffer failed");
@@ -364,25 +349,17 @@ int mv_barcode_generate_source_open(
        return error;
 }
 
-
-int mv_barcode_generate_image_open(
-               mv_engine_config_h engine_cfg,
-               const char *message,
-               int image_width,
-               int image_height,
-               mv_barcode_type_e type,
-               mv_barcode_qr_mode_e qr_enc_mode,
-               mv_barcode_qr_ecc_e qr_ecc,
-               int qr_version,
-               const char *image_path,
-               mv_barcode_image_format_e image_format)
+int mv_barcode_generate_image_open(mv_engine_config_h engine_cfg, const char *message, int image_width,
+                                                                  int image_height, mv_barcode_type_e type, mv_barcode_qr_mode_e qr_enc_mode,
+                                                                  mv_barcode_qr_ecc_e qr_ecc, int qr_version, const char *image_path,
+                                                                  mv_barcode_image_format_e image_format)
 {
        std::string messageStr = std::string(message);
 
-       if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC &&
-               messageStr.find_first_not_of("0123456789") != std::string::npos) {
+       if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC && messageStr.find_first_not_of("0123456789") != std::string::npos) {
                LOGE("Barcode message can't be used according to support "
-                               "numeric (0..9) mode: %s", messageStr.c_str());
+                        "numeric (0..9) mode: %s",
+                        messageStr.c_str());
                return MEDIA_VISION_ERROR_INVALID_DATA;
        }
 
@@ -392,8 +369,7 @@ int mv_barcode_generate_image_open(
        }
 
        int error = BARCODE_ERROR_NONE;
-       if (MV_BARCODE_QR == type &&
-               MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode) {
+       if (MV_BARCODE_QR == type && MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode) {
                error = alphanumToUpper(messageStr);
                if (BARCODE_ERROR_NONE != error) {
                        return convertBarcodeError(error);
@@ -467,19 +443,10 @@ int mv_barcode_generate_image_open(
                bgcolour[5] = value;
        }
 
-       error = BarcodeGenerator::generateBarcodeToImage(
-                                                       std::string(image_path),
-                                                       convertImageFormat(image_format),
-                                                       image_width,
-                                                       image_height,
-                                                       messageStr,
-                                                       convertBarcodeType(type),
-                                                       convertEncodingMode(qr_enc_mode),
-                                                       convertECC(qr_ecc),
-                                                       qr_version,
-                                                       showText,
-                                                       fgcolour,
-                                                       bgcolour);
+       error = BarcodeGenerator::generateBarcodeToImage(std::string(image_path), convertImageFormat(image_format),
+                                                                                                        image_width, image_height, messageStr, convertBarcodeType(type),
+                                                                                                        convertEncodingMode(qr_enc_mode), convertECC(qr_ecc), qr_version,
+                                                                                                        showText, fgcolour, bgcolour);
 
        if (fgcolour != NULL) {
                free(fgcolour);
@@ -495,8 +462,8 @@ int mv_barcode_generate_image_open(
                LOGE("Barcode generation to the image file failed");
        } else {
                LOGI("Barcode has been generated to the image: "
-                       "Image size = %ui x %ui; Message = %s",
-                       image_width, image_height, messageStr.c_str());
+                        "Image size = %ui x %ui; Message = %s",
+                        image_width, image_height, messageStr.c_str());
        }
 
        return convertBarcodeError(error);
diff --git a/mv_common/include/CommonUtils.h b/mv_common/include/CommonUtils.h
index 9d81649..5131260 100644
 #include <opencv2/core/mat.hpp>
 #include <opencv2/imgproc.hpp>
 
-namespace MediaVision {
-namespace Common {
-
+namespace MediaVision
+{
+namespace Common
+{
 /**
  * @brief    This function converts media vision image handle to cv::Mat with gray color.
  *
@@ -33,7 +34,7 @@ namespace Common {
  * @return @c MEDIA_VISION_ERROR_NONE on success,
  *         otherwise a negative error value
  */
-int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource);
+int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat &cvSource);
 
 } /* Common */
 } /* MediaVision */
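A usage sketch for the converter declared above, with LOGI as used throughout the module (the handle is assumed to be filled elsewhere):

#include "CommonUtils.h"
#include <opencv2/core/mat.hpp>

int toGray(mv_source_h mvSource)
{
        cv::Mat gray;
        int err = MediaVision::Common::convertSourceMV2GrayCV(mvSource, gray);
        if (err == MEDIA_VISION_ERROR_NONE)
                LOGI("gray frame: %d x %d", gray.cols, gray.rows);
        return err;
}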
diff --git a/mv_common/include/EngineConfig.h b/mv_common/include/EngineConfig.h
index 365e5fb..9be5ed6 100644
  * @brief  Engine Configuration class definition.
  */
 
-namespace MediaVision {
-namespace Common {
-
+namespace MediaVision
+{
+namespace Common
+{
 using DictDblConstIter = std::map<std::string, double>::const_iterator;
 using DictIntConstIter = std::map<std::string, int>::const_iterator;
 using DictBoolConstIter = std::map<std::string, bool>::const_iterator;
 using DictStrConstIter = std::map<std::string, std::string>::const_iterator;
-using DictVecStrConstIter = std::map<std::string, std::vector<std::string>>::const_iterator;
+using DictVecStrConstIter = std::map<std::string, std::vector<std::string> >::const_iterator;
 
-class EngineConfig {
+class EngineConfig
+{
 public:
        /**
         * @brief Engine configuration constructor.
@@ -49,7 +51,8 @@ public:
         * @since_tizen 7.0
         * @param [in] config_file_path      A full path of config file. (Optional)
         */
-       explicit EngineConfig(std::string config_file_path = std::string(MV_CONFIG_PATH) + std::string(MV_ENGINE_CONFIG_FILE_NAME));
+       explicit EngineConfig(std::string config_file_path = std::string(MV_CONFIG_PATH) +
+                                                                                                                std::string(MV_ENGINE_CONFIG_FILE_NAME));
 
        /**
         * @brief Engine configuration destructor.
@@ -65,7 +68,7 @@ public:
         * @return @c MEDIA_VISION_ERROR_NONE on success,\n
         *         otherwise a negative error value
         */
-       int setAttribute(const std::string& key, const double value);
+       int setAttribute(const std::string &key, const double value);
 
        /**
         * @brief Sets attribute with integer value.
@@ -76,7 +79,7 @@ public:
         * @return @c MEDIA_VISION_ERROR_NONE on success,\n
         *         otherwise a negative error value
         */
-       int setAttribute(const std::string& key, const int value);
+       int setAttribute(const std::string &key, const int value);
 
        /**
         * @brief Sets attribute with boolean value.
@@ -87,7 +90,7 @@ public:
         * @return @c MEDIA_VISION_ERROR_NONE on success,\n
         *         otherwise a negative error value
         */
-       int setAttribute(const std::string& key, const bool value);
+       int setAttribute(const std::string &key, const bool value);
 
        /**
         * @brief Sets attribute with string value.
@@ -98,7 +101,7 @@ public:
         * @return @c MEDIA_VISION_ERROR_NONE on success,\n
         *         otherwise a negative error value
         */
-       int setAttribute(const std::string& key, const std::string& value);
+       int setAttribute(const std::string &key, const std::string &value);
 
        /**
         * @brief Sets attribute with the vector of string value.
@@ -109,7 +112,7 @@ public:
         * @return @c MEDIA_VISION_ERROR_NONE on success,\n
         *         otherwise a negative error value
         */
-       int setAttribute(const std::string& key, const std::vector<std::string>& value);
+       int setAttribute(const std::string &key, const std::vector<std::string> &value);
 
        /**
         * @brief Gets double attribute value by attribute name.
@@ -122,7 +125,7 @@ public:
         * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
         *         doesn't exist in the engine configuration dictionary
         */
-       int getDoubleAttribute(const std::string& key, double *value) const;
+       int getDoubleAttribute(const std::string &key, double *value) const;
 
        /**
         * @brief Gets integer attribute value by attribute name.
@@ -135,7 +138,7 @@ public:
         * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
         *         doesn't exist in the engine configuration dictionary
         */
-       int getIntegerAttribute(const std::string& key, int *value) const;
+       int getIntegerAttribute(const std::string &key, int *value) const;
 
        /**
         * @brief Gets boolean attribute value by attribute name.
@@ -148,7 +151,7 @@ public:
         * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
         *         doesn't exist in the engine configuration dictionary
         */
-       int getBooleanAttribute(const std::string& key, bool *value) const;
+       int getBooleanAttribute(const std::string &key, bool *value) const;
 
        /**
         * @brief Gets string attribute value by attribute name.
@@ -161,7 +164,7 @@ public:
         * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
         *         doesn't exist in the engine configuration dictionary
         */
-       int getStringAttribute(const std::string& key, std::string *value) const;
+       int getStringAttribute(const std::string &key, std::string *value) const;
 
        /**
         * @brief Gets vector attribute value of string by attribute name.
@@ -174,22 +177,22 @@ public:
         * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
         *         doesn't exist in the engine configuration dictionary
         */
-       int getStringAttribute(const std::string& key, std::vector<std::string> *value) const;
+       int getStringAttribute(const std::string &key, std::vector<std::string> *value) const;
 
-       const std::map<std::string, double>& getDefaultDblDict();
-       const std::map<std::string, int>& getDefaultIntDict();
-       const std::map<std::string, bool>& getDefaultBoolDict();
-       const std::map<std::string, std::string>& getDefaultStrDict();
-       const std::map<std::string, std::vector<std::string>>& getDefaultVecStrDict();
+       const std::map<std::string, double> &getDefaultDblDict();
+       const std::map<std::string, int> &getDefaultIntDict();
+       const std::map<std::string, bool> &getDefaultBoolDict();
+       const std::map<std::string, std::string> &getDefaultStrDict();
+       const std::map<std::string, std::vector<std::string> > &getDefaultVecStrDict();
 
 private:
        std::map<std::string, double> __dblDict;
        std::map<std::string, int> __intDict;
        std::map<std::string, bool> __boolDict;
        std::map<std::string, std::string> __strDict;
-       std::map<std::string, std::vector<std::string>> __vecStrDict;
+       std::map<std::string, std::vector<std::string> > __vecStrDict;
 
-       int loadDictionaries(std::string& config_file_path);
+       int loadDictionaries(std::string &config_file_path);
 };
 
 } /* Common */
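A round-trip sketch over the typed dictionaries above; the attribute key is one the barcode module already reads, and return checks are abbreviated:

#include "EngineConfig.h"

using MediaVision::Common::EngineConfig;

void dumpTarget()
{
        EngineConfig cfg; /* parses MV_CONFIG_PATH + MV_ENGINE_CONFIG_FILE_NAME by default */
        cfg.setAttribute("MV_BARCODE_DETECT_ATTR_TARGET", 1); /* int overload */

        int target = 0;
        if (cfg.getIntegerAttribute("MV_BARCODE_DETECT_ATTR_TARGET", &target) == MEDIA_VISION_ERROR_NONE)
                LOGI("target = %d", target);
}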
diff --git a/mv_common/include/MediaSource.h b/mv_common/include/MediaSource.h
index 1cf2965..69c1ca7 100644
  * @brief This file contains the MediaSource class.
  */
 
-namespace MediaVision {
-namespace Common {
+namespace MediaVision
+{
+namespace Common
+{
 /**
  * @class   MediaSource
  * @brief   The Media Source container
  * @details It is class which contains Media Source information. This class
  *          will be use in the Media Vision as simple image.
  */
-class MediaSource {
+class MediaSource
+{
 public:
        /**
         * @brief   Creates a MediaSource.
@@ -69,8 +72,7 @@ public:
         * @see MediaSource::MediaSource()
         * @see MediaSource::fill()
         */
-       bool alloc(unsigned int bufferSize,
-                       unsigned int width, unsigned int height, mv_colorspace_e colorspace);
+       bool alloc(unsigned int bufferSize, unsigned int width, unsigned int height, mv_colorspace_e colorspace);
 
        /**
         * @brief   Clears the MediaSource.
@@ -97,8 +99,8 @@ public:
         * @see MediaSource::MediaSource()
         * @see MediaSource::clear()
         */
-       bool fill(const unsigned char *buffer, unsigned int bufferSize,
-                       unsigned int width, unsigned int height, mv_colorspace_e colorspace);
+       bool fill(const unsigned char *buffer, unsigned int bufferSize, unsigned int width, unsigned int height,
+                         mv_colorspace_e colorspace);
 
        /**
         * @brief Fills the MediaSource based on the buffer and metadata.
@@ -116,8 +118,8 @@ public:
         * @see MediaSource::MediaSource()
         * @see MediaSource::clear()
         */
-       bool fill(const unsigned char *buffer, unsigned int bufferSize,
-                       unsigned int width, unsigned int height, size_t offset);
+       bool fill(const unsigned char *buffer, unsigned int bufferSize, unsigned int width, unsigned int height,
+                         size_t offset);
 
        /**
         * @brief Gets data buffer of the MediaSource.
@@ -160,15 +162,15 @@ public:
        mv_colorspace_e getColorspace(void) const;
 
 protected:
-       unsigned char *m_pBuffer;        /**< The data buffer */
+       unsigned char *m_pBuffer; /**< The data buffer */
 
-       unsigned int m_bufferSize;       /**< The buffer size */
+       unsigned int m_bufferSize; /**< The buffer size */
 
-       unsigned int m_width;            /**< The image width */
+       unsigned int m_width; /**< The image width */
 
-       unsigned int m_height;           /**< The image height */
+       unsigned int m_height; /**< The image height */
 
-       mv_colorspace_e m_colorspace;    /**< The image colorspace */
+       mv_colorspace_e m_colorspace; /**< The image colorspace */
 };
 
 } /* Common */
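
A short sketch of the alloc()/fill() contract above: fill() deep-copies the caller's buffer, so the caller keeps ownership. The dimensions are made up, and the usual getWidth()/getHeight() accessors are assumed alongside getColorspace():

	#include <MediaSource.h>

	MediaVision::Common::MediaSource src;
	const unsigned int w = 640, h = 480, size = w * h * 3; /* RGB888 */
	unsigned char *rgb = new unsigned char[size](); /* stand-in frame, zero-filled */

	if (src.fill(rgb, size, w, h, MEDIA_VISION_COLORSPACE_RGB888))
		LOGD("filled %u x %u", src.getWidth(), src.getHeight());

	delete[] rgb; /* safe: the MediaSource holds its own copy */
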
index 234fc52..cbcb731 100644 (file)
@@ -41,8 +41,7 @@ extern "C" {
  *
  * @see mv_destroy_source_c()
  */
-int mv_create_source_c(
-               mv_source_h *source);
+int mv_create_source_c(mv_source_h *source);
 
 /**
  * @brief Destroys the source handle and releases all its resources.
@@ -55,8 +54,7 @@ int mv_create_source_c(
  *
  * @see mv_create_source_c()
  */
-int mv_destroy_source_c(
-               mv_source_h source);
+int mv_destroy_source_c(mv_source_h source);
 
 /**
  * @brief Fills the media source based on the media packet.
@@ -77,9 +75,7 @@ int mv_destroy_source_c(
  * @see mv_create_source_c()
  * @see mv_destroy_source_c()
  */
-int mv_source_fill_by_media_packet_c(
-               mv_source_h source,
-               media_packet_h media_packet);
+int mv_source_fill_by_media_packet_c(mv_source_h source, media_packet_h media_packet);
 
 /**
  * @brief Fills the media source based on the buffer and metadata.
@@ -100,13 +96,8 @@ int mv_source_fill_by_media_packet_c(
  *
  * @see mv_source_clear_c()
  */
-int mv_source_fill_by_buffer_c(
-               mv_source_h source,
-               unsigned char *data_buffer,
-               unsigned int buffer_size,
-               unsigned int image_width,
-               unsigned int image_height,
-               mv_colorspace_e image_colorspace);
+int mv_source_fill_by_buffer_c(mv_source_h source, unsigned char *data_buffer, unsigned int buffer_size,
+                                                          unsigned int image_width, unsigned int image_height, mv_colorspace_e image_colorspace);
 
 /**
  * @brief Clears the buffer of the media source.
@@ -119,8 +110,7 @@ int mv_source_fill_by_buffer_c(
  *
  * @see mv_source_fill_by_buffer_c()
  */
-int mv_source_clear_c(
-               mv_source_h source);
+int mv_source_clear_c(mv_source_h source);
 
 /**
  * @brief Gets buffer of the media source.
@@ -141,10 +131,7 @@ int mv_source_clear_c(
  * @see mv_source_get_height_c()
  * @see mv_source_get_colorspace_c()
  */
-int mv_source_get_buffer_c(
-               mv_source_h source,
-               unsigned char **data_buffer,
-               unsigned int *buffer_size);
+int mv_source_get_buffer_c(mv_source_h source, unsigned char **data_buffer, unsigned int *buffer_size);
 
 /**
  * @brief Gets height of the media source.
@@ -160,9 +147,7 @@ int mv_source_get_buffer_c(
  * @see mv_source_get_colorspace_c()
  * @see mv_source_get_buffer_c()
  */
-int mv_source_get_height_c(
-               mv_source_h source,
-               unsigned int *image_height);
+int mv_source_get_height_c(mv_source_h source, unsigned int *image_height);
 
 /**
  * @brief Gets width of the media source.
@@ -178,9 +163,7 @@ int mv_source_get_height_c(
  * @see mv_source_get_colorspace_c()
  * @see mv_source_get_buffer_c()
  */
-int mv_source_get_width_c(
-               mv_source_h source,
-               unsigned int *image_width);
+int mv_source_get_width_c(mv_source_h source, unsigned int *image_width);
 
 /**
  * @brief Gets colorspace of the media source.
@@ -196,9 +179,7 @@ int mv_source_get_width_c(
  * @see mv_source_get_height_c()
  * @see mv_source_get_buffer_c()
  */
-int mv_source_get_colorspace_c(
-               mv_source_h source,
-               mv_colorspace_e *image_colorspace);
+int mv_source_get_colorspace_c(mv_source_h source, mv_colorspace_e *image_colorspace);
 
 /**
  * @brief Creates the handle to the configuration of engine.
@@ -221,8 +202,7 @@ int mv_source_get_colorspace_c(
  * @see mv_engine_config_get_bool_attribute_c()
  * @see mv_engine_config_get_string_attribute_c()
  */
-int mv_create_engine_config_c(
-               mv_engine_config_h *engine_cfg);
+int mv_create_engine_config_c(mv_engine_config_h *engine_cfg);
 
 /**
  * @brief Destroys the engine configuration handle and releases all its
@@ -238,8 +218,7 @@ int mv_create_engine_config_c(
  * @see mv_engine_config_h
  * @see mv_create_engine_config_c()
  */
-int mv_destroy_engine_config_c(
-               mv_engine_config_h engine_cfg);
+int mv_destroy_engine_config_c(mv_engine_config_h engine_cfg);
 
 /**
  * @brief Sets the double attribute to the configuration.
@@ -260,10 +239,7 @@ int mv_destroy_engine_config_c(
  * @see mv_engine_config_set_bool_attribute_c()
  * @see mv_engine_config_set_string_attribute_c()
  */
-int mv_engine_config_set_double_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               double value);
+int mv_engine_config_set_double_attribute_c(mv_engine_config_h engine_cfg, const char *name, double value);
 
 /**
  * @brief Sets the integer attribute to the configuration.
@@ -284,10 +260,7 @@ int mv_engine_config_set_double_attribute_c(
  * @see mv_engine_config_set_bool_attribute_c()
  * @see mv_engine_config_set_string_attribute_c()
  */
-int mv_engine_config_set_int_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               int value);
+int mv_engine_config_set_int_attribute_c(mv_engine_config_h engine_cfg, const char *name, int value);
 
 /**
  * @brief Sets the boolean attribute to the configuration.
@@ -308,10 +281,7 @@ int mv_engine_config_set_int_attribute_c(
  * @see mv_engine_config_set_int_attribute_c()
  * @see mv_engine_config_set_string_attribute_c()
  */
-int mv_engine_config_set_bool_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               bool attribute);
+int mv_engine_config_set_bool_attribute_c(mv_engine_config_h engine_cfg, const char *name, bool attribute);
 
 /**
  * @brief Sets the string attribute to the configuration.
@@ -332,10 +302,7 @@ int mv_engine_config_set_bool_attribute_c(
  * @see mv_engine_config_set_int_attribute_c()
  * @see mv_engine_config_set_bool_attribute_c()
  */
-int mv_engine_config_set_string_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               const char *value);
+int mv_engine_config_set_string_attribute_c(mv_engine_config_h engine_cfg, const char *name, const char *value);
 
 /**
  * @brief Sets the array of string attribute to the configuration.
@@ -354,11 +321,8 @@ int mv_engine_config_set_string_attribute_c(
  *
  * @see mv_engine_config_get_array_string_attribute_c()
  */
-int mv_engine_config_set_array_string_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               const char **values,
-               unsigned int size);
+int mv_engine_config_set_array_string_attribute_c(mv_engine_config_h engine_cfg, const char *name, const char **values,
+                                                                                                 unsigned int size);
 
 /**
  * @brief Gets the double attribute from the configuration dictionary.
@@ -381,10 +345,7 @@ int mv_engine_config_set_array_string_attribute_c(
  * @see mv_engine_config_get_bool_attribute_c()
  * @see mv_engine_config_get_string_attribute_c()
  */
-int mv_engine_config_get_double_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               double *value);
+int mv_engine_config_get_double_attribute_c(mv_engine_config_h engine_cfg, const char *name, double *value);
 
 /**
  * @brief Gets the integer attribute from the configuration dictionary.
@@ -407,10 +368,7 @@ int mv_engine_config_get_double_attribute_c(
  * @see mv_engine_config_get_bool_attribute_c()
  * @see mv_engine_config_get_string_attribute_c()
  */
-int mv_engine_config_get_int_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               int *value);
+int mv_engine_config_get_int_attribute_c(mv_engine_config_h engine_cfg, const char *name, int *value);
 
 /**
  * @brief Gets the boolean attribute from the configuration dictionary.
@@ -433,10 +391,7 @@ int mv_engine_config_get_int_attribute_c(
  * @see mv_engine_config_get_int_attribute_c()
  * @see mv_engine_config_get_string_attribute_c()
  */
-int mv_engine_config_get_bool_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               bool *value);
+int mv_engine_config_get_bool_attribute_c(mv_engine_config_h engine_cfg, const char *name, bool *value);
 
 /**
  * @brief Gets the string attribute from the configuration dictionary.
@@ -461,10 +416,7 @@ int mv_engine_config_get_bool_attribute_c(
  * @see mv_engine_config_get_int_attribute_c()
  * @see mv_engine_config_get_bool_attribute_c()
  */
-int mv_engine_config_get_string_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               char **value);
+int mv_engine_config_get_string_attribute_c(mv_engine_config_h engine_cfg, const char *name, char **value);
 
 /**
  * @brief Gets the array of string attribute from the configuration dictionary.
@@ -487,11 +439,8 @@ int mv_engine_config_get_string_attribute_c(
  *
  * @see mv_engine_config_set_array_string_attribute_c()
  */
-int mv_engine_config_get_array_string_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               char ***values,
-               int *size);
+int mv_engine_config_get_array_string_attribute_c(mv_engine_config_h engine_cfg, const char *name, char ***values,
+                                                                                                 int *size);
 
 /**
  * @brief Traverses the list of supported attribute names and types.
@@ -531,9 +480,7 @@ int mv_engine_config_get_array_string_attribute_c(
  * @see mv_engine_config_get_bool_attribute_c()
  * @see mv_engine_config_get_string_attribute_c()
  */
-int mv_engine_config_foreach_supported_attribute_c(
-               mv_supported_attribute_cb callback,
-               void *user_data);
+int mv_engine_config_foreach_supported_attribute_c(mv_supported_attribute_cb callback, void *user_data);
 
 #ifdef __cplusplus
 }
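
A usage sketch of the internal C interface above, chaining create, fill, query, and destroy (the RGB buffer is a stand-in and error handling is collapsed):

	#include "mv_common_c.h"

	mv_source_h source = NULL;
	static unsigned char rgb[640 * 480 * 3]; /* stand-in RGB888 frame */
	unsigned int width = 0, height = 0;

	if (mv_create_source_c(&source) == MEDIA_VISION_ERROR_NONE) {
		mv_source_fill_by_buffer_c(source, rgb, sizeof(rgb), 640, 480, MEDIA_VISION_COLORSPACE_RGB888);
		mv_source_get_width_c(source, &width);
		mv_source_get_height_c(source, &height);
		mv_destroy_source_c(source);
	}
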
index 5cb6b8e..adc6aca 100644 (file)
@@ -28,55 +28,52 @@ namespace common
 {
 namespace util
 {
+static int ConvertToCvSource(std::vector<mv_source_h> &mv_srcs, std::vector<cv::Mat> &cv_srcs,
+                                                        std::vector<mv_rectangle_s> &rects)
+{
+       unsigned int rect_idx = 0;
+
+       for (auto &mv_src : mv_srcs) {
+               mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+               mv_rectangle_s *roi = &rects[rect_idx++];
+               unsigned int width = 0, height = 0;
+               unsigned int bufferSize = 0;
+               unsigned char *buffer = NULL;
+
+               if (mv_source_get_width(mv_src, &width) != MEDIA_VISION_ERROR_NONE ||
+                       mv_source_get_height(mv_src, &height) != MEDIA_VISION_ERROR_NONE ||
+                       mv_source_get_colorspace(mv_src, &colorspace) != MEDIA_VISION_ERROR_NONE ||
+                       mv_source_get_buffer(mv_src, &buffer, &bufferSize))
+                       return MEDIA_VISION_ERROR_INTERNAL;
+
+               // TODO. Let's support various color spaces.
+
+               if (colorspace != MEDIA_VISION_COLORSPACE_RGB888) {
+                       LOGE("Not Supported format.");
+                       return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+               }
 
-       static int ConvertToCvSource(std::vector<mv_source_h> &mv_srcs, std::vector<cv::Mat>& cv_srcs, std::vector<mv_rectangle_s>& rects)
-       {
-               unsigned int rect_idx = 0;
-
-               for (auto& mv_src : mv_srcs) {
-                       mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
-                       mv_rectangle_s *roi = &rects[rect_idx++];
-                       unsigned int width = 0, height = 0;
-                       unsigned int bufferSize = 0;
-                       unsigned char *buffer = NULL;
-
-                       if (mv_source_get_width(mv_src, &width) != MEDIA_VISION_ERROR_NONE ||
-                               mv_source_get_height(mv_src, &height) !=
-                                               MEDIA_VISION_ERROR_NONE ||
-                               mv_source_get_colorspace(mv_src, &colorspace) !=
-                                               MEDIA_VISION_ERROR_NONE ||
-                               mv_source_get_buffer(mv_src, &buffer, &bufferSize))
-                               return MEDIA_VISION_ERROR_INTERNAL;
-
-                       // TODO. Let's support various color spaces.
-
-                       if (colorspace != MEDIA_VISION_COLORSPACE_RGB888) {
-                               LOGE("Not Supported format.");
-                               return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
-                       }
-
-                       /* convert mv_source to cv::Mat */
-                       cv::Mat cvSource;
-
-                       if (roi == NULL) {
-                               cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3), buffer).clone();
-                       } else {
-                               cv::Rect cvRoi;
+               /* convert mv_source to cv::Mat */
+               cv::Mat cvSource;
 
-                               cvRoi.x = roi->point.x;
-                               cvRoi.y = roi->point.y;
-                               cvRoi.width = unsigned(roi->point.x + roi->width) >= width ? width - roi->point.x : roi->width;
-                               cvRoi.height = unsigned(roi->point.y + roi->height) >= height ? height - roi->point.y : roi->height;
-                               cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3), buffer)(cvRoi).clone();
-                       }
+               if (roi == NULL) {
+                       cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3), buffer).clone();
+               } else {
+                       cv::Rect cvRoi;
 
-                       cv_srcs.push_back(cvSource);
-                       LOGI("Size: w:%u, h:%u", cvSource.size().width, cvSource.size().height);
+                       cvRoi.x = roi->point.x;
+                       cvRoi.y = roi->point.y;
+                       cvRoi.width = unsigned(roi->point.x + roi->width) >= width ? width - roi->point.x : roi->width;
+                       cvRoi.height = unsigned(roi->point.y + roi->height) >= height ? height - roi->point.y : roi->height;
+                       cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3), buffer)(cvRoi).clone();
                }
 
-               return MEDIA_VISION_ERROR_NONE;
+               cv_srcs.push_back(cvSource);
+               LOGI("Size: w:%u, h:%u", cvSource.size().width, cvSource.size().height);
        }
 
+       return MEDIA_VISION_ERROR_NONE;
+}
 
 } // util
 } // common
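
A sketch of calling the ConvertToCvSource() helper above from within the same namespace; `src` is an RGB888 source obtained elsewhere, and `rects` must hold one ROI per source since the helper indexes them in lockstep:

	std::vector<mv_source_h> mv_srcs = { src };
	std::vector<mv_rectangle_s> rects(mv_srcs.size());
	std::vector<cv::Mat> cv_srcs;

	rects[0] = { { 0, 0 }, 64, 64 }; /* x, y, width, height; clamped to the source inside */

	if (ConvertToCvSource(mv_srcs, cv_srcs, rects) == MEDIA_VISION_ERROR_NONE)
		LOGI("roi: %d x %d", cv_srcs[0].cols, cv_srcs[0].rows); /* deep copy of the ROI */
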
index a90b73e..7035205 100644 (file)
 
 #include <CommonUtils.h>
 
-namespace MediaVision {
-namespace Common {
-
-
-int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource)
+namespace MediaVision
+{
+namespace Common
+{
+int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat &cvSource)
 {
        MEDIA_VISION_INSTANCE_CHECK(mvSource);
 
@@ -34,17 +34,13 @@ int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource)
 
        mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
 
-       MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width),
-                       "Failed to get the width.");
-       MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height),
-                       "Failed to get the height.");
-       MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace),
-                       "Failed to get the colorspace.");
-       MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize),
-                       "Failed to get the buffer size.");
+       MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), "Failed to get the width.");
+       MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), "Failed to get the height.");
+       MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), "Failed to get the colorspace.");
+       MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize), "Failed to get the buffer size.");
 
        int conversionType;
-       switch(colorspace) {
+       switch (colorspace) {
        case MEDIA_VISION_COLORSPACE_INVALID:
                LOGE("Error: mv_source has invalid colorspace.");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -103,19 +99,16 @@ int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource)
        }
 
        if (bufferSize < width * height * channelsNumber) {
-               LOGE("bufferSize : %u is too small for image w: %u, h: %u, c: %u",
-                               bufferSize, width, height, channelsNumber);
+               LOGE("bufferSize : %u is too small for image w: %u, h: %u, c: %u", bufferSize, width, height, channelsNumber);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       if (conversionType == -1) {/* Without conversion */
-               cvSource = cv::Mat(cv::Size(width, height),
-                                       CV_MAKETYPE(depth, channelsNumber), buffer).clone();
-       } else {/* With conversion */
+       if (conversionType == -1) { /* Without conversion */
+               cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(depth, channelsNumber), buffer).clone();
+       } else { /* With conversion */
                /* Class for representation the given image as cv::Mat before conversion */
                try {
-                       cv::Mat origin = cv::Mat(cv::Size(width, height),
-                                               CV_MAKETYPE(depth, channelsNumber), buffer);
+                       cv::Mat origin = cv::Mat(cv::Size(width, height), CV_MAKETYPE(depth, channelsNumber), buffer);
 
                        cv::cvtColor(origin, cvSource, conversionType);
                } catch (const cv::Exception &e) {
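
For reference, a minimal call into the converter above; mvSource is assumed to hold a valid frame, and the colorspace-specific cv::cvtColor() happens inside:

	cv::Mat gray;

	if (MediaVision::Common::convertSourceMV2GrayCV(mvSource, gray) == MEDIA_VISION_ERROR_NONE)
		LOGI("gray frame: %d x %d", gray.cols, gray.rows);
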
index bbcdfdd..212be17 100644 (file)
  * @brief  Engine Configuration class methods implementation.
  */
 
-namespace MediaVision {
-namespace Common {
-
+namespace MediaVision
+{
+namespace Common
+{
 EngineConfig::EngineConfig(std::string config_file_path)
 {
        LOGI("Default Engine config file location is %s", config_file_path.c_str());
@@ -43,10 +44,9 @@ EngineConfig::~EngineConfig()
        ; /* NULL */
 }
 
-int EngineConfig::setAttribute(const std::string& key, const double value)
+int EngineConfig::setAttribute(const std::string &key, const double value)
 {
-       LOGI("Set double attribute for the engine config %p. [%s] = %f",
-                       this, key.c_str(), value);
+       LOGI("Set double attribute for the engine config %p. [%s] = %f", this, key.c_str(), value);
 
        if (__dblDict.find(key) == __dblDict.end()) {
                LOGE("Double attribute [%s] can't be set because isn't supported", key.c_str());
@@ -58,10 +58,9 @@ int EngineConfig::setAttribute(const std::string& key, const double value)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EngineConfig::setAttribute(const std::string& key, const int value)
+int EngineConfig::setAttribute(const std::string &key, const int value)
 {
-       LOGI("Set integer attribute for the engine config %p. [%s] = %i",
-                       this, key.c_str(), value);
+       LOGI("Set integer attribute for the engine config %p. [%s] = %i", this, key.c_str(), value);
 
        if (__intDict.find(key) == __intDict.end()) {
                LOGE("Integer attribute [%s] can't be set because isn't supported", key.c_str());
@@ -73,10 +72,9 @@ int EngineConfig::setAttribute(const std::string& key, const int value)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EngineConfig::setAttribute(const std::string& key, const bool value)
+int EngineConfig::setAttribute(const std::string &key, const bool value)
 {
-       LOGI("Set boolean attribute for the engine config %p. [%s] = %s",
-                       this, key.c_str(), value ? "TRUE" : "FALSE");
+       LOGI("Set boolean attribute for the engine config %p. [%s] = %s", this, key.c_str(), value ? "TRUE" : "FALSE");
 
        if (__boolDict.find(key) == __boolDict.end()) {
                LOGE("Boolean attribute [%s] can't be set because isn't supported", key.c_str());
@@ -88,10 +86,9 @@ int EngineConfig::setAttribute(const std::string& key, const bool value)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EngineConfig::setAttribute(const std::string& key, const std::string& value)
+int EngineConfig::setAttribute(const std::string &key, const std::string &value)
 {
-       LOGI("Set string attribute for the engine config %p. [%s] = %s",
-                       this, key.c_str(), value.c_str());
+       LOGI("Set string attribute for the engine config %p. [%s] = %s", this, key.c_str(), value.c_str());
 
        if (__strDict.find(key) == __strDict.end()) {
                LOGE("String attribute [%s] can't be set because isn't supported", key.c_str());
@@ -103,10 +100,10 @@ int EngineConfig::setAttribute(const std::string& key, const std::string& value)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EngineConfig::setAttribute(const std::string& key, const std::vector<std::string>& value)
+int EngineConfig::setAttribute(const std::string &key, const std::vector<std::string> &value)
 {
-       LOGI("Set vector attribute of string for the engine config %p. [%s] = [%s, ...]",
-                       this, key.c_str(), value[0].c_str());
+       LOGI("Set vector attribute of string for the engine config %p. [%s] = [%s, ...]", this, key.c_str(),
+                value[0].c_str());
 
        if (__vecStrDict.find(key) == __vecStrDict.end()) {
                LOGE("Vector attribute of string [%s] can't be set because isn't supported", key.c_str());
@@ -118,118 +115,121 @@ int EngineConfig::setAttribute(const std::string& key, const std::vector<std::st
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EngineConfig::getDoubleAttribute(const std::string& key, double *value) const
+int EngineConfig::getDoubleAttribute(const std::string &key, double *value) const
 {
        DictDblConstIter dictIter = __dblDict.find(key);
        if (dictIter == __dblDict.end()) {
                LOGE("Attempt to access to the unsupported double attribute [%s] "
-                               "of the engine config %p", key.c_str(), this);
+                        "of the engine config %p",
+                        key.c_str(), this);
                return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
        }
 
-       LOGD("Get double attribute from the engine config %p. [%s] = %f",
-                       this, dictIter->first.c_str(), dictIter->second);
+       LOGD("Get double attribute from the engine config %p. [%s] = %f", this, dictIter->first.c_str(), dictIter->second);
 
        *value = dictIter->second;
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EngineConfig::getIntegerAttribute(const std::string& key, int *value) const
+int EngineConfig::getIntegerAttribute(const std::string &key, int *value) const
 {
        DictIntConstIter dictIter = __intDict.find(key);
        if (dictIter == __intDict.end()) {
                LOGE("Attempt to access to the unsupported integer attribute [%s] "
-                               "of the engine config %p", key.c_str(), this);
+                        "of the engine config %p",
+                        key.c_str(), this);
                return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
        }
 
-       LOGD("Get integer attribute from the engine config %p. [%s] = %i",
-                       this, dictIter->first.c_str(), dictIter->second);
+       LOGD("Get integer attribute from the engine config %p. [%s] = %i", this, dictIter->first.c_str(), dictIter->second);
 
        *value = dictIter->second;
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EngineConfig::getBooleanAttribute(const std::string& key, bool *value) const
+int EngineConfig::getBooleanAttribute(const std::string &key, bool *value) const
 {
        DictBoolConstIter dictIter = __boolDict.find(key);
        if (dictIter == __boolDict.end()) {
                LOGE("Attempt to access to the unsupported boolean attribute [%s] "
-                               "of the engine config %p", key.c_str(), this);
+                        "of the engine config %p",
+                        key.c_str(), this);
                return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
        }
 
-       LOGD("Get boolean attribute from the engine config %p. [%s] = %s",
-                       this, dictIter->first.c_str(), dictIter->second ? "TRUE" : "FALSE");
+       LOGD("Get boolean attribute from the engine config %p. [%s] = %s", this, dictIter->first.c_str(),
+                dictIter->second ? "TRUE" : "FALSE");
 
        *value = dictIter->second;
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EngineConfig::getStringAttribute(const std::string& key, std::string *value) const
+int EngineConfig::getStringAttribute(const std::string &key, std::string *value) const
 {
        DictStrConstIter dictIter = __strDict.find(key);
 
        if (dictIter == __strDict.end()) {
                LOGE("Attempt to access to the unsupported string attribute [%s] "
-                               "of the engine config %p", key.c_str(), this);
+                        "of the engine config %p",
+                        key.c_str(), this);
                return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
        }
 
-       LOGD("Get string attribute from the engine config %p. [%s] = %s",
-                       this, dictIter->first.c_str(), dictIter->second.c_str());
+       LOGD("Get string attribute from the engine config %p. [%s] = %s", this, dictIter->first.c_str(),
+                dictIter->second.c_str());
 
        *value = dictIter->second;
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EngineConfig::getStringAttribute(const std::string& key, std::vector<std::string> *value) const
+int EngineConfig::getStringAttribute(const std::string &key, std::vector<std::string> *value) const
 {
        DictVecStrConstIter dictIter = __vecStrDict.find(key);
        if (dictIter == __vecStrDict.end()) {
                LOGE("Attempt to access to the unsupported vector attribute [%s] of string "
-                               "of the engine config %p", key.c_str(), this);
+                        "of the engine config %p",
+                        key.c_str(), this);
                return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
        }
 
-       LOGD("Get vector attribute of string from the engine config %p. [%s] = [%s,...]",
-                       this, dictIter->first.c_str(), dictIter->second[0].c_str());
+       LOGD("Get vector attribute of string from the engine config %p. [%s] = [%s,...]", this, dictIter->first.c_str(),
+                dictIter->second[0].c_str());
 
        *value = dictIter->second;
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-const std::map<std::string, double>& EngineConfig::getDefaultDblDict()
+const std::map<std::string, double> &EngineConfig::getDefaultDblDict()
 {
        return __dblDict;
 }
 
-const std::map<std::string, int>& EngineConfig::getDefaultIntDict()
+const std::map<std::string, int> &EngineConfig::getDefaultIntDict()
 {
        return __intDict;
 }
 
-const std::map<std::string, bool>& EngineConfig::getDefaultBoolDict()
+const std::map<std::string, bool> &EngineConfig::getDefaultBoolDict()
 {
        return __boolDict;
 }
 
-const std::map<std::string, std::string>& EngineConfig::getDefaultStrDict()
+const std::map<std::string, std::string> &EngineConfig::getDefaultStrDict()
 {
        return __strDict;
 }
 
-const std::map<std::string, std::vector<std::string>>& EngineConfig::getDefaultVecStrDict()
+const std::map<std::string, std::vector<std::string> > &EngineConfig::getDefaultVecStrDict()
 {
        return __vecStrDict;
 }
 
-int EngineConfig::loadDictionaries(std::string& config_file_path)
+int EngineConfig::loadDictionaries(std::string &config_file_path)
 {
        LOGI("Start to cache default attributes from engine configuration file.");
 
@@ -254,7 +254,7 @@ int EngineConfig::loadDictionaries(std::string& config_file_path)
        JsonNode *root = json_parser_get_root(parser);
        if (JSON_NODE_OBJECT != json_node_get_node_type(root)) {
                LOGW("Can't parse tests configuration file. "
-                       "Incorrect json markup.");
+                        "Incorrect json markup.");
                g_object_unref(parser);
                return MEDIA_VISION_ERROR_NO_DATA;
        }
@@ -263,7 +263,7 @@ int EngineConfig::loadDictionaries(std::string& config_file_path)
 
        if (!json_object_has_member(jobj, "attributes")) {
                LOGW("Can't parse tests configuration file. "
-                               "No 'attributes' section.");
+                        "No 'attributes' section.");
                g_object_unref(parser);
                return MEDIA_VISION_ERROR_NO_DATA;
        }
@@ -272,7 +272,7 @@ int EngineConfig::loadDictionaries(std::string& config_file_path)
 
        if (JSON_NODE_ARRAY != json_node_get_node_type(attr_node)) {
                LOGW("Can't parse tests configuration file. "
-                               "'attributes' section isn't array.");
+                        "'attributes' section isn't array.");
                g_object_unref(parser);
                return MEDIA_VISION_ERROR_NO_DATA;
        }
@@ -291,32 +291,32 @@ int EngineConfig::loadDictionaries(std::string& config_file_path)
 
                JsonObject *attr_obj = json_node_get_object(attr_node);
 
-               if (!json_object_has_member(attr_obj, "name") ||
-                               !json_object_has_member(attr_obj, "type") ||
-                               !json_object_has_member(attr_obj, "value")) {
+               if (!json_object_has_member(attr_obj, "name") || !json_object_has_member(attr_obj, "type") ||
+                       !json_object_has_member(attr_obj, "value")) {
                        LOGW("Attribute %u wasn't parsed from json file.", attrInd);
                        continue;
                }
 
-               const char *nameStr = (const char*)json_object_get_string_member(attr_obj, "name");
-               const char *typeStr = (const char*)json_object_get_string_member(attr_obj, "type");
+               const char *nameStr = (const char *) json_object_get_string_member(attr_obj, "name");
+               const char *typeStr = (const char *) json_object_get_string_member(attr_obj, "type");
 
                if (NULL == nameStr || NULL == typeStr) {
                        LOGW("Attribute %i wasn't parsed from json file. name and/or "
-                                       "type of the attribute are parsed as NULL.", attrInd);
+                                "type of the attribute are parsed as NULL.",
+                                attrInd);
                        continue;
                }
 
                if (0 == strcmp("double", typeStr)) {
-                       __dblDict[std::string(nameStr)] = (double)json_object_get_double_member(attr_obj, "value");
+                       __dblDict[std::string(nameStr)] = (double) json_object_get_double_member(attr_obj, "value");
                } else if (0 == strcmp("integer", typeStr)) {
-                       __intDict[std::string(nameStr)] = (int)json_object_get_int_member(attr_obj, "value");
+                       __intDict[std::string(nameStr)] = (int) json_object_get_int_member(attr_obj, "value");
                } else if (0 == strcmp("boolean", typeStr)) {
                        __boolDict[std::string(nameStr)] = json_object_get_boolean_member(attr_obj, "value") ? true : false;
                } else if (0 == strcmp("string", typeStr)) {
-                       __strDict[std::string(nameStr)] = (char*)json_object_get_string_member(attr_obj, "value");
+                       __strDict[std::string(nameStr)] = (char *) json_object_get_string_member(attr_obj, "value");
                } else if (0 == strcmp("array", typeStr)) {
-                       const char *subTypeStr = (const char*)json_object_get_string_member(attr_obj, "subtype");
+                       const char *subTypeStr = (const char *) json_object_get_string_member(attr_obj, "subtype");
 
                        if (NULL == subTypeStr)
                                continue;
@@ -329,12 +329,12 @@ int EngineConfig::loadDictionaries(std::string& config_file_path)
                                        defaultVecStr.push_back(std::string(json_array_get_string_element(attr_array, item)));
                                }
                                __vecStrDict[std::string(nameStr)] = defaultVecStr;
-
                        }
                        //TO-DO: add other subtypes
                } else {
                        LOGW("Attribute %i:%s wasn't parsed from json file. "
-                                       "Type isn't supported.", attrInd, nameStr);
+                                "Type isn't supported.",
+                                attrInd, nameStr);
                        continue;
                }
        }
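
loadDictionaries() above expects a top-level "attributes" array whose entries carry "name", "type", and "value" (plus "subtype" for arrays). An illustrative config fragment with made-up attribute names:

	{
		"attributes": [
			{ "name": "MV_EXAMPLE_THRESHOLD", "type": "double", "value": 0.5 },
			{ "name": "MV_EXAMPLE_ENABLED", "type": "boolean", "value": true },
			{ "name": "MV_EXAMPLE_PATHS", "type": "array", "subtype": "string",
			  "value": [ "/usr/share/example/a", "/usr/share/example/b" ] }
		]
	}
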
index 50f956c..84d815b 100644 (file)
 #include <cstring>
 #include <new>
 
-namespace MediaVision {
-namespace Common {
-
-MediaSource::MediaSource() :
-       m_pBuffer(NULL),
-       m_bufferSize(0),
-       m_width(0),
-       m_height(0),
-       m_colorspace(MEDIA_VISION_COLORSPACE_INVALID)
+namespace MediaVision
 {
-}
+namespace Common
+{
+MediaSource::MediaSource()
+               : m_pBuffer(NULL), m_bufferSize(0), m_width(0), m_height(0), m_colorspace(MEDIA_VISION_COLORSPACE_INVALID)
+{}
 
 MediaSource::~MediaSource()
 {
        clear();
 }
 
-bool MediaSource::alloc(unsigned int bufferSize,
-               unsigned int width, unsigned int height, mv_colorspace_e colorspace)
+bool MediaSource::alloc(unsigned int bufferSize, unsigned int width, unsigned int height, mv_colorspace_e colorspace)
 {
        if (bufferSize == 0)
                return false;
@@ -47,23 +42,26 @@ bool MediaSource::alloc(unsigned int bufferSize,
        LOGD("Call clear() first for media source %p", this);
        clear();
 
-       m_pBuffer = new (std::nothrow)unsigned char[bufferSize];
+       m_pBuffer = new (std::nothrow) unsigned char[bufferSize];
        if (m_pBuffer == NULL) {
                LOGE("Memory allocating for buffer in media source %p failed!", this);
                return false;
        }
 
        LOGD("Assign new size of the internal buffer of media source %p. "
-                       "New size is %ui.", this, bufferSize);
+                "New size is %ui.",
+                this, bufferSize);
        m_bufferSize = bufferSize;
 
        LOGD("Assign new size (%ui x %ui) of the internal buffer image for "
-                       "the media source %p", width, height, this);
+                "the media source %p",
+                width, height, this);
        m_width = width;
        m_height = height;
 
        LOGD("Assign new colorspace (%i) of the internal buffer image for "
-                       "the media source %p", colorspace, this);
+                "the media source %p",
+                colorspace, this);
        m_colorspace = colorspace;
 
        return true;
@@ -76,8 +74,9 @@ void MediaSource::clear(void)
                delete[] m_pBuffer;
        }
        LOGD("Set defaults for media source %p : buffer = NULL; "
-                       "bufferSize = 0; width = 0; height = 0; "
-                       "colorspace = MEDIA_VISION_COLORSPACE_INVALID", this);
+                "bufferSize = 0; width = 0; height = 0; "
+                "colorspace = MEDIA_VISION_COLORSPACE_INVALID",
+                this);
        m_pBuffer = NULL;
        m_bufferSize = 0;
        m_width = 0;
@@ -85,8 +84,8 @@ void MediaSource::clear(void)
        m_colorspace = MEDIA_VISION_COLORSPACE_INVALID;
 }
 
-bool MediaSource::fill(const unsigned char *buffer, unsigned int bufferSize,
-               unsigned int width, unsigned int height, mv_colorspace_e colorspace)
+bool MediaSource::fill(const unsigned char *buffer, unsigned int bufferSize, unsigned int width, unsigned int height,
+                                          mv_colorspace_e colorspace)
 {
        if (bufferSize == 0 || buffer == NULL)
                return false;
@@ -96,37 +95,43 @@ bool MediaSource::fill(const unsigned char *buffer, unsigned int bufferSize,
 
        LOGD("Allocate memory [%i] for buffer in media source %p", bufferSize, this);
        LOGD("Assign new size (%ui x %ui) of the internal buffer image for "
-                       "the media source %p", width, height, this);
+                "the media source %p",
+                width, height, this);
        LOGD("Assign new colorspace (%i) of the internal buffer image for "
-               "the media source %p", colorspace, this);
-       m_pBuffer = new (std::nothrow)unsigned char[bufferSize];
+                "the media source %p",
+                colorspace, this);
+       m_pBuffer = new (std::nothrow) unsigned char[bufferSize];
        if (m_pBuffer == NULL) {
                LOGE("Memory allocating for buffer in media source %p failed!", this);
                return false;
        }
 
        LOGD("Copy data from external buffer (%p) to the internal buffer (%p) of "
-                       "media source %p", buffer, m_pBuffer, this);
+                "media source %p",
+                buffer, m_pBuffer, this);
        std::memcpy(m_pBuffer, buffer, bufferSize);
 
        LOGD("Assign new size of the internal buffer of media source %p. "
-                       "New size is %ui.", this, bufferSize);
+                "New size is %ui.",
+                this, bufferSize);
        m_bufferSize = bufferSize;
 
        LOGD("Assign new size (%ui x %ui) of the internal buffer image for "
-                       "the media source %p", width, height, this);
+                "the media source %p",
+                width, height, this);
        m_width = width;
        m_height = height;
 
        LOGD("Assign new colorspace (%i) of the internal buffer image for "
-                       "the media source %p", colorspace, this);
+                "the media source %p",
+                colorspace, this);
        m_colorspace = colorspace;
 
        return true;
 }
 
-bool MediaSource::fill(const unsigned char *buffer, unsigned int bufferSize,
-               unsigned int width, unsigned int height, size_t offset)
+bool MediaSource::fill(const unsigned char *buffer, unsigned int bufferSize, unsigned int width, unsigned int height,
+                                          size_t offset)
 {
        if (bufferSize == 0 || buffer == NULL) {
                LOGE("bufferSize is %d and buffer[%p]", bufferSize, buffer);
@@ -140,10 +145,12 @@ bool MediaSource::fill(const unsigned char *buffer, unsigned int bufferSize,
 
        LOGD("Allocate memory [%i] for buffer in media source %p", bufferSize, this);
        LOGD("Assign new size (%ui x %ui) of the internal buffer image for "
-                       "the media source %p", width, height, this);
+                "the media source %p",
+                width, height, this);
 
        LOGD("Copy data from external buffer (%p) to the internal buffer (%p + %zd) of "
-                       "media source %p", buffer, m_pBuffer, offset, this);
+                "media source %p",
+                buffer, m_pBuffer, offset, this);
        std::memcpy(m_pBuffer + offset, buffer, bufferSize);
 
        LOGD("size is %ui x %ui [%ui] on buffer(%p).", width, height, bufferSize, this);
index 573945a..19b5432 100644 (file)
@@ -18,8 +18,7 @@
 #include "mv_common.h"
 #include "mv_common_c.h"
 
-int mv_create_source(
-               mv_source_h *source)
+int mv_create_source(mv_source_h *source)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(source);
@@ -31,8 +30,7 @@ int mv_create_source(
        return ret;
 }
 
-int mv_destroy_source(
-               mv_source_h source)
+int mv_destroy_source(mv_source_h source)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -44,9 +42,7 @@ int mv_destroy_source(
        return ret;
 }
 
-int mv_source_fill_by_media_packet(
-               mv_source_h source,
-               media_packet_h media_packet)
+int mv_source_fill_by_media_packet(mv_source_h source, media_packet_h media_packet)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -59,29 +55,21 @@ int mv_source_fill_by_media_packet(
        return ret;
 }
 
-int mv_source_fill_by_buffer(
-               mv_source_h source,
-               unsigned char *data_buffer,
-               unsigned int buffer_size,
-               unsigned int image_width,
-               unsigned int image_height,
-               mv_colorspace_e image_colorspace)
+int mv_source_fill_by_buffer(mv_source_h source, unsigned char *data_buffer, unsigned int buffer_size,
+                                                        unsigned int image_width, unsigned int image_height, mv_colorspace_e image_colorspace)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
        MEDIA_VISION_NULL_ARG_CHECK(data_buffer);
 
        MEDIA_VISION_FUNCTION_ENTER();
-       int ret = mv_source_fill_by_buffer_c(
-               source, data_buffer, buffer_size, image_width, image_height,
-               image_colorspace);
+       int ret = mv_source_fill_by_buffer_c(source, data_buffer, buffer_size, image_width, image_height, image_colorspace);
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_source_clear(
-               mv_source_h source)
+int mv_source_clear(mv_source_h source)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -93,10 +81,7 @@ int mv_source_clear(
        return ret;
 }
 
-int mv_source_get_buffer(
-               mv_source_h source,
-               unsigned char **data_buffer,
-               unsigned int *buffer_size)
+int mv_source_get_buffer(mv_source_h source, unsigned char **data_buffer, unsigned int *buffer_size)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -110,9 +95,7 @@ int mv_source_get_buffer(
        return ret;
 }
 
-int mv_source_get_height(
-               mv_source_h source,
-               unsigned int *image_height)
+int mv_source_get_height(mv_source_h source, unsigned int *image_height)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -125,9 +108,7 @@ int mv_source_get_height(
        return ret;
 }
 
-int mv_source_get_width(
-               mv_source_h source,
-               unsigned int *image_width)
+int mv_source_get_width(mv_source_h source, unsigned int *image_width)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -140,9 +121,7 @@ int mv_source_get_width(
        return ret;
 }
 
-int mv_source_get_colorspace(
-               mv_source_h source,
-               mv_colorspace_e *image_colorspace)
+int mv_source_get_colorspace(mv_source_h source, mv_colorspace_e *image_colorspace)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -155,8 +134,7 @@ int mv_source_get_colorspace(
        return ret;
 }
 
-int mv_create_engine_config(
-               mv_engine_config_h *engine_cfg)
+int mv_create_engine_config(mv_engine_config_h *engine_cfg)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(engine_cfg);
@@ -168,8 +146,7 @@ int mv_create_engine_config(
        return ret;
 }
 
-int mv_destroy_engine_config(
-               mv_engine_config_h engine_cfg)
+int mv_destroy_engine_config(mv_engine_config_h engine_cfg)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
@@ -181,61 +158,46 @@ int mv_destroy_engine_config(
        return ret;
 }
 
-int mv_engine_config_set_double_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               double value)
+int mv_engine_config_set_double_attribute(mv_engine_config_h engine_cfg, const char *name, double value)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
        MEDIA_VISION_NULL_ARG_CHECK(name);
 
        MEDIA_VISION_FUNCTION_ENTER();
-       int ret = mv_engine_config_set_double_attribute_c(
-                                       engine_cfg, name, value);
+       int ret = mv_engine_config_set_double_attribute_c(engine_cfg, name, value);
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_engine_config_set_int_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               int value)
+int mv_engine_config_set_int_attribute(mv_engine_config_h engine_cfg, const char *name, int value)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
        MEDIA_VISION_NULL_ARG_CHECK(name);
 
        MEDIA_VISION_FUNCTION_ENTER();
-       int ret = mv_engine_config_set_int_attribute_c(
-                                       engine_cfg, name, value);
+       int ret = mv_engine_config_set_int_attribute_c(engine_cfg, name, value);
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_engine_config_set_bool_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               bool value)
+int mv_engine_config_set_bool_attribute(mv_engine_config_h engine_cfg, const char *name, bool value)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
        MEDIA_VISION_NULL_ARG_CHECK(name);
 
        MEDIA_VISION_FUNCTION_ENTER();
-       int ret = mv_engine_config_set_bool_attribute_c(
-                                       engine_cfg, name, value);
+       int ret = mv_engine_config_set_bool_attribute_c(engine_cfg, name, value);
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_engine_config_set_string_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               const char *value)
+int mv_engine_config_set_string_attribute(mv_engine_config_h engine_cfg, const char *name, const char *value)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
@@ -243,18 +205,14 @@ int mv_engine_config_set_string_attribute(
        MEDIA_VISION_NULL_ARG_CHECK(value);
 
        MEDIA_VISION_FUNCTION_ENTER();
-       int ret = mv_engine_config_set_string_attribute_c(
-                                       engine_cfg, name, value);
+       int ret = mv_engine_config_set_string_attribute_c(engine_cfg, name, value);
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_engine_config_set_array_string_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               const char **values,
-               unsigned int size)
+int mv_engine_config_set_array_string_attribute(mv_engine_config_h engine_cfg, const char *name, const char **values,
+                                                                                               unsigned int size)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
@@ -263,17 +221,13 @@ int mv_engine_config_set_array_string_attribute(
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = mv_engine_config_set_array_string_attribute_c(
-                                       engine_cfg, name, values, size);
+       int ret = mv_engine_config_set_array_string_attribute_c(engine_cfg, name, values, size);
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_engine_config_get_double_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               double *value)
+int mv_engine_config_get_double_attribute(mv_engine_config_h engine_cfg, const char *name, double *value)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
@@ -281,17 +235,13 @@ int mv_engine_config_get_double_attribute(
        MEDIA_VISION_NULL_ARG_CHECK(value);
 
        MEDIA_VISION_FUNCTION_ENTER();
-       int ret = mv_engine_config_get_double_attribute_c(
-                                       engine_cfg, name, value);
+       int ret = mv_engine_config_get_double_attribute_c(engine_cfg, name, value);
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_engine_config_get_int_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               int *value)
+int mv_engine_config_get_int_attribute(mv_engine_config_h engine_cfg, const char *name, int *value)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
@@ -299,17 +249,13 @@ int mv_engine_config_get_int_attribute(
        MEDIA_VISION_NULL_ARG_CHECK(value);
 
        MEDIA_VISION_FUNCTION_ENTER();
-       int ret = mv_engine_config_get_int_attribute_c(
-               engine_cfg, name, value);
+       int ret = mv_engine_config_get_int_attribute_c(engine_cfg, name, value);
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_engine_config_get_bool_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               bool *value)
+int mv_engine_config_get_bool_attribute(mv_engine_config_h engine_cfg, const char *name, bool *value)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
@@ -317,17 +263,13 @@ int mv_engine_config_get_bool_attribute(
        MEDIA_VISION_NULL_ARG_CHECK(value);
 
        MEDIA_VISION_FUNCTION_ENTER();
-       int ret = mv_engine_config_get_bool_attribute_c(
-                                       engine_cfg, name, value);
+       int ret = mv_engine_config_get_bool_attribute_c(engine_cfg, name, value);
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_engine_config_get_string_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               char **value)
+int mv_engine_config_get_string_attribute(mv_engine_config_h engine_cfg, const char *name, char **value)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
@@ -335,18 +277,14 @@ int mv_engine_config_get_string_attribute(
        MEDIA_VISION_NULL_ARG_CHECK(value);
 
        MEDIA_VISION_FUNCTION_ENTER();
-       int ret = mv_engine_config_get_string_attribute_c(
-                                       engine_cfg, name, value);
+       int ret = mv_engine_config_get_string_attribute_c(engine_cfg, name, value);
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_engine_config_get_array_string_attribute(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               char ***values,
-               int *size)
+int mv_engine_config_get_array_string_attribute(mv_engine_config_h engine_cfg, const char *name, char ***values,
+                                                                                               int *size)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
@@ -354,23 +292,19 @@ int mv_engine_config_get_array_string_attribute(
        MEDIA_VISION_NULL_ARG_CHECK(values);
 
        MEDIA_VISION_FUNCTION_ENTER();
-       int ret = mv_engine_config_get_array_string_attribute_c(
-                                       engine_cfg, name, values, size);
+       int ret = mv_engine_config_get_array_string_attribute_c(engine_cfg, name, values, size);
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_engine_config_foreach_supported_attribute(
-               mv_supported_attribute_cb callback,
-               void *user_data)
+int mv_engine_config_foreach_supported_attribute(mv_supported_attribute_cb callback, void *user_data)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(callback);
 
        MEDIA_VISION_FUNCTION_ENTER();
-       int ret = mv_engine_config_foreach_supported_attribute_c(
-                                       callback, user_data);
+       int ret = mv_engine_config_foreach_supported_attribute_c(callback, user_data);
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
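
And a sketch of the public wrappers above end to end; the attribute name is hypothetical and must be among the keys shipped in the engine's default configuration:

	mv_engine_config_h cfg = NULL;
	double value = 0.0;

	if (mv_create_engine_config(&cfg) == MEDIA_VISION_ERROR_NONE) {
		mv_engine_config_set_double_attribute(cfg, "MV_EXAMPLE_THRESHOLD", 0.7);
		mv_engine_config_get_double_attribute(cfg, "MV_EXAMPLE_THRESHOLD", &value);
		mv_destroy_engine_config(cfg);
	}
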
index 76b249b..67ac10e 100644 (file)
 #include <stdlib.h>
 #include <media_packet.h>
 
-int mv_create_source_c(
-               mv_source_h *source_ptr)
+int mv_create_source_c(mv_source_h *source_ptr)
 {
        if (source_ptr == NULL)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
 
        LOGD("Creating media vision source");
-       (*source_ptr) = (static_cast<mv_source_h>
-                       (new (std::nothrow)MediaVision::Common::MediaSource()));
+       (*source_ptr) = (static_cast<mv_source_h>(new (std::nothrow) MediaVision::Common::MediaSource()));
 
        if (*source_ptr == NULL) {
                LOGE("Failed to create media vision source");
@@ -46,8 +44,7 @@ int mv_create_source_c(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_destroy_source_c(
-               mv_source_h source)
+int mv_destroy_source_c(mv_source_h source)
 {
        if (!source) {
                LOGE("Media source can't be destroyed because handle is NULL");
@@ -55,20 +52,19 @@ int mv_destroy_source_c(
        }
 
        LOGD("Destroying media vision source [%p]", source);
-       delete (static_cast<MediaVision::Common::MediaSource*>(source));
+       delete (static_cast<MediaVision::Common::MediaSource *>(source));
        LOGD("Media vision source has been destroyed");
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_source_fill_by_media_packet_c(
-               mv_source_h source,
-               media_packet_h media_packet)
+int mv_source_fill_by_media_packet_c(mv_source_h source, media_packet_h media_packet)
 {
        if (!source || !media_packet) {
                LOGE("Media source can't be filled by media_packet handle because "
-                               "one of the source or media_packet handles is NULL. "
-                               "source = %p; media_packet = %p", source, media_packet);
+                        "one of the source or media_packet handles is NULL. "
+                        "source = %p; media_packet = %p",
+                        source, media_packet);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
@@ -78,10 +74,16 @@ int mv_source_fill_by_media_packet_c(
        int image_width = 0;
        int image_height = 0;
 
-       int plane_width[4] = {0, };
-       int plane_height[4] = {0, };
-       uint64_t plane_size[4] = {0, };
-       size_t  offset = 0;
+       int plane_width[4] = {
+               0,
+       };
+       int plane_height[4] = {
+               0,
+       };
+       uint64_t plane_size[4] = {
+               0,
+       };
+       size_t offset = 0;
        media_format_h format = NULL;
        media_format_mimetype_e mimetype = MEDIA_FORMAT_I420;
        unsigned char *data_buffer = NULL;
@@ -105,8 +107,7 @@ int mv_source_fill_by_media_packet_c(
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       ret = media_format_get_video_info(
-                       format, &mimetype, &image_width, &image_height, NULL, NULL);
+       ret = media_format_get_video_info(format, &mimetype, &image_width, &image_height, NULL, NULL);
        if (ret != MEDIA_PACKET_ERROR_NONE) {
                LOGE("media_format_get_video_info() failed, mv_source_h fill skipped");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -150,7 +151,8 @@ int mv_source_fill_by_media_packet_c(
                break;
        default:
                LOGE("Format of the media packet buffer is not supported by media "
-                               "vision source (media_format_h mimetype=%i)", mimetype);
+                        "vision source (media_format_h mimetype=%i)",
+                        mimetype);
                return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
        }
 
@@ -180,23 +182,24 @@ int mv_source_fill_by_media_packet_c(
                buffer_size += plane_size[ind];
        }
 
-       if (!(static_cast<MediaVision::Common::MediaSource*>(source))->alloc(buffer_size,
-                       static_cast<unsigned int>(plane_width[0]),
-                       static_cast<unsigned int>(plane_height[0]), image_colorspace)) {
+       if (!(static_cast<MediaVision::Common::MediaSource *>(source))
+                                ->alloc(buffer_size, static_cast<unsigned int>(plane_width[0]),
+                                                static_cast<unsigned int>(plane_height[0]), image_colorspace)) {
                LOGE("mv_source_h alloc from media_packet_h failed");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
 
        for (ind = 0; ind < plane_num; ++ind) {
-               ret = media_packet_get_video_plane_data_ptr(media_packet, ind, (void**)&data_buffer);
+               ret = media_packet_get_video_plane_data_ptr(media_packet, ind, (void **) &data_buffer);
                if (ret != MEDIA_PACKET_ERROR_NONE) {
                        LOGE("media_packet_get_video_plane_data_ptr() plane[%d] failed, mv_source_h fill skipped", ind);
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
                }
 
-               offset = (ind == 0) ? 0 : plane_size[ind-1]*sizeof(char);
-               if (!(static_cast<MediaVision::Common::MediaSource*>(source))->fill(data_buffer, plane_size[ind],
-                       static_cast<unsigned int>(plane_width[ind]), static_cast<unsigned int>(plane_height[ind]), offset)) {
+               offset = (ind == 0) ? 0 : plane_size[ind - 1] * sizeof(char);
+               if (!(static_cast<MediaVision::Common::MediaSource *>(source))
+                                        ->fill(data_buffer, plane_size[ind], static_cast<unsigned int>(plane_width[ind]),
+                                                       static_cast<unsigned int>(plane_height[ind]), offset)) {
                        LOGE("mv_source_h filling from media_packet_h failed");
                        return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
                }
@@ -207,24 +210,19 @@ int mv_source_fill_by_media_packet_c(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_source_fill_by_buffer_c(
-               mv_source_h source,
-               unsigned char *data_buffer,
-               unsigned int buffer_size,
-               unsigned int image_width,
-               unsigned int image_height,
-               mv_colorspace_e image_colorspace)
+int mv_source_fill_by_buffer_c(mv_source_h source, unsigned char *data_buffer, unsigned int buffer_size,
+                                                          unsigned int image_width, unsigned int image_height, mv_colorspace_e image_colorspace)
 {
        if (!source || buffer_size == 0 || data_buffer == NULL) {
                LOGE("Media source can't be filled by buffer because "
-                               "one of the source or data_buffer is NULL or buffer_size = 0. "
-                               "source = %p; data_buffer = %p; buffer_size = %u",
-                               source, data_buffer, buffer_size);
+                        "one of the source or data_buffer is NULL or buffer_size = 0. "
+                        "source = %p; data_buffer = %p; buffer_size = %u",
+                        source, data_buffer, buffer_size);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       if (!(static_cast<MediaVision::Common::MediaSource*>(source))->fill(data_buffer,
-                       buffer_size, image_width, image_height, image_colorspace)) {
+       if (!(static_cast<MediaVision::Common::MediaSource *>(source))
+                                ->fill(data_buffer, buffer_size, image_width, image_height, image_colorspace)) {
                LOGE("mv_source_h filling from buffer failed");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
@@ -233,8 +231,7 @@ int mv_source_fill_by_buffer_c(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_source_clear_c(
-               mv_source_h source)
+int mv_source_clear_c(mv_source_h source)
 {
        if (!source) {
                LOGE("Media source can't be cleared because source handle is NULL");
@@ -242,16 +239,13 @@ int mv_source_clear_c(
        }
 
        LOGD("Clear media vision source [%p]", source);
-       (static_cast<MediaVision::Common::MediaSource*>(source))->clear();
+       (static_cast<MediaVision::Common::MediaSource *>(source))->clear();
        LOGD("Media vision source [%p] has been cleared", source);
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_source_get_buffer_c(
-               mv_source_h source,
-               unsigned char **buffer,
-               unsigned int *size)
+int mv_source_get_buffer_c(mv_source_h source, unsigned char **buffer, unsigned int *size)
 {
        if (!source) {
                LOGE("Impossible to get buffer for NULL mv_source_h handle");
@@ -259,16 +253,14 @@ int mv_source_get_buffer_c(
        }
 
        LOGD("Get media vision source [%p] buffer and buffer size to be returned", source);
-       *buffer = (static_cast<MediaVision::Common::MediaSource*>(source))->getBuffer();
-       *size = (static_cast<MediaVision::Common::MediaSource*>(source))->getBufferSize();
+       *buffer = (static_cast<MediaVision::Common::MediaSource *>(source))->getBuffer();
+       *size = (static_cast<MediaVision::Common::MediaSource *>(source))->getBufferSize();
        LOGD("Media vision source [%p] buffer (%p) and buffer size (%ui) has been returned", source, buffer, *size);
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_source_get_height_c(
-               mv_source_h source,
-               unsigned int *height)
+int mv_source_get_height_c(mv_source_h source, unsigned int *height)
 {
        if (!source) {
                LOGE("Impossible to get height for NULL mv_source_h handle");
@@ -276,15 +268,13 @@ int mv_source_get_height_c(
        }
 
        LOGD("Get media vision source [%p] height to be returned", source);
-       *height = (static_cast<MediaVision::Common::MediaSource*>(source))->getHeight();
+       *height = (static_cast<MediaVision::Common::MediaSource *>(source))->getHeight();
        LOGD("Media vision source [%p] height (%ui) has been returned", source, *height);
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_source_get_width_c(
-               mv_source_h source,
-               unsigned int *width)
+int mv_source_get_width_c(mv_source_h source, unsigned int *width)
 {
        if (!source) {
                LOGE("Impossible to get width for NULL mv_source_h handle");
@@ -292,15 +282,13 @@ int mv_source_get_width_c(
        }
 
        LOGD("Get media vision source [%p] width to be returned", source);
-       *width = (static_cast<MediaVision::Common::MediaSource*>(source))->getWidth();
+       *width = (static_cast<MediaVision::Common::MediaSource *>(source))->getWidth();
        LOGD("Media vision source [%p] width (%ui) has been returned", source, *width);
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_source_get_colorspace_c(
-               mv_source_h source,
-               mv_colorspace_e *colorspace)
+int mv_source_get_colorspace_c(mv_source_h source, mv_colorspace_e *colorspace)
 {
        if (!source) {
                LOGE("Impossible to get colorspace for NULL mv_source_h handle");
@@ -308,14 +296,13 @@ int mv_source_get_colorspace_c(
        }
 
        LOGD("Get media vision source [%p] colorspace to be returned", source);
-       *colorspace = (static_cast<MediaVision::Common::MediaSource*>(source))->getColorspace();
+       *colorspace = (static_cast<MediaVision::Common::MediaSource *>(source))->getColorspace();
        LOGD("Media vision source [%p] colorspace (%i) has been returned", source, *colorspace);
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_create_engine_config_c(
-               mv_engine_config_h *engine_cfg)
+int mv_create_engine_config_c(mv_engine_config_h *engine_cfg)
 {
        if (engine_cfg == NULL) {
                LOGE("Impossible to create mv_engine_config_h handle");
@@ -324,8 +311,7 @@ int mv_create_engine_config_c(
 
        LOGD("Creating media vision engine config");
        try {
-               (*engine_cfg) = static_cast<mv_engine_config_h>
-                       (new MediaVision::Common::EngineConfig());
+               (*engine_cfg) = static_cast<mv_engine_config_h>(new MediaVision::Common::EngineConfig());
        } catch (...) {
                LOGE("Failed to create mv_engine_config_h handle");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
@@ -335,8 +321,7 @@ int mv_create_engine_config_c(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_destroy_engine_config_c(
-               mv_engine_config_h engine_cfg)
+int mv_destroy_engine_config_c(mv_engine_config_h engine_cfg)
 {
        if (!engine_cfg) {
                LOGE("Impossible to destroy NULL mv_engine_config_h handle");
@@ -344,30 +329,25 @@ int mv_destroy_engine_config_c(
        }
 
        LOGD("Destroying media vision engine config [%p]", engine_cfg);
-       delete (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg));
+       delete (static_cast<MediaVision::Common::EngineConfig *>(engine_cfg));
        LOGD("Media vision engine config has been destroyed");
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_engine_config_set_double_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               double value)
+int mv_engine_config_set_double_attribute_c(mv_engine_config_h engine_cfg, const char *name, double value)
 {
        if (!engine_cfg || name == NULL) {
                LOGE("Impossible to set attribute. One of the required parameters is "
-                               "NULL. engine_cfg = %p; name = %p;",
-                               engine_cfg, name);
+                        "NULL. engine_cfg = %p; name = %p;",
+                        engine_cfg, name);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->setAttribute(
-                       std::string(name), value);
+       int ret = (static_cast<MediaVision::Common::EngineConfig *>(engine_cfg))->setAttribute(std::string(name), value);
 
        if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Failed to set attribute [%s] with value %f. Error code (0x%08x)",
-                               name, value, ret);
+               LOGE("Failed to set attribute [%s] with value %f. Error code (0x%08x)", name, value, ret);
                return ret;
        }
 
@@ -375,24 +355,19 @@ int mv_engine_config_set_double_attribute_c(
        return ret;
 }
 
-int mv_engine_config_set_int_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               int value)
+int mv_engine_config_set_int_attribute_c(mv_engine_config_h engine_cfg, const char *name, int value)
 {
        if (!engine_cfg || name == NULL) {
                LOGE("Impossible to set attribute. One of the required parameters is "
-                               "NULL. engine_cfg = %p; name = %p;",
-                               engine_cfg, name);
+                        "NULL. engine_cfg = %p; name = %p;",
+                        engine_cfg, name);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->setAttribute(
-                       std::string(name), value);
+       int ret = (static_cast<MediaVision::Common::EngineConfig *>(engine_cfg))->setAttribute(std::string(name), value);
 
        if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Failed to set attribute [%s] with value %i. Error code (0x%08x)",
-                               name, value, ret);
+               LOGE("Failed to set attribute [%s] with value %i. Error code (0x%08x)", name, value, ret);
                return ret;
        }
 
@@ -401,50 +376,40 @@ int mv_engine_config_set_int_attribute_c(
        return ret;
 }
 
-int mv_engine_config_set_bool_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               bool value)
+int mv_engine_config_set_bool_attribute_c(mv_engine_config_h engine_cfg, const char *name, bool value)
 {
        if (!engine_cfg || name == NULL) {
                LOGE("Impossible to set attribute. One of the required parameters is "
-                               "NULL. engine_cfg = %p; name = %p;",
-                               engine_cfg, name);
+                        "NULL. engine_cfg = %p; name = %p;",
+                        engine_cfg, name);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->setAttribute(
-                       std::string(name), value);
+       int ret = (static_cast<MediaVision::Common::EngineConfig *>(engine_cfg))->setAttribute(std::string(name), value);
 
        if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Failed to set attribute [%s] with value %s. Error code (0x%08x)",
-                               name, value ? "TRUE" : "FALSE", ret);
+               LOGE("Failed to set attribute [%s] with value %s. Error code (0x%08x)", name, value ? "TRUE" : "FALSE", ret);
                return ret;
        }
 
-       LOGD("Attribute [%s] (value %s) has been set",
-                       name, value ? "TRUE" : "FALSE");
+       LOGD("Attribute [%s] (value %s) has been set", name, value ? "TRUE" : "FALSE");
        return ret;
 }
 
-int mv_engine_config_set_string_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               const char *value)
+int mv_engine_config_set_string_attribute_c(mv_engine_config_h engine_cfg, const char *name, const char *value)
 {
        if (!engine_cfg || name == NULL || value == NULL) {
                LOGE("Impossible to set attribute. One of the required parameters is "
-                               "NULL. engine_cfg = %p; name = %p; value = %p;",
-                               engine_cfg, name, value);
+                        "NULL. engine_cfg = %p; name = %p; value = %p;",
+                        engine_cfg, name, value);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->setAttribute(
-                       std::string(name), std::string(value));
+       int ret = (static_cast<MediaVision::Common::EngineConfig *>(engine_cfg))
+                                         ->setAttribute(std::string(name), std::string(value));
 
        if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Failed to set attribute [%s] with value %s. Error code (0x%08x)",
-                               name, value, ret);
+               LOGE("Failed to set attribute [%s] with value %s. Error code (0x%08x)", name, value, ret);
                return ret;
        }
 
@@ -452,16 +417,13 @@ int mv_engine_config_set_string_attribute_c(
        return ret;
 }
 
-int mv_engine_config_set_array_string_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               const char **values,
-               unsigned int size)
+int mv_engine_config_set_array_string_attribute_c(mv_engine_config_h engine_cfg, const char *name, const char **values,
+                                                                                                 unsigned int size)
 {
        if (!engine_cfg || name == NULL || values == NULL) {
                LOGE("Impossible to set attribute. One of the required parameters is "
-                               "NULL. engine_cfg = %p; name = %p; values = %p;",
-                               engine_cfg, name, values);
+                        "NULL. engine_cfg = %p; name = %p; values = %p;",
+                        engine_cfg, name, values);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
@@ -476,8 +438,8 @@ int mv_engine_config_set_array_string_attribute_c(
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->setAttribute(
-                       std::string(name), arrayValues);
+       int ret =
+                       (static_cast<MediaVision::Common::EngineConfig *>(engine_cfg))->setAttribute(std::string(name), arrayValues);
 
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Failed to set attribute [%s]. Error code (0x%08x)", name, ret);
@@ -488,109 +450,90 @@ int mv_engine_config_set_array_string_attribute_c(
        return ret;
 }
 
-int mv_engine_config_get_double_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               double *value)
+int mv_engine_config_get_double_attribute_c(mv_engine_config_h engine_cfg, const char *name, double *value)
 {
        if (!engine_cfg || name == NULL || value == NULL) {
                LOGE("Impossible to get attribute. One of the required parameters is "
-                               "NULL. engine_cfg = %p; name = %p; value = %p;",
-                               engine_cfg, name, value);
+                        "NULL. engine_cfg = %p; name = %p; value = %p;",
+                        engine_cfg, name, value);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->getDoubleAttribute(
-                       std::string(name), value);
+       int ret =
+                       (static_cast<MediaVision::Common::EngineConfig *>(engine_cfg))->getDoubleAttribute(std::string(name), value);
 
        if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
-                               name, ret);
+               LOGE("Failed to get attribute [%s]. Error code (0x%08x)", name, ret);
                return ret;
        }
 
-       LOGD("Attribute [%s] (value %f) has been gotten",
-                       name, *value);
+       LOGD("Attribute [%s] (value %f) has been gotten", name, *value);
        return ret;
 }
 
-int mv_engine_config_get_int_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               int *value)
+int mv_engine_config_get_int_attribute_c(mv_engine_config_h engine_cfg, const char *name, int *value)
 {
        if (!engine_cfg || name == NULL || value == NULL) {
                LOGE("Impossible to get attribute. One of the required parameters is "
-                               "NULL. engine_cfg = %p; name = %p; value = %p;",
-                               engine_cfg, name, value);
+                        "NULL. engine_cfg = %p; name = %p; value = %p;",
+                        engine_cfg, name, value);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->getIntegerAttribute(
-                       std::string(name), value);
+       int ret = (static_cast<MediaVision::Common::EngineConfig *>(engine_cfg))
+                                         ->getIntegerAttribute(std::string(name), value);
 
        if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
-                               name, ret);
+               LOGE("Failed to get attribute [%s]. Error code (0x%08x)", name, ret);
                return ret;
        }
 
-       LOGD("Attribute [%s] (value %i) has been gotten",
-                       name, *value);
+       LOGD("Attribute [%s] (value %i) has been gotten", name, *value);
        return ret;
 }
 
-int mv_engine_config_get_bool_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               bool *value)
+int mv_engine_config_get_bool_attribute_c(mv_engine_config_h engine_cfg, const char *name, bool *value)
 {
        if (!engine_cfg || name == NULL || value == NULL) {
                LOGE("Impossible to get attribute. One of the required parameters is "
-                               "NULL. engine_cfg = %p; name = %p; value = %p;",
-                               engine_cfg, name, value);
+                        "NULL. engine_cfg = %p; name = %p; value = %p;",
+                        engine_cfg, name, value);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->getBooleanAttribute(
-                       std::string(name), value);
+       int ret = (static_cast<MediaVision::Common::EngineConfig *>(engine_cfg))
+                                         ->getBooleanAttribute(std::string(name), value);
 
        if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
-                               name, ret);
+               LOGE("Failed to get attribute [%s]. Error code (0x%08x)", name, ret);
                return ret;
        }
 
-       LOGD("Attribute [%s] (value %s) has been gotten",
-                       name, *value ? "TRUE" : "FALSE");
+       LOGD("Attribute [%s] (value %s) has been gotten", name, *value ? "TRUE" : "FALSE");
        return ret;
 }
 
-int mv_engine_config_get_string_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               char **value)
+int mv_engine_config_get_string_attribute_c(mv_engine_config_h engine_cfg, const char *name, char **value)
 {
        if (!engine_cfg || name == NULL || value == NULL) {
                LOGE("Impossible to get attribute. One of the required parameters is "
-                               "NULL. engine_cfg = %p; name = %p; value = %p;",
-                               engine_cfg, name, value);
+                        "NULL. engine_cfg = %p; name = %p; value = %p;",
+                        engine_cfg, name, value);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
        std::string attributeValue;
-       int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->getStringAttribute(
-                       std::string(name), &attributeValue);
+       int ret = (static_cast<MediaVision::Common::EngineConfig *>(engine_cfg))
+                                         ->getStringAttribute(std::string(name), &attributeValue);
 
        if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
-                               name, ret);
+               LOGE("Failed to get attribute [%s]. Error code (0x%08x)", name, ret);
                return ret;
        }
 
        LOGD("Convert string to char*");
        int stringSize = attributeValue.size();
-       (*value) = (char*)malloc(sizeof(char) * (stringSize + 1));
+       (*value) = (char *) malloc(sizeof(char) * (stringSize + 1));
        if ((*value) == NULL) {
                LOGE("Failed to convert string to char*");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
@@ -604,7 +547,7 @@ int mv_engine_config_get_string_attribute_c(
                        (*value) = NULL;
                        return MEDIA_VISION_ERROR_INVALID_OPERATION;
                }
-       } catch (std::out_of_range& e) {
+       } catch (std::out_of_range &e) {
                LOGE("Conversion from string to char* failed");
                free(*value);
                (*value) = NULL;
@@ -612,38 +555,33 @@ int mv_engine_config_get_string_attribute_c(
        }
        (*value)[stringSize] = '\0';
 
-       LOGD("Attribute [%s] (value %s) has been gotten",
-                       name, *value);
+       LOGD("Attribute [%s] (value %s) has been gotten", name, *value);
        return ret;
 }
 
-int mv_engine_config_get_array_string_attribute_c(
-               mv_engine_config_h engine_cfg,
-               const char *name,
-               char ***values,
-               int *size)
+int mv_engine_config_get_array_string_attribute_c(mv_engine_config_h engine_cfg, const char *name, char ***values,
+                                                                                                 int *size)
 {
        if (!engine_cfg || name == NULL || values == NULL) {
                LOGE("Impossible to get attribute. One of the required parameters is "
-                               "NULL. engine_cfg = %p; name = %p; values = %p;",
-                               engine_cfg, name, values);
+                        "NULL. engine_cfg = %p; name = %p; values = %p;",
+                        engine_cfg, name, values);
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
        std::vector<std::string> attributeValue;
-       int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->getStringAttribute(
-                       std::string(name), &attributeValue);
+       int ret = (static_cast<MediaVision::Common::EngineConfig *>(engine_cfg))
+                                         ->getStringAttribute(std::string(name), &attributeValue);
 
        if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
-                               name, ret);
+               LOGE("Failed to get attribute [%s]. Error code (0x%08x)", name, ret);
                return ret;
        }
 
        int attributeSize = attributeValue.size();
        LOGD("Allocating %d arrays", attributeSize);
 
-       (*values) = (char**)malloc(sizeof(char*) * attributeSize);
+       (*values) = (char **) malloc(sizeof(char *) * attributeSize);
        if ((*values) == NULL) {
                LOGE("Failed allocation");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
@@ -651,10 +589,10 @@ int mv_engine_config_get_array_string_attribute_c(
        (*size) = attributeSize;
 
        int stringSize = 0;
-       for(int idx = 0; idx < attributeSize; ++idx) {
+       for (int idx = 0; idx < attributeSize; ++idx) {
                stringSize = attributeValue[idx].size();
                LOGE("Converting %s with length %d to char*", attributeValue[idx].c_str(), stringSize);
-               (*values)[idx] = (char*)malloc(sizeof(char) * (stringSize + 1));
+               (*values)[idx] = (char *) malloc(sizeof(char) * (stringSize + 1));
                if ((*values)[idx] == NULL) {
                        LOGE("Failed to convert string to char*");
 
@@ -680,20 +618,17 @@ int mv_engine_config_get_array_string_attribute_c(
                }
                ((*values)[idx])[stringSize] = '\0';
 
-               LOGD("Attribute [%s] (value[%d] %s) has been gotten",
-                       name, idx, (*values)[idx]);
+               LOGD("Attribute [%s] (value[%d] %s) has been gotten", name, idx, (*values)[idx]);
        }
 
        return ret;
 }
 
-int mv_engine_config_foreach_supported_attribute_c(
-               mv_supported_attribute_cb callback,
-               void *user_data)
+int mv_engine_config_foreach_supported_attribute_c(mv_supported_attribute_cb callback, void *user_data)
 {
        if (NULL == callback) {
                LOGE("Impossible to traverse supported by Media Vision engine "
-                               "configuration attributes. Callback is NULL");
+                        "configuration attributes. Callback is NULL");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
@@ -702,15 +637,14 @@ int mv_engine_config_foreach_supported_attribute_c(
 
        try {
                config = std::make_unique<EngineConfig>();
-       } catch(int& exception) {
+       } catch (int &exception) {
                return exception;
        }
 
        DictDblConstIter dblDictIter = config->getDefaultDblDict().begin();
 
        while (dblDictIter != config->getDefaultDblDict().end()) {
-               if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE,
-                               dblDictIter->first.c_str(), user_data)) {
+               if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE, dblDictIter->first.c_str(), user_data)) {
                        LOGD("Attribute names/types traverse has been stopped by the user");
                        return MEDIA_VISION_ERROR_NONE;
                }
@@ -720,8 +654,7 @@ int mv_engine_config_foreach_supported_attribute_c(
        DictIntConstIter intDictIter = config->getDefaultIntDict().begin();
 
        while (intDictIter != config->getDefaultIntDict().end()) {
-               if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER,
-                               intDictIter->first.c_str(), user_data)) {
+               if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER, intDictIter->first.c_str(), user_data)) {
                        LOGD("Attribute names/types traverse has been stopped by the user");
                        return MEDIA_VISION_ERROR_NONE;
                }
@@ -731,8 +664,7 @@ int mv_engine_config_foreach_supported_attribute_c(
        DictBoolConstIter boolDictIter = config->getDefaultBoolDict().begin();
 
        while (boolDictIter != config->getDefaultBoolDict().end()) {
-               if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN,
-                               boolDictIter->first.c_str(), user_data)) {
+               if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN, boolDictIter->first.c_str(), user_data)) {
                        LOGD("Attribute names/types traverse has been stopped by the user");
                        return MEDIA_VISION_ERROR_NONE;
                }
@@ -742,8 +674,7 @@ int mv_engine_config_foreach_supported_attribute_c(
        DictStrConstIter strDictIter = config->getDefaultStrDict().begin();
 
        while (strDictIter != config->getDefaultStrDict().end()) {
-               if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_STRING,
-                               strDictIter->first.c_str(), user_data)) {
+               if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_STRING, strDictIter->first.c_str(), user_data)) {
                        LOGD("Attribute names/types traverse has been stopped by the user");
                        return MEDIA_VISION_ERROR_NONE;
                }
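
The formatting applied in this file is driven by the updated .clang-format listed at the top of this patch. Below is a minimal sketch of the options implied by the reformatted output; it is inferred from the diff, not copied from the actual file, which may set more or different keys:

    Language: Cpp
    IndentWidth: 4
    TabWidth: 4
    UseTab: Always
    ColumnLimit: 120
    PointerAlignment: Right
    SpaceAfterCStyleCast: true
    AlignAfterOpenBracket: Align
    BreakBeforeBraces: Custom
    BraceWrapping:
      AfterClass: true
      AfterStruct: true
      AfterNamespace: true
      AfterFunction: true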
index 56ac8ca..84be33a 100644 (file)
 static gpointer __get_system_info_feature_once(gpointer data)
 {
        bool supported = false;
-       gchar *feature = (gchar *)data;
+       gchar *feature = (gchar *) data;
 
        if (system_info_get_platform_bool(feature, &supported) != SYSTEM_INFO_ERROR_NONE)
                LOGE("SYSTEM_INFO_ERROR: %s", feature);
 
-       return (gpointer)supported;
+       return (gpointer) supported;
 }
 
 bool _mv_check_system_info_feature_supported(void)
@@ -40,93 +40,83 @@ bool _mv_check_system_info_feature_supported(void)
        bool isInferenceImageSupported = false;
        bool isInferenceFaceSupported = false;
 
-       const int nRetVal1 = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.barcode_detection",
-                                       &isBarcodeDetectionSupported);
+       const int nRetVal1 = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_detection",
+                                                                                                          &isBarcodeDetectionSupported);
 
        if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.barcode_detection");
                return false;
        }
 
-       const int nRetVal2 = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.barcode_generation",
-                                       &isBarcodeGenerationSupported);
+       const int nRetVal2 = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_generation",
+                                                                                                          &isBarcodeGenerationSupported);
 
        if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.barcode_generation");
                return false;
        }
 
-       const int nRetVal3 = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.face_recognition",
-                                       &isFaceRecognitionSupported);
+       const int nRetVal3 = system_info_get_platform_bool("http://tizen.org/feature/vision.face_recognition",
+                                                                                                          &isFaceRecognitionSupported);
 
        if (nRetVal3 != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.face_recognition");
                return false;
        }
 
-       const int nRetVal4 = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.image_recognition",
-                                       &isImageRecognitionSupported);
+       const int nRetVal4 = system_info_get_platform_bool("http://tizen.org/feature/vision.image_recognition",
+                                                                                                          &isImageRecognitionSupported);
 
        if (nRetVal4 != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.image_recognition");
                return false;
        }
-       const int nRetVal5 = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.inference.image",
-                                       &isInferenceImageSupported);
+       const int nRetVal5 = system_info_get_platform_bool("http://tizen.org/feature/vision.inference.image",
+                                                                                                          &isInferenceImageSupported);
 
        if (nRetVal5 != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
                return false;
        }
 
-       const int nRetVal6 = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.inference.face",
-                                       &isInferenceFaceSupported);
+       const int nRetVal6 =
+                       system_info_get_platform_bool("http://tizen.org/feature/vision.inference.face", &isInferenceFaceSupported);
 
        if (nRetVal6 != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
                return false;
        }
-       (isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
-               isFaceRecognitionSupported  || isImageRecognitionSupported ||
-               isInferenceImageSupported || isInferenceFaceSupported) ?
+       (isBarcodeDetectionSupported || isBarcodeGenerationSupported || isFaceRecognitionSupported ||
+        isImageRecognitionSupported || isInferenceImageSupported || isInferenceFaceSupported) ?
                        LOGI("system_info_get_platform_bool returned "
-                                       "Supported one feature among barcode detection, "
-                                       "barcode generation, face recognition, "
-                                       "image recognition, and inference capability\n") :
+                                "Supported one feature among barcode detection, "
+                                "barcode generation, face recognition, "
+                                "image recognition, and inference capability\n") :
                        LOGE("system_info_get_platform_bool returned "
-                                       "Unsupported all features of barcode detection, "
-                                       "barcode generation, face recognition, "
-                                       "image recognition, inference capability\n") ;
+                                "Unsupported all features of barcode detection, "
+                                "barcode generation, face recognition, "
+                                "image recognition, inference capability\n");
 
-       return (isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
-                       isFaceRecognitionSupported  || isImageRecognitionSupported ||
-                       isInferenceImageSupported   || isInferenceFaceSupported);
+       return (isBarcodeDetectionSupported || isBarcodeGenerationSupported || isFaceRecognitionSupported ||
+                       isImageRecognitionSupported || isInferenceImageSupported || isInferenceFaceSupported);
 }
 
 bool _mv_barcode_detect_check_system_info_feature_supported(void)
 {
        bool isBarcodeDetectionSupported = false;
 
-       const int nRetVal = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.barcode_detection",
-                                       &isBarcodeDetectionSupported);
+       const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_detection",
+                                                                                                         &isBarcodeDetectionSupported);
 
        if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.barcode_detection");
                return false;
        }
 
-       isBarcodeDetectionSupported ?
-                       LOGI("system_info_get_platform_bool returned "
-                                       "Supported barcode detection feature capability\n") :
-                       LOGE("system_info_get_platform_bool returned "
-                                       "Unsupported barcode detection feature capability\n");
+       isBarcodeDetectionSupported ? LOGI("system_info_get_platform_bool returned "
+                                                                          "Supported barcode detection feature capability\n") :
+                                                                 LOGE("system_info_get_platform_bool returned "
+                                                                          "Unsupported barcode detection feature capability\n");
 
        return isBarcodeDetectionSupported;
 }
@@ -135,20 +125,18 @@ bool _mv_barcode_generate_check_system_info_feature_supported(void)
 {
        bool isBarcodeGenerationSupported = false;
 
-       const int nRetVal = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.barcode_generation",
-                                       &isBarcodeGenerationSupported);
+       const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_generation",
+                                                                                                         &isBarcodeGenerationSupported);
 
        if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.barcode_generation");
                return false;
        }
 
-       isBarcodeGenerationSupported ?
-                       LOGI("system_info_get_platform_bool returned "
-                                       "Supported barcode generation feature capability\n") :
-                       LOGE("system_info_get_platform_bool returned "
-                                       "Unsupported barcode generation feature capability\n");
+       isBarcodeGenerationSupported ? LOGI("system_info_get_platform_bool returned "
+                                                                               "Supported barcode generation feature capability\n") :
+                                                                  LOGE("system_info_get_platform_bool returned "
+                                                                               "Unsupported barcode generation feature capability\n");
 
        return isBarcodeGenerationSupported;
 }
@@ -157,20 +145,18 @@ bool _mv_face_check_system_info_feature_supported(void)
 {
        bool isFaceRecognitionSupported = false;
 
-       const int nRetVal = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.face_recognition",
-                                       &isFaceRecognitionSupported);
+       const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.face_recognition",
+                                                                                                         &isFaceRecognitionSupported);
 
        if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.face_recognition");
                return false;
        }
 
-       isFaceRecognitionSupported ?
-                       LOGI("system_info_get_platform_bool returned "
-                                       "Supported face recognition feature capability\n") :
-                       LOGE("system_info_get_platform_bool returned "
-                                       "Unsupported face recognition feature capability\n");
+       isFaceRecognitionSupported ? LOGI("system_info_get_platform_bool returned "
+                                                                         "Supported face recognition feature capability\n") :
+                                                                LOGE("system_info_get_platform_bool returned "
+                                                                         "Unsupported face recognition feature capability\n");
 
        return isFaceRecognitionSupported;
 }
@@ -179,20 +165,18 @@ bool _mv_image_check_system_info_feature_supported(void)
 {
        bool isImageRecognitionSupported = false;
 
-       const int nRetVal = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.image_recognition",
-                                       &isImageRecognitionSupported);
+       const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.image_recognition",
+                                                                                                         &isImageRecognitionSupported);
 
        if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.image_recognition");
                return false;
        }
 
-       isImageRecognitionSupported ?
-                       LOGI("system_info_get_platform_bool returned "
-                                       "Supported image recognition feature capability\n") :
-                       LOGE("system_info_get_platform_bool returned "
-                                       "Unsupported image recognition feature capability\n");
+       isImageRecognitionSupported ? LOGI("system_info_get_platform_bool returned "
+                                                                          "Supported image recognition feature capability\n") :
+                                                                 LOGE("system_info_get_platform_bool returned "
+                                                                          "Unsupported image recognition feature capability\n");
 
        return isImageRecognitionSupported;
 }
@@ -202,29 +186,26 @@ bool _mv_inference_check_system_info_feature_supported(void)
        bool isInferenceImageSupported = false;
        bool isInferenceFaceSupported = false;
 
-       const int nRetVal1 = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.inference.image",
-                                       &isInferenceImageSupported);
+       const int nRetVal1 = system_info_get_platform_bool("http://tizen.org/feature/vision.inference.image",
+                                                                                                          &isInferenceImageSupported);
 
        if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
                return false;
        }
 
-       const int nRetVal2 = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.inference.face",
-                                       &isInferenceFaceSupported);
+       const int nRetVal2 =
+                       system_info_get_platform_bool("http://tizen.org/feature/vision.inference.face", &isInferenceFaceSupported);
 
        if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
                return false;
        }
 
-       (isInferenceImageSupported || isInferenceFaceSupported) ?
-                       LOGI("system_info_get_platform_bool returned "
-                                       "Supported inference feature capability\n") :
-                       LOGE("system_info_get_platform_bool returned "
-                                       "Unsupported inference feature capability\n");
+       (isInferenceImageSupported || isInferenceFaceSupported) ? LOGI("system_info_get_platform_bool returned "
+                                                                                                                                  "Supported inference feature capability\n") :
+                                                                                                                         LOGE("system_info_get_platform_bool returned "
+                                                                                                                                  "Unsupported inference feature capability\n");
 
        return (isInferenceImageSupported || isInferenceFaceSupported);
 }
@@ -233,20 +214,18 @@ bool _mv_inference_image_check_system_info_feature_supported(void)
 {
        bool isInferenceImageSupported = false;
 
-       const int nRetVal = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.inference.image",
-                                       &isInferenceImageSupported);
+       const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.inference.image",
+                                                                                                         &isInferenceImageSupported);
 
        if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
                return false;
        }
 
-       isInferenceImageSupported ?
-                       LOGI("system_info_get_platform_bool returned "
-                                       "Supported inference image feature capability\n") :
-                       LOGE("system_info_get_platform_bool returned "
-                                       "Unsupported inference image feature capability\n");
+       isInferenceImageSupported ? LOGI("system_info_get_platform_bool returned "
+                                                                        "Supported inference image feature capability\n") :
+                                                               LOGE("system_info_get_platform_bool returned "
+                                                                        "Unsupported inference image feature capability\n");
 
        return isInferenceImageSupported;
 }
@@ -255,20 +234,18 @@ bool _mv_inference_face_check_system_info_feature_supported(void)
 {
        bool isInferenceFaceSupported = false;
 
-       const int nRetVal = system_info_get_platform_bool(
-                                       "http://tizen.org/feature/vision.inference.face",
-                                       &isInferenceFaceSupported);
+       const int nRetVal =
+                       system_info_get_platform_bool("http://tizen.org/feature/vision.inference.face", &isInferenceFaceSupported);
 
        if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
                LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
                return false;
        }
 
-       isInferenceFaceSupported ?
-                       LOGI("system_info_get_platform_bool returned "
-                                       "Supported inference face feature capability\n") :
-                       LOGE("system_info_get_platform_bool returned "
-                                       "Unsupported inference face feature capability\n");
+       isInferenceFaceSupported ? LOGI("system_info_get_platform_bool returned "
+                                                                       "Supported inference face feature capability\n") :
+                                                          LOGE("system_info_get_platform_bool returned "
+                                                                       "Unsupported inference face feature capability\n");
 
        return isInferenceFaceSupported;
 }
@@ -276,7 +253,7 @@ bool _mv_inference_face_check_system_info_feature_supported(void)
 bool _mv_roi_tracking_check_system_info_feature_supported(void)
 {
        static GOnce once = G_ONCE_INIT;
-       bool supported = (bool)g_once(&once, __get_system_info_feature_once, VISION_FEATURE_ROI_TRACKING);
+       bool supported = (bool) g_once(&once, __get_system_info_feature_once, VISION_FEATURE_ROI_TRACKING);
 
        LOGI("Feature[%s] : %d", VISION_FEATURE_ROI_TRACKING, supported);
 
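The GOnce helper in this file caches the platform query, so system_info_get_platform_bool() runs at most once per feature string. A minimal usage sketch of the same pattern for a hypothetical feature key (the key and function names below are placeholders, not part of this patch):

    #include <glib.h>
    #include <stdbool.h>
    #include <system_info.h>

    /* Hypothetical key; real keys look like http://tizen.org/feature/vision.* */
    #define VISION_FEATURE_EXAMPLE "http://tizen.org/feature/vision.example"

    static gpointer __get_feature_once(gpointer data)
    {
            bool supported = false;

            /* Runs only on the first g_once() dispatch; the result is cached in GOnce. */
            if (system_info_get_platform_bool((const char *) data, &supported) != SYSTEM_INFO_ERROR_NONE)
                    supported = false;

            return (gpointer) supported;
    }

    bool example_feature_supported(void)
    {
            static GOnce once = G_ONCE_INIT;

            /* Later calls skip the callback and return the cached value. */
            return (bool) g_once(&once, __get_feature_once, VISION_FEATURE_EXAMPLE);
    }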
index 55c0132..19176a6 100644 (file)
  *        detection functionality.
  */
 
-namespace MediaVision {
-namespace Face {
+namespace MediaVision
+{
+namespace Face
+{
 /**
  * @class   FaceDetector
  * @brief   The Face Detector container.
@@ -36,7 +38,8 @@ namespace Face {
  *
  * @since_tizen 3.0
  */
-class FaceDetector {
+class FaceDetector
+{
 public:
        /**
         * @brief   Creates a FaceDetector.
@@ -68,11 +71,8 @@ public:
         *
         * @see setHaarcascadeFilepath()
         */
-       bool detectFaces(
-                       const cv::Mat& image,
-                       const cv::Rect& roi,
-                       const cv::Size& minSize,
-                       std::vector<cv::Rect>& faceLocations);
+       bool detectFaces(const cv::Mat &image, const cv::Rect &roi, const cv::Size &minSize,
+                                        std::vector<cv::Rect> &faceLocations);
 
        /**
         * @brief Loads haar cascade classifier for detection process.
@@ -85,13 +85,13 @@ public:
         * @return true if cascade is loaded from file and ready for detecting
         *         process. Otherwise is false.
         */
-       bool loadHaarcascade(const std::string& haarcascadeFilepath);
+       bool loadHaarcascade(const std::string &haarcascadeFilepath);
 
 private:
        cv::CascadeClassifier m_faceCascade; /**< Cascade classifier of the face
                                                                                                detecting process. */
 
-       std::string m_haarcascadeFilepath;   /**< Path to the file, which contains
+       std::string m_haarcascadeFilepath; /**< Path to the file, which contains
                                                                                                cascade classifier information. */
 
        bool m_faceCascadeIsLoaded; /**< Flag to determine the state of the
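
Under the new style, namespace, class, and struct braces break onto their own line, references and pointers attach to the name, and parameter lists pack up to the 120-column limit. A small illustrative header fragment in the same shape (the names are examples, not code from this patch):

    #include <opencv2/core.hpp>
    #include <string>
    #include <vector>

    namespace MediaVision
    {
    namespace Example
    {
    class Sketch
    {
    public:
            /* Parameters pack to the column limit; "&" attaches to the parameter name. */
            bool process(const cv::Mat &image, const cv::Rect &roi, std::vector<cv::Rect> &faceLocations);

    private:
            std::string m_path; /**< Trailing member comments keep a single space. */
    };

    } /* Example */
    } /* MediaVision */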
index 496e407..8efbf7c 100644 (file)
  *        the facial expressions recognition functionality.
  */
 
-namespace MediaVision {
-namespace Face {
+namespace MediaVision
+{
+namespace Face
+{
 /**
  * @brief Face expression recognition configuration.
  *
  * @since_tizen 3.0
  */
-struct FaceRecognizerConfig {
+struct FaceRecognizerConfig
+{
        FaceRecognizerConfig();
        std::string mHaarcascadeFilepath;
 };
@@ -48,7 +51,8 @@ struct FaceRecognizerConfig {
  *
  * @since_tizen 3.0
  */
-class FaceExpressionRecognizer {
+class FaceExpressionRecognizer
+{
 public:
        /**
         * @brief Recognizes facial expression on the image with known face location.
@@ -63,11 +67,9 @@ public:
         *
         * @see MediaVision::Face::FaceRecognizerConfig
         */
-       static int recognizeFaceExpression(
-                                       const cv::Mat& grayImage,
-                                       const mv_rectangle_s& faceLocation,
-                                       mv_face_facial_expression_e *faceExpression,
-                                       const FaceRecognizerConfig& config = FaceRecognizerConfig());
+       static int recognizeFaceExpression(const cv::Mat &grayImage, const mv_rectangle_s &faceLocation,
+                                                                          mv_face_facial_expression_e *faceExpression,
+                                                                          const FaceRecognizerConfig &config = FaceRecognizerConfig());
 };
 
 } /* Face */
index cc7a9bc..c301f47 100644 (file)
  *        eye condition recognition functionality.
  */
 
-namespace MediaVision {
-namespace Face {
+namespace MediaVision
+{
+namespace Face
+{
 /**
  * @class FaceEyeCondition
  * @brief The FaceEyeCondition implements the face
@@ -37,7 +39,8 @@ namespace Face {
  *
  * @since_tizen 3.0
  */
-class FaceEyeCondition {
+class FaceEyeCondition
+{
 public:
        /**
         * @brief Recognizes eye condition on the image with face location.
@@ -49,19 +52,17 @@ public:
         * @param [out] eyeCondition    The eye condition which was recognized
         * @return @c 0 on success, otherwise a negative error value
         */
-       static int recognizeEyeCondition(
-                                       const cv::Mat& grayImage,
-                                       mv_rectangle_s faceLocation,
-                                       mv_face_eye_condition_e *eyeCondition);
+       static int recognizeEyeCondition(const cv::Mat &grayImage, mv_rectangle_s faceLocation,
+                                                                        mv_face_eye_condition_e *eyeCondition);
 
 private:
        static void splitEyes(
-                                       /*[in]*/ const cv::Mat& grayImage,
-                                       /*[in]*/ mv_rectangle_s faceLocation,
-                                       /*[out]*/ cv::Mat& leftEye,
-                                       /*[out]*/ cv::Mat& rightEye);
+                       /*[in]*/ const cv::Mat &grayImage,
+                       /*[in]*/ mv_rectangle_s faceLocation,
+                       /*[out]*/ cv::Mat &leftEye,
+                       /*[out]*/ cv::Mat &rightEye);
 
-       static int isEyeOpen(/*[in]*/const cv::Mat& eye);
+       static int isEyeOpen(/*[in]*/ const cv::Mat &eye);
 };
 
 } /* Face */
index 8a8f3ae..8a7118c 100644 (file)
  *        provides face recognition model interface.
  */
 
-namespace MediaVision {
-namespace Face {
+namespace MediaVision
+{
+namespace Face
+{
 /**
  * @brief Structure containing supported recognition algorithms settings.
  *
  * @since_tizen 3.0
  */
-struct FaceRecognitionModelConfig {
+struct FaceRecognitionModelConfig
+{
        /**
         * @brief Default constructor for the @ref FaceRecognitionModelConfig
         *
@@ -48,33 +51,32 @@ struct FaceRecognitionModelConfig {
         */
        FaceRecognitionModelConfig();
 
-       bool operator!=(
-                       const FaceRecognitionModelConfig& other) const;
+       bool operator!=(const FaceRecognitionModelConfig &other) const;
 
        FaceRecognitionModelType mModelType; /**<
                                                                Type of the recognition algorithm */
 
-       int mNumComponents;    /**< How many principal components will be included
+       int mNumComponents; /**< How many principal components will be included
                                                                to the Eigenvectors */
 
-       double mThreshold;     /**< Minimal distance between principal components of
+       double mThreshold; /**< Minimal distance between principal components of
                                                                the model allowed */
 
-       int mRadius;           /**< Radius of the local features for LBHP algorithm */
+       int mRadius; /**< Radius of the local features for LBHP algorithm */
 
-       int mNeighbors;        /**< How many neighboring pixels has to be analyzed
+       int mNeighbors; /**< How many neighboring pixels has to be analyzed
                                                                when LBHP learning applied. Usually set as
                                                                8*radius */
 
-       int mGridX;            /**< X size of the spatial histogram (LBPH) */
+       int mGridX; /**< X size of the spatial histogram (LBPH) */
 
-       int mGridY;            /**< Y size of the spatial histogram (LBPH) */
+       int mGridY; /**< Y size of the spatial histogram (LBPH) */
 
-       int mImgWidth;         /**< Width of the image to resize the samples for
+       int mImgWidth; /**< Width of the image to resize the samples for
                                                                Eigenfaces and Fisherfaces algorithms working
                                                                on the samples of the same size */
 
-       int mImgHeight;        /**< Height of the image to resize the samples for
+       int mImgHeight; /**< Height of the image to resize the samples for
                                                                Eigenfaces and Fisherfaces algorithms working
                                                                on the samples of the same size */
 };
@@ -86,7 +88,8 @@ struct FaceRecognitionModelConfig {
  *
  * @since_tizen 3.0
  */
-struct FaceRecognitionResults {
+struct FaceRecognitionResults
+{
        /**
         * @brief Default constructor for the @ref FaceRecognitionResults
         *
@@ -94,12 +97,12 @@ struct FaceRecognitionResults {
         */
        FaceRecognitionResults();
 
-       bool mIsRecognized;               /**< The flag indication success of the
+       bool mIsRecognized; /**< The flag indication success of the
                                                                                        recognition */
-       cv::Rect_<int> mFaceLocation;     /**< Location of the face where face has
+       cv::Rect_<int> mFaceLocation; /**< Location of the face where face has
                                                                                        been recognized */
-       int mFaceLabel;                   /**< Unique label of the face */
-       double mConfidence;               /**< Recognition confidence level */
+       int mFaceLabel; /**< Unique label of the face */
+       double mConfidence; /**< Recognition confidence level */
 };
 
 /**
@@ -108,7 +111,8 @@ struct FaceRecognitionResults {
  *
  * @since_tizen 3.0
  */
-class FaceRecognitionModel {
+class FaceRecognitionModel
+{
 public:
        /**
         * @brief    Creates a FaceRecognitionModel class instance.
@@ -125,7 +129,7 @@ public:
         * @param [in] origin    The FaceRecognitionModel object that will be used
         *                       for creation of new one
         */
-	FaceRecognitionModel(const FaceRecognitionModel& origin);
+       FaceRecognitionModel(const FaceRecognitionModel &origin);
 
        /**
         * @brief @ref FaceRecognitionModel copy assignment operator.
@@ -135,7 +139,7 @@ public:
         * @param [in] copy      @ref FaceRecognitionModel object which will be
         *                       copied
         */
-       FaceRecognitionModel& operator=(const FaceRecognitionModel& copy);
+       FaceRecognitionModel &operator=(const FaceRecognitionModel &copy);
 
        /**
         * @brief    Destroys the FaceRecognitionModel class instance including all
@@ -155,7 +159,7 @@ public:
         *
         * @see FaceRecognitionModel::load()
         */
-	int save(const std::string& fileName);
+       int save(const std::string &fileName);
 
        /**
         * @brief Deserializes FaceRecognitionModel object from the file.
@@ -167,7 +171,7 @@ public:
         *
         * @see FaceRecognitionModel::save()
         */
-	int load(const std::string& fileName);
+       int load(const std::string &fileName);
 
        /**
         * @brief Adds face image example for face labeled by @a faceLabel
@@ -179,7 +183,7 @@ public:
         *
         * @see FaceRecognitionModel::resetFaceExamples()
         */
-	int addFaceExample(const cv::Mat& faceImage, int faceLabel);
+       int addFaceExample(const cv::Mat &faceImage, int faceLabel);
 
        /**
         * @brief Clears the internal set of face image examples.
@@ -220,7 +224,7 @@ public:
         * @see FaceRecognitionModel::addFaceExample()
         * @see FaceRecognitionModel::learn()
         */
-	const std::set<int>& getFaceLabels(void) const;
+       const std::set<int> &getFaceLabels(void) const;
 
        /**
         * @brief Learns recognition model based on the set of collected face image
@@ -233,7 +237,7 @@ public:
         *
         * @see FaceRecognitionModel::addFaceExample()
         */
-	int learn(const FaceRecognitionModelConfig& config = FaceRecognitionModelConfig());
+       int learn(const FaceRecognitionModelConfig &config = FaceRecognitionModelConfig());
 
        /**
         * @brief Recognizes faces in the image and outputs recognition results to
@@ -247,16 +251,15 @@ public:
         *
         * @see FaceRecognitionModel::learn()
         */
-       int recognize(const cv::Mat& image, FaceRecognitionResults& results);
+       int recognize(const cv::Mat &image, FaceRecognitionResults &results);
 
 private:
        /**
         * Factory method for creating of the recognition algorithm based on input
         * configuration:
         */
-       static cv::Ptr<cv::face::FaceRecognizer> CreateRecognitionAlgorithm(
-                       const FaceRecognitionModelConfig& config =
-                       FaceRecognitionModelConfig());
+       static cv::Ptr<cv::face::FaceRecognizer>
+       CreateRecognitionAlgorithm(const FaceRecognitionModelConfig &config = FaceRecognitionModelConfig());
 
 private:
        bool m_canRecognize; /**< The flag showing possibility to recognize with
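
Usage sketch (illustrative, not part of this patch) of the model interface above; `faceA`, `faceB`, and `probe` are hypothetical grayscale face crops:

    static void recognition_sketch(const cv::Mat &faceA, const cv::Mat &faceB, const cv::Mat &probe)
    {
    	using namespace MediaVision::Face;

    	FaceRecognitionModel model;
    	model.addFaceExample(faceA, 1); // label 1
    	model.addFaceExample(faceB, 2); // label 2

    	FaceRecognitionModelConfig config;
    	config.mModelType = MEDIA_VISION_FACE_MODEL_TYPE_LBPH;
    	if (model.learn(config) != MEDIA_VISION_ERROR_NONE)
    		return;

    	FaceRecognitionResults results;
    	if (model.recognize(probe, results) == MEDIA_VISION_ERROR_NONE && results.mIsRecognized) {
    		// results.mFaceLabel and results.mConfidence describe the match
    	}
    }
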
index fc484c9..fe21752 100644 (file)
@@ -46,8 +46,8 @@
 #include <opencv2/tracking.hpp>
 #include <opencv2/tracking/tracking_legacy.hpp>
 
-namespace cv {
-
+namespace cv
+{
 //class TrackerMedianFlowModel;
 
 /** @brief Median Flow tracker implementation.
@@ -60,90 +60,80 @@ by authors to outperform MIL). During the implementation period the code at
 <http://www.aonsquared.co.uk/node/5>, the courtesy of the author Arthur Amarra, was used for the
 reference purpose.
  */
-class FaceTracker : public legacy::TrackerMedianFlow {
+class FaceTracker : public legacy::TrackerMedianFlow
+{
 public:
-       struct Params {
+       struct Params
+       {
                /**
                 * @brief TrackerMedianFlow algorithm parameters constructor
                 */
                Params();
-		void read(const FileNode& fn);
-		void write(FileStorage& fs) const;
+               void read(const FileNode &fn);
+               void write(FileStorage &fs) const;
 
                int mPointsInGrid; /**< Square root of number of keypoints used.
                                                                Increase it to trade accurateness for speed.
                                                                Default value is sensible and recommended */
 
-               Size mWindowSize;  /**< Size of the search window at each pyramid level
+               Size mWindowSize; /**< Size of the search window at each pyramid level
                                                                for Lucas-Kanade optical flow search used for
                                                                tracking */
 
-               int mPyrMaxLevel;  /**< Number of pyramid levels for Lucas-Kanade optical
+               int mPyrMaxLevel; /**< Number of pyramid levels for Lucas-Kanade optical
                                                                flow search used for tracking */
        };
 
        FaceTracker(Params paramsIn = Params());
 
-	bool copyTo(FaceTracker& copy) const;
+       bool copyTo(FaceTracker &copy) const;
 
-       bool initImpl(const Mat& image, const Rect2d& boundingBox);
-       bool updateImpl(const Mat& image, Rect2d& boundingBox);
+       bool initImpl(const Mat &image, const Rect2d &boundingBox);
+       bool updateImpl(const Mat &image, Rect2d &boundingBox);
 
        bool isInited() const;
 
        float getLastConfidence() const;
        Rect_<float> getLastBoundingBox() const;
 
-	void read(FileStorage& fn);
-	void write(FileStorage& fs) const;
-       void read( const FileNode& fn );
+       void read(FileStorage &fn);
+       void write(FileStorage &fs) const;
+       void read(const FileNode &fn);
 
 private:
        bool m_isInit;
 
-	bool medianFlowImpl(Mat oldImage, Mat newImage, Rect2f& oldBox);
+       bool medianFlowImpl(Mat oldImage, Mat newImage, Rect2f &oldBox);
 
-       Rect_<float> vote(
-                       const std::vector<Point2f>& oldPoints,
-                       const std::vector<Point2f>& newPoints,
-                       const Rect_<float>& oldRect,
-                       Point2f& mD);
+       Rect_<float> vote(const std::vector<Point2f> &oldPoints, const std::vector<Point2f> &newPoints,
+                                         const Rect_<float> &oldRect, Point2f &mD);
 
-       template<typename T>
-       T getMedian(
-                       std::vector<T>& values, int size = -1);
+       template<typename T> T getMedian(std::vector<T> &values, int size = -1);
 
-       void check_FB(
-                       std::vector<Mat> newPyramid,
-                       const std::vector<Point2f>& oldPoints,
-                       const std::vector<Point2f>& newPoints,
-                       std::vector<bool>& status);
+       void check_FB(std::vector<Mat> newPyramid, const std::vector<Point2f> &oldPoints,
+                                 const std::vector<Point2f> &newPoints, std::vector<bool> &status);
 
-       void check_NCC(
-                       const Mat& oldImage,
-                       const Mat& newImage,
-                       const std::vector<Point2f>& oldPoints,
-                       const std::vector<Point2f>& newPoints,
-                       std::vector<bool>& status);
+       void check_NCC(const Mat &oldImage, const Mat &newImage, const std::vector<Point2f> &oldPoints,
+                                  const std::vector<Point2f> &newPoints, std::vector<bool> &status);
 
        inline float l2distance(Point2f p1, Point2f p2);
 
-       Params m_params;               /**< Parameters used during tracking, see
+       Params m_params; /**< Parameters used during tracking, see
                                                                                @ref TrackerMedianFlow::Params */
 
-       TermCriteria m_termcrit;       /**< Terminating criteria for OpenCV
+       TermCriteria m_termcrit; /**< Terminating criteria for OpenCV
                                                                                Lucas–Kanade optical flow algorithm used
                                                                                during tracking */
 
-       Rect2d m_boundingBox;  /**< Tracking object bounding box */
+       Rect2d m_boundingBox; /**< Tracking object bounding box */
 
-       float m_confidence;          /**< Confidence that face was tracked correctly
+       float m_confidence; /**< Confidence that face was tracked correctly
                                                                                at the last tracking iteration */
 
-       Mat m_image;                 /**< Last image for which tracking was
+       Mat m_image; /**< Last image for which tracking was
                                                                                performed */
 
-       std::vector<Mat> m_pyramid;  /**< The pyramid had been calculated for
+       std::vector<Mat> m_pyramid; /**< The pyramid had been calculated for
                                                                                the previous frame or when
                                                                                initialize the model */
 };
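
Usage sketch (illustrative, not part of this patch) of the tracker above; `prevFrame` and `nextFrame` are hypothetical consecutive video frames and the initial box is made up:

    static void tracker_sketch(const cv::Mat &prevFrame, const cv::Mat &nextFrame)
    {
    	cv::FaceTracker::Params params;
    	params.mPointsInGrid = 10; // denser keypoint grid: more accuracy, less speed

    	cv::FaceTracker tracker(params);
    	cv::Rect2d box(120, 80, 64, 64); // hypothetical initial face bounding box
    	if (tracker.initImpl(prevFrame, box) && tracker.updateImpl(nextFrame, box)) {
    		float confidence = tracker.getLastConfidence(); // confidence of the last iteration
    		(void) confidence;
    	}
    }
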
index 95f8d6e..3361133 100644 (file)
 #include <opencv2/core.hpp>
 #include "FaceTracker.h"
 
-
 /**
  * @file FaceTrackingModel.h
  * @brief This file contains the FaceTrackingModel class definition which
  *        provides face tracking model interface.
  */
 
-namespace MediaVision {
-namespace Face {
+namespace MediaVision
+{
+namespace Face
+{
 /**
  * @brief Structure where results of
  *        @ref MediaVision::Face::FaceTrackingModel::track() call are stored.
  *
  * @since_tizen 3.0
  */
-struct FaceTrackingResults {
+struct FaceTrackingResults
+{
        /**
         * @brief Default constructor for the @ref FaceTrackingResults
         *
@@ -43,12 +45,12 @@ struct FaceTrackingResults {
         */
        FaceTrackingResults();
 
-       bool mIsTracked;                  /**< The flag indication success of the
+       bool mIsTracked; /**< The flag indication success of the
                                                                                        tracking */
-       cv::Rect_<float> mFaceLocation;   /**< Location of the face at the current
+       cv::Rect_<float> mFaceLocation; /**< Location of the face at the current
                                                                                        track iteration where face position
                                                                                        is predicted */
-       float mConfidence;                /**< Tracking confidence level
+       float mConfidence; /**< Tracking confidence level
                                                                                        (0.0 .. 1.0) */
 };
 
@@ -58,7 +60,8 @@ struct FaceTrackingResults {
  *
  * @since_tizen 3.0
  */
-class FaceTrackingModel {
+class FaceTrackingModel
+{
 public:
        /**
         * @brief    Creates a FaceTrackingModel class instance.
@@ -75,7 +78,7 @@ public:
         * @param [in] origin    The FaceTrackingModel object that will be used
         *                       for creation of new one
         */
-	FaceTrackingModel(const FaceTrackingModel& origin);
+       FaceTrackingModel(const FaceTrackingModel &origin);
 
        /**
         * @brief @ref FaceTrackingModel copy assignment operator.
@@ -85,7 +88,7 @@ public:
         * @param [in] copy      @ref FaceTrackingModel object which will be
         *                       copied
         */
-       FaceTrackingModel& operator=(const FaceTrackingModel& copy);
+       FaceTrackingModel &operator=(const FaceTrackingModel &copy);
 
        /**
         * @brief    Destroys the FaceTrackingModel class instance including all
@@ -105,7 +108,7 @@ public:
         *
         * @see FaceTrackingModel::load()
         */
-	int save(const std::string& fileName);
+       int save(const std::string &fileName);
 
        /**
         * @brief Deserializes FaceTrackingModel object from the file.
@@ -117,7 +120,7 @@ public:
         *
         * @see FaceTrackingModel::save()
         */
-	int load(const std::string& fileName);
+       int load(const std::string &fileName);
 
        /**
         * @brief Prepares FaceTrackingModel object to the next tracking session.
@@ -129,7 +132,7 @@ public:
         *
         * @see FaceTrackingModel::save()
         */
-	int prepare(const cv::Mat& image);
+       int prepare(const cv::Mat &image);
 
        /**
         * @brief Prepares FaceTrackingModel object to the next tracking session.
@@ -143,7 +146,7 @@ public:
         *
         * @see FaceTrackingModel::save()
         */
-       int prepare(const cv::Mat& image, const cv::Rect_<float>& boundingBox);
+       int prepare(const cv::Mat &image, const cv::Rect_<float> &boundingBox);
 
        /**
         * @brief Performs one tracking iteration for the video frame or image
@@ -153,10 +156,10 @@ public:
         * @param [in]  image
         * @param [out] boundingBox
         */
-       int track(const cv::Mat& image, FaceTrackingResults& results);
+       int track(const cv::Mat &image, FaceTrackingResults &results);
 
 private:
-       bool m_canTrack;                          /**< The flag showing possibility
+       bool m_canTrack; /**< The flag showing possibility
                                                                                                        of the tracking model to
                                                                                                        perform track */
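
Usage sketch (illustrative, not part of this patch); `firstFrame` and `nextFrame` are hypothetical consecutive frames, and the initial face location is assumed known:

    static void tracking_model_sketch(const cv::Mat &firstFrame, const cv::Mat &nextFrame)
    {
    	MediaVision::Face::FaceTrackingModel tracker;
    	tracker.prepare(firstFrame, cv::Rect_<float>(120.f, 80.f, 64.f, 64.f)); // known face box

    	MediaVision::Face::FaceTrackingResults results;
    	if (tracker.track(nextFrame, results) == MEDIA_VISION_ERROR_NONE && results.mIsTracked) {
    		// results.mFaceLocation is the predicted box; results.mConfidence is in 0.0 .. 1.0
    	}
    }
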
 
index 0bbe521..85734b3 100644 (file)
  * @file FaceUtil.h
  * @brief This file contains the useful functionality for Face module.
  */
-namespace MediaVision {
-namespace Face {
+namespace MediaVision
+{
+namespace Face
+{
 /**
  * @brief Enumeration of supported learning algorithms.
  *
  * @since_tizen 3.0
  */
-enum FaceRecognitionModelType {
-       MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN = 0,     /**< Unknown algorithm type */
-       MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES = 1,  /**< Eigenfaces algorithm */
+enum FaceRecognitionModelType
+{
+       MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN = 0, /**< Unknown algorithm type */
+       MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES = 1, /**< Eigenfaces algorithm */
        MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES = 2, /**< Fisherfaces algorithm */
-       MEDIA_VISION_FACE_MODEL_TYPE_LBPH = 3         /**< Local Binary Patterns
+       MEDIA_VISION_FACE_MODEL_TYPE_LBPH = 3 /**< Local Binary Patterns
                                                                                                                Histograms algorithm */
 };
 
@@ -45,12 +48,13 @@ enum FaceRecognitionModelType {
  *
  * @since_tizen 3.0
  */
-struct RecognitionParams {
+struct RecognitionParams
+{
        RecognitionParams(FaceRecognitionModelType algType);
 
        RecognitionParams();
 
-       FaceRecognitionModelType mRecognitionAlgType;  /**< The type of
+       FaceRecognitionModelType mRecognitionAlgType; /**< The type of
                                                                                                                the learning algorithm */
 };
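
Usage sketch (illustrative, not part of this patch): selecting a learning algorithm through the struct above.

    // Hypothetical: request the LBPH algorithm declared in the enum above.
    MediaVision::Face::RecognitionParams params(MEDIA_VISION_FACE_MODEL_TYPE_LBPH);
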
 
index bc8b054..e2d092a 100644 (file)
@@ -64,12 +64,8 @@ extern "C" {
  *
  * @see mv_face_detected_cb
  */
-int mv_face_detect_open(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_face_detected_cb detected_cb,
-               void *user_data);
-
+int mv_face_detect_open(mv_source_h source, mv_engine_config_h engine_cfg, mv_face_detected_cb detected_cb,
+                                               void *user_data);
 
 /********************/
 /* Face recognition */
@@ -125,14 +121,9 @@ int mv_face_detect_open(
  *
  * @see mv_face_recognized_cb
  */
-int mv_face_recognize_open(
-               mv_source_h source,
-               mv_face_recognition_model_h recognition_model,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s *face_location,
-               mv_face_recognized_cb recognized_cb,
-               void *user_data);
-
+int mv_face_recognize_open(mv_source_h source, mv_face_recognition_model_h recognition_model,
+                                                  mv_engine_config_h engine_cfg, mv_rectangle_s *face_location,
+                                                  mv_face_recognized_cb recognized_cb, void *user_data);
 
 /*****************/
 /* Face tracking */
@@ -189,14 +180,8 @@ int mv_face_recognize_open(
  *
  * @see mv_face_tracked_cb
  */
-int mv_face_track_open(
-               mv_source_h source,
-               mv_face_tracking_model_h tracking_model,
-               mv_engine_config_h engine_cfg,
-               mv_face_tracked_cb tracked_cb,
-               bool do_learn,
-               void *user_data);
-
+int mv_face_track_open(mv_source_h source, mv_face_tracking_model_h tracking_model, mv_engine_config_h engine_cfg,
+                                          mv_face_tracked_cb tracked_cb, bool do_learn, void *user_data);
 
 /********************************/
 /* Recognition of eye condition */
@@ -232,13 +217,10 @@ int mv_face_track_open(
  *
  * @see mv_face_eye_condition_recognized_cb
  */
-int mv_face_eye_condition_recognize_open(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s face_location,
-               mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
-               void *user_data);
-
+int mv_face_eye_condition_recognize_open(mv_source_h source, mv_engine_config_h engine_cfg,
+                                                                                mv_rectangle_s face_location,
+                                                                                mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
+                                                                                void *user_data);
 
 /************************************/
 /* Recognition of facial expression */
@@ -273,12 +255,10 @@ int mv_face_eye_condition_recognize_open(
  *
  * @see mv_face_facial_expression_recognized_cb
  */
-int mv_face_facial_expression_recognize_open(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s face_location,
-               mv_face_facial_expression_recognized_cb expression_recognized_cb,
-               void *user_data);
+int mv_face_facial_expression_recognize_open(mv_source_h source, mv_engine_config_h engine_cfg,
+                                                                                        mv_rectangle_s face_location,
+                                                                                        mv_face_facial_expression_recognized_cb expression_recognized_cb,
+                                                                                        void *user_data);
 
 /*******************************/
 /* Recognition model behavior */
@@ -312,8 +292,7 @@ int mv_face_facial_expression_recognize_open(
  *
  * @see mv_face_recognition_model_destroy_open()
  */
-int mv_face_recognition_model_create_open(
-               mv_face_recognition_model_h *recognition_model);
+int mv_face_recognition_model_create_open(mv_face_recognition_model_h *recognition_model);
 
 /**
  * @brief Destroys the face recognition model handle and releases all its
@@ -328,8 +307,7 @@ int mv_face_recognition_model_create_open(
  *
  * @see mv_face_recognition_model_create_open()
  */
-int mv_face_recognition_model_destroy_open(
-               mv_face_recognition_model_h recognition_model);
+int mv_face_recognition_model_destroy_open(mv_face_recognition_model_h recognition_model);
 
 /**
  * @brief Creates a copy of existed recognition model handle and clones all its
@@ -352,9 +330,7 @@ int mv_face_recognition_model_destroy_open(
  *
  * @see mv_face_recognition_model_create_open()
  */
-int mv_face_recognition_model_clone_open(
-               mv_face_recognition_model_h src,
-               mv_face_recognition_model_h *dst);
+int mv_face_recognition_model_clone_open(mv_face_recognition_model_h src, mv_face_recognition_model_h *dst);
 
 /**
  * @brief Saves recognition model to the file.
@@ -386,9 +362,7 @@ int mv_face_recognition_model_clone_open(
  * @see mv_face_recognition_model_load_open()
  * @see mv_face_recognition_model_create_open()
  */
-int mv_face_recognition_model_save_open(
-               const char *file_name,
-               mv_face_recognition_model_h recognition_model);
+int mv_face_recognition_model_save_open(const char *file_name, mv_face_recognition_model_h recognition_model);
 
 /**
  * @brief Loads recognition model from file.
@@ -419,9 +393,7 @@ int mv_face_recognition_model_save_open(
  * @see mv_face_recognition_model_save_open()
  * @see mv_face_recognition_model_destroy_open()
  */
-int mv_face_recognition_model_load_open(
-               const char *file_name,
-               mv_face_recognition_model_h *recognition_model);
+int mv_face_recognition_model_load_open(const char *file_name, mv_face_recognition_model_h *recognition_model);
 
 /**
  * @brief Adds face image example to be used for face recognition model learning
@@ -459,11 +431,8 @@ int mv_face_recognition_model_load_open(
  * @see mv_face_recognition_model_reset_open()
  * @see mv_face_recognition_model_learn_open()
  */
-int mv_face_recognition_model_add_open(
-               const mv_source_h source,
-               mv_face_recognition_model_h recognition_model,
-               const mv_rectangle_s *example_location,
-               int face_label);
+int mv_face_recognition_model_add_open(const mv_source_h source, mv_face_recognition_model_h recognition_model,
+                                                                          const mv_rectangle_s *example_location, int face_label);
 
 /**
  * @brief Remove from @a recognition_model all collected with
@@ -497,9 +466,7 @@ int mv_face_recognition_model_add_open(
  * @see mv_face_recognition_model_add_open()
  * @see mv_face_recognition_model_learn_open()
  */
-int mv_face_recognition_model_reset_open(
-               mv_face_recognition_model_h recognition_model,
-               const int *face_label);
+int mv_face_recognition_model_reset_open(mv_face_recognition_model_h recognition_model, const int *face_label);
 
 /**
  * @brief Learns face recognition model.
@@ -554,9 +521,7 @@ int mv_face_recognition_model_reset_open(
  * @see mv_face_recognition_model_reset_open()
  * @see mv_face_recognize_open()
  */
-int mv_face_recognition_model_learn_open(
-               mv_engine_config_h engine_cfg,
-               mv_face_recognition_model_h recognition_model);
+int mv_face_recognition_model_learn_open(mv_engine_config_h engine_cfg, mv_face_recognition_model_h recognition_model);
 
 /**
  * @brief Queries labels list and number of labels had been learned by the model.
@@ -584,10 +549,8 @@ int mv_face_recognition_model_learn_open(
  * @see mv_face_recognition_model_reset_open()
  * @see mv_face_recognition_model_learn_open()
  */
-int mv_face_recognition_model_query_labels_open(
-               mv_face_recognition_model_h recognition_model,
-               int **labels,
-               unsigned int *number_of_labels);
+int mv_face_recognition_model_query_labels_open(mv_face_recognition_model_h recognition_model, int **labels,
+                                                                                               unsigned int *number_of_labels);
 
 /***************************/
 /* Tracking model behavior */
@@ -627,8 +590,7 @@ int mv_face_recognition_model_query_labels_open(
  * @see mv_face_tracking_model_prepare_open()
  * @see mv_face_tracking_model_load_open()
  */
-int mv_face_tracking_model_create_open(
-               mv_face_tracking_model_h *tracking_model);
+int mv_face_tracking_model_create_open(mv_face_tracking_model_h *tracking_model);
 
 /**
  * @brief Call this function to destroy the face tracking model handle and
@@ -644,8 +606,7 @@ int mv_face_tracking_model_create_open(
  *
  * @see mv_face_tracking_model_create_open()
  */
-int mv_face_tracking_model_destroy_open(
-               mv_face_tracking_model_h tracking_model);
+int mv_face_tracking_model_destroy_open(mv_face_tracking_model_h tracking_model);
 
 /**
  * @brief Call this function to initialize tracking model by the location of the
@@ -688,11 +649,8 @@ int mv_face_tracking_model_destroy_open(
  * @see mv_face_tracking_model_create_open()
  * @see mv_face_track_open()
  */
-int mv_face_tracking_model_prepare_open(
-               mv_face_tracking_model_h tracking_model,
-               mv_engine_config_h engine_cfg,
-               mv_source_h source,
-               mv_quadrangle_s */*location*/);
+int mv_face_tracking_model_prepare_open(mv_face_tracking_model_h tracking_model, mv_engine_config_h engine_cfg,
+                                                                               mv_source_h source, mv_quadrangle_s * /*location*/);
 
 /**
  * @brief Call this function to make a copy of existed tracking model handle and
@@ -716,9 +674,7 @@ int mv_face_tracking_model_prepare_open(
  *
  * @see mv_face_tracking_model_create_open()
  */
-int mv_face_tracking_model_clone_open(
-               mv_face_tracking_model_h src,
-               mv_face_tracking_model_h *dst);
+int mv_face_tracking_model_clone_open(mv_face_tracking_model_h src, mv_face_tracking_model_h *dst);
 
 /**
  * @brief Call this method to save tracking model to the file.
@@ -746,9 +702,7 @@ int mv_face_tracking_model_clone_open(
  * @see mv_face_tracking_model_load_open()
  * @see mv_face_tracking_model_create_open()
  */
-int mv_face_tracking_model_save_open(
-               const char *file_name,
-               mv_face_tracking_model_h tracking_model);
+int mv_face_tracking_model_save_open(const char *file_name, mv_face_tracking_model_h tracking_model);
 
 /**
  * @brief Call this method to load a tracking model from file.
@@ -780,9 +734,7 @@ int mv_face_tracking_model_save_open(
  * @see mv_face_tracking_model_save_open()
  * @see mv_face_tracking_model_destroy_open()
  */
-int mv_face_tracking_model_load_open(
-               const char *file_name,
-               mv_face_tracking_model_h *tracking_model);
+int mv_face_tracking_model_load_open(const char *file_name, mv_face_tracking_model_h *tracking_model);
 
 #ifdef __cplusplus
 }
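
Usage sketch (illustrative, not part of this patch) for the reformatted detection entry point; it assumes `source` already holds image data and that `mv_face_detected_cb` has the signature declared in mv_face.h:

    // Callback signature assumed from mv_face.h.
    static void on_face_detected(mv_source_h source, mv_engine_config_h engine_cfg,
                                 mv_rectangle_s *locations, int number_of_faces, void *user_data)
    {
    	// `locations` holds `number_of_faces` detected face rectangles
    }

    static int detect_faces(mv_source_h source)
    {
    	// NULL engine config: library defaults are assumed to apply
    	return mv_face_detect_open(source, NULL, on_face_detected, NULL);
    }
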
index 9b66368..756d536 100644 (file)
 
 #include "FaceDetector.h"
 
-namespace MediaVision {
-namespace Face {
-
-FaceDetector::FaceDetector() :
-               m_faceCascade(),
-               m_haarcascadeFilepath(),
-               m_faceCascadeIsLoaded(false)
+namespace MediaVision
+{
+namespace Face
+{
+FaceDetector::FaceDetector() : m_faceCascade(), m_haarcascadeFilepath(), m_faceCascadeIsLoaded(false)
 {
        ; /* NULL */
 }
@@ -32,11 +30,8 @@ FaceDetector::~FaceDetector()
        ; /* NULL */
 }
 
-bool FaceDetector::detectFaces(
-               const cv::Mat& image,
-               const cv::Rect& roi,
-               const cv::Size& minSize,
-               std::vector<cv::Rect>& faceLocations)
+bool FaceDetector::detectFaces(const cv::Mat &image, const cv::Rect &roi, const cv::Size &minSize,
+                                                          std::vector<cv::Rect> &faceLocations)
 {
        if (!m_faceCascadeIsLoaded)
                return false;
@@ -46,21 +41,14 @@ bool FaceDetector::detectFaces(
        cv::Mat intrestingRegion = image;
 
        bool roiIsUsed = false;
-       if (roi.x >= 0 && roi.y >= 0 && roi.width > 0 && roi.height > 0 &&
-                       (roi.x + roi.width) <= image.cols &&
-                       (roi.y + roi.height) <= image.rows) {
+       if (roi.x >= 0 && roi.y >= 0 && roi.width > 0 && roi.height > 0 && (roi.x + roi.width) <= image.cols &&
+               (roi.y + roi.height) <= image.rows) {
                intrestingRegion = intrestingRegion(roi);
                roiIsUsed = true;
        }
 
        try {
-               m_faceCascade.detectMultiScale(
-                               intrestingRegion,
-                               faceLocations,
-                               1.1,
-                               3,
-                               0,
-                               minSize);
+               m_faceCascade.detectMultiScale(intrestingRegion, faceLocations, 1.1, 3, 0, minSize);
        } catch (cv::Exception &e) {
                return false;
        }
@@ -76,10 +64,9 @@ bool FaceDetector::detectFaces(
        return true;
 }
 
-bool FaceDetector::loadHaarcascade(const std::string& haarcascadeFilepath)
+bool FaceDetector::loadHaarcascade(const std::string &haarcascadeFilepath)
 {
-       if (!m_faceCascadeIsLoaded ||
-                       m_haarcascadeFilepath != haarcascadeFilepath) {
+       if (!m_faceCascadeIsLoaded || m_haarcascadeFilepath != haarcascadeFilepath) {
                if (!(m_faceCascadeIsLoaded = m_faceCascade.load(haarcascadeFilepath)))
                        return false;
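
Usage sketch (illustrative, not part of this patch); the cascade path is hypothetical and `grayFrame` is a caller-provided frame:

    static void detector_sketch(const cv::Mat &grayFrame)
    {
    	MediaVision::Face::FaceDetector detector;
    	std::vector<cv::Rect> faces;

    	// An empty ROI fails the bounds check above, so the whole frame is scanned.
    	if (detector.loadHaarcascade("/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml") &&
    	    detector.detectFaces(grayFrame, cv::Rect(), cv::Size(30, 30), faces)) {
    		// `faces` now holds detections in full-image coordinates
    	}
    }
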
 
index 54f5309..fd345ae 100644 (file)
 
 #include <vector>
 
-
-namespace MediaVision {
-namespace Face {
-
+namespace MediaVision
+{
+namespace Face
+{
 static const int MIN_DETECTION_WIDTH = 30;
 static const int MIN_DETECTION_HEIGHT = 30;
 
-FaceRecognizerConfig::FaceRecognizerConfig() :
-               mHaarcascadeFilepath(
-                               "/usr/share/OpenCV/haarcascades/haarcascade_smile.xml")
+FaceRecognizerConfig::FaceRecognizerConfig()
+               : mHaarcascadeFilepath("/usr/share/OpenCV/haarcascades/haarcascade_smile.xml")
 {
        ; /* NULL */
 }
 
-int FaceExpressionRecognizer::recognizeFaceExpression(
-               const cv::Mat& grayImage,
-               const mv_rectangle_s& faceLocation,
-               mv_face_facial_expression_e *faceExpression,
-               const FaceRecognizerConfig& config)
+int FaceExpressionRecognizer::recognizeFaceExpression(const cv::Mat &grayImage, const mv_rectangle_s &faceLocation,
+                                                                                                         mv_face_facial_expression_e *faceExpression,
+                                                                                                         const FaceRecognizerConfig &config)
 {
        if (NULL == faceExpression)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
 
-       const int smileRectHeight = cvRound((float)faceLocation.height / 2);
+       const int smileRectHeight = cvRound((float) faceLocation.height / 2);
 
-       const cv::Rect roi(
-                                       faceLocation.point.x,
-                                       faceLocation.point.y + faceLocation.height - smileRectHeight,
-                                       faceLocation.width,
-                                       smileRectHeight);
+       const cv::Rect roi(faceLocation.point.x, faceLocation.point.y + faceLocation.height - smileRectHeight,
+                                          faceLocation.width, smileRectHeight);
 
-       if (roi.width < MIN_DETECTION_WIDTH ||
-                       roi.height < MIN_DETECTION_HEIGHT) {
+       if (roi.width < MIN_DETECTION_WIDTH || roi.height < MIN_DETECTION_HEIGHT) {
                (*faceExpression) = MV_FACE_UNKNOWN;
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       if (0 > roi.x || 0 > roi.y ||
-                       roi.x + roi.width > grayImage.cols ||
-                       roi.y + roi.height > grayImage.rows)
+       if (0 > roi.x || 0 > roi.y || roi.x + roi.width > grayImage.cols || roi.y + roi.height > grayImage.rows)
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
 
        const cv::Mat mouthImg(grayImage, roi);
@@ -68,15 +59,10 @@ int FaceExpressionRecognizer::recognizeFaceExpression(
 
        cv::CascadeClassifier smileClassifier;
        smileClassifier.load(config.mHaarcascadeFilepath);
-       smileClassifier.detectMultiScale(
-                       mouthImg,
-                       areas,
-                       1.1,
-                       80,
-                       cv::CASCADE_FIND_BIGGEST_OBJECT |
-                       cv::CASCADE_DO_CANNY_PRUNING    |
-                       cv::CASCADE_SCALE_IMAGE,
-                       cv::Size(MIN_DETECTION_WIDTH, MIN_DETECTION_HEIGHT));
+       smileClassifier.detectMultiScale(mouthImg, areas, 1.1, 80,
+                                                                        cv::CASCADE_FIND_BIGGEST_OBJECT | cv::CASCADE_DO_CANNY_PRUNING |
+                                                                                        cv::CASCADE_SCALE_IMAGE,
+                                                                        cv::Size(MIN_DETECTION_WIDTH, MIN_DETECTION_HEIGHT));
 
        (*faceExpression) = MV_FACE_UNKNOWN;
        const size_t smilesFoundSize = areas.size();
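
Usage sketch (illustrative, not part of this patch); `grayFrame` and `faceRect` are hypothetical detection outputs, and recognizeFaceExpression() is assumed to be a static member like its FaceEyeCondition counterpart:

    static bool is_smiling(const cv::Mat &grayFrame, const mv_rectangle_s &faceRect)
    {
    	mv_face_facial_expression_e expression = MV_FACE_UNKNOWN;
    	MediaVision::Face::FaceRecognizerConfig config; // defaults to the smile cascade path above

    	int err = MediaVision::Face::FaceExpressionRecognizer::recognizeFaceExpression(grayFrame, faceRect,
    	                                                                               &expression, config);
    	// MV_FACE_SMILE assumed from the mv_face_facial_expression_e enum in mv_face_type.h
    	return err == MEDIA_VISION_ERROR_NONE && expression == MV_FACE_SMILE;
    }
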
index 7e953c6..26276ce 100644 (file)
 #include <vector>
 #include <opencv2/imgproc/imgproc_c.h>
 
-namespace MediaVision {
-namespace Face {
-void FaceEyeCondition::splitEyes(
-                                                       const cv::Mat& grayImage,
-                                                       mv_rectangle_s faceLocation,
-                                                       cv::Mat& leftEye,
-                                                       cv::Mat& rightEye)
+namespace MediaVision
+{
+namespace Face
+{
+void FaceEyeCondition::splitEyes(const cv::Mat &grayImage, mv_rectangle_s faceLocation, cv::Mat &leftEye,
+                                                                cv::Mat &rightEye)
 {
        leftEye = grayImage.rowRange(0, grayImage.rows / 2 - grayImage.rows / 10)
-                                               .colRange(grayImage.cols / 2 + grayImage.cols / 10,
-                                                               grayImage.cols)
-                                               .clone();
-
-       rightEye = grayImage.rowRange(grayImage.rows / 2 + grayImage.rows / 10,
-                                                                       grayImage.rows)
-                                               .colRange(grayImage.cols / 2 + grayImage.cols / 10,
-                                                                       grayImage.cols)
-                                               .clone();
-
-       const cv::Rect faceRect(
-                                               faceLocation.point.x,
-                                               faceLocation.point.y,
-                                               faceLocation.width,
-                                               faceLocation.height);
-
-       const cv::Rect eyeAreaRight(
-                                               faceRect.x + faceRect.width / 16,
-                                               (int) (faceRect.y + (faceRect.height / 4.5)),
-                                               (faceRect.width - 2 * faceRect.width / 16) / 2,
-                                               (int) (faceRect.height / 3.0));
-
-       const cv::Rect eyeAreaLeft(
-                                               faceRect.x + faceRect.width / 16
-                                               + (faceRect.width - 2 * faceRect.width / 16) / 2,
-                                               (int) (faceRect.y + (faceRect.height / 4.5)),
-                                               (faceRect.width - 2 * faceRect.width / 16) / 2,
-                                               (int) (faceRect.height / 3.0));
+                                         .colRange(grayImage.cols / 2 + grayImage.cols / 10, grayImage.cols)
+                                         .clone();
+
+       rightEye = grayImage.rowRange(grayImage.rows / 2 + grayImage.rows / 10, grayImage.rows)
+                                          .colRange(grayImage.cols / 2 + grayImage.cols / 10, grayImage.cols)
+                                          .clone();
+
+       const cv::Rect faceRect(faceLocation.point.x, faceLocation.point.y, faceLocation.width, faceLocation.height);
+
+       const cv::Rect eyeAreaRight(faceRect.x + faceRect.width / 16, (int) (faceRect.y + (faceRect.height / 4.5)),
+                                                               (faceRect.width - 2 * faceRect.width / 16) / 2, (int) (faceRect.height / 3.0));
+
+       const cv::Rect eyeAreaLeft(faceRect.x + faceRect.width / 16 + (faceRect.width - 2 * faceRect.width / 16) / 2,
+                                                          (int) (faceRect.y + (faceRect.height / 4.5)),
+                                                          (faceRect.width - 2 * faceRect.width / 16) / 2, (int) (faceRect.height / 3.0));
 
        const double xLeftEyeCenter = (2 * eyeAreaLeft.x + eyeAreaLeft.width) / 2.;
        const double yLeftEyeCenter = (2 * eyeAreaLeft.y + eyeAreaLeft.height) / 2.;
@@ -65,27 +51,19 @@ void FaceEyeCondition::splitEyes(
        const double xRightEyeCenter = (2 * eyeAreaRight.x + eyeAreaRight.width) / 2.;
        const double yRightEyeCenter = (2 * eyeAreaRight.y + eyeAreaRight.height) / 2.;
 
-       const cv::Rect leftEyeRect((int)(xLeftEyeCenter - (double)eyeAreaLeft.width / 4),
-                                                               (int)(yLeftEyeCenter - (double)eyeAreaLeft.height / 4),
-                                                               eyeAreaLeft.width / 2,
-                                                               eyeAreaLeft.height / 2);
+       const cv::Rect leftEyeRect((int) (xLeftEyeCenter - (double) eyeAreaLeft.width / 4),
+                                                          (int) (yLeftEyeCenter - (double) eyeAreaLeft.height / 4), eyeAreaLeft.width / 2,
+                                                          eyeAreaLeft.height / 2);
 
-       const cv::Rect rightEyeRect((int)(xRightEyeCenter - (double)eyeAreaRight.width / 4),
-                                                               (int)(yRightEyeCenter - (double)eyeAreaRight.height / 4),
-                                                               eyeAreaRight.width / 2,
+       const cv::Rect rightEyeRect((int) (xRightEyeCenter - (double) eyeAreaRight.width / 4),
+                                                               (int) (yRightEyeCenter - (double) eyeAreaRight.height / 4), eyeAreaRight.width / 2,
                                                                eyeAreaRight.height / 2);
 
-       cv::resize(
-                       grayImage(leftEyeRect),
-                       leftEye,
-                       leftEye.size());
-       cv::resize(
-                       grayImage(rightEyeRect),
-                       rightEye,
-                       rightEye.size());
+       cv::resize(grayImage(leftEyeRect), leftEye, leftEye.size());
+       cv::resize(grayImage(rightEyeRect), rightEye, rightEye.size());
 }
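
For reference, the eye-search geometry above works out as follows for a hypothetical 160x160 face rectangle at the origin:

    // Hypothetical face: faceRect = (0, 0, 160, 160)
    //   eyeAreaRight = (0 + 160/16, (int)(0 + 160/4.5), (160 - 2*160/16)/2, (int)(160/3.0))
    //                = (10, 35, 70, 53)
    //   eyeAreaLeft  = (10 + 70, 35, 70, 53) = (80, 35, 70, 53)
    // Each final eye rectangle is centered in its area with half its width and height.
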
 
-int FaceEyeCondition::isEyeOpen(const cv::Mat& eye)
+int FaceEyeCondition::isEyeOpen(const cv::Mat &eye)
 {
        int isOpen = MV_FACE_EYES_CLOSED;
 
@@ -98,12 +76,7 @@ int FaceEyeCondition::isEyeOpen(const cv::Mat& eye)
        std::vector<std::vector<cv::Point> > contours;
        std::vector<cv::Vec4i> hierarchy;
 
-       cv::findContours(
-                       eyeEqualized,
-                       contours,
-                       hierarchy,
-                       CV_RETR_CCOMP,
-                       CV_CHAIN_APPROX_SIMPLE);
+       cv::findContours(eyeEqualized, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
 
        const size_t contoursSize = contours.size();
 
@@ -126,14 +99,11 @@ int FaceEyeCondition::isEyeOpen(const cv::Mat& eye)
                const cv::Rect currentRect = cv::boundingRect(contours[i]);
                const double currentArea = cv::contourArea(contours[i]);
 
-               if (boundThresold.contains(currentRect.br()) &&
-                               boundThresold.contains(currentRect.tl()) &&
-                               currentArea > areaRatio * boundThresold.area() &&
-                               currentRect.width < widthHeightRatio * currentRect.height)
+               if (boundThresold.contains(currentRect.br()) && boundThresold.contains(currentRect.tl()) &&
+                       currentArea > areaRatio * boundThresold.area() && currentRect.width < widthHeightRatio * currentRect.height)
                        isOpen = MV_FACE_EYES_OPEN;
-               else if (boundThresold.contains(currentRect.br()) &&
-                               boundThresold.contains(currentRect.tl()) &&
-                               currentArea > areaSmallRatio * boundThresold.area())
+               else if (boundThresold.contains(currentRect.br()) && boundThresold.contains(currentRect.tl()) &&
+                                currentArea > areaSmallRatio * boundThresold.area())
                        ++rectanglesInsideCount;
        }
 
@@ -143,10 +113,8 @@ int FaceEyeCondition::isEyeOpen(const cv::Mat& eye)
        return isOpen;
 }
 
-int FaceEyeCondition::recognizeEyeCondition(
-               const cv::Mat& grayImage,
-               mv_rectangle_s faceLocation,
-               mv_face_eye_condition_e *eyeCondition)
+int FaceEyeCondition::recognizeEyeCondition(const cv::Mat &grayImage, mv_rectangle_s faceLocation,
+                                                                                       mv_face_eye_condition_e *eyeCondition)
 {
        if (grayImage.empty()) {
                *eyeCondition = MV_FACE_EYES_NOT_FOUND;
@@ -160,8 +128,7 @@ int FaceEyeCondition::recognizeEyeCondition(
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       if (faceLocation.height <= 0 || faceLocation.width <= 0 ||
-               faceLocation.point.x < 0 || faceLocation.point.y < 0 ||
+       if (faceLocation.height <= 0 || faceLocation.width <= 0 || faceLocation.point.x < 0 || faceLocation.point.y < 0 ||
                (faceLocation.point.x + faceLocation.width) > grayImage.cols ||
                (faceLocation.point.y + faceLocation.height) > grayImage.rows) {
                *eyeCondition = MV_FACE_EYES_NOT_FOUND;
index 1cc519f..e104147 100644 (file)
 #include <unistd.h>
 #include <fstream>
 
-namespace MediaVision {
-namespace Face {
-namespace {
-
+namespace MediaVision
+{
+namespace Face
+{
+namespace
+{
 unsigned int DefaultUnisizeWidth = 200;
 unsigned int DefaultUnisizeHeight = 200;
 
-bool isEmptyAlgorithmParam(const std::string& path)
+bool isEmptyAlgorithmParam(const std::string &path)
 {
        char valid[256] = "";
        std::ifstream ifs;
@@ -56,8 +58,8 @@ bool isEmptyAlgorithmParam(const std::string& path)
        return false;
 }
 
-int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::face::FaceRecognizer>& srcAlg,
-               cv::Ptr<cv::face::FaceRecognizer>& dstAlg)
+int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::face::FaceRecognizer> &srcAlg,
+                                                                 cv::Ptr<cv::face::FaceRecognizer> &dstAlg)
 {
        char tempPath[1024] = "";
 
@@ -122,9 +124,7 @@ int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::face::FaceRecognizer>& srcAl
        return MEDIA_VISION_ERROR_NONE;
 }
 
-void ParseOpenCVLabels(
-               const cv::Mat labels,
-               std::set<int>& outLabels)
+void ParseOpenCVLabels(const cv::Mat labels, std::set<int> &outLabels)
 {
        if (!labels.empty()) {
                for (int i = 0; i < labels.rows; ++i)
@@ -134,62 +134,50 @@ void ParseOpenCVLabels(
 
 } /* anonymous namespace */
 
-FaceRecognitionModelConfig::FaceRecognitionModelConfig() :
-               mModelType(MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN),
-               mNumComponents(0),
-               mThreshold(DBL_MAX),
-               mRadius(1),
-               mNeighbors(8),
-               mGridX(8),
-               mGridY(8),
-               mImgWidth(DefaultUnisizeWidth),
-               mImgHeight(DefaultUnisizeHeight)
+FaceRecognitionModelConfig::FaceRecognitionModelConfig()
+               : mModelType(MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN)
+               , mNumComponents(0)
+               , mThreshold(DBL_MAX)
+               , mRadius(1)
+               , mNeighbors(8)
+               , mGridX(8)
+               , mGridY(8)
+               , mImgWidth(DefaultUnisizeWidth)
+		, mImgHeight(DefaultUnisizeHeight)
 {
        ; /* NULL */
 }
 
-FaceRecognitionResults::FaceRecognitionResults() :
-               mIsRecognized(false),
-               mFaceLabel(-1),
-               mConfidence(0.0)
+FaceRecognitionResults::FaceRecognitionResults() : mIsRecognized(false), mFaceLabel(-1), mConfidence(0.0)
 {
        ; /* NULL */
 }
 
-bool FaceRecognitionModelConfig::operator!=(
-               const FaceRecognitionModelConfig& other) const
+bool FaceRecognitionModelConfig::operator!=(const FaceRecognitionModelConfig &other) const
 {
-       return mModelType      != other.mModelType     ||
-                       mNumComponents != other.mNumComponents ||
-                       mThreshold     != other.mThreshold     ||
-                       mRadius        != other.mRadius        ||
-                       mNeighbors     != other.mNeighbors     ||
-                       mGridX         != other.mGridX         ||
-                       mGridY         != other.mGridY         ||
-                       mImgWidth      != other.mImgWidth      ||
-                       mImgHeight     != other.mImgHeight;
+       return mModelType != other.mModelType || mNumComponents != other.mNumComponents || mThreshold != other.mThreshold ||
+                  mRadius != other.mRadius || mNeighbors != other.mNeighbors || mGridX != other.mGridX ||
+                  mGridY != other.mGridY || mImgWidth != other.mImgWidth || mImgHeight != other.mImgHeight;
 }
 
-FaceRecognitionModel::FaceRecognitionModel() :
-               m_canRecognize(false),
-               m_recognizer() // The default constructor creates a null Ptr
+FaceRecognitionModel::FaceRecognitionModel()
+               : m_canRecognize(false), m_recognizer() // The default constructor creates a null Ptr
 {
        ; /* NULL */
 }
 
-FaceRecognitionModel::FaceRecognitionModel(const FaceRecognitionModel& origin) :
-               m_canRecognize(origin.m_canRecognize),
-               m_faceSamples(origin.m_faceSamples),
-               m_learnAlgorithmConfig(origin.m_learnAlgorithmConfig),
-               m_recognizer(CreateRecognitionAlgorithm(origin.m_learnAlgorithmConfig)),
-               m_learnedLabels(origin.m_learnedLabels)
+FaceRecognitionModel::FaceRecognitionModel(const FaceRecognitionModel &origin)
+               : m_canRecognize(origin.m_canRecognize)
+               , m_faceSamples(origin.m_faceSamples)
+               , m_learnAlgorithmConfig(origin.m_learnAlgorithmConfig)
+               , m_recognizer(CreateRecognitionAlgorithm(origin.m_learnAlgorithmConfig))
+		, m_learnedLabels(origin.m_learnedLabels)
 {
        if (!m_recognizer.empty())
                CopyOpenCVAlgorithmParameters(origin.m_recognizer, m_recognizer);
 }
 
-FaceRecognitionModel& FaceRecognitionModel::operator=(
-               const FaceRecognitionModel& copy)
+FaceRecognitionModel &FaceRecognitionModel::operator=(const FaceRecognitionModel &copy)
 {
        if (this != &copy) {
                m_canRecognize = copy.m_canRecognize;
@@ -210,7 +198,7 @@ FaceRecognitionModel::~FaceRecognitionModel()
        ; /* NULL */
 }
 
-int FaceRecognitionModel::save(const std::string& fileName)
+int FaceRecognitionModel::save(const std::string &fileName)
 {
        if (!m_recognizer.empty()) {
                std::string filePath;
@@ -226,7 +214,7 @@ int FaceRecognitionModel::save(const std::string& fileName)
 
                                return MEDIA_VISION_ERROR_INVALID_PATH;
                        }
-		} catch (const std::out_of_range& e) {
+               } catch (const std::out_of_range &e) {
                        LOGE("Can't save recognition model. Path[%s] doesn't exist.", filePath.c_str());
                        return MEDIA_VISION_ERROR_INVALID_PATH;
                }
@@ -239,17 +227,20 @@ int FaceRecognitionModel::save(const std::string& fileName)
 
                switch (m_learnAlgorithmConfig.mModelType) {
                case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES:
-                       storage << "algorithm" << "Eigenfaces";
+                       storage << "algorithm"
+                                       << "Eigenfaces";
                        storage << "resizeW" << m_learnAlgorithmConfig.mImgWidth;
                        storage << "resizeH" << m_learnAlgorithmConfig.mImgHeight;
                        break;
                case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES:
-                       storage << "algorithm" << "Fisherfaces";
+                       storage << "algorithm"
+                                       << "Fisherfaces";
                        storage << "resizeW" << m_learnAlgorithmConfig.mImgWidth;
                        storage << "resizeH" << m_learnAlgorithmConfig.mImgHeight;
                        break;
                case MEDIA_VISION_FACE_MODEL_TYPE_LBPH:
-                       storage << "algorithm" << "LBPH";
+                       storage << "algorithm"
+                                       << "LBPH";
                        break;
                default:
                        storage.release();
@@ -268,7 +259,7 @@ int FaceRecognitionModel::save(const std::string& fileName)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int FaceRecognitionModel::load(const std::string& fileName)
+int FaceRecognitionModel::load(const std::string &fileName)
 {
        std::string filePath;
 
@@ -304,9 +295,8 @@ int FaceRecognitionModel::load(const std::string& fileName)
                storage["resizeW"] >> tempConfig.mImgWidth;
                storage["resizeH"] >> tempConfig.mImgHeight;
                tempRecognizer->read(storage.root());
-               tempConfig.mModelType =
-                               MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES;
-               cv::face::EigenFaceRecognizer* recognizer = dynamic_cast<cv::face::EigenFaceRecognizer*>(tempRecognizer.get());
+               tempConfig.mModelType = MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES;
+               cv::face::EigenFaceRecognizer *recognizer = dynamic_cast<cv::face::EigenFaceRecognizer *>(tempRecognizer.get());
                if (recognizer != NULL) {
                        tempConfig.mNumComponents = recognizer->getNumComponents();
                        labels = recognizer->getLabels();
@@ -318,9 +308,9 @@ int FaceRecognitionModel::load(const std::string& fileName)
                storage["resizeW"] >> tempConfig.mImgWidth;
                storage["resizeH"] >> tempConfig.mImgHeight;
                tempRecognizer->read(storage.root());
-               tempConfig.mModelType =
-                               MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES;
-               cv::face::FisherFaceRecognizer* recognizer = dynamic_cast<cv::face::FisherFaceRecognizer*>(tempRecognizer.get());
+               tempConfig.mModelType = MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES;
+               cv::face::FisherFaceRecognizer *recognizer =
+                               dynamic_cast<cv::face::FisherFaceRecognizer *>(tempRecognizer.get());
                if (recognizer != NULL) {
                        tempConfig.mNumComponents = recognizer->getNumComponents();
                        labels = recognizer->getLabels();
@@ -330,10 +320,9 @@ int FaceRecognitionModel::load(const std::string& fileName)
        } else if (algName == "LBPH") {
                tempRecognizer = cv::face::LBPHFaceRecognizer::create();
                tempRecognizer->read(storage.root());
-               cv::face::LBPHFaceRecognizer* recognizer = dynamic_cast<cv::face::LBPHFaceRecognizer*>(tempRecognizer.get());
+               cv::face::LBPHFaceRecognizer *recognizer = dynamic_cast<cv::face::LBPHFaceRecognizer *>(tempRecognizer.get());
                if (recognizer != NULL) {
-                       tempConfig.mModelType =
-                                       MEDIA_VISION_FACE_MODEL_TYPE_LBPH;
+                       tempConfig.mModelType = MEDIA_VISION_FACE_MODEL_TYPE_LBPH;
                        tempConfig.mGridX = recognizer->getGridX();
                        tempConfig.mGridY = recognizer->getGridY();
                        tempConfig.mNeighbors = recognizer->getNeighbors();
@@ -345,7 +334,7 @@ int FaceRecognitionModel::load(const std::string& fileName)
        } else {
                tempConfig = FaceRecognitionModelConfig();
                LOGE("Failed to load face recognition model from file. File is in "
-                               "unsupported format");
+                        "unsupported format");
 
                storage.release();
 
@@ -354,8 +343,7 @@ int FaceRecognitionModel::load(const std::string& fileName)
        ParseOpenCVLabels(labels, tempLearnedLabels);
        tempConfig.mThreshold = tempRecognizer->getThreshold();
 
-       LOGD("Recognition model of [%s] type has been loaded from file",
-                       algName.c_str());
+       LOGD("Recognition model of [%s] type has been loaded from file", algName.c_str());
 
        storage.release();
 
@@ -368,14 +356,11 @@ int FaceRecognitionModel::load(const std::string& fileName)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int FaceRecognitionModel::addFaceExample(
-               const cv::Mat& faceImage,
-               int faceLabel)
+int FaceRecognitionModel::addFaceExample(const cv::Mat &faceImage, int faceLabel)
 {
        m_faceSamples[faceLabel].push_back(faceImage);
 
-       LOGD("Added face image example for label %i for recognition model",
-                       faceLabel);
+       LOGD("Added face image example for label %i for recognition model", faceLabel);
 
        return MEDIA_VISION_ERROR_NONE;
 }
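
Usage sketch (illustrative, not part of this patch): a save/load round trip through the serialization code above; `learned` stands for an already-trained model and the file name is hypothetical:

    static void persistence_sketch(const MediaVision::Face::FaceRecognitionModel &learned)
    {
    	MediaVision::Face::FaceRecognitionModel model(learned); // copy keeps algorithm parameters
    	if (model.save("face_recognition.model") == MEDIA_VISION_ERROR_NONE) {
    		MediaVision::Face::FaceRecognitionModel restored;
    		restored.load("face_recognition.model"); // restores algorithm type and learned labels
    	}
    }
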
@@ -393,31 +378,32 @@ int FaceRecognitionModel::resetFaceExamples(int faceLabel)
 {
        if (1 > m_faceSamples.erase(faceLabel)) {
                LOGD("Failed to remove face image examples for label %i. "
-                               "No such examples", faceLabel);
+                        "No such examples",
+                        faceLabel);
 
                return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
        }
 
        LOGD("Face image examples for label %i have been removed from "
-                       "recognition model", faceLabel);
+                "recognition model",
+                faceLabel);
 
        return MEDIA_VISION_ERROR_NONE;
 }
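
The "1 > m_faceSamples.erase(faceLabel)" test above relies on std::map::erase(key) returning the number of erased elements, which is 0 or 1 for a map. A sketch of the same idiom in isolation, with hypothetical values:

	std::map<int, std::vector<cv::Mat> > samples;
	samples[7].push_back(cv::Mat());   // one sample bucket for label 7
	size_t removed = samples.erase(7); // 1 -> the label existed
	removed = samples.erase(7);        // 0 -> nothing left: report "no such examples"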
 
-const std::set<int>& FaceRecognitionModel::getFaceLabels(void) const
+const std::set<int> &FaceRecognitionModel::getFaceLabels(void) const
 {
        return m_learnedLabels;
 }
 
-int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config)
+int FaceRecognitionModel::learn(const FaceRecognitionModelConfig &config)
 {
	/* Check number of classes collected for learning; some algorithms
	 * require specific class number constraints. For example, Fisherfaces
	 * requires more than 1 class in training set */
-       if (MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == config.mModelType &&
-                       m_faceSamples.size() < 2) {
+       if (MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == config.mModelType && m_faceSamples.size() < 2) {
                LOGE("Can't apply Fisherfaces learning algorithm. It requires at "
-                               "least two classes (face labels) to learn on.");
+                        "least two classes (face labels) to learn on.");
 
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
@@ -429,7 +415,7 @@ int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config)
                isIncremental = true;
 
        if (MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES == config.mModelType ||
-                       MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == config.mModelType)
+               MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == config.mModelType)
                isUnisize = true;
 
        std::vector<cv::Mat> samples;
@@ -447,16 +433,13 @@ int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config)
                learnedLabels.insert(it->first);
 
                if (!isUnisize) {
-                       LOGD("%zu examples have been added with label %i",
-                                       it->second.size(), it->first);
+                       LOGD("%zu examples have been added with label %i", it->second.size(), it->first);
                        samples.insert(samples.end(), it->second.begin(), it->second.end());
                } else {
                        for (size_t sampleInd = 0; sampleInd < faceClassSamplesSize; ++sampleInd) {
                                cv::Mat resizedSample;
-                               cv::resize(it->second[sampleInd],
-                                               resizedSample,
-                                               cv::Size(config.mImgWidth, config.mImgHeight),
-                                               0.0, 0.0, cv::INTER_CUBIC);
+                               cv::resize(it->second[sampleInd], resizedSample, cv::Size(config.mImgWidth, config.mImgHeight), 0.0,
+                                                  0.0, cv::INTER_CUBIC);
                                samples.push_back(resizedSample);
                        }
                }
@@ -466,27 +449,25 @@ int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config)
        const size_t labelsSize = labels.size();
 
        if (0 != samplesSize && samplesSize == labelsSize) {
-               LOGD("Start to learn the model for %zu samples and %zu labels",
-                               samplesSize, labelsSize);
+               LOGD("Start to learn the model for %zu samples and %zu labels", samplesSize, labelsSize);
 
                if (m_learnAlgorithmConfig != config || m_recognizer.empty())
                        m_recognizer = CreateRecognitionAlgorithm(config);
 
                if (m_recognizer.empty()) {
                        LOGE("Can't create recognition algorithm for recognition model. "
-                                       "Configuration is not supported by any of known algorithms.");
+                                "Configuration is not supported by any of known algorithms.");
 
                        return MEDIA_VISION_ERROR_NOT_SUPPORTED;
                }
 
-               isIncremental ? m_recognizer->update(samples, labels) :
-                                               m_recognizer->train(samples, labels);
+               isIncremental ? m_recognizer->update(samples, labels) : m_recognizer->train(samples, labels);
                m_canRecognize = true;
                m_learnedLabels.clear();
                m_learnedLabels = learnedLabels;
        } else {
                LOGE("Can't create recognition algorithm for no examples. Try to add "
-                               "some face examples before learning");
+                        "some face examples before learning");
 
                return MEDIA_VISION_ERROR_NO_DATA;
        }
@@ -498,17 +479,15 @@ int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int FaceRecognitionModel::recognize(const cv::Mat& image, FaceRecognitionResults& results)
+int FaceRecognitionModel::recognize(const cv::Mat &image, FaceRecognitionResults &results)
 {
        if (!m_recognizer.empty() && m_canRecognize) {
                double absConf = 0.0;
-               cv::Mat predictionImg(m_learnAlgorithmConfig.mImgWidth,
-                               m_learnAlgorithmConfig.mImgHeight, CV_8UC1);
+               cv::Mat predictionImg(m_learnAlgorithmConfig.mImgWidth, m_learnAlgorithmConfig.mImgHeight, CV_8UC1);
 
                if ((MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES == m_learnAlgorithmConfig.mModelType ||
-                               MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == m_learnAlgorithmConfig.mModelType) &&
-                               (image.cols != m_learnAlgorithmConfig.mImgWidth ||
-                               image.rows != m_learnAlgorithmConfig.mImgHeight))
+                        MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == m_learnAlgorithmConfig.mModelType) &&
+                       (image.cols != m_learnAlgorithmConfig.mImgWidth || image.rows != m_learnAlgorithmConfig.mImgHeight))
                        cv::resize(image, predictionImg, predictionImg.size());
                else
                        predictionImg = image;
@@ -545,28 +524,20 @@ int FaceRecognitionModel::recognize(const cv::Mat& image, FaceRecognitionResults
        return MEDIA_VISION_ERROR_NONE;
 }
 
-cv::Ptr<cv::face::FaceRecognizer> FaceRecognitionModel::CreateRecognitionAlgorithm(
-               const FaceRecognitionModelConfig& config)
+cv::Ptr<cv::face::FaceRecognizer>
+FaceRecognitionModel::CreateRecognitionAlgorithm(const FaceRecognitionModelConfig &config)
 {
        cv::Ptr<cv::face::FaceRecognizer> tempRecognizer;
        switch (config.mModelType) {
        case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES:
-               tempRecognizer = cv::face::EigenFaceRecognizer::create(
-                                                                       config.mNumComponents,
-                                                                       config.mThreshold);
+               tempRecognizer = cv::face::EigenFaceRecognizer::create(config.mNumComponents, config.mThreshold);
                break;
        case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES:
-               tempRecognizer = cv::face::FisherFaceRecognizer::create(
-                                                                       config.mNumComponents,
-                                                                       config.mThreshold);
+               tempRecognizer = cv::face::FisherFaceRecognizer::create(config.mNumComponents, config.mThreshold);
                break;
        case MEDIA_VISION_FACE_MODEL_TYPE_LBPH:
-               tempRecognizer = cv::face::LBPHFaceRecognizer::create(
-                                                                       config.mRadius,
-                                                                       config.mNeighbors,
-                                                                       config.mGridX,
-                                                                       config.mGridY,
-                                                                       config.mThreshold);
+               tempRecognizer = cv::face::LBPHFaceRecognizer::create(config.mRadius, config.mNeighbors, config.mGridX,
+                                                                                                                         config.mGridY, config.mThreshold);
                break;
        default:
                LOGE("Unknown FaceRecognition model");
index 9303b16..30cdf64 100644
 #include <algorithm>
 #include <cmath>
 
-namespace {
-       float FloatEps = 10e-6f;
+namespace
+{
+float FloatEps = 10e-6f;
 } /* anonymous namespace */
 
-namespace cv {
+namespace cv
+{
 FaceTracker::Params::Params()
 {
        mPointsInGrid = 10;
@@ -59,7 +61,7 @@ FaceTracker::Params::Params()
        mPyrMaxLevel = 5;
 }
 
-void FaceTracker::Params::read(const cv::FileNode& fn)
+void FaceTracker::Params::read(const cv::FileNode &fn)
 {
        mPointsInGrid = fn["pointsInGrid"];
        int winSizeHeight = fn["windowSizeHeight"];
@@ -68,7 +70,7 @@ void FaceTracker::Params::read(const cv::FileNode& fn)
        mPyrMaxLevel = fn["pyrMaxLevel"];
 }
 
-void FaceTracker::Params::write(cv::FileStorage& fs) const
+void FaceTracker::Params::write(cv::FileStorage &fs) const
 {
        fs << "pointsInGrid" << mPointsInGrid;
        fs << "windowSizeHeight" << mWindowSize.height;
@@ -76,15 +78,14 @@ void FaceTracker::Params::write(cv::FileStorage& fs) const
        fs << "pyrMaxLevel" << mPyrMaxLevel;
 }
 
-FaceTracker::FaceTracker(Params paramsIn) :
-       m_termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.3),
-       m_confidence(0.0)
+FaceTracker::FaceTracker(Params paramsIn)
+               : m_termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.3), m_confidence(0.0)
 {
        m_params = paramsIn;
        m_isInit = false;
 }
 
-bool FaceTracker::copyTo(FaceTracker& copy) const
+bool FaceTracker::copyTo(FaceTracker &copy) const
 {
        copy.m_isInit = m_isInit;
        copy.m_params = m_params;
@@ -95,21 +96,20 @@ bool FaceTracker::copyTo(FaceTracker& copy) const
        return true;
 }
 
-bool FaceTracker::initImpl(const Mat& image, const Rect2d& boundingBox)
+bool FaceTracker::initImpl(const Mat &image, const Rect2d &boundingBox)
 {
        if (image.empty())
                return false;
 
        image.copyTo(m_image);
-       buildOpticalFlowPyramid(
-                       m_image, m_pyramid, m_params.mWindowSize, m_params.mPyrMaxLevel);
+       buildOpticalFlowPyramid(m_image, m_pyramid, m_params.mWindowSize, m_params.mPyrMaxLevel);
        m_boundingBox = boundingBox;
 
        m_isInit = true;
        return m_isInit;
 }
 
-bool FaceTracker::updateImpl(const Mat& image, Rect2d& boundingBox)
+bool FaceTracker::updateImpl(const Mat &image, Rect2d &boundingBox)
 {
        if (!m_isInit || image.empty())
                return false;
@@ -132,11 +132,11 @@ bool FaceTracker::updateImpl(const Mat& image, Rect2d& boundingBox)
 
        Mat oldImage = m_image;
 
-       Rect2f oldBox = (Rect2f)m_boundingBox;
-       if(!medianFlowImpl(oldImage, image, oldBox))
+       Rect2f oldBox = (Rect2f) m_boundingBox;
+       if (!medianFlowImpl(oldImage, image, oldBox))
                return false;
 
-       boundingBox = (Rect2d)oldBox;
+       boundingBox = (Rect2d) oldBox;
        image.copyTo(m_image);
        m_boundingBox = boundingBox;
        return true;
@@ -157,8 +157,7 @@ Rect_<float> FaceTracker::getLastBoundingBox() const
        return m_boundingBox;
 }
 
-bool FaceTracker::medianFlowImpl(
-               Mat oldGrayImage, Mat newGrayImage, Rect2f& oldBox)
+bool FaceTracker::medianFlowImpl(Mat oldGrayImage, Mat newGrayImage, Rect2f &oldBox)
 {
        std::vector<Point2f> pointsToTrackOld, pointsToTrackNew;
 
@@ -166,29 +165,17 @@ bool FaceTracker::medianFlowImpl(
        const float gridYStep = oldBox.height / m_params.mPointsInGrid;
        for (int i = 0; i < m_params.mPointsInGrid; i++)
                for (int j = 0; j < m_params.mPointsInGrid; j++)
-                       pointsToTrackOld.push_back(
-                                               Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j,
-                                                               oldBox.y + .5f*gridYStep + 1.f*gridYStep*i));
+                       pointsToTrackOld.push_back(Point2f(oldBox.x + .5f * gridXStep + 1.f * gridXStep * j,
+                                                                                          oldBox.y + .5f * gridYStep + 1.f * gridYStep * i));
 
        std::vector<uchar> status(pointsToTrackOld.size());
        std::vector<float> errors(pointsToTrackOld.size());
 
        std::vector<Mat> tempPyramid;
-       buildOpticalFlowPyramid(
-                                                       newGrayImage,
-                                                       tempPyramid,
-                                                       m_params.mWindowSize,
-                                                       m_params.mPyrMaxLevel);
-
-       calcOpticalFlowPyrLK(m_pyramid,
-                                               tempPyramid,
-                                               pointsToTrackOld,
-                                               pointsToTrackNew,
-                                               status,
-                                               errors,
-                                               m_params.mWindowSize,
-                                               m_params.mPyrMaxLevel,
-                                               m_termcrit);
+       buildOpticalFlowPyramid(newGrayImage, tempPyramid, m_params.mWindowSize, m_params.mPyrMaxLevel);
+
+       calcOpticalFlowPyrLK(m_pyramid, tempPyramid, pointsToTrackOld, pointsToTrackNew, status, errors,
+                                                m_params.mWindowSize, m_params.mPyrMaxLevel, m_termcrit);
 
        std::vector<Point2f> di;
        for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++)
@@ -196,16 +183,9 @@ bool FaceTracker::medianFlowImpl(
                        di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]);
 
        std::vector<bool> filter_status;
-       check_FB(tempPyramid,
-                               pointsToTrackOld,
-                               pointsToTrackNew,
-                               filter_status);
+       check_FB(tempPyramid, pointsToTrackOld, pointsToTrackNew, filter_status);
 
-       check_NCC(oldGrayImage,
-                               newGrayImage,
-                               pointsToTrackOld,
-                               pointsToTrackNew,
-                               filter_status);
+       check_NCC(oldGrayImage, newGrayImage, pointsToTrackOld, pointsToTrackNew, filter_status);
 
        for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++) {
                if (!filter_status[idx]) {
@@ -220,8 +200,7 @@ bool FaceTracker::medianFlowImpl(
                return false;
 
        Point2f mDisplacement;
-       Rect_<float> boxCandidate =
-                       vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement);
+       Rect_<float> boxCandidate = vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement);
 
        std::vector<float> displacements;
        for (size_t idx = 0u; idx < di.size(); idx++) {
@@ -229,8 +208,7 @@ bool FaceTracker::medianFlowImpl(
                displacements.push_back(sqrt(di[idx].ddot(di[idx])));
        }
 
-       m_confidence =
-                               (10.f - getMedian(displacements, (int)displacements.size())) / 10.f;
+       m_confidence = (10.f - getMedian(displacements, (int) displacements.size())) / 10.f;
        if (m_confidence <= 0.f) {
                m_confidence = 0.f;
                return false;
@@ -241,22 +219,18 @@ bool FaceTracker::medianFlowImpl(
        return true;
 }
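
The confidence above is a clamped linear map of the median of the collected displacement magnitudes: confidence = (10 - median) / 10, so a median of 2.5 px yields 0.75 and anything at or above 10 px aborts the update. A worked check of the formula with assumed values:

	std::vector<float> displacements { 1.f, 2.5f, 4.f }; // per-point magnitudes
	std::sort(displacements.begin(), displacements.end());
	float median = displacements[displacements.size() / 2]; // 2.5f for an odd count
	float confidence = (10.f - median) / 10.f;              // 0.75f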
 
-Rect_<float> FaceTracker::vote(
-               const std::vector<Point2f>& oldPoints,
-               const std::vector<Point2f>& newPoints,
-               const Rect_<float>& oldRect,
-               Point2f& mD)
+Rect_<float> FaceTracker::vote(const std::vector<Point2f> &oldPoints, const std::vector<Point2f> &newPoints,
+                                                          const Rect_<float> &oldRect, Point2f &mD)
 {
        Rect_<float> newRect;
-       Point2d newCenter(oldRect.x + oldRect.width/2.0,
-                                               oldRect.y + oldRect.height/2.0);
+       Point2d newCenter(oldRect.x + oldRect.width / 2.0, oldRect.y + oldRect.height / 2.0);
 
-       int n = (int)oldPoints.size();
-       std::vector<float> buf(std::max(n*(n-1) / 2, 3), 0.f);
+       int n = (int) oldPoints.size();
+       std::vector<float> buf(std::max(n * (n - 1) / 2, 3), 0.f);
 
-       if(oldPoints.size() == 1) {
-               newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x;
-               newRect.y = oldRect.y+newPoints[0].y-oldPoints[0].y;
+       if (oldPoints.size() == 1) {
+               newRect.x = oldRect.x + newPoints[0].x - oldPoints[0].x;
+               newRect.y = oldRect.y + newPoints[0].y - oldPoints[0].y;
                newRect.width = oldRect.width;
                newRect.height = oldRect.height;
                return newRect;
@@ -264,19 +238,19 @@ Rect_<float> FaceTracker::vote(
 
        float xshift = 0.f;
        float yshift = 0.f;
-       for(int i = 0; i < n; i++)
+       for (int i = 0; i < n; i++)
                buf[i] = newPoints[i].x - oldPoints[i].x;
 
        xshift = getMedian(buf, n);
        newCenter.x += xshift;
-       for(int idx = 0; idx < n; idx++)
+       for (int idx = 0; idx < n; idx++)
                buf[idx] = newPoints[idx].y - oldPoints[idx].y;
 
        yshift = getMedian(buf, n);
        newCenter.y += yshift;
        mD = Point2f(xshift, yshift);
 
-       if(oldPoints.size() == 1) {
+       if (oldPoints.size() == 1) {
                newRect.x = newCenter.x - oldRect.width / 2.0;
                newRect.y = newCenter.y - oldRect.height / 2.0;
                newRect.width = oldRect.width;
@@ -287,7 +261,7 @@ Rect_<float> FaceTracker::vote(
        float nd = 0.f;
        float od = 0.f;
        for (int i = 0, ctr = 0; i < n; i++) {
-               for(int j = 0; j < i; j++) {
+               for (int j = 0; j < i; j++) {
                        nd = l2distance(newPoints[i], newPoints[j]);
                        od = l2distance(oldPoints[i], oldPoints[j]);
                        buf[ctr] = (od == 0.f ? 0.f : nd / od);
@@ -295,25 +269,24 @@ Rect_<float> FaceTracker::vote(
                }
        }
 
-       float scale = getMedian(buf, n*(n-1) / 2);
+       float scale = getMedian(buf, n * (n - 1) / 2);
        newRect.x = newCenter.x - scale * oldRect.width / 2.f;
-       newRect.y = newCenter.y-scale * oldRect.height / 2.f;
+       newRect.y = newCenter.y - scale * oldRect.height / 2.f;
        newRect.width = scale * oldRect.width;
        newRect.height = scale * oldRect.height;
 
        return newRect;
 }
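
In plain numbers: if two grid points were 10 px apart in the old frame and their tracked positions are 12 px apart, that pair contributes the ratio 12/10 = 1.2. Taking the median of these ratios over all point pairs makes the scale estimate robust to a minority of badly tracked points; the box is then rescaled by that factor around the shifted center.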
 
-template<typename T>
-T FaceTracker::getMedian(std::vector<T>& values, int size)
+template<typename T> T FaceTracker::getMedian(std::vector<T> &values, int size)
 {
        if (size == -1)
-               size = (int)values.size();
+               size = (int) values.size();
 
        std::vector<T> copy(values.begin(), values.begin() + size);
        std::sort(copy.begin(), copy.end());
-       if(size%2 == 0) {
-               return (copy[size/2-1]+copy[size/2])/((T)2.0);
+       if (size % 2 == 0) {
+               return (copy[size / 2 - 1] + copy[size / 2]) / ((T) 2.0);
        } else {
                return copy[(size - 1) / 2];
        }
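
A quick check of the two branches with hypothetical inputs:

	std::vector<float> v { 5.f, 2.f, 9.f, 4.f }; // sorted copy: {2, 4, 5, 9}
	// even size: (copy[1] + copy[2]) / 2 = (4 + 5) / 2 = 4.5
	std::vector<float> w { 5.f, 2.f, 9.f };      // sorted copy: {2, 5, 9}
	// odd size: copy[(3 - 1) / 2] = copy[1] = 5
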
@@ -326,13 +299,10 @@ float FaceTracker::l2distance(Point2f p1, Point2f p2)
        return sqrt(dx * dx + dy * dy);
 }
 
-void FaceTracker::check_FB(
-               std::vector<Mat> newPyramid,
-               const std::vector<Point2f>& oldPoints,
-               const std::vector<Point2f>& newPoints,
-               std::vector<bool>& status)
+void FaceTracker::check_FB(std::vector<Mat> newPyramid, const std::vector<Point2f> &oldPoints,
+                                                  const std::vector<Point2f> &newPoints, std::vector<bool> &status)
 {
-       if(status.size() == 0)
+       if (status.size() == 0)
                status = std::vector<bool>(oldPoints.size(), true);
 
        std::vector<uchar> LKstatus(oldPoints.size());
@@ -340,15 +310,8 @@ void FaceTracker::check_FB(
        std::vector<float> FBerror(oldPoints.size());
        std::vector<Point2f> pointsToTrackReprojection;
 
-       calcOpticalFlowPyrLK(newPyramid,
-                                               m_pyramid,
-                                               newPoints,
-                                               pointsToTrackReprojection,
-                                               LKstatus,
-                                               errors,
-                                               m_params.mWindowSize,
-                                               m_params.mPyrMaxLevel,
-                                               m_termcrit);
+       calcOpticalFlowPyrLK(newPyramid, m_pyramid, newPoints, pointsToTrackReprojection, LKstatus, errors,
+                                                m_params.mWindowSize, m_params.mPyrMaxLevel, m_termcrit);
 
        for (size_t idx = 0u; idx < oldPoints.size(); idx++)
                FBerror[idx] = l2distance(oldPoints[idx], pointsToTrackReprojection[idx]);
@@ -358,12 +321,8 @@ void FaceTracker::check_FB(
                status[idx] = (FBerror[idx] < FBerrorMedian);
 }
 
-void FaceTracker::check_NCC(
-               const Mat& oldImage,
-               const Mat& newImage,
-               const std::vector<Point2f>& oldPoints,
-               const std::vector<Point2f>& newPoints,
-               std::vector<bool>& status)
+void FaceTracker::check_NCC(const Mat &oldImage, const Mat &newImage, const std::vector<Point2f> &oldPoints,
+                                                       const std::vector<Point2f> &newPoints, std::vector<bool> &status)
 {
        std::vector<float> NCC(oldPoints.size(), 0.f);
        Size patch(30, 30);
@@ -382,21 +341,20 @@ void FaceTracker::check_NCC(
                const float prod = p1.dot(p2);
                const float sq1 = sqrt(n1 * n1 - s1 * s1 / N);
                const float sq2 = sqrt(n2 * n2 - s2 * s2 / N);
-               NCC[idx] = (sq2 == 0 ? sq1 / std::abs(sq1)
-                                       : (prod - s1 * s2 / N) / sq1 / sq2);
+               NCC[idx] = (sq2 == 0 ? sq1 / std::abs(sq1) : (prod - s1 * s2 / N) / sq1 / sq2);
        }
 
        float median = getMedian(NCC) - FloatEps;
-       for(size_t idx = 0u; idx < oldPoints.size(); idx++)
+       for (size_t idx = 0u; idx < oldPoints.size(); idx++)
                status[idx] = status[idx] && (NCC[idx] > median);
 }
 
-void FaceTracker::read(const cv::FileNode& fn)
+void FaceTracker::read(const cv::FileNode &fn)
 {
        m_params.read(fn);
 }
 
-void FaceTracker::read(cv::FileStorage& fs)
+void FaceTracker::read(cv::FileStorage &fs)
 {
        read(fs.root());
        float bbX = 0.f;
@@ -411,8 +369,7 @@ void FaceTracker::read(cv::FileStorage& fs)
        fs["lastImage"] >> m_image;
 }
 
-
-void FaceTracker::write(cv::FileStorage& fs) const
+void FaceTracker::write(cv::FileStorage &fs) const
 {
        m_params.write(fs);
        fs << "lastLocationX" << m_boundingBox.x;
index 778a82e..af20651 100644
 
 #include <unistd.h>
 
-namespace MediaVision {
-namespace Face {
-FaceTrackingResults::FaceTrackingResults() :
-       mIsTracked(false),
-       mConfidence(0.f)
+namespace MediaVision
+{
+namespace Face
+{
+FaceTrackingResults::FaceTrackingResults() : mIsTracked(false), mConfidence(0.f)
 {
        ; /* NULL */
 }
 
-FaceTrackingModel::FaceTrackingModel() :
-       m_canTrack(false),
-       m_tracker(new cv::FaceTracker())
+FaceTrackingModel::FaceTrackingModel() : m_canTrack(false), m_tracker(new cv::FaceTracker())
 {
        ; /* NULL */
 }
 
-FaceTrackingModel::FaceTrackingModel(const FaceTrackingModel& origin) :
-       m_canTrack(origin.m_canTrack),
-       m_tracker(new cv::FaceTracker())
+FaceTrackingModel::FaceTrackingModel(const FaceTrackingModel &origin)
+               : m_canTrack(origin.m_canTrack), m_tracker(new cv::FaceTracker())
 {
        if (!origin.m_tracker.empty())
                origin.m_tracker->copyTo(*(m_tracker.get()));
 }
 
-FaceTrackingModel& FaceTrackingModel::operator=(const FaceTrackingModel& copy)
+FaceTrackingModel &FaceTrackingModel::operator=(const FaceTrackingModel &copy)
 {
        if (this != &copy) {
                m_canTrack = copy.m_canTrack;
@@ -62,7 +59,7 @@ FaceTrackingModel::~FaceTrackingModel()
        ; /* NULL */
 }
 
-int FaceTrackingModel::save(const std::string& fileName)
+int FaceTrackingModel::save(const std::string &fileName)
 {
        if (m_tracker.empty()) {
                LOGE("Can't save tracking model. No tracking algorithm is used");
@@ -82,7 +79,7 @@ int FaceTrackingModel::save(const std::string& fileName)
 
                        return MEDIA_VISION_ERROR_INVALID_PATH;
                }
-       } catch (const std::out_of_range& e) {
+       } catch (const std::out_of_range &e) {
                LOGE("Can't save tracking model. Path[%s] is invalid.", filePath.c_str());
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
@@ -105,7 +102,7 @@ int FaceTrackingModel::save(const std::string& fileName)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int FaceTrackingModel::load(const std::string& fileName)
+int FaceTrackingModel::load(const std::string &fileName)
 {
        std::string filePath;
 
@@ -135,11 +132,11 @@ int FaceTrackingModel::load(const std::string& fileName)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int FaceTrackingModel::prepare(const cv::Mat& image)
+int FaceTrackingModel::prepare(const cv::Mat &image)
 {
        if (m_tracker.empty()) {
                LOGE("Failed to prepare tracking model. No tracking algorithm "
-                               "is available.");
+                        "is available.");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
@@ -156,13 +153,11 @@ int FaceTrackingModel::prepare(const cv::Mat& image)
        return prepare(image, lastBoundingBox);
 }
 
-int FaceTrackingModel::prepare(
-               const cv::Mat& image,
-               const cv::Rect_<float>& boundingBox)
+int FaceTrackingModel::prepare(const cv::Mat &image, const cv::Rect_<float> &boundingBox)
 {
        if (m_tracker.empty()) {
                LOGE("Failed to prepare tracking model. No tracking algorithm "
-                               "is available.");
+                        "is available.");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
@@ -175,13 +170,13 @@ int FaceTrackingModel::prepare(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int FaceTrackingModel::track(const cv::Mat& image, FaceTrackingResults& results)
+int FaceTrackingModel::track(const cv::Mat &image, FaceTrackingResults &results)
 {
        if (!m_tracker.empty() && m_canTrack) {
-               cv::Rect2d faceLocation = (cv::Rect2d)results.mFaceLocation;
+               cv::Rect2d faceLocation = (cv::Rect2d) results.mFaceLocation;
                results.mIsTracked = m_tracker->updateImpl(image, faceLocation);
                results.mConfidence = m_tracker->getLastConfidence();
-               results.mFaceLocation = (cv::Rect2f)faceLocation;
+               results.mFaceLocation = (cv::Rect2f) faceLocation;
        } else {
                LOGE("Attempt to track face with not prepared model");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
index 1d89744..c1af063 100644
 #include <opencv2/imgproc.hpp>
 #include <opencv2/imgproc/imgproc_c.h>
 
-namespace MediaVision {
-namespace Face {
-RecognitionParams::RecognitionParams(FaceRecognitionModelType algType) :
-       mRecognitionAlgType(algType)
+namespace MediaVision
+{
+namespace Face
+{
+RecognitionParams::RecognitionParams(FaceRecognitionModelType algType) : mRecognitionAlgType(algType)
 {
        ; /* NULL */
 }
 
-RecognitionParams::RecognitionParams() :
-       mRecognitionAlgType(MEDIA_VISION_FACE_MODEL_TYPE_LBPH)
+RecognitionParams::RecognitionParams() : mRecognitionAlgType(MEDIA_VISION_FACE_MODEL_TYPE_LBPH)
 {
        ; /* NULL */
 }
index 7624622..782180d 100644
@@ -42,8 +42,7 @@ static int check_source_roi_quadrangle(mv_quadrangle_s *roi, mv_source_h source)
                return ret;
        }
        for (int idx = 0; idx < 4; idx++) {
-               if (roi->points[idx].x < 0 || roi->points[idx].y < 0 ||
-                       (unsigned int) roi->points[idx].x > src_w ||
+               if (roi->points[idx].x < 0 || roi->points[idx].y < 0 || (unsigned int) roi->points[idx].x > src_w ||
                        (unsigned int) roi->points[idx].y > src_h) {
                        LOGE("roi is out of area on source");
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -74,8 +73,7 @@ static int check_source_roi(const mv_rectangle_s *roi, mv_source_h source)
                LOGE("mv_source_get_height fail");
                return ret;
        }
-       if (roi->point.x < 0 || roi->point.y < 0 ||
-               (unsigned int) (roi->point.x + roi->width) > src_w ||
+       if (roi->point.x < 0 || roi->point.y < 0 || (unsigned int) (roi->point.x + roi->width) > src_w ||
                (unsigned int) (roi->point.y + roi->height) > src_h) {
                LOGE("roi is out of area on source");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -83,11 +81,7 @@ static int check_source_roi(const mv_rectangle_s *roi, mv_source_h source)
        return ret;
 }
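
In plain numbers (assuming a 640x480 source): a rectangle at x = 600 with width = 100 is rejected because 600 + 100 = 700 > 640, while the quadrangle variant above checks each of its four corner points against the source bounds individually.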
 
-int mv_face_detect(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_face_detected_cb detected_cb,
-               void *user_data)
+int mv_face_detect(mv_source_h source, mv_engine_config_h engine_cfg, mv_face_detected_cb detected_cb, void *user_data)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -103,13 +97,8 @@ int mv_face_detect(
        return ret;
 }
 
-int mv_face_recognize(
-               mv_source_h source,
-               mv_face_recognition_model_h recognition_model,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s *face_location,
-               mv_face_recognized_cb recognized_cb,
-               void *user_data)
+int mv_face_recognize(mv_source_h source, mv_face_recognition_model_h recognition_model, mv_engine_config_h engine_cfg,
+                                         mv_rectangle_s *face_location, mv_face_recognized_cb recognized_cb, void *user_data)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -124,25 +113,14 @@ int mv_face_recognize(
                return ret;
        }
 
-       ret = mv_face_recognize_open(
-                                       source,
-                                       recognition_model,
-                                       engine_cfg,
-                                       face_location,
-                                       recognized_cb,
-                                       user_data);
+       ret = mv_face_recognize_open(source, recognition_model, engine_cfg, face_location, recognized_cb, user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_face_track(
-               mv_source_h source,
-               mv_face_tracking_model_h tracking_model,
-               mv_engine_config_h engine_cfg,
-               mv_face_tracked_cb tracked_cb,
-               bool do_learn,
-               void *user_data)
+int mv_face_track(mv_source_h source, mv_face_tracking_model_h tracking_model, mv_engine_config_h engine_cfg,
+                                 mv_face_tracked_cb tracked_cb, bool do_learn, void *user_data)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -153,24 +131,14 @@ int mv_face_track(
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_face_track_open(
-                       source,
-                       tracking_model,
-                       engine_cfg,
-                       tracked_cb,
-                       do_learn,
-                       user_data);
+       ret = mv_face_track_open(source, tracking_model, engine_cfg, tracked_cb, do_learn, user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_face_eye_condition_recognize(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s face_location,
-               mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
-               void *user_data)
+int mv_face_eye_condition_recognize(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s face_location,
+                                                                       mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, void *user_data)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -180,23 +148,16 @@ int mv_face_eye_condition_recognize(
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_face_eye_condition_recognize_open(
-                               source,
-                               engine_cfg,
-                               face_location,
-                               eye_condition_recognized_cb,
-                               user_data);
+       ret = mv_face_eye_condition_recognize_open(source, engine_cfg, face_location, eye_condition_recognized_cb,
+                                                                                          user_data);
 
-MEDIA_VISION_FUNCTION_LEAVE();
-return ret;
+       MEDIA_VISION_FUNCTION_LEAVE();
+       return ret;
 }
 
-int mv_face_facial_expression_recognize(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s face_location,
-               mv_face_facial_expression_recognized_cb expression_recognized_cb,
-               void *user_data)
+int mv_face_facial_expression_recognize(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s face_location,
+                                                                               mv_face_facial_expression_recognized_cb expression_recognized_cb,
+                                                                               void *user_data)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -210,19 +171,14 @@ int mv_face_facial_expression_recognize(
                return ret;
        }
 
-       ret = mv_face_facial_expression_recognize_open(
-                                       source,
-                                       engine_cfg,
-                                       face_location,
-                                       expression_recognized_cb,
-                                       user_data);
+       ret = mv_face_facial_expression_recognize_open(source, engine_cfg, face_location, expression_recognized_cb,
+                                                                                                  user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_face_recognition_model_create(
-               mv_face_recognition_model_h *recognition_model)
+int mv_face_recognition_model_create(mv_face_recognition_model_h *recognition_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(recognition_model);
@@ -237,8 +193,7 @@ int mv_face_recognition_model_create(
        return ret;
 }
 
-int mv_face_recognition_model_destroy(
-               mv_face_recognition_model_h recognition_model)
+int mv_face_recognition_model_destroy(mv_face_recognition_model_h recognition_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(recognition_model);
@@ -253,9 +208,7 @@ int mv_face_recognition_model_destroy(
        return ret;
 }
 
-int mv_face_recognition_model_clone(
-               mv_face_recognition_model_h src,
-               mv_face_recognition_model_h *dst)
+int mv_face_recognition_model_clone(mv_face_recognition_model_h src, mv_face_recognition_model_h *dst)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(src);
@@ -271,9 +224,7 @@ int mv_face_recognition_model_clone(
        return ret;
 }
 
-int mv_face_recognition_model_save(
-               const char *file_name,
-               mv_face_recognition_model_h recognition_model)
+int mv_face_recognition_model_save(const char *file_name, mv_face_recognition_model_h recognition_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(recognition_model);
@@ -287,17 +238,13 @@ int mv_face_recognition_model_save(
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_face_recognition_model_save_open(
-                                       file_name,
-                                       recognition_model);
+       ret = mv_face_recognition_model_save_open(file_name, recognition_model);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_face_recognition_model_load(
-               const char *file_name,
-               mv_face_recognition_model_h *recognition_model)
+int mv_face_recognition_model_load(const char *file_name, mv_face_recognition_model_h *recognition_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(recognition_model);
@@ -311,19 +258,14 @@ int mv_face_recognition_model_load(
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_face_recognition_model_load_open(
-                                       file_name,
-                                       recognition_model);
+       ret = mv_face_recognition_model_load_open(file_name, recognition_model);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_face_recognition_model_add(
-               const mv_source_h source,
-               mv_face_recognition_model_h recognition_model,
-               const mv_rectangle_s *example_location,
-               int face_label)
+int mv_face_recognition_model_add(const mv_source_h source, mv_face_recognition_model_h recognition_model,
+                                                                 const mv_rectangle_s *example_location, int face_label)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -337,19 +279,13 @@ int mv_face_recognition_model_add(
                return ret;
        }
 
-       ret = mv_face_recognition_model_add_open(
-                                       source,
-                                       recognition_model,
-                                       example_location,
-                                       face_label);
+       ret = mv_face_recognition_model_add_open(source, recognition_model, example_location, face_label);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_face_recognition_model_reset(
-               mv_face_recognition_model_h recognition_model,
-               int *face_label)
+int mv_face_recognition_model_reset(mv_face_recognition_model_h recognition_model, int *face_label)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(recognition_model);
@@ -358,17 +294,13 @@ int mv_face_recognition_model_reset(
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_face_recognition_model_reset_open(
-                                       recognition_model,
-                                       face_label);
+       ret = mv_face_recognition_model_reset_open(recognition_model, face_label);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_face_recognition_model_learn(
-               mv_engine_config_h engine_cfg,
-               mv_face_recognition_model_h recognition_model)
+int mv_face_recognition_model_learn(mv_engine_config_h engine_cfg, mv_face_recognition_model_h recognition_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(recognition_model);
@@ -383,10 +315,8 @@ int mv_face_recognition_model_learn(
        return ret;
 }
 
-int mv_face_recognition_model_query_labels(
-               mv_face_recognition_model_h recognition_model,
-               int **labels,
-               unsigned int *number_of_labels)
+int mv_face_recognition_model_query_labels(mv_face_recognition_model_h recognition_model, int **labels,
+                                                                                  unsigned int *number_of_labels)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(recognition_model);
@@ -403,8 +333,7 @@ int mv_face_recognition_model_query_labels(
        return ret;
 }
 
-int mv_face_tracking_model_create(
-               mv_face_tracking_model_h *tracking_model)
+int mv_face_tracking_model_create(mv_face_tracking_model_h *tracking_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(tracking_model);
@@ -419,8 +348,7 @@ int mv_face_tracking_model_create(
        return ret;
 }
 
-int mv_face_tracking_model_destroy(
-               mv_face_tracking_model_h tracking_model)
+int mv_face_tracking_model_destroy(mv_face_tracking_model_h tracking_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(tracking_model);
@@ -435,11 +363,8 @@ int mv_face_tracking_model_destroy(
        return ret;
 }
 
-int mv_face_tracking_model_prepare(
-               mv_face_tracking_model_h tracking_model,
-               mv_engine_config_h engine_cfg,
-               mv_source_h source,
-               mv_quadrangle_s *location)
+int mv_face_tracking_model_prepare(mv_face_tracking_model_h tracking_model, mv_engine_config_h engine_cfg,
+                                                                  mv_source_h source, mv_quadrangle_s *location)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(tracking_model);
@@ -453,16 +378,13 @@ int mv_face_tracking_model_prepare(
                return ret;
        }
 
-       ret = mv_face_tracking_model_prepare_open(
-                               tracking_model, engine_cfg, source, location);
+       ret = mv_face_tracking_model_prepare_open(tracking_model, engine_cfg, source, location);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_face_tracking_model_clone(
-               mv_face_tracking_model_h src,
-               mv_face_tracking_model_h *dst)
+int mv_face_tracking_model_clone(mv_face_tracking_model_h src, mv_face_tracking_model_h *dst)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(src);
@@ -478,9 +400,7 @@ int mv_face_tracking_model_clone(
        return ret;
 }
 
-int mv_face_tracking_model_save(
-               const char *file_name,
-               mv_face_tracking_model_h tracking_model)
+int mv_face_tracking_model_save(const char *file_name, mv_face_tracking_model_h tracking_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(tracking_model);
@@ -494,17 +414,13 @@ int mv_face_tracking_model_save(
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_face_tracking_model_save_open(
-                                       file_name,
-                                       tracking_model);
+       ret = mv_face_tracking_model_save_open(file_name, tracking_model);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_face_tracking_model_load(
-               const char *file_name,
-               mv_face_tracking_model_h *tracking_model)
+int mv_face_tracking_model_load(const char *file_name, mv_face_tracking_model_h *tracking_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(tracking_model);
@@ -518,9 +434,7 @@ int mv_face_tracking_model_load(
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_face_tracking_model_load_open(
-                                       file_name,
-                                       tracking_model);
+       ret = mv_face_tracking_model_load_open(file_name, tracking_model);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
index ce297d3..30dda5f 100644
@@ -35,9 +35,7 @@ using namespace ::MediaVision::Face;
 
 static const RecognitionParams DEFAULT_RECOGNITION_PARAMS = RecognitionParams();
 
-static void extractRecognitionParams(
-               mv_engine_config_h engine_cfg,
-               RecognitionParams& recognitionParams)
+static void extractRecognitionParams(mv_engine_config_h engine_cfg, RecognitionParams &recognitionParams)
 {
        mv_engine_config_h working_cfg = NULL;
 
@@ -50,23 +48,18 @@ static void extractRecognitionParams(
        }
 
        int algType = 0;
-       mv_engine_config_get_int_attribute_c(
-                       working_cfg,
-                       "MV_FACE_RECOGNITION_MODEL_TYPE",
-                       &algType);
+       mv_engine_config_get_int_attribute_c(working_cfg, "MV_FACE_RECOGNITION_MODEL_TYPE", &algType);
 
        if (0 < algType && 4 > algType)
-               recognitionParams.mRecognitionAlgType =
-                               (FaceRecognitionModelType)algType;
+               recognitionParams.mRecognitionAlgType = (FaceRecognitionModelType) algType;
        else
-               recognitionParams.mRecognitionAlgType =
-                               DEFAULT_RECOGNITION_PARAMS.mRecognitionAlgType;
+               recognitionParams.mRecognitionAlgType = DEFAULT_RECOGNITION_PARAMS.mRecognitionAlgType;
 
        if (NULL == engine_cfg)
                mv_destroy_engine_config(working_cfg);
 }
 
-inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst)
+inline void convertRectCV2MV(const cv::Rect &src, mv_rectangle_s &dst)
 {
        dst.point.x = src.x;
        dst.point.y = src.y;
@@ -74,11 +67,8 @@ inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst)
        dst.height = src.height;
 }
 
-int mv_face_detect_open(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_face_detected_cb detected_cb,
-               void *user_data)
+int mv_face_detect_open(mv_source_h source, mv_engine_config_h engine_cfg, mv_face_detected_cb detected_cb,
+                                               void *user_data)
 {
        cv::Mat image;
 
@@ -91,17 +81,14 @@ int mv_face_detect_open(
        /* default path */
        cv::Size minSize(0, 0);
        cv::Rect roi(-1, -1, -1, -1);
-       std::string haarcascadeFilePathStr =
-                       "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml";
+       std::string haarcascadeFilePathStr = "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml";
        if (engine_cfg) {
                int minWidth, minHeight;
 
                /* set face detection model */
                char *haarcascadeFilepath;
-               error = mv_engine_config_get_string_attribute_c(
-                               engine_cfg,
-                               "MV_FACE_DETECTION_MODEL_FILE_PATH",
-                               &haarcascadeFilepath);
+               error = mv_engine_config_get_string_attribute_c(engine_cfg, "MV_FACE_DETECTION_MODEL_FILE_PATH",
+                                                                                                               &haarcascadeFilepath);
                if (error == MEDIA_VISION_ERROR_NONE) {
                        LOGI("Haarcascade file was set as default");
                        haarcascadeFilePathStr = std::string(haarcascadeFilepath);
@@ -110,61 +97,49 @@ int mv_face_detect_open(
                        haarcascadeFilepath = NULL;
                } else {
                        LOGE("Error occurred during face detection haarcascade file receiving."
-                               " (%i)", error);
+                                " (%i)",
+                                error);
                }
 
		/* Set roi to be detected */
-               error = mv_engine_config_get_int_attribute_c(
-                               engine_cfg,
-                               MV_FACE_DETECTION_ROI_X,
-                               &roi.x);
+               error = mv_engine_config_get_int_attribute_c(engine_cfg, MV_FACE_DETECTION_ROI_X, &roi.x);
                if (error != MEDIA_VISION_ERROR_NONE)
                        LOGE("Error occurred during face detection roi (x) receiving."
-                               " (%i)", error);
+                                " (%i)",
+                                error);
 
-               error = mv_engine_config_get_int_attribute_c(
-                                       engine_cfg,
-                                       MV_FACE_DETECTION_ROI_Y,
-                                       &roi.y);
+               error = mv_engine_config_get_int_attribute_c(engine_cfg, MV_FACE_DETECTION_ROI_Y, &roi.y);
                if (error != MEDIA_VISION_ERROR_NONE)
                        LOGE("Error occurred during face detection roi (y) receiving."
-                                       " (%i)", error);
+                                " (%i)",
+                                error);
 
-               error = mv_engine_config_get_int_attribute_c(
-                                       engine_cfg,
-                                       MV_FACE_DETECTION_ROI_WIDTH,
-                                       &roi.width);
+               error = mv_engine_config_get_int_attribute_c(engine_cfg, MV_FACE_DETECTION_ROI_WIDTH, &roi.width);
                if (error != MEDIA_VISION_ERROR_NONE)
                        LOGE("Error occurred during face detection roi (width) receiving."
-                                       " (%i)", error);
+                                " (%i)",
+                                error);
 
-               error = mv_engine_config_get_int_attribute_c(
-                                       engine_cfg,
-                                       MV_FACE_DETECTION_ROI_HEIGHT,
-                                       &roi.height);
+               error = mv_engine_config_get_int_attribute_c(engine_cfg, MV_FACE_DETECTION_ROI_HEIGHT, &roi.height);
                if (error != MEDIA_VISION_ERROR_NONE)
                        LOGE("Error occurred during face detection roi (height) receiving."
-                                       " (%i)", error);
+                                " (%i)",
+                                error);
 
                /* Set minimum size to be detected */
-               error = mv_engine_config_get_int_attribute_c(
-                                       engine_cfg,
-                                       MV_FACE_DETECTION_MIN_SIZE_WIDTH,
-                                       &minWidth);
+               error = mv_engine_config_get_int_attribute_c(engine_cfg, MV_FACE_DETECTION_MIN_SIZE_WIDTH, &minWidth);
                if (error != MEDIA_VISION_ERROR_NONE)
                        LOGE("Error occurred during face detection minimum width receiving."
-                                       " (%i)", error);
+                                " (%i)",
+                                error);
 
-               error = mv_engine_config_get_int_attribute_c(
-                                       engine_cfg,
-                                       MV_FACE_DETECTION_MIN_SIZE_HEIGHT,
-                                       &minHeight);
+               error = mv_engine_config_get_int_attribute_c(engine_cfg, MV_FACE_DETECTION_MIN_SIZE_HEIGHT, &minHeight);
                if (error != MEDIA_VISION_ERROR_NONE)
                        LOGE("Error occurred during face detection minimum height receiving."
-                                       " (%i)", error);
+                                " (%i)",
+                                error);
 
-               if (minWidth > 0 && minHeight > 0 &&
-                       minWidth <= image.cols && minHeight <= image.rows) {
+               if (minWidth > 0 && minHeight > 0 && minWidth <= image.cols && minHeight <= image.rows) {
                        minSize.width = minWidth;
                        minSize.height = minHeight;
                }
@@ -199,13 +174,9 @@ int mv_face_detect_open(
        return MEDIA_VISION_ERROR_NONE;
 }
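
For reference, a minimal sketch of how a client would supply the attributes parsed above through the Media Vision C engine-config API — error handling is elided, the attribute values are illustrative, and the source handle and callback name are assumed:

	mv_engine_config_h cfg = NULL;
	mv_create_engine_config(&cfg);
	mv_engine_config_set_string_attribute(cfg, "MV_FACE_DETECTION_MODEL_FILE_PATH",
	                                      "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml");
	mv_engine_config_set_int_attribute(cfg, MV_FACE_DETECTION_ROI_X, 0);
	mv_engine_config_set_int_attribute(cfg, MV_FACE_DETECTION_ROI_Y, 0);
	mv_engine_config_set_int_attribute(cfg, MV_FACE_DETECTION_ROI_WIDTH, 320);
	mv_engine_config_set_int_attribute(cfg, MV_FACE_DETECTION_ROI_HEIGHT, 240);
	mv_engine_config_set_int_attribute(cfg, MV_FACE_DETECTION_MIN_SIZE_WIDTH, 40);
	mv_engine_config_set_int_attribute(cfg, MV_FACE_DETECTION_MIN_SIZE_HEIGHT, 40);
	mv_face_detect(source, cfg, on_face_detected, NULL); // source/callback assumed
	mv_destroy_engine_config(cfg);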
 
-int mv_face_recognize_open(
-               mv_source_h source,
-               mv_face_recognition_model_h recognition_model,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s *face_location,
-               mv_face_recognized_cb recognized_cb,
-               void *user_data)
+int mv_face_recognize_open(mv_source_h source, mv_face_recognition_model_h recognition_model,
+                                                  mv_engine_config_h engine_cfg, mv_rectangle_s *face_location,
+                                                  mv_face_recognized_cb recognized_cb, void *user_data)
 {
        if (!source) {
                LOGE("Can't recognize for the NULL Media Vision source handle");
@@ -214,7 +185,7 @@ int mv_face_recognize_open(
 
        if (!recognized_cb) {
                LOGE("Recognition failed. Can't output recognition results without "
-                               "callback function");
+                        "callback function");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
@@ -223,7 +194,7 @@ int mv_face_recognize_open(
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel*>(recognition_model);
+       FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel *>(recognition_model);
 
        cv::Mat grayImage;
        int ret = MediaVision::Common::convertSourceMV2GrayCV(source, grayImage);
@@ -257,14 +228,7 @@ int mv_face_recognize_open(
        }
 
        if (!results.mIsRecognized) {
-               recognized_cb(
-                               source,
-                               recognition_model,
-                               engine_cfg,
-                               NULL,
-                               NULL,
-                               0.0,
-                               user_data);
+               recognized_cb(source, recognition_model, engine_cfg, NULL, NULL, 0.0, user_data);
        } else {
                mv_rectangle_s location;
                location.point.x = results.mFaceLocation.x;
@@ -277,14 +241,8 @@ int mv_face_recognize_open(
                        location.point.y += face_location->point.y;
                }
 
-               recognized_cb(
-                               source,
-                               recognition_model,
-                               engine_cfg,
-                               &location,
-                               &(results.mFaceLabel),
-                               results.mConfidence,
-                               user_data);
+               recognized_cb(source, recognition_model, engine_cfg, &location, &(results.mFaceLabel), results.mConfidence,
+                                         user_data);
        }
 
        LOGD("Face recognition is finished");
@@ -292,13 +250,8 @@ int mv_face_recognize_open(
        return ret;
 }
 
-int mv_face_track_open(
-               mv_source_h source,
-               mv_face_tracking_model_h tracking_model,
-               mv_engine_config_h engine_cfg,
-               mv_face_tracked_cb tracked_cb,
-               bool /*do_learn*/,
-               void *user_data)
+int mv_face_track_open(mv_source_h source, mv_face_tracking_model_h tracking_model, mv_engine_config_h engine_cfg,
+                                          mv_face_tracked_cb tracked_cb, bool /*do_learn*/, void *user_data)
 {
        if (!source) {
                LOGE("Can't track for the NULL Media Vision source handle");
@@ -307,7 +260,7 @@ int mv_face_track_open(
 
        if (!tracked_cb) {
                LOGE("Tracking failed. Can't output tracking results without "
-                               "callback function");
+                        "callback function");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
@@ -316,8 +269,7 @@ int mv_face_track_open(
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       FaceTrackingModel *pTrackModel =
-                       static_cast<FaceTrackingModel*>(tracking_model);
+       FaceTrackingModel *pTrackModel = static_cast<FaceTrackingModel *>(tracking_model);
 
        cv::Mat grayImage;
        int ret = MediaVision::Common::convertSourceMV2GrayCV(source, grayImage);
@@ -332,7 +284,7 @@ int mv_face_track_open(
 
        if (MEDIA_VISION_ERROR_NONE != ret) {
                LOGE("Tracking can't be performed. "
-                               "Check that tracking model is prepared when tracking starts");
+                        "Check that tracking model is prepared when tracking starts");
                return ret;
        }
 
@@ -340,42 +292,24 @@ int mv_face_track_open(
                mv_quadrangle_s predictedLocation;
                predictedLocation.points[0].x = results.mFaceLocation.x;
                predictedLocation.points[0].y = results.mFaceLocation.y;
-               predictedLocation.points[1].x =
-                               results.mFaceLocation.x + results.mFaceLocation.width;
+               predictedLocation.points[1].x = results.mFaceLocation.x + results.mFaceLocation.width;
                predictedLocation.points[1].y = results.mFaceLocation.y;
-               predictedLocation.points[2].x =
-                               results.mFaceLocation.x + results.mFaceLocation.width;
-               predictedLocation.points[2].y =
-                               results.mFaceLocation.y + results.mFaceLocation.height;
+               predictedLocation.points[2].x = results.mFaceLocation.x + results.mFaceLocation.width;
+               predictedLocation.points[2].y = results.mFaceLocation.y + results.mFaceLocation.height;
                predictedLocation.points[3].x = results.mFaceLocation.x;
-               predictedLocation.points[3].y =
-                               results.mFaceLocation.y + results.mFaceLocation.height;
-               tracked_cb(
-                               source,
-                               tracking_model,
-                               engine_cfg,
-                               &predictedLocation,
-                               results.mConfidence,
-                               user_data);
+               predictedLocation.points[3].y = results.mFaceLocation.y + results.mFaceLocation.height;
+               tracked_cb(source, tracking_model, engine_cfg, &predictedLocation, results.mConfidence, user_data);
        } else {
-               tracked_cb(
-                               source,
-                               tracking_model,
-                               engine_cfg,
-                               NULL,
-                               results.mConfidence,
-                               user_data);
+               tracked_cb(source, tracking_model, engine_cfg, NULL, results.mConfidence, user_data);
        }
 
        return ret;
 }
 
-int mv_face_eye_condition_recognize_open(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s face_location,
-               mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
-               void *user_data)
+int mv_face_eye_condition_recognize_open(mv_source_h source, mv_engine_config_h engine_cfg,
+                                                                                mv_rectangle_s face_location,
+                                                                                mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
+                                                                                void *user_data)
 {
        cv::Mat image;
 
@@ -386,32 +320,22 @@ int mv_face_eye_condition_recognize_open(
        }
 
        mv_face_eye_condition_e eye_condition;
-       error = FaceEyeCondition::recognizeEyeCondition(
-                                                                       image,
-                                                                       face_location,
-                                                                       &eye_condition);
+       error = FaceEyeCondition::recognizeEyeCondition(image, face_location, &eye_condition);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                LOGE("eye contition recognition failed");
                return error;
        }
 
-       eye_condition_recognized_cb(
-                               source,
-                               engine_cfg,
-                               face_location,
-                               eye_condition,
-                               user_data);
+       eye_condition_recognized_cb(source, engine_cfg, face_location, eye_condition, user_data);
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_face_facial_expression_recognize_open(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s face_location,
-               mv_face_facial_expression_recognized_cb expression_recognized_cb,
-               void *user_data)
+int mv_face_facial_expression_recognize_open(mv_source_h source, mv_engine_config_h engine_cfg,
+                                                                                        mv_rectangle_s face_location,
+                                                                                        mv_face_facial_expression_recognized_cb expression_recognized_cb,
+                                                                                        void *user_data)
 {
        cv::Mat image;
 
@@ -422,34 +346,26 @@ int mv_face_facial_expression_recognize_open(
        }
 
        mv_face_facial_expression_e expression;
-       error = FaceExpressionRecognizer::recognizeFaceExpression(
-                                       image, face_location, &expression);
+       error = FaceExpressionRecognizer::recognizeFaceExpression(image, face_location, &expression);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                LOGE("eye contition recognition failed");
                return error;
        }
 
-       expression_recognized_cb(
-                               source,
-                               engine_cfg,
-                               face_location,
-                               expression,
-                               user_data);
+       expression_recognized_cb(source, engine_cfg, face_location, expression, user_data);
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_face_recognition_model_create_open(
-               mv_face_recognition_model_h *recognition_model)
+int mv_face_recognition_model_create_open(mv_face_recognition_model_h *recognition_model)
 {
        if (recognition_model == NULL) {
                LOGE("Recognition model can't be created because handle pointer is NULL");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       (*recognition_model) =
-                       static_cast<mv_face_recognition_model_h>(new (std::nothrow)FaceRecognitionModel());
+       (*recognition_model) = static_cast<mv_face_recognition_model_h>(new (std::nothrow) FaceRecognitionModel());
 
        if (*recognition_model == NULL) {
                LOGE("Failed to create media vision recognition model");
@@ -461,8 +377,7 @@ int mv_face_recognition_model_create_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_face_recognition_model_destroy_open(
-               mv_face_recognition_model_h recognition_model)
+int mv_face_recognition_model_destroy_open(mv_face_recognition_model_h recognition_model)
 {
        if (!recognition_model) {
                LOGE("Recognition model can't be destroyed because handle is NULL");
@@ -470,23 +385,21 @@ int mv_face_recognition_model_destroy_open(
        }
 
        LOGD("Destroying media vision recognition model [%p]", recognition_model);
-       delete static_cast<FaceRecognitionModel*>(recognition_model);
+       delete static_cast<FaceRecognitionModel *>(recognition_model);
        LOGD("Media vision recognition model has been destroyed");
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_face_recognition_model_clone_open(
-               mv_face_recognition_model_h src,
-               mv_face_recognition_model_h *dst)
+int mv_face_recognition_model_clone_open(mv_face_recognition_model_h src, mv_face_recognition_model_h *dst)
 {
        if (!src || !dst) {
                LOGE("Can't clone recognition model. Both source and destination"
-                               "recognition model handles has to be not NULL");
+                        " recognition model handles have to be not NULL");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       (*dst) = static_cast<mv_face_recognition_model_h>(new (std::nothrow)FaceRecognitionModel());
+       (*dst) = static_cast<mv_face_recognition_model_h>(new (std::nothrow) FaceRecognitionModel());
 
        if (*dst == NULL) {
                LOGE("Failed to create media vision recognition model");
@@ -495,8 +408,8 @@ int mv_face_recognition_model_clone_open(
 
        LOGD("Recognition model [%p] has been created", *dst);
 
-       const FaceRecognitionModel *pSrcModel = static_cast<FaceRecognitionModel*>(src);
-       FaceRecognitionModel *pDstModel = static_cast<FaceRecognitionModel*>(*dst);
+       const FaceRecognitionModel *pSrcModel = static_cast<FaceRecognitionModel *>(src);
+       FaceRecognitionModel *pDstModel = static_cast<FaceRecognitionModel *>(*dst);
 
        *pDstModel = *pSrcModel;
 
@@ -504,9 +417,7 @@ int mv_face_recognition_model_clone_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_face_recognition_model_save_open(
-               const char *file_name,
-               mv_face_recognition_model_h recognition_model)
+int mv_face_recognition_model_save_open(const char *file_name, mv_face_recognition_model_h recognition_model)
 {
        if (!recognition_model) {
                LOGE("Can't save recognition model to the file. Handle has to be not NULL");
@@ -518,7 +429,7 @@ int mv_face_recognition_model_save_open(
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
 
-       FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel*>(recognition_model);
+       FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel *>(recognition_model);
        const int ret = pRecModel->save(std::string(file_name));
 
        if (MEDIA_VISION_ERROR_NONE != ret) {
@@ -530,36 +441,32 @@ int mv_face_recognition_model_save_open(
        return ret;
 }
 
-int mv_face_recognition_model_load_open(
-               const char *file_name,
-               mv_face_recognition_model_h *recognition_model)
+int mv_face_recognition_model_load_open(const char *file_name, mv_face_recognition_model_h *recognition_model)
 {
        if (!recognition_model) {
                LOGE("Can't load recognition model from the file. "
-                               "Handle has to be not NULL");
+                        "Handle has to be not NULL");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
        if (NULL == file_name) {
                LOGE("Can't load recognition model from the file. "
-                               "File name has to be specified");
+                        "File name has to be specified");
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
 
-       (*recognition_model) =
-               static_cast<mv_face_recognition_model_h>(new (std::nothrow)FaceRecognitionModel());
+       (*recognition_model) = static_cast<mv_face_recognition_model_h>(new (std::nothrow) FaceRecognitionModel());
 
        if (*recognition_model == NULL) {
                LOGE("Failed to create media vision recognition model");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
 
-       FaceRecognitionModel *pRecModel =
-                       static_cast<FaceRecognitionModel*>(*recognition_model);
+       FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel *>(*recognition_model);
 
        if (!pRecModel) {
                LOGE("Loading of the face recognition model from file failed. "
-                               "Incorrect Media Vision Face recognition model handle is used");
+                        "Incorrect Media Vision Face recognition model handle is used");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
@@ -574,26 +481,22 @@ int mv_face_recognition_model_load_open(
        return ret;
 }
 
-int mv_face_recognition_model_add_open(
-               const mv_source_h source,
-               mv_face_recognition_model_h recognition_model,
-               const mv_rectangle_s *example_location,
-               int face_label)
+int mv_face_recognition_model_add_open(const mv_source_h source, mv_face_recognition_model_h recognition_model,
+                                                                          const mv_rectangle_s *example_location, int face_label)
 {
        if (!source) {
                LOGE("Can't add face image example for recognition model. "
-                               "Media Vision source handle has to be not NULL");
+                        "Media Vision source handle has to be not NULL");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
        if (!recognition_model) {
                LOGE("Can't add face image example for recognition model. "
-                               "Model handle has to be not NULL");
+                        "Model handle has to be not NULL");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       FaceRecognitionModel *pRecModel =
-                       static_cast<FaceRecognitionModel*>(recognition_model);
+       FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel *>(recognition_model);
 
        cv::Mat image;
        int ret = MediaVision::Common::convertSourceMV2GrayCV(source, image);
@@ -619,25 +522,21 @@ int mv_face_recognition_model_add_open(
        }
 
        LOGD("The face image example labeled %i has been added "
-                       "to the Media Vision recognition model", face_label);
+                "to the Media Vision recognition model",
+                face_label);
        return ret;
 }
 
-int mv_face_recognition_model_reset_open(
-               mv_face_recognition_model_h recognition_model,
-               const int *face_label)
+int mv_face_recognition_model_reset_open(mv_face_recognition_model_h recognition_model, const int *face_label)
 {
        if (!recognition_model) {
                LOGE("Can't reset positive examples for NULL recognition model");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       FaceRecognitionModel *pRecModel =
-                       static_cast<FaceRecognitionModel*>(recognition_model);
+       FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel *>(recognition_model);
 
-       int ret = (NULL != face_label ?
-                               pRecModel->resetFaceExamples(*face_label) :
-                               pRecModel->resetFaceExamples());
+       int ret = (NULL != face_label ? pRecModel->resetFaceExamples(*face_label) : pRecModel->resetFaceExamples());
 
        if (MEDIA_VISION_ERROR_NONE != ret) {
                LOGE("Error occurred when reset positive examples of the recognition model");
@@ -648,17 +547,14 @@ int mv_face_recognition_model_reset_open(
        return ret;
 }
 
-int mv_face_recognition_model_learn_open(
-               mv_engine_config_h engine_cfg,
-               mv_face_recognition_model_h recognition_model)
+int mv_face_recognition_model_learn_open(mv_engine_config_h engine_cfg, mv_face_recognition_model_h recognition_model)
 {
        if (!recognition_model) {
                LOGE("Can't learn recognition model. Model handle has to be not NULL");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       FaceRecognitionModel *pRecModel =
-                       static_cast<FaceRecognitionModel*>(recognition_model);
+       FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel *>(recognition_model);
 
        RecognitionParams recognitionParams;
        extractRecognitionParams(engine_cfg, recognitionParams);
@@ -676,10 +572,8 @@ int mv_face_recognition_model_learn_open(
        return ret;
 }
 
-int mv_face_recognition_model_query_labels_open(
-               mv_face_recognition_model_h recognition_model,
-               int **labels,
-               unsigned int *number_of_labels)
+int mv_face_recognition_model_query_labels_open(mv_face_recognition_model_h recognition_model, int **labels,
+                                                                                               unsigned int *number_of_labels)
 {
        if (!recognition_model) {
                LOGE("Can't get list of labels for NULL recognition model");
@@ -688,19 +582,18 @@ int mv_face_recognition_model_query_labels_open(
 
        if (NULL == labels || NULL == number_of_labels) {
                LOGE("Can't get list of labels. labels and number_of_labels out "
-                               "parameters both has to be not NULL.");
+                        "parameters both have to be not NULL.");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       FaceRecognitionModel *pRecModel =
-                               static_cast<FaceRecognitionModel*>(recognition_model);
+       FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel *>(recognition_model);
 
-       const std::set<int>& learnedLabels = pRecModel->getFaceLabels();
+       const std::set<int> &learnedLabels = pRecModel->getFaceLabels();
        auto _number_of_labels = learnedLabels.size();
        int *_pLabels = NULL;
        if (_number_of_labels) {
-               _pLabels = (int*)malloc(sizeof(int) * (_number_of_labels));
-               if(_pLabels == NULL) {
+               _pLabels = (int *) malloc(sizeof(int) * (_number_of_labels));
+               if (_pLabels == NULL) {
                        LOGE("Fail to alloc memory for %zu labels", _number_of_labels);
                        return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
                }
@@ -715,16 +608,14 @@ int mv_face_recognition_model_query_labels_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_face_tracking_model_create_open(
-               mv_face_tracking_model_h *tracking_model)
+int mv_face_tracking_model_create_open(mv_face_tracking_model_h *tracking_model)
 {
        if (tracking_model == NULL) {
                LOGE("Tracking model can't be created because handle pointer is NULL");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       (*tracking_model) =
-                       static_cast<mv_face_tracking_model_h>(new (std::nothrow)FaceTrackingModel());
+       (*tracking_model) = static_cast<mv_face_tracking_model_h>(new (std::nothrow) FaceTrackingModel());
 
        if (*tracking_model == NULL) {
                LOGE("Failed to create media vision tracking model");
@@ -736,8 +627,7 @@ int mv_face_tracking_model_create_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_face_tracking_model_destroy_open(
-               mv_face_tracking_model_h tracking_model)
+int mv_face_tracking_model_destroy_open(mv_face_tracking_model_h tracking_model)
 {
        if (!tracking_model) {
                LOGE("Tracking model can't be destroyed because handle is NULL");
@@ -745,17 +635,14 @@ int mv_face_tracking_model_destroy_open(
        }
 
        LOGD("Destroying media vision tracking model [%p]", tracking_model);
-       delete static_cast<FaceTrackingModel*>(tracking_model);
+       delete static_cast<FaceTrackingModel *>(tracking_model);
        LOGD("Media vision tracking model has been destroyed");
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_face_tracking_model_prepare_open(
-               mv_face_tracking_model_h tracking_model,
-               mv_engine_config_h /*engine_cfg*/,
-               mv_source_h source,
-               mv_quadrangle_s *location)
+int mv_face_tracking_model_prepare_open(mv_face_tracking_model_h tracking_model, mv_engine_config_h /*engine_cfg*/,
+                                                                               mv_source_h source, mv_quadrangle_s *location)
 {
        if (!tracking_model) {
                LOGE("Can't prepare tracking model. Handle has to be not NULL");
@@ -764,12 +651,11 @@ int mv_face_tracking_model_prepare_open(
 
        if (!source) {
                LOGE("Can't prepare tracking model. "
-                               "Media Vision source handle has to be not NULL");
+                        "Media Vision source handle has to be not NULL");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       FaceTrackingModel *pTrackModel =
-                       static_cast<FaceTrackingModel*>(tracking_model);
+       FaceTrackingModel *pTrackModel = static_cast<FaceTrackingModel *>(tracking_model);
 
        cv::Mat image;
        int ret = MediaVision::Common::convertSourceMV2GrayCV(source, image);
@@ -810,17 +696,15 @@ int mv_face_tracking_model_prepare_open(
        return ret;
 }
 
-int mv_face_tracking_model_clone_open(
-               mv_face_tracking_model_h src,
-               mv_face_tracking_model_h *dst)
+int mv_face_tracking_model_clone_open(mv_face_tracking_model_h src, mv_face_tracking_model_h *dst)
 {
        if (!src || !dst) {
                LOGE("Can't clone tracking model. Both source and destination"
-                               "tracking model handles has to be not NULL");
+                        " tracking model handles have to be not NULL");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       (*dst) = static_cast<mv_face_tracking_model_h>(new (std::nothrow)FaceTrackingModel());
+       (*dst) = static_cast<mv_face_tracking_model_h>(new (std::nothrow) FaceTrackingModel());
 
        if (*dst == NULL) {
                LOGE("Failed to create media vision tracking model");
@@ -829,8 +713,8 @@ int mv_face_tracking_model_clone_open(
 
        LOGD("Tracking model [%p] has been created", *dst);
 
-       const FaceTrackingModel *pSrcModel = static_cast<FaceTrackingModel*>(src);
-       FaceTrackingModel *pDstModel = static_cast<FaceTrackingModel*>(*dst);
+       const FaceTrackingModel *pSrcModel = static_cast<FaceTrackingModel *>(src);
+       FaceTrackingModel *pDstModel = static_cast<FaceTrackingModel *>(*dst);
 
        *pDstModel = *pSrcModel;
 
@@ -839,23 +723,21 @@ int mv_face_tracking_model_clone_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_face_tracking_model_save_open(
-               const char *file_name,
-               mv_face_tracking_model_h tracking_model)
+int mv_face_tracking_model_save_open(const char *file_name, mv_face_tracking_model_h tracking_model)
 {
        if (!tracking_model) {
                LOGE("Can't save tracking model to the file. "
-                               "Handle has to be not NULL");
+                        "Handle has to be not NULL");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
        if (NULL == file_name) {
                LOGE("Can't save tracking model to the file. "
-                               "File name has to be specified");
+                        "File name has to be specified");
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
 
-       FaceTrackingModel *pTrackModel = static_cast<FaceTrackingModel*>(tracking_model);
+       FaceTrackingModel *pTrackModel = static_cast<FaceTrackingModel *>(tracking_model);
 
        const int ret = pTrackModel->save(std::string(file_name));
 
@@ -869,36 +751,32 @@ int mv_face_tracking_model_save_open(
        return ret;
 }
 
-int mv_face_tracking_model_load_open(
-               const char *file_name,
-               mv_face_tracking_model_h *tracking_model)
+int mv_face_tracking_model_load_open(const char *file_name, mv_face_tracking_model_h *tracking_model)
 {
        if (!tracking_model) {
                LOGE("Can't load tracking model from the file. "
-                               "Handle has to be not NULL");
+                        "Handle has to be not NULL");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
        if (NULL == file_name) {
                LOGE("Can't load tracking model from the file. "
-                               "File name has to be specified");
+                        "File name has to be specified");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       (*tracking_model) =
-               static_cast<mv_face_tracking_model_h>(new (std::nothrow)FaceTrackingModel());
+       (*tracking_model) = static_cast<mv_face_tracking_model_h>(new (std::nothrow) FaceTrackingModel());
 
        if (*tracking_model == NULL) {
                LOGE("Failed to create media vision tracking model");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
 
-       FaceTrackingModel *pTrackModel =
-                       static_cast<FaceTrackingModel*>(*tracking_model);
+       FaceTrackingModel *pTrackModel = static_cast<FaceTrackingModel *>(*tracking_model);
 
        if (!pTrackModel) {
                LOGE("Loading of the face tracking model from file failed. "
-                               "Incorrect Media Vision Face tracking model handle is used");
+                        "Incorrect Media Vision Face tracking model handle is used");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
@@ -906,7 +784,7 @@ int mv_face_tracking_model_load_open(
 
        if (MEDIA_VISION_ERROR_NONE != ret) {
                LOGE("Error occurred when load tracking model from the file");
-               delete static_cast<FaceTrackingModel*>(*tracking_model);
+               delete static_cast<FaceTrackingModel *>(*tracking_model);
                *tracking_model = NULL;
 
                return ret;
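
The mv_face_open.cpp hunks above show the whole character of this patch: stacked parameter lists are packed up to the new column limit, "static_cast<Type*>" becomes "static_cast<Type *>", C-style casts gain a space, and multi-line string literals are re-indented. The self-contained toy below (all names invented, not from this tree) restates those conventions in one place; the clang-format options it implies, such as ColumnLimit: 120, PointerAlignment: Right, SpaceAfterCStyleCast: true, and brace wrapping after namespaces, classes, and enums, are inferred from the output here, not quoted from the .clang-format this patch ships.

#include <iostream>

namespace Example
{ /* pre-patch: "namespace Example {" on a single line */
class Model
{ /* pre-patch: "class Model {" on a single line */
public:
	/* Pre-patch declarations stacked one parameter per line:
	 *   int open(
	 *                   void *source,
	 *                   void *callback,
	 *                   void *user_data);
	 * post-patch, parameters pack up to the 120-column limit. */
	int open(void *source, void *callback, void *user_data)
	{
		/* The star binds to the name, including in casts:
		 * "(int *) ptr" rather than "(int*)ptr". */
		int *label = (int *) user_data;
		(void) source;
		(void) callback;
		return label ? *label : 0;
	}
};
} /* Example */

int main()
{
	Example::Model model;
	int label = 7;
	std::cout << model.open(nullptr, nullptr, &label) << std::endl;
	return 0;
}

Reading any later hunk in this commit against this toy should be enough to predict its shape; none of the remaining files change behavior.
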
index ca65f4f..8cfa99d 100644 (file)
 
 #include "Features/FeatureExtractorFactory.h"
 
-namespace MediaVision {
-namespace Image {
-
-class BasicExtractorFactory : public FeatureExtractorFactory {
+namespace MediaVision
+{
+namespace Image
+{
+class BasicExtractorFactory : public FeatureExtractorFactory
+{
 public:
        BasicExtractorFactory(KeypointType keypointsType, DescriptorType descType);
 
index f2f6e5e..5642c3e 100644 (file)
 
 #include "Features/FeaturePack.h"
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @class FeatureExtractor
  * @brief Class contains functionality to extract features from an image
  *
  * @since_tizen 3.0
  */
-class FeatureExtractor {
+class FeatureExtractor
+{
 public:
        FeatureExtractor();
 
-       void setFeatureDetector(
-                       const cv::Ptr<cv::FeatureDetector> detector,
-                       KeypointType keypointType);
+       void setFeatureDetector(const cv::Ptr<cv::FeatureDetector> detector, KeypointType keypointType);
 
-       void setDescriptorExtractor(
-                       cv::Ptr<cv::DescriptorExtractor> extractor,
-                       DescriptorType descriptorType);
+       void setDescriptorExtractor(cv::Ptr<cv::DescriptorExtractor> extractor, DescriptorType descriptorType);
 
-       void setRecognitionRateMetric(
-                       float (*computeRecognitionRate)(
-                                       const cv::Mat&,
-                                       const std::vector<cv::KeyPoint>&));
+       void setRecognitionRateMetric(float (*computeRecognitionRate)(const cv::Mat &, const std::vector<cv::KeyPoint> &));
 
-       bool extract(
-                       const cv::Mat& image,
-                       FeaturePack& result,
-                       const std::vector<cv::Point2f>& roi = std::vector<cv::Point2f>());
+       bool extract(const cv::Mat &image, FeaturePack &result,
+                                const std::vector<cv::Point2f> &roi = std::vector<cv::Point2f>());
 
 private:
        static const cv::Size __MIN_SIZE;
@@ -63,9 +57,7 @@ private:
 
        cv::Ptr<cv::DescriptorExtractor> __extractor;
 
-       float (*__computeRecognitionRate)(
-                       const cv::Mat&,
-                       const std::vector<cv::KeyPoint>&);
+       float (*__computeRecognitionRate)(const cv::Mat &, const std::vector<cv::KeyPoint> &);
 };
 
 } /* Image */
index 1477292..fb21df5 100644 (file)
 
 #include <opencv2/core.hpp>
 
-namespace MediaVision {
-namespace Image {
-
-class FeatureExtractorFactory {
+namespace MediaVision
+{
+namespace Image
+{
+class FeatureExtractorFactory
+{
 public:
        virtual ~FeatureExtractorFactory();
 
index 971e7f9..831938a 100644 (file)
 
 #include "Features/FeaturePack.h"
 
-namespace MediaVision {
-namespace Image {
-
-class FeatureMatcher {
+namespace MediaVision
+{
+namespace Image
+{
+class FeatureMatcher
+{
 public:
-       enum MatchError {
+       enum MatchError
+       {
                InvalidFeaturePackFrom,
                InvalidFeaturePackTo,
                DisparateTypes,
@@ -33,15 +36,9 @@ public:
        };
 
 public:
-       FeatureMatcher(
-                       float affectingPart = 1.f,
-                       float tolerantError = 0.f,
-                       size_t minimumMatchesNumber = 0u);
-
-       MatchError match(
-                       const FeaturePack& from,
-                       const FeaturePack& to,
-                       cv::Mat& homophraphyMatrix) const;
+       FeatureMatcher(float affectingPart = 1.f, float tolerantError = 0.f, size_t minimumMatchesNumber = 0u);
+
+       MatchError match(const FeaturePack &from, const FeaturePack &to, cv::Mat &homophraphyMatrix) const;
 
        float getAffectingPart() const;
 
index 422de0b..746dac6 100644 (file)
 #include <opencv2/xfeatures2d.hpp>
 #include <opencv2/calib3d.hpp>
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @class    FeaturePack
  * @brief    This class contains information about features and can be used for
@@ -36,7 +38,8 @@ namespace Image {
  *
  * @since_tizen 3.0
  */
-class FeaturePack {
+class FeaturePack
+{
 public:
        /**
         * @brief   @ref FeaturePack default constructor.
@@ -52,7 +55,7 @@ public:
         * @since_tizen 3.0
         * @param   [in] copy @ref FeaturePack which will be copied
         */
-       FeaturePack(const FeaturePack& copy);
+       FeaturePack(const FeaturePack &copy);
 
        /**
         * @brief   @ref FeaturePack copy assignment operator.
@@ -61,7 +64,7 @@ public:
         * @since_tizen 3.0
         * @param   [in] copy @ref FeaturePack which will be copied
         */
-       FeaturePack& operator= (const FeaturePack& copy);
+       FeaturePack &operator=(const FeaturePack &copy);
 
        KeypointType __keypointsType;
 
index 335c04b..888619e 100644 (file)
 
 #include "Features/FeatureExtractorFactory.h"
 
-namespace MediaVision {
-namespace Image {
-
-class ORBExtractorFactory : public FeatureExtractorFactory {
+namespace MediaVision
+{
+namespace Image
+{
+class ORBExtractorFactory : public FeatureExtractorFactory
+{
 public:
-       ORBExtractorFactory(
-                       float scaleFactor = 1.2f,
-                       size_t maximumFeaturesNumber = 800u);
+       ORBExtractorFactory(float scaleFactor = 1.2f, size_t maximumFeaturesNumber = 800u);
 
        virtual cv::Ptr<FeatureExtractor> buildFeatureExtractor();
 
@@ -39,9 +39,7 @@ public:
        void setMaximumFeaturesNumber(size_t maximumFeaturesNumber);
 
 private:
-       static float computeRecognitionRate(
-                       const cv::Mat&,
-                       const std::vector<cv::KeyPoint>&);
+       static float computeRecognitionRate(const cv::Mat &, const std::vector<cv::KeyPoint> &);
 
        float __scaleFactor; /**< Recognition scale factor for the ORB detector. */
 
index a29806b..0581a49 100644 (file)
  * @brief This file contains Image Module configuration.
  */
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @brief Keypoint's type enumeration.
  *
  * @since_tizen 3.0
  */
-enum KeypointType {
-       KT_INVALID = -1,  /**< Undefined keypoint's type */
-       KT_ORB,           /**< Oriented FAST keypoint's type */
-       KT_GFTT,          /**< Keypoint's type of good features to track */
-       KT_SIZE           /**< Number of keypoint's types */
+enum KeypointType
+{
+       KT_INVALID = -1, /**< Undefined keypoint's type */
+       KT_ORB, /**< Oriented FAST keypoint's type */
+       KT_GFTT, /**< Keypoint's type of good features to track */
+       KT_SIZE /**< Number of keypoint's types */
 };
 
-const std::string KeypointNames[KT_SIZE] = {
-       [KT_ORB] = "ORB",
-       [KT_GFTT] = "GFTT"
-};
+const std::string KeypointNames[KT_SIZE] = { [KT_ORB] = "ORB", [KT_GFTT] = "GFTT" };
 
 /*
  * @brief Descriptor's type enumeration.
  *
  * @since_tizen 3.0
  */
-enum DescriptorType {
-       DT_INVALID = -1,  /**< Undefined descriptor's type */
-       DT_ORB,           /**< Rotated BRIEF descriptor's type */
-       DT_BRIEF,         /**< Descriptor's type of binary robust independent
+enum DescriptorType
+{
+       DT_INVALID = -1, /**< Undefined descriptor's type */
+       DT_ORB, /**< Rotated BRIEF descriptor's type */
+       DT_BRIEF, /**< Descriptor's type of binary robust independent
                                                        elementary features */
-       DT_SIZE           /**< Number of descriptor's types */
+       DT_SIZE /**< Number of descriptor's types */
 };
 
-const std::string DescriptorNames[DT_SIZE] = {
-       [DT_ORB] = "ORB",
-       [DT_BRIEF] = "BRIEF"
-};
+const std::string DescriptorNames[DT_SIZE] = { [DT_ORB] = "ORB", [DT_BRIEF] = "BRIEF" };
 
 /**
  * @class FeaturesExtractingParams
@@ -67,15 +65,18 @@ const std::string DescriptorNames[DT_SIZE] = {
  *
  * @since_tizen 3.0
  */
-struct FeaturesExtractingParams {
+struct FeaturesExtractingParams
+{
        FeaturesExtractingParams();
 
        KeypointType mKeypointType; /**< Keypoint's type. */
 
        DescriptorType mDescriptorType; /**< Descriptor's type. */
 
-       union { /**< Extracting parameters for concretes algorithms. */
-               struct { /**< Extracting parameters for ORB algorithm. */
+       union
+       { /**< Extracting parameters for concrete algorithms. */
+               struct
+               { /**< Extracting parameters for ORB algorithm. */
                        double mScaleFactor; /**< Recognition scale factor for the ORB detector. */
                        int mMaximumFeaturesNumber; /**< Maximum number of features,
                                                                        which will be extracted from object image.*/
@@ -89,11 +90,9 @@ struct FeaturesExtractingParams {
  *
  * @since_tizen 3.0
  */
-struct RecognitionParams {
-       RecognitionParams(
-                       int minMatchesNumber,
-                       double requiredMatchesPart,
-                       double tolerantMatchesPartError);
+struct RecognitionParams
+{
+       RecognitionParams(int minMatchesNumber, double requiredMatchesPart, double tolerantMatchesPartError);
 
        RecognitionParams();
 
@@ -115,14 +114,10 @@ struct RecognitionParams {
  *
  * @since_tizen 3.0
  */
-struct StabilizationParams {
-       StabilizationParams(
-                       bool isEnabled,
-                       size_t historyAmount,
-                       double tolerantShift,
-                       double tolerantShiftExtra,
-                       double stabilizationSpeed,
-                       double stabilizationAcceleration);
+struct StabilizationParams
+{
+       StabilizationParams(bool isEnabled, size_t historyAmount, double tolerantShift, double tolerantShiftExtra,
+                                               double stabilizationSpeed, double stabilizationAcceleration);
 
        StabilizationParams();
 
@@ -156,12 +151,10 @@ struct StabilizationParams {
  *
  * @since_tizen 3.0
  */
-struct TrackingParams {
-       TrackingParams(
-                       FeaturesExtractingParams framesFeaturesExtractingParams,
-                       RecognitionParams recognitionParams,
-                       StabilizationParams stabilizationParams,
-                       double expectedOffset);
+struct TrackingParams
+{
+       TrackingParams(FeaturesExtractingParams framesFeaturesExtractingParams, RecognitionParams recognitionParams,
+                                  StabilizationParams stabilizationParams, double expectedOffset);
 
        TrackingParams();
 
index bd49674..ae75205 100644 (file)
  * @brief This file contains math utility for Image Module.
  */
 
-namespace MediaVision {
-namespace Image {
-
+namespace MediaVision
+{
+namespace Image
+{
 const size_t MinimumNumberOfFeatures = 4u; /* Minimum number of features
                                                                                        when perspective transform
                                                                                        parameters calculation
@@ -42,9 +43,7 @@ const size_t NumberOfQuadrangleCorners = 4u; /* Number of quadrangle corneres */
  * @param [in] point2   The second point
  * @return distance between two points
  */
-float getDistance(
-               const cv::Point2f& point1,
-               const cv::Point2f& point2);
+float getDistance(const cv::Point2f &point1, const cv::Point2f &point2);
 
 /**
  * @brief   Calculates area of triangle.
@@ -55,10 +54,7 @@ float getDistance(
  * @param [in] point3   The third corner of triangle
  * @return area of triangle
  */
-float getTriangleArea(
-               const cv::Point2f& point1,
-               const cv::Point2f& point2,
-               const cv::Point2f& point3);
+float getTriangleArea(const cv::Point2f &point1, const cv::Point2f &point2, const cv::Point2f &point3);
 
 /**
  * @brief   Calculates area of quadrangle.
@@ -67,8 +63,7 @@ float getTriangleArea(
  * @param [in] points   Four corners of quadrangle
  * @return area of quadrangle
  */
-float getQuadrangleArea(
-               const cv::Point2f points[NumberOfQuadrangleCorners]);
+float getQuadrangleArea(const cv::Point2f points[NumberOfQuadrangleCorners]);
 
 /**
  * @brief   Checks point on the accessory region.
@@ -78,9 +73,7 @@ float getQuadrangleArea(
  * @param [in] region  Contour of region
  * @return true if point is inside the region, otherwise return false
  */
-bool checkAccessory(
-               const cv::Point2f& point,
-               const std::vector<cv::Point2f>& region);
+bool checkAccessory(const cv::Point2f &point, const std::vector<cv::Point2f> &region);
 
 /**
  * @brief   Cuts a rectangle according to the maximum size.
@@ -91,7 +84,7 @@ bool checkAccessory(
  * @param [in] rectange   Rectangle which will be cut
  * @param [in] maxSize    Maximum values of needed rectangle
  */
-void catRect(cv::Rect& rectange, const cv::Size& maxSize);
+void catRect(cv::Rect &rectange, const cv::Size &maxSize);
 
 /**
  * @brief   Resizes a region.
@@ -100,9 +93,7 @@ void catRect(cv::Rect& rectange, const cv::Size& maxSize);
  * @param [in] roi                  Contour of region which will be resized
  * @param [in] scalingCoefficient   Scaling coefficient
  */
-std::vector<cv::Point2f> contourResize(
-               const std::vector<cv::Point2f>& roi,
-               float scalingCoefficient);
+std::vector<cv::Point2f> contourResize(const std::vector<cv::Point2f> &roi, float scalingCoefficient);
 
 } /* Image */
 } /* MediaVision */
index 35d33e2..bae3421 100644 (file)
  * @brief This file contains the @ref ImageObject class.
  */
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @class    ImageObject
  * @brief    This class contains the image information, which will
@@ -35,7 +37,8 @@ namespace Image {
  *
  * @since_tizen 3.0
  */
-class ImageObject {
+class ImageObject
+{
 public:
        /**
         * @brief   @ref ImageObject default constructor.
@@ -54,7 +57,7 @@ public:
         *                      will be created
         * @param [in] params   Features extracting parameters
         */
-       ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params);
+       ImageObject(const cv::Mat &image, const FeaturesExtractingParams &params);
 
        /**
         * @brief   @ref ImageObject copy constructor.
@@ -63,7 +66,7 @@ public:
         * @since_tizen 3.0
         * @param   [in] copy @ref ImageObject which will be copied
         */
-       ImageObject(const ImageObject& copy);
+       ImageObject(const ImageObject &copy);
 
        /**
         * @brief   @ref ImageObject copy assignment operator.
@@ -73,7 +76,7 @@ public:
         * @param   [in] copy @ref ImageObject which will be copied
         *
         */
-       ImageObject& operator=(const ImageObject& copy);
+       ImageObject &operator=(const ImageObject &copy);
 
        /**
         * @brief   @ref ImageObject destructor.
@@ -93,10 +96,8 @@ public:
         * @param [in] params        Features extracting parameters
         * @param [in] roi           Region of interested object on the @a image
         */
-       void fill(
-                       const cv::Mat& image,
-                       const FeaturesExtractingParams& params,
-                       const std::vector<cv::Point2f>& roi = std::vector<cv::Point2f>());
+       void fill(const cv::Mat &image, const FeaturesExtractingParams &params,
+                         const std::vector<cv::Point2f> &roi = std::vector<cv::Point2f>());
 
        /**
         * @brief Gets a value that determines how well an @ref ImageObject can be recognized.
@@ -127,7 +128,7 @@ public:
         * @since_tizen 3.0
         * @param [in] contour  The contour which will be used with @ref ImageObject
         */
-       void setContour(const std::vector<cv::Point2f>& contour);
+       void setContour(const std::vector<cv::Point2f> &contour);
 
        /**
         * @brief Sets a label for the image object.
@@ -144,7 +145,7 @@ public:
         * @param [out] label   The label of image object
         * @return @c true if object is labeled, otherwise return @c false
         */
-       bool getLabel(int& label) const;
+       bool getLabel(int &label) const;
 
        /**
         * @brief  Stores the @ref ImageObject in a file.
@@ -165,10 +166,8 @@ public:
        int load(const char *fileName);
 
 private:
-       void extractFeatures(
-                       const cv::Mat& image,
-                       const FeaturesExtractingParams& params,
-                       const std::vector<cv::Point2f>& roi);
+       void extractFeatures(const cv::Mat &image, const FeaturesExtractingParams &params,
+                                                const std::vector<cv::Point2f> &roi);
 
 private:
        FeaturesExtractingParams __featureExtractingParams;
@@ -185,9 +184,9 @@ private:
 
        friend class ImageRecognizer;
 
-       friend std::ostream& operator << (std::ostream& os, const ImageObject& obj);
+       friend std::ostream &operator<<(std::ostream &os, const ImageObject &obj);
 
-       friend std::istream& operator >> (std::istream& is, ImageObject& obj);
+       friend std::istream &operator>>(std::istream &is, ImageObject &obj);
 };
 
 } /* Image */
index 5ac2f2e..a549e16 100644 (file)
  * @brief This file contains functionality for image object recognition.
  */
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @class    ImageRecognizer
  * @brief    This class contains functionality for image object recognition.
  *
  * @since_tizen 3.0
  */
-class ImageRecognizer {
+class ImageRecognizer
+{
 public:
        /**
         * @brief   @ref ImageRecognizer constructor based on the scene @ref ImageObject.
@@ -46,7 +49,7 @@ public:
         * @param [in] scene   The scene for which the objects will be recognized by
         *                     calling method recognize()
         */
-       ImageRecognizer(const ImageObject& scene);
+       ImageRecognizer(const ImageObject &scene);
 
        /**
         * @brief   @ref ImageRecognizer destructor.
@@ -67,31 +70,22 @@ public:
         *                             of object which will be ignored
         * @return true if object is found on the scene, otherwise return false
         */
-       bool recognize(
-                       const ImageObject& target,
-                       const RecognitionParams& params,
-                       std::vector<cv::Point2f>& contour,
-                       float ignoreFactor = 0.f) const;
+       bool recognize(const ImageObject &target, const RecognitionParams &params, std::vector<cv::Point2f> &contour,
+                                  float ignoreFactor = 0.f) const;
 
 private:
        ImageRecognizer();
 
-       bool findHomophraphyMatrix(
-                       const ImageObject& target,
-                       const RecognitionParams& params,
-                       cv::Mat& homophraphyMatrix,
-                       float ignoreFactor) const;
+       bool findHomophraphyMatrix(const ImageObject &target, const RecognitionParams &params, cv::Mat &homophraphyMatrix,
+                                                          float ignoreFactor) const;
 
-       size_t matchesSelection(
-                       std::vector<cv::DMatch>& examples,
-                       unsigned int filterAmount, unsigned int allowableError) const;
+       size_t matchesSelection(std::vector<cv::DMatch> &examples, unsigned int filterAmount,
+                                                       unsigned int allowableError) const;
 
-       float computeLinearSupportElement(
-                       const std::vector<cv::DMatch>& examples,
-                       int requiredNumber, int leftLimit, int rightLimit) const;
+       float computeLinearSupportElement(const std::vector<cv::DMatch> &examples, int requiredNumber, int leftLimit,
+                                                                         int rightLimit) const;
 
-       static bool isPossibleQuadrangleCorners(
-                       const cv::Point2f corners[NumberOfQuadrangleCorners]);
+       static bool isPossibleQuadrangleCorners(const cv::Point2f corners[NumberOfQuadrangleCorners]);
 
 private:
        /* TODO: Replace to cv::Ptr<ImageObject> */
index a84fa57..174f28a 100644 (file)
 
 #include <pthread.h>
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @class    AsyncTracker
 * @brief    Tracker is based on another tracker and extends the
@@ -30,7 +32,8 @@ namespace Image {
  *
  * @since_tizen 3.0
  */
-class AsyncTracker : public ObjectTracker {
+class AsyncTracker : public ObjectTracker
+{
 public:
        /**
         * @brief @ref AsyncTracker copy constructor.
@@ -38,7 +41,7 @@ public:
         * @since_tizen 3.0
         * @param   [in] copy @ref AsyncTracker which will be copied
         */
-       AsyncTracker(const AsyncTracker& copy);
+       AsyncTracker(const AsyncTracker &copy);
 
        /**
         * @brief @ref AsyncTracker constructor based on the another tracker.
@@ -68,7 +71,7 @@ public:
         * @param [out]  result   Result contour
         * @return true if object is tracked, otherwise return false
         */
-       virtual bool track(const cv::Mat& frame, std::vector<cv::Point>& result);
+       virtual bool track(const cv::Mat &frame, std::vector<cv::Point> &result);
 
        /**
         * @brief Provides the current location of a target.
@@ -76,7 +79,7 @@ public:
         * @since_tizen 3.0
         * @param [in] location  Current location of a target
         */
-       virtual void reinforcement(const std::vector<cv::Point>& location);
+       virtual void reinforcement(const std::vector<cv::Point> &location);
 
        /*
         * @brief Creates a copy of itself
@@ -90,14 +93,14 @@ public:
 
        bool isRun();
 
-       bool isUpdated(std::vector<cv::Point>& result);
+       bool isUpdated(std::vector<cv::Point> &result);
 
-       bool getResult(std::vector<cv::Point>& location);
+       bool getResult(std::vector<cv::Point> &location);
 
 private:
-       AsyncTracker& operator= (const AsyncTracker& copy);
+       AsyncTracker &operator=(const AsyncTracker &copy);
 
-       bool baseTrack(std::vector<cv::Point>& result);
+       bool baseTrack(std::vector<cv::Point> &result);
 
        static void *asyncTrack(void *data);
 
index 09a9d08..d83dc6d 100644 (file)
 
 #include <set>
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @class    CascadeTracker
 * @brief    Tracker is based on other trackers and combines their results.
  *
  * @since_tizen 3.0
  */
-class CascadeTracker : public ObjectTracker {
+class CascadeTracker : public ObjectTracker
+{
 public:
        /**
         * @brief @ref CascadeTracker default constructor
@@ -47,7 +50,7 @@ public:
         * @since_tizen 3.0
         * @param   [in] copy @ref CascadeTracker which will be copied
         */
-       CascadeTracker(const CascadeTracker& copy);
+       CascadeTracker(const CascadeTracker &copy);
 
        /**
         * @brief @ref CascadeTracker destructor
@@ -65,7 +68,7 @@ public:
         * @param [out]  result   Result contour
         * @return true if object is tracked, otherwise return false
         */
-       virtual bool track(const cv::Mat& frame, std::vector<cv::Point>& result);
+       virtual bool track(const cv::Mat &frame, std::vector<cv::Point> &result);
 
        /**
         * @brief Provides the current location of a target.
@@ -73,7 +76,7 @@ public:
         * @since_tizen 3.0
         * @param [in] location  Current location of a target
         */
-       virtual void reinforcement(const std::vector<cv::Point>& location);
+       virtual void reinforcement(const std::vector<cv::Point> &location);
 
        /*
         * @brief Creates a copy of itself
@@ -90,7 +93,7 @@ public:
         * @param [in] copy @ref CascadeTracker which will be copied
         * @return itself
         */
-       virtual CascadeTracker& operator=(const CascadeTracker& copy);
+       virtual CascadeTracker &operator=(const CascadeTracker &copy);
 
        bool enableTracker(cv::Ptr<ObjectTracker> tracker, float priority);
 
@@ -99,17 +102,18 @@ public:
 private:
        void internalReinforcement();
 
-       bool mergeResults(std::vector<cv::Point>& result) const;
+       bool mergeResults(std::vector<cv::Point> &result) const;
 
 private:
-       struct TrackerInfo {
+       struct TrackerInfo
+       {
                TrackerInfo(cv::Ptr<ObjectTracker>, float);
 
-               bool operator<(const TrackerInfo&) const;
+               bool operator<(const TrackerInfo &) const;
 
-               bool operator==(const TrackerInfo&) const;
+               bool operator==(const TrackerInfo &) const;
 
-               bool operator!=(const TrackerInfo&) const;
+               bool operator!=(const TrackerInfo &) const;
 
                cv::Ptr<ObjectTracker> mTracker;
 
index 29ec6b1..222bc6a 100644 (file)
 
 #include "Recognition/ImageObject.h"
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @class    FeatureSubstitutionTracker
  * @brief    Tracker uses feature substitution.
  *
  * @since_tizen 3.0
  */
-class FeatureSubstitutionTracker : public ObjectTracker {
+class FeatureSubstitutionTracker : public ObjectTracker
+{
 public:
        /**
         * @brief @ref FeatureSubstitutionTracker constructor.
@@ -47,12 +50,9 @@ public:
         *                                        recognition
         *                                        (recommended value is 0.5 - 1)
         */
-       FeatureSubstitutionTracker(
-                       const FeaturesExtractingParams& featuresExtractingParams,
-                       const RecognitionParams& recognitionParams,
-                       float expectedOffset,
-                       float sceneScalingFactor = 1.2f,
-                       float objectScalingFactor = 0.85f);
+       FeatureSubstitutionTracker(const FeaturesExtractingParams &featuresExtractingParams,
+                                                          const RecognitionParams &recognitionParams, float expectedOffset,
+                                                          float sceneScalingFactor = 1.2f, float objectScalingFactor = 0.85f);
 
        /**
         * @brief Tracks the target for the video stream consisting of frames.
@@ -63,7 +63,7 @@ public:
         * @param [out]  result   Result contour
         * @return true if object is tracked, otherwise return false
         */
-       virtual bool track(const cv::Mat& frame, std::vector<cv::Point>& result);
+       virtual bool track(const cv::Mat &frame, std::vector<cv::Point> &result);
 
        /**
         * @brief Provides the current location of a target.
@@ -71,7 +71,7 @@ public:
         * @since_tizen 3.0
         * @param [in] location  Current location of a target
         */
-       virtual void reinforcement(const std::vector<cv::Point>& location);
+       virtual void reinforcement(const std::vector<cv::Point> &location);
 
        /*
         * @brief Creates a copy of itself
index 4b0b224..88306db 100644 (file)
  *        during tracking.
  */
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @class    ImageContourStabilizator
  * @brief    This class contains functionality for image contour stabilization
@@ -35,20 +37,22 @@ namespace Image {
  *
  * @since_tizen 3.0
  */
-class ImageContourStabilizator {
+class ImageContourStabilizator
+{
 public:
        /**
         * @brief Enumeration for stabilization return value
         *
         * @since_tizen 3.0
         */
-       enum StabilizationError {
-               Successfully,           /**< Contour is stabilized. */
-               TooShortMovingHistory,  /**< Too short moving history, it's normal
+       enum StabilizationError
+       {
+               Successfully, /**< Contour is stabilized. */
+               TooShortMovingHistory, /**< Too short moving history; this is normal
                                                                        behavior, and you can continue to call
                                                                        stabilization in order to accumulate it. */
-               InvalidSettings,        /**< Invalid settings. */
-               UnsupportedContourType  /**< Unsupported contour type. */
+               InvalidSettings, /**< Invalid settings. */
+               UnsupportedContourType /**< Unsupported contour type. */
        };
 
        /**
@@ -71,9 +75,7 @@ public:
         * @retval #InvalidSettings        Invalid settings
         * @retval #UnsupportedContourType Unsupported contour type
         */
-       StabilizationError stabilize(
-                       std::vector<cv::Point2f>& contour,
-                       const StabilizationParams& params);
+       StabilizationError stabilize(std::vector<cv::Point2f> &contour, const StabilizationParams &params);
 
        /**
         * @brief Resets stabilization process.
@@ -84,7 +86,7 @@ public:
        void reset(void);
 
 private:
-       bool updateSettings(const StabilizationParams& params);
+       bool updateSettings(const StabilizationParams &params);
 
        std::vector<cv::Point2f> computeStabilizedQuadrangleContour(void);
 
index 83664fd..f3a22b3 100644 (file)
  * @brief This file contains the @ref ImageTrackingModel class.
  */
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @class    ImageTrackingModel
  * @brief    This class contains the tracking functionality for image objects.
  *
  * @since_tizen 3.0
  */
-class ImageTrackingModel {
+class ImageTrackingModel
+{
 public:
        /**
         * @brief   @ref ImageTrackingModel default constructor
@@ -51,7 +54,7 @@ public:
         * @since_tizen 3.0
         * @param   [in] copy @ref ImageTrackingModel which will be copied
         */
-       ImageTrackingModel(const ImageTrackingModel& copy);
+       ImageTrackingModel(const ImageTrackingModel &copy);
 
        /**
         * @brief   Sets @ref ImageObject as target which will be tracked.
@@ -61,7 +64,7 @@ public:
         *
         * @return @a 0 on success, otherwise a negative error value
         */
-       int setTarget(const ImageObject& target);
+       int setTarget(const ImageObject &target);
 
        /**
         * @brief Checks whether the tracking model is valid for tracking.
@@ -85,7 +88,7 @@ public:
         * @param [out]  result   Result contour
         * @return true if target is tracked, otherwise return false
         */
-       bool track(const cv::Mat& frame, std::vector<cv::Point>& result);
+       bool track(const cv::Mat &frame, std::vector<cv::Point> &result);
 
        /**
         * @brief   Refreshes tracking model.
@@ -102,7 +105,7 @@ public:
         * @since_tizen 3.0
         * @param   [in] copy @ref ImageTrackingModel which will be copied
         */
-       ImageTrackingModel& operator=(const ImageTrackingModel& copy);
+       ImageTrackingModel &operator=(const ImageTrackingModel &copy);
 
        /**
         * @brief  Stores the @ref ImageTrackingModel in a file.
@@ -122,13 +125,9 @@ public:
         */
        int load(const char *filepath);
 
-        friend std::ostream& operator << (
-                       std::ostream& os,
-                       const ImageTrackingModel& obj);
+       friend std::ostream &operator<<(std::ostream &os, const ImageTrackingModel &obj);
 
-       friend std::istream& operator >> (
-                       std::istream& is,
-                       ImageTrackingModel& obj);
+       friend std::istream &operator>>(std::istream &is, ImageTrackingModel &obj);
 
 private:
        ImageObject __target;
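
A minimal call-side sketch for the interface above (illustrative fragment; target, frame, and the helper functions are assumed to exist):

	MediaVision::Image::ImageTrackingModel model;
	model.setTarget(target); /* target: a filled ImageObject */

	std::vector<cv::Point> location;
	while (grabFrame(frame)) { /* grabFrame() is hypothetical */
		if (model.track(frame, location))
			render(location); /* render() is hypothetical */
	}

	model.save("/tmp/tracking.model"); /* illustrative path; return code ignored in this sketch */
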
index 3fbd9ea..e051c17 100644 (file)
 
 #include "Tracking/ObjectTracker.h"
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @class    MFTracker
  * @brief    Median Flow tracker implementation.
  *
  * @since_tizen 3.0
  */
-class MFTracker : public ObjectTracker {
+class MFTracker : public ObjectTracker
+{
 public:
-       struct Params {
+       struct Params
+       {
                /**
                 * @brief TrackerMedianFlow algorithm parameters constructor
                 */
@@ -47,7 +51,7 @@ public:
                                                                        flow search used for tracking */
 
                /* TODO: add lifetime*/
-               /*time_t mLifetime;*/  /**< Time of tracking without reinforcement. */
+               /*time_t mLifetime;*/ /**< Time of tracking without reinforcement. */
        };
 
        /**
@@ -67,7 +71,7 @@ public:
         * @param [out]  result   Result contour
         * @return true if object is tracked, otherwise return false
         */
-       virtual bool track(const cv::Mat& frame, std::vector<cv::Point>& result);
+       virtual bool track(const cv::Mat &frame, std::vector<cv::Point> &result);
 
        /**
         * @brief Provides the current location of a target.
@@ -75,7 +79,7 @@ public:
         * @since_tizen 3.0
         * @param [in] location  Current location of a target
         */
-       virtual void reinforcement(const std::vector<cv::Point>& location);
+       virtual void reinforcement(const std::vector<cv::Point> &location);
 
        /**
         * @brief Creates a copy of itself
@@ -88,43 +92,33 @@ public:
 private:
        bool isInited() const;
 
-       bool init(const cv::Mat& image);
+       bool init(const cv::Mat &image);
 
-       bool update(const cv::Mat& image);
+       bool update(const cv::Mat &image);
 
        float getLastConfidence() const;
 
        cv::Rect_<float> getLastBoundingBox() const;
 
-       bool medianFlowImpl(cv::Mat oldImage, cv::Mat newImage, cv::Rect_<float>& oldBox);
+       bool medianFlowImpl(cv::Mat oldImage, cv::Mat newImage, cv::Rect_<float> &oldBox);
 
-       cv::Rect_<float> vote(
-                       const std::vector<cv::Point2f>& oldPoints,
-                       const std::vector<cv::Point2f>& newPoints,
-                       const cv::Rect_<float>& oldRect,
-                       cv::Point2f& mD);
+       cv::Rect_<float> vote(const std::vector<cv::Point2f> &oldPoints, const std::vector<cv::Point2f> &newPoints,
+                                                 const cv::Rect_<float> &oldRect, cv::Point2f &mD);
 
-       void check_FB(
-                       std::vector<cv::Mat> newPyramid,
-                       const std::vector<cv::Point2f>& oldPoints,
-                       const std::vector<cv::Point2f>& newPoints,
-                       std::vector<bool>& status);
+       void check_FB(std::vector<cv::Mat> newPyramid, const std::vector<cv::Point2f> &oldPoints,
+                                 const std::vector<cv::Point2f> &newPoints, std::vector<bool> &status);
 
-       void check_NCC(
-                       const cv::Mat& oldImage,
-                       const cv::Mat& newImage,
-                       const std::vector<cv::Point2f>& oldPoints,
-                       const std::vector<cv::Point2f>& newPoints,
-                       std::vector<bool>& status);
+       void check_NCC(const cv::Mat &oldImage, const cv::Mat &newImage, const std::vector<cv::Point2f> &oldPoints,
+                                  const std::vector<cv::Point2f> &newPoints, std::vector<bool> &status);
 
 private:
-       bool __isInit;                /**< Flag is used to determine the model
+       bool __isInit; /**< Flag is used to determine the model
                                                                                initialization */
 
-       Params __params;              /**< Parameters used during tracking, see
+       Params __params; /**< Parameters used during tracking, see
                                                                                @ref TrackerMedianFlow::Params */
 
-       cv::TermCriteria __termcrit;  /**< Terminating criteria for OpenCV
+       cv::TermCriteria __termcrit; /**< Terminating criteria for OpenCV
                                                                                Lucas–Kanade optical flow algorithm used
                                                                                during tracking */
 
@@ -132,15 +126,15 @@ private:
                                                                                                        location with relative values
                                                                                                        to the bounding box */
 
-       cv::Rect_<float> __boundingBox;  /**< Tracking object bounding box */
+       cv::Rect_<float> __boundingBox; /**< Tracking object bounding box */
 
-       float __confidence;              /**< Confidence that object was tracked
+       float __confidence; /**< Confidence that object was tracked
                                                                                        correctly at the last tracking iteration */
 
-       cv::Mat __image;                 /**< Last image for which tracking was
+       cv::Mat __image; /**< Last image for which tracking was
                                                                                        performed */
 
-       std::vector<cv::Mat> __pyramid;  /**< The pyramid had been calculated for
+       std::vector<cv::Mat> __pyramid; /**< The pyramid had been calculated for
                                                                                        the previous frame(or when
                                                                                        initialize the model) */
 };
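
For reference, a compact sketch of the forward-backward filtering that check_FB() above performs (illustrative code, not this class's implementation; the median-based threshold and non-empty oldPts are assumptions):

	#include <algorithm>
	#include <vector>
	#include <opencv2/core.hpp>
	#include <opencv2/video/tracking.hpp>

	static void forwardBackwardFilter(const cv::Mat &prev, const cv::Mat &next,
									  const std::vector<cv::Point2f> &oldPts,
									  std::vector<cv::Point2f> &newPts, std::vector<bool> &keep)
	{
		std::vector<cv::Point2f> backPts;
		std::vector<uchar> stFwd, stBwd;
		std::vector<float> err;

		/* Track forward, then track the results back to the first frame. */
		cv::calcOpticalFlowPyrLK(prev, next, oldPts, newPts, stFwd, err);
		cv::calcOpticalFlowPyrLK(next, prev, newPts, backPts, stBwd, err);

		/* Forward-backward error: distance between a point and its round trip. */
		std::vector<float> fb(oldPts.size());
		for (size_t i = 0; i < oldPts.size(); ++i)
			fb[i] = (float) cv::norm(backPts[i] - oldPts[i]);

		std::vector<float> sorted(fb);
		std::nth_element(sorted.begin(), sorted.begin() + sorted.size() / 2, sorted.end());
		const float median = sorted[sorted.size() / 2];

		/* Keep only points that survived both passes and beat the median error. */
		keep.resize(oldPts.size());
		for (size_t i = 0; i < oldPts.size(); ++i)
			keep[i] = stFwd[i] && stBwd[i] && fb[i] <= median;
	}
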
index 9486b51..55cb051 100644 (file)
 #include <opencv2/core.hpp>
 #include <opencv2/tracking.hpp>
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @class    ObjectTracker
  * @brief    Basic object tracker.
  *
  * @since_tizen 3.0
  */
-class ObjectTracker {
+class ObjectTracker
+{
 public:
        /**
         * @brief @ref ObjectTracker destructor
@@ -46,7 +49,7 @@ public:
         * @param [out]  result   Result contour
         * @return true if object is tracked, otherwise return false
         */
-       virtual bool track(const cv::Mat& frame, std::vector<cv::Point>& result) = 0;
+       virtual bool track(const cv::Mat &frame, std::vector<cv::Point> &result) = 0;
 
        /**
         * @brief Provides the current location of a target.
@@ -54,7 +57,7 @@ public:
         * @since_tizen 3.0
         * @param [in] location  Current location of a target
         */
-       virtual void reinforcement(const std::vector<cv::Point>& location) = 0;
+       virtual void reinforcement(const std::vector<cv::Point> &location) = 0;
 
        /*
         * @brief Creates a copy of itself
@@ -72,7 +75,7 @@ private:
         * @param [in] copy @ref ObjectTracker which will be copied
         * @return itself
         */
-       ObjectTracker& operator=(const ObjectTracker& copy) = default;
+       ObjectTracker &operator=(const ObjectTracker &copy) = default;
 };
 
 } /* Image */
index 3f63b75..222060f 100644 (file)
 
 #include "Recognition/ImageObject.h"
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 /**
  * @class    FeatureSubstitutionTracker
  * @brief    Tracker uses recognition of target on the entire frame.
  *
  * @since_tizen 3.0
  */
-class RecognitionBasedTracker : public ObjectTracker {
+class RecognitionBasedTracker : public ObjectTracker
+{
 public:
        /**
         * @brief @ref RecognitionBasedTracker constructor.
@@ -40,10 +43,8 @@ public:
         *                                             extracting from frames
         * @param [in] recognitionParams               Parameters of recognition
         */
-       RecognitionBasedTracker(
-                       const ImageObject& target,
-                       const FeaturesExtractingParams& sceneFeaturesExtractingParams,
-                       const RecognitionParams& recognitionParams);
+       RecognitionBasedTracker(const ImageObject &target, const FeaturesExtractingParams &sceneFeaturesExtractingParams,
+                                                       const RecognitionParams &recognitionParams);
 
        /**
         * @brief @ref RecognitionBasedTracker destructor
@@ -61,7 +62,7 @@ public:
         * @param [out]  result   Result contour
         * @return true if object is tracked, otherwise return false
         */
-       virtual bool track(const cv::Mat& frame, std::vector<cv::Point>& result);
+       virtual bool track(const cv::Mat &frame, std::vector<cv::Point> &result);
 
        /**
         * @brief Provides the current location of a target.
@@ -69,7 +70,7 @@ public:
         * @since_tizen 3.0
         * @param [in] location  Current location of a target
         */
-       virtual void reinforcement(const std::vector<cv::Point>& location);
+       virtual void reinforcement(const std::vector<cv::Point> &location);
 
        /*
         * @brief Creates a copy of itself
index 3089778..5ed4ee2 100644 (file)
@@ -76,13 +76,8 @@ extern "C" {
  * @see mv_image_object_destroy_open()
  * @see mv_engine_config_h
  */
-int mv_image_recognize_open(
-               mv_source_h source,
-               const mv_image_object_h *image_objects,
-               int number_of_objects,
-               mv_engine_config_h engine_cfg,
-               mv_image_recognized_cb recognized_cb,
-               void *user_data);
+int mv_image_recognize_open(mv_source_h source, const mv_image_object_h *image_objects, int number_of_objects,
+                                                       mv_engine_config_h engine_cfg, mv_image_recognized_cb recognized_cb, void *user_data);
 
 /*************************/
 /* Image object tracking */
@@ -139,12 +134,8 @@ int mv_image_recognize_open(
  * @see mv_image_tracking_model_set_target_open()
  * @see mv_image_tracking_model_destroy_open()
  */
-int mv_image_track_open(
-               mv_source_h source,
-               mv_image_tracking_model_h image_tracking_model,
-               mv_engine_config_h engine_cfg,
-               mv_image_tracked_cb tracked_cb,
-               void *user_data);
+int mv_image_track_open(mv_source_h source, mv_image_tracking_model_h image_tracking_model,
+                                               mv_engine_config_h engine_cfg, mv_image_tracked_cb tracked_cb, void *user_data);
 
 /**************************/
 /* Image object behaviour */
@@ -164,8 +155,7 @@ int mv_image_track_open(
  *
  * @see mv_image_object_destroy_open()
  */
-int mv_image_object_create_open(
-               mv_image_object_h *image_object);
+int mv_image_object_create_open(mv_image_object_h *image_object);
 
 /**
  * @brief Destroys the image object.
@@ -178,8 +168,7 @@ int mv_image_object_create_open(
  *
  * @see mv_image_object_create_open()
  */
-int mv_image_object_destroy_open(
-               mv_image_object_h image_object);
+int mv_image_object_destroy_open(mv_image_object_h image_object);
 
 /**
  * @brief Fills the image object.
@@ -218,11 +207,8 @@ int mv_image_object_destroy_open(
  * @see mv_image_object_destroy_open()
  * @see mv_engine_config_h
  */
-int mv_image_object_fill_open(
-               mv_image_object_h image_object,
-               mv_engine_config_h engine_cfg,
-               mv_source_h source,
-               mv_rectangle_s *location);
+int mv_image_object_fill_open(mv_image_object_h image_object, mv_engine_config_h engine_cfg, mv_source_h source,
+                                                         mv_rectangle_s *location);
 
 /**
  * @brief Gets a value that determines how well an image object can be recognized.
@@ -254,9 +240,7 @@ int mv_image_object_fill_open(
  * @see mv_image_object_destroy_open()
  * @see mv_engine_config_h
  */
-int mv_image_object_get_recognition_rate_open(
-               mv_image_object_h image_object,
-               double *recognition_rate);
+int mv_image_object_get_recognition_rate_open(mv_image_object_h image_object, double *recognition_rate);
 
 /**
  * @brief Sets a label for the image object.
@@ -280,9 +264,7 @@ int mv_image_object_get_recognition_rate_open(
  * @see mv_image_object_create_open()
  * @see mv_image_object_destroy_open()
  */
-int mv_image_object_set_label_open(
-               mv_image_object_h image_object,
-               int label);
+int mv_image_object_set_label_open(mv_image_object_h image_object, int label);
 
 /**
  * @brief Gets a label of image object.
@@ -308,9 +290,7 @@ int mv_image_object_set_label_open(
  * @see mv_image_object_create_open()
  * @see mv_image_object_destroy_open()
  */
-int mv_image_object_get_label_open(
-               mv_image_object_h image_object,
-               int *label);
+int mv_image_object_get_label_open(mv_image_object_h image_object, int *label);
 
 /**
  * @brief Clones the image object.
@@ -329,9 +309,7 @@ int mv_image_object_get_label_open(
  * @see mv_image_object_create_open()
  * @see mv_image_object_destroy_open()
  */
-int mv_image_object_clone_open(
-               mv_image_object_h src,
-               mv_image_object_h *dst);
+int mv_image_object_clone_open(mv_image_object_h src, mv_image_object_h *dst);
 
 /**
  * @brief Saves the image object.
@@ -350,9 +328,7 @@ int mv_image_object_clone_open(
  * @see mv_image_object_load_open()
  * @see mv_image_object_destroy_open()
  */
-int mv_image_object_save_open(
-               const char *file_name,
-               mv_image_object_h image_object);
+int mv_image_object_save_open(const char *file_name, mv_image_object_h image_object);
 
 /**
  * @brief Loads an image object from the file.
@@ -377,9 +353,7 @@ int mv_image_object_save_open(
  * @see mv_image_object_save_open()
  * @see mv_image_object_destroy_open()
  */
-int mv_image_object_load_open(
-               const char *file_name,
-               mv_image_object_h *image_object);
+int mv_image_object_load_open(const char *file_name, mv_image_object_h *image_object);
 
 /**********************************/
 /* Image tracking model behaviour */
@@ -399,8 +373,7 @@ int mv_image_object_load_open(
  *
  * @see mv_image_tracking_model_destroy_open()
  */
-int mv_image_tracking_model_create_open(
-               mv_image_tracking_model_h *image_tracking_model);
+int mv_image_tracking_model_create_open(mv_image_tracking_model_h *image_tracking_model);
 
 /**
  * @brief Sets target of image tracking model.
@@ -433,9 +406,8 @@ int mv_image_tracking_model_create_open(
  * @see mv_image_track_open()
  * @see mv_image_tracking_model_destroy_open()
  */
-int mv_image_tracking_model_set_target_open(
-               mv_image_object_h image_object,
-               mv_image_tracking_model_h image_tracking_model);
+int mv_image_tracking_model_set_target_open(mv_image_object_h image_object,
+                                                                                       mv_image_tracking_model_h image_tracking_model);
 
 /**
  * @brief Destroys the image tracking model.
@@ -451,8 +423,7 @@ int mv_image_tracking_model_set_target_open(
  *
  * @see mv_image_tracking_model_create_open()
  */
-int mv_image_tracking_model_destroy_open(
-               mv_image_tracking_model_h image_tracking_model);
+int mv_image_tracking_model_destroy_open(mv_image_tracking_model_h image_tracking_model);
 
 /**
  * @brief Refreshes the state of image tracking model.
@@ -482,9 +453,7 @@ int mv_image_tracking_model_destroy_open(
  * @see mv_image_track_open()
  * @see mv_image_tracking_model_destroy_open()
  */
-int mv_image_tracking_model_refresh_open(
-               mv_image_tracking_model_h image_tracking_model,
-               mv_engine_config_h engine_cfg);
+int mv_image_tracking_model_refresh_open(mv_image_tracking_model_h image_tracking_model, mv_engine_config_h engine_cfg);
 
 /**
  * @brief Clones the image tracking model.
@@ -501,9 +470,7 @@ int mv_image_tracking_model_refresh_open(
  * @see mv_image_tracking_model_create_open()
  * @see mv_image_tracking_model_destroy_open()
  */
-int mv_image_tracking_model_clone_open(
-               mv_image_tracking_model_h src,
-               mv_image_tracking_model_h *dst);
+int mv_image_tracking_model_clone_open(mv_image_tracking_model_h src, mv_image_tracking_model_h *dst);
 
 /**
  * @brief Saves the image tracking model.
@@ -528,9 +495,7 @@ int mv_image_tracking_model_clone_open(
  * @see mv_image_tracking_model_load_open()
  * @see mv_image_tracking_model_destroy_open()
  */
-int mv_image_tracking_model_save_open(
-               const char *file_name,
-               mv_image_tracking_model_h image_tracking_model);
+int mv_image_tracking_model_save_open(const char *file_name, mv_image_tracking_model_h image_tracking_model);
 
 /**
  * @brief Loads an image tracking model from the file.
@@ -555,9 +520,7 @@ int mv_image_tracking_model_save_open(
  * @see mv_image_tracking_model_save_open()
  * @see mv_image_tracking_model_destroy_open()
  */
-int mv_image_tracking_model_load_open(
-               const char *file_name,
-               mv_image_tracking_model_h *image_tracking_model);
+int mv_image_tracking_model_load_open(const char *file_name, mv_image_tracking_model_h *image_tracking_model);
 
 #ifdef __cplusplus
 }
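
For orientation, a bare-bones call sequence for the recognition entry points declared above (illustrative fragment; error codes unchecked, template_source and scene_source assumed to exist, and the mv_image_recognized_cb signature is assumed from mv_image.h, which this hunk does not show):

	static void on_recognized(mv_source_h source, mv_engine_config_h engine_cfg,
							  const mv_image_object_h *image_objects, mv_rectangle_s **locations,
							  unsigned int number_of_objects, void *user_data)
	{
		/* locations[i] is non-NULL for each image_objects[i] found in the source */
	}

	mv_image_object_h object = NULL;
	mv_image_object_create_open(&object);
	mv_image_object_fill_open(object, NULL, template_source, NULL); /* NULL ROI: whole image (assumed) */
	mv_image_recognize_open(scene_source, &object, 1, NULL, on_recognized, NULL);
	mv_image_object_destroy_open(object);
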
index c788895..0c70c3a 100644 (file)
 
 #include <opencv2/core.hpp>
 
-namespace MediaVision {
-namespace Image {
-
-BasicExtractorFactory::BasicExtractorFactory(
-               KeypointType keypointsType,
-               DescriptorType descType) :
-                               __kpType(keypointsType),
-                               __descType(descType)
+namespace MediaVision
 {
-}
+namespace Image
+{
+BasicExtractorFactory::BasicExtractorFactory(KeypointType keypointsType, DescriptorType descType)
+               : __kpType(keypointsType), __descType(descType)
+{}
 
 cv::Ptr<FeatureExtractor> BasicExtractorFactory::buildFeatureExtractor()
 {
-       cv::Ptr<FeatureExtractor> featureExtractor(new (std::nothrow)FeatureExtractor());
+       cv::Ptr<FeatureExtractor> featureExtractor(new (std::nothrow) FeatureExtractor());
 
        if (featureExtractor != NULL) {
                cv::Ptr<cv::FeatureDetector> detector;
index 5b04f00..4e6d9a0 100644 (file)
 
 #include <opencv2/core.hpp>
 
-
-namespace MediaVision {
-namespace Image {
-
+namespace MediaVision
+{
+namespace Image
+{
 const cv::Size FeatureExtractor::__MIN_SIZE = cv::Size(5, 5);
 
-FeatureExtractor::FeatureExtractor() :
-               __kpType(KT_INVALID),
-               __detector(),
-               __descType(DT_INVALID),
-               __extractor(),
-               __computeRecognitionRate(NULL)
-{
-}
+FeatureExtractor::FeatureExtractor()
+               : __kpType(KT_INVALID), __detector(), __descType(DT_INVALID), __extractor(), __computeRecognitionRate(NULL)
+{}
 
-void FeatureExtractor::setFeatureDetector(
-               const cv::Ptr<cv::FeatureDetector> detector,
-               KeypointType keypointType)
+void FeatureExtractor::setFeatureDetector(const cv::Ptr<cv::FeatureDetector> detector, KeypointType keypointType)
 {
        __detector = detector;
        __kpType = keypointType;
 }
 
-void FeatureExtractor::setDescriptorExtractor(
-               cv::Ptr<cv::DescriptorExtractor> extractor,
-               DescriptorType descriptorType)
+void FeatureExtractor::setDescriptorExtractor(cv::Ptr<cv::DescriptorExtractor> extractor, DescriptorType descriptorType)
 {
        __extractor = extractor;
        __descType = descriptorType;
 }
 
-void FeatureExtractor::setRecognitionRateMetric(
-               float (*computeRecognitionRate)(
-                               const cv::Mat&,
-                               const std::vector<cv::KeyPoint>&))
+void FeatureExtractor::setRecognitionRateMetric(float (*computeRecognitionRate)(const cv::Mat &,
+                                                                                                                                                               const std::vector<cv::KeyPoint> &))
 {
        __computeRecognitionRate = computeRecognitionRate;
 }
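
The metric hook above takes a plain function pointer; a toy replacement metric, for illustration only (requires <algorithm> for std::min; extractor is an assumed FeatureExtractor instance):

	static float keypointDensityRate(const cv::Mat &image, const std::vector<cv::KeyPoint> &keypoints)
	{
		/* crude stand-in: rate grows with keypoint count, saturating at 1 */
		return std::min(1.f, keypoints.size() / 100.f);
	}

	extractor.setRecognitionRateMetric(keypointDensityRate);
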
 
-bool FeatureExtractor::extract(
-               const cv::Mat& image,
-               FeaturePack& result,
-               const std::vector<cv::Point2f>& roi)
+bool FeatureExtractor::extract(const cv::Mat &image, FeaturePack &result, const std::vector<cv::Point2f> &roi)
 {
        if (__detector.empty() || __extractor.empty())
                return false;
@@ -89,9 +75,7 @@ bool FeatureExtractor::extract(
 
        std::vector<cv::KeyPoint> keypoints;
 
-       __detector->detect(
-                       image(boundingBox),
-                       keypoints);
+       __detector->detect(image(boundingBox), keypoints);
 
        result.__objectKeypoints = keypoints;
 
@@ -116,15 +100,10 @@ bool FeatureExtractor::extract(
                }
        }
 
-        __extractor->compute(
-                       image,
-                       result.__objectKeypoints,
-                       result.__objectDescriptors);
+       __extractor->compute(image, result.__objectKeypoints, result.__objectDescriptors);
 
        if (NULL != __computeRecognitionRate) {
-               result.__recognitionRate = __computeRecognitionRate(
-                               image(boundingBox),
-                               keypoints);
+               result.__recognitionRate = __computeRecognitionRate(image(boundingBox), keypoints);
        } else {
                /* Default recognition rate metric */
                if (result.__objectKeypoints.size() < MinimumNumberOfFeatures)
index 3cda88e..463a754 100644 (file)
 
 #include "Features/FeatureExtractorFactory.h"
 
-namespace MediaVision {
-namespace Image {
-
+namespace MediaVision
+{
+namespace Image
+{
 FeatureExtractorFactory::~FeatureExtractorFactory()
 {
        ; /* NULL */
index c32d49d..31e3e0e 100644 (file)
 #include <opencv2/core.hpp>
 #include <opencv2/calib3d/calib3d_c.h>
 
-namespace MediaVision {
-namespace Image {
-
+namespace MediaVision
+{
+namespace Image
+{
 // LCOV_EXCL_START
-namespace {
-
-float computeLinearSupportElement(
-               const std::vector<cv::DMatch>& examples,
-               int requiredNumber,
-               int leftLimit,
-               int rightLimit)
+namespace
+{
+float computeLinearSupportElement(const std::vector<cv::DMatch> &examples, int requiredNumber, int leftLimit,
+                                                                 int rightLimit)
 {
        int sizeOfExamples = rightLimit - leftLimit + 1;
 
@@ -58,10 +56,7 @@ float computeLinearSupportElement(
        return k * requiredNumber + b;
 }
 
-size_t matchesSelection(
-               std::vector<cv::DMatch>& examples,
-               size_t filterAmount,
-               size_t allowableError)
+size_t matchesSelection(std::vector<cv::DMatch> &examples, size_t filterAmount, size_t allowableError)
 {
        size_t sizeOfExamples = examples.size();
 
@@ -86,21 +81,18 @@ size_t matchesSelection(
                        break;
                }
 
-               supportElement = computeLinearSupportElement(examples, requiredNumber,
-                               leftLimit, rightLimit);
+               supportElement = computeLinearSupportElement(examples, requiredNumber, leftLimit, rightLimit);
 
                /* Iteration similar quicksort */
                while (true) {
                        /* Search the leftmost element which have
                         * bigger confidence than support element */
-                       while (examples[leftLimit].distance <= supportElement &&
-                                       leftLimit < startRightLimit)
+                       while (examples[leftLimit].distance <= supportElement && leftLimit < startRightLimit)
                                ++leftLimit;
 
                        /* Search the rightmost element which have smaller
                         * confidence than support element */
-                       while (examples[rightLimit].distance >= supportElement &&
-                                       rightLimit >= startLeftLimit)
+                       while (examples[rightLimit].distance >= supportElement && rightLimit >= startLeftLimit)
                                --rightLimit;
 
                        if (leftLimit >= rightLimit)
@@ -122,25 +114,20 @@ size_t matchesSelection(
                }
        }
 
-       return (size_t)leftLimit;
+       return (size_t) leftLimit;
 }
 
 } /* anonymous namespace */
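
matchesSelection() above is a quickselect-style partition: it keeps roughly the filterAmount smallest-distance matches, to within allowableError, without fully sorting. The standard-library equivalent, for comparison (illustrative; matches and filterAmount as in the function above):

	#include <algorithm>

	/* Partition so the best filterAmount matches come first, then truncate. */
	std::nth_element(matches.begin(), matches.begin() + filterAmount, matches.end(),
					 [](const cv::DMatch &a, const cv::DMatch &b) { return a.distance < b.distance; });
	matches.resize(filterAmount);
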
 
-FeatureMatcher::FeatureMatcher(
-               float affectingPart,
-               float tolerantError,
-               size_t minimumMatchesNumber)
+FeatureMatcher::FeatureMatcher(float affectingPart, float tolerantError, size_t minimumMatchesNumber)
 {
        setAffectingPart(affectingPart);
        setTolerantError(tolerantError);
        setMinimumMatchesNumber(minimumMatchesNumber);
 }
 
-FeatureMatcher::MatchError FeatureMatcher::match(
-               const FeaturePack& from,
-               const FeaturePack& to,
-               cv::Mat& homophraphyMatrix) const
+FeatureMatcher::MatchError FeatureMatcher::match(const FeaturePack &from, const FeaturePack &to,
+                                                                                                cv::Mat &homophraphyMatrix) const
 {
        if (MinimumNumberOfFeatures > from.__objectKeypoints.size())
                return InvalidFeaturePackFrom;
@@ -164,30 +151,24 @@ FeatureMatcher::MatchError FeatureMatcher::match(
        size_t allowableMatchesNumberError = __tolerantError * requiredMatchesNumber;
 
        if (matchesNumber - allowableMatchesNumberError > MinimumNumberOfFeatures &&
-                       requiredMatchesNumber + allowableMatchesNumberError < matchesNumber) {
-               if (requiredMatchesNumber - allowableMatchesNumberError <
-                               __minimumMatchesNumber) {
-                       if (requiredMatchesNumber + allowableMatchesNumberError >
-                                       __minimumMatchesNumber) {
-                               requiredMatchesNumber = (requiredMatchesNumber +
-                                               __minimumMatchesNumber + allowableMatchesNumberError) / 2;
-
-                               allowableMatchesNumberError = requiredMatchesNumber -
-                                               __minimumMatchesNumber + allowableMatchesNumberError;
+               requiredMatchesNumber + allowableMatchesNumberError < matchesNumber) {
+               if (requiredMatchesNumber - allowableMatchesNumberError < __minimumMatchesNumber) {
+                       if (requiredMatchesNumber + allowableMatchesNumberError > __minimumMatchesNumber) {
+                               requiredMatchesNumber =
+                                               (requiredMatchesNumber + __minimumMatchesNumber + allowableMatchesNumberError) / 2;
+
+                               allowableMatchesNumberError =
+                                               requiredMatchesNumber - __minimumMatchesNumber + allowableMatchesNumberError;
                        } else {
                                const size_t minimalAllowableMatchesNumberError = 2u;
 
-                               requiredMatchesNumber = minimalAllowableMatchesNumberError +
-                                                                               __minimumMatchesNumber;
+                               requiredMatchesNumber = minimalAllowableMatchesNumberError + __minimumMatchesNumber;
 
                                allowableMatchesNumberError = minimalAllowableMatchesNumberError;
                        }
                }
 
-               const size_t filterAmount = matchesSelection(
-                                                                                                       matches,
-                                                                                                       requiredMatchesNumber,
-                                                                                                       allowableMatchesNumberError);
+               const size_t filterAmount = matchesSelection(matches, requiredMatchesNumber, allowableMatchesNumberError);
 
                if (filterAmount >= MinimumNumberOfFeatures)
                        matches.resize(filterAmount);
@@ -199,11 +180,9 @@ FeatureMatcher::MatchError FeatureMatcher::match(
        std::vector<cv::Point2f> scenePoints(matchesNumber);
 
        for (size_t matchIdx = 0; matchIdx < matchesNumber; ++matchIdx) {
-               objectPoints[matchIdx] =
-                               from.__objectKeypoints[matches[matchIdx].queryIdx].pt;
+               objectPoints[matchIdx] = from.__objectKeypoints[matches[matchIdx].queryIdx].pt;
 
-               scenePoints[matchIdx] =
-                               to.__objectKeypoints[matches[matchIdx].trainIdx].pt;
+               scenePoints[matchIdx] = to.__objectKeypoints[matches[matchIdx].trainIdx].pt;
        }
 
        homophraphyMatrix = cv::findHomography(objectPoints, scenePoints, CV_RANSAC);
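
The pipeline above (descriptor matching, adaptive match filtering, RANSAC homography) corresponds to this bare-bones OpenCV flow (illustrative; the from/to variable names are assumptions, and the Hamming norm assumes binary descriptors such as ORB):

	#include <opencv2/calib3d.hpp>
	#include <opencv2/features2d.hpp>

	cv::BFMatcher matcher(cv::NORM_HAMMING);
	std::vector<cv::DMatch> matches;
	matcher.match(fromDescriptors, toDescriptors, matches);

	std::vector<cv::Point2f> src, dst;
	for (const cv::DMatch &m : matches) {
		src.push_back(fromKeypoints[m.queryIdx].pt);
		dst.push_back(toKeypoints[m.trainIdx].pt);
	}
	cv::Mat H = cv::findHomography(src, dst, cv::RANSAC);
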
index d2ce616..0d8e9dd 100644 (file)
 
 #include <opencv2/core.hpp>
 
-namespace MediaVision {
-namespace Image {
-
-FeaturePack::FeaturePack() :
-               __keypointsType(KT_INVALID),
-               __objectKeypoints(),
-               __descriptorsType(DT_INVALID),
-               __objectDescriptors(),
-               __recognitionRate(0.f)
+namespace MediaVision
+{
+namespace Image
+{
+FeaturePack::FeaturePack()
+               : __keypointsType(KT_INVALID)
+               , __objectKeypoints()
+               , __descriptorsType(DT_INVALID)
+               , __objectDescriptors()
+               , __recognitionRate(0.f)
 {
        ; /* NULL */
 }
 
-FeaturePack::FeaturePack(const FeaturePack& copy) :
-               __keypointsType(copy.__keypointsType),
-               __objectKeypoints(copy.__objectKeypoints),
-               __descriptorsType(copy.__descriptorsType),
-               __objectDescriptors(copy.__objectDescriptors.clone()),
-               __recognitionRate(copy.__recognitionRate)
+FeaturePack::FeaturePack(const FeaturePack &copy)
+               : __keypointsType(copy.__keypointsType)
+               , __objectKeypoints(copy.__objectKeypoints)
+               , __descriptorsType(copy.__descriptorsType)
+               , __objectDescriptors(copy.__objectDescriptors.clone())
+               , __recognitionRate(copy.__recognitionRate)
 {
        ; /* NULL */
 }
 
-FeaturePack& FeaturePack::operator= (const FeaturePack& copy)
+FeaturePack &FeaturePack::operator=(const FeaturePack &copy)
 {
        if (this != &copy) {
                __keypointsType = copy.__keypointsType;
index 1dcded8..1a12237 100644 (file)
 
 #include <opencv2/core.hpp>
 
-namespace MediaVision {
-namespace Image {
-ORBExtractorFactory::ORBExtractorFactory(
-               float scaleFactor,
-               size_t maximumFeaturesNumber)
+namespace MediaVision
+{
+namespace Image
+{
+ORBExtractorFactory::ORBExtractorFactory(float scaleFactor, size_t maximumFeaturesNumber)
 {
        setScaleFactor(scaleFactor);
        setMaximumFeaturesNumber(maximumFeaturesNumber);
@@ -32,7 +32,7 @@ ORBExtractorFactory::ORBExtractorFactory(
 
 cv::Ptr<FeatureExtractor> ORBExtractorFactory::buildFeatureExtractor()
 {
-       cv::Ptr<FeatureExtractor> featureExtractor(new (std::nothrow)FeatureExtractor());
+       cv::Ptr<FeatureExtractor> featureExtractor(new (std::nothrow) FeatureExtractor());
 
        cv::Ptr<cv::ORB> detector = cv::ORB::create(__maximumFeaturesNumber, __scaleFactor);
        cv::Ptr<cv::ORB> extractor = detector;
@@ -64,9 +64,7 @@ void ORBExtractorFactory::setMaximumFeaturesNumber(size_t maximumFeaturesNumber)
        __maximumFeaturesNumber = maximumFeaturesNumber;
 }
 
-float ORBExtractorFactory::computeRecognitionRate(
-               const cv::Mat& image,
-               const std::vector<cv::KeyPoint>& keypoints)
+float ORBExtractorFactory::computeRecognitionRate(const cv::Mat &image, const std::vector<cv::KeyPoint> &keypoints)
 {
        const size_t numberOfKeypoints = keypoints.size();
 
@@ -86,11 +84,7 @@ float ORBExtractorFactory::computeRecognitionRate(
 
        for (size_t x = 0u; x < X_CELLS_NUMBER; ++x) {
                for (size_t y = 0u; y < Y_CELLS_NUMBER; ++y) {
-                       cells[x][y] = image(cv::Rect(
-                                       x * cellWidth,
-                                       y * cellHeight,
-                                       cellWidth,
-                                       cellHeight));
+                       cells[x][y] = image(cv::Rect(x * cellWidth, y * cellHeight, cellWidth, cellHeight));
 
                        accumulationCounter[x][y] = 0;
                }
@@ -106,26 +100,23 @@ float ORBExtractorFactory::computeRecognitionRate(
                ++(accumulationCounter[xCellIdx][yCellIdx]);
        }
 
-       const float exceptedNumber = numberOfKeypoints /
-                       (float)(X_CELLS_NUMBER * Y_CELLS_NUMBER);
+       const float exceptedNumber = numberOfKeypoints / (float) (X_CELLS_NUMBER * Y_CELLS_NUMBER);
 
        float distributedEvaluation = 0.f;
 
        for (size_t x = 0u; x < X_CELLS_NUMBER; ++x) {
                for (size_t y = 0u; y < Y_CELLS_NUMBER; ++y) {
                        distributedEvaluation += (accumulationCounter[x][y] - exceptedNumber) *
-                                       (accumulationCounter[x][y] - exceptedNumber) / exceptedNumber;
+                                                                        (accumulationCounter[x][y] - exceptedNumber) / exceptedNumber;
                }
        }
 
-       float maximumDistributedEvaluation = (X_CELLS_NUMBER * Y_CELLS_NUMBER - 1) *
-                       exceptedNumber;
+       float maximumDistributedEvaluation = (X_CELLS_NUMBER * Y_CELLS_NUMBER - 1) * exceptedNumber;
 
-       maximumDistributedEvaluation += (numberOfKeypoints - exceptedNumber) *
-                       (numberOfKeypoints - exceptedNumber) / exceptedNumber;
+       maximumDistributedEvaluation +=
+                       (numberOfKeypoints - exceptedNumber) * (numberOfKeypoints - exceptedNumber) / exceptedNumber;
 
-       distributedEvaluation = 1 -
-                       (distributedEvaluation / maximumDistributedEvaluation);
+       distributedEvaluation = 1 - (distributedEvaluation / maximumDistributedEvaluation);
 
        /* Exponentiation to find an approximate confidence value based on the
         * number of key points on the image. */
index de887d5..86cec12 100644 (file)
 
 #include "ImageConfig.h"
 
-namespace MediaVision {
-namespace Image {
-FeaturesExtractingParams::FeaturesExtractingParams() :
-       mKeypointType(KT_INVALID),
-       mDescriptorType(DT_INVALID)
+namespace MediaVision
+{
+namespace Image
+{
+FeaturesExtractingParams::FeaturesExtractingParams() : mKeypointType(KT_INVALID), mDescriptorType(DT_INVALID)
 {
        ; /* NULL */
 }
 
-RecognitionParams::RecognitionParams(
-                                       int minMatchesNumber,
-                                       double requiredMatchesPart,
-                                       double tolerantMatchesPartError) :
-       mMinMatchesNumber(minMatchesNumber),
-       mRequiredMatchesPart(requiredMatchesPart),
-       mTolerantMatchesPartError(tolerantMatchesPartError)
+RecognitionParams::RecognitionParams(int minMatchesNumber, double requiredMatchesPart, double tolerantMatchesPartError)
+               : mMinMatchesNumber(minMatchesNumber)
+               , mRequiredMatchesPart(requiredMatchesPart)
+               , mTolerantMatchesPartError(tolerantMatchesPartError)
 {
        ; /* NULL */
 }
 
-RecognitionParams::RecognitionParams() :
-       mMinMatchesNumber(0),
-       mRequiredMatchesPart(1.0),
-       mTolerantMatchesPartError(0.0)
+RecognitionParams::RecognitionParams() : mMinMatchesNumber(0), mRequiredMatchesPart(1.0), mTolerantMatchesPartError(0.0)
 {
        ; /* NULL */
 }
 
-StabilizationParams::StabilizationParams(
-                                       bool isEnabled,
-                                       size_t historyAmount,
-                                       double tolerantShift,
-                                       double tolerantShiftExtra,
-                                       double stabilizationSpeed,
-                                       double stabilizationAcceleration) :
-       mIsEnabled(isEnabled),
-       mHistoryAmount(historyAmount),
-       mTolerantShift(tolerantShift),
-       mTolerantShiftExtra(tolerantShiftExtra),
-       mStabilizationSpeed(stabilizationSpeed),
-       mStabilizationAcceleration(stabilizationAcceleration)
+StabilizationParams::StabilizationParams(bool isEnabled, size_t historyAmount, double tolerantShift,
+                                                                                double tolerantShiftExtra, double stabilizationSpeed,
+                                                                                double stabilizationAcceleration)
+               : mIsEnabled(isEnabled)
+               , mHistoryAmount(historyAmount)
+               , mTolerantShift(tolerantShift)
+               , mTolerantShiftExtra(tolerantShiftExtra)
+               , mStabilizationSpeed(stabilizationSpeed)
+               , mStabilizationAcceleration(stabilizationAcceleration)
 {
        ; /* NULL */
 }
 
-StabilizationParams::StabilizationParams() :
-       mIsEnabled(false),
-       mHistoryAmount(1),
-       mTolerantShift(0.0),
-       mTolerantShiftExtra(0.0),
-       mStabilizationSpeed(0.0),
-       mStabilizationAcceleration(1.0)
+StabilizationParams::StabilizationParams()
+               : mIsEnabled(false)
+               , mHistoryAmount(1)
+               , mTolerantShift(0.0)
+               , mTolerantShiftExtra(0.0)
+               , mStabilizationSpeed(0.0)
+               , mStabilizationAcceleration(1.0)
 {
        ; /* NULL */
 }
 
-TrackingParams::TrackingParams(
-                                       FeaturesExtractingParams framesFeaturesExtractingParams,
-                                       RecognitionParams recognitionParams,
-                                       StabilizationParams stabilizationParams,
-                                       double expectedOffset) :
-       mFramesFeaturesExtractingParams(framesFeaturesExtractingParams),
-       mRecognitionParams(recognitionParams),
-       mStabilizationParams(stabilizationParams),
-       mExpectedOffset(expectedOffset)
+TrackingParams::TrackingParams(FeaturesExtractingParams framesFeaturesExtractingParams,
+                                                          RecognitionParams recognitionParams, StabilizationParams stabilizationParams,
+                                                          double expectedOffset)
+               : mFramesFeaturesExtractingParams(framesFeaturesExtractingParams)
+               , mRecognitionParams(recognitionParams)
+               , mStabilizationParams(stabilizationParams)
+               , mExpectedOffset(expectedOffset)
 {
        ; /* NULL */
 }
 
-TrackingParams::TrackingParams() :
-       mFramesFeaturesExtractingParams(),
-       mRecognitionParams(),
-       mStabilizationParams(),
-       mExpectedOffset(0.0)
+TrackingParams::TrackingParams()
+               : mFramesFeaturesExtractingParams(), mRecognitionParams(), mStabilizationParams(), mExpectedOffset(0.0)
 {
        ; /* NULL */
 }
index 8b53f9f..b6e08ce 100644 (file)
 
 #include "ImageMathUtil.h"
 
-namespace MediaVision {
-namespace Image {
-float getDistance(
-               const cv::Point2f& point1,
-               const cv::Point2f& point2)
+namespace MediaVision
 {
-       return sqrt(
-                       (point1.x - point2.x) * (point1.x - point2.x) +
-                       (point1.y - point2.y) * (point1.y - point2.y));
+namespace Image
+{
+float getDistance(const cv::Point2f &point1, const cv::Point2f &point2)
+{
+       return sqrt((point1.x - point2.x) * (point1.x - point2.x) + (point1.y - point2.y) * (point1.y - point2.y));
 }
 
-float getTriangleArea(
-               const cv::Point2f& point1,
-               const cv::Point2f& point2,
-               const cv::Point2f& point3)
+float getTriangleArea(const cv::Point2f &point1, const cv::Point2f &point2, const cv::Point2f &point3)
 {
        float distances[3];
 
@@ -40,10 +35,8 @@ float getTriangleArea(
 
        const float semiperimeter = (distances[0] + distances[1] + distances[2]) / 2.0f;
 
-       const float res2x = semiperimeter *
-                       (semiperimeter - distances[0]) *
-                       (semiperimeter - distances[1]) *
-                       (semiperimeter - distances[2]);
+       const float res2x = semiperimeter * (semiperimeter - distances[0]) * (semiperimeter - distances[1]) *
+                                               (semiperimeter - distances[2]);
 
        if (res2x < 0.f)
                return 0.f;
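
For reference, getTriangleArea() computes Heron's formula: with semiperimeter s = (a + b + c) / 2, area = sqrt(s * (s - a) * (s - b) * (s - c)). The res2x < 0 guard absorbs the small negative products that floating-point rounding can produce for degenerate (collinear) triangles.
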
@@ -53,13 +46,10 @@ float getTriangleArea(
 
 float getQuadrangleArea(const cv::Point2f points[NumberOfQuadrangleCorners])
 {
-       return getTriangleArea(points[0], points[1], points[2]) +
-                       getTriangleArea(points[0], points[3], points[2]);
+       return getTriangleArea(points[0], points[1], points[2]) + getTriangleArea(points[0], points[3], points[2]);
 }
 
-bool checkAccessory(
-               const cv::Point2f& point,
-               const std::vector<cv::Point2f>& region)
+bool checkAccessory(const cv::Point2f &point, const std::vector<cv::Point2f> &region)
 {
        if (region.size() < 3)
                return false;
@@ -69,16 +59,15 @@ bool checkAccessory(
 
        for (size_t i = 0u, j = numberOfContourPoints - 1; i < numberOfContourPoints; j = i++) {
                if (((region[i].y > point.y) != (region[j].y > point.y)) &&
-                               ((float) point.x < (float)
-                               (region[j].x - region[i].x) * (point.y - region[i].y) /
-                               (region[j].y - region[i].y) + region[i].x))
+                       ((float) point.x <
+                        (float) (region[j].x - region[i].x) * (point.y - region[i].y) / (region[j].y - region[i].y) + region[i].x))
                        insideFlag = !insideFlag;
        }
 
        return insideFlag;
 }
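
checkAccessory() is the classic even-odd ray-casting test: for each polygon edge, it toggles insideFlag when a horizontal ray from the query point crosses that edge, so an odd crossing count means the point is inside. A tiny example:

	std::vector<cv::Point2f> square = { { 0, 0 }, { 10, 0 }, { 10, 10 }, { 0, 10 } };
	checkAccessory(cv::Point2f(5, 5), square);  /* true: one crossing (the right edge) */
	checkAccessory(cv::Point2f(15, 5), square); /* false: zero crossings */
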
 
-void catRect(cv::Rect& rectange, const cv::Size& maxSize)
+void catRect(cv::Rect &rectange, const cv::Size &maxSize)
 {
        if (rectange.width < 0) {
                rectange.x += rectange.width;
@@ -115,9 +104,7 @@ void catRect(cv::Rect& rectange, const cv::Size& maxSize)
                rectange.height = maxSize.height - rectange.y;
 }
 
-std::vector<cv::Point2f> contourResize(
-               const std::vector<cv::Point2f>& roi,
-               float scalingCoefficient)
+std::vector<cv::Point2f> contourResize(const std::vector<cv::Point2f> &roi, float scalingCoefficient)
 {
        const size_t numberOfContourPoints = roi.size();
        cv::Point2f centre(0, 0);
index 79f99c9..a13d599 100644 (file)
 #include <unistd.h>
 #include <iomanip>
 
-namespace MediaVision {
-namespace Image {
-ImageObject::ImageObject() :
-               __features(),
-               __isEmpty(true),
-               __isLabeled(false),
-               __label(0)
+namespace MediaVision
+{
+namespace Image
+{
+ImageObject::ImageObject() : __features(), __isEmpty(true), __isLabeled(false), __label(0)
 {
        ; /* NULL */
 }
 
-ImageObject::ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params) :
-               __featureExtractingParams(),
-               __features(),
-               __isEmpty(true),
-               __isLabeled(false),
-               __label(0)
+ImageObject::ImageObject(const cv::Mat &image, const FeaturesExtractingParams &params)
+               : __featureExtractingParams(), __features(), __isEmpty(true), __isLabeled(false), __label(0)
 {
        fill(image, params);
 }
 
-ImageObject::ImageObject(const ImageObject& copy) :
-               __featureExtractingParams(copy.__featureExtractingParams),
-               __features(copy.__features),
-               __isEmpty(copy.__isEmpty),
-               __isLabeled(copy.__isLabeled),
-               __label(copy.__label),
-               __boundingContour(copy.__boundingContour)
+ImageObject::ImageObject(const ImageObject &copy)
+               : __featureExtractingParams(copy.__featureExtractingParams)
+               , __features(copy.__features)
+               , __isEmpty(copy.__isEmpty)
+               , __isLabeled(copy.__isLabeled)
+               , __label(copy.__label)
+               , __boundingContour(copy.__boundingContour)
 {
        ; /* NULL */
 }
 
-ImageObject& ImageObject::operator=(const ImageObject& copy)
+ImageObject &ImageObject::operator=(const ImageObject &copy)
 {
        if (this != &copy) {
                __isEmpty = copy.__isEmpty;
@@ -82,10 +76,8 @@ ImageObject::~ImageObject()
        ; /* NULL */
 }
 
-void ImageObject::fill(
-               const cv::Mat& image,
-               const FeaturesExtractingParams& params,
-               const std::vector<cv::Point2f>& roi)
+void ImageObject::fill(const cv::Mat &image, const FeaturesExtractingParams &params,
+                                          const std::vector<cv::Point2f> &roi)
 {
        __isEmpty = false;
 
@@ -119,27 +111,22 @@ float ImageObject::getRecognitionRate(void) const
        return __features.__recognitionRate;
 }
 
-void ImageObject::extractFeatures(
-               const cv::Mat& image,
-               const FeaturesExtractingParams& params,
-               const std::vector<cv::Point2f>& roi)
+void ImageObject::extractFeatures(const cv::Mat &image, const FeaturesExtractingParams &params,
+                                                                 const std::vector<cv::Point2f> &roi)
 {
        /* TODO: It is advisable to consider the distribution of functional */
 
        cv::Ptr<FeatureExtractor> extractor;
 
-       if (params.mKeypointType == KT_ORB &&
-                       params.mDescriptorType == DT_ORB) {
+       if (params.mKeypointType == KT_ORB && params.mDescriptorType == DT_ORB) {
                ORBExtractorFactory extractorFactory;
 
-               extractorFactory.setScaleFactor((float)params.ORB.mScaleFactor);
+               extractorFactory.setScaleFactor((float) params.ORB.mScaleFactor);
                extractorFactory.setMaximumFeaturesNumber(params.ORB.mMaximumFeaturesNumber);
 
                extractor = extractorFactory.buildFeatureExtractor();
        } else {
-               BasicExtractorFactory extractorFactory(
-                               params.mKeypointType,
-                               params.mDescriptorType);
+               BasicExtractorFactory extractorFactory(params.mKeypointType, params.mDescriptorType);
 
                extractor = extractorFactory.buildFeatureExtractor();
        }
@@ -150,11 +137,10 @@ void ImageObject::extractFeatures(
 
 bool ImageObject::isEmpty() const
 {
-       return (__features.__objectKeypoints.empty() ||
-                               __features.__objectDescriptors.empty());
+       return (__features.__objectKeypoints.empty() || __features.__objectDescriptors.empty());
 }
 
-void ImageObject::setContour(const std::vector<cv::Point2f>& contour)
+void ImageObject::setContour(const std::vector<cv::Point2f> &contour)
 {
        __boundingContour = contour;
 }
@@ -165,7 +151,7 @@ void ImageObject::setLabel(int label)
        __label = label;
 }
 
-bool ImageObject::getLabel(int& label) const
+bool ImageObject::getLabel(int &label) const
 {
        if (!__isLabeled) {
                LOGW("[%s] Image hasn't label.", __FUNCTION__);
@@ -228,7 +214,7 @@ int ImageObject::load(const char *fileName)
                return MEDIA_VISION_ERROR_PERMISSION_DENIED;
        }
 
-       in>>(*this);
+       in >> (*this);
 
        if (!in.good()) {
                /* TODO: Provide another error code */
@@ -242,7 +228,7 @@ int ImageObject::load(const char *fileName)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-std::ostream& operator << (std::ostream& os, const ImageObject& obj)
+std::ostream &operator<<(std::ostream &os, const ImageObject &obj)
 {
        os << std::setprecision(7);
 
@@ -275,20 +261,16 @@ std::ostream& operator << (std::ostream& os, const ImageObject& obj)
        os << sizeOfDescriptor << ' ';
        os << obj.__features.__objectDescriptors.type() << '\n';
 
-       for (int descriptorNum = 0; descriptorNum < numberOfDescriptors;
-                               ++descriptorNum, os << '\n') {
-               for (int featureNum = 0; featureNum < sizeOfDescriptor;
-                                       ++featureNum) {
-                               os << (int)obj.__features.__objectDescriptors.at<uchar>(
-                               descriptorNum,
-                               featureNum) << ' ';
+       for (int descriptorNum = 0; descriptorNum < numberOfDescriptors; ++descriptorNum, os << '\n') {
+               for (int featureNum = 0; featureNum < sizeOfDescriptor; ++featureNum) {
+                       os << (int) obj.__features.__objectDescriptors.at<uchar>(descriptorNum, featureNum) << ' ';
                }
        }
 
        return os;
 }
 
-std::istream& operator >> (std::istream& is, ImageObject& obj)
+std::istream &operator>>(std::istream &is, ImageObject &obj)
 {
        size_t numberOfContourPoints = 0u;
        size_t numberOfKeypoints = 0u;
@@ -298,8 +280,8 @@ std::istream& operator >> (std::istream& is, ImageObject& obj)
        ImageObject temporal;
 
 #define MEDIA_VISION_CHECK_IFSTREAM \
-       if (!is.good()) { \
-               return is; \
+       if (!is.good()) {               \
+               return is;                  \
        }
 
        is >> temporal.__isEmpty;
@@ -352,8 +334,7 @@ std::istream& operator >> (std::istream& is, ImageObject& obj)
                        is >> value;
                        MEDIA_VISION_CHECK_IFSTREAM
 
-                       temporal.__features.__objectDescriptors.at<uchar>(descriptorNum, featureNum) =
-                                       (uchar)value;
+                       temporal.__features.__objectDescriptors.at<uchar>(descriptorNum, featureNum) = (uchar) value;
                }
        }
 
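A round-trip sketch for the serialization built on these stream operators (illustrative; image and params are assumed inputs, the path is illustrative, and save() is assumed to mirror the load() shown above):

	MediaVision::Image::ImageObject object(image, params);
	object.setLabel(42);
	object.save("/tmp/object.mvimg");

	MediaVision::Image::ImageObject restored;
	restored.load("/tmp/object.mvimg");

	int label = 0;
	if (restored.getLabel(label))
		; /* label == 42 */
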
index a119009..603ebbe 100644 (file)
 
 #include "mv_private.h"
 
-namespace MediaVision {
-namespace Image {
-ImageRecognizer::ImageRecognizer(const ImageObject& scene) :
-               __scene(scene)
+namespace MediaVision
+{
+namespace Image
+{
+ImageRecognizer::ImageRecognizer(const ImageObject &scene) : __scene(scene)
 {
        ; /* NULL */
 }
@@ -33,11 +34,8 @@ ImageRecognizer::~ImageRecognizer()
        ; /* NULL */
 }
 
-bool ImageRecognizer::recognize(
-               const ImageObject& target,
-               const RecognitionParams& params,
-               std::vector<cv::Point2f>& contour,
-               float ignoreFactor) const
+bool ImageRecognizer::recognize(const ImageObject &target, const RecognitionParams &params,
+                                                               std::vector<cv::Point2f> &contour, float ignoreFactor) const
 {
        cv::Mat homophraphyMatrix;
 
@@ -53,7 +51,7 @@ bool ImageRecognizer::recognize(
                return false;
        }
 
-       if(!findHomophraphyMatrix(target, params, homophraphyMatrix, ignoreFactor)) {
+       if (!findHomophraphyMatrix(target, params, homophraphyMatrix, ignoreFactor)) {
                LOGE("[%s] Can't match the features.", __FUNCTION__);
                return false;
        }
@@ -72,18 +70,12 @@ bool ImageRecognizer::recognize(
        return true;
 }
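
Call-side sketch for recognize() (illustrative; target, sceneImage, and the parameter objects are assumed, and ignoreFactor is passed explicitly since no default is visible in this hunk):

	MediaVision::Image::ImageObject scene(sceneImage, extractionParams);
	MediaVision::Image::ImageRecognizer recognizer(scene);

	std::vector<cv::Point2f> contour;
	if (recognizer.recognize(target, recognitionParams, contour, 0.f))
		; /* contour holds the target's projected bounding contour in scene coordinates */
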
 
-bool ImageRecognizer::findHomophraphyMatrix(
-               const ImageObject& target,
-               const RecognitionParams& params,
-               cv::Mat& homophraphyMatrix,
-               float ignoreFactor) const
+bool ImageRecognizer::findHomophraphyMatrix(const ImageObject &target, const RecognitionParams &params,
+                                                                                       cv::Mat &homophraphyMatrix, float ignoreFactor) const
 {
        std::vector<cv::DMatch> matches;
 
-       __matcher.match(
-                       target.__features.__objectDescriptors,
-                       __scene.__features.__objectDescriptors,
-                       matches);
+       __matcher.match(target.__features.__objectDescriptors, __scene.__features.__objectDescriptors, matches);
 
        size_t matchesNumber = matches.size();
 
@@ -92,39 +84,29 @@ bool ImageRecognizer::findHomophraphyMatrix(
                return false;
        }
 
-       size_t requiredMatchesNumber =
-                       params.mRequiredMatchesPart * matchesNumber;
-
-       size_t allowableMatchesNumberError =
-                       params.mTolerantMatchesPartError * requiredMatchesNumber;
-
-       if (matchesNumber - allowableMatchesNumberError >
-                       (size_t)params.mMinMatchesNumber &&
-                       requiredMatchesNumber + allowableMatchesNumberError <
-                       matchesNumber) {
-               if (requiredMatchesNumber - allowableMatchesNumberError <
-                               (size_t)params.mMinMatchesNumber) {
-                       if (requiredMatchesNumber + allowableMatchesNumberError >
-                                       (size_t)params.mMinMatchesNumber) {
-                               requiredMatchesNumber = ((size_t)params.mMinMatchesNumber +
-                                               requiredMatchesNumber + allowableMatchesNumberError) / 2;
-
-                               allowableMatchesNumberError = requiredMatchesNumber-
-                                               (size_t)params.mMinMatchesNumber +
-                                               allowableMatchesNumberError;
+       size_t requiredMatchesNumber = params.mRequiredMatchesPart * matchesNumber;
+
+       size_t allowableMatchesNumberError = params.mTolerantMatchesPartError * requiredMatchesNumber;
+
+       if (matchesNumber - allowableMatchesNumberError > (size_t) params.mMinMatchesNumber &&
+               requiredMatchesNumber + allowableMatchesNumberError < matchesNumber) {
+               if (requiredMatchesNumber - allowableMatchesNumberError < (size_t) params.mMinMatchesNumber) {
+                       if (requiredMatchesNumber + allowableMatchesNumberError > (size_t) params.mMinMatchesNumber) {
+                               requiredMatchesNumber =
+                                               ((size_t) params.mMinMatchesNumber + requiredMatchesNumber + allowableMatchesNumberError) / 2;
+
+                               allowableMatchesNumberError =
+                                               requiredMatchesNumber - (size_t) params.mMinMatchesNumber + allowableMatchesNumberError;
                        } else {
                                const size_t minimalAllowableMatchesNumberError = 2u;
 
-                               requiredMatchesNumber = params.mMinMatchesNumber +
-                                               minimalAllowableMatchesNumberError;
+                               requiredMatchesNumber = params.mMinMatchesNumber + minimalAllowableMatchesNumberError;
 
                                allowableMatchesNumberError = minimalAllowableMatchesNumberError;
                        }
                }
 
-               const size_t filterAmount = matchesSelection(matches,
-                                                                                                       requiredMatchesNumber,
-                                                                                                       allowableMatchesNumberError);
+               const size_t filterAmount = matchesSelection(matches, requiredMatchesNumber, allowableMatchesNumberError);
 
                if (filterAmount >= MinimumNumberOfFeatures)
                        matches.resize(filterAmount);
@@ -138,17 +120,13 @@ bool ImageRecognizer::findHomophraphyMatrix(
        std::vector<cv::Point2f> scenePoints(matchesNumber);
 
        for (size_t matchIdx = 0; matchIdx < matchesNumber; ++matchIdx) {
-               objectPoints[matchIdx] =
-                               target.__features.__objectKeypoints[matches[matchIdx].queryIdx].pt;
+               objectPoints[matchIdx] = target.__features.__objectKeypoints[matches[matchIdx].queryIdx].pt;
 
-               scenePoints[matchIdx] =
-                               __scene.__features.__objectKeypoints[matches[matchIdx].trainIdx].pt;
+               scenePoints[matchIdx] = __scene.__features.__objectKeypoints[matches[matchIdx].trainIdx].pt;
        }
 
        if (ignoreFactor > FLT_EPSILON) {
-               const std::vector<cv::Point2f> significantArea = contourResize(
-                               target.__boundingContour,
-                               ignoreFactor);
+               const std::vector<cv::Point2f> significantArea = contourResize(target.__boundingContour, ignoreFactor);
 
                for (size_t matchIdx = 0; matchIdx < objectPoints.size(); ++matchIdx) {
                        if (!checkAccessory(objectPoints[matchIdx], significantArea)) {
@@ -167,9 +145,8 @@ bool ImageRecognizer::findHomophraphyMatrix(
        return true;
 }
 
-size_t ImageRecognizer::matchesSelection(
-               std::vector<cv::DMatch>& examples,
-               unsigned int filterAmount, unsigned int allowableError) const
+size_t ImageRecognizer::matchesSelection(std::vector<cv::DMatch> &examples, unsigned int filterAmount,
+                                                                                unsigned int allowableError) const
 {
        size_t sizeOfExamples = examples.size();
 
@@ -188,25 +165,22 @@ size_t ImageRecognizer::matchesSelection(
 
        while (true) {
                if (leftLimit >= rightLimit) {
-                       if (leftLimit < (requiredNumber - (int)allowableError))
-                               leftLimit = requiredNumber + (int)allowableError;
+                       if (leftLimit < (requiredNumber - (int) allowableError))
+                               leftLimit = requiredNumber + (int) allowableError;
 
                        break;
                }
 
-               supportElement = computeLinearSupportElement(examples, requiredNumber,
-                                       leftLimit, rightLimit);
+               supportElement = computeLinearSupportElement(examples, requiredNumber, leftLimit, rightLimit);
 
                /* Iteration similar quicksort */
                while (true) {
                        /* Search the leftmost element which have bigger confidence than support element */
-                       while (examples[leftLimit].distance <= supportElement &&
-                                       leftLimit < startRightLimit)
+                       while (examples[leftLimit].distance <= supportElement && leftLimit < startRightLimit)
                                ++leftLimit;
 
                        /* Search the rightmost element which have smaller confidence than support element */
-                       while (examples[rightLimit].distance >= supportElement &&
-                                       rightLimit >= startLeftLimit)
+                       while (examples[rightLimit].distance >= supportElement && rightLimit >= startLeftLimit)
                                --rightLimit;
 
                        if (leftLimit >= rightLimit)
@@ -219,7 +193,7 @@ size_t ImageRecognizer::matchesSelection(
                if (std::abs(static_cast<int>(filterAmount - leftLimit)) <= static_cast<int>(allowableError))
                        break;
 
-               if ((int)filterAmount > leftLimit) {
+               if ((int) filterAmount > leftLimit) {
                        requiredNumber -= leftLimit - startLeftLimit;
 
                        rightLimit = startRightLimit;
@@ -230,11 +204,11 @@ size_t ImageRecognizer::matchesSelection(
                }
        }
 
-       return (size_t)leftLimit;
+       return (size_t) leftLimit;
 }
 
-float ImageRecognizer::computeLinearSupportElement(const std::vector<cv::DMatch>& examples,
-               int requiredNumber, int leftLimit, int rightLimit) const
+float ImageRecognizer::computeLinearSupportElement(const std::vector<cv::DMatch> &examples, int requiredNumber,
+                                                                                                  int leftLimit, int rightLimit) const
 {
        int sizeOfExamples = rightLimit - leftLimit + 1;
 
@@ -263,22 +237,21 @@ float ImageRecognizer::computeLinearSupportElement(const std::vector<cv::DMatch>
        return k * requiredNumber + b;
 }
 
-bool ImageRecognizer::isPossibleQuadrangleCorners(
-               const cv::Point2f corners[NumberOfQuadrangleCorners])
+bool ImageRecognizer::isPossibleQuadrangleCorners(const cv::Point2f corners[NumberOfQuadrangleCorners])
 {
        static const float __EPSILON = 0.1f;
 
        /* TODO: move the __MIN_SIZE_OF_DETECTED_AREA out of the ImageRecognizer */
        static const float __MIN_SIZE_OF_DETECTED_AREA = 64.f;
 
-       const float firstSemiArea = getTriangleArea(corners[0], corners[2], corners[1]) +
-                       getTriangleArea(corners[0], corners[2], corners[3]);
+       const float firstSemiArea =
+                       getTriangleArea(corners[0], corners[2], corners[1]) + getTriangleArea(corners[0], corners[2], corners[3]);
 
-       const float secondSemiArea = getTriangleArea(corners[1], corners[3], corners[2]) +
-                       getTriangleArea(corners[1], corners[3], corners[0]);
+       const float secondSemiArea =
+                       getTriangleArea(corners[1], corners[3], corners[2]) + getTriangleArea(corners[1], corners[3], corners[0]);
 
        if (__EPSILON < fabs(firstSemiArea - secondSemiArea) ||
-                       __MIN_SIZE_OF_DETECTED_AREA > (firstSemiArea + secondSemiArea))
+               __MIN_SIZE_OF_DETECTED_AREA > (firstSemiArea + secondSemiArea))
                return false;
 
        return true;
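
Note: the matchesSelection() loop above is in essence a quickselect over cv::DMatch::distance with a tolerance band around the requested amount. Ignoring that band, the same partition can be sketched with std::nth_element; the helper below is a hypothetical illustration, not part of this patch.

	#include <algorithm>
	#include <vector>
	#include <opencv2/core.hpp>

	/* Partition 'matches' so the 'filterAmount' smallest-distance entries come first. */
	static size_t selectBestMatches(std::vector<cv::DMatch> &matches, size_t filterAmount)
	{
		filterAmount = std::min(filterAmount, matches.size());
		std::nth_element(matches.begin(), matches.begin() + filterAmount, matches.end(),
						 [](const cv::DMatch &a, const cv::DMatch &b) { return a.distance < b.distance; });
		return filterAmount; /* the caller may then matches.resize(filterAmount) */
	}
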
index 427814d..c4522b7 100644
 
 #include <new>
 
-namespace MediaVision {
-namespace Image {
-
-AsyncTracker::AsyncTracker(const AsyncTracker& copy) :
-               __baseTracker(copy.__baseTracker.get()->clone()),
-               __result(copy.__result),
-               __isRun(false),
-               __isUpdated(copy.__isUpdated),
-               __copyingPolicy(copy.__copyingPolicy),
-               __mvThread(0)
+namespace MediaVision
+{
+namespace Image
+{
+AsyncTracker::AsyncTracker(const AsyncTracker &copy)
+               : __baseTracker(copy.__baseTracker.get()->clone())
+               , __result(copy.__result)
+               , __isRun(false)
+               , __isUpdated(copy.__isUpdated)
+               , __copyingPolicy(copy.__copyingPolicy)
+               , __mvThread(0)
 {
        pthread_mutex_init(&__globalGuard, NULL);
        pthread_spin_init(&__resultGuard, PTHREAD_PROCESS_SHARED);
@@ -35,15 +36,13 @@ AsyncTracker::AsyncTracker(const AsyncTracker& copy) :
        pthread_spin_init(&__isUpdatedGuard, PTHREAD_PROCESS_SHARED);
 }
 
-AsyncTracker::AsyncTracker(
-               cv::Ptr<ObjectTracker> baseTracker,
-               bool copyingPolicy) :
-               __baseTracker(baseTracker),
-               __result(),
-               __isRun(false),
-               __isUpdated(false),
-               __copyingPolicy(copyingPolicy),
-               __mvThread(0)
+AsyncTracker::AsyncTracker(cv::Ptr<ObjectTracker> baseTracker, bool copyingPolicy)
+               : __baseTracker(baseTracker)
+               , __result()
+               , __isRun(false)
+               , __isUpdated(false)
+               , __copyingPolicy(copyingPolicy)
+               , __mvThread(0)
 {
        pthread_mutex_init(&__globalGuard, NULL);
        pthread_spin_init(&__resultGuard, PTHREAD_PROCESS_SHARED);
@@ -53,7 +52,7 @@ AsyncTracker::AsyncTracker(
 
 AsyncTracker::~AsyncTracker()
 {
-       if(isRun()) {
+       if (isRun()) {
                pthread_mutex_lock(&__globalGuard);
                pthread_mutex_unlock(&__globalGuard);
        }
@@ -64,9 +63,7 @@ AsyncTracker::~AsyncTracker()
        pthread_spin_destroy(&__isUpdatedGuard);
 }
 
-bool AsyncTracker::track(
-               const cv::Mat& frame,
-               std::vector<cv::Point>& result)
+bool AsyncTracker::track(const cv::Mat &frame, std::vector<cv::Point> &result)
 {
        while (pthread_mutex_trylock(&__globalGuard) != 0)
                return getResult(result);
@@ -96,7 +93,7 @@ bool AsyncTracker::track(
        return getResult(result);
 }
 
-void AsyncTracker::reinforcement(const std::vector<cv::Point>& location)
+void AsyncTracker::reinforcement(const std::vector<cv::Point> &location)
 {
        /* TODO: Unsafe. Need to redesign. */
        __baseTracker->reinforcement(location);
@@ -108,17 +105,17 @@ void AsyncTracker::reinforcement(const std::vector<cv::Point>& location)
 
 cv::Ptr<ObjectTracker> AsyncTracker::clone() const
 {
-       return cv::Ptr<ObjectTracker>(new (std::nothrow)AsyncTracker(*this));
+       return cv::Ptr<ObjectTracker>(new (std::nothrow) AsyncTracker(*this));
 }
 
-bool AsyncTracker::baseTrack(std::vector<cv::Point>& result)
+bool AsyncTracker::baseTrack(std::vector<cv::Point> &result)
 {
        return __baseTracker->track(__frame, result);
 }
 
 void *AsyncTracker::asyncTrack(void *data)
 {
-       AsyncTracker *tracker = reinterpret_cast<AsyncTracker*>(data);
+       AsyncTracker *tracker = reinterpret_cast<AsyncTracker *>(data);
 
        std::vector<cv::Point> result;
        tracker->baseTrack(result);
@@ -142,7 +139,7 @@ void *AsyncTracker::asyncTrack(void *data)
 
 bool AsyncTracker::wait()
 {
-       if(isRun()) {
+       if (isRun()) {
                pthread_mutex_lock(&__globalGuard);
                pthread_mutex_unlock(&__globalGuard);
                return true;
@@ -161,7 +158,7 @@ bool AsyncTracker::isRun()
        return result;
 }
 
-bool AsyncTracker::isUpdated(std::vector<cv::Point>& result)
+bool AsyncTracker::isUpdated(std::vector<cv::Point> &result)
 {
        bool isUpdated = false;
 
@@ -175,7 +172,7 @@ bool AsyncTracker::isUpdated(std::vector<cv::Point>& result)
        return isUpdated;
 }
 
-bool AsyncTracker::getResult(std::vector<cv::Point>& result)
+bool AsyncTracker::getResult(std::vector<cv::Point> &result)
 {
        bool isTracked = false;
 
index a60bbb2..2db13af 100644
 #include "ImageMathUtil.h"
 #include <new>
 
-namespace MediaVision {
-namespace Image {
-
-CascadeTracker::CascadeTracker(float minimumArea) :
-               __trackers(),
-               __minimumArea(minimumArea)
+namespace MediaVision
+{
+namespace Image
+{
+CascadeTracker::CascadeTracker(float minimumArea) : __trackers(), __minimumArea(minimumArea)
 {
        ; /* NULL */
 }
 
-CascadeTracker::CascadeTracker(const CascadeTracker& copy) :
-               __trackers(),
-               __minimumArea(copy.__minimumArea)
+CascadeTracker::CascadeTracker(const CascadeTracker &copy) : __trackers(), __minimumArea(copy.__minimumArea)
 {
        *this = copy;
 }
@@ -42,7 +39,7 @@ CascadeTracker::~CascadeTracker()
        ; /* NULL */
 }
 
-bool CascadeTracker::track(const cv::Mat& frame, std::vector<cv::Point>& result)
+bool CascadeTracker::track(const cv::Mat &frame, std::vector<cv::Point> &result)
 {
        internalReinforcement();
 
@@ -55,7 +52,7 @@ bool CascadeTracker::track(const cv::Mat& frame, std::vector<cv::Point>& result)
        return mergeResults(result);
 }
 
-void CascadeTracker::reinforcement(const std::vector<cv::Point>& location)
+void CascadeTracker::reinforcement(const std::vector<cv::Point> &location)
 {
        std::set<TrackerInfo>::iterator it = __trackers.begin();
 
@@ -65,10 +62,10 @@ void CascadeTracker::reinforcement(const std::vector<cv::Point>& location)
 
 cv::Ptr<ObjectTracker> CascadeTracker::clone() const
 {
-       return cv::Ptr<ObjectTracker>(new (std::nothrow)CascadeTracker(*this));
+       return cv::Ptr<ObjectTracker>(new (std::nothrow) CascadeTracker(*this));
 }
 
-CascadeTracker& CascadeTracker::operator=(const CascadeTracker& copy)
+CascadeTracker &CascadeTracker::operator=(const CascadeTracker &copy)
 {
        if (this != &copy) {
                this->__minimumArea = copy.__minimumArea;
@@ -90,8 +87,7 @@ bool CascadeTracker::enableTracker(cv::Ptr<ObjectTracker> tracker, float priorit
 {
        TrackerInfo temp(tracker, priority);
 
-       std::set<TrackerInfo>::iterator it =
-                       std::find(__trackers.begin(), __trackers.end(), temp);
+       std::set<TrackerInfo>::iterator it = std::find(__trackers.begin(), __trackers.end(), temp);
 
        if (it != __trackers.end())
                __trackers.erase(it);
@@ -103,8 +99,7 @@ bool CascadeTracker::disableTracker(cv::Ptr<ObjectTracker> tracker)
 {
        TrackerInfo target(tracker, 0);
 
-       std::set<TrackerInfo>::iterator it =
-                       std::find(__trackers.begin(), __trackers.end(), target);
+       std::set<TrackerInfo>::iterator it = std::find(__trackers.begin(), __trackers.end(), target);
 
        if (it == __trackers.end())
                return false;
@@ -121,7 +116,7 @@ void CascadeTracker::internalReinforcement()
                bool isUpdated = true;
 
                /* TODO: Redesign without dynamic_cast */
-               AsyncTracker *asyncView = dynamic_cast<AsyncTracker*>(it1->mTracker.get());
+               AsyncTracker *asyncView = dynamic_cast<AsyncTracker *>(it1->mTracker.get());
                if (NULL != asyncView)
                        isUpdated = asyncView->isUpdated(it1->mResult);
 
@@ -143,12 +138,12 @@ void CascadeTracker::internalReinforcement()
 
                        for (; it2 != __trackers.end(); ++it2)
                                if (it1 != it2 && priority > it2->mPriority)
-                                        it2->mTracker.get()->reinforcement(it1->mResult);
+                                       it2->mTracker.get()->reinforcement(it1->mResult);
                }
        }
 }
 
-bool CascadeTracker::mergeResults(std::vector<cv::Point>& result) const
+bool CascadeTracker::mergeResults(std::vector<cv::Point> &result) const
 {
        result.clear();
 
@@ -165,25 +160,23 @@ bool CascadeTracker::mergeResults(std::vector<cv::Point>& result) const
        return !(result.empty());
 }
 
-CascadeTracker::TrackerInfo::TrackerInfo(cv::Ptr<ObjectTracker> tracker, float priority) :
-               mTracker(tracker),
-               mPriority(priority),
-               mResult()
+CascadeTracker::TrackerInfo::TrackerInfo(cv::Ptr<ObjectTracker> tracker, float priority)
+               : mTracker(tracker), mPriority(priority), mResult()
 {
        ; /* NULL */
 }
 
-bool CascadeTracker::TrackerInfo::operator<(const TrackerInfo& second) const
+bool CascadeTracker::TrackerInfo::operator<(const TrackerInfo &second) const
 {
        return (this->mPriority < second.mPriority);
 }
 
-bool CascadeTracker::TrackerInfo::operator==(const TrackerInfo& second) const
+bool CascadeTracker::TrackerInfo::operator==(const TrackerInfo &second) const
 {
        return (this->mTracker == second.mTracker);
 }
 
-bool CascadeTracker::TrackerInfo::operator!=(const TrackerInfo& second) const
+bool CascadeTracker::TrackerInfo::operator!=(const TrackerInfo &second) const
 {
        return !(*this == second);
 }
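
Note: TrackerInfo::operator< compares only mPriority, so the std::set<TrackerInfo> inside CascadeTracker iterates trackers in ascending priority order, while operator== (cv::Ptr identity) is what enableTracker()/disableTracker() use through std::find. A minimal illustration with hypothetical types (not part of this patch); with set semantics, two entries of equal priority are equivalent and only one is kept:

	#include <cassert>
	#include <set>

	struct Info {
		int id;
		float priority;
		bool operator<(const Info &other) const
		{
			return priority < other.priority;
		}
	};

	int main()
	{
		std::set<Info> trackers = { { 1, 0.8f }, { 2, 0.2f }, { 3, 0.5f } };
		/* Iteration is ascending by priority: id 2, then 3, then 1. */
		assert(trackers.begin()->id == 2);
		return 0;
	}
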
index b6e10fc..6b8e39c 100644
 
 #include <new>
 
-namespace MediaVision {
-namespace Image {
-
-FeatureSubstitutionTracker::FeatureSubstitutionTracker(
-               const FeaturesExtractingParams& featuresExtractingParams,
-               const RecognitionParams& recognitionParams,
-               float expectedOffset,
-               float sceneScalingFactor,
-               float objectScalingFactor) :
-                               __isInit(false),
-                               __target(),
-                               __location(),
-                               __featureExtractingParams(featuresExtractingParams),
-                               __recogParams(recognitionParams),
-                               __expectedOffset(expectedOffset),
-                               __sceneScalingFactor(sceneScalingFactor),
-                               __objectScalingFactor(objectScalingFactor)
+namespace MediaVision
+{
+namespace Image
+{
+FeatureSubstitutionTracker::FeatureSubstitutionTracker(const FeaturesExtractingParams &featuresExtractingParams,
+                                                                                                          const RecognitionParams &recognitionParams, float expectedOffset,
+                                                                                                          float sceneScalingFactor, float objectScalingFactor)
+               : __isInit(false)
+               , __target()
+               , __location()
+               , __featureExtractingParams(featuresExtractingParams)
+               , __recogParams(recognitionParams)
+               , __expectedOffset(expectedOffset)
+               , __sceneScalingFactor(sceneScalingFactor)
+               , __objectScalingFactor(objectScalingFactor)
 {
        ; /* NULL */
 }
 
-bool FeatureSubstitutionTracker::track(
-               const cv::Mat& frame,
-               std::vector<cv::Point>& result)
+bool FeatureSubstitutionTracker::track(const cv::Mat &frame, std::vector<cv::Point> &result)
 {
        std::vector<cv::Point2f> contour;
        size_t numberOfContourPoints = __location.size();
@@ -57,14 +53,11 @@ bool FeatureSubstitutionTracker::track(
                if (__location.empty()) {
                        return false;
                } else {
-                       __target = new (std::nothrow)ImageObject;
+                       __target = new (std::nothrow) ImageObject;
                        if (__target == NULL)
                                return false;
 
-                       __target->fill(
-                                       frame,
-                                       __featureExtractingParams,
-                                       contourResize(contour, __objectScalingFactor));
+                       __target->fill(frame, __featureExtractingParams, contourResize(contour, __objectScalingFactor));
                        __target->setContour(contour);
                        __isInit = true;
                        result = __location;
@@ -72,7 +65,7 @@ bool FeatureSubstitutionTracker::track(
                }
        }
 
-       cv::Ptr<ImageObject> sceneImageObject = new (std::nothrow)ImageObject;
+       cv::Ptr<ImageObject> sceneImageObject = new (std::nothrow) ImageObject;
        if (sceneImageObject == NULL)
                return false;
 
@@ -80,19 +73,14 @@ bool FeatureSubstitutionTracker::track(
 
        ImageRecognizer recognizer(*sceneImageObject.get());
 
-       const bool isTracked =
-                       recognizer.recognize(
-                                       *(__target.get()),
-                                       __recogParams,
-                                       contour,
-                                       __objectScalingFactor);
+       const bool isTracked = recognizer.recognize(*(__target.get()), __recogParams, contour, __objectScalingFactor);
 
        if (isTracked) {
                numberOfContourPoints = contour.size();
                __location.resize(numberOfContourPoints);
                for (size_t i = 0u; i < numberOfContourPoints; ++i) {
-                       __location[i].x = (int)contour[i].x;
-                       __location[i].y = (int)contour[i].y;
+                       __location[i].x = (int) contour[i].x;
+                       __location[i].y = (int) contour[i].y;
                }
 
                result = __location;
@@ -106,7 +94,7 @@ bool FeatureSubstitutionTracker::track(
        return isTracked;
 }
 
-void FeatureSubstitutionTracker::reinforcement(const std::vector<cv::Point>& location)
+void FeatureSubstitutionTracker::reinforcement(const std::vector<cv::Point> &location)
 {
        __isInit = false;
 
@@ -120,7 +108,7 @@ void FeatureSubstitutionTracker::reinforcement(const std::vector<cv::Point>& loc
 
 cv::Ptr<ObjectTracker> FeatureSubstitutionTracker::clone() const
 {
-       return cv::Ptr<ObjectTracker>(new (std::nothrow)FeatureSubstitutionTracker(*this));
+       return cv::Ptr<ObjectTracker>(new (std::nothrow) FeatureSubstitutionTracker(*this));
 }
 
 std::vector<cv::Point2f> FeatureSubstitutionTracker::computeExpectedArea()
index 87a6c20..4651eb3 100644
 
 #include "mv_private.h"
 
-namespace MediaVision {
-namespace Image {
-
-ImageContourStabilizator::ImageContourStabilizator() :
-               __movingHistory(),
-               __priorities()
+namespace MediaVision
+{
+namespace Image
+{
+ImageContourStabilizator::ImageContourStabilizator() : __movingHistory(), __priorities()
 {
        reset();
 }
@@ -41,9 +40,8 @@ void ImageContourStabilizator::reset(void)
        __movingHistory.clear();
 }
 
-ImageContourStabilizator::StabilizationError ImageContourStabilizator::stabilize(
-               std::vector<cv::Point2f>& contour,
-               const StabilizationParams& params)
+ImageContourStabilizator::StabilizationError ImageContourStabilizator::stabilize(std::vector<cv::Point2f> &contour,
+                                                                                                                                                                const StabilizationParams &params)
 {
        if (!updateSettings(params)) {
                LOGW("Not stabilized. Invalid settings.");
@@ -112,8 +110,7 @@ ImageContourStabilizator::StabilizationError ImageContourStabilizator::stabilize
                stabilizedState = __lastStabilizedContour;
        }
 
-       const float tolerantShift = getQuadrangleArea(contour.data()) *
-                               __tolerantShift + __tolerantShiftExtra;
+       const float tolerantShift = getQuadrangleArea(contour.data()) * __tolerantShift + __tolerantShiftExtra;
 
        const size_t contourSize = stabilizedState.size();
        for (size_t i = 0u; i < contourSize; ++i) {
@@ -143,13 +140,13 @@ ImageContourStabilizator::StabilizationError ImageContourStabilizator::stabilize
        return Successfully;
 }
 
-bool ImageContourStabilizator::updateSettings(const StabilizationParams& params)
+bool ImageContourStabilizator::updateSettings(const StabilizationParams &params)
 {
        if (params.mHistoryAmount < 1)
                return false;
 
-       __tolerantShift = (float)params.mTolerantShift;
-       __tolerantShiftExtra = (float)params.mTolerantShiftExtra;
+       __tolerantShift = (float) params.mTolerantShift;
+       __tolerantShiftExtra = (float) params.mTolerantShiftExtra;
 
        if (__historyAmount != params.mHistoryAmount) {
                __historyAmount = params.mHistoryAmount;
@@ -159,8 +156,7 @@ bool ImageContourStabilizator::updateSettings(const StabilizationParams& params)
                /* calculation of priorities for positions in the moving history */
                for (size_t i = 0u; i < __historyAmount; ++i) {
                        /* linear dependence on the elapsed time */
-                       __priorities[i] = ((i + 1) * 2.0f) /
-                                               ((__historyAmount + 1) * __historyAmount);
+                       __priorities[i] = ((i + 1) * 2.0f) / ((__historyAmount + 1) * __historyAmount);
                }
        }
 
@@ -181,8 +177,7 @@ bool ImageContourStabilizator::updateSettings(const StabilizationParams& params)
        if (__speeds.size() > 1) {
                const static float __EPSILON = 0.0001f;
                if (fabs(__speeds[0] - params.mStabilizationSpeed) < __EPSILON &&
-                               fabs((__speeds[1] - __speeds[0]) -
-                               params.mStabilizationAcceleration) < __EPSILON) {
+                       fabs((__speeds[1] - __speeds[0]) - params.mStabilizationAcceleration) < __EPSILON) {
                        speedIsValid = true;
                }
        }
@@ -190,8 +185,7 @@ bool ImageContourStabilizator::updateSettings(const StabilizationParams& params)
        if (!speedIsValid) {
                __speeds.clear();
 
-               int speedsSize = (int)((1 - params.mStabilizationSpeed) /
-                                       params.mStabilizationAcceleration) + 1;
+               int speedsSize = (int) ((1 - params.mStabilizationSpeed) / params.mStabilizationAcceleration) + 1;
 
                if (speedsSize < 1) {
                        __speeds.push_back(1.0f);
@@ -201,8 +195,7 @@ bool ImageContourStabilizator::updateSettings(const StabilizationParams& params)
                        if (speedsSize > MAX_SPEED_SIZE)
                                speedsSize = MAX_SPEED_SIZE;
 
-                       float speed = std::max(0.f,
-                                       std::min((float)params.mStabilizationSpeed, 1.0f));
+                       float speed = std::max(0.f, std::min((float) params.mStabilizationSpeed, 1.0f));
 
                        for (int i = 0; i < speedsSize; ++i) {
                                __speeds.push_back(speed);
@@ -217,12 +210,10 @@ bool ImageContourStabilizator::updateSettings(const StabilizationParams& params)
 std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleContour(void)
 {
        /* final contour */
-       std::vector<cv::Point2f> stabilizedState(
-                               NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f));
+       std::vector<cv::Point2f> stabilizedState(NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f));
 
        /* calculation the direction of contour corners to a new location */
-       std::vector<cv::Point2f> directions(
-                               NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f));
+       std::vector<cv::Point2f> directions(NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f));
 
        /* computing expected directions and outliers searching */
        bool expressiveTime = false;
@@ -235,13 +226,11 @@ std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleCo
                cv::Point2f averageDirections(0.f, 0.f);
 
                for (size_t i = 0u; i < __historyAmount - 1; ++i) {
-                       averageDirections.x += (trackDirections[i].x =
-                                       __movingHistory[i+1][j].x - __movingHistory[i][j].x) /
-                                       (__historyAmount - 1);
+                       averageDirections.x += (trackDirections[i].x = __movingHistory[i + 1][j].x - __movingHistory[i][j].x) /
+                                                                  (__historyAmount - 1);
 
-                       averageDirections.y += (trackDirections[i].y =
-                                       __movingHistory[i+1][j].y - __movingHistory[i][j].y) /
-                                       (__historyAmount - 1);
+                       averageDirections.y += (trackDirections[i].y = __movingHistory[i + 1][j].y - __movingHistory[i][j].y) /
+                                                                  (__historyAmount - 1);
                }
 
                /* calculation a deviations and select outlier */
@@ -250,9 +239,7 @@ std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleCo
                int idxWithMaxDistance = 0;
                int numExpressiveDirection = -1;
                for (size_t i = 0u; i < __historyAmount - 1; ++i) {
-                       directionDistances[i] = getDistance(
-                                       trackDirections[i],
-                                       averageDirections);
+                       directionDistances[i] = getDistance(trackDirections[i], averageDirections);
 
                        if (directionDistances[i] > prevMaxDistance) {
                                if (directionDistances[i] > maxDistance) {
@@ -275,28 +262,24 @@ std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleCo
                /* final direction computing */
                float summPriority = 0.f;
                for (size_t i = 0u; i < __historyAmount - 1; ++i) {
-                       if ((int)i != numExpressiveDirection) {
+                       if ((int) i != numExpressiveDirection) {
                                directions[j].x += trackDirections[i].x * __priorities[i];
                                directions[j].y += trackDirections[i].y * __priorities[i];
                                summPriority += __priorities[i];
                        }
                }
 
-               if (numExpressiveDirection == (int)(__historyAmount - 1))
+               if (numExpressiveDirection == (int) (__historyAmount - 1))
                        expressiveTime = true;
 
                summPriorityWithoutToLastPos[j] = summPriority;
                priorityToLastPos[j] = __priorities[__historyAmount - 1];
 
                directions[j].x -= directionsToLastPos[j].x =
-                                       (__lastStabilizedContour[j].x -
-                                       __movingHistory[__historyAmount - 1][j].x) *
-                                       priorityToLastPos[j];
+                               (__lastStabilizedContour[j].x - __movingHistory[__historyAmount - 1][j].x) * priorityToLastPos[j];
 
                directions[j].y -= directionsToLastPos[j].y =
-                                       (__lastStabilizedContour[j].y -
-                                       __movingHistory[__historyAmount - 1][j].y) *
-                                       priorityToLastPos[j];
+                               (__lastStabilizedContour[j].y - __movingHistory[__historyAmount - 1][j].y) * priorityToLastPos[j];
 
                summPriority += priorityToLastPos[j];
 
@@ -307,13 +290,11 @@ std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleCo
        /* final corners computing */
        for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j) {
                if (expressiveTime) {
-                       directions[j].x *= (summPriorityWithoutToLastPos[j] +
-                                               priorityToLastPos[j]);
+                       directions[j].x *= (summPriorityWithoutToLastPos[j] + priorityToLastPos[j]);
                        directions[j].x -= directionsToLastPos[j].x;
                        directions[j].x /= summPriorityWithoutToLastPos[j];
 
-                       directions[j].y *= (summPriorityWithoutToLastPos[j] +
-                                               priorityToLastPos[j]);
+                       directions[j].y *= (summPriorityWithoutToLastPos[j] + priorityToLastPos[j]);
                        directions[j].y -= directionsToLastPos[j].y;
                        directions[j].y /= summPriorityWithoutToLastPos[j];
                }
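
Note: the reflowed priority formula above is a normalized linear ramp: with H history entries, priority i is 2 * (i + 1) / (H * (H + 1)), and because the (i + 1) terms sum to H * (H + 1) / 2, the weights sum to exactly 1, so newer history entries get proportionally more influence. A standalone check (hypothetical, not part of this patch):

	#include <cassert>
	#include <cmath>
	#include <cstddef>

	int main()
	{
		const size_t historyAmount = 5; /* any H >= 1 behaves the same */
		float sum = 0.f;
		for (size_t i = 0; i < historyAmount; ++i)
			sum += ((i + 1) * 2.0f) / ((historyAmount + 1) * historyAmount);
		assert(std::fabs(sum - 1.f) < 1e-5f);
		return 0;
	}
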
index ba83581..bae97dd 100644
 #include <new>
 #include <iomanip>
 
-namespace MediaVision {
-namespace Image {
-
-ImageTrackingModel::ImageTrackingModel() :
-               __target(),
-               __tracker(),
-               __stabilizator(),
-               __location(),
-               __stabilizationParams()
+namespace MediaVision
+{
+namespace Image
+{
+ImageTrackingModel::ImageTrackingModel()
+               : __target(), __tracker(), __stabilizator(), __location(), __stabilizationParams()
 {
        ; /* NULL */
 }
 
-ImageTrackingModel::ImageTrackingModel(const ImageTrackingModel& copy) :
-               __target(copy.__target),
-               __stabilizator(copy.__stabilizator),
-               __location(copy.__location),
-               __stabilizationParams(copy.__stabilizationParams)
+ImageTrackingModel::ImageTrackingModel(const ImageTrackingModel &copy)
+               : __target(copy.__target)
+               , __stabilizator(copy.__stabilizator)
+               , __location(copy.__location)
+		, __stabilizationParams(copy.__stabilizationParams)
 {
        if (!copy.__tracker.empty())
                __tracker = copy.__tracker->clone();
        /* NULL */
 }
 
-int ImageTrackingModel::setTarget(const ImageObject& target)
+int ImageTrackingModel::setTarget(const ImageObject &target)
 {
        /* TODO: Here are all the settings.
         *        This can be transferred to configuration file.
@@ -117,7 +114,7 @@ int ImageTrackingModel::setTarget(const ImageObject& target)
 
        /* Creating a basic tracker which will have other trackers */
 
-       cv::Ptr<CascadeTracker> mainTracker = new (std::nothrow)CascadeTracker;
+       cv::Ptr<CascadeTracker> mainTracker = new (std::nothrow) CascadeTracker;
        if (mainTracker == NULL) {
                LOGE("Failed to create mainTracker");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
@@ -125,56 +122,37 @@ int ImageTrackingModel::setTarget(const ImageObject& target)
 
        /* Adding asynchronous recognition based tracker */
 
-       cv::Ptr<RecognitionBasedTracker> recogTracker =
-                       new (std::nothrow)RecognitionBasedTracker(
-                                       target,
-                                       orbFeatureExtractingParams,
-                                       orbRecogParams);
+       cv::Ptr<RecognitionBasedTracker> recogTracker = new (std::nothrow)
+                       RecognitionBasedTracker(target, orbFeatureExtractingParams, orbRecogParams);
        if (recogTracker == NULL)
                LOGE("Failed to create Recognition Tracker");
 
-       cv::Ptr<AsyncTracker> asyncRecogTracker =
-                       new (std::nothrow)AsyncTracker(
-                                       recogTracker,
-                                       true);
+       cv::Ptr<AsyncTracker> asyncRecogTracker = new (std::nothrow) AsyncTracker(recogTracker, true);
        if (asyncRecogTracker == NULL)
                LOGE("Failed to create Async Recognition Tracker");
 
-       mainTracker->enableTracker(
-                       asyncRecogTracker,
-                       recognitionBasedTrackerPriotity);
+       mainTracker->enableTracker(asyncRecogTracker, recognitionBasedTrackerPriotity);
 
        /* Adding asynchronous feature substitution based tracker */
 
-       cv::Ptr<FeatureSubstitutionTracker> substitutionTracker =
-                       new (std::nothrow)FeatureSubstitutionTracker(
-                                       gfttWbriefFeatureExtractingParams,
-                                       gfttWbriefRecogParams,
-                                       expectedOffset);
+       cv::Ptr<FeatureSubstitutionTracker> substitutionTracker = new (std::nothrow)
+                       FeatureSubstitutionTracker(gfttWbriefFeatureExtractingParams, gfttWbriefRecogParams, expectedOffset);
        if (substitutionTracker == NULL)
                LOGE("Failed to create Substitution Tracker");
 
-       cv::Ptr<AsyncTracker> asyncSubstitutionTracker =
-                       new (std::nothrow)AsyncTracker(
-                                       substitutionTracker,
-                                       true);
+       cv::Ptr<AsyncTracker> asyncSubstitutionTracker = new (std::nothrow) AsyncTracker(substitutionTracker, true);
        if (asyncSubstitutionTracker == NULL)
                LOGE("Failed to create Async Substitution Tracker");
 
-       mainTracker->enableTracker(
-                       asyncSubstitutionTracker,
-                       featureSubstitutionTrackerPriotity);
+       mainTracker->enableTracker(asyncSubstitutionTracker, featureSubstitutionTrackerPriotity);
 
        /* Adding median flow tracker */
 
-       cv::Ptr<MFTracker> mfTracker = new (std::nothrow)MFTracker(
-                       medianflowTrackingParams);
+       cv::Ptr<MFTracker> mfTracker = new (std::nothrow) MFTracker(medianflowTrackingParams);
        if (mfTracker == NULL)
                LOGE("Failed to create MFTracker");
 
-       mainTracker->enableTracker(
-                       mfTracker,
-                       medianFlowTrackerPriotity);
+       mainTracker->enableTracker(mfTracker, medianFlowTrackerPriotity);
 
        __tracker = mainTracker;
        __target = target;
@@ -187,7 +165,7 @@ bool ImageTrackingModel::isValid() const
        return !(__target.isEmpty());
 }
 
-bool ImageTrackingModel::track(const cv::Mat& frame, std::vector<cv::Point>& result)
+bool ImageTrackingModel::track(const cv::Mat &frame, std::vector<cv::Point> &result)
 {
        result.clear();
 
@@ -202,14 +180,14 @@ bool ImageTrackingModel::track(const cv::Mat& frame, std::vector<cv::Point>& res
        const size_t numberOfContourPoints = __location.size();
        std::vector<cv::Point2f> stabilizedContour(numberOfContourPoints);
        for (size_t i = 0; i < numberOfContourPoints; ++i) {
-               stabilizedContour[i].x = (float)__location[i].x;
-               stabilizedContour[i].y = (float)__location[i].y;
+               stabilizedContour[i].x = (float) __location[i].x;
+               stabilizedContour[i].y = (float) __location[i].y;
        }
 
        __stabilizator.stabilize(stabilizedContour, __stabilizationParams);
        for (size_t i = 0; i < numberOfContourPoints; ++i) {
-               __location[i].x = (int)stabilizedContour[i].x;
-               __location[i].y = (int)stabilizedContour[i].y;
+               __location[i].x = (int) stabilizedContour[i].x;
+               __location[i].y = (int) stabilizedContour[i].y;
        }
 
        result = __location;
@@ -222,7 +200,7 @@ void ImageTrackingModel::refresh(void)
        __location.clear();
 }
 
-ImageTrackingModel& ImageTrackingModel::operator=(const ImageTrackingModel& copy)
+ImageTrackingModel &ImageTrackingModel::operator=(const ImageTrackingModel &copy)
 {
        if (this != &copy) {
                __target = copy.__target;
@@ -291,7 +269,7 @@ int ImageTrackingModel::load(const char *filepath)
                return MEDIA_VISION_ERROR_PERMISSION_DENIED;
        }
 
-       in>>(*this);
+       in >> (*this);
 
        if (!in.good()) {
                LOGE("[%s] Unexpected end of file.", __FUNCTION__);
@@ -304,7 +282,7 @@ int ImageTrackingModel::load(const char *filepath)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-std::ostream& operator << (std::ostream& os, const ImageTrackingModel& obj)
+std::ostream &operator<<(std::ostream &os, const ImageTrackingModel &obj)
 {
        os << std::setprecision(7);
 
@@ -326,10 +304,10 @@ std::ostream& operator << (std::ostream& os, const ImageTrackingModel& obj)
        return os;
 }
 
-std::istream& operator >> (std::istream& is, ImageTrackingModel& obj)
+std::istream &operator>>(std::istream &is, ImageTrackingModel &obj)
 {
 #define MEDIA_VISION_CHECK_IFSTREAM \
-       if (!is.good()) \
+       if (!is.good())                 \
                return is;
 
        ImageObject target;
index 72695b2..969f00d 100644
 #include "opencv2/video/tracking.hpp"
 #include "opencv2/imgproc.hpp"
 
-namespace MediaVision {
-namespace Image {
-
-namespace {
+namespace MediaVision
+{
+namespace Image
+{
+namespace
+{
 const float FLOATEPS = 10e-6f;
 
-template<typename T>
-T getMedian(std::vector<T>& values, int size = -1)
+template<typename T> T getMedian(std::vector<T> &values, int size = -1)
 {
        if (size == -1)
-               size = (int)values.size();
+               size = (int) values.size();
 
        std::vector<T> copy(values.begin(), values.begin() + size);
        std::sort(copy.begin(), copy.end());
-       if (size%2 == 0) {
-               return (copy[size / 2 - 1] + copy[size/2]) / ((T)2.0);
+       if (size % 2 == 0) {
+               return (copy[size / 2 - 1] + copy[size / 2]) / ((T) 2.0);
        } else {
                return copy[(size - 1) / 2];
        }
@@ -55,15 +56,14 @@ MFTracker::Params::Params()
        mPyrMaxLevel = 5;
 }
 
-MFTracker::MFTracker(Params params) :
-               __isInit(false),
-               __params(params),
-               __termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.3),
-               __confidence(0.0f)
-{
-}
+MFTracker::MFTracker(Params params)
+               : __isInit(false)
+               , __params(params)
+               , __termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.3)
+               , __confidence(0.0f)
+{}
 
-bool MFTracker::track(const cv::Mat& frame, std::vector<cv::Point>& result)
+bool MFTracker::track(const cv::Mat &frame, std::vector<cv::Point> &result)
 {
        result.clear();
 
@@ -85,16 +85,14 @@ bool MFTracker::track(const cv::Mat& frame, std::vector<cv::Point>& result)
        result.resize(numberOfContourPoints);
 
        for (size_t i = 0; i < numberOfContourPoints; ++i) {
-               result[i].x = static_cast<int>(__boundingBox.x +
-                                                       __startLocation[i].x * __boundingBox.width);
-               result[i].y = static_cast<int>(__boundingBox.y +
-                                                       __startLocation[i].y * __boundingBox.height);
+               result[i].x = static_cast<int>(__boundingBox.x + __startLocation[i].x * __boundingBox.width);
+               result[i].y = static_cast<int>(__boundingBox.y + __startLocation[i].y * __boundingBox.height);
        }
 
        return true;
 }
 
-void MFTracker::reinforcement(const std::vector<cv::Point>& location)
+void MFTracker::reinforcement(const std::vector<cv::Point> &location)
 {
        __isInit = false;
 
@@ -108,7 +106,7 @@ void MFTracker::reinforcement(const std::vector<cv::Point>& location)
                return;
        }
 
-	const cv::Rect_<float>& boundingBox = cv::boundingRect(location);
+       const cv::Rect_<float> &boundingBox = cv::boundingRect(location);
        __boundingBox = boundingBox;
 
        const size_t numberOfContourPoints = location.size();
@@ -121,26 +119,22 @@ void MFTracker::reinforcement(const std::vector<cv::Point>& location)
 
 cv::Ptr<ObjectTracker> MFTracker::clone() const
 {
-       return cv::Ptr<ObjectTracker>(new (std::nothrow)MFTracker(*this));
+       return cv::Ptr<ObjectTracker>(new (std::nothrow) MFTracker(*this));
 }
 
-bool MFTracker::init(const cv::Mat& image)
+bool MFTracker::init(const cv::Mat &image)
 {
        if (image.empty())
                return false;
 
        image.copyTo(__image);
-       buildOpticalFlowPyramid(
-                       __image,
-                       __pyramid,
-                       __params.mWindowSize,
-                       __params.mPyrMaxLevel);
+       buildOpticalFlowPyramid(__image, __pyramid, __params.mWindowSize, __params.mPyrMaxLevel);
 
        __isInit = true;
        return __isInit;
 }
 
-bool MFTracker::update(const cv::Mat& image)
+bool MFTracker::update(const cv::Mat &image)
 {
        if (!__isInit || image.empty())
                return false;
@@ -188,8 +182,7 @@ cv::Rect_<float> MFTracker::getLastBoundingBox() const
        return __boundingBox;
 }
 
-bool MFTracker::medianFlowImpl(
-               cv::Mat oldImage_gray, cv::Mat newImage_gray, cv::Rect_<float>& oldBox)
+bool MFTracker::medianFlowImpl(cv::Mat oldImage_gray, cv::Mat newImage_gray, cv::Rect_<float> &oldBox)
 {
        std::vector<cv::Point2f> pointsToTrackOld, pointsToTrackNew;
 
@@ -197,30 +190,18 @@ bool MFTracker::medianFlowImpl(
        const float gridYStep = oldBox.height / __params.mPointsInGrid;
        for (int i = 0; i < __params.mPointsInGrid; i++)
                for (int j = 0; j < __params.mPointsInGrid; j++)
-                       pointsToTrackOld.push_back(
-                                       cv::Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j,
-                                                       oldBox.y + .5f*gridYStep + 1.f*gridYStep*i));
+                       pointsToTrackOld.push_back(cv::Point2f(oldBox.x + .5f * gridXStep + 1.f * gridXStep * j,
+                                                                                                  oldBox.y + .5f * gridYStep + 1.f * gridYStep * i));
 
        const size_t numberOfPointsToTrackOld = pointsToTrackOld.size();
        std::vector<uchar> status(numberOfPointsToTrackOld);
        std::vector<float> errors(numberOfPointsToTrackOld);
 
        std::vector<cv::Mat> tempPyramid;
-       buildOpticalFlowPyramid(
-                                                       newImage_gray,
-                                                       tempPyramid,
-                                                       __params.mWindowSize,
-                                                       __params.mPyrMaxLevel);
-
-       calcOpticalFlowPyrLK(__pyramid,
-                                                       tempPyramid,
-                                                       pointsToTrackOld,
-                                                       pointsToTrackNew,
-                                                       status,
-                                                       errors,
-                                                       __params.mWindowSize,
-                                                       __params.mPyrMaxLevel,
-                                                       __termcrit);
+       buildOpticalFlowPyramid(newImage_gray, tempPyramid, __params.mWindowSize, __params.mPyrMaxLevel);
+
+       calcOpticalFlowPyrLK(__pyramid, tempPyramid, pointsToTrackOld, pointsToTrackNew, status, errors,
+                                                __params.mWindowSize, __params.mPyrMaxLevel, __termcrit);
 
        std::vector<cv::Point2f> di;
        for (size_t idx = 0u; idx < numberOfPointsToTrackOld; idx++)
@@ -228,16 +209,9 @@ bool MFTracker::medianFlowImpl(
                        di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]);
 
        std::vector<bool> filter_status;
-       check_FB(tempPyramid,
-                       pointsToTrackOld,
-                       pointsToTrackNew,
-                       filter_status);
+       check_FB(tempPyramid, pointsToTrackOld, pointsToTrackNew, filter_status);
 
-       check_NCC(oldImage_gray,
-                       newImage_gray,
-                       pointsToTrackOld,
-                       pointsToTrackNew,
-                       filter_status);
+       check_NCC(oldImage_gray, newImage_gray, pointsToTrackOld, pointsToTrackNew, filter_status);
 
        for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++) {
                if (!filter_status[idx]) {
@@ -252,8 +226,7 @@ bool MFTracker::medianFlowImpl(
                return false;
 
        cv::Point2f mDisplacement;
-       cv::Rect_<float> boxCandidate =
-                               vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement);
+       cv::Rect_<float> boxCandidate = vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement);
 
        std::vector<float> displacements;
        for (size_t idx = 0u; idx < di.size(); idx++) {
@@ -261,8 +234,7 @@ bool MFTracker::medianFlowImpl(
                displacements.push_back(sqrt(di[idx].ddot(di[idx])));
        }
 
-       __confidence =
-                       (10.f - getMedian(displacements, static_cast<int>(displacements.size()))) / 10.f;
+       __confidence = (10.f - getMedian(displacements, static_cast<int>(displacements.size()))) / 10.f;
 
        if (__confidence < 0.f) {
                __confidence = 0.f;
@@ -274,23 +246,18 @@ bool MFTracker::medianFlowImpl(
        return true;
 }
 
-cv::Rect_<float> MFTracker::vote(
-               const std::vector<cv::Point2f>& oldPoints,
-               const std::vector<cv::Point2f>& newPoints,
-               const cv::Rect_<float>& oldRect,
-               cv::Point2f& mD)
+cv::Rect_<float> MFTracker::vote(const std::vector<cv::Point2f> &oldPoints, const std::vector<cv::Point2f> &newPoints,
+                                                                const cv::Rect_<float> &oldRect, cv::Point2f &mD)
 {
        cv::Rect_<float> newRect;
-       cv::Point2f newCenter(
-                       oldRect.x + oldRect.width / 2.f,
-                       oldRect.y + oldRect.height / 2.f);
+       cv::Point2f newCenter(oldRect.x + oldRect.width / 2.f, oldRect.y + oldRect.height / 2.f);
 
-       const int n = (int)oldPoints.size();
-       std::vector<float> buf(std::max(n*(n-1) / 2, 3), 0.f);
+       const int n = (int) oldPoints.size();
+       std::vector<float> buf(std::max(n * (n - 1) / 2, 3), 0.f);
 
        if (oldPoints.size() == 1) {
-               newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x;
-               newRect.y = oldRect.y+newPoints[0].y-oldPoints[0].y;
+               newRect.x = oldRect.x + newPoints[0].x - oldPoints[0].x;
+               newRect.y = oldRect.y + newPoints[0].y - oldPoints[0].y;
                newRect.width = oldRect.width;
                newRect.height = oldRect.height;
 
@@ -331,20 +298,17 @@ cv::Rect_<float> MFTracker::vote(
                }
        }
 
-       float scale = getMedian(buf, n*(n-1) / 2);
+       float scale = getMedian(buf, n * (n - 1) / 2);
        newRect.x = newCenter.x - scale * oldRect.width / 2.f;
-       newRect.y = newCenter.y-scale * oldRect.height / 2.f;
+       newRect.y = newCenter.y - scale * oldRect.height / 2.f;
        newRect.width = scale * oldRect.width;
        newRect.height = scale * oldRect.height;
 
        return newRect;
 }
 
-void MFTracker::check_FB(
-               std::vector<cv::Mat> newPyramid,
-               const std::vector<cv::Point2f>& oldPoints,
-               const std::vector<cv::Point2f>& newPoints,
-               std::vector<bool>& status)
+void MFTracker::check_FB(std::vector<cv::Mat> newPyramid, const std::vector<cv::Point2f> &oldPoints,
+                                                const std::vector<cv::Point2f> &newPoints, std::vector<bool> &status)
 {
        const size_t numberOfOldPoints = oldPoints.size();
 
@@ -356,15 +320,8 @@ void MFTracker::check_FB(
        std::vector<float> FBerror(numberOfOldPoints);
        std::vector<cv::Point2f> pointsToTrackReprojection;
 
-       calcOpticalFlowPyrLK(newPyramid,
-                                               __pyramid,
-                                               newPoints,
-                                               pointsToTrackReprojection,
-                                               LKstatus,
-                                               errors,
-                                               __params.mWindowSize,
-                                               __params.mPyrMaxLevel,
-                                               __termcrit);
+       calcOpticalFlowPyrLK(newPyramid, __pyramid, newPoints, pointsToTrackReprojection, LKstatus, errors,
+                                                __params.mWindowSize, __params.mPyrMaxLevel, __termcrit);
 
        for (size_t idx = 0u; idx < numberOfOldPoints; idx++)
                FBerror[idx] = l2distance(oldPoints[idx], pointsToTrackReprojection[idx]);
@@ -374,12 +331,8 @@ void MFTracker::check_FB(
                status[idx] = (FBerror[idx] < FBerrorMedian);
 }
 
-void MFTracker::check_NCC(
-               const cv::Mat& oldImage,
-               const cv::Mat& newImage,
-               const std::vector<cv::Point2f>& oldPoints,
-               const std::vector<cv::Point2f>& newPoints,
-               std::vector<bool>& status)
+void MFTracker::check_NCC(const cv::Mat &oldImage, const cv::Mat &newImage, const std::vector<cv::Point2f> &oldPoints,
+                                                 const std::vector<cv::Point2f> &newPoints, std::vector<bool> &status)
 {
        std::vector<float> NCC(oldPoints.size(), 0.f);
        cv::Size patch(30, 30);
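
Note: check_FB() above implements the forward-backward consistency test from median-flow tracking: points are tracked from the old frame to the new one, re-tracked back, and only points whose reprojection error stays below the median survive. Its filtering tail reduces to the standalone sketch below (hypothetical helper, not part of this patch; it assumes a non-empty error vector):

	#include <algorithm>
	#include <vector>

	/* Median by full sort; mirrors getMedian() above for odd/even sizes. */
	static float median(std::vector<float> values)
	{
		std::sort(values.begin(), values.end());
		const size_t n = values.size();
		return n % 2 ? values[n / 2] : (values[n / 2 - 1] + values[n / 2]) / 2.f;
	}

	/* Keep only the points whose forward-backward error is below the median. */
	static std::vector<bool> filterByMedianError(const std::vector<float> &fbError)
	{
		const float cut = median(fbError);
		std::vector<bool> status(fbError.size());
		for (size_t i = 0; i < fbError.size(); ++i)
			status[i] = fbError[i] < cut;
		return status;
	}
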
index 4e73730..7e69a58 100644
 
 #include "Tracking/ObjectTracker.h"
 
-namespace MediaVision {
-namespace Image {
+namespace MediaVision
+{
+namespace Image
+{
 ObjectTracker::~ObjectTracker()
 {
        ; /* NULL */
index d3cbfbc..beb6bfa 100644
 
 #include <new>
 
-namespace MediaVision {
-namespace Image {
-
-RecognitionBasedTracker::RecognitionBasedTracker(
-               const ImageObject& target,
-               const FeaturesExtractingParams& sceneFeaturesExtractingParams,
-               const RecognitionParams& recognitionParams) :
-                               __target(target),
-                               __sceneFeatureExtractingParams(sceneFeaturesExtractingParams),
-                               __recogParams(recognitionParams)
+namespace MediaVision
+{
+namespace Image
+{
+RecognitionBasedTracker::RecognitionBasedTracker(const ImageObject &target,
+                                                                                                const FeaturesExtractingParams &sceneFeaturesExtractingParams,
+                                                                                                const RecognitionParams &recognitionParams)
+               : __target(target)
+               , __sceneFeatureExtractingParams(sceneFeaturesExtractingParams)
+               , __recogParams(recognitionParams)
 {
        ; /* NULL */
 }
@@ -39,9 +39,7 @@ RecognitionBasedTracker::~RecognitionBasedTracker()
        ; /* NULL */
 }
 
-bool RecognitionBasedTracker::track(
-               const cv::Mat& frame,
-               std::vector<cv::Point>& result)
+bool RecognitionBasedTracker::track(const cv::Mat &frame, std::vector<cv::Point> &result)
 {
        result.clear();
 
@@ -57,15 +55,15 @@ bool RecognitionBasedTracker::track(
                size_t numberOfContourPoints = contour.size();
                result.resize(numberOfContourPoints);
                for (size_t i = 0u; i < numberOfContourPoints; ++i) {
-                       result[i].x = (int)contour[i].x;
-                       result[i].y = (int)contour[i].y;
+                       result[i].x = (int) contour[i].x;
+                       result[i].y = (int) contour[i].y;
                }
        }
 
        return isRecognized;
 }
 
-void RecognitionBasedTracker::reinforcement(const std::vector<cv::Point>& /*location*/)
+void RecognitionBasedTracker::reinforcement(const std::vector<cv::Point> & /*location*/)
 {
        ; /* The tracker is based on the recognition on the entire image.
           * The reinforcement does not make a sense.*/
@@ -73,7 +71,7 @@ void RecognitionBasedTracker::reinforcement(const std::vector<cv::Point>& /*loca
 
 cv::Ptr<ObjectTracker> RecognitionBasedTracker::clone() const
 {
-       return cv::Ptr<ObjectTracker>(new (std::nothrow)RecognitionBasedTracker(*this));
+       return cv::Ptr<ObjectTracker>(new (std::nothrow) RecognitionBasedTracker(*this));
 }
 
 } /* Image */
index 7b4ab29..0a8bd83 100644
  * @brief This file contains the porting layer for Media Vision image module.
  */
 
-int mv_image_recognize(
-               mv_source_h source,
-               const mv_image_object_h *image_objects,
-               int number_of_objects,
-               mv_engine_config_h engine_cfg,
-               mv_image_recognized_cb recognized_cb,
-               void *user_data)
+int mv_image_recognize(mv_source_h source, const mv_image_object_h *image_objects, int number_of_objects,
+                                          mv_engine_config_h engine_cfg, mv_image_recognized_cb recognized_cb, void *user_data)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -43,19 +38,14 @@ int mv_image_recognize(
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = mv_image_recognize_open(source, image_objects,
-                       number_of_objects, engine_cfg, recognized_cb, user_data);
+       int ret = mv_image_recognize_open(source, image_objects, number_of_objects, engine_cfg, recognized_cb, user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_image_track(
-               mv_source_h source,
-               mv_image_tracking_model_h image_tracking_model,
-               mv_engine_config_h engine_cfg,
-               mv_image_tracked_cb tracked_cb,
-               void *user_data)
+int mv_image_track(mv_source_h source, mv_image_tracking_model_h image_tracking_model, mv_engine_config_h engine_cfg,
+                                  mv_image_tracked_cb tracked_cb, void *user_data)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -70,8 +60,7 @@ int mv_image_track(
        return ret;
 }
 
-int mv_image_object_create(
-               mv_image_object_h *image_object)
+int mv_image_object_create(mv_image_object_h *image_object)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(image_object);
@@ -83,8 +72,7 @@ int mv_image_object_create(
        return ret;
 }
 
-int mv_image_object_destroy(
-               mv_image_object_h image_object)
+int mv_image_object_destroy(mv_image_object_h image_object)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(image_object);
@@ -96,11 +84,8 @@ int mv_image_object_destroy(
        return ret;
 }
 
-int mv_image_object_fill(
-               mv_image_object_h image_object,
-               mv_engine_config_h engine_cfg,
-               mv_source_h source,
-               mv_rectangle_s *location)
+int mv_image_object_fill(mv_image_object_h image_object, mv_engine_config_h engine_cfg, mv_source_h source,
+                                                mv_rectangle_s *location)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(image_object);
@@ -114,9 +99,7 @@ int mv_image_object_fill(
        return ret;
 }
 
-int mv_image_object_get_recognition_rate(
-               mv_image_object_h image_object,
-               double *recognition_rate)
+int mv_image_object_get_recognition_rate(mv_image_object_h image_object, double *recognition_rate)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(image_object);
@@ -130,9 +113,7 @@ int mv_image_object_get_recognition_rate(
        return ret;
 }
 
-int mv_image_object_set_label(
-               mv_image_object_h image_object,
-               int label)
+int mv_image_object_set_label(mv_image_object_h image_object, int label)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(image_object);
@@ -145,9 +126,7 @@ int mv_image_object_set_label(
        return ret;
 }
 
-int mv_image_object_get_label(
-               mv_image_object_h image_object,
-               int *label)
+int mv_image_object_get_label(mv_image_object_h image_object, int *label)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(image_object);
@@ -161,9 +140,7 @@ int mv_image_object_get_label(
        return ret;
 }
 
-int mv_image_object_clone(
-               mv_image_object_h src,
-               mv_image_object_h *dst)
+int mv_image_object_clone(mv_image_object_h src, mv_image_object_h *dst)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(src);
@@ -176,8 +153,7 @@ int mv_image_object_clone(
        return ret;
 }
 
-int mv_image_object_save(
-               const char *file_name, mv_image_object_h image_object)
+int mv_image_object_save(const char *file_name, mv_image_object_h image_object)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(image_object);
@@ -195,8 +171,7 @@ int mv_image_object_save(
        return ret;
 }
 
-int mv_image_object_load(
-               const char *file_name, mv_image_object_h *image_object)
+int mv_image_object_load(const char *file_name, mv_image_object_h *image_object)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(image_object);
@@ -214,8 +189,7 @@ int mv_image_object_load(
        return ret;
 }
 
-int mv_image_tracking_model_create(
-               mv_image_tracking_model_h *image_tracking_model)
+int mv_image_tracking_model_create(mv_image_tracking_model_h *image_tracking_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model);
@@ -228,9 +202,7 @@ int mv_image_tracking_model_create(
        return ret;
 }
 
-int mv_image_tracking_model_set_target(
-               mv_image_object_h image_object,
-               mv_image_tracking_model_h image_tracking_model)
+int mv_image_tracking_model_set_target(mv_image_object_h image_object, mv_image_tracking_model_h image_tracking_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
@@ -244,8 +216,7 @@ int mv_image_tracking_model_set_target(
        return ret;
 }
 
-int mv_image_tracking_model_destroy(
-               mv_image_tracking_model_h image_tracking_model)
+int mv_image_tracking_model_destroy(mv_image_tracking_model_h image_tracking_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
@@ -258,26 +229,20 @@ int mv_image_tracking_model_destroy(
        return ret;
 }
 
-int mv_image_tracking_model_refresh(
-               mv_image_tracking_model_h image_tracking_model,
-               mv_engine_config_h engine_cfg)
+int mv_image_tracking_model_refresh(mv_image_tracking_model_h image_tracking_model, mv_engine_config_h engine_cfg)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = mv_image_tracking_model_refresh_open(
-                                       image_tracking_model,
-                                       engine_cfg);
+       int ret = mv_image_tracking_model_refresh_open(image_tracking_model, engine_cfg);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_image_tracking_model_clone(
-               mv_image_tracking_model_h src,
-               mv_image_tracking_model_h *dst)
+int mv_image_tracking_model_clone(mv_image_tracking_model_h src, mv_image_tracking_model_h *dst)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(src);
@@ -291,8 +256,7 @@ int mv_image_tracking_model_clone(
        return ret;
 }
 
-int mv_image_tracking_model_save(
-               const char *file_name, mv_image_tracking_model_h image_tracking_model)
+int mv_image_tracking_model_save(const char *file_name, mv_image_tracking_model_h image_tracking_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
@@ -310,8 +274,7 @@ int mv_image_tracking_model_save(
        return ret;
 }
 
-int mv_image_tracking_model_load(
-               const char *file_name, mv_image_tracking_model_h *image_tracking_model)
+int mv_image_tracking_model_load(const char *file_name, mv_image_tracking_model_h *image_tracking_model)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model);
index 3ba7dd4..8599892 100644 (file)
 #include <opencv2/core.hpp>
 #include <opencv2/imgproc/imgproc_c.h>
 
-namespace {
-
-class DefaultConfiguration {
+namespace
+{
+class DefaultConfiguration
+{
 public:
-	static const DefaultConfiguration& getInstance();
+       static const DefaultConfiguration &getInstance();
 
        MediaVision::Image::FeaturesExtractingParams getObjectFeaturesExtractingParams() const;
 
@@ -64,12 +65,12 @@ private:
 
 DefaultConfiguration DefaultConfiguration::instance;
 
-DefaultConfiguration::DefaultConfiguration() :
-               __objectFeaturesExtractingParams(),
-               __sceneFeaturesExtractingParams(),
-               __recognitionParams(15, 0.33, 0.1),
-               __stabilizationParams(true, 3, 0.00006, 1.3, 2, 0.001),
-               __trackingParams()
+DefaultConfiguration::DefaultConfiguration()
+               : __objectFeaturesExtractingParams()
+               , __sceneFeaturesExtractingParams()
+               , __recognitionParams(15, 0.33, 0.1)
+               , __stabilizationParams(true, 3, 0.00006, 1.3, 2, 0.001)
+		, __trackingParams()
 {
        __objectFeaturesExtractingParams.mKeypointType = MediaVision::Image::KT_ORB;
        __objectFeaturesExtractingParams.mDescriptorType = MediaVision::Image::DT_ORB;
@@ -87,44 +88,38 @@ DefaultConfiguration::DefaultConfiguration() :
        __trackingParams.mExpectedOffset = 0.0;
 }
 
-const DefaultConfiguration& DefaultConfiguration::getInstance()
+const DefaultConfiguration &DefaultConfiguration::getInstance()
 {
        return instance;
 }
 
-MediaVision::Image::FeaturesExtractingParams
-DefaultConfiguration::getObjectFeaturesExtractingParams() const
+MediaVision::Image::FeaturesExtractingParams DefaultConfiguration::getObjectFeaturesExtractingParams() const
 {
        return __objectFeaturesExtractingParams;
 }
 
-MediaVision::Image::FeaturesExtractingParams
-DefaultConfiguration::getSceneFeaturesExtractingParams() const
+MediaVision::Image::FeaturesExtractingParams DefaultConfiguration::getSceneFeaturesExtractingParams() const
 {
        return __sceneFeaturesExtractingParams;
 }
 
-MediaVision::Image::RecognitionParams
-DefaultConfiguration::getRecognitionParams() const
+MediaVision::Image::RecognitionParams DefaultConfiguration::getRecognitionParams() const
 {
        return __recognitionParams;
 }
 
-MediaVision::Image::StabilizationParams
-DefaultConfiguration::getStabilizationParams() const
+MediaVision::Image::StabilizationParams DefaultConfiguration::getStabilizationParams() const
 {
        return __stabilizationParams;
 }
 
-MediaVision::Image::TrackingParams
-DefaultConfiguration::getTrackingParams() const
+MediaVision::Image::TrackingParams DefaultConfiguration::getTrackingParams() const
 {
        return __trackingParams;
 }
 
-void extractTargetFeaturesExtractingParams(
-               mv_engine_config_h engine_cfg,
-               MediaVision::Image::FeaturesExtractingParams& featuresExtractingParams)
+void extractTargetFeaturesExtractingParams(mv_engine_config_h engine_cfg,
+                                                                                  MediaVision::Image::FeaturesExtractingParams &featuresExtractingParams)
 {
        mv_engine_config_h working_cfg = NULL;
 
@@ -134,26 +129,20 @@ void extractTargetFeaturesExtractingParams(
                working_cfg = engine_cfg;
        }
 
-       featuresExtractingParams =
-                       DefaultConfiguration::getInstance().getObjectFeaturesExtractingParams();
+       featuresExtractingParams = DefaultConfiguration::getInstance().getObjectFeaturesExtractingParams();
 
-       mv_engine_config_get_double_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_RECOGNITION_OBJECT_SCALE_FACTOR,
-                       &featuresExtractingParams.ORB.mScaleFactor);
+       mv_engine_config_get_double_attribute_c(working_cfg, MV_IMAGE_RECOGNITION_OBJECT_SCALE_FACTOR,
+                                                                                       &featuresExtractingParams.ORB.mScaleFactor);
 
-       mv_engine_config_get_int_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_RECOGNITION_OBJECT_MAX_KEYPOINTS_NUM,
-                       &featuresExtractingParams.ORB.mMaximumFeaturesNumber);
+       mv_engine_config_get_int_attribute_c(working_cfg, MV_IMAGE_RECOGNITION_OBJECT_MAX_KEYPOINTS_NUM,
+                                                                                &featuresExtractingParams.ORB.mMaximumFeaturesNumber);
 
        if (NULL == engine_cfg)
                mv_destroy_engine_config(working_cfg);
 }
 
-void extractSceneFeaturesExtractingParams(
-               mv_engine_config_h engine_cfg,
-               MediaVision::Image::FeaturesExtractingParams& featuresExtractingParams)
+void extractSceneFeaturesExtractingParams(mv_engine_config_h engine_cfg,
+                                                                                 MediaVision::Image::FeaturesExtractingParams &featuresExtractingParams)
 {
        mv_engine_config_h working_cfg = NULL;
 
@@ -163,26 +152,19 @@ void extractSceneFeaturesExtractingParams(
                working_cfg = engine_cfg;
        }
 
-       featuresExtractingParams =
-                       DefaultConfiguration::getInstance().getSceneFeaturesExtractingParams();
+       featuresExtractingParams = DefaultConfiguration::getInstance().getSceneFeaturesExtractingParams();
 
-       mv_engine_config_get_double_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_RECOGNITION_SCENE_SCALE_FACTOR,
-                       &featuresExtractingParams.ORB.mScaleFactor);
+       mv_engine_config_get_double_attribute_c(working_cfg, MV_IMAGE_RECOGNITION_SCENE_SCALE_FACTOR,
+                                                                                       &featuresExtractingParams.ORB.mScaleFactor);
 
-       mv_engine_config_get_int_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_RECOGNITION_SCENE_MAX_KEYPOINTS_NUM,
-                       &featuresExtractingParams.ORB.mMaximumFeaturesNumber);
+       mv_engine_config_get_int_attribute_c(working_cfg, MV_IMAGE_RECOGNITION_SCENE_MAX_KEYPOINTS_NUM,
+                                                                                &featuresExtractingParams.ORB.mMaximumFeaturesNumber);
 
        if (NULL == engine_cfg)
                mv_destroy_engine_config(working_cfg);
 }
 
-void extractRecognitionParams(
-               mv_engine_config_h engine_cfg,
-               MediaVision::Image::RecognitionParams& recognitionParams)
+void extractRecognitionParams(mv_engine_config_h engine_cfg, MediaVision::Image::RecognitionParams &recognitionParams)
 {
        mv_engine_config_h working_cfg = NULL;
 
@@ -192,31 +174,23 @@ void extractRecognitionParams(
                working_cfg = engine_cfg;
        }
 
-       recognitionParams =
-                       DefaultConfiguration::getInstance().getRecognitionParams();
+       recognitionParams = DefaultConfiguration::getInstance().getRecognitionParams();
 
-       mv_engine_config_get_int_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_RECOGNITION_MIN_MATCH_NUM,
-                       &recognitionParams.mMinMatchesNumber);
+       mv_engine_config_get_int_attribute_c(working_cfg, MV_IMAGE_RECOGNITION_MIN_MATCH_NUM,
+                                                                                &recognitionParams.mMinMatchesNumber);
 
-       mv_engine_config_get_double_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_RECOGNITION_REQ_MATCH_PART,
-                       &recognitionParams.mRequiredMatchesPart);
+       mv_engine_config_get_double_attribute_c(working_cfg, MV_IMAGE_RECOGNITION_REQ_MATCH_PART,
+                                                                                       &recognitionParams.mRequiredMatchesPart);
 
-       mv_engine_config_get_double_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_RECOGNITION_TOLERANT_MATCH_PART_ERR,
-                       &recognitionParams.mTolerantMatchesPartError);
+       mv_engine_config_get_double_attribute_c(working_cfg, MV_IMAGE_RECOGNITION_TOLERANT_MATCH_PART_ERR,
+                                                                                       &recognitionParams.mTolerantMatchesPartError);
 
        if (NULL == engine_cfg)
                mv_destroy_engine_config(working_cfg);
 }
 
-void extractStabilizationParams(
-               mv_engine_config_h engine_cfg,
-               MediaVision::Image::StabilizationParams& stabilizationParams)
+void extractStabilizationParams(mv_engine_config_h engine_cfg,
+                                                               MediaVision::Image::StabilizationParams &stabilizationParams)
 {
        mv_engine_config_h working_cfg = NULL;
        int _history_amount = 0;
@@ -227,45 +201,28 @@ void extractStabilizationParams(
                working_cfg = engine_cfg;
        }
 
-       stabilizationParams =
-                       DefaultConfiguration::getInstance().getStabilizationParams();
-
-       mv_engine_config_get_bool_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_TRACKING_USE_STABLIZATION,
-                       &stabilizationParams.mIsEnabled);
-
-       mv_engine_config_get_int_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_TRACKING_HISTORY_AMOUNT,
-                       &_history_amount);
-       stabilizationParams.mHistoryAmount =
-               static_cast<size_t>(
-                       static_cast<unsigned int>(_history_amount)
-               );
-
-       mv_engine_config_get_double_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT,
-                       &stabilizationParams.mTolerantShift);
-
-       mv_engine_config_get_double_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_TRACKING_STABLIZATION_SPEED,
-                       &stabilizationParams.mStabilizationSpeed);
-
-       mv_engine_config_get_double_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION,
-                       &stabilizationParams.mStabilizationAcceleration);
+       stabilizationParams = DefaultConfiguration::getInstance().getStabilizationParams();
+
+       mv_engine_config_get_bool_attribute_c(working_cfg, MV_IMAGE_TRACKING_USE_STABLIZATION,
+                                                                                 &stabilizationParams.mIsEnabled);
+
+       mv_engine_config_get_int_attribute_c(working_cfg, MV_IMAGE_TRACKING_HISTORY_AMOUNT, &_history_amount);
+       stabilizationParams.mHistoryAmount = static_cast<size_t>(static_cast<unsigned int>(_history_amount));
+
+       mv_engine_config_get_double_attribute_c(working_cfg, MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT,
+                                                                                       &stabilizationParams.mTolerantShift);
+
+       mv_engine_config_get_double_attribute_c(working_cfg, MV_IMAGE_TRACKING_STABLIZATION_SPEED,
+                                                                                       &stabilizationParams.mStabilizationSpeed);
+
+       mv_engine_config_get_double_attribute_c(working_cfg, MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION,
+                                                                                       &stabilizationParams.mStabilizationAcceleration);
 
        if (NULL == engine_cfg)
                mv_destroy_engine_config(working_cfg);
 }
 
-void extractTrackingParams(
-               mv_engine_config_h engine_cfg,
-               MediaVision::Image::TrackingParams& trackingParams)
+void extractTrackingParams(mv_engine_config_h engine_cfg, MediaVision::Image::TrackingParams &trackingParams)
 {
        mv_engine_config_h working_cfg = NULL;
 
@@ -275,25 +232,16 @@ void extractTrackingParams(
                working_cfg = engine_cfg;
        }
 
-       trackingParams =
-                       DefaultConfiguration::getInstance().getTrackingParams();
+       trackingParams = DefaultConfiguration::getInstance().getTrackingParams();
 
-       extractSceneFeaturesExtractingParams(
-                       working_cfg,
-                       trackingParams.mFramesFeaturesExtractingParams);
+       extractSceneFeaturesExtractingParams(working_cfg, trackingParams.mFramesFeaturesExtractingParams);
 
-       extractRecognitionParams(
-                       working_cfg,
-                       trackingParams.mRecognitionParams);
+       extractRecognitionParams(working_cfg, trackingParams.mRecognitionParams);
 
-       extractStabilizationParams(
-                       working_cfg,
-                       trackingParams.mStabilizationParams);
+       extractStabilizationParams(working_cfg, trackingParams.mStabilizationParams);
 
-       mv_engine_config_get_double_attribute_c(
-                       working_cfg,
-                       MV_IMAGE_TRACKING_EXPECTED_OFFSET,
-                       &trackingParams.mExpectedOffset);
+       mv_engine_config_get_double_attribute_c(working_cfg, MV_IMAGE_TRACKING_EXPECTED_OFFSET,
+                                                                                       &trackingParams.mExpectedOffset);
 
        if (NULL == engine_cfg)
                mv_destroy_engine_config(working_cfg);
@@ -301,13 +249,8 @@ void extractTrackingParams(
 
 } /* anonymous namespace */
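Every extract*Params helper above follows the same shape: if the caller passes a NULL engine_cfg, a temporary default configuration is created, attribute reads then overwrite the library defaults, and the temporary configuration is destroyed on the way out. A condensed sketch of the pattern, with a hypothetical attribute name and error handling elided:

    static void extract_some_param(mv_engine_config_h engine_cfg, double *out_value)
    {
        mv_engine_config_h working_cfg = NULL;

        /* Fall back to a freshly created default configuration when none is given. */
        if (NULL == engine_cfg)
            mv_create_engine_config(&working_cfg);
        else
            working_cfg = engine_cfg;

        *out_value = 0.0; /* start from the library default */
        mv_engine_config_get_double_attribute_c(working_cfg, "SOME_ATTRIBUTE" /* hypothetical */, out_value);

        /* Only destroy what this helper created itself. */
        if (NULL == engine_cfg)
            mv_destroy_engine_config(working_cfg);
    }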
 
-int mv_image_recognize_open(
-               mv_source_h source,
-               const mv_image_object_h *image_objects,
-               int number_of_objects,
-               mv_engine_config_h engine_cfg,
-               mv_image_recognized_cb recognized_cb,
-               void *user_data)
+int mv_image_recognize_open(mv_source_h source, const mv_image_object_h *image_objects, int number_of_objects,
+                                                       mv_engine_config_h engine_cfg, mv_image_recognized_cb recognized_cb, void *user_data)
 {
        MEDIA_VISION_INSTANCE_CHECK(source);
        MEDIA_VISION_NULL_ARG_CHECK(image_objects);
@@ -316,9 +259,7 @@ int mv_image_recognize_open(
        MEDIA_VISION_NULL_ARG_CHECK(recognized_cb);
 
        cv::Mat scene;
-       MEDIA_VISION_ASSERT(
-                       MediaVision::Common::convertSourceMV2GrayCV(source, scene),
-                       "Failed to convert mv_source.");
+       MEDIA_VISION_ASSERT(MediaVision::Common::convertSourceMV2GrayCV(source, scene), "Failed to convert mv_source.");
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
@@ -336,37 +277,25 @@ int mv_image_recognize_open(
 
        for (int objectNum = 0; objectNum < number_of_objects; ++objectNum) {
                std::vector<cv::Point2f> resultContour;
-               bool isRecognized = recognizer.recognize(
-                                               *((MediaVision::Image::ImageObject*)image_objects[objectNum]),
-                                               recognitionParams, resultContour);
-               if (isRecognized && (resultContour.size() ==
-                       MediaVision::Image::NumberOfQuadrangleCorners)) {
+               bool isRecognized = recognizer.recognize(*((MediaVision::Image::ImageObject *) image_objects[objectNum]),
+                                                                                                recognitionParams, resultContour);
+               if (isRecognized && (resultContour.size() == MediaVision::Image::NumberOfQuadrangleCorners)) {
                        resultLocations[objectNum] = new mv_quadrangle_s;
                        if (resultLocations[objectNum] == NULL) {
                                ret = MEDIA_VISION_ERROR_OUT_OF_MEMORY;
                                goto ErrorExit;
                        }
 
-                       for (size_t pointNum = 0u;
-                               pointNum < MediaVision::Image::NumberOfQuadrangleCorners;
-                               ++pointNum) {
-                               resultLocations[objectNum]->points[pointNum].x =
-                                                       resultContour[pointNum].x;
-                               resultLocations[objectNum]->points[pointNum].y =
-                                                       resultContour[pointNum].y;
+                       for (size_t pointNum = 0u; pointNum < MediaVision::Image::NumberOfQuadrangleCorners; ++pointNum) {
+                               resultLocations[objectNum]->points[pointNum].x = resultContour[pointNum].x;
+                               resultLocations[objectNum]->points[pointNum].y = resultContour[pointNum].y;
                        }
                } else {
                        resultLocations[objectNum] = NULL;
                }
        }
 
-       recognized_cb(
-                       source,
-                       engine_cfg,
-                       image_objects,
-                       resultLocations,
-                       number_of_objects,
-                       user_data);
+       recognized_cb(source, engine_cfg, image_objects, resultLocations, number_of_objects, user_data);
 
 ErrorExit:
 
@@ -380,18 +309,14 @@ ErrorExit:
        return ret;
 }
 
-int mv_image_track_open(
-               mv_source_h source,
-               mv_image_tracking_model_h image_tracking_model,
-               mv_engine_config_h engine_cfg,
-               mv_image_tracked_cb tracked_cb,
-               void *user_data)
+int mv_image_track_open(mv_source_h source, mv_image_tracking_model_h image_tracking_model,
+                                               mv_engine_config_h engine_cfg, mv_image_tracked_cb tracked_cb, void *user_data)
 {
        MEDIA_VISION_INSTANCE_CHECK(source);
        MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
        MEDIA_VISION_NULL_ARG_CHECK(tracked_cb);
 
-       if (!((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->isValid()) {
+       if (!((MediaVision::Image::ImageTrackingModel *) image_tracking_model)->isValid()) {
                LOGE("[%s] Image tracking model is invalid.", __FUNCTION__);
                return MEDIA_VISION_ERROR_INVALID_DATA;
        }
@@ -400,22 +325,17 @@ int mv_image_track_open(
        extractTrackingParams(engine_cfg, trackingParams);
 
        cv::Mat frame;
-       MEDIA_VISION_ASSERT(
-                       MediaVision::Common::convertSourceMV2GrayCV(source, frame),
-                       "Failed to convert mv_source.");
+       MEDIA_VISION_ASSERT(MediaVision::Common::convertSourceMV2GrayCV(source, frame), "Failed to convert mv_source.");
 
        MediaVision::Image::ImageTrackingModel *trackingModel =
-                       (MediaVision::Image::ImageTrackingModel*)image_tracking_model;
+                       (MediaVision::Image::ImageTrackingModel *) image_tracking_model;
 
        std::vector<cv::Point> resultContour;
        const bool isTracked = trackingModel->track(frame, resultContour);
 
-       if (isTracked &&
-               MediaVision::Image::NumberOfQuadrangleCorners == resultContour.size()) {
+       if (isTracked && MediaVision::Image::NumberOfQuadrangleCorners == resultContour.size()) {
                mv_quadrangle_s result;
-               for (size_t pointNum = 0u;
-                       pointNum < MediaVision::Image::NumberOfQuadrangleCorners;
-                       ++pointNum) {
+               for (size_t pointNum = 0u; pointNum < MediaVision::Image::NumberOfQuadrangleCorners; ++pointNum) {
                        result.points[pointNum].x = resultContour[pointNum].x;
                        result.points[pointNum].y = resultContour[pointNum].y;
                }
@@ -427,41 +347,34 @@ int mv_image_track_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_object_create_open(
-               mv_image_object_h *image_object)
+int mv_image_object_create_open(mv_image_object_h *image_object)
 {
        MEDIA_VISION_NULL_ARG_CHECK(image_object);
 
-       (*image_object) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject();
+       (*image_object) = (mv_image_object_h) new (std::nothrow) MediaVision::Image::ImageObject();
        if (*image_object == NULL)
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_object_destroy_open(
-               mv_image_object_h image_object)
+int mv_image_object_destroy_open(mv_image_object_h image_object)
 {
        MEDIA_VISION_INSTANCE_CHECK(image_object);
 
-       delete (MediaVision::Image::ImageObject*)image_object;
+       delete (MediaVision::Image::ImageObject *) image_object;
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_object_fill_open(
-               mv_image_object_h image_object,
-               mv_engine_config_h engine_cfg,
-               mv_source_h source,
-               mv_rectangle_s *location)
+int mv_image_object_fill_open(mv_image_object_h image_object, mv_engine_config_h engine_cfg, mv_source_h source,
+                                                         mv_rectangle_s *location)
 {
        MEDIA_VISION_INSTANCE_CHECK(image_object);
        MEDIA_VISION_INSTANCE_CHECK(source);
 
        cv::Mat image;
-       MEDIA_VISION_ASSERT(
-                       MediaVision::Common::convertSourceMV2GrayCV(source, image),
-                       "Failed to convert mv_source.");
+       MEDIA_VISION_ASSERT(MediaVision::Common::convertSourceMV2GrayCV(source, image), "Failed to convert mv_source.");
 
        std::vector<cv::Point2f> roi;
        if (NULL != location) {
@@ -483,45 +396,35 @@ int mv_image_object_fill_open(
        MediaVision::Image::FeaturesExtractingParams featuresExtractingParams;
        extractTargetFeaturesExtractingParams(engine_cfg, featuresExtractingParams);
 
-       static_cast<MediaVision::Image::ImageObject*>(image_object)->fill(
-                       image,
-                       featuresExtractingParams,
-                       roi);
+       static_cast<MediaVision::Image::ImageObject *>(image_object)->fill(image, featuresExtractingParams, roi);
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_object_get_recognition_rate_open(
-               mv_image_object_h image_object,
-               double *recognition_rate)
+int mv_image_object_get_recognition_rate_open(mv_image_object_h image_object, double *recognition_rate)
 {
        MEDIA_VISION_INSTANCE_CHECK(image_object);
        MEDIA_VISION_NULL_ARG_CHECK(recognition_rate);
 
-       (*recognition_rate) =
-               ((MediaVision::Image::ImageObject*)image_object)->getRecognitionRate();
+       (*recognition_rate) = ((MediaVision::Image::ImageObject *) image_object)->getRecognitionRate();
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_object_set_label_open(
-               mv_image_object_h image_object,
-               int label)
+int mv_image_object_set_label_open(mv_image_object_h image_object, int label)
 {
        MEDIA_VISION_INSTANCE_CHECK(image_object);
 
-       ((MediaVision::Image::ImageObject*)image_object)->setLabel(label);
+       ((MediaVision::Image::ImageObject *) image_object)->setLabel(label);
 
        return MEDIA_VISION_ERROR_NONE;
 }
-int mv_image_object_get_label_open(
-               mv_image_object_h image_object,
-               int *label)
+int mv_image_object_get_label_open(mv_image_object_h image_object, int *label)
 {
        MEDIA_VISION_INSTANCE_CHECK(image_object);
        MEDIA_VISION_NULL_ARG_CHECK(label);
 
-       if (!((MediaVision::Image::ImageObject*)image_object)->getLabel(*label)) {
+       if (!((MediaVision::Image::ImageObject *) image_object)->getLabel(*label)) {
                LOGW("[%s] Image object haven't a label.", __FUNCTION__);
                return MEDIA_VISION_ERROR_NO_DATA;
        }
@@ -529,25 +432,21 @@ int mv_image_object_get_label_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_object_clone_open(
-               mv_image_object_h src,
-               mv_image_object_h *dst)
+int mv_image_object_clone_open(mv_image_object_h src, mv_image_object_h *dst)
 {
        MEDIA_VISION_INSTANCE_CHECK(src);
        MEDIA_VISION_NULL_ARG_CHECK(dst);
 
-       (*dst) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject();
+       (*dst) = (mv_image_object_h) new (std::nothrow) MediaVision::Image::ImageObject();
        if (*dst == NULL)
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
 
-       *(MediaVision::Image::ImageObject*)(*dst) =
-                       *(MediaVision::Image::ImageObject*)src;
+       *(MediaVision::Image::ImageObject *) (*dst) = *(MediaVision::Image::ImageObject *) src;
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_object_save_open(
-               const char *file_name, mv_image_object_h image_object)
+int mv_image_object_save_open(const char *file_name, mv_image_object_h image_object)
 {
        MEDIA_VISION_INSTANCE_CHECK(image_object);
 
@@ -556,7 +455,7 @@ int mv_image_object_save_open(
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
 
-       int ret = ((MediaVision::Image::ImageObject*)image_object)->save(file_name);
+       int ret = ((MediaVision::Image::ImageObject *) image_object)->save(file_name);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to save image object.");
                return ret;
@@ -565,8 +464,7 @@ int mv_image_object_save_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_object_load_open(
-               const char *file_name, mv_image_object_h *image_object)
+int mv_image_object_load_open(const char *file_name, mv_image_object_h *image_object)
 {
        MEDIA_VISION_NULL_ARG_CHECK(image_object);
 
@@ -575,11 +473,11 @@ int mv_image_object_load_open(
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
 
-       (*image_object) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject();
+       (*image_object) = (mv_image_object_h) new (std::nothrow) MediaVision::Image::ImageObject();
        if (*image_object == NULL)
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
 
-       int ret = ((MediaVision::Image::ImageObject*)(*image_object))->load(file_name);
+       int ret = ((MediaVision::Image::ImageObject *) (*image_object))->load(file_name);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to save image object.");
                return ret;
@@ -588,34 +486,32 @@ int mv_image_object_load_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_tracking_model_create_open(
-               mv_image_tracking_model_h *image_tracking_model)
+int mv_image_tracking_model_create_open(mv_image_tracking_model_h *image_tracking_model)
 {
        MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model);
 
-       (*image_tracking_model) = (mv_image_tracking_model_h)
-                       new (std::nothrow)MediaVision::Image::ImageTrackingModel();
+       (*image_tracking_model) = (mv_image_tracking_model_h) new (std::nothrow) MediaVision::Image::ImageTrackingModel();
        if (*image_tracking_model == NULL)
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_tracking_model_set_target_open(
-               mv_image_object_h image_object,
-               mv_image_tracking_model_h image_tracking_model)
+int mv_image_tracking_model_set_target_open(mv_image_object_h image_object,
+                                                                                       mv_image_tracking_model_h image_tracking_model)
 {
        MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
        MEDIA_VISION_INSTANCE_CHECK(image_object);
 
-       if (((MediaVision::Image::ImageObject*)image_object)->isEmpty()) {
+       if (((MediaVision::Image::ImageObject *) image_object)->isEmpty()) {
                LOGE("[%s] Target is empty and can't be set as target of tracking"
-                               "model.", __FUNCTION__);
+                        "model.",
+                        __FUNCTION__);
                return MEDIA_VISION_ERROR_INVALID_DATA;
        }
 
-       int ret = ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->setTarget(
-                       *(MediaVision::Image::ImageObject*)image_object);
+       int ret = ((MediaVision::Image::ImageTrackingModel *) image_tracking_model)
+                                         ->setTarget(*(MediaVision::Image::ImageObject *) image_object);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to set target.");
                return ret;
@@ -624,52 +520,47 @@ int mv_image_tracking_model_set_target_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_tracking_model_destroy_open(
-               mv_image_tracking_model_h image_tracking_model)
+int mv_image_tracking_model_destroy_open(mv_image_tracking_model_h image_tracking_model)
 {
        MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
 
-       delete (MediaVision::Image::ImageTrackingModel*)image_tracking_model;
+       delete (MediaVision::Image::ImageTrackingModel *) image_tracking_model;
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_tracking_model_refresh_open(
-               mv_image_tracking_model_h image_tracking_model,
-               mv_engine_config_h /*engine_cfg*/)
+int mv_image_tracking_model_refresh_open(mv_image_tracking_model_h image_tracking_model,
+                                                                                mv_engine_config_h /*engine_cfg*/)
 {
        MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
 
-       if (!((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->isValid()) {
+       if (!((MediaVision::Image::ImageTrackingModel *) image_tracking_model)->isValid()) {
                LOGE("[%s] Image tracking model is invalid.", __FUNCTION__);
                return MEDIA_VISION_ERROR_INVALID_DATA;
        }
 
-       ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->refresh();
+       ((MediaVision::Image::ImageTrackingModel *) image_tracking_model)->refresh();
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_tracking_model_clone_open(
-               mv_image_tracking_model_h src,
-               mv_image_tracking_model_h *dst)
+int mv_image_tracking_model_clone_open(mv_image_tracking_model_h src, mv_image_tracking_model_h *dst)
 {
        MEDIA_VISION_INSTANCE_CHECK(src);
        MEDIA_VISION_NULL_ARG_CHECK(dst);
 
-       (*dst) = (mv_image_tracking_model_h)new (std::nothrow)MediaVision::Image::ImageTrackingModel();
+       (*dst) = (mv_image_tracking_model_h) new (std::nothrow) MediaVision::Image::ImageTrackingModel();
        if (*dst == NULL) {
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
 
-       *(MediaVision::Image::ImageTrackingModel*)(*dst) = *(MediaVision::Image::ImageTrackingModel*)src;
+       *(MediaVision::Image::ImageTrackingModel *) (*dst) = *(MediaVision::Image::ImageTrackingModel *) src;
 
        LOGD("Image tracking model has been successfully cloned");
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_tracking_model_save_open(
-               const char *file_name, mv_image_tracking_model_h image_tracking_model)
+int mv_image_tracking_model_save_open(const char *file_name, mv_image_tracking_model_h image_tracking_model)
 {
        MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
 
@@ -678,7 +569,7 @@ int mv_image_tracking_model_save_open(
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
 
-       int ret = ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->save(file_name);
+       int ret = ((MediaVision::Image::ImageTrackingModel *) image_tracking_model)->save(file_name);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Failed to save image tracking model");
                return ret;
@@ -688,8 +579,7 @@ int mv_image_tracking_model_save_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_image_tracking_model_load_open(
-               const char *file_name, mv_image_tracking_model_h *image_tracking_model)
+int mv_image_tracking_model_load_open(const char *file_name, mv_image_tracking_model_h *image_tracking_model)
 {
        MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model);
 
@@ -698,13 +588,12 @@ int mv_image_tracking_model_load_open(
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
 
-       (*image_tracking_model) = (mv_image_tracking_model_h)
-                       new (std::nothrow)MediaVision::Image::ImageTrackingModel();
+       (*image_tracking_model) = (mv_image_tracking_model_h) new (std::nothrow) MediaVision::Image::ImageTrackingModel();
 
        if (*image_tracking_model == NULL)
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
 
-       int ret = ((MediaVision::Image::ImageTrackingModel*)(*image_tracking_model))->load(file_name);
+       int ret = ((MediaVision::Image::ImageTrackingModel *) (*image_tracking_model))->load(file_name);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Failed to load image tracking model");
                return ret;
index 77932ec..847bbdf 100644 (file)
@@ -24,10 +24,13 @@ namespace mediavision
 {
 namespace common
 {
-class Context {
+class Context
+{
 public:
-       Context() { }
-       ~Context() { }
+       Context()
+       {}
+       ~Context()
+       {}
 
        std::map<std::string, void *> __tasks;
 };
index cacf827..1216ccf 100644 (file)
@@ -22,15 +22,15 @@ namespace mediavision
 namespace common
 {
 // T : parameter type, V : return type
-template <typename T, typename V>
-class ITask {
+template<typename T, typename V> class ITask
+{
 public:
-       virtual ~ITask() { };
+       virtual ~ITask() {};
        virtual void configure() = 0;
        virtual void prepare() = 0;
-	virtual void setInput(T& t) = 0;
+       virtual void setInput(T &t) = 0;
        virtual void perform() = 0;
-	virtual V& getOutput() = 0;
+       virtual V &getOutput() = 0;
 };
 } // namespace
 } // namespace
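ITask pins every task to one lifecycle: configure(), prepare(), setInput(), perform(), then getOutput(). A minimal hypothetical implementation, only to illustrate the contract (assumes the ITask header is included; not part of this patch):

    // Hypothetical task that doubles an int via the ITask<T, V> lifecycle.
    class DoubleTask : public mediavision::common::ITask<int, int>
    {
    public:
        void configure() override {}
        void prepare() override {}
        void setInput(int &t) override { _in = t; }
        void perform() override { _out = _in * 2; }
        int &getOutput() override { return _out; }

    private:
        int _in {};
        int _out {};
    };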
index 1038c93..6bb857b 100644 (file)
 
 #include <mv_common.h>
 
-namespace mediavision {
-namespace machine_learning {
-namespace exception {
-
-class BaseException : public std::exception {
+namespace mediavision
+{
+namespace machine_learning
+{
+namespace exception
+{
+class BaseException : public std::exception
+{
 private:
        std::string _msg;
        int _errorType;
 
 public:
-       BaseException(std::string msg, int errorType) :
-               _msg(msg), _errorType(errorType) {}
+       BaseException(std::string msg, int errorType) : _msg(msg), _errorType(errorType)
+       {}
        ~BaseException() override = default;
 
-       const char* what() const noexcept override { return _msg.c_str(); }
-       int getError() const { return _errorType; }
+       const char *what() const noexcept override
+       {
+               return _msg.c_str();
+       }
+       int getError() const
+       {
+               return _errorType;
+       }
 };
 
-
-class InvalidParameter : public BaseException {
+class InvalidParameter : public BaseException
+{
 public:
-       InvalidParameter(std::string msg) :
-               BaseException("Invalid parameter: " + msg, MEDIA_VISION_ERROR_INVALID_PARAMETER) {}
+       InvalidParameter(std::string msg) : BaseException("Invalid parameter: " + msg, MEDIA_VISION_ERROR_INVALID_PARAMETER)
+       {}
        ~InvalidParameter() override = default;
 };
 
-class InvalidOperation : public BaseException {
+class InvalidOperation : public BaseException
+{
 public:
-       InvalidOperation(std::string msg, int errorType = MEDIA_VISION_ERROR_INVALID_OPERATION) :
-               BaseException("Invalid operation: " + msg, errorType) {}
-       ~InvalidOperation() final  = default;
+       InvalidOperation(std::string msg, int errorType = MEDIA_VISION_ERROR_INVALID_OPERATION)
+                       : BaseException("Invalid operation: " + msg, errorType)
+       {}
+       ~InvalidOperation() final = default;
 };
 
-class OutOfMemory : public BaseException {
+class OutOfMemory : public BaseException
+{
 public:
-       OutOfMemory(std::string msg) :
-               BaseException("Out of memory: " + msg, MEDIA_VISION_ERROR_OUT_OF_MEMORY) {}
+       OutOfMemory(std::string msg) : BaseException("Out of memory: " + msg, MEDIA_VISION_ERROR_OUT_OF_MEMORY)
+       {}
        ~OutOfMemory() final = default;
 };
 
-class NoData : public BaseException {
+class NoData : public BaseException
+{
 public:
-       NoData(std::string msg) :
-               BaseException("No Data: " + msg, MEDIA_VISION_ERROR_NO_DATA) {}
+       NoData(std::string msg) : BaseException("No Data: " + msg, MEDIA_VISION_ERROR_NO_DATA)
+       {}
        ~NoData() final = default;
 };
 
-
 }; // Exception
 }; // MachineLearning
 }; // Mediavision
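Each exception class pairs a message with the matching mv error code, so code at the C boundary can translate any thrown exception back into an int in a single catch. A hedged sketch of that translation (hypothetical entry point, not from this patch):

    using namespace mediavision::machine_learning::exception;

    int some_mv_entry_point(void *handle)
    {
        try {
            if (!handle)
                throw InvalidParameter("handle is NULL"); /* any failing operation */
            /* ... actual work ... */
        } catch (const BaseException &e) {
            LOGE("%s", e.what());
            return e.getError(); /* e.g. MEDIA_VISION_ERROR_INVALID_PARAMETER */
        }
        return MEDIA_VISION_ERROR_NONE;
    }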
index b2164ff..8862d3a 100644 (file)
 
 #include "inference_engine_common_impl.h"
 
-typedef struct {
+typedef struct
+{
        std::string layer_name;
        inference_engine_tensor_info tensor_info;
 } model_layer_info;
 
-class IBackboneModelInfo {
+class IBackboneModelInfo
+{
 public:
-       virtual ~IBackboneModelInfo() { }
+       virtual ~IBackboneModelInfo()
+       {}
 
-	virtual std::vector<model_layer_info>& GetInputLayerInfo() = 0;
-	virtual std::vector<model_layer_info>& GetOutputLayerInfo() = 0;
+       virtual std::vector<model_layer_info> &GetInputLayerInfo() = 0;
+       virtual std::vector<model_layer_info> &GetOutputLayerInfo() = 0;
        virtual std::string GetModelFilePath() = 0;
 };
 
index e2cf317..8e54704 100644 (file)
@@ -19,7 +19,8 @@
 
 #include "backbone_model_info.h"
 
-class FaceNetInfo : public IBackboneModelInfo {
+class FaceNetInfo : public IBackboneModelInfo
+{
 private:
        std::vector<model_layer_info> _input_layer_info;
        std::vector<model_layer_info> _output_layer_info;
@@ -29,8 +30,8 @@ public:
        FaceNetInfo(std::string model_file_path);
        ~FaceNetInfo();
 
-	std::vector<model_layer_info>& GetInputLayerInfo() override;
-	std::vector<model_layer_info>& GetOutputLayerInfo() override;
+       std::vector<model_layer_info> &GetInputLayerInfo() override;
+       std::vector<model_layer_info> &GetOutputLayerInfo() override;
        std::string GetModelFilePath() override;
 };
 
index 47e7530..6dcfb3b 100644 (file)
@@ -38,10 +38,10 @@ namespace machine_learning
 {
 namespace face_recognition
 {
-
 namespace status
 {
-enum {
+enum
+{
        NONE = 0,
        INITIALIZED,
        REGISTERED,
@@ -52,7 +52,8 @@ enum {
 
 namespace mode
 {
-enum {
+enum
+{
        REGISTER = 0,
        INFERENCE,
        DELETE
@@ -61,7 +62,8 @@ enum {
 
 } // face_recognition
 
-typedef struct {
+typedef struct
+{
        unsigned int mode;
        std::unordered_map<mv_source_h, std::string> register_src;
        mv_source_h inference_src;
@@ -73,13 +75,15 @@ typedef struct {
  * @details Contains face recognition result such as label, label index, raw data,
  *          and raw data count.
  */
-typedef struct {
+typedef struct
+{
        unsigned int label_idx; /**< label index of label file. */
        std::vector<float> raw_data; /**< raw data to each label. */
        std::string label; /**< label string. */
 } mv_face_recognition_result_s;
 
-typedef struct {
+typedef struct
+{
        mv_inference_target_device_e training_target_device_type;
        mv_inference_backend_type_e training_engine_backend_type;
        mv_inference_target_device_e inference_target_device_type;
@@ -93,7 +97,8 @@ typedef struct {
        double decision_threshold;
 } FaceRecognitionConfig;
 
-class FaceRecognition {
+class FaceRecognition
+{
 private:
        unsigned int _status;
        std::unique_ptr<mediavision::inference::Inference> _internal;
@@ -103,32 +108,33 @@ private:
        std::unique_ptr<LabelManager> _label_manager;
        FaceRecognitionConfig _config;
        mv_face_recognition_result_s _result;
-       std::vector<std::unique_ptr<DataAugment>> _data_augments;
-
+       std::vector<std::unique_ptr<DataAugment> > _data_augments;
 
        // FYI. This function should be called every time a new face is registered.
        void ImportLabel();
-       void CheckFeatureVectorFile(std::unique_ptr<FeatureVectorManager>& old_fvm, std::unique_ptr<FeatureVectorManager>& new_fvm);
+       void CheckFeatureVectorFile(std::unique_ptr<FeatureVectorManager> &old_fvm,
+                                                               std::unique_ptr<FeatureVectorManager> &new_fvm);
        std::unique_ptr<DataSetManager> CreateDSM(const mv_inference_backend_type_e backend_type);
-       std::unique_ptr<FeatureVectorManager> CreateFVM(const mv_inference_backend_type_e backend_type, std::string file_name);
-       void UpdateDataSet(std::unique_ptr<DataSetManager>& data_set, std::vector<float>& feature_vec, const int label_idx, const int label_cnt);
-       void UpdateDataSet(std::unique_ptr<DataSetManager>& data_set);
+       std::unique_ptr<FeatureVectorManager> CreateFVM(const mv_inference_backend_type_e backend_type,
+                                                                                                       std::string file_name);
+       void UpdateDataSet(std::unique_ptr<DataSetManager> &data_set, std::vector<float> &feature_vec, const int label_idx,
+                                          const int label_cnt);
+       void UpdateDataSet(std::unique_ptr<DataSetManager> &data_set);
        int GetAnswer();
-	std::vector<model_layer_info>& GetBackboneInputLayerInfo();
-	int GetVecFromMvSource(mv_source_h img_src, std::vector<float>& out_vec);
+       std::vector<model_layer_info> &GetBackboneInputLayerInfo();
+       int GetVecFromMvSource(mv_source_h img_src, std::vector<float> &out_vec);
 
 public:
        FaceRecognition();
-       ~ FaceRecognition();
+       ~FaceRecognition();
 
        int Initialize();
-	void SetConfig(FaceRecognitionConfig& config);
+       void SetConfig(FaceRecognitionConfig &config);
        int RegisterNewFace(mv_source_h img_src, std::string label_name);
        int RecognizeFace(mv_source_h img_src);
        int DeleteLabel(std::string label_name);
        int GetLabel(const char **out_label);
-       mv_face_recognition_result_s& GetResult();
-
+       mv_face_recognition_result_s &GetResult();
 };
 
 } // machine_learning
index 97cebbf..0839b80 100644 (file)
@@ -60,9 +60,8 @@ namespace mediavision
 {
 namespace machine_learning
 {
-
-template <typename T, typename V>
-class FaceRecognitionAdapter : public mediavision::common::ITask<T, V> {
+template<typename T, typename V> class FaceRecognitionAdapter : public mediavision::common::ITask<T, V>
+{
 private:
        std::unique_ptr<FaceRecognition> _face_recognition;
        mv_face_recognition_input_s _source;
@@ -72,16 +71,16 @@ public:
        FaceRecognitionAdapter();
        ~FaceRecognitionAdapter();
 
-	std::unique_ptr<MediaVision::Common::EngineConfig>& getConfig()
+       std::unique_ptr<MediaVision::Common::EngineConfig> &getConfig()
        {
                return _config;
        }
 
        void configure() override;
        void prepare() override;
-	void setInput(T& t) override;
+       void setInput(T &t) override;
        void perform() override;
-	V& getOutput() override;
+       V &getOutput() override;
 };
 
 } // machine_learning
index 5055ec5..b698d27 100644 (file)
 #include <mv_face_recognition_type.h>
 
 #ifdef __cplusplus
-extern "C"
-{
+extern "C" {
 #endif /* __cplusplus */
 
-       /**
+/**
         * @brief Create face recognition object handle.
         * @details Use this function to create an face recognition object handle.
         *          After creation the handle has to be prepared with
@@ -48,9 +47,9 @@ extern "C"
         *
         * @see mv_face_recognition_destroy_open()
         */
-       int mv_face_recognition_create_open(mv_face_recognition_h *out_handle);
+int mv_face_recognition_create_open(mv_face_recognition_h *out_handle);
 
-       /**
+/**
         * @brief Destroy face recognition handle and releases all its resources.
         *
         * @since_tizen 7.0
@@ -65,9 +64,9 @@ extern "C"
         *
         * @see mv_face_recognition_create_open()
         */
-       int mv_face_recognition_destroy_open(mv_face_recognition_h handle);
+int mv_face_recognition_destroy_open(mv_face_recognition_h handle);
 
-       /**
+/**
         * @brief Prepare face recognition.
         *
         * @since_tizen 7.0
@@ -82,9 +81,9 @@ extern "C"
         *
         * @see mv_face_recognition_create_open()
         */
-       int mv_face_recognition_prepare_open(mv_face_recognition_h handle);
+int mv_face_recognition_prepare_open(mv_face_recognition_h handle);
 
-       /**
+/**
         * @brief Register a new face on the @a source
         * @details Use this function to register a new face.
         *          Each time when this function is called, a new face on the media source
@@ -107,9 +106,9 @@ extern "C"
         * @pre Create an face recognition handle by calling @ref mv_face_recognition_create_open()
         * @pre Prepare an face recognition by calling @ref mv_face_recognition_prepare_open()
         */
-       int mv_face_recognition_register_open(mv_face_recognition_h handle, mv_source_h source, const char *label);
+int mv_face_recognition_register_open(mv_face_recognition_h handle, mv_source_h source, const char *label);
 
-       /**
+/**
         * @brief Unregister a new face on the @a source
         * @details Use this function to unregister a given label.
         *          Each time when this function is called, all data related to the label
@@ -130,9 +129,9 @@ extern "C"
         * @pre Create an face recognition handle by calling @ref mv_face_recognition_create_open()
         * @pre Prepare an face recognition by calling @ref mv_face_recognition_prepare_open()
         */
-       int mv_face_recognition_unregister_open(mv_face_recognition_h handle, const char *label);
+int mv_face_recognition_unregister_open(mv_face_recognition_h handle, const char *label);
 
-       /**
+/**
         * @brief Inference with a given face on the @a source
         * @details Use this function to inference with a given source.
         *
@@ -154,9 +153,9 @@ extern "C"
         * @pre Prepare an face recognition by calling @ref mv_face_recognition_prepare_open()
         * @pre Register a new face by calling @ref mv_face_recognition_register_open()
         */
-       int mv_face_recognition_inference_open(mv_face_recognition_h handle, mv_source_h source);
+int mv_face_recognition_inference_open(mv_face_recognition_h handle, mv_source_h source);
 
-       /**
+/**
         * @brief Get a label name and store it to @a out_label.
         * @details Use this function to get a label name after calling mv_face_recognition_inference_open function.
         *
@@ -172,7 +171,7 @@ extern "C"
         *
         * @pre Request a inference by calling @ref mv_face_recognition_inference_open()
         */
-       int mv_face_recognition_get_label_open(mv_face_recognition_h handle, const char **out_label);
+int mv_face_recognition_get_label_open(mv_face_recognition_h handle, const char **out_label);
 
 #ifdef __cplusplus
 }
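Read together, the doc comments above prescribe a fixed call order: create, prepare, register one or more labeled faces, run inference, then fetch the label. A condensed sketch of that sequence (return-code checks shortened; the mv_source_h inputs are assumed to exist):

    mv_face_recognition_h handle = NULL;
    const char *label = NULL;

    mv_face_recognition_create_open(&handle);
    mv_face_recognition_prepare_open(handle);

    /* face_source and frame_source are assumed mv_source_h handles. */
    mv_face_recognition_register_open(handle, face_source, "alice");

    if (mv_face_recognition_inference_open(handle, frame_source) == MEDIA_VISION_ERROR_NONE) {
        mv_face_recognition_get_label_open(handle, &label);
        LOGD("recognized: %s", label);
    }

    mv_face_recognition_unregister_open(handle, "alice");
    mv_face_recognition_destroy_open(handle);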
index fda9cc2..a2ece91 100644 (file)
 #ifndef __NNTRAINER_DSM_H__
 #define __NNTRAINER_DSM_H__
 
-
 #include <string>
 
 #include "feature_vector_manager.h"
 #include "data_set_manager.h"
 
-class NNTrainerDSM : public DataSetManager {
+class NNTrainerDSM : public DataSetManager
+{
 private:
-	void PrintHeader(FeaVecHeader& fvh);
+       void PrintHeader(FeaVecHeader &fvh);
 
 public:
        NNTrainerDSM();
index f2647ec..c410fa3 100644 (file)
 #include "feature_vector_manager.h"
 #include "file_util.h"
 
-class NNTrainerFVM : public FeatureVectorManager {
+class NNTrainerFVM : public FeatureVectorManager
+{
 public:
        NNTrainerFVM(const std::string feature_vector_file = "feature_vector_file.dat");
        ~NNTrainerFVM() = default;
 
-       void WriteHeader(size_t feature_size, size_t one_hot_table_size, unsigned int  data_set_cnt) override;
-	void ReadHeader(FeaVecHeader& header) override;
-	void WriteFeatureVec(std::vector<float>& feature_vec, const int max_label, const int label_index) override;
+       void WriteHeader(size_t feature_size, size_t one_hot_table_size, unsigned int data_set_cnt) override;
+       void ReadHeader(FeaVecHeader &header) override;
+       void WriteFeatureVec(std::vector<float> &feature_vec, const int max_label, const int label_index) override;
        void Remove() override;
 };
 
index d2e025d..dc5d63b 100644
 #include <mv_inference_type.h>
 #include "training_model.h"
 
-class SimpleShot : public TrainingModel {
+class SimpleShot : public TrainingModel
+{
 private:
        TrainingEngineBackendInfo _engine_info;
+
 private:
        void SaveModel(const std::string file_path) override;
        void RemoveModel(const std::string file_path) override;
+
 public:
        SimpleShot(const mv_inference_backend_type_e backend_type = MV_INFERENCE_BACKEND_NNTRAINER,
                           const mv_inference_target_device_e target_type = MV_INFERENCE_TARGET_DEVICE_CPU,
@@ -34,7 +37,7 @@ public:
 
        // Configure layers for SimpleShot learning.
        void ConfigureModel(int num_of_class) override;
-	TrainingEngineBackendInfo& GetTrainingEngineInfo() override;
+       TrainingEngineBackendInfo &GetTrainingEngineInfo() override;
 };
 
 #endif
\ No newline at end of file
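For orientation, FaceRecognition::Initialize() further down in this patch constructs this class with a backend type, target device, and internal model path. A sketch of that construction; the model path and the class count passed to ConfigureModel() are illustrative:

#include <memory>

// Sketch only; mirrors the make_unique<SimpleShot>(...) call in FaceRecognition::Initialize().
static void configure_simpleshot()
{
	auto model = std::make_unique<SimpleShot>(MV_INFERENCE_BACKEND_NNTRAINER, MV_INFERENCE_TARGET_DEVICE_CPU,
						  "/path/to/model.bin" /* illustrative */);

	model->ConfigureModel(3 /* assumed number of registered labels */);

	TrainingEngineBackendInfo &info = model->GetTrainingEngineInfo();
	(void) info; // filled in by the constructor: backend, target device, layer names, tensor info
}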
index 8224787..888e51b 100644
@@ -22,12 +22,9 @@ FaceNetInfo::FaceNetInfo(string model_file_path)
 {
        _model_file_path = model_file_path;
 
-       const string input_layer_name = {  "input_1" };
+       const string input_layer_name = { "input_1" };
        const inference_engine_tensor_info input_tensor_info = {
-               { 160, 160, 3, 1 },
-               INFERENCE_TENSOR_SHAPE_NCHW,
-               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-               (size_t)(1 * 3 * 160 * 160)
+               { 160, 160, 3, 1 }, INFERENCE_TENSOR_SHAPE_NCHW, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, (size_t)(1 * 3 * 160 * 160)
        };
 
        model_layer_info input_info = { input_layer_name, input_tensor_info };
@@ -35,10 +32,7 @@ FaceNetInfo::FaceNetInfo(string model_file_path)
 
        const string output_layer_name = { "normalize/l2_normalize" };
        const inference_engine_tensor_info output_tensor_info = {
-               { 512, 1, 1, 1 },
-               INFERENCE_TENSOR_SHAPE_NCHW,
-               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-               (size_t)(1 * 512)
+               { 512, 1, 1, 1 }, INFERENCE_TENSOR_SHAPE_NCHW, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, (size_t)(1 * 512)
        };
 
        model_layer_info output_info = { output_layer_name, output_tensor_info };
@@ -56,12 +50,12 @@ string FaceNetInfo::GetModelFilePath()
        return _model_file_path;
 }
 
-vector<model_layer_info>& FaceNetInfo::GetInputLayerInfo()
+vector<model_layer_info> &FaceNetInfo::GetInputLayerInfo()
 {
        return _input_layer_info;
 }
 
-vector<model_layer_info>& FaceNetInfo::GetOutputLayerInfo()
+vector<model_layer_info> &FaceNetInfo::GetOutputLayerInfo()
 {
        return _output_layer_info;
 }
\ No newline at end of file
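The trailing field of each inference_engine_tensor_info initializer above is the element count implied by the shape: 1 * 3 * 160 * 160 = 76800 for the input tensor and 1 * 512 = 512 for the output tensor. A hypothetical helper that derives the count from the shape instead of hard-coding it (not part of this patch):

#include <cstddef>
#include <vector>

// Hypothetical: element count implied by a shape vector such as { 160, 160, 3, 1 }.
static size_t element_count(const std::vector<size_t> &shape)
{
	size_t count = 1;
	for (size_t dim : shape)
		count *= dim;
	return count; // { 160, 160, 3, 1 } -> 76800, { 512, 1, 1, 1 } -> 512
}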
index 41a71ed..678ecd3 100644
@@ -45,9 +45,15 @@ namespace mediavision
 {
 namespace machine_learning
 {
-
-FaceRecognition::FaceRecognition() :
-               _status(NONE), _internal(), _backbone(), _backbone_model_info(), _training_model(), _label_manager(), _config(), _result()
+FaceRecognition::FaceRecognition()
+               : _status(NONE)
+               , _internal()
+               , _backbone()
+               , _backbone_model_info()
+               , _training_model()
+               , _label_manager()
+               , _config()
+               , _result()
 {
        _data_augments.push_back(std::make_unique<DataAugmentDefault>());
        /* Add other data argument classes. */
@@ -59,7 +65,8 @@ FaceRecognition::~FaceRecognition()
                _label_manager->Clear();
 }
 
-void FaceRecognition::CheckFeatureVectorFile(unique_ptr<FeatureVectorManager>& old_fvm, unique_ptr<FeatureVectorManager>& new_fvm)
+void FaceRecognition::CheckFeatureVectorFile(unique_ptr<FeatureVectorManager> &old_fvm,
+                                                                                        unique_ptr<FeatureVectorManager> &new_fvm)
 {
	// Rename the new feature vector file to the existing one. This covers the case where the current process
	// was terminated right after the existing feature vector file had been removed but before the new one was renamed.
@@ -82,7 +89,7 @@ void FaceRecognition::CheckFeatureVectorFile(unique_ptr<FeatureVectorManager>& o
 
 unique_ptr<DataSetManager> FaceRecognition::CreateDSM(const mv_inference_backend_type_e backend_type)
 {
-       switch(backend_type) {
+       switch (backend_type) {
        case MV_INFERENCE_BACKEND_NNTRAINER:
                return make_unique<NNTrainerDSM>();
        default:
@@ -92,9 +99,10 @@ unique_ptr<DataSetManager> FaceRecognition::CreateDSM(const mv_inference_backend
        throw InvalidParameter("Invalid training engine backend type.");
 }
 
-unique_ptr<FeatureVectorManager> FaceRecognition::CreateFVM(const mv_inference_backend_type_e backend_type, string file_name)
+unique_ptr<FeatureVectorManager> FaceRecognition::CreateFVM(const mv_inference_backend_type_e backend_type,
+                                                                                                                       string file_name)
 {
-       switch(backend_type) {
+       switch (backend_type) {
        case MV_INFERENCE_BACKEND_NNTRAINER:
                return make_unique<NNTrainerFVM>(file_name);
        default:
@@ -104,7 +112,8 @@ unique_ptr<FeatureVectorManager> FaceRecognition::CreateFVM(const mv_inference_b
        throw InvalidParameter("Invalid training engine backend type.");
 }
 
-void FaceRecognition::UpdateDataSet(unique_ptr<DataSetManager>& data_set, vector<float>& feature_vec, const int label_idx, const int label_cnt)
+void FaceRecognition::UpdateDataSet(unique_ptr<DataSetManager> &data_set, vector<float> &feature_vec,
+                                                                       const int label_idx, const int label_cnt)
 {
        size_t data_set_cnt = 0;
 
@@ -124,7 +133,7 @@ void FaceRecognition::UpdateDataSet(unique_ptr<DataSetManager>& data_set, vector
                if (FaceRecogUtil::IsFileExist(fvm->GetFileName())) {
                        data_set->LoadDataSet(fvm->GetFileName());
 
-                       vector<vector<float>> feature_vectors = data_set->GetData();
+                       vector<vector<float> > feature_vectors = data_set->GetData();
                        vector<unsigned int> label_idx_vectors = data_set->GetLabelIdx();
 
                        // 1) Write existing feature vectors and its one-hot encoding table considered
@@ -165,13 +174,13 @@ void FaceRecognition::UpdateDataSet(unique_ptr<DataSetManager>& data_set, vector
 
                data_set->Clear();
                data_set->LoadDataSet(fvm->GetFileName());
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                throw e;
        }
 }
 
-void FaceRecognition::UpdateDataSet(unique_ptr<DataSetManager>& data_set)
+void FaceRecognition::UpdateDataSet(unique_ptr<DataSetManager> &data_set)
 {
        try {
                data_set = CreateDSM(_config.training_engine_backend_type);
@@ -182,23 +191,23 @@ void FaceRecognition::UpdateDataSet(unique_ptr<DataSetManager>& data_set)
                        throw InvalidOperation("Feature vector file not found.");
 
                data_set->LoadDataSet(fvm->GetFileName());
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                throw e;
        }
 }
 
-void FaceRecognition::SetConfig(FaceRecognitionConfig& config)
+void FaceRecognition::SetConfig(FaceRecognitionConfig &config)
 {
        _config = config;
 }
 
-std::vector<model_layer_info>& FaceRecognition::GetBackboneInputLayerInfo()
+std::vector<model_layer_info> &FaceRecognition::GetBackboneInputLayerInfo()
 {
        return _backbone_model_info->GetInputLayerInfo();
 }
 
-int FaceRecognition::GetVecFromMvSource(mv_source_h img_src, std::vector<float>& out_vec)
+int FaceRecognition::GetVecFromMvSource(mv_source_h img_src, std::vector<float> &out_vec)
 {
        mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
        unsigned int width = 0, height = 0, bufferSize = 0;
@@ -208,7 +217,7 @@ int FaceRecognition::GetVecFromMvSource(mv_source_h img_src, std::vector<float>&
                mv_source_get_height(img_src, &height) != MEDIA_VISION_ERROR_NONE ||
                mv_source_get_colorspace(img_src, &colorspace) != MEDIA_VISION_ERROR_NONE ||
                mv_source_get_buffer(img_src, &buffer, &bufferSize))
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
 
        // TODO. Let's support various color spaces.
 
@@ -217,7 +226,7 @@ int FaceRecognition::GetVecFromMvSource(mv_source_h img_src, std::vector<float>&
                return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
        }
 
-	vector<model_layer_info>& input_layer_info = GetBackboneInputLayerInfo();
+       vector<model_layer_info> &input_layer_info = GetBackboneInputLayerInfo();
        // TODO. consider multiple tensor info.
        size_t re_width = input_layer_info[0].tensor_info.shape[0];
        size_t re_height = input_layer_info[0].tensor_info.shape[1];
@@ -246,12 +255,12 @@ int FaceRecognition::Initialize()
        vector<string> input_layer_names, output_layer_names;
        vector<inference_engine_tensor_info> input_tensor_info, output_tensor_info;
 
-	for (auto& input : _backbone_model_info->GetInputLayerInfo()) {
+       for (auto &input : _backbone_model_info->GetInputLayerInfo()) {
                input_layer_names.push_back(input.layer_name);
                input_tensor_info.push_back(input.tensor_info);
        }
 
-	for (auto& output : _backbone_model_info->GetOutputLayerInfo()) {
+       for (auto &output : _backbone_model_info->GetOutputLayerInfo()) {
                output_layer_names.push_back(output.layer_name);
                output_tensor_info.push_back(output.tensor_info);
        }
@@ -264,7 +273,7 @@ int FaceRecognition::Initialize()
                return ret;
 
        // Tensor order is NCHW.
-	vector<model_layer_info>& input_layer_info = GetBackboneInputLayerInfo();
+       vector<model_layer_info> &input_layer_info = GetBackboneInputLayerInfo();
        size_t width = input_layer_info[0].tensor_info.shape[0];
        size_t height = input_layer_info[0].tensor_info.shape[1];
        size_t ch = input_layer_info[0].tensor_info.shape[2];
@@ -277,9 +286,8 @@ int FaceRecognition::Initialize()
        if (ret != MEDIA_VISION_ERROR_NONE)
                return ret;
 
-       _training_model = make_unique<SimpleShot>(_config.training_engine_backend_type,
-                                                                                               _config.training_target_device_type,
-                                                                                               _config.internal_model_file_path);
+       _training_model = make_unique<SimpleShot>(_config.training_engine_backend_type, _config.training_target_device_type,
+                                                                                         _config.internal_model_file_path);
 
        _internal = make_unique<Inference>();
 
@@ -287,7 +295,6 @@ int FaceRecognition::Initialize()
        if (ret != MEDIA_VISION_ERROR_NONE)
                return ret;
 
-
        _status = INITIALIZED;
 
        return MEDIA_VISION_ERROR_NONE;
@@ -304,7 +311,7 @@ void FaceRecognition::ImportLabel()
                int cnt = _label_manager->ImportLabel();
 
                LOGD("%d labels have been imported", cnt);
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                throw e;
        }
@@ -312,7 +319,7 @@ void FaceRecognition::ImportLabel()
 
 int FaceRecognition::RegisterNewFace(mv_source_h img_src, string label_name)
 {
-	vector<model_layer_info>& output_layer_info = _backbone_model_info->GetOutputLayerInfo();
+       vector<model_layer_info> &output_layer_info = _backbone_model_info->GetOutputLayerInfo();
 
        if (_status < INITIALIZED) {
                LOGE("Initialization not ready yet. (%u)", _status);
@@ -325,7 +332,7 @@ int FaceRecognition::RegisterNewFace(mv_source_h img_src, string label_name)
                ImportLabel();
 
		// 1. Store only non-duplicated label names to the label file.
-               bool duplicated  = _label_manager->AddLabelToMap(label_name, label_name);
+               bool duplicated = _label_manager->AddLabelToMap(label_name, label_name);
                if (!duplicated) {
                        int ret = _label_manager->AddLabelToFile(label_name);
                        if (ret == 0)
@@ -344,7 +351,8 @@ int FaceRecognition::RegisterNewFace(mv_source_h img_src, string label_name)
                // 2. Get feature vector from a given vec through inference engine.
                // Ps. output layer size should be 1.
                TensorBuffer tensorBuffer = _backbone->GetOutputTensorBuffer();
-               inference_engine_tensor_buffer *backbone_output_buffer = tensorBuffer.getTensorBuffer(output_layer_info[0].layer_name);
+               inference_engine_tensor_buffer *backbone_output_buffer =
+                               tensorBuffer.getTensorBuffer(output_layer_info[0].layer_name);
                if (!backbone_output_buffer) {
                        LOGE("fail to get output tensor buffer.");
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -372,7 +380,7 @@ int FaceRecognition::RegisterNewFace(mv_source_h img_src, string label_name)
                // again in this case. So make sure to clear previous data set before next training.
                _training_model->ClearDataSet(data_set);
                _status = REGISTERED;
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
        }
@@ -387,7 +395,7 @@ int FaceRecognition::GetAnswer()
        string result_str;
 
        try {
-		for (auto& r : _result.raw_data)
+               for (auto &r : _result.raw_data)
                        result_str += to_string(r) + " ";
 
                LOGD("raw data = %s", result_str.c_str());
@@ -402,7 +410,7 @@ int FaceRecognition::GetAnswer()
                float weighted = _result.raw_data[answer_idx] * _label_manager->GetDecisionWeight();
 
                // Check decision weight threshold.
-		for (auto& r : _result.raw_data) {
+               for (auto &r : _result.raw_data) {
                        if (_result.raw_data[answer_idx] == r)
                                continue;
 
@@ -411,7 +419,7 @@ int FaceRecognition::GetAnswer()
                }
 
                _result.label_idx = answer_idx;
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
        }
@@ -432,10 +440,10 @@ int FaceRecognition::RecognizeFace(mv_source_h img_src)
        }
 
        TrainingEngineBackendInfo engine_info = _training_model->GetTrainingEngineInfo();
-	vector<string>& input_layers = engine_info.input_layer_names;
-	vector<inference_engine_tensor_info>& input_tensor_info = engine_info.input_tensor_info;
-	vector<string>& output_layers = engine_info.output_layer_names;
-	vector<inference_engine_tensor_info>& output_tensor_info = engine_info.output_tensor_info;
+       vector<string> &input_layers = engine_info.input_layer_names;
+       vector<inference_engine_tensor_info> &input_tensor_info = engine_info.input_tensor_info;
+       vector<string> &output_layers = engine_info.output_layer_names;
+       vector<inference_engine_tensor_info> &output_tensor_info = engine_info.output_tensor_info;
 
	// Face recognition has the following steps
        // ------------------------------------
@@ -453,7 +461,8 @@ int FaceRecognition::RecognizeFace(mv_source_h img_src)
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
                }
 
-               if (_backbone_model_info->GetOutputLayerInfo().empty() || _backbone_model_info->GetOutputLayerInfo().size() > 1) {
+               if (_backbone_model_info->GetOutputLayerInfo().empty() ||
+                       _backbone_model_info->GetOutputLayerInfo().size() > 1) {
                        LOGE("Invalid output layer size - output layer size should be 1.");
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
                }
@@ -469,10 +478,11 @@ int FaceRecognition::RecognizeFace(mv_source_h img_src)
                }
 
                // Get output layer info for facenet model.
-		vector<model_layer_info>& output_layer_info = _backbone_model_info->GetOutputLayerInfo();
+               vector<model_layer_info> &output_layer_info = _backbone_model_info->GetOutputLayerInfo();
                // Get output tensor buffer to the output layer.
                TensorBuffer tensorBuffer = _backbone->GetOutputTensorBuffer();
-               inference_engine_tensor_buffer *backbone_output_buffer = tensorBuffer.getTensorBuffer(output_layer_info[0].layer_name);
+               inference_engine_tensor_buffer *backbone_output_buffer =
+                               tensorBuffer.getTensorBuffer(output_layer_info[0].layer_name);
                if (!backbone_output_buffer) {
                        LOGE("fail to get backbone output tensor buffer.");
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -522,7 +532,7 @@ int FaceRecognition::RecognizeFace(mv_source_h img_src)
                _status = INFERENCED;
 
                return GetAnswer();
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
        }
@@ -564,7 +574,7 @@ int FaceRecognition::DeleteLabel(string label_name)
 
                data_set->LoadDataSet(fvm->GetFileName());
 
-               vector<vector<float>> feature_vectors = data_set->GetData();
+               vector<vector<float> > feature_vectors = data_set->GetData();
                vector<unsigned int> label_idx_vectors = data_set->GetLabelIdx();
 
                size_t data_set_cnt = 0;
@@ -630,7 +640,7 @@ int FaceRecognition::DeleteLabel(string label_name)
                }
 
                _status = DELETED;
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
        }
@@ -647,7 +657,7 @@ int FaceRecognition::GetLabel(const char **out_label)
 
        try {
                _label_manager->GetLabelString(_result.label, _result.label_idx);
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
        }
@@ -657,20 +667,20 @@ int FaceRecognition::GetLabel(const char **out_label)
        return MEDIA_VISION_ERROR_NONE;
 }
 
- mv_face_recognition_result_s& FaceRecognition::GetResult()
- {
+mv_face_recognition_result_s &FaceRecognition::GetResult()
+{
        if (!_label_manager)
                throw NoData("Label file doesn't exist.");
 
        try {
                _label_manager->GetLabelString(_result.label, _result.label_idx);
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                throw e;
        }
 
        return _result;
- }
+}
 
 } // machine_learning
 } // mediavision
\ No newline at end of file
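GetAnswer() above takes the arg-max of the raw scores, scales it by the label manager's decision weight, and compares the weighted value against every other score. A standalone restatement of that selection; the reject path is an assumption, since the hunk cuts off what happens when the comparison fails:

#include <algorithm>
#include <vector>

// Returns the winning label index, or -1 when the weighted winner is beaten (assumed semantics).
static int pick_answer(const std::vector<float> &raw_data, float decision_weight)
{
	if (raw_data.empty())
		return -1;

	auto max_it = std::max_element(raw_data.begin(), raw_data.end());
	int answer_idx = static_cast<int>(std::distance(raw_data.begin(), max_it));
	float weighted = raw_data[answer_idx] * decision_weight;

	for (float r : raw_data) {
		if (raw_data[answer_idx] == r)
			continue; // skip the winner (and any tie with it), as the original loop does
		if (weighted < r)
			return -1; // another score beats the weighted winner
	}

	return answer_idx;
}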
index dba1829..2fcca3a 100644
@@ -17,7 +17,7 @@
 #include "machine_learning_exception.h"
 #include "face_recognition_adapter.h"
 
-#define FACE_RECOGNITION_META_FILE_NAME        "face_recognition.json"
+#define FACE_RECOGNITION_META_FILE_NAME "face_recognition.json"
 
 using namespace std;
 using namespace MediaVision::Common;
@@ -28,27 +28,19 @@ namespace mediavision
 {
 namespace machine_learning
 {
-
-template <typename T, typename V>
-FaceRecognitionAdapter<T, V>::FaceRecognitionAdapter()
+template<typename T, typename V> FaceRecognitionAdapter<T, V>::FaceRecognitionAdapter()
 {
        _face_recognition = make_unique<FaceRecognition>();
 }
 
-template <typename T, typename V>
-FaceRecognitionAdapter<T, V>::~FaceRecognitionAdapter()
-{
-
-}
+template<typename T, typename V> FaceRecognitionAdapter<T, V>::~FaceRecognitionAdapter()
+{}
 
-template <typename T, typename V>
-void FaceRecognitionAdapter<T, V>::configure()
+template<typename T, typename V> void FaceRecognitionAdapter<T, V>::configure()
 {
-       _config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) +
-                                                                               string(FACE_RECOGNITION_META_FILE_NAME));
+       _config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + string(FACE_RECOGNITION_META_FILE_NAME));
        string backboneModelFilePath;
-       int ret = _config->getStringAttribute(string(MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH),
-                                                                                       &backboneModelFilePath);
+       int ret = _config->getStringAttribute(string(MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH), &backboneModelFilePath);
        if (ret != MEDIA_VISION_ERROR_NONE)
                throw InvalidParameter("Failed to get an attribute");
 
@@ -56,16 +48,14 @@ void FaceRecognitionAdapter<T, V>::configure()
 
        string defaultPath;
 
-       ret = _config->getStringAttribute(string(MV_FACE_RECOGNITION_DEFAULT_PATH),
-                                                                               &defaultPath);
+       ret = _config->getStringAttribute(string(MV_FACE_RECOGNITION_DEFAULT_PATH), &defaultPath);
        if (ret != MEDIA_VISION_ERROR_NONE)
                throw InvalidOperation("Fail to get default path.");
 
        LOGD("Default path : %s", defaultPath.c_str());
 
        double decisionThreshold = 0.0f;
-       ret = _config->getDoubleAttribute(string(MV_FACE_RECOGNITION_DECISION_THRESHOLD),
-                                                                               &decisionThreshold);
+       ret = _config->getDoubleAttribute(string(MV_FACE_RECOGNITION_DECISION_THRESHOLD), &decisionThreshold);
        if (ret != MEDIA_VISION_ERROR_NONE)
                throw InvalidOperation("Fail to get default decision threshold value.");
 
@@ -86,25 +76,22 @@ void FaceRecognitionAdapter<T, V>::configure()
        _face_recognition->SetConfig(config);
 }
 
-template <typename T, typename V>
-void FaceRecognitionAdapter<T, V>::prepare()
+template<typename T, typename V> void FaceRecognitionAdapter<T, V>::prepare()
 {
        int ret = _face_recognition->Initialize();
        if (ret != MEDIA_VISION_ERROR_NONE)
                throw InvalidOperation("Fail to initialize face recognition.");
 }
 
-template <typename T, typename V>
-void FaceRecognitionAdapter<T, V>::setInput(T& t)
+template<typename T, typename V> void FaceRecognitionAdapter<T, V>::setInput(T &t)
 {
        _source = t;
 }
 
-template <typename T, typename V>
-void FaceRecognitionAdapter<T, V>::perform()
+template<typename T, typename V> void FaceRecognitionAdapter<T, V>::perform()
 {
        if (_source.mode == mode::REGISTER) {
-		for (auto& s : _source.register_src) {
+               for (auto &s : _source.register_src) {
                        int ret = _face_recognition->RegisterNewFace(s.first, s.second);
                        if (ret != MEDIA_VISION_ERROR_NONE)
                                throw InvalidOperation("Fail to register new face.");
@@ -118,14 +105,14 @@ void FaceRecognitionAdapter<T, V>::perform()
                if (ret == MEDIA_VISION_ERROR_NO_DATA)
                        throw NoData("Label not found.");
 
-               if  (ret != MEDIA_VISION_ERROR_NONE)
+               if (ret != MEDIA_VISION_ERROR_NONE)
                        throw InvalidOperation("Fail to request a recognition.");
 
                return;
        }
 
        if (_source.mode == mode::DELETE) {
-		for (auto& l : _source.labels) {
+               for (auto &l : _source.labels) {
                        int ret = _face_recognition->DeleteLabel(l);
                        if (ret != MEDIA_VISION_ERROR_NONE)
                                throw InvalidOperation("Fail to unregister a given label.");
@@ -135,8 +122,7 @@ void FaceRecognitionAdapter<T, V>::perform()
        }
 }
 
-template <typename T, typename V>
-V& FaceRecognitionAdapter<T, V>::getOutput()
+template<typename T, typename V> V &FaceRecognitionAdapter<T, V>::getOutput()
 {
        return _face_recognition->GetResult();
 }
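Every public entry point in mv_face_recognition_open.cpp below drives this adapter through the same sequence: configure(), prepare(), setInput(), perform(), getOutput(). A sketch of the REGISTER path using the input fields visible in perform(); the mv_source_h value and the label are illustrative, and the project headers are assumed to be included:

// Sketch only; mirrors the flow in mv_face_recognition_register_open() below.
static void register_one(mv_source_h source)
{
	FaceRecognitionAdapter<mv_face_recognition_input_s, mv_face_recognition_result_s> task;

	task.configure(); // loads attributes from face_recognition.json
	task.prepare();   // FaceRecognition::Initialize()

	mv_face_recognition_input_s input;
	input.mode = mode::REGISTER;
	input.register_src.insert(std::make_pair(source, std::string("alice")));
	task.setInput(input);
	task.perform();
}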
index 49ca8b4..1a7cd6a 100644
@@ -20,8 +20,7 @@
 
 int mv_face_recognition_create(mv_face_recognition_h *out_handle)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-               _mv_inference_face_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
 
        MEDIA_VISION_NULL_ARG_CHECK(out_handle);
 
@@ -38,8 +37,7 @@ int mv_face_recognition_create(mv_face_recognition_h *out_handle)
 
 int mv_face_recognition_destroy(mv_face_recognition_h handle)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-               _mv_inference_face_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
 
        MEDIA_VISION_INSTANCE_CHECK(handle);
 
@@ -56,8 +54,7 @@ int mv_face_recognition_destroy(mv_face_recognition_h handle)
 
 int mv_face_recognition_prepare(mv_face_recognition_h handle)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-               _mv_inference_face_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
 
        MEDIA_VISION_INSTANCE_CHECK(handle);
 
@@ -74,8 +71,7 @@ int mv_face_recognition_prepare(mv_face_recognition_h handle)
 
 int mv_face_recognition_register(mv_face_recognition_h handle, mv_source_h source, const char *label)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-               _mv_inference_face_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
 
        MEDIA_VISION_INSTANCE_CHECK(handle);
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -94,8 +90,7 @@ int mv_face_recognition_register(mv_face_recognition_h handle, mv_source_h sourc
 
 int mv_face_recognition_unregister(mv_face_recognition_h handle, const char *label)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-               _mv_inference_face_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
 
        MEDIA_VISION_INSTANCE_CHECK(handle);
        MEDIA_VISION_INSTANCE_CHECK(label);
@@ -113,8 +108,7 @@ int mv_face_recognition_unregister(mv_face_recognition_h handle, const char *lab
 
 int mv_face_recognition_inference(mv_face_recognition_h handle, mv_source_h source)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-               _mv_inference_face_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
 
        MEDIA_VISION_INSTANCE_CHECK(handle);
        MEDIA_VISION_INSTANCE_CHECK(source);
@@ -132,8 +126,7 @@ int mv_face_recognition_inference(mv_face_recognition_h handle, mv_source_h sour
 
 int mv_face_recognition_get_label(mv_face_recognition_h handle, const char **out_label)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-               _mv_inference_face_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
 
        MEDIA_VISION_INSTANCE_CHECK(handle);
        MEDIA_VISION_INSTANCE_CHECK(out_label);
index 9ac8351..5d305f3 100644
@@ -37,14 +37,14 @@ int mv_face_recognition_create_open(mv_face_recognition_h *handle)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       Context *context = new (nothrow)Context();
+       Context *context = new (nothrow) Context();
        if (!context) {
                LOGE("Fail to allocate a context.");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
 
-       FaceRecognitionTask *task =
-               new (nothrow)FaceRecognitionAdapter<mv_face_recognition_input_s, mv_face_recognition_result_s>();
+       FaceRecognitionTask *task = new (nothrow)
+                       FaceRecognitionAdapter<mv_face_recognition_input_s, mv_face_recognition_result_s>();
        if (!task) {
                delete context;
                LOGE("Fail to allocate a task.");
@@ -104,7 +104,7 @@ int mv_face_recognition_prepare_open(mv_face_recognition_h handle)
 
                task->configure();
                task->prepare();
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
        }
@@ -133,7 +133,7 @@ int mv_face_recognition_register_open(mv_face_recognition_h handle, mv_source_h
                input.register_src.insert(make_pair(source, string(label)));
                task->setInput(input);
                task->perform();
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
        }
@@ -162,7 +162,7 @@ int mv_face_recognition_unregister_open(mv_face_recognition_h handle, const char
                input.labels.push_back(string(label));
                task->setInput(input);
                task->perform();
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
        }
@@ -190,7 +190,7 @@ int mv_face_recognition_inference_open(mv_face_recognition_h handle, mv_source_h
                input.inference_src = source;
                task->setInput(input);
                task->perform();
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
        }
@@ -214,7 +214,7 @@ int mv_face_recognition_get_label_open(mv_face_recognition_h handle, const char
                auto task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
 
                *out_label = task->getOutput().label.c_str();
-	} catch (const BaseException& e) {
+       } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
        }
index 2eedaa4..b732f68 100644
@@ -24,7 +24,7 @@
 using namespace std;
 using namespace mediavision::machine_learning::exception;
 
-void NNTrainerDSM::PrintHeader(FeaVecHeader& fvh)
+void NNTrainerDSM::PrintHeader(FeaVecHeader &fvh)
 {
        LOGD("signature = %u", fvh.signature);
        LOGD("feature vector size = %zu", fvh.feature_size);
@@ -32,11 +32,8 @@ void NNTrainerDSM::PrintHeader(FeaVecHeader& fvh)
        LOGD("data set count = %u", fvh.data_set_cnt);
 }
 
-NNTrainerDSM::NNTrainerDSM()
-       : DataSetManager()
-{
-
-}
+NNTrainerDSM::NNTrainerDSM() : DataSetManager()
+{}
 
 void NNTrainerDSM::LoadDataSet(const string file_name)
 {
@@ -51,7 +48,7 @@ void NNTrainerDSM::LoadDataSet(const string file_name)
 
        FeaVecHeader fvh;
 
-       inFile.read((char *)&fvh, sizeof(FeaVecHeader));
+       inFile.read((char *) &fvh, sizeof(FeaVecHeader));
        if (inFile.gcount() != sizeof(FeaVecHeader))
                throw InvalidOperation("Invalid feature vector file.");
 
@@ -62,8 +59,7 @@ void NNTrainerDSM::LoadDataSet(const string file_name)
        if (FeatureVectorManager::feature_vector_signature != fvh.signature)
                throw InvalidOperation("Wrong feature vector header.");
 
-       size_t line_size_in_bytes = fvh.feature_size * sizeof(float) +
-                                                               fvh.one_hot_table_size * sizeof(float);
+       size_t line_size_in_bytes = fvh.feature_size * sizeof(float) + fvh.one_hot_table_size * sizeof(float);
 
        _feature_vector_size = fvh.feature_size;
        _label_size = fvh.one_hot_table_size;
@@ -72,7 +68,7 @@ void NNTrainerDSM::LoadDataSet(const string file_name)
        vector<float> line_data(fvh.feature_size + fvh.one_hot_table_size);
 
        for (size_t idx = 0; idx < fvh.data_set_cnt; ++idx) {
-               inFile.read((char *)line_data.data(), line_size_in_bytes);
+               inFile.read((char *) line_data.data(), line_size_in_bytes);
 
                vector<float> data;
                copy_n(line_data.begin(), _feature_vector_size, back_inserter(data));
@@ -85,7 +81,7 @@ void NNTrainerDSM::LoadDataSet(const string file_name)
                        if (line_data[fvh.feature_size + num] == 1.0f)
                                label_idx = num;
 
-                       label.push_back((float)line_data[fvh.feature_size + num]);
+                       label.push_back((float) line_data[fvh.feature_size + num]);
                }
 
                _labels.push_back(label);
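LoadDataSet() above reads data_set_cnt records, each holding feature_size floats followed by one_hot_table_size floats, and the label index is simply the position of the 1.0f in the one-hot tail. A minimal decoder for one such record, restated outside the class:

#include <vector>

// Splits one record into feature and one-hot parts; returns the label index, or -1 if none is set.
static int decode_record(const std::vector<float> &line_data, size_t feature_size, std::vector<float> &features,
			 std::vector<float> &one_hot)
{
	features.assign(line_data.begin(), line_data.begin() + feature_size);
	one_hot.assign(line_data.begin() + feature_size, line_data.end());

	int label_idx = -1;
	for (size_t num = 0; num < one_hot.size(); ++num)
		if (one_hot[num] == 1.0f)
			label_idx = static_cast<int>(num);

	return label_idx;
}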
index b999e84..24f4ad2 100644
 using namespace std;
 using namespace mediavision::machine_learning::exception;
 
-NNTrainerFVM::NNTrainerFVM(const string feature_vector_file)
-       : FeatureVectorManager(feature_vector_file)
-{
-
-}
+NNTrainerFVM::NNTrainerFVM(const string feature_vector_file) : FeatureVectorManager(feature_vector_file)
+{}
 
-void NNTrainerFVM::WriteHeader(size_t feature_size, size_t one_hot_table_size, unsigned int  data_set_cnt)
+void NNTrainerFVM::WriteHeader(size_t feature_size, size_t one_hot_table_size, unsigned int data_set_cnt)
 {
-       ofstream outFile {
-               _feature_vector_file,
-               ios::out | ios::binary | ios::app
-       };
+       ofstream outFile { _feature_vector_file, ios::out | ios::binary | ios::app };
 
        if (!outFile.is_open())
                throw InvalidOperation("fail to open a file");
 
-       FeaVecHeader fvHeader {
-               FeatureVectorManager::feature_vector_signature,
-               feature_size,
-               one_hot_table_size,
-               data_set_cnt
-       };
+       FeaVecHeader fvHeader { FeatureVectorManager::feature_vector_signature, feature_size, one_hot_table_size,
+                                                       data_set_cnt };
 
-       outFile.write((char *)&fvHeader, sizeof(FeaVecHeader));
+       outFile.write((char *) &fvHeader, sizeof(FeaVecHeader));
 }
 
-void NNTrainerFVM::ReadHeader(FeaVecHeader& header)
+void NNTrainerFVM::ReadHeader(FeaVecHeader &header)
 {
-       ifstream inFile {
-               _feature_vector_file,
-               ios::in | ios::binary
-       };
+       ifstream inFile { _feature_vector_file, ios::in | ios::binary };
 
        if (!inFile.is_open())
                throw InvalidOperation("fail to open a file.");
 
        inFile.seekg(static_cast<int>(sizeof(FeaVecHeader) * -1), ios::end);
 
-       inFile.read((char *)&header, sizeof(FeaVecHeader));
+       inFile.read((char *) &header, sizeof(FeaVecHeader));
        if (inFile.gcount() != sizeof(FeaVecHeader))
                throw InvalidOperation("Invalid feature vector file.");
 
@@ -68,12 +55,9 @@ void NNTrainerFVM::ReadHeader(FeaVecHeader& header)
                throw InvalidParameter("wrong feature vector file header.");
 }
 
-void NNTrainerFVM::WriteFeatureVec(vector<float>& feature_vec, const int max_label, const int label_index)
+void NNTrainerFVM::WriteFeatureVec(vector<float> &feature_vec, const int max_label, const int label_index)
 {
-       ofstream outFile {
-               _feature_vector_file,
-               ios::out | ios::binary | ios::app
-       };
+       ofstream outFile { _feature_vector_file, ios::out | ios::binary | ios::app };
 
        if (!outFile.is_open())
                throw InvalidOperation("fail to open a file.");
@@ -82,7 +66,7 @@ void NNTrainerFVM::WriteFeatureVec(vector<float>& feature_vec, const int max_lab
 
        for (int idx = 0; idx < max_label; ++idx) {
                float oneHotTable = (label_index == idx) ? 1.0f : 0.0f;
-               outFile.write((char *)&oneHotTable, sizeof(float));
+               outFile.write((char *) &oneHotTable, sizeof(float));
        }
 }
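Note the asymmetry above: WriteHeader() appends, so after a batch of WriteFeatureVec() calls the most recent header sits at the end of the file, which is why ReadHeader() seeks sizeof(FeaVecHeader) back from ios::end. A sketch of that trailing read outside the class; the FeaVecHeader type is assumed to be in scope and the exception type is simplified:

#include <fstream>
#include <stdexcept>
#include <string>

// Reads the FeaVecHeader that the last WriteHeader() call appended to the file.
static FeaVecHeader read_trailing_header(const std::string &path)
{
	std::ifstream in { path, std::ios::in | std::ios::binary };
	if (!in.is_open())
		throw std::runtime_error("fail to open a file.");

	// The newest header is the last sizeof(FeaVecHeader) bytes of the file.
	in.seekg(static_cast<int>(sizeof(FeaVecHeader) * -1), std::ios::end);

	FeaVecHeader header;
	in.read(reinterpret_cast<char *>(&header), sizeof(FeaVecHeader));
	if (in.gcount() != static_cast<std::streamsize>(sizeof(FeaVecHeader)))
		throw std::runtime_error("Invalid feature vector file.");

	return header;
}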
 
index d460364..233bdbe 100644
@@ -33,10 +33,9 @@ using namespace std;
 using namespace TrainingEngineInterface::Common;
 using namespace mediavision::machine_learning::exception;
 
-SimpleShot::SimpleShot(const mv_inference_backend_type_e backend_type,
-                                          const mv_inference_target_device_e target_type,
-                                          const string internal_model_file) :
-                                               TrainingModel(backend_type, target_type, internal_model_file)
+SimpleShot::SimpleShot(const mv_inference_backend_type_e backend_type, const mv_inference_target_device_e target_type,
+                                          const string internal_model_file)
+               : TrainingModel(backend_type, target_type, internal_model_file)
 {
        _engine_info.backend_type = backend_type;
        // TODO. training engine interface has no target type attribute yet.
@@ -45,10 +44,7 @@ SimpleShot::SimpleShot(const mv_inference_backend_type_e backend_type,
        _engine_info.target_device = target_type;
 
        const inference_engine_tensor_info nntrainer_input_tensor_info = {
-               { 512, 1, 1, 1 },
-               INFERENCE_TENSOR_SHAPE_NCHW,
-               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-               (size_t)(512 * 1 * 1 * 1)
+               { 512, 1, 1, 1 }, INFERENCE_TENSOR_SHAPE_NCHW, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, (size_t)(512 * 1 * 1 * 1)
        };
 
        _engine_info.input_layer_names.push_back("preprocess_l2norm0");
@@ -57,12 +53,9 @@ SimpleShot::SimpleShot(const mv_inference_backend_type_e backend_type,
	// The size of the output tensor will be updated by the RecognizeFace function
	// because it must change according to the maximum label count,
	// so it defaults to 1.
-       inference_engine_tensor_info nntrainer_output_tensor_info = {
-               vector<size_t>{ 1, 1, 1, 1 },
-               INFERENCE_TENSOR_SHAPE_NCHW,
-               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-               1
-       };
+       inference_engine_tensor_info nntrainer_output_tensor_info = { vector<size_t> { 1, 1, 1, 1 },
+                                                                                                                                 INFERENCE_TENSOR_SHAPE_NCHW,
+                                                                                                                                 INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 1 };
 
        _engine_info.output_layer_names.push_back("centroid_knn1");
        _engine_info.output_tensor_info.push_back(nntrainer_output_tensor_info);
@@ -118,7 +111,7 @@ void SimpleShot::ConfigureModel(int num_of_class)
                throw InvalidOperation("Fail to add knn layer.");
 }
 
-TrainingEngineBackendInfo& SimpleShot::GetTrainingEngineInfo()
+TrainingEngineBackendInfo &SimpleShot::GetTrainingEngineInfo()
 {
        return _engine_info;
 }
index d4d6c6a..b3eb829 100644
@@ -62,10 +62,8 @@ private:
        Number number;
 
        std::map<std::string, inference_box_type_e> supportedBoxTypes;
-       std::map<std::string, inference_box_coordinate_type_e>
-                       supportedBoxCoordinateTypes;
-       std::map<std::string, inference_box_decoding_type_e>
-                       supportedBoxDecodingTypes;
+       std::map<std::string, inference_box_coordinate_type_e> supportedBoxCoordinateTypes;
+       std::map<std::string, inference_box_decoding_type_e> supportedBoxDecodingTypes;
 
 public:
        BoxInfo()
@@ -78,22 +76,15 @@ public:
                        , decodingInfo()
 
        {
-               supportedBoxTypes.insert(
-                               { "ORIGIN_LEFTTOP", INFERENCE_BOX_TYPE_ORIGIN_LEFTTOP });
-               supportedBoxTypes.insert(
-                               { "ORIGIN_CENTER", INFERENCE_BOX_TYPE_ORIGIN_CENTER });
-
-               supportedBoxCoordinateTypes.insert(
-                               { "RATIO", INFERENCE_BOX_COORDINATE_TYPE_RATIO });
-               supportedBoxCoordinateTypes.insert(
-                               { "PIXEL", INFERENCE_BOX_COORDINATE_TYPE_PIXEL });
-
-               supportedBoxDecodingTypes.insert(
-                               { "BYPASS", INFERENCE_BOX_DECODING_TYPE_BYPASS });
-               supportedBoxDecodingTypes.insert(
-                               { "SSD_ANCHOR", INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR });
-               supportedBoxDecodingTypes.insert(
-                               { "YOLO_ANCHOR", INFERENCE_BOX_DECODING_TYPE_YOLO_ANCHOR });
+               supportedBoxTypes.insert({ "ORIGIN_LEFTTOP", INFERENCE_BOX_TYPE_ORIGIN_LEFTTOP });
+               supportedBoxTypes.insert({ "ORIGIN_CENTER", INFERENCE_BOX_TYPE_ORIGIN_CENTER });
+
+               supportedBoxCoordinateTypes.insert({ "RATIO", INFERENCE_BOX_COORDINATE_TYPE_RATIO });
+               supportedBoxCoordinateTypes.insert({ "PIXEL", INFERENCE_BOX_COORDINATE_TYPE_PIXEL });
+
+               supportedBoxDecodingTypes.insert({ "BYPASS", INFERENCE_BOX_DECODING_TYPE_BYPASS });
+               supportedBoxDecodingTypes.insert({ "SSD_ANCHOR", INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR });
+               supportedBoxDecodingTypes.insert({ "YOLO_ANCHOR", INFERENCE_BOX_DECODING_TYPE_YOLO_ANCHOR });
        }
 
        ~BoxInfo() = default;
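The three maps above exist so that type names coming from the model meta file can be resolved to enum values. A lookup sketch under that assumption; the free function is ours, since the class keeps the maps private:

#include <map>
#include <string>

// Hypothetical lookup mirroring how the maps above are presumably consulted during meta parsing.
static bool lookup_box_type(const std::map<std::string, inference_box_type_e> &supported, const std::string &name,
			    inference_box_type_e &out)
{
	auto it = supported.find(name); // e.g. "ORIGIN_CENTER"
	if (it == supported.end())
		return false;

	out = it->second;
	return true;
}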
index 6b9b853..150671e 100644
@@ -11,16 +11,17 @@ namespace mediavision
 {
 namespace inference
 {
-
-  /** Class created for storing motion data from bvh file */
-  class Bvh {
-  public:
-    /** Constructor of Bvh object
+/** Class created for storing motion data from bvh file */
+class Bvh
+{
+public:
+       /** Constructor of Bvh object
      *  @details  Initializes local variables
      */
-    Bvh() : num_frames_(0), frame_time_(0), num_channels_(0) {}
+       Bvh() : num_frames_(0), frame_time_(0), num_channels_(0)
+       {}
 
-    /**
+       /**
      * Recalculation of local transformation matrix for each frame in each joint
      *
      * Should be called to set local_transformation_matrix vectors in joints
@@ -30,79 +31,103 @@ namespace inference
      * matrix will be recalculated, as default it is NULL which will be resolved
      * to root_joint in method body
      */
-    void recalculate_joints_ltm(std::shared_ptr<Joint> start_joint = NULL);
+       void recalculate_joints_ltm(std::shared_ptr<Joint> start_joint = NULL);
 
-    /** Adds joint to Bvh object
+       /** Adds joint to Bvh object
      *  @details  Adds joint and increases number of data channels
      *  @param  joint  The joint that will be added
      */
-    void add_joint(const std::shared_ptr<Joint> joint) {
-      joints_.push_back(joint);
-      num_channels_ += joint->num_channels();
-    }
+       void add_joint(const std::shared_ptr<Joint> joint)
+       {
+               joints_.push_back(joint);
+               num_channels_ += joint->num_channels();
+       }
 
-    /** Gets the root joint
+       /** Gets the root joint
      *  @return  The root joint
      */
-    const std::shared_ptr<Joint> root_joint() const { return root_joint_; }
+       const std::shared_ptr<Joint> root_joint() const
+       {
+               return root_joint_;
+       }
 
-    /** Gets all joints
+       /** Gets all joints
      *  @return  The all joints
      */
-    const std::vector <std::shared_ptr <Joint>> joints() const {
-      return joints_;
-    }
+       const std::vector<std::shared_ptr<Joint> > joints() const
+       {
+               return joints_;
+       }
 
-    /** Gets the number of data frames
+       /** Gets the number of data frames
      *  @return  The number of frames
      */
-    unsigned num_frames() const { return num_frames_; }
+       unsigned num_frames() const
+       {
+               return num_frames_;
+       }
 
-    /** Gets the frame time
+       /** Gets the frame time
      *  @return  The single frame time (in second)
      */
-    double frame_time() const { return frame_time_; }
+       double frame_time() const
+       {
+               return frame_time_;
+       }
 
-    /** Gets the total number of channels
+       /** Gets the total number of channels
      *  @return  The number of data channels
      */
-    unsigned num_channels() const { return num_channels_; }
+       unsigned num_channels() const
+       {
+               return num_channels_;
+       }
 
-    /** Sets the root joint
+       /** Sets the root joint
      *  @param  arg  The root joint to be set
      */
-    void set_root_joint(const std::shared_ptr<Joint> arg) { root_joint_ = arg; }
+       void set_root_joint(const std::shared_ptr<Joint> arg)
+       {
+               root_joint_ = arg;
+       }
 
-    /** Sets the all joint at once
+       /** Sets the all joint at once
      *  @param  arg  The all joints to be set
      */
-    void set_joints(const std::vector <std::shared_ptr <Joint>> arg) {
-      joints_ = arg;
-    }
+       void set_joints(const std::vector<std::shared_ptr<Joint> > arg)
+       {
+               joints_ = arg;
+       }
 
-    /** Sets the number of data frames
+       /** Sets the number of data frames
      *  @param  arg  The number of frames to be set
      */
-    void set_num_frames(const unsigned arg) { num_frames_ = arg; }
+       void set_num_frames(const unsigned arg)
+       {
+               num_frames_ = arg;
+       }
 
-    /** Sets the single data frame time
+       /** Sets the single data frame time
      *  @param  arg  The time of frame to be set
      */
-    void set_frame_time(const double arg) { frame_time_ = arg; }
-
-  private:
-    /** A root joint in this bvh file */
-    std::shared_ptr<Joint> root_joint_;
-    /** All joints in file in order of parse */
-    std::vector <std::shared_ptr <Joint>> joints_;
-    /** A number of motion frames in this bvh file */
-    unsigned num_frames_;
-    /** A time of single frame */
-    double frame_time_;
-    /** Number of channels of all joints */
-    unsigned num_channels_;
-  };
+       void set_frame_time(const double arg)
+       {
+               frame_time_ = arg;
+       }
+
+private:
+       /** A root joint in this bvh file */
+       std::shared_ptr<Joint> root_joint_;
+       /** All joints in file in order of parse */
+       std::vector<std::shared_ptr<Joint> > joints_;
+       /** A number of motion frames in this bvh file */
+       unsigned num_frames_;
+       /** A time of single frame */
+       double frame_time_;
+       /** Number of channels of all joints */
+       unsigned num_channels_;
+};
 
 } // namespace
 }
-#endif  // __MEDIA_VISION_BVH_H__
+#endif // __MEDIA_VISION_BVH_H__
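A usage sketch of the accessors above; Joint is assumed to be default-constructible here, which this patch does not show:

#include <memory>

static void build_minimal_bvh()
{
	Bvh bvh;
	auto root = std::make_shared<Joint>(); // Joint construction assumed

	bvh.set_root_joint(root);
	bvh.add_joint(root); // num_channels() grows by root->num_channels()
	bvh.set_num_frames(1);
	bvh.set_frame_time(1.0 / 30.0); // seconds per frame
}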
index c96bedb..5a89fec 100644
@@ -13,66 +13,65 @@ namespace mediavision
 {
 namespace inference
 {
+/** Bvh Parser class that is responsible for parsing a .bvh file */
+class BvhParser
+{
+public:
+       BvhParser() : bvh_(NULL) {};
+       ~BvhParser() = default;
 
-    /** Bvh Parser class that is responsible for parsing a .bvh file */
-    class BvhParser {
-    public:
-    BvhParser() : bvh_(NULL) {};
-    ~BvhParser() = default;
-
-    /** Parses a single bvh file and stores data into the bvh structure
+	/** Parses a single bvh file and stores data into the bvh structure
      *  @param  path  The path to file to be parsed
      *  @param  bvh   The pointer to bvh object where parsed data will be stored
      *  @return  0 if success, -1 otherwise
      */
-    int parse(const std::string& path, Bvh* bvh);
+       int parse(const std::string &path, Bvh *bvh);
 
-    private:
-    /** Parses single hierarchy in bvh file
+private:
+       /** Parses single hierarchy in bvh file
      *  @param  file  The input stream that is needed for reading file content
      *  @return  0 if success, -1 otherwise
      */
-    int parse_hierarchy(std::ifstream& file);
+       int parse_hierarchy(std::ifstream &file);
 
-    /** Parses joint and its children in bvh file
+       /** Parses joint and its children in bvh file
      *  @param  file    The input stream that is needed for reading file content
      *  @param  parent  The pointer to parent joint
     *  @param  parsed  The output parameter where the parsed joint will be stored
      *  @return  0 if success, -1 otherwise
      */
-    int parse_joint(std::ifstream& file, std::shared_ptr <Joint> parent,
-        std::shared_ptr <Joint>& parsed);
+       int parse_joint(std::ifstream &file, std::shared_ptr<Joint> parent, std::shared_ptr<Joint> &parsed);
 
-    /** Parses the channel order for a single joint
+	/** Parses the channel order for a single joint
      *  @param  file    The input stream that is needed for reading file content
     *  @param  joint   The pointer to the joint whose channel order will be parsed
      *  @return  0 if success, -1 otherwise
      */
-    int parse_channel_order(std::ifstream& file, std::shared_ptr <Joint> joint);
+       int parse_channel_order(std::ifstream &file, std::shared_ptr<Joint> joint);
 
-    /** Parses motion part data
+       /** Parses motion part data
      *  @param  file    The input stream that is needed for reading file content
      *  @return  0 if success, -1 otherwise
      */
-    int parse_motion(std::ifstream& file);
+       int parse_motion(std::ifstream &file);
 
-    /** Trims the string, removes leading and trailing whitespace from it
+       /** Trims the string, removes leading and trailing whitespace from it
     *  @param  s   The string whose leading and trailing whitespace will be
      *              trimmed
      */
-    inline void trim(std::string &s) {
-        s.erase( std::remove_if( s.begin(), s.end(),
-            std::bind( std::isspace<char>, std::placeholders::_1,
-            std::locale::classic() ) ), s.end() );
-    }
-
+       inline void trim(std::string &s)
+       {
+               s.erase(std::remove_if(s.begin(), s.end(),
+                                                          std::bind(std::isspace<char>, std::placeholders::_1, std::locale::classic())),
+                               s.end());
+       }
 
-    /** The path to file that was parsed previously */
-    std::string path_;
+       /** The path to file that was parsed previously */
+       std::string path_;
 
-    /** The bvh object to store parsed data */
-    Bvh* bvh_;
-    };
+       /** The bvh object to store parsed data */
+       Bvh *bvh_;
+};
 }
 } // namespace
-#endif  // __MEDIA_VISION_BVH_PARSER_H__
+#endif // __MEDIA_VISION_BVH_PARSER_H__
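A usage sketch of the parser; the file path is illustrative, and per the documentation above parse() returns 0 on success and -1 otherwise:

#include <string>

static int load_motion(const std::string &path)
{
	BvhParser parser;
	Bvh bvh;

	if (parser.parse(path, &bvh) != 0)
		return -1;

	// Fill each joint's local transformation matrices, starting from the root by default.
	bvh.recalculate_joints_ltm();
	return 0;
}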
index 9d1a131..ce07273 100644
@@ -12,12 +12,12 @@ namespace mediavision
 {
 namespace inference
 {
-
 /** Enumeration class for axis */
-enum class Axis {
-  X,
-  Y,
-  Z
+enum class Axis
+{
+       X,
+       Y,
+       Z
 };
 
 /** Creates rotation matrix
index dba3647..83c53db 100644
@@ -33,144 +33,151 @@ namespace inference
 {
 namespace box
 {
-       struct AnchorParam {
-               int mode; /**< 0: generate anchor, 1:load pre-anchor*/
-               int numLayers;
-               float minScale;
-               float maxScale;
-               int inputSizeHeight;
-               int inputSizeWidth;
-               float anchorOffsetX;
-               float anchorOffsetY;
-               std::vector<int> strides;
-               std::vector<float> aspectRatios;
-               bool isReduceBoxedInLowestLayer;
-               float interpolatedScaleAspectRatio;
-               bool isFixedAnchorSize;
-               bool isExponentialBoxScale;
-               float xScale;
-               float yScale;
-               float wScale;
-               float hScale;
-       };
-
-       struct CellParam {
-               int numScales;
-               std::vector<int> scales;
-               int offsetScales;
-               inference_score_type_e type;
-               std::map<std::string, inference_score_type_e> supportedCellType;
-       };
-
-       struct NMSParam {
-               inference_box_nms_type_e mode; /**< 0: standard */
-               float iouThreshold;
-               std::map<std::string, inference_box_nms_type_e> supportedBoxNmsTypes;
-       };
-
-       struct RotateParam {
-               int startPointIndex;
-               int endPointIndex;
-               cv::Point2f startPoint;
-               cv::Point2f endPoint;
-               float baseAngle;
-       };
-
-       struct RoiOptionParam {
-               int startPointIndex;
-               int endPointIndex;
-               int centerPointIndex;
-               cv::Point2f centerPoint;
-               float shiftX;
-               float shiftY;
-               float scaleX;
-               float scaleY;
-               int mode;
-       };
-
-       class DecodeInfo {
-       private:
-               AnchorParam anchorParam;
-               std::vector<cv::Rect2f> anchorBoxes;
-               CellParam cellParam;
-               NMSParam nmsParam;
-               RotateParam rotParam;
-               RoiOptionParam roiOptParam;
-
-       public:
-               DecodeInfo() {
-                       cellParam.type = INFERENCE_SCORE_TYPE_NORMAL;
-                       cellParam.supportedCellType.insert({"NORMAL", INFERENCE_SCORE_TYPE_NORMAL});
-                       cellParam.supportedCellType.insert({"SIGMOID", INFERENCE_SCORE_TYPE_SIGMOID});
-
-                       nmsParam.mode = INFERENCE_BOX_NMS_TYPE_NONE;
-                       nmsParam.iouThreshold = 0.2f;
-                       nmsParam.supportedBoxNmsTypes.insert({"STANDARD", INFERENCE_BOX_NMS_TYPE_STANDARD});
-
-                       rotParam.startPointIndex = -1;
-                       rotParam.endPointIndex = -1;
-                       rotParam.startPoint = cv::Point2f(0.f,0.f);
-                       rotParam.endPoint = cv::Point2f(0.f,0.f);
-                       rotParam.baseAngle = 0.f;
-
-                       roiOptParam.startPointIndex = -1;
-                       roiOptParam.endPointIndex = -1;
-                       roiOptParam.centerPointIndex = -1;
-                       roiOptParam.centerPoint = cv::Point2f(0.f, 0.f);
-                       roiOptParam.shiftX = 0.f;
-                       roiOptParam.shiftY = 0.f;
-                       roiOptParam.scaleX = 1.f;
-                       roiOptParam.scaleY = 1.f;
-                       roiOptParam.mode = -1;
-               }
-
-               ~DecodeInfo() = default;
-
-               std::vector<cv::Rect2f>& GetAnchorBoxAll();
-               bool IsAnchorBoxEmpty();
-               void AddAnchorBox(cv::Rect2f& ahcnor);
-               void ClearAnchorBox();
-
-               // Anchor param
-               int ParseAnchorParam(JsonObject *root);
-               int GenerateAnchor();
-               bool IsFixedAnchorSize();
-               bool IsExponentialBoxScale();
-               float GetAnchorXscale();
-               float GetAnchorYscale();
-               float GetAnchorWscale();
-               float GetAnchorHscale();
-               float CalculateScale(float min, float max, int index, int maxStride);
-
-               // Cell param
-               int ParseCellParam(JsonObject *root);
-               std::vector<int>& GetCellScalesAll();
-               int GetCellNumScales();
-               int GetCellOffsetScales();
-               inference_score_type_e GetCellType();
-
-               // Nms param
-               int ParseNms(JsonObject *root);
-               int GetNmsMode();
-               float GetNmsIouThreshold();
-
-               // Rotate param
-               int ParseRotate(JsonObject *root);
-               int GetRotStartPointIndex();
-               int GetRotEndPointIndex();
-               float GetBaseAngle();
-
-               // Roi option param
-               int ParseRoiOption(JsonObject *root);
-               int GetRoiMode();
-               int GetRoiCenterPointIndex();
-               int GetRoiStartPointIndex();
-               int GetRoiEndPointIndex();
-               float GetShiftX();
-               float GetShiftY();
-               float GetScaleX();
-               float GetScaleY();
-       };
+struct AnchorParam
+{
+       int mode; /**< 0: generate anchor, 1:load pre-anchor*/
+       int numLayers;
+       float minScale;
+       float maxScale;
+       int inputSizeHeight;
+       int inputSizeWidth;
+       float anchorOffsetX;
+       float anchorOffsetY;
+       std::vector<int> strides;
+       std::vector<float> aspectRatios;
+       bool isReduceBoxedInLowestLayer;
+       float interpolatedScaleAspectRatio;
+       bool isFixedAnchorSize;
+       bool isExponentialBoxScale;
+       float xScale;
+       float yScale;
+       float wScale;
+       float hScale;
+};
+
+struct CellParam
+{
+       int numScales;
+       std::vector<int> scales;
+       int offsetScales;
+       inference_score_type_e type;
+       std::map<std::string, inference_score_type_e> supportedCellType;
+};
+
+struct NMSParam
+{
+       inference_box_nms_type_e mode; /**< 0: standard */
+       float iouThreshold;
+       std::map<std::string, inference_box_nms_type_e> supportedBoxNmsTypes;
+};
+
+struct RotateParam
+{
+       int startPointIndex;
+       int endPointIndex;
+       cv::Point2f startPoint;
+       cv::Point2f endPoint;
+       float baseAngle;
+};
+
+struct RoiOptionParam
+{
+       int startPointIndex;
+       int endPointIndex;
+       int centerPointIndex;
+       cv::Point2f centerPoint;
+       float shiftX;
+       float shiftY;
+       float scaleX;
+       float scaleY;
+       int mode;
+};
+
+class DecodeInfo
+{
+private:
+       AnchorParam anchorParam;
+       std::vector<cv::Rect2f> anchorBoxes;
+       CellParam cellParam;
+       NMSParam nmsParam;
+       RotateParam rotParam;
+       RoiOptionParam roiOptParam;
+
+public:
+       DecodeInfo()
+       {
+               cellParam.type = INFERENCE_SCORE_TYPE_NORMAL;
+               cellParam.supportedCellType.insert({ "NORMAL", INFERENCE_SCORE_TYPE_NORMAL });
+               cellParam.supportedCellType.insert({ "SIGMOID", INFERENCE_SCORE_TYPE_SIGMOID });
+
+               nmsParam.mode = INFERENCE_BOX_NMS_TYPE_NONE;
+               nmsParam.iouThreshold = 0.2f;
+               nmsParam.supportedBoxNmsTypes.insert({ "STANDARD", INFERENCE_BOX_NMS_TYPE_STANDARD });
+
+               rotParam.startPointIndex = -1;
+               rotParam.endPointIndex = -1;
+               rotParam.startPoint = cv::Point2f(0.f, 0.f);
+               rotParam.endPoint = cv::Point2f(0.f, 0.f);
+               rotParam.baseAngle = 0.f;
+
+               roiOptParam.startPointIndex = -1;
+               roiOptParam.endPointIndex = -1;
+               roiOptParam.centerPointIndex = -1;
+               roiOptParam.centerPoint = cv::Point2f(0.f, 0.f);
+               roiOptParam.shiftX = 0.f;
+               roiOptParam.shiftY = 0.f;
+               roiOptParam.scaleX = 1.f;
+               roiOptParam.scaleY = 1.f;
+               roiOptParam.mode = -1;
+       }
+
+       ~DecodeInfo() = default;
+
+       std::vector<cv::Rect2f> &GetAnchorBoxAll();
+       bool IsAnchorBoxEmpty();
+       void AddAnchorBox(cv::Rect2f &ahcnor);
+       void ClearAnchorBox();
+
+       // Anchor param
+       int ParseAnchorParam(JsonObject *root);
+       int GenerateAnchor();
+       bool IsFixedAnchorSize();
+       bool IsExponentialBoxScale();
+       float GetAnchorXscale();
+       float GetAnchorYscale();
+       float GetAnchorWscale();
+       float GetAnchorHscale();
+       float CalculateScale(float min, float max, int index, int maxStride);
+
+       // Cell param
+       int ParseCellParam(JsonObject *root);
+       std::vector<int> &GetCellScalesAll();
+       int GetCellNumScales();
+       int GetCellOffsetScales();
+       inference_score_type_e GetCellType();
+
+       // Nms param
+       int ParseNms(JsonObject *root);
+       int GetNmsMode();
+       float GetNmsIouThreshold();
+
+       // Rotate param
+       int ParseRotate(JsonObject *root);
+       int GetRotStartPointIndex();
+       int GetRotEndPointIndex();
+       float GetBaseAngle();
+
+       // Roi option param
+       int ParseRoiOption(JsonObject *root);
+       int GetRoiMode();
+       int GetRoiCenterPointIndex();
+       int GetRoiStartPointIndex();
+       int GetRoiEndPointIndex();
+       float GetShiftX();
+       float GetShiftY();
+       float GetScaleX();
+       float GetScaleY();
+};
 } /* box */
 } /* Inference */
 } /* MediaVision */
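
A minimal usage sketch of the DecodeInfo API declared above, assuming its header is included and that ParseAnchorParam()/ParseNms() have already consumed the model metadata; the function name decodeBoxes is illustrative, and the mediavision::inference::box qualification follows the closing-namespace comments.

using mediavision::inference::box::DecodeInfo;

void decodeBoxes(DecodeInfo &info)
{
	if (info.IsAnchorBoxEmpty())
		info.GenerateAnchor(); // populate the anchor list from AnchorParam

	for (cv::Rect2f &anchor : info.GetAnchorBoxAll()) {
		// raw box predictions are decoded against each anchor using the
		// configured x/y/w/h scales (exponential w/h decode if enabled)
		float xScale = info.GetAnchorXscale();
		float wScale = info.GetAnchorWscale();
		(void) anchor;
		(void) xScale;
		(void) wScale;
	}

	if (info.GetNmsMode() != INFERENCE_BOX_NMS_TYPE_NONE) {
		// overlapping boxes above this IoU threshold are suppressed
		float iouThreshold = info.GetNmsIouThreshold();
		(void) iouThreshold;
	}
}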
index 64e0f05..3c0782a 100644
@@ -24,29 +24,29 @@ namespace mediavision
 {
 namespace inference
 {
-       class DimInfo
-       {
-       private:
-               std::vector<int> dims;
+class DimInfo
+{
+private:
+       std::vector<int> dims;
 
-       public:
-               std::vector<int> GetValidIndexAll() const
-               {
-                       LOGI("ENTER");
+public:
+       std::vector<int> GetValidIndexAll() const
+       {
+               LOGI("ENTER");
 
-                       LOGI("LEAVE");
-                       return dims;
-               }
+               LOGI("LEAVE");
+               return dims;
+       }
 
-               void SetValidIndex(int index)
-               {
-                       LOGI("ENTER");
+       void SetValidIndex(int index)
+       {
+               LOGI("ENTER");
 
-                       dims.push_back(index);
+               dims.push_back(index);
 
-                       LOGI("LEAVE");
-               }
-       };
+               LOGI("LEAVE");
+       }
+};
 } /* Inference */
 } /* MediaVision */
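
A two-line sketch of the DimInfo contract above, assuming the surrounding header: every metadata index flagged with 1 is appended via SetValidIndex() and later returned, in insertion order, by GetValidIndexAll().

mediavision::inference::DimInfo dimInfo;
dimInfo.SetValidIndex(1); // e.g. the metadata "index" array held a 1 at slot 1
dimInfo.SetValidIndex(3);
for (int index : dimInfo.GetValidIndexAll())
	LOGI("valid dim index: %d", index); // logs 1, then 3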
 
index f43dcf5..b9685bd 100644
@@ -30,63 +30,71 @@ namespace mediavision
 {
 namespace inference
 {
-       class DispVec
+class DispVec
+{
+private:
+       std::string name;
+       DimInfo dimInfo;
+       inference_displacement_type_e type;
+       int shapeType;
+       std::map<std::string, inference_displacement_type_e> supportedDispTypes;
+
+public:
+       DispVec() : name(), dimInfo(), type(INFERENCE_DISPLACEMENT_TYPE_FORWARD), shapeType(INFERENCE_TENSOR_SHAPE_NCHW)
        {
-       private:
-               std::string name;
-               DimInfo dimInfo;
-               inference_displacement_type_e type;
-               int shapeType;
-               std::map<std::string, inference_displacement_type_e> supportedDispTypes;
-
-       public:
-               DispVec() :
-                       name(),
-                       dimInfo(),
-                       type(INFERENCE_DISPLACEMENT_TYPE_FORWARD),
-                       shapeType(INFERENCE_TENSOR_SHAPE_NCHW)
-               {
-                       supportedDispTypes.insert({"FORWARD", INFERENCE_DISPLACEMENT_TYPE_FORWARD});
-                       supportedDispTypes.insert({"BACKWARD", INFERENCE_DISPLACEMENT_TYPE_BACKWARD});
-               }
+               supportedDispTypes.insert({ "FORWARD", INFERENCE_DISPLACEMENT_TYPE_FORWARD });
+               supportedDispTypes.insert({ "BACKWARD", INFERENCE_DISPLACEMENT_TYPE_BACKWARD });
+       }
 
-               ~DispVec() = default;
+       ~DispVec() = default;
 
-               std::string GetName() { return name; }
-               DimInfo GetDimInfo() { return dimInfo; }
-               inference_displacement_type_e GetType() { return type; }
-               int GetShapeType() { return shapeType; }
-
-               int ParseDisplacement(JsonObject *root, const std::map<std::string, inference_tensor_shape_type_e>& supportedShapeType)
-               {
-                       LOGI("ENTER");
+       std::string GetName()
+       {
+               return name;
+       }
+       DimInfo GetDimInfo()
+       {
+               return dimInfo;
+       }
+       inference_displacement_type_e GetType()
+       {
+               return type;
+       }
+       int GetShapeType()
+       {
+               return shapeType;
+       }
 
-                       name = static_cast<const char*>(json_object_get_string_member(root,"name"));
-                       LOGI("layer: %s", name.c_str());
+       int ParseDisplacement(JsonObject *root,
+                                                 const std::map<std::string, inference_tensor_shape_type_e> &supportedShapeType)
+       {
+               LOGI("ENTER");
 
-                       JsonArray * array = json_object_get_array_member(root, "index");
-                       unsigned int elements2 = json_array_get_length(array);
+               name = static_cast<const char *>(json_object_get_string_member(root, "name"));
+               LOGI("layer: %s", name.c_str());
 
-                       LOGI("range dim: size[%u]", elements2);
+               JsonArray *array = json_object_get_array_member(root, "index");
+               unsigned int elements2 = json_array_get_length(array);
 
-                       for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
-                               if(static_cast<int>(json_array_get_int_element(array, elem2)) == 1)
-                                       dimInfo.SetValidIndex(elem2);
-                       }
+               LOGI("range dim: size[%u]", elements2);
 
-                       try {
-                               shapeType = GetSupportedType(root, "shape_type", supportedShapeType);
-                               type = GetSupportedType(root, "type", supportedDispTypes);
-                       } catch (const std::exception& e) {
-                               LOGE("Invalid %s", e.what());
-                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       }
+               for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
+                       if (static_cast<int>(json_array_get_int_element(array, elem2)) == 1)
+                               dimInfo.SetValidIndex(elem2);
+               }
 
-                       LOGI("LEAVE");
-                       return MEDIA_VISION_ERROR_NONE;
+               try {
+                       shapeType = GetSupportedType(root, "shape_type", supportedShapeType);
+                       type = GetSupportedType(root, "type", supportedDispTypes);
+               } catch (const std::exception &e) {
+                       LOGE("Invalid %s", e.what());
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
                }
 
-       };
+               LOGI("LEAVE");
+               return MEDIA_VISION_ERROR_NONE;
+       }
+};
 } /* Inference */
 } /* MediaVision */
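
For reference, a hedged example of the JSON object ParseDisplacement() above expects, written as a C++ raw string. The "name", "index", and "type" keys and the FORWARD/BACKWARD values come straight from the parser; "NCHW" assumes the caller-supplied shape-type map uses that key.

// Positions holding 1 in "index" become valid dims via DimInfo::SetValidIndex().
const char *kDisplacementJson = R"({
	"name" : "displacement_fwd",
	"index" : [ 1, 1, 1, 0 ],
	"shape_type" : "NCHW",
	"type" : "FORWARD"
})";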
 
index 80c0216..338bee1 100644
@@ -25,43 +25,46 @@ namespace mediavision
 {
 namespace inference
 {
-       class Edge
-       {
-       private:
-               std::vector<std::pair<int, int>> edges;
-
-       public:
-               Edge() = default;
+class Edge
+{
+private:
+       std::vector<std::pair<int, int> > edges;
 
-               ~Edge() = default;
+public:
+       Edge() = default;
 
-               int ParseEdge(JsonObject *root)
-               {
-                       LOGI("ENTER");
+       ~Edge() = default;
 
-                       JsonArray * rootArray = json_object_get_array_member(root, "edgemap");
-                       unsigned int elements = json_array_get_length(rootArray);
+       int ParseEdge(JsonObject *root)
+       {
+               LOGI("ENTER");
 
-                       // TODO: handling error
-                       int pEdgeNode, cEdgeNode;
+               JsonArray *rootArray = json_object_get_array_member(root, "edgemap");
+               unsigned int elements = json_array_get_length(rootArray);
 
-                       for (unsigned int elem = 0; elem < elements; ++elem) {
-                               JsonNode *pNode = json_array_get_element(rootArray, elem);
-                               JsonObject *pObject = json_node_get_object(pNode);
+               // TODO: handling error
+               int pEdgeNode, cEdgeNode;
 
-                               pEdgeNode = json_object_get_int_member(pObject, "parent");
-                               cEdgeNode = json_object_get_int_member(pObject, "child");
+               for (unsigned int elem = 0; elem < elements; ++elem) {
+                       JsonNode *pNode = json_array_get_element(rootArray, elem);
+                       JsonObject *pObject = json_node_get_object(pNode);
 
-                               edges.push_back(std::make_pair(pEdgeNode, cEdgeNode));
-                               LOGI("%ud: parent - child: %d - %d", elem, pEdgeNode, cEdgeNode);
-                       }
+                       pEdgeNode = json_object_get_int_member(pObject, "parent");
+                       cEdgeNode = json_object_get_int_member(pObject, "child");
 
-                       LOGI("LEAVE");
-                       return MEDIA_VISION_ERROR_NONE;
+                       edges.push_back(std::make_pair(pEdgeNode, cEdgeNode));
+                       LOGI("%ud: parent - child: %d - %d", elem, pEdgeNode, cEdgeNode);
                }
 
-               std::vector<std::pair<int, int>>& GetEdgesAll() { return edges; }
-       };
+               LOGI("LEAVE");
+               return MEDIA_VISION_ERROR_NONE;
+       }
+
+       std::vector<std::pair<int, int> > &GetEdgesAll()
+       {
+               return edges;
+       }
+};
 } /* Inference */
 } /* MediaVision */
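
Likewise, a hedged example of the "edgemap" array that ParseEdge() above walks; each element contributes one (parent, child) pair to edges. The joint ids here are placeholders.

const char *kEdgeMapJson = R"({
	"edgemap" : [
		{ "parent" : 0, "child" : 1 },
		{ "parent" : 1, "child" : 2 }
	]
})";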
 
index 84a63d2..76c7cb9 100644
  */
 using namespace InferenceEngineInterface::Common;
 
-typedef struct _ImageClassficationResults {
+typedef struct _ImageClassficationResults
+{
        int number_of_classes;
        std::vector<int> indices;
        std::vector<std::string> names;
        std::vector<float> confidences;
 } ImageClassificationResults; /**< structure ImageClassificationResults */
 
-typedef struct _ObjectDetectionResults {
+typedef struct _ObjectDetectionResults
+{
        int number_of_objects;
        std::vector<int> indices;
        std::vector<std::string> names;
@@ -59,18 +61,21 @@ typedef struct _ObjectDetectionResults {
        std::vector<cv::Rect> locations;
 } ObjectDetectionResults; /**< structure ObjectDetectionResults */
 
-typedef struct _FaceDetectionResults {
+typedef struct _FaceDetectionResults
+{
        int number_of_faces;
        std::vector<float> confidences;
        std::vector<cv::Rect> locations;
 } FaceDetectionResults; /**< structure FaceDetectionResults */
 
-typedef struct _FacialLandMarkDetectionResults {
+typedef struct _FacialLandMarkDetectionResults
+{
        int number_of_landmarks;
        std::vector<cv::Point> locations;
 } FacialLandMarkDetectionResults; /**< structure FacialLandMarkDetectionResults */
 
-typedef struct _PoseLandmarkDetectionResults {
+typedef struct _PoseLandmarkDetectionResults
+{
        int number_of_landmarks;
        std::vector<cv::Point2f> locations;
        std::vector<float> score;
@@ -80,122 +85,121 @@ namespace mediavision
 {
 namespace inference
 {
-       struct TensorInfo {
-               int width;
-               int height;
-               int dim;
-               int ch;
-       };
-
-       struct InferenceConfig {
-               /**
+struct TensorInfo
+{
+       int width;
+       int height;
+       int dim;
+       int ch;
+};
+
+struct InferenceConfig
+{
+       /**
                 * @brief Default constructor for the @ref InferenceConfig
                 *
                 * @since_tizen 5.0
                 */
-               InferenceConfig();
+       InferenceConfig();
 
-               std::string mConfigFilePath; /**< Path of a model configuration file */
+       std::string mConfigFilePath; /**< Path of a model configuration file */
 
-               std::string mWeightFilePath; /**< Path of a model weight file */
+       std::string mWeightFilePath; /**< Path of a model weight file */
 
-               std::string mUserFilePath; /**< Path of model user file */
+       std::string mUserFilePath; /**< Path of model user file */
 
-               TensorInfo mTensorInfo; /**< Tensor information */
+       TensorInfo mTensorInfo; /**< Tensor information */
 
-               mv_inference_data_type_e mDataType; /**< Data type of a input tensor */
+       mv_inference_data_type_e mDataType; /**< Data type of a input tensor */
 
-               int mTargetTypes; /**< Target type to run inference */
+       int mTargetTypes; /**< Target type to run inference */
 
-               double mConfidenceThresHold; /**< Confidence threshold value */
+       double mConfidenceThresHold; /**< Confidence threshold value */
 
-               double mMeanValue; /**< The mean value for normalization */
+       double mMeanValue; /**< The mean value for normalization */
 
-               double mStdValue; /**< The scale factor value for normalization */
+       double mStdValue; /**< The scale factor value for normalization */
 
-               int mMaxOutputNumbers;
+       int mMaxOutputNumbers;
 
-               std::vector<std::string> mInputLayerNames; /**< The input layer names */
-               std::vector<std::string> mOutputLayerNames; /**< The output layer names */
-       };
+       std::vector<std::string> mInputLayerNames; /**< The input layer names */
+       std::vector<std::string> mOutputLayerNames; /**< The output layer names */
+};
 
-       class Inference
-       {
-       public:
-               /**
+class Inference
+{
+public:
+       /**
                 * @brief   Creates an Inference class instance.
                 *
                 * @since_tizen 5.5
                 */
-               Inference();
+       Inference();
 
-               /**
+       /**
                 * @brief   Destroys an Inference class instance including
                 *           its all resources.
                 *
                 * @since_tizen 5.5
                 */
-               ~Inference();
+       ~Inference();
 
-               /**
+       /**
                 * @brief   Configure modelfiles
                 *
                 * @since_tizen 5.5
                 */
-               void ConfigureModelFiles(const std::string modelConfigFilePath,
-                                                                const std::string modelWeightFilePath,
-                                                                const std::string modelUserFilePath);
+       void ConfigureModelFiles(const std::string modelConfigFilePath, const std::string modelWeightFilePath,
+                                                        const std::string modelUserFilePath);
 
-               /**
+       /**
                 * @brief Configure input information
                 *
                 * @since_tizen 6.0
                 */
-               void ConfigureInputInfo(int width, int height, int dim, int ch,
-                                                               double stdValue, double meanValue, int dataType,
-                                                               const std::vector<std::string> names);
+       void ConfigureInputInfo(int width, int height, int dim, int ch, double stdValue, double meanValue, int dataType,
+                                                       const std::vector<std::string> names);
 
-               void ConfigureOutputInfo(std::vector<std::string> names,
-                                                                std::vector<inference_engine_tensor_info>& tensors_info);
+       void ConfigureOutputInfo(std::vector<std::string> names, std::vector<inference_engine_tensor_info> &tensors_info);
 
-               /**
+       /**
                 * @brief   Configure a inference target device type such as CPU, GPU or NPU. (only one type can be set)
                 * @details Internally, a given device type will be converted to new type.
                 *                      This API is just used for backward compatibility.
                 *
                 * @since_tizen 6.0 (Deprecated)
                 */
-               int ConfigureTargetTypes(int targetType, bool isNewVersion);
+       int ConfigureTargetTypes(int targetType, bool isNewVersion);
 
-               /**
+       /**
                 * @brief   Configure inference target devices such as CPU, GPU or NPU. (one more types can be combined)
                 *
                 * @since_tizen 6.0
                 */
-               int ConfigureTargetDevices(const int targetDevices);
+       int ConfigureTargetDevices(const int targetDevices);
 
-               /**
+       /**
                 * @brief   Configure the maximum number of inference results
                 *
                 * @since_tizen 5.5
                 */
-               void ConfigureOutput(const int maxOutputNumbers);
+       void ConfigureOutput(const int maxOutputNumbers);
 
-               /**
+       /**
                 * @brief   Configure the confidence threshold
                 *
                 * @since_tizen 5.5
                 */
-               void ConfigureThreshold(const double threshold);
+       void ConfigureThreshold(const double threshold);
 
-               /**
+       /**
                 * @brief   Parses the metadata file path
                 *
                 * @since_tizen 6.5
                 */
-               int ParseMetadata(const std::string filePath);
+       int ParseMetadata(const std::string filePath);
 
-               /**
+       /**
                 * @brief   Bind a backend engine
                 * @details Use this function to bind a backend engine for the inference.
                 *                      This creates a inference engine common class object, and loads a backend
@@ -212,9 +216,9 @@ namespace inference
                 * @retval #MEDIA_VISION_ERROR_NONE Successful
                 * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
                 */
-               int Bind(int backend_type, int device_type);
+       int Bind(int backend_type, int device_type);
 
-               /**
+       /**
                 * @brief   Load model files
                 * @details Use this function to load given model files for the inference.
                 *
@@ -226,9 +230,9 @@ namespace inference
                 * @retval #MEDIA_VISION_ERROR_NONE Successful
                 * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
                 */
-               int Load();
+       int Load();
 
-               /**
+       /**
                 * @brief       Runs inference with a region of a given image
                 * @details Use this function to run forward pass with the given image.
                 *          The given image is preprocessed and the region of the image is
@@ -238,10 +242,9 @@ namespace inference
                 * @since_tizen 5.5
                 * @return @c true on success, otherwise a negative error value
                 */
-               int Run(std::vector<mv_source_h> &mvSources,
-                               std::vector<mv_rectangle_s> &rects);
+       int Run(std::vector<mv_source_h> &mvSources, std::vector<mv_rectangle_s> &rects);
 
-               /**
+       /**
                 * @brief       Runs inference with a region of a given image
                 * @details Use this function to run forward pass with the given image.
                 *          The given image is preprocessed and the region of the image is
@@ -251,113 +254,111 @@ namespace inference
                 * @since_tizen 7.0
                 * @return @c true on success, otherwise a negative error value
                 */
-               int Run(std::vector<void *>& buffer_objs);
+       int Run(std::vector<void *> &buffer_objs);
 
-               /**
+       /**
                 * @brief       Gets that given engine is supported or not
                 *
                 * @since_tizen 5.5
                 * @return @c true on success, otherwise a negative error value
                 */
-               std::pair<std::string, bool> GetSupportedInferenceBackend(int backend);
+       std::pair<std::string, bool> GetSupportedInferenceBackend(int backend);
 
-               /**
+       /**
                 * @brief       Gets the ImageClassificationResults
                 *
                 * @since_tizen 5.5
                 * @return @c true on success, otherwise a negative error value
                 */
-               int GetClassficationResults(ImageClassificationResults *results);
+       int GetClassficationResults(ImageClassificationResults *results);
 
-               /**
+       /**
                 * @brief       Gets the ObjectDetectioResults
                 *
                 * @since_tizen 5.5
                 * @return @c true on success, otherwise a negative error value
                 */
-               int GetObjectDetectionResults(ObjectDetectionResults *results);
+       int GetObjectDetectionResults(ObjectDetectionResults *results);
 
-               /**
+       /**
                 * @brief       Gets the FaceDetectioResults
                 *
                 * @since_tizen 5.5
                 * @return @c true on success, otherwise a negative error value
                 */
-               int GetFaceDetectionResults(FaceDetectionResults *results);
+       int GetFaceDetectionResults(FaceDetectionResults *results);
 
-               /**
+       /**
                 * @brief       Gets the FacialLandmarkDetectionResults
                 *
                 * @since_tizen 5.5
                 * @return @c true on success, otherwise a negative error value
                 */
-               int GetFacialLandMarkDetectionResults(FacialLandMarkDetectionResults *results);
+       int GetFacialLandMarkDetectionResults(FacialLandMarkDetectionResults *results);
 
-               /**
+       /**
                 * @brief       Gets the PoseLandmarkDetectionResults
                 *
                 * @since_tizen 6.0
                 * @return @c true on success, otherwise a negative error value
                 */
-               int GetPoseLandmarkDetectionResults(std::unique_ptr<mv_inference_pose_s> &detectionResults,
-                                                                               int width, int height);
-
-               mv_engine_config_h GetEngineConfig(void)
-               {
-                       return engine_config;
-               }
-
-               void SetEngineConfig(mv_engine_config_h config)
-               {
-                       engine_config = config;
-               }
-
-               int GetTargetType()
-               {
-                       return mConfig.mTargetTypes;
-               }
-
-               TensorBuffer& GetOutputTensorBuffer()
-               {
-                       return mOutputTensorBuffers;
-               }
-
-       private:
-               bool mCanRun; /**< The flag indicating ready to run Inference */
-               InferenceConfig mConfig;
-               inference_engine_capacity mBackendCapacity;
-               std::map<int, std::pair<std::string, bool> > mSupportedInferenceBackend;
-               cv::Size mInputSize;
-               cv::Size mSourceSize;
-               mv_engine_config_h engine_config;
-               InferenceEngineCommon *mBackend;
-               std::map<std::string, int> mModelFormats;
-               std::vector<std::string> mUserListName;
-               //std::map<std::string, inference_engine_tensor_buffer> mInputTensorBuffers;
-               TensorBuffer mInputTensorBuffers;
-               inference_engine_layer_property mInputLayerProperty;
-               //std::map<std::string, inference_engine_tensor_buffer> mOutputTensorBuffers;
-               TensorBuffer mOutputTensorBuffers;
-               inference_engine_layer_property mOutputLayerProperty;
-
-               Metadata mMetadata;
-               PreProcess mPreProc;
-
-       private:
-               void CheckSupportedInferenceBackend();
-               int CheckBackendType(const mv_inference_backend_type_e backendType);
-               bool IsTargetDeviceSupported(const int targetDevices);
-               int ConvertEngineErrorToVisionError(int error);
-               int ConvertTargetTypes(int given_types);
-               int ConvertToCv(int given_type);
-               int ConvertOutputDataTypeToFloat();
-               int Preprocess(std::vector<mv_source_h>& mv_sources, std::vector<cv::Mat>& cv_sources);
-               inference_tensor_data_type_e ConvertToIE(int given_type);
-               int PrepareTenosrBuffers(void);
-               void CleanupTensorBuffers(void);
-               int SetUserFile(std::string filename);
-
-       };
+       int GetPoseLandmarkDetectionResults(std::unique_ptr<mv_inference_pose_s> &detectionResults, int width, int height);
+
+       mv_engine_config_h GetEngineConfig(void)
+       {
+               return engine_config;
+       }
+
+       void SetEngineConfig(mv_engine_config_h config)
+       {
+               engine_config = config;
+       }
+
+       int GetTargetType()
+       {
+               return mConfig.mTargetTypes;
+       }
+
+       TensorBuffer &GetOutputTensorBuffer()
+       {
+               return mOutputTensorBuffers;
+       }
+
+private:
+       bool mCanRun; /**< The flag indicating ready to run Inference */
+       InferenceConfig mConfig;
+       inference_engine_capacity mBackendCapacity;
+       std::map<int, std::pair<std::string, bool> > mSupportedInferenceBackend;
+       cv::Size mInputSize;
+       cv::Size mSourceSize;
+       mv_engine_config_h engine_config;
+       InferenceEngineCommon *mBackend;
+       std::map<std::string, int> mModelFormats;
+       std::vector<std::string> mUserListName;
+       //std::map<std::string, inference_engine_tensor_buffer> mInputTensorBuffers;
+       TensorBuffer mInputTensorBuffers;
+       inference_engine_layer_property mInputLayerProperty;
+       //std::map<std::string, inference_engine_tensor_buffer> mOutputTensorBuffers;
+       TensorBuffer mOutputTensorBuffers;
+       inference_engine_layer_property mOutputLayerProperty;
+
+       Metadata mMetadata;
+       PreProcess mPreProc;
+
+private:
+       void CheckSupportedInferenceBackend();
+       int CheckBackendType(const mv_inference_backend_type_e backendType);
+       bool IsTargetDeviceSupported(const int targetDevices);
+       int ConvertEngineErrorToVisionError(int error);
+       int ConvertTargetTypes(int given_types);
+       int ConvertToCv(int given_type);
+       int ConvertOutputDataTypeToFloat();
+       int Preprocess(std::vector<mv_source_h> &mv_sources, std::vector<cv::Mat> &cv_sources);
+       inference_tensor_data_type_e ConvertToIE(int given_type);
+       int PrepareTenosrBuffers(void);
+       void CleanupTensorBuffers(void);
+       int SetUserFile(std::string filename);
+};
 
 } /* Inference */
 } /* MediaVision */
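
A hedged end-to-end sketch of the Inference flow declared above: configure, bind a backend, load, run, then read results. The file paths and the backend/device integer codes are placeholders, and error handling is trimmed to early returns.

using namespace mediavision::inference;

int runObjectDetection(mv_source_h source)
{
	Inference inference;
	inference.ConfigureModelFiles("model.tflite", "model.tflite", "labels.txt"); // placeholder paths

	if (inference.Bind(/* backend_type */ 1, /* device_type */ 1) != MEDIA_VISION_ERROR_NONE)
		return MEDIA_VISION_ERROR_INVALID_OPERATION;

	if (inference.Load() != MEDIA_VISION_ERROR_NONE)
		return MEDIA_VISION_ERROR_INVALID_OPERATION;

	std::vector<mv_source_h> sources = { source };
	std::vector<mv_rectangle_s> rects; // empty: whole-frame inference (assumption)

	if (inference.Run(sources, rects) != MEDIA_VISION_ERROR_NONE)
		return MEDIA_VISION_ERROR_INVALID_OPERATION;

	ObjectDetectionResults results;
	int ret = inference.GetObjectDetectionResults(&results);
	// on success: results.number_of_objects, results.names[i], results.locations[i]
	return ret;
}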
index 7a58614..3a06a61 100644
@@ -25,46 +25,46 @@ namespace mediavision
 {
 namespace inference
 {
-       class InferenceInI
-       {
-       public:
-               /**
+class InferenceInI
+{
+public:
+       /**
                 * @brief   Creates an Inference class instance.
                 *
                 * @since_tizen 5.5
                 */
-               InferenceInI();
+       InferenceInI();
 
-               /**
+       /**
                 * @brief   Destroys an Inference class instance including
                 *           its all resources.
                 *
                 * @since_tizen 5.5
                 */
-               ~InferenceInI();
+       ~InferenceInI();
 
-               /**
+       /**
                 * @brief   Load()
                 *
                 * @since_tizen 5.5
                 */
-               int LoadInI();
+       int LoadInI();
 
-               /**
+       /**
                 * @brief   Unload()
                 *
                 * @since_tizen 5.5
                 */
-               void UnLoadInI();
+       void UnLoadInI();
 
-               std::vector<int> GetSupportedInferenceEngines();
+       std::vector<int> GetSupportedInferenceEngines();
 
-       private:
-               std::vector<int> mSupportedInferenceBackend;
-               std::string mIniDefaultPath;
-               std::string mDefaultBackend;
-               std::string mDelimeter;
-       };
+private:
+       std::vector<int> mSupportedInferenceBackend;
+       std::string mIniDefaultPath;
+       std::string mDefaultBackend;
+       std::string mDelimeter;
+};
 
 } /* Inference */
 } /* MediaVision */
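
A short sketch of the InferenceInI lifecycle above, assuming LoadInI() follows the module's MEDIA_VISION_ERROR_NONE convention: load the INI, query the backend ids, then unload.

mediavision::inference::InferenceInI ini;
if (ini.LoadInI() == MEDIA_VISION_ERROR_NONE) {
	for (int backend : ini.GetSupportedInferenceEngines())
		LOGI("supported backend id: %d", backend);
	ini.UnLoadInI();
}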
index 77a5c71..f1c762c 100644
@@ -35,97 +35,110 @@ namespace mediavision
 {
 namespace inference
 {
+struct Normalization
+{
+       bool use { false };
+       std::vector<double> mean;
+       std::vector<double> std;
+};
 
-       struct Normalization {
-               bool use { false };
-               std::vector<double> mean;
-               std::vector<double> std;
-       };
+struct Quantization
+{
+       bool use { false };
+       std::vector<double> scale;
+       std::vector<double> zeropoint;
+};
 
-       struct Quantization {
-               bool use { false };
-               std::vector<double> scale;
-               std::vector<double> zeropoint;
-       };
+struct Options
+{
+       Normalization normalization;
+       Quantization quantization;
+};
 
-       struct Options {
-               Normalization normalization;
-               Quantization  quantization;
-       };
+struct LayerInfo
+{
+       std::string name;
+       std::vector<int> dims;
+       mv_colorspace_e colorSpace {};
+       mv_inference_data_type_e dataType {};
+       inference_tensor_shape_type_e shapeType {}; // TODO: define mv_inference_shape_type_e
 
-       struct LayerInfo
+       int getWidth() const
        {
-               std::string name;
-               std::vector<int> dims;
-               mv_colorspace_e colorSpace {};
-               mv_inference_data_type_e dataType {};
-               inference_tensor_shape_type_e shapeType {}; // TODO: define mv_inference_shape_type_e
-
-               int getWidth() const {
-                       if (shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
-                               return dims[3];
-                       } else { // INFERENCE_TENSOR_SHAPE_NHWC
-                               return dims[2];
-                       }
+               if (shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
+                       return dims[3];
+               } else { // INFERENCE_TENSOR_SHAPE_NHWC
+                       return dims[2];
                }
+       }
 
-               int getHeight() const {
-                       if (shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
-                               return dims[2];
-                       } else { // INFERENCE_TENSOR_SHAPE_NHWC
-                               return dims[1];
-                       }
+       int getHeight() const
+       {
+               if (shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
+                       return dims[2];
+               } else { // INFERENCE_TENSOR_SHAPE_NHWC
+                       return dims[1];
                }
+       }
 
-               int getChannel() const {
-                       if (shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
-                               return dims[1];
-                       } else { // INFERENCE_TENSOR_SHAPE_NHWC
-                               return dims[3];
-                       }
+       int getChannel() const
+       {
+               if (shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
+                       return dims[1];
+               } else { // INFERENCE_TENSOR_SHAPE_NHWC
+                       return dims[3];
                }
-       };
+       }
+};
 
-       class InputMetadata
-       {
-       public:
-               /**
+class InputMetadata
+{
+public:
+       /**
                 * @brief   Creates an InputMetadata class instance.
                 *
                 * @since_tizen 6.5
                 */
-               InputMetadata();
+       InputMetadata();
 
-               /**
+       /**
                 * @brief   Destroys an InputMetadata class instance including
                 *          its all resources.
                 *
                 * @since_tizen 6.5
                 */
-               ~InputMetadata() = default;
+       ~InputMetadata() = default;
 
-               /**
+       /**
                 * @brief Parses an InputMetadata
                 *
                 * @since_tizen 6.5
                 */
-               int Parse(JsonObject *root);
-               bool IsParsed(void) { return parsed; }
-               std::map<std::string, LayerInfo>& GetLayer() { return layer; }
-               std::map<std::string, Options>& GetOption() { return option; }
-
-       private:
-               bool parsed;
-               std::map<std::string, inference_tensor_shape_type_e> mSupportedShapeType;
-               std::map<std::string, mv_inference_data_type_e> mSupportedDataType;
-               std::map<std::string, mv_colorspace_e> mSupportedColorSpace;
-               std::map<std::string, LayerInfo> layer;
-               std::map<std::string, Options> option;
-
-               int GetTensorInfo(JsonObject* root);
-               int GetPreProcess(JsonObject* root);
-
-       };
+       int Parse(JsonObject *root);
+       bool IsParsed(void)
+       {
+               return parsed;
+       }
+       std::map<std::string, LayerInfo> &GetLayer()
+       {
+               return layer;
+       }
+       std::map<std::string, Options> &GetOption()
+       {
+               return option;
+       }
+
+private:
+       bool parsed;
+       std::map<std::string, inference_tensor_shape_type_e> mSupportedShapeType;
+       std::map<std::string, mv_inference_data_type_e> mSupportedDataType;
+       std::map<std::string, mv_colorspace_e> mSupportedColorSpace;
+       std::map<std::string, LayerInfo> layer;
+       std::map<std::string, Options> option;
+
+       int GetTensorInfo(JsonObject *root);
+       int GetPreProcess(JsonObject *root);
+};
 
 } /* Inference */
 } /* MediaVision */
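
To make the getWidth()/getHeight()/getChannel() indexing above concrete: the same accessors read different dims slots depending on the shape type. A 1x3x224x224 NCHW tensor and a 1x224x224x3 NHWC tensor both report width 224, height 224, channel 3.

mediavision::inference::LayerInfo nchw;
nchw.dims = { 1, 3, 224, 224 };
nchw.shapeType = INFERENCE_TENSOR_SHAPE_NCHW;
// nchw.getChannel() -> dims[1] == 3, getHeight() -> dims[2], getWidth() -> dims[3]

mediavision::inference::LayerInfo nhwc;
nhwc.dims = { 1, 224, 224, 3 };
nhwc.shapeType = INFERENCE_TENSOR_SHAPE_NHWC;
// nhwc.getHeight() -> dims[1], getWidth() -> dims[2], getChannel() -> dims[3]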
index 6f4512b..7d052c8 100644
@@ -10,222 +10,255 @@ namespace mediavision
 {
 namespace inference
 {
+/** Class created for storing single joint data from bvh file */
+class Joint
+{
+public:
+       /** A struct that keep offset of joint in relation to parent */
+       struct Offset
+       {
+               float x;
+               float y;
+               float z;
+       };
+
+       /** A enumeration type useful for set order of channels for every joint */
+       enum class Channel
+       {
+               XPOSITION,
+               YPOSITION,
+               ZPOSITION,
+               ZROTATION,
+               XROTATION,
+               YROTATION
+       };
+
+       /** A string names for each channel */
+       const std::vector<std::string> channel_name_str = { "XPOSITION", "YPOSITION", "ZPOSITION",
+                                                                                                               "ZROTATION", "XROTATION", "YROTATION" };
 
-  /** Class created for storing single joint data from bvh file */
-  class Joint {
-  public:
-    /** A struct that keep offset of joint in relation to parent */
-    struct Offset {
-      float x;
-      float y;
-      float z;
-    };
-
-    /** A enumeration type useful for set order of channels for every joint */
-    enum class Channel {
-      XPOSITION,
-      YPOSITION,
-      ZPOSITION,
-      ZROTATION,
-      XROTATION,
-      YROTATION
-    };
-
-    /** A string names for each channel */
-    const std::vector<std::string> channel_name_str = {
-      "XPOSITION",
-      "YPOSITION",
-      "ZPOSITION",
-      "ZROTATION",
-      "XROTATION",
-      "YROTATION"
-    };
-
-    /** Adds single frame motion data
+       /** Adds single frame motion data
      *  @param  data    The motion data to be added
      */
-    void add_frame_motion_data(const std::vector <float>& data) {
-      channel_data_.push_back(data);
-    }
+       void add_frame_motion_data(const std::vector<float> &data)
+       {
+               channel_data_.push_back(data);
+       }
 
-    /** Gets the parent joint of this joint
+       /** Gets the parent joint of this joint
      *  @return  The parent joint
      */
-    std::shared_ptr <Joint> parent() const { return parent_; }
+       std::shared_ptr<Joint> parent() const
+       {
+               return parent_;
+       }
 
-    /** Gets the name of this joint
+       /** Gets the name of this joint
      *  @return  The joint's name
      */
-    std::string name() const { return name_; }
+       std::string name() const
+       {
+               return name_;
+       }
 
-    /** Gets the offset of this joint
+       /** Gets the offset of this joint
      *  @return  The joint's offset
      */
-    Offset offset() const { return offset_; }
+       Offset offset() const
+       {
+               return offset_;
+       }
 
-    /** Gets the channels order of this joint
+       /** Gets the channels order of this joint
      *  @return  The joint's channels order
      */
-    std::vector <Channel> channels_order() const {
-      return channels_order_;
-    }
+       std::vector<Channel> channels_order() const
+       {
+               return channels_order_;
+       }
 
-    /** Gets the all children joints of this joint
+       /** Gets the all children joints of this joint
      *  @return  The joint's children
      */
-    std::vector <std::shared_ptr <Joint>> children() const {
-      return children_;
-    }
+       std::vector<std::shared_ptr<Joint> > children() const
+       {
+               return children_;
+       }
 
-    /** Gets the channels data of this joint for all frames
+       /** Gets the channels data of this joint for all frames
      *  @return  The joint's channel data
      */
-    const std::vector <std::vector <float>>& channel_data() const {
-      return channel_data_;
-    }
+       const std::vector<std::vector<float> > &channel_data() const
+       {
+               return channel_data_;
+       }
 
-    /** Gets the channel data of this joint for selected frame
+       /** Gets the channel data of this joint for selected frame
      *  @param   frame   The frame for which channel data will be returned
      *  @return  The joint's channel data for selected frame
      */
-    const std::vector <float>& channel_data(unsigned frame) const {
-      return channel_data_[frame];
-    }
+       const std::vector<float> &channel_data(unsigned frame) const
+       {
+               return channel_data_[frame];
+       }
 
-    /** Gets the channel data of this joint for selected frame and channel
+       /** Gets the channel data of this joint for selected frame and channel
      *  @param   frame        The frame for which channel data will be returned
      *  @param   channel_num  The number of channel which data will be returned
      *  @return  The joint's channel data for selected frame and channel
      */
-    float channel_data(unsigned frame, unsigned channel_num) const {
-      return channel_data_[frame][channel_num];
-    }
+       float channel_data(unsigned frame, unsigned channel_num) const
+       {
+               return channel_data_[frame][channel_num];
+       }
 
-    /** Gets the local transformation matrix for this joint for all frames
+       /** Gets the local transformation matrix for this joint for all frames
      *  @return  The joint's local transformation matrix
      */
-    std::vector <cv::Mat> ltm() const {
-      return ltm_;
-    }
+       std::vector<cv::Mat> ltm() const
+       {
+               return ltm_;
+       }
 
-    /** Gets the local transformation matrix for this joint for selected frame
+       /** Gets the local transformation matrix for this joint for selected frame
      *  @param   frame    The frame for which ltm will be returned
      *  @return  The joint's local transformation matrix for selected frame
      */
-    cv::Mat ltm(unsigned frame) const {
-      return ltm_[frame];
-    }
+       cv::Mat ltm(unsigned frame) const
+       {
+               return ltm_[frame];
+       }
 
-    /** Gets the position for this joint for all frames
+       /** Gets the position for this joint for all frames
      *  @return  The joint's position
      */
-    std::vector <cv::Vec3f> pos() const {
-      return pos_;
-    }
+       std::vector<cv::Vec3f> pos() const
+       {
+               return pos_;
+       }
 
-    /** Gets the position for this joint for selected frame
+       /** Gets the position for this joint for selected frame
      *  @param   frame    The frame for which ltm will be returned
      *  @return  The joint's position for selected frame
      */
-    cv::Vec3f pos(unsigned frame) const {
-      return pos_[frame];
-    }
+       cv::Vec3f pos(unsigned frame) const
+       {
+               return pos_[frame];
+       }
 
-    /** Gets the number of channels of this joint
+       /** Gets the number of channels of this joint
      *  @return  The joint's channels number
      */
-    unsigned num_channels() const { return channels_order_.size(); }
+       unsigned num_channels() const
+       {
+               return channels_order_.size();
+       }
 
-    /** Sets the this joint parent joint
+       /** Sets the this joint parent joint
      *  @param   arg    The parent joint of this joint
      */
-    void set_parent(const std::shared_ptr <Joint> arg) { parent_ = arg; }
+       void set_parent(const std::shared_ptr<Joint> arg)
+       {
+               parent_ = arg;
+       }
 
-    /** Sets the this joint name
+       /** Sets the this joint name
      *  @param   arg    The name of this joint
      */
-    void set_name(const std::string arg) { name_ = arg; }
+       void set_name(const std::string arg)
+       {
+               name_ = arg;
+       }
 
-    /** Sets the this joint offset
+       /** Sets the this joint offset
      *  @param   arg    The offset of this joint
      */
-    void set_offset(const Offset arg) { offset_ = arg; }
+       void set_offset(const Offset arg)
+       {
+               offset_ = arg;
+       }
 
-    /** Sets the this joint channels order
+       /** Sets the this joint channels order
      *  @param   arg    The channels order of this joint
      */
-    void set_channels_order(const std::vector <Channel>& arg) {
-      channels_order_ = arg;
-    }
+       void set_channels_order(const std::vector<Channel> &arg)
+       {
+               channels_order_ = arg;
+       }
 
-    /** Sets the this joint children
+       /** Sets the this joint children
      *  @param   arg    The children of this joint
      */
-    void set_children(const std::vector <std::shared_ptr <Joint>>& arg) {
-      children_ = arg;
-    }
+       void set_children(const std::vector<std::shared_ptr<Joint> > &arg)
+       {
+               children_ = arg;
+       }
 
-    /** Sets the this joint channels data
+       /** Sets the this joint channels data
      *  @param   arg    The channels data of this joint
      */
-    void set_channel_data(const std::vector <std::vector <float>>& arg) {
-      channel_data_ = arg;
-    }
+       void set_channel_data(const std::vector<std::vector<float> > &arg)
+       {
+               channel_data_ = arg;
+       }
 
-    /** Sets local transformation matrix for selected frame
+       /** Sets local transformation matrix for selected frame
      *  @param  matrix  The local transformation matrix to be set
      *  @param  frame   The number of frame for which you want set ltm. As
      *                  default it is set to 0.
      */
-    void set_ltm(const cv::Mat matrix, unsigned frame = 0) {
-      if (frame > 0 && frame < ltm_.size())
-        ltm_[frame] = matrix;
-      else
-        ltm_.push_back(matrix);
-    }
+       void set_ltm(const cv::Mat matrix, unsigned frame = 0)
+       {
+               if (frame > 0 && frame < ltm_.size())
+                       ltm_[frame] = matrix;
+               else
+                       ltm_.push_back(matrix);
+       }
 
-    /** Sets local transformation matrix for selected frame
+       /** Sets local transformation matrix for selected frame
      *  @param  pos     The position of joint in selected frame to be set
      *  @param  frame   The number of frame for which you want set position. As
      *                  default it is set to 0.
      */
-    void set_pos(const cv::Vec3f pos, unsigned frame = 0) {
-      if (frame > 0 && frame < pos_.size())
-        pos_[frame] = pos;
-      else
-        pos_.push_back(pos);
-    }
+       void set_pos(const cv::Vec3f pos, unsigned frame = 0)
+       {
+               if (frame > 0 && frame < pos_.size())
+                       pos_[frame] = pos;
+               else
+                       pos_.push_back(pos);
+       }
 
-    /** Gets channels name of this joint
+       /** Gets channels name of this joint
      *  @return The joint's channels name
      */
-    const std::vector<std::string> get_channels_name() const {
-      std::vector<std::string> channel_names;
-
-      for (const auto &channel : channels_order_)
-        channel_names.push_back(channel_name_str[static_cast<int>(channel)]);
-
-      return channel_names;
-    }
-
-  private:
-    /** Parent joint in file hierarchy */
-    std::shared_ptr <Joint> parent_;
-    std::string name_;
-    Offset offset_;
-    /** Order of joint's input channels */
-    std::vector <Channel> channels_order_;
-    /** Pointers to joints that are children of this in hierarchy */
-    std::vector <std::shared_ptr <Joint>> children_;
-    /** Structure for keep joint's channel's data.
+       const std::vector<std::string> get_channels_name() const
+       {
+               std::vector<std::string> channel_names;
+
+               for (const auto &channel : channels_order_)
+                       channel_names.push_back(channel_name_str[static_cast<int>(channel)]);
+
+               return channel_names;
+       }
+
+private:
+       /** Parent joint in file hierarchy */
+       std::shared_ptr<Joint> parent_;
+       std::string name_;
+       Offset offset_;
+       /** Order of joint's input channels */
+       std::vector<Channel> channels_order_;
+       /** Pointers to joints that are children of this in hierarchy */
+       std::vector<std::shared_ptr<Joint> > children_;
+       /** Structure for keep joint's channel's data.
      *  Each vector keep data for one channel.
      */
-    std::vector <std::vector <float> > channel_data_;
-    /** Local transformation matrix for each frame */
-    std::vector <cv::Mat> ltm_;
-    /** Vector x, y, z of joint position for each frame */
-    std::vector <cv::Vec3f> pos_;
-  };
+       std::vector<std::vector<float> > channel_data_;
+       /** Local transformation matrix for each frame */
+       std::vector<cv::Mat> ltm_;
+       /** Vector x, y, z of joint position for each frame */
+       std::vector<cv::Vec3f> pos_;
+};
 }
 } // namespace
 #endif // __MEDIA_VISION_JOINT_H__
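
A hedged sketch of assembling a Joint hierarchy using only the setters and getters above (assuming <memory> is included); the joint names and channel values are placeholders.

using mediavision::inference::Joint;

auto hip = std::make_shared<Joint>();
hip->set_name("Hips");
hip->set_offset({ 0.f, 0.f, 0.f });
hip->set_channels_order({ Joint::Channel::XPOSITION, Joint::Channel::YPOSITION, Joint::Channel::ZPOSITION });
hip->add_frame_motion_data({ 0.f, 92.4f, 0.f }); // channel values for frame 0

auto knee = std::make_shared<Joint>();
knee->set_name("LeftKnee");
knee->set_parent(hip);
hip->set_children({ knee });

float y0 = hip->channel_data(0, 1); // frame 0, channel 1 (YPOSITION) -> 92.4f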
index 87f0c03..f429361 100644
@@ -37,252 +37,262 @@ namespace mediavision
 {
 namespace inference
 {
-       typedef struct _LandmarkPoint
-       {
-               float score;
-               cv::Point heatMapLoc;
-               cv::Point2f decodedLoc;
-               int id;
-               bool valid;
-       } LandmarkPoint;
-
-       typedef struct _LandmarkResults
-       {
-               std::vector<LandmarkPoint> landmarks;
-               float score;
-       } LandmarkResults;
-
-       typedef struct _HeatMapInfo {
-               int wIdx;
-               int hIdx;
-               int cIdx;
-               float nmsRadius;
-               inference_tensor_shape_type_e shapeType;
-       } HeatMapInfo;
-
-       class Landmark
-       {
-       private:
-               std::string name;
-               DimInfo dimInfo;
-               inference_landmark_type_e type; /**< 0: 2D_SINGLE, 1: 2D_MULTI, 2: 3D_SINGLE */
-               int offset;
-               inference_landmark_coorindate_type_e coordinate; /**< 0: RATIO, 1: PIXEL */
-               inference_landmark_decoding_type_e decodingType; /**< 0: decoding  unnecessary,
+typedef struct _LandmarkPoint
+{
+       float score;
+       cv::Point heatMapLoc;
+       cv::Point2f decodedLoc;
+       int id;
+       bool valid;
+} LandmarkPoint;
+
+typedef struct _LandmarkResults
+{
+       std::vector<LandmarkPoint> landmarks;
+       float score;
+} LandmarkResults;
+
+typedef struct _HeatMapInfo
+{
+       int wIdx;
+       int hIdx;
+       int cIdx;
+       float nmsRadius;
+       inference_tensor_shape_type_e shapeType;
+} HeatMapInfo;
+
+class Landmark
+{
+private:
+       std::string name;
+       DimInfo dimInfo;
+       inference_landmark_type_e type; /**< 0: 2D_SINGLE, 1: 2D_MULTI, 2: 3D_SINGLE */
+       int offset;
+       inference_landmark_coorindate_type_e coordinate; /**< 0: RATIO, 1: PIXEL */
+       inference_landmark_decoding_type_e decodingType; /**< 0: decoding  unnecessary,
                                                                                                                        1: decoding heatmap,
                                                                                                                        2: decoding heatmap with refinement */
-               HeatMapInfo heatMapInfo;
-               std::vector<DispVec> dispVecs;
-               Edge edgeMap;
-
-               std::map<std::string, inference_landmark_type_e> supportedLandmarkTypes;
-               std::map<std::string, inference_landmark_coorindate_type_e> supportedLandmarkCoordinateTypes;
-               std::map<std::string, inference_landmark_decoding_type_e> supportedLandmarkDecodingTypes;
-
-       public:
-
-               Landmark() :
-                       name(),
-                       dimInfo(),
-                       type(INFERENCE_LANDMARK_TYPE_2D_SINGLE),
-                       offset(),
-                       coordinate(INFERENCE_LANDMARK_COORDINATE_TYPE_RATIO),
-                       decodingType(INFERENCE_LANDMARK_DECODING_TYPE_BYPASS),
-                       heatMapInfo()
-
-               {
-                       supportedLandmarkTypes.insert({"2D_SINGLE", INFERENCE_LANDMARK_TYPE_2D_SINGLE});
-                       supportedLandmarkTypes.insert({"2D_MULTI",  INFERENCE_LANDMARK_TYPE_2D_MULTI});
-                       supportedLandmarkTypes.insert({"3D_SINGLE", INFERENCE_LANDMARK_TYPE_3D_SINGLE});
-
-                       supportedLandmarkCoordinateTypes.insert({"RATIO", INFERENCE_LANDMARK_COORDINATE_TYPE_RATIO});
-                       supportedLandmarkCoordinateTypes.insert({"PIXEL", INFERENCE_LANDMARK_COORDINATE_TYPE_PIXEL});
-
-                       supportedLandmarkDecodingTypes.insert({"BYPASS", INFERENCE_LANDMARK_DECODING_TYPE_BYPASS});
-                       supportedLandmarkDecodingTypes.insert({"BYPASS_MULTICHANNEL", INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL});
-                       supportedLandmarkDecodingTypes.insert({"HEATMAP", INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP});
-                       supportedLandmarkDecodingTypes.insert({"HEATMAP_REFINE", INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE});
-               }
+       HeatMapInfo heatMapInfo;
+       std::vector<DispVec> dispVecs;
+       Edge edgeMap;
+
+       std::map<std::string, inference_landmark_type_e> supportedLandmarkTypes;
+       std::map<std::string, inference_landmark_coorindate_type_e> supportedLandmarkCoordinateTypes;
+       std::map<std::string, inference_landmark_decoding_type_e> supportedLandmarkDecodingTypes;
+
+public:
+       Landmark()
+                       : name()
+                       , dimInfo()
+                       , type(INFERENCE_LANDMARK_TYPE_2D_SINGLE)
+                       , offset()
+                       , coordinate(INFERENCE_LANDMARK_COORDINATE_TYPE_RATIO)
+                       , decodingType(INFERENCE_LANDMARK_DECODING_TYPE_BYPASS)
+                       , heatMapInfo()
+
+       {
+               supportedLandmarkTypes.insert({ "2D_SINGLE", INFERENCE_LANDMARK_TYPE_2D_SINGLE });
+               supportedLandmarkTypes.insert({ "2D_MULTI", INFERENCE_LANDMARK_TYPE_2D_MULTI });
+               supportedLandmarkTypes.insert({ "3D_SINGLE", INFERENCE_LANDMARK_TYPE_3D_SINGLE });
+
+               supportedLandmarkCoordinateTypes.insert({ "RATIO", INFERENCE_LANDMARK_COORDINATE_TYPE_RATIO });
+               supportedLandmarkCoordinateTypes.insert({ "PIXEL", INFERENCE_LANDMARK_COORDINATE_TYPE_PIXEL });
+
+               supportedLandmarkDecodingTypes.insert({ "BYPASS", INFERENCE_LANDMARK_DECODING_TYPE_BYPASS });
+               supportedLandmarkDecodingTypes.insert(
+                               { "BYPASS_MULTICHANNEL", INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL });
+               supportedLandmarkDecodingTypes.insert({ "HEATMAP", INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP });
+               supportedLandmarkDecodingTypes.insert({ "HEATMAP_REFINE", INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE });
+       }
+
+       ~Landmark() = default;
+
+       int ParseLandmark(JsonObject *root)
+       {
+               // box
+               JsonArray *rootArray = json_object_get_array_member(root, "landmark");
+               unsigned int elements = json_array_get_length(rootArray);
+
+               // TODO: handling error
+               for (unsigned int elem = 0; elem < elements; ++elem) {
+                       JsonNode *pNode = json_array_get_element(rootArray, elem);
+                       JsonObject *pObject = json_node_get_object(pNode);
+
+                       name = static_cast<const char *>(json_object_get_string_member(pObject, "name"));
+                       LOGI("layer: %s", name.c_str());
+
+                       JsonArray *array = json_object_get_array_member(pObject, "index");
+                       unsigned int elements2 = json_array_get_length(array);
+                       LOGI("range dim: size[%u]", elements2);
+                       for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
+                               if (static_cast<int>(json_array_get_int_element(array, elem2)) == 1)
+                                       dimInfo.SetValidIndex(elem2);
+                       }
 
-               ~Landmark() = default;
-
-               int ParseLandmark(JsonObject *root)
-               {
-                       // box
-                       JsonArray * rootArray = json_object_get_array_member(root, "landmark");
-                       unsigned int elements = json_array_get_length(rootArray);
-
-                       // TODO: handling error
-                       for (unsigned int elem = 0; elem < elements; ++elem) {
-
-                               JsonNode *pNode = json_array_get_element(rootArray, elem);
-                               JsonObject *pObject = json_node_get_object(pNode);
-
-                               name =
-                                       static_cast<const char*>(json_object_get_string_member(pObject,"name"));
-                               LOGI("layer: %s", name.c_str());
-
-                               JsonArray * array = json_object_get_array_member(pObject, "index");
-                               unsigned int elements2 = json_array_get_length(array);
-                               LOGI("range dim: size[%u]", elements2);
-                               for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
-                                       if (static_cast<int>(json_array_get_int_element(array, elem2)) == 1)
-                                               dimInfo.SetValidIndex(elem2);
-                               }
-
-                               try {
-                                       type = GetSupportedType(pObject, "landmark_type", supportedLandmarkTypes);
-                                       coordinate = GetSupportedType(pObject, "landmark_coordinate", supportedLandmarkCoordinateTypes);
-                                       decodingType = GetSupportedType(pObject, "decoding_type", supportedLandmarkDecodingTypes);
-                               } catch (const std::exception& e) {
-                                       LOGE("Invalid %s", e.what());
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-                               if (json_object_has_member(pObject, "landmark_offset")) {
-                                       offset = static_cast<int>(json_object_get_int_member(pObject, "landmark_offset"));
-                                       LOGI("(optional) landmark offset: %d", offset);
-                               }
+                       try {
+                               type = GetSupportedType(pObject, "landmark_type", supportedLandmarkTypes);
+                               coordinate = GetSupportedType(pObject, "landmark_coordinate", supportedLandmarkCoordinateTypes);
+                               decodingType = GetSupportedType(pObject, "decoding_type", supportedLandmarkDecodingTypes);
+                       } catch (const std::exception &e) {
+                               LOGE("Invalid %s", e.what());
+                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+                       }
+                       if (json_object_has_member(pObject, "landmark_offset")) {
+                               offset = static_cast<int>(json_object_get_int_member(pObject, "landmark_offset"));
+                               LOGI("(optional) landmark offset: %d", offset);
                        }
+               }
+
+               LOGI("LEAVE");
+               return MEDIA_VISION_ERROR_NONE;
+       }
+
+       int ParseDisplacement(JsonObject *root,
+                                                 const std::map<std::string, inference_tensor_shape_type_e> &supportedShapeType)
+       {
+               LOGI("ENTER");
 
+               if (!json_object_has_member(root, "displacement")) {
+                       LOGI("No displacement outputmetadata");
                        LOGI("LEAVE");
-                       return MEDIA_VISION_ERROR_NONE;
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
                }
 
-               int ParseDisplacement(JsonObject *root,
-                                                         const std::map<std::string, inference_tensor_shape_type_e>& supportedShapeType)
-               {
-                       LOGI("ENTER");
+               JsonArray *rootArray = json_object_get_array_member(root, "displacement");
+               unsigned int elements = json_array_get_length(rootArray);
 
-                       if (!json_object_has_member(root, "displacement")) {
-                               LOGI("No displacement outputmetadata");
-                               LOGI("LEAVE");
-                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       }
+               dispVecs.resize(elements);
+               unsigned int elem = 0;
+               for (auto &disp : dispVecs) {
+                       JsonNode *pNode = json_array_get_element(rootArray, elem++);
+                       JsonObject *pObject = json_node_get_object(pNode);
 
-                       JsonArray * rootArray = json_object_get_array_member(root, "displacement");
-                       unsigned int elements = json_array_get_length(rootArray);
+                       disp.ParseDisplacement(pObject, supportedShapeType);
+               }
 
-                       dispVecs.resize(elements);
-                       unsigned int elem = 0;
-                       for (auto& disp : dispVecs) {
-                               JsonNode *pNode = json_array_get_element(rootArray, elem++);
-                               JsonObject *pObject = json_node_get_object(pNode);
+               LOGI("LEAVE");
+               return MEDIA_VISION_ERROR_NONE;
+       }
 
-                               disp.ParseDisplacement(pObject, supportedShapeType);
-                       }
+       int ParseEdgeMap(JsonObject *root)
+       {
+               LOGI("ENTER");
 
+               if (!json_object_has_member(root, "edgemap")) {
+                       LOGI("No edgemap outputmetadata");
                        LOGI("LEAVE");
-                       return MEDIA_VISION_ERROR_NONE;
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
                }
 
-               int ParseEdgeMap(JsonObject * root)
-               {
-                       LOGI("ENTER");
+               edgeMap.ParseEdge(root);
+
+               LOGI("LEAVE");
+               return MEDIA_VISION_ERROR_NONE;
+       }
 
-                       if (!json_object_has_member(root, "edgemap")) {
-                               LOGI("No edgemap outputmetadata");
+       int ParseDecodeInfo(JsonObject *root,
+                                               const std::map<std::string, inference_tensor_shape_type_e> &supportedShapeType)
+       {
+               LOGI("ENTER");
+
+               // box
+               JsonArray *rootArray = json_object_get_array_member(root, "landmark");
+               unsigned int elements = json_array_get_length(rootArray);
+
+               // TODO: handling error
+               for (unsigned int elem = 0; elem < elements; ++elem) {
+                       JsonNode *pNode = json_array_get_element(rootArray, elem);
+                       JsonObject *pObject = json_node_get_object(pNode);
+
+                       if (!json_object_has_member(pObject, "decoding_info")) {
+                               LOGE("decoding_info is mandatory. Invalid metadata");
                                LOGI("LEAVE");
+
                                return MEDIA_VISION_ERROR_INVALID_OPERATION;
                        }
 
-                       edgeMap.ParseEdge(root);
+                       JsonObject *cObject = json_object_get_object_member(pObject, "decoding_info");
+                       if (!json_object_has_member(cObject, "heatmap")) {
+                               LOGE("heatmap is mandatory. Invalid metadata");
+                               LOGI("LEAVE");
 
-                       LOGI("LEAVE");
-                       return MEDIA_VISION_ERROR_NONE;
-               }
+                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+                       }
 
-               int ParseDecodeInfo(JsonObject *root,
-                                                       const std::map<std::string, inference_tensor_shape_type_e>& supportedShapeType)
-               {
-                       LOGI("ENTER");
-
-                       // box
-                       JsonArray * rootArray = json_object_get_array_member(root, "landmark");
-                       unsigned int elements = json_array_get_length(rootArray);
-
-                       // TODO: handling error
-                       for (unsigned int elem = 0; elem < elements; ++elem) {
-
-                               JsonNode *pNode = json_array_get_element(rootArray, elem);
-                               JsonObject *pObject = json_node_get_object(pNode);
-
-                               if (!json_object_has_member(pObject, "decoding_info")) {
-                                       LOGE("decoding_info is mandatory. Invalid metadata");
-                                       LOGI("LEAVE");
-
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-
-                               JsonObject *cObject = json_object_get_object_member(pObject, "decoding_info");
-                               if (!json_object_has_member(cObject, "heatmap")) {
-                                       LOGE("heatmap is mandatory. Invalid metadata");
-                                       LOGI("LEAVE");
-
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-
-                               JsonObject *object = json_object_get_object_member(cObject, "heatmap") ;
-                               try {
-                                       GetHeatMapInfo().shapeType = GetSupportedType(object, "shape_type", supportedShapeType);
-                               } catch (const std::exception& e) {
-                                       LOGE("Invalid %s", e.what());
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-
-                               std::vector<int> heatMapIndexes = GetDimInfo().GetValidIndexAll();
-                               if (GetHeatMapInfo().shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
-                                       GetHeatMapInfo().cIdx = heatMapIndexes[0];
-                                       GetHeatMapInfo().hIdx = heatMapIndexes[1];
-                                       GetHeatMapInfo().wIdx = heatMapIndexes[2];
-                               } else {
-                                       GetHeatMapInfo().hIdx = heatMapIndexes[0];
-                                       GetHeatMapInfo().wIdx = heatMapIndexes[1];
-                                       GetHeatMapInfo().cIdx = heatMapIndexes[2];
-                               }
-
-                               if (json_object_has_member(object, "nms_radius")) {
-                                       GetHeatMapInfo().nmsRadius = static_cast<float>(json_object_get_double_member(object, "nms_radius"));
-                                       LOGI("nms is enabled with %3.f", GetHeatMapInfo().nmsRadius );
-                               }
+                       JsonObject *object = json_object_get_object_member(cObject, "heatmap");
+                       try {
+                               GetHeatMapInfo().shapeType = GetSupportedType(object, "shape_type", supportedShapeType);
+                       } catch (const std::exception &e) {
+                               LOGE("Invalid %s", e.what());
+                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
                        }
 
-                       LOGI("LEAVE");
-                       return MEDIA_VISION_ERROR_NONE;
-               }
+                       std::vector<int> heatMapIndexes = GetDimInfo().GetValidIndexAll();
+                       if (GetHeatMapInfo().shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
+                               GetHeatMapInfo().cIdx = heatMapIndexes[0];
+                               GetHeatMapInfo().hIdx = heatMapIndexes[1];
+                               GetHeatMapInfo().wIdx = heatMapIndexes[2];
+                       } else {
+                               GetHeatMapInfo().hIdx = heatMapIndexes[0];
+                               GetHeatMapInfo().wIdx = heatMapIndexes[1];
+                               GetHeatMapInfo().cIdx = heatMapIndexes[2];
+                       }
 
-               inference_landmark_type_e GetType()
-               {
-                       return type;
+                       if (json_object_has_member(object, "nms_radius")) {
+                               GetHeatMapInfo().nmsRadius = static_cast<float>(json_object_get_double_member(object, "nms_radius"));
+                               LOGI("nms is enabled with %3.f", GetHeatMapInfo().nmsRadius);
+                       }
                }
 
-               int GetOffset()
-               {
-                       return offset;
-               }
+               LOGI("LEAVE");
+               return MEDIA_VISION_ERROR_NONE;
+       }
 
-               inference_landmark_coorindate_type_e GetCoordinate()
-               {
-                       return coordinate;
-               }
+       inference_landmark_type_e GetType()
+       {
+               return type;
+       }
 
-               inference_landmark_decoding_type_e GetDecodingType()
-               {
-                       return decodingType;
-               }
+       int GetOffset()
+       {
+               return offset;
+       }
 
-               HeatMapInfo& GetHeatMapInfo()
-               {
-                       return heatMapInfo;
-               }
+       inference_landmark_coorindate_type_e GetCoordinate()
+       {
+               return coordinate;
+       }
 
-               std::string GetName() { return name; }
+       inference_landmark_decoding_type_e GetDecodingType()
+       {
+               return decodingType;
+       }
 
-               DimInfo GetDimInfo() { return dimInfo; }
+       HeatMapInfo &GetHeatMapInfo()
+       {
+               return heatMapInfo;
+       }
+
+       std::string GetName()
+       {
+               return name;
+       }
 
-               std::vector<DispVec>& GetDispVecAll() { return dispVecs; }
+       DimInfo GetDimInfo()
+       {
+               return dimInfo;
+       }
 
-               std::vector<std::pair<int, int>>& GetEdges() { return edgeMap.GetEdgesAll(); }
-       };
+       std::vector<DispVec> &GetDispVecAll()
+       {
+               return dispVecs;
+       }
+
+       std::vector<std::pair<int, int> > &GetEdges()
+       {
+               return edgeMap.GetEdgesAll();
+       }
+};
 } /* Inference */
 } /* MediaVision */
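
For reference, a minimal sketch of the metadata fragment that ParseLandmark() above accepts; the layer name, index mask and option values are illustrative, not taken from a shipped model:

	// Illustrative output-metadata fragment for Landmark::ParseLandmark();
	// every value below is an example, not a real model description.
	static const char *kLandmarkMeta = "{ \"landmark\" : [ {"
									   "  \"name\" : \"output_tensor\","
									   "  \"index\" : [1, 1, 1, 0],"
									   "  \"landmark_type\" : \"2D_SINGLE\","
									   "  \"landmark_coordinate\" : \"RATIO\","
									   "  \"decoding_type\" : \"BYPASS\","
									   "  \"landmark_offset\" : 2 } ] }";

	// Feeding it through json-glib, as the class does internally:
	// JsonParser *parser = json_parser_new();
	// json_parser_load_from_data(parser, kLandmarkMeta, -1, NULL);
	// JsonObject *root = json_node_get_object(json_parser_get_root(parser));
	// mediavision::inference::Landmark landmark;
	// int ret = landmark.ParseLandmark(root); // MEDIA_VISION_ERROR_NONE on success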
 
index ecf9ef6..6ff9c39 100644 (file)
@@ -37,52 +37,52 @@ namespace mediavision
 {
 namespace inference
 {
-       class Metadata
-       {
-       public:
-               /**
+class Metadata
+{
+public:
+       /**
                 * @brief   Creates a Metadata class instance.
                 *
                 * @since_tizen 6.5
                 */
-               Metadata() = default;
+       Metadata() = default;
 
-               /**
+       /**
                 * @brief   Destroys a Metadata class instance including
                 *          all its resources.
                 *
                 * @since_tizen 6.5
                 */
-               ~Metadata() = default;
+       ~Metadata() = default;
 
-               /**
+       /**
                 * @brief Initializes a Metadata class
                 *
                 * @since_tizen 6.5
                 */
-               int Init(const std::string& filename);
+       int Init(const std::string &filename);
 
-               /**
+       /**
                 * @brief Parses a metafile and sets values to InputMetadata
                 *        and OutputMetadata
                 *
                 * @since_tizen 6.5
                 */
-               int Parse();
+       int Parse();
 
-               InputMetadata& GetInputMeta();
-               OutputMetadata& GetOutputMeta();
+       InputMetadata &GetInputMeta();
+       OutputMetadata &GetOutputMeta();
 
-       private:
-               int ParseInputMeta(JsonObject *object);
-               int ParseOutputMeta(JsonObject *object);
+private:
+       int ParseInputMeta(JsonObject *object);
+       int ParseOutputMeta(JsonObject *object);
 
-       private:
-               std::string mMetafile;
+private:
+       std::string mMetafile;
 
-               InputMetadata mInputMeta;
-               OutputMetadata mOutputMeta;
-       };
+       InputMetadata mInputMeta;
+       OutputMetadata mOutputMeta;
+};
 
 } /* Inference */
 } /* MediaVision */
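
A hedged usage sketch for the class above, assuming a header of the same name and a hypothetical metafile path:

	#include "Metadata.h" // assumed header name

	using namespace mediavision::inference;

	int LoadModelMeta()
	{
		Metadata metadata;

		// Init() reads the JSON metafile; the path here is hypothetical.
		int ret = metadata.Init("/usr/share/capi-media-vision/model_meta.json");
		if (ret != MEDIA_VISION_ERROR_NONE)
			return ret;

		// Parse() fills both InputMetadata and OutputMetadata.
		ret = metadata.Parse();
		if (ret != MEDIA_VISION_ERROR_NONE)
			return ret;

		InputMetadata &input = metadata.GetInputMeta();
		OutputMetadata &output = metadata.GetOutputMeta();
		(void)input;
		(void)output;
		return MEDIA_VISION_ERROR_NONE;
	}
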
old mode 100755 (executable)
new mode 100644 (file)
index 9d725ee..50817d7
@@ -37,35 +37,38 @@ namespace mediavision
 {
 namespace inference
 {
-       class ObjectDecoder
-       {
-       private:
-               TensorBuffer mTensorBuffer;
-               OutputMetadata mMeta;
-               int mBoxOffset;
-               int mNumberOfOjects;
-               float mScaleW;
-               float mScaleH;
-               Boxes mResultBoxes;
+class ObjectDecoder
+{
+private:
+       TensorBuffer mTensorBuffer;
+       OutputMetadata mMeta;
+       int mBoxOffset;
+       int mNumberOfOjects;
+       float mScaleW;
+       float mScaleH;
+       Boxes mResultBoxes;
 
-               float decodeScore(int idx);
-               Box decodeBox(int idx, float score, int label = -1, int offset = 0);
-               Box decodeBoxWithAnchor(int idx, int anchorIdx, float score, cv::Rect2f& anchor);
+       float decodeScore(int idx);
+       Box decodeBox(int idx, float score, int label = -1, int offset = 0);
+       Box decodeBoxWithAnchor(int idx, int anchorIdx, float score, cv::Rect2f &anchor);
 
-       public:
-               ObjectDecoder(TensorBuffer& buffer, OutputMetadata& metaData,
-                                       int boxOffset, float scaleW, float scaleH, int numberOfObjects = 0) :
-                                       mTensorBuffer(buffer), mMeta(metaData),
-                                       mBoxOffset(boxOffset), mNumberOfOjects(numberOfObjects),
-                                       mScaleW(scaleW), mScaleH(scaleH), mResultBoxes() {
-                                       };
+public:
+       ObjectDecoder(TensorBuffer &buffer, OutputMetadata &metaData, int boxOffset, float scaleW, float scaleH,
+                                 int numberOfObjects = 0)
+                       : mTensorBuffer(buffer)
+                       , mMeta(metaData)
+                       , mBoxOffset(boxOffset)
+                       , mNumberOfOjects(numberOfObjects)
+                       , mScaleW(scaleW)
+                       , mScaleH(scaleH)
+                       , mResultBoxes() {};
 
-               ~ObjectDecoder() = default;
+       ~ObjectDecoder() = default;
 
-               int init();
-               int decode();
-               Boxes& getObjectAll();
-       };
+       int init();
+       int decode();
+       Boxes &getObjectAll();
+};
 
 } /* Inference */
 } /* MediaVision */
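
A hedged sketch of driving the decoder above; the box offset and scale factors are illustrative:

	using namespace mediavision::inference;

	int DecodeDetections(TensorBuffer &buffer, OutputMetadata &meta)
	{
		// boxOffset 4 and a 640x480 source are illustrative values.
		ObjectDecoder decoder(buffer, meta, 4, 640.f, 480.f);

		int ret = decoder.init();
		if (ret != MEDIA_VISION_ERROR_NONE)
			return ret;

		ret = decoder.decode();
		if (ret != MEDIA_VISION_ERROR_NONE)
			return ret;

		// Boxes is the container of Box results declared in PostProcess.h.
		for (Box &box : decoder.getObjectAll())
			LOGI("label %d score %.3f", box.index, box.score);

		return MEDIA_VISION_ERROR_NONE;
	}
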
index c5fe30b..e3504ba 100644 (file)
@@ -29,55 +29,63 @@ namespace mediavision
 {
 namespace inference
 {
-       class OffsetVec
-       {
-       private:
-               std::string name;
-               DimInfo dimInfo;
-               int shapeType;
-       public:
-               OffsetVec() : name(), dimInfo(), shapeType() { }
-               ~OffsetVec() = default;
-               std::string GetName() { return name; }
-               DimInfo GetDimInfo() { return dimInfo; }
-               int GetShapeType() { return shapeType; }
-
-               int ParseOffset(JsonObject *root, const std::map<std::string, inference_tensor_shape_type_e>& supportedShapeType)
-               {
-                       JsonArray * rootArray = json_object_get_array_member(root, "offset");
-                       unsigned int elements = json_array_get_length(rootArray);
-
-                       // TODO: handling error
-                       for (unsigned int elem = 0; elem < elements; ++elem) {
+class OffsetVec
+{
+private:
+       std::string name;
+       DimInfo dimInfo;
+       int shapeType;
 
-                               JsonNode *pNode = json_array_get_element(rootArray, elem);
-                               JsonObject *pObject = json_node_get_object(pNode);
+public:
+       OffsetVec() : name(), dimInfo(), shapeType()
+       {}
+       ~OffsetVec() = default;
+       std::string GetName()
+       {
+               return name;
+       }
+       DimInfo GetDimInfo()
+       {
+               return dimInfo;
+       }
+       int GetShapeType()
+       {
+               return shapeType;
+       }
 
-                               name =
-                                       static_cast<const char*>(json_object_get_string_member(pObject,"name"));
-                               LOGI("layer: %s", name.c_str());
+       int ParseOffset(JsonObject *root, const std::map<std::string, inference_tensor_shape_type_e> &supportedShapeType)
+       {
+               JsonArray *rootArray = json_object_get_array_member(root, "offset");
+               unsigned int elements = json_array_get_length(rootArray);
 
-                               JsonArray * array = json_object_get_array_member(pObject, "index");
-                               unsigned int elements2 = json_array_get_length(array);
-                               LOGI("range dim: size[%u]", elements2);
-                               for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
-                                       if (static_cast<int>(json_array_get_int_element(array, elem2)) == 1)
-                                               dimInfo.SetValidIndex(elem2);
-                               }
+               // TODO: handling error
+               for (unsigned int elem = 0; elem < elements; ++elem) {
+                       JsonNode *pNode = json_array_get_element(rootArray, elem);
+                       JsonObject *pObject = json_node_get_object(pNode);
 
-                               try {
-                                       shapeType = GetSupportedType(pObject, "shape_type", supportedShapeType);
-                               } catch (const std::exception& e) {
-                                       LOGE("Invalid %s", e.what());
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
+                       name = static_cast<const char *>(json_object_get_string_member(pObject, "name"));
+                       LOGI("layer: %s", name.c_str());
 
+                       JsonArray *array = json_object_get_array_member(pObject, "index");
+                       unsigned int elements2 = json_array_get_length(array);
+                       LOGI("range dim: size[%u]", elements2);
+                       for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
+                               if (static_cast<int>(json_array_get_int_element(array, elem2)) == 1)
+                                       dimInfo.SetValidIndex(elem2);
                        }
 
-                       LOGI("LEAVE");
-                       return MEDIA_VISION_ERROR_NONE;
+                       try {
+                               shapeType = GetSupportedType(pObject, "shape_type", supportedShapeType);
+                       } catch (const std::exception &e) {
+                               LOGE("Invalid %s", e.what());
+                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+                       }
                }
-       };
+
+               LOGI("LEAVE");
+               return MEDIA_VISION_ERROR_NONE;
+       }
+};
 } /* Inference */
 } /* MediaVision */
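
ParseOffset() above expects an "offset" array of the same shape as the landmark metadata; an illustrative fragment, assuming the caller's shape-type table registers an "NHWC" key:

	// Illustrative only; the name and mask values are not from a real model.
	static const char *kOffsetMeta = "{ \"offset\" : [ {"
									 "  \"name\" : \"displacement_fwd\","
									 "  \"index\" : [1, 1, 1, 0],"
									 "  \"shape_type\" : \"NHWC\" } ] }";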
 
index 9385aa7..a19ef59 100644 (file)
@@ -46,75 +46,162 @@ namespace mediavision
 {
 namespace inference
 {
-       class OutputMetadata
-       {
-       private:
-               bool parsed;
-               ScoreInfo score;
-               box::BoxInfo box;
-               Landmark landmark;
-               OffsetVec offsetVec;
-               std::map<std::string, inference_tensor_shape_type_e> mSupportedShapeType;
+class OutputMetadata
+{
+private:
+       bool parsed;
+       ScoreInfo score;
+       box::BoxInfo box;
+       Landmark landmark;
+       OffsetVec offsetVec;
+       std::map<std::string, inference_tensor_shape_type_e> mSupportedShapeType;
 
-               int ParseScore(JsonObject *root);
-               int ParseBox(JsonObject *root);
-               int ParseLandmark(JsonObject *root);
-               int ParseOffset(JsonObject *root);
+       int ParseScore(JsonObject *root);
+       int ParseBox(JsonObject *root);
+       int ParseLandmark(JsonObject *root);
+       int ParseOffset(JsonObject *root);
 
-       public:
-               /**
+public:
+       /**
                 * @brief   Creates an OutputMetadata class instance.
                 *
                 * @since_tizen 6.5
                 */
-               OutputMetadata();
+       OutputMetadata();
 
-               /**
+       /**
                 * @brief   Destroys an OutputMetadata class instance including
                 *          all its resources.
                 *
                 * @since_tizen 6.5
                 */
-               ~OutputMetadata() = default;
+       ~OutputMetadata() = default;
 
-               /** @brief Parses an OutputMetadata
+       /** @brief Parses an OutputMetadata
                 *
                 * @since_tizen 6.5
                 */
-               int Parse(JsonObject *root);
+       int Parse(JsonObject *root);
 
-               bool IsParsed() { return parsed; }
+       bool IsParsed()
+       {
+               return parsed;
+       }
 
-               std::string GetScoreName() { return score.GetName(); }
-               DimInfo GetScoreDimInfo() { return score.GetDimInfo(); }
-               inference_score_type_e GetScoreType() { return score.GetType(); }
-               double GetScoreThreshold() { return score.GetThresHold(); }
-               int GetScoreTopNumber() { return score.GetTopNumber(); }
-               std::shared_ptr<DeQuantization> GetScoreDeQuant() { return score.GetDeQuant(); }
-               double GetScoreDeQuantScale() { return score.GetDeQuantScale(); }
-               double GetScoreDeQuantZeroPoint() { return score.GetDeQuantZeroPoint(); }
-               std::string GetBoxName() { return box.GetName(); }
-               DimInfo GetBoxDimInfo() { return box.GetDimInfo(); }
-               std::vector<int> GetBoxOrder() { return box.GetOrder(); }
-               box::DecodeInfo& GetBoxDecodeInfo() { return box.GetDecodeInfo(); }
-               inference_box_type_e GetBoxType() { return box.GetType(); }
-               std::string GetBoxLabelName() { return box.GetLabelName(); }
-               std::string GetBoxNumberName() { return box.GetNumberName(); }
-               DimInfo GetBoxNumberDimInfo() { return box.GetNumberDimInfo(); }
+       std::string GetScoreName()
+       {
+               return score.GetName();
+       }
+       DimInfo GetScoreDimInfo()
+       {
+               return score.GetDimInfo();
+       }
+       inference_score_type_e GetScoreType()
+       {
+               return score.GetType();
+       }
+       double GetScoreThreshold()
+       {
+               return score.GetThresHold();
+       }
+       int GetScoreTopNumber()
+       {
+               return score.GetTopNumber();
+       }
+       std::shared_ptr<DeQuantization> GetScoreDeQuant()
+       {
+               return score.GetDeQuant();
+       }
+       double GetScoreDeQuantScale()
+       {
+               return score.GetDeQuantScale();
+       }
+       double GetScoreDeQuantZeroPoint()
+       {
+               return score.GetDeQuantZeroPoint();
+       }
+       std::string GetBoxName()
+       {
+               return box.GetName();
+       }
+       DimInfo GetBoxDimInfo()
+       {
+               return box.GetDimInfo();
+       }
+       std::vector<int> GetBoxOrder()
+       {
+               return box.GetOrder();
+       }
+       box::DecodeInfo &GetBoxDecodeInfo()
+       {
+               return box.GetDecodeInfo();
+       }
+       inference_box_type_e GetBoxType()
+       {
+               return box.GetType();
+       }
+       std::string GetBoxLabelName()
+       {
+               return box.GetLabelName();
+       }
+       std::string GetBoxNumberName()
+       {
+               return box.GetNumberName();
+       }
+       DimInfo GetBoxNumberDimInfo()
+       {
+               return box.GetNumberDimInfo();
+       }
 
-               int GetScoreCoordinate() { return box.GetCoordinate(); }
-               std::string GetLandmarkName() { return landmark.GetName(); }
-               int GetLandmarkOffset() { return landmark.GetOffset(); }
-               inference_landmark_type_e GetLandmarkType() { return landmark.GetType(); }
-               DimInfo GetLandmarkDimInfo() { return landmark.GetDimInfo(); }
-               HeatMapInfo& GetLandmarkHeatMapInfo() { return landmark.GetHeatMapInfo(); }
-               inference_landmark_coorindate_type_e GetLandmarkCoordinate() { return landmark.GetCoordinate(); }
-               inference_landmark_decoding_type_e GetLandmarkDecodingType() { return landmark.GetDecodingType(); }
-               std::vector<DispVec>& GetLandmarkDispVecAll() { return landmark.GetDispVecAll(); }
-               std::vector<std::pair<int, int>>& GetLandmarkEdges() { return landmark.GetEdges(); }
-               std::string GetOffsetVecName() { return offsetVec.GetName(); }
-               inference_box_decoding_type_e GetBoxDecodingType() { return box.GetDecodingType(); }
-       };
+       int GetScoreCoordinate()
+       {
+               return box.GetCoordinate();
+       }
+       std::string GetLandmarkName()
+       {
+               return landmark.GetName();
+       }
+       int GetLandmarkOffset()
+       {
+               return landmark.GetOffset();
+       }
+       inference_landmark_type_e GetLandmarkType()
+       {
+               return landmark.GetType();
+       }
+       DimInfo GetLandmarkDimInfo()
+       {
+               return landmark.GetDimInfo();
+       }
+       HeatMapInfo &GetLandmarkHeatMapInfo()
+       {
+               return landmark.GetHeatMapInfo();
+       }
+       inference_landmark_coorindate_type_e GetLandmarkCoordinate()
+       {
+               return landmark.GetCoordinate();
+       }
+       inference_landmark_decoding_type_e GetLandmarkDecodingType()
+       {
+               return landmark.GetDecodingType();
+       }
+       std::vector<DispVec> &GetLandmarkDispVecAll()
+       {
+               return landmark.GetDispVecAll();
+       }
+       std::vector<std::pair<int, int> > &GetLandmarkEdges()
+       {
+               return landmark.GetEdges();
+       }
+       std::string GetOffsetVecName()
+       {
+               return offsetVec.GetName();
+       }
+       inference_box_decoding_type_e GetBoxDecodingType()
+       {
+               return box.GetDecodingType();
+       }
+};
 } /* Inference */
 } /* MediaVision */
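
A hedged sketch of reading the parsed values back through the getters above:

	using namespace mediavision::inference;

	void DumpOutputMeta(OutputMetadata &meta)
	{
		if (!meta.IsParsed())
			return;

		LOGI("score tensor %s: threshold %1.3f, top %d", meta.GetScoreName().c_str(),
			 meta.GetScoreThreshold(), meta.GetScoreTopNumber());
		LOGI("box tensor %s", meta.GetBoxName().c_str());

		// Heatmap indexes are only meaningful for heatmap decoding types.
		if (meta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP)
			LOGI("heatmap h/w/c index: %d/%d/%d", meta.GetLandmarkHeatMapInfo().hIdx,
				 meta.GetLandmarkHeatMapInfo().wIdx, meta.GetLandmarkHeatMapInfo().cIdx);
	}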
 
index 10fe920..1ce8c9e 100644 (file)
@@ -26,57 +26,66 @@ namespace mediavision
 {
 namespace inference
 {
-       // score
-    typedef enum {
-               INFERENCE_SCORE_TYPE_NORMAL,
-               INFERENCE_SCORE_TYPE_SIGMOID
-       } inference_score_type_e;
+// score
+typedef enum
+{
+       INFERENCE_SCORE_TYPE_NORMAL,
+       INFERENCE_SCORE_TYPE_SIGMOID
+} inference_score_type_e;
 
-       // box
-       typedef enum {
-               INFERENCE_BOX_TYPE_ORIGIN_LEFTTOP,
-               INFERENCE_BOX_TYPE_ORIGIN_CENTER
-       } inference_box_type_e;
+// box
+typedef enum
+{
+       INFERENCE_BOX_TYPE_ORIGIN_LEFTTOP,
+       INFERENCE_BOX_TYPE_ORIGIN_CENTER
+} inference_box_type_e;
 
-       typedef enum {
-               INFERENCE_BOX_COORDINATE_TYPE_RATIO,
-               INFERENCE_BOX_COORDINATE_TYPE_PIXEL
-       } inference_box_coordinate_type_e;
+typedef enum
+{
+       INFERENCE_BOX_COORDINATE_TYPE_RATIO,
+       INFERENCE_BOX_COORDINATE_TYPE_PIXEL
+} inference_box_coordinate_type_e;
 
-       typedef enum {
-               INFERENCE_BOX_DECODING_TYPE_BYPASS,
-               INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR,
-               INFERENCE_BOX_DECODING_TYPE_YOLO_ANCHOR
-       } inference_box_decoding_type_e;
+typedef enum
+{
+       INFERENCE_BOX_DECODING_TYPE_BYPASS,
+       INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR,
+       INFERENCE_BOX_DECODING_TYPE_YOLO_ANCHOR
+} inference_box_decoding_type_e;
 
-       typedef enum {
-               INFERENCE_BOX_NMS_TYPE_NONE = -1,
-               INFERENCE_BOX_NMS_TYPE_STANDARD
-       } inference_box_nms_type_e;
+typedef enum
+{
+       INFERENCE_BOX_NMS_TYPE_NONE = -1,
+       INFERENCE_BOX_NMS_TYPE_STANDARD
+} inference_box_nms_type_e;
 
-       // landmark
-       typedef enum {
-               INFERENCE_LANDMARK_TYPE_2D_SINGLE,
-               INFERENCE_LANDMARK_TYPE_2D_MULTI,
-               INFERENCE_LANDMARK_TYPE_3D_SINGLE
-       } inference_landmark_type_e;
+// landmark
+typedef enum
+{
+       INFERENCE_LANDMARK_TYPE_2D_SINGLE,
+       INFERENCE_LANDMARK_TYPE_2D_MULTI,
+       INFERENCE_LANDMARK_TYPE_3D_SINGLE
+} inference_landmark_type_e;
 
-       typedef enum {
-               INFERENCE_LANDMARK_COORDINATE_TYPE_RATIO,
-               INFERENCE_LANDMARK_COORDINATE_TYPE_PIXEL
-       } inference_landmark_coorindate_type_e;
+typedef enum
+{
+       INFERENCE_LANDMARK_COORDINATE_TYPE_RATIO,
+       INFERENCE_LANDMARK_COORDINATE_TYPE_PIXEL
+} inference_landmark_coorindate_type_e;
 
-       typedef enum {
-               INFERENCE_LANDMARK_DECODING_TYPE_BYPASS,
-               INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL,
-               INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP,
-               INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE
-       } inference_landmark_decoding_type_e;
+typedef enum
+{
+       INFERENCE_LANDMARK_DECODING_TYPE_BYPASS,
+       INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL,
+       INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP,
+       INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE
+} inference_landmark_decoding_type_e;
 
-       typedef enum {
-               INFERENCE_DISPLACEMENT_TYPE_FORWARD,
-               INFERENCE_DISPLACEMENT_TYPE_BACKWARD
-       } inference_displacement_type_e;
+typedef enum
+{
+       INFERENCE_DISPLACEMENT_TYPE_FORWARD,
+       INFERENCE_DISPLACEMENT_TYPE_BACKWARD
+} inference_displacement_type_e;
 }
 }
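
These enumerations steer the decoders in this patch; a small illustrative helper, not part of the change itself:

	using namespace mediavision::inference;

	// True when the decoder must search heatmap peaks instead of reading
	// coordinates straight from the tensor (the BYPASS variants).
	static bool NeedsHeatMapDecoding(inference_landmark_decoding_type_e type)
	{
		return type == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP ||
			   type == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE;
	}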
 
index 4960d68..22e3fc1 100644 (file)
@@ -36,58 +36,56 @@ namespace mediavision
 {
 namespace inference
 {
-       class PoseDecoder
+class PoseDecoder
+{
+private:
+       TensorBuffer mTensorBuffer;
+       OutputMetadata mMeta;
+       int mHeatMapWidth;
+       int mHeatMapHeight;
+       int mHeatMapChannel;
+       int mNumberOfLandmarks;
+
+       std::list<LandmarkPoint> mCandidates;
+       std::vector<LandmarkResults> mPoseLandmarks;
+
+       int getIndexToPos(LandmarkPoint &point, float scaleW, float scaleH);
+       int getPosToIndex(LandmarkPoint &landmark);
+       int getOffsetValue(LandmarkPoint &landmark, cv::Point2f &offsetVal);
+       int findPose(LandmarkPoint &root, std::vector<LandmarkPoint> &decodedLandmarks, float scaleW, float scaleH);
+       int traverseToNeighbor(int edgeId, int toId, inference_displacement_type_e dir, LandmarkPoint fromLandmark,
+                                                  LandmarkPoint &toLandmark, float scaleW, float scaleH);
+       int getEdgeVector(cv::Point index, int edgeId, inference_displacement_type_e dir, cv::Point2f &vector);
+
+       int convertXYZtoX(int x, int y, int c);
+
+       cv::Point convertXYZtoXY(int x, int y, int c);
+
+public:
+       PoseDecoder(TensorBuffer &buffer, const OutputMetadata &metaData, int heatMapWidth, int heatMapHeight,
+                               int heatMapChannel, int numberOfLandmarks)
+                       : mTensorBuffer(buffer)
+                       , mHeatMapWidth(heatMapWidth)
+                       , mHeatMapHeight(heatMapHeight)
+                       , mHeatMapChannel(heatMapChannel)
+                       , mNumberOfLandmarks(numberOfLandmarks)
        {
-       private:
-               TensorBuffer mTensorBuffer;
-               OutputMetadata mMeta;
-               int mHeatMapWidth;
-               int mHeatMapHeight;
-               int mHeatMapChannel;
-               int mNumberOfLandmarks;
-
-               std::list<LandmarkPoint> mCandidates;
-               std::vector<LandmarkResults> mPoseLandmarks;
-
-               int getIndexToPos(LandmarkPoint& point, float scaleW, float scaleH);
-               int getPosToIndex(LandmarkPoint& landmark);
-               int getOffsetValue(LandmarkPoint& landmark, cv::Point2f &offsetVal);
-               int findPose(LandmarkPoint& root, std::vector<LandmarkPoint>& decodedLandmarks,
-                                                       float scaleW, float scaleH);
-               int traverseToNeighbor(int edgeId, int toId, inference_displacement_type_e dir,
-                                                               LandmarkPoint fromLandmark, LandmarkPoint& toLandmark,
-                                                               float scaleW, float scaleH);
-               int getEdgeVector(cv::Point index, int edgeId, inference_displacement_type_e dir, cv::Point2f& vector);
-
-               int convertXYZtoX(int x, int y, int c);
-
-               cv::Point convertXYZtoXY(int x, int y, int c);
-
-       public:
-               PoseDecoder(TensorBuffer& buffer, const OutputMetadata& metaData,
-                                       int heatMapWidth, int heatMapHeight, int heatMapChannel,
-                                       int numberOfLandmarks) :
-                                       mTensorBuffer(buffer),
-                                       mHeatMapWidth(heatMapWidth),
-                                       mHeatMapHeight(heatMapHeight),
-                                       mHeatMapChannel(heatMapChannel),
-                                       mNumberOfLandmarks(numberOfLandmarks) {
-                                               mMeta = metaData;
-                                       };
-
-               ~PoseDecoder() = default;
-
-               int init();
-
-               int decode(float scaleWidth, float scaleHeight, float thresHoldRadius);
-
-               int getNumberOfPose();
-
-               float getPointX(int poseIdx, int partIdx);
-               float getPointY(int poseIdx, int partIdx);
-               float getScore(int poseIdx, int partIdx);
+               mMeta = metaData;
        };
 
+       ~PoseDecoder() = default;
+
+       int init();
+
+       int decode(float scaleWidth, float scaleHeight, float thresHoldRadius);
+
+       int getNumberOfPose();
+
+       float getPointX(int poseIdx, int partIdx);
+       float getPointY(int poseIdx, int partIdx);
+       float getScore(int poseIdx, int partIdx);
+};
+
 } /* Inference */
 } /* MediaVision */
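
A hedged call-sequence sketch for the decoder above; heatmap geometry, landmark count and scale values are illustrative:

	using namespace mediavision::inference;

	int DecodePose(TensorBuffer &buffer, const OutputMetadata &meta)
	{
		// Assume a 96x96 heatmap with 17 channels, one per landmark.
		PoseDecoder decoder(buffer, meta, 96, 96, 17, 17);

		if (decoder.init() != MEDIA_VISION_ERROR_NONE)
			return MEDIA_VISION_ERROR_INVALID_OPERATION;

		// Map results back to a 192x192 input; radius 0.f keeps all candidates.
		int ret = decoder.decode(192.f, 192.f, 0.f);
		if (ret != MEDIA_VISION_ERROR_NONE)
			return ret;

		for (int pose = 0; pose < decoder.getNumberOfPose(); ++pose)
			LOGI("pose %d part 0: (%1.2f, %1.2f) score %1.3f", pose, decoder.getPointX(pose, 0),
				 decoder.getPointY(pose, 0), decoder.getScore(pose, 0));

		return MEDIA_VISION_ERROR_NONE;
	}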
 
index fbf64be..33fa95d 100644 (file)
@@ -27,7 +27,6 @@
 #include <opencv2/core.hpp>
 #include <opencv2/imgproc.hpp>
 
-
 /**
  * @file PostProcess.h
  * @brief This file contains the PostProcess class definition which
@@ -39,8 +38,9 @@
  * @details Box structure includes index, score, location.
  *
  */
-typedef struct _Box {
-       int index;   /**< index of box belonging to a category */
+typedef struct _Box
+{
+       int index; /**< index of box belonging to a category */
        float score; /**< score of box belonging to the index */
        cv::Rect2f location; /**< location of a box */
 } Box;
@@ -52,45 +52,44 @@ namespace mediavision
 {
 namespace inference
 {
-       class PostProcess
-       {
-       public:
-               /**
+class PostProcess
+{
+public:
+       /**
                 * @brief   Creates a PostProcess class instance.
                 *
                 * @since_tizen 6.5
                 */
-               PostProcess() : mMaxScoreSize(3) {};
+       PostProcess() : mMaxScoreSize(3) {};
 
-               /**
+       /**
                 * @brief   Destroys a PostProcess class instance including
                 *           all its resources.
                 *
                 * @since_tizen 6.5
                 */
-               ~PostProcess() = default;
+       ~PostProcess() = default;
 
-               /**
+       /**
                 * @brief   Calculates sigmoid.
                 *
                 * @since_tizen 6.5
                 */
-               static float sigmoid(float value);
-               static float dequant(float value, float scale, float zeropoint);
+       static float sigmoid(float value);
+       static float dequant(float value, float scale, float zeropoint);
 
-               int ScoreClear(int size);
-               int ScorePush(float value, int index);
-               int ScorePop(std::vector<std::pair<float, int>>& top);
-               int Nms(BoxesList& boxeslist, int mode, float threshold, Boxes& nmsboxes);
+       int ScoreClear(int size);
+       int ScorePush(float value, int index);
+       int ScorePop(std::vector<std::pair<float, int> > &top);
+       int Nms(BoxesList &boxeslist, int mode, float threshold, Boxes &nmsboxes);
 
-       private:
-               std::priority_queue<std::pair<float, int>,
-                                                       std::vector<std::pair<float, int>>,
-                                                       std::greater<std::pair<float, int>>> mScore;
-       private:
-               int mMaxScoreSize;
+private:
+       std::priority_queue<std::pair<float, int>, std::vector<std::pair<float, int> >, std::greater<std::pair<float, int> > >
+                       mScore;
 
-       };
+private:
+       int mMaxScoreSize;
+};
 
 } /* Inference */
 } /* MediaVision */
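
A hedged sketch of the top-K score queue above, assuming ScoreClear() sets the queue capacity and ScorePop() drains the retained pairs:

	#include <vector>

	using namespace mediavision::inference;

	void TopThree(const float *scores, int count)
	{
		PostProcess postProc;

		postProc.ScoreClear(3); // keep only the three best entries
		for (int idx = 0; idx < count; ++idx)
			postProc.ScorePush(PostProcess::sigmoid(scores[idx]), idx);

		std::vector<std::pair<float, int> > top;
		postProc.ScorePop(top);
		for (auto &entry : top)
			LOGI("class %d: %1.3f", entry.second, entry.first);
	}
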
index 4c67fdc..0359d70 100644 (file)
@@ -37,57 +37,53 @@ namespace mediavision
 {
 namespace inference
 {
-       class Posture
-       {
-       public:
-               /**
+class Posture
+{
+public:
+       /**
                 * @brief   Creates a Posture class instance.
                 *
                 * @since_tizen 6.0
                 */
-               Posture();
+       Posture();
 
-               /**
+       /**
                 * @brief   Destroys a Posture class instance including
                 *           all its resources.
                 *
                 * @since_tizen 6.0
                 */
-               ~Posture();
+       ~Posture();
 
-               /**
+       /**
                 * @brief   Sets the motion capture and motion mapping file paths
                 *
                 * @since_tizen 6.0
                 */
-               int setPoseFromFile(const std::string motionCaptureFilePath,
-                                                       const std::string motionMappingFilePath);
+       int setPoseFromFile(const std::string motionCaptureFilePath, const std::string motionMappingFilePath);
 
-               /**
+       /**
                 * @brief   Compares a pose for @a parts and returns the score
                 *
                 * @since_tizen 6.0
                 */
-               int compare(int parts, std::vector<std::pair<bool, cv::Point>> action,
-                                       float* score);
+       int compare(int parts, std::vector<std::pair<bool, cv::Point> > action, float *score);
 
-       private:
-        cv::Vec2f getUnitVectors(cv::Point point1, cv::Point point2);
-               int getParts(int parts,
-                                       std::vector<std::pair<bool, cv::Point>>& pose,
-                                       std::vector<std::pair<bool, std::vector<cv::Vec2f>>>& posePart);
-               float getSimilarity(int parts,
-                                       std::vector<std::pair<bool, std::vector<cv::Vec2f>>>& posePart,
-                                       std::vector<std::pair<bool, std::vector<cv::Vec2f>>>& actionPart);
-               float cosineSimilarity(std::vector<cv::Vec2f> vec1, std::vector<cv::Vec2f> vec2, int size);
+private:
+       cv::Vec2f getUnitVectors(cv::Point point1, cv::Point point2);
+       int getParts(int parts, std::vector<std::pair<bool, cv::Point> > &pose,
+                                std::vector<std::pair<bool, std::vector<cv::Vec2f> > > &posePart);
+       float getSimilarity(int parts, std::vector<std::pair<bool, std::vector<cv::Vec2f> > > &posePart,
+                                               std::vector<std::pair<bool, std::vector<cv::Vec2f> > > &actionPart);
+       float cosineSimilarity(std::vector<cv::Vec2f> vec1, std::vector<cv::Vec2f> vec2, int size);
 
-       private:
-        BvhParser mBvhParser;
-        Bvh mBvh;
-               std::map<std::string, int> mMotionToPoseMap; /**< name, index */
-               std::vector<std::pair<bool, cv::Point>> mPose;
-               std::vector<std::pair<bool, std::vector<cv::Vec2f>>> mPoseParts;
-       };
+private:
+       BvhParser mBvhParser;
+       Bvh mBvh;
+       std::map<std::string, int> mMotionToPoseMap; /**< name, index */
+       std::vector<std::pair<bool, cv::Point> > mPose;
+       std::vector<std::pair<bool, std::vector<cv::Vec2f> > > mPoseParts;
+};
 
 } /* Inference */
 } /* MediaVision */
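
A hedged usage sketch; the file paths are placeholders and the part mask is assumed to be a bitmask selecting which body parts compare() scores:

	using namespace mediavision::inference;

	int ScorePose(std::vector<std::pair<bool, cv::Point> > &action)
	{
		Posture posture;

		int ret = posture.setPoseFromFile("/tmp/reference.bvh", "/tmp/mapping.txt");
		if (ret != MEDIA_VISION_ERROR_NONE)
			return ret;

		float score = 0.f;
		ret = posture.compare(0x3F, action, &score); // 0x3F: illustrative part mask
		LOGI("similarity: %1.3f", score);
		return ret;
	}
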
index 4121557..de76926 100644 (file)
@@ -26,7 +26,6 @@
 #include <opencv2/core.hpp>
 #include <opencv2/imgproc.hpp>
 
-
 /**
  * @file PreProcess.h
  * @brief This file contains the PreProcess class definition which
@@ -37,41 +36,39 @@ namespace mediavision
 {
 namespace inference
 {
-       class PreProcess
-       {
-       public:
-               /**
+class PreProcess
+{
+public:
+       /**
                 * @brief   Creates a PreProcess class instance.
                 *
                 * @since_tizen 6.5
                 */
-               PreProcess() = default;
+       PreProcess() = default;
 
-               /**
+       /**
                 * @brief   Destroys a PreProcess class instance including
                 *           all its resources.
                 *
                 * @since_tizen 6.5
                 */
-               ~PreProcess() = default;
+       ~PreProcess() = default;
 
-               /**
+       /**
                 * @brief   Runs PreProcess with layerInfo and options
                 *
                 * @since_tizen 6.5
                 */
-               int Run(cv::Mat& source, const int colorSpace, const int dataType, const LayerInfo& layerInfo,
-                               const Options& options, void* buffer);
-
-       private:
-               int Resize(cv::Mat& source, cv::Mat& dest, cv::Size size);
-               int ColorConvert(cv::Mat& source, cv::Mat& dest, int sType, int dType);
-               int Normalize(cv::Mat& source, cv::Mat& dest,
-                                       const std::vector<double>& mean, const std::vector<double>& std);
-               int Quantize(cv::Mat& source, cv::Mat& dest,
-                                       const std::vector<double>& scale, const std::vector<double>& zeropoint);
+       int Run(cv::Mat &source, const int colorSpace, const int dataType, const LayerInfo &layerInfo,
+                       const Options &options, void *buffer);
 
-       };
+private:
+       int Resize(cv::Mat &source, cv::Mat &dest, cv::Size size);
+       int ColorConvert(cv::Mat &source, cv::Mat &dest, int sType, int dType);
+       int Normalize(cv::Mat &source, cv::Mat &dest, const std::vector<double> &mean, const std::vector<double> &std);
+       int Quantize(cv::Mat &source, cv::Mat &dest, const std::vector<double> &scale,
+                                const std::vector<double> &zeropoint);
+};
 
 } /* Inference */
 } /* MediaVision */
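
A hedged sketch of filling one input tensor; the color space and data type constants are picked from mv_common.h and mv_inference_type.h only for illustration:

	using namespace mediavision::inference;

	int FillInputTensor(cv::Mat &frame, const LayerInfo &layerInfo, const Options &options,
						void *tensorData)
	{
		PreProcess preProc;

		// Resize, color-convert, normalize and quantize into the given buffer.
		return preProc.Run(frame, MEDIA_VISION_COLORSPACE_RGB888, MV_INFERENCE_DATA_FLOAT32,
						   layerInfo, options, tensorData);
	}
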
index 5810324..bf861c0 100644 (file)
@@ -113,9 +113,7 @@ public:
                MEDIA_VISION_NULL_ARG_CHECK(array);
 
                unsigned int elements1 = json_array_get_length(array);
-               MEDIA_VISION_CHECK_CONDITION(elements1 > 0,
-                                                                        MEDIA_VISION_ERROR_INVALID_PARAMETER,
-                                                                        "No name on meta file");
+               MEDIA_VISION_CHECK_CONDITION(elements1 > 0, MEDIA_VISION_ERROR_INVALID_PARAMETER, "No name on meta file");
 
                for (unsigned int elem1 = 0; elem1 < elements1; ++elem1) {
                        names.push_back(json_array_get_string_element(array, elem1));
@@ -129,13 +127,11 @@ public:
                                dimInfo.SetValidIndex(elem2);
                }
                if (json_object_has_member(pObject, "top_number"))
-                       topNumber = static_cast<int>(
-                                       json_object_get_int_member(pObject, "top_number"));
+                       topNumber = static_cast<int>(json_object_get_int_member(pObject, "top_number"));
                LOGI("top number: %d", topNumber);
 
                if (json_object_has_member(pObject, "threshold"))
-                       threshold = static_cast<double>(
-                                       json_object_get_double_member(pObject, "threshold"));
+                       threshold = static_cast<double>(json_object_get_double_member(pObject, "threshold"));
                LOGI("threshold: %1.3f", threshold);
 
                try {
@@ -150,9 +146,8 @@ public:
                        JsonNode *node = json_array_get_element(array, 0);
                        JsonObject *object = json_node_get_object(node);
 
-                       deQuantization = std::make_shared<DeQuantization>(
-                                       json_object_get_double_member(object, "scale"),
-                                       json_object_get_double_member(object, "zeropoint"));
+                       deQuantization = std::make_shared<DeQuantization>(json_object_get_double_member(object, "scale"),
+                                                                                                                         json_object_get_double_member(object, "zeropoint"));
                }
 
                LOGI("LEAVE");
index 2f48d4c..05bbb20 100644 (file)
@@ -25,7 +25,6 @@
 #include <inference_engine_type.h>
 #include <mv_inference_type.h>
 
-
 /**
  * @file TensorBuffer.h
  * @brief This file contains the tensor buffer class definition which
@@ -37,32 +36,29 @@ namespace mediavision
 {
 namespace inference
 {
-       class TensorBuffer
-       {
-       private:
-               IETensorBuffer _tensorBuffer;
+class TensorBuffer
+{
+private:
+       IETensorBuffer _tensorBuffer;
 
-       public:
-               TensorBuffer() = default;
-               ~TensorBuffer() = default;
+public:
+       TensorBuffer() = default;
+       ~TensorBuffer() = default;
 
-               bool empty();
-               bool exist(std::string name);
-               int allocate(inference_engine_tensor_buffer& tensor_buffer,
-                                        const inference_engine_tensor_info& tensor_info);
-               void release();
-               size_t size();
+       bool empty();
+       bool exist(std::string name);
+       int allocate(inference_engine_tensor_buffer &tensor_buffer, const inference_engine_tensor_info &tensor_info);
+       void release();
+       size_t size();
 
-               IETensorBuffer& getIETensorBuffer();
-               bool addTensorBuffer(std::string name, inference_engine_tensor_buffer& buffer);
-               int GetTensorInfo(inference_engine_layer_property& layerProperty, tensor_t& tensorInfo);
-               inference_engine_tensor_buffer* getTensorBuffer(std::string name);
-               template <typename T>
-               int convertToFloat(inference_engine_tensor_buffer *tensorBuffer);
+       IETensorBuffer &getIETensorBuffer();
+       bool addTensorBuffer(std::string name, inference_engine_tensor_buffer &buffer);
+       int GetTensorInfo(inference_engine_layer_property &layerProperty, tensor_t &tensorInfo);
+       inference_engine_tensor_buffer *getTensorBuffer(std::string name);
+       template<typename T> int convertToFloat(inference_engine_tensor_buffer *tensorBuffer);
 
-               template <typename T>
-               T getValue(std::string name, int idx);
-       };
+       template<typename T> T getValue(std::string name, int idx);
+};
 } /* Inference */
 } /* MediaVision */
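
A hedged sketch of reading a value back out of a named tensor; the layer name is hypothetical:

	using namespace mediavision::inference;

	float FirstScore(TensorBuffer &buffer)
	{
		if (!buffer.exist("score")) // hypothetical layer name
			return 0.f;

		// getValue<T> reads element idx from the named tensor buffer.
		return buffer.getValue<float>("score", 0);
	}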
 
index d5db2b5..1a15cd5 100644 (file)
@@ -29,18 +29,18 @@ namespace mediavision
 {
 namespace inference
 {
-       template <typename T>
-       T GetSupportedType(JsonObject* root, std::string typeName, const std::map<std::string, T>& supportedTypes)
-       {
-               auto supportedType = supportedTypes.find(json_object_get_string_member(root, typeName.c_str()));
-               if (supportedType == supportedTypes.end()) {
-                       throw std::invalid_argument(typeName);
-               }
+template<typename T>
+T GetSupportedType(JsonObject *root, std::string typeName, const std::map<std::string, T> &supportedTypes)
+{
+       auto supportedType = supportedTypes.find(json_object_get_string_member(root, typeName.c_str()));
+       if (supportedType == supportedTypes.end()) {
+               throw std::invalid_argument(typeName);
+       }
 
-               LOGI("%s: %d:%s", typeName.c_str(), supportedType->second, supportedType->first.c_str());
+       LOGI("%s: %d:%s", typeName.c_str(), supportedType->second, supportedType->first.c_str());
 
-               return supportedType->second;
-       }
+       return supportedType->second;
+}
 } /* Inference */
 } /* MediaVision */
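
GetSupportedType() pairs a JSON string member with a caller-supplied table and throws on unknown values; a hedged sketch mirroring the maps built in the Landmark constructor earlier (the key strings here are illustrative):

	using namespace mediavision::inference;

	std::map<std::string, inference_score_type_e> supportedScoreTypes = {
		{ "NORMAL", INFERENCE_SCORE_TYPE_NORMAL },
		{ "SIGMOID", INFERENCE_SCORE_TYPE_SIGMOID },
	};

	// Throws std::invalid_argument("score_type") for unregistered values;
	// callers above convert that into MEDIA_VISION_ERROR_INVALID_OPERATION.
	// inference_score_type_e type =
	//		GetSupportedType(jsonObject, "score_type", supportedScoreTypes);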
 
index c005bd0..1001a29 100644 (file)
 #include <mv_inference.h>
 
 #ifdef __cplusplus
-extern "C"
-{
+extern "C" {
 #endif /* __cplusplus */
 
-       /**
+/**
         * @file   mv_inference_open.h
         * @brief  This file contains the Media Vision Inference Open API.
         */
 
-       /*************/
-       /* Inference */
-       /*************/
+/*************/
+/* Inference */
+/*************/
 
-       mv_engine_config_h mv_inference_get_engine_config(mv_inference_h infer);
+mv_engine_config_h mv_inference_get_engine_config(mv_inference_h infer);
 
-       /**
+/**
         * @brief Creates an inference handle.
         * @details Use this function to create an inference handle. After creation
         *          the inference handle has to be prepared with
@@ -59,9 +58,9 @@ extern "C"
         * @see mv_inference_destroy_open()
         * @see mv_inference_prepare_open()
         */
-       int mv_inference_create_open(mv_inference_h *infer);
+int mv_inference_create_open(mv_inference_h *infer);
 
-       /**
+/**
         * @brief Destroys an inference handle and releases all its resources.
         *
         * @since_tizen 5.5
@@ -76,9 +75,9 @@ extern "C"
         *
         * @see mv_inference_create_open()
         */
-       int mv_inference_destroy_open(mv_inference_h infer);
+int mv_inference_destroy_open(mv_inference_h infer);
 
-       /**
+/**
         * @brief Configures the backend for the inference handle
         *
         * @since_tizen 5.5
@@ -97,10 +96,9 @@ extern "C"
         * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
         * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
         */
-       int mv_inference_configure_engine_open(mv_inference_h infer,
-                                                                                  mv_engine_config_h engine_config);
+int mv_inference_configure_engine_open(mv_inference_h infer, mv_engine_config_h engine_config);
 
-       /**
+/**
	 * @brief Configure the number of outputs for the inference handle
         *
         * @since_tizen 5.5
@@ -120,10 +118,9 @@ extern "C"
         * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
         * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
         */
-       int mv_inference_configure_output_open(mv_inference_h infer,
-                                                                                  mv_engine_config_h engine_config);
+int mv_inference_configure_output_open(mv_inference_h infer, mv_engine_config_h engine_config);
 
-       /**
+/**
	 * @brief Configure the confidence threshold value for the inference handle
         *
         * @since_tizen 5.5
@@ -143,10 +140,9 @@ extern "C"
         * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
         * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
         */
-       int mv_inference_configure_confidence_threshold_open(
-                       mv_inference_h infer, mv_engine_config_h engine_config);
+int mv_inference_configure_confidence_threshold_open(mv_inference_h infer, mv_engine_config_h engine_config);
 
-       /**
+/**
	 * @brief Configure the set of output node names for the inference handle
         *
         * @since_tizen 5.5
@@ -166,10 +162,9 @@ extern "C"
         * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
         * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
         */
-       int mv_inference_configure_output_node_names_open(
-                       mv_inference_h infer, mv_engine_config_h engine_config);
+int mv_inference_configure_output_node_names_open(mv_inference_h infer, mv_engine_config_h engine_config);
 
-       /**
+/**
         * @brief Prepare inference.
         * @details Use this function to prepare inference based on
         *          the configured network.
@@ -187,9 +182,9 @@ extern "C"
         * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
         * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
         */
-       int mv_inference_prepare_open(mv_inference_h infer);
+int mv_inference_prepare_open(mv_inference_h infer);
 
-       /**
+/**
         * @brief Traverses the list of supported engines for inference.
         * @details Using this function the supported engines can be obtained.
         *          The names can be used with mv_engine_config_h related
@@ -211,11 +206,10 @@ extern "C"
         * @see mv_engine_config_set_string_attribute()
         * @see mv_engine_config_get_string_attribute()
         */
-       int mv_inference_foreach_supported_engine_open(
-                       mv_inference_h infer, mv_inference_supported_engine_cb callback,
-                       void *user_data);
+int mv_inference_foreach_supported_engine_open(mv_inference_h infer, mv_inference_supported_engine_cb callback,
+                                                                                          void *user_data);
 
-       /**
+/**
         * @brief Performs image classification on the @a source
         * @details Use this function to launch image classification.
	 *          Each time mv_inference_image_classify() is
@@ -252,11 +246,10 @@ extern "C"
         *
         * @see mv_inference_image_classified_cb
         */
-       int mv_inference_image_classify_open(
-                       mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
-                       mv_inference_image_classified_cb classified_cb, void *user_data);
+int mv_inference_image_classify_open(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+                                                                        mv_inference_image_classified_cb classified_cb, void *user_data);
 
-       /**
+/**
         * @brief Performs object detection on the @a source
         * @details Use this function to launch object detection.
	 *          Each time mv_inference_object_detect() is
@@ -290,12 +283,10 @@ extern "C"
         *
         * @see mv_inference_object_detected_cb
         */
-       int
-       mv_inference_object_detect_open(mv_source_h source, mv_inference_h infer,
-                                                                       mv_inference_object_detected_cb detected_cb,
-                                                                       void *user_data);
+int mv_inference_object_detect_open(mv_source_h source, mv_inference_h infer,
+                                                                       mv_inference_object_detected_cb detected_cb, void *user_data);
 
-       /**
+/**
         * @brief Performs face detection on the @a source
         * @details Use this function to launch face detection.
	 *          Each time mv_inference_face_detect() is
@@ -329,11 +320,10 @@ extern "C"
         *
         * @see mv_inference_face_detected_cb
         */
-       int mv_inference_face_detect_open(mv_source_h source, mv_inference_h infer,
-                                                                         mv_inference_face_detected_cb detected_cb,
-                                                                         void *user_data);
+int mv_inference_face_detect_open(mv_source_h source, mv_inference_h infer, mv_inference_face_detected_cb detected_cb,
+                                                                 void *user_data);
 
-       /**
+/**
         * @brief Performs facial landmarks detection on the @a source
         * @details Use this function to launch facial landmark detection.
	 *          Each time mv_inference_facial_landmark_detect() is
@@ -371,12 +361,10 @@ extern "C"
         *
         * @see mv_inference_facial_landmark_detected_cb
         */
-       int mv_inference_facial_landmark_detect_open(
-                       mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
-                       mv_inference_facial_landmark_detected_cb detected_cb,
-                       void *user_data);
+int mv_inference_facial_landmark_detect_open(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+                                                                                        mv_inference_facial_landmark_detected_cb detected_cb, void *user_data);
 
-       /**
+/**
         * @brief Performs pose landmarks detection on the @a source.
         * @details Use this function to launch pose landmark detection.
	 *          Each time mv_inference_pose_landmark_detect_open() is
@@ -411,12 +399,10 @@ extern "C"
         *
         * @see mv_inference_pose_landmark_detected_cb()
         */
-       int mv_inference_pose_landmark_detect_open(
-                       mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
-                       mv_inference_pose_landmark_detected_cb detected_cb,
-                       void *user_data);
+int mv_inference_pose_landmark_detect_open(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+                                                                                  mv_inference_pose_landmark_detected_cb detected_cb, void *user_data);
 
-       /**
+/**
	 * @brief Gets the number of poses.
         *
         * @since_tizen 6.0
@@ -429,10 +415,9 @@ extern "C"
         * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
         * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
         */
-       int mv_inference_pose_get_number_of_poses_open(
-               mv_inference_pose_result_h result, int *number_of_poses);
+int mv_inference_pose_get_number_of_poses_open(mv_inference_pose_result_h result, int *number_of_poses);
 
-       /**
+/**
	 * @brief Gets the number of landmarks per pose.
         *
         * @since_tizen 6.0
@@ -445,10 +430,9 @@ extern "C"
         * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
         * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
         */
-       int mv_inference_pose_get_number_of_landmarks_open(
-               mv_inference_pose_result_h result, int *number_of_landmarks);
+int mv_inference_pose_get_number_of_landmarks_open(mv_inference_pose_result_h result, int *number_of_landmarks);
 
-       /**
+/**
	 * @brief Gets the landmark location of a part of a pose.
         *
         * @since_tizen 6.0
@@ -465,10 +449,10 @@ extern "C"
         * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
         *
         */
-       int mv_inference_pose_get_landmark_open(
-               mv_inference_pose_result_h result, int pose_index, int part_index, mv_point_s *location, float *score);
+int mv_inference_pose_get_landmark_open(mv_inference_pose_result_h result, int pose_index, int part_index,
+                                                                               mv_point_s *location, float *score);
 
-       /**
+/**
         * @brief Gets a label of a pose.
         *
         * @since_tizen 6.0
@@ -489,10 +473,9 @@ extern "C"
         * @see mv_inference_pose_landmark_detected_cb()
         * @see mv_inference_pose_result_h
         */
-       int mv_inference_pose_get_label_open(
-               mv_inference_pose_result_h result, int pose_index, int *label);
+int mv_inference_pose_get_label_open(mv_inference_pose_result_h result, int pose_index, int *label);
 
-       /**
+/**
         * @brief Creates pose handle.
         * @details Use this function to create a pose.
         *
@@ -510,9 +493,9 @@ extern "C"
         *
         * @see mv_pose_destroy_open()
         */
-       int mv_pose_create_open(mv_pose_h *pose);
+int mv_pose_create_open(mv_pose_h *pose);
 
-       /**
+/**
         * @brief Destroys pose handle and releases all its resources.
         *
         * @since_tizen 6.0
@@ -528,9 +511,9 @@ extern "C"
         *
         * @see mv_pose_create_open()
         */
-       int mv_pose_destroy_open(mv_pose_h pose);
+int mv_pose_destroy_open(mv_pose_h pose);
 
-       /**
+/**
         * @brief Sets a motion capture file and its pose mapping file to the pose.
         * @details Use this function to set a motion capture file and
         *          its pose mapping file. These are used by mv_pose_compare_open()
@@ -558,9 +541,9 @@ extern "C"
	 * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid file path
         * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
         */
-       int mv_pose_set_from_file_open(mv_pose_h pose, const char *motionCaptureFilePath, const char *motionMappingFilePath);
+int mv_pose_set_from_file_open(mv_pose_h pose, const char *motionCaptureFilePath, const char *motionMappingFilePath);
 
-       /**
+/**
         * @brief Compares an action pose with the pose which is set by mv_pose_set_from_file_open().
         * @details Use this function to compare action pose with the pose
         *          which is set by mv_pose_set_from_file_open().
@@ -582,7 +565,7 @@ extern "C"
         *
	 * @pre Sets the pose by using mv_pose_set_from_file_open()
         */
-       int mv_pose_compare_open(mv_pose_h pose, mv_inference_pose_result_h action, int parts, float *score);
+int mv_pose_compare_open(mv_pose_h pose, mv_inference_pose_result_h action, int parts, float *score);
 
 #ifdef __cplusplus
 }
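For reference, a hedged sketch (not part of this patch) of the call flow through the open API declared above; `engine_config`, `source`, `action`, `parts`, the callbacks, the file paths and `user_data` are assumed to be prepared elsewhere, and error handling is elided:

    mv_inference_h infer = NULL;
    int ret = mv_inference_create_open(&infer);
    ret = mv_inference_configure_engine_open(infer, engine_config);
    ret = mv_inference_prepare_open(infer);
    /* A NULL roi is assumed to mean the whole source. */
    ret = mv_inference_image_classify_open(source, infer, NULL, classified_cb, user_data);
    ret = mv_inference_destroy_open(infer);

    /* The pose APIs follow the same create/use/destroy pattern. */
    mv_pose_h pose = NULL;
    ret = mv_pose_create_open(&pose);
    ret = mv_pose_set_from_file_open(pose, motion_capture_path, motion_mapping_path);
    float score = 0.f;
    ret = mv_pose_compare_open(pose, action, parts, &score);
    ret = mv_pose_destroy_open(pose);
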
index 1e57a19..5091db2 100644 (file)
@@ -44,9 +44,7 @@ int BoxInfo::ParseBox(JsonObject *root)
        MEDIA_VISION_NULL_ARG_CHECK(array);
 
        unsigned int elements1 = json_array_get_length(array);
-       MEDIA_VISION_CHECK_CONDITION(elements1 > 0,
-                                                                MEDIA_VISION_ERROR_INVALID_PARAMETER,
-                                                                "No name on meta file");
+       MEDIA_VISION_CHECK_CONDITION(elements1 > 0, MEDIA_VISION_ERROR_INVALID_PARAMETER, "No name on meta file");
 
        for (unsigned int elem1 = 0; elem1 < elements1; ++elem1) {
                names.push_back(json_array_get_string_element(array, elem1));
@@ -63,10 +61,8 @@ int BoxInfo::ParseBox(JsonObject *root)
 
        try {
                type = GetSupportedType(pObject, "box_type", supportedBoxTypes);
-               coordinate = GetSupportedType(pObject, "box_coordinate",
-                                                                         supportedBoxCoordinateTypes);
-               decodingType = GetSupportedType(pObject, "decoding_type",
-                                                                               supportedBoxDecodingTypes);
+               coordinate = GetSupportedType(pObject, "box_coordinate", supportedBoxCoordinateTypes);
+               decodingType = GetSupportedType(pObject, "decoding_type", supportedBoxDecodingTypes);
        } catch (const std::exception &e) {
                LOGE("Invalid %s", e.what());
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
@@ -77,8 +73,7 @@ int BoxInfo::ParseBox(JsonObject *root)
        LOGI("box order should have 4 elements and it has [%u]", elements2);
 
        for (unsigned int elem2_idx = 0; elem2_idx < elements2; ++elem2_idx) {
-               auto val =
-                               static_cast<int>(json_array_get_int_element(array, elem2_idx));
+               auto val = static_cast<int>(json_array_get_int_element(array, elem2_idx));
                order.push_back(val);
                LOGI("%d", val);
        }
@@ -177,8 +172,7 @@ int BoxInfo::ParseDecodeInfo(JsonObject *root)
        }
 
        int ret = MEDIA_VISION_ERROR_NONE;
-       JsonObject *cObject =
-                       json_object_get_object_member(pObject, "decoding_info");
+       JsonObject *cObject = json_object_get_object_member(pObject, "decoding_info");
        if (json_object_has_member(cObject, "anchor")) {
                ret = GetDecodeInfo().ParseAnchorParam(cObject);
                if (ret != MEDIA_VISION_ERROR_NONE) {
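For reference, a hedged sketch of the meta-file fragment that ParseBox() and ParseDecodeInfo() consume; key names not visible in these hunks (the name and order arrays in particular) are assumptions:

    /*
     * "box" : {
     *     "name"           : [ "detection_out" ],          // at least one entry required
     *     "box_type"       : "<one of supportedBoxTypes>",
     *     "box_coordinate" : "<one of supportedBoxCoordinateTypes>",
     *     "decoding_type"  : "<one of supportedBoxDecodingTypes>",
     *     "box_order"      : [ 1, 0, 3, 2 ],               // 4 elements expected
     *     "decoding_info"  : { "anchor" : { ... } }        // "anchor" is optional
     * }
     */
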
index 1322eac..6e56276 100644 (file)
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 
-
 #include "Bvh.h"
 #include "BvhUtils.h"
 #include "mv_private.h"
@@ -24,73 +23,71 @@ namespace mediavision
 {
 namespace inference
 {
-
-  void Bvh::recalculate_joints_ltm(std::shared_ptr<Joint> start_joint) {
-
-    LOGI("ENTER");
-
-    if (start_joint == NULL)
-    {
-      if (root_joint_ == NULL)
-        return;
-      else
-        start_joint = root_joint_;
-    }
-
-    LOGD("%s joint", start_joint->name().c_str());
-    //LOG(DEBUG) << "recalculate_joints_ltm: " << start_joint->name();
-    cv::Mat offmat_backup = cv::Mat::eye(4,4, CV_32F);
-    offmat_backup.at<float>(0,3) = start_joint->offset().x;
-    offmat_backup.at<float>(1,3) = start_joint->offset().y;
-    offmat_backup.at<float>(2,3) = start_joint->offset().z;
-
-    std::vector<std::vector<float>> data = start_joint->channel_data();
-
-    for (unsigned i = 0; i < num_frames_; i++) {
-      cv::Mat offmat = offmat_backup; // offset matrix
-      cv::Mat rmat = cv::Mat::eye(4,4,CV_32F);  // identity matrix set on rotation matrix
-      cv::Mat tmat = cv::Mat::eye(4,4,CV_32F);  // identity matrix set on translation matrix
-
-      for (size_t j = 0;  j < start_joint->channels_order().size(); j++) {
-        if (start_joint->channels_order()[j] == Joint::Channel::XPOSITION)
-          tmat.at<float>(0,3) = data[i][j];
-        else if (start_joint->channels_order()[j] == Joint::Channel::YPOSITION)
-          tmat.at<float>(1,3) = data[i][j];
-        else if (start_joint->channels_order()[j] == Joint::Channel::ZPOSITION)
-          tmat.at<float>(2,3) = data[i][j];
-        else if (start_joint->channels_order()[j] == Joint::Channel::XROTATION)
-          rmat = rotate(rmat, data[i][j], Axis::X);
-        else if (start_joint->channels_order()[j] == Joint::Channel::YROTATION)
-          rmat = rotate(rmat, data[i][j], Axis::Y);
-        else if (start_joint->channels_order()[j] == Joint::Channel::ZROTATION)
-          rmat = rotate(rmat, data[i][j], Axis::Z);
-      }
-
-      cv::Mat ltm = cv::Mat::eye(4,4,CV_32F); // local transformation matrix
-
-      if (start_joint->parent() != NULL)
-        ltm = start_joint->parent()->ltm(i) * offmat;
-      else
-        ltm = tmat * offmat;
-
-      cv::Vec3f wPos(ltm.at<float>(0,3),ltm.at<float>(1,3), ltm.at<float>(2,3));
-      start_joint->set_pos(wPos);
-      //LOG(TRACE) << "Joint world position: " << utils::vec3tos(ltm[3]);
-      LOGD("Joint world position: %f, %f, %f", wPos[0], wPos[1], wPos[2]);
-
-      ltm = ltm * rmat;
-
-      //LOG(TRACE) << "Local transformation matrix: \n" << utils::mat4tos(ltm);
-
-      start_joint->set_ltm(ltm, i);
-    } // num frame
-
-    for (auto& child : start_joint->children()) {
-      recalculate_joints_ltm(child);
-    }
-
-    LOGI("LEAVE");
-  } // recalculate_joints_ltm
-
-}  // end of bvh
+void Bvh::recalculate_joints_ltm(std::shared_ptr<Joint> start_joint)
+{
+       LOGI("ENTER");
+
+       if (start_joint == NULL) {
+               if (root_joint_ == NULL)
+                       return;
+               else
+                       start_joint = root_joint_;
+       }
+
+       LOGD("%s joint", start_joint->name().c_str());
+       //LOG(DEBUG) << "recalculate_joints_ltm: " << start_joint->name();
+       cv::Mat offmat_backup = cv::Mat::eye(4, 4, CV_32F);
+       offmat_backup.at<float>(0, 3) = start_joint->offset().x;
+       offmat_backup.at<float>(1, 3) = start_joint->offset().y;
+       offmat_backup.at<float>(2, 3) = start_joint->offset().z;
+
+       std::vector<std::vector<float> > data = start_joint->channel_data();
+
+       for (unsigned i = 0; i < num_frames_; i++) {
+               cv::Mat offmat = offmat_backup; // offset matrix
+               cv::Mat rmat = cv::Mat::eye(4, 4, CV_32F); // identity matrix set on rotation matrix
+               cv::Mat tmat = cv::Mat::eye(4, 4, CV_32F); // identity matrix set on translation matrix
+
+               for (size_t j = 0; j < start_joint->channels_order().size(); j++) {
+                       if (start_joint->channels_order()[j] == Joint::Channel::XPOSITION)
+                               tmat.at<float>(0, 3) = data[i][j];
+                       else if (start_joint->channels_order()[j] == Joint::Channel::YPOSITION)
+                               tmat.at<float>(1, 3) = data[i][j];
+                       else if (start_joint->channels_order()[j] == Joint::Channel::ZPOSITION)
+                               tmat.at<float>(2, 3) = data[i][j];
+                       else if (start_joint->channels_order()[j] == Joint::Channel::XROTATION)
+                               rmat = rotate(rmat, data[i][j], Axis::X);
+                       else if (start_joint->channels_order()[j] == Joint::Channel::YROTATION)
+                               rmat = rotate(rmat, data[i][j], Axis::Y);
+                       else if (start_joint->channels_order()[j] == Joint::Channel::ZROTATION)
+                               rmat = rotate(rmat, data[i][j], Axis::Z);
+               }
+
+               cv::Mat ltm = cv::Mat::eye(4, 4, CV_32F); // local transformation matrix
+
+               if (start_joint->parent() != NULL)
+                       ltm = start_joint->parent()->ltm(i) * offmat;
+               else
+                       ltm = tmat * offmat;
+
+               cv::Vec3f wPos(ltm.at<float>(0, 3), ltm.at<float>(1, 3), ltm.at<float>(2, 3));
+               start_joint->set_pos(wPos);
+               //LOG(TRACE) << "Joint world position: " << utils::vec3tos(ltm[3]);
+               LOGD("Joint world position: %f, %f, %f", wPos[0], wPos[1], wPos[2]);
+
+               ltm = ltm * rmat;
+
+               //LOG(TRACE) << "Local transformation matrix: \n" << utils::mat4tos(ltm);
+
+               start_joint->set_ltm(ltm, i);
+       } // num frame
+
+       for (auto &child : start_joint->children()) {
+               recalculate_joints_ltm(child);
+       }
+
+       LOGI("LEAVE");
+} // recalculate_joints_ltm
+
+} // end of bvh
 }
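A note on the transform chain above (summarizing the code, with notation added for clarity): for each frame i, a joint's local transform is built as ltm = parent->ltm(i) * offset for non-root joints, or ltm = tmat * offset for the root (tmat holds the frame's position channels); the joint's world position is read from the translation column of ltm before the frame's rotation is post-multiplied via ltm = ltm * rmat.
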
index 66c040b..317c529 100644 (file)
@@ -27,8 +27,8 @@
   */
 #define MULTI_HIERARCHY 0
 
-namespace {
-
+namespace
+{
 const std::string kChannels = "CHANNELS";
 const std::string kEnd = "End";
 const std::string kEndSite = "End Site";
@@ -53,345 +53,344 @@ namespace mediavision
 {
 namespace inference
 {
+//##############################################################################
+// Main parse function
+//##############################################################################
+int BvhParser::parse(const std::string &path, Bvh *bvh)
+{
+       LOGI("ENTER");
+       //LOG(INFO) << "Parsing file : " << path;
+
+       path_ = path;
+       bvh_ = bvh;
+
+       std::ifstream file;
+       file.open(path_);
+
+       if (file.is_open()) {
+               std::string token;
+
+#if MULTI_HIERARCHY == 1
+               while (file.good()) {
+#endif
+                       file >> token;
+                       if (token == kHierarchy) {
+                               int ret = parse_hierarchy(file);
+                               if (ret)
+                                       return ret;
+                       } else {
+                               //LOG(ERROR) << "Bad structure of .bvh file. " << kHierarchy
+                               //          << " should be on the top of the file";
+                               return -1;
+                       }
+#if MULTI_HIERARCHY == 1
+               }
+#endif
+       } else {
+               //LOG(ERROR) << "Cannot open file to parse : " << path_;
+               return -1;
+       }
+
+       LOGI("LEAVE");
+       return 0;
+}
+
+//##############################################################################
+// Function parsing hierarchy
+//##############################################################################
+int BvhParser::parse_hierarchy(std::ifstream &file)
+{
+       //LOG(INFO) << "Parsing hierarchy";
+
+       std::string token;
+       int ret;
+
+       if (file.good()) {
+               file >> token;
+
+               //##########################################################################
+               // Parsing joints
+               //##########################################################################
+               if (token == kRoot) {
+                       std::shared_ptr<Joint> rootJoint;
+                       ret = parse_joint(file, nullptr, rootJoint);
+
+                       if (ret)
+                               return ret;
+
+			LOGI("There are %d data channels", bvh_->num_channels());
+
+                       bvh_->set_root_joint(rootJoint);
+               } else {
+                       LOGE("Bad structure of .bvh file.");
+
+                       return -1;
+               }
+       }
+
+       if (file.good()) {
+               file >> token;
+
+               //##########################################################################
+               // Parsing motion data
+               //##########################################################################
+               if (token == kMotion) {
+                       ret = parse_motion(file);
+
+                       if (ret)
+                               return ret;
+               } else {
+                       LOGE("Bad structure of .bvh file.");
+
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+//##############################################################################
+// Function parsing joint
+//##############################################################################
+int BvhParser::parse_joint(std::ifstream &file, std::shared_ptr<Joint> parent, std::shared_ptr<Joint> &parsed)
+{
+       //LOG(TRACE) << "Parsing joint";
+
+       std::shared_ptr<Joint> joint = std::make_shared<Joint>();
+       joint->set_parent(parent);
+
+       std::string name;
+       file >> name;
 
-  //##############################################################################
-  // Main parse function
-  //##############################################################################
-  int BvhParser::parse(const std::string& path, Bvh* bvh) {
-    LOGI("ENTER");
-    //LOG(INFO) << "Parsing file : " << path;
-
-    path_ = path;
-    bvh_ = bvh;
-
-    std::ifstream file;
-    file.open(path_);
-
-    if (file.is_open()) {
-      std::string token;
-
-  #if MULTI_HIERARCHY == 1
-      while (file.good()) {
-  #endif
-        file >> token;
-        if (token == kHierarchy) {
-          int ret = parse_hierarchy(file);
-          if (ret)
-            return ret;
-        } else {
-          //LOG(ERROR) << "Bad structure of .bvh file. " << kHierarchy
-          //          << " should be on the top of the file";
-          return -1;
-        }
-  #if MULTI_HIERARCHY == 1
-      }
-  #endif
-    } else {
-      //LOG(ERROR) << "Cannot open file to parse : " << path_;
-      return -1;
-    }
-
-    LOGI("LEAVE");
-    return 0;
-  }
-
-  //##############################################################################
-  // Function parsing hierarchy
-  //##############################################################################
-  int BvhParser::parse_hierarchy(std::ifstream& file) {
-    //LOG(INFO) << "Parsing hierarchy";
-
-    std::string token;
-    int ret;
-
-    if (file.good()) {
-      file >> token;
-
-      //##########################################################################
-      // Parsing joints
-      //##########################################################################
-      if (token == kRoot) {
-        std::shared_ptr <Joint> rootJoint;
-        ret = parse_joint(file, nullptr, rootJoint);
-
-        if (ret)
-          return ret;
-
-        LOGI("There is %d data channels", bvh_->num_channels());
-
-        bvh_->set_root_joint(rootJoint);
-      } else {
-        LOGE("Bad structure of .bvh file.");
-
-        return -1;
-      }
-    }
-
-    if (file.good()) {
-      file >> token;
-
-      //##########################################################################
-      // Parsing motion data
-      //##########################################################################
-      if (token == kMotion) {
-        ret = parse_motion(file);
-
-        if (ret)
-          return ret;
-      } else {
-        LOGE("Bad structure of .bvh file.");
-
-        return -1;
-      }
-    }
-    return 0;
-  }
-
-  //##############################################################################
-  // Function parsing joint
-  //##############################################################################
-  int BvhParser::parse_joint(std::ifstream& file,
-      std::shared_ptr <Joint> parent, std::shared_ptr <Joint>& parsed) {
-
-    //LOG(TRACE) << "Parsing joint";
-
-    std::shared_ptr<Joint> joint = std::make_shared<Joint>();
-    joint->set_parent(parent);
-
-    std::string name;
-    file >> name;
-
-    LOGD("Joint name %s", name.c_str());
-
-    joint->set_name(name);
-
-    std::string token;
-    std::vector <std::shared_ptr <Joint>> children;
-    int ret;
-
-    file >> token;  // Consuming '{'
-    file >> token;
-
-    //############################################################################
-    // Offset parsing
-    //############################################################################
-    if (token == kOffset) {
-      Joint::Offset offset;
-
-      try {
-        file >> offset.x >> offset.y >> offset.z;
-      } catch (const std::ios_base::failure& e) {
-        //LOG(ERROR) << "Failure while parsing offset";
-        return -1;
-      }
+       LOGD("Joint name %s", name.c_str());
 
-      joint->set_offset(offset);
+       joint->set_name(name);
 
-      //LOG(TRACE) << "Offset x: " << offset.x << ", y: " << offset.y << ", z: "
-      //          << offset.z;
+       std::string token;
+       std::vector<std::shared_ptr<Joint> > children;
+       int ret;
 
-    } else {
-      //LOG(ERROR) << "Bad structure of .bvh file. Expected " << kOffset << ", but "
-      //          << "found \"" << token << "\"";
+       file >> token; // Consuming '{'
+       file >> token;
 
-      return -1;
-    }
+       //############################################################################
+       // Offset parsing
+       //############################################################################
+       if (token == kOffset) {
+               Joint::Offset offset;
 
-    file >> token;
+               try {
+                       file >> offset.x >> offset.y >> offset.z;
+               } catch (const std::ios_base::failure &e) {
+                       //LOG(ERROR) << "Failure while parsing offset";
+                       return -1;
+               }
 
-    //############################################################################
-    // Channels parsing
-    //############################################################################
-    if (token == kChannels) {
-      ret = parse_channel_order(file, joint);
+               joint->set_offset(offset);
 
-      //LOG(TRACE) << "Joint has " << joint->num_channels() << " data channels";
+               //LOG(TRACE) << "Offset x: " << offset.x << ", y: " << offset.y << ", z: "
+               //          << offset.z;
 
-      if (ret)
-        return ret;
-    } else {
-      //LOG(ERROR) << "Bad structure of .bvh file. Expected " << kChannels
-      //           << ", but found \"" << token << "\"";
+       } else {
+               //LOG(ERROR) << "Bad structure of .bvh file. Expected " << kOffset << ", but "
+               //          << "found \"" << token << "\"";
 
-      return -1;
-    }
+               return -1;
+       }
 
-    file >> token;
+       file >> token;
 
-    bvh_->add_joint(joint);
+       //############################################################################
+       // Channels parsing
+       //############################################################################
+       if (token == kChannels) {
+               ret = parse_channel_order(file, joint);
 
-    //############################################################################
-    // Children parsing
-    //############################################################################
+               //LOG(TRACE) << "Joint has " << joint->num_channels() << " data channels";
 
-    while (file.good()) {
-      //##########################################################################
-      // Child joint parsing
-      //##########################################################################
-      if (token == kJoint) {
-        std::shared_ptr <Joint> child;
-        ret = parse_joint(file, joint, child);
+               if (ret)
+                       return ret;
+       } else {
+               //LOG(ERROR) << "Bad structure of .bvh file. Expected " << kChannels
+               //           << ", but found \"" << token << "\"";
 
-        if (ret)
-          return ret;
+               return -1;
+       }
 
-        children.push_back(child);
+       file >> token;
 
-      //##########################################################################
-      // Child joint parsing
-      //##########################################################################
-      } else if (token == kEnd) {
-        file >> token >> token;  // Consuming "Site {"
-
-        std::shared_ptr <Joint> tmp_joint = std::make_shared <Joint> ();
-
-        tmp_joint->set_parent(joint);
-        tmp_joint->set_name(kEndSite);
-        children.push_back(tmp_joint);
+       bvh_->add_joint(joint);
 
-        file >> token;
+       //############################################################################
+       // Children parsing
+       //############################################################################
 
-        //########################################################################
-        // End site offset parsing
-        //########################################################################
-        if (token == kOffset) {
-          Joint::Offset offset;
+       while (file.good()) {
+               //##########################################################################
+               // Child joint parsing
+               //##########################################################################
+               if (token == kJoint) {
+                       std::shared_ptr<Joint> child;
+                       ret = parse_joint(file, joint, child);
 
-          try {
-            file >> offset.x >> offset.y >> offset.z;
-          } catch (const std::ios_base::failure& e) {
-            //LOG(ERROR) << "Failure while parsing offset";
-            return -1;
-          }
-
-          tmp_joint->set_offset(offset);
-
-          // LOG(TRACE) << "Joint name : EndSite";
-          // LOG(TRACE) << "Offset x: " << offset.x << ", y: " << offset.y << ", z: "
-          //           << offset.z;
-
-          file >> token;  // Consuming "}"
-
-        } else {
-          //LOG(ERROR) << "Bad structure of .bvh file. Expected " << kOffset
-          //           << ", but found \"" << token << "\"";
-
-          return -1;
-        }
-
-        bvh_->add_joint(tmp_joint);
-      //##########################################################################
-      // End joint parsing
-      //##########################################################################
-      } else if (token == "}") {
-        joint->set_children(children);
-        parsed = joint;
-        return 0;
-      }
-
-      file >> token;
-    }
-
-    //LOG(ERROR) << "Cannot parse joint, unexpected end of file. Last token : "
-    //           << token;
-    return -1;
-  }
-
-  //##############################################################################
-  // Motion data parse function
-  //##############################################################################
-  int BvhParser::parse_motion(std::ifstream& file) {
-
-    LOGI("ENTER");
-
-    std::string token;
-    file >> token;
-
-    int frames_num;
-
-    if (token == kFrames) {
-      file >> frames_num;
-      bvh_->set_num_frames(frames_num);
-      LOGD("Num of frames: %d", frames_num);
-    } else {
-      LOGE("Bad structure of .bvh file");
-
-      return -1;
-    }
-
-    file >> token;
-
-    double frame_time;
-
-    if (token == kFrame) {
-      file >> token;  // Consuming 'Time:'
-      file >> frame_time;
-      bvh_->set_frame_time(frame_time);
-      LOGD("Frame time: %f",frame_time);
-
-      float number;
-      for (int i = 0; i < frames_num; i++) {
-        for (auto joint : bvh_->joints()) {
-          std::vector <float> data;
-          for (unsigned j = 0; j < joint->num_channels(); j++) {
-            file >> number;
-            data.push_back(number);
-          }
-          LOGD("%s joint", joint->name().c_str());
-          joint->add_frame_motion_data(data);
-        }
-      }
-    } else {
-      LOGE("Bad structure of .bvh file.");
-      return -1;
-    }
-
-    LOGI("LEAVE");
-
-    return 0;
-  }
-
-  //##############################################################################
-  // Channels order parse function
-  //##############################################################################
-  int BvhParser::parse_channel_order(std::ifstream& file,
-      std::shared_ptr <Joint> joint) {
-
-    LOGI("ENTER");
-
-    int num;
-    file >> num;
-    LOGD("Number of channels: %d",num);
-
-    std::vector <Joint::Channel> channels;
-    std::string token;
-
-    for (int i = 0; i < num; i++) {
-      file >> token;
-      if (token == kXpos)
-        channels.push_back(Joint::Channel::XPOSITION);
-      else if (token == kYpos)
-        channels.push_back(Joint::Channel::YPOSITION);
-      else if (token == kZpos)
-        channels.push_back(Joint::Channel::ZPOSITION);
-      else if (token == kXrot)
-        channels.push_back(Joint::Channel::XROTATION);
-      else if (token == kYrot)
-        channels.push_back(Joint::Channel::YROTATION);
-      else if (token == kZrot)
-        channels.push_back(Joint::Channel::ZROTATION);
-      else {
-        //LOG(ERROR) << "Not valid channel!";
-        return -1;
-      }
-    }
-
-    joint->set_channels_order(channels);
-
-    LOGI("LEAVE");
-
-    return 0;
-  }
+                       if (ret)
+                               return ret;
+
+                       children.push_back(child);
+
+                       //##########################################################################
+			// End site parsing
+                       //##########################################################################
+               } else if (token == kEnd) {
+                       file >> token >> token; // Consuming "Site {"
+
+                       std::shared_ptr<Joint> tmp_joint = std::make_shared<Joint>();
+
+                       tmp_joint->set_parent(joint);
+                       tmp_joint->set_name(kEndSite);
+                       children.push_back(tmp_joint);
+
+                       file >> token;
+
+                       //########################################################################
+                       // End site offset parsing
+                       //########################################################################
+                       if (token == kOffset) {
+                               Joint::Offset offset;
+
+                               try {
+                                       file >> offset.x >> offset.y >> offset.z;
+                               } catch (const std::ios_base::failure &e) {
+                                       //LOG(ERROR) << "Failure while parsing offset";
+                                       return -1;
+                               }
+
+                               tmp_joint->set_offset(offset);
+
+                               // LOG(TRACE) << "Joint name : EndSite";
+                               // LOG(TRACE) << "Offset x: " << offset.x << ", y: " << offset.y << ", z: "
+                               //           << offset.z;
+
+                               file >> token; // Consuming "}"
+
+                       } else {
+                               //LOG(ERROR) << "Bad structure of .bvh file. Expected " << kOffset
+                               //           << ", but found \"" << token << "\"";
+
+                               return -1;
+                       }
+
+                       bvh_->add_joint(tmp_joint);
+                       //##########################################################################
+                       // End joint parsing
+                       //##########################################################################
+               } else if (token == "}") {
+                       joint->set_children(children);
+                       parsed = joint;
+                       return 0;
+               }
+
+               file >> token;
+       }
+
+       //LOG(ERROR) << "Cannot parse joint, unexpected end of file. Last token : "
+       //           << token;
+       return -1;
+}
+
+//##############################################################################
+// Motion data parse function
+//##############################################################################
+int BvhParser::parse_motion(std::ifstream &file)
+{
+       LOGI("ENTER");
+
+       std::string token;
+       file >> token;
+
+       int frames_num;
+
+       if (token == kFrames) {
+               file >> frames_num;
+               bvh_->set_num_frames(frames_num);
+               LOGD("Num of frames: %d", frames_num);
+       } else {
+               LOGE("Bad structure of .bvh file");
+
+               return -1;
+       }
+
+       file >> token;
+
+       double frame_time;
+
+       if (token == kFrame) {
+               file >> token; // Consuming 'Time:'
+               file >> frame_time;
+               bvh_->set_frame_time(frame_time);
+               LOGD("Frame time: %f", frame_time);
+
+               float number;
+               for (int i = 0; i < frames_num; i++) {
+                       for (auto joint : bvh_->joints()) {
+                               std::vector<float> data;
+                               for (unsigned j = 0; j < joint->num_channels(); j++) {
+                                       file >> number;
+                                       data.push_back(number);
+                               }
+                               LOGD("%s joint", joint->name().c_str());
+                               joint->add_frame_motion_data(data);
+                       }
+               }
+       } else {
+               LOGE("Bad structure of .bvh file.");
+               return -1;
+       }
+
+       LOGI("LEAVE");
+
+       return 0;
+}
+
+//##############################################################################
+// Channels order parse function
+//##############################################################################
+int BvhParser::parse_channel_order(std::ifstream &file, std::shared_ptr<Joint> joint)
+{
+       LOGI("ENTER");
+
+       int num;
+       file >> num;
+       LOGD("Number of channels: %d", num);
+
+       std::vector<Joint::Channel> channels;
+       std::string token;
+
+       for (int i = 0; i < num; i++) {
+               file >> token;
+               if (token == kXpos)
+                       channels.push_back(Joint::Channel::XPOSITION);
+               else if (token == kYpos)
+                       channels.push_back(Joint::Channel::YPOSITION);
+               else if (token == kZpos)
+                       channels.push_back(Joint::Channel::ZPOSITION);
+               else if (token == kXrot)
+                       channels.push_back(Joint::Channel::XROTATION);
+               else if (token == kYrot)
+                       channels.push_back(Joint::Channel::YROTATION);
+               else if (token == kZrot)
+                       channels.push_back(Joint::Channel::ZROTATION);
+               else {
+                       //LOG(ERROR) << "Not valid channel!";
+                       return -1;
+               }
+       }
+
+       joint->set_channels_order(channels);
+
+       LOGI("LEAVE");
+
+       return 0;
+}
 
 }
 } // namespace
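For reference, a minimal BVH file that the parser above accepts; joint names, offsets and channel values are illustrative, and the channel keywords are assumed to follow the standard BVH spellings behind the k* constants:

    HIERARCHY
    ROOT Hips
    {
        OFFSET 0.0 0.0 0.0
        CHANNELS 6 Xposition Yposition Zposition Zrotation Xrotation Yrotation
        End Site
        {
            OFFSET 0.0 7.0 0.0
        }
    }
    MOTION
    Frames: 1
    Frame Time: 0.033333
    0.0 0.0 0.0 0.0 0.0 0.0
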
index ba11a91..181c46e 100644 (file)
 #include "BvhUtils.h"
 #include <cmath>
 
-#define DegreeToRadian(degree) ((degree) * (M_PI/180.f))
+#define DegreeToRadian(degree) ((degree) * (M_PI / 180.f))
 
 namespace mediavision
 {
 namespace inference
 {
-  cv::Mat rotation_matrix(float angle, Axis axis) {
-    cv::Mat matrix = cv::Mat::eye(4,4,CV_32F);
+cv::Mat rotation_matrix(float angle, Axis axis)
+{
+       cv::Mat matrix = cv::Mat::eye(4, 4, CV_32F);
 
-    float rangle = DegreeToRadian(angle);
+       float rangle = DegreeToRadian(angle);
 
-    // We want to unique situation when in matrix are -0.0f, so we perform
-    // additional checking
-    float sin_a = sin(rangle);
-    if (fabs(sin_a) < std::numeric_limits<float>::epsilon())
-      sin_a = 0.0f;
-    float cos_a = cos(rangle);
-    if (fabs(cos_a) < std::numeric_limits<float>::epsilon())
-      cos_a = 0.0f;
-    float msin_a = fabs(sin_a) < std::numeric_limits<float>::epsilon() ?
-        0.0f : (-1.0f) * sin_a;
+	// We want to avoid -0.0f entries in the matrix, so we perform
+	// an additional check and clamp near-zero values to 0.0f
+       float sin_a = sin(rangle);
+       if (fabs(sin_a) < std::numeric_limits<float>::epsilon())
+               sin_a = 0.0f;
+       float cos_a = cos(rangle);
+       if (fabs(cos_a) < std::numeric_limits<float>::epsilon())
+               cos_a = 0.0f;
+       float msin_a = fabs(sin_a) < std::numeric_limits<float>::epsilon() ? 0.0f : (-1.0f) * sin_a;
 
-    if (axis == Axis::X) {
-        matrix.at<float>(1,1) = cos_a;
-        matrix.at<float>(2,1) = sin_a;
-        matrix.at<float>(1,2) = msin_a;
-        matrix.at<float>(2,2) = cos_a;
-    } else if (axis == Axis::Y) {
-      matrix.at<float>(0,0) = cos_a;
-      matrix.at<float>(2,0) = msin_a;
-      matrix.at<float>(0,2) = sin_a;
-      matrix.at<float>(2,2) = cos_a;
-    } else {
-      matrix.at<float>(0,0) = cos_a;
-      matrix.at<float>(1,0) = sin_a;
-      matrix.at<float>(0,1) = msin_a;
-      matrix.at<float>(1,1) = cos_a;
-    }
+       if (axis == Axis::X) {
+               matrix.at<float>(1, 1) = cos_a;
+               matrix.at<float>(2, 1) = sin_a;
+               matrix.at<float>(1, 2) = msin_a;
+               matrix.at<float>(2, 2) = cos_a;
+       } else if (axis == Axis::Y) {
+               matrix.at<float>(0, 0) = cos_a;
+               matrix.at<float>(2, 0) = msin_a;
+               matrix.at<float>(0, 2) = sin_a;
+               matrix.at<float>(2, 2) = cos_a;
+       } else {
+               matrix.at<float>(0, 0) = cos_a;
+               matrix.at<float>(1, 0) = sin_a;
+               matrix.at<float>(0, 1) = msin_a;
+               matrix.at<float>(1, 1) = cos_a;
+       }
 
-    return matrix;
-  }
+       return matrix;
+}
 
-  /** Rotates matrix
+/** Rotates matrix
    *  @param  matrix  The matrix to be rotated
    *  @param  angle   The rotation angle
    *  @param  axis    The rotation axis
    *  @return  The rotation matrix
    */
-  cv::Mat rotate(cv::Mat matrix, float angle, Axis axis) {
-    return matrix * rotation_matrix(angle, axis);
-  }
+cv::Mat rotate(cv::Mat matrix, float angle, Axis axis)
+{
+       return matrix * rotation_matrix(angle, axis);
+}
 }
 }
\ No newline at end of file
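For reference, a short usage sketch for the helpers above. Because near-zero sines and cosines are clamped to 0.0f, a 90-degree rotation yields an exact matrix:

    cv::Mat m = cv::Mat::eye(4, 4, CV_32F);
    m = rotate(m, 90.f, Axis::Z);
    // m is now { { 0, -1, 0, 0 }, { 1, 0, 0, 0 }, { 0, 0, 1, 0 }, { 0, 0, 0, 1 } }
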
old mode 100755 (executable)
new mode 100644 (file)
index 6a5a6ca..150c3d5
@@ -34,7 +34,8 @@
 #define MV_INFERENCE_CONFIDENCE_THRESHOLD_MAX 1.0
 #define MV_INFERENCE_CONFIDENCE_THRESHOLD_MIN 0.0
 
-typedef enum {
+typedef enum
+{
        InputAttrNoType = 0,
        InputAttrFloat32 = 1,
        InputAttrInt32 = 2,
@@ -50,1649 +51,1615 @@ namespace mediavision
 {
 namespace inference
 {
-       InferenceConfig::InferenceConfig() :
-                       mConfigFilePath(),
-                       mWeightFilePath(),
-                       mUserFilePath(),
-                       mDataType(MV_INFERENCE_DATA_FLOAT32),
-                       mTargetTypes(MV_INFERENCE_TARGET_DEVICE_CPU),
-                       mConfidenceThresHold(),
-                       mMeanValue(),
-                       mStdValue(),
-                       mMaxOutputNumbers(1)
-       {
-               mTensorInfo.width = -1;
-               mTensorInfo.height = -1;
-               mTensorInfo.dim = -1;
-               mTensorInfo.ch = -1;
-       }
-
-       Inference::Inference() :
-                       mCanRun(),
-                       mConfig(),
-                       mBackendCapacity(),
-                       mSupportedInferenceBackend(),
-                       mInputSize(cv::Size()),
-                       mSourceSize(cv::Size()),
-                       engine_config(),
-                       mBackend(),
-                       mMetadata(),
-                       mPreProc()
-       {
-               LOGI("ENTER");
-
-               // Mediavision can support several inference engines via ML Single API
-               // "mlapi" means that the inference backend is used via ML Single API.
-               mSupportedInferenceBackend.insert(std::make_pair(
-                               MV_INFERENCE_BACKEND_OPENCV, std::make_pair("opencv", false)));
-               mSupportedInferenceBackend.insert(std::make_pair(
-                               MV_INFERENCE_BACKEND_TFLITE, std::make_pair("tflite", false)));
-               mSupportedInferenceBackend.insert(std::make_pair(
-                               MV_INFERENCE_BACKEND_ARMNN, std::make_pair("armnn", false)));
-               mSupportedInferenceBackend.insert(std::make_pair(
-                               MV_INFERENCE_BACKEND_MLAPI, std::make_pair("mlapi", false)));
-               mSupportedInferenceBackend.insert(std::make_pair(
-                               MV_INFERENCE_BACKEND_ONE, std::make_pair("mlapi", false)));
-               mSupportedInferenceBackend.insert(std::make_pair(
-                               MV_INFERENCE_BACKEND_NNTRAINER, std::make_pair("mlapi", false)));
-               mSupportedInferenceBackend.insert(std::make_pair(
-                               MV_INFERENCE_BACKEND_SNPE, std::make_pair("mlapi", false)));
-
-               CheckSupportedInferenceBackend();
-
-               for (auto& backend : mSupportedInferenceBackend) {
-                       LOGI("%s: %s", backend.second.first.c_str(),
-                                backend.second.second ? "TRUE" : "FALSE");
-               }
+InferenceConfig::InferenceConfig()
+               : mConfigFilePath()
+               , mWeightFilePath()
+               , mUserFilePath()
+               , mDataType(MV_INFERENCE_DATA_FLOAT32)
+               , mTargetTypes(MV_INFERENCE_TARGET_DEVICE_CPU)
+               , mConfidenceThresHold()
+               , mMeanValue()
+               , mStdValue()
+               , mMaxOutputNumbers(1)
+{
+       mTensorInfo.width = -1;
+       mTensorInfo.height = -1;
+       mTensorInfo.dim = -1;
+       mTensorInfo.ch = -1;
+}
+
+Inference::Inference()
+               : mCanRun()
+               , mConfig()
+               , mBackendCapacity()
+               , mSupportedInferenceBackend()
+               , mInputSize(cv::Size())
+               , mSourceSize(cv::Size())
+               , engine_config()
+               , mBackend()
+               , mMetadata()
+               , mPreProc()
+{
+       LOGI("ENTER");
+
+       // Mediavision can support several inference engines via ML Single API
+       // "mlapi" means that the inference backend is used via ML Single API.
+       mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_OPENCV, std::make_pair("opencv", false)));
+       mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_TFLITE, std::make_pair("tflite", false)));
+       mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_ARMNN, std::make_pair("armnn", false)));
+       mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_MLAPI, std::make_pair("mlapi", false)));
+       mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_ONE, std::make_pair("mlapi", false)));
+       mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_NNTRAINER, std::make_pair("mlapi", false)));
+       mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_SNPE, std::make_pair("mlapi", false)));
+
+       CheckSupportedInferenceBackend();
+
+       for (auto &backend : mSupportedInferenceBackend) {
+               LOGI("%s: %s", backend.second.first.c_str(), backend.second.second ? "TRUE" : "FALSE");
+       }
 
-               mModelFormats.insert(std::make_pair<std::string, int>(
-                               "caffemodel", INFERENCE_MODEL_CAFFE));
-               mModelFormats.insert(
-                               std::make_pair<std::string, int>("pb", INFERENCE_MODEL_TF));
-               mModelFormats.insert(std::make_pair<std::string, int>(
-                               "tflite", INFERENCE_MODEL_TFLITE));
-               mModelFormats.insert(
-                               std::make_pair<std::string, int>("t7", INFERENCE_MODEL_TORCH));
-               mModelFormats.insert(std::make_pair<std::string, int>(
-                               "weights", INFERENCE_MODEL_DARKNET));
-               mModelFormats.insert(
-                               std::make_pair<std::string, int>("bin", INFERENCE_MODEL_DLDT));
-               mModelFormats.insert(
-                               std::make_pair<std::string, int>("onnx", INFERENCE_MODEL_ONNX));
-               mModelFormats.insert(std::make_pair<std::string, int>(
-                               "nb", INFERENCE_MODEL_VIVANTE));
-               mModelFormats.insert(std::make_pair<std::string, int>(
-                               "ini", INFERENCE_MODEL_NNTRAINER));
-               mModelFormats.insert(std::make_pair<std::string, int>(
-                               "dlc", INFERENCE_MODEL_SNPE));
-
-               LOGI("LEAVE");
-       }
-
-       Inference::~Inference()
-       {
-               CleanupTensorBuffers();
-
-               if (!mInputLayerProperty.layers.empty()) {
-                       mInputLayerProperty.layers.clear();
-                       std::map<std::string, inference_engine_tensor_info>().swap(
-                                       mInputLayerProperty.layers);
-               }
-               if (!mOutputLayerProperty.layers.empty()) {
-                       mOutputLayerProperty.layers.clear();
-                       std::map<std::string, inference_engine_tensor_info>().swap(
-                                       mOutputLayerProperty.layers);
-               }
+       mModelFormats.insert(std::make_pair<std::string, int>("caffemodel", INFERENCE_MODEL_CAFFE));
+       mModelFormats.insert(std::make_pair<std::string, int>("pb", INFERENCE_MODEL_TF));
+       mModelFormats.insert(std::make_pair<std::string, int>("tflite", INFERENCE_MODEL_TFLITE));
+       mModelFormats.insert(std::make_pair<std::string, int>("t7", INFERENCE_MODEL_TORCH));
+       mModelFormats.insert(std::make_pair<std::string, int>("weights", INFERENCE_MODEL_DARKNET));
+       mModelFormats.insert(std::make_pair<std::string, int>("bin", INFERENCE_MODEL_DLDT));
+       mModelFormats.insert(std::make_pair<std::string, int>("onnx", INFERENCE_MODEL_ONNX));
+       mModelFormats.insert(std::make_pair<std::string, int>("nb", INFERENCE_MODEL_VIVANTE));
+       mModelFormats.insert(std::make_pair<std::string, int>("ini", INFERENCE_MODEL_NNTRAINER));
+       mModelFormats.insert(std::make_pair<std::string, int>("dlc", INFERENCE_MODEL_SNPE));
+
+       LOGI("LEAVE");
+}
+
+Inference::~Inference()
+{
+       CleanupTensorBuffers();
 
-               mModelFormats.clear();
+       if (!mInputLayerProperty.layers.empty()) {
+               mInputLayerProperty.layers.clear();
+               std::map<std::string, inference_engine_tensor_info>().swap(mInputLayerProperty.layers);
+       }
+       if (!mOutputLayerProperty.layers.empty()) {
+               mOutputLayerProperty.layers.clear();
+               std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayerProperty.layers);
+       }
 
-               // Release backend engine.
-               if (mBackend) {
-                       mBackend->UnbindBackend();
-                       delete mBackend;
-               }
+       mModelFormats.clear();
 
-               LOGI("Released backend engine.");
+       // Release backend engine.
+       if (mBackend) {
+               mBackend->UnbindBackend();
+               delete mBackend;
        }
 
-       void Inference::CheckSupportedInferenceBackend()
-       {
-               LOGI("ENTER");
+       LOGI("Released backend engine.");
+}
 
-               InferenceInI ini;
-               ini.LoadInI();
+void Inference::CheckSupportedInferenceBackend()
+{
+       LOGI("ENTER");
 
-               std::vector<int> supportedBackend = ini.GetSupportedInferenceEngines();
-               for (auto& backend : supportedBackend) {
-                       LOGI("engine: %d", backend);
+       InferenceInI ini;
+       ini.LoadInI();
 
-                       mSupportedInferenceBackend[backend].second = true;
-               }
+       std::vector<int> supportedBackend = ini.GetSupportedInferenceEngines();
+       for (auto &backend : supportedBackend) {
+               LOGI("engine: %d", backend);
 
-               LOGI("LEAVE");
+               mSupportedInferenceBackend[backend].second = true;
        }
 
-       int Inference::ConvertEngineErrorToVisionError(int error)
-       {
-               int ret = MEDIA_VISION_ERROR_NONE;
+       LOGI("LEAVE");
+}
 
-               switch (error) {
-               case INFERENCE_ENGINE_ERROR_NONE:
-                       ret = MEDIA_VISION_ERROR_NONE;
-                       break;
-               case INFERENCE_ENGINE_ERROR_NOT_SUPPORTED:
-                       ret = MEDIA_VISION_ERROR_NOT_SUPPORTED;
-                       break;
-               case INFERENCE_ENGINE_ERROR_MSG_TOO_LONG:
-                       ret = MEDIA_VISION_ERROR_MSG_TOO_LONG;
-                       break;
-               case INFERENCE_ENGINE_ERROR_NO_DATA:
-                       ret = MEDIA_VISION_ERROR_NO_DATA;
-                       break;
-               case INFERENCE_ENGINE_ERROR_KEY_NOT_AVAILABLE:
-                       ret = MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
-                       break;
-               case INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY:
-                       ret = MEDIA_VISION_ERROR_OUT_OF_MEMORY;
-                       break;
-               case INFERENCE_ENGINE_ERROR_INVALID_PARAMETER:
-                       ret = MEDIA_VISION_ERROR_INVALID_PARAMETER;
-                       break;
-               case INFERENCE_ENGINE_ERROR_INVALID_OPERATION:
-                       ret = MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       break;
-               case INFERENCE_ENGINE_ERROR_PERMISSION_DENIED:
-                       ret = MEDIA_VISION_ERROR_PERMISSION_DENIED;
-                       break;
-               case INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT:
-                       ret = MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
-                       break;
-               case INFERENCE_ENGINE_ERROR_INTERNAL:
-                       ret = MEDIA_VISION_ERROR_INTERNAL;
-                       break;
-               case INFERENCE_ENGINE_ERROR_INVALID_DATA:
-                       ret = MEDIA_VISION_ERROR_INVALID_DATA;
-                       break;
-               case INFERENCE_ENGINE_ERROR_INVALID_PATH:
-                       ret = MEDIA_VISION_ERROR_INVALID_PATH;
-                       break;
-               default:
-                       LOGE("Unknown inference engine error type");
-               }
+int Inference::ConvertEngineErrorToVisionError(int error)
+{
+       int ret = MEDIA_VISION_ERROR_NONE;
 
-               return ret;
+       switch (error) {
+       case INFERENCE_ENGINE_ERROR_NONE:
+               ret = MEDIA_VISION_ERROR_NONE;
+               break;
+       case INFERENCE_ENGINE_ERROR_NOT_SUPPORTED:
+               ret = MEDIA_VISION_ERROR_NOT_SUPPORTED;
+               break;
+       case INFERENCE_ENGINE_ERROR_MSG_TOO_LONG:
+               ret = MEDIA_VISION_ERROR_MSG_TOO_LONG;
+               break;
+       case INFERENCE_ENGINE_ERROR_NO_DATA:
+               ret = MEDIA_VISION_ERROR_NO_DATA;
+               break;
+       case INFERENCE_ENGINE_ERROR_KEY_NOT_AVAILABLE:
+               ret = MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+               break;
+       case INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY:
+               ret = MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+               break;
+       case INFERENCE_ENGINE_ERROR_INVALID_PARAMETER:
+               ret = MEDIA_VISION_ERROR_INVALID_PARAMETER;
+               break;
+       case INFERENCE_ENGINE_ERROR_INVALID_OPERATION:
+               ret = MEDIA_VISION_ERROR_INVALID_OPERATION;
+               break;
+       case INFERENCE_ENGINE_ERROR_PERMISSION_DENIED:
+               ret = MEDIA_VISION_ERROR_PERMISSION_DENIED;
+               break;
+       case INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT:
+               ret = MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+               break;
+       case INFERENCE_ENGINE_ERROR_INTERNAL:
+               ret = MEDIA_VISION_ERROR_INTERNAL;
+               break;
+       case INFERENCE_ENGINE_ERROR_INVALID_DATA:
+               ret = MEDIA_VISION_ERROR_INVALID_DATA;
+               break;
+       case INFERENCE_ENGINE_ERROR_INVALID_PATH:
+               ret = MEDIA_VISION_ERROR_INVALID_PATH;
+               break;
+       default:
+               LOGE("Unknown inference engine error type");
        }
 
-       int Inference::ConvertTargetTypes(int given_types)
-       {
-               int target_types = INFERENCE_TARGET_NONE;
-
-               if (given_types & MV_INFERENCE_TARGET_DEVICE_CPU)
-                       target_types |= INFERENCE_TARGET_CPU;
-               if (given_types & MV_INFERENCE_TARGET_DEVICE_GPU)
-                       target_types |= INFERENCE_TARGET_GPU;
-               if (given_types & MV_INFERENCE_TARGET_DEVICE_CUSTOM)
-                       target_types |= INFERENCE_TARGET_CUSTOM;
+       return ret;
+}
 
-               return target_types;
-       }
+int Inference::ConvertTargetTypes(int given_types)
+{
+       int target_types = INFERENCE_TARGET_NONE;
 
-       int Inference::ConvertToCv(int given_type)
-       {
-               int type = 0;
-               const int ch = mConfig.mTensorInfo.ch;
+       if (given_types & MV_INFERENCE_TARGET_DEVICE_CPU)
+               target_types |= INFERENCE_TARGET_CPU;
+       if (given_types & MV_INFERENCE_TARGET_DEVICE_GPU)
+               target_types |= INFERENCE_TARGET_GPU;
+       if (given_types & MV_INFERENCE_TARGET_DEVICE_CUSTOM)
+               target_types |= INFERENCE_TARGET_CUSTOM;
 
-               switch (given_type) {
-               case INFERENCE_TENSOR_DATA_TYPE_UINT8:
-                       LOGI("Type is %d ch with UINT8", ch);
-                       type = ch == 1 ? CV_8UC1 : CV_8UC3;
-                       break;
-               case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
-                       LOGI("Type is %d ch with FLOAT32", ch);
-                       type = ch == 1 ? CV_32FC1 : CV_32FC3;
-                       break;
-               default:
-                       LOGI("unknown data type so FLOAT32 data type will be used in default");
-                       type = ch == 1 ? CV_32FC1 : CV_32FC3;
-                       break;
-               }
+       return target_types;
+}
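+
+// Illustrative example of the bit-by-bit mapping above (flags combined by the caller):
+//   ConvertTargetTypes(MV_INFERENCE_TARGET_DEVICE_CPU | MV_INFERENCE_TARGET_DEVICE_GPU)
+//       == (INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU)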
 
-               return type;
+int Inference::ConvertToCv(int given_type)
+{
+       int type = 0;
+       const int ch = mConfig.mTensorInfo.ch;
+
+       switch (given_type) {
+       case INFERENCE_TENSOR_DATA_TYPE_UINT8:
+               LOGI("Type is %d ch with UINT8", ch);
+               type = ch == 1 ? CV_8UC1 : CV_8UC3;
+               break;
+       case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
+               LOGI("Type is %d ch with FLOAT32", ch);
+               type = ch == 1 ? CV_32FC1 : CV_32FC3;
+               break;
+       default:
+               LOGI("unknown data type so FLOAT32 data type will be used in default");
+               type = ch == 1 ? CV_32FC1 : CV_32FC3;
+               break;
        }
 
-       inference_tensor_data_type_e Inference::ConvertToIE(int given_type)
-       {
-               inference_tensor_data_type_e type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+       return type;
+}
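+
+// Illustrative example: with mConfig.mTensorInfo.ch == 3, ConvertToCv() maps
+// INFERENCE_TENSOR_DATA_TYPE_UINT8 to CV_8UC3 and FLOAT32 to CV_32FC3; with
+// ch == 1 the single-channel variants CV_8UC1/CV_32FC1 are returned instead.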
 
-               switch (given_type) {
-               case MV_INFERENCE_DATA_FLOAT32:
-                       type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-                       break;
-               case MV_INFERENCE_DATA_UINT8:
-                       type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-                       break;
-               default:
-                       LOGI("unknown data type so FLOAT32 data type will be used in default");
-                       break;
-               }
-
-               return type;
+inference_tensor_data_type_e Inference::ConvertToIE(int given_type)
+{
+       inference_tensor_data_type_e type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+
+       switch (given_type) {
+       case MV_INFERENCE_DATA_FLOAT32:
+               type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+               break;
+       case MV_INFERENCE_DATA_UINT8:
+               type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+               break;
+       default:
+               LOGI("unknown data type so FLOAT32 data type will be used in default");
+               break;
        }
 
-       int Inference::SetUserFile(std::string filename)
-       {
-               std::ifstream fp(filename.c_str());
-               if (!fp.is_open()) {
-                       return MEDIA_VISION_ERROR_INVALID_PATH;
-               }
-
-               std::string userListName;
-               while (!fp.eof()) {
-                       std::getline(fp, userListName);
-                       if (userListName.length())
-                               mUserListName.push_back(userListName);
-               }
-
-               fp.close();
+       return type;
+}
 
-               return MEDIA_VISION_ERROR_NONE;
+int Inference::SetUserFile(std::string filename)
+{
+       std::ifstream fp(filename.c_str());
+       if (!fp.is_open()) {
+               return MEDIA_VISION_ERROR_INVALID_PATH;
        }
 
-       void Inference::ConfigureModelFiles(const std::string modelConfigFilePath,
-                                                                               const std::string modelWeightFilePath,
-                                                                               const std::string modelUserFilePath)
-       {
-               LOGI("ENTER");
+       std::string userListName;
+       while (std::getline(fp, userListName)) {
+               if (userListName.length())
+                       mUserListName.push_back(userListName);
+       }
 
-               mConfig.mConfigFilePath = modelConfigFilePath;
-               mConfig.mWeightFilePath = modelWeightFilePath;
-               mConfig.mUserFilePath = modelUserFilePath;
+       fp.close();
 
-               LOGI("LEAVE");
-       }
+       return MEDIA_VISION_ERROR_NONE;
+}
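+
+// Illustrative example of the label file SetUserFile() consumes - one label per
+// line, blank lines skipped. A hypothetical "labels.txt" containing
+//   background
+//   person
+//   dog
+// leaves mUserListName == { "background", "person", "dog" }.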
 
-       void Inference::ConfigureInputInfo(int width, int height, int dim, int ch,
-                                                                          double stdValue, double meanValue,
-                                                                          int dataType,
-                                                                          const std::vector<std::string> names)
-       {
-               LOGI("ENTER");
+void Inference::ConfigureModelFiles(const std::string modelConfigFilePath, const std::string modelWeightFilePath,
+                                                                       const std::string modelUserFilePath)
+{
+       LOGI("ENTER");
 
-               // FIXME: mConfig should be removed
-               mConfig.mTensorInfo = { width, height, dim, ch };
-               mConfig.mStdValue = stdValue;
-               mConfig.mMeanValue = meanValue;
-               mConfig.mDataType = static_cast<mv_inference_data_type_e>(dataType);
-               mConfig.mInputLayerNames = names;
+       mConfig.mConfigFilePath = modelConfigFilePath;
+       mConfig.mWeightFilePath = modelWeightFilePath;
+       mConfig.mUserFilePath = modelUserFilePath;
 
-               if (mMetadata.GetInputMeta().IsParsed()) {
-                       LOGI("use input meta");
-                       auto& layerInfo = mMetadata.GetInputMeta().GetLayer().begin()->second;
-                       if (layerInfo.shapeType == INFERENCE_TENSOR_SHAPE_NCHW) { // NCHW
-                               mConfig.mTensorInfo.ch = layerInfo.dims[1];
-                               mConfig.mTensorInfo.dim = layerInfo.dims[0];
-                               mConfig.mTensorInfo.width = layerInfo.dims[3];
-                               mConfig.mTensorInfo.height = layerInfo.dims[2];
-                       } else if (layerInfo.shapeType == INFERENCE_TENSOR_SHAPE_NHWC) {// NHWC
-                               mConfig.mTensorInfo.ch = layerInfo.dims[3];
-                               mConfig.mTensorInfo.dim = layerInfo.dims[0];
-                               mConfig.mTensorInfo.width = layerInfo.dims[2];
-                               mConfig.mTensorInfo.height = layerInfo.dims[1];
-                       } else {
-                               LOGE("Invalid shape type[%d]", layerInfo.shapeType);
-                       }
+       LOGI("LEAVE");
+}
 
-                       if (!mMetadata.GetInputMeta().GetOption().empty()) {
-                               auto& option = mMetadata.GetInputMeta().GetOption().begin()->second;
-                               if (option.normalization.use) {
-                                       mConfig.mMeanValue = option.normalization.mean[0];
-                                       mConfig.mStdValue = option.normalization.std[0];
-                               }
-                       }
+void Inference::ConfigureInputInfo(int width, int height, int dim, int ch, double stdValue, double meanValue,
+                                                                  int dataType, const std::vector<std::string> names)
+{
+       LOGI("ENTER");
+
+       // FIXME: mConfig should be removed
+       mConfig.mTensorInfo = { width, height, dim, ch };
+       mConfig.mStdValue = stdValue;
+       mConfig.mMeanValue = meanValue;
+       mConfig.mDataType = static_cast<mv_inference_data_type_e>(dataType);
+       mConfig.mInputLayerNames = names;
+
+       if (mMetadata.GetInputMeta().IsParsed()) {
+               LOGI("use input meta");
+               auto &layerInfo = mMetadata.GetInputMeta().GetLayer().begin()->second;
+               if (layerInfo.shapeType == INFERENCE_TENSOR_SHAPE_NCHW) { // NCHW
+                       mConfig.mTensorInfo.ch = layerInfo.dims[1];
+                       mConfig.mTensorInfo.dim = layerInfo.dims[0];
+                       mConfig.mTensorInfo.width = layerInfo.dims[3];
+                       mConfig.mTensorInfo.height = layerInfo.dims[2];
+               } else if (layerInfo.shapeType == INFERENCE_TENSOR_SHAPE_NHWC) { // NHWC
+                       mConfig.mTensorInfo.ch = layerInfo.dims[3];
+                       mConfig.mTensorInfo.dim = layerInfo.dims[0];
+                       mConfig.mTensorInfo.width = layerInfo.dims[2];
+                       mConfig.mTensorInfo.height = layerInfo.dims[1];
+               } else {
+                       LOGE("Invalid shape type[%d]", layerInfo.shapeType);
+               }
 
-                       mConfig.mDataType = layerInfo.dataType;
-                       mConfig.mInputLayerNames.clear();
-                       for (auto& layer : mMetadata.GetInputMeta().GetLayer()) {
-                               mConfig.mInputLayerNames.push_back(layer.first);
+               if (!mMetadata.GetInputMeta().GetOption().empty()) {
+                       auto &option = mMetadata.GetInputMeta().GetOption().begin()->second;
+                       if (option.normalization.use) {
+                               mConfig.mMeanValue = option.normalization.mean[0];
+                               mConfig.mStdValue = option.normalization.std[0];
                        }
                }
 
-               mInputSize = cv::Size(mConfig.mTensorInfo.width, mConfig.mTensorInfo.height);
-
-               inference_engine_layer_property property;
-               // In case of that a inference plugin deosn't support to get properties,
-               // the tensor info given by a user will be used.
-               // If the plugin supports that, the given info will be ignored.
+               mConfig.mDataType = layerInfo.dataType;
+               mConfig.mInputLayerNames.clear();
+               for (auto &layer : mMetadata.GetInputMeta().GetLayer()) {
+                       mConfig.mInputLayerNames.push_back(layer.first);
+               }
+       }
 
-               for (auto& name : mConfig.mInputLayerNames) {
-                       inference_engine_tensor_info tensor_info;
-                       tensor_info.data_type = ConvertToIE(mConfig.mDataType);
+       mInputSize = cv::Size(mConfig.mTensorInfo.width, mConfig.mTensorInfo.height);
 
-                       // In case of OpenCV, only supports NCHW
-                       tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
-                       // modify to handle multiple tensor infos
-                       tensor_info.shape.push_back(mConfig.mTensorInfo.dim);
-                       tensor_info.shape.push_back(mConfig.mTensorInfo.ch);
-                       tensor_info.shape.push_back(mConfig.mTensorInfo.height);
-                       tensor_info.shape.push_back(mConfig.mTensorInfo.width);
+       inference_engine_layer_property property;
+       // If an inference plugin doesn't support getting properties,
+       // the tensor info given by the user will be used.
+       // If the plugin does support it, the given info will be ignored.
 
-                       tensor_info.size = 1;
-                       for (auto& dim : tensor_info.shape) {
-                               tensor_info.size *= dim;
-                       }
+       for (auto &name : mConfig.mInputLayerNames) {
+               inference_engine_tensor_info tensor_info;
+               tensor_info.data_type = ConvertToIE(mConfig.mDataType);
 
-                       property.layers.insert(std::make_pair(name, tensor_info));
-               }
+               // In case of OpenCV, only NCHW is supported.
+               tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
+               // TODO: modify to handle multiple tensor infos.
+               tensor_info.shape.push_back(mConfig.mTensorInfo.dim);
+               tensor_info.shape.push_back(mConfig.mTensorInfo.ch);
+               tensor_info.shape.push_back(mConfig.mTensorInfo.height);
+               tensor_info.shape.push_back(mConfig.mTensorInfo.width);
 
-               int ret = mBackend->SetInputLayerProperty(property);
-               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                       LOGE("Fail to set input layer property");
+               tensor_info.size = 1;
+               for (auto &dim : tensor_info.shape) {
+                       tensor_info.size *= dim;
                }
 
-               LOGI("InputSize is %d x %d\n", mInputSize.width, mInputSize.height);
-               LOGI("mean %.4f, deviation %.4f", mConfig.mMeanValue,  mConfig.mStdValue);
-               LOGI("outputNumber %d", mConfig.mMaxOutputNumbers);
+               property.layers.insert(std::make_pair(name, tensor_info));
+       }
 
-               LOGI("LEAVE");
+       int ret = mBackend->SetInputLayerProperty(property);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to set input layer property");
        }
 
-       void Inference::ConfigureOutputInfo(const std::vector<std::string> names,
-                                                                               std::vector<inference_engine_tensor_info>& tensors_info)
-       {
-               LOGI("ENTER");
+       LOGI("InputSize is %d x %d\n", mInputSize.width, mInputSize.height);
+       LOGI("mean %.4f, deviation %.4f", mConfig.mMeanValue, mConfig.mStdValue);
+       LOGI("outputNumber %d", mConfig.mMaxOutputNumbers);
 
-               mConfig.mOutputLayerNames = names;
+       LOGI("LEAVE");
+}
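+
+// Illustrative example (hypothetical metadata): an NCHW input layer with
+// dims = {1, 3, 224, 224} yields dim = 1, ch = 3, height = 224 and width = 224
+// in the branch above, while the same tensor described as NHWC would carry
+// dims = {1, 224, 224, 3}.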
 
-               OutputMetadata& outputMeta = mMetadata.GetOutputMeta();
-               if (mMetadata.GetOutputMeta().IsParsed()) {
-                       mConfig.mOutputLayerNames.clear();
-                       if (!outputMeta.GetScoreName().empty())
-                               mConfig.mOutputLayerNames.push_back(outputMeta.GetScoreName());
+void Inference::ConfigureOutputInfo(const std::vector<std::string> names,
+                                                                       std::vector<inference_engine_tensor_info> &tensors_info)
+{
+       LOGI("ENTER");
 
-                       if (!outputMeta.GetBoxName().empty())
-                               mConfig.mOutputLayerNames.push_back(outputMeta.GetBoxName());
+       mConfig.mOutputLayerNames = names;
 
-                       if (!outputMeta.GetBoxLabelName().empty())
-                               mConfig.mOutputLayerNames.push_back(outputMeta.GetBoxLabelName());
+       OutputMetadata &outputMeta = mMetadata.GetOutputMeta();
+       if (mMetadata.GetOutputMeta().IsParsed()) {
+               mConfig.mOutputLayerNames.clear();
+               if (!outputMeta.GetScoreName().empty())
+                       mConfig.mOutputLayerNames.push_back(outputMeta.GetScoreName());
 
-                       if (!outputMeta.GetBoxNumberName().empty())
-                               mConfig.mOutputLayerNames.push_back(outputMeta.GetBoxNumberName());
+               if (!outputMeta.GetBoxName().empty())
+                       mConfig.mOutputLayerNames.push_back(outputMeta.GetBoxName());
 
-                       if (!outputMeta.GetLandmarkName().empty())
-                               mConfig.mOutputLayerNames.push_back(outputMeta.GetLandmarkName());
+               if (!outputMeta.GetBoxLabelName().empty())
+                       mConfig.mOutputLayerNames.push_back(outputMeta.GetBoxLabelName());
 
-                       if (!outputMeta.GetOffsetVecName().empty())
-                               mConfig.mOutputLayerNames.push_back(outputMeta.GetOffsetVecName());
+               if (!outputMeta.GetBoxNumberName().empty())
+                       mConfig.mOutputLayerNames.push_back(outputMeta.GetBoxNumberName());
 
-                       for (auto& dispVec : outputMeta.GetLandmarkDispVecAll()) {
-                               mConfig.mOutputLayerNames.push_back(dispVec.GetName());
-                       }
-               }
-
-               inference_engine_layer_property property;
+               if (!outputMeta.GetLandmarkName().empty())
+                       mConfig.mOutputLayerNames.push_back(outputMeta.GetLandmarkName());
 
-               if (tensors_info.empty()) {
-                       inference_engine_tensor_info tensor_info = { std::vector<size_t>{1},
-                                                               INFERENCE_TENSOR_SHAPE_NCHW,
-                                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                                               1};
+               if (!outputMeta.GetOffsetVecName().empty())
+                       mConfig.mOutputLayerNames.push_back(outputMeta.GetOffsetVecName());
 
-                       for (auto& name : mConfig.mOutputLayerNames) {
-                               LOGI("Configure %s layer as output", name.c_str());
-                               property.layers.insert(std::make_pair(name, tensor_info));
-                       }
-               } else {
-                       if (mConfig.mOutputLayerNames.size() != tensors_info.size()) {
-                               LOGE("Output layer count is different from tensor info count.");
-                               return;
-                       }
-
-                       for (size_t idx = 0; idx < mConfig.mOutputLayerNames.size(); ++idx) {
-                               LOGI("Configure %s layer as output", mConfig.mOutputLayerNames[idx].c_str());
-                               property.layers.insert(std::make_pair(mConfig.mOutputLayerNames[idx], tensors_info[idx]));
-                       }
+               for (auto &dispVec : outputMeta.GetLandmarkDispVecAll()) {
+                       mConfig.mOutputLayerNames.push_back(dispVec.GetName());
                }
+       }
 
-               int ret = mBackend->SetOutputLayerProperty(property);
-               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                       LOGE("Fail to set output layer property");
-               }
+       inference_engine_layer_property property;
 
-               LOGI("LEAVE");
-       }
+       if (tensors_info.empty()) {
+               inference_engine_tensor_info tensor_info = { std::vector<size_t> { 1 }, INFERENCE_TENSOR_SHAPE_NCHW,
+                                                                                                        INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 1 };
 
-       int Inference::CheckBackendType(const mv_inference_backend_type_e backendType)
-       {
-               // Check if a given backend type is valid or not.
-               if (backendType <= MV_INFERENCE_BACKEND_NONE ||
-                               backendType >= MV_INFERENCE_BACKEND_MAX) {
-                       LOGE("Invalid backend type.");
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+               for (auto &name : mConfig.mOutputLayerNames) {
+                       LOGI("Configure %s layer as output", name.c_str());
+                       property.layers.insert(std::make_pair(name, tensor_info));
                }
-
-               std::pair<std::string, bool> backend =
-                               mSupportedInferenceBackend[backendType];
-               if (backend.second == false) {
-                       LOGE("%s type is not supported", (backend.first).c_str());
-                       return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+       } else {
+               if (mConfig.mOutputLayerNames.size() != tensors_info.size()) {
+                       LOGE("Output layer count is different from tensor info count.");
+                       return;
                }
 
-               LOGI("backend engine : %d", backendType);
-
-               return MEDIA_VISION_ERROR_NONE;
+               for (size_t idx = 0; idx < mConfig.mOutputLayerNames.size(); ++idx) {
+                       LOGI("Configure %s layer as output", mConfig.mOutputLayerNames[idx].c_str());
+                       property.layers.insert(std::make_pair(mConfig.mOutputLayerNames[idx], tensors_info[idx]));
+               }
        }
 
-       int Inference::ConfigureTargetTypes(int targetType, bool isNewVersion)
-       {
-               if (isNewVersion) {
-                       if (MV_INFERENCE_TARGET_DEVICE_NONE >= targetType ||
-                               MV_INFERENCE_TARGET_DEVICE_MAX <= targetType) {
-                               LOGE("Invalid target device.");
-                               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-                       }
-               } else {
-                       if (MV_INFERENCE_TARGET_NONE >= targetType ||
-                               MV_INFERENCE_TARGET_MAX <= targetType) {
-                               LOGE("Invalid target device.");
-                               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-                       }
-
-                       LOGI("Before converting target types : %d", targetType);
+       int ret = mBackend->SetOutputLayerProperty(property);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to set output layer property");
+       }
 
-                       // Convert old type to new one.
-                       switch (targetType) {
-                       case MV_INFERENCE_TARGET_CPU:
-                               targetType = MV_INFERENCE_TARGET_DEVICE_CPU;
-                               break;
-                       case MV_INFERENCE_TARGET_GPU:
+       LOGI("LEAVE");
+}
 
-                               targetType = MV_INFERENCE_TARGET_DEVICE_GPU;
-                               break;
-                       case MV_INFERENCE_TARGET_CUSTOM:
-                               targetType = MV_INFERENCE_TARGET_DEVICE_CUSTOM;
-                               break;
-                       }
+int Inference::CheckBackendType(const mv_inference_backend_type_e backendType)
+{
+       // Check if a given backend type is valid or not.
+       if (backendType <= MV_INFERENCE_BACKEND_NONE || backendType >= MV_INFERENCE_BACKEND_MAX) {
+               LOGE("Invalid backend type.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
 
-                       LOGI("After converting target types : %d", targetType);
-               }
+       std::pair<std::string, bool> backend = mSupportedInferenceBackend[backendType];
+       if (backend.second == false) {
+               LOGE("%s type is not supported", (backend.first).c_str());
+               return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+       }
 
-               mConfig.mTargetTypes = targetType;
+       LOGI("backend engine : %d", backendType);
 
-               return MEDIA_VISION_ERROR_NONE;
-       }
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-       int Inference::ConfigureTargetDevices(const int targetDevices)
-       {
-               // Check if given target types are valid or not.
-               if (MV_INFERENCE_TARGET_DEVICE_NONE >= targetDevices ||
-                       MV_INFERENCE_TARGET_DEVICE_MAX <= targetDevices) {
+int Inference::ConfigureTargetTypes(int targetType, bool isNewVersion)
+{
+       if (isNewVersion) {
+               if (MV_INFERENCE_TARGET_DEVICE_NONE >= targetType || MV_INFERENCE_TARGET_DEVICE_MAX <= targetType) {
+                       LOGE("Invalid target device.");
+                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+               }
+       } else {
+               if (MV_INFERENCE_TARGET_NONE >= targetType || MV_INFERENCE_TARGET_MAX <= targetType) {
                        LOGE("Invalid target device.");
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
                }
 
-               LOGI("target devices : %d", targetDevices);
+               LOGI("Before converting target types : %d", targetType);
 
-               if (!(mBackendCapacity.supported_accel_devices & targetDevices)) {
-                       LOGE("Backend doesn't support a given device acceleration.");
-                       return MEDIA_VISION_ERROR_NOT_SUPPORTED;
-               }
+               // Convert old type to new one.
+               switch (targetType) {
+               case MV_INFERENCE_TARGET_CPU:
+                       targetType = MV_INFERENCE_TARGET_DEVICE_CPU;
+                       break;
+               case MV_INFERENCE_TARGET_GPU:
 
-               mConfig.mTargetTypes = targetDevices;
+                       targetType = MV_INFERENCE_TARGET_DEVICE_GPU;
+                       break;
+               case MV_INFERENCE_TARGET_CUSTOM:
+                       targetType = MV_INFERENCE_TARGET_DEVICE_CUSTOM;
+                       break;
+               }
 
-               return MEDIA_VISION_ERROR_NONE;
+               LOGI("After converting target types : %d", targetType);
        }
 
-       bool Inference::IsTargetDeviceSupported(const int targetDevices)
-       {
-               if (!(mBackendCapacity.supported_accel_devices & targetDevices)) {
-                       LOGE("Backend doesn't support a given %x device acceleration.", targetDevices);
-                       return false;
-               }
+       mConfig.mTargetTypes = targetType;
+
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               return true;
+int Inference::ConfigureTargetDevices(const int targetDevices)
+{
+       // Check if given target types are valid or not.
+       if (MV_INFERENCE_TARGET_DEVICE_NONE >= targetDevices || MV_INFERENCE_TARGET_DEVICE_MAX <= targetDevices) {
+               LOGE("Invalid target device.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       void Inference::ConfigureOutput(const int maxOutputNumbers)
-       {
-               mConfig.mMaxOutputNumbers = std::max(
-                               std::min(maxOutputNumbers, MV_INFERENCE_OUTPUT_NUMBERS_MAX),
-                               MV_INFERENCE_OUTPUT_NUMBERS_MIN);
+       LOGI("target devices : %d", targetDevices);
+
+       if (!(mBackendCapacity.supported_accel_devices & targetDevices)) {
+               LOGE("Backend doesn't support a given device acceleration.");
+               return MEDIA_VISION_ERROR_NOT_SUPPORTED;
        }
 
-       void Inference::ConfigureThreshold(const double threshold)
-       {
-               mConfig.mConfidenceThresHold = std::max(
-                               std::min(threshold, MV_INFERENCE_CONFIDENCE_THRESHOLD_MAX),
-                               MV_INFERENCE_CONFIDENCE_THRESHOLD_MIN);
+       mConfig.mTargetTypes = targetDevices;
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+bool Inference::IsTargetDeviceSupported(const int targetDevices)
+{
+       if (!(mBackendCapacity.supported_accel_devices & targetDevices)) {
+               LOGE("Backend doesn't support a given %x device acceleration.", targetDevices);
+               return false;
        }
 
-       int Inference::ParseMetadata(const std::string filePath)
-       {
-               LOGI("ENTER");
-               int ret = mMetadata.Init(filePath);
-               if (ret != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Fail to init metadata[%d]", ret);
-                       return ret;
-               }
+       return true;
+}
 
-               ret = mMetadata.Parse();
-               if (ret != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Fail to parse metadata[%d]", ret);
-                       return ret;
-               }
+void Inference::ConfigureOutput(const int maxOutputNumbers)
+{
+       mConfig.mMaxOutputNumbers =
+                       std::max(std::min(maxOutputNumbers, MV_INFERENCE_OUTPUT_NUMBERS_MAX), MV_INFERENCE_OUTPUT_NUMBERS_MIN);
+}
 
-               LOGI("LEAVE");
+void Inference::ConfigureThreshold(const double threshold)
+{
+       mConfig.mConfidenceThresHold =
+                       std::max(std::min(threshold, MV_INFERENCE_CONFIDENCE_THRESHOLD_MAX), MV_INFERENCE_CONFIDENCE_THRESHOLD_MIN);
+}
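+
+// Illustrative example: both setters clamp their input, e.g. a threshold of 1.5
+// is stored as MV_INFERENCE_CONFIDENCE_THRESHOLD_MAX and -0.1 as
+// MV_INFERENCE_CONFIDENCE_THRESHOLD_MIN; in-range values are kept unchanged.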
 
-               return MEDIA_VISION_ERROR_NONE;
+int Inference::ParseMetadata(const std::string filePath)
+{
+       LOGI("ENTER");
+       int ret = mMetadata.Init(filePath);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to init metadata[%d]", ret);
+               return ret;
        }
 
-       void Inference::CleanupTensorBuffers(void)
-       {
-               LOGI("ENTER");
+       ret = mMetadata.Parse();
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to parse metadata[%d]", ret);
+               return ret;
+       }
 
-               if (!mInputTensorBuffers.empty()) {
-                       mInputTensorBuffers.release();
-               }
+       LOGI("LEAVE");
 
-               if (!mOutputTensorBuffers.empty()) {
-                       mOutputTensorBuffers.release();
-               }
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               LOGI("LEAVE");
+void Inference::CleanupTensorBuffers(void)
+{
+       LOGI("ENTER");
+
+       if (!mInputTensorBuffers.empty()) {
+               mInputTensorBuffers.release();
        }
 
-       int Inference::PrepareTenosrBuffers(void)
-       {
-               // If there are input and output tensor buffers allocated before then release the buffers.
-               // They will be allocated again according to a new model file to be loaded.
-               CleanupTensorBuffers();
+       if (!mOutputTensorBuffers.empty()) {
+               mOutputTensorBuffers.release();
+       }
 
-               // IF model file is loaded again then the model type could be different so
-               // clean up input and output layer properties so that they can be updated again
-               // after reloading the model file.
-               if (!mInputLayerProperty.layers.empty()) {
-                       mInputLayerProperty.layers.clear();
-                       std::map<std::string, inference_engine_tensor_info>().swap(
-                                       mInputLayerProperty.layers);
-               }
-               if (!mOutputLayerProperty.layers.empty()) {
-                       mOutputLayerProperty.layers.clear();
-                       std::map<std::string, inference_engine_tensor_info>().swap(
-                                       mOutputLayerProperty.layers);
-               }
+       LOGI("LEAVE");
+}
 
-               // Get input tensor buffers from a backend engine if the backend engine allocated.
-               auto& inputTensorBuffers = mInputTensorBuffers.getIETensorBuffer();
-               int ret = mBackend->GetInputTensorBuffers(inputTensorBuffers);
-               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                       LOGE("Fail to get input tensor buffers from backend engine.");
-                       return ConvertEngineErrorToVisionError(ret);
-               }
+int Inference::PrepareTenosrBuffers(void)
+{
+       // If input and output tensor buffers were allocated before, release them.
+       // They will be allocated again according to the new model file to be loaded.
+       CleanupTensorBuffers();
+
+       // If the model file is loaded again, the model type could be different, so
+       // clean up the input and output layer properties so that they can be updated
+       // after reloading the model file.
+       if (!mInputLayerProperty.layers.empty()) {
+               mInputLayerProperty.layers.clear();
+               std::map<std::string, inference_engine_tensor_info>().swap(mInputLayerProperty.layers);
+       }
+       if (!mOutputLayerProperty.layers.empty()) {
+               mOutputLayerProperty.layers.clear();
+               std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayerProperty.layers);
+       }
 
-               ret = mBackend->GetInputLayerProperty(mInputLayerProperty);
-               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                       LOGE("Fail to get input layer property from backend engine.");
-                       return ConvertEngineErrorToVisionError(ret);
-               }
+       // Get input tensor buffers from the backend engine if it allocated them.
+       auto &inputTensorBuffers = mInputTensorBuffers.getIETensorBuffer();
+       int ret = mBackend->GetInputTensorBuffers(inputTensorBuffers);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to get input tensor buffers from backend engine.");
+               return ConvertEngineErrorToVisionError(ret);
+       }
 
-               // If the backend engine isn't able to allocate input tensor buffers internally,
-               // then allocate the buffers at here.
-               if (mInputTensorBuffers.empty()) {
-                       for(auto& layer : mInputLayerProperty.layers) {
-                               inference_engine_tensor_buffer tensor_buffer;
-
-                               ret = mInputTensorBuffers.allocate(tensor_buffer, layer.second);
-                               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                                       LOGE("Fail to allocate tensor buffer.");
-                                       mInputTensorBuffers.release();
-                                       return ret;
-                               }
+       ret = mBackend->GetInputLayerProperty(mInputLayerProperty);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to get input layer property from backend engine.");
+               return ConvertEngineErrorToVisionError(ret);
+       }
 
-                               mInputTensorBuffers.addTensorBuffer(layer.first, tensor_buffer);
+       // If the backend engine isn't able to allocate input tensor buffers internally,
+       // then allocate the buffers here.
+       if (mInputTensorBuffers.empty()) {
+               for (auto &layer : mInputLayerProperty.layers) {
+                       inference_engine_tensor_buffer tensor_buffer;
+
+                       ret = mInputTensorBuffers.allocate(tensor_buffer, layer.second);
+                       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+                               LOGE("Fail to allocate tensor buffer.");
+                               mInputTensorBuffers.release();
+                               return ret;
                        }
+
+                       mInputTensorBuffers.addTensorBuffer(layer.first, tensor_buffer);
                }
+       }
 
-               LOGI("Input tensor buffer count is %zu", mInputTensorBuffers.size());
+       LOGI("Input tensor buffer count is %zu", mInputTensorBuffers.size());
 
-               // Get output tensor buffers from a backend engine if the backend engine allocated.
-               auto& outputTensorBuffers = mOutputTensorBuffers.getIETensorBuffer();
-               ret = mBackend->GetOutputTensorBuffers(outputTensorBuffers);
-               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                       LOGE("Fail to get output tensor buffers from backend engine.");
-                       return ConvertEngineErrorToVisionError(ret);
-               }
+       // Get output tensor buffers from the backend engine if it allocated them.
+       auto &outputTensorBuffers = mOutputTensorBuffers.getIETensorBuffer();
+       ret = mBackend->GetOutputTensorBuffers(outputTensorBuffers);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to get output tensor buffers from backend engine.");
+               return ConvertEngineErrorToVisionError(ret);
+       }
 
-               ret = mBackend->GetOutputLayerProperty(mOutputLayerProperty);
-               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                       LOGE("Fail to get output layer property from backend engine.");
-                       return ConvertEngineErrorToVisionError(ret);
-               }
+       ret = mBackend->GetOutputLayerProperty(mOutputLayerProperty);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to get output layer property from backend engine.");
+               return ConvertEngineErrorToVisionError(ret);
+       }
 
-               // If the backend engine isn't able to allocate output tensor buffers internally,
-               // then allocate the buffers at here.
-               if (mOutputTensorBuffers.empty()) {
-                       for (auto& layer : mOutputLayerProperty.layers) {
-                               inference_engine_tensor_buffer tensor_buffer;
-
-                               ret = mInputTensorBuffers.allocate(tensor_buffer, layer.second);
-                               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                                       LOGE("Fail to allocate tensor buffer.");
-                                       mInputTensorBuffers.release();
-                                       return ret;
-                               }
+       // If the backend engine isn't able to allocate output tensor buffers internally,
+       // then allocate the buffers here.
+       if (mOutputTensorBuffers.empty()) {
+               for (auto &layer : mOutputLayerProperty.layers) {
+                       inference_engine_tensor_buffer tensor_buffer;
 
-                               mOutputTensorBuffers.addTensorBuffer(layer.first, tensor_buffer);
+                       ret = mOutputTensorBuffers.allocate(tensor_buffer, layer.second);
+                       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+                               LOGE("Fail to allocate tensor buffer.");
+                               mOutputTensorBuffers.release();
+                               return ret;
                        }
+
+                       mOutputTensorBuffers.addTensorBuffer(layer.first, tensor_buffer);
                }
+       }
 
-               LOGI("Output tensor buffer count is %zu", mOutputTensorBuffers.size());
+       LOGI("Output tensor buffer count is %zu", mOutputTensorBuffers.size());
 
-               return MEDIA_VISION_ERROR_NONE;
-       }
+       return MEDIA_VISION_ERROR_NONE;
+}
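+
+// Note on the allocation flow above: buffers pre-allocated by the backend are
+// preferred; only when GetInputTensorBuffers()/GetOutputTensorBuffers() returns
+// an empty set are per-layer buffers allocated here from the fetched properties.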
 
-       int Inference::ConvertOutputDataTypeToFloat()
-       {
-               IETensorBuffer& ieTensorBuffers = mOutputTensorBuffers.getIETensorBuffer();
+int Inference::ConvertOutputDataTypeToFloat()
+{
+       IETensorBuffer &ieTensorBuffers = mOutputTensorBuffers.getIETensorBuffer();
 
-               for (auto& ieTensorBuffer : ieTensorBuffers) {
-                       auto& tensorBuffer = ieTensorBuffer.second;
+       for (auto &ieTensorBuffer : ieTensorBuffers) {
+               auto &tensorBuffer = ieTensorBuffer.second;
 
-                       // Normalize output tensor data converting it to float type in case of quantized model.
-                       if (tensorBuffer.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
-                               int ret = mOutputTensorBuffers.convertToFloat<unsigned char>(&tensorBuffer);
-                               if (ret != MEDIA_VISION_ERROR_NONE) {
-                                       LOGE("Fail to convert tensor data to float type.");
-                                       return ret;
-                               }
+               // Normalize output tensor data by converting it to float type in case of a quantized model.
+               if (tensorBuffer.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+                       int ret = mOutputTensorBuffers.convertToFloat<unsigned char>(&tensorBuffer);
+                       if (ret != MEDIA_VISION_ERROR_NONE) {
+                               LOGE("Fail to convert tensor data to float type.");
+                               return ret;
                        }
+               }
 
-                       if (tensorBuffer.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16) {
-                               int ret = mOutputTensorBuffers.convertToFloat<unsigned short>(&tensorBuffer);
-                               if (ret != MEDIA_VISION_ERROR_NONE) {
-                                       LOGE("Fail to convert tensor data to float type.");
-                                       return ret;
-                               }
+               if (tensorBuffer.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16) {
+                       int ret = mOutputTensorBuffers.convertToFloat<unsigned short>(&tensorBuffer);
+                       if (ret != MEDIA_VISION_ERROR_NONE) {
+                               LOGE("Fail to convert tensor data to float type.");
+                               return ret;
                        }
                }
-
-               return MEDIA_VISION_ERROR_NONE;
        }
 
-       int Inference::Bind(int backend_type, int device_type)
-       {
-               LOGI("ENTER");
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               int ret = CheckBackendType(static_cast<mv_inference_backend_type_e>(backend_type));
-               if (ret != MEDIA_VISION_ERROR_NONE)
-                       return ret;
+int Inference::Bind(int backend_type, int device_type)
+{
+       LOGI("ENTER");
+
+       int ret = CheckBackendType(static_cast<mv_inference_backend_type_e>(backend_type));
+       if (ret != MEDIA_VISION_ERROR_NONE)
+               return ret;
 
-               std::string backendName = mSupportedInferenceBackend[backend_type].first;
-               LOGI("backend string name: %s", backendName.c_str());
+       std::string backendName = mSupportedInferenceBackend[backend_type].first;
+       LOGI("backend string name: %s", backendName.c_str());
 
-               inference_engine_config config = {
-                       .backend_name = backendName,
-                       .backend_type = backend_type,
-                       // As a default, Target device is CPU. If user defined desired device type in json file
-                       // then the device type will be set by Load callback.
-                       .target_devices = device_type,
-               };
+       inference_engine_config config = {
+               .backend_name = backendName,
+               .backend_type = backend_type,
+               // By default, the target device is CPU. If the user defined a desired device
+               // type in the json file, then it will be set by the Load callback.
+               .target_devices = device_type,
+       };
 
-               // Create a backend class object.
-               try {
-                       mBackend = new InferenceEngineCommon();
+       // Create a backend class object.
+       try {
+               mBackend = new InferenceEngineCommon();
 
 #if ENABLE_INFERENCE_PROFILER
-                       mBackend->EnableProfiler(true);
-                       mBackend->DumpProfileToFile("profile_data_" + backendName + ".txt");
+               mBackend->EnableProfiler(true);
+               mBackend->DumpProfileToFile("profile_data_" + backendName + ".txt");
 #endif
-               } catch (const std::bad_alloc &ex) {
-                       LOGE("Fail to create backend : %s", ex.what());
-                       return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
-               }
+       } catch (const std::bad_alloc &ex) {
+               LOGE("Fail to create backend : %s", ex.what());
+               return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+       }
 
-               ret = MEDIA_VISION_ERROR_NONE;
+       ret = MEDIA_VISION_ERROR_NONE;
 
-               // Load configuration file if a given backend type is mlapi.
-               if (config.backend_type == MV_INFERENCE_BACKEND_MLAPI) {
-                       ret = mBackend->LoadConfigFile();
-                       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       }
-               }
-
-               // Bind a backend library.
-               ret = mBackend->BindBackend(&config);
+       // Load configuration file if a given backend type is mlapi.
+       if (config.backend_type == MV_INFERENCE_BACKEND_MLAPI) {
+               ret = mBackend->LoadConfigFile();
                if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                       LOGE("Fail to bind backend library.(%d)", ret);
                        return MEDIA_VISION_ERROR_INVALID_OPERATION;
                }
+       }
 
-               // Get capacity information from a backend.
-               ret = mBackend->GetBackendCapacity(&mBackendCapacity);
-               if (ret != MEDIA_VISION_ERROR_NONE) {
-                       mBackend->UnbindBackend();
-                       LOGE("Fail to get backend capacity.");
-                       return ret;
-               }
+       // Bind a backend library.
+       ret = mBackend->BindBackend(&config);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to bind backend library.(%d)", ret);
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+       }
 
-               if (!IsTargetDeviceSupported(mConfig.mTargetTypes)) {
-                       mBackend->UnbindBackend();
-                       LOGE("Tried to configure invalid target types.");
-                       return MEDIA_VISION_ERROR_NOT_SUPPORTED;
-               }
+       // Get capacity information from a backend.
+       ret = mBackend->GetBackendCapacity(&mBackendCapacity);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               mBackend->UnbindBackend();
+               LOGE("Fail to get backend capacity.");
+               return ret;
+       }
+
+       if (!IsTargetDeviceSupported(mConfig.mTargetTypes)) {
+               mBackend->UnbindBackend();
+               LOGE("Tried to configure invalid target types.");
+               return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+       }
 
-               LOGI("LEAVE");
+       LOGI("LEAVE");
 
-               return MEDIA_VISION_ERROR_NONE;
+       return MEDIA_VISION_ERROR_NONE;
+}
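+
+// Usage sketch (hypothetical backend and device choice; error handling elided):
+//   Inference inference;
+//   if (inference.Bind(MV_INFERENCE_BACKEND_TFLITE, INFERENCE_TARGET_CPU) ==
+//                      MEDIA_VISION_ERROR_NONE) {
+//           // backend library bound and its capacity checked against mConfig.mTargetTypes
+//   }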
+
+int Inference::Load(void)
+{
+       LOGI("ENTER");
+
+       std::string label_file = mConfig.mUserFilePath;
+       size_t userFileLength = label_file.length();
+       if (userFileLength > 0 && access(label_file.c_str(), F_OK)) {
+               LOGE("Label file path in [%s] ", label_file.c_str());
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       int Inference::Load(void)
-       {
-               LOGI("ENTER");
+       int ret = (userFileLength > 0) ? SetUserFile(label_file) : MEDIA_VISION_ERROR_NONE;
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to load label file.");
+               return ret;
+       }
 
-               std::string label_file = mConfig.mUserFilePath;
-               size_t userFileLength = label_file.length();
-               if (userFileLength > 0 && access(label_file.c_str(), F_OK)) {
-                       LOGE("Label file path in [%s] ", label_file.c_str());
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
+       // Check if model file is valid or not.
+       std::string ext_str = mConfig.mWeightFilePath.substr(mConfig.mWeightFilePath.find_last_of(".") + 1);
+       std::map<std::string, int>::iterator key = mModelFormats.find(ext_str);
+       if (key == mModelFormats.end()) {
+               LOGE("Invalid model file format.(ext = %s)", ext_str.c_str());
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
+
+       LOGI("%s model file has been detected.", ext_str.c_str());
+
+       std::vector<std::string> models;
+
+       inference_model_format_e model_format = static_cast<inference_model_format_e>(key->second);
+
+       // Push model file information to the models vector according to the detected model format.
+       switch (model_format) {
+       case INFERENCE_MODEL_CAFFE:
+       case INFERENCE_MODEL_TF:
+       case INFERENCE_MODEL_DARKNET:
+       case INFERENCE_MODEL_DLDT:
+       case INFERENCE_MODEL_ONNX:
+       case INFERENCE_MODEL_VIVANTE:
+               models.push_back(mConfig.mWeightFilePath);
+               models.push_back(mConfig.mConfigFilePath);
+               break;
+       case INFERENCE_MODEL_TFLITE:
+       case INFERENCE_MODEL_TORCH:
+       case INFERENCE_MODEL_NNTRAINER:
+       case INFERENCE_MODEL_SNPE:
+               models.push_back(mConfig.mWeightFilePath);
+               break;
+       default:
+               break;
+       }
+
+       // Request the backend engine to load the model.
+       ret = mBackend->Load(models, model_format);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to load model");
+               mCanRun = false;
+               std::vector<std::string>().swap(models);
+               return ConvertEngineErrorToVisionError(ret);
+       }
+
+       std::vector<std::string>().swap(models);
+
+       // Prepare input and output tensor buffers.
+       ret = PrepareTenosrBuffers();
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to prepare buffer");
+               mCanRun = false;
+               return ret;
+       }
+
+       mCanRun = true;
+
+       LOGI("LEAVE");
+
+       return ConvertEngineErrorToVisionError(ret);
+}
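+
+// Illustrative example using formats registered in the constructor: a ".dlc"
+// weight file resolves to INFERENCE_MODEL_SNPE, so only mConfig.mWeightFilePath
+// is pushed to models, while a ".bin" file (INFERENCE_MODEL_DLDT) pushes
+// mConfig.mConfigFilePath as well.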
 
-               int ret = (userFileLength > 0) ? SetUserFile(label_file) :
-                                                                                MEDIA_VISION_ERROR_NONE;
+int Inference::Preprocess(std::vector<mv_source_h> &mv_sources, std::vector<cv::Mat> &cv_sources)
+{
+       unsigned int src_idx = 0;
+
+       for (auto &buffer : mInputTensorBuffers.getIETensorBuffer()) {
+               inference_engine_tensor_buffer &tensor_buffer = buffer.second;
+               int data_type = ConvertToCv(tensor_buffer.data_type);
+               LayerInfo layerInfo;
+               Options opt;
+               mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+
+               mv_source_get_colorspace(mv_sources[src_idx], &colorspace);
+
+               if (mMetadata.GetInputMeta().IsParsed()) {
+                       layerInfo = mMetadata.GetInputMeta().GetLayer().at(buffer.first);
+
+                       if (!mMetadata.GetInputMeta().GetOption().empty())
+                               opt = mMetadata.GetInputMeta().GetOption().at(buffer.first);
+               } else {
+                       // P.S. In the legacy path there is no way to set model-specific dequantization
+                       // parameters (zero point and scale).
+                       // TODO: find a proper way to do it.
+                       opt.normalization.use = true;
+                       opt.normalization.mean.assign(3, mConfig.mMeanValue);
+                       opt.normalization.std.assign(3, mConfig.mStdValue);
+
+                       layerInfo.name = buffer.first;
+                       layerInfo.dims.push_back(mConfig.mTensorInfo.dim);
+                       layerInfo.dims.push_back(mConfig.mTensorInfo.height);
+                       layerInfo.dims.push_back(mConfig.mTensorInfo.width);
+                       layerInfo.dims.push_back(mConfig.mTensorInfo.ch);
+
+                       // P.S. In the legacy path there is no way to use a model-specific color space,
+                       // only a fixed one.
+                       // TODO: find a proper way to do it.
+                       layerInfo.colorSpace = MEDIA_VISION_COLORSPACE_RGB888;
+                       layerInfo.dataType = mConfig.mDataType;
+                       // TODO: find a proper way to set the shape type. In the legacy path there is no
+                       //       way to change it, yet a given inference engine may need a different
+                       //       shape type.
+                       layerInfo.shapeType = INFERENCE_TENSOR_SHAPE_NHWC;
+               }
+
+               // TODO: try-catch{} error handling
+               int ret = mPreProc.Run(cv_sources[src_idx++], colorspace, data_type, layerInfo, opt, tensor_buffer.buffer);
                if (ret != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Fail to load label file.");
+                       LOGE("Fail to run pre-process.");
                        return ret;
                }
+       }
 
-               // Check if model file is valid or not.
-               std::string ext_str = mConfig.mWeightFilePath.substr(
-                               mConfig.mWeightFilePath.find_last_of(".") + 1);
-               std::map<std::string, int>::iterator key = mModelFormats.find(ext_str);
-               if (key == mModelFormats.end()) {
-                       LOGE("Invalid model file format.(ext = %s)", ext_str.c_str());
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
+       return MEDIA_VISION_ERROR_NONE;
+}
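Preprocess() above fills Options with mean/std normalization on the legacy path. A rough sketch of what that option amounts to, assuming PreProcess::Run applies (x - mean) / std per channel; the actual implementation lives in the pre-processing code, not shown here:

#include <opencv2/core.hpp>

// Illustrative only: the same mean/std is broadcast over all three channels,
// matching opt.normalization.mean/std assigned in the legacy branch above.
static void NormalizeLegacy(cv::Mat &img, double mean, double stdv)
{
	img.convertTo(img, CV_32F);                  // backend expects a float tensor
	img = (img - cv::Scalar::all(mean)) / stdv;  // (x - mean) / std per pixel
}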
 
-               LOGI("%s model file has been detected.", ext_str.c_str());
+int Inference::Run(std::vector<mv_source_h> &mvSources, std::vector<mv_rectangle_s> &rects)
+{
+       int ret = INFERENCE_ENGINE_ERROR_NONE;
 
-               std::vector<std::string> models;
+       if (!mCanRun) {
+               LOGE("Inference is not ready to run.");
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+       }
 
-               inference_model_format_e model_format =
-                               static_cast<inference_model_format_e>(key->second);
+       if (mvSources.empty()) {
+               LOGE("mvSources is empty; it should contain one mv source.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
 
-               // Push model file information to models vector properly according to detected model format.
-               switch (model_format) {
-               case INFERENCE_MODEL_CAFFE:
-               case INFERENCE_MODEL_TF:
-               case INFERENCE_MODEL_DARKNET:
-               case INFERENCE_MODEL_DLDT:
-               case INFERENCE_MODEL_ONNX:
-               case INFERENCE_MODEL_VIVANTE:
-                       models.push_back(mConfig.mWeightFilePath);
-                       models.push_back(mConfig.mConfigFilePath);
-                       break;
-               case INFERENCE_MODEL_TFLITE:
-               case INFERENCE_MODEL_TORCH:
-               case INFERENCE_MODEL_NNTRAINER:
-               case INFERENCE_MODEL_SNPE:
-                       models.push_back(mConfig.mWeightFilePath);
-                       break;
-               default:
-                       break;
-               }
+       // Only one input source can be requested for the inference as of now.
+       if (mvSources.size() > 1) {
+               LOGE("Only one mv source is allowed for the inference.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
 
-               // Request model loading to backend engine.
-               ret = mBackend->Load(models, model_format);
-               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                       LOGE("Fail to load model");
-                       mCanRun = false;
-                       std::vector<std::string>().swap(models);
-                       return ConvertEngineErrorToVisionError(ret);
-               }
+       if (!rects.empty() && rects.size() != mvSources.size()) {
+               LOGE("mvSources.size() should be the same as rects.size() when rects is not empty.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
 
-               std::vector<std::string>().swap(models);
+       if (mConfig.mTensorInfo.ch != 1 && mConfig.mTensorInfo.ch != 3) {
+               LOGE("Unsupported channel count. Only 1 or 3 channels are supported.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
 
-               // Prepare input and output tensor buffers.
-               ret = PrepareTenosrBuffers();
-               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                       LOGE("Fail to prepare buffer");
-                       mCanRun = false;
-                       return ret;
-               }
+       std::vector<cv::Mat> cvSources;
 
-               mCanRun = true;
+       ret = ConvertToCvSource(mvSources, cvSources, rects);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to convert mv source to cv source.");
+               return ret;
+       }
 
-               LOGI("LEAVE");
+       // mSourceSize is the original input image's size.
+       // TODO. consider multiple cv sources.
+       mSourceSize = cvSources[0].size();
 
-               return ConvertEngineErrorToVisionError(ret);
+       ret = Preprocess(mvSources, cvSources);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to preprocess given input sources.");
+               return ret;
        }
 
-       int Inference::Preprocess(std::vector<mv_source_h>& mv_sources, std::vector<cv::Mat>& cv_sources)
-       {
-               unsigned int src_idx = 0;
-
-               for (auto& buffer : mInputTensorBuffers.getIETensorBuffer()) {
-                       inference_engine_tensor_buffer& tensor_buffer = buffer.second;
-                       int data_type = ConvertToCv(tensor_buffer.data_type);
-                       LayerInfo layerInfo;
-                       Options opt;
-                       mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
-
-                       mv_source_get_colorspace(mv_sources[src_idx], &colorspace);
-
-                       if (mMetadata.GetInputMeta().IsParsed()) {
-                               layerInfo = mMetadata.GetInputMeta().GetLayer().at(buffer.first);
-
-                               if (!mMetadata.GetInputMeta().GetOption().empty())
-                                       opt = mMetadata.GetInputMeta().GetOption().at(buffer.first);
-                       } else {
-                               // Ps. in case of legacy way, there is no way to set model specific dequantization parameters - zero point and scale.
-                               // TODO. find a proper way for it.
-                               opt.normalization.use = true;
-                               opt.normalization.mean.assign(3, mConfig.mMeanValue);
-                               opt.normalization.std.assign(3, mConfig.mStdValue);
-
-                               layerInfo.name = buffer.first;
-                               layerInfo.dims.push_back(mConfig.mTensorInfo.dim);
-                               layerInfo.dims.push_back(mConfig.mTensorInfo.height);
-                               layerInfo.dims.push_back(mConfig.mTensorInfo.width);
-                               layerInfo.dims.push_back(mConfig.mTensorInfo.ch);
-
-                               // Ps. in case of legacy way, there is no way to use model specific color space but only fixed one.
-                               // TODO. find a proper way for it.
-                               layerInfo.colorSpace = MEDIA_VISION_COLORSPACE_RGB888;
-                               layerInfo.dataType = mConfig.mDataType;
-                               // TODO. find a proper way for setting the shape type. In case of legacy way, there is no way to change the shape type properly.
-                               //       According to a given inference engine, different shape type can be needed.
-                               layerInfo.shapeType = INFERENCE_TENSOR_SHAPE_NHWC;
-                       }
+       ret = mBackend->Run(mInputTensorBuffers.getIETensorBuffer(), mOutputTensorBuffers.getIETensorBuffer());
+       if (ret != INFERENCE_ENGINE_ERROR_NONE)
+               return ret;
 
-                       // TODO: try-catch{} error handling
-                       int ret = mPreProc.Run(cv_sources[src_idx++], colorspace, data_type, layerInfo, opt, tensor_buffer.buffer);
-                       if (ret != MEDIA_VISION_ERROR_NONE) {
-                               LOGE("Fail to run pre-process.");
-                               return ret;
-                       }
-               }
+       return ConvertOutputDataTypeToFloat();
+}
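A hypothetical caller sketch for the Run() overload above: exactly one mv source is accepted today, with rects either empty (whole frame) or matching the source count. The handle and variable names below are assumptions, not part of the patch:

std::vector<mv_source_h> sources = { source };  // exactly one source is allowed
std::vector<mv_rectangle_s> rois;               // empty: infer on the full frame
int err = inference.Run(sources, rois);
if (err != MEDIA_VISION_ERROR_NONE)
	LOGE("inference failed with %d", err);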
 
-               return MEDIA_VISION_ERROR_NONE;
+int Inference::Run(std::vector<void *> &buffer_objs)
+{
+       int ret = INFERENCE_ENGINE_ERROR_NONE;
 
+       if (!mCanRun) {
+               LOGE("Inference is not ready to run.");
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       int Inference::Run(std::vector<mv_source_h> &mvSources,
-                                          std::vector<mv_rectangle_s> &rects)
-       {
-               int ret = INFERENCE_ENGINE_ERROR_NONE;
+       if (buffer_objs.empty()) {
+               LOGE("buffer_objs is empty; it should contain one raw buffer.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
 
-               if (!mCanRun) {
-                       LOGE("Invalid to run inference");
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
+       // Only one input source can be requested for the inference as of now.
+       if (buffer_objs.size() > 1) {
+               LOGE("Only one raw source is allowed for the inference.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
 
-               if (mvSources.empty()) {
-                       LOGE("mvSources should contain only one cv source.");
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
+       if (mInputTensorBuffers.getIETensorBuffer().size() != buffer_objs.size()) {
+               LOGE("Raw source count is invalid.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
 
-               // We are able to request Only one input data for the inference as of now.
-               if (mvSources.size() > 1) {
-                       LOGE("It allows only one mv source for the inference.");
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
+       unsigned int buffer_idx = 0;
 
-               if (!rects.empty() && rects.size() != mvSources.size()) {
-                       LOGE("mvSources.size() should be same as rects.size() if rects.empty() is false.");
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
+       for (auto &buffer : mInputTensorBuffers.getIETensorBuffer()) {
+               inference_engine_tensor_buffer &tensor_buffer = buffer.second;
+               inference_engine_tensor_buffer *buffer_obj =
+                               static_cast<inference_engine_tensor_buffer *>(buffer_objs[buffer_idx]);
 
-               if (mConfig.mTensorInfo.ch != 1 && mConfig.mTensorInfo.ch != 3) {
-                       LOGE("Channel not supported.");
+               if (tensor_buffer.size != buffer_obj->size) {
+                       LOGE("Raw buffer size is invalid.");
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
                }
 
-               std::vector<cv::Mat> cvSources;
+               LOGI("Number of tensor bytes: %zu", buffer_obj->size);
 
-               ret = ConvertToCvSource(mvSources, cvSources, rects);
-               if (ret != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Fail to convert mv source to cv source.");
-                       return ret;
-               }
+               memcpy(tensor_buffer.buffer, buffer_obj->buffer, tensor_buffer.size);
+               buffer_idx++;
+       }
 
-               // mSourceSize is original input image's size
-               // TODO. consider multiple cv sources.
-               mSourceSize = cvSources[0].size();
+       ret = mBackend->Run(mInputTensorBuffers.getIETensorBuffer(), mOutputTensorBuffers.getIETensorBuffer());
+       if (ret != INFERENCE_ENGINE_ERROR_NONE)
+               return ret;
 
-               ret = Preprocess(mvSources, cvSources);
-               if (ret != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Fail to preprocess given input sources.");
-                       return ret;
-               }
+       return ConvertOutputDataTypeToFloat();
+}
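The raw-buffer overload above expects one inference_engine_tensor_buffer per input tensor with an exactly matching byte size; each payload is memcpy'd into the engine's own buffer. A hedged usage sketch; the variable names are assumptions:

inference_engine_tensor_buffer raw = {};
raw.buffer = preprocessed_data;    // caller-owned, already preprocessed
raw.size = expected_tensor_bytes;  // must equal the engine tensor's size
std::vector<void *> inputs = { &raw };
int err = inference.Run(inputs);   // copies raw.buffer into the input tensor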
 
-               ret = mBackend->Run(mInputTensorBuffers.getIETensorBuffer(),
-                                                       mOutputTensorBuffers.getIETensorBuffer());
-               if (ret != INFERENCE_ENGINE_ERROR_NONE)
-                       return ret;
+std::pair<std::string, bool> Inference::GetSupportedInferenceBackend(int backend)
+{
+       return mSupportedInferenceBackend[backend];
+}
 
-               return ConvertOutputDataTypeToFloat();
+int Inference::GetClassficationResults(ImageClassificationResults *results)
+{
+       // Will contain top N results in ascending order.
+       std::vector<std::pair<float, int> > topScore;
+       auto threshold = mConfig.mConfidenceThresHold;
+       constexpr unsigned int default_top_number = 5;
+       tensor_t outputTensorInfo;
+
+       // Get the inference result and store it in outputTensorInfo.
+       int ret = mOutputTensorBuffers.GetTensorInfo(mOutputLayerProperty, outputTensorInfo);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to get output result.");
+               return ret;
        }
 
-       int Inference::Run(std::vector<void *>& buffer_objs)
-       {
-               int ret = INFERENCE_ENGINE_ERROR_NONE;
+       PostProcess postProc;
+       unsigned int classes = outputTensorInfo.dimInfo[0][1];
+       unsigned int top_number = default_top_number;
+
+       if (mMetadata.GetOutputMeta().IsParsed()) {
+               OutputMetadata outputMetadata = mMetadata.GetOutputMeta();
+               std::vector<int> indexes = outputMetadata.GetScoreDimInfo().GetValidIndexAll();
 
-               if (!mCanRun) {
-                       LOGE("Invalid to run inference");
+               if (indexes.size() != 1) {
+                       LOGE("Invalid dim size. It should be 1");
                        return MEDIA_VISION_ERROR_INVALID_OPERATION;
                }
 
-               if (buffer_objs.empty()) {
-                       LOGE("cvSources should contain only one cv source.");
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+               if (!mOutputTensorBuffers.exist(outputMetadata.GetScoreName())) {
+                       LOGE("output buffer is NULL");
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
                }
 
-               // We are able to request Only one input data for the inference as of now.
-               if (buffer_objs.size() > 1) {
-                       LOGE("It allows only one source for the inference.");
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
+               top_number = outputMetadata.GetScoreTopNumber();
+               threshold = outputMetadata.GetScoreThreshold();
 
-               if (mInputTensorBuffers.getIETensorBuffer().size() != buffer_objs.size()) {
-                       LOGE("Raw source count is not invalid.");
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
+               classes = mOutputLayerProperty.layers[outputMetadata.GetScoreName()].shape[indexes[0]];
+       }
 
-               unsigned int buffer_idx = 0;
+       postProc.ScoreClear(top_number);
 
-               for (auto& buffer : mInputTensorBuffers.getIETensorBuffer()) {
-                       inference_engine_tensor_buffer& tensor_buffer = buffer.second;
-                       inference_engine_tensor_buffer *buffer_obj = static_cast<inference_engine_tensor_buffer *>(buffer_objs[buffer_idx]);
+       auto *prediction = reinterpret_cast<float *>(outputTensorInfo.data[0]);
 
-                       if (tensor_buffer.size != buffer_obj->size) {
-                               LOGE("Raw buffer size is invalid.");
-                               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-                       }
+       LOGI("class count: %u", classes);
+
+       for (unsigned int idx = 0; idx < classes; ++idx) {
+               float value = prediction[idx];
+
+               if (mMetadata.GetOutputMeta().IsParsed()) {
+                       OutputMetadata outputMetadata = mMetadata.GetOutputMeta();
 
-                       LOGI("A number of tensor bytes : %zu", buffer_obj->size);
+                       if (outputMetadata.GetScoreDeQuant()) {
+                               value = PostProcess::dequant(value, outputMetadata.GetScoreDeQuantScale(),
+                                                                                        outputMetadata.GetScoreDeQuantZeroPoint());
+                       }
 
-                       memcpy(tensor_buffer.buffer, buffer_obj->buffer, tensor_buffer.size);
-                       buffer_idx++;
+                       if (outputMetadata.GetScoreType() == INFERENCE_SCORE_TYPE_SIGMOID)
+                               value = PostProcess::sigmoid(value);
                }
 
-               ret = mBackend->Run(mInputTensorBuffers.getIETensorBuffer(),
-                                                       mOutputTensorBuffers.getIETensorBuffer());
-               if (ret != INFERENCE_ENGINE_ERROR_NONE)
-                       return ret;
+               if (value < threshold)
+                       continue;
 
-               return ConvertOutputDataTypeToFloat();
+               postProc.ScorePush(value, idx);
        }
 
-       std::pair<std::string, bool>
-       Inference::GetSupportedInferenceBackend(int backend)
-       {
-               return mSupportedInferenceBackend[backend];
+       postProc.ScorePop(topScore);
+       results->number_of_classes = 0;
+
+       for (auto &score : topScore) {
+               LOGI("score: %.3f, threshold: %.3f", score.first, threshold);
+               LOGI("idx:%d", score.second);
+               LOGI("classProb: %.3f", score.first);
+
+               results->indices.push_back(score.second);
+               results->confidences.push_back(score.first);
+               results->names.push_back(mUserListName[score.second]);
+               results->number_of_classes++;
        }
 
-       int Inference::GetClassficationResults(ImageClassificationResults *results)
-       {
-               // Will contain top N results in ascending order.
-               std::vector<std::pair<float, int>> topScore;
-               auto threadHold = mConfig.mConfidenceThresHold;
-               constexpr unsigned int default_top_number = 5;
-               tensor_t outputTensorInfo;
+       LOGE("Inference: GetClassificationResults: %d\n", results->number_of_classes);
+       return MEDIA_VISION_ERROR_NONE;
+}
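The score path above dequantizes each class score when the metadata asks for it, then optionally applies a sigmoid before thresholding. A sketch of the two transforms, assuming the conventional affine form behind PostProcess::dequant (the real implementation is elsewhere):

#include <cmath>

static float Dequant(float v, float scale, float zeroPoint)
{
	return scale * (v - zeroPoint);  // affine dequantization (assumed form)
}

static float Sigmoid(float v)
{
	return 1.0f / (1.0f + std::exp(-v));  // squashes a logit into (0, 1)
}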
 
-               // Get inference result and contain it to outputTensorInfo.
-               int ret = mOutputTensorBuffers.GetTensorInfo(mOutputLayerProperty, outputTensorInfo);
-               if (ret != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Fail to get output result.");
-                       return ret;
+int Inference::GetObjectDetectionResults(ObjectDetectionResults *results)
+{
+       if (mMetadata.GetOutputMeta().IsParsed()) {
+               OutputMetadata &outputMeta = mMetadata.GetOutputMeta();
+
+               // decoding type
+               if (!mOutputTensorBuffers.exist(outputMeta.GetBoxName()) ||
+                       !mOutputTensorBuffers.exist(outputMeta.GetScoreName())) {
+                       LOGE("output buffers named %s or %s are NULL", outputMeta.GetBoxName().c_str(),
+                                outputMeta.GetScoreName().c_str());
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
                }
 
-               PostProcess postProc;
-               unsigned int classes = outputTensorInfo.dimInfo[0][1];
-               unsigned int top_number = default_top_number;
+               int boxOffset = 0;
+               int numberOfObjects = 0;
 
-               if (mMetadata.GetOutputMeta().IsParsed()) {
-                       OutputMetadata outputMetadata = mMetadata.GetOutputMeta();
-                       std::vector<int> indexes = outputMetadata.GetScoreDimInfo().GetValidIndexAll();
-
-                       if (indexes.size() != 1) {
+               if (outputMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_BYPASS) {
+                       std::vector<int> boxIndexes = outputMeta.GetBoxDimInfo().GetValidIndexAll();
+                       if (boxIndexes.size() != 1) {
                                LOGE("Invalid dim size. It should be 1");
                                return MEDIA_VISION_ERROR_INVALID_OPERATION;
                        }
-
-                       if (!mOutputTensorBuffers.exist(outputMetadata.GetScoreName())) {
-                               LOGE("output buffe is NULL");
+                       boxOffset = mOutputLayerProperty.layers[outputMeta.GetBoxName()].shape[boxIndexes[0]];
+               } else if (outputMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR) {
+                       std::vector<int> boxIndexes = outputMeta.GetBoxDimInfo().GetValidIndexAll();
+                       if (boxIndexes.size() != 1) {
+                               LOGE("Invalid dim size. It should be 1");
                                return MEDIA_VISION_ERROR_INVALID_OPERATION;
                        }
+                       boxOffset = mOutputLayerProperty.layers[outputMeta.GetBoxName()].shape[boxIndexes[0]];
 
-                       top_number = outputMetadata.GetScoreTopNumber();
-                       threadHold = outputMetadata.GetScoreThreshold();
+                       std::vector<int> scoreIndexes = outputMeta.GetScoreDimInfo().GetValidIndexAll();
+                       if (scoreIndexes.size() != 1) {
+                               LOGE("Invalid dim size. It should be 1");
+                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+                       }
+                       numberOfObjects = mOutputLayerProperty.layers[outputMeta.GetScoreName()].shape[scoreIndexes[0]];
+               } else { // INFERENCE_BOX_DECODING_TYPE_YOLO_ANCHOR
+                       std::vector<int> boxIndexes = outputMeta.GetBoxDimInfo().GetValidIndexAll();
+                       if (boxIndexes.size() != 1) {
+                               LOGE("Invalid dim size. It should be 1");
+                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+                       }
+                       boxOffset = mOutputLayerProperty.layers[outputMeta.GetBoxName()].shape[boxIndexes[0]];
+                       numberOfObjects = boxOffset / outputMeta.GetBoxDecodeInfo().GetCellNumScales() - 5;
+               }
+
+               ObjectDecoder objDecoder(mOutputTensorBuffers, outputMeta, boxOffset,
+                                                                static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getWidth()),
+                                                                static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getHeight()),
+                                                                numberOfObjects);
+
+               objDecoder.init();
+               objDecoder.decode();
+               results->number_of_objects = 0;
+
+               for (auto &box : objDecoder.getObjectAll()) {
+                       results->indices.push_back(box.index);
+                       results->names.push_back(mUserListName[box.index]);
+                       results->confidences.push_back(box.score);
+                       results->locations.push_back(
+                                       cv::Rect(static_cast<int>((box.location.x - box.location.width * 0.5f) *
+                                                                                         static_cast<float>(mSourceSize.width)),
+                                                        static_cast<int>((box.location.y - box.location.height * 0.5f) *
+                                                                                         static_cast<float>(mSourceSize.height)),
+                                                        static_cast<int>(box.location.width * static_cast<float>(mSourceSize.width)),
+                                                        static_cast<int>(box.location.height * static_cast<float>(mSourceSize.height))));
+                       results->number_of_objects++;
+               }
+
+               LOGI("Inference: GetObjectDetectionResults: %d\n", results->number_of_objects);
+       } else {
+               tensor_t outputTensorInfo;
 
-                       classes = mOutputLayerProperty.layers[outputMetadata.GetScoreName()].shape[indexes[0]];
+               // Get the inference result and store it in outputTensorInfo.
+               int ret = mOutputTensorBuffers.GetTensorInfo(mOutputLayerProperty, outputTensorInfo);
+               if (ret != MEDIA_VISION_ERROR_NONE) {
+                       LOGE("Fail to get output result.");
+                       return ret;
                }
 
-               postProc.ScoreClear(top_number);
+               // In case of object detection,
+               // one model may apply post-processing while others may not.
+               // Thus, those cases should be handled separately.
+
+               float *boxes = nullptr;
+               float *classes = nullptr;
+               float *scores = nullptr;
+               int number_of_detections = 0;
+
+               if (outputTensorInfo.dimInfo.size() == 1) {
+                       // There is no way to know how many objects were detected unless the backend
+                       // provides the count explicitly, so each backend has to report the number of
+                       // results itself. For example, with OpenCV, MobilenetV1-SSD writes the count
+                       // to the 1st element, i.e., outputTensorInfo.data[0]: the shape is 1x1xNx7 and
+                       // the 1st of the 7 values holds the image id, which is useless when batch mode
+                       // isn't supported, so that slot is reused as the detection count.
+
+                       number_of_detections = static_cast<int>(*reinterpret_cast<float *>(outputTensorInfo.data[0]));
+                       cv::Mat cvOutputData(number_of_detections, outputTensorInfo.dimInfo[0][3], CV_32F,
+                                                                outputTensorInfo.data[0]);
+
+                       // boxes
+                       cv::Mat cvLeft = cvOutputData.col(3).clone();
+                       cv::Mat cvTop = cvOutputData.col(4).clone();
+                       cv::Mat cvRight = cvOutputData.col(5).clone();
+                       cv::Mat cvBottom = cvOutputData.col(6).clone();
+                       cv::Mat cvScores, cvClasses, cvBoxes;
+                       cv::Mat cvBoxElems[] = { cvTop, cvLeft, cvBottom, cvRight };
 
-               auto *prediction = reinterpret_cast<float *>(outputTensorInfo.data[0]);
+                       cv::hconcat(cvBoxElems, 4, cvBoxes);
 
-               LOGI("class count: %d", classes);
+                       // classes
+                       cvClasses = cvOutputData.col(1).clone();
 
-               for (unsigned int idx = 0; idx < classes; ++idx) {
-                       float value = prediction[idx];
+                       // scores
+                       cvScores = cvOutputData.col(2).clone();
 
-                       if (mMetadata.GetOutputMeta().IsParsed()) {
-                               OutputMetadata outputMetadata = mMetadata.GetOutputMeta();
+                       boxes = cvBoxes.ptr<float>(0);
+                       classes = cvClasses.ptr<float>(0);
+                       scores = cvScores.ptr<float>(0);
+               } else {
+                       boxes = reinterpret_cast<float *>(outputTensorInfo.data[0]);
+                       classes = reinterpret_cast<float *>(outputTensorInfo.data[1]);
+                       scores = reinterpret_cast<float *>(outputTensorInfo.data[2]);
+                       number_of_detections = static_cast<int>(*reinterpret_cast<float *>(outputTensorInfo.data[3]));
+               }
 
-                               if (outputMetadata.GetScoreDeQuant()) {
-                                       value = PostProcess::dequant(value,
-                                                                                       outputMetadata.GetScoreDeQuantScale(),
-                                                                                       outputMetadata.GetScoreDeQuantZeroPoint());
-                               }
+               LOGI("number_of_detections = %d", number_of_detections);
 
-                               if (outputMetadata.GetScoreType() == INFERENCE_SCORE_TYPE_SIGMOID)
-                                       value = PostProcess::sigmoid(value);
-                       }
+               results->number_of_objects = 0;
 
-                       if (value < threadHold)
+               for (int idx = 0; idx < number_of_detections; ++idx) {
+                       if (scores[idx] < mConfig.mConfidenceThresHold)
                                continue;
 
-                       postProc.ScorePush(value, idx);
-               }
+                       int left = static_cast<int>(boxes[idx * 4 + 1] * mSourceSize.width);
+                       int top = static_cast<int>(boxes[idx * 4 + 0] * mSourceSize.height);
+                       int right = static_cast<int>(boxes[idx * 4 + 3] * mSourceSize.width);
+                       int bottom = static_cast<int>(boxes[idx * 4 + 2] * mSourceSize.height);
+                       cv::Rect loc;
 
-               postProc.ScorePop(topScore);
-               results->number_of_classes = 0;
+                       loc.x = left;
+                       loc.y = top;
+                       loc.width = right - left + 1;
+                       loc.height = bottom - top + 1;
 
-               for (auto& score : topScore) {
-                       LOGI("score: %.3f, threshold: %.3f", score.first, threadHold);
-                       LOGI("idx:%d", score.second);
-                       LOGI("classProb: %.3f", score.first);
+                       results->indices.push_back(static_cast<int>(classes[idx]));
+                       results->confidences.push_back(scores[idx]);
+                       results->names.push_back(mUserListName[static_cast<int>(classes[idx])]);
+                       results->locations.push_back(loc);
+                       results->number_of_objects++;
 
-                       results->indices.push_back(score.second);
-                       results->confidences.push_back(score.first);
-                       results->names.push_back(mUserListName[score.second]);
-                       results->number_of_classes++;
+                       LOGI("objectClass: %d", static_cast<int>(classes[idx]));
+                       LOGI("confidence:%f", scores[idx]);
+                       LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
                }
 
-               LOGE("Inference: GetClassificationResults: %d\n", results->number_of_classes);
-               return MEDIA_VISION_ERROR_NONE;
+               LOGI("Inference: GetObjectDetectionResults: %d\n", results->number_of_objects);
        }
 
-       int Inference::GetObjectDetectionResults(
-                       ObjectDetectionResults *results)
-       {
-               if (mMetadata.GetOutputMeta().IsParsed()) {
-                       OutputMetadata& outputMeta = mMetadata.GetOutputMeta();
+       return MEDIA_VISION_ERROR_NONE;
+}
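The decoded-box branch above converts a normalized center/size box into a pixel-space, top-left-anchored cv::Rect scaled by the original source size (mSourceSize). The same arithmetic as a small helper; a sketch, not part of the patch:

#include <opencv2/core.hpp>

static cv::Rect ToPixelRect(const cv::Rect2f &box, const cv::Size &src)
{
	// box.x/box.y hold the normalized center; shift by half the size first.
	return cv::Rect(static_cast<int>((box.x - box.width * 0.5f) * src.width),
			static_cast<int>((box.y - box.height * 0.5f) * src.height),
			static_cast<int>(box.width * src.width),
			static_cast<int>(box.height * src.height));
}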
 
-                       // decoding type
-                       if (!mOutputTensorBuffers.exist(outputMeta.GetBoxName()) ||
-                               !mOutputTensorBuffers.exist(outputMeta.GetScoreName()) ){
-                               LOGE("output buffers named of %s or %s are NULL",
-                                       outputMeta.GetBoxName().c_str(), outputMeta.GetScoreName().c_str());
-                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       }
-
-                       int boxOffset = 0;
-                       int numberOfObjects = 0;
-
-                       if (outputMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_BYPASS) {
-                               std::vector<int> boxIndexes = outputMeta.GetBoxDimInfo().GetValidIndexAll();
-                               if (boxIndexes.size() != 1) {
-                                       LOGE("Invalid dim size. It should be 1");
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-                               boxOffset = mOutputLayerProperty.layers[outputMeta.GetBoxName()].shape[boxIndexes[0]];
-                       } else if (outputMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR) {
-                               std::vector<int> boxIndexes = outputMeta.GetBoxDimInfo().GetValidIndexAll();
-                               if (boxIndexes.size() != 1) {
-                                       LOGE("Invalid dim size. It should be 1");
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-                               boxOffset = mOutputLayerProperty.layers[outputMeta.GetBoxName()].shape[boxIndexes[0]];
+int Inference::GetFaceDetectionResults(FaceDetectionResults *results)
+{
+       if (mMetadata.GetOutputMeta().IsParsed()) {
+               OutputMetadata &outputMeta = mMetadata.GetOutputMeta();
+
+               // decoding type
+               if (!mOutputTensorBuffers.exist(outputMeta.GetBoxName()) ||
+                       !mOutputTensorBuffers.exist(outputMeta.GetScoreName())) {
+                       LOGE("output buffers named %s or %s are NULL", outputMeta.GetBoxName().c_str(),
+                                outputMeta.GetScoreName().c_str());
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
+               }
 
-                               std::vector<int> scoreIndexes = outputMeta.GetScoreDimInfo().GetValidIndexAll();
-                               if (scoreIndexes.size() != 1) {
-                                       LOGE("Invalid dim size. It should be 1");
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-                               numberOfObjects = mOutputLayerProperty.layers[outputMeta.GetScoreName()].shape[scoreIndexes[0]];
-                       } else { // INFERENCE_BOX_DECODING_TYPE_YOLO_ANCHOR
-                               std::vector<int> boxIndexes = outputMeta.GetBoxDimInfo().GetValidIndexAll();
-                               if (boxIndexes.size() != 1) {
-                                       LOGE("Invalid dim size. It should be 1");
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-                               boxOffset = mOutputLayerProperty.layers[outputMeta.GetBoxName()].shape[boxIndexes[0]];
-                               numberOfObjects = boxOffset / outputMeta.GetBoxDecodeInfo().GetCellNumScales() - 5;
-                       }
+               int boxOffset = 0;
+               int numberOfFaces = 0;
 
-                       ObjectDecoder objDecoder(mOutputTensorBuffers, outputMeta, boxOffset,
-                                               static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getWidth()),
-                                               static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getHeight()),
-                                               numberOfObjects);
-
-                       objDecoder.init();
-                       objDecoder.decode();
-                       results->number_of_objects = 0;
-
-                       for (auto& box : objDecoder.getObjectAll()) {
-                               results->indices.push_back(box.index);
-                               results->names.push_back(mUserListName[box.index]);
-                               results->confidences.push_back(box.score);
-                               results->locations.push_back(cv::Rect(
-                                               static_cast<int>((box.location.x -  box.location.width * 0.5f) * static_cast<float>(mSourceSize.width)),
-                                               static_cast<int>((box.location.y -  box.location.height * 0.5f) * static_cast<float>(mSourceSize.height)),
-                                               static_cast<int>(box.location.width *  static_cast<float>(mSourceSize.width)),
-                                               static_cast<int>(box.location.height * static_cast<float>(mSourceSize.height))));
-                               results->number_of_objects++;
+               if (outputMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_BYPASS) {
+                       std::vector<int> boxIndexes = outputMeta.GetBoxDimInfo().GetValidIndexAll();
+                       if (boxIndexes.size() != 1) {
+                               LOGE("Invalid dim size. It should be 1");
+                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
                        }
-
-                       LOGI("Inference: GetObjectDetectionResults: %d\n", results->number_of_objects);
+                       boxOffset = mOutputLayerProperty.layers[outputMeta.GetBoxName()].shape[boxIndexes[0]];
                } else {
-                       tensor_t outputTensorInfo;
-
-                       // Get inference result and contain it to outputTensorInfo.
-                       int ret = mOutputTensorBuffers.GetTensorInfo(mOutputLayerProperty, outputTensorInfo);
-                       if (ret != MEDIA_VISION_ERROR_NONE) {
-                               LOGE("Fail to get output result.");
-                               return ret;
-                       }
-
-                       // In case of object detection,
-                       // a model may apply post-process but others may not.
-                       // Thus, those cases should be hanlded separately.
-
-                       float *boxes = nullptr;
-                       float *classes = nullptr;
-                       float *scores = nullptr;
-                       int number_of_detections = 0;
-
-                       if (outputTensorInfo.dimInfo.size() == 1) {
-                               // there is no way to know how many objects are detect unless the number of objects aren't
-                               // provided. In the case, each backend should provide the number of results manually.
-                               // For example, in OpenCV, MobilenetV1-SSD doesn't provide it so the number of objects are
-                               // written to the 1st element i.e., outputTensorInfo.data[0] (the shape is 1x1xNx7 and the 1st of 7
-                               // indicates the image id. But it is useless if a batch mode isn't supported.
-                               // So, use the 1st of 7.
-
-                               number_of_detections = static_cast<int>(
-                                               *reinterpret_cast<float *>(outputTensorInfo.data[0]));
-                               cv::Mat cvOutputData(number_of_detections, outputTensorInfo.dimInfo[0][3],
-                                                                       CV_32F, outputTensorInfo.data[0]);
-
-                               // boxes
-                               cv::Mat cvLeft = cvOutputData.col(3).clone();
-                               cv::Mat cvTop = cvOutputData.col(4).clone();
-                               cv::Mat cvRight = cvOutputData.col(5).clone();
-                               cv::Mat cvBottom = cvOutputData.col(6).clone();
-                               cv::Mat cvScores, cvClasses, cvBoxes;
-                               cv::Mat cvBoxElems[] = { cvTop, cvLeft, cvBottom, cvRight };
-
-                               cv::hconcat(cvBoxElems, 4, cvBoxes);
-
-                               // classes
-                               cvClasses = cvOutputData.col(1).clone();
-
-                               // scores
-                               cvScores = cvOutputData.col(2).clone();
-
-                               boxes = cvBoxes.ptr<float>(0);
-                               classes = cvClasses.ptr<float>(0);
-                               scores = cvScores.ptr<float>(0);
-                       } else {
-                               boxes = reinterpret_cast<float *>(outputTensorInfo.data[0]);
-                               classes = reinterpret_cast<float *>(outputTensorInfo.data[1]);
-                               scores = reinterpret_cast<float *>(outputTensorInfo.data[2]);
-                               number_of_detections = (int) (*reinterpret_cast<float *>(outputTensorInfo.data[3]));
+                       std::vector<int> boxIndexes = outputMeta.GetBoxDimInfo().GetValidIndexAll();
+                       if (boxIndexes.size() != 1) {
+                               LOGE("Invalid dim size. It should be 1");
+                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
                        }
+                       boxOffset = mOutputLayerProperty.layers[outputMeta.GetBoxName()].shape[boxIndexes[0]];
 
-                       LOGI("number_of_detections = %d", number_of_detections);
-
-                       results->number_of_objects = 0;
-
-                       for (int idx = 0; idx < number_of_detections; ++idx) {
-                               if (scores[idx] < mConfig.mConfidenceThresHold)
-                                       continue;
-
-                               int left = static_cast<int>(boxes[idx * 4 + 1] * mSourceSize.width);
-                               int top = static_cast<int>(boxes[idx * 4 + 0] * mSourceSize.height);
-                               int right = static_cast<int>(boxes[idx * 4 + 3] * mSourceSize.width);
-                               int bottom = static_cast<int>(boxes[idx * 4 + 2] * mSourceSize.height);
-                               cv::Rect loc;
-
-                               loc.x = left;
-                               loc.y = top;
-                               loc.width = right - left + 1;
-                               loc.height = bottom - top + 1;
-
-                               results->indices.push_back(static_cast<int>(classes[idx]));
-                               results->confidences.push_back(scores[idx]);
-                               results->names.push_back(
-                                               mUserListName[static_cast<int>(classes[idx])]);
-                               results->locations.push_back(loc);
-                               results->number_of_objects++;
-
-                               LOGI("objectClass: %d", static_cast<int>(classes[idx]));
-                               LOGI("confidence:%f", scores[idx]);
-                               LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right,
-                                       bottom);
+                       std::vector<int> scoreIndexes = outputMeta.GetScoreDimInfo().GetValidIndexAll();
+                       if (scoreIndexes.size() != 1) {
+                               LOGE("Invalid dim size. It should be 1");
+                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
                        }
-
-                       LOGI("Inference: GetObjectDetectionResults: %d\n", results->number_of_objects);
+                       numberOfFaces = mOutputLayerProperty.layers[outputMeta.GetScoreName()].shape[scoreIndexes[0]];
                }
 
-               return MEDIA_VISION_ERROR_NONE;
-       }
+               ObjectDecoder objDecoder(mOutputTensorBuffers, outputMeta, boxOffset,
+                                                                static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getWidth()),
+                                                                static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getHeight()),
+                                                                numberOfFaces);
 
-       int Inference::GetFaceDetectionResults(FaceDetectionResults *results)
-       {
-               if (mMetadata.GetOutputMeta().IsParsed()) {
-                       OutputMetadata& outputMeta = mMetadata.GetOutputMeta();
+               objDecoder.init();
+               objDecoder.decode();
+               results->number_of_faces = 0;
 
-                       // decoding type
-                       if (!mOutputTensorBuffers.exist(outputMeta.GetBoxName()) ||
-                               !mOutputTensorBuffers.exist(outputMeta.GetScoreName())){
-                               LOGE("output buffers named of %s or %s are NULL",
-                                       outputMeta.GetBoxName().c_str(), outputMeta.GetScoreName().c_str());
-                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       }
-
-                       int boxOffset = 0;
-                       int numberOfFaces = 0;
-
-                       if (outputMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_BYPASS) {
-                               std::vector<int> boxIndexes = outputMeta.GetBoxDimInfo().GetValidIndexAll();
-                               if (boxIndexes.size() != 1) {
-                                       LOGE("Invalid dim size. It should be 1");
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-                               boxOffset = mOutputLayerProperty.layers[outputMeta.GetBoxName()].shape[boxIndexes[0]];
-                       } else {
-                               std::vector<int> boxIndexes = outputMeta.GetBoxDimInfo().GetValidIndexAll();
-                               if (boxIndexes.size() != 1) {
-                                       LOGE("Invalid dim size. It should be 1");
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-                               boxOffset = mOutputLayerProperty.layers[outputMeta.GetBoxName()].shape[boxIndexes[0]];
+               for (auto &face : objDecoder.getObjectAll()) {
+                       results->confidences.push_back(face.score);
+                       results->locations.push_back(
+                                       cv::Rect(static_cast<int>((face.location.x - face.location.width * 0.5f) *
+                                                                                         static_cast<float>(mSourceSize.width)),
+                                                        static_cast<int>((face.location.y - face.location.height * 0.5f) *
+                                                                                         static_cast<float>(mSourceSize.height)),
+                                                        static_cast<int>(face.location.width * static_cast<float>(mSourceSize.width)),
+                                                        static_cast<int>(face.location.height * static_cast<float>(mSourceSize.height))));
+                       results->number_of_faces++;
+               }
 
-                               std::vector<int> scoreIndexes = outputMeta.GetScoreDimInfo().GetValidIndexAll();
-                               if (scoreIndexes.size() != 1) {
-                                       LOGE("Invaid dim size. It should be 1");
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-                               numberOfFaces = mOutputLayerProperty.layers[outputMeta.GetScoreName()].shape[scoreIndexes[0]];
-                       }
+               LOGE("Inference: GetFaceDetectionResults: %d\n", results->number_of_faces);
+       } else {
+               tensor_t outputTensorInfo;
 
-                       ObjectDecoder objDecoder(mOutputTensorBuffers, outputMeta, boxOffset,
-                                               static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getWidth()),
-                                               static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getHeight()),
-                                               numberOfFaces);
-
-                       objDecoder.init();
-                       objDecoder.decode();
-                       results->number_of_faces = 0;
-
-                       for (auto& face : objDecoder.getObjectAll()) {
-                               results->confidences.push_back(face.score);
-                               results->locations.push_back(cv::Rect(
-                                               static_cast<int>((face.location.x -  face.location.width * 0.5f) * static_cast<float>(mSourceSize.width)),
-                                               static_cast<int>((face.location.y -  face.location.height * 0.5f) * static_cast<float>(mSourceSize.height)),
-                                               static_cast<int>(face.location.width *  static_cast<float>(mSourceSize.width)),
-                                               static_cast<int>(face.location.height * static_cast<float>(mSourceSize.height))));
-                               results->number_of_faces++;
-                       }
+               // Get the inference result and store it in outputTensorInfo.
+               int ret = mOutputTensorBuffers.GetTensorInfo(mOutputLayerProperty, outputTensorInfo);
+               if (ret != MEDIA_VISION_ERROR_NONE) {
+                       LOGE("Fail to get output result.");
+                       return ret;
+               }
 
-                       LOGE("Inference: GetFaceDetectionResults: %d\n", results->number_of_faces);
+               // In case of face detection,
+               // one model may apply post-processing while others may not.
+               // Thus, those cases should be handled separately.
+
+               float *boxes = nullptr;
+               float *classes = nullptr;
+               float *scores = nullptr;
+               int number_of_detections = 0;
+               cv::Mat cvScores, cvClasses, cvBoxes;
+
+               if (outputTensorInfo.dimInfo.size() == 1) {
+                       // There is no way to know how many objects were detected unless the backend
+                       // provides the count explicitly, so each backend has to report the number of
+                       // results itself. For example, with OpenCV, MobilenetV1-SSD writes the count
+                       // to the 1st element, i.e., outputTensorInfo.data[0]: the shape is 1x1xNx7 and
+                       // the 1st of the 7 values holds the image id, which is useless when batch mode
+                       // isn't supported, so that slot is reused as the detection count.
+
+                       number_of_detections = static_cast<int>(*reinterpret_cast<float *>(outputTensorInfo.data[0]));
+                       cv::Mat cvOutputData(number_of_detections, outputTensorInfo.dimInfo[0][3], CV_32F,
+                                                                outputTensorInfo.data[0]);
+
+                       // boxes
+                       cv::Mat cvLeft = cvOutputData.col(3).clone();
+                       cv::Mat cvTop = cvOutputData.col(4).clone();
+                       cv::Mat cvRight = cvOutputData.col(5).clone();
+                       cv::Mat cvBottom = cvOutputData.col(6).clone();
+                       cv::Mat cvBoxElems[] = { cvTop, cvLeft, cvBottom, cvRight };
+                       cv::hconcat(cvBoxElems, 4, cvBoxes);
+
+                       // classes
+                       cvClasses = cvOutputData.col(1).clone();
+
+                       // scores
+                       cvScores = cvOutputData.col(2).clone();
+
+                       boxes = cvBoxes.ptr<float>(0);
+                       classes = cvClasses.ptr<float>(0);
+                       scores = cvScores.ptr<float>(0);
                } else {
-                       tensor_t outputTensorInfo;
-
-                       // Get inference result and contain it to outputTensorInfo.
-                       int ret = mOutputTensorBuffers.GetTensorInfo(mOutputLayerProperty, outputTensorInfo);
-                       if (ret != MEDIA_VISION_ERROR_NONE) {
-                               LOGE("Fail to get output result.");
-                               return ret;
-                       }
+                       boxes = reinterpret_cast<float *>(outputTensorInfo.data[0]);
+                       classes = reinterpret_cast<float *>(outputTensorInfo.data[1]);
+                       scores = reinterpret_cast<float *>(outputTensorInfo.data[2]);
+                       number_of_detections = static_cast<int>(*reinterpret_cast<float *>(outputTensorInfo.data[3]));
+               }
 
-                       // In case of object detection,
-                       // a model may apply post-process but others may not.
-                       // Thus, those cases should be handled separately.
+               results->number_of_faces = 0;
 
-                       float *boxes = nullptr;
-                       float *classes = nullptr;
-                       float *scores = nullptr;
-                       int number_of_detections = 0;
-                       cv::Mat cvScores, cvClasses, cvBoxes;
+               for (int idx = 0; idx < number_of_detections; ++idx) {
+                       if (scores[idx] < mConfig.mConfidenceThresHold)
+                               continue;
 
-                       if (outputTensorInfo.dimInfo.size() == 1) {
-                               // there is no way to know how many objects are detect unless the number of objects aren't
-                               // provided. In the case, each backend should provide the number of results manually.
-                               // For example, in OpenCV, MobilenetV1-SSD doesn't provide it so the number of objects are
-                               // written to the 1st element i.e., outputTensorInfo.data[0] (the shape is 1x1xNx7 and the 1st of 7
-                               // indicates the image id. But it is useless if a batch mode isn't supported.
-                               // So, use the 1st of 7.
-
-                               number_of_detections = static_cast<int>(*reinterpret_cast<float *>(outputTensorInfo.data[0]));
-                               cv::Mat cvOutputData(number_of_detections, outputTensorInfo.dimInfo[0][3], CV_32F, outputTensorInfo.data[0]);
-
-                               // boxes
-                               cv::Mat cvLeft = cvOutputData.col(3).clone();
-                               cv::Mat cvTop = cvOutputData.col(4).clone();
-                               cv::Mat cvRight = cvOutputData.col(5).clone();
-                               cv::Mat cvBottom = cvOutputData.col(6).clone();
-                               cv::Mat cvBoxElems[] = { cvTop, cvLeft, cvBottom, cvRight };
-                               cv::hconcat(cvBoxElems, 4, cvBoxes);
-
-                               // classes
-                               cvClasses = cvOutputData.col(1).clone();
-
-                               // scores
-                               cvScores = cvOutputData.col(2).clone();
-
-                               boxes = cvBoxes.ptr<float>(0);
-                               classes = cvClasses.ptr<float>(0);
-                               scores = cvScores.ptr<float>(0);
-                       } else {
-                               boxes = reinterpret_cast<float *>(outputTensorInfo.data[0]);
-                               classes = reinterpret_cast<float *>(outputTensorInfo.data[1]);
-                               scores = reinterpret_cast<float *>(outputTensorInfo.data[2]);
-                               number_of_detections = static_cast<int>(*reinterpret_cast<float *>(outputTensorInfo.data[3]));
-                       }
+                       int left = static_cast<int>(boxes[idx * 4 + 1] * mSourceSize.width);
+                       int top = static_cast<int>(boxes[idx * 4 + 0] * mSourceSize.height);
+                       int right = static_cast<int>(boxes[idx * 4 + 3] * mSourceSize.width);
+                       int bottom = static_cast<int>(boxes[idx * 4 + 2] * mSourceSize.height);
+                       cv::Rect loc;
 
-                       results->number_of_faces = 0;
-
-                       for (int idx = 0; idx < number_of_detections; ++idx) {
-                               if (scores[idx] < mConfig.mConfidenceThresHold)
-                                       continue;
-
-                               int left = static_cast<int>(boxes[idx * 4 + 1] * mSourceSize.width);
-                               int top = static_cast<int>(boxes[idx * 4 + 0] * mSourceSize.height);
-                               int right = static_cast<int>(boxes[idx * 4 + 3] * mSourceSize.width);
-                               int bottom = static_cast<int>(boxes[idx * 4 + 2] * mSourceSize.height);
-                               cv::Rect loc;
-
-                               loc.x = left;
-                               loc.y = top;
-                               loc.width = right - left + 1;
-                               loc.height = bottom - top + 1;
-                               results->confidences.push_back(scores[idx]);
-                               results->locations.push_back(loc);
-                               results->number_of_faces++;
-
-                               LOGI("confidence:%f", scores[idx]);
-                               LOGI("class: %f", classes[idx]);
-                               LOGI("left:%f, top:%f, right:%f, bottom:%f", boxes[idx * 4 + 1],
-                                       boxes[idx * 4 + 0], boxes[idx * 4 + 3], boxes[idx * 4 + 2]);
-                               LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
-                       }
+                       loc.x = left;
+                       loc.y = top;
+                       loc.width = right - left + 1;
+                       loc.height = bottom - top + 1;
+                       results->confidences.push_back(scores[idx]);
+                       results->locations.push_back(loc);
+                       results->number_of_faces++;
 
-                       LOGE("Inference: GetFaceDetectionResults: %d\n", results->number_of_faces);
+                       LOGI("confidence:%f", scores[idx]);
+                       LOGI("class: %f", classes[idx]);
+                       LOGI("left:%f, top:%f, right:%f, bottom:%f", boxes[idx * 4 + 1], boxes[idx * 4 + 0], boxes[idx * 4 + 3],
+                                boxes[idx * 4 + 2]);
+                       LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
                }
 
-               return MEDIA_VISION_ERROR_NONE;
+               LOGE("Inference: GetFaceDetectionResults: %d\n", results->number_of_faces);
        }
 
-       int Inference::GetFacialLandMarkDetectionResults(FacialLandMarkDetectionResults *results)
-       {
-               LOGI("ENTER");
+       return MEDIA_VISION_ERROR_NONE;
+}
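
For reference, the box-decoding step in GetFaceDetectionResults() reduces to the standalone sketch below. It assumes the layout used above: four floats per detection, ordered [top, left, bottom, right] and normalized to [0, 1]. The helper name decodeBox is hypothetical and not part of this patch.

#include <opencv2/core.hpp>

static cv::Rect decodeBox(const float *boxes, int idx, const cv::Size &src)
{
	// Each detection occupies 4 floats: [top, left, bottom, right] in [0, 1].
	int left = static_cast<int>(boxes[idx * 4 + 1] * src.width);
	int top = static_cast<int>(boxes[idx * 4 + 0] * src.height);
	int right = static_cast<int>(boxes[idx * 4 + 3] * src.width);
	int bottom = static_cast<int>(boxes[idx * 4 + 2] * src.height);

	// +1 keeps both edges inclusive, matching the width/height math above.
	return { left, top, right - left + 1, bottom - top + 1 };
}
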
 
-               if (mMetadata.GetOutputMeta().IsParsed()) {
-                       OutputMetadata& outputMeta = mMetadata.GetOutputMeta();
+int Inference::GetFacialLandMarkDetectionResults(FacialLandMarkDetectionResults *results)
+{
+       LOGI("ENTER");
 
-                       if (!mOutputTensorBuffers.exist(outputMeta.GetLandmarkName()) ||
-                               !mOutputTensorBuffers.exist(outputMeta.GetScoreName())) {
-                               LOGE("output buffers named of %s or %s are NULL",
-                                       outputMeta.GetLandmarkName().c_str(), outputMeta.GetScoreName().c_str());
-                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       }
+       if (mMetadata.GetOutputMeta().IsParsed()) {
+               OutputMetadata &outputMeta = mMetadata.GetOutputMeta();
 
-                       int heatMapWidth = 0;
-                       int heatMapHeight = 0;
-                       int heatMapChannel = 0;
-                       std::vector<int> channelIndexes = outputMeta.GetLandmarkDimInfo().GetValidIndexAll();
-                       int number_of_landmarks = heatMapChannel;
-
-                       if (outputMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS) {
-                               LOGI("landmark dim size: %zd and idx[0] is %d", channelIndexes.size(), channelIndexes[0]);
-                               number_of_landmarks = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[channelIndexes[0]]
-                                                                       / outputMeta.GetLandmarkOffset();
-                       } else if (outputMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL) {
-                               number_of_landmarks = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[channelIndexes[0]];
-                       } else {
-                               heatMapWidth = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[outputMeta.GetLandmarkHeatMapInfo().wIdx];
-                               heatMapHeight = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[outputMeta.GetLandmarkHeatMapInfo().hIdx];
-                               heatMapChannel = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[outputMeta.GetLandmarkHeatMapInfo().cIdx];
-                       }
+               if (!mOutputTensorBuffers.exist(outputMeta.GetLandmarkName()) ||
+                       !mOutputTensorBuffers.exist(outputMeta.GetScoreName())) {
+                       LOGE("output buffers named %s or %s are NULL", outputMeta.GetLandmarkName().c_str(),
+                                outputMeta.GetScoreName().c_str());
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
+               }
 
-                       LOGI("heatMap: w[%d], h[%d], c[%d]", heatMapWidth, heatMapHeight, heatMapChannel);
+               int heatMapWidth = 0;
+               int heatMapHeight = 0;
+               int heatMapChannel = 0;
+               std::vector<int> channelIndexes = outputMeta.GetLandmarkDimInfo().GetValidIndexAll();
+               int number_of_landmarks = heatMapChannel;
 
-                       // decoding
-                       PoseDecoder poseDecoder(mOutputTensorBuffers, outputMeta,
-                                                                       heatMapWidth, heatMapHeight, heatMapChannel,
-                                                                       number_of_landmarks);
+               if (outputMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS) {
+                       LOGI("landmark dim size: %zd and idx[0] is %d", channelIndexes.size(), channelIndexes[0]);
+                       number_of_landmarks = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[channelIndexes[0]] /
+                                                                 outputMeta.GetLandmarkOffset();
+               } else if (outputMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL) {
+                       number_of_landmarks = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[channelIndexes[0]];
+               } else {
+                       heatMapWidth = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()]
+                                                                  .shape[outputMeta.GetLandmarkHeatMapInfo().wIdx];
+                       heatMapHeight = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()]
+                                                                       .shape[outputMeta.GetLandmarkHeatMapInfo().hIdx];
+                       heatMapChannel = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()]
+                                                                        .shape[outputMeta.GetLandmarkHeatMapInfo().cIdx];
+               }
 
-                       // initialize decorder queue with landmarks to be decoded.
-                       int ret = poseDecoder.init();
-                       if (ret != MEDIA_VISION_ERROR_NONE) {
-                               LOGE("Fail to init poseDecoder");
-                               return ret;
-                       }
+               LOGI("heatMap: w[%d], h[%d], c[%d]", heatMapWidth, heatMapHeight, heatMapChannel);
 
-                       float inputW = 1.f;
-                       float inputH = 1.f;
+               // decoding
+               PoseDecoder poseDecoder(mOutputTensorBuffers, outputMeta, heatMapWidth, heatMapHeight, heatMapChannel,
+                                                               number_of_landmarks);
 
-                       if (outputMeta.GetLandmarkCoordinate() == INFERENCE_LANDMARK_COORDINATE_TYPE_PIXEL) {
-                               inputW = static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getWidth());
-                               inputH = static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getHeight());
-                       }
+               // initialize decoder queue with landmarks to be decoded.
+               int ret = poseDecoder.init();
+               if (ret != MEDIA_VISION_ERROR_NONE) {
+                       LOGE("Fail to init poseDecoder");
+                       return ret;
+               }
+
+               float inputW = 1.f;
+               float inputH = 1.f;
+
+               if (outputMeta.GetLandmarkCoordinate() == INFERENCE_LANDMARK_COORDINATE_TYPE_PIXEL) {
+                       inputW = static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getWidth());
+                       inputH = static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getHeight());
+               }
 
-                       float thresRadius = outputMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE ? 0.0 :
-                                                                                                               outputMeta.GetLandmarkHeatMapInfo().nmsRadius;
+               float thresRadius = outputMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE ?
+                                                                       0.0 :
+                                                                       outputMeta.GetLandmarkHeatMapInfo().nmsRadius;
 
-                       poseDecoder.decode(inputW, inputH, thresRadius);
+               poseDecoder.decode(inputW, inputH, thresRadius);
 
-                       for (int landmarkIndex = 0; landmarkIndex < number_of_landmarks; landmarkIndex++) {
-                               results->locations.push_back(
+               for (int landmarkIndex = 0; landmarkIndex < number_of_landmarks; landmarkIndex++) {
+                       results->locations.push_back(
                                        cv::Point(poseDecoder.getPointX(0, landmarkIndex) * static_cast<float>(mSourceSize.width),
                                                          poseDecoder.getPointY(0, landmarkIndex) * static_cast<float>(mSourceSize.height)));
-                       }
+               }
 
-                       results->number_of_landmarks = results->locations.size();
-               } else {
-                       tensor_t outputTensorInfo;
+               results->number_of_landmarks = results->locations.size();
+       } else {
+               tensor_t outputTensorInfo;
 
-                       // Get inference result and contain it to outputTensorInfo.
-                       int ret = mOutputTensorBuffers.GetTensorInfo(mOutputLayerProperty, outputTensorInfo);
-                       if (ret != MEDIA_VISION_ERROR_NONE) {
-                               LOGE("Fail to get output result.");
-                               return ret;
-                       }
+               // Get the inference result and store it in outputTensorInfo.
+               int ret = mOutputTensorBuffers.GetTensorInfo(mOutputLayerProperty, outputTensorInfo);
+               if (ret != MEDIA_VISION_ERROR_NONE) {
+                       LOGE("Fail to get output result.");
+                       return ret;
+               }
 
-                       int number_of_detections = outputTensorInfo.dimInfo[0][1] >> 1;
+               int number_of_detections = outputTensorInfo.dimInfo[0][1] >> 1;
 
-                       results->number_of_landmarks = number_of_detections;
-                       results->locations.resize(number_of_detections);
+               results->number_of_landmarks = number_of_detections;
+               results->locations.resize(number_of_detections);
 
-                       LOGI("imgW:%d, imgH:%d", mSourceSize.width, mSourceSize.height);
+               LOGI("imgW:%d, imgH:%d", mSourceSize.width, mSourceSize.height);
 
-                       float *loc = reinterpret_cast<float *>(outputTensorInfo.data[0]);
+               float *loc = reinterpret_cast<float *>(outputTensorInfo.data[0]);
 
-                       for (auto& point : results->locations) {
-                               point.x = static_cast<int>(*loc++ * mSourceSize.width);
-                               point.y = static_cast<int>(*loc++ * mSourceSize.height);
+               for (auto &point : results->locations) {
+                       point.x = static_cast<int>(*loc++ * mSourceSize.width);
+                       point.y = static_cast<int>(*loc++ * mSourceSize.height);
 
-                               LOGI("x:%d, y:%d", point.x, point.y);
-                       }
+                       LOGI("x:%d, y:%d", point.x, point.y);
                }
-
-               LOGI("Inference: FacialLandmarkDetectionResults: %d\n",
-                        results->number_of_landmarks);
-               return MEDIA_VISION_ERROR_NONE;
        }
 
-       int Inference::GetPoseLandmarkDetectionResults(
-                       std::unique_ptr<mv_inference_pose_s> &detectionResults, int width, int height)
-       {
-               LOGI("ENTER");
+       LOGI("Inference: FacialLandmarkDetectionResults: %d\n", results->number_of_landmarks);
+       return MEDIA_VISION_ERROR_NONE;
+}
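
The legacy else-branch above treats the landmark tensor as 2*N interleaved normalized (x, y) floats, hence number_of_detections = dimInfo[0][1] >> 1. A minimal sketch of that layout follows; decodeLandmarks is a hypothetical name, not part of this patch.

#include <opencv2/core.hpp>
#include <vector>

static std::vector<cv::Point> decodeLandmarks(const float *loc, int tensorLen, const cv::Size &src)
{
	std::vector<cv::Point> points(tensorLen >> 1); // two floats per landmark

	for (auto &point : points) {
		point.x = static_cast<int>(*loc++ * src.width);
		point.y = static_cast<int>(*loc++ * src.height);
	}

	return points;
}
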
 
-               auto poseResult = std::make_unique<mv_inference_pose_s>();
+int Inference::GetPoseLandmarkDetectionResults(std::unique_ptr<mv_inference_pose_s> &detectionResults, int width,
+                                                                                          int height)
+{
+       LOGI("ENTER");
 
-               if (mMetadata.GetOutputMeta().IsParsed()) {
-                       OutputMetadata& outputMeta = mMetadata.GetOutputMeta();
+       auto poseResult = std::make_unique<mv_inference_pose_s>();
 
-                       if (!mOutputTensorBuffers.exist(outputMeta.GetLandmarkName()) ||
-                               !mOutputTensorBuffers.exist(outputMeta.GetScoreName())) {
-                               LOGE("output buffers named of %s or %s are NULL",
-                                       outputMeta.GetLandmarkName().c_str(), outputMeta.GetScoreName().c_str());
-                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       }
+       if (mMetadata.GetOutputMeta().IsParsed()) {
+               OutputMetadata &outputMeta = mMetadata.GetOutputMeta();
 
-                       int heatMapWidth = 0;
-                       int heatMapHeight = 0;
-                       int heatMapChannel = 0;
+               if (!mOutputTensorBuffers.exist(outputMeta.GetLandmarkName()) ||
+                       !mOutputTensorBuffers.exist(outputMeta.GetScoreName())) {
+                       LOGE("output buffers named %s or %s are NULL", outputMeta.GetLandmarkName().c_str(),
+                                outputMeta.GetScoreName().c_str());
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
+               }
 
-                       if (outputMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP ||
-                               outputMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE) {
-                               heatMapWidth = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[outputMeta.GetLandmarkHeatMapInfo().wIdx];
-                               heatMapHeight = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[outputMeta.GetLandmarkHeatMapInfo().hIdx];
-                               heatMapChannel = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[outputMeta.GetLandmarkHeatMapInfo().cIdx];
-                       }
+               int heatMapWidth = 0;
+               int heatMapHeight = 0;
+               int heatMapChannel = 0;
 
-                       LOGI("heatMap: w[%d], h[%d], c[%d]", heatMapWidth, heatMapHeight, heatMapChannel);
+               if (outputMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP ||
+                       outputMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE) {
+                       heatMapWidth = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()]
+                                                                  .shape[outputMeta.GetLandmarkHeatMapInfo().wIdx];
+                       heatMapHeight = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()]
+                                                                       .shape[outputMeta.GetLandmarkHeatMapInfo().hIdx];
+                       heatMapChannel = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()]
+                                                                        .shape[outputMeta.GetLandmarkHeatMapInfo().cIdx];
+               }
 
-                       std::vector<int> channelIndexes = outputMeta.GetLandmarkDimInfo().GetValidIndexAll();
+               LOGI("heatMap: w[%d], h[%d], c[%d]", heatMapWidth, heatMapHeight, heatMapChannel);
 
-                       // If INFERENCE_LANDMARK_DECODING_TYPE_BYPASS,
-                       // the landmarkChannel is guessed from the shape of the landmark output tensor.
-                       // Otherwise, it is guessed from the heatMapChannel. (heatMapChannel is used in default).
-                       int landmarkChannel = heatMapChannel;
+               std::vector<int> channelIndexes = outputMeta.GetLandmarkDimInfo().GetValidIndexAll();
 
-                       if (outputMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS)
-                               landmarkChannel = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[channelIndexes[0]] / outputMeta.GetLandmarkOffset();
-                       else if (outputMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL)
-                               landmarkChannel = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[channelIndexes[0]];
+               // If INFERENCE_LANDMARK_DECODING_TYPE_BYPASS (or BYPASS_MULTICHANNEL),
+               // landmarkChannel is derived from the shape of the landmark output tensor.
+               // Otherwise, heatMapChannel is used as the default.
+               int landmarkChannel = heatMapChannel;
 
-                       poseResult->number_of_landmarks_per_pose = mUserListName.empty() ? landmarkChannel :
-                                                                                                               static_cast<int>(mUserListName.size());
+               if (outputMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS)
+                       landmarkChannel = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[channelIndexes[0]] /
+                                                         outputMeta.GetLandmarkOffset();
+               else if (outputMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL)
+                       landmarkChannel = mOutputLayerProperty.layers[outputMeta.GetLandmarkName()].shape[channelIndexes[0]];
 
-                       LOGE("number of landmarks per pose: %d", poseResult->number_of_landmarks_per_pose );
+               poseResult->number_of_landmarks_per_pose = mUserListName.empty() ? landmarkChannel :
+                                                                                                                                                  static_cast<int>(mUserListName.size());
 
-                       if (poseResult->number_of_landmarks_per_pose >= MAX_NUMBER_OF_LANDMARKS_PER_POSE) {
-                               LOGE("Exceeded maxinum number of landmarks per pose(%d >= %d).",
-                                       poseResult->number_of_landmarks_per_pose, MAX_NUMBER_OF_LANDMARKS_PER_POSE);
-                               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-                       }
+               LOGE("number of landmarks per pose: %d", poseResult->number_of_landmarks_per_pose);
 
-                       // decoding
-                       PoseDecoder poseDecoder(mOutputTensorBuffers, outputMeta,
-                                                                       heatMapWidth, heatMapHeight, heatMapChannel,
-                                                                       poseResult->number_of_landmarks_per_pose);
+               if (poseResult->number_of_landmarks_per_pose >= MAX_NUMBER_OF_LANDMARKS_PER_POSE) {
+                       LOGE("Exceeded maximum number of landmarks per pose (%d >= %d).", poseResult->number_of_landmarks_per_pose,
+                                MAX_NUMBER_OF_LANDMARKS_PER_POSE);
+                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+               }
 
-                       // initialize decorder queue with landmarks to be decoded.
-                       int ret = poseDecoder.init();
-                       if (ret != MEDIA_VISION_ERROR_NONE) {
-                               LOGE("Fail to init poseDecoder");
-                               return ret;
-                       }
+               // decoding
+               PoseDecoder poseDecoder(mOutputTensorBuffers, outputMeta, heatMapWidth, heatMapHeight, heatMapChannel,
+                                                               poseResult->number_of_landmarks_per_pose);
 
-                       float inputW = 1.f;
-                       float inputH = 1.f;
-                       float thresRadius = outputMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE ? 0.0 :
-                                                                                                               outputMeta.GetLandmarkHeatMapInfo().nmsRadius;
-                       if (outputMeta.GetLandmarkCoordinate() == INFERENCE_LANDMARK_COORDINATE_TYPE_PIXEL) {
-                               inputW = static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getWidth());
-                               inputH = static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getHeight());
-                       }
+               // initialize decoder queue with landmarks to be decoded.
+               int ret = poseDecoder.init();
+               if (ret != MEDIA_VISION_ERROR_NONE) {
+                       LOGE("Fail to init poseDecoder");
+                       return ret;
+               }
 
-                       poseDecoder.decode(inputW, inputH, thresRadius);
-                       poseResult->number_of_poses = poseDecoder.getNumberOfPose();
-
-                       for (int poseIndex = 0; poseIndex < poseResult->number_of_poses; ++poseIndex) {
-                               for (int landmarkIndex = 0; landmarkIndex < poseResult->number_of_landmarks_per_pose; ++ landmarkIndex) {
-                                       int part = landmarkIndex;
-                                       if (!mUserListName.empty()) {
-                                               part = std::stoi(mUserListName[landmarkIndex]) - 1;
-                                               if (part < 0) {
-                                                       continue;
-                                               }
-                                       }
+               float inputW = 1.f;
+               float inputH = 1.f;
+               float thresRadius = outputMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE ?
+                                                                       0.0 :
+                                                                       outputMeta.GetLandmarkHeatMapInfo().nmsRadius;
+               if (outputMeta.GetLandmarkCoordinate() == INFERENCE_LANDMARK_COORDINATE_TYPE_PIXEL) {
+                       inputW = static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getWidth());
+                       inputH = static_cast<float>(mMetadata.GetInputMeta().GetLayer().begin()->second.getHeight());
+               }
 
-                                       poseResult->landmarks[poseIndex][landmarkIndex].isAvailable = true;
-                                       poseResult->landmarks[poseIndex][landmarkIndex].point.x =
-                                                                               poseDecoder.getPointX(poseIndex, part) * static_cast<float>(mSourceSize.width);
-                                       poseResult->landmarks[poseIndex][landmarkIndex].point.y =
-                                                                               poseDecoder.getPointY(poseIndex, part) * static_cast<float>(mSourceSize.height);
-                                       poseResult->landmarks[poseIndex][landmarkIndex].label = landmarkIndex;
-                                       poseResult->landmarks[poseIndex][landmarkIndex].score =
-                                                                               poseDecoder.getScore(poseIndex, part);
+               poseDecoder.decode(inputW, inputH, thresRadius);
+               poseResult->number_of_poses = poseDecoder.getNumberOfPose();
+
+               for (int poseIndex = 0; poseIndex < poseResult->number_of_poses; ++poseIndex) {
+                       for (int landmarkIndex = 0; landmarkIndex < poseResult->number_of_landmarks_per_pose; ++landmarkIndex) {
+                               int part = landmarkIndex;
+                               if (!mUserListName.empty()) {
+                                       part = std::stoi(mUserListName[landmarkIndex]) - 1;
+                                       if (part < 0) {
+                                               continue;
+                                       }
                                }
+
+                               poseResult->landmarks[poseIndex][landmarkIndex].isAvailable = true;
+                               poseResult->landmarks[poseIndex][landmarkIndex].point.x =
+                                               poseDecoder.getPointX(poseIndex, part) * static_cast<float>(mSourceSize.width);
+                               poseResult->landmarks[poseIndex][landmarkIndex].point.y =
+                                               poseDecoder.getPointY(poseIndex, part) * static_cast<float>(mSourceSize.height);
+                               poseResult->landmarks[poseIndex][landmarkIndex].label = landmarkIndex;
+                               poseResult->landmarks[poseIndex][landmarkIndex].score = poseDecoder.getScore(poseIndex, part);
                        }
+               }
 
-                       detectionResults = std::move(poseResult);
-               } else {
-                       tensor_t outputTensorInfo;
+               detectionResults = std::move(poseResult);
+       } else {
+               tensor_t outputTensorInfo;
 
-                       // Get inference result and contain it to outputTensorInfo.
-                       int ret = mOutputTensorBuffers.GetTensorInfo(mOutputLayerProperty, outputTensorInfo);
-                       if (ret != MEDIA_VISION_ERROR_NONE) {
-                                       LOGE("Fail to get output result.");
-                                       return ret;
-                       }
+               // Get the inference result and store it in outputTensorInfo.
+               int ret = mOutputTensorBuffers.GetTensorInfo(mOutputLayerProperty, outputTensorInfo);
+               if (ret != MEDIA_VISION_ERROR_NONE) {
+                       LOGE("Fail to get output result.");
+                       return ret;
+               }
 
-                       cv::Mat reShapeTest(cv::Size(outputTensorInfo.dimInfo[0][2], outputTensorInfo.dimInfo[0][1]),
-                                                                                CV_32FC(outputTensorInfo.dimInfo[0][3]), outputTensorInfo.data[0]);
-                       cv::Mat multiChannels[outputTensorInfo.dimInfo[0][3]];
+               cv::Mat reShapeTest(cv::Size(outputTensorInfo.dimInfo[0][2], outputTensorInfo.dimInfo[0][1]),
+                                                       CV_32FC(outputTensorInfo.dimInfo[0][3]), outputTensorInfo.data[0]);
+               cv::Mat multiChannels[outputTensorInfo.dimInfo[0][3]];
 
-                       split(reShapeTest, multiChannels);
+               split(reShapeTest, multiChannels);
 
-                       float ratioX = static_cast<float>(outputTensorInfo.dimInfo[0][2]);
-                       float ratioY = static_cast<float>(outputTensorInfo.dimInfo[0][1]);
+               float ratioX = static_cast<float>(outputTensorInfo.dimInfo[0][2]);
+               float ratioY = static_cast<float>(outputTensorInfo.dimInfo[0][1]);
 
-                       poseResult->number_of_poses = 1;
-                       poseResult->number_of_landmarks_per_pose = outputTensorInfo.dimInfo[0][3];
+               poseResult->number_of_poses = 1;
+               poseResult->number_of_landmarks_per_pose = outputTensorInfo.dimInfo[0][3];
 
-                       if (poseResult->number_of_landmarks_per_pose >= MAX_NUMBER_OF_LANDMARKS_PER_POSE) {
-                               LOGE("Exeeded maxinum number of landmarks per pose(%d >= %d).",
-                                       poseResult->number_of_landmarks_per_pose, MAX_NUMBER_OF_LANDMARKS_PER_POSE);
-                               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-                       }
+               if (poseResult->number_of_landmarks_per_pose >= MAX_NUMBER_OF_LANDMARKS_PER_POSE) {
+                       LOGE("Exceeded maximum number of landmarks per pose (%d >= %d).", poseResult->number_of_landmarks_per_pose,
+                                MAX_NUMBER_OF_LANDMARKS_PER_POSE);
+                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+               }
 
-                       for (int poseIndex = 0; poseIndex < poseResult->number_of_poses; ++poseIndex) {
-                               for (int landmarkIndex = 0; landmarkIndex < poseResult->number_of_landmarks_per_pose; landmarkIndex++) {
-                                       int part = landmarkIndex;
-                                       if (!mUserListName.empty()) {
-                                               part = std::stoi(mUserListName[landmarkIndex]) - 1;
-                                               if (part < 0) {
-                                                       continue;
-                                               }
+               for (int poseIndex = 0; poseIndex < poseResult->number_of_poses; ++poseIndex) {
+                       for (int landmarkIndex = 0; landmarkIndex < poseResult->number_of_landmarks_per_pose; landmarkIndex++) {
+                               int part = landmarkIndex;
+                               if (!mUserListName.empty()) {
+                                       part = std::stoi(mUserListName[landmarkIndex]) - 1;
+                                       if (part < 0) {
+                                               continue;
                                        }
+                               }
 
-                                       cv::Mat heatMap = multiChannels[part];
-                                       double score;
-                                       cv::Point loc;
-                                       cv::Point2f loc2f;
-                                       cv::Mat blurredHeatMap;
+                               cv::Mat heatMap = multiChannels[part];
+                               double score;
+                               cv::Point loc;
+                               cv::Point2f loc2f;
+                               cv::Mat blurredHeatMap;
 
-                                       cv::GaussianBlur(heatMap, blurredHeatMap, cv::Size(), 5.0, 5.0);
-                                       cv::minMaxLoc(heatMap, NULL, &score, NULL, &loc);
+                               cv::GaussianBlur(heatMap, blurredHeatMap, cv::Size(), 5.0, 5.0);
+                               cv::minMaxLoc(heatMap, NULL, &score, NULL, &loc);
 
-                                       loc2f.x = (static_cast<float>(loc.x) / ratioX);
-                                       loc2f.y = (static_cast<float>(loc.y) / ratioY);
+                               loc2f.x = (static_cast<float>(loc.x) / ratioX);
+                               loc2f.y = (static_cast<float>(loc.y) / ratioY);
 
-                                       LOGI("landmarkIndex[%2d] - mapping to [%2d]: x[%.3f], y[%.3f], score[%.3f]",
-                                                       landmarkIndex, part, loc2f.x, loc2f.y, score);
+                               LOGI("landmarkIndex[%2d] - mapping to [%2d]: x[%.3f], y[%.3f], score[%.3f]", landmarkIndex, part,
+                                        loc2f.x, loc2f.y, score);
 
-                                       poseResult->landmarks[poseIndex][landmarkIndex].isAvailable = true;
-                                       poseResult->landmarks[poseIndex][landmarkIndex].point.x = static_cast<int>(static_cast<float>(width) * loc2f.x);
-                                       poseResult->landmarks[poseIndex][landmarkIndex].point.y = static_cast<int>(static_cast<float>(height) * loc2f.y);
-                                       poseResult->landmarks[poseIndex][landmarkIndex].score = score;
-                                       poseResult->landmarks[poseIndex][landmarkIndex].label = -1;
-                               }
+                               poseResult->landmarks[poseIndex][landmarkIndex].isAvailable = true;
+                               poseResult->landmarks[poseIndex][landmarkIndex].point.x =
+                                               static_cast<int>(static_cast<float>(width) * loc2f.x);
+                               poseResult->landmarks[poseIndex][landmarkIndex].point.y =
+                                               static_cast<int>(static_cast<float>(height) * loc2f.y);
+                               poseResult->landmarks[poseIndex][landmarkIndex].score = score;
+                               poseResult->landmarks[poseIndex][landmarkIndex].label = -1;
                        }
-
-                       detectionResults = std::move(poseResult);
                }
 
-               return MEDIA_VISION_ERROR_NONE;
+               detectionResults = std::move(poseResult);
        }
 
+       return MEDIA_VISION_ERROR_NONE;
+}
+
 } /* Inference */
 } /* MediaVision */
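
As context for the legacy pose branch above: each output channel is treated as a heat map and the landmark is that channel's global maximum. A minimal sketch under that assumption follows (findPeak is a hypothetical name; note that, as in the source, minMaxLoc runs on the unblurred map even though a blurred copy is computed).

#include <opencv2/imgproc.hpp>

static cv::Point2f findPeak(const cv::Mat &heatMap, double &score)
{
	cv::Mat blurredHeatMap;
	cv::Point loc;

	cv::GaussianBlur(heatMap, blurredHeatMap, cv::Size(), 5.0, 5.0);
	cv::minMaxLoc(heatMap, NULL, &score, NULL, &loc);

	// Normalize the peak position by the heat-map resolution, as the code
	// above does with ratioX/ratioY.
	return { static_cast<float>(loc.x) / heatMap.cols, static_cast<float>(loc.y) / heatMap.rows };
}
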
index 7dc5fa5..a0c978f 100644 (file)
@@ -25,80 +25,72 @@ namespace mediavision
 {
 namespace inference
 {
-       const std::string INFERENCE_INI_FILENAME =
-                       "/inference/inference_engine.ini";
-
-       static inline std::string &rtrim(std::string &s,
-                                                                        const char *t = " \t\n\r\f\v")
-       {
-               s.erase(s.find_last_not_of(t) + 1);
-               return s;
-       }
+const std::string INFERENCE_INI_FILENAME = "/inference/inference_engine.ini";
 
-       static inline std::string &ltrim(std::string &s,
-                                                                        const char *t = " \t\n\r\f\v")
-       {
-               s.erase(s.find_first_not_of(t) + 1);
-               return s;
-       }
+static inline std::string &rtrim(std::string &s, const char *t = " \t\n\r\f\v")
+{
+       s.erase(s.find_last_not_of(t) + 1);
+       return s;
+}
 
-       static inline std::string &trim(std::string &s,
-                                                                       const char *t = " \t\n\r\f\v")
-       {
-               return ltrim(rtrim(s, t), t);
-       }
+static inline std::string &ltrim(std::string &s, const char *t = " \t\n\r\f\v")
+{
+       s.erase(0, s.find_first_not_of(t));
+       return s;
+}
 
-       InferenceInI::InferenceInI()
-                       : mIniDefaultPath(SYSCONFDIR)
-                       , mDefaultBackend("OPENCV")
-                       , mDelimeter(",")
-       {
-               mIniDefaultPath += INFERENCE_INI_FILENAME;
-       }
+static inline std::string &trim(std::string &s, const char *t = " \t\n\r\f\v")
+{
+       return ltrim(rtrim(s, t), t);
+}
+
+InferenceInI::InferenceInI() : mIniDefaultPath(SYSCONFDIR), mDefaultBackend("OPENCV"), mDelimeter(",")
+{
+       mIniDefaultPath += INFERENCE_INI_FILENAME;
+}
 
-       InferenceInI::~InferenceInI()
-       {}
-
-       int InferenceInI::LoadInI()
-       {
-               LOGI("ENTER");
-               dictionary *dict = iniparser_load(mIniDefaultPath.c_str());
-               if (dict == NULL) {
-                       LOGE("Fail to load ini");
-                       return -1;
-               }
-
-               std::string list = std::string(iniparser_getstring(
-                               dict, "inference backend:supported backend types",
-                               (char *) mDefaultBackend.c_str()));
-
-               size_t pos = 0;
-               while ((pos = list.find(mDelimeter)) != std::string::npos) {
-                       std::string tmp = list.substr(0, pos);
-                       mSupportedInferenceBackend.push_back(atoi(tmp.c_str()));
-
-                       list.erase(0, pos + mDelimeter.length());
-               }
-               mSupportedInferenceBackend.push_back(atoi(list.c_str()));
-
-               if (dict) {
-                       iniparser_freedict(dict);
-                       dict = NULL;
-               }
-
-               LOGI("LEAVE");
-               return 0;
+InferenceInI::~InferenceInI()
+{}
+
+int InferenceInI::LoadInI()
+{
+       LOGI("ENTER");
+       dictionary *dict = iniparser_load(mIniDefaultPath.c_str());
+       if (dict == NULL) {
+               LOGE("Fail to load ini");
+               return -1;
        }
 
-       void InferenceInI::UnLoadInI()
-       {
-               ;
+       std::string list = std::string(
+                       iniparser_getstring(dict, "inference backend:supported backend types", (char *) mDefaultBackend.c_str()));
+
+       size_t pos = 0;
+       while ((pos = list.find(mDelimeter)) != std::string::npos) {
+               std::string tmp = list.substr(0, pos);
+               mSupportedInferenceBackend.push_back(atoi(tmp.c_str()));
+
+               list.erase(0, pos + mDelimeter.length());
        }
+       mSupportedInferenceBackend.push_back(atoi(list.c_str()));
 
-       std::vector<int> InferenceInI::GetSupportedInferenceEngines()
-       {
-               return mSupportedInferenceBackend;
+       if (dict) {
+               iniparser_freedict(dict);
+               dict = NULL;
        }
 
+       LOGI("LEAVE");
+       return 0;
+}
+
+void InferenceInI::UnLoadInI()
+{
+       ;
+}
+
+std::vector<int> InferenceInI::GetSupportedInferenceEngines()
+{
+       return mSupportedInferenceBackend;
+}
+
 } /* Inference */
 } /* MediaVision */
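
The split loop in LoadInI() is the usual find/erase idiom. Reduced to a standalone sketch (parseBackends is a hypothetical name), it turns a list such as "1,2" into the integer backend ids kept in mSupportedInferenceBackend:

#include <cstdlib>
#include <string>
#include <vector>

static std::vector<int> parseBackends(std::string list, const std::string &delimiter = ",")
{
	std::vector<int> backends;
	size_t pos = 0;

	while ((pos = list.find(delimiter)) != std::string::npos) {
		backends.push_back(atoi(list.substr(0, pos).c_str()));
		list.erase(0, pos + delimiter.length());
	}

	// The tail after the last delimiter is an element too.
	backends.push_back(atoi(list.c_str()));

	return backends;
}
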
index 22b8383..a2bbceb 100644 (file)
@@ -29,181 +29,177 @@ namespace mediavision
 {
 namespace inference
 {
-       InputMetadata::InputMetadata() :
-                       parsed(false),
-                       layer(),
-                       option()
-       {
-               // shape_type
-               mSupportedShapeType.insert({"NCHW", INFERENCE_TENSOR_SHAPE_NCHW});
-               mSupportedShapeType.insert({"NHWC", INFERENCE_TENSOR_SHAPE_NHWC});
-
-               // data_type
-               mSupportedDataType.insert({"FLOAT32", MV_INFERENCE_DATA_FLOAT32});
-               mSupportedDataType.insert({"UINT8", MV_INFERENCE_DATA_UINT8});
-
-               // color_space
-               mSupportedColorSpace.insert({"RGB888", MEDIA_VISION_COLORSPACE_RGB888});
-               mSupportedColorSpace.insert({"GRAY8", MEDIA_VISION_COLORSPACE_Y800});
-       }
+InputMetadata::InputMetadata() : parsed(false), layer(), option()
+{
+       // shape_type
+       mSupportedShapeType.insert({ "NCHW", INFERENCE_TENSOR_SHAPE_NCHW });
+       mSupportedShapeType.insert({ "NHWC", INFERENCE_TENSOR_SHAPE_NHWC });
 
-       int InputMetadata::GetTensorInfo(JsonObject *root)
-       {
-               LOGI("ENTER");
+       // data_type
+       mSupportedDataType.insert({ "FLOAT32", MV_INFERENCE_DATA_FLOAT32 });
+       mSupportedDataType.insert({ "UINT8", MV_INFERENCE_DATA_UINT8 });
 
-               if (!json_object_has_member(root, "tensor_info")) {
-                       LOGE("No tensor_info inputmetadata");
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
+       // color_space
+       mSupportedColorSpace.insert({ "RGB888", MEDIA_VISION_COLORSPACE_RGB888 });
+       mSupportedColorSpace.insert({ "GRAY8", MEDIA_VISION_COLORSPACE_Y800 });
+}
 
-               // tensor_info
-               int ret = MEDIA_VISION_ERROR_NONE;
-               JsonArray * rootArray = json_object_get_array_member(root, "tensor_info");
-               unsigned int elements = json_array_get_length(rootArray);
-
-               std::map<std::string, LayerInfo>().swap(layer);
-               // TODO: handling error
-               // FIXEME: LayerInfo.set()??
-               for (unsigned int elem = 0; elem < elements; ++elem) {
-                       LayerInfo info;
-                       JsonNode *pNode = json_array_get_element(rootArray, elem);
-                       JsonObject *pObject = json_node_get_object(pNode);
-
-                       info.name =
-                                       static_cast<const char*>(json_object_get_string_member(pObject,"name"));
-                       LOGI("layer: %s", info.name.c_str());
-
-                       try {
-                               info.shapeType = GetSupportedType(pObject, "shape_type", mSupportedShapeType);
-                               info.dataType = GetSupportedType(pObject, "data_type", mSupportedDataType);
-                               info.colorSpace = GetSupportedType(pObject, "color_space", mSupportedColorSpace);
-                       } catch (const std::exception& e) {
-                               LOGE("Invalid %s", e.what());
-                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       }
+int InputMetadata::GetTensorInfo(JsonObject *root)
+{
+       LOGI("ENTER");
 
-                       // dims
-                       JsonArray * array = json_object_get_array_member(pObject, "shape_dims");
-                       unsigned int elements2 = json_array_get_length(array);
-                       LOGI("shape dim: size[%u]", elements2);
-                       for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
-                               auto dim = static_cast<int>(json_array_get_int_element(array, elem2));
-                               info.dims.push_back(dim);
-                               LOGI("%d", dim);
-                       }
+       if (!json_object_has_member(root, "tensor_info")) {
+               LOGE("No tensor_info in inputmetadata");
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+       }
 
-                       layer.insert(std::make_pair(info.name, info));
+       // tensor_info
+       int ret = MEDIA_VISION_ERROR_NONE;
+       JsonArray *rootArray = json_object_get_array_member(root, "tensor_info");
+       unsigned int elements = json_array_get_length(rootArray);
+
+       std::map<std::string, LayerInfo>().swap(layer);
+       // TODO: handle errors
+       // FIXME: LayerInfo.set()?
+       for (unsigned int elem = 0; elem < elements; ++elem) {
+               LayerInfo info;
+               JsonNode *pNode = json_array_get_element(rootArray, elem);
+               JsonObject *pObject = json_node_get_object(pNode);
+
+               info.name = static_cast<const char *>(json_object_get_string_member(pObject, "name"));
+               LOGI("layer: %s", info.name.c_str());
+
+               try {
+                       info.shapeType = GetSupportedType(pObject, "shape_type", mSupportedShapeType);
+                       info.dataType = GetSupportedType(pObject, "data_type", mSupportedDataType);
+                       info.colorSpace = GetSupportedType(pObject, "color_space", mSupportedColorSpace);
+               } catch (const std::exception &e) {
+                       LOGE("Invalid %s", e.what());
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
                }
 
-               LOGI("LEAVE");
+               // dims
+               JsonArray *array = json_object_get_array_member(pObject, "shape_dims");
+               unsigned int elements2 = json_array_get_length(array);
+               LOGI("shape dim: size[%u]", elements2);
+               for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
+                       auto dim = static_cast<int>(json_array_get_int_element(array, elem2));
+                       info.dims.push_back(dim);
+                       LOGI("%d", dim);
+               }
 
-               return ret;
+               layer.insert(std::make_pair(info.name, info));
        }
 
-       int InputMetadata::GetPreProcess(JsonObject *root)
-       {
-               LOGI("ENTER");
+       LOGI("LEAVE");
 
-               if (!json_object_has_member(root, "preprocess")) {
-                       LOGI("No preprocess inputmetadata");
-                       return MEDIA_VISION_ERROR_NONE;
-               }
+       return ret;
+}
+
+int InputMetadata::GetPreProcess(JsonObject *root)
+{
+       LOGI("ENTER");
+
+       if (!json_object_has_member(root, "preprocess")) {
+               LOGI("No preprocess in inputmetadata");
+               return MEDIA_VISION_ERROR_NONE;
+       }
 
-               // preprocess
-               JsonArray * rootArray = json_object_get_array_member(root, "preprocess");
-               unsigned int elements = json_array_get_length(rootArray);
-
-               std::map<std::string, Options>().swap(option);
-               // TODO: iterLayer should be the same with elements.
-               auto iterLayer = layer.begin();
-               // TODO: handling error
-               for (unsigned int elem = 0; elem < elements; ++elem, ++iterLayer) {
-                       Options opt;
-
-                       JsonNode *pNode = json_array_get_element(rootArray, elem);
-                       JsonObject *pObject = json_node_get_object(pNode);
-
-                       // normalization
-                       if (json_object_has_member(pObject, "normalization")) {
-                               JsonArray * array = json_object_get_array_member(pObject, "normalization");
-                               JsonNode *  node = json_array_get_element(array, 0);
-                               JsonObject * object = json_node_get_object(node);
-
-                               opt.normalization.use = true;
-                               LOGI("use normalization");
-
-                               JsonArray * arrayMean = json_object_get_array_member(object, "mean");
-                               JsonArray * arrayStd = json_object_get_array_member(object, "std");
-                               unsigned int elemMean = json_array_get_length(arrayMean);
-                               unsigned int elemStd = json_array_get_length(arrayStd);
-                               if (elemMean != elemStd) {
-                                       LOGE("Invalid mean and std values");
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-
-                               for (unsigned int elem = 0; elem < elemMean; ++elem) {
-                                       auto m = static_cast<double>(json_array_get_double_element(arrayMean, elem));
-                                       auto s = static_cast<double>(json_array_get_double_element(arrayStd, elem));
-                                       opt.normalization.mean.push_back(m);
-                                       opt.normalization.std.push_back(s);
-                                       LOGI("%u: mean[%3.2f], std[%3.2f]", elem, m, s);
-                               }
+       // preprocess
+       JsonArray *rootArray = json_object_get_array_member(root, "preprocess");
+       unsigned int elements = json_array_get_length(rootArray);
+
+       std::map<std::string, Options>().swap(option);
+       // TODO: iterLayer count should match elements.
+       auto iterLayer = layer.begin();
+       // TODO: handle errors
+       for (unsigned int elem = 0; elem < elements; ++elem, ++iterLayer) {
+               Options opt;
+
+               JsonNode *pNode = json_array_get_element(rootArray, elem);
+               JsonObject *pObject = json_node_get_object(pNode);
+
+               // normalization
+               if (json_object_has_member(pObject, "normalization")) {
+                       JsonArray *array = json_object_get_array_member(pObject, "normalization");
+                       JsonNode *node = json_array_get_element(array, 0);
+                       JsonObject *object = json_node_get_object(node);
+
+                       opt.normalization.use = true;
+                       LOGI("use normalization");
+
+                       JsonArray *arrayMean = json_object_get_array_member(object, "mean");
+                       JsonArray *arrayStd = json_object_get_array_member(object, "std");
+                       unsigned int elemMean = json_array_get_length(arrayMean);
+                       unsigned int elemStd = json_array_get_length(arrayStd);
+                       if (elemMean != elemStd) {
+                               LOGE("Invalid mean and std values");
+                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
                        }
 
-                       if (json_object_has_member(pObject, "quantization")) {
-                               JsonArray * array = json_object_get_array_member(pObject, "quantization");
-                               JsonNode *  node = json_array_get_element(array, 0);
-                               JsonObject * object = json_node_get_object(node);
-
-                               opt.quantization.use = true;
-                               LOGI("use quantization");
-
-                               JsonArray * arrayScale = json_object_get_array_member(object, "scale");
-                               JsonArray * arrayZero = json_object_get_array_member(object, "zeropoint");
-                               unsigned int elemScale = json_array_get_length(arrayScale);
-                               unsigned int elemZero= json_array_get_length(arrayZero);
-                               if (elemScale != elemZero) {
-                                       LOGE("Invalid scale and zero values");
-                                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                               }
-
-                               for (unsigned int elem = 0; elem < elemScale; ++elem) {
-                                       auto s = static_cast<double>(json_array_get_double_element(arrayScale, elem));
-                                       auto z = static_cast<double>(json_array_get_double_element(arrayZero, elem));
-                                       opt.quantization.scale.push_back(s);
-                                       opt.quantization.zeropoint.push_back(z);
-                                       LOGI("%u: scale[%3.2f], zeropoint[%3.2f]", elem, s, z);
-                               }
+                       for (unsigned int elem = 0; elem < elemMean; ++elem) {
+                               auto m = static_cast<double>(json_array_get_double_element(arrayMean, elem));
+                               auto s = static_cast<double>(json_array_get_double_element(arrayStd, elem));
+                               opt.normalization.mean.push_back(m);
+                               opt.normalization.std.push_back(s);
+                               LOGI("%u: mean[%3.2f], std[%3.2f]", elem, m, s);
                        }
-                       option.insert(std::make_pair(iterLayer->first, opt));
                }
 
-               LOGI("LEAVE");
+               if (json_object_has_member(pObject, "quantization")) {
+                       JsonArray *array = json_object_get_array_member(pObject, "quantization");
+                       JsonNode *node = json_array_get_element(array, 0);
+                       JsonObject *object = json_node_get_object(node);
 
-               return MEDIA_VISION_ERROR_NONE;
-       }
+                       opt.quantization.use = true;
+                       LOGI("use quantization");
 
-       int InputMetadata::Parse(JsonObject *root)
-       {
-               LOGI("ENTER");
+                       JsonArray *arrayScale = json_object_get_array_member(object, "scale");
+                       JsonArray *arrayZero = json_object_get_array_member(object, "zeropoint");
+                       unsigned int elemScale = json_array_get_length(arrayScale);
+                       unsigned int elemZero = json_array_get_length(arrayZero);
+                       if (elemScale != elemZero) {
+                               LOGE("Invalid scale and zero values");
+                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+                       }
 
-               int ret = GetTensorInfo(root);
-               if (ret != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Fail to GetTensorInfo[%d]", ret);
-                       return ret;
+                       for (unsigned int elem = 0; elem < elemScale; ++elem) {
+                               auto s = static_cast<double>(json_array_get_double_element(arrayScale, elem));
+                               auto z = static_cast<double>(json_array_get_double_element(arrayZero, elem));
+                               opt.quantization.scale.push_back(s);
+                               opt.quantization.zeropoint.push_back(z);
+                               LOGI("%u: scale[%3.2f], zeropoint[%3.2f]", elem, s, z);
+                       }
                }
+               option.insert(std::make_pair(iterLayer->first, opt));
+       }
 
-               ret = GetPreProcess(root);
-               if (ret != MEDIA_VISION_ERROR_NONE)      {
-                       LOGE("Fail to GetPreProcess[%d]", ret);
-                       return ret;
-               }
+       LOGI("LEAVE");
 
-               parsed = true;
-               LOGI("LEAVE");
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               return MEDIA_VISION_ERROR_NONE;
+int InputMetadata::Parse(JsonObject *root)
+{
+       LOGI("ENTER");
+
+       int ret = GetTensorInfo(root);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to GetTensorInfo[%d]", ret);
+               return ret;
+       }
+
+       ret = GetPreProcess(root);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to GetPreProcess[%d]", ret);
+               return ret;
        }
 
+       parsed = true;
+       LOGI("LEAVE");
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
 } /* Inference */
 } /* MediaVision */
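
Read together, GetTensorInfo() and GetPreProcess() above imply an inputmetadata document of roughly the following shape. This fragment is a hand-written illustration using only the keys the parser reads; the values are invented, not taken from any shipped model meta file.

static const char *kExampleInputMeta = R"({
	"tensor_info" : [
		{ "name" : "input", "shape_type" : "NHWC", "data_type" : "FLOAT32",
		  "color_space" : "RGB888", "shape_dims" : [ 1, 224, 224, 3 ] }
	],
	"preprocess" : [
		{ "normalization" : [ { "mean" : [ 127.5, 127.5, 127.5 ], "std" : [ 127.5, 127.5, 127.5 ] } ],
		  "quantization" : [ { "scale" : [ 1.0 ], "zeropoint" : [ 0.0 ] } ] }
	]
})";
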
index b2ae9ff..34f83a6 100644 (file)
@@ -29,93 +29,92 @@ namespace mediavision
 {
 namespace inference
 {
-       int Metadata::Init(const std::string& filename)
-       {
-               LOGI("ENTER");
+int Metadata::Init(const std::string &filename)
+{
+       LOGI("ENTER");
+
+       if (access(filename.c_str(), F_OK | R_OK)) {
+               LOGE("meta file [%s] is not accessible", filename.c_str());
+               return MEDIA_VISION_ERROR_INVALID_PATH;
+       }
+
+       mMetafile = filename;
+
+       LOGI("LEAVE");
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+int Metadata::Parse()
+{
+       LOGI("ENTER");
+
+       if (mMetafile.empty()) {
+               LOGE("meta file is empty");
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+       }
 
-               if (access(filename.c_str(), F_OK | R_OK)) {
-                       LOGE("meta file is in [%s] ", filename.c_str());
-                       return MEDIA_VISION_ERROR_INVALID_PATH;
-               }
+       GError *error = NULL;
+       JsonNode *node = NULL;
+       JsonObject *object = NULL;
+       int ret = MEDIA_VISION_ERROR_NONE;
 
-               mMetafile = filename;
+       JsonParser *parser = json_parser_new();
+       if (parser == NULL) {
+               LOGE("Fail to create json parser");
+               return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+       }
 
-               LOGI("LEAVE");
+       gboolean jsonRet = json_parser_load_from_file(parser, mMetafile.c_str(), &error);
+       if (!jsonRet) {
+               LOGE("Unable to parse file %s: %s", mMetafile.c_str(), error == NULL ? "Unknown" : error->message);
+               g_error_free(error);
+               ret = MEDIA_VISION_ERROR_INVALID_DATA;
+               goto _ERROR_;
+       }
 
-               return MEDIA_VISION_ERROR_NONE;
+       node = json_parser_get_root(parser);
+       if (JSON_NODE_TYPE(node) != JSON_NODE_OBJECT) {
+               LOGE("Fail to json_parser_get_root. It's an incorrect markup");
+               ret = MEDIA_VISION_ERROR_INVALID_DATA;
+               goto _ERROR_;
        }
 
-       int Metadata::Parse()
-       {
-               LOGI("ENTER");
-
-               if (mMetafile.empty()) {
-                       LOGE("meta file is empty");
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
-
-               GError *error = NULL;
-               JsonNode *node = NULL;
-               JsonObject *object = NULL;
-               int ret = MEDIA_VISION_ERROR_NONE;
-
-               JsonParser *parser = json_parser_new();
-               if (parser == NULL) {
-                       LOGE("Fail to create json parser");
-                       return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
-               }
-
-               gboolean jsonRet = json_parser_load_from_file(parser, mMetafile.c_str(), &error);
-               if (!jsonRet) {
-                       LOGE("Unable to parser file %s by %s", mMetafile.c_str(),
-                                                               error == NULL ? "Unknown" : error->message);
-                       g_error_free(error);
-                       ret =  MEDIA_VISION_ERROR_INVALID_DATA;
-                       goto _ERROR_;
-               }
-
-               node = json_parser_get_root(parser);
-               if (JSON_NODE_TYPE(node) != JSON_NODE_OBJECT) {
-                       LOGE("Fail to json_parser_get_root. It's an incorrect markup");
-                       ret =  MEDIA_VISION_ERROR_INVALID_DATA;
-                       goto _ERROR_;
-               }
-
-               object = json_node_get_object(node);
-               if (!object) {
-                       LOGE("Fail to json_node_get_object. object is NULL");
-                       ret =  MEDIA_VISION_ERROR_INVALID_DATA;
-                       goto _ERROR_;
-               }
-
-               ret = mInputMeta.Parse(json_object_get_object_member(object, "inputmetadata"));
-               if (ret != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Fail to parse input Meta[%d]",ret);
-                       goto _ERROR_;
-               }
-
-               ret = mOutputMeta.Parse(json_object_get_object_member(object, "outputmetadata"));
-               if (ret != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Fail to parse output meta[%d]",ret);
-                       goto _ERROR_;
-               }
-
-       _ERROR_ :
-               g_object_unref(parser);
-               parser = NULL;
-               LOGI("LEAVE");
-
-               return ret;
+       object = json_node_get_object(node);
+       if (!object) {
+               LOGE("Fail to json_node_get_object. object is NULL");
+               ret = MEDIA_VISION_ERROR_INVALID_DATA;
+               goto _ERROR_;
        }
 
-       InputMetadata& Metadata::GetInputMeta()
-       {
-               return mInputMeta;
+       ret = mInputMeta.Parse(json_object_get_object_member(object, "inputmetadata"));
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to parse input Meta[%d]", ret);
+               goto _ERROR_;
        }
 
-       OutputMetadata& Metadata::GetOutputMeta()
-       {
-               return mOutputMeta;
+       ret = mOutputMeta.Parse(json_object_get_object_member(object, "outputmetadata"));
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to parse output meta[%d]", ret);
+               goto _ERROR_;
        }
+
+_ERROR_:
+       g_object_unref(parser);
+       parser = NULL;
+       LOGI("LEAVE");
+
+       return ret;
+}
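+
+// A minimal metadata file this parser accepts is a single JSON object with
+// "inputmetadata" and "outputmetadata" members; a hypothetical sketch
+// (member contents depend on the model and are elided here):
+//
+// {
+//     "inputmetadata" : { ... },
+//     "outputmetadata" : { ... }
+// }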
+
+InputMetadata &Metadata::GetInputMeta()
+{
+       return mInputMeta;
+}
+
+OutputMetadata &Metadata::GetOutputMeta()
+{
+       return mOutputMeta;
+}
 } /* Inference */
 } /* MediaVision */
old mode 100755 (executable)
new mode 100644 (file)
index 96193da..c617f2d
@@ -25,230 +25,214 @@ namespace mediavision
 {
 namespace inference
 {
-       int ObjectDecoder::init()
-       {
-               if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_BYPASS) {
-                       if (!mTensorBuffer.exist(mMeta.GetBoxLabelName()) ||
-                               !mTensorBuffer.exist(mMeta.GetBoxNumberName()) ) {
-                               LOGE("buffer buffers named of %s or %s are NULL",
-                                       mMeta.GetBoxLabelName().c_str(), mMeta.GetBoxNumberName().c_str());
-
-                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       }
+int ObjectDecoder::init()
+{
+       if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_BYPASS) {
+               if (!mTensorBuffer.exist(mMeta.GetBoxLabelName()) || !mTensorBuffer.exist(mMeta.GetBoxNumberName())) {
+                       LOGE("buffer buffers named of %s or %s are NULL", mMeta.GetBoxLabelName().c_str(),
+                                mMeta.GetBoxNumberName().c_str());
 
-                       std::vector<int> indexes = mMeta.GetBoxNumberDimInfo().GetValidIndexAll();
-                       if (indexes.size() != 1) {
-                               LOGE("Invalid dim size. It should be 1");
-                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       }
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
+               }
 
-                       // mNumberOfObjects is set again if INFERENCE_BOX_DECODING_TYPE_BYPASS.
-                       // Otherwise it is set already within ctor.
-                       mNumberOfOjects = mTensorBuffer.getValue<int>(
-                                                               mMeta.GetBoxNumberName(), indexes[0]);
-               } else if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR) {
-                       if (mMeta.GetBoxDecodeInfo().IsAnchorBoxEmpty()) {
-                               LOGE("Anchor boxes are required but empty.");
-                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       }
-               } else {
-                       LOGI("YOLO_ANCHOR does nothing");
+               std::vector<int> indexes = mMeta.GetBoxNumberDimInfo().GetValidIndexAll();
+               if (indexes.size() != 1) {
+                       LOGE("Invalid dim size. It should be 1");
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
                }
 
-               return MEDIA_VISION_ERROR_NONE;
+               // mNumberOfOjects is set again here for INFERENCE_BOX_DECODING_TYPE_BYPASS;
+               // otherwise it was already set in the ctor.
+               mNumberOfOjects = mTensorBuffer.getValue<int>(mMeta.GetBoxNumberName(), indexes[0]);
+       } else if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR) {
+               if (mMeta.GetBoxDecodeInfo().IsAnchorBoxEmpty()) {
+                       LOGE("Anchor boxes are required but empty.");
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
+               }
+       } else {
+               LOGI("YOLO_ANCHOR does nothing");
        }
 
-       float ObjectDecoder::decodeScore(int idx)
-       {
-               float score = mTensorBuffer.getValue<float>(mMeta.GetScoreName(), idx);
-               if (mMeta.GetScoreType() == INFERENCE_SCORE_TYPE_SIGMOID) {
-                       score = PostProcess::sigmoid(score);
-               }
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               return score < mMeta.GetScoreThreshold() ? 0.0f : score;
+float ObjectDecoder::decodeScore(int idx)
+{
+       float score = mTensorBuffer.getValue<float>(mMeta.GetScoreName(), idx);
+       if (mMeta.GetScoreType() == INFERENCE_SCORE_TYPE_SIGMOID) {
+               score = PostProcess::sigmoid(score);
        }
 
-       Box ObjectDecoder::decodeBox(int idx, float score, int label, int offset)
-       {
-               // assume type is (cx,cy,w,h)
-               // left or cx
-               float cx = mTensorBuffer.getValue<float>(mMeta.GetBoxName(),
-                                                                       idx * mBoxOffset + offset + mMeta.GetBoxOrder()[0]);
-               // top or cy
-               float cy = mTensorBuffer.getValue<float>(mMeta.GetBoxName(),
-                                                                       idx * mBoxOffset + offset + mMeta.GetBoxOrder()[1]);
-               // right or width
-               float cWidth = mTensorBuffer.getValue<float>(mMeta.GetBoxName(),
-                                                                       idx * mBoxOffset + offset + mMeta.GetBoxOrder()[2]);
-               // bottom or height
-               float cHeight = mTensorBuffer.getValue<float>(mMeta.GetBoxName(),
-                                                                       idx * mBoxOffset + offset + mMeta.GetBoxOrder()[3]);
-
-               if (mMeta.GetBoxDecodeInfo().GetCellType() == INFERENCE_SCORE_TYPE_SIGMOID) {
-                       cx = PostProcess::sigmoid(cx);
-                       cy = PostProcess::sigmoid(cy);
-                       cWidth = PostProcess::sigmoid(cWidth);
-                       cHeight = PostProcess::sigmoid(cHeight);
-               }
-
-               LOGI("cx:%.2f, cy:%.2f, cW:%.2f, cH:%.2f", cx, cy, cWidth, cHeight);
-               // convert type to ORIGIN_CENTER if ORIGIN_LEFTTOP
-               if (mMeta.GetBoxType() == INFERENCE_BOX_TYPE_ORIGIN_LEFTTOP) {
-                       float tmpCx = cx;
-                       float tmpCy = cy;
-                       cx = (cx + cWidth) * 0.5f; // (left + right)/2
-                       cy = (cy + cHeight) * 0.5f; // (top + bottom)/2
-                       cWidth = cWidth - tmpCx ; // right - left
-                       cHeight = cHeight - tmpCy; // bottom - top
-               }
+       return score < mMeta.GetScoreThreshold() ? 0.0f : score;
+}
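+
+// PostProcess::sigmoid() maps a raw logit x to 1 / (1 + exp(-x)), so for
+// example x = 0 decodes to 0.5; scores under GetScoreThreshold() are
+// clamped to 0.0f above, so callers can skip them with a simple <= 0 check.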
 
-               // convert coordinate to RATIO if PIXEL
-               if (mMeta.GetScoreCoordinate() == INFERENCE_BOX_COORDINATE_TYPE_PIXEL) {
-                       cx /= mScaleW;
-                       cy /= mScaleH;
-                       cWidth /= mScaleW;
-                       cHeight /= mScaleH;
-               }
+Box ObjectDecoder::decodeBox(int idx, float score, int label, int offset)
+{
+       // assume type is (cx,cy,w,h)
+       // left or cx
+       float cx = mTensorBuffer.getValue<float>(mMeta.GetBoxName(), idx * mBoxOffset + offset + mMeta.GetBoxOrder()[0]);
+       // top or cy
+       float cy = mTensorBuffer.getValue<float>(mMeta.GetBoxName(), idx * mBoxOffset + offset + mMeta.GetBoxOrder()[1]);
+       // right or width
+       float cWidth =
+                       mTensorBuffer.getValue<float>(mMeta.GetBoxName(), idx * mBoxOffset + offset + mMeta.GetBoxOrder()[2]);
+       // bottom or height
+       float cHeight =
+                       mTensorBuffer.getValue<float>(mMeta.GetBoxName(), idx * mBoxOffset + offset + mMeta.GetBoxOrder()[3]);
+
+       if (mMeta.GetBoxDecodeInfo().GetCellType() == INFERENCE_SCORE_TYPE_SIGMOID) {
+               cx = PostProcess::sigmoid(cx);
+               cy = PostProcess::sigmoid(cy);
+               cWidth = PostProcess::sigmoid(cWidth);
+               cHeight = PostProcess::sigmoid(cHeight);
+       }
 
-               Box box = {
-                       .index = mMeta.GetBoxLabelName().empty() ?
-                                               label :
-                                               mTensorBuffer.getValue<int>(mMeta.GetBoxLabelName(), idx),
-                       .score = score,
-                       .location = cv::Rect2f(cx, cy, cWidth, cHeight)
-               };
+       LOGI("cx:%.2f, cy:%.2f, cW:%.2f, cH:%.2f", cx, cy, cWidth, cHeight);
+       // convert type to ORIGIN_CENTER if ORIGIN_LEFTTOP
+       if (mMeta.GetBoxType() == INFERENCE_BOX_TYPE_ORIGIN_LEFTTOP) {
+               float tmpCx = cx;
+               float tmpCy = cy;
+               cx = (cx + cWidth) * 0.5f; // (left + right)/2
+               cy = (cy + cHeight) * 0.5f; // (top + bottom)/2
+               cWidth = cWidth - tmpCx; // right - left
+               cHeight = cHeight - tmpCy; // bottom - top
+       }
 
-               return box;
+       // convert coordinate to RATIO if PIXEL
+       if (mMeta.GetScoreCoordinate() == INFERENCE_BOX_COORDINATE_TYPE_PIXEL) {
+               cx /= mScaleW;
+               cy /= mScaleH;
+               cWidth /= mScaleW;
+               cHeight /= mScaleH;
        }
 
-       Box ObjectDecoder::decodeBoxWithAnchor(int idx, int anchorIdx, float score, cv::Rect2f& anchor)
-       {
-               // location coordinate of box, the output of decodeBox(), is relative between 0 ~ 1
-               Box box = decodeBox(anchorIdx, score, idx);
-
-               if (mMeta.GetBoxDecodeInfo().IsFixedAnchorSize()) {
-                       box.location.x += anchor.x;
-                       box.location.y += anchor.y;
-               } else {
-                       box.location.x = box.location.x / mMeta.GetBoxDecodeInfo().GetAnchorXscale() *
-                                                        anchor.width + anchor.x;
-                       box.location.y = box.location.y / mMeta.GetBoxDecodeInfo().GetAnchorYscale() *
-                                                        anchor.height + anchor.y;
-               }
+       Box box = { .index = mMeta.GetBoxLabelName().empty() ? label :
+                                                                                                                  mTensorBuffer.getValue<int>(mMeta.GetBoxLabelName(), idx),
+                               .score = score,
+                               .location = cv::Rect2f(cx, cy, cWidth, cHeight) };
 
-               if (mMeta.GetBoxDecodeInfo().IsExponentialBoxScale()) {
-                       box.location.width = anchor.width *
-                                               std::exp(box.location.width / mMeta.GetBoxDecodeInfo().GetAnchorWscale());
-                       box.location.height = anchor.height *
-                                               std::exp(box.location.height / mMeta.GetBoxDecodeInfo().GetAnchorHscale());
-               } else {
-                       box.location.width = anchor.width *
-                                               box.location.width / mMeta.GetBoxDecodeInfo().GetAnchorWscale();
-                       box.location.height = anchor.height *
-                                               box.location.height / mMeta.GetBoxDecodeInfo().GetAnchorHscale();
-               }
+       return box;
+}
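+
+// Worked example of the LEFTTOP -> CENTER conversion above, with
+// hypothetical values (left, top, right, bottom) = (10, 20, 30, 60):
+// cx = (10 + 30) * 0.5 = 20, cy = (20 + 60) * 0.5 = 40,
+// width = 30 - 10 = 20, height = 60 - 20 = 40.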
+
+Box ObjectDecoder::decodeBoxWithAnchor(int idx, int anchorIdx, float score, cv::Rect2f &anchor)
+{
+       // the box location produced by decodeBox() is in relative coordinates (0 ~ 1)
+       Box box = decodeBox(anchorIdx, score, idx);
+
+       if (mMeta.GetBoxDecodeInfo().IsFixedAnchorSize()) {
+               box.location.x += anchor.x;
+               box.location.y += anchor.y;
+       } else {
+               box.location.x = box.location.x / mMeta.GetBoxDecodeInfo().GetAnchorXscale() * anchor.width + anchor.x;
+               box.location.y = box.location.y / mMeta.GetBoxDecodeInfo().GetAnchorYscale() * anchor.height + anchor.y;
+       }
 
-               return box;
+       if (mMeta.GetBoxDecodeInfo().IsExponentialBoxScale()) {
+               box.location.width = anchor.width * std::exp(box.location.width / mMeta.GetBoxDecodeInfo().GetAnchorWscale());
+               box.location.height =
+                               anchor.height * std::exp(box.location.height / mMeta.GetBoxDecodeInfo().GetAnchorHscale());
+       } else {
+               box.location.width = anchor.width * box.location.width / mMeta.GetBoxDecodeInfo().GetAnchorWscale();
+               box.location.height = anchor.height * box.location.height / mMeta.GetBoxDecodeInfo().GetAnchorHscale();
        }
 
-       int ObjectDecoder::decode()
-       {
-               LOGI("ENTER");
-
-               BoxesList boxList;
-               Boxes boxes;
-               int ret = MEDIA_VISION_ERROR_NONE;
-               int totalIdx = mNumberOfOjects;
-
-               if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_YOLO_ANCHOR) {
-                       totalIdx = 0;
-                       for (auto& scale : mMeta.GetBoxDecodeInfo().GetCellScalesAll()) {
-                               totalIdx += (static_cast<int>(mScaleW) / scale
-                                                       * static_cast<int>(mScaleH) / scale)
-                                                       * mMeta.GetBoxDecodeInfo().GetCellNumScales()
-                                                       / mMeta.GetBoxDecodeInfo().GetCellOffsetScales();
-                       }
-                       boxList.reserve(mNumberOfOjects);
+       return box;
+}
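+
+// Sketch of the non-fixed-anchor decode above with hypothetical values:
+// box.location.x = 0.5, GetAnchorXscale() = 10, anchor.x = 0.3 and
+// anchor.width = 0.2 give x = 0.5 / 10 * 0.2 + 0.3 = 0.31; width and
+// height are scaled by the anchor size, through std::exp() when the
+// exponential box scale flag is set.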
+
+int ObjectDecoder::decode()
+{
+       LOGI("ENTER");
+
+       BoxesList boxList;
+       Boxes boxes;
+       int ret = MEDIA_VISION_ERROR_NONE;
+       int totalIdx = mNumberOfOjects;
+
+       if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_YOLO_ANCHOR) {
+               totalIdx = 0;
+               for (auto &scale : mMeta.GetBoxDecodeInfo().GetCellScalesAll()) {
+                       totalIdx += (static_cast<int>(mScaleW) / scale * static_cast<int>(mScaleH) / scale) *
+                                               mMeta.GetBoxDecodeInfo().GetCellNumScales() / mMeta.GetBoxDecodeInfo().GetCellOffsetScales();
                }
+               boxList.reserve(mNumberOfOjects);
+       }
 
-               for (int idx = 0; idx < totalIdx; ++idx) {
-                       if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_BYPASS) {
-                               float score = decodeScore(idx);
-                               if (score <= 0.0f)
-                                       continue;
+       for (int idx = 0; idx < totalIdx; ++idx) {
+               if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_BYPASS) {
+                       float score = decodeScore(idx);
+                       if (score <= 0.0f)
+                               continue;
 
-                               Box box = decodeBox(idx, score);
-                               mResultBoxes.push_back(box);
-                       } else if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR) {
-                               int anchorIdx = -1;
+                       Box box = decodeBox(idx, score);
+                       mResultBoxes.push_back(box);
+               } else if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR) {
+                       int anchorIdx = -1;
 
-                               boxes.clear();
-                               for (auto& anchorBox : mMeta.GetBoxDecodeInfo().GetAnchorBoxAll()) {
-                                       anchorIdx++;
+                       boxes.clear();
+                       for (auto &anchorBox : mMeta.GetBoxDecodeInfo().GetAnchorBoxAll()) {
+                               anchorIdx++;
 
-                                       float score = decodeScore(anchorIdx * mNumberOfOjects + idx);
+                               float score = decodeScore(anchorIdx * mNumberOfOjects + idx);
 
-                                       if (score <= 0.0f)
-                                               continue;
+                               if (score <= 0.0f)
+                                       continue;
 
-                                       Box box = decodeBoxWithAnchor(idx, anchorIdx, score, anchorBox);
-                                       boxes.push_back(box);
+                               Box box = decodeBoxWithAnchor(idx, anchorIdx, score, anchorBox);
+                               boxes.push_back(box);
+                       }
+                       boxList.push_back(boxes);
+               } else { // INFERENCE_BOX_DECODING_TYPE_YOLO_ANCHOR
+                       int cellIdx = idx * mBoxOffset;
+                       for (int j = 0; j < mMeta.GetBoxDecodeInfo().GetCellOffsetScales(); ++j) {
+                               float score = decodeScore(cellIdx + (mNumberOfOjects + 5) * j + 4);
+                               if (score <= 0.0f) {
+                                       continue;
                                }
-                               boxList.push_back(boxes);
-                       } else { // INFERENCE_BOX_DECODING_TYPE_YOLO_ANCHOR
-                               int cellIdx = idx * mBoxOffset;
-                               for (int j = 0; j < mMeta.GetBoxDecodeInfo().GetCellOffsetScales(); ++j) {
-                                       float score = decodeScore(cellIdx + (mNumberOfOjects + 5) * j + 4);
-                                       if (score <= 0.0f) {
-                                               continue;
-                                       }
-                                       LOGI("score[%d]: %.2f", j, score);
-                                       // need to check the score
-                                       float topObjScore = 0.0f;
-                                       int topObjIdx = 0;
-                                       for (int objIdx_ = 0; objIdx_ < mNumberOfOjects; ++objIdx_) {
-                                               float objScore_ = decodeScore(cellIdx + (mNumberOfOjects + 5) * j + 5 + objIdx_);
-                                               if (objScore_ > topObjScore) {
-                                                       topObjScore = objScore_;
-                                                       topObjIdx = objIdx_;
-                                               }
+                               LOGI("score[%d]: %.2f", j, score);
+                               // pick the class with the highest score for this cell
+                               float topObjScore = 0.0f;
+                               int topObjIdx = 0;
+                               for (int objIdx_ = 0; objIdx_ < mNumberOfOjects; ++objIdx_) {
+                                       float objScore_ = decodeScore(cellIdx + (mNumberOfOjects + 5) * j + 5 + objIdx_);
+                                       if (objScore_ > topObjScore) {
+                                               topObjScore = objScore_;
+                                               topObjIdx = objIdx_;
                                        }
+                               }
 
-                                       if (topObjScore <  mMeta.GetScoreThreshold())
-                                               continue;
+                               if (topObjScore < mMeta.GetScoreThreshold())
+                                       continue;
 
-                                       Box box = decodeBox(idx, topObjScore, topObjIdx, (mNumberOfOjects + 5) * j);
-                                       boxes.push_back(box);
-                               }
+                               Box box = decodeBox(idx, topObjScore, topObjIdx, (mNumberOfOjects + 5) * j);
+                               boxes.push_back(box);
                        }
                }
+       }
 
-               if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR ||
-                       mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_YOLO_ANCHOR)
-                       boxList.push_back(boxes);
-
-               if (!boxList.empty()) {
-                       PostProcess postProc;
-                       ret = postProc.Nms(boxList,
-                                       mMeta.GetBoxDecodeInfo().GetNmsMode(),
-                                       mMeta.GetBoxDecodeInfo().GetNmsIouThreshold(),
-                                       mResultBoxes);
-                       if (ret != MEDIA_VISION_ERROR_NONE) {
-                               LOGE("Fail to non-maximum suppression[%d]", ret);
-                               return ret;
-                       }
-               } else {
-                       LOGW("boxlist empty!");
+       if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR ||
+               mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_YOLO_ANCHOR)
+               boxList.push_back(boxes);
+
+       if (!boxList.empty()) {
+               PostProcess postProc;
+               ret = postProc.Nms(boxList, mMeta.GetBoxDecodeInfo().GetNmsMode(),
+                                                  mMeta.GetBoxDecodeInfo().GetNmsIouThreshold(), mResultBoxes);
+               if (ret != MEDIA_VISION_ERROR_NONE) {
+                       LOGE("Fail to non-maximum suppression[%d]", ret);
+                       return ret;
                }
+       } else {
+               LOGW("boxlist empty!");
+       }
 
-               LOGI("LEAVE");
+       LOGI("LEAVE");
 
-               return ret;
-       }
+       return ret;
+}
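+
+// The YOLO branch above assumes the usual per-cell tensor layout of
+// (x, y, w, h, objectness, per-class scores...), hence the stride of
+// (mNumberOfOjects + 5) per candidate: offset +4 reads the objectness
+// score and offsets +5 onward read the class scores.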
 
-       Boxes& ObjectDecoder::getObjectAll()
-       {
-               return mResultBoxes;
-       }
+Boxes &ObjectDecoder::getObjectAll()
+{
+       return mResultBoxes;
+}
 }
 }
old mode 100755 (executable)
new mode 100644 (file)
index 12a2da3..4f44bb2
@@ -31,522 +31,505 @@ namespace mediavision
 {
 namespace inference
 {
-       OutputMetadata::OutputMetadata() :
-                       parsed(false),
-                       score(),
-                       box(),
-                       landmark(),
-                       offsetVec()
-       {
-               // shape_type
-               mSupportedShapeType.insert({"NCHW", INFERENCE_TENSOR_SHAPE_NCHW});
-               mSupportedShapeType.insert({"NHWC", INFERENCE_TENSOR_SHAPE_NHWC});
-       }
-
-       int OutputMetadata::ParseScore(JsonObject *root)
-       {
-               if (!json_object_has_member(root, "score")) {
-                       LOGI("No score outputmetadata");
-                       return MEDIA_VISION_ERROR_NONE;
-               }
-
-               return score.ParseScore(root);
-       }
-
-       void DecodeInfo::AddAnchorBox(cv::Rect2f& anchor)
-       {
-               anchorBoxes.push_back(anchor);
-       }
-
-       void DecodeInfo::ClearAnchorBox()
-       {
-               anchorBoxes.clear();
-       }
+OutputMetadata::OutputMetadata() : parsed(false), score(), box(), landmark(), offsetVec()
+{
+       // shape_type
+       mSupportedShapeType.insert({ "NCHW", INFERENCE_TENSOR_SHAPE_NCHW });
+       mSupportedShapeType.insert({ "NHWC", INFERENCE_TENSOR_SHAPE_NHWC });
+}
 
-       std::vector<cv::Rect2f>& DecodeInfo::GetAnchorBoxAll()
-       {
-               return anchorBoxes;
+int OutputMetadata::ParseScore(JsonObject *root)
+{
+       if (!json_object_has_member(root, "score")) {
+               LOGI("No score outputmetadata");
+               return MEDIA_VISION_ERROR_NONE;
        }
 
-       bool DecodeInfo::IsAnchorBoxEmpty()
-       {
-               return anchorBoxes.empty();
-       }
+       return score.ParseScore(root);
+}
 
-       int OutputMetadata::ParseBox(JsonObject *root)
-       {
-               if (!json_object_has_member(root, "box")) {
-                       LOGE("No box outputmetadata");
-                       return MEDIA_VISION_ERROR_NONE;
-               }
+void DecodeInfo::AddAnchorBox(cv::Rect2f &anchor)
+{
+       anchorBoxes.push_back(anchor);
+}
 
-               return box.ParseBox(root);
-       }
+void DecodeInfo::ClearAnchorBox()
+{
+       anchorBoxes.clear();
+}
 
-       int DecodeInfo::ParseAnchorParam(JsonObject *root)
-       {
-               JsonObject *object = json_object_get_object_member(root, "anchor") ;
-
-               this->anchorParam.mode = static_cast<int>(json_object_get_int_member(object, "mode"));
-
-               this->anchorParam.numLayers = static_cast<int>(json_object_get_int_member(object, "num_layers"));
-               this->anchorParam.minScale = static_cast<float>(json_object_get_double_member(object, "min_scale"));
-               this->anchorParam.maxScale = static_cast<float>(json_object_get_double_member(object, "max_scale"));
-               this->anchorParam.inputSizeHeight = static_cast<int>(json_object_get_int_member(object, "input_size_height"));
-               this->anchorParam.inputSizeWidth = static_cast<int>(json_object_get_int_member(object, "input_size_width"));
-               this->anchorParam.anchorOffsetX = static_cast<float>(json_object_get_double_member(object, "anchor_offset_x"));
-               this->anchorParam.anchorOffsetY = static_cast<float>(json_object_get_double_member(object, "anchor_offset_y"));
-               this->anchorParam.isReduceBoxedInLowestLayer =
-                                                                               static_cast<bool>(json_object_get_boolean_member(object, "reduce_boxed_in_lowest_layer"));
-               this->anchorParam.interpolatedScaleAspectRatio =
-                                                                               static_cast<float>(json_object_get_double_member(object, "interpolated_scale_aspect_ratio"));
-               this->anchorParam.isFixedAnchorSize =
-                                                                               static_cast<bool>(json_object_get_boolean_member(object, "fixed_anchor_size"));
-               this->anchorParam.isExponentialBoxScale =
-                                                                               static_cast<bool>(json_object_get_boolean_member(object, "exponential_box_scale"));
-
-               this->anchorParam.xScale = static_cast<float>(json_object_get_double_member(object, "x_scale"));
-               this->anchorParam.yScale = static_cast<float>(json_object_get_double_member(object, "y_scale"));
-               this->anchorParam.wScale = static_cast<float>(json_object_get_double_member(object, "w_scale"));
-               this->anchorParam.hScale = static_cast<float>(json_object_get_double_member(object, "h_scale"));
-
-               JsonArray * array = json_object_get_array_member(object, "strides");
-               unsigned int elements2 = json_array_get_length(array);
-               for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
-                       auto stride = static_cast<int>(json_array_get_int_element(array, elem2));
-                       this->anchorParam.strides.push_back(stride);
-                       LOGI("stride: %d", stride);
-               }
+std::vector<cv::Rect2f> &DecodeInfo::GetAnchorBoxAll()
+{
+       return anchorBoxes;
+}
 
-               array = json_object_get_array_member(object, "aspect_ratios");
-               elements2 = json_array_get_length(array);
-               for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
-                       auto aspectRatio = static_cast<float>(json_array_get_double_element(array, elem2));
-                       this->anchorParam.aspectRatios.push_back(aspectRatio);
-                       LOGI("aspectRatio: %.4f", aspectRatio);
-               }
+bool DecodeInfo::IsAnchorBoxEmpty()
+{
+       return anchorBoxes.empty();
+}
 
+int OutputMetadata::ParseBox(JsonObject *root)
+{
+       if (!json_object_has_member(root, "box")) {
+               LOGE("No box outputmetadata");
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       int DecodeInfo::ParseCellParam(JsonObject *root)
-       {
-               JsonObject *object = json_object_get_object_member(root, "cell") ;
-
-               this->cellParam.numScales = static_cast<int>(json_object_get_int_member(object, "num_scales"));
+       return box.ParseBox(root);
+}
 
-               JsonArray * array = json_object_get_array_member(object, "scales");
-               unsigned int elements2 = json_array_get_length(array);
-               for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
-                       auto scale = static_cast<int>(json_array_get_int_element(array, elem2));
-                       this->cellParam.scales.push_back(scale);
-                       LOGI("scale: %d", scale);
-               }
+int DecodeInfo::ParseAnchorParam(JsonObject *root)
+{
+       JsonObject *object = json_object_get_object_member(root, "anchor");
+
+       this->anchorParam.mode = static_cast<int>(json_object_get_int_member(object, "mode"));
+
+       this->anchorParam.numLayers = static_cast<int>(json_object_get_int_member(object, "num_layers"));
+       this->anchorParam.minScale = static_cast<float>(json_object_get_double_member(object, "min_scale"));
+       this->anchorParam.maxScale = static_cast<float>(json_object_get_double_member(object, "max_scale"));
+       this->anchorParam.inputSizeHeight = static_cast<int>(json_object_get_int_member(object, "input_size_height"));
+       this->anchorParam.inputSizeWidth = static_cast<int>(json_object_get_int_member(object, "input_size_width"));
+       this->anchorParam.anchorOffsetX = static_cast<float>(json_object_get_double_member(object, "anchor_offset_x"));
+       this->anchorParam.anchorOffsetY = static_cast<float>(json_object_get_double_member(object, "anchor_offset_y"));
+       this->anchorParam.isReduceBoxedInLowestLayer =
+                       static_cast<bool>(json_object_get_boolean_member(object, "reduce_boxed_in_lowest_layer"));
+       this->anchorParam.interpolatedScaleAspectRatio =
+                       static_cast<float>(json_object_get_double_member(object, "interpolated_scale_aspect_ratio"));
+       this->anchorParam.isFixedAnchorSize =
+                       static_cast<bool>(json_object_get_boolean_member(object, "fixed_anchor_size"));
+       this->anchorParam.isExponentialBoxScale =
+                       static_cast<bool>(json_object_get_boolean_member(object, "exponential_box_scale"));
+
+       this->anchorParam.xScale = static_cast<float>(json_object_get_double_member(object, "x_scale"));
+       this->anchorParam.yScale = static_cast<float>(json_object_get_double_member(object, "y_scale"));
+       this->anchorParam.wScale = static_cast<float>(json_object_get_double_member(object, "w_scale"));
+       this->anchorParam.hScale = static_cast<float>(json_object_get_double_member(object, "h_scale"));
+
+       JsonArray *array = json_object_get_array_member(object, "strides");
+       unsigned int elements2 = json_array_get_length(array);
+       for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
+               auto stride = static_cast<int>(json_array_get_int_element(array, elem2));
+               this->anchorParam.strides.push_back(stride);
+               LOGI("stride: %d", stride);
+       }
+
+       array = json_object_get_array_member(object, "aspect_ratios");
+       elements2 = json_array_get_length(array);
+       for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
+               auto aspectRatio = static_cast<float>(json_array_get_double_element(array, elem2));
+               this->anchorParam.aspectRatios.push_back(aspectRatio);
+               LOGI("aspectRatio: %.4f", aspectRatio);
+       }
+
+       return MEDIA_VISION_ERROR_NONE;
+}
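+
+// A hypothetical "anchor" section matching the keys parsed above (values
+// are illustrative only):
+//
+// "anchor" : { "mode" : 1, "num_layers" : 4, "min_scale" : 0.15,
+//             "max_scale" : 0.75, "input_size_height" : 300,
+//             "input_size_width" : 300, "anchor_offset_x" : 0.5,
+//             "anchor_offset_y" : 0.5, "strides" : [ 16, 32, 64, 128 ],
+//             "aspect_ratios" : [ 1.0, 2.0, 0.5 ], "x_scale" : 10.0,
+//             "y_scale" : 10.0, "w_scale" : 5.0, "h_scale" : 5.0, ... }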
+
+int DecodeInfo::ParseCellParam(JsonObject *root)
+{
+       JsonObject *object = json_object_get_object_member(root, "cell");
 
-               this->cellParam.offsetScales = static_cast<int>(json_object_get_int_member(object, "offset_scales"));
-               try {
-                       this->cellParam.type = GetSupportedType(object, "type", this->cellParam.supportedCellType);
-               } catch (const std::exception& e) {
-                       LOGE("Invalid %s", e.what());
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
+       this->cellParam.numScales = static_cast<int>(json_object_get_int_member(object, "num_scales"));
 
-               return MEDIA_VISION_ERROR_NONE;
+       JsonArray *array = json_object_get_array_member(object, "scales");
+       unsigned int elements2 = json_array_get_length(array);
+       for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
+               auto scale = static_cast<int>(json_array_get_int_element(array, elem2));
+               this->cellParam.scales.push_back(scale);
+               LOGI("scale: %d", scale);
        }
 
-       std::vector<int>& DecodeInfo::GetCellScalesAll()
-       {
-               return this->cellParam.scales;
+       this->cellParam.offsetScales = static_cast<int>(json_object_get_int_member(object, "offset_scales"));
+       try {
+               this->cellParam.type = GetSupportedType(object, "type", this->cellParam.supportedCellType);
+       } catch (const std::exception &e) {
+               LOGE("Invalid %s", e.what());
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       int DecodeInfo::GetCellNumScales()
-       {
-               return this->cellParam.numScales;
-       }
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-       int DecodeInfo::GetCellOffsetScales()
-       {
-               return this->cellParam.offsetScales;
-       }
+std::vector<int> &DecodeInfo::GetCellScalesAll()
+{
+       return this->cellParam.scales;
+}
 
-       inference_score_type_e DecodeInfo::GetCellType()
-       {
-               return this->cellParam.type;
-       }
+int DecodeInfo::GetCellNumScales()
+{
+       return this->cellParam.numScales;
+}
 
-       float DecodeInfo::CalculateScale(float min, float max, int index, int maxStride)
-       {
-               return min + (max - min) * 1.0 * index / (maxStride - 1.0f);
-       }
+int DecodeInfo::GetCellOffsetScales()
+{
+       return this->cellParam.offsetScales;
+}
 
-       bool DecodeInfo::IsFixedAnchorSize()
-       {
-               return this->anchorParam.isFixedAnchorSize;;
-       }
+inference_score_type_e DecodeInfo::GetCellType()
+{
+       return this->cellParam.type;
+}
 
-       bool DecodeInfo::IsExponentialBoxScale()
-       {
-               return this->anchorParam.isExponentialBoxScale;
-       }
+float DecodeInfo::CalculateScale(float min, float max, int index, int maxStride)
+{
+       return min + (max - min) * 1.0 * index / (maxStride - 1.0f);
+}
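+
+// CalculateScale() is plain linear interpolation; with hypothetical inputs
+// min = 0.2, max = 0.95, index = 2, maxStride = 4 it returns
+// 0.2 + 0.75 * 2 / 3 = 0.7.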
 
-       float DecodeInfo::GetAnchorXscale()
-       {
-               return this->anchorParam.xScale;
-       }
+bool DecodeInfo::IsFixedAnchorSize()
+{
+       return this->anchorParam.isFixedAnchorSize;
+}
 
-       float DecodeInfo::GetAnchorYscale()
-       {
-               return this->anchorParam.yScale;
-       }
+bool DecodeInfo::IsExponentialBoxScale()
+{
+       return this->anchorParam.isExponentialBoxScale;
+}
 
-       float DecodeInfo::GetAnchorWscale()
-       {
-               return this->anchorParam.wScale;
-       }
+float DecodeInfo::GetAnchorXscale()
+{
+       return this->anchorParam.xScale;
+}
 
-       float DecodeInfo::GetAnchorHscale()
-       {
-               return this->anchorParam.hScale;
-       }
+float DecodeInfo::GetAnchorYscale()
+{
+       return this->anchorParam.yScale;
+}
 
-       int DecodeInfo::GenerateAnchor()
-       {
-               if (this->anchorParam.strides.empty() ||
-                       this->anchorParam.aspectRatios.empty()) {
-                       LOGE("Invalid anchor parameters");
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
+float DecodeInfo::GetAnchorWscale()
+{
+       return this->anchorParam.wScale;
+}
 
-               int layerId = 0;
-               this->ClearAnchorBox();
-               while (layerId < this->anchorParam.numLayers) {
-                       std::vector<float> anchorHeight;
-                       std::vector<float> anchorWidth;
-                       std::vector<float> aspectRatios;
-                       std::vector<float> scales;
-
-                       int lastSameStrideLayer = layerId;
-                       std::vector<float>::iterator iter1, iter2;
-                       while ((lastSameStrideLayer < this->anchorParam.numLayers) &&
-                               (this->anchorParam.strides[lastSameStrideLayer] ==
-                                this->anchorParam.strides[layerId])) {
-                               const float scale = CalculateScale(this->anchorParam.minScale,
-                                                                               this->anchorParam.maxScale,
-                                                                               lastSameStrideLayer,
-                                                                               this->anchorParam.strides.size());
-
-                               if (lastSameStrideLayer == 0 &&
-                                       this->anchorParam.isReduceBoxedInLowestLayer) {
-                                       aspectRatios.push_back(1.0);
-                                       aspectRatios.push_back(2.0);
-                                       aspectRatios.push_back(0.5);
-                                       scales.push_back(0.1);
-                                       scales.push_back(scale);
+float DecodeInfo::GetAnchorHscale()
+{
+       return this->anchorParam.hScale;
+}
+
+int DecodeInfo::GenerateAnchor()
+{
+       if (this->anchorParam.strides.empty() || this->anchorParam.aspectRatios.empty()) {
+               LOGE("Invalid anchor parameters");
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+       }
+
+       int layerId = 0;
+       this->ClearAnchorBox();
+       while (layerId < this->anchorParam.numLayers) {
+               std::vector<float> anchorHeight;
+               std::vector<float> anchorWidth;
+               std::vector<float> aspectRatios;
+               std::vector<float> scales;
+
+               int lastSameStrideLayer = layerId;
+               std::vector<float>::iterator iter1, iter2;
+               while ((lastSameStrideLayer < this->anchorParam.numLayers) &&
+                          (this->anchorParam.strides[lastSameStrideLayer] == this->anchorParam.strides[layerId])) {
+                       const float scale = CalculateScale(this->anchorParam.minScale, this->anchorParam.maxScale,
+                                                                                          lastSameStrideLayer, this->anchorParam.strides.size());
+
+                       if (lastSameStrideLayer == 0 && this->anchorParam.isReduceBoxedInLowestLayer) {
+                               aspectRatios.push_back(1.0);
+                               aspectRatios.push_back(2.0);
+                               aspectRatios.push_back(0.5);
+                               scales.push_back(0.1);
+                               scales.push_back(scale);
+                               scales.push_back(scale);
+                       } else {
+                               for (iter1 = this->anchorParam.aspectRatios.begin(); iter1 != this->anchorParam.aspectRatios.end();
+                                        ++iter1) {
+                                       aspectRatios.push_back((*iter1));
                                        scales.push_back(scale);
-                               } else {
-                                       for (iter1 = this->anchorParam.aspectRatios.begin();
-                                               iter1 != this->anchorParam.aspectRatios.end();
-                                               ++iter1) {
-                                               aspectRatios.push_back((*iter1));
-                                               scales.push_back(scale);
-                                       }
-                                       if (this->anchorParam.interpolatedScaleAspectRatio > 0.0f) {
-                                               const float scaleNext =
-                                                       lastSameStrideLayer == static_cast<int>(this->anchorParam.strides.size()) -1
-                                                                               ? 1.0f
-                                                                               : CalculateScale(this->anchorParam.minScale,
-                                                                                               this->anchorParam.maxScale,
-                                                                                               lastSameStrideLayer + 1,
-                                                                                               this->anchorParam.strides.size());
-                                               scales.push_back(std::sqrt(scale * scaleNext));
-                                               aspectRatios.push_back(this->anchorParam.interpolatedScaleAspectRatio);
-                                       }
                                }
-                               lastSameStrideLayer++;
-                       }
-
-                       for (iter1 = aspectRatios.begin(), iter2 = scales.begin();
-                               (iter1 != aspectRatios.end() && iter2 != scales.end());
-                               ++iter1, ++iter2) {
-                               const float ratioSqrts = std::sqrt((*iter1));
-                               anchorHeight.push_back((*iter2) / ratioSqrts);
-                               anchorWidth.push_back((*iter2) * ratioSqrts);
-                       }
-
-                       const int stride = this->anchorParam.strides[layerId];
-                       int featureMapHeight = std::ceil(1.0f * this->anchorParam.inputSizeHeight / stride);
-                       int featureMapWidth = std::ceil(1.0f * this->anchorParam.inputSizeWidth / stride);
-
-                       for (int y = 0; y < featureMapHeight; ++y) {
-                               for (int x = 0; x < featureMapWidth; ++x) {
-                                       for (int anchorId = 0; anchorId < (int)anchorHeight.size(); ++anchorId) {
-                                               cv::Rect2f anchor = {
-                                                       cv::Point2f {
-                                                               (x + this->anchorParam.anchorOffsetX) * 1.0f / featureMapWidth,
-                                                               (y + this->anchorParam.anchorOffsetY) * 1.0f / featureMapHeight
-                                                       },
-                                                       this->anchorParam.isFixedAnchorSize ?
-                                                               cv::Size2f {1.0f, 1.0f} :
-                                                               cv::Size2f {anchorWidth[anchorId], anchorWidth[anchorId]}
-                                               };
-                                               this->AddAnchorBox(anchor);
-                                       }
+                               if (this->anchorParam.interpolatedScaleAspectRatio > 0.0f) {
+                                       const float scaleNext =
+                                                       lastSameStrideLayer == static_cast<int>(this->anchorParam.strides.size()) - 1 ?
+                                                                       1.0f :
+                                                                       CalculateScale(this->anchorParam.minScale, this->anchorParam.maxScale,
+                                                                                                  lastSameStrideLayer + 1, this->anchorParam.strides.size());
+                                       scales.push_back(std::sqrt(scale * scaleNext));
+                                       aspectRatios.push_back(this->anchorParam.interpolatedScaleAspectRatio);
                                }
                        }
-                       layerId = lastSameStrideLayer;
+                       lastSameStrideLayer++;
                }
 
-               if (this->IsAnchorBoxEmpty()) {
-                       LOGE("Anchor boxes are empty");
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
+               for (iter1 = aspectRatios.begin(), iter2 = scales.begin();
+                        (iter1 != aspectRatios.end() && iter2 != scales.end()); ++iter1, ++iter2) {
+                       const float ratioSqrts = std::sqrt((*iter1));
+                       anchorHeight.push_back((*iter2) / ratioSqrts);
+                       anchorWidth.push_back((*iter2) * ratioSqrts);
                }
 
-               return MEDIA_VISION_ERROR_NONE;
-       }
-
-       int DecodeInfo::ParseNms(JsonObject *root)
-       {
-               if (!json_object_has_member(root, "nms")) {
-                       LOGI("nms is empty. skip it");
-                       return MEDIA_VISION_ERROR_NONE;
+               const int stride = this->anchorParam.strides[layerId];
+               int featureMapHeight = std::ceil(1.0f * this->anchorParam.inputSizeHeight / stride);
+               int featureMapWidth = std::ceil(1.0f * this->anchorParam.inputSizeWidth / stride);
+
+               for (int y = 0; y < featureMapHeight; ++y) {
+                       for (int x = 0; x < featureMapWidth; ++x) {
+                               for (int anchorId = 0; anchorId < (int) anchorHeight.size(); ++anchorId) {
+                                       cv::Rect2f anchor = { cv::Point2f { (x + this->anchorParam.anchorOffsetX) * 1.0f / featureMapWidth,
+                                                                                                               (y + this->anchorParam.anchorOffsetY) * 1.0f /
+                                                                                                                               featureMapHeight },
+                                                                                 this->anchorParam.isFixedAnchorSize ?
+                                                                                                 cv::Size2f { 1.0f, 1.0f } :
+                                                                                                cv::Size2f { anchorWidth[anchorId], anchorHeight[anchorId] } };
+                                       this->AddAnchorBox(anchor);
+                               }
+                       }
                }
+               layerId = lastSameStrideLayer;
+       }
 
-               JsonObject *object = json_object_get_object_member(root, "nms");
-               try {
-                       this->nmsParam.mode = GetSupportedType(object, "mode", this->nmsParam.supportedBoxNmsTypes);
-               } catch (const std::exception& e) {
-                       LOGE("Invalid %s", e.what());
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
+       if (this->IsAnchorBoxEmpty()) {
+               LOGE("Anchor boxes are empty");
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+       }
 
-               this->nmsParam.iouThreshold = static_cast<float>(json_object_get_double_member(object,"iou_threshold"));
+       return MEDIA_VISION_ERROR_NONE;
+}
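+
+// Per stride, the loops above emit featureMapWidth * featureMapHeight *
+// anchorHeight.size() anchors; e.g. (hypothetical) a 300x300 input with
+// stride 100 and three (aspect ratio, scale) pairs gives 3 * 3 * 3 = 27.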
 
+int DecodeInfo::ParseNms(JsonObject *root)
+{
+       if (!json_object_has_member(root, "nms")) {
+               LOGI("nms is empty. skip it");
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       int DecodeInfo::GetNmsMode()
-       {
-               return this->nmsParam.mode;
+       JsonObject *object = json_object_get_object_member(root, "nms");
+       try {
+               this->nmsParam.mode = GetSupportedType(object, "mode", this->nmsParam.supportedBoxNmsTypes);
+       } catch (const std::exception &e) {
+               LOGE("Invalid %s", e.what());
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       float DecodeInfo::GetNmsIouThreshold()
-       {
-               return this->nmsParam.iouThreshold;
-       }
+       this->nmsParam.iouThreshold = static_cast<float>(json_object_get_double_member(object, "iou_threshold"));
 
-       int DecodeInfo::ParseRotate(JsonObject *root)
-       {
-               if (!json_object_has_member(root, "rotate")) {
-                       LOGI("rotate is empty. skip it");
-                       return MEDIA_VISION_ERROR_NONE;
-               }
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+int DecodeInfo::GetNmsMode()
+{
+       return this->nmsParam.mode;
+}
 
-               JsonObject *object = json_object_get_object_member(root, "rotate");
-               this->rotParam.baseAngle = static_cast<float>(json_object_get_double_member(object, "base_angle"));
-               this->rotParam.startPointIndex = static_cast<int>(json_object_get_int_member(object, "start_point_index"));
-               this->rotParam.endPointIndex = static_cast<int>(json_object_get_int_member(object, "end_point_index"));
+float DecodeInfo::GetNmsIouThreshold()
+{
+       return this->nmsParam.iouThreshold;
+}
 
+int DecodeInfo::ParseRotate(JsonObject *root)
+{
+       if (!json_object_has_member(root, "rotate")) {
+               LOGI("rotate is empty. skip it");
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       int DecodeInfo::GetRotStartPointIndex()
-       {
-               return this->rotParam.startPointIndex;
-       }
+       JsonObject *object = json_object_get_object_member(root, "rotate");
+       this->rotParam.baseAngle = static_cast<float>(json_object_get_double_member(object, "base_angle"));
+       this->rotParam.startPointIndex = static_cast<int>(json_object_get_int_member(object, "start_point_index"));
+       this->rotParam.endPointIndex = static_cast<int>(json_object_get_int_member(object, "end_point_index"));
 
-       int DecodeInfo::GetRotEndPointIndex()
-       {
-               return this->rotParam.endPointIndex;
-       }
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-       float DecodeInfo::GetBaseAngle()
-       {
-               return this->rotParam.baseAngle;
-       }
+int DecodeInfo::GetRotStartPointIndex()
+{
+       return this->rotParam.startPointIndex;
+}
 
-       int DecodeInfo::GetRoiMode()
-       {
-               return this->roiOptParam.mode;
-       }
+int DecodeInfo::GetRotEndPointIndex()
+{
+       return this->rotParam.endPointIndex;
+}
 
-       int DecodeInfo::GetRoiStartPointIndex()
-       {
-               return this->roiOptParam.startPointIndex;
-       }
+float DecodeInfo::GetBaseAngle()
+{
+       return this->rotParam.baseAngle;
+}
 
-       int DecodeInfo::GetRoiEndPointIndex()
-       {
-               return this->roiOptParam.endPointIndex;
-       }
+int DecodeInfo::GetRoiMode()
+{
+       return this->roiOptParam.mode;
+}
 
-       int DecodeInfo::GetRoiCenterPointIndex()
-       {
-               return this->roiOptParam.centerPointIndex;
-       }
+int DecodeInfo::GetRoiStartPointIndex()
+{
+       return this->roiOptParam.startPointIndex;
+}
 
-       float DecodeInfo::GetShiftX()
-       {
-               return this->roiOptParam.shiftX;
-       }
+int DecodeInfo::GetRoiEndPointIndex()
+{
+       return this->roiOptParam.endPointIndex;
+}
 
-       float DecodeInfo::GetShiftY()
-       {
-               return this->roiOptParam.shiftY;
-       }
+int DecodeInfo::GetRoiCenterPointIndex()
+{
+       return this->roiOptParam.centerPointIndex;
+}
 
-       float DecodeInfo::GetScaleX()
-       {
-               return this->roiOptParam.scaleX;
-       }
+float DecodeInfo::GetShiftX()
+{
+       return this->roiOptParam.shiftX;
+}
 
-       float DecodeInfo::GetScaleY()
-       {
-               return this->roiOptParam.scaleY;
-       }
+float DecodeInfo::GetShiftY()
+{
+       return this->roiOptParam.shiftY;
+}
 
-       int DecodeInfo::ParseRoiOption(JsonObject *root)
-       {
-               if (!json_object_has_member(root, "roi")) {
-                       LOGI("roi is empty. skip it");
-                       return MEDIA_VISION_ERROR_NONE;
-               }
+float DecodeInfo::GetScaleX()
+{
+       return this->roiOptParam.scaleX;
+}
 
-               JsonObject *object = json_object_get_object_member(root, "roi");
-               this->roiOptParam.startPointIndex = static_cast<int>(json_object_get_int_member(object, "start_point_index"));
-               this->roiOptParam.endPointIndex = static_cast<int>(json_object_get_int_member(object, "end_point_index"));
-               this->roiOptParam.centerPointIndex = static_cast<int>(json_object_get_int_member(object, "center_point_index"));
-               this->roiOptParam.shiftX = static_cast<float>(json_object_get_double_member(object, "shift_x"));
-               this->roiOptParam.shiftY = static_cast<float>(json_object_get_double_member(object, "shift_y"));
-               this->roiOptParam.scaleX = static_cast<float>(json_object_get_double_member(object, "scale_x"));
-               this->roiOptParam.scaleY = static_cast<float>(json_object_get_double_member(object, "scale_y"));
-               this->roiOptParam.mode = static_cast<int>(json_object_get_int_member(object, "scale_mode"));
+float DecodeInfo::GetScaleY()
+{
+       return this->roiOptParam.scaleY;
+}
 
+int DecodeInfo::ParseRoiOption(JsonObject *root)
+{
+       if (!json_object_has_member(root, "roi")) {
+               LOGI("roi is empty. skip it");
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       int OutputMetadata::ParseLandmark(JsonObject *root)
-       {
-               LOGI("ENTER");
+       JsonObject *object = json_object_get_object_member(root, "roi");
+       this->roiOptParam.startPointIndex = static_cast<int>(json_object_get_int_member(object, "start_point_index"));
+       this->roiOptParam.endPointIndex = static_cast<int>(json_object_get_int_member(object, "end_point_index"));
+       this->roiOptParam.centerPointIndex = static_cast<int>(json_object_get_int_member(object, "center_point_index"));
+       this->roiOptParam.shiftX = static_cast<float>(json_object_get_double_member(object, "shift_x"));
+       this->roiOptParam.shiftY = static_cast<float>(json_object_get_double_member(object, "shift_y"));
+       this->roiOptParam.scaleX = static_cast<float>(json_object_get_double_member(object, "scale_x"));
+       this->roiOptParam.scaleY = static_cast<float>(json_object_get_double_member(object, "scale_y"));
+       this->roiOptParam.mode = static_cast<int>(json_object_get_int_member(object, "scale_mode"));
 
-               if (!json_object_has_member(root, "landmark")) {
-                       LOGI("No landmark outputmetadata");
-                       LOGI("LEAVE");
-                       return MEDIA_VISION_ERROR_NONE;
-               }
+       return MEDIA_VISION_ERROR_NONE;
+}
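
For reference, ParseRoiOption() above reads the following keys from the "roi" block; a hypothetical metadata snippet with placeholder values (the key names are taken from the parser above, the values are illustrative only):

    "roi" : {
        "start_point_index" : 0,
        "end_point_index" : 1,
        "center_point_index" : 2,
        "shift_x" : 0.0,
        "shift_y" : 0.0,
        "scale_x" : 1.0,
        "scale_y" : 1.0,
        "scale_mode" : 0
    }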
 
-               landmark.ParseLandmark(root);
+int OutputMetadata::ParseLandmark(JsonObject *root)
+{
+       LOGI("ENTER");
 
+       if (!json_object_has_member(root, "landmark")) {
+               LOGI("No landmark outputmetadata");
                LOGI("LEAVE");
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       int OutputMetadata::ParseOffset(JsonObject *root)
-       {
-               LOGI("ENTER");
+       landmark.ParseLandmark(root);
 
-               if (!json_object_has_member(root, "offset")) {
-                       LOGI("No offset outputmetadata");
-                       LOGI("LEAVE");
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
+       LOGI("LEAVE");
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               offsetVec.ParseOffset(root, mSupportedShapeType);
+int OutputMetadata::ParseOffset(JsonObject *root)
+{
+       LOGI("ENTER");
 
+       if (!json_object_has_member(root, "offset")) {
+               LOGI("No offset outputmetadata");
                LOGI("LEAVE");
-               return MEDIA_VISION_ERROR_NONE;
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       int OutputMetadata::Parse(JsonObject *root)
-       {
-               LOGI("ENTER");
+       offsetVec.ParseOffset(root, mSupportedShapeType);
 
-               int ret = ParseScore(root);
-               if (ret != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Fail to GetScore[%d]", ret);
-                       return ret;
-               }
+       LOGI("LEAVE");
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               ret = ParseBox(root);
-               if (ret != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Fail to GetBox[%d]", ret);
-                       return ret;
-               }
+int OutputMetadata::Parse(JsonObject *root)
+{
+       LOGI("ENTER");
 
-               if (!box.GetName().empty()) {
-                       // addtional parsing is required according to decoding type
-                       if (box.GetDecodingType() == INFERENCE_BOX_DECODING_TYPE_BYPASS) {
+       int ret = ParseScore(root);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to GetScore[%d]", ret);
+               return ret;
+       }
 
-                               ret = box.ParseLabel(root);
-                               if (ret != MEDIA_VISION_ERROR_NONE) {
-                                       LOGE("Fail to GetLabel[%d]", ret);
-                                       return ret;
-                               }
+       ret = ParseBox(root);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to GetBox[%d]", ret);
+               return ret;
+       }
 
-                               ret = box.ParseNumber(root);
-                               if (ret != MEDIA_VISION_ERROR_NONE) {
-                                       LOGE("Fail to GetNumber[%d]", ret);
-                                       return ret;
-                               }
+       if (!box.GetName().empty()) {
+               // additional parsing is required according to decoding type
+               if (box.GetDecodingType() == INFERENCE_BOX_DECODING_TYPE_BYPASS) {
+                       ret = box.ParseLabel(root);
+                       if (ret != MEDIA_VISION_ERROR_NONE) {
+                               LOGE("Fail to GetLabel[%d]", ret);
+                               return ret;
+                       }
 
-                       } else {
-                               ret = box.ParseDecodeInfo(root);
+                       ret = box.ParseNumber(root);
+                       if (ret != MEDIA_VISION_ERROR_NONE) {
+                               LOGE("Fail to GetNumber[%d]", ret);
+                               return ret;
+                       }
+
+               } else {
+                       ret = box.ParseDecodeInfo(root);
+                       if (ret != MEDIA_VISION_ERROR_NONE) {
+                               LOGE("Fail to GetBoxDecodeInfo[%d]", ret);
+                               return ret;
+                       }
+
+                       if (box.GetDecodingType() == INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR) {
+                               ret = box.GetDecodeInfo().GenerateAnchor();
                                if (ret != MEDIA_VISION_ERROR_NONE) {
-                                       LOGE("Fail to GetBoxDecodeInfo[%d]", ret);
+                                       LOGE("Fail to GenerateAnchor[%d]", ret);
                                        return ret;
                                }
-
-                               if (box.GetDecodingType() == INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR) {
-                                       ret = box.GetDecodeInfo().GenerateAnchor();
-                                       if (ret != MEDIA_VISION_ERROR_NONE) {
-                                               LOGE("Fail to GenerateAnchor[%d]", ret);
-                                               return ret;
-                                       }
-                               }
                        }
                }
+       }
 
-               ret = ParseLandmark(root);
-               if (ret != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Fail to GetLandmark[%d]", ret);
-                       return ret;
-               }
+       ret = ParseLandmark(root);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to GetLandmark[%d]", ret);
+               return ret;
+       }
 
-               if (!landmark.GetName().empty()) {
-                       if (landmark.GetDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP ||
-                               landmark.GetDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE) {
-                               ret = landmark.ParseDecodeInfo(root, mSupportedShapeType);
-                               if (ret != MEDIA_VISION_ERROR_NONE) {
-                                       LOGE("Fail to GetLandmarkDecodeInfo[%d]", ret);
-                                       return ret;
-                               }
+       if (!landmark.GetName().empty()) {
+               if (landmark.GetDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP ||
+                       landmark.GetDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE) {
+                       ret = landmark.ParseDecodeInfo(root, mSupportedShapeType);
+                       if (ret != MEDIA_VISION_ERROR_NONE) {
+                               LOGE("Fail to GetLandmarkDecodeInfo[%d]", ret);
+                               return ret;
                        }
+               }
 
-                       if (landmark.GetDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE) {
-                               ret = ParseOffset(root);
-                               if (ret != MEDIA_VISION_ERROR_NONE) {
-                                       LOGE("Fail to GetOffsetVector[%d]", ret);
-                                       return ret;
-                               }
+               if (landmark.GetDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE) {
+                       ret = ParseOffset(root);
+                       if (ret != MEDIA_VISION_ERROR_NONE) {
+                               LOGE("Fail to GetOffsetVector[%d]", ret);
+                               return ret;
+                       }
 
-                               ret = landmark.ParseDisplacement(root, mSupportedShapeType);
-                               if (ret != MEDIA_VISION_ERROR_NONE) {
-                                       LOGE("Fail to GetDispVector[%d]", ret);
-                                       return ret;
-                               }
+                       ret = landmark.ParseDisplacement(root, mSupportedShapeType);
+                       if (ret != MEDIA_VISION_ERROR_NONE) {
+                               LOGE("Fail to GetDispVector[%d]", ret);
+                               return ret;
+                       }
 
-                               ret = landmark.ParseEdgeMap(root);
-                               if (ret != MEDIA_VISION_ERROR_NONE) {
-                                       LOGE("Fail to GetEdgeConnection[%d]", ret);
-                                       return ret;
-                               }
+                       ret = landmark.ParseEdgeMap(root);
+                       if (ret != MEDIA_VISION_ERROR_NONE) {
+                               LOGE("Fail to GetEdgeConnection[%d]", ret);
+                               return ret;
                        }
                }
+       }
 
-               parsed = true;
+       parsed = true;
 
-               LOGI("LEAVE");
+       LOGI("LEAVE");
 
-               return MEDIA_VISION_ERROR_NONE;
-       }
+       return MEDIA_VISION_ERROR_NONE;
+}
 } /* Inference */
 } /* MediaVision */
index 09c10d8..99ad700 100644 (file)
@@ -28,468 +28,449 @@ namespace mediavision
 {
 namespace inference
 {
-       int PoseDecoder::convertXYZtoX(int x, int y, int c)
-       {
-               return y * mHeatMapWidth * mHeatMapChannel
-                                       + x * mHeatMapChannel
-                                       + c;
-       }
+int PoseDecoder::convertXYZtoX(int x, int y, int c)
+{
+       return y * mHeatMapWidth * mHeatMapChannel + x * mHeatMapChannel + c;
+}
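
The flat index computed by convertXYZtoX() is the usual row-major (y, x, c) layout of an H x W x C heat map tensor. A minimal standalone sketch of the same arithmetic, with hypothetical width and channel counts (not taken from any real model metadata):

    #include <cassert>

    // Row-major (y, x, c) flattening: index = y * W * C + x * C + c.
    static int toFlat(int x, int y, int c, int W, int C)
    {
        return y * W * C + x * C + c;
    }

    int main()
    {
        const int W = 4, C = 3; // hypothetical heat map width and channel count
        // Stepping x by one moves the flat index by C; stepping y moves it by W * C.
        assert(toFlat(1, 0, 0, W, C) - toFlat(0, 0, 0, W, C) == C);
        assert(toFlat(0, 1, 0, W, C) - toFlat(0, 0, 0, W, C) == W * C);
        return 0;
    }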
 
-       cv::Point PoseDecoder::convertXYZtoXY(int x, int y, int c)
-       {
-               int idxY = y * mHeatMapWidth * mHeatMapChannel * 2
-                                       + x * mHeatMapChannel * 2
-                                       + c;
+cv::Point PoseDecoder::convertXYZtoXY(int x, int y, int c)
+{
+       int idxY = y * mHeatMapWidth * mHeatMapChannel * 2 + x * mHeatMapChannel * 2 + c;
 
-               int idxX = idxY + mHeatMapChannel;
+       int idxX = idxY + mHeatMapChannel;
 
-               return cv::Point(idxX, idxY);
-       }
+       return cv::Point(idxX, idxY);
+}
 
-       int PoseDecoder::init()
-       {
-               LOGI("ENTER");
+int PoseDecoder::init()
+{
+       LOGI("ENTER");
 
-               if (mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS ||
-                       mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL) {
-                       LOGI("Skip init");
-                       return MEDIA_VISION_ERROR_NONE;
-               }
+       if (mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS ||
+               mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL) {
+               LOGI("Skip init");
+               return MEDIA_VISION_ERROR_NONE;
+       }
 
-               int x,y,c;
-               int sx, sy, ex, ey, dx, dy;
-               float score, localScore;
-               int idx;
-               bool isLocalMax;
+       int x, y, c;
+       int sx, sy, ex, ey, dx, dy;
+       float score, localScore;
+       int idx;
+       bool isLocalMax;
 
-               mCandidates.clear();
+       mCandidates.clear();
 
-               if (mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE ||
-                       mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_3D_SINGLE) {
-                       mCandidates.resize(mHeatMapChannel);
-               }
+       if (mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE ||
+               mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_3D_SINGLE) {
+               mCandidates.resize(mHeatMapChannel);
+       }
 
-               for (y = 0; y < mHeatMapHeight; ++y) {
-                       for (x = 0; x < mHeatMapWidth; ++x) {
-                               std::list<LandmarkPoint>::iterator candidate = mCandidates.begin();
-                               for (c = 0; c < mHeatMapChannel; ++c, candidate++) {
-                                       isLocalMax = true;
-                                       idx = convertXYZtoX(x, y, c);
-                                       score = mTensorBuffer.getValue<float>(mMeta.GetScoreName(), idx);
-                                       if (mMeta.GetScoreType() == INFERENCE_SCORE_TYPE_SIGMOID) {
-                                               score = PostProcess::sigmoid(score);
-                                       }
+       for (y = 0; y < mHeatMapHeight; ++y) {
+               for (x = 0; x < mHeatMapWidth; ++x) {
+                       std::list<LandmarkPoint>::iterator candidate = mCandidates.begin();
+                       for (c = 0; c < mHeatMapChannel; ++c, candidate++) {
+                               isLocalMax = true;
+                               idx = convertXYZtoX(x, y, c);
+                               score = mTensorBuffer.getValue<float>(mMeta.GetScoreName(), idx);
+                               if (mMeta.GetScoreType() == INFERENCE_SCORE_TYPE_SIGMOID) {
+                                       score = PostProcess::sigmoid(score);
+                               }
 
-                                       if (score < mMeta.GetScoreThreshold())
+                               if (score < mMeta.GetScoreThreshold())
+                                       continue;
+
+                               if (mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE ||
+                                       mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_3D_SINGLE) {
+                                       if (score <= candidate->score)
                                                continue;
 
-                                       if (mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE ||
-                                               mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_3D_SINGLE) {
-                                               if (score <= candidate->score)
-                                                       continue;
-
-                                               candidate->score = score;
-                                               candidate->heatMapLoc.x = x;
-                                               candidate->heatMapLoc.y = y;
-                                               candidate->id = c;
-
-                                       } else { //landmarkInfo.type == 1
-                                               sx = std::max(x - 1, 0);
-                                               sy = std::max(y - 1, 0);
-                                               ex = std::min(x + 2, mHeatMapWidth);
-                                               ey = std::min(y + 2, mHeatMapHeight);
-
-                                               for (dy = sy; dy < ey; ++dy) {
-                                                       for (dx = sx; dx < ex; ++dx) {
-                                                               idx = convertXYZtoX(dx, dy, c);
-                                                               localScore =  mTensorBuffer.getValue<float>(mMeta.GetScoreName(), idx);
-                                                               if (mMeta.GetScoreType() == INFERENCE_SCORE_TYPE_SIGMOID) {
-                                                                       localScore = PostProcess::sigmoid(localScore);
-                                                               }
-                                                               if (localScore > score) {
-                                                                       isLocalMax = false;
-                                                                       break;
-                                                               }
+                                       candidate->score = score;
+                                       candidate->heatMapLoc.x = x;
+                                       candidate->heatMapLoc.y = y;
+                                       candidate->id = c;
+
+                               } else { // landmarkInfo.type == 1
+                                       sx = std::max(x - 1, 0);
+                                       sy = std::max(y - 1, 0);
+                                       ex = std::min(x + 2, mHeatMapWidth);
+                                       ey = std::min(y + 2, mHeatMapHeight);
+
+                                       for (dy = sy; dy < ey; ++dy) {
+                                               for (dx = sx; dx < ex; ++dx) {
+                                                       idx = convertXYZtoX(dx, dy, c);
+                                                       localScore = mTensorBuffer.getValue<float>(mMeta.GetScoreName(), idx);
+                                                       if (mMeta.GetScoreType() == INFERENCE_SCORE_TYPE_SIGMOID) {
+                                                               localScore = PostProcess::sigmoid(localScore);
                                                        }
-                                                       if (isLocalMax == false)
+                                                       if (localScore > score) {
+                                                               isLocalMax = false;
                                                                break;
+                                                       }
                                                }
-
                                                if (isLocalMax == false)
-                                                       continue;
+                                                       break;
+                                       }
 
-                                               // add this to list
-                                               LOGI("[%d x %d][%d]: score %.3f", y, x, c, score);
-                                               std::list<LandmarkPoint>::iterator iter;
-                                               for (iter = mCandidates.begin(); iter != mCandidates.end(); ++iter) {
-                                                       if ((*iter).score < score) {
-                                                               break;
-                                                       }
-                                               }
+                                       if (isLocalMax == false)
+                                               continue;
 
-                                               LandmarkPoint localLandmark;
-                                               localLandmark.score = score;
-                                               localLandmark.heatMapLoc.x = x;
-                                               localLandmark.heatMapLoc.y = y;
-                                               localLandmark.id = c;
-                                               localLandmark.valid = false;
-                                               mCandidates.insert(iter, localLandmark);
+                                       // add this to list
+                                       LOGI("[%d x %d][%d]: score %.3f", y, x, c, score);
+                                       std::list<LandmarkPoint>::iterator iter;
+                                       for (iter = mCandidates.begin(); iter != mCandidates.end(); ++iter) {
+                                               if ((*iter).score < score) {
+                                                       break;
+                                               }
                                        }
+
+                                       LandmarkPoint localLandmark;
+                                       localLandmark.score = score;
+                                       localLandmark.heatMapLoc.x = x;
+                                       localLandmark.heatMapLoc.y = y;
+                                       localLandmark.id = c;
+                                       localLandmark.valid = false;
+                                       mCandidates.insert(iter, localLandmark);
                                }
                        }
-               } // end of init
+               }
+       } // end of init
 
-               LOGI("LEAVE");
+       LOGI("LEAVE");
+
+       return MEDIA_VISION_ERROR_NONE;
+}
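
At its core, the candidate scan in init() above is a 3x3 non-maximum check run per channel over the score heat map. A reduced, single-channel sketch of that check, using a plain std::vector as a stand-in for the tensor buffer (dimensions and data are assumptions, not the real metadata):

    #include <algorithm>
    #include <vector>

    // True if heatMap[y * width + x] is a local maximum within its 3x3 neighborhood,
    // using the same clamped window bounds as init() above.
    static bool isLocalMax(const std::vector<float> &heatMap, int width, int height, int x, int y)
    {
        float score = heatMap[y * width + x];
        int sx = std::max(x - 1, 0), sy = std::max(y - 1, 0);
        int ex = std::min(x + 2, width), ey = std::min(y + 2, height);

        for (int dy = sy; dy < ey; ++dy)
            for (int dx = sx; dx < ex; ++dx)
                if (heatMap[dy * width + dx] > score)
                    return false;

        return true;
    }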
 
+int PoseDecoder::getNumberOfPose()
+{
+       return std::min(static_cast<int>(mPoseLandmarks.size()), MAX_NUMBER_OF_POSE);
+}
+
+int PoseDecoder::getOffsetValue(LandmarkPoint &landmark, cv::Point2f &offsetVal)
+{
+       if (!mTensorBuffer.exist(mMeta.GetOffsetVecName())) {
+               offsetVal.x = offsetVal.y = 0.f;
+               LOGI("No offset value");
+               LOGI("LEAVE");
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       int PoseDecoder::getNumberOfPose()
-       {
-               return std::min(static_cast<int>(mPoseLandmarks.size()), MAX_NUMBER_OF_POSE);
-       }
+       cv::Point idx = convertXYZtoXY(landmark.heatMapLoc.x, landmark.heatMapLoc.y, landmark.id);
 
-       int PoseDecoder::getOffsetValue(LandmarkPoint& landmark, cv::Point2f &offsetVal)
-       {
-               if (!mTensorBuffer.exist(mMeta.GetOffsetVecName())) {
-                       offsetVal.x = offsetVal.y = 0.f;
-                       LOGI("No offset value");
-                       LOGI("LEAVE");
-                       return MEDIA_VISION_ERROR_NONE;
-               }
+       try {
+               offsetVal.x = mTensorBuffer.getValue<float>(mMeta.GetOffsetVecName(), idx.x);
+               offsetVal.y = mTensorBuffer.getValue<float>(mMeta.GetOffsetVecName(), idx.y);
+       } catch (const std::exception &e) {
+               LOGE("Fail to get value at (%d, %d) from %s", idx.x, idx.y, mMeta.GetOffsetVecName().c_str());
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+       }
 
-               cv::Point idx = convertXYZtoXY(landmark.heatMapLoc.x, landmark.heatMapLoc.y, landmark.id);
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               try {
-                       offsetVal.x = mTensorBuffer.getValue<float>(mMeta.GetOffsetVecName(), idx.x);
-                       offsetVal.y = mTensorBuffer.getValue<float>(mMeta.GetOffsetVecName(), idx.y);
-               } catch (const std::exception& e) {
-                       LOGE("Fail to get value at (%d, %d) from %s",
-                                               idx.x, idx.y, mMeta.GetOffsetVecName().c_str());
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
+float PoseDecoder::getPointX(int poseIdx, int partIdx)
+{
+       LOGI("idx[%d]-part[%d]", poseIdx, partIdx);
+       return mPoseLandmarks[poseIdx].landmarks[partIdx].decodedLoc.x;
+}
 
-               return MEDIA_VISION_ERROR_NONE;
-       }
+float PoseDecoder::getPointY(int poseIdx, int partIdx)
+{
+       LOGI("idx[%d]-part[%d]", poseIdx, partIdx);
+       return mPoseLandmarks[poseIdx].landmarks[partIdx].decodedLoc.y;
+}
 
-       float PoseDecoder::getPointX(int poseIdx, int partIdx)
-       {
-               LOGI("idx[%d]-part[%d]", poseIdx, partIdx);
-               return mPoseLandmarks[poseIdx].landmarks[partIdx].decodedLoc.x;
-       }
+float PoseDecoder::getScore(int poseIdx, int partIdx)
+{
+       return mPoseLandmarks[poseIdx].landmarks[partIdx].score;
+}
 
-       float PoseDecoder::getPointY(int poseIdx, int partIdx)
-       {
-               LOGI("idx[%d]-part[%d]", poseIdx, partIdx);
-               return mPoseLandmarks[poseIdx].landmarks[partIdx].decodedLoc.y;
+int PoseDecoder::getIndexToPos(LandmarkPoint &point, float scaleW, float scaleH)
+{
+       if (scaleW <= 0.0f || scaleH <= 0.0f) {
+               LOGE("scale width(%.4f) or height(%.4f) is less than or equal to zero", scaleW, scaleH);
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       float PoseDecoder::getScore(int poseIdx, int partIdx)
-       {
-               return mPoseLandmarks[poseIdx].landmarks[partIdx].score;
-       }
+       cv::Point2f offsetVal;
+       getOffsetValue(point, offsetVal);
 
-       int PoseDecoder::getIndexToPos(LandmarkPoint& point, float scaleW, float scaleH)
-       {
-               if (scaleW <= 0.0f || scaleH <= 0.0f) {
-                       LOGE("scale width(%.4f) or height(%.4f) is less than or equal to zero", scaleW, scaleH);
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
+       point.decodedLoc.x = static_cast<float>(point.heatMapLoc.x) / static_cast<float>(mHeatMapWidth - 1);
+       point.decodedLoc.y = static_cast<float>(point.heatMapLoc.y) / static_cast<float>(mHeatMapHeight - 1);
 
-               cv::Point2f offsetVal;
-               getOffsetValue(point, offsetVal);
+       point.decodedLoc.x += offsetVal.x / scaleW;
+       point.decodedLoc.y += offsetVal.y / scaleH;
 
-               point.decodedLoc.x = static_cast<float>(point.heatMapLoc.x) / static_cast<float>(mHeatMapWidth - 1);
-               point.decodedLoc.y = static_cast<float>(point.heatMapLoc.y) / static_cast<float>(mHeatMapHeight - 1);
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               point.decodedLoc.x += offsetVal.x / scaleW;
-               point.decodedLoc.y += offsetVal.y / scaleH;
+int PoseDecoder::getPosToIndex(LandmarkPoint &point)
+{
+       cv::Point posVal;
 
-               return MEDIA_VISION_ERROR_NONE;
-       }
+       posVal.x = roundf(point.decodedLoc.x * static_cast<float>(mHeatMapWidth - 1));
+       posVal.y = roundf(point.decodedLoc.y * static_cast<float>(mHeatMapHeight - 1));
 
-       int PoseDecoder::getPosToIndex(LandmarkPoint& point)
-       {
-               cv::Point posVal;
+       posVal.x = std::max(std::min(posVal.x, mHeatMapWidth - 1), 0);
+       posVal.y = std::max(std::min(posVal.y, mHeatMapHeight - 1), 0);
 
-               posVal.x = roundf(point.decodedLoc.x * static_cast<float>(mHeatMapWidth - 1));
-               posVal.y = roundf(point.decodedLoc.y * static_cast<float>(mHeatMapHeight - 1));
+       point.heatMapLoc = posVal;
 
-               posVal.x = std::max(std::min(posVal.x, mHeatMapWidth - 1), 0);
-               posVal.y = std::max(std::min(posVal.y, mHeatMapHeight - 1), 0);
+       return MEDIA_VISION_ERROR_NONE;
+}
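
getIndexToPos() and getPosToIndex() convert between normalized landmark coordinates in [0, 1] and clamped heat map cells; the repeated round trip in traverseToNeighbor() below relies on this pair converging. A standalone sketch of the cell mapping (the grid size is a placeholder):

    #include <algorithm>
    #include <cmath>

    // Normalized coordinate in [0, 1] -> clamped heat map cell, as in getPosToIndex().
    static int toCell(float normalized, int gridSize)
    {
        int cell = static_cast<int>(std::lround(normalized * static_cast<float>(gridSize - 1)));
        return std::max(std::min(cell, gridSize - 1), 0);
    }
    // The reverse direction (before offset refinement) is cell / (gridSize - 1.0f).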
 
-               point.heatMapLoc = posVal;
+int PoseDecoder::decode(float scaleWidth, float scaleHeight, float thresHoldRadius)
+{
+       LOGI("ENTER");
 
-               return MEDIA_VISION_ERROR_NONE;
+       if (scaleWidth <= 0.0f || scaleHeight <= 0.0f) {
+               LOGE("scale width(%.4f) or height(%.4f) is less than or equal to zero", scaleWidth, scaleHeight);
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       int PoseDecoder::decode(float scaleWidth, float scaleHeight, float thresHoldRadius)
-       {
-               LOGI("ENTER");
-
-               if (scaleWidth <= 0.0f || scaleHeight <= 0.0f) {
-                       LOGE("scale width(%.4f) or height(%.4f) is less than or equal to zero", scaleWidth, scaleHeight);
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
-
-               mPoseLandmarks.clear();
+       mPoseLandmarks.clear();
 
-               LandmarkPoint initValue = {0.0f, cv::Point(0,0), cv::Point2f(0.0f, 0.0f), -1, false};
+       LandmarkPoint initValue = { 0.0f, cv::Point(0, 0), cv::Point2f(0.0f, 0.0f), -1, false };
 
-               if (mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE ||
-                       mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_3D_SINGLE) {
-                       mPoseLandmarks.resize(1);
+       if (mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE ||
+               mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_3D_SINGLE) {
+               mPoseLandmarks.resize(1);
 
-                       if (mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS ||
-                               mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL) {
-                               mPoseLandmarks[0].landmarks.resize(mNumberOfLandmarks);
-                       } else {
-                               mPoseLandmarks[0].landmarks.resize(mHeatMapChannel);
-                       }
+               if (mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS ||
+                       mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL) {
+                       mPoseLandmarks[0].landmarks.resize(mNumberOfLandmarks);
+               } else {
+                       mPoseLandmarks[0].landmarks.resize(mHeatMapChannel);
                }
+       }
 
-               if (mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP ||
-                       mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE) {
-                       while (!mCandidates.empty()) {
+       if (mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP ||
+               mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE) {
+               while (!mCandidates.empty()) {
+                       LandmarkPoint &root = mCandidates.front();
 
-                               LandmarkPoint &root = mCandidates.front();
+                       getIndexToPos(root, scaleWidth, scaleHeight);
 
-                               getIndexToPos(root, scaleWidth, scaleHeight);
+                       if (mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE) {
+                               root.valid = true;
+                               mPoseLandmarks[0].landmarks[root.id] = root;
+                               mPoseLandmarks[0].score += root.score;
+                               mCandidates.pop_front();
+                               continue;
+                       }
 
-                               if (mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE) {
-                                       root.valid = true;
-                                       mPoseLandmarks[0].landmarks[root.id] = root;
-                                       mPoseLandmarks[0].score += root.score;
+                       LOGI("root id: %d", root.id);
+
+                       if (thresHoldRadius > 0.0f) {
+                               bool isSkip = false;
+                               for (auto &result : mPoseLandmarks) {
+                                       cv::Point2f dfRadius = result.landmarks[root.id].decodedLoc;
+                                       dfRadius -= root.decodedLoc;
+                                       float radius = std::pow(dfRadius.x * scaleWidth, 2.0f) + std::pow(dfRadius.y * scaleHeight, 2.0f);
+                                       LOGI("id[%d], radius: %.f vs. %.f", root.id, radius, std::pow(thresHoldRadius, 2.0f));
+                                       if (radius <= std::pow(thresHoldRadius, 2.0f)) {
+                                               LOGI("Not a local maximum, skip this");
+                                               isSkip = true;
+                                               break;
+                                       }
+                               }
+                               if (isSkip) {
                                        mCandidates.pop_front();
                                        continue;
                                }
+                       }
 
-                               LOGI("root id: %d", root.id);
-
-                               if (thresHoldRadius > 0.0f) {
-                                       bool isSkip = false;
-                                       for (auto& result : mPoseLandmarks) {
-                                               cv::Point2f dfRadius = result.landmarks[root.id].decodedLoc;
-                                               dfRadius -= root.decodedLoc;
-                                               float radius =
-                                                       std::pow(dfRadius.x * scaleWidth, 2.0f) +
-                                                       std::pow(dfRadius.y     * scaleHeight, 2.0f);
-                                               LOGI("id[%d], radius: %.f vs. %.f", root.id, radius, std::pow(thresHoldRadius, 2.0f));
-                                               if (radius <= std::pow(thresHoldRadius, 2.0f)) {
-                                                       LOGI("Not local maximum, Skip this");
-                                                       isSkip = true;
-                                                       break;
-                                               }
-                                       }
-                                       if (isSkip) {
-                                               mCandidates.pop_front();
-                                               continue;
-                                       }
-                               }
-
-                               LOGI("Local maximum. Add this");
-
-                               std::vector<LandmarkPoint> decodedLandmarks(mHeatMapChannel, initValue);
+                       LOGI("Local maximum. Add this");
 
-                               findPose(root, decodedLandmarks, scaleWidth, scaleHeight);
+                       std::vector<LandmarkPoint> decodedLandmarks(mHeatMapChannel, initValue);
 
-                               float poseScore = 0.0f;
-                               for (auto& landmark : decodedLandmarks) {
-                                       poseScore += landmark.score;
-                                       LOGI("%.3f, %.3f", landmark.decodedLoc.x, landmark.decodedLoc.y);
-                               }
+                       findPose(root, decodedLandmarks, scaleWidth, scaleHeight);
 
-                               mPoseLandmarks.push_back(LandmarkResults {decodedLandmarks, poseScore});
-                               if (mPoseLandmarks.size() > MAX_NUMBER_OF_POSE)
-                                       break;
-                               mCandidates.pop_front();
+                       float poseScore = 0.0f;
+                       for (auto &landmark : decodedLandmarks) {
+                               poseScore += landmark.score;
+                               LOGI("%.3f, %.3f", landmark.decodedLoc.x, landmark.decodedLoc.y);
                        }
 
-                       for (auto& pose : mPoseLandmarks) {
-                               pose.score /= static_cast<float>(mHeatMapChannel);
-                       }
-               } else if (mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL) {
-                       int landmarkOffset = mMeta.GetLandmarkOffset();
-                       for (int idx = 0; idx < mNumberOfLandmarks; ++idx) {
-                                       float py = mTensorBuffer.getValue<float>(mMeta.GetLandmarkName(), idx * landmarkOffset);
-                                       float px = mTensorBuffer.getValue<float>(mMeta.GetLandmarkName(), idx * landmarkOffset + 1);
-                                       float pscore = mTensorBuffer.getValue<float>(mMeta.GetScoreName(), idx * landmarkOffset + 2);
-
-                                       mPoseLandmarks[0].landmarks[idx].score = pscore;
-                                       mPoseLandmarks[0].landmarks[idx].heatMapLoc = cv::Point(-1, -1);
-                                       mPoseLandmarks[0].landmarks[idx].decodedLoc = cv::Point2f(px / scaleWidth, py / scaleHeight);
-                                       mPoseLandmarks[0].landmarks[idx].id = idx;
-                                       mPoseLandmarks[0].landmarks[idx].valid =  true;
-
-                                       LOGI("idx[%d]: %.4f, %.4f, score: %.4f", idx, px, py, pscore);
+                       mPoseLandmarks.push_back(LandmarkResults { decodedLandmarks, poseScore });
+                       if (mPoseLandmarks.size() > MAX_NUMBER_OF_POSE)
+                               break;
+                       mCandidates.pop_front();
+               }
+
+               for (auto &pose : mPoseLandmarks) {
+                       pose.score /= static_cast<float>(mHeatMapChannel);
+               }
+       } else if (mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS_MULTICHANNEL) {
+               int landmarkOffset = mMeta.GetLandmarkOffset();
+               for (int idx = 0; idx < mNumberOfLandmarks; ++idx) {
+                       float py = mTensorBuffer.getValue<float>(mMeta.GetLandmarkName(), idx * landmarkOffset);
+                       float px = mTensorBuffer.getValue<float>(mMeta.GetLandmarkName(), idx * landmarkOffset + 1);
+                       float pscore = mTensorBuffer.getValue<float>(mMeta.GetScoreName(), idx * landmarkOffset + 2);
+
+                       mPoseLandmarks[0].landmarks[idx].score = pscore;
+                       mPoseLandmarks[0].landmarks[idx].heatMapLoc = cv::Point(-1, -1);
+                       mPoseLandmarks[0].landmarks[idx].decodedLoc = cv::Point2f(px / scaleWidth, py / scaleHeight);
+                       mPoseLandmarks[0].landmarks[idx].id = idx;
+                       mPoseLandmarks[0].landmarks[idx].valid = true;
+
+                       LOGI("idx[%d]: %.4f, %.4f, score: %.4f", idx, px, py, pscore);
+               }
+       } else {
+               // multi pose is not supported
+               std::vector<int> scoreIndexes = mMeta.GetScoreDimInfo().GetValidIndexAll();
+               float poseScore = mMeta.GetScoreThreshold();
+               if (!scoreIndexes.empty()) {
+                       poseScore = mTensorBuffer.getValue<float>(mMeta.GetScoreName(), scoreIndexes[scoreIndexes[0]]);
+                       if (mMeta.GetScoreType() == INFERENCE_SCORE_TYPE_SIGMOID) {
+                               poseScore = PostProcess::sigmoid(poseScore);
                        }
-               } else {
-                       // multi pose is not supported
-                       std::vector<int> scoreIndexes = mMeta.GetScoreDimInfo().GetValidIndexAll();
-                       float poseScore = mMeta.GetScoreThreshold();
-                       if (!scoreIndexes.empty()) {
-                               poseScore  = mTensorBuffer.getValue<float>(mMeta.GetScoreName(), scoreIndexes[scoreIndexes[0]]);
-                               if (mMeta.GetScoreType() == INFERENCE_SCORE_TYPE_SIGMOID) {
-                                       poseScore = PostProcess::sigmoid(poseScore);
-                               }
-                               if (poseScore < mMeta.GetScoreThreshold()) {
-                                       LOGI("pose score %.4f is lower than %.4f\n[LEAVE]", poseScore, mMeta.GetScoreThreshold());
-                                       return MEDIA_VISION_ERROR_NONE;
-                               }
+                       if (poseScore < mMeta.GetScoreThreshold()) {
+                               LOGI("pose score %.4f is lower than %.4f\n[LEAVE]", poseScore, mMeta.GetScoreThreshold());
+                               return MEDIA_VISION_ERROR_NONE;
                        }
+               }
 
-                       int landmarkOffset = (mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE ||
-                                                                 mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_MULTI) ? 2 : 3;
-                       if (mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS) {
-                               landmarkOffset = mMeta.GetLandmarkOffset();
-                       }
-                       for (int idx = 0; idx < mNumberOfLandmarks; ++idx) {
-                                       float px = mTensorBuffer.getValue<float>(mMeta.GetLandmarkName(), idx * landmarkOffset);
-                                       float py = mTensorBuffer.getValue<float>(mMeta.GetLandmarkName(), idx * landmarkOffset + 1);
+               int landmarkOffset = (mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_SINGLE ||
+                                                         mMeta.GetLandmarkType() == INFERENCE_LANDMARK_TYPE_2D_MULTI) ?
+                                                                        2 :
+                                                                        3;
+               if (mMeta.GetLandmarkDecodingType() == INFERENCE_LANDMARK_DECODING_TYPE_BYPASS) {
+                       landmarkOffset = mMeta.GetLandmarkOffset();
+               }
+               for (int idx = 0; idx < mNumberOfLandmarks; ++idx) {
+                       float px = mTensorBuffer.getValue<float>(mMeta.GetLandmarkName(), idx * landmarkOffset);
+                       float py = mTensorBuffer.getValue<float>(mMeta.GetLandmarkName(), idx * landmarkOffset + 1);
 
-                                       mPoseLandmarks[0].landmarks[idx].score = poseScore;
-                                       mPoseLandmarks[0].landmarks[idx].heatMapLoc = cv::Point(-1, -1);
-                                       mPoseLandmarks[0].landmarks[idx].decodedLoc = cv::Point2f(px/scaleWidth, py/scaleHeight);
-                                       mPoseLandmarks[0].landmarks[idx].id = idx;
-                                       mPoseLandmarks[0].landmarks[idx].valid =  true;
+                       mPoseLandmarks[0].landmarks[idx].score = poseScore;
+                       mPoseLandmarks[0].landmarks[idx].heatMapLoc = cv::Point(-1, -1);
+                       mPoseLandmarks[0].landmarks[idx].decodedLoc = cv::Point2f(px / scaleWidth, py / scaleHeight);
+                       mPoseLandmarks[0].landmarks[idx].id = idx;
+                       mPoseLandmarks[0].landmarks[idx].valid = true;
 
-                                       LOGI("idx[%d]: %.4f, %.4f", idx, px, py);
-                       }
-
-                       mPoseLandmarks[0].score = poseScore;
+                       LOGI("idx[%d]: %.4f, %.4f", idx, px, py);
                }
 
-               LOGI("LEAVE");
-               return MEDIA_VISION_ERROR_NONE;
+               mPoseLandmarks[0].score = poseScore;
        }
 
-       int PoseDecoder::findPose(LandmarkPoint& root, std::vector<LandmarkPoint>& decodedLandmarks,
-                                                       float scaleW, float scaleH)
-       {
-               LOGI("ENTER");
+       LOGI("LEAVE");
+       return MEDIA_VISION_ERROR_NONE;
+}
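
A hypothetical driver for the flow above, assuming `decoder` was built with parsed metadata and a filled tensor buffer, and that the scale arguments match the network input size (a sketch, not API documentation):

    // init() collects score-sorted candidates from the heat maps;
    // decode() then turns them into poses. A radius of 0.0f disables
    // the duplicate-pose suppression branch shown above.
    int runDecode(PoseDecoder &decoder, float inputWidth, float inputHeight)
    {
        int ret = decoder.init();
        if (ret != MEDIA_VISION_ERROR_NONE)
            return ret;

        ret = decoder.decode(inputWidth, inputHeight, 0.0f);
        if (ret != MEDIA_VISION_ERROR_NONE)
            return ret;

        for (int pose = 0; pose < decoder.getNumberOfPose(); ++pose)
            LOGI("pose[%d] part[0]: %.3f, %.3f", pose, decoder.getPointX(pose, 0), decoder.getPointY(pose, 0));

        return MEDIA_VISION_ERROR_NONE;
    }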
 
-               if (scaleW <= 0.0f || scaleH <= 0.0f) {
-                       LOGE("scale width(%.4f) or height(%.4f) is less than or equal to zero", scaleW, scaleH);
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
+int PoseDecoder::findPose(LandmarkPoint &root, std::vector<LandmarkPoint> &decodedLandmarks, float scaleW, float scaleH)
+{
+       LOGI("ENTER");
 
-               decodedLandmarks[root.id] = root;
-               decodedLandmarks[root.id].valid = true;
-               LOGI("KeyId: [%d], heatMap: %d, %d", root.id, root.heatMapLoc.x, root.heatMapLoc.y);
-               LOGI("KeyId: [%d], decoded: %.4f, %.4f, score %.3f", root.id, root.decodedLoc.x, root.decodedLoc.y, root.score);
-
-               int index = static_cast<int>(mMeta.GetLandmarkEdges().size()) - 1;
-               for (auto riter = mMeta.GetLandmarkEdges().rbegin();
-                       riter != mMeta.GetLandmarkEdges().rend(); ++riter) {
-                       int fromKeyId = riter->second;
-                       int toKeyId = riter->first;
-
-                       if (decodedLandmarks[fromKeyId].valid == true &&
-                               decodedLandmarks[toKeyId].valid == false) {
-                               LOGI("BackTravers: from %d to %d", fromKeyId, toKeyId);
-                               traverseToNeighbor(index, toKeyId,  INFERENCE_DISPLACEMENT_TYPE_BACKWARD,
-                                                       decodedLandmarks[fromKeyId], decodedLandmarks[toKeyId],
-                                                       scaleW, scaleH);
-                               LOGI("tgt_key_id[%d]: %.4f, %.4f, %.4f", toKeyId,
-                                                                               decodedLandmarks[toKeyId].decodedLoc.x,
-                                                                               decodedLandmarks[toKeyId].decodedLoc.y,
-                                                                               decodedLandmarks[toKeyId].score);
-                       }
-                       index--;
-               }
+       if (scaleW <= 0.0f || scaleH <= 0.0f) {
+               LOGE("scale width(%.4f) or height(%.4f) is less than or equal to zero", scaleW, scaleH);
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
 
-               index = 0;
-               for (auto iter = mMeta.GetLandmarkEdges().begin();
-                       iter != mMeta.GetLandmarkEdges().end(); ++iter) {
-                       int fromKeyId = iter->first;
-                       int toKeyId = iter->second;
-
-                       if (decodedLandmarks[fromKeyId].valid == true &&
-                               decodedLandmarks[toKeyId].valid == false) {
-                               LOGI("FrwdTravers: form %d to %d", fromKeyId, toKeyId);
-                               traverseToNeighbor(index, toKeyId,  INFERENCE_DISPLACEMENT_TYPE_FORWARD,
-                                                       decodedLandmarks[fromKeyId], decodedLandmarks[toKeyId],
-                                                       scaleW, scaleH);
-                       }
-                       index++;
+       decodedLandmarks[root.id] = root;
+       decodedLandmarks[root.id].valid = true;
+       LOGI("KeyId: [%d], heatMap: %d, %d", root.id, root.heatMapLoc.x, root.heatMapLoc.y);
+       LOGI("KeyId: [%d], decoded: %.4f, %.4f, score %.3f", root.id, root.decodedLoc.x, root.decodedLoc.y, root.score);
+
+       int index = static_cast<int>(mMeta.GetLandmarkEdges().size()) - 1;
+       for (auto riter = mMeta.GetLandmarkEdges().rbegin(); riter != mMeta.GetLandmarkEdges().rend(); ++riter) {
+               int fromKeyId = riter->second;
+               int toKeyId = riter->first;
+
+               if (decodedLandmarks[fromKeyId].valid == true && decodedLandmarks[toKeyId].valid == false) {
+                       LOGI("BackTravers: from %d to %d", fromKeyId, toKeyId);
+                       traverseToNeighbor(index, toKeyId, INFERENCE_DISPLACEMENT_TYPE_BACKWARD, decodedLandmarks[fromKeyId],
+                                                          decodedLandmarks[toKeyId], scaleW, scaleH);
+                       LOGI("tgt_key_id[%d]: %.4f, %.4f, %.4f", toKeyId, decodedLandmarks[toKeyId].decodedLoc.x,
+                                decodedLandmarks[toKeyId].decodedLoc.y, decodedLandmarks[toKeyId].score);
                }
-               LOGI("LEAVE");
-               return MEDIA_VISION_ERROR_NONE;
+               index--;
        }
 
-       int PoseDecoder::traverseToNeighbor(int edgeId, int toId, inference_displacement_type_e dir,
-                                                               LandmarkPoint fromLandmark, LandmarkPoint& toLandmark,
-                                                               float scaleW, float scaleH)
-       {
-               if (scaleW <= 0.0f || scaleH <= 0.0f) {
-                       LOGE("scale width(%.4f) or height(%.4f) is less than or equal to zero", scaleW, scaleH);
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
+       index = 0;
+       for (auto iter = mMeta.GetLandmarkEdges().begin(); iter != mMeta.GetLandmarkEdges().end(); ++iter) {
+               int fromKeyId = iter->first;
+               int toKeyId = iter->second;
 
-               cv::Point2f edgeVector(0.f, 0.f);
-               cv::Point nearHeatMapLoc;
+               if (decodedLandmarks[fromKeyId].valid == true && decodedLandmarks[toKeyId].valid == false) {
+                       LOGI("FrwdTravers: from %d to %d", fromKeyId, toKeyId);
+                       traverseToNeighbor(index, toKeyId, INFERENCE_DISPLACEMENT_TYPE_FORWARD, decodedLandmarks[fromKeyId],
+                                                          decodedLandmarks[toKeyId], scaleW, scaleH);
+               }
+               index++;
+       }
+       LOGI("LEAVE");
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               LOGI("org: %.4f, %.4f", fromLandmark.decodedLoc.x, fromLandmark.decodedLoc.y);
+int PoseDecoder::traverseToNeighbor(int edgeId, int toId, inference_displacement_type_e dir, LandmarkPoint fromLandmark,
+                                                                       LandmarkPoint &toLandmark, float scaleW, float scaleH)
+{
+       if (scaleW <= 0.0f || scaleH <= 0.0f) {
+               LOGE("scale width(%.4f) or height(%.4f) is less than or equal to zero", scaleW, scaleH);
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
 
-               // update heatMapLoc from decodedLoc;
-               nearHeatMapLoc.x = roundf(fromLandmark.decodedLoc.x
-                                       * static_cast<float>(mHeatMapWidth - 1));
-               nearHeatMapLoc.y = roundf(fromLandmark.decodedLoc.y
-                                       * static_cast<float>(mHeatMapHeight - 1));
+       cv::Point2f edgeVector(0.f, 0.f);
+       cv::Point nearHeatMapLoc;
 
-               nearHeatMapLoc.x = std::max(std::min(nearHeatMapLoc.x, mHeatMapWidth - 1), 0);
-               nearHeatMapLoc.y = std::max(std::min(nearHeatMapLoc.y, mHeatMapHeight - 1), 0);
+       LOGI("org: %.4f, %.4f", fromLandmark.decodedLoc.x, fromLandmark.decodedLoc.y);
 
-               LOGI("src: %d, %d", nearHeatMapLoc.x, nearHeatMapLoc.y);
+       // update heatMapLoc from decodedLoc
+       nearHeatMapLoc.x = roundf(fromLandmark.decodedLoc.x * static_cast<float>(mHeatMapWidth - 1));
+       nearHeatMapLoc.y = roundf(fromLandmark.decodedLoc.y * static_cast<float>(mHeatMapHeight - 1));
 
-               getEdgeVector(nearHeatMapLoc, edgeId, dir, edgeVector);
+       nearHeatMapLoc.x = std::max(std::min(nearHeatMapLoc.x, mHeatMapWidth - 1), 0);
+       nearHeatMapLoc.y = std::max(std::min(nearHeatMapLoc.y, mHeatMapHeight - 1), 0);
 
-               LOGI("vector: %.4f, %.4f with edgeId %d", edgeVector.x, edgeVector.y, edgeId);
-               toLandmark.decodedLoc.x = fromLandmark.decodedLoc.x + edgeVector.x / scaleW;
-               toLandmark.decodedLoc.y = fromLandmark.decodedLoc.y + edgeVector.y / scaleH;
-               toLandmark.id = toId;
-               LOGI("tgt: %.4f, %.4f", toLandmark.decodedLoc.x, toLandmark.decodedLoc.y);
+       LOGI("src: %d, %d", nearHeatMapLoc.x, nearHeatMapLoc.y);
 
-               for (int iter = 0; iter < MAX_NUMBER_OF_CORRECTION; ++iter) {
-                       getPosToIndex(toLandmark);
-                       getIndexToPos(toLandmark, scaleW, scaleH);
-               }
+       getEdgeVector(nearHeatMapLoc, edgeId, dir, edgeVector);
 
-               int idx  = convertXYZtoX(toLandmark.heatMapLoc.x, toLandmark.heatMapLoc.y, toLandmark.id);
-               toLandmark.score = mTensorBuffer.getValue<float>(mMeta.GetScoreName(), idx);
-               if (mMeta.GetScoreType() == INFERENCE_SCORE_TYPE_SIGMOID) {
-                       toLandmark.score = PostProcess::sigmoid(toLandmark.score);
-               }
+       LOGI("vector: %.4f, %.4f with edgeId %d", edgeVector.x, edgeVector.y, edgeId);
+       toLandmark.decodedLoc.x = fromLandmark.decodedLoc.x + edgeVector.x / scaleW;
+       toLandmark.decodedLoc.y = fromLandmark.decodedLoc.y + edgeVector.y / scaleH;
+       toLandmark.id = toId;
+       LOGI("tgt: %.4f, %.4f", toLandmark.decodedLoc.x, toLandmark.decodedLoc.y);
 
-               toLandmark.valid = true;
-               LOGI("Final: %.4f, %.4f", toLandmark.decodedLoc.x, toLandmark.decodedLoc.y);
+       for (int iter = 0; iter < MAX_NUMBER_OF_CORRECTION; ++iter) {
+               getPosToIndex(toLandmark);
+               getIndexToPos(toLandmark, scaleW, scaleH);
+       }
 
-               return MEDIA_VISION_ERROR_NONE;
+       int idx = convertXYZtoX(toLandmark.heatMapLoc.x, toLandmark.heatMapLoc.y, toLandmark.id);
+       toLandmark.score = mTensorBuffer.getValue<float>(mMeta.GetScoreName(), idx);
+       if (mMeta.GetScoreType() == INFERENCE_SCORE_TYPE_SIGMOID) {
+               toLandmark.score = PostProcess::sigmoid(toLandmark.score);
        }
 
-       int PoseDecoder::getEdgeVector(cv::Point index, int edgeId, inference_displacement_type_e type, cv::Point2f& vector)
-       {
-               LOGI("ENTER");
+       toLandmark.valid = true;
+       LOGI("Final: %.4f, %.4f", toLandmark.decodedLoc.x, toLandmark.decodedLoc.y);
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+int PoseDecoder::getEdgeVector(cv::Point index, int edgeId, inference_displacement_type_e type, cv::Point2f &vector)
+{
+       LOGI("ENTER");
 
-               LOGI("edge size: %zd", mMeta.GetLandmarkEdges().size());
+       LOGI("edge size: %zd", mMeta.GetLandmarkEdges().size());
 
-               int idxY = index.y * mHeatMapWidth
-                                       * static_cast<int>(mMeta.GetLandmarkEdges().size()) * 2;
+       int idxY = index.y * mHeatMapWidth * static_cast<int>(mMeta.GetLandmarkEdges().size()) * 2;
 
-               idxY += index.x * static_cast<int>(mMeta.GetLandmarkEdges().size()) * 2 + edgeId;
+       idxY += index.x * static_cast<int>(mMeta.GetLandmarkEdges().size()) * 2 + edgeId;
 
-               int idxX = idxY + static_cast<int>(mMeta.GetLandmarkEdges().size());
+       int idxX = idxY + static_cast<int>(mMeta.GetLandmarkEdges().size());
 
-               for(auto& dispVec : mMeta.GetLandmarkDispVecAll()){
-                       if (dispVec.GetType() == type) { // 0: forward
-                               LOGI("%s", dispVec.GetName().c_str());
-                               vector.x = mTensorBuffer.getValue<float>(dispVec.GetName(), idxX);
-                               vector.y = mTensorBuffer.getValue<float>(dispVec.GetName(), idxY);
-                       }
+       for (auto &dispVec : mMeta.GetLandmarkDispVecAll()) {
+               if (dispVec.GetType() == type) { // 0: forward
+                       LOGI("%s", dispVec.GetName().c_str());
+                       vector.x = mTensorBuffer.getValue<float>(dispVec.GetName(), idxX);
+                       vector.y = mTensorBuffer.getValue<float>(dispVec.GetName(), idxY);
                }
-
-               LOGI("LEAVE");
-               return MEDIA_VISION_ERROR_NONE;
        }
+
+       LOGI("LEAVE");
+       return MEDIA_VISION_ERROR_NONE;
+}
 }
 }
old mode 100755 (executable)
new mode 100644 (file)
index c7335b3..7060da7
@@ -26,133 +26,132 @@ namespace mediavision
 {
 namespace inference
 {
-       float PostProcess::sigmoid(float value)
-       {
-               return 1.0/(1.0+ exp(-value));
-       }
+float PostProcess::sigmoid(float value)
+{
+       return 1.0 / (1.0 + exp(-value));
+}
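
sigmoid() squashes raw logits into (0, 1) so they can be compared against the configured score threshold. A standalone float variant with a quick symmetry check (hypothetical inputs, not the class method):

    #include <cassert>
    #include <cmath>

    static float sigmoidf(float value)
    {
        return 1.0f / (1.0f + std::exp(-value));
    }

    int main()
    {
        // A logit of 0 maps to exactly 0.5, and +/-v are mirrored around it.
        assert(std::fabs(sigmoidf(0.0f) - 0.5f) < 1e-6f);
        assert(std::fabs(sigmoidf(2.0f) + sigmoidf(-2.0f) - 1.0f) < 1e-6f);
        return 0;
    }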
 
-       float PostProcess::dequant(float value, float scale, float zeropoint)
-       {
-               return value/scale + zeropoint;
-       }
+float PostProcess::dequant(float value, float scale, float zeropoint)
+{
+       return value / scale + zeropoint;
+}
 
-       int PostProcess::ScoreClear(int size)
-       {
-               std::priority_queue<std::pair<float, int>,
-                                                       std::vector<std::pair<float, int>>,
-                                                       std::greater<std::pair<float, int>>>().swap(mScore);
-               mMaxScoreSize = size;
+int PostProcess::ScoreClear(int size)
+{
+       std::priority_queue<std::pair<float, int>, std::vector<std::pair<float, int> >,
+                                               std::greater<std::pair<float, int> > >()
+                       .swap(mScore);
+       mMaxScoreSize = size;
 
-               return MEDIA_VISION_ERROR_NONE;
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+int PostProcess::ScorePush(float value, int index)
+{
+       mScore.push(std::pair<float, int>(value, index));
+       if (mScore.size() > (size_t) mMaxScoreSize) {
+               mScore.pop();
        }
 
-       int PostProcess::ScorePush(float value, int index)
-       {
-               mScore.push(std::pair<float, int>(value, index));
-               if (mScore.size() > (size_t)mMaxScoreSize) {
-                       mScore.pop();
-               }
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               return MEDIA_VISION_ERROR_NONE;
+int PostProcess::ScorePop(std::vector<std::pair<float, int> > &top)
+{
+       top.clear();
+       while (!mScore.empty()) {
+               top.push_back(mScore.top());
+               LOGI("%.3f", mScore.top().first);
+               mScore.pop();
        }
 
-       int PostProcess::ScorePop(std::vector<std::pair<float, int>>& top)
-       {
-               top.clear();
-               while (!mScore.empty()) {
-                       top.push_back(mScore.top());
-                       LOGI("%.3f", mScore.top().first);
-                       mScore.pop();
-               }
+       std::reverse(top.begin(), top.end());
 
-               std::reverse(top.begin(), top.end());
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               return MEDIA_VISION_ERROR_NONE;
-       }
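
ScorePush/ScorePop above keep the top-K scores with a min-heap: the heap's top is always the weakest kept entry, so pushing past K elements evicts it, and draining then reversing yields a best-first list. A standalone sketch of the same pattern (K and the scores are illustrative):

	#include <algorithm>
	#include <cstdio>
	#include <queue>
	#include <utility>
	#include <vector>

	int main()
	{
		const size_t K = 3;
		std::priority_queue<std::pair<float, int>, std::vector<std::pair<float, int> >,
							std::greater<std::pair<float, int> > > heap; // min-heap
		const float scores[] = { 0.2f, 0.9f, 0.5f, 0.7f, 0.1f };
		for (int i = 0; i < 5; ++i) {
			heap.push(std::make_pair(scores[i], i));
			if (heap.size() > K)
				heap.pop(); // discard the current minimum
		}
		std::vector<std::pair<float, int> > top;
		while (!heap.empty()) { // drains in ascending order
			top.push_back(heap.top());
			heap.pop();
		}
		std::reverse(top.begin(), top.end()); // best first
		for (size_t i = 0; i < top.size(); ++i)
			std::printf("index %d: %.1f\n", top[i].second, top[i].first);
		return 0;
	}
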
+static bool compareScore(Box box0, Box box1)
+{
+       return box0.score > box1.score;
+}
 
-       static bool compareScore(Box box0, Box box1)
-       {
-               return box0.score > box1.score;
-       }
+static float calcIntersectionOverUnion(Box box0, Box box1)
+{
+       float area0 = box0.location.width * box0.location.height;
+       float area1 = box1.location.width * box1.location.height;
+
+       if (area0 <= 0.0f || area1 <= 0.0f)
+               return 0.0f;
+
+       float sx0 = box0.location.x - box0.location.width * 0.5f;
+       float sy0 = box0.location.y - box0.location.height * 0.5f;
+       float ex0 = box0.location.x + box0.location.width * 0.5f;
+       float ey0 = box0.location.y + box0.location.height * 0.5f;
+       float sx1 = box1.location.x - box1.location.width * 0.5f;
+       float sy1 = box1.location.y - box1.location.height * 0.5f;
+       float ex1 = box1.location.x + box1.location.width * 0.5f;
+       float ey1 = box1.location.y + box1.location.height * 0.5f;
+
+       float xmin0 = std::min(sx0, ex0);
+       float ymin0 = std::min(sy0, ey0);
+       float xmax0 = std::max(sx0, ex0);
+       float ymax0 = std::max(sy0, ey0);
+       float xmin1 = std::min(sx1, ex1);
+       float ymin1 = std::min(sy1, ey1);
+       float xmax1 = std::max(sx1, ex1);
+       float ymax1 = std::max(sy1, ey1);
+
+       float intersectXmin = std::max(xmin0, xmin1);
+       float intersectYmin = std::max(ymin0, ymin1);
+       float intersectXmax = std::min(xmax0, xmax1);
+       float intersectYmax = std::min(ymax0, ymax1);
+
+       float intersectArea =
+                       std::max((intersectYmax - intersectYmin), 0.0f) * std::max((intersectXmax - intersectXmin), 0.0f);
+       return intersectArea / (area0 + area1 - intersectArea);
+}
+
+int PostProcess::Nms(BoxesList &boxeslist, int mode, float threshold, Boxes &nmsboxes)
+{
+       LOGI("ENTER");
 
-       static float calcIntersectionOverUnion(Box box0, Box box1)
-       {
-
-               float area0 = box0.location.width * box0.location.height;
-               float area1 = box1.location.width * box1.location.height;
-
-               if (area0 <= 0.0f || area1 <= 0.0f)
-                       return 0.0f;
-
-               float sx0 = box0.location.x - box0.location.width * 0.5f;
-               float sy0 = box0.location.y - box0.location.height * 0.5f;
-               float ex0 = box0.location.x + box0.location.width * 0.5f;
-               float ey0 = box0.location.y + box0.location.height * 0.5f;
-               float sx1 = box1.location.x - box1.location.width * 0.5f;
-               float sy1 = box1.location.y - box1.location.height * 0.5f;
-               float ex1 = box1.location.x + box1.location.width * 0.5f;
-               float ey1 = box1.location.y + box1.location.height * 0.5f;
-
-               float xmin0 = std::min (sx0, ex0);
-               float ymin0 = std::min (sy0, ey0);
-               float xmax0 = std::max (sx0, ex0);
-               float ymax0 = std::max (sy0, ey0);
-               float xmin1 = std::min (sx1, ex1);
-               float ymin1 = std::min (sy1, ey1);
-               float xmax1 = std::max (sx1, ex1);
-               float ymax1 = std::max (sy1, ey1);
-
-               float intersectXmin = std::max(xmin0, xmin1);
-               float intersectYmin = std::max(ymin0, ymin1);
-               float intersectXmax = std::min(xmax0, xmax1);
-               float intersectYmax = std::min(ymax0, ymax1);
-
-               float intersectArea = std::max((intersectYmax - intersectYmin), 0.0f) *
-                                                         std::max((intersectXmax - intersectXmin), 0.0f);
-               return intersectArea / (area0 + area1 - intersectArea);
+       if (mode != 0) {
+               LOGI("Skip Nms");
+               LOGI("LEAVE");
+               return MEDIA_VISION_ERROR_NONE;
        }
 
-       int PostProcess::Nms(BoxesList& boxeslist, int mode, float threshold, Boxes& nmsboxes)
-       {
-               LOGI("ENTER");
-
-               if (mode != 0) {
-                       LOGI("Skip Nms");
-                       LOGI("LEAVE");
-                       return MEDIA_VISION_ERROR_NONE;
-               }
-
-               LOGI("threshold: %.3f", threshold);
-               bool isIgnore = false;
-               Boxes candidateBoxes;
-               for (auto& boxList : boxeslist) {
-                       if (boxList.size() <=0 )
-                               continue;
-
-                       std::sort(boxList.begin(), boxList.end(), compareScore);
-                       candidateBoxes.clear();
-                       for (auto& decodedBox : boxList) {
-                               isIgnore = false;
-                               for (auto candidateBox = candidateBoxes.rbegin(); candidateBox != candidateBoxes.rend(); ++candidateBox) {
-                                       // compare decodedBox with previous one
-                                       float iouValue = calcIntersectionOverUnion(decodedBox, (*candidateBox));
-                                       LOGI("iouValue: %.3f", iouValue);
-                                       if (iouValue >= threshold) {
-                                               isIgnore = true;
-                                               break;
-                                       }
-                               }
-                               if (!isIgnore) {
-                                       candidateBoxes.push_back(decodedBox);
+       LOGI("threshold: %.3f", threshold);
+       bool isIgnore = false;
+       Boxes candidateBoxes;
+       for (auto &boxList : boxeslist) {
+               if (boxList.size() <= 0)
+                       continue;
+
+               std::sort(boxList.begin(), boxList.end(), compareScore);
+               candidateBoxes.clear();
+               for (auto &decodedBox : boxList) {
+                       isIgnore = false;
+                       for (auto candidateBox = candidateBoxes.rbegin(); candidateBox != candidateBoxes.rend(); ++candidateBox) {
+                               // compare decodedBox against the candidates kept so far
+                               float iouValue = calcIntersectionOverUnion(decodedBox, (*candidateBox));
+                               LOGI("iouValue: %.3f", iouValue);
+                               if (iouValue >= threshold) {
+                                       isIgnore = true;
+                                       break;
                                }
                        }
-                       if (candidateBoxes.size() > 0) {
-                               nmsboxes.insert(nmsboxes.begin(), candidateBoxes.begin(), candidateBoxes.end());
+                       if (!isIgnore) {
+                               candidateBoxes.push_back(decodedBox);
                        }
                }
-               LOGI("LEAVE");
-
-               return MEDIA_VISION_ERROR_NONE;
+               if (candidateBoxes.size() > 0) {
+                       nmsboxes.insert(nmsboxes.begin(), candidateBoxes.begin(), candidateBoxes.end());
+               }
        }
+       LOGI("LEAVE");
+
+       return MEDIA_VISION_ERROR_NONE;
+}
 } /* Inference */
 } /* MediaVision */
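
calcIntersectionOverUnion and Nms above implement standard greedy non-maximum suppression: boxes are stored as (center, size), IoU is intersection area over union area, and after a descending sort by score each box is kept only if it overlaps no already-kept box at or above the threshold. A self-contained sketch (SimpleBox and the threshold are illustrative, not the MediaVision types):

	#include <algorithm>
	#include <cstdio>
	#include <vector>

	struct SimpleBox {
		float cx, cy, w, h, score; // center + size, as in the code above
	};

	static float iou(const SimpleBox &a, const SimpleBox &b)
	{
		float areaA = a.w * a.h, areaB = b.w * b.h;
		if (areaA <= 0.0f || areaB <= 0.0f)
			return 0.0f;
		float ix = std::min(a.cx + a.w / 2, b.cx + b.w / 2) - std::max(a.cx - a.w / 2, b.cx - b.w / 2);
		float iy = std::min(a.cy + a.h / 2, b.cy + b.h / 2) - std::max(a.cy - a.h / 2, b.cy - b.h / 2);
		float inter = std::max(ix, 0.0f) * std::max(iy, 0.0f);
		return inter / (areaA + areaB - inter);
	}

	static std::vector<SimpleBox> nms(std::vector<SimpleBox> boxes, float threshold)
	{
		std::sort(boxes.begin(), boxes.end(),
				  [](const SimpleBox &l, const SimpleBox &r) { return l.score > r.score; });
		std::vector<SimpleBox> kept;
		for (const auto &box : boxes) {
			bool ignore = false;
			for (const auto &k : kept) {
				if (iou(box, k) >= threshold) { // overlaps a stronger box
					ignore = true;
					break;
				}
			}
			if (!ignore)
				kept.push_back(box);
		}
		return kept;
	}

	int main()
	{
		std::vector<SimpleBox> boxes = { { 5.0f, 5.0f, 4.0f, 4.0f, 0.9f },
										 { 5.5f, 5.0f, 4.0f, 4.0f, 0.8f },
										 { 20.0f, 20.0f, 4.0f, 4.0f, 0.7f } };
		for (const auto &b : nms(boxes, 0.5f)) // keeps the first and third box
			std::printf("kept (%.1f, %.1f), score %.1f\n", b.cx, b.cy, b.score);
		return 0;
	}
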
index 14c0cec..f9d237d 100644 (file)
@@ -30,332 +30,319 @@ namespace mediavision
 {
 namespace inference
 {
-Posture::Posture() :
-    mBvhParser(),
-    mBvh(),
-    mPose()
+Posture::Posture() : mBvhParser(), mBvh(), mPose()
 {
-    LOGI("ENTER");
+       LOGI("ENTER");
 
-    mMotionToPoseMap.clear();
-    mPose.assign(HUMAN_POSE_MAX_LANDMARKS, std::make_pair(false, cv::Point(-1,-1)));
+       mMotionToPoseMap.clear();
+       mPose.assign(HUMAN_POSE_MAX_LANDMARKS, std::make_pair(false, cv::Point(-1, -1)));
 
-    mPoseParts.assign(HUMAN_POSE_MAX_PARTS, std::make_pair(false, std::vector<cv::Vec2f>()));
+       mPoseParts.assign(HUMAN_POSE_MAX_PARTS, std::make_pair(false, std::vector<cv::Vec2f>()));
 
-    LOGI("LEAVE");
+       LOGI("LEAVE");
 }
 
 Posture::~Posture()
 {
-    LOGI("ENTER");
+       LOGI("ENTER");
 
-    std::vector<std::pair<bool, cv::Point>>().swap(mPose);
+       std::vector<std::pair<bool, cv::Point> >().swap(mPose);
 
-    LOGI("LEAVE");
+       LOGI("LEAVE");
 }
 
-
-int Posture::getParts(int parts,
-                    std::vector<std::pair<bool, cv::Point>>& pose,
-                    std::vector<std::pair<bool, std::vector<cv::Vec2f>>>& posePart)
+int Posture::getParts(int parts, std::vector<std::pair<bool, cv::Point> > &pose,
+                                         std::vector<std::pair<bool, std::vector<cv::Vec2f> > > &posePart)
 {
-    LOGI("ENTER");
-    // head
-    if (parts & MV_INFERENCE_HUMAN_BODY_PART_HEAD) {
-        LOGI("HEAD");
-        if (pose[0].first == false || pose[1].first == false || pose[2].first == false) {
-            posePart[0].first = false;
-        } else {
-            posePart[0].first = true;
-            posePart[0].second.push_back(getUnitVectors(pose[0].second, pose[1].second));
-            posePart[0].second.push_back(getUnitVectors(pose[1].second, pose[2].second));
-        }
-    }
-
-    // right arm
-    if (parts & MV_INFERENCE_HUMAN_BODY_PART_ARM_RIGHT) {
-        LOGI("ARM-R");
-        if (pose[3].first == false || pose[4].first == false || pose[5].first == false) {
-            posePart[1].first = false;
-        } else {
-            posePart[1].first = true;
-            posePart[1].second.push_back(getUnitVectors(pose[3].second, pose[4].second));
-            posePart[1].second.push_back(getUnitVectors(pose[4].second, pose[5].second));
-        }
-    }
-
-    // left arm
-    if (parts & MV_INFERENCE_HUMAN_BODY_PART_ARM_LEFT) {
-        LOGI("ARM-L");
-        if (pose[6].first == false || pose[7].first == false || pose[8].first == false) {
-            posePart[2].first = false;
-        } else {
-            posePart[2].first = true;
-            posePart[2].second.push_back(getUnitVectors(pose[6].second, pose[7].second));
-            posePart[2].second.push_back(getUnitVectors(pose[7].second, pose[8].second));
-        }
-    }
-
-    // right leg
-    if (parts & MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT) {
-        LOGI("LEG-R");
-        if (pose[10].first == false || pose[11].first == false || pose[12].first == false) {
-            posePart[3].first = false;
-        } else {
-            posePart[3].first = true;
-            posePart[3].second.push_back(getUnitVectors(pose[10].second, pose[11].second));
-            posePart[3].second.push_back(getUnitVectors(pose[11].second, pose[12].second));
-        }
-    }
-
-    // left leg
-    if (parts & MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT) {
-        LOGI("LEG-L");
-        if (pose[13].first == false || pose[14].first == false || pose[15].first == false) {
-            posePart[4].first = false;
-        } else {
-            posePart[4].first = true;
-            posePart[4].second.push_back(getUnitVectors(pose[13].second, pose[14].second));
-            posePart[4].second.push_back(getUnitVectors(pose[14].second, pose[15].second));
-
-        }
-    }
-
-    // body
-    if (parts & MV_INFERENCE_HUMAN_BODY_PART_BODY) {
-        LOGI("BODY");
-        if (pose[2].first == false || pose[9].first == false ||
-            pose[10].first == false || pose[13].first == false) {
-            posePart[5].first = false;
-        } else {
-            posePart[5].first = true;
-            posePart[5].second.push_back(getUnitVectors(pose[2].second, pose[9].second));
-            posePart[5].second.push_back(getUnitVectors(pose[9].second, pose[10].second));
-            posePart[5].second.push_back(getUnitVectors(pose[9].second, pose[13].second));
-        }
-    }
-
-    LOGI("LEAVE");
-
-    return MEDIA_VISION_ERROR_NONE;
+       LOGI("ENTER");
+       // head
+       if (parts & MV_INFERENCE_HUMAN_BODY_PART_HEAD) {
+               LOGI("HEAD");
+               if (pose[0].first == false || pose[1].first == false || pose[2].first == false) {
+                       posePart[0].first = false;
+               } else {
+                       posePart[0].first = true;
+                       posePart[0].second.push_back(getUnitVectors(pose[0].second, pose[1].second));
+                       posePart[0].second.push_back(getUnitVectors(pose[1].second, pose[2].second));
+               }
+       }
+
+       // right arm
+       if (parts & MV_INFERENCE_HUMAN_BODY_PART_ARM_RIGHT) {
+               LOGI("ARM-R");
+               if (pose[3].first == false || pose[4].first == false || pose[5].first == false) {
+                       posePart[1].first = false;
+               } else {
+                       posePart[1].first = true;
+                       posePart[1].second.push_back(getUnitVectors(pose[3].second, pose[4].second));
+                       posePart[1].second.push_back(getUnitVectors(pose[4].second, pose[5].second));
+               }
+       }
+
+       // left arm
+       if (parts & MV_INFERENCE_HUMAN_BODY_PART_ARM_LEFT) {
+               LOGI("ARM-L");
+               if (pose[6].first == false || pose[7].first == false || pose[8].first == false) {
+                       posePart[2].first = false;
+               } else {
+                       posePart[2].first = true;
+                       posePart[2].second.push_back(getUnitVectors(pose[6].second, pose[7].second));
+                       posePart[2].second.push_back(getUnitVectors(pose[7].second, pose[8].second));
+               }
+       }
+
+       // right leg
+       if (parts & MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT) {
+               LOGI("LEG-R");
+               if (pose[10].first == false || pose[11].first == false || pose[12].first == false) {
+                       posePart[3].first = false;
+               } else {
+                       posePart[3].first = true;
+                       posePart[3].second.push_back(getUnitVectors(pose[10].second, pose[11].second));
+                       posePart[3].second.push_back(getUnitVectors(pose[11].second, pose[12].second));
+               }
+       }
+
+       // left leg
+       if (parts & MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT) {
+               LOGI("LEG-L");
+               if (pose[13].first == false || pose[14].first == false || pose[15].first == false) {
+                       posePart[4].first = false;
+               } else {
+                       posePart[4].first = true;
+                       posePart[4].second.push_back(getUnitVectors(pose[13].second, pose[14].second));
+                       posePart[4].second.push_back(getUnitVectors(pose[14].second, pose[15].second));
+               }
+       }
+
+       // body
+       if (parts & MV_INFERENCE_HUMAN_BODY_PART_BODY) {
+               LOGI("BODY");
+               if (pose[2].first == false || pose[9].first == false || pose[10].first == false || pose[13].first == false) {
+                       posePart[5].first = false;
+               } else {
+                       posePart[5].first = true;
+                       posePart[5].second.push_back(getUnitVectors(pose[2].second, pose[9].second));
+                       posePart[5].second.push_back(getUnitVectors(pose[9].second, pose[10].second));
+                       posePart[5].second.push_back(getUnitVectors(pose[9].second, pose[13].second));
+               }
+       }
+
+       LOGI("LEAVE");
+
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int Posture::setPoseFromFile(const std::string motionCaptureFilePath, const std::string motionMappingFilePath)
 {
-    LOGI("ENTER");
-
-    int ret = MEDIA_VISION_ERROR_NONE;
-
-    // parsing motion capture file
-    LOGD("%s", motionCaptureFilePath.c_str());
-    LOGD("%s", motionMappingFilePath.c_str());
-    ret = mBvhParser.parse(motionCaptureFilePath.c_str(), &mBvh);
-    LOGD("frames: %d",mBvh.num_frames());
-
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        LOGE("Fail to parse a file [%s]", motionCaptureFilePath.c_str());
-        return MEDIA_VISION_ERROR_INTERNAL;
-    }
-
-    mBvh.recalculate_joints_ltm();
-
-    LOGD("reading motion mapping....");
-    // read motion mapping file
-    std::ifstream fp(motionMappingFilePath.c_str());
-    if (!fp.is_open()) {
-        LOGE("Fail to open %s", motionMappingFilePath.c_str());
-        return MEDIA_VISION_ERROR_INVALID_PATH;
-    }
-
-    std::string line;
-    mMotionToPoseMap.clear();
-    while (!fp.eof()) {
-        std::getline(fp, line);
-
-        if (line.empty())
-            continue;
-
-        LOGD("%s", line.c_str());
-        std::istringstream lineStream(line);
-        std::string token;
-        std::vector<std::string> parsedString;
-        while(getline(lineStream, token, ',')) {
-            parsedString.push_back(token);
-        }
-        LOGD("name: %s, mapping index: %d", parsedString[0].c_str(), std::stoi(parsedString[1]));
-        mMotionToPoseMap.insert(make_pair(parsedString[0], std::stoi(parsedString[1])-1));
-    }
-
-    fp.close();
-    LOGD("mapping size is %zd", mMotionToPoseMap.size());
-
-    // convert bvh to pose
-    //convertBvhToPose();
-    //for (std::shared_ptr<Bvh::Joint>)
-
-    float pointX, pointY, minX, minY, maxX, maxY;
-    minX = minY = FLT_MAX;
-    maxX = maxY = FLT_MIN;
-    for (std::shared_ptr<Joint> joint : mBvh.joints()) {
-        std::map<std::string, int>::iterator it = mMotionToPoseMap.find(std::string(joint->name()));
-        if (it != mMotionToPoseMap.end()) {
-            pointX = joint->pos(0)[0];
-            pointY = joint->pos(0)[1];
-            if (pointX < minX)
-                minX = pointX;
-
-            if (pointY < minY)
-                minY = pointY;
-
-            if (pointX > maxX)
-                maxX = pointX;
-
-            if (pointY > maxY)
-                maxY = pointY;
-
-            mPose[it->second].first = true;
-            mPose[it->second].second = cv::Point(pointX, pointY);
-            LOGD("%d: (%f,%f)", it->second, pointX, pointY);
-        }
-    }
-
-    // add offset to make x > 0 and y > 0
-    int height = (int)maxY - (int)minY + POSE_OFFSET_VALUE;
-    for (std::vector<std::pair<bool, cv::Point>>::iterator iter = mPose.begin();
-        iter != mPose.end(); iter++) {
-        if (iter->first == false)
-            continue;
-
-        iter->second.x -= minX;
-        iter->second.y -= minY;
-
-        iter->second.x += POSE_OFFSET_VALUE;
-        iter->second.y += POSE_OFFSET_VALUE;
-
-        iter->second.y = height - iter->second.y;
-
-        LOGI("(%d, %d)", iter->second.x, iter->second.y);
-    }
-
-    ret = getParts((MV_INFERENCE_HUMAN_BODY_PART_HEAD |
-                    MV_INFERENCE_HUMAN_BODY_PART_ARM_RIGHT |
-                    MV_INFERENCE_HUMAN_BODY_PART_ARM_LEFT |
-                    MV_INFERENCE_HUMAN_BODY_PART_BODY |
-                    MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT |
-                    MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT),
-                    mPose, mPoseParts);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        LOGE("Fail to getPartse");
-        return ret;
-    }
-
-    LOGI("LEAVE");
-
-    return ret;
+       LOGI("ENTER");
+
+       int ret = MEDIA_VISION_ERROR_NONE;
+
+       // parsing motion capture file
+       LOGD("%s", motionCaptureFilePath.c_str());
+       LOGD("%s", motionMappingFilePath.c_str());
+       ret = mBvhParser.parse(motionCaptureFilePath.c_str(), &mBvh);
+       LOGD("frames: %d", mBvh.num_frames());
+
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to parse a file [%s]", motionCaptureFilePath.c_str());
+               return MEDIA_VISION_ERROR_INTERNAL;
+       }
+
+       mBvh.recalculate_joints_ltm();
+
+       LOGD("reading motion mapping....");
+       // read motion mapping file
+       std::ifstream fp(motionMappingFilePath.c_str());
+       if (!fp.is_open()) {
+               LOGE("Fail to open %s", motionMappingFilePath.c_str());
+               return MEDIA_VISION_ERROR_INVALID_PATH;
+       }
+
+       std::string line;
+       mMotionToPoseMap.clear();
+       while (!fp.eof()) {
+               std::getline(fp, line);
+
+               if (line.empty())
+                       continue;
+
+               LOGD("%s", line.c_str());
+               std::istringstream lineStream(line);
+               std::string token;
+               std::vector<std::string> parsedString;
+               while (getline(lineStream, token, ',')) {
+                       parsedString.push_back(token);
+               }
+               LOGD("name: %s, mapping index: %d", parsedString[0].c_str(), std::stoi(parsedString[1]));
+               mMotionToPoseMap.insert(make_pair(parsedString[0], std::stoi(parsedString[1]) - 1));
+       }
+
+       fp.close();
+       LOGD("mapping size is %zd", mMotionToPoseMap.size());
+
+       // convert bvh to pose
+       //convertBvhToPose();
+       //for (std::shared_ptr<Bvh::Joint>)
+
+       float pointX, pointY, minX, minY, maxX, maxY;
+       minX = minY = FLT_MAX;
+       maxX = maxY = FLT_MIN;
+       for (std::shared_ptr<Joint> joint : mBvh.joints()) {
+               std::map<std::string, int>::iterator it = mMotionToPoseMap.find(std::string(joint->name()));
+               if (it != mMotionToPoseMap.end()) {
+                       pointX = joint->pos(0)[0];
+                       pointY = joint->pos(0)[1];
+                       if (pointX < minX)
+                               minX = pointX;
+
+                       if (pointY < minY)
+                               minY = pointY;
+
+                       if (pointX > maxX)
+                               maxX = pointX;
+
+                       if (pointY > maxY)
+                               maxY = pointY;
+
+                       mPose[it->second].first = true;
+                       mPose[it->second].second = cv::Point(pointX, pointY);
+                       LOGD("%d: (%f,%f)", it->second, pointX, pointY);
+               }
+       }
+
+       // add offset to make x > 0 and y > 0
+       int height = (int) maxY - (int) minY + POSE_OFFSET_VALUE;
+       for (std::vector<std::pair<bool, cv::Point> >::iterator iter = mPose.begin(); iter != mPose.end(); iter++) {
+               if (iter->first == false)
+                       continue;
+
+               iter->second.x -= minX;
+               iter->second.y -= minY;
+
+               iter->second.x += POSE_OFFSET_VALUE;
+               iter->second.y += POSE_OFFSET_VALUE;
+
+               iter->second.y = height - iter->second.y;
+
+               LOGI("(%d, %d)", iter->second.x, iter->second.y);
+       }
+
+       ret = getParts((MV_INFERENCE_HUMAN_BODY_PART_HEAD | MV_INFERENCE_HUMAN_BODY_PART_ARM_RIGHT |
+                                       MV_INFERENCE_HUMAN_BODY_PART_ARM_LEFT | MV_INFERENCE_HUMAN_BODY_PART_BODY |
+                                       MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT | MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT),
+                                  mPose, mPoseParts);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to getParts");
+               return ret;
+       }
+
+       LOGI("LEAVE");
+
+       return ret;
 }
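
The coordinate fix-up at the end of setPoseFromFile shifts all joints so the minima land at POSE_OFFSET_VALUE and then flips the y axis (BVH y grows upward, image y grows downward). A tiny standalone sketch of that arithmetic (the offset value here is an assumption, not the real POSE_OFFSET_VALUE):

	#include <cstdio>

	int main()
	{
		const int offset = 20; // stand-in for POSE_OFFSET_VALUE (assumed)
		float minX = -3.0f, minY = -7.0f, maxY = 10.0f; // joint bounds
		int height = (int) maxY - (int) minY + offset; // 37

		float x = 2.0f, y = 4.0f; // one joint position
		int px = (int) (x - minX) + offset; // shift: 25
		int py = height - ((int) (y - minY) + offset); // shift, then flip y: 6
		std::printf("(%d, %d)\n", px, py);
		return 0;
	}
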
 
 cv::Vec2f Posture::getUnitVectors(cv::Point point1, cv::Point point2)
 {
-    LOGI("ENTER");
+       LOGI("ENTER");
 
-    cv::Vec2i vec(point1.x - point2.x, point1.y - point2.y);
-    cv::Vec2f unitVec (vec[0]/cv::norm(vec, cv::NORM_L1), vec[1]/cv::norm(vec, cv::NORM_L1));
+       cv::Vec2i vec(point1.x - point2.x, point1.y - point2.y);
+       cv::Vec2f unitVec(vec[0] / cv::norm(vec, cv::NORM_L1), vec[1] / cv::norm(vec, cv::NORM_L1));
 
-    LOGI("LEAVE");
+       LOGI("LEAVE");
 
-    return unitVec;
+       return unitVec;
 }
 
 float Posture::cosineSimilarity(std::vector<cv::Vec2f> vec1, std::vector<cv::Vec2f> vec2, int size)
 {
-    float numer = 0.0f;
-    float denom1 = 0.0f;
-    float denom2 = 0.0f;
-
-    float value = 0.0f;
-
-    for (int k = 0; k < size; ++k) {
-        numer = denom1 = denom2 = 0.0f;
-        for (int dim = 0; dim <2; ++dim) {
-            numer += (vec1[k][dim] * vec2[k][dim]);
-            denom1 += (vec1[k][dim] * vec1[k][dim]);
-            denom2 += (vec2[k][dim] * vec2[k][dim]);
-        }
-        LOGI("similarity: %f", numer / sqrt( denom1 * denom2));
-        value += numer / sqrt( denom1 * denom2);
-
-    }
-
-    return value;
+       float numer = 0.0f;
+       float denom1 = 0.0f;
+       float denom2 = 0.0f;
+
+       float value = 0.0f;
+
+       for (int k = 0; k < size; ++k) {
+               numer = denom1 = denom2 = 0.0f;
+               for (int dim = 0; dim < 2; ++dim) {
+                       numer += (vec1[k][dim] * vec2[k][dim]);
+                       denom1 += (vec1[k][dim] * vec1[k][dim]);
+                       denom2 += (vec2[k][dim] * vec2[k][dim]);
+               }
+               LOGI("similarity: %f", numer / sqrt(denom1 * denom2));
+               value += numer / sqrt(denom1 * denom2);
+       }
+
+       return value;
 }
 
-float Posture::getSimilarity(int parts,
-                                       std::vector<std::pair<bool, std::vector<cv::Vec2f>>>& posePart,
-                                       std::vector<std::pair<bool, std::vector<cv::Vec2f>>>& actionPart)
+float Posture::getSimilarity(int parts, std::vector<std::pair<bool, std::vector<cv::Vec2f> > > &posePart,
+                                                        std::vector<std::pair<bool, std::vector<cv::Vec2f> > > &actionPart)
 {
-    float score = 0.0f;
-    unsigned int bodyCount = 0;
-    std::vector<int> index;
+       float score = 0.0f;
+       unsigned int bodyCount = 0;
+       std::vector<int> index;
 
-    if (parts & MV_INFERENCE_HUMAN_BODY_PART_HEAD) {
-        index.push_back(0);
-    }
+       if (parts & MV_INFERENCE_HUMAN_BODY_PART_HEAD) {
+               index.push_back(0);
+       }
 
-    if (parts & MV_INFERENCE_HUMAN_BODY_PART_ARM_RIGHT) {
-        index.push_back(1);
-    }
+       if (parts & MV_INFERENCE_HUMAN_BODY_PART_ARM_RIGHT) {
+               index.push_back(1);
+       }
 
-    if (parts & MV_INFERENCE_HUMAN_BODY_PART_ARM_LEFT) {
-        index.push_back(2);
-    }
+       if (parts & MV_INFERENCE_HUMAN_BODY_PART_ARM_LEFT) {
+               index.push_back(2);
+       }
 
-    if (parts & MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT) {
-        index.push_back(3);
-    }
+       if (parts & MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT) {
+               index.push_back(3);
+       }
 
-    if (parts & MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT) {
-        index.push_back(4);
-    }
+       if (parts & MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT) {
+               index.push_back(4);
+       }
 
-    if (parts & MV_INFERENCE_HUMAN_BODY_PART_BODY) {
-        index.push_back(5);
-    }
+       if (parts & MV_INFERENCE_HUMAN_BODY_PART_BODY) {
+               index.push_back(5);
+       }
 
-    for (std::vector<int>::iterator it = index.begin(); it != index.end(); ++it) {
-        if (posePart[(*it)].first && actionPart[(*it)].first &&
-            (posePart[(*it)].second.size() == actionPart[(*it)].second.size())) {
-            score += cosineSimilarity(posePart[(*it)].second, actionPart[(*it)].second, posePart[(*it)].second.size());
+       for (std::vector<int>::iterator it = index.begin(); it != index.end(); ++it) {
+               if (posePart[(*it)].first && actionPart[(*it)].first &&
+                       (posePart[(*it)].second.size() == actionPart[(*it)].second.size())) {
+                       score += cosineSimilarity(posePart[(*it)].second, actionPart[(*it)].second, posePart[(*it)].second.size());
 
-            bodyCount += posePart[(*it)].second.size();
-            LOGI("body[%d], score[%f], count[%u]", (*it), score, bodyCount);
-        }
-    }
+                       bodyCount += posePart[(*it)].second.size();
+                       LOGI("body[%d], score[%f], count[%u]", (*it), score, bodyCount);
+               }
+       }
 
-    if (bodyCount > 0)
-        score /= (float)bodyCount;
+       if (bodyCount > 0)
+               score /= (float) bodyCount;
 
-    LOGD("score: %1.3f", score);
+       LOGD("score: %1.3f", score);
 
-    return score;
+       return score;
 }
 
-int Posture::compare(int parts, std::vector<std::pair<bool, cv::Point>> action, float* score)
+int Posture::compare(int parts, std::vector<std::pair<bool, cv::Point> > action, float *score)
 {
-    LOGI("ENTER");
+       LOGI("ENTER");
 
-    std::vector<std::pair<bool, std::vector<cv::Vec2f>>> actionParts;
-    actionParts.assign(6, std::make_pair(false, std::vector<cv::Vec2f>()));
-    int ret = getParts(parts, action, actionParts);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        LOGE("Fail to getPartse");
-        return ret;
-    }
+       std::vector<std::pair<bool, std::vector<cv::Vec2f> > > actionParts;
+       actionParts.assign(6, std::make_pair(false, std::vector<cv::Vec2f>()));
+       int ret = getParts(parts, action, actionParts);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to getParts");
+               return ret;
+       }
 
-    *score = getSimilarity(parts, mPoseParts, actionParts);
+       *score = getSimilarity(parts, mPoseParts, actionParts);
 
-    LOGI("LEAVE");
+       LOGI("LEAVE");
 
-    return MEDIA_VISION_ERROR_NONE;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 }
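
getSimilarity above sums cosineSimilarity over the selected body parts and divides by the number of compared limb vectors, so a perfect match scores 1.0. The underlying measure for each pair of 2-D limb vectors is the ordinary cosine similarity, dot(a, b) / (|a||b|); a standalone sketch:

	#include <cmath>
	#include <cstdio>

	// Cosine similarity of two 2-D vectors.
	static float cosineSimilarity2D(const float a[2], const float b[2])
	{
		float dot = a[0] * b[0] + a[1] * b[1];
		float na = std::sqrt(a[0] * a[0] + a[1] * a[1]);
		float nb = std::sqrt(b[0] * b[0] + b[1] * b[1]);
		return dot / (na * nb);
	}

	int main()
	{
		const float v1[2] = { 1.0f, 0.0f };
		const float v2[2] = { 1.0f, 0.0f }; // same direction -> 1.0
		const float v3[2] = { 0.0f, 1.0f }; // perpendicular -> 0.0
		std::printf("same: %.2f, perpendicular: %.2f\n",
					cosineSimilarity2D(v1, v2), cosineSimilarity2D(v1, v3));
		return 0;
	}
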
index 0529124..475753b 100644 (file)
 #include "PreProcess.h"
 
 const int colorConvertTable[][12] = {
-       { 0,  0,                  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
-       { 0, -1,                  0, 0, 0, 0, 0, 0, 0, cv::COLOR_GRAY2BGR565, cv::COLOR_GRAY2RGB,     cv::COLOR_GRAY2RGBA },
-       { 0, cv::COLOR_YUV2GRAY_I420, -1, 0, 0, 0, 0, 0, 0, 0,                cv::COLOR_RGBA2GRAY,    cv::COLOR_YUV2RGBA_I420 },
-       { 0, cv::COLOR_YUV2GRAY_NV12, 0, -1, 0, 0, 0, 0, 0, 0,                cv::COLOR_YUV2RGB_NV12, cv::COLOR_YUV2RGBA_NV12 },
-       { 0, cv::COLOR_YUV2GRAY_YV12, 0, 0, -1, 0, 0, 0, 0, 0,                cv::COLOR_YUV2RGB_YV12, cv::COLOR_YUV2RGBA_YV12 },
-       { 0, cv::COLOR_YUV2GRAY_NV21, 0, 0, 0, -1, 0, 0, 0, 0,                cv::COLOR_YUV2RGB_NV21, cv::COLOR_YUV2RGBA_NV21 },
-       { 0, cv::COLOR_YUV2GRAY_YUYV, 0, 0, 0, 0, -1, 0, 0, 0,                cv::COLOR_YUV2RGB_YUYV, cv::COLOR_YUV2RGBA_YUYV },
-       { 0, cv::COLOR_YUV2GRAY_UYVY, 0, 0, 0, 0, 0, -1, 0, 0,                cv::COLOR_YUV2BGR_UYVY, cv::COLOR_YUV2BGRA_UYVY },
-       { 0, cv::COLOR_YUV2GRAY_Y422, 0, 0, 0, 0, 0, 0, -1, 0,                cv::COLOR_YUV2RGB_Y422, cv::COLOR_YUV2RGBA_Y422 },
-       { 0, cv::COLOR_BGR5652GRAY,   0, 0, 0, 0, 0, 0, 0, -1,                cv::COLOR_BGR5652BGR,   cv::COLOR_BGR5652BGRA   },
-       { 0, cv::COLOR_RGB2GRAY,      0, 0, 0, 0, 0, 0, 0,  0,               -1,                      cv::COLOR_RGB2RGBA      },
-       { 0, cv::COLOR_RGBA2GRAY,     0, 0, 0, 0, 0, 0, 0, cv::COLOR_BGRA2BGR565, cv::COLOR_RGBA2RGB,   -1}
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, -1, 0, 0, 0, 0, 0, 0, 0, cv::COLOR_GRAY2BGR565, cv::COLOR_GRAY2RGB, cv::COLOR_GRAY2RGBA },
+       { 0, cv::COLOR_YUV2GRAY_I420, -1, 0, 0, 0, 0, 0, 0, 0, cv::COLOR_RGBA2GRAY, cv::COLOR_YUV2RGBA_I420 },
+       { 0, cv::COLOR_YUV2GRAY_NV12, 0, -1, 0, 0, 0, 0, 0, 0, cv::COLOR_YUV2RGB_NV12, cv::COLOR_YUV2RGBA_NV12 },
+       { 0, cv::COLOR_YUV2GRAY_YV12, 0, 0, -1, 0, 0, 0, 0, 0, cv::COLOR_YUV2RGB_YV12, cv::COLOR_YUV2RGBA_YV12 },
+       { 0, cv::COLOR_YUV2GRAY_NV21, 0, 0, 0, -1, 0, 0, 0, 0, cv::COLOR_YUV2RGB_NV21, cv::COLOR_YUV2RGBA_NV21 },
+       { 0, cv::COLOR_YUV2GRAY_YUYV, 0, 0, 0, 0, -1, 0, 0, 0, cv::COLOR_YUV2RGB_YUYV, cv::COLOR_YUV2RGBA_YUYV },
+       { 0, cv::COLOR_YUV2GRAY_UYVY, 0, 0, 0, 0, 0, -1, 0, 0, cv::COLOR_YUV2BGR_UYVY, cv::COLOR_YUV2BGRA_UYVY },
+       { 0, cv::COLOR_YUV2GRAY_Y422, 0, 0, 0, 0, 0, 0, -1, 0, cv::COLOR_YUV2RGB_Y422, cv::COLOR_YUV2RGBA_Y422 },
+       { 0, cv::COLOR_BGR5652GRAY, 0, 0, 0, 0, 0, 0, 0, -1, cv::COLOR_BGR5652BGR, cv::COLOR_BGR5652BGRA },
+       { 0, cv::COLOR_RGB2GRAY, 0, 0, 0, 0, 0, 0, 0, 0, -1, cv::COLOR_RGB2RGBA },
+       { 0, cv::COLOR_RGBA2GRAY, 0, 0, 0, 0, 0, 0, 0, cv::COLOR_BGRA2BGR565, cv::COLOR_RGBA2RGB, -1 }
 };
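
Reading the table: rows are source color spaces and columns are destination color spaces (the conversion codes in each row suggest the mv_colorspace_e order INVALID, Y800, I420, NV12, YV12, NV21, YUYV, UYVY, 422P, RGB565, RGB888, RGBA, though that ordering is inferred here, not confirmed). An entry of -1 means the two formats already match, 0 means the pair is unsupported, and a positive value is the cv::cvtColor code to apply. A hedged sketch of that contract, as consumed by PreProcess::ColorConvert below:

	#include <opencv2/imgproc.hpp>

	// Apply one colorConvertTable entry: -1 = pass through, 0 = unsupported,
	// positive = an OpenCV conversion code such as cv::COLOR_RGB2GRAY.
	static bool applyTableEntry(const cv::Mat &src, cv::Mat &dst, int entry)
	{
		if (entry == -1) { // formats already match
			dst = src;
			return true;
		}
		if (entry == 0) // unsupported source/destination pair
			return false;
		cv::cvtColor(src, dst, entry);
		return true;
	}
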
 
 namespace mediavision
 {
 namespace inference
 {
-       int PreProcess::Resize(cv::Mat& source, cv::Mat& dest, cv::Size size)
-       {
-               LOGI("ENTER");
-
-               try {
-                       cv::resize(source, dest, size);
-               } catch (cv::Exception& e) {
-                       LOGE("Fail to resize with msg: %s", e.what());
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
-
-               LOGI("LEAVE");
+int PreProcess::Resize(cv::Mat &source, cv::Mat &dest, cv::Size size)
+{
+       LOGI("ENTER");
 
-               return MEDIA_VISION_ERROR_NONE;
+       try {
+               cv::resize(source, dest, size);
+       } catch (cv::Exception &e) {
+               LOGE("Fail to resize with msg: %s", e.what());
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
+       LOGI("LEAVE");
 
-       int PreProcess::ColorConvert(cv::Mat& source, cv::Mat& dest, int sType, int dType)
-       {
-               LOGI("ENTER");
-
-               auto conversionColor = static_cast<int>(colorConvertTable[sType][dType]);
-               if (conversionColor == -1) {/* Don't need conversion */
-                       dest = source;
-               } else if (conversionColor > 0) {
-                       /* Class for representation the given image as cv::Mat before conversion */
-                       cv::cvtColor(source, dest, conversionColor);
-               } else {
-                       LOGE("Fail to ColorConvert");
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               LOGI("LEAVE");
-
-               return MEDIA_VISION_ERROR_NONE;
+int PreProcess::ColorConvert(cv::Mat &source, cv::Mat &dest, int sType, int dType)
+{
+       LOGI("ENTER");
+
+       auto conversionColor = static_cast<int>(colorConvertTable[sType][dType]);
+       if (conversionColor == -1) { /* No conversion needed */
+               dest = source;
+       } else if (conversionColor > 0) {
+               /* Convert the given cv::Mat image into the target color space */
+               cv::cvtColor(source, dest, conversionColor);
+       } else {
+               LOGE("Fail to ColorConvert");
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       int PreProcess::Normalize(cv::Mat& source, cv::Mat& dest,
-                                                       const std::vector<double>& mean, const std::vector<double>& std)
-       {
-               LOGI("ENTER");
-
-               try {
-                       cv::subtract(source, cv::Scalar(mean[0], mean[1], mean[2]), dest);
-                       source = dest;
-                       cv::divide(source, cv::Scalar(std[0], std[1], std[2]), dest);
-               } catch (cv::Exception& e) {
-                       LOGE("Fail to substract/divide with msg: %s", e.what());
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
+       LOGI("LEAVE");
 
-               LOGI("LEAVE");
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               return MEDIA_VISION_ERROR_NONE;
+int PreProcess::Normalize(cv::Mat &source, cv::Mat &dest, const std::vector<double> &mean,
+                                                 const std::vector<double> &std)
+{
+       LOGI("ENTER");
+
+       try {
+               cv::subtract(source, cv::Scalar(mean[0], mean[1], mean[2]), dest);
+               source = dest;
+               cv::divide(source, cv::Scalar(std[0], std[1], std[2]), dest);
+       } catch (cv::Exception &e) {
+               LOGE("Fail to subtract/divide with msg: %s", e.what());
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       int PreProcess::Quantize(cv::Mat& source, cv::Mat& dest,
-                                                       const std::vector<double>& scale, const std::vector<double>& zeropoint)
-       {
-               LOGI("ENTER");
-
-               try {
-                       cv::subtract(source, cv::Scalar(zeropoint[0], zeropoint[1], zeropoint[2]), dest);
-                       source = dest;
-                       cv::multiply(source, cv::Scalar(scale[0], scale[1], scale[2]), dest);
-               } catch (cv::Exception& e) {
-                       LOGE("Fail to subtract/multiply with msg: %s", e.what());
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
+       LOGI("LEAVE");
 
-               LOGI("LEAVE");
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               return MEDIA_VISION_ERROR_NONE;
+int PreProcess::Quantize(cv::Mat &source, cv::Mat &dest, const std::vector<double> &scale,
+                                                const std::vector<double> &zeropoint)
+{
+       LOGI("ENTER");
+
+       try {
+               cv::subtract(source, cv::Scalar(zeropoint[0], zeropoint[1], zeropoint[2]), dest);
+               source = dest;
+               cv::multiply(source, cv::Scalar(scale[0], scale[1], scale[2]), dest);
+       } catch (cv::Exception &e) {
+               LOGE("Fail to subtract/multiply with msg: %s", e.what());
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       int PreProcess::Run(cv::Mat& source, const int colorSpace,
-                                                       const int dataType, const LayerInfo& layerInfo,
-                                                       const Options& options, void* buffer)
-       {
-               LOGI("ENTER");
+       LOGI("LEAVE");
 
-               // dest is a wrapper of the buffer
-               cv::Mat dest(cv::Size(layerInfo.getWidth(), layerInfo.getHeight()),
-                                       dataType, buffer);
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               cv::Mat cvSource, cvDest;
-               // cvSource has new allocation with dest.size()
-               Resize(source, cvSource, dest.size());
+int PreProcess::Run(cv::Mat &source, const int colorSpace, const int dataType, const LayerInfo &layerInfo,
+                                       const Options &options, void *buffer)
+{
+       LOGI("ENTER");
 
-               // cvDest has new allocation if it's colorSpace is not RGB888
-               // cvDest share the data with cvSource it's colorSpace is RGB888
-               ColorConvert(cvSource, cvDest, colorSpace, layerInfo.colorSpace);
+       // dest is a wrapper of the buffer
+       cv::Mat dest(cv::Size(layerInfo.getWidth(), layerInfo.getHeight()), dataType, buffer);
 
-               cvDest.convertTo(dest, dest.type());
+       cv::Mat cvSource, cvDest;
+       // cvSource has new allocation with dest.size()
+       Resize(source, cvSource, dest.size());
 
-               if (options.normalization.use) {
-                       Normalize(dest, dest, options.normalization.mean, options.normalization.std);
-               }
+       // cvDest gets a new allocation if its colorSpace is not RGB888;
+       // otherwise cvDest shares its data with cvSource
+       ColorConvert(cvSource, cvDest, colorSpace, layerInfo.colorSpace);
 
-               if (options.quantization.use) {
-                       Quantize(dest, dest, options.quantization.scale, options.quantization.zeropoint);
-               }
+       cvDest.convertTo(dest, dest.type());
 
-               LOGI("LEAVE");
+       if (options.normalization.use) {
+               Normalize(dest, dest, options.normalization.mean, options.normalization.std);
+       }
 
-               return MEDIA_VISION_ERROR_NONE;
+       if (options.quantization.use) {
+               Quantize(dest, dest, options.quantization.scale, options.quantization.zeropoint);
        }
 
+       LOGI("LEAVE");
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
 } /* Inference */
 } /* MediaVision */
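
Taken together, PreProcess::Run is a resize -> color-convert -> type-convert -> normalize/quantize pipeline that writes into a caller-owned buffer by wrapping it in a cv::Mat, so no copy-out is needed. A minimal standalone sketch of the same flow, assuming an 8-bit BGR input and a float RGB output tensor (mean/std handling mirrors Normalize above; all values are illustrative):

	#include <opencv2/core.hpp>
	#include <opencv2/imgproc.hpp>

	// Fill a caller-provided float buffer with a resized, RGB, normalized image.
	static void preprocess(const cv::Mat &src, float *buffer, int width, int height,
						   const cv::Scalar &mean, const cv::Scalar &stddev)
	{
		// dest wraps the buffer; writing to dest writes into the tensor memory.
		cv::Mat dest(cv::Size(width, height), CV_32FC3, buffer);
		cv::Mat resized, rgb;
		cv::resize(src, resized, dest.size());
		cv::cvtColor(resized, rgb, cv::COLOR_BGR2RGB); // assumed BGR input
		rgb.convertTo(dest, CV_32FC3);
		cv::subtract(dest, mean, dest); // (x - mean)
		cv::divide(dest, stddev, dest); // / std
	}
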
index 161f72f..2d48f52 100644 (file)
@@ -28,200 +28,191 @@ namespace mediavision
 {
 namespace inference
 {
+bool TensorBuffer::empty()
+{
+       return _tensorBuffer.empty();
+}
 
-       bool TensorBuffer::empty()
-       {
-               return _tensorBuffer.empty();
-       }
+bool TensorBuffer::exist(std::string name)
+{
+       return getTensorBuffer(name) != nullptr;
+}
 
-       bool TensorBuffer::exist(std::string name)
-       {
-               return getTensorBuffer(name) != nullptr;
+int TensorBuffer::allocate(inference_engine_tensor_buffer &tensor_buffer,
+                                                  const inference_engine_tensor_info &tensor_info)
+{
+       if (tensor_info.data_type <= INFERENCE_TENSOR_DATA_TYPE_NONE ||
+               tensor_info.data_type >= INFERENCE_TENSOR_DATA_TYPE_MAX) {
+               LOGE("Invalid tensor data type.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       int TensorBuffer::allocate(inference_engine_tensor_buffer& tensor_buffer,
-                                                          const inference_engine_tensor_info& tensor_info)
-       {
-               if (tensor_info.data_type <= INFERENCE_TENSOR_DATA_TYPE_NONE ||
-                               tensor_info.data_type >= INFERENCE_TENSOR_DATA_TYPE_MAX) {
-                       LOGE("Invalid tensor data type.");
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
-
-               try {
-                       if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
-                               tensor_buffer.buffer = new float[tensor_info.size];
-                       else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64)
-                               tensor_buffer.buffer = new long long[tensor_info.size];
-                       else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT64)
-                               tensor_buffer.buffer = new unsigned long long[tensor_info.size];
-                       else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT32)
-                               tensor_buffer.buffer = new unsigned int[tensor_info.size];
-                       else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8)
-                               tensor_buffer.buffer = new char[tensor_info.size];
-                       else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16)
-                               tensor_buffer.buffer = new unsigned short[tensor_info.size];
-               }  catch (const std::bad_alloc& e) {
-                       LOGE("Fail to allocate tensor buffer.(%s)", e.what());
-                       return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
-               }
+       try {
+               if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
+                       tensor_buffer.buffer = new float[tensor_info.size];
+               else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64)
+                       tensor_buffer.buffer = new long long[tensor_info.size];
+               else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT64)
+                       tensor_buffer.buffer = new unsigned long long[tensor_info.size];
+               else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT32)
+                       tensor_buffer.buffer = new unsigned int[tensor_info.size];
+               else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8)
+                       tensor_buffer.buffer = new char[tensor_info.size];
+               else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16)
+                       tensor_buffer.buffer = new unsigned short[tensor_info.size];
+       } catch (const std::bad_alloc &e) {
+               LOGE("Fail to allocate tensor buffer.(%s)", e.what());
+               return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+       }
 
-               tensor_buffer.size = tensor_info.size;
+       tensor_buffer.size = tensor_info.size;
 
-               LOGI("Allocated tensor buffer(size = %zu, data type = %d)",
-                               tensor_info.size, tensor_info.data_type);
-               tensor_buffer.owner_is_backend = 0;
-               tensor_buffer.data_type = tensor_info.data_type;
+       LOGI("Allocated tensor buffer(size = %zu, data type = %d)", tensor_info.size, tensor_info.data_type);
+       tensor_buffer.owner_is_backend = 0;
+       tensor_buffer.data_type = tensor_info.data_type;
 
-               return MEDIA_VISION_ERROR_NONE;
-       }
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-       void TensorBuffer::release()
-       {
-               for (auto& tensorBuffer : _tensorBuffer) {
-                       auto& tBuffer = tensorBuffer.second;
-                       if (tBuffer.owner_is_backend) {
-                               continue;
-                       }
-
-                       if (tBuffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
-                               delete[] static_cast<float *>(tBuffer.buffer);
-                       } else if (tBuffer.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64) {
-                               delete[] static_cast<long long *>(tBuffer.buffer);
-                       } else if (tBuffer.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT32) {
-                               delete[] static_cast<uint32_t *>(tBuffer.buffer);
-                       } else if (tBuffer.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16) {
-                               delete[] static_cast<unsigned short *>(tBuffer.buffer);
-                       } else {
-                               delete[] static_cast<unsigned char *>(tBuffer.buffer);
-                       }
+void TensorBuffer::release()
+{
+       for (auto &tensorBuffer : _tensorBuffer) {
+               auto &tBuffer = tensorBuffer.second;
+               if (tBuffer.owner_is_backend) {
+                       continue;
                }
 
-               LOGI("Tensor(%zu) have been released.", _tensorBuffer.size());
-               IETensorBuffer().swap(_tensorBuffer);
+               if (tBuffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
+                       delete[] static_cast<float *>(tBuffer.buffer);
+               } else if (tBuffer.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64) {
+                       delete[] static_cast<long long *>(tBuffer.buffer);
+               } else if (tBuffer.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT32) {
+                       delete[] static_cast<uint32_t *>(tBuffer.buffer);
+               } else if (tBuffer.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16) {
+                       delete[] static_cast<unsigned short *>(tBuffer.buffer);
+               } else {
+                       delete[] static_cast<unsigned char *>(tBuffer.buffer);
+               }
        }
 
-       size_t TensorBuffer::size()
-       {
-               return _tensorBuffer.size();
-       }
+       LOGI("Tensors(%zu) have been released.", _tensorBuffer.size());
+       IETensorBuffer().swap(_tensorBuffer);
+}
 
-       IETensorBuffer& TensorBuffer::getIETensorBuffer()
-       {
-               return _tensorBuffer;
-       }
+size_t TensorBuffer::size()
+{
+       return _tensorBuffer.size();
+}
 
-       inference_engine_tensor_buffer* TensorBuffer::getTensorBuffer(std::string name)
-       {
-               if (_tensorBuffer.find(name) == _tensorBuffer.end()){
-                       return nullptr;
-               }
+IETensorBuffer &TensorBuffer::getIETensorBuffer()
+{
+       return _tensorBuffer;
+}
 
-               return &_tensorBuffer[name];
+inference_engine_tensor_buffer *TensorBuffer::getTensorBuffer(std::string name)
+{
+       if (_tensorBuffer.find(name) == _tensorBuffer.end()) {
+               return nullptr;
        }
 
-       bool TensorBuffer::addTensorBuffer(std::string name, inference_engine_tensor_buffer& buffer)
-       {
-               if (name.empty() || buffer.buffer == nullptr) {
-                       LOGE("Invalid parameters: %s, %p", name.c_str(), buffer.buffer);
-                       return false;
-               }
+       return &_tensorBuffer[name];
+}
 
-               auto ret = _tensorBuffer.insert(std::make_pair(name, buffer));
-               if (ret.second == false) {
-                       LOGE("Fail to insert %s with buffer %p", name.c_str(), buffer.buffer);
-                       return false;
-               }
+bool TensorBuffer::addTensorBuffer(std::string name, inference_engine_tensor_buffer &buffer)
+{
+       if (name.empty() || buffer.buffer == nullptr) {
+               LOGE("Invalid parameters: %s, %p", name.c_str(), buffer.buffer);
+               return false;
+       }
 
-               return true;
+       auto ret = _tensorBuffer.insert(std::make_pair(name, buffer));
+       if (ret.second == false) {
+               LOGE("Fail to insert %s with buffer %p", name.c_str(), buffer.buffer);
+               return false;
        }
 
-       int TensorBuffer::GetTensorInfo(inference_engine_layer_property& layerProperty, tensor_t& outputTensorInfo)
-       {
-               for (auto& layer : layerProperty.layers) {
-                       const inference_engine_tensor_info& tensor_info = layer.second;
+       return true;
+}
 
-                       std::vector<int> dimInfo;
+int TensorBuffer::GetTensorInfo(inference_engine_layer_property &layerProperty, tensor_t &outputTensorInfo)
+{
+       for (auto &layer : layerProperty.layers) {
+               const inference_engine_tensor_info &tensor_info = layer.second;
 
-                       for (auto& dim : tensor_info.shape) {
-                               dimInfo.push_back(dim);
-                       }
+               std::vector<int> dimInfo;
 
-                       outputTensorInfo.dimInfo.push_back(dimInfo);
+               for (auto &dim : tensor_info.shape) {
+                       dimInfo.push_back(dim);
+               }
 
-                       inference_engine_tensor_buffer* tensorBuffer = getTensorBuffer(layer.first);
-                       if (tensorBuffer == NULL) {
-                               LOGE("Fail to getTensorBuffer with name %s", layer.first.c_str());
-                               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-                       }
+               outputTensorInfo.dimInfo.push_back(dimInfo);
 
-                       outputTensorInfo.data.push_back(static_cast<void *>(tensorBuffer->buffer));
+               inference_engine_tensor_buffer *tensorBuffer = getTensorBuffer(layer.first);
+               if (tensorBuffer == NULL) {
+                       LOGE("Fail to getTensorBuffer with name %s", layer.first.c_str());
+                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
                }
 
-               return MEDIA_VISION_ERROR_NONE;
+               outputTensorInfo.data.push_back(static_cast<void *>(tensorBuffer->buffer));
        }
 
-       template <typename T>
-       int TensorBuffer::convertToFloat(inference_engine_tensor_buffer *tensorBuffer)
-       {
-               float *new_buf = new(std::nothrow) float[tensorBuffer->size];
-               if (new_buf == NULL) {
-                       LOGE("Fail to allocate a new output tensor buffer.");
-                       return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
-               }
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               auto ori_buf = static_cast<T *>(tensorBuffer->buffer);
+template<typename T> int TensorBuffer::convertToFloat(inference_engine_tensor_buffer *tensorBuffer)
+{
+       float *new_buf = new (std::nothrow) float[tensorBuffer->size];
+       if (new_buf == NULL) {
+               LOGE("Fail to allocate a new output tensor buffer.");
+               return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+       }
 
-               for (size_t idx = 0; idx < tensorBuffer->size; idx++)
-                       new_buf[idx] = static_cast<float>(ori_buf[idx]) / 255.0f;
+       auto ori_buf = static_cast<T *>(tensorBuffer->buffer);
 
-               // replace original buffer with new one, and release origin one.
-               tensorBuffer->buffer = new_buf;
+       for (size_t idx = 0; idx < tensorBuffer->size; idx++)
+               new_buf[idx] = static_cast<float>(ori_buf[idx]) / 255.0f;
 
-               if (!tensorBuffer->owner_is_backend)
-                       delete[] ori_buf;
+       // Replace the original buffer with the new one, and release the original.
+       tensorBuffer->buffer = new_buf;
 
-               tensorBuffer->data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-               tensorBuffer->owner_is_backend = false;
+       if (!tensorBuffer->owner_is_backend)
+               delete[] ori_buf;
 
-               return MEDIA_VISION_ERROR_NONE;
-       }
+       tensorBuffer->data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+       tensorBuffer->owner_is_backend = false;
 
-       template <typename T>
-       T TensorBuffer::getValue(std::string name, int idx)
-       {
-               inference_engine_tensor_buffer* tBuffer =
-                                                       getTensorBuffer(name);
-               if (tBuffer == nullptr) {
-                       throw std::invalid_argument(name);
-               }
+       return MEDIA_VISION_ERROR_NONE;
+}
 
-               switch (tBuffer->data_type) {
-               case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
-                       return static_cast<T>(static_cast<float*>(tBuffer->buffer)[idx]);
-               case INFERENCE_TENSOR_DATA_TYPE_INT64:
-                       return static_cast<T>(
-                                       static_cast<long long*>(tBuffer->buffer)[idx]);
-               case INFERENCE_TENSOR_DATA_TYPE_UINT32:
-                       return static_cast<T>(
-                                       static_cast<unsigned int*>(tBuffer->buffer)[idx]);
-               case INFERENCE_TENSOR_DATA_TYPE_UINT8:
-                       return static_cast<T>(
-                                       static_cast<unsigned char*>(tBuffer->buffer)[idx]);
-               case INFERENCE_TENSOR_DATA_TYPE_UINT16:
-                       return static_cast<T>(
-                                       static_cast<unsigned short*>(tBuffer->buffer)[idx]);
-               default:
-                       break;
-               }
+template<typename T> T TensorBuffer::getValue(std::string name, int idx)
+{
+       inference_engine_tensor_buffer *tBuffer = getTensorBuffer(name);
+       if (tBuffer == nullptr) {
+               throw std::invalid_argument(name);
+       }
 
-               throw std::invalid_argument("Invalid data type");
+       switch (tBuffer->data_type) {
+       case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
+               return static_cast<T>(static_cast<float *>(tBuffer->buffer)[idx]);
+       case INFERENCE_TENSOR_DATA_TYPE_INT64:
+               return static_cast<T>(static_cast<long long *>(tBuffer->buffer)[idx]);
+       case INFERENCE_TENSOR_DATA_TYPE_UINT32:
+               return static_cast<T>(static_cast<unsigned int *>(tBuffer->buffer)[idx]);
+       case INFERENCE_TENSOR_DATA_TYPE_UINT8:
+               return static_cast<T>(static_cast<unsigned char *>(tBuffer->buffer)[idx]);
+       case INFERENCE_TENSOR_DATA_TYPE_UINT16:
+               return static_cast<T>(static_cast<unsigned short *>(tBuffer->buffer)[idx]);
+       default:
+               break;
        }
 
-       template float TensorBuffer::getValue<float>(std::string, int);
-       template int TensorBuffer::getValue<int>(std::string, int);
-       template int TensorBuffer::convertToFloat<unsigned char>(_inference_engine_tensor_buffer*);
-       template int TensorBuffer::convertToFloat<unsigned short>(_inference_engine_tensor_buffer*);
+       throw std::invalid_argument("Invalid data type");
+}
+
+template float TensorBuffer::getValue<float>(std::string, int);
+template int TensorBuffer::getValue<int>(std::string, int);
+template int TensorBuffer::convertToFloat<unsigned char>(_inference_engine_tensor_buffer *);
+template int TensorBuffer::convertToFloat<unsigned short>(_inference_engine_tensor_buffer *);
 
 } /* Inference */
 } /* MediaVision */
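
Note: as a minimal standalone sketch of the normalization that convertToFloat() performs above (the free function and its names are illustrative, not MediaVision API), assuming the caller knows the element count:

	#include <cstddef>
	#include <vector>

	// Mirrors the loop in TensorBuffer::convertToFloat(): every integer
	// sample becomes static_cast<float>(v) / 255.0f.
	template<typename T> std::vector<float> to_float(const T *src, std::size_t count)
	{
		std::vector<float> dst(count);
		for (std::size_t idx = 0; idx < count; ++idx)
			dst[idx] = static_cast<float>(src[idx]) / 255.0f;
		return dst;
	}
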
index bd14a88..f722cd9 100644
@@ -25,8 +25,7 @@
 
 int mv_inference_create(mv_inference_h *infer)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(infer);
 
        MEDIA_VISION_FUNCTION_ENTER();
@@ -41,8 +40,7 @@ int mv_inference_create(mv_inference_h *infer)
 
 int mv_inference_destroy(mv_inference_h infer)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(infer);
 
        MEDIA_VISION_FUNCTION_ENTER();
@@ -55,11 +53,9 @@ int mv_inference_destroy(mv_inference_h infer)
        return ret;
 }
 
-int mv_inference_configure(mv_inference_h infer,
-                                                  mv_engine_config_h engine_config)
+int mv_inference_configure(mv_inference_h infer, mv_engine_config_h engine_config)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(infer);
        MEDIA_VISION_INSTANCE_CHECK(engine_config);
 
@@ -79,8 +75,7 @@ int mv_inference_configure(mv_inference_h infer,
 
 int mv_inference_prepare(mv_inference_h infer)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(infer);
 
        MEDIA_VISION_FUNCTION_ENTER();
@@ -93,33 +88,27 @@ int mv_inference_prepare(mv_inference_h infer)
        return ret;
 }
 
-int mv_inference_foreach_supported_engine(
-               mv_inference_h infer, mv_inference_supported_engine_cb callback,
-               void *user_data)
+int mv_inference_foreach_supported_engine(mv_inference_h infer, mv_inference_supported_engine_cb callback,
+                                                                                 void *user_data)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(infer);
        MEDIA_VISION_NULL_ARG_CHECK(callback);
        MEDIA_VISION_FUNCTION_ENTER();
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_inference_foreach_supported_engine_open(infer, callback,
-                                                                                                        user_data);
+       ret = mv_inference_foreach_supported_engine_open(infer, callback, user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_inference_image_classify(mv_source_h source, mv_inference_h infer,
-                                                               mv_rectangle_s *roi,
-                                                               mv_inference_image_classified_cb classified_cb,
-                                                               void *user_data)
+int mv_inference_image_classify(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+                                                               mv_inference_image_classified_cb classified_cb, void *user_data)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_image_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
        MEDIA_VISION_INSTANCE_CHECK(infer);
        MEDIA_VISION_NULL_ARG_CHECK(classified_cb);
@@ -128,20 +117,17 @@ int mv_inference_image_classify(mv_source_h source, mv_inference_h infer,
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_inference_image_classify_open(source, infer, roi, classified_cb,
-                                                                                  user_data);
+       ret = mv_inference_image_classify_open(source, infer, roi, classified_cb, user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_inference_object_detect(mv_source_h source, mv_inference_h infer,
-                                                          mv_inference_object_detected_cb detected_cb,
+int mv_inference_object_detect(mv_source_h source, mv_inference_h infer, mv_inference_object_detected_cb detected_cb,
                                                           void *user_data)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_image_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
        MEDIA_VISION_INSTANCE_CHECK(infer);
        MEDIA_VISION_NULL_ARG_CHECK(detected_cb);
@@ -150,20 +136,17 @@ int mv_inference_object_detect(mv_source_h source, mv_inference_h infer,
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_inference_object_detect_open(source, infer, detected_cb,
-                                                                                 user_data);
+       ret = mv_inference_object_detect_open(source, infer, detected_cb, user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_inference_face_detect(mv_source_h source, mv_inference_h infer,
-                                                        mv_inference_face_detected_cb detected_cb,
+int mv_inference_face_detect(mv_source_h source, mv_inference_h infer, mv_inference_face_detected_cb detected_cb,
                                                         void *user_data)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_face_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
        MEDIA_VISION_INSTANCE_CHECK(infer);
        MEDIA_VISION_NULL_ARG_CHECK(detected_cb);
@@ -179,12 +162,10 @@ int mv_inference_face_detect(mv_source_h source, mv_inference_h infer,
        return ret;
 }
 
-int mv_inference_facial_landmark_detect(
-               mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
-               mv_inference_facial_landmark_detected_cb detected_cb, void *user_data)
+int mv_inference_facial_landmark_detect(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+                                                                               mv_inference_facial_landmark_detected_cb detected_cb, void *user_data)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_face_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
        MEDIA_VISION_INSTANCE_CHECK(infer);
        MEDIA_VISION_NULL_ARG_CHECK(detected_cb);
@@ -193,20 +174,17 @@ int mv_inference_facial_landmark_detect(
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_inference_facial_landmark_detect_open(source, infer, roi,
-                                                                                                  detected_cb, user_data);
+       ret = mv_inference_facial_landmark_detect_open(source, infer, roi, detected_cb, user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_inference_pose_landmark_detect(
-               mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
-               mv_inference_pose_landmark_detected_cb detected_cb, void *user_data)
+int mv_inference_pose_landmark_detect(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+                                                                         mv_inference_pose_landmark_detected_cb detected_cb, void *user_data)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_face_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(source);
        MEDIA_VISION_INSTANCE_CHECK(infer);
        MEDIA_VISION_NULL_ARG_CHECK(detected_cb);
@@ -215,9 +193,7 @@ int mv_inference_pose_landmark_detect(
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-
-       ret = mv_inference_pose_landmark_detect_open(source, infer, roi,
-                                                                                                detected_cb, user_data);
+       ret = mv_inference_pose_landmark_detect_open(source, infer, roi, detected_cb, user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
@@ -226,8 +202,7 @@ int mv_inference_pose_landmark_detect(
 
 int mv_inference_pose_get_number_of_poses(mv_inference_pose_result_h result, int *number_of_poses)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(result);
 
        MEDIA_VISION_NULL_ARG_CHECK(number_of_poses);
@@ -243,11 +218,9 @@ int mv_inference_pose_get_number_of_poses(mv_inference_pose_result_h result, int
        return ret;
 }
 
-
 int mv_inference_pose_get_number_of_landmarks(mv_inference_pose_result_h result, int *number_of_landmarks)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(result);
 
        MEDIA_VISION_NULL_ARG_CHECK(number_of_landmarks);
@@ -263,11 +236,10 @@ int mv_inference_pose_get_number_of_landmarks(mv_inference_pose_result_h result,
        return ret;
 }
 
-int mv_inference_pose_get_landmark(mv_inference_pose_result_h result,
-               int pose_index, int part_index, mv_point_s *location, float *score)
+int mv_inference_pose_get_landmark(mv_inference_pose_result_h result, int pose_index, int part_index,
+                                                                  mv_point_s *location, float *score)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(result);
 
        MEDIA_VISION_NULL_ARG_CHECK(location);
@@ -289,8 +261,7 @@ int mv_inference_pose_get_landmark(mv_inference_pose_result_h result,
 
 int mv_inference_pose_get_label(mv_inference_pose_result_h result, int pose_index, int *label)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(result);
 
        MEDIA_VISION_NULL_ARG_CHECK(label);
@@ -309,11 +280,9 @@ int mv_inference_pose_get_label(mv_inference_pose_result_h result, int pose_inde
        return ret;
 }
 
-
 int mv_pose_create(mv_pose_h *pose)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(pose);
 
        MEDIA_VISION_FUNCTION_ENTER();
@@ -329,8 +298,7 @@ int mv_pose_create(mv_pose_h *pose)
 
 int mv_pose_destroy(mv_pose_h pose)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(pose);
 
        MEDIA_VISION_FUNCTION_ENTER();
@@ -346,8 +314,7 @@ int mv_pose_destroy(mv_pose_h pose)
 
 int mv_pose_set_from_file(mv_pose_h pose, const char *motion_capture_file_path, const char *motion_mapping_file_path)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(pose);
        MEDIA_VISION_NULL_ARG_CHECK(motion_capture_file_path);
        MEDIA_VISION_NULL_ARG_CHECK(motion_mapping_file_path);
@@ -365,8 +332,7 @@ int mv_pose_set_from_file(mv_pose_h pose, const char *motion_capture_file_path,
 
 int mv_pose_compare(mv_pose_h pose, mv_inference_pose_result_h action, int parts, float *score)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(pose);
        MEDIA_VISION_INSTANCE_CHECK(action);
        MEDIA_VISION_NULL_ARG_CHECK(score);
index da48ac8..28a052e 100644
 
 using namespace mediavision::inference;
 
-static int check_mv_inference_engine_version(mv_engine_config_h engine_config,
-                                                                                        bool *is_new_version)
+static int check_mv_inference_engine_version(mv_engine_config_h engine_config, bool *is_new_version)
 {
        int oldType = 0, newType = 0;
 
-       int ret = mv_engine_config_get_int_attribute(
-                       engine_config, MV_INFERENCE_TARGET_TYPE, &oldType);
+       int ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_TARGET_TYPE, &oldType);
        if (ret != MEDIA_VISION_ERROR_NONE)
                oldType = -1;
 
-       ret = mv_engine_config_get_int_attribute(
-                       engine_config, MV_INFERENCE_TARGET_DEVICE_TYPE, &newType);
+       ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_TARGET_DEVICE_TYPE, &newType);
        if (ret != MEDIA_VISION_ERROR_NONE)
                newType = -1;
 
@@ -48,8 +45,7 @@ static int check_mv_inference_engine_version(mv_engine_config_h engine_config,
 
        // If values of both types are changed then return an error.
        // only one of two types should be used.
-       if (oldType != MV_INFERENCE_TARGET_CPU &&
-               newType != MV_INFERENCE_TARGET_DEVICE_CPU) {
+       if (oldType != MV_INFERENCE_TARGET_CPU && newType != MV_INFERENCE_TARGET_DEVICE_CPU) {
                LOGE("Please use only one of below two device types.");
                LOGE("MV_INFERENCE_TARGET_TYPE(deprecated) or MV_INFERENCE_TARGET_DEVICE_TYPE(recommended).");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -63,8 +59,7 @@ static int check_mv_inference_engine_version(mv_engine_config_h engine_config,
        //   (oldType == MV_INFERENCE_TARGET_CPU && newType == MV_INFERENCE_TARGET_DEVICE_CPU)
        // - default value of only new type is changed.
        //   (oldType == MV_INFERENCE_TARGET_CPU && (newType != -1 && newType != MV_INFERENCE_TARGET_DEVICE_CPU))
-       if ((oldType != -1 && oldType != MV_INFERENCE_TARGET_CPU) &&
-               newType == MV_INFERENCE_TARGET_DEVICE_CPU)
+       if ((oldType != -1 && oldType != MV_INFERENCE_TARGET_CPU) && newType == MV_INFERENCE_TARGET_DEVICE_CPU)
                *is_new_version = false;
        else
                *is_new_version = true;
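
Note: worked examples of the target-type resolution above, using GPU as a stand-in for any non-default value:

	// oldType = GPU (changed),  newType = CPU (default)  -> is_new_version = false (keep deprecated type)
	// oldType = CPU (default),  newType = GPU (changed)  -> is_new_version = true  (use new type)
	// oldType = GPU (changed),  newType = GPU (changed)  -> MEDIA_VISION_ERROR_INVALID_PARAMETER
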
@@ -112,15 +107,14 @@ int mv_inference_destroy_open(mv_inference_h infer)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-static bool IsJsonFile(const std::string& fileName)
+static bool IsJsonFile(const std::string &fileName)
 {
        return (!fileName.substr(fileName.find_last_of(".") + 1).compare("json"));
 }
 
 static bool IsValidBackendType(const int backend_type)
 {
-       return (backend_type > MV_INFERENCE_BACKEND_NONE &&
-                       backend_type < MV_INFERENCE_BACKEND_MAX);
+       return (backend_type > MV_INFERENCE_BACKEND_NONE && backend_type < MV_INFERENCE_BACKEND_MAX);
 }
 
 static bool IsConfigFilePathRequired(const int target_device_type, const int backend_type)
@@ -128,8 +122,7 @@ static bool IsConfigFilePathRequired(const int target_device_type, const int bac
        LOGW("DEPRECATION WARNING : MV_INFERENCE_BACKEND_MLAPI type is deprecated and will be removed from next release.");
 
        // In case of MV_INFERENCE_TARGET_DEVICE_CUSTOM via MLAPI backend, config file path is required.
-       return (backend_type == MV_INFERENCE_BACKEND_MLAPI &&
-                       target_device_type & MV_INFERENCE_TARGET_DEVICE_CUSTOM);
+       return (backend_type == MV_INFERENCE_BACKEND_MLAPI && target_device_type & MV_INFERENCE_TARGET_DEVICE_CUSTOM);
 }
 
 static int configure_model_open(Inference *pInfer, mv_engine_config_h engine_config)
@@ -143,32 +136,27 @@ static int configure_model_open(Inference *pInfer, mv_engine_config_h engine_con
        int backendType = 0;
        size_t userFileLength = 0;
 
-       int ret = mv_engine_config_get_string_attribute(
-                       engine_config, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
-                       &modelConfigFilePath);
+       int ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+                                                                                                       &modelConfigFilePath);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get model configuration file path");
                goto out_of_function;
        }
 
-       ret = mv_engine_config_get_string_attribute(
-                       engine_config, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                       &modelWeightFilePath);
+       ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                                                                                               &modelWeightFilePath);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get model weight file path");
                goto release_model_config_file_path;
        }
 
-       ret = mv_engine_config_get_string_attribute(
-                       engine_config, MV_INFERENCE_MODEL_USER_FILE_PATH,
-                       &modelUserFilePath);
+       ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_MODEL_USER_FILE_PATH, &modelUserFilePath);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get model user file path");
                goto release_model_weight_file_path;
        }
 
-       ret = mv_engine_config_get_int_attribute(
-                       engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
+       ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference backend type");
                goto release_model_user_file_path;
@@ -202,13 +190,10 @@ static int configure_model_open(Inference *pInfer, mv_engine_config_h engine_con
                goto release_model_user_file_path;
        }
 
-       pInfer->ConfigureModelFiles(std::string(modelConfigFilePath),
-                                                               std::string(modelWeightFilePath),
+       pInfer->ConfigureModelFiles(std::string(modelConfigFilePath), std::string(modelWeightFilePath),
                                                                std::string(modelUserFilePath));
 
-       ret = mv_engine_config_get_string_attribute(
-                       engine_config, MV_INFERENCE_MODEL_META_FILE_PATH,
-                       &modelMetaFilePath);
+       ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_MODEL_META_FILE_PATH, &modelMetaFilePath);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get model meta file path");
                goto release_model_user_file_path;
@@ -261,58 +246,50 @@ static int configure_input_info_open(Inference *pInfer, mv_engine_config_h engin
        char *node_name = NULL;
        int dataType = 0;
 
-       int ret = mv_engine_config_get_int_attribute(
-                       engine_config, MV_INFERENCE_INPUT_TENSOR_WIDTH, &tensorWidth);
+       int ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_INPUT_TENSOR_WIDTH, &tensorWidth);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor width");
                goto out_of_function;
        }
 
-       ret = mv_engine_config_get_int_attribute(
-                       engine_config, MV_INFERENCE_INPUT_TENSOR_HEIGHT, &tensorHeight);
+       ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_INPUT_TENSOR_HEIGHT, &tensorHeight);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor height");
                goto out_of_function;
        }
 
-       ret = mv_engine_config_get_int_attribute(
-                       engine_config, MV_INFERENCE_INPUT_TENSOR_CHANNELS, &tensorCh);
+       ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_INPUT_TENSOR_CHANNELS, &tensorCh);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor channels");
                goto out_of_function;
        }
 
-       ret = mv_engine_config_get_double_attribute(
-                       engine_config, MV_INFERENCE_MODEL_MEAN_VALUE, &meanValue);
+       ret = mv_engine_config_get_double_attribute(engine_config, MV_INFERENCE_MODEL_MEAN_VALUE, &meanValue);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get meanValue");
                goto out_of_function;
        }
 
-       ret = mv_engine_config_get_double_attribute(
-                       engine_config, MV_INFERENCE_MODEL_STD_VALUE, &stdValue);
+       ret = mv_engine_config_get_double_attribute(engine_config, MV_INFERENCE_MODEL_STD_VALUE, &stdValue);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get stdValue");
                goto out_of_function;
        }
 
-       ret = mv_engine_config_get_int_attribute(
-                       engine_config, MV_INFERENCE_INPUT_DATA_TYPE, &dataType);
+       ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_INPUT_DATA_TYPE, &dataType);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get a input tensor data type");
                goto out_of_function;
        }
 
-       ret = mv_engine_config_get_string_attribute(
-                       engine_config, MV_INFERENCE_INPUT_NODE_NAME, &node_name);
+       ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_INPUT_NODE_NAME, &node_name);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor width");
                goto out_of_function;
        }
 
-       pInfer->ConfigureInputInfo(
-                       tensorWidth, tensorHeight, 1, tensorCh, stdValue, meanValue,
-                       dataType, std::vector<std::string>(1, std::string(node_name)));
+       pInfer->ConfigureInputInfo(tensorWidth, tensorHeight, 1, tensorCh, stdValue, meanValue, dataType,
+                                                          std::vector<std::string>(1, std::string(node_name)));
 
        if (node_name) {
                free(node_name);
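
Note: for context, a hedged application-side sketch of an engine_config that configure_input_info_open() above can consume. The set_* counterparts of the get_* calls used here come from mv_common.h; the numeric values and node name are placeholders:

	#include <mv_common.h>
	#include <mv_inference.h>

	static mv_engine_config_h make_input_config(void)
	{
		mv_engine_config_h cfg = NULL;
		if (mv_create_engine_config(&cfg) != MEDIA_VISION_ERROR_NONE)
			return NULL;
		mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224);
		mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224);
		mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
		mv_engine_config_set_double_attribute(cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5);
		mv_engine_config_set_double_attribute(cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5);
		mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32);
		mv_engine_config_set_string_attribute(cfg, MV_INFERENCE_INPUT_NODE_NAME, "input");
		return cfg;
	}
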
@@ -325,8 +302,7 @@ out_of_function:
        return ret;
 }
 
-int mv_inference_configure_engine_open(mv_inference_h infer,
-                                                                          mv_engine_config_h engine_config)
+int mv_inference_configure_engine_open(mv_inference_h infer, mv_engine_config_h engine_config)
 {
        LOGI("ENTER");
 
@@ -336,15 +312,13 @@ int mv_inference_configure_engine_open(mv_inference_h infer,
 
        pInfer->SetEngineConfig(engine_config);
 
-       int ret = mv_engine_config_get_int_attribute(
-                       engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
+       int ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference backend type");
                goto out_of_function;
        }
 
-       ret = mv_engine_config_get_int_attribute(
-                       engine_config, MV_INFERENCE_TARGET_DEVICE_TYPE, &targetTypes);
+       ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_TARGET_DEVICE_TYPE, &targetTypes);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference target type");
                goto out_of_function;
@@ -363,8 +337,7 @@ int mv_inference_configure_engine_open(mv_inference_h infer,
                goto out_of_function;
 
        // Convert old type to new one and then use it if is_new_version is false
-       if (pInfer->ConfigureTargetTypes(targetTypes, is_new_version) !=
-               MEDIA_VISION_ERROR_NONE) {
+       if (pInfer->ConfigureTargetTypes(targetTypes, is_new_version) != MEDIA_VISION_ERROR_NONE) {
                LOGE("Tried to configure invalid target types.");
                goto out_of_function;
        }
@@ -384,16 +357,14 @@ out_of_function:
        return ret;
 }
 
-int mv_inference_configure_output_open(mv_inference_h infer,
-                                                                          mv_engine_config_h engine_config)
+int mv_inference_configure_output_open(mv_inference_h infer, mv_engine_config_h engine_config)
 {
        LOGI("ENTER");
 
        Inference *pInfer = static_cast<Inference *>(infer);
        int maxOutput = 0;
 
-       int ret = mv_engine_config_get_int_attribute(
-                       engine_config, MV_INFERENCE_OUTPUT_MAX_NUMBER, &maxOutput);
+       int ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_OUTPUT_MAX_NUMBER, &maxOutput);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference output maximum numbers");
                goto out_of_function;
@@ -407,16 +378,14 @@ out_of_function:
        return ret;
 }
 
-int mv_inference_configure_confidence_threshold_open(
-               mv_inference_h infer, mv_engine_config_h engine_config)
+int mv_inference_configure_confidence_threshold_open(mv_inference_h infer, mv_engine_config_h engine_config)
 {
        LOGI("ENTER");
 
        Inference *pInfer = static_cast<Inference *>(infer);
        double threshold = 0;
 
-       int ret = mv_engine_config_get_double_attribute(
-                       engine_config, MV_INFERENCE_CONFIDENCE_THRESHOLD, &threshold);
+       int ret = mv_engine_config_get_double_attribute(engine_config, MV_INFERENCE_CONFIDENCE_THRESHOLD, &threshold);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference confidence threshold value");
                goto out_of_function;
@@ -437,8 +406,7 @@ static int configure_post_process_info_open(Inference *pInfer, mv_engine_config_
        int maxOutput = 0;
        double threshold = 0;
 
-       int ret = mv_engine_config_get_int_attribute(
-                       engine_config, MV_INFERENCE_OUTPUT_MAX_NUMBER, &maxOutput);
+       int ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_OUTPUT_MAX_NUMBER, &maxOutput);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference output maximum numbers");
                goto out_of_function;
@@ -446,8 +414,7 @@ static int configure_post_process_info_open(Inference *pInfer, mv_engine_config_
 
        pInfer->ConfigureOutput(maxOutput);
 
-       ret = mv_engine_config_get_double_attribute(
-                       engine_config, MV_INFERENCE_CONFIDENCE_THRESHOLD, &threshold);
+       ret = mv_engine_config_get_double_attribute(engine_config, MV_INFERENCE_CONFIDENCE_THRESHOLD, &threshold);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference confidence threshold value");
                goto out_of_function;
@@ -470,8 +437,8 @@ static int configure_output_info_open(Inference *pInfer, mv_engine_config_h engi
        int size = 0;
        std::vector<std::string> names;
 
-       int ret = mv_engine_config_get_array_string_attribute(
-                       engine_config, MV_INFERENCE_OUTPUT_NODE_NAMES, &node_names, &size);
+       int ret = mv_engine_config_get_array_string_attribute(engine_config, MV_INFERENCE_OUTPUT_NODE_NAMES, &node_names,
+                                                                                                                 &size);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get _output_node_names");
                return ret;
@@ -541,9 +508,8 @@ int mv_inference_prepare_open(mv_inference_h infer)
        return ret;
 }
 
-int mv_inference_foreach_supported_engine_open(
-               mv_inference_h infer, mv_inference_supported_engine_cb callback,
-               void *user_data)
+int mv_inference_foreach_supported_engine_open(mv_inference_h infer, mv_inference_supported_engine_cb callback,
+                                                                                          void *user_data)
 {
        LOGI("ENTER");
 
@@ -560,9 +526,8 @@ int mv_inference_foreach_supported_engine_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_inference_image_classify_open(
-               mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
-               mv_inference_image_classified_cb classified_cb, void *user_data)
+int mv_inference_image_classify_open(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+                                                                        mv_inference_image_classified_cb classified_cb, void *user_data)
 {
        Inference *pInfer = static_cast<Inference *>(infer);
        std::vector<mv_source_h> sources;
@@ -610,8 +575,7 @@ int mv_inference_image_classify_open(
 }
 
 int mv_inference_object_detect_open(mv_source_h source, mv_inference_h infer,
-                                                                       mv_inference_object_detected_cb detected_cb,
-                                                                       void *user_data)
+                                                                       mv_inference_object_detected_cb detected_cb, void *user_data)
 {
        Inference *pInfer = static_cast<Inference *>(infer);
        std::vector<mv_source_h> sources;
@@ -650,23 +614,19 @@ int mv_inference_object_detect_open(mv_source_h source, mv_inference_h infer,
                locations[output_idx].point.y = objectDetectionResults.locations[output_idx].y;
                locations[output_idx].width = objectDetectionResults.locations[output_idx].width;
                locations[output_idx].height = objectDetectionResults.locations[output_idx].height;
-               LOGI("%d, %d, %d, %d", locations[output_idx].point.x,
-                                                               locations[output_idx].point.y,
-                                                               locations[output_idx].width,
-                                                               locations[output_idx].height);
+               LOGI("%d, %d, %d, %d", locations[output_idx].point.x, locations[output_idx].point.y,
+                        locations[output_idx].width, locations[output_idx].height);
        }
 
        int *indices = objectDetectionResults.indices.data();
        float *confidences = objectDetectionResults.confidences.data();
 
-       detected_cb(source, numberOfOutputs, indices, names.data(), confidences,
-                               locations.data(), user_data);
+       detected_cb(source, numberOfOutputs, indices, names.data(), confidences, locations.data(), user_data);
 
        return ret;
 }
 
-int mv_inference_face_detect_open(mv_source_h source, mv_inference_h infer,
-                                                                 mv_inference_face_detected_cb detected_cb,
+int mv_inference_face_detect_open(mv_source_h source, mv_inference_h infer, mv_inference_face_detected_cb detected_cb,
                                                                  void *user_data)
 {
        Inference *pInfer = static_cast<Inference *>(infer);
@@ -701,15 +661,13 @@ int mv_inference_face_detect_open(mv_source_h source, mv_inference_h infer,
 
        float *confidences = faceDetectionResults.confidences.data();
 
-       detected_cb(source, numberOfOutputs, confidences, locations.data(),
-                               user_data);
+       detected_cb(source, numberOfOutputs, confidences, locations.data(), user_data);
 
        return ret;
 }
 
-int mv_inference_facial_landmark_detect_open(
-               mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
-               mv_inference_facial_landmark_detected_cb detected_cb, void *user_data)
+int mv_inference_facial_landmark_detect_open(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+                                                                                        mv_inference_facial_landmark_detected_cb detected_cb, void *user_data)
 {
        Inference *pInfer = static_cast<Inference *>(infer);
        std::vector<mv_source_h> sources;
@@ -747,9 +705,8 @@ int mv_inference_facial_landmark_detect_open(
        return ret;
 }
 
-int mv_inference_pose_landmark_detect_open(
-               mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
-               mv_inference_pose_landmark_detected_cb detected_cb, void *user_data)
+int mv_inference_pose_landmark_detect_open(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+                                                                                  mv_inference_pose_landmark_detected_cb detected_cb, void *user_data)
 {
        Inference *pInfer = static_cast<Inference *>(infer);
        unsigned int width, height;
@@ -761,7 +718,7 @@ int mv_inference_pose_landmark_detect_open(
        }
 
        ret = mv_source_get_height(source, &height);
-               if (ret != MEDIA_VISION_ERROR_NONE) {
+       if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get height");
                return ret;
        }
@@ -782,8 +739,7 @@ int mv_inference_pose_landmark_detect_open(
 
        std::unique_ptr<mv_inference_pose_s> pose;
 
-       ret = pInfer->GetPoseLandmarkDetectionResults(
-                       pose, width, height);
+       ret = pInfer->GetPoseLandmarkDetectionResults(pose, width, height);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference results");
                return ret;
@@ -792,9 +748,9 @@ int mv_inference_pose_landmark_detect_open(
        for (int pose_index = 0; pose_index < pose->number_of_poses; ++pose_index) {
                for (int landmark_index = 0; landmark_index < pose->number_of_landmarks_per_pose; ++landmark_index) {
                        LOGI("PoseIdx[%2d]: x[%d], y[%d], score[%.3f]", landmark_index,
-                                                                               pose->landmarks[pose_index][landmark_index].point.x,
-                                                                               pose->landmarks[pose_index][landmark_index].point.y,
-                                                                               pose->landmarks[pose_index][landmark_index].score);
+                                pose->landmarks[pose_index][landmark_index].point.x,
+                                pose->landmarks[pose_index][landmark_index].point.y,
+                                pose->landmarks[pose_index][landmark_index].score);
                }
        }
 
@@ -803,9 +759,7 @@ int mv_inference_pose_landmark_detect_open(
        return ret;
 }
 
-int mv_inference_pose_get_number_of_poses_open(
-               mv_inference_pose_result_h result,
-               int *number_of_poses)
+int mv_inference_pose_get_number_of_poses_open(mv_inference_pose_result_h result, int *number_of_poses)
 {
        mv_inference_pose_s *handle = static_cast<mv_inference_pose_s *>(result);
        *number_of_poses = handle->number_of_poses;
@@ -814,9 +768,7 @@ int mv_inference_pose_get_number_of_poses_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_inference_pose_get_number_of_landmarks_open(
-               mv_inference_pose_result_h result,
-               int *number_of_landmarks)
+int mv_inference_pose_get_number_of_landmarks_open(mv_inference_pose_result_h result, int *number_of_landmarks)
 {
        mv_inference_pose_s *handle = static_cast<mv_inference_pose_s *>(result);
        *number_of_landmarks = handle->number_of_landmarks_per_pose;
@@ -825,12 +777,8 @@ int mv_inference_pose_get_number_of_landmarks_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_inference_pose_get_landmark_open(
-               mv_inference_pose_result_h result,
-               int pose_index,
-               int part_index,
-               mv_point_s *location,
-               float *score)
+int mv_inference_pose_get_landmark_open(mv_inference_pose_result_h result, int pose_index, int part_index,
+                                                                               mv_point_s *location, float *score)
 {
        mv_inference_pose_s *pose_obj = static_cast<mv_inference_pose_s *>(result);
 
@@ -847,10 +795,7 @@ int mv_inference_pose_get_landmark_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_inference_pose_get_label_open(
-               mv_inference_pose_result_h result,
-               int pose_index,
-               int *label)
+int mv_inference_pose_get_label_open(mv_inference_pose_result_h result, int pose_index, int *label)
 {
        mv_inference_pose_s *pose_obj = static_cast<mv_inference_pose_s *>(result);
 
@@ -896,22 +841,19 @@ int mv_pose_destroy_open(mv_pose_h pose)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_pose_set_from_file_open(mv_pose_h pose,
-               const char *motionCaptureFilePath,
-               const char *motionMappingFilePath)
+int mv_pose_set_from_file_open(mv_pose_h pose, const char *motionCaptureFilePath, const char *motionMappingFilePath)
 {
        Posture *pPose = static_cast<Posture *>(pose);
 
        // check file
        if (access(motionCaptureFilePath, F_OK) || access(motionMappingFilePath, F_OK)) {
-        LOGE("Invalid Motion Capture file path [%s]", motionCaptureFilePath);
+               LOGE("Invalid Motion Capture file path [%s]", motionCaptureFilePath);
                LOGE("Invalid Motion Mapping file path [%s]", motionMappingFilePath);
 
-        return MEDIA_VISION_ERROR_INVALID_PATH;
-    }
+               return MEDIA_VISION_ERROR_INVALID_PATH;
+       }
 
-       int ret = pPose->setPoseFromFile(std::string(motionCaptureFilePath),
-                                                               std::string(motionMappingFilePath));
+       int ret = pPose->setPoseFromFile(std::string(motionCaptureFilePath), std::string(motionMappingFilePath));
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to setPoseFromFile");
                return ret;
@@ -923,18 +865,17 @@ int mv_pose_set_from_file_open(mv_pose_h pose,
 int mv_pose_compare_open(mv_pose_h pose, mv_inference_pose_result_h action, int parts, float *score)
 {
        Posture *pPose = static_cast<Posture *>(pose);
-       std::vector<std::pair<bool, cv::Point>> actionParts;
+       std::vector<std::pair<bool, cv::Point> > actionParts;
        mv_inference_pose_s *pAction = static_cast<mv_inference_pose_s *>(action);
 
        for (int k = 0; k < HUMAN_POSE_MAX_LANDMARKS; ++k) {
                if (pAction->landmarks[0][k].point.x == -1 || pAction->landmarks[0][k].point.y == -1) {
-                       actionParts.push_back(std::make_pair(false, cv::Point(-1,-1)));
+                       actionParts.push_back(std::make_pair(false, cv::Point(-1, -1)));
                        continue;
                }
 
-               actionParts.push_back(std::make_pair(true, cv::Point(pAction->landmarks[0][k].point.x,
-                                                                                                                       pAction->landmarks[0][k].point.y)));
-
+               actionParts.push_back(
+                               std::make_pair(true, cv::Point(pAction->landmarks[0][k].point.x, pAction->landmarks[0][k].point.y)));
        }
 
        int ret = pPose->compare(parts, actionParts, score);
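
Note: a hedged end-to-end sketch of the pose comparison path above, via the public wrappers in mv_inference.c; the file paths are placeholders and "parts" is whichever body-part mask the caller uses:

	#include <mv_inference.h>

	static int compare_action(mv_inference_pose_result_h action, int parts)
	{
		mv_pose_h pose = NULL;
		float score = 0.0f;
		int ret = mv_pose_create(&pose);
		if (ret != MEDIA_VISION_ERROR_NONE)
			return ret;
		ret = mv_pose_set_from_file(pose, "/path/to/capture.bvh", "/path/to/mapping.txt");
		if (ret == MEDIA_VISION_ERROR_NONE)
			ret = mv_pose_compare(pose, action, parts, &score);
		mv_pose_destroy(pose);
		return ret;
	}
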
index 58e1c46..3c5afa7 100644
 #include <opencv2/opencv.hpp>
 #include <opencv2/imgproc/imgproc.hpp>
 
-class DataAugment {
+class DataAugment
+{
 public:
        DataAugment();
        virtual ~DataAugment();
 
-       virtual void Preprocess(std::vector<float>& in_vec, std::vector<float>& out_vec, int width, int height) = 0;
+       virtual void Preprocess(std::vector<float> &in_vec, std::vector<float> &out_vec, int width, int height) = 0;
 };
 
 #endif
\ No newline at end of file
index 58e1597..5175ed1 100644
 
 #include "data_augment.h"
 
-class DataAugmentDefault final : public DataAugment {
+class DataAugmentDefault final : public DataAugment
+{
 public:
        DataAugmentDefault();
        ~DataAugmentDefault();
 
-       void Preprocess(std::vector<float>& in_vec, std::vector<float>& out_vec, int width, int height) final;
+       void Preprocess(std::vector<float> &in_vec, std::vector<float> &out_vec, int width, int height) final;
 };
 
 #endif
\ No newline at end of file
index 153c49e..d7b53c7 100644
 
 #include "data_augment.h"
 
-class DataAugmentFlip final : public DataAugment {
+class DataAugmentFlip final : public DataAugment
+{
 public:
        DataAugmentFlip();
        ~DataAugmentFlip();
 
-       void Preprocess(std::vector<float>& in_vec, std::vector<float>& out_vec, int width, int height) final;
+       void Preprocess(std::vector<float> &in_vec, std::vector<float> &out_vec, int width, int height) final;
 };
 
 #endif
\ No newline at end of file
index 43fec62..f727aec 100644
@@ -22,7 +22,8 @@
 
 #include "data_augment.h"
 
-class DataAugmentRotate final : public DataAugment {
+class DataAugmentRotate final : public DataAugment
+{
 private:
        unsigned int _degree;
 
@@ -30,7 +31,7 @@ public:
        DataAugmentRotate(unsigned int degree = 90);
        ~DataAugmentRotate();
 
-       void Preprocess(std::vector<float>& in_vec, std::vector<float>& out_vec, int width, int height) final;
+       void Preprocess(std::vector<float> &in_vec, std::vector<float> &out_vec, int width, int height) final;
 };
 
 #endif
\ No newline at end of file
index 56cbfd3..6e86b3b 100644
 
 #include "feature_vector_manager.h"
 
-class DataSetManager {
+class DataSetManager
+{
 protected:
-       std::vector<std::vector<float>> _data;
-       std::vector<std::vector<float>> _labels;
+       std::vector<std::vector<float> > _data;
+       std::vector<std::vector<float> > _labels;
        std::vector<unsigned int> _label_index;
        size_t _feature_vector_size;
        size_t _label_size;
@@ -37,13 +38,13 @@ public:
        virtual ~DataSetManager();
 
        void Clear();
-	bool IsFeatureVectorDuplicated(const std::vector<float>& vec);
-       std::vector<std::vector<float>>& GetData(void);
-       std::vector<std::vector<float>>& GetLabel(void);
+       bool IsFeatureVectorDuplicated(const std::vector<float> &vec);
+       std::vector<std::vector<float> > &GetData(void);
+       std::vector<std::vector<float> > &GetLabel(void);
        size_t GetFeaVecSize(void);
        size_t GetLabelSize(void);
        size_t GetDataSetLen(void);
-	std::vector<unsigned int>& GetLabelIdx(void);
+       std::vector<unsigned int> &GetLabelIdx(void);
 
        virtual void LoadDataSet(const std::string file_name) = 0;
 };
index aeca7d9..f1ffc22 100644
 
 #include "file_util.h"
 
-typedef struct {
+typedef struct
+{
        unsigned int signature;
        size_t feature_size;
        size_t one_hot_table_size;
        unsigned int data_set_cnt;
 } FeaVecHeader;
 
-class FeatureVectorManager {
+class FeatureVectorManager
+{
 protected:
        std::string _feature_vector_file;
 
@@ -40,20 +42,18 @@ public:
        FeatureVectorManager(const std::string feature_vector_file = "feature_vector_file.dat");
        virtual ~FeatureVectorManager() = default;
 
-	const std::string& GetFileName();
+       const std::string &GetFileName();
 
-       static void GetVecFromImg(const std::string image_file, std::vector<float>& vec,
-                                                         unsigned int width, unsigned int height);
-       static void GetVecFromRGB(unsigned char *in_data, std::vector<float>& vec,
-                                                         unsigned int width, unsigned int height,
+       static void GetVecFromImg(const std::string image_file, std::vector<float> &vec, unsigned int width,
+                                                         unsigned int height);
+       static void GetVecFromRGB(unsigned char *in_data, std::vector<float> &vec, unsigned int width, unsigned int height,
                                                          size_t re_width, size_t re_height);
-       static void GetVecFromXRGB(unsigned char *in_data, std::vector<float>& vec,
-                                                          unsigned int in_width, unsigned int in_height,
-                                                          unsigned int re_width, unsigned int re_height);
+       static void GetVecFromXRGB(unsigned char *in_data, std::vector<float> &vec, unsigned int in_width,
+                                                          unsigned int in_height, unsigned int re_width, unsigned int re_height);
 
-       virtual void WriteHeader(size_t feature_size, size_t one_hot_table_size, unsigned int  data_set_cnt) = 0;
-	virtual void ReadHeader(FeaVecHeader& header) = 0;
-	virtual void WriteFeatureVec(std::vector<float>& feature_vec, const int max_label, const int label_index) = 0;
+       virtual void WriteHeader(size_t feature_size, size_t one_hot_table_size, unsigned int data_set_cnt) = 0;
+       virtual void ReadHeader(FeaVecHeader &header) = 0;
+       virtual void WriteFeatureVec(std::vector<float> &feature_vec, const int max_label, const int label_index) = 0;
        virtual void Remove() = 0;
 
        static constexpr unsigned int feature_vector_signature = 0xFEA09841;
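
Note: a hedged sketch of what a ReadHeader() implementation can look like for the FeaVecHeader declared above, assuming the header is stored verbatim (fixed layout) at the start of the file; the real I/O belongs to the FeatureVectorManager subclasses:

	#include <fstream>
	#include <string>

	// Reads the fixed-size header and validates it against the signature
	// constant declared in FeatureVectorManager above.
	bool read_header(const std::string &path, FeaVecHeader &hdr)
	{
		std::ifstream in(path, std::ios::binary);
		if (!in.read(reinterpret_cast<char *>(&hdr), sizeof(hdr)))
			return false;
		return hdr.signature == FeatureVectorManager::feature_vector_signature;
	}
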
index 5df0c9a..bcbe3ec 100644
@@ -22,8 +22,8 @@
 
 namespace FaceRecogUtil
 {
-    bool IsFileExist(const std::string file_path);
-    bool IsImageFile(const std::string image_file);
+bool IsFileExist(const std::string file_path);
+bool IsImageFile(const std::string image_file);
 }
 
 #endif
\ No newline at end of file
index 9c44f22..1902569 100644
@@ -27,7 +27,8 @@
 
 #include "file_util.h"
 
-class LabelManager {
+class LabelManager
+{
 private:
        std::map<std::string, std::string> _labels_and_files;
        std::string _label_file;
@@ -43,13 +44,13 @@ public:
        unsigned int GetLabelIndex(const std::string given_label);
        bool IsExist(const std::string given_label);
        unsigned int RemoveLabel(const std::string given_label);
-	int GetLabelString(std::string& label, const int idx);
+       int GetLabelString(std::string &label, const int idx);
        unsigned int AddLabelToFile(std::string given_label);
        int ImportLabel(void);
        bool AddLabelToMap(const std::string given_label, const std::string image_file);
        size_t GetMaxLabel(const std::string label_file);
        size_t GetMaxLabel();
-	std::string GetLabelFromAnswer(const std::vector<float>& result);
+       std::string GetLabelFromAnswer(const std::vector<float> &result);
        void Remove();
 };
 
index 3667534..6bb0b20 100644
@@ -28,7 +28,8 @@
 #include "data_set_manager.h"
 #include "feature_vector_manager.h"
 
-typedef struct {
+typedef struct
+{
        int backend_type;
        unsigned int target_device;
        std::vector<std::string> input_layer_names;
@@ -39,29 +40,32 @@ typedef struct {
        training_engine_compile_property compile_property;
 } TrainingEngineBackendInfo;
 
-class TrainingModel {
+class TrainingModel
+{
 private:
        virtual void SaveModel(const std::string file_path) = 0;
        virtual void RemoveModel(const std::string file_path) = 0;
+
 protected:
        std::unique_ptr<TrainingEngineInterface::Common::TrainingEngineCommon> _training;
        std::unique_ptr<training_engine_model> _model;
        std::unique_ptr<training_engine_dataset> _data_set;
        std::string _internal_model_file;
+
 public:
        TrainingModel(const mv_inference_backend_type_e backend_type = MV_INFERENCE_BACKEND_NNTRAINER,
                                  const mv_inference_target_device_e target_type = MV_INFERENCE_TARGET_DEVICE_CPU,
                                  const std::string internal_model_file = "model_and_weights.ini");
        virtual ~TrainingModel();
 
-	void ApplyDataSet(std::unique_ptr<DataSetManager>& data_set);
-	void ClearDataSet(std::unique_ptr<DataSetManager>& data_set);
+       void ApplyDataSet(std::unique_ptr<DataSetManager> &data_set);
+       void ClearDataSet(std::unique_ptr<DataSetManager> &data_set);
        void Compile();
        void Train();
        void RemoveModel();
 
        virtual void ConfigureModel(int num_of_class) = 0;
-	virtual TrainingEngineBackendInfo& GetTrainingEngineInfo() = 0;
+       virtual TrainingEngineBackendInfo &GetTrainingEngineInfo() = 0;
 };
 
 #endif
\ No newline at end of file
index 01d2f49..aa3b4f8 100644
 using namespace std;
 
 DataAugment::DataAugment()
-{
-
-}
+{}
 
 DataAugment::~DataAugment()
-{
-
-}
\ No newline at end of file
+{}
\ No newline at end of file
index fdf7feb..26e7c98 100644
 using namespace std;
 
 DataAugmentDefault::DataAugmentDefault() : DataAugment()
-{
-
-}
+{}
 
 DataAugmentDefault::~DataAugmentDefault()
-{
-
-}
+{}
 
-void DataAugmentDefault::Preprocess(vector<float>& in_vec, vector<float>& out_vec, int width, int height)
+void DataAugmentDefault::Preprocess(vector<float> &in_vec, vector<float> &out_vec, int width, int height)
 {
        cv::Mat cvSrc = cv::Mat(cv::Size(width, height), CV_32FC3, in_vec.data()).clone();
 
-       out_vec.assign((float *)cvSrc.data, (float *)cvSrc.data + cvSrc.total() * cvSrc.channels());
+       out_vec.assign((float *) cvSrc.data, (float *) cvSrc.data + cvSrc.total() * cvSrc.channels());
 }
\ No newline at end of file
index d7bca95..381751b 100644
 using namespace std;
 
 DataAugmentFlip::DataAugmentFlip() : DataAugment()
-{
-
-}
+{}
 
 DataAugmentFlip::~DataAugmentFlip()
-{
-
-}
+{}
 
-void DataAugmentFlip::Preprocess(vector<float>& in_vec, vector<float>& out_vec, int width, int height)
+void DataAugmentFlip::Preprocess(vector<float> &in_vec, vector<float> &out_vec, int width, int height)
 {
        cv::Mat cvSrc = cv::Mat(cv::Size(width, height), CV_32FC3, in_vec.data()).clone();
 
@@ -36,5 +32,5 @@ void DataAugmentFlip::Preprocess(vector<float>& in_vec, vector<float>& out_vec,
 
        cv::flip(cvSrc, cvFlip, 1);
 
-       out_vec.assign((float *)cvFlip.data, (float *)cvFlip.data + cvFlip.total() * cvFlip.channels());
+       out_vec.assign((float *) cvFlip.data, (float *) cvFlip.data + cvFlip.total() * cvFlip.channels());
 }
\ No newline at end of file
index 4b20623..2d82a5d 100644
@@ -21,16 +21,12 @@ using namespace std;
 using namespace mediavision::machine_learning::exception;
 
 DataAugmentRotate::DataAugmentRotate(unsigned int degree) : _degree(degree)
-{
-
-}
+{}
 
 DataAugmentRotate::~DataAugmentRotate()
-{
-
-}
+{}
 
-void DataAugmentRotate::Preprocess(vector<float>& in_vec, vector<float>& out_vec, int width, int height)
+void DataAugmentRotate::Preprocess(vector<float> &in_vec, vector<float> &out_vec, int width, int height)
 {
        cv::Mat cvSrc = cv::Mat(cv::Size(width, height), CV_32FC3, in_vec.data()).clone();
 
@@ -54,5 +50,5 @@ void DataAugmentRotate::Preprocess(vector<float>& in_vec, vector<float>& out_vec
 
        cv::rotate(cvSrc, cvRotate, rotate_code);
 
-       out_vec.assign((float *)cvRotate.data, (float *)cvRotate.data + cvRotate.total() * cvRotate.channels());
+       out_vec.assign((float *) cvRotate.data, (float *) cvRotate.data + cvRotate.total() * cvRotate.channels());
 }
\ No newline at end of file
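
Note: a usage sketch for the augmentation classes reformatted above; each Preprocess() reads in_vec as a width x height CV_32FC3 image and fills out_vec. Sizes are placeholders and the header names are assumed:

	#include <vector>
	#include "data_augment_flip.h"   // assumed header name
	#include "data_augment_rotate.h" // assumed header name

	void augment_sample(const std::vector<float> &in)
	{
		std::vector<float> src = in; // Preprocess() takes a non-const reference
		std::vector<float> flipped, rotated;
		DataAugmentFlip flip;
		DataAugmentRotate rotate(90);
		flip.Preprocess(src, flipped, 224, 224);
		rotate.Preprocess(src, rotated, 224, 224);
	}
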
index d661f31..e8952cc 100644
 
 using namespace std;
 
-DataSetManager::DataSetManager() : _data(), _labels(), _label_index(), _feature_vector_size(), _label_size(), _data_set_length()
-{
-
-}
+DataSetManager::DataSetManager()
+               : _data(), _labels(), _label_index(), _feature_vector_size(), _label_size(), _data_set_length()
+{}
 
 DataSetManager::~DataSetManager()
 {
@@ -30,36 +29,36 @@ DataSetManager::~DataSetManager()
 
 void DataSetManager::Clear()
 {
-	for (auto& data : _data)
+       for (auto &data : _data)
                data.clear();
 
        _data.clear();
 
-	for (auto& label : _labels)
+       for (auto &label : _labels)
                label.clear();
 
        _labels.clear();
        _label_index.clear();
 }
 
-bool DataSetManager::IsFeatureVectorDuplicated(const vector<float>& vec)
+bool DataSetManager::IsFeatureVectorDuplicated(const vector<float> &vec)
 {
        if (_data.empty())
                return false;
 
-	for (const auto& data : _data)
+       for (const auto &data : _data)
                if (data == vec)
                        return true;
 
        return false;
 }
 
-vector<vector<float>>& DataSetManager::GetData(void)
+vector<vector<float> > &DataSetManager::GetData(void)
 {
        return _data;
 }
 
-vector<vector<float>>& DataSetManager::GetLabel(void)
+vector<vector<float> > &DataSetManager::GetLabel(void)
 {
        return _labels;
 }
@@ -79,7 +78,7 @@ size_t DataSetManager::GetDataSetLen(void)
        return _data_set_length;
 }
 
-vector<unsigned int>& DataSetManager::GetLabelIdx(void)
+vector<unsigned int> &DataSetManager::GetLabelIdx(void)
 {
        return _label_index;
 }
\ No newline at end of file
index 62d90a3..4de8a12 100644
 using namespace std;
 using namespace mediavision::machine_learning::exception;
 
-FeatureVectorManager::FeatureVectorManager(const string feature_vector_file)
-       : _feature_vector_file(feature_vector_file)
-{
-}
+FeatureVectorManager::FeatureVectorManager(const string feature_vector_file) : _feature_vector_file(feature_vector_file)
+{}
 
-const string& FeatureVectorManager::GetFileName()
+const string &FeatureVectorManager::GetFileName()
 {
        return _feature_vector_file;
 }
 
-void FeatureVectorManager::GetVecFromImg(const string image_file, vector<float>& vec,
-                                                                                unsigned int width, unsigned int height)
+void FeatureVectorManager::GetVecFromImg(const string image_file, vector<float> &vec, unsigned int width,
+                                                                                unsigned int height)
 {
        cv::Mat src, dst;
 
@@ -61,12 +59,11 @@ void FeatureVectorManager::GetVecFromImg(const string image_file, vector<float>&
        cv::subtract(floatSrc, meaned, dst);
        dst /= 127.5f;
 
-       vec.assign((float *)dst.data, (float *)dst.data + dst.total() * dst.channels());
+       vec.assign((float *) dst.data, (float *) dst.data + dst.total() * dst.channels());
 }
 
-void FeatureVectorManager::GetVecFromRGB(unsigned char *in_data, vector<float>& vec,
-                                                                                unsigned int width, unsigned int height,
-                                                                                size_t re_width, size_t re_height)
+void FeatureVectorManager::GetVecFromRGB(unsigned char *in_data, vector<float> &vec, unsigned int width,
+                                                                                unsigned int height, size_t re_width, size_t re_height)
 {
        cv::Mat cvSrc = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3), in_data).clone();
 
@@ -84,18 +81,17 @@ void FeatureVectorManager::GetVecFromRGB(unsigned char *in_data, vector<float>&
        cv::subtract(floatSrc, meaned, dst);
        dst /= 127.5f;
 
-       vec.assign((float *)dst.data, (float *)dst.data + dst.total() * dst.channels());
+       vec.assign((float *) dst.data, (float *) dst.data + dst.total() * dst.channels());
 }
 
-void FeatureVectorManager::GetVecFromXRGB(unsigned char *in_data, vector<float>& vec,
-                                                       unsigned int in_width, unsigned int in_height, unsigned int re_width,
-                                                       unsigned int re_height)
+void FeatureVectorManager::GetVecFromXRGB(unsigned char *in_data, vector<float> &vec, unsigned int in_width,
+                                                                                 unsigned int in_height, unsigned int re_width, unsigned int re_height)
 {
        cv::Mat argb(cv::Size(in_width, in_height), CV_8UC4, in_data);
 
        cv::Mat split_rgbx[4];
-       cv::split (argb, split_rgbx);
-       cv::Mat splitted[] = {split_rgbx[0], split_rgbx[1], split_rgbx[2]};
+       cv::split(argb, split_rgbx);
+       cv::Mat splitted[] = { split_rgbx[0], split_rgbx[1], split_rgbx[2] };
        cv::Mat rgb;
        cv::merge(splitted, 3, rgb);
 
@@ -114,5 +110,5 @@ void FeatureVectorManager::GetVecFromXRGB(unsigned char *in_data, vector<float>&
        cv::subtract(floatSrc, meaned, dst);
        dst /= 127.5f;
 
-       vec.assign((float *)dst.data, (float *)dst.data + dst.total() * dst.channels());
+       vec.assign((float *) dst.data, (float *) dst.data + dst.total() * dst.channels());
 }
\ No newline at end of file
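All three GetVecFrom* overloads above funnel into the same normalization tail: convert to float, subtract 127.5, divide by 127.5, mapping 8-bit channels into [-1, 1]. A compact sketch of that shared step (assumes a 3-channel 8-bit input; name illustrative):

#include <opencv2/core.hpp>
#include <vector>

static void to_unit_range(const cv::Mat &rgb, std::vector<float> &vec)
{
	cv::Mat floatSrc, dst;
	rgb.convertTo(floatSrc, CV_32FC3);
	cv::Mat meaned(rgb.size(), CV_32FC3, cv::Scalar(127.5f, 127.5f, 127.5f));
	cv::subtract(floatSrc, meaned, dst);
	dst /= 127.5f; // each channel now lies in [-1.0, 1.0]
	vec.assign((float *) dst.data, (float *) dst.data + dst.total() * dst.channels());
}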
index 72396df..482af4b 100644
 
 namespace FaceRecogUtil
 {
-    bool IsFileExist(const std::string file_path)
-    {
-        struct stat fileStat;
-
-        if (stat(file_path.c_str(), &fileStat))
-            return false;
+bool IsFileExist(const std::string file_path)
+{
+       struct stat fileStat;
 
-        if (!S_ISREG(fileStat.st_mode))
-            return false;
+       if (stat(file_path.c_str(), &fileStat))
+               return false;
 
-        return true;
-    }
+       if (!S_ISREG(fileStat.st_mode))
+               return false;
 
-    bool IsImageFile(const std::string image_file)
-       {
+       return true;
+}
 
-               size_t size = image_file.size();
+bool IsImageFile(const std::string image_file)
+{
+       size_t size = image_file.size();
 
-               // At least, the length of an image file name should be more than 5. i.e., a.bmp, a.jpg, a.png, ...
-               if (size < 5)
-                       return false;
+       // At least, the length of an image file name should be more than 5. i.e., a.bmp, a.jpg, a.png, ...
+       if (size < 5)
+               return false;
 
-               std::string ext = image_file.substr(size - 3);
-               if (ext.compare("bmp") != 0 && ext.compare("jpg") != 0 && ext.compare("png") != 0)
-                       return false;
+       std::string ext = image_file.substr(size - 3);
+       if (ext.compare("bmp") != 0 && ext.compare("jpg") != 0 && ext.compare("png") != 0)
+               return false;
 
-               return true;
-       }
+       return true;
+}
 }
\ No newline at end of file
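The re-indented helpers boil down to two POSIX checks; an equivalent one-function sketch (name illustrative):

#include <sys/stat.h>
#include <string>

static bool is_regular_file(const std::string &path)
{
	struct stat st;
	// stat() returns 0 on success; S_ISREG rejects directories, sockets, etc.
	return stat(path.c_str(), &st) == 0 && S_ISREG(st.st_mode);
}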
index 3e03a57..feee3bb 100644
@@ -31,17 +31,21 @@ LabelManager::LabelManager(string label_file, double decision_threshold) : _labe
        LOGD("decision_threshold value is %lf", _decision_threshold);
 }
 
-LabelManager::~LabelManager() { }
+LabelManager::~LabelManager()
+{}
 
-void LabelManager::Clear() {
+void LabelManager::Clear()
+{
        _labels_and_files.clear();
 }
 
-float LabelManager::GetDecisionThreshold() {
+float LabelManager::GetDecisionThreshold()
+{
        return _decision_threshold;
 }
 
-float LabelManager::GetDecisionWeight() {
+float LabelManager::GetDecisionWeight()
+{
        return _decision_weight;
 }
 
@@ -148,7 +152,7 @@ unsigned int LabelManager::RemoveLabel(const string given_label)
        return label_index;
 }
 
-int LabelManager::GetLabelString(string& label, const int idx)
+int LabelManager::GetLabelString(string &label, const int idx)
 {
        ifstream readFile;
 
@@ -223,7 +227,6 @@ int LabelManager::ImportLabel(void)
        return label_cnt;
 }
 
-
 bool LabelManager::AddLabelToMap(const string given_label, const string image_file)
 {
        // Find same one if not empty. If same one exists in the map then skip.
@@ -240,7 +243,6 @@ bool LabelManager::AddLabelToMap(const string given_label, const string image_fi
 
 size_t LabelManager::GetMaxLabel(const string label_file)
 {
-
        // label count is 0 if label file doesn't exist.
        if (!FaceRecogUtil::IsFileExist(label_file))
                return 0;
@@ -269,7 +271,7 @@ size_t LabelManager::GetMaxLabel()
        return GetMaxLabel(_label_file);
 }
 
-string LabelManager::GetLabelFromAnswer(const vector<float>& result)
+string LabelManager::GetLabelFromAnswer(const vector<float> &result)
 {
        if (result.empty())
                throw InvalidParameter("result vector is empty.");
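GetLabelFromAnswer() turns a per-label score vector into a label string; assuming the vector carries one score per label index, the selection step reduces to an argmax (sketch only, not the patched code):

#include <algorithm>
#include <vector>

static size_t argmax(const std::vector<float> &result)
{
	// Index of the best score; the caller maps it back to a label string.
	return std::distance(result.begin(), std::max_element(result.begin(), result.end()));
}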
index 5229b6b..ee022ed 100644
@@ -34,8 +34,7 @@ using namespace TrainingEngineInterface::Common;
 using namespace mediavision::machine_learning::exception;
 
 TrainingModel::TrainingModel(const mv_inference_backend_type_e backend_type,
-                                                        const mv_inference_target_device_e target_type,
-                                                        const string internal_model_file)
+                                                        const mv_inference_target_device_e target_type, const string internal_model_file)
 {
        _internal_model_file = internal_model_file;
        _training = make_unique<TrainingEngineInterface::Common::TrainingEngineCommon>();
@@ -52,16 +51,16 @@ TrainingModel::TrainingModel(const mv_inference_backend_type_e backend_type,
                throw InvalidOperation("Fail to get backend capacity.");
 }
 
-TrainingModel::~ TrainingModel()
+TrainingModel::~TrainingModel()
 {
        if (_training)
                _training->UnbindBackend();
 }
 
-void TrainingModel::ApplyDataSet(unique_ptr<DataSetManager>& data_set)
+void TrainingModel::ApplyDataSet(unique_ptr<DataSetManager> &data_set)
 {
-       auto& values = data_set->GetData();
-       auto& labels = data_set->GetLabel();
+       auto &values = data_set->GetData();
+       auto &labels = data_set->GetLabel();
 
        LOGD("Generating feature vectors for training");
 
@@ -84,7 +83,7 @@ void TrainingModel::ApplyDataSet(unique_ptr<DataSetManager>& data_set)
                throw InvalidOperation("Fail to set dataset to model.", ret);
 }
 
-void TrainingModel::ClearDataSet(unique_ptr<DataSetManager>& data_set)
+void TrainingModel::ClearDataSet(unique_ptr<DataSetManager> &data_set)
 {
        data_set->Clear();
        _training->DestroyDataset(_data_set.get());
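Both sides of the ApplyDataSet() hunk bind references; that detail is load-bearing, because with plain auto the whole training set returned by GetData()/GetLabel() would be deep-copied. A two-line illustration:

#include <vector>

static void copy_vs_ref(std::vector<std::vector<float> > &big)
{
	auto copied = big; // deep copy: every feature vector duplicated
	auto &bound = big; // alias only: what ApplyDataSet() relies on
	(void) copied;
	(void) bound;
}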
index 3b6d6fd..edd7b49 100644
 #include <opencv2/tracking.hpp>
 #include <opencv2/tracking/tracking_legacy.hpp>
 
-namespace MediaVision {
-namespace ROITracker {
-
+namespace MediaVision
+{
+namespace ROITracker
+{
 using TrackerResult = std::tuple<int, int, int, int>;
 
-class ROITracker {
+class ROITracker
+{
 public:
        ROITracker() = default;
        virtual ~ROITracker() = default;
 
        void setType(mv_roi_tracker_type_e type) noexcept;
        void setRoi(int x, int y, int width, int height) noexcept;
-       void perform(cv::Mat& frame);
+       void perform(cv::Mat &frame);
        TrackerResult result();
 
 private:
-       void initialize(cv::Mat& frame);
-       void update(cv::Mat& frame);
+       void initialize(cv::Mat &frame);
+       void update(cv::Mat &frame);
 
        cv::Ptr<cv::Tracker> __cvTracker;
        cv::Rect __boundingBox;
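A hypothetical caller of the class above (include path assumed; result() yields the TrackerResult tuple declared in this header):

#include "ROITracker.h" // assumed path
#include <opencv2/core.hpp>
#include <tuple>

static void track_one_frame(cv::Mat &frame)
{
	MediaVision::ROITracker::ROITracker tracker;
	tracker.setType(MV_ROI_TRACKER_TYPE_BALANCE);
	tracker.setRoi(10, 10, 64, 64);
	tracker.perform(frame); // first call initializes, later calls update
	int x, y, w, h;
	std::tie(x, y, w, h) = tracker.result();
}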
index 6bc90b9..8807236 100644
 #ifndef __MEDIA_VISION_ROITRACKERUTIL_H__
 #define __MEDIA_VISION_ROITRACKERUTIL_H__
 
-namespace MediaVision {
-namespace ROITracker {
-    cv::Ptr<cv::Tracker> createTrackerByName(mv_roi_tracker_type_e type);
+namespace MediaVision
+{
+namespace ROITracker
+{
+cv::Ptr<cv::Tracker> createTrackerByName(mv_roi_tracker_type_e type);
 
 } /* ROITracker */
 } /* MediaVision */
index 3100f56..b2f5738 100644
@@ -23,8 +23,7 @@
 #include "mv_roi_tracker_type.h"
 
 #ifdef __cplusplus
-extern "C"
-{
+extern "C" {
 #endif /* __cplusplus */
 
 /**
@@ -86,8 +85,7 @@ int mv_roi_tracker_destroy_open(mv_roi_tracker_h handle);
  * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
  * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
  */
-int mv_roi_tracker_configure_engine_open(mv_roi_tracker_h handle,
-                                        mv_engine_config_h engine_config);
+int mv_roi_tracker_configure_engine_open(mv_roi_tracker_h handle, mv_engine_config_h engine_config);
 
 /**
  * @brief Prepare roi tracker.
@@ -133,7 +131,8 @@ int mv_roi_tracker_prepare_open(mv_roi_tracker_h handle, int x, int y, int width
  * @pre Create a source handle by calling @ref mv_create_source()
  * @pre Create an tracker handle by calling @ref mv_roi_tracker_create_open()
  */
-int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source, mv_roi_tracker_tracked_cb tracked_cb, void *user_data);
+int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source, mv_roi_tracker_tracked_cb tracked_cb,
+                                                               void *user_data);
 
 #ifdef __cplusplus
 }
index c473865..52e8aae 100644
 #include "ROITracker.h"
 #include "ROITrackerUtil.h"
 
-namespace MediaVision {
-namespace ROITracker {
-
-void ROITracker::initialize(cv::Mat& frame)
+namespace MediaVision
+{
+namespace ROITracker
+{
+void ROITracker::initialize(cv::Mat &frame)
 {
        if (__cvTracker) {
                LOGE("cvTracker already exists. 'mv_roi_tracker_destroy' should be called for removing cvTracker.");
                throw std::runtime_error("tracker Initialize failed.");
        }
 
-       LOGD("Init pos : x:%d, y:%d, w:%d, h:%d is set.",
-               __boundingBox.x, __boundingBox.y, __boundingBox.width, __boundingBox.height);
+       LOGD("Init pos : x:%d, y:%d, w:%d, h:%d is set.", __boundingBox.x, __boundingBox.y, __boundingBox.width,
+                __boundingBox.height);
 
        __cvTracker = createTrackerByName(__type);
        __cvTracker->init(frame, __boundingBox);
@@ -56,7 +57,7 @@ void ROITracker::initialize(cv::Mat& frame)
        LOGI("Initialized done");
 }
 
-void ROITracker::update(cv::Mat& frame)
+void ROITracker::update(cv::Mat &frame)
 {
        if (!__cvTracker->update(frame, __boundingBox)) {
                LOGE("update failed.");
@@ -64,11 +65,11 @@ void ROITracker::update(cv::Mat& frame)
                throw std::runtime_error("tracker update failed.");
        }
 
-       LOGD(" Updated: x: %d, y: %d, w: %d, h: %d",
-               __boundingBox.x, __boundingBox.y, __boundingBox.width, __boundingBox.height);
+       LOGD(" Updated: x: %d, y: %d, w: %d, h: %d", __boundingBox.x, __boundingBox.y, __boundingBox.width,
+                __boundingBox.height);
 }
 
-void ROITracker::perform(cv::Mat& frame)
+void ROITracker::perform(cv::Mat &frame)
 {
        if (!__initialized)
                initialize(frame);
@@ -95,8 +96,8 @@ void ROITracker::setRoi(int x, int y, int width, int height) noexcept
 {
        __boundingBox = { x, y, width, height };
 
-       LOGD("ROI : x:%d, y:%d, w:%d, h:%d is set.",
-                       __boundingBox.x, __boundingBox.y, __boundingBox.width, __boundingBox.height);
+       LOGD("ROI : x:%d, y:%d, w:%d, h:%d is set.", __boundingBox.x, __boundingBox.y, __boundingBox.width,
+                __boundingBox.height);
 }
 
 } /* ROITracker */
index 6732bb7..5493738 100644
 #include <opencv2/tracking/tracking_legacy.hpp>
 #include <iostream>
 
-namespace MediaVision {
-namespace ROITracker {
-    cv::Ptr<cv::Tracker> createTrackerByName(mv_roi_tracker_type_e type)
-    {
-        switch (type) {
-        case MV_ROI_TRACKER_TYPE_BALANCE:
-            return cv::TrackerKCF::create();
-        case MV_ROI_TRACKER_TYPE_SPEED:
-            return cv::legacy::upgradeTrackingAPI(cv::legacy::TrackerMedianFlow::create());
-        case MV_ROI_TRACKER_TYPE_ACCURACY:
-            return cv::TrackerCSRT::create();
-        default:
-            throw std::runtime_error("Unexpected type");
-        }
-    }
+namespace MediaVision
+{
+namespace ROITracker
+{
+cv::Ptr<cv::Tracker> createTrackerByName(mv_roi_tracker_type_e type)
+{
+       switch (type) {
+       case MV_ROI_TRACKER_TYPE_BALANCE:
+               return cv::TrackerKCF::create();
+       case MV_ROI_TRACKER_TYPE_SPEED:
+               return cv::legacy::upgradeTrackingAPI(cv::legacy::TrackerMedianFlow::create());
+       case MV_ROI_TRACKER_TYPE_ACCURACY:
+               return cv::TrackerCSRT::create();
+       default:
+               throw std::runtime_error("Unexpected type");
+       }
+}
 } /* ROITracker */
 } /* MediaVision */
\ No newline at end of file
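The factory maps the public tracker-type enum onto OpenCV implementations (KCF for balance, MedianFlow for speed, CSRT for accuracy). Typical use of the returned handle (sketch):

#include <opencv2/tracking.hpp>

static bool run_tracker(cv::Ptr<cv::Tracker> tracker, const cv::Mat &first, const cv::Mat &next)
{
	cv::Rect box(10, 10, 64, 64);
	tracker->init(first, box);         // seed with the initial ROI
	return tracker->update(next, box); // false once the target is lost
}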
index 12cefe2..6b8b650 100644
@@ -20,8 +20,7 @@
 
 int mv_roi_tracker_create(mv_roi_tracker_h *handle)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-               _mv_roi_tracking_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_roi_tracking_check_system_info_feature_supported());
 
        MEDIA_VISION_NULL_ARG_CHECK(handle);
 
@@ -36,8 +35,7 @@ int mv_roi_tracker_create(mv_roi_tracker_h *handle)
 
 int mv_roi_tracker_destroy(mv_roi_tracker_h handle)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-               _mv_roi_tracking_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_roi_tracking_check_system_info_feature_supported());
 
        MEDIA_VISION_INSTANCE_CHECK(handle);
 
@@ -50,11 +48,9 @@ int mv_roi_tracker_destroy(mv_roi_tracker_h handle)
        return ret;
 }
 
-int mv_roi_tracker_configure(mv_roi_tracker_h handle,
-                                                  mv_engine_config_h engine_config)
+int mv_roi_tracker_configure(mv_roi_tracker_h handle, mv_engine_config_h engine_config)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_roi_tracking_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_roi_tracking_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(handle);
        MEDIA_VISION_INSTANCE_CHECK(engine_config);
 
@@ -74,8 +70,7 @@ int mv_roi_tracker_configure(mv_roi_tracker_h handle,
 
 int mv_roi_tracker_prepare(mv_roi_tracker_h handle, int x, int y, int width, int height)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-                       _mv_roi_tracking_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_roi_tracking_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(handle);
        MEDIA_VISION_NULL_ARG_CHECK(x);
        MEDIA_VISION_NULL_ARG_CHECK(y);
@@ -92,10 +87,10 @@ int mv_roi_tracker_prepare(mv_roi_tracker_h handle, int x, int y, int width, int
        return ret;
 }
 
-int mv_roi_tracker_perform(mv_roi_tracker_h handle, mv_source_h source, mv_roi_tracker_tracked_cb tracked_cb, void *user_data)
+int mv_roi_tracker_perform(mv_roi_tracker_h handle, mv_source_h source, mv_roi_tracker_tracked_cb tracked_cb,
+                                                  void *user_data)
 {
-       MEDIA_VISION_SUPPORT_CHECK(
-               _mv_roi_tracking_check_system_info_feature_supported());
+       MEDIA_VISION_SUPPORT_CHECK(_mv_roi_tracking_check_system_info_feature_supported());
 
        MEDIA_VISION_INSTANCE_CHECK(handle);
        MEDIA_VISION_INSTANCE_CHECK(source);
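For context, the guarded entry points above implement this public call order (error handling elided; the callback signature is an assumption based on mv_roi_tracker_type.h):

#include <mv_roi_tracker.h>

static void on_tracked(mv_source_h source, mv_rectangle_s roi, void *user_data)
{
	/* consume the tracked ROI here (signature assumed) */
}

static void track(mv_source_h source)
{
	mv_roi_tracker_h handle = NULL;
	mv_roi_tracker_create(&handle);
	mv_roi_tracker_prepare(handle, 10, 10, 64, 64);
	mv_roi_tracker_perform(handle, source, on_tracked, NULL);
	mv_roi_tracker_destroy(handle);
}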
index 5fe8f0b..32b3488 100644
@@ -23,7 +23,8 @@
 using namespace std;
 using namespace MediaVision::ROITracker;
 
-static cv::Mat getTrackerFrame(mv_colorspace_e colorspace, unsigned int width, unsigned int height, unsigned char* buffer)
+static cv::Mat getTrackerFrame(mv_colorspace_e colorspace, unsigned int width, unsigned int height,
+                                                          unsigned char *buffer)
 {
        switch (colorspace) {
        case MEDIA_VISION_COLORSPACE_Y800:
@@ -32,7 +33,7 @@ static cv::Mat getTrackerFrame(mv_colorspace_e colorspace, unsigned int width, u
        case MEDIA_VISION_COLORSPACE_NV12:
        case MEDIA_VISION_COLORSPACE_NV21:
        case MEDIA_VISION_COLORSPACE_YV12:
-               return cv::Mat(width, height * 3/2, CV_8UC1, buffer).clone();
+               return cv::Mat(width, height * 3 / 2, CV_8UC1, buffer).clone();
        case MEDIA_VISION_COLORSPACE_YUYV:
        case MEDIA_VISION_COLORSPACE_UYVY:
        case MEDIA_VISION_COLORSPACE_422P:
@@ -52,7 +53,7 @@ int mv_roi_tracker_create_open(mv_roi_tracker_h *handle)
 {
        LOGD("ENTER");
 
-       ROITracker *pTracker = new (std::nothrow)ROITracker;
+       ROITracker *pTracker = new (std::nothrow) ROITracker;
        if (!pTracker) {
                LOGE("Failed to create tracker");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
@@ -82,8 +83,7 @@ int mv_roi_tracker_destroy_open(mv_roi_tracker_h handle)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_roi_tracker_configure_engine_open(mv_roi_tracker_h handle,
-                                                                          mv_engine_config_h engine_config)
+int mv_roi_tracker_configure_engine_open(mv_roi_tracker_h handle, mv_engine_config_h engine_config)
 {
        LOGI("ENTER");
 
@@ -93,8 +93,8 @@ int mv_roi_tracker_configure_engine_open(mv_roi_tracker_h handle,
        }
 
        int tracker_type;
-       if (mv_engine_config_get_int_attribute(engine_config,
-                       MV_ROI_TRACKER_TYPE, &tracker_type) != MEDIA_VISION_ERROR_NONE)
+       if (mv_engine_config_get_int_attribute(engine_config, MV_ROI_TRACKER_TYPE, &tracker_type) !=
+               MEDIA_VISION_ERROR_NONE)
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
 
        auto pTracker = static_cast<ROITracker *>(handle);
@@ -120,7 +120,8 @@ int mv_roi_tracker_prepare_open(mv_roi_tracker_h handle, int x, int y, int width
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source, mv_roi_tracker_tracked_cb tracked_cb, void *user_data)
+int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source, mv_roi_tracker_tracked_cb tracked_cb,
+                                                               void *user_data)
 {
        LOGD("ENTER");
 
@@ -135,14 +136,10 @@ int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source, mv_
        unsigned char *buffer = NULL;
        mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
 
-       MEDIA_VISION_ASSERT(mv_source_get_width(source, &width),
-                       "Failed to get the width.");
-       MEDIA_VISION_ASSERT(mv_source_get_height(source, &height),
-                       "Failed to get the height.");
-       MEDIA_VISION_ASSERT(mv_source_get_colorspace(source, &colorspace),
-                       "Failed to get the colorspace.");
-       MEDIA_VISION_ASSERT(mv_source_get_buffer(source, &buffer, &bufferSize),
-                       "Failed to get the buffer size.");
+       MEDIA_VISION_ASSERT(mv_source_get_width(source, &width), "Failed to get the width.");
+       MEDIA_VISION_ASSERT(mv_source_get_height(source, &height), "Failed to get the height.");
+       MEDIA_VISION_ASSERT(mv_source_get_colorspace(source, &colorspace), "Failed to get the colorspace.");
+       MEDIA_VISION_ASSERT(mv_source_get_buffer(source, &buffer, &bufferSize), "Failed to get the buffer size.");
 
        LOGD(" w: %d, h: %d, c: %d", width, height, channels);
        try {
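The 3 / 2 spacing fix above sits in the YUV420 branch: NV12/NV21/YV12 carry a full-resolution luma plane plus half-resolution chroma, so one frame occupies width * height * 3 / 2 bytes. A sketch of the conventional single-plane wrap (assumption: rows = height * 3 / 2, columns = width):

#include <opencv2/core.hpp>
#include <cstdint>

static cv::Mat wrap_yuv420(unsigned int width, unsigned int height, uint8_t *buffer)
{
	// Y plane: width*height bytes; chroma adds another width*height/2.
	return cv::Mat(height * 3 / 2, width, CV_8UC1, buffer).clone();
}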
index 9d92ade..0fd812e 100644
 
 #include <opencv2/opencv.hpp>
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 typedef std::map<std::string, std::vector<std::string> > EventTypesMap;
 typedef EventTypesMap::iterator EventTypesMapIter;
 typedef EventTypesMap::const_iterator EventTypesMapConstIter;
index dc802a7..8bb4011 100644
 #include "EventTrigger.h"
 #include "EventDefs.h"
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 class EventManager;
 
 /**
@@ -36,9 +37,9 @@ class EventManager;
  *
  * @since_tizen 3.0
  */
-class EventManagerDestroyer {
+class EventManagerDestroyer
+{
 public:
-
        /**
         * @brief Default destructor.
         *
@@ -55,7 +56,6 @@ public:
        void initialize(EventManager *pointer);
 
 private:
-
        EventManager *__pInstance;
 };
 
@@ -66,15 +66,15 @@ private:
  * @since_tizen 3.0
  */
 
-class EventManager {
+class EventManager
+{
 public:
-
        /**
         * @brief Gets EventManager instance.
         *
         * @since_tizen 3.0
         */
-       static EventManager& getInstance();
+       static EventManager &getInstance();
 
        /**
         * @brief Registers event.
@@ -92,19 +92,11 @@ public:
         * @param [in] isInternal        Interpretation event as internal in surveillance
         * @return @c 0 on success, otherwise a negative error value
         */
-       int registerEvent(
-               mv_surveillance_event_trigger_h eventTrigger,
-               long int triggerId,
-               const char *eventType,
-               int videoStreamId,
-               mv_engine_config_h engineCfg,
-               mv_surveillance_event_occurred_cb callback,
-               void *user_data,
-               int numberOfPoints,
-               mv_point_s *roi,
-               bool isInternal);
-
-    /**
+       int registerEvent(mv_surveillance_event_trigger_h eventTrigger, long int triggerId, const char *eventType,
+                                         int videoStreamId, mv_engine_config_h engineCfg, mv_surveillance_event_occurred_cb callback,
+                                         void *user_data, int numberOfPoints, mv_point_s *roi, bool isInternal);
+
+       /**
      * @brief Unregisters event.
      *
      * @since_tizen 3.0
@@ -114,7 +106,7 @@ public:
      *                             will be unregistered
      * @return @c 0 on success, otherwise a negative error value
      */
-    int unregisterEvent(long int triggerId, int videoStreamId);
+       int unregisterEvent(long int triggerId, int videoStreamId);
 
        /**
         * @brief Pushes media source to run event triggers.
@@ -133,7 +125,7 @@ public:
         * @param [out] eventTypes    The supported event types
         * @return @c 0 on success, otherwise a negative error value
         */
-       static int getSupportedEventTypes(StringVector& eventTypes);
+       static int getSupportedEventTypes(StringVector &eventTypes);
 
        /**
         * @brief Gets all supported event result value names.
@@ -142,7 +134,7 @@ public:
         * @param [out] eventResValNames    The supported event result value names
         * @return @c 0 on success, otherwise a negative error value
         */
-       static int getSupportedEventResultValueNames(StringVector& eventResValNames);
+       static int getSupportedEventResultValueNames(StringVector &eventResValNames);
 
        /**
         * @brief Gets supported event result value names for an event type.
@@ -153,28 +145,24 @@ public:
         * @param [out] eventResValNames    The supported event result value names
         * @return @c 0 on success, otherwise a negative error value
         */
-       static int getSupportedEventResultValueNames(
-               const std::string& eventTypeName,
-               StringVector& eventResValNames);
+       static int getSupportedEventResultValueNames(const std::string &eventTypeName, StringVector &eventResValNames);
 
 private:
-
        EventManager();
 
-       EventManager(const EventManager&);
+       EventManager(const EventManager &);
 
-       EventManager& operator=(EventManager&);
+       EventManager &operator=(EventManager &);
 
        ~EventManager();
 
        static void setSupportedEventTypes();
 
-    EventTriggersIter isTriggerExists(EventTrigger *trigger, int videoStreamId);
+       EventTriggersIter isTriggerExists(EventTrigger *trigger, int videoStreamId);
 
        friend class EventManagerDestroyer;
 
 private:
-
        static EventManager *__pInstance;
 
        static EventManagerDestroyer Destroyer;
@@ -182,7 +170,6 @@ private:
        static EventTypesMap SupportedEventTypes;
 
 private:
-
        EventTriggersMap __eventTriggers;
 };
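EventManager is a hand-rolled singleton: the static Destroyer member deletes the lazily allocated instance at static-destruction time. For comparison, the same lifetime guarantee with a function-local static (a sketch, not the patched code):

class Manager
{
public:
	static Manager &getInstance()
	{
		// Created once (thread-safe since C++11), destroyed at exit --
		// the role EventManagerDestroyer plays manually above.
		static Manager instance;
		return instance;
	}

private:
	Manager() = default;
	Manager(const Manager &) = delete;
	Manager &operator=(const Manager &) = delete;
};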
 
index 7ce8f45..495100f 100644
  * @brief This file contains interface for event trigger.
  */
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 /**
  * @class    EventResult
  * @brief    This class contains event result interface.
  *
  * @since_tizen 3.0
  */
-class EventResult {
+class EventResult
+{
 public:
        /**
         * @brief Default destructor.
         *
         * @since_tizen 3.0
         */
-       virtual ~EventResult() {}
+       virtual ~EventResult()
+       {}
 
        /**
         * @brief Gets result value.
@@ -52,7 +55,6 @@ public:
        virtual int getResultValue(const char *valueName, void *value) const = 0;
 };
 
-
 } /* surveillance */
 } /* mediavision */
 
index f2fbd2d..014a200 100644
 #include <map>
 #include <list>
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 /**
  * @class    EventTrigger
  * @brief    This class contains event trigger interface.
  *
  * @since_tizen 3.0
  */
-class EventTrigger {
+class EventTrigger
+{
 public:
        /**
         * @brief Default constructor.
@@ -55,15 +57,9 @@ public:
         * @param [in] roi               The input array with ROI points
         * @param [in] isInternal        Interpretation event as internal in surveillance
         */
-       EventTrigger(
-               mv_surveillance_event_trigger_h eventTrigger,
-               long int triggerId,
-               int videoStreamId,
-               mv_surveillance_event_occurred_cb callback,
-               void *userData,
-               int numberOfPoints,
-               mv_point_s *roi,
-               bool isInternal);
+       EventTrigger(mv_surveillance_event_trigger_h eventTrigger, long int triggerId, int videoStreamId,
+                                mv_surveillance_event_occurred_cb callback, void *userData, int numberOfPoints, mv_point_s *roi,
+                                bool isInternal);
 
        /**
         * @brief Default destructor.
@@ -90,10 +86,7 @@ public:
         * @param [in] grayImage     The converted to gray scale source
         * @return @c 0 on success, otherwise a negative error value
         */
-       virtual int pushSource(
-                                       mv_source_h source,
-                                       mv_source_h graySource,
-                                       const cv::Mat& grayImage) = 0;
+       virtual int pushSource(mv_source_h source, mv_source_h graySource, const cv::Mat &grayImage) = 0;
 
        /**
         * @brief Gets event type.
@@ -132,14 +125,9 @@ public:
         * @param [in] isInternal        Interpretation event as internal in surveillance
         * @return @c true on success, false otherwise
         */
-       bool subscribeCallback(
-                       mv_surveillance_event_trigger_h eventTrigger,
-                       long int triggerId,
-                       mv_surveillance_event_occurred_cb callback,
-                       void *userData,
-                       int numberOfPoints,
-                       mv_point_s *roi,
-                       bool isInternal);
+       bool subscribeCallback(mv_surveillance_event_trigger_h eventTrigger, long int triggerId,
+                                                  mv_surveillance_event_occurred_cb callback, void *userData, int numberOfPoints,
+                                                  mv_point_s *roi, bool isInternal);
 
        /**
         * @brief Unsubscribes callback with unique identifier.
@@ -170,13 +158,8 @@ public:
         * @param [in] scaleY         The scale for Y ROI point coordinate
         * @return @c true on success, false otherwise
         */
-       int applyROIToImage(
-                       unsigned char *image,
-                       int imageWidth,
-                       int imageHeight,
-                       bool scalePoints = false,
-                       int scaleX = 1,
-                       int scaleY = 1);
+       int applyROIToImage(unsigned char *image, int imageWidth, int imageHeight, bool scalePoints = false, int scaleX = 1,
+                                               int scaleY = 1);
 
        /**
         * @brief Comparison operator for equal case.
@@ -184,7 +167,7 @@ public:
         * @since_tizen 3.0
         * @return true if event trigger is equal to other, false otherwise
         */
-       virtual bool operator==(const EventTrigger& other) const;
+       virtual bool operator==(const EventTrigger &other) const;
 
        /**
         * @brief Comparison operator for not equal case.
@@ -192,10 +175,11 @@ public:
         * @since_tizen 3.0
         * @return true if event trigger is not equal to other, false otherwise
         */
-       virtual bool operator!=(const EventTrigger& other) const;
+       virtual bool operator!=(const EventTrigger &other) const;
 
 protected:
-       struct CallbackData {
+       struct CallbackData
+       {
                mv_surveillance_event_trigger_h eventTrigger;
 
                mv_surveillance_event_occurred_cb callback;
@@ -222,7 +206,7 @@ protected:
        CallbackDataMap __callbackDataMap;
 };
 
-typedef std::list<EventTrigger*> EventTriggers;
+typedef std::list<EventTrigger *> EventTriggers;
 typedef std::map<int, EventTriggers> EventTriggersMap;
 typedef EventTriggers::const_iterator EventTriggersConstIter;
 typedef EventTriggers::iterator EventTriggersIter;
index e775446..f593f68 100644
 #include "EventResult.h"
 #include "EventDefs.h"
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 /**
  * @class EventResultMovementDetection
  * @brief This class contains movement detection event results.
  *
  * @since_tizen 3.0
  */
-class EventResultMovementDetection : public EventResult {
+class EventResultMovementDetection : public EventResult
+{
 public:
        /**
         * @brief Gets result value.
@@ -61,7 +63,8 @@ public:
  *
  * @since_tizen 3.0
  */
-class EventTriggerMovementDetection : public EventTrigger {
+class EventTriggerMovementDetection : public EventTrigger
+{
 public:
        /**
         * @brief Default constructor.
@@ -76,15 +79,9 @@ public:
         * @param [in] roi               The input array with ROI points
         * @param [in] isInternal        Interpretation event as internal in surveillance
         */
-       EventTriggerMovementDetection(
-               mv_surveillance_event_trigger_h eventTrigger,
-               long int triggerId,
-               int videoStreamId,
-               mv_surveillance_event_occurred_cb callback,
-               void *userData,
-               int numberOfPoints,
-               mv_point_s *roi,
-               bool isInternal);
+       EventTriggerMovementDetection(mv_surveillance_event_trigger_h eventTrigger, long int triggerId, int videoStreamId,
+                                                                 mv_surveillance_event_occurred_cb callback, void *userData, int numberOfPoints,
+                                                                 mv_point_s *roi, bool isInternal);
 
        /**
         * @brief Default destructor.
@@ -111,10 +108,7 @@ public:
         * @param [in] grayImage     The converted to gray scale source
         * @return @c 0 on success, otherwise a negative error value
         */
-       virtual int pushSource(
-                                       mv_source_h source,
-                                       mv_source_h graySource,
-                                       const cv::Mat& grayImage);
+       virtual int pushSource(mv_source_h source, mv_source_h graySource, const cv::Mat &grayImage);
 
        /**
         * @brief Gets event type.
index 4355107..3e9d947 100644
 
 #include <sys/time.h>
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 /**
  * @class EventResultPersonAppearance
 * @brief This class contains person appeared / disappeared event results.
  *
  * @since_tizen 3.0
  */
-class EventResultPersonAppearance : public EventResult {
+class EventResultPersonAppearance : public EventResult
+{
 public:
        /**
         * @brief Gets result value.
@@ -72,7 +74,8 @@ public:
  *
  * @since_tizen 3.0
  */
-class EventTriggerPersonAppearance : public EventTrigger {
+class EventTriggerPersonAppearance : public EventTrigger
+{
 public:
        /**
         * @brief Default constructor.
@@ -87,15 +90,9 @@ public:
         * @param [in] roi               The input array with ROI points
         * @param [in] isInternal        Interpretation event as internal in surveillance
         */
-       EventTriggerPersonAppearance(
-               mv_surveillance_event_trigger_h eventTrigger,
-               long int triggerId,
-               int videoStreamId,
-               mv_surveillance_event_occurred_cb callback,
-               void *userData,
-               int numberOfPoints,
-               mv_point_s *roi,
-               bool isInternal);
+       EventTriggerPersonAppearance(mv_surveillance_event_trigger_h eventTrigger, long int triggerId, int videoStreamId,
+                                                                mv_surveillance_event_occurred_cb callback, void *userData, int numberOfPoints,
+                                                                mv_point_s *roi, bool isInternal);
 
        /**
         * @brief Default destructor.
@@ -122,10 +119,7 @@ public:
         * @param [in] grayImage     The converted to gray scale source
         * @return @c 0 on success, otherwise a negative error value
         */
-       virtual int pushSource(
-                                       mv_source_h source,
-                                       mv_source_h graySource,
-                                       const cv::Mat& grayImage);
+       virtual int pushSource(mv_source_h source, mv_source_h graySource, const cv::Mat &grayImage);
 
        /**
         * @brief Gets event type.
@@ -136,22 +130,17 @@ public:
        virtual std::string getEventType() const;
 
 private:
-       static void movementDetectedCB(
-               mv_surveillance_event_trigger_h event_trigger,
-               mv_source_h source,
-               int video_stream_id,
-               mv_surveillance_result_h event_result,
-               void *user_data);
+       static void movementDetectedCB(mv_surveillance_event_trigger_h event_trigger, mv_source_h source,
+                                                                  int video_stream_id, mv_surveillance_result_h event_result, void *user_data);
 
 private:
-
        void runCallbacks(mv_source_h source);
 
-       std::vector<bool> reinforceTrackedPersons(const CVRectangles& appearedPersons);
+       std::vector<bool> reinforceTrackedPersons(const CVRectangles &appearedPersons);
 
 private:
-
-       class TrackedRectangle {
+       class TrackedRectangle
+       {
        public:
                TrackedRectangle(cv::Rect _rect, struct timeval _appearanceTime);
 
index bcfce95..57f9aea 100644
 
 #include "EventTriggerPersonAppearance.h"
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 /**
  * @class EventResultPersonRecogniton
  * @brief This class contains person recognized event results.
  *
  * @since_tizen 3.0
  */
-class EventResultPersonRecognition : public EventResult {
+class EventResultPersonRecognition : public EventResult
+{
 public:
        /**
         * @brief Gets result value.
@@ -67,7 +69,8 @@ public:
  *
  * @since_tizen 3.0
  */
-class EventTriggerPersonRecognition : public EventTrigger {
+class EventTriggerPersonRecognition : public EventTrigger
+{
 public:
        /**
         * @brief Default constructor.
@@ -82,15 +85,9 @@ public:
         * @param [in] roi               The input array with ROI points
         * @param [in] isInternal        Interpretation event as internal in surveillance
         */
-       EventTriggerPersonRecognition(
-               mv_surveillance_event_trigger_h eventTrigger,
-               long int triggerId,
-               int videoStreamId,
-               mv_surveillance_event_occurred_cb callback,
-               void *userData,
-               int numberOfPoints,
-               mv_point_s *roi,
-               bool isInternal);
+       EventTriggerPersonRecognition(mv_surveillance_event_trigger_h eventTrigger, long int triggerId, int videoStreamId,
+                                                                 mv_surveillance_event_occurred_cb callback, void *userData, int numberOfPoints,
+                                                                 mv_point_s *roi, bool isInternal);
 
        /**
         * @brief Default destructor.
@@ -117,10 +114,7 @@ public:
         * @param [in] grayImage     The converted to gray scale source
         * @return @c 0 on success, otherwise a negative error value
         */
-       virtual int pushSource(
-                                       mv_source_h source,
-                                       mv_source_h graySource,
-                                       const cv::Mat& grayImage);
+       virtual int pushSource(mv_source_h source, mv_source_h graySource, const cv::Mat &grayImage);
 
        /**
         * @brief Gets event type.
@@ -142,10 +136,7 @@ public:
         *                             (value from 0.0 to 1.0).
         * @return @c 0 on success, otherwise a negative error value
         */
-       void setEventResults(
-               mv_rectangle_s faceLocation,
-               int faceLabel,
-               double confidence);
+       void setEventResults(mv_rectangle_s faceLocation, int faceLabel, double confidence);
 
 private:
        mv_face_recognition_model_h __faceRecognitionModel;
@@ -155,21 +146,12 @@ private:
        EventResultPersonRecognition *__eventResult;
 
 private:
-       static void faceDetectedCB(
-                                       mv_source_h source,
-                                       mv_engine_config_h engine_cfg,
-                                       mv_rectangle_s *faces_locations,
-                                       int number_of_faces,
-                                       void *user_data);
-
-       static void faceRecognizedCB(
-                                       mv_source_h source,
-                                       mv_face_recognition_model_h recognition_model,
-                                       mv_engine_config_h engine_cfg,
-                                       mv_rectangle_s *face_location,
-                                       const int *face_label,
-                                       double confidence,
-                                       void *user_data);
+       static void faceDetectedCB(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s *faces_locations,
+                                                          int number_of_faces, void *user_data);
+
+       static void faceRecognizedCB(mv_source_h source, mv_face_recognition_model_h recognition_model,
+                                                                mv_engine_config_h engine_cfg, mv_rectangle_s *face_location, const int *face_label,
+                                                                double confidence, void *user_data);
 };
 
 } /* surveillance */
index b93267a..4ca2804 100644
 #include <opencv2/core.hpp>
 #include <opencv2/tracking.hpp>
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 /**
  * @class    MFTracker
  * @brief    Median Flow tracker implementation.
  *
  * @since_tizen 3.0
  */
-class MFTracker {
+class MFTracker
+{
 public:
-       struct Params {
+       struct Params
+       {
                /**
                 * @brief TrackerMedianFlow algorithm parameters constructor
                 */
@@ -66,7 +69,7 @@ public:
         * @param [out]  result   Result contour
         * @return true if object is tracked, otherwise return false
         */
-       bool track(const cv::Mat& frame, cv::Rect_<float>& result);
+       bool track(const cv::Mat &frame, cv::Rect_<float> &result);
 
        /**
         * @brief Provides the current location of a target.
@@ -74,60 +77,50 @@ public:
         * @since_tizen 3.0
         * @param [in] location  Current location of a target
         */
-       void reinforcement(const cv::Rect_<float>& location);
+       void reinforcement(const cv::Rect_<float> &location);
 
 private:
        bool isInited() const;
 
-       bool init(const cv::Mat& image);
+       bool init(const cv::Mat &image);
 
-       bool update(const cv::Mat& image);
+       bool update(const cv::Mat &image);
 
        float getLastConfidence() const;
 
        cv::Rect_<float> getLastBoundingBox() const;
 
-       bool medianFlowImpl(cv::Mat oldImage, cv::Mat newImage, cv::Rect_<float>& oldBox);
+       bool medianFlowImpl(cv::Mat oldImage, cv::Mat newImage, cv::Rect_<float> &oldBox);
 
-       cv::Rect_<float> vote(
-               const std::vector<cv::Point2f>& oldPoints,
-               const std::vector<cv::Point2f>& newPoints,
-               const cv::Rect_<float>& oldRect,
-               cv::Point2f& mD);
+       cv::Rect_<float> vote(const std::vector<cv::Point2f> &oldPoints, const std::vector<cv::Point2f> &newPoints,
+                                                 const cv::Rect_<float> &oldRect, cv::Point2f &mD);
 
-       void check_FB(
-               std::vector<cv::Mat> newPyramid,
-               const std::vector<cv::Point2f>& oldPoints,
-               const std::vector<cv::Point2f>& newPoints,
-               std::vector<bool>& status);
+       void check_FB(std::vector<cv::Mat> newPyramid, const std::vector<cv::Point2f> &oldPoints,
+                                 const std::vector<cv::Point2f> &newPoints, std::vector<bool> &status);
 
-       void check_NCC(
-               const cv::Mat& oldImage,
-               const cv::Mat& newImage,
-               const std::vector<cv::Point2f>& oldPoints,
-               const std::vector<cv::Point2f>& newPoints,
-               std::vector<bool>& status);
+       void check_NCC(const cv::Mat &oldImage, const cv::Mat &newImage, const std::vector<cv::Point2f> &oldPoints,
+                                  const std::vector<cv::Point2f> &newPoints, std::vector<bool> &status);
 
 private:
-       bool __isInit;                /**< Flag is used to determine the model
+       bool __isInit; /**< Flag is used to determine the model
                                        initialization */
 
-       Params __params;              /**< Parameters used during tracking, see
+       Params __params; /**< Parameters used during tracking, see
                                        @ref TrackerMedianFlow::Params */
 
-       cv::TermCriteria __termcrit;  /**< Terminating criteria for OpenCV
+       cv::TermCriteria __termcrit; /**< Terminating criteria for OpenCV
                                        Lucas–Kanade optical flow algorithm used
                                        during tracking */
 
-       cv::Rect_<float> __boundingBox;  /**< Tracking object bounding box */
+       cv::Rect_<float> __boundingBox; /**< Tracking object bounding box */
 
-       float __confidence;              /**< Confidence that object was tracked
+       float __confidence; /**< Confidence that object was tracked
                                           correctly at the last tracking iteration */
 
-       cv::Mat __image;                 /**< Last image for which tracking was
+       cv::Mat __image; /**< Last image for which tracking was
                                           performed */
 
-       std::vector<cv::Mat> __pyramid;  /**< The pyramid had been calculated for
+       std::vector<cv::Mat> __pyramid; /**< The pyramid had been calculated for
                                           the previous frame(or when
                                           initialize the model) */
 };
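check_FB() above is the median-flow forward-backward test: track points from t to t+1, track the results back to t, and keep only points whose round trip lands near where it started. A standalone sketch with a fixed threshold (MFTracker itself thresholds at the median FB error):

#include <opencv2/video/tracking.hpp>
#include <cmath>
#include <vector>

static std::vector<bool> fb_check(const cv::Mat &prev, const cv::Mat &next, const std::vector<cv::Point2f> &pts)
{
	std::vector<cv::Point2f> fwd, back;
	std::vector<unsigned char> s1, s2;
	std::vector<float> err;
	cv::calcOpticalFlowPyrLK(prev, next, pts, fwd, s1, err);  // t -> t+1
	cv::calcOpticalFlowPyrLK(next, prev, fwd, back, s2, err); // t+1 -> t
	std::vector<bool> keep(pts.size());
	for (size_t i = 0; i < pts.size(); ++i) {
		float d = std::hypot(pts[i].x - back[i].x, pts[i].y - back[i].y);
		keep[i] = s1[i] && s2[i] && d < 1.0f; // illustrative threshold
	}
	return keep;
}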
index 62fb5b7..d884805 100644
 
 #include <opencv2/core.hpp>
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 /**
  * @class    SurveillanceHelper
  * @brief    This class contains surveillance helper interface (common class for
@@ -36,9 +37,9 @@ namespace surveillance {
  *
  * @since_tizen 3.0
  */
-class SurveillanceHelper {
+class SurveillanceHelper
+{
 public:
-
 #ifdef ENABLE_NEON
        /**
         * @brief Converts mediavision source to cv::Mat in gray scale with NEON.
@@ -49,11 +50,10 @@ public:
         * @param [out] cvSource    The outut matrix with gray scaled image
         * @return @c 0 on success, otherwise a negative error value
         */
-       static int convertSourceMVRGB2GrayCVNeon(mv_source_h mvSource, cv::Mat& cvSource);
+       static int convertSourceMVRGB2GrayCVNeon(mv_source_h mvSource, cv::Mat &cvSource);
 #endif
 };
 
-
 } /* surveillance */
 } /* mediavision */
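convertSourceMVRGB2GrayCVNeon() is a NEON fast path for the per-frame gray conversion every trigger consumes; the portable equivalent is one cvtColor call (sketch):

#include <opencv2/imgproc.hpp>

static void rgb_to_gray(const cv::Mat &rgb, cv::Mat &gray)
{
	// 0.299 R + 0.587 G + 0.114 B, the conversion the NEON helper accelerates.
	cv::cvtColor(rgb, gray, cv::COLOR_RGB2GRAY);
}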
 
index 1ad0a8a..b6afffa 100644
@@ -40,17 +40,11 @@ extern "C" {
  * @retval #MEDIA_VISION_ERROR_NONE Successful
  * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
  */
-int mv_absdiff(
-               uint8_t *__restrict__ src1,
-               uint8_t *__restrict__ src2,
-               int width,
-               int height,
-               int stride,
-               uint8_t *__restrict__ dst);
+int mv_absdiff(uint8_t *__restrict__ src1, uint8_t *__restrict__ src2, int width, int height, int stride,
+                          uint8_t *__restrict__ dst);
 
 #ifdef __cplusplus
 }
 #endif /* __cplusplus */
 
 #endif /* __MEDIA_VISION_MV_ABSDIFF_H__ */
-
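mv_absdiff's stride parameter admits padded rows (stride >= width), which is why rows and pixels index differently. A plain scalar reference of the documented behaviour (assumption: the NEON build vectorizes exactly this loop; sketch only):

#include <cstdint>
#include <cstdlib>

static void absdiff_ref(const uint8_t *src1, const uint8_t *src2, int width, int height, int stride, uint8_t *dst)
{
	for (int y = 0; y < height; ++y)
		for (int x = 0; x < width; ++x) {
			int i = y * stride + x; // rows advance by stride, not width
			dst[i] = (uint8_t) std::abs(src1[i] - src2[i]);
		}
}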
index a639c03..d45cc42 100644
@@ -40,17 +40,11 @@ extern "C" {
  * @retval #MEDIA_VISION_ERROR_NONE Successful
  * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
  */
-int mv_apply_mask(
-               uint8_t *src_buffer,
-               uint8_t *__restrict mask,
-               int width,
-               int height,
-               int stride,
-               uint8_t *dst_buffer);
+int mv_apply_mask(uint8_t *src_buffer, uint8_t *__restrict mask, int width, int height, int stride,
+                                 uint8_t *dst_buffer);
 
 #ifdef __cplusplus
 }
 #endif /* __cplusplus */
 
 #endif /* __MEDIA_VISION_MV_APPLY_MASK_H__ */
-
index abc690f..9207316 100644
@@ -40,16 +40,11 @@ extern "C" {
  *
  * @post Free memory for mask_buffer.
  */
-int mv_get_mask_buffer(
-               unsigned int buffer_width,
-               unsigned int buffer_height,
-               mv_point_s *polygon,
-               unsigned int points_number,
-               unsigned char **mask_buffer);
+int mv_get_mask_buffer(unsigned int buffer_width, unsigned int buffer_height, mv_point_s *polygon,
+                                          unsigned int points_number, unsigned char **mask_buffer);
 
 #ifdef __cplusplus
 }
 #endif /* __cplusplus */
 
 #endif /* __MEDIA_VISION_MV_MASK_BUFFER_H__ */
-
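mv_get_mask_buffer rasterizes a polygon ROI into a one-byte-per-pixel mask for the event triggers. An OpenCV sketch of that rasterization (assumptions: 255 inside the polygon, 0 outside):

#include <opencv2/imgproc.hpp>
#include <vector>

static std::vector<unsigned char> make_mask(unsigned int w, unsigned int h, const std::vector<cv::Point> &polygon)
{
	cv::Mat mask = cv::Mat::zeros(h, w, CV_8UC1);
	std::vector<std::vector<cv::Point> > polys = { polygon };
	cv::fillPoly(mask, polys, cv::Scalar(255));
	return std::vector<unsigned char>(mask.data, mask.data + (size_t) w * h);
}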
index 3f7cfd1..734237a 100644
@@ -55,12 +55,9 @@ extern "C" {
  * @see mv_surveillance_event_trigger_s
  * @see mv_surveillance_unsubscribe_event_trigger_open()
  */
-int mv_surveillance_subscribe_event_trigger_open(
-               mv_surveillance_event_trigger_h event_trigger,
-               int video_stream_id,
-               mv_engine_config_h engine_cfg,
-               mv_surveillance_event_occurred_cb callback,
-               void *user_data);
+int mv_surveillance_subscribe_event_trigger_open(mv_surveillance_event_trigger_h event_trigger, int video_stream_id,
+                                                                                                mv_engine_config_h engine_cfg,
+                                                                                                mv_surveillance_event_occurred_cb callback, void *user_data);
 
 /**
  * @brief Allows to unsubscribe from the event and stop calling @a callback.
@@ -82,9 +79,7 @@ int mv_surveillance_subscribe_event_trigger_open(
  * @see mv_surveillance_event_trigger_s
  * @see mv_surveillance_subscribe_event_trigger_open()
  */
-int mv_surveillance_unsubscribe_event_trigger_open(
-               mv_surveillance_event_trigger_h event_trigger,
-               int video_stream_id);
+int mv_surveillance_unsubscribe_event_trigger_open(mv_surveillance_event_trigger_h event_trigger, int video_stream_id);
 
 /**
  * @brief Allows to push source to the event trigger and start calling @a callback.
@@ -102,9 +97,7 @@ int mv_surveillance_unsubscribe_event_trigger_open(
  * @see mv_surveillance_subscribe_event_trigger_open()
  * @see mv_surveillance_unsubscribe_event_trigger_open()
  */
-int mv_surveillance_push_source_open(
-               mv_source_h source,
-               int video_stream_id);
+int mv_surveillance_push_source_open(mv_source_h source, int video_stream_id);
 
 /**
  * @brief Starts traversing through list of supported event types.
@@ -125,9 +118,7 @@ int mv_surveillance_push_source_open(
  * @see mv_surveillance_event_type_cb
  * @see mv_surveillance_foreach_event_result_value_name_open()
  */
-int mv_surveillance_foreach_event_type_open(
-               mv_surveillance_event_type_cb callback,
-               void *user_data);
+int mv_surveillance_foreach_event_type_open(mv_surveillance_event_type_cb callback, void *user_data);
 
 /**
  * @brief Starts traversing through list of supported event result value names.
@@ -154,10 +145,9 @@ int mv_surveillance_foreach_event_type_open(
  * @see mv_surveillance_foreach_event_type_open()
  * @see mv_surveillance_get_result_value_open()
  */
-int mv_surveillance_foreach_event_result_value_name_open(
-               const char *event_type,
-               mv_surveillance_event_result_name_cb callback,
-               void *user_data);
+int mv_surveillance_foreach_event_result_value_name_open(const char *event_type,
+                                                                                                                mv_surveillance_event_result_name_cb callback,
+                                                                                                                void *user_data);
 
 /**
  * @brief Gets result value.
@@ -182,10 +172,7 @@ int mv_surveillance_foreach_event_result_value_name_open(
  * @see mv_surveillance_unsubscribe_event_trigger_open()
  * @see mv_surveillance_query_events_open()
  */
-int mv_surveillance_get_result_value_open(
-               mv_surveillance_result_h result,
-               const char *value_name,
-               void *value);
+int mv_surveillance_get_result_value_open(mv_surveillance_result_h result, const char *value_name, void *value);
 
 #ifdef __cplusplus
 }
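These _open functions back the public mv_surveillance API; the expected call order from an application, sketched with the public names (error handling elided):

#include <mv_surveillance.h>

static void on_event(mv_surveillance_event_trigger_h trigger, mv_source_h source, int video_stream_id,
					 mv_surveillance_result_h result, void *user_data)
{
	/* query values with mv_surveillance_get_result_value() here */
}

static void watch(mv_source_h source)
{
	mv_surveillance_event_trigger_h trigger = NULL;
	mv_surveillance_event_trigger_create(MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, &trigger);
	mv_surveillance_subscribe_event_trigger(trigger, 0, NULL, on_event, NULL);
	mv_surveillance_push_source(source, 0); /* on_event fires when movement is detected */
}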
index d6879f4..4948cf3 100644
 #include "mv_private.h"
 #include <CommonUtils.h>
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 static const int MAX_VALUE_NAME_LENGTH = 255;
 
 EventManager *EventManager::__pInstance = 0;
@@ -39,14 +40,14 @@ EventManagerDestroyer::~EventManagerDestroyer()
        delete __pInstance;
 }
 
-void EventManagerDestroyer::initialize(EventManager* pointer)
+void EventManagerDestroyer::initialize(EventManager *pointer)
 {
        __pInstance = pointer;
 }
 
-EventManager& EventManager::getInstance()
+EventManager &EventManager::getInstance()
 {
-       if(!__pInstance) {
+       if (!__pInstance) {
                __pInstance = new EventManager();
                Destroyer.initialize(__pInstance);
                setSupportedEventTypes();
@@ -58,32 +59,31 @@ EventManager& EventManager::getInstance()
 void EventManager::setSupportedEventTypes()
 {
        /* Add supported event types here */
-       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED]
-               .push_back(MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS);
-       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED]
-               .push_back(MV_SURVEILLANCE_MOVEMENT_REGIONS);
-
-       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED]
-               .push_back(MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER);
-       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED]
-               .push_back(MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER);
-       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED]
-               .push_back(MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER);
-       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED]
-               .push_back(MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS);
-       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED]
-               .push_back(MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS);
-       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED]
-               .push_back(MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS);
-
-       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED]
-               .push_back(MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER);
-       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED]
-               .push_back(MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS);
-       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED]
-               .push_back(MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS);
-       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED]
-               .push_back(MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES);
+       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED].push_back(
+                       MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS);
+       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED].push_back(MV_SURVEILLANCE_MOVEMENT_REGIONS);
+
+       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED].push_back(
+                       MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER);
+       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED].push_back(
+                       MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER);
+       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED].push_back(
+                       MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER);
+       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED].push_back(
+                       MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS);
+       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED].push_back(
+                       MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS);
+       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED].push_back(
+                       MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS);
+
+       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED].push_back(
+                       MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER);
+       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED].push_back(
+                       MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS);
+       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED].push_back(
+                       MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS);
+       SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED].push_back(
+                       MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES);
 }
 
 EventManager::EventManager()
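[Editor's note] Applications never touch the table built above directly; it backs the public enumeration API. A minimal sketch of how it surfaces through <mv_surveillance.h> — the foreach entry points and callback signatures are assumed from the public header and are not part of this diff:

// Sketch: list every supported event type and its result value names.
#include <mv_surveillance.h>
#include <cstdio>

static bool print_result_name(const char *name, void *user_data)
{
	printf("    result value: %s\n", name);
	return true; /* continue iteration */
}

static bool print_event_type(const char *event_type, void *user_data)
{
	printf("event type: %s\n", event_type);
	mv_surveillance_foreach_event_result_name(event_type, print_result_name, NULL);
	return true;
}

int main(void)
{
	return mv_surveillance_foreach_supported_event_type(print_event_type, NULL);
}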
@@ -96,17 +96,10 @@ EventManager::~EventManager()
        ; /* NULL */
 }
 
-int EventManager::registerEvent(
-                                       mv_surveillance_event_trigger_h eventTrigger,
-                                       long int triggerId,
-                                       const char *eventType,
-                                       int videoStreamId,
-                                       mv_engine_config_h engineCfg,
-                                       mv_surveillance_event_occurred_cb callback,
-                                       void *user_data,
-                                       int numberOfPoints,
-                                       mv_point_s *roi,
-                                       bool isInternal)
+int EventManager::registerEvent(mv_surveillance_event_trigger_h eventTrigger, long int triggerId, const char *eventType,
+                                                               int videoStreamId, mv_engine_config_h engineCfg,
+                                                               mv_surveillance_event_occurred_cb callback, void *user_data, int numberOfPoints,
+                                                               mv_point_s *roi, bool isInternal)
 {
        if (NULL == callback || NULL == eventType) {
                LOGE("Input event trigger or callback is NULL. Event registering failed.");
@@ -118,24 +111,16 @@ int EventManager::registerEvent(
        for (; iter != __eventTriggers[videoStreamId].end(); ++iter) {
                if ((*iter)->isCallbackSubscribed(triggerId)) {
                        LOGE("Callback with id %ld is already subscribed. "
-                                       "Event registering failed.", triggerId);
+                                "Event registering failed.",
+                                triggerId);
                        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
                }
        }
 
        /* Add appropriate event trigger here */
-       if (strncmp(eventType, MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED,
-                               MAX_VALUE_NAME_LENGTH) == 0) {
-               EventTriggerPersonAppearance* trigger =
-                       new EventTriggerPersonAppearance(
-                                       eventTrigger,
-                                       triggerId,
-                                       videoStreamId,
-                                       callback,
-                                       user_data,
-                                       numberOfPoints,
-                                       roi,
-                                       isInternal);
+       if (strncmp(eventType, MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED, MAX_VALUE_NAME_LENGTH) == 0) {
+               EventTriggerPersonAppearance *trigger = new EventTriggerPersonAppearance(
+                               eventTrigger, triggerId, videoStreamId, callback, user_data, numberOfPoints, roi, isInternal);
                const int error = trigger->parseEngineConfig(engineCfg);
 
                if (error != MEDIA_VISION_ERROR_NONE) {
@@ -148,31 +133,15 @@ int EventManager::registerEvent(
                EventTriggersIter iter2 = isTriggerExists(trigger, videoStreamId);
 
                if (iter2 != __eventTriggers[videoStreamId].end()) {
-                       (*iter2)->subscribeCallback(
-                                               eventTrigger,
-                                               triggerId,
-                                               callback,
-                                               user_data,
-                                               numberOfPoints,
-                                               roi,
-                                               isInternal);
-
-                                               delete trigger;
+                       (*iter2)->subscribeCallback(eventTrigger, triggerId, callback, user_data, numberOfPoints, roi, isInternal);
+
+                       delete trigger;
                } else {
                        __eventTriggers[videoStreamId].push_back(trigger);
                }
-       } else if (strncmp(eventType, MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED,
-                                               MAX_VALUE_NAME_LENGTH) == 0) {
-               EventTriggerPersonRecognition* trigger =
-                                               new EventTriggerPersonRecognition(
-                                                                               eventTrigger,
-                                                                               triggerId,
-                                                                               videoStreamId,
-                                                                               callback,
-                                                                               user_data,
-                                                                               numberOfPoints,
-                                                                               roi,
-                                                                               isInternal);
+       } else if (strncmp(eventType, MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED, MAX_VALUE_NAME_LENGTH) == 0) {
+               EventTriggerPersonRecognition *trigger = new EventTriggerPersonRecognition(
+                               eventTrigger, triggerId, videoStreamId, callback, user_data, numberOfPoints, roi, isInternal);
 
                const int error = trigger->parseEngineConfig(engineCfg);
                if (error != MEDIA_VISION_ERROR_NONE) {
@@ -185,31 +154,15 @@ int EventManager::registerEvent(
                EventTriggersIter iter2 = isTriggerExists(trigger, videoStreamId);
 
                if (iter2 != __eventTriggers[videoStreamId].end()) {
-                       (*iter2)->subscribeCallback(
-                                               eventTrigger,
-                                               triggerId,
-                                               callback,
-                                               user_data,
-                                               numberOfPoints,
-                                               roi,
-                                               isInternal);
+                       (*iter2)->subscribeCallback(eventTrigger, triggerId, callback, user_data, numberOfPoints, roi, isInternal);
 
                        delete trigger;
                } else {
                        __eventTriggers[videoStreamId].push_back(trigger);
                }
-       } else if (strncmp(eventType, MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED,
-                               MAX_VALUE_NAME_LENGTH) == 0) {
-                       EventTriggerMovementDetection* trigger =
-                                               new EventTriggerMovementDetection(
-                                                                       eventTrigger,
-                                                                       triggerId,
-                                                                       videoStreamId,
-                                                                       callback,
-                                                                       user_data,
-                                                                       numberOfPoints,
-                                                                       roi,
-                                                                       isInternal);
+       } else if (strncmp(eventType, MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, MAX_VALUE_NAME_LENGTH) == 0) {
+               EventTriggerMovementDetection *trigger = new EventTriggerMovementDetection(
+                               eventTrigger, triggerId, videoStreamId, callback, user_data, numberOfPoints, roi, isInternal);
 
                const int error = trigger->parseEngineConfig(engineCfg);
 
@@ -223,14 +176,7 @@ int EventManager::registerEvent(
                EventTriggersIter iter2 = isTriggerExists(trigger, videoStreamId);
 
                if (iter2 != __eventTriggers[videoStreamId].end()) {
-                       (*iter2)->subscribeCallback(
-                                               eventTrigger,
-                                               triggerId,
-                                               callback,
-                                               user_data,
-                                               numberOfPoints,
-                                               roi,
-                                               isInternal);
+                       (*iter2)->subscribeCallback(eventTrigger, triggerId, callback, user_data, numberOfPoints, roi, isInternal);
 
                        delete trigger;
                } else {
@@ -285,12 +231,9 @@ int EventManager::pushSource(mv_source_h source, int videoStreamId)
        unsigned int height = 0;
        mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
 
-       MEDIA_VISION_ASSERT(mv_source_get_width(source, &width),
-                                               "Failed to get the width.");
-       MEDIA_VISION_ASSERT(mv_source_get_height(source, &height),
-                                               "Failed to get the height.");
-       MEDIA_VISION_ASSERT(mv_source_get_colorspace(source, &colorspace),
-                                               "Failed to get the colorspace.");
+       MEDIA_VISION_ASSERT(mv_source_get_width(source, &width), "Failed to get the width.");
+       MEDIA_VISION_ASSERT(mv_source_get_height(source, &height), "Failed to get the height.");
+       MEDIA_VISION_ASSERT(mv_source_get_colorspace(source, &colorspace), "Failed to get the colorspace.");
 
        cv::Mat grayImage;
 
@@ -315,13 +258,8 @@ int EventManager::pushSource(mv_source_h source, int videoStreamId)
                return error;
        }
 
-       error = mv_source_fill_by_buffer(
-                               graySource,
-                               grayImage.data,
-                               grayImage.cols * grayImage.rows,
-                               grayImage.cols,
-                               grayImage.rows,
-                               MEDIA_VISION_COLORSPACE_Y800);
+       error = mv_source_fill_by_buffer(graySource, grayImage.data, grayImage.cols * grayImage.rows, grayImage.cols,
+                                                                        grayImage.rows, MEDIA_VISION_COLORSPACE_Y800);
 
        if (MEDIA_VISION_ERROR_NONE != error) {
                mv_destroy_source(graySource);
@@ -346,7 +284,7 @@ int EventManager::pushSource(mv_source_h source, int videoStreamId)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EventManager::getSupportedEventTypes(StringVector& eventTypes)
+int EventManager::getSupportedEventTypes(StringVector &eventTypes)
 {
        eventTypes.clear();
 
@@ -363,8 +301,7 @@ int EventManager::getSupportedEventTypes(StringVector& eventTypes)
 }
 
 // LCOV_EXCL_START
-int EventManager::getSupportedEventResultValueNames(
-       StringVector& eventResValNames)
+int EventManager::getSupportedEventResultValueNames(StringVector &eventResValNames)
 {
        eventResValNames.clear();
 
@@ -373,10 +310,7 @@ int EventManager::getSupportedEventResultValueNames(
 
        EventTypesMapConstIter etIter = SupportedEventTypes.begin();
        while (etIter != SupportedEventTypes.end()) {
-               eventResValNames.insert(
-                                       eventResValNames.end(),
-                                       etIter->second.begin(),
-                                       etIter->second.end());
+               eventResValNames.insert(eventResValNames.end(), etIter->second.begin(), etIter->second.end());
                ++etIter;
        }
 
@@ -384,9 +318,7 @@ int EventManager::getSupportedEventResultValueNames(
 }
 // LCOV_EXCL_STOP
 
-int EventManager::getSupportedEventResultValueNames(
-       const std::string& eventTypeName,
-       StringVector& eventResValNames)
+int EventManager::getSupportedEventResultValueNames(const std::string &eventTypeName, StringVector &eventResValNames)
 {
        eventResValNames.clear();
 
@@ -402,9 +334,7 @@ int EventManager::getSupportedEventResultValueNames(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-EventTriggersIter EventManager::isTriggerExists(
-               EventTrigger* trigger,
-               int videoStreamId)
+EventTriggersIter EventManager::isTriggerExists(EventTrigger *trigger, int videoStreamId)
 {
        EventTriggersIter iter = __eventTriggers[videoStreamId].begin();
 
@@ -417,4 +347,3 @@ EventTriggersIter EventManager::isTriggerExists(
 
 } /* surveillance */
 } /* mediavision */
-
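[Editor's note] The registerEvent()/pushSource() flow reformatted above is driven from the public C API. A minimal end-to-end sketch, assuming the documented mv_surveillance entry points and an mv_source_h frame already filled by the caller:

// Sketch: subscribe a movement trigger on stream 0 and feed it one frame.
#include <mv_surveillance.h>

static void on_event(mv_surveillance_event_trigger_h trigger, mv_source_h source,
		     int video_stream_id, mv_surveillance_result_h result, void *user_data)
{
	/* read values with mv_surveillance_get_result_value(); see the sketch
	 * after EventResultMovementDetection::getResultValue() further below */
}

int run_once(mv_source_h frame)
{
	mv_surveillance_event_trigger_h trigger = NULL;
	int err = mv_surveillance_event_trigger_create(MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, &trigger);
	if (err != MEDIA_VISION_ERROR_NONE)
		return err;

	/* NULL engine config keeps the defaults; no ROI restricts the frame.
	 * This lands in EventManager::registerEvent() above. */
	err = mv_surveillance_subscribe_event_trigger(trigger, 0, NULL, on_event, NULL);
	if (err == MEDIA_VISION_ERROR_NONE)
		err = mv_surveillance_push_source(frame, 0); /* EventManager::pushSource() */

	mv_surveillance_unsubscribe_event_trigger(trigger, 0);
	mv_surveillance_event_trigger_destroy(trigger);
	return err;
}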
index e9968fc..94642de 100644
 #include <mv_mask_buffer.h>
 #include <mv_apply_mask.h>
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 long int EventTrigger::InternalTriggersCounter = -1l;
 
-EventTrigger::EventTrigger(
-               mv_surveillance_event_trigger_h eventTrigger,
-               long int triggerId,
-               int videoStreamId,
-               mv_surveillance_event_occurred_cb callback,
-               void *userData,
-               int numberOfPoints,
-               mv_point_s *roi,
-               bool isInternal):
-       __videoStreamId(videoStreamId),
-       __roi(numberOfPoints)
+EventTrigger::EventTrigger(mv_surveillance_event_trigger_h eventTrigger, long int triggerId, int videoStreamId,
+                                                  mv_surveillance_event_occurred_cb callback, void *userData, int numberOfPoints,
+                                                  mv_point_s *roi, bool isInternal)
+               : __videoStreamId(videoStreamId), __roi(numberOfPoints)
 {
        CallbackData callbackData;
        callbackData.eventTrigger = eventTrigger;
@@ -64,18 +58,14 @@ bool EventTrigger::isCallbackSubscribed(long int triggerId) const
        return __callbackDataMap.find(triggerId) != __callbackDataMap.end();
 }
 
-bool EventTrigger::subscribeCallback(
-               mv_surveillance_event_trigger_h eventTrigger,
-               long int triggerId,
-               mv_surveillance_event_occurred_cb callback,
-               void *userData,
-               int numberOfPoints,
-               mv_point_s *roi,
-               bool isInternal)
+bool EventTrigger::subscribeCallback(mv_surveillance_event_trigger_h eventTrigger, long int triggerId,
+                                                                        mv_surveillance_event_occurred_cb callback, void *userData, int numberOfPoints,
+                                                                        mv_point_s *roi, bool isInternal)
 {
        if (isCallbackSubscribed(triggerId)) {
                LOGE("Callback with id %ld is already subscribed. "
-                       "Callback subscribing failed.", triggerId);
+                        "Callback subscribing failed.",
+                        triggerId);
                return false;
        }
 
@@ -103,7 +93,8 @@ bool EventTrigger::unsubscribeCallback(long int triggerId)
 
        if (iter == __callbackDataMap.end()) {
                LOGE("Callback with id %ld was not subscribed. "
-                       "Callback unsubscribing failed.", triggerId);
+                        "Callback unsubscribing failed.",
+                        triggerId);
                return false;
        }
 
@@ -120,13 +111,8 @@ bool EventTrigger::isCallbacksEmpty() const
 }
 
 // LCOV_EXCL_START
-int EventTrigger::applyROIToImage(
-                                       unsigned char *image,
-                                       int imageWidth,
-                                       int imageHeight,
-                                       bool scalePoints,
-                                       int scaleX,
-                                       int scaleY)
+int EventTrigger::applyROIToImage(unsigned char *image, int imageWidth, int imageHeight, bool scalePoints, int scaleX,
+                                                                 int scaleY)
 {
        const size_t roiSize = __roi.size();
 
@@ -141,12 +127,7 @@ int EventTrigger::applyROIToImage(
 
                unsigned char *maskBuffer = NULL;
 
-               int error = mv_get_mask_buffer(
-                                               imageWidth,
-                                               imageHeight,
-                                               scaledPoints.data(),
-                                               (int) roiSize,
-                                               &maskBuffer);
+               int error = mv_get_mask_buffer(imageWidth, imageHeight, scaledPoints.data(), (int) roiSize, &maskBuffer);
 
                if (error != MEDIA_VISION_ERROR_NONE || maskBuffer == NULL) {
                        if (maskBuffer != NULL) {
@@ -158,13 +139,7 @@ int EventTrigger::applyROIToImage(
                        return error;
                }
 
-               error = mv_apply_mask(
-                                       image,
-                                       maskBuffer,
-                                       imageWidth / 16 * 16,
-                                       imageHeight,
-                                       imageWidth,
-                                       image);
+               error = mv_apply_mask(image, maskBuffer, imageWidth / 16 * 16, imageHeight, imageWidth, image);
 
                free(maskBuffer);
                maskBuffer = NULL;
@@ -179,14 +154,13 @@ int EventTrigger::applyROIToImage(
 }
 // LCOV_EXCL_STOP
 
-bool EventTrigger::operator==(const EventTrigger& other) const
+bool EventTrigger::operator==(const EventTrigger &other) const
 {
        const std::string currentEventType = this->getEventType();
        const std::string otherEventType = other.getEventType();
 
-       if (__videoStreamId != other.__videoStreamId ||
-                       currentEventType.compare(otherEventType) != 0 ||
-                       __roi.size() != other.__roi.size())
+       if (__videoStreamId != other.__videoStreamId || currentEventType.compare(otherEventType) != 0 ||
+               __roi.size() != other.__roi.size())
                return false;
 
        size_t size = __roi.size();
@@ -197,7 +171,7 @@ bool EventTrigger::operator==(const EventTrigger& other) const
        return true;
 }
 
-bool EventTrigger::operator!=(const EventTrigger& other) const
+bool EventTrigger::operator!=(const EventTrigger &other) const
 {
        return !(*this == other);
 }
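[Editor's note] applyROIToImage() above boils down to two helper calls. A standalone sketch using the same mv_get_mask_buffer()/mv_apply_mask() argument order that appears in the hunks (exact parameter types are assumed); note the width passed to mv_apply_mask() is truncated to a multiple of 16, exactly as in the diff:

// Sketch: zero out everything outside a polygonal ROI on a grayscale buffer.
#include <mv_common.h>
#include <mv_mask_buffer.h>
#include <mv_apply_mask.h>
#include <cstdlib>

int mask_roi(unsigned char *image, int width, int height, mv_point_s *points, int numberOfPoints)
{
	unsigned char *mask = NULL;
	int err = mv_get_mask_buffer(width, height, points, numberOfPoints, &mask);
	if (err != MEDIA_VISION_ERROR_NONE || mask == NULL) {
		free(mask);
		return err;
	}

	/* in-place masking; width rounded down to a multiple of 16 as above */
	err = mv_apply_mask(image, mask, width / 16 * 16, height, width, image);
	free(mask);
	return err;
}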
index 72c387b..d880e8c 100644
 #include "opencv2/highgui.hpp"
 #include "opencv2/imgproc/imgproc_c.h"
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 static const int DEFAULT_DIFF_THRESHOLD = 10;
 
 static const int MAX_VALUE_NAME_LENGTH = 255;
 
-const cv::Mat EventTriggerMovementDetection::__ERODE_KERNEL =
-       cv::getStructuringElement(cv::MORPH_RECT, cv::Size(4, 4));
+const cv::Mat EventTriggerMovementDetection::__ERODE_KERNEL = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(4, 4));
 
 const cv::Mat EventTriggerMovementDetection::__DILATE_KERNEL =
-       cv::getStructuringElement(cv::MORPH_RECT, cv::Size(24, 24));
+               cv::getStructuringElement(cv::MORPH_RECT, cv::Size(24, 24));
 
 static const cv::Rect DEFAULT_RECT = cv::Rect(0, 0, 0, 0);
 
-namespace {
-
-inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst)
+namespace
+{
+inline void convertRectCV2MV(const cv::Rect &src, mv_rectangle_s &dst)
 {
        dst.point.x = src.x;
        dst.point.y = src.y;
@@ -50,7 +50,7 @@ inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst)
        dst.height = src.height;
 }
 
-void mergeOverlappedRects(CVRectangles& rects)
+void mergeOverlappedRects(CVRectangles &rects)
 {
        const size_t rectsSize = rects.size();
 
@@ -61,8 +61,7 @@ void mergeOverlappedRects(CVRectangles& rects)
                        const int area2 = rects[j].area();
                        const int intersectionArea = (rects[i] & rects[j]).area();
 
-                       if (intersectionArea != 0 &&
-                               intersectionArea > std::min(area1, area2) / 2) {
+                       if (intersectionArea != 0 && intersectionArea > std::min(area1, area2) / 2) {
                                rects[j] |= rects[i];
                                rects[i] = DEFAULT_RECT;
                                break;
@@ -73,9 +72,7 @@ void mergeOverlappedRects(CVRectangles& rects)
 
 } /* anonymous namespace */
 
-int EventResultMovementDetection::getResultValue(
-       const char *valueName,
-       void *value) const
+int EventResultMovementDetection::getResultValue(const char *valueName, void *value) const
 {
        if (valueName == NULL) {
                LOGE("Invalid pointer for value name. Getting result value failed.");
@@ -87,13 +84,11 @@ int EventResultMovementDetection::getResultValue(
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       if (strncmp(valueName, MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS,
-                               MAX_VALUE_NAME_LENGTH) == 0) {
-               size_t *const numberOfDetectedMovements = (size_t*) value;
+       if (strncmp(valueName, MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS, MAX_VALUE_NAME_LENGTH) == 0) {
+               size_t *const numberOfDetectedMovements = (size_t *) value;
                *numberOfDetectedMovements = __movementRegions.size();
-       } else if (strncmp(valueName, MV_SURVEILLANCE_MOVEMENT_REGIONS,
-                               MAX_VALUE_NAME_LENGTH) == 0) {
-               mv_rectangle_s *const movementsRegions = (mv_rectangle_s*) value;
+       } else if (strncmp(valueName, MV_SURVEILLANCE_MOVEMENT_REGIONS, MAX_VALUE_NAME_LENGTH) == 0) {
+               mv_rectangle_s *const movementsRegions = (mv_rectangle_s *) value;
 
                const size_t numberOfDetectedMovements = __movementRegions.size();
 
@@ -108,26 +103,14 @@ int EventResultMovementDetection::getResultValue(
        return MEDIA_VISION_ERROR_NONE;
 }
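[Editor's note] Callers retrieve the two value names handled above through mv_surveillance_get_result_value(), fetching the count first and then a correctly sized array; a minimal sketch assuming the public getter's documented behavior:

// Sketch: inside an event callback, pull the movement regions out of the result.
#include <mv_surveillance.h>
#include <vector>

static void on_movement(mv_surveillance_event_trigger_h, mv_source_h, int,
			mv_surveillance_result_h result, void *)
{
	size_t count = 0;
	if (mv_surveillance_get_result_value(result, MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS,
					     &count) != MEDIA_VISION_ERROR_NONE || count == 0)
		return;

	std::vector<mv_rectangle_s> regions(count);
	mv_surveillance_get_result_value(result, MV_SURVEILLANCE_MOVEMENT_REGIONS, regions.data());
	/* regions[i] now holds each detected movement rectangle */
}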
 
-EventTriggerMovementDetection::EventTriggerMovementDetection(
-               mv_surveillance_event_trigger_h eventTrigger,
-               long int triggerId,
-               int videoStreamId,
-               mv_surveillance_event_occurred_cb callback,
-               void *userData,
-               int numberOfPoints,
-               mv_point_s *roi,
-               bool isInternal) : EventTrigger(
-                                                               eventTrigger,
-                                                               triggerId,
-                                                               videoStreamId,
-                                                               callback,
-                                                               userData,
-                                                               numberOfPoints,
-                                                               roi,
-                                                               isInternal),
-               __previousImage(),
-               __eventResult(new EventResultMovementDetection()),
-               __diffThreshold(DEFAULT_DIFF_THRESHOLD)
+EventTriggerMovementDetection::EventTriggerMovementDetection(mv_surveillance_event_trigger_h eventTrigger,
+                                                                                                                        long int triggerId, int videoStreamId,
+                                                                                                                        mv_surveillance_event_occurred_cb callback, void *userData,
+                                                                                                                        int numberOfPoints, mv_point_s *roi, bool isInternal)
+               : EventTrigger(eventTrigger, triggerId, videoStreamId, callback, userData, numberOfPoints, roi, isInternal)
+               , __previousImage()
+               , __eventResult(new EventResultMovementDetection())
+               , __diffThreshold(DEFAULT_DIFF_THRESHOLD)
 {
        ; /* NULL */
 }
@@ -144,10 +127,8 @@ int EventTriggerMovementDetection::parseEngineConfig(mv_engine_config_h engineCo
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       const int error = mv_engine_config_get_int_attribute(
-                       engineConfig,
-                       MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD,
-                       &__diffThreshold);
+       const int error = mv_engine_config_get_int_attribute(engineConfig, MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD,
+                                                                                                                &__diffThreshold);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                LOGE("Getting movement detection threshold from engine configuration failed.");
@@ -157,10 +138,7 @@ int EventTriggerMovementDetection::parseEngineConfig(mv_engine_config_h engineCo
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EventTriggerMovementDetection::pushSource(
-               mv_source_h source,
-               mv_source_h graySource,
-               const cv::Mat& grayImage)
+int EventTriggerMovementDetection::pushSource(mv_source_h source, mv_source_h graySource, const cv::Mat &grayImage)
 {
        if (source == NULL || graySource == NULL || grayImage.empty()) {
                LOGE("Media source is NULL. Pushing source failed.");
@@ -181,18 +159,12 @@ int EventTriggerMovementDetection::pushSource(
        cv::Mat image = grayImage.clone();
 
        const int bufSize = image.cols * image.rows * sizeof(uint8_t);
-       uint8_t *diffBuffer = (uint8_t*) malloc(bufSize * sizeof(uint8_t));
+       uint8_t *diffBuffer = (uint8_t *) malloc(bufSize * sizeof(uint8_t));
        if (diffBuffer != NULL) {
                memset(diffBuffer, 0, bufSize);
        }
 
-       error = mv_absdiff(
-                       image.data,
-                       __previousImage.data,
-                       image.cols,
-                       image.rows,
-                       image.cols,
-                       diffBuffer);
+       error = mv_absdiff(image.data, __previousImage.data, image.cols, image.rows, image.cols, diffBuffer);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                free(diffBuffer);
@@ -208,8 +180,7 @@ int EventTriggerMovementDetection::pushSource(
                return error;
        }
 
-       cv::Mat imgDiff = cv::Mat(cv::Size(image.cols, image.rows),
-                       CV_8UC1, diffBuffer);
+       cv::Mat imgDiff = cv::Mat(cv::Size(image.cols, image.rows), CV_8UC1, diffBuffer);
 
        cv::erode(imgDiff, imgDiff, __ERODE_KERNEL);
        cv::dilate(imgDiff, imgDiff, __DILATE_KERNEL);
@@ -250,12 +221,7 @@ int EventTriggerMovementDetection::pushSource(
                mv_surveillance_event_occurred_cb callback = iter->second.callback;
 
                if (__eventResult->__movementRegions.size() > 0 || iter->second.isInternal)
-                       callback(
-                               iter->second.eventTrigger,
-                               source,
-                               __videoStreamId,
-                               __eventResult,
-                               iter->second.userData);
+                       callback(iter->second.eventTrigger, source, __videoStreamId, __eventResult, iter->second.userData);
        }
 
        return MEDIA_VISION_ERROR_NONE;
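[Editor's note] The pipeline reformatted above is: absolute frame difference, thresholding by __diffThreshold (default 10; the threshold step itself sits between the hunks shown), a 4x4 erode, a 24x24 dilate, contour bounding boxes, then merging of rectangles whose intersection exceeds half of the smaller area. A pure-OpenCV sketch of the same steps, with cv::absdiff standing in for the mv_absdiff helper:

// Sketch: movement rectangles from two grayscale frames.
#include <opencv2/imgproc.hpp>
#include <algorithm>
#include <vector>

std::vector<cv::Rect> detect_movement(const cv::Mat &prev, const cv::Mat &cur, int diffThreshold = 10)
{
	cv::Mat diff;
	cv::absdiff(cur, prev, diff); /* |cur - prev|, as mv_absdiff() */
	cv::threshold(diff, diff, diffThreshold, 255, cv::THRESH_BINARY);

	cv::erode(diff, diff, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(4, 4)));
	cv::dilate(diff, diff, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(24, 24)));

	std::vector<std::vector<cv::Point> > contours;
	cv::findContours(diff, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

	std::vector<cv::Rect> rects;
	for (size_t i = 0; i < contours.size(); ++i)
		rects.push_back(cv::boundingRect(contours[i]));

	/* merge pairs overlapping by more than half the smaller area,
	 * mirroring mergeOverlappedRects(); emptied rects mark merged entries */
	for (size_t i = 0; i < rects.size(); ++i)
		for (size_t j = 0; j < rects.size(); ++j) {
			if (i == j)
				continue;
			const int inter = (rects[i] & rects[j]).area();
			if (inter != 0 && inter > std::min(rects[i].area(), rects[j].area()) / 2) {
				rects[j] |= rects[i];
				rects[i] = cv::Rect();
				break;
			}
		}
	return rects;
}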
index bcd9f31..e8781e9 100644
 #include <sys/time.h>
 #include <unistd.h>
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 // LCOV_EXCL_START
 using namespace cv;
 
@@ -55,26 +56,25 @@ static const float MINIMAL_INTERSECTION = 0.1f;
 
 static const float TRACKING_MARGIN = 0.2f;
 
-static const std::vector<float> DEFAULT_SVM_PEOPLE_DETECTOR =
-               cv::HOGDescriptor::getDefaultPeopleDetector();
-
-namespace {
+static const std::vector<float> DEFAULT_SVM_PEOPLE_DETECTOR = cv::HOGDescriptor::getDefaultPeopleDetector();
 
+namespace
+{
 cv::Rect operator&(cv::Rect r1, cv::Rect_<float> r2)
 {
-       cv::Rect r2int((int)r2.x, (int)r2.y, (int)r2.width, (int)r2.height);
+       cv::Rect r2int((int) r2.x, (int) r2.y, (int) r2.width, (int) r2.height);
 
        return r1 & r2int;
 }
 
 cv::Rect operator|=(cv::Rect r1, cv::Rect_<float> r2)
 {
-       cv::Rect r2int((int)r2.x, (int)r2.y, (int)r2.width, (int)r2.height);
+       cv::Rect r2int((int) r2.x, (int) r2.y, (int) r2.width, (int) r2.height);
 
        return r1 |= r2int;
 }
 
-cv::Rect_<float> cutRectForTracking(const cv::Rect_<float>& rect)
+cv::Rect_<float> cutRectForTracking(const cv::Rect_<float> &rect)
 {
        cv::Rect_<float> res;
 
@@ -89,7 +89,7 @@ cv::Rect_<float> cutRectForTracking(const cv::Rect_<float>& rect)
        return res;
 }
 
-cv::Rect_<float> supplementRectAfterTracking(const cv::Rect_<float>& rect)
+cv::Rect_<float> supplementRectAfterTracking(const cv::Rect_<float> &rect)
 {
        cv::Rect_<float> res;
 
@@ -101,28 +101,26 @@ cv::Rect_<float> supplementRectAfterTracking(const cv::Rect_<float>& rect)
        return res;
 }
 
-float sizeDifferenceFactor(const cv::Rect& r1, const cv::Rect& r2)
+float sizeDifferenceFactor(const cv::Rect &r1, const cv::Rect &r2)
 {
-       float widthDiffFactor = r1.width / (float)r2.width;
-       float heightDiffFactor = r1.height / (float)r2.height;
+       float widthDiffFactor = r1.width / (float) r2.width;
+       float heightDiffFactor = r1.height / (float) r2.height;
 
        if (widthDiffFactor > 1.f)
                widthDiffFactor = 1.f / widthDiffFactor;
        if (heightDiffFactor > 1.f)
                heightDiffFactor = 1.f / heightDiffFactor;
 
-       return widthDiffFactor * heightDiffFactor *
-                       (1.f - fabs(widthDiffFactor - heightDiffFactor));
+       return widthDiffFactor * heightDiffFactor * (1.f - fabs(widthDiffFactor - heightDiffFactor));
 }
 
-bool isPossibleToMerge(const cv::Rect& r1, const cv::Rect& r2)
+bool isPossibleToMerge(const cv::Rect &r1, const cv::Rect &r2)
 {
        return sizeDifferenceFactor(r1, r2) > POSSIBLE_SIZE_DIFFERENCE &&
-                       ((r1.area() + r2.area()) * MINIMAL_INTERSECTION / 2.f) <
-                       (r1 & r2).area();
+                  ((r1.area() + r2.area()) * MINIMAL_INTERSECTION / 2.f) < (r1 & r2).area();
 }
 
-inline void convertRectMV2CV(const mv_rectangle_s& src, cv::Rect& dst)
+inline void convertRectMV2CV(const mv_rectangle_s &src, cv::Rect &dst)
 {
        dst.x = src.point.x;
        dst.y = src.point.y;
@@ -130,7 +128,7 @@ inline void convertRectMV2CV(const mv_rectangle_s& src, cv::Rect& dst)
        dst.height = src.height;
 }
 
-inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst)
+inline void convertRectCV2MV(const cv::Rect &src, mv_rectangle_s &dst)
 {
        dst.point.x = src.x;
        dst.point.y = src.y;
@@ -140,16 +138,14 @@ inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst)
 
 } /* Anonymous namespace*/
 
-EventTriggerPersonAppearance::TrackedRectangle::TrackedRectangle(cv::Rect _rect,
-               struct timeval _appearanceTime)
+EventTriggerPersonAppearance::TrackedRectangle::TrackedRectangle(cv::Rect _rect, struct timeval _appearanceTime)
 {
        rect = _rect;
        appearanceTime = _appearanceTime;
        tracker.reinforcement(cutRectForTracking(rect));
 }
 
-int EventResultPersonAppearance::getResultValue(const char *valueName,
-               void *value) const
+int EventResultPersonAppearance::getResultValue(const char *valueName, void *value) const
 {
        if (valueName == NULL) {
                LOGE("Invalid pointer for value name. Getting result value failed.");
@@ -161,37 +157,31 @@ int EventResultPersonAppearance::getResultValue(const char *valueName,
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER,
-                       MAX_VALUE_NAME_LENGHT) == 0) {
-               size_t * const numberOfAppearedPersons = (size_t*) value;
+       if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER, MAX_VALUE_NAME_LENGHT) == 0) {
+               size_t *const numberOfAppearedPersons = (size_t *) value;
                *numberOfAppearedPersons = __appearedLocations.size();
-       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS,
-                       MAX_VALUE_NAME_LENGHT) == 0) {
-               mv_rectangle_s * const appearedLocations = (mv_rectangle_s*) value;
+       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS, MAX_VALUE_NAME_LENGHT) == 0) {
+               mv_rectangle_s *const appearedLocations = (mv_rectangle_s *) value;
 
                const size_t numberOfAppearedPersons = __appearedLocations.size();
 
                for (size_t i = 0u; i < numberOfAppearedPersons; ++i)
                        appearedLocations[i] = __appearedLocations[i];
-       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER,
-                       MAX_VALUE_NAME_LENGHT) == 0) {
-               size_t * const numberOfTrackedPersons = (size_t*) value;
+       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER, MAX_VALUE_NAME_LENGHT) == 0) {
+               size_t *const numberOfTrackedPersons = (size_t *) value;
                *numberOfTrackedPersons = __trackedLocations.size();
-       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS,
-                       MAX_VALUE_NAME_LENGHT) == 0) {
-               mv_rectangle_s * const trackedLocations = (mv_rectangle_s*) value;
+       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS, MAX_VALUE_NAME_LENGHT) == 0) {
+               mv_rectangle_s *const trackedLocations = (mv_rectangle_s *) value;
 
                const size_t numberOfTrackedPersons = __trackedLocations.size();
 
                for (size_t i = 0u; i < numberOfTrackedPersons; ++i)
                        trackedLocations[i] = __trackedLocations[i];
-       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER,
-                       MAX_VALUE_NAME_LENGHT) == 0) {
-               size_t * const numberOfDisappearedPersons = (size_t*) value;
+       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER, MAX_VALUE_NAME_LENGHT) == 0) {
+               size_t *const numberOfDisappearedPersons = (size_t *) value;
                *numberOfDisappearedPersons = __disappearedLocations.size();
-       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS,
-                       MAX_VALUE_NAME_LENGHT) == 0) {
-               mv_rectangle_s * const disappearedLocations = (mv_rectangle_s*) value;
+       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS, MAX_VALUE_NAME_LENGHT) == 0) {
+               mv_rectangle_s *const disappearedLocations = (mv_rectangle_s *) value;
 
                const size_t numberOfDisappearedPersons = __disappearedLocations.size();
 
@@ -205,44 +195,47 @@ int EventResultPersonAppearance::getResultValue(const char *valueName,
        return MEDIA_VISION_ERROR_NONE;
 }
 
-EventTriggerPersonAppearance::EventTriggerPersonAppearance(
-               mv_surveillance_event_trigger_h eventTrigger, long int triggerId,
-               int videoStreamId, mv_surveillance_event_occurred_cb callback,
-               void *userData, int numberOfPoints, mv_point_s *roi, bool isInternal) :
-                               EventTrigger(eventTrigger, triggerId, videoStreamId, callback, userData,
-                               numberOfPoints, roi, isInternal),
-                               __skipFramesCount(DEFAULT_SKIP_FRAMES_COUNT),
-                               __frameCounter(0), __movementDetectedEventId(InternalTriggersCounter--),
-                               __factorX(1.f), __factorY(1.f), __rectToDetect(ALL_IMAGE_RECT),
-                               __rectToDetectPrevious(ALL_IMAGE_RECT), __trackedRects(),
-                               __appearedRects(), __disappearedRects(), __hogClassifier(),
-                               __eventResult(new EventResultPersonAppearance())
+EventTriggerPersonAppearance::EventTriggerPersonAppearance(mv_surveillance_event_trigger_h eventTrigger,
+                                                                                                                  long int triggerId, int videoStreamId,
+                                                                                                                  mv_surveillance_event_occurred_cb callback, void *userData,
+                                                                                                                  int numberOfPoints, mv_point_s *roi, bool isInternal)
+               : EventTrigger(eventTrigger, triggerId, videoStreamId, callback, userData, numberOfPoints, roi, isInternal)
+               , __skipFramesCount(DEFAULT_SKIP_FRAMES_COUNT)
+               , __frameCounter(0)
+               , __movementDetectedEventId(InternalTriggersCounter--)
+               , __factorX(1.f)
+               , __factorY(1.f)
+               , __rectToDetect(ALL_IMAGE_RECT)
+               , __rectToDetectPrevious(ALL_IMAGE_RECT)
+               , __trackedRects()
+               , __appearedRects()
+               , __disappearedRects()
+               , __hogClassifier()
+               , __eventResult(new EventResultPersonAppearance())
 {
        __hogClassifier.setSVMDetector(DEFAULT_SVM_PEOPLE_DETECTOR);
 
        EventManager::getInstance().registerEvent(NULL, __movementDetectedEventId,
-               MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, videoStreamId, NULL,
-               movementDetectedCB, this, numberOfPoints, roi, true);
+                                                                                         MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, videoStreamId, NULL,
+                                                                                         movementDetectedCB, this, numberOfPoints, roi, true);
 }
 
 EventTriggerPersonAppearance::~EventTriggerPersonAppearance()
 {
-       EventManager::getInstance().unregisterEvent(__movementDetectedEventId,
-                       __videoStreamId);
+       EventManager::getInstance().unregisterEvent(__movementDetectedEventId, __videoStreamId);
 
        delete __eventResult;
 }
 
-int EventTriggerPersonAppearance::parseEngineConfig(
-               mv_engine_config_h engineConfig)
+int EventTriggerPersonAppearance::parseEngineConfig(mv_engine_config_h engineConfig)
 {
        if (NULL == engineConfig) {
                LOGI("Default value for frame skip count was set.");
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       const int error = mv_engine_config_get_int_attribute(engineConfig,
-       MV_SURVEILLANCE_SKIP_FRAMES_COUNT, &__skipFramesCount);
+       const int error =
+                       mv_engine_config_get_int_attribute(engineConfig, MV_SURVEILLANCE_SKIP_FRAMES_COUNT, &__skipFramesCount);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                LOGE("Getting frame skip count from engine configuration failed.");
@@ -252,8 +245,7 @@ int EventTriggerPersonAppearance::parseEngineConfig(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EventTriggerPersonAppearance::pushSource(mv_source_h source,
-               mv_source_h graySource, const cv::Mat& grayImage)
+int EventTriggerPersonAppearance::pushSource(mv_source_h source, mv_source_h graySource, const cv::Mat &grayImage)
 {
        if (source == NULL || graySource == NULL || grayImage.empty()) {
                LOGE("Media source is NULL. Pushing source failed.");
@@ -268,23 +260,20 @@ std::string EventTriggerPersonAppearance::getEventType() const
        return MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED;
 }
 
-void EventTriggerPersonAppearance::movementDetectedCB(
-               mv_surveillance_event_trigger_h /*event_trigger*/, mv_source_h source,
-               int /*video_stream_id*/, mv_surveillance_result_h event_result,
-               void *user_data)
+void EventTriggerPersonAppearance::movementDetectedCB(mv_surveillance_event_trigger_h /*event_trigger*/,
+                                                                                                         mv_source_h source, int /*video_stream_id*/,
+                                                                                                         mv_surveillance_result_h event_result, void *user_data)
 {
-       EventTriggerPersonAppearance *trigger =
-                       (EventTriggerPersonAppearance*) user_data;
+       EventTriggerPersonAppearance *trigger = (EventTriggerPersonAppearance *) user_data;
 
        /* 1. Get input image in grayscale and resize it */
-       EventResultMovementDetection *result =
-                       static_cast<EventResultMovementDetection*>(event_result);
+       EventResultMovementDetection *result = static_cast<EventResultMovementDetection *>(event_result);
 
        cv::Mat resizedImage;
        cv::resize(result->__grayImage, resizedImage, DEFAULT_FRAME_SIZE);
 
-       int error = trigger->applyROIToImage(resizedImage.data, resizedImage.cols,
-                       resizedImage.rows, true, DEFAULT_FRAME_WIDTH, DEFAULT_FRAME_HEIGHT);
+       int error = trigger->applyROIToImage(resizedImage.data, resizedImage.cols, resizedImage.rows, true,
+                                                                                DEFAULT_FRAME_WIDTH, DEFAULT_FRAME_HEIGHT);
 
        if (error != MEDIA_VISION_ERROR_NONE || resizedImage.empty()) {
                trigger->runCallbacks(source);
@@ -317,7 +306,7 @@ void EventTriggerPersonAppearance::movementDetectedCB(
                        trigger->__rectToDetect |= movementRegions[j];
 
                if (trigger->__rectToDetect.width < trigger->__hogClassifier.winSize.width ||
-                               trigger->__rectToDetect.height < trigger->__hogClassifier.winSize.height)
+                       trigger->__rectToDetect.height < trigger->__hogClassifier.winSize.height)
                        trigger->__rectToDetect |= trigger->__rectToDetectPrevious;
        }
 
@@ -349,9 +338,8 @@ void EventTriggerPersonAppearance::movementDetectedCB(
 
                CVRectangles hogRects;
 
-               trigger->__hogClassifier.detectMultiScale(
-                               resizedImage(trigger->__rectToDetect), hogRects, 0,
-                               DEFAULT_DETECTION_STEPS, cv::Size(32, 32), 1.059, 5);
+               trigger->__hogClassifier.detectMultiScale(resizedImage(trigger->__rectToDetect), hogRects, 0,
+                                                                                                 DEFAULT_DETECTION_STEPS, cv::Size(32, 32), 1.059, 5);
 
                const size_t hogRectsSize = hogRects.size();
 
@@ -361,8 +349,7 @@ void EventTriggerPersonAppearance::movementDetectedCB(
                }
 
                /* Merge appearance and tracked rectangles */
-               std::vector<bool> appearedBusyRects =
-                               trigger->reinforceTrackedPersons(hogRects);
+               std::vector<bool> appearedBusyRects = trigger->reinforceTrackedPersons(hogRects);
 
                /* Person appearance */
                trigger->__appearedRects.clear();
@@ -375,9 +362,8 @@ void EventTriggerPersonAppearance::movementDetectedCB(
                gettimeofday(&time, NULL);
                TrackedRectanglesIter trackRectIter = trigger->__trackedRects.begin();
                for (; trackRectIter != trigger->__trackedRects.end(); ++trackRectIter) {
-                       time_t currentLifetime = 1000000 *
-                                       (time.tv_sec - trackRectIter->appearanceTime.tv_sec) +
-                                       (time.tv_usec - trackRectIter->appearanceTime.tv_usec);
+                       time_t currentLifetime = 1000000 * (time.tv_sec - trackRectIter->appearanceTime.tv_sec) +
+                                                                        (time.tv_usec - trackRectIter->appearanceTime.tv_usec);
 
                        if (currentLifetime > PERSON_LIFETIME) {
                                trigger->__disappearedRects.push_back(trackRectIter->rect);
@@ -408,42 +394,33 @@ void EventTriggerPersonAppearance::movementDetectedCB(
 
        CVRectanglesConstIter appearedIter = trigger->__appearedRects.begin();
        for (; appearedIter != trigger->__appearedRects.end(); ++appearedIter)
-               trigger->__trackedRects.push_back(
-                               TrackedRectangle(*appearedIter, time));
+               trigger->__trackedRects.push_back(TrackedRectangle(*appearedIter, time));
        trigger->__appearedRects.clear();
 
        /* 7. Clear array of disappeared persons */
        trigger->__disappearedRects.clear();
 }
 
-std::vector<bool> EventTriggerPersonAppearance::reinforceTrackedPersons(
-               const CVRectangles& appearedPersons)
+std::vector<bool> EventTriggerPersonAppearance::reinforceTrackedPersons(const CVRectangles &appearedPersons)
 {
        const size_t hogRectsSize = appearedPersons.size();
        std::vector<bool> appearedBusyRects(hogRectsSize, false);
        if (__trackedRects.size() > 0u && hogRectsSize > 0u) {
                TrackedRectanglesIter trackRectIter = __trackedRects.begin();
-               std::vector<std::vector<size_t> > intersectionAreas(
-                               __trackedRects.size(), std::vector<size_t>(hogRectsSize, 0u));
-               std::vector<std::vector<size_t> > confidence(
-                               __trackedRects.size(), std::vector<size_t>(hogRectsSize, 0u));
+               std::vector<std::vector<size_t> > intersectionAreas(__trackedRects.size(),
+                                                                                                                       std::vector<size_t>(hogRectsSize, 0u));
+               std::vector<std::vector<size_t> > confidence(__trackedRects.size(), std::vector<size_t>(hogRectsSize, 0u));
 
                /* Merge tracked -> appearance */
-               for (size_t trIdx = 0u; trackRectIter != __trackedRects.end();
-                               ++trackRectIter, ++trIdx) {
+               for (size_t trIdx = 0u; trackRectIter != __trackedRects.end(); ++trackRectIter, ++trIdx) {
                        size_t bestIdx = 0u;
                        bool haveRes = false;
                        for (size_t apIdx = 0u; apIdx < hogRectsSize; ++apIdx) {
-                               intersectionAreas[trIdx][apIdx] =
-                                       static_cast<size_t>(
-                                               static_cast<unsigned int>(
-                                                       (appearedPersons[apIdx] & trackRectIter->rect).area()
-                                               )
-                                       );
+                               intersectionAreas[trIdx][apIdx] = static_cast<size_t>(
+                                               static_cast<unsigned int>((appearedPersons[apIdx] & trackRectIter->rect).area()));
 
                                if (intersectionAreas[trIdx][apIdx] > 0 &&
-                                               (intersectionAreas[trIdx][apIdx] >
-                                               intersectionAreas[trIdx][bestIdx] || !haveRes)) {
+                                       (intersectionAreas[trIdx][apIdx] > intersectionAreas[trIdx][bestIdx] || !haveRes)) {
                                        bestIdx = apIdx;
                                        haveRes = true;
                                }
@@ -451,7 +428,7 @@ std::vector<bool> EventTriggerPersonAppearance::reinforceTrackedPersons(
 
                        if (haveRes)
                                confidence[trIdx][bestIdx] += intersectionAreas[trIdx][bestIdx] *
-                                       sizeDifferenceFactor(trackRectIter->rect, appearedPersons[bestIdx]);
+                                                                                         sizeDifferenceFactor(trackRectIter->rect, appearedPersons[bestIdx]);
                }
 
                /* Merge appearance -> tracked */
@@ -461,11 +438,9 @@ std::vector<bool> EventTriggerPersonAppearance::reinforceTrackedPersons(
                        bool haveRes = false;
                        cv::Rect bestTrackedRect = trackRectIter->rect;
 
-                       for (size_t trIdx = 0u; trackRectIter != __trackedRects.end();
-                                       ++trackRectIter, ++trIdx) {
+                       for (size_t trIdx = 0u; trackRectIter != __trackedRects.end(); ++trackRectIter, ++trIdx) {
                                if (intersectionAreas[trIdx][apIdx] > 0 &&
-                                               (intersectionAreas[trIdx][apIdx] >
-                                               intersectionAreas[bestIdx][apIdx] || !haveRes)) {
+                                       (intersectionAreas[trIdx][apIdx] > intersectionAreas[bestIdx][apIdx] || !haveRes)) {
                                        bestIdx = trIdx;
                                        bestTrackedRect = trackRectIter->rect;
                                        haveRes = true;
@@ -474,26 +449,23 @@ std::vector<bool> EventTriggerPersonAppearance::reinforceTrackedPersons(
 
                        if (haveRes)
                                confidence[bestIdx][apIdx] += intersectionAreas[bestIdx][apIdx] *
-                                       sizeDifferenceFactor(bestTrackedRect, appearedPersons[apIdx]);
+                                                                                         sizeDifferenceFactor(bestTrackedRect, appearedPersons[apIdx]);
                }
 
                /* Final merge */
                trackRectIter = __trackedRects.begin();
-               for (size_t trIdx = 0u; trackRectIter != __trackedRects.end();
-                               ++trackRectIter, ++trIdx) {
+               for (size_t trIdx = 0u; trackRectIter != __trackedRects.end(); ++trackRectIter, ++trIdx) {
                        bool haveRes = false;
                        size_t bestIdx = 0u;
 
                        for (size_t apIdx = 0u; apIdx < hogRectsSize; ++apIdx) {
-                               if (!appearedBusyRects[apIdx] && (!haveRes ||
-                                               confidence[trIdx][apIdx] > confidence[trIdx][bestIdx])) {
+                               if (!appearedBusyRects[apIdx] && (!haveRes || confidence[trIdx][apIdx] > confidence[trIdx][bestIdx])) {
                                        bestIdx = apIdx;
                                        haveRes = true;
                                }
                        }
 
-                       if (isPossibleToMerge(trackRectIter->rect, appearedPersons[bestIdx]) &&
-                                       haveRes) {
+                       if (isPossibleToMerge(trackRectIter->rect, appearedPersons[bestIdx]) && haveRes) {
                                appearedBusyRects[bestIdx] = true;
 
                                struct timeval time;
@@ -501,19 +473,16 @@ std::vector<bool> EventTriggerPersonAppearance::reinforceTrackedPersons(
 
                                trackRectIter->appearanceTime = time;
                                trackRectIter->rect = appearedPersons[bestIdx];
-                               trackRectIter->tracker.reinforcement(
-                                               cutRectForTracking(trackRectIter->rect));
+                               trackRectIter->tracker.reinforcement(cutRectForTracking(trackRectIter->rect));
                        }
                }
 
                for (size_t apIdx = 0u; apIdx < hogRectsSize; ++apIdx) {
                        if (!appearedBusyRects[apIdx]) {
                                trackRectIter = __trackedRects.begin();
-                               for (;trackRectIter != __trackedRects.end();
-                                               ++trackRectIter) {
-                                       if (isPossibleToMerge(trackRectIter->rect,
-                                                       appearedPersons[apIdx]) && (appearedPersons[apIdx].area() / 2.f) <
-                                                       (appearedPersons[apIdx] & trackRectIter->rect).area()) {
+                               for (; trackRectIter != __trackedRects.end(); ++trackRectIter) {
+                                       if (isPossibleToMerge(trackRectIter->rect, appearedPersons[apIdx]) &&
+                                               (appearedPersons[apIdx].area() / 2.f) < (appearedPersons[apIdx] & trackRectIter->rect).area()) {
                                                appearedBusyRects[apIdx] = true;
                                                break;
                                        }
@@ -535,8 +504,7 @@ void EventTriggerPersonAppearance::runCallbacks(mv_source_h source)
        __eventResult->__appearedLocations.resize(appearedLocationsSize);
 
        for (size_t i = 0u; i < appearedLocationsSize; ++i) {
-               convertRectCV2MV(__appearedRects[i],
-                               __eventResult->__appearedLocations[i]);
+               convertRectCV2MV(__appearedRects[i], __eventResult->__appearedLocations[i]);
                __eventResult->__appearedLocations[i].point.x /= __factorX;
                __eventResult->__appearedLocations[i].point.y /= __factorY;
                __eventResult->__appearedLocations[i].width /= __factorX;
@@ -547,8 +515,7 @@ void EventTriggerPersonAppearance::runCallbacks(mv_source_h source)
        __eventResult->__disappearedLocations.resize(disappearedLocationsSize);
 
        for (size_t i = 0u; i < disappearedLocationsSize; ++i) {
-               convertRectCV2MV(__disappearedRects[i],
-                               __eventResult->__disappearedLocations[i]);
+               convertRectCV2MV(__disappearedRects[i], __eventResult->__disappearedLocations[i]);
                __eventResult->__disappearedLocations[i].point.x /= __factorX;
                __eventResult->__disappearedLocations[i].point.y /= __factorY;
                __eventResult->__disappearedLocations[i].width /= __factorX;
@@ -560,8 +527,7 @@ void EventTriggerPersonAppearance::runCallbacks(mv_source_h source)
 
        TrackedRectanglesConstIter trackedIter = __trackedRects.begin();
        for (size_t i = 0u; i < trackedLocationsSize; ++i, ++trackedIter) {
-               convertRectCV2MV(trackedIter->rect,
-                               __eventResult->__trackedLocations[i]);
+               convertRectCV2MV(trackedIter->rect, __eventResult->__trackedLocations[i]);
                __eventResult->__trackedLocations[i].point.x /= __factorX;
                __eventResult->__trackedLocations[i].point.y /= __factorY;
                __eventResult->__trackedLocations[i].width /= __factorX;
@@ -574,11 +540,9 @@ void EventTriggerPersonAppearance::runCallbacks(mv_source_h source)
        for (; iter != __callbackDataMap.end(); ++iter) {
                mv_surveillance_event_occurred_cb callback = iter->second.callback;
 
-
-               if (appearedLocationsSize > 0 || disappearedLocationsSize > 0
-                               || trackedLocationsSize > 0 || iter->second.isInternal)
-                       callback(iter->second.eventTrigger, source, __videoStreamId,
-                                       __eventResult, iter->second.userData);
+               if (appearedLocationsSize > 0 || disappearedLocationsSize > 0 || trackedLocationsSize > 0 ||
+                       iter->second.isInternal)
+                       callback(iter->second.eventTrigger, source, __videoStreamId, __eventResult, iter->second.userData);
        }
 }
 // LCOV_EXCL_STOP
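[Editor's note] The detector wired up above is OpenCV's stock HOG people detector, run over the (resized) movement region with the scale and window parameters visible in the reformatted detectMultiScale() call. A standalone sketch; the 8x8 winStride is an assumption, since DEFAULT_DETECTION_STEPS is defined outside the hunks shown:

// Sketch: HOG people detection over a region of interest.
#include <opencv2/objdetect.hpp>
#include <vector>

std::vector<cv::Rect> detect_people(const cv::Mat &gray, const cv::Rect &roi)
{
	cv::HOGDescriptor hog;
	hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

	std::vector<cv::Rect> found;
	/* hitThreshold 0, winStride 8x8 (assumed), padding 32x32,
	 * scale 1.059, finalThreshold 5 — per the call in the diff */
	hog.detectMultiScale(gray(roi), found, 0, cv::Size(8, 8), cv::Size(32, 32), 1.059, 5);

	/* detections are ROI-relative; shift back to full-image coordinates */
	for (size_t i = 0; i < found.size(); ++i)
		found[i] += roi.tl();
	return found;
}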
index c367082..7a23796 100644
 
 #include <sstream>
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 static const int MAX_VALUE_NAME_LENGHT = 255;
 
-namespace {
-
-template <typename T>
-std::string numberToString(T Number)
+namespace
+{
+template<typename T> std::string numberToString(T Number)
 {
        std::ostringstream ss;
        ss << Number;
@@ -40,9 +40,7 @@ std::string numberToString(T Number)
 
 } /* Anonymous namespace*/
 
-int EventResultPersonRecognition::getResultValue(
-       const char *valueName,
-       void *value) const
+int EventResultPersonRecognition::getResultValue(const char *valueName, void *value) const
 {
        if (valueName == NULL) {
                LOGE("Invalid pointer for value name. Getting result value failed.");
@@ -56,29 +54,21 @@ int EventResultPersonRecognition::getResultValue(
 
        const size_t numberOfPersons = __locations.size();
 
-       if (strncmp(valueName,
-                               MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER,
-                               MAX_VALUE_NAME_LENGHT) == 0) {
-               size_t *outNumberOfPersons = (size_t*) value;
+       if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER, MAX_VALUE_NAME_LENGHT) == 0) {
+               size_t *outNumberOfPersons = (size_t *) value;
                *outNumberOfPersons = numberOfPersons;
-       } else if (strncmp(valueName,
-                                               MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS,
-                                               MAX_VALUE_NAME_LENGHT) == 0) {
-               mv_rectangle_s *locations = (mv_rectangle_s*) value;
+       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS, MAX_VALUE_NAME_LENGHT) == 0) {
+               mv_rectangle_s *locations = (mv_rectangle_s *) value;
 
                for (size_t i = 0; i < numberOfPersons; ++i)
                        locations[i] = __locations[i];
-       } else if (strncmp(valueName,
-                                               MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS,
-                                               MAX_VALUE_NAME_LENGHT) == 0) {
-               int *labels = (int*) value;
+       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS, MAX_VALUE_NAME_LENGHT) == 0) {
+               int *labels = (int *) value;
 
                for (size_t i = 0; i < numberOfPersons; ++i)
                        labels[i] = __faceLabels[i];
-       } else if (strncmp(valueName,
-                                               MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES,
-                                               MAX_VALUE_NAME_LENGHT) == 0) {
-               double *confidences = (double*) value;
+       } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES, MAX_VALUE_NAME_LENGHT) == 0) {
+               double *confidences = (double *) value;
 
                for (size_t i = 0; i < numberOfPersons; ++i)
                        confidences[i] = __confidences[i];
@@ -90,25 +80,14 @@ int EventResultPersonRecognition::getResultValue(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-EventTriggerPersonRecognition::EventTriggerPersonRecognition(
-               mv_surveillance_event_trigger_h eventTrigger,
-               long int triggerId,
-               int videoStreamId,
-               mv_surveillance_event_occurred_cb callback,
-               void *userData,
-               int numberOfPoints,
-               mv_point_s *roi,
-               bool isInternal) : EventTrigger(eventTrigger,
-                                                                               triggerId,
-                                                                               videoStreamId,
-                                                                               callback,
-                                                                               userData,
-                                                                               numberOfPoints,
-                                                                               roi,
-                                                                               isInternal),
-       __faceRecognitionModel(NULL),
-       __lastFrame(NULL),
-       __eventResult(new EventResultPersonRecognition())
+EventTriggerPersonRecognition::EventTriggerPersonRecognition(mv_surveillance_event_trigger_h eventTrigger,
+                                                                                                                        long int triggerId, int videoStreamId,
+                                                                                                                        mv_surveillance_event_occurred_cb callback, void *userData,
+                                                                                                                        int numberOfPoints, mv_point_s *roi, bool isInternal)
+               : EventTrigger(eventTrigger, triggerId, videoStreamId, callback, userData, numberOfPoints, roi, isInternal)
+               , __faceRecognitionModel(NULL)
+               , __lastFrame(NULL)
+               , __eventResult(new EventResultPersonRecognition())
 {
        ; /* NULL */
 }
@@ -119,7 +98,8 @@ EventTriggerPersonRecognition::~EventTriggerPersonRecognition()
                const int err = mv_face_recognition_model_destroy(__faceRecognitionModel);
                if (MEDIA_VISION_ERROR_NONE != err)
                        LOGE("Error while trying to delete face recognition model when "
-                                       "event trigger had been destroyed. Error code: %i.", err);
+                                "event trigger had been destroyed. Error code: %i.",
+                                err);
        }
 
        delete __eventResult;
@@ -134,10 +114,8 @@ int EventTriggerPersonRecognition::parseEngineConfig(mv_engine_config_h engineCo
 
        char *modelPath = NULL;
 
-       int error = mv_engine_config_get_string_attribute(
-                       engineConfig,
-                       MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH,
-                       &modelPath);
+       int error = mv_engine_config_get_string_attribute(engineConfig, MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH,
+                                                                                                         &modelPath);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                if (modelPath != NULL) {
@@ -155,8 +133,7 @@ int EventTriggerPersonRecognition::parseEngineConfig(mv_engine_config_h engineCo
        error = mv_face_recognition_model_load(modelPath, &recognitionModel);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Loading recognition model from file %s failed.",
-                               modelPath);
+               LOGE("Loading recognition model from file %s failed.", modelPath);
 
                if (modelPath != NULL) {
                        free(modelPath);
@@ -166,7 +143,8 @@ int EventTriggerPersonRecognition::parseEngineConfig(mv_engine_config_h engineCo
                const int err = mv_face_recognition_model_destroy(recognitionModel);
                if (MEDIA_VISION_ERROR_NONE != err) {
                        LOGE("Error while trying to delete face recognition model."
-                                       " Error code: %i.", err);
+                                " Error code: %i.",
+                                err);
                }
 
                return error;
@@ -176,7 +154,8 @@ int EventTriggerPersonRecognition::parseEngineConfig(mv_engine_config_h engineCo
                error = mv_face_recognition_model_destroy(__faceRecognitionModel);
                if (MEDIA_VISION_ERROR_NONE != error) {
                        LOGE("Error while trying to delete old face recognition model when "
-                                       "new model is trying to be loaded. Error code: %i.", error);
+                                "new model is trying to be loaded. Error code: %i.",
+                                error);
                }
        }
 
@@ -184,7 +163,8 @@ int EventTriggerPersonRecognition::parseEngineConfig(mv_engine_config_h engineCo
 
        if (NULL == __faceRecognitionModel) {
                LOGE("Failed to load face recognition model. Check %s attribute of the "
-                               "engine config.", MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH);
+                        "engine config.",
+                        MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH);
 
                if (modelPath != NULL) {
                        free(modelPath);
@@ -202,10 +182,7 @@ int EventTriggerPersonRecognition::parseEngineConfig(mv_engine_config_h engineCo
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int EventTriggerPersonRecognition::pushSource(
-               mv_source_h source,
-               mv_source_h graySource,
-               const cv::Mat& grayImage)
+int EventTriggerPersonRecognition::pushSource(mv_source_h source, mv_source_h graySource, const cv::Mat &grayImage)
 {
        if (source == NULL || graySource == NULL || grayImage.empty()) {
                LOGE("Media source is NULL. Pushing source failed.");
@@ -267,8 +244,7 @@ int EventTriggerPersonRecognition::pushSource(
                return error;
        }
 
-       error = mv_source_fill_by_buffer(sourceCopy, data_buffer, buffer_size,
-                       width, height, colorspace);
+       error = mv_source_fill_by_buffer(sourceCopy, data_buffer, buffer_size, width, height, colorspace);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                mv_destroy_source(sourceCopy);
@@ -315,42 +291,30 @@ std::string EventTriggerPersonRecognition::getEventType() const
        return MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED;
 }
 
-void EventTriggerPersonRecognition::setEventResults(
-               mv_rectangle_s faceLocation,
-               int faceLabel,
-               double confidence)
+void EventTriggerPersonRecognition::setEventResults(mv_rectangle_s faceLocation, int faceLabel, double confidence)
 {
        __eventResult->__locations.push_back(faceLocation);
        __eventResult->__faceLabels.push_back(faceLabel);
        __eventResult->__confidences.push_back(confidence);
 }
 
-void EventTriggerPersonRecognition::faceDetectedCB(
-               mv_source_h source,
-               mv_engine_config_h /*engine_cfg*/,
-               mv_rectangle_s *faces_locations,
-               int number_of_faces,
-               void *user_data)
+void EventTriggerPersonRecognition::faceDetectedCB(mv_source_h source, mv_engine_config_h /*engine_cfg*/,
+                                                                                                  mv_rectangle_s *faces_locations, int number_of_faces,
+                                                                                                  void *user_data)
 {
        if (NULL == user_data) {
                LOGE("Invalid user data passed");
                return;
        }
 
-       EventTriggerPersonRecognition *trigger =
-               (EventTriggerPersonRecognition*)user_data;
+       EventTriggerPersonRecognition *trigger = (EventTriggerPersonRecognition *) user_data;
 
        int location_idx = 0;
        for (; location_idx < number_of_faces; ++location_idx) {
                LOGI("Start surveillance face recognition");
 
-               const int error = mv_face_recognize(
-                               source,
-                               trigger->__faceRecognitionModel,
-                               NULL,
-                               &faces_locations[location_idx],
-                               faceRecognizedCB,
-                               trigger);
+               const int error = mv_face_recognize(source, trigger->__faceRecognitionModel, NULL,
+                                                                                       &faces_locations[location_idx], faceRecognizedCB, trigger);
 
                if (error != MEDIA_VISION_ERROR_NONE) {
                        LOGW("Face recognition for one model failed. Continue");
@@ -366,31 +330,22 @@ void EventTriggerPersonRecognition::faceDetectedCB(
                mv_surveillance_event_occurred_cb callback = iter->second.callback;
 
                if (trigger->__eventResult->__locations.size() > 0 || iter->second.isInternal)
-                       callback(
-                               iter->second.eventTrigger,
-                               trigger->__lastFrame,
-                               trigger->__videoStreamId,
-                               trigger->__eventResult,
-                               iter->second.userData);
+                       callback(iter->second.eventTrigger, trigger->__lastFrame, trigger->__videoStreamId, trigger->__eventResult,
+                                        iter->second.userData);
        }
 }
 
-void EventTriggerPersonRecognition::faceRecognizedCB(
-               mv_source_h source,
-               mv_face_recognition_model_h /*recognition_model*/,
-               mv_engine_config_h /*engine_cfg*/,
-               mv_rectangle_s *face_location,
-               const int *face_label,
-               double confidence,
-               void *user_data)
+void EventTriggerPersonRecognition::faceRecognizedCB(mv_source_h source,
+                                                                                                        mv_face_recognition_model_h /*recognition_model*/,
+                                                                                                        mv_engine_config_h /*engine_cfg*/, mv_rectangle_s *face_location,
+                                                                                                        const int *face_label, double confidence, void *user_data)
 {
        if (source == NULL || face_location == NULL || face_label == NULL) {
                LOGI("Face wasn't recognized");
                return;
        }
 
-       EventTriggerPersonRecognition *trigger =
-               (EventTriggerPersonRecognition*) user_data;
+       EventTriggerPersonRecognition *trigger = (EventTriggerPersonRecognition *) user_data;
 
        trigger->setEventResults(*face_location, *face_label, confidence);
 }
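Editor's note: a caller-side sketch of consuming the values that EventResultPersonRecognition::getResultValue() serves, with the mv_surveillance_event_occurred_cb signature inferred from the callback invocations above. Error handling is elided; the value-name keys are the ones this file handles:

	#include <mv_surveillance.h>
	#include <stdlib.h>

	static void on_person_recognized(mv_surveillance_event_trigger_h trigger, mv_source_h source,
									 int video_stream_id, mv_surveillance_result_h result, void *user_data)
	{
		size_t count = 0;
		mv_surveillance_get_result_value(result, MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER, &count);
		if (count == 0)
			return;

		/* getResultValue() copies into caller-provided arrays, so allocate them first. */
		mv_rectangle_s *locations = (mv_rectangle_s *) malloc(sizeof(mv_rectangle_s) * count);
		int *labels = (int *) malloc(sizeof(int) * count);
		double *confidences = (double *) malloc(sizeof(double) * count);

		mv_surveillance_get_result_value(result, MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS, locations);
		mv_surveillance_get_result_value(result, MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS, labels);
		mv_surveillance_get_result_value(result, MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES, confidences);

		/* ... use the three parallel arrays, then release them ... */
		free(locations);
		free(labels);
		free(confidences);
	}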
index 32179ff..c7d19a8 100644
 #include "opencv2/video/tracking.hpp"
 #include "opencv2/imgproc.hpp"
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 // LCOV_EXCL_START
-namespace {
+namespace
+{
 const float FLOATEPS = 10e-6f;
 
-template<typename T>
-T getMedian(std::vector<T>& values, int size = -1)
+template<typename T> T getMedian(std::vector<T> &values, int size = -1)
 {
        if (size == -1)
-               size = (int)values.size();
+               size = (int) values.size();
 
        std::vector<T> copy(values.begin(), values.begin() + size);
        std::sort(copy.begin(), copy.end());
-       if (size%2 == 0) {
-               return (copy[size / 2 - 1] + copy[size/2]) / ((T)2.0);
+       if (size % 2 == 0) {
+               return (copy[size / 2 - 1] + copy[size / 2]) / ((T) 2.0);
        } else {
                return copy[(size - 1) / 2];
        }
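Editor's note: getMedian() above sorts a copy of the first size elements and, for even counts, averages the two middle values; the default size = -1 covers the whole vector. A quick worked example, assuming T = float:

	std::vector<float> v { 4.f, 1.f, 3.f, 2.f, 9.f };
	float m5 = getMedian(v);    // sorted copy {1,2,3,4,9} -> 3.0
	float m4 = getMedian(v, 4); // prefix {4,1,3,2} sorts to {1,2,3,4} -> (2+3)/2 = 2.5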
@@ -56,15 +57,14 @@ MFTracker::Params::Params()
        mPyrMaxLevel = 5;
 }
 
-MFTracker::MFTracker(Params params) :
-       __isInit(false),
-       __params(params),
-       __termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.3),
-       __confidence(0.0f)
-{
-}
+MFTracker::MFTracker(Params params)
+               : __isInit(false)
+               , __params(params)
+               , __termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.3)
+               , __confidence(0.0f)
+{}
 
-bool MFTracker::track(const cv::Mat& frame, cv::Rect_<float>& result)
+bool MFTracker::track(const cv::Mat &frame, cv::Rect_<float> &result)
 {
        if (!__isInit) {
                if (__boundingBox.width <= 0 || __boundingBox.height <= 0)
@@ -83,30 +83,26 @@ bool MFTracker::track(const cv::Mat& frame, cv::Rect_<float>& result)
        return true;
 }
 
-void MFTracker::reinforcement(const cv::Rect_<float>& location)
+void MFTracker::reinforcement(const cv::Rect_<float> &location)
 {
        __isInit = false;
 
        __boundingBox = location;
 }
 
-bool MFTracker::init(const cv::Mat& image)
+bool MFTracker::init(const cv::Mat &image)
 {
        if (image.empty())
                return false;
 
        image.copyTo(__image);
-       buildOpticalFlowPyramid(
-               __image,
-               __pyramid,
-               __params.mWindowSize,
-               __params.mPyrMaxLevel);
+       buildOpticalFlowPyramid(__image, __pyramid, __params.mWindowSize, __params.mPyrMaxLevel);
 
        __isInit = true;
        return __isInit;
 }
 
-bool MFTracker::update(const cv::Mat& image)
+bool MFTracker::update(const cv::Mat &image)
 {
        if (!__isInit || image.empty())
                return false;
@@ -129,7 +125,7 @@ bool MFTracker::update(const cv::Mat& image)
        cv::Mat oldImage = __image;
 
        cv::Rect_<float> oldBox = __boundingBox;
-       if(!medianFlowImpl(oldImage, image, oldBox))
+       if (!medianFlowImpl(oldImage, image, oldBox))
                return false;
 
        image.copyTo(__image);
@@ -152,8 +148,7 @@ cv::Rect_<float> MFTracker::getLastBoundingBox() const
        return __boundingBox;
 }
 
-bool MFTracker::medianFlowImpl(
-       cv::Mat oldImage_gray, cv::Mat newImage_gray, cv::Rect_<float>& oldBox)
+bool MFTracker::medianFlowImpl(cv::Mat oldImage_gray, cv::Mat newImage_gray, cv::Rect_<float> &oldBox)
 {
        std::vector<cv::Point2f> pointsToTrackOld, pointsToTrackNew;
 
@@ -161,9 +156,8 @@ bool MFTracker::medianFlowImpl(
        const float gridYStep = oldBox.height / __params.mPointsInGrid;
        for (int i = 0; i < __params.mPointsInGrid; i++)
                for (int j = 0; j < __params.mPointsInGrid; j++) {
-                       pointsToTrackOld.push_back(
-                               cv::Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j,
-                                                       oldBox.y + .5f*gridYStep + 1.f*gridYStep*i));
+                       pointsToTrackOld.push_back(cv::Point2f(oldBox.x + .5f * gridXStep + 1.f * gridXStep * j,
+                                                                                                  oldBox.y + .5f * gridYStep + 1.f * gridYStep * i));
                }
 
        const size_t numberOfPointsToTrackOld = pointsToTrackOld.size();
@@ -171,21 +165,10 @@ bool MFTracker::medianFlowImpl(
        std::vector<float> errors(numberOfPointsToTrackOld);
 
        std::vector<cv::Mat> tempPyramid;
-       buildOpticalFlowPyramid(
-               newImage_gray,
-               tempPyramid,
-               __params.mWindowSize,
-               __params.mPyrMaxLevel);
-
-       calcOpticalFlowPyrLK(__pyramid,
-                       tempPyramid,
-                       pointsToTrackOld,
-                       pointsToTrackNew,
-                       status,
-                       errors,
-                       __params.mWindowSize,
-                       __params.mPyrMaxLevel,
-                       __termcrit);
+       buildOpticalFlowPyramid(newImage_gray, tempPyramid, __params.mWindowSize, __params.mPyrMaxLevel);
+
+       calcOpticalFlowPyrLK(__pyramid, tempPyramid, pointsToTrackOld, pointsToTrackNew, status, errors,
+                                                __params.mWindowSize, __params.mPyrMaxLevel, __termcrit);
 
        std::vector<cv::Point2f> di;
        for (size_t idx = 0u; idx < numberOfPointsToTrackOld; idx++)
@@ -193,15 +176,8 @@ bool MFTracker::medianFlowImpl(
                        di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]);
 
        std::vector<bool> filter_status;
-       check_FB(tempPyramid,
-                       pointsToTrackOld,
-                       pointsToTrackNew,
-                       filter_status);
-       check_NCC(oldImage_gray,
-                       newImage_gray,
-                       pointsToTrackOld,
-                       pointsToTrackNew,
-                       filter_status);
+       check_FB(tempPyramid, pointsToTrackOld, pointsToTrackNew, filter_status);
+       check_NCC(oldImage_gray, newImage_gray, pointsToTrackOld, pointsToTrackNew, filter_status);
 
        for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++)
                if (!filter_status[idx]) {
@@ -215,8 +191,7 @@ bool MFTracker::medianFlowImpl(
                return false;
 
        cv::Point2f mDisplacement;
-       cv::Rect_<float> boxCandidate =
-               vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement);
+       cv::Rect_<float> boxCandidate = vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement);
 
        std::vector<float> displacements;
        for (size_t idx = 0u; idx < di.size(); idx++) {
@@ -224,8 +199,7 @@ bool MFTracker::medianFlowImpl(
                displacements.push_back(sqrt(di[idx].ddot(di[idx])));
        }
 
-       __confidence =
-               (10.f - getMedian(displacements, static_cast<int>(displacements.size()))) / 10.f;
+       __confidence = (10.f - getMedian(displacements, static_cast<int>(displacements.size()))) / 10.f;
 
        if (__confidence < 0.f) {
                __confidence = 0.f;
@@ -237,23 +211,18 @@ bool MFTracker::medianFlowImpl(
        return true;
 }
 
-cv::Rect_<float> MFTracker::vote(
-       const std::vector<cv::Point2f>& oldPoints,
-       const std::vector<cv::Point2f>& newPoints,
-       const cv::Rect_<float>& oldRect,
-       cv::Point2f& mD)
+cv::Rect_<float> MFTracker::vote(const std::vector<cv::Point2f> &oldPoints, const std::vector<cv::Point2f> &newPoints,
+                                                                const cv::Rect_<float> &oldRect, cv::Point2f &mD)
 {
        cv::Rect_<float> newRect;
-       cv::Point2f newCenter(
-               oldRect.x + oldRect.width / 2.f,
-               oldRect.y + oldRect.height / 2.f);
+       cv::Point2f newCenter(oldRect.x + oldRect.width / 2.f, oldRect.y + oldRect.height / 2.f);
 
-       const int n = (int)oldPoints.size();
-       std::vector<float> buf(std::max(n*(n-1) / 2, 3), 0.f);
+       const int n = (int) oldPoints.size();
+       std::vector<float> buf(std::max(n * (n - 1) / 2, 3), 0.f);
 
-       if(oldPoints.size() == 1) {
-               newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x;
-               newRect.y = oldRect.y+newPoints[0].y-oldPoints[0].y;
+       if (oldPoints.size() == 1) {
+               newRect.x = oldRect.x + newPoints[0].x - oldPoints[0].x;
+               newRect.y = oldRect.y + newPoints[0].y - oldPoints[0].y;
                newRect.width = oldRect.width;
                newRect.height = oldRect.height;
 
@@ -293,20 +262,17 @@ cv::Rect_<float> MFTracker::vote(
                        ctr++;
                }
 
-       float scale = getMedian(buf, n*(n-1) / 2);
+       float scale = getMedian(buf, n * (n - 1) / 2);
        newRect.x = newCenter.x - scale * oldRect.width / 2.f;
-       newRect.y = newCenter.y-scale * oldRect.height / 2.f;
+       newRect.y = newCenter.y - scale * oldRect.height / 2.f;
        newRect.width = scale * oldRect.width;
        newRect.height = scale * oldRect.height;
 
        return newRect;
 }
 
-void MFTracker::check_FB(
-       std::vector<cv::Mat> newPyramid,
-       const std::vector<cv::Point2f>& oldPoints,
-       const std::vector<cv::Point2f>& newPoints,
-       std::vector<bool>& status)
+void MFTracker::check_FB(std::vector<cv::Mat> newPyramid, const std::vector<cv::Point2f> &oldPoints,
+                                                const std::vector<cv::Point2f> &newPoints, std::vector<bool> &status)
 {
        const size_t numberOfOldPoints = oldPoints.size();
 
@@ -318,15 +284,8 @@ void MFTracker::check_FB(
        std::vector<float> FBerror(numberOfOldPoints);
        std::vector<cv::Point2f> pointsToTrackReprojection;
 
-       calcOpticalFlowPyrLK(newPyramid,
-                       __pyramid,
-                       newPoints,
-                       pointsToTrackReprojection,
-                       LKstatus,
-                       errors,
-                       __params.mWindowSize,
-                       __params.mPyrMaxLevel,
-                       __termcrit);
+       calcOpticalFlowPyrLK(newPyramid, __pyramid, newPoints, pointsToTrackReprojection, LKstatus, errors,
+                                                __params.mWindowSize, __params.mPyrMaxLevel, __termcrit);
 
        for (size_t idx = 0u; idx < numberOfOldPoints; idx++)
                FBerror[idx] = l2distance(oldPoints[idx], pointsToTrackReprojection[idx]);
@@ -336,12 +295,8 @@ void MFTracker::check_FB(
                status[idx] = (FBerror[idx] < FBerrorMedian);
 }
 
-void MFTracker::check_NCC(
-       const cv::Mat& oldImage,
-       const cv::Mat& newImage,
-       const std::vector<cv::Point2f>& oldPoints,
-       const std::vector<cv::Point2f>& newPoints,
-       std::vector<bool>& status)
+void MFTracker::check_NCC(const cv::Mat &oldImage, const cv::Mat &newImage, const std::vector<cv::Point2f> &oldPoints,
+                                                 const std::vector<cv::Point2f> &newPoints, std::vector<bool> &status)
 {
        std::vector<float> NCC(oldPoints.size(), 0.f);
        cv::Size patch(30, 30);
@@ -360,12 +315,11 @@ void MFTracker::check_NCC(
                const float prod = p1.dot(p2);
                const float sq1 = sqrt(n1 * n1 - s1 * s1 / N);
                const float sq2 = sqrt(n2 * n2 - s2 * s2 / N);
-               NCC[idx] = (sq2 == 0 ? sq1 / std::abs(sq1)
-                                       : (prod - s1 * s2 / N) / sq1 / sq2);
+               NCC[idx] = (sq2 == 0 ? sq1 / std::abs(sq1) : (prod - s1 * s2 / N) / sq1 / sq2);
        }
 
        float median = getMedian(NCC) - FLOATEPS;
-       for(size_t idx = 0u; idx < oldPoints.size(); idx++)
+       for (size_t idx = 0u; idx < oldPoints.size(); idx++)
                status[idx] = status[idx] && (NCC[idx] > median);
 }
 
index 98e2360..f1351db 100644
 #include "opencv2/highgui.hpp"
 #include "opencv2/imgproc/imgproc_c.h"
 
-namespace mediavision {
-namespace surveillance {
-
+namespace mediavision
+{
+namespace surveillance
+{
 #ifdef ENABLE_NEON
-int SurveillanceHelper::convertSourceMVRGB2GrayCVNeon(
-               mv_source_h mvSource,
-               cv::Mat& cvSource)
+int SurveillanceHelper::convertSourceMVRGB2GrayCVNeon(mv_source_h mvSource, cv::Mat &cvSource)
 {
        MEDIA_VISION_INSTANCE_CHECK(mvSource);
 
@@ -39,14 +38,10 @@ int SurveillanceHelper::convertSourceMVRGB2GrayCVNeon(
 
        mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
 
-       MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width),
-                                               "Failed to get the width.");
-       MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height),
-                                               "Failed to get the height.");
-       MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace),
-                                               "Failed to get the colorspace.");
-       MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &src, &bufferSize),
-                                               "Failed to get the buffer size.");
+       MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), "Failed to get the width.");
+       MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), "Failed to get the height.");
+       MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), "Failed to get the colorspace.");
+       MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &src, &bufferSize), "Failed to get the buffer size.");
 
        if (colorspace != MEDIA_VISION_COLORSPACE_RGB888) {
                LOGE("Error: mv_source has unsupported colorspace.");
@@ -57,49 +52,51 @@ int SurveillanceHelper::convertSourceMVRGB2GrayCVNeon(
        const unsigned int cvSourceSize = width * height;
 
 #if defined(__aarch64__)
-       asm volatile ("lsr  %2, %2, #3     \n"
-               "# channel multimpliers:       \n"
-               "mov         w4, #28           \n"
-               "mov         w5, #151          \n"
-               "mov         w6, #77           \n"
-               "dup         v3.8b, w4         \n"
-               "dup         v4.8b, w5         \n"
-               "dup         v5.8b, w6         \n"
-               ".loop:                        \n"
-               "# load 8 pixels:              \n"
-               "ld3         {v0.8b,v1.8b,v2.8b}, [%0],#24 \n"
-               "# conversion:                             \n"
-               "umull       v7.8h, v0.8b, v3.8b           \n"
-               "umlal       v7.8h, v1.8b, v4.8b           \n"
-               "umlal       v7.8h, v2.8b, v5.8b           \n"
-               "# shift and store:                        \n"
-               "shrn        v6.8b, v7.8h, #8              \n"
-               "st1         {v6.8b}, [%1],#8              \n"
-               "subs        %2, %2, #1                    \n"
-               "bne         .loop             \n"::"r" (src), "r" (cvSource.data), "r" (cvSourceSize)
-               :"memory", "w4", "w5", "w6");
+       asm volatile("lsr  %2, %2, #3     \n"
+                                "# channel multimpliers:       \n"
+                                "mov         w4, #28           \n"
+                                "mov         w5, #151          \n"
+                                "mov         w6, #77           \n"
+                                "dup         v3.8b, w4         \n"
+                                "dup         v4.8b, w5         \n"
+                                "dup         v5.8b, w6         \n"
+                                ".loop:                        \n"
+                                "# load 8 pixels:              \n"
+                                "ld3         {v0.8b,v1.8b,v2.8b}, [%0],#24 \n"
+                                "# conversion:                             \n"
+                                "umull       v7.8h, v0.8b, v3.8b           \n"
+                                "umlal       v7.8h, v1.8b, v4.8b           \n"
+                                "umlal       v7.8h, v2.8b, v5.8b           \n"
+                                "# shift and store:                        \n"
+                                "shrn        v6.8b, v7.8h, #8              \n"
+                                "st1         {v6.8b}, [%1],#8              \n"
+                                "subs        %2, %2, #1                    \n"
+                                "bne         .loop             \n" ::"r"(src),
+                                "r"(cvSource.data), "r"(cvSourceSize)
+                                : "memory", "w4", "w5", "w6");
 #else
-       asm volatile ("lsr  %2, %2, #3     \n"
-               "# channel multimpliers:       \n"
-               "mov         r4, #77           \n"
-               "mov         r5, #151          \n"
-               "mov         r6, #28           \n"
-               "vdup.8      d3, r4            \n"
-               "vdup.8      d4, r5            \n"
-               "vdup.8      d5, r6            \n"
-               ".loop:                        \n"
-               "# load 8 pixels:              \n"
-               "vld3.8      {d0-d2}, [%0]!    \n"
-               "# conversion:                 \n"
-               "vmull.u8    q7, d0, d3        \n"
-               "vmlal.u8    q7, d1, d4        \n"
-               "vmlal.u8    q7, d2, d5        \n"
-               "# shift and store:            \n"
-               "vshrn.u16   d6, q7, #8        \n"
-               "vst1.8      {d6}, [%1]!       \n"
-               "subs        %2, %2, #1        \n"
-               "bne         .loop             \n"::"r" (src), "r" (cvSource.data), "r" (cvSourceSize)
-               :"memory", "r4", "r5", "r6");
+       asm volatile("lsr  %2, %2, #3     \n"
+                                "# channel multimpliers:       \n"
+                                "mov         r4, #77           \n"
+                                "mov         r5, #151          \n"
+                                "mov         r6, #28           \n"
+                                "vdup.8      d3, r4            \n"
+                                "vdup.8      d4, r5            \n"
+                                "vdup.8      d5, r6            \n"
+                                ".loop:                        \n"
+                                "# load 8 pixels:              \n"
+                                "vld3.8      {d0-d2}, [%0]!    \n"
+                                "# conversion:                 \n"
+                                "vmull.u8    q7, d0, d3        \n"
+                                "vmlal.u8    q7, d1, d4        \n"
+                                "vmlal.u8    q7, d2, d5        \n"
+                                "# shift and store:            \n"
+                                "vshrn.u16   d6, q7, #8        \n"
+                                "vst1.8      {d6}, [%1]!       \n"
+                                "subs        %2, %2, #1        \n"
+                                "bne         .loop             \n" ::"r"(src),
+                                "r"(cvSource.data), "r"(cvSourceSize)
+                                : "memory", "r4", "r5", "r6");
 #endif
 
        return MEDIA_VISION_ERROR_NONE;
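Editor's note: aside from indentation, the NEON block is unchanged; it still converts eight RGB888 pixels per iteration into 8-bit luma with fixed-point weights and a >>8 shift. Note that the two paths load the per-channel weights in opposite orders (28/151/77 on aarch64, 77/151/28 on ARM32). A plain scalar equivalent using the ARM32 ordering, shown for reference only (the file's non-NEON fallback is outside this diff):

	#include <stdint.h>

	/* gray = (77*R + 151*G + 28*B) >> 8, the same fixed-point weights as the asm.
	 * Assumes interleaved RGB888; swap the first and last weights for BGR. */
	static void rgb888_to_gray_scalar(const uint8_t *src, uint8_t *dst, unsigned int pixels)
	{
		for (unsigned int i = 0; i < pixels; ++i) {
			const uint8_t r = src[3 * i + 0];
			const uint8_t g = src[3 * i + 1];
			const uint8_t b = src[3 * i + 2];
			dst[i] = (uint8_t) ((77u * r + 151u * g + 28u * b) >> 8);
		}
	}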
index 416016a..c740598 100644
 #include <arm_neon.h>
 #endif
 
-int mv_absdiff(
-               uint8_t *__restrict__ src1,
-               uint8_t *__restrict__ src2,
-               int width,
-               int height,
-               int stride,
-               uint8_t *__restrict__ dst)
+int mv_absdiff(uint8_t *__restrict__ src1, uint8_t *__restrict__ src2, int width, int height, int stride,
+                          uint8_t *__restrict__ dst)
 {
-       if (src1 == NULL || src2 == NULL || width <= 0 || height <= 0 ||
-                       stride <= 0 || dst == NULL) {
+       if (src1 == NULL || src2 == NULL || width <= 0 || height <= 0 || stride <= 0 || dst == NULL) {
                LOGE("Wrong input parameter. Aplpying mask failed.");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
@@ -50,12 +44,12 @@ int mv_absdiff(
        for (; row < height; ++row) {
 #ifdef ENABLE_NEON
                for (column = 0; column < batch_columns_count; ++column) {
-                       uint8x16_t gray1 = vld1q_u8 (src1);
-                       uint8x16_t gray2 = vld1q_u8 (src2);
+                       uint8x16_t gray1 = vld1q_u8(src1);
+                       uint8x16_t gray2 = vld1q_u8(src2);
 
                        uint8x16_t dst_temp = vabdq_u8(gray1, gray2);
 
-                       vst1q_u8 (dst, dst_temp);
+                       vst1q_u8(dst, dst_temp);
 
                        src1 += batch_size;
                        src2 += batch_size;
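Editor's note: only intrinsic-call spacing changes here; the loop still computes a per-byte absolute difference sixteen pixels at a time via vabdq_u8. The scalar equivalent of one row, shown for reference (the non-NEON branch of the file is not part of this diff, so this is an illustration, not a quote):

	#include <stdint.h>
	#include <stdlib.h>

	/* Per-pixel equivalent of vabdq_u8: dst[i] = |src1[i] - src2[i]|. */
	static inline void absdiff_row_scalar(const uint8_t *src1, const uint8_t *src2,
										  uint8_t *dst, int width)
	{
		for (int i = 0; i < width; ++i)
			dst[i] = (uint8_t) abs((int) src1[i] - (int) src2[i]);
	}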
index f156d07..f6c0103 100644
 #include <arm_neon.h>
 #endif
 
-int mv_apply_mask(
-       uint8_t *src_buffer,
-       uint8_t *__restrict mask,
-       int width,
-       int height,
-       int stride,
-       uint8_t *dst_buffer)
+int mv_apply_mask(uint8_t *src_buffer, uint8_t *__restrict mask, int width, int height, int stride, uint8_t *dst_buffer)
 {
-       if (src_buffer == NULL || mask == NULL || width <= 0 || height <= 0 ||
-                       stride <= 0 || dst_buffer == NULL) {
+       if (src_buffer == NULL || mask == NULL || width <= 0 || height <= 0 || stride <= 0 || dst_buffer == NULL) {
                LOGE("Wrong input parameter. Aplpying mask failed.");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
index 98d252b..45bf8d7 100644
 #include <stdio.h>
 #include <stdlib.h>
 
-int mv_get_mask_buffer(
-       unsigned int buffer_width,
-       unsigned int buffer_height,
-       mv_point_s *polygon,
-       unsigned int points_number,
-       unsigned char **mask_buffer)
+int mv_get_mask_buffer(unsigned int buffer_width, unsigned int buffer_height, mv_point_s *polygon,
+                                          unsigned int points_number, unsigned char **mask_buffer)
 {
-       if (buffer_width == 0u || buffer_height == 0u ||
-                       polygon == NULL || points_number == 0u || mask_buffer == NULL) {
+       if (buffer_width == 0u || buffer_height == 0u || polygon == NULL || points_number == 0u || mask_buffer == NULL) {
                LOGE("Wrong input parameter. Getting mask buffer failed.");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
        const unsigned int buffer_size = buffer_width * buffer_height;
-       unsigned char* buffer = (unsigned char*) malloc(sizeof(unsigned char) * buffer_size);
+       unsigned char *buffer = (unsigned char *) malloc(sizeof(unsigned char) * buffer_size);
        if (buffer == NULL) {
                LOGE("*buffer is null");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
@@ -63,7 +58,6 @@ int mv_get_mask_buffer(
                        min_y = polygon[k].y;
        }
 
-
        for (k = 0u; k < buffer_size; ++k) {
                bool inside_polygon = false;
 
@@ -77,10 +71,9 @@ int mv_get_mask_buffer(
 
                for (i = 0u, j = points_number - 1; i < points_number; j = i++) {
                        if (((polygon[i].y > test_y) != (polygon[j].y > test_y)) &&
-                                       ((float) test_x < (float) (polygon[j].x - polygon[i].x) *
-                                       (test_y - polygon[i].y) /
-                                       (polygon[j].y - polygon[i].y) +
-                                       polygon[i].x)) {
+                               ((float) test_x <
+                                (float) (polygon[j].x - polygon[i].x) * (test_y - polygon[i].y) / (polygon[j].y - polygon[i].y) +
+                                                polygon[i].x)) {
                                inside_polygon = !inside_polygon;
                        }
                }
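Editor's note: the reflowed condition above is the standard even-odd (ray-casting) point-in-polygon test: a horizontal ray from (test_x, test_y) toward +x toggles inside_polygon once per crossed edge. Since the outer loop runs per buffer pixel, the whole routine costs O(buffer_size * points_number). Isolated from the buffer loop, the same test reads as follows (sketch using the mv_point_s type from mv_common.h):

	#include <stdbool.h>
	#include <mv_common.h>

	/* Even-odd rule: count crossings of a +x ray from (test_x, test_y). */
	static bool point_in_polygon(const mv_point_s *polygon, unsigned int n, int test_x, int test_y)
	{
		bool inside = false;

		for (unsigned int i = 0, j = n - 1; i < n; j = i++) {
			if (((polygon[i].y > test_y) != (polygon[j].y > test_y)) &&
				((float) test_x < (float) (polygon[j].x - polygon[i].x) * (test_y - polygon[i].y) /
										 (polygon[j].y - polygon[i].y) + polygon[i].x))
				inside = !inside;
		}

		return inside;
	}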
index e01b76a..45b9cf1 100644
@@ -28,9 +28,7 @@
 
 static size_t __mv_surveillance_id_counter = 0;
 
-int mv_surveillance_event_trigger_create(
-               const char *event_type,
-               mv_surveillance_event_trigger_h * trigger)
+int mv_surveillance_event_trigger_create(const char *event_type, mv_surveillance_event_trigger_h *trigger)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
@@ -39,8 +37,7 @@ int mv_surveillance_event_trigger_create(
        MEDIA_VISION_FUNCTION_ENTER();
 
        mv_surveillance_event_trigger_s *handle =
-                       (mv_surveillance_event_trigger_s *) malloc(
-                                       sizeof(mv_surveillance_event_trigger_s));
+                       (mv_surveillance_event_trigger_s *) malloc(sizeof(mv_surveillance_event_trigger_s));
        if (NULL == handle) {
                LOGE("[%s] malloc fail", __func__);
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
@@ -60,16 +57,14 @@ int mv_surveillance_event_trigger_create(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_surveillance_event_trigger_destroy(
-               mv_surveillance_event_trigger_h trigger)
+int mv_surveillance_event_trigger_destroy(mv_surveillance_event_trigger_h trigger)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(trigger);
        MEDIA_VISION_FUNCTION_ENTER();
 
-       mv_surveillance_event_trigger_s *handle =
-                       (mv_surveillance_event_trigger_s *) trigger;
+       mv_surveillance_event_trigger_s *handle = (mv_surveillance_event_trigger_s *) trigger;
        free(handle->event_type);
        free(handle->roi);
        free((mv_surveillance_event_trigger_s *) trigger);
@@ -78,9 +73,7 @@ int mv_surveillance_event_trigger_destroy(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_surveillance_get_event_trigger_type(
-               mv_surveillance_event_trigger_h trigger,
-               char **event_type)
+int mv_surveillance_get_event_trigger_type(mv_surveillance_event_trigger_h trigger, char **event_type)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
@@ -88,18 +81,15 @@ int mv_surveillance_get_event_trigger_type(
        MEDIA_VISION_NULL_ARG_CHECK(event_type);
        MEDIA_VISION_FUNCTION_ENTER();
 
-       mv_surveillance_event_trigger_s *handle =
-                       (mv_surveillance_event_trigger_s *)trigger;
+       mv_surveillance_event_trigger_s *handle = (mv_surveillance_event_trigger_s *) trigger;
        *event_type = strndup(handle->event_type, 255);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_surveillance_set_event_trigger_roi(
-               mv_surveillance_event_trigger_h trigger,
-               int number_of_points,
-               mv_point_s *roi)
+int mv_surveillance_set_event_trigger_roi(mv_surveillance_event_trigger_h trigger, int number_of_points,
+                                                                                 mv_point_s *roi)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
@@ -112,8 +102,7 @@ int mv_surveillance_set_event_trigger_roi(
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
 
-       mv_surveillance_event_trigger_s *handle =
-                       (mv_surveillance_event_trigger_s *)trigger;
+       mv_surveillance_event_trigger_s *handle = (mv_surveillance_event_trigger_s *) trigger;
 
        if (handle->roi) {
                free(handle->roi);
@@ -121,7 +110,7 @@ int mv_surveillance_set_event_trigger_roi(
        }
 
        handle->number_of_roi_points = number_of_points;
-       handle->roi = (mv_point_s*) malloc(sizeof(mv_point_s) * number_of_points);
+       handle->roi = (mv_point_s *) malloc(sizeof(mv_point_s) * number_of_points);
 
        if (NULL == handle->roi) {
                LOGE("[%s] malloc fail", __func__);
@@ -138,10 +127,8 @@ int mv_surveillance_set_event_trigger_roi(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_surveillance_get_event_trigger_roi(
-               mv_surveillance_event_trigger_h trigger,
-               int *number_of_points,
-               mv_point_s ** roi)
+int mv_surveillance_get_event_trigger_roi(mv_surveillance_event_trigger_h trigger, int *number_of_points,
+                                                                                 mv_point_s **roi)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
@@ -150,8 +137,7 @@ int mv_surveillance_get_event_trigger_roi(
        MEDIA_VISION_NULL_ARG_CHECK(roi);
        MEDIA_VISION_FUNCTION_ENTER();
 
-       mv_surveillance_event_trigger_s *handle =
-                       (mv_surveillance_event_trigger_s *) trigger;
+       mv_surveillance_event_trigger_s *handle = (mv_surveillance_event_trigger_s *) trigger;
 
        *number_of_points = handle->number_of_roi_points;
        if (0 == *number_of_points) {
@@ -159,8 +145,7 @@ int mv_surveillance_get_event_trigger_roi(
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       *roi = (mv_point_s *) malloc(
-                                       sizeof(mv_point_s) * handle->number_of_roi_points);
+       *roi = (mv_point_s *) malloc(sizeof(mv_point_s) * handle->number_of_roi_points);
        if (*roi == NULL) {
                LOGE("*roi is null");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
@@ -176,12 +161,9 @@ int mv_surveillance_get_event_trigger_roi(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_surveillance_subscribe_event_trigger(
-               mv_surveillance_event_trigger_h event_trigger,
-               int video_stream_id,
-               mv_engine_config_h engine_cfg,
-               mv_surveillance_event_occurred_cb callback,
-               void *user_data)
+int mv_surveillance_subscribe_event_trigger(mv_surveillance_event_trigger_h event_trigger, int video_stream_id,
+                                                                                       mv_engine_config_h engine_cfg, mv_surveillance_event_occurred_cb callback,
+                                                                                       void *user_data)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
@@ -189,37 +171,27 @@ int mv_surveillance_subscribe_event_trigger(
        MEDIA_VISION_NULL_ARG_CHECK(callback);
        MEDIA_VISION_FUNCTION_ENTER();
 
-       const int ret = mv_surveillance_subscribe_event_trigger_open(
-                                       event_trigger,
-                                       video_stream_id,
-                                       engine_cfg,
-                                       callback,
-                                       user_data);
+       const int ret = mv_surveillance_subscribe_event_trigger_open(event_trigger, video_stream_id, engine_cfg, callback,
+                                                                                                                                user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_surveillance_unsubscribe_event_trigger(
-               mv_surveillance_event_trigger_h event_trigger,
-               int video_stream_id)
+int mv_surveillance_unsubscribe_event_trigger(mv_surveillance_event_trigger_h event_trigger, int video_stream_id)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_INSTANCE_CHECK(event_trigger);
        MEDIA_VISION_FUNCTION_ENTER();
 
-       const int ret = mv_surveillance_unsubscribe_event_trigger_open(
-                                       event_trigger,
-                                       video_stream_id);
+       const int ret = mv_surveillance_unsubscribe_event_trigger_open(event_trigger, video_stream_id);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_surveillance_push_source(
-               mv_source_h source,
-               int video_stream_id)
+int mv_surveillance_push_source(mv_source_h source, int video_stream_id)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
@@ -232,27 +204,21 @@ int mv_surveillance_push_source(
        return ret;
 }
 
-int mv_surveillance_foreach_supported_event_type(
-               mv_surveillance_event_type_cb callback,
-               void *user_data)
+int mv_surveillance_foreach_supported_event_type(mv_surveillance_event_type_cb callback, void *user_data)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
        MEDIA_VISION_NULL_ARG_CHECK(callback);
        MEDIA_VISION_FUNCTION_ENTER();
 
-       const int ret = mv_surveillance_foreach_event_type_open(
-                                       callback,
-                                       user_data);
+       const int ret = mv_surveillance_foreach_event_type_open(callback, user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
 }
 
-int mv_surveillance_foreach_event_result_name(
-               const char *event_type,
-               mv_surveillance_event_result_name_cb callback,
-               void *user_data)
+int mv_surveillance_foreach_event_result_name(const char *event_type, mv_surveillance_event_result_name_cb callback,
+                                                                                         void *user_data)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
@@ -260,20 +226,14 @@ int mv_surveillance_foreach_event_result_name(
        MEDIA_VISION_NULL_ARG_CHECK(callback);
        MEDIA_VISION_FUNCTION_ENTER();
 
-       const int ret = mv_surveillance_foreach_event_result_value_name_open(
-                                       event_type,
-                                       callback,
-                                       user_data);
+       const int ret = mv_surveillance_foreach_event_result_value_name_open(event_type, callback, user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
 }
 
-int mv_surveillance_get_result_value(
-               mv_surveillance_result_h result,
-               const char *value_name,
-               void *value)
+int mv_surveillance_get_result_value(mv_surveillance_result_h result, const char *value_name, void *value)
 {
        MEDIA_VISION_SUPPORT_CHECK(_mv_face_check_system_info_feature_supported());
        MEDIA_VISION_SUPPORT_CHECK(_mv_image_check_system_info_feature_supported());
@@ -282,10 +242,7 @@ int mv_surveillance_get_result_value(
        MEDIA_VISION_NULL_ARG_CHECK(value);
        MEDIA_VISION_FUNCTION_ENTER();
 
-       const int ret = mv_surveillance_get_result_value_open(
-                                       result,
-                                       value_name,
-                                       value);
+       const int ret = mv_surveillance_get_result_value_open(result, value_name, value);
 
        MEDIA_VISION_FUNCTION_LEAVE();
        return ret;
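Editor's note: the wrappers above all follow the same pattern: feature-support check, argument checks, then delegation to the _open() implementation. Strung together, a client session looks roughly like the sketch below; stream id 0 and the error handling are placeholders, and the engine-config attribute is the one parseEngineConfig() reads:

	#include <mv_surveillance.h>

	static void on_person(mv_surveillance_event_trigger_h trigger, mv_source_h source,
						  int video_stream_id, mv_surveillance_result_h result, void *user_data)
	{
		/* Read results here with mv_surveillance_get_result_value(). */
	}

	static int subscribe_and_push(mv_source_h frame, const char *model_path)
	{
		mv_surveillance_event_trigger_h trigger = NULL;
		mv_engine_config_h cfg = NULL;

		int err = mv_surveillance_event_trigger_create(MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED, &trigger);
		if (err != MEDIA_VISION_ERROR_NONE)
			return err;

		/* Person recognition needs a trained model file, per parseEngineConfig() above. */
		mv_create_engine_config(&cfg);
		mv_engine_config_set_string_attribute(cfg, MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH, model_path);

		err = mv_surveillance_subscribe_event_trigger(trigger, 0 /* video_stream_id */, cfg, on_person, NULL);
		if (err == MEDIA_VISION_ERROR_NONE)
			err = mv_surveillance_push_source(frame, 0); /* runs analysis; on_person fires on matches */

		mv_surveillance_unsubscribe_event_trigger(trigger, 0);
		mv_destroy_engine_config(cfg);
		mv_surveillance_event_trigger_destroy(trigger);
		return err;
	}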
index 0f303e4..11872ab 100644
 
 using namespace mediavision::surveillance;
 
-int mv_surveillance_subscribe_event_trigger_open(
-       mv_surveillance_event_trigger_h event_trigger,
-       int video_stream_id,
-       mv_engine_config_h engine_cfg,
-       mv_surveillance_event_occurred_cb callback,
-       void *user_data)
+int mv_surveillance_subscribe_event_trigger_open(mv_surveillance_event_trigger_h event_trigger, int video_stream_id,
+                                                                                                mv_engine_config_h engine_cfg,
+                                                                                                mv_surveillance_event_occurred_cb callback, void *user_data)
 {
-       mv_surveillance_event_trigger_s *handle =
-               (mv_surveillance_event_trigger_s *)event_trigger;
-
-       return EventManager::getInstance().registerEvent(
-                       event_trigger,
-                       static_cast<long int>(handle->trigger_id),
-                       handle->event_type,
-                       video_stream_id,
-                       engine_cfg,
-                       callback,
-                       user_data,
-                       handle->number_of_roi_points,
-                       handle->roi,
-                       false);
+       mv_surveillance_event_trigger_s *handle = (mv_surveillance_event_trigger_s *) event_trigger;
+
+       return EventManager::getInstance().registerEvent(event_trigger, static_cast<long int>(handle->trigger_id),
+                                                                                                        handle->event_type, video_stream_id, engine_cfg, callback,
+                                                                                                        user_data, handle->number_of_roi_points, handle->roi, false);
 }
 
-int mv_surveillance_unsubscribe_event_trigger_open(
-       mv_surveillance_event_trigger_h event_trigger,
-       int video_stream_id)
+int mv_surveillance_unsubscribe_event_trigger_open(mv_surveillance_event_trigger_h event_trigger, int video_stream_id)
 {
-       mv_surveillance_event_trigger_s *handle =
-               (mv_surveillance_event_trigger_s *)event_trigger;
+       mv_surveillance_event_trigger_s *handle = (mv_surveillance_event_trigger_s *) event_trigger;
 
-       return EventManager::getInstance().unregisterEvent(
-                       static_cast<long int>(handle->trigger_id),
-                       video_stream_id);
+       return EventManager::getInstance().unregisterEvent(static_cast<long int>(handle->trigger_id), video_stream_id);
 }
 
-int mv_surveillance_push_source_open(
-               mv_source_h source,
-               int video_stream_id)
+int mv_surveillance_push_source_open(mv_source_h source, int video_stream_id)
 {
        MEDIA_VISION_INSTANCE_CHECK(source);
 
        return EventManager::getInstance().pushSource(source, video_stream_id);
 }
 
-int mv_surveillance_foreach_event_type_open(
-               mv_surveillance_event_type_cb callback,
-               void *user_data)
+int mv_surveillance_foreach_event_type_open(mv_surveillance_event_type_cb callback, void *user_data)
 {
        StringVector eventTypes;
        const int error = EventManager::getInstance().getSupportedEventTypes(eventTypes);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Query events failed due to internal issues. Error code: %i",
-                               error);
+               LOGE("Query events failed due to internal issues. Error code: %i", error);
                return error;
        }
 
@@ -91,27 +69,23 @@ int mv_surveillance_foreach_event_type_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_surveillance_foreach_event_result_value_name_open(
-       const char *event_type,
-       mv_surveillance_event_result_name_cb callback,
-       void *user_data)
+int mv_surveillance_foreach_event_result_value_name_open(const char *event_type,
+                                                                                                                mv_surveillance_event_result_name_cb callback, void *user_data)
 {
        StringVector eventResultValueNames;
 
        int error = MEDIA_VISION_ERROR_NONE;
 
        if (NULL == event_type) {
-               error = EventManager::getInstance().getSupportedEventResultValueNames(
-                                       eventResultValueNames);
+               error = EventManager::getInstance().getSupportedEventResultValueNames(eventResultValueNames);
        } else {
-               error = EventManager::getInstance().getSupportedEventResultValueNames(
-                                       event_type,
-                                       eventResultValueNames);
+               error = EventManager::getInstance().getSupportedEventResultValueNames(event_type, eventResultValueNames);
        }
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                LOGE("Query result value names failed due to internal issues. "
-                               "Error code: %i", error);
+                        "Error code: %i",
+                        error);
                return error;
        }
 
@@ -126,16 +100,13 @@ int mv_surveillance_foreach_event_result_value_name_open(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_surveillance_get_result_value_open(
-       mv_surveillance_result_h result,
-       const char *value_name,
-       void *value)
+int mv_surveillance_get_result_value_open(mv_surveillance_result_h result, const char *value_name, void *value)
 {
        MEDIA_VISION_INSTANCE_CHECK(result);
        MEDIA_VISION_NULL_ARG_CHECK(value_name);
        MEDIA_VISION_NULL_ARG_CHECK(value);
 
-       EventResult *eventResult = (EventResult*) result;
+       EventResult *eventResult = (EventResult *) result;
 
        return eventResult->getResultValue(value_name, value);
 }
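Editor's note: mv_surveillance_foreach_event_type_open() above adapts the vector-returning EventManager call to the public callback-iteration style. Enumerating every event type and its result value names would look like this sketch, assuming the conventional Tizen foreach-callback contract (return true to keep iterating):

	#include <mv_surveillance.h>
	#include <stdio.h>

	static bool print_result_name(const char *name, void *user_data)
	{
		printf("  result value: %s\n", name);
		return true; /* keep iterating */
	}

	static bool print_event_type(const char *event_type, void *user_data)
	{
		printf("event: %s\n", event_type);
		mv_surveillance_foreach_event_result_name(event_type, print_result_name, NULL);
		return true;
	}

	/* Entry point: mv_surveillance_foreach_supported_event_type(print_event_type, NULL); */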
index 76f5f5b..ba8863e 100644
@@ -1,6 +1,6 @@
 Name:        capi-media-vision
 Summary:     Media Vision library for Tizen Native API
-Version:     0.23.22
+Version:     0.23.23
 Release:     0
 Group:       Multimedia/Framework
 License:     Apache-2.0 and BSD-3-Clause
index 47b80d1..b023750 100644
@@ -42,11 +42,8 @@ struct arguments
        char *source;
 };
 
-int load_image_to_buffer(char *source,
-               unsigned char **buffer,
-               unsigned long *size,
-               unsigned int *width,
-               unsigned int *height)
+int load_image_to_buffer(char *source, unsigned char **buffer, unsigned long *size, unsigned int *width,
+                                                unsigned int *height)
 {
        cv::Mat image;
        image = cv::imread(source);
@@ -70,14 +67,9 @@ int load_image_to_buffer(char *source,
        return MEDIA_VISION_ERROR_NONE;
 }
 
-void barcode_detected_cb(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               const mv_quadrangle_s *barcodes_locations,
-               const char *messages[],
-               const mv_barcode_type_e *types,
-               int number_of_barcodes,
-               void *user_data)
+void barcode_detected_cb(mv_source_h source, mv_engine_config_h engine_cfg, const mv_quadrangle_s *barcodes_locations,
+                                                const char *messages[], const mv_barcode_type_e *types, int number_of_barcodes,
+                                                void *user_data)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
@@ -102,11 +94,10 @@ int perform_detect(struct arguments *arguments)
        unsigned int image_width = 0;
        unsigned int image_height = 0;
        int int_value = MV_BARCODE_DETECT_ATTR_TARGET_ALL;
-       mv_rectangle_s roi = { {0, 0}, 0, 0 };
+       mv_rectangle_s roi = { { 0, 0 }, 0, 0 };
        int err = MEDIA_VISION_ERROR_NONE;
 
-       err = load_image_to_buffer(arguments->source,
-                       &data_buffer, &buffer_size, &image_width, &image_height);
+       err = load_image_to_buffer(arguments->source, &data_buffer, &buffer_size, &image_width, &image_height);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
                LOGE("Errors were occurred during opening the file!!! code : %i", err);
@@ -120,10 +111,10 @@ int perform_detect(struct arguments *arguments)
        }
 
        if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
-               mv_engine_config_get_int_attribute(
-                                       mv_engine_config, MV_BARCODE_DETECT_ATTR_TARGET, &int_value)) {
+               mv_engine_config_get_int_attribute(mv_engine_config, MV_BARCODE_DETECT_ATTR_TARGET, &int_value)) {
                LOGE("Errors were occurred during target attribute"
-                               "configuration : %i", err);
+                        "configuration : %i",
+                        err);
                goto out;
        }
 
@@ -133,8 +124,8 @@ int perform_detect(struct arguments *arguments)
                goto out;
        }
 
-       err = mv_source_fill_by_buffer(source, data_buffer, buffer_size,
-                       image_width, image_height, MEDIA_VISION_COLORSPACE_Y800);
+       err = mv_source_fill_by_buffer(source, data_buffer, buffer_size, image_width, image_height,
+                                                                  MEDIA_VISION_COLORSPACE_Y800);
        if (MEDIA_VISION_ERROR_NONE != err) {
                LOGE("Errors were occurred during filling the source!!! code : %i", err);
                goto out;
@@ -150,7 +141,7 @@ out:
                printf("-1\n");
 
        if (data_buffer != NULL) {
-               delete [] data_buffer;
+               delete[] data_buffer;
                data_buffer = NULL;
        }
 
@@ -172,9 +163,9 @@ out:
        return err;
 }
 
-static error_t parse_opt (int key, char *arg, struct argp_state *state)
+static error_t parse_opt(int key, char *arg, struct argp_state *state)
 {
-       struct arguments *arguments = (struct arguments *)state->input;
+       struct arguments *arguments = (struct arguments *) state->input;
 
        switch (key) {
        case ARGP_KEY_NO_ARGS:
@@ -201,11 +192,12 @@ int main(int argc, char *argv[])
        LOGI("Media Vision Assessment-Barcode is launched.");
 
        struct arguments arguments;
-       argp_parse (&argp, argc, argv, 0, 0, &arguments);
+       argp_parse(&argp, argc, argv, 0, 0, &arguments);
 
        std::chrono::system_clock::time_point StartTime = std::chrono::system_clock::now();
        int err = perform_detect(&arguments);
-       std::chrono::milliseconds ms = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - StartTime);
+       std::chrono::milliseconds ms =
+                       std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - StartTime);
        std::cout << ms.count() << "ms" << std::endl;
 
        if (err != MEDIA_VISION_ERROR_NONE)
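
Both assessment mains time perform_detect() with std::chrono; the patch only re-wraps the duration_cast line. As a standalone reference, the measurement pattern is:

    #include <chrono>
    #include <iostream>

    int main()
    {
        std::chrono::system_clock::time_point StartTime = std::chrono::system_clock::now();
        // ... run the operation under test ...
        std::chrono::milliseconds ms =
                std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - StartTime);
        std::cout << ms.count() << "ms" << std::endl;
        return 0;
    }

(std::chrono::steady_clock would be the more conventional choice for interval timing, since system_clock can jump if the wall clock is adjusted.)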
index 56a97c4..8d4fb07 100644
@@ -45,16 +45,12 @@ struct arguments
        int model_type;
 };
 
-const char *model_names[3] = {"/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml",
-                                                       "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml",
-                                                       "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt_tree.xml"};
-
-void on_face_detected_cb(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s *faces_locations,
-               int number_of_faces,
-               void *user_data)
+const char *model_names[3] = { "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml",
+                                                          "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml",
+                                                          "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt_tree.xml" };
+
+void on_face_detected_cb(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s *faces_locations,
+                                                int number_of_faces, void *user_data)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
@@ -62,20 +58,16 @@ void on_face_detected_cb(
        printf("%i\n", number_of_faces);
        if (number_of_faces > 0) {
                for (i = 0; i < number_of_faces; ++i) {
-                       printf("%i %i %i %i\n",
-                                       faces_locations[i].point.x, faces_locations[i].point.y,
-                                       faces_locations[i].width, faces_locations[i].height);
+                       printf("%i %i %i %i\n", faces_locations[i].point.x, faces_locations[i].point.y, faces_locations[i].width,
+                                  faces_locations[i].height);
                }
        }
 
        MEDIA_VISION_FUNCTION_LEAVE();
 }
 
-int load_image_to_buffer(char *source,
-               unsigned char **buffer,
-               unsigned long *size,
-               unsigned int *width,
-               unsigned int *height)
+int load_image_to_buffer(char *source, unsigned char **buffer, unsigned long *size, unsigned int *width,
+                                                unsigned int *height)
 {
        cv::Mat image;
        image = cv::imread(source);
@@ -110,8 +102,7 @@ int perform_detect(struct arguments *arguments)
        unsigned int image_width = 0;
        unsigned int image_height = 0;
 
-       int err = load_image_to_buffer(arguments->source,
-                       &data_buffer, &buffer_size, &image_width, &image_height);
+       int err = load_image_to_buffer(arguments->source, &data_buffer, &buffer_size, &image_width, &image_height);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
                LOGE("Errors were occurred during opening the file!!! code : %i", err);
@@ -124,10 +115,8 @@ int perform_detect(struct arguments *arguments)
                goto out;
        }
 
-       mv_engine_config_set_string_attribute(
-                               mv_engine_config,
-                               MV_FACE_DETECTION_MODEL_FILE_PATH,
-                               model_names[arguments->model_type]);
+       mv_engine_config_set_string_attribute(mv_engine_config, MV_FACE_DETECTION_MODEL_FILE_PATH,
+                                                                                 model_names[arguments->model_type]);
 
        err = mv_create_source(&source);
        if (MEDIA_VISION_ERROR_NONE != err) {
@@ -135,8 +124,8 @@ int perform_detect(struct arguments *arguments)
                goto out;
        }
 
-       err = mv_source_fill_by_buffer(source, data_buffer, buffer_size,
-                       image_width, image_height, MEDIA_VISION_COLORSPACE_Y800);
+       err = mv_source_fill_by_buffer(source, data_buffer, buffer_size, image_width, image_height,
+                                                                  MEDIA_VISION_COLORSPACE_Y800);
        if (MEDIA_VISION_ERROR_NONE != err) {
                LOGE("Errors were occurred during filling the source!!! code : %i", err);
                goto out;
@@ -168,9 +157,9 @@ out:
        return err;
 }
 
-static error_t parse_opt (int key, char *arg, struct argp_state *state)
+static error_t parse_opt(int key, char *arg, struct argp_state *state)
 {
-       struct arguments *arguments = (struct arguments *)state->input;
+       struct arguments *arguments = (struct arguments *) state->input;
 
        switch (key) {
        case ARGP_KEY_NO_ARGS:
@@ -202,11 +191,12 @@ int main(int argc, char *argv[])
        LOGI("Media Vision Assessment-Face is launched.");
 
        struct arguments arguments;
-       argp_parse (&argp, argc, argv, 0, 0, &arguments);
+       argp_parse(&argp, argc, argv, 0, 0, &arguments);
 
        std::chrono::system_clock::time_point StartTime = std::chrono::system_clock::now();
        int err = perform_detect(&arguments);
-       std::chrono::milliseconds ms = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - StartTime);
+       std::chrono::milliseconds ms =
+                       std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - StartTime);
        std::cout << ms.count() << "ms" << std::endl;
 
        if (err != MEDIA_VISION_ERROR_NONE)
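
The parse_opt()/argp_parse() pairing reformatted above is standard glibc argp. A minimal self-contained sketch of the pattern, with a hypothetical --source option rather than the tools' actual option tables:

    #include <argp.h>
    #include <stdio.h>

    struct arguments
    {
        const char *source;
    };

    static struct argp_option options[] = {
        { "source", 's', "FILE", 0, "Input image FILE" },
        { 0 }
    };

    static error_t parse_opt(int key, char *arg, struct argp_state *state)
    {
        struct arguments *arguments = (struct arguments *) state->input;

        switch (key) {
        case 's':
            arguments->source = arg;
            break;
        default:
            return ARGP_ERR_UNKNOWN;
        }
        return 0;
    }

    static struct argp argp = { options, parse_opt, "", "argp sketch" };

    int main(int argc, char *argv[])
    {
        struct arguments arguments = { "default.jpg" };
        argp_parse(&argp, argc, argv, 0, 0, &arguments);
        printf("source = %s\n", arguments.source);
        return 0;
    }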
index 861ae3f..7bf5220 100644
@@ -42,43 +42,33 @@ struct arguments
        int threshold;
 };
 
-void movement_detected_cb(
-       mv_surveillance_event_trigger_h event_trigger,
-       mv_source_h source,
-       int video_stream_id,
-       mv_surveillance_result_h event_result,
-       void *user_data)
+void movement_detected_cb(mv_surveillance_event_trigger_h event_trigger, mv_source_h source, int video_stream_id,
+                                                 mv_surveillance_result_h event_result, void *user_data)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
        int number_of_movement_regions = 0;
-       int error = mv_surveillance_get_result_value(
-                                       event_result,
-                                       MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS,
-                                       &number_of_movement_regions);
+       int error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS,
+                                                                                                &number_of_movement_regions);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                LOGE("Error with code %d was occurred in getting number of "
-                               "movement regions.", error);
+                        "movement regions.",
+                        error);
                return;
        }
 
-       mv_rectangle_s *movement_regions = (mv_rectangle_s *)
-               malloc(sizeof(mv_rectangle_s) * number_of_movement_regions);
+       mv_rectangle_s *movement_regions = (mv_rectangle_s *) malloc(sizeof(mv_rectangle_s) * number_of_movement_regions);
 
        if (movement_regions == NULL) {
                LOGE("Failed to malloc movement_regions");
                return;
        }
 
-       error = mv_surveillance_get_result_value(
-                               event_result,
-                               MV_SURVEILLANCE_MOVEMENT_REGIONS,
-                               movement_regions);
+       error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_MOVEMENT_REGIONS, movement_regions);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Error with code %d was occurred in getting movement regions.",
-                               error);
+               LOGE("Error with code %d was occurred in getting movement regions.", error);
                if (movement_regions != NULL)
                        free(movement_regions);
                return;
@@ -90,7 +80,6 @@ void movement_detected_cb(
        cv::Scalar color(0, 0, 255);
        static unsigned int findex = 0;
 
-
        mv_source_get_width(source, &image_width);
        mv_source_get_height(source, &image_height);
        mv_source_get_colorspace(source, &image_colorspace);
@@ -111,22 +100,15 @@ void movement_detected_cb(
 #endif
 
        for (int i = 0; i < number_of_movement_regions; ++i) {
-               printf("%d:%d:%d-%d:%d\n",
-                               findex,
-                               movement_regions[i].point.x,
-                               movement_regions[i].point.y,
-                               movement_regions[i].width,
-                               movement_regions[i].height);
+               printf("%d:%d:%d-%d:%d\n", findex, movement_regions[i].point.x, movement_regions[i].point.y,
+                          movement_regions[i].width, movement_regions[i].height);
 #if DUMP_JPEG
                char buf[256];
                cv::Mat cvImage(image_width, image_height, CV_8UC(3), out_buffer_copy);
-               cv::rectangle(
-                       cvImage,
-                       cv::Point(movement_regions[i].point.x, movement_regions[i].point.y),
-                       cv::Point((movement_regions[i].width - movement_regions[i].point.x),
-                                       (movement_regions[i].height - movement_regions[i].point.y)),
-                       color,
-                       3);
+               cv::rectangle(cvImage, cv::Point(movement_regions[i].point.x, movement_regions[i].point.y),
+                                         cv::Point((movement_regions[i].width - movement_regions[i].point.x),
+                                                               (movement_regions[i].height - movement_regions[i].point.y)),
+                                         color, 3);
                sprintf(buf, "%d.jpg", findex);
                if (!cv::imwrite(buf, cvImage))
                        LOGE("Failed to write image");
@@ -153,11 +135,11 @@ int perform_detect(struct arguments *arguments)
        mv_engine_config_h engine_cfg = NULL;
        int err = MEDIA_VISION_ERROR_NONE;
 
-       err = mv_surveillance_event_trigger_create(
-                                       MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, &event_trigger);
+       err = mv_surveillance_event_trigger_create(MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, &event_trigger);
        if (MEDIA_VISION_ERROR_NONE != err) {
                LOGE("mv_surveillance_event_trigger_create() error!\n"
-                               "Error code: %i\n", err);
+                        "Error code: %i\n",
+                        err);
                goto out;
        }
 
@@ -167,19 +149,14 @@ int perform_detect(struct arguments *arguments)
                goto out;
        }
 
-       err = mv_engine_config_set_int_attribute(engine_cfg,
-                                       MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD, arguments->threshold);
+       err = mv_engine_config_set_int_attribute(engine_cfg, MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD,
+                                                                                        arguments->threshold);
        if (err != MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Setting frame detection threshold from engine configuration failed.");
-                       goto out;
+               LOGE("Setting frame detection threshold from engine configuration failed.");
+               goto out;
        }
 
-       err = mv_surveillance_subscribe_event_trigger(
-                                       event_trigger,
-                                       0,
-                                       engine_cfg,
-                                       movement_detected_cb,
-                                       NULL);
+       err = mv_surveillance_subscribe_event_trigger(event_trigger, 0, engine_cfg, movement_detected_cb, NULL);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
                LOGE("Error with code %d was occurred in subscribe event.", err);
@@ -189,7 +166,7 @@ int perform_detect(struct arguments *arguments)
 out:
        if (MEDIA_VISION_ERROR_NONE == err) {
                cv::VideoCapture cap(arguments->source);
-               if(!cap.isOpened())
+               if (!cap.isOpened())
                        LOGE("Can not open Video file");
 
                int rows = cap.get(cv::CAP_PROP_FRAME_WIDTH);
@@ -199,7 +176,7 @@ out:
                if (MEDIA_VISION_ERROR_NONE != err) {
                        LOGE("Errors were occurred during source creating!!! Code %i", err);
                } else {
-                       for(int frame = 0; frame < cap.get(cv::CAP_PROP_FRAME_COUNT); frame++) {
+                       for (int frame = 0; frame < cap.get(cv::CAP_PROP_FRAME_COUNT); frame++) {
                                cv::Mat f, rgb;
                                cap >> f;
                                if (f.empty())
@@ -209,13 +186,7 @@ out:
                                int size = rgb.total() * rgb.elemSize();
                                unsigned char *data = rgb.data;
 
-
-                               err = mv_source_fill_by_buffer(
-                                               source, data,
-                                               size,
-                                               cols,
-                                               rows ,
-                                               MEDIA_VISION_COLORSPACE_RGB888);
+                               err = mv_source_fill_by_buffer(source, data, size, cols, rows, MEDIA_VISION_COLORSPACE_RGB888);
 
                                err = mv_surveillance_push_source(source, 0);
                                if (MEDIA_VISION_ERROR_NONE != err) {
@@ -235,14 +206,16 @@ out:
                err = mv_destroy_engine_config(engine_cfg);
                if (err != MEDIA_VISION_ERROR_NONE)
                        LOGE("Failed to destroy engine configuration for event trigger."
-                                       "Error code: %i", err);
+                                "Error code: %i",
+                                err);
        }
 
        if (event_trigger != NULL) {
                err = mv_surveillance_event_trigger_destroy(event_trigger);
                if (MEDIA_VISION_ERROR_NONE != err) {
                        LOGE("Error with code %d was occurred when try to destroy "
-                                       "event trigger.", err);
+                                "event trigger.",
+                                err);
                }
        }
 
@@ -251,9 +224,9 @@ out:
        return err;
 }
 
-static error_t parse_opt (int key, char *arg, struct argp_state *state)
+static error_t parse_opt(int key, char *arg, struct argp_state *state)
 {
-       struct arguments *arguments = (struct arguments *)state->input;
+       struct arguments *arguments = (struct arguments *) state->input;
 
        switch (key) {
        case ARGP_KEY_NO_ARGS:
@@ -287,11 +260,12 @@ int main(int argc, char *argv[])
        LOGI("Media Vision Assessment-Surveillance is launched.");
 
        struct arguments arguments;
-       argp_parse (&argp, argc, argv, 0, 0, &arguments);
+       argp_parse(&argp, argc, argv, 0, 0, &arguments);
 
        std::chrono::system_clock::time_point StartTime = std::chrono::system_clock::now();
        err = perform_detect(&arguments);
-       std::chrono::milliseconds ms = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - StartTime);
+       std::chrono::milliseconds ms =
+                       std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - StartTime);
        std::cout << ms.count() << "ms" << std::endl;
 
        if (err != MEDIA_VISION_ERROR_NONE)
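
Two details in the surveillance sample are untouched by this formatting-only patch but worth flagging: cv::Mat takes (rows, cols), so constructing it as (image_width, image_height) looks transposed for non-square frames, and cv::rectangle's second corner is computed as (width - point.x, height - point.y), which is only correct if width/height already hold the far corner. A sketch of the conventional corner math, assuming mv_rectangle_s carries a top-left point plus extents as its field names suggest:

    #include <mv_common.h>            /* assumed install path for mv_rectangle_s */
    #include <opencv2/imgproc.hpp>

    /* Hypothetical helper: draw a region whose width/height are extents. */
    static void draw_region(cv::Mat &image, const mv_rectangle_s &region)
    {
        const cv::Point top_left(region.point.x, region.point.y);
        const cv::Point bottom_right(region.point.x + region.width,
                                     region.point.y + region.height);
        cv::rectangle(image, top_left, bottom_right, cv::Scalar(0, 0, 255), 3);
    }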
index f89ff3d..517c64c 100644
@@ -42,7 +42,8 @@
 
 #define ARRAY_SIZE(x) (sizeof((x)) / sizeof((x)[0]))
 
-typedef struct {
+typedef struct
+{
        mv_barcode_type_e type;
        mv_barcode_qr_ecc_e ecc;
        mv_barcode_qr_mode_e mode;
@@ -60,7 +61,8 @@ typedef struct {
        char *back_color;
 } barcode_model_s;
 
-typedef enum {
+typedef enum
+{
        MV_TS_GENERATE_TO_IMAGE_FCN,
        MV_TS_GENERATE_TO_SOURCE_FCN
 } generation_fcn_e;
@@ -73,8 +75,8 @@ static const char doc[] = "mv_barcode_test -- mediavision barcode test";
 static const char args_doc[] = "";
 
 static struct argp_option arg_options[] = {
-       {"mode", 'm', "MODE", OPTION_ARG_OPTIONAL, "Run test by MODE [0=manual|1=auto] (default 0)"},
-       {"interval", 'i', "TIME", OPTION_ARG_OPTIONAL, "Run test by TIME (default 0 sec, maximum 9 sec) interval"},
+       { "mode", 'm', "MODE", OPTION_ARG_OPTIONAL, "Run test by MODE [0=manual|1=auto] (default 0)" },
+       { "interval", 'i', "TIME", OPTION_ARG_OPTIONAL, "Run test by TIME (default 0 sec, maximum 9 sec) interval" },
        { 0 }
 };
 
@@ -84,9 +86,8 @@ struct arguments
        int interval;
 };
 
-int convert_rgb_to(unsigned char *src_buffer, unsigned char **dst_buffer,
-               image_data_s image_data, mv_colorspace_e dst_colorspace,
-               unsigned long *cvt_buffer_size)
+int convert_rgb_to(unsigned char *src_buffer, unsigned char **dst_buffer, image_data_s image_data,
+                                  mv_colorspace_e dst_colorspace, unsigned long *cvt_buffer_size)
 {
        enum AVPixelFormat pixel_format = AV_PIX_FMT_NONE;
 
@@ -126,7 +127,7 @@ int convert_rgb_to(unsigned char *src_buffer, unsigned char **dst_buffer,
                break;
        case MEDIA_VISION_COLORSPACE_RGB888:
                *cvt_buffer_size = image_data.image_width * image_data.image_height * 3;
-               (*dst_buffer) = (unsigned char*)malloc(*cvt_buffer_size);
+               (*dst_buffer) = (unsigned char *) malloc(*cvt_buffer_size);
                memcpy(*dst_buffer, src_buffer, *cvt_buffer_size);
 
                MEDIA_VISION_FUNCTION_LEAVE();
@@ -139,31 +140,27 @@ int convert_rgb_to(unsigned char *src_buffer, unsigned char **dst_buffer,
        AVPicture src_picture;
        AVPicture dst_picture;
 
-       avpicture_fill(&src_picture, (uint8_t*)src_buffer, AV_PIX_FMT_RGB24,
-                       image_data.image_width, image_data.image_height);
+       avpicture_fill(&src_picture, (uint8_t *) src_buffer, AV_PIX_FMT_RGB24, image_data.image_width,
+                                  image_data.image_height);
 
-       avpicture_alloc(&dst_picture, pixel_format,
-                       image_data.image_width, image_data.image_height);
+       avpicture_alloc(&dst_picture, pixel_format, image_data.image_width, image_data.image_height);
 
-       struct SwsContext *context = sws_getContext(
-                       image_data.image_width, image_data.image_height, AV_PIX_FMT_RGB24,
-                       image_data.image_width, image_data.image_height, pixel_format,
-                       SWS_FAST_BILINEAR, 0, 0, 0);
+       struct SwsContext *context = sws_getContext(image_data.image_width, image_data.image_height, AV_PIX_FMT_RGB24,
+                                                                                               image_data.image_width, image_data.image_height, pixel_format,
+                                                                                               SWS_FAST_BILINEAR, 0, 0, 0);
 
-       sws_scale(context, (const uint8_t * const *)src_picture.data,
-                       src_picture.linesize, 0, image_data.image_height,
-                       dst_picture.data, dst_picture.linesize);
+       sws_scale(context, (const uint8_t *const *) src_picture.data, src_picture.linesize, 0, image_data.image_height,
+                         dst_picture.data, dst_picture.linesize);
 
-       int picture_size = avpicture_get_size(pixel_format,
-                       image_data.image_width, image_data.image_height);
+       int picture_size = avpicture_get_size(pixel_format, image_data.image_width, image_data.image_height);
        if (picture_size < 0) {
                avpicture_free(&dst_picture);
                MEDIA_VISION_FUNCTION_LEAVE();
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
-       *cvt_buffer_size = (unsigned long)picture_size;
+       *cvt_buffer_size = (unsigned long) picture_size;
 
-       (*dst_buffer) = (unsigned char*)malloc(*cvt_buffer_size);
+       (*dst_buffer) = (unsigned char *) malloc(*cvt_buffer_size);
        memcpy(*dst_buffer, dst_picture.data[0], *cvt_buffer_size);
 
        avpicture_free(&dst_picture);
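
One behavior note on the reflowed conversion code: sws_getContext() returns NULL for unsupported conversions, and the sample uses the result unchecked (and never frees it). A hedged sketch of the guard, mirroring the call above with illustrative names:

    #include <stdio.h>
    extern "C" {
    #include <libswscale/swscale.h>
    }

    /* Hypothetical helper: create the RGB24 -> dst_format context or fail
     * cleanly; callers must sws_freeContext() the result when done. */
    static struct SwsContext *create_rgb_context(int width, int height,
                                                 enum AVPixelFormat dst_format)
    {
        struct SwsContext *context = sws_getContext(width, height, AV_PIX_FMT_RGB24,
                                                    width, height, dst_format,
                                                    SWS_FAST_BILINEAR, NULL, NULL, NULL);
        if (context == NULL)
            fprintf(stderr, "unsupported conversion\n");
        return context;
    }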
@@ -249,15 +246,15 @@ int find_max_y(const mv_quadrangle_s *quadrangle, int *maxY)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-bool _mv_engine_config_supported_attribute(mv_config_attribute_type_e attribute_type,
-               const char *attribute_name, void *user_data)
+bool _mv_engine_config_supported_attribute(mv_config_attribute_type_e attribute_type, const char *attribute_name,
+                                                                                  void *user_data)
 {
        mvprintw(current_y++, MINX, "Callback call for engine configuration attribute");
 
        if (user_data == NULL)
                return false;
 
-       mv_engine_config_h mv_engine_config = (mv_engine_config_h *)user_data;
+       mv_engine_config_h mv_engine_config = (mv_engine_config_h *) user_data;
 
        int int_value = 0;
        double double_value = 0.0;
@@ -266,47 +263,37 @@ bool _mv_engine_config_supported_attribute(mv_config_attribute_type_e attribute_
        switch (attribute_type) {
        case MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE:
                if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
-                       mv_engine_config_get_double_attribute(
-                                               mv_engine_config, attribute_name, &double_value)) {
-                       mvprintw(current_y++, MINX, "Default double attribute %s wasn't set in engine",
-                                       attribute_name);
+                       mv_engine_config_get_double_attribute(mv_engine_config, attribute_name, &double_value)) {
+                       mvprintw(current_y++, MINX, "Default double attribute %s wasn't set in engine", attribute_name);
                        return false;
                }
-               mvprintw(current_y++, MINX, "Default double attribute %s was set to %f in engine",
-                                       attribute_name, double_value);
+               mvprintw(current_y++, MINX, "Default double attribute %s was set to %f in engine", attribute_name,
+                                double_value);
                break;
        case MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER:
                if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
-                       mv_engine_config_get_int_attribute(
-                                               mv_engine_config, attribute_name, &int_value)) {
-                       mvprintw(current_y++, MINX, "Default integer attribute %s wasn't set in engine",
-                                       attribute_name);
+                       mv_engine_config_get_int_attribute(mv_engine_config, attribute_name, &int_value)) {
+                       mvprintw(current_y++, MINX, "Default integer attribute %s wasn't set in engine", attribute_name);
                        return false;
                }
-               mvprintw(current_y++, MINX, "Default integer attribute %s was set to %d in engine",
-                               attribute_name, int_value);
+               mvprintw(current_y++, MINX, "Default integer attribute %s was set to %d in engine", attribute_name, int_value);
                break;
        case MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN:
                if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
-                       mv_engine_config_get_bool_attribute(
-                                               mv_engine_config, attribute_name, &bool_value)) {
-                       mvprintw(current_y++, MINX, "Default bool attribute %s wasn't set in engine",
-                                       attribute_name);
+                       mv_engine_config_get_bool_attribute(mv_engine_config, attribute_name, &bool_value)) {
+                       mvprintw(current_y++, MINX, "Default bool attribute %s wasn't set in engine", attribute_name);
                        return false;
                }
-               mvprintw(current_y++, MINX, "Default bool attribute %s was set to %s in engine",
-                                       attribute_name,  bool_value ? "TRUE" : "FALSE");
+               mvprintw(current_y++, MINX, "Default bool attribute %s was set to %s in engine", attribute_name,
+                                bool_value ? "TRUE" : "FALSE");
                break;
        case MV_ENGINE_CONFIG_ATTR_TYPE_STRING:
                if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
-                       mv_engine_config_get_string_attribute(
-                                               mv_engine_config, attribute_name, &str_value)) {
-                       mvprintw(current_y++, MINX, "Default string attribute %s wasn't set in engine",
-                                       attribute_name);
+                       mv_engine_config_get_string_attribute(mv_engine_config, attribute_name, &str_value)) {
+                       mvprintw(current_y++, MINX, "Default string attribute %s wasn't set in engine", attribute_name);
                        return false;
                }
-               mvprintw(current_y++, MINX, "Default string attribute %s was set to %s in engine",
-                               attribute_name, str_value);
+               mvprintw(current_y++, MINX, "Default string attribute %s was set to %s in engine", attribute_name, str_value);
                if (str_value != NULL) {
                        free(str_value);
                        str_value = NULL;
@@ -320,14 +307,9 @@ bool _mv_engine_config_supported_attribute(mv_config_attribute_type_e attribute_
        return true;
 }
 
-void barcode_detected_cb(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               const mv_quadrangle_s *barcodes_locations,
-               const char *messages[],
-               const mv_barcode_type_e *types,
-               int number_of_barcodes,
-               void *user_data)
+void barcode_detected_cb(mv_source_h source, mv_engine_config_h engine_cfg, const mv_quadrangle_s *barcodes_locations,
+                                                const char *messages[], const mv_barcode_type_e *types, int number_of_barcodes,
+                                                void *user_data)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
@@ -348,8 +330,8 @@ void barcode_detected_cb(
                        user_data == NULL) {
                        mvprintw(current_y++, MINX, "ERROR : Creating out image is impossible.");
                } else {
-                       file_name = ((barcode_model_s *)user_data)->out_file_name;
-                       draw_buffer = ((barcode_model_s *)user_data)->out_buffer_ptr;
+                       file_name = ((barcode_model_s *) user_data)->out_file_name;
+                       draw_buffer = ((barcode_model_s *) user_data)->out_buffer_ptr;
                        image_data.image_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
                        is_source_data_loaded = 1;
                }
@@ -422,17 +404,10 @@ void barcode_detected_cb(
                                        continue;
                                }
 
-                               const int drawing_color[] = {255, 0, 0};
-
-                               if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer(
-                                                       minX,
-                                                       minY,
-                                                       maxX,
-                                                       maxY,
-                                                       6,
-                                                       drawing_color,
-                                                       &image_data,
-                                                       draw_buffer))
+                               const int drawing_color[] = { 255, 0, 0 };
+
+                               if (MEDIA_VISION_ERROR_NONE !=
+                                       draw_rectangle_on_buffer(minX, minY, maxX, maxY, 6, drawing_color, &image_data, draw_buffer))
                                        continue;
                        }
                }
@@ -454,8 +429,7 @@ int generate_barcode_to_image(barcode_model_s model)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
-       if (model.message   == NULL ||
-                       model.file_name == NULL) {
+       if (model.message == NULL || model.file_name == NULL) {
                MEDIA_VISION_FUNCTION_LEAVE();
 
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -467,58 +441,59 @@ int generate_barcode_to_image(barcode_model_s model)
        int err = mv_create_engine_config(&mv_engine_config);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Errors were occurred during creating the media engine "
-                               "config : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Errors were occurred during creating the media engine "
+                                "config : %i",
+                                err);
                MEDIA_VISION_FUNCTION_LEAVE();
 
                return err;
        }
 
-       err = mv_engine_config_set_int_attribute(mv_engine_config,
-                                                                                       MV_BARCODE_GENERATE_ATTR_TEXT,
-                                                                                       model.is_hrt);
+       err = mv_engine_config_set_int_attribute(mv_engine_config, MV_BARCODE_GENERATE_ATTR_TEXT, model.is_hrt);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Errors were occurred during set integer attribute to "
-                               "media engine config : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Errors were occurred during set integer attribute to "
+                                "media engine config : %i",
+                                err);
                goto cleanup;
        }
 
-       err = mv_engine_config_set_string_attribute(mv_engine_config,
-                                                                                       MV_BARCODE_GENERATE_ATTR_COLOR_FRONT,
-                                                                                       model.front_color);
+       err = mv_engine_config_set_string_attribute(mv_engine_config, MV_BARCODE_GENERATE_ATTR_COLOR_FRONT,
+                                                                                               model.front_color);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Errors were occurred during set string attribute to "
-                               "media engine config : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Errors were occurred during set string attribute to "
+                                "media engine config : %i",
+                                err);
                goto cleanup;
        }
 
-       err = mv_engine_config_set_string_attribute(mv_engine_config,
-                                                                                       MV_BARCODE_GENERATE_ATTR_COLOR_BACK,
-                                                                                       model.back_color);
+       err = mv_engine_config_set_string_attribute(mv_engine_config, MV_BARCODE_GENERATE_ATTR_COLOR_BACK,
+                                                                                               model.back_color);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Errors were occurred during set string attribute to "
-                               "media engine config : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Errors were occurred during set string attribute to "
+                                "media engine config : %i",
+                                err);
                goto cleanup;
        }
-       err = mv_barcode_generate_image(mv_engine_config, model.message,
-                                                                       model.width, model.height, model.type,
-                                                                       model.mode, model.ecc, model.version,
-                                                                       model.file_name, model.out_image_format);
+       err = mv_barcode_generate_image(mv_engine_config, model.message, model.width, model.height, model.type, model.mode,
+                                                                       model.ecc, model.version, model.file_name, model.out_image_format);
        if (MEDIA_VISION_ERROR_NONE != err)
-               mvprintw(current_y++, MINX, "ERROR : Errors were occurred during generate image error : %i",
-                          err);
+               mvprintw(current_y++, MINX, "ERROR : Errors were occurred during generate image error : %i", err);
 cleanup:
        if (MEDIA_VISION_ERROR_NONE != err) {
                int err2 = mv_destroy_engine_config(mv_engine_config);
                if (MEDIA_VISION_ERROR_NONE != err2)
                        mvprintw(current_y++, MINX, "ERROR : Errors were occurred during destroying the media engine config : %i",
-                                  err2);
+                                        err2);
 
        } else {
                err = mv_destroy_engine_config(mv_engine_config);
                if (MEDIA_VISION_ERROR_NONE != err)
                        mvprintw(current_y++, MINX, "ERROR : Errors were occurred during destroying the media engine config : %i",
-                                  err);
+                                        err);
        }
        MEDIA_VISION_FUNCTION_LEAVE();
        return err;
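
Most of the churn in this file is clang-format re-wrapping long mvprintw()/LOGE() calls: the adjacent string literals (concatenated by the compiler into one string) keep their split, but the trailing err argument moves to its own line once the call exceeds the column limit. In miniature, with printf standing in for mvprintw and the message quoted from the source:

    #include <stdio.h>

    int main()
    {
        int err = -1;

        /* Adjacent literals are a single string to the compiler, so the
         * formatter is free to rebalance the split and break the argument
         * list onto separate lines. */
        printf("ERROR : Errors were occurred during creating the media engine "
               "config : %i\n",
               err);
        return 0;
    }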
@@ -527,8 +502,7 @@ int generate_barcode_to_source(barcode_model_s model)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
-       if (model.message   == NULL ||
-               model.file_name == NULL) {
+       if (model.message == NULL || model.file_name == NULL) {
                MEDIA_VISION_FUNCTION_LEAVE();
 
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -539,8 +513,10 @@ int generate_barcode_to_source(barcode_model_s model)
        mv_source_h source = NULL;
        int err = mv_create_source(&source);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Error occurred when trying to create Media Vision "
-                               "source. Error code : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Error occurred when trying to create Media Vision "
+                                "source. Error code : %i",
+                                err);
 
                MEDIA_VISION_FUNCTION_LEAVE();
 
@@ -554,41 +530,41 @@ int generate_barcode_to_source(barcode_model_s model)
        mv_engine_config_h mv_engine_config;
        err = mv_create_engine_config(&mv_engine_config);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Errors were occurred during creating the media engine "
-                               "config : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Errors were occurred during creating the media engine "
+                                "config : %i",
+                                err);
                goto clean_source;
        }
 
-       err = mv_engine_config_set_string_attribute(mv_engine_config,
-                                                                                       MV_BARCODE_GENERATE_ATTR_COLOR_FRONT,
-                                                                                       model.front_color);
+       err = mv_engine_config_set_string_attribute(mv_engine_config, MV_BARCODE_GENERATE_ATTR_COLOR_FRONT,
+                                                                                               model.front_color);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Errors were occurred during set string attribute to "
-                               "media engine config : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Errors were occurred during set string attribute to "
+                                "media engine config : %i",
+                                err);
                goto clean_all;
        }
 
-       err = mv_engine_config_set_string_attribute(mv_engine_config,
-                                                                                       MV_BARCODE_GENERATE_ATTR_COLOR_BACK,
-                                                                                       model.back_color);
+       err = mv_engine_config_set_string_attribute(mv_engine_config, MV_BARCODE_GENERATE_ATTR_COLOR_BACK,
+                                                                                               model.back_color);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Errors were occurred during set string attribute to "
-                               "media engine config : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Errors were occurred during set string attribute to "
+                                "media engine config : %i",
+                                err);
                goto clean_all;
        }
 
-       err = mv_barcode_generate_source(
-                       mv_engine_config,
-                       model.message,
-                       model.type,
-                       model.mode,
-                       model.ecc,
-                       model.version,
-                       source);
+       err = mv_barcode_generate_source(mv_engine_config, model.message, model.type, model.mode, model.ecc, model.version,
+                                                                        source);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Error occurred during generation barcode to the "
-                               "Media Vision source. Error code : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Error occurred during generation barcode to the "
+                                "Media Vision source. Error code : %i",
+                                err);
                goto clean_all;
        }
 
@@ -600,29 +576,37 @@ int generate_barcode_to_source(barcode_model_s model)
 
        err = mv_source_get_buffer(source, &data_buffer, &buffer_size);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Error occurred when trying to get buffer from "
-                               "Media Vision source. Error code : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Error occurred when trying to get buffer from "
+                                "Media Vision source. Error code : %i",
+                                err);
                goto clean_all;
        }
 
        err = mv_source_get_width(source, &image_width);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Error occurred when trying to get width of "
-                               "Media Vision source. Error code : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Error occurred when trying to get width of "
+                                "Media Vision source. Error code : %i",
+                                err);
                goto clean_all;
        }
 
        err = mv_source_get_height(source, &image_height);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Error occurred when trying to get height of "
-                               "Media Vision source. Error code : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Error occurred when trying to get height of "
+                                "Media Vision source. Error code : %i",
+                                err);
                goto clean_all;
        }
 
        err = mv_source_get_colorspace(source, &image_colorspace);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Error occurred when trying to get colorspace of "
-                               "Media Vision source. Error code : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Error occurred when trying to get colorspace of "
+                                "Media Vision source. Error code : %i",
+                                err);
                goto clean_all;
        }
 
@@ -638,7 +622,7 @@ int generate_barcode_to_source(barcode_model_s model)
                }
        } else {
                size_t size = strlen(model.file_name);
-               jpeg_file_name = (char*)malloc(size + 5);
+               jpeg_file_name = (char *) malloc(size + 5);
                if (jpeg_file_name == NULL) {
                        err = MEDIA_VISION_ERROR_OUT_OF_MEMORY;
                        goto clean_all;
@@ -648,8 +632,10 @@ int generate_barcode_to_source(barcode_model_s model)
        }
        err = save_image_from_buffer(jpeg_file_name, data_buffer, &image_data, 100);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Error occurred when try to save image from buffer."
-                               "Error code : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Error occurred when try to save image from buffer."
+                                "Error code : %i",
+                                err);
        }
        free(jpeg_file_name);
 clean_all:
@@ -657,24 +643,24 @@ clean_all:
                int err2 = mv_destroy_engine_config(mv_engine_config);
                if (MEDIA_VISION_ERROR_NONE != err2)
                        mvprintw(current_y++, MINX, "ERROR : Errors were occurred during destroying the media engine config : %i",
-                                  err2);
+                                        err2);
        } else {
                err = mv_destroy_engine_config(mv_engine_config);
                if (MEDIA_VISION_ERROR_NONE != err)
                        mvprintw(current_y++, MINX, "ERROR : Errors were occurred during destroying the media engine config : %i",
-                                  err);
+                                        err);
        }
 clean_source:
        if (MEDIA_VISION_ERROR_NONE != err) {
                int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2)
-                       mvprintw(current_y++, MINX, "ERROR : Error occurred when try to destroy Media Vision source. Error code : %i",
-                                  err2);
+                       mvprintw(current_y++, MINX,
+                                        "ERROR : Error occurred when try to destroy Media Vision source. Error code : %i", err2);
        } else {
                err = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err)
-                       mvprintw(current_y++, MINX, "ERROR : Error occurred when try to destroy Media Vision source. Error code : %i",
-                                  err);
+                       mvprintw(current_y++, MINX,
+                                        "ERROR : Error occurred when try to destroy Media Vision source. Error code : %i", err);
        }
        MEDIA_VISION_FUNCTION_LEAVE();
        return err;
@@ -688,8 +674,7 @@ int detect_barcode(barcode_model_s model, mv_rectangle_s roi)
        unsigned long buffer_size = 0;
        image_data_s image_data;
 
-       int err = load_image_to_buffer(
-                       model.file_name, &data_buffer, &buffer_size, &image_data);
+       int err = load_image_to_buffer(model.file_name, &data_buffer, &buffer_size, &image_data);
        if (MEDIA_VISION_ERROR_NONE != err) {
                mvprintw(current_y++, MINX, "ERROR : Errors were occurred during opening the file!!! code : %i", err);
 
@@ -727,13 +712,13 @@ int detect_barcode(barcode_model_s model, mv_rectangle_s roi)
 
        mv_engine_config_foreach_supported_attribute(_mv_engine_config_supported_attribute, mv_engine_config);
 
-       err = mv_engine_config_set_int_attribute(
-                       mv_engine_config,
-                       MV_BARCODE_DETECT_ATTR_TARGET,
-                       MV_BARCODE_DETECT_ATTR_TARGET_ALL);
+       err = mv_engine_config_set_int_attribute(mv_engine_config, MV_BARCODE_DETECT_ATTR_TARGET,
+                                                                                        MV_BARCODE_DETECT_ATTR_TARGET_ALL);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               mvprintw(current_y++, MINX, "ERROR : Errors were occurred during target attribute"
-                               "configuration : %i", err);
+               mvprintw(current_y++, MINX,
+                                "ERROR : Errors were occurred during target attribute"
+                                "configuration : %i",
+                                err);
        }
 
        mv_source_h source;
@@ -755,8 +740,8 @@ int detect_barcode(barcode_model_s model, mv_rectangle_s roi)
                return err;
        }
 
-       err = mv_source_fill_by_buffer(source, converted_buffer, converted_buffer_size,
-                       image_data.image_width, image_data.image_height, model.colorspace);
+       err = mv_source_fill_by_buffer(source, converted_buffer, converted_buffer_size, image_data.image_width,
+                                                                  image_data.image_height, model.colorspace);
        if (MEDIA_VISION_ERROR_NONE != err) {
                mvprintw(current_y++, MINX, "ERROR : Errors were occurred during filling the source!!! code : %i", err);
 
@@ -816,7 +801,7 @@ int input_string(const char *prompt, size_t max_len, char **string)
        size_t string_len = strlen(buffer);
        buffer[string_len] = '\0';
        size_t real_string_len = string_len + 1;
-       *string = (char*)malloc(real_string_len * sizeof(char));
+       *string = (char *) malloc(real_string_len * sizeof(char));
        if (*string == NULL) {
                MEDIA_VISION_FUNCTION_LEAVE();
                return -1;
@@ -826,7 +811,7 @@ int input_string(const char *prompt, size_t max_len, char **string)
        size_t str_len = strlen(*string);
 
        if (test_interval > 0)
-                sleep(test_interval);
+               sleep(test_interval);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
@@ -850,7 +835,7 @@ int input_size(const char *prompt, size_t max_size, size_t *size)
        int ret = (*size > max_size ? -1 : 0);
 
        if (test_interval > 0)
-                sleep(test_interval);
+               sleep(test_interval);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
@@ -874,7 +859,7 @@ int input_int(const char *prompt, int min_value, int max_value, int *value)
        int ret = (*value < min_value || *value > max_value ? -1 : 0);
 
        if (test_interval > 0)
-                sleep(test_interval);
+               sleep(test_interval);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
@@ -906,7 +891,7 @@ int show_menu(const char *title, const int *options, const char **names, int cnt
                mvaddstr(current_y++, MINX, "Your Choice : ");
                getstr(&buf);
                selection = atoi(buf);
-               condition = selection > options[cnt-1];
+               condition = selection > options[cnt - 1];
                if (condition)
                        mvprintw(current_y++, MINX, "You pressed invalid menu, try again");
 
@@ -914,7 +899,7 @@ int show_menu(const char *title, const int *options, const char **names, int cnt
        } while (condition);
 
        if (test_interval > 0)
-                sleep(test_interval);
+               sleep(test_interval);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
@@ -926,8 +911,8 @@ mv_barcode_type_e select_type(void)
        mv_barcode_type_e selected_type = MV_BARCODE_UNKNOWN;
        int sel_opt = 0;
        const int options[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 };
-       const char *names[] = { "qr", "upca", "upce", "ean8", "ean13", "code39", "code128", "interleave25",
-                                                               "ean2", "ean5", "code93", "codabar", "databar", "databarExpand" };
+       const char *names[] = { "qr",                   "upca", "upce", "ean8",   "ean13",       "code39",      "code128",
+                                                       "interleave25", "ean2", "ean5", "code93", "codabar", "databar", "databarExpand" };
 
        MEDIA_VISION_FUNCTION_ENTER();
 
@@ -1093,7 +1078,7 @@ int select_version(void)
 
        int sel_opt = 0;
        while (sel_opt == 0) {
-               const int options[] = {1, 40};
+               const int options[] = { 1, 40 };
                const char *names[] = { "1..", "..40" };
                sel_opt = show_menu("Select QR version : ", options, names, ARRAY_SIZE(options));
                if (sel_opt < 1 || sel_opt > 40)
@@ -1170,22 +1155,28 @@ int perform_detect()
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
-       barcode_model_s detect_model = {
-               MV_BARCODE_UNKNOWN,
-               MV_BARCODE_QR_ECC_UNAVAILABLE,
-               MV_BARCODE_QR_MODE_UNAVAILABLE,
-               0,
-               0, 0, 0,
-               MV_BARCODE_IMAGE_FORMAT_PNG,
-               MEDIA_VISION_COLORSPACE_INVALID,
-               NULL, NULL, NULL, NULL, NULL, NULL };
+       barcode_model_s detect_model = { MV_BARCODE_UNKNOWN,
+                                                                        MV_BARCODE_QR_ECC_UNAVAILABLE,
+                                                                        MV_BARCODE_QR_MODE_UNAVAILABLE,
+                                                                        0,
+                                                                        0,
+                                                                        0,
+                                                                        0,
+                                                                        MV_BARCODE_IMAGE_FORMAT_PNG,
+                                                                        MEDIA_VISION_COLORSPACE_INVALID,
+                                                                        NULL,
+                                                                        NULL,
+                                                                        NULL,
+                                                                        NULL,
+                                                                        NULL,
+                                                                        NULL };
 
        while (input_string("Input file name to be analyzed : ", 1024, &(detect_model.file_name)) == -1)
                mvprintw(current_y++, MINX, "Incorrect input! Try again.");
 
        LOGI("Barcode input image has been specified");
 
-       mv_rectangle_s roi = { {0, 0}, 0, 0 };
+       mv_rectangle_s roi = { { 0, 0 }, 0, 0 };
 
        while (input_int("Input x coordinate for ROI top left vertex : ", 0, 10000, &(roi.point.x)) == -1)
                mvprintw(current_y++, MINX, "Incorrect input! Try again.");
@@ -1206,28 +1197,20 @@ int perform_detect()
 
        LOGI("Barcode output image has been specified");
 
-       const int options[] = { MEDIA_VISION_COLORSPACE_Y800,
-                                                               MEDIA_VISION_COLORSPACE_I420,
-                                                               MEDIA_VISION_COLORSPACE_NV12,
-                                                               MEDIA_VISION_COLORSPACE_YV12,
-                                                               MEDIA_VISION_COLORSPACE_NV21,
-                                                               MEDIA_VISION_COLORSPACE_YUYV,
-                                                               MEDIA_VISION_COLORSPACE_UYVY,
-                                                               MEDIA_VISION_COLORSPACE_422P,
-                                                               MEDIA_VISION_COLORSPACE_RGB565,
-                                                               MEDIA_VISION_COLORSPACE_RGB888,
-                                                               MEDIA_VISION_COLORSPACE_RGBA };
-       const char *names[] = { "Y800", "I420", "NV12", "YV12", "NV21",
-                                                               "YUYV", "UYVY", "422P", "RGB565",
-                                                               "RGB888", "RGBA" };
+       const int options[] = { MEDIA_VISION_COLORSPACE_Y800,   MEDIA_VISION_COLORSPACE_I420,
+                                                       MEDIA_VISION_COLORSPACE_NV12,   MEDIA_VISION_COLORSPACE_YV12,
+                                                       MEDIA_VISION_COLORSPACE_NV21,   MEDIA_VISION_COLORSPACE_YUYV,
+                                                       MEDIA_VISION_COLORSPACE_UYVY,   MEDIA_VISION_COLORSPACE_422P,
+                                                       MEDIA_VISION_COLORSPACE_RGB565, MEDIA_VISION_COLORSPACE_RGB888,
+                                                       MEDIA_VISION_COLORSPACE_RGBA };
+       const char *names[] = { "Y800", "I420", "NV12", "YV12", "NV21", "YUYV", "UYVY", "422P", "RGB565", "RGB888", "RGBA" };
 
        while (true) {
                int sel_opt = show_menu("Select colorspace to test detector on : ", options, names, ARRAY_SIZE(options));
-               if (sel_opt < MEDIA_VISION_COLORSPACE_Y800 ||
-                       sel_opt > MEDIA_VISION_COLORSPACE_RGBA)
+               if (sel_opt < MEDIA_VISION_COLORSPACE_Y800 || sel_opt > MEDIA_VISION_COLORSPACE_RGBA)
                        continue;
 
-               detect_model.colorspace = (mv_colorspace_e)sel_opt;
+               detect_model.colorspace = (mv_colorspace_e) sel_opt;
                LOGI("User selection is %i", sel_opt);
                break;
        }
@@ -1252,15 +1235,21 @@ int perform_generate(void)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
-       barcode_model_s generate_model = {
-                       MV_BARCODE_UNKNOWN,
-                       MV_BARCODE_QR_ECC_UNAVAILABLE,
-                       MV_BARCODE_QR_MODE_UNAVAILABLE,
-                       0,
-                       0, 0, 0,
-                       MV_BARCODE_IMAGE_FORMAT_PNG,
-                       MEDIA_VISION_COLORSPACE_INVALID,
-                       NULL, NULL, NULL, NULL, NULL, NULL };
+       barcode_model_s generate_model = { MV_BARCODE_UNKNOWN,
+                                                                          MV_BARCODE_QR_ECC_UNAVAILABLE,
+                                                                          MV_BARCODE_QR_MODE_UNAVAILABLE,
+                                                                          0,
+                                                                          0,
+                                                                          0,
+                                                                          0,
+                                                                          MV_BARCODE_IMAGE_FORMAT_PNG,
+                                                                          MEDIA_VISION_COLORSPACE_INVALID,
+                                                                          NULL,
+                                                                          NULL,
+                                                                          NULL,
+                                                                          NULL,
+                                                                          NULL,
+                                                                          NULL };
 
        generation_fcn_e gen_fcn = select_gen_function();
        generate_model.type = select_type();
@@ -1317,10 +1306,8 @@ int perform_generate(void)
                LOGI("Barcode output file height has been specified");
        }
 
-       const int err =
-                       gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN ?
-                       generate_barcode_to_image(generate_model) :
-                       generate_barcode_to_source(generate_model);
+       const int err = gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN ? generate_barcode_to_image(generate_model) :
+                                                                                                                        generate_barcode_to_source(generate_model);
 
        if (generate_model.message != NULL)
                free(generate_model.message);
@@ -1353,16 +1340,16 @@ int perform_generate(void)
        return 0;
 }
 
-static error_t parse_opt (int key, char *arg, struct argp_state *state)
+static error_t parse_opt(int key, char *arg, struct argp_state *state)
 {
        struct arguments *arguments = state->input;
 
        switch (key) {
-    case 'm':
-               arguments->mode = arg ? atoi (arg) : 0;
-      break;
-    case 'i':
-               arguments->interval = arg ? atoi (arg) : 0;
+       case 'm':
+               arguments->mode = arg ? atoi(arg) : 0;
+               break;
+       case 'i':
+               arguments->interval = arg ? atoi(arg) : 0;
                if (arguments->interval > MAXINTERVAL) {
                        printf("WARN : value is out of range, reset to max value.\n");
                        arguments->interval = MAXINTERVAL;
@@ -1370,13 +1357,13 @@ static error_t parse_opt (int key, char *arg, struct argp_state *state)
                        printf("WARN : value is out of range, reset to min value.\n");
                        arguments->interval = MININTERVAL;
                }
-      break;
+               break;
        case ARGP_KEY_NO_ARGS:
                /* do nothing */
-    break;
-    default:
+               break;
+       default:
                return ARGP_ERR_UNKNOWN;
-    }
+       }
        return 0;
 }
 
@@ -1396,7 +1383,7 @@ int main(int argc, char *argv[])
        arguments.mode = 0;
        arguments.interval = 0;
 
-       argp_parse (&argp, argc, argv, 0, 0, &arguments);
+       argp_parse(&argp, argc, argv, 0, 0, &arguments);
        mode = arguments.mode;
        test_interval = arguments.interval;
 
index a4247dc..7dc302a 100644
@@ -17,10 +17,9 @@ using TestParams = tuple<string, string>;
        MV_CONFIG_PATH   \
        "/res/media-vision-barcodes.json"
 
-void barcode_detected_cb(mv_source_h source, mv_engine_config_h engine_cfg,
-                                                const mv_quadrangle_s *barcodes_locations,
-                                                const char *messages[], const mv_barcode_type_e *types,
-                                                int number_of_barcodes, void *user_data)
+void barcode_detected_cb(mv_source_h source, mv_engine_config_h engine_cfg, const mv_quadrangle_s *barcodes_locations,
+                                                const char *messages[], const mv_barcode_type_e *types, int number_of_barcodes,
+                                                void *user_data)
 {
        EXPECT_EQ(number_of_barcodes, 1);
        EXPECT_STREQ(messages[0], (const char *) user_data);
@@ -66,15 +65,13 @@ class TestBarcode : public testing::TestWithParam<TestParams>
 public:
        TestBarcode()
        {
-               EXPECT_EQ(mv_create_engine_config(&engine_cfg),
-                                 MEDIA_VISION_ERROR_NONE);
+               EXPECT_EQ(mv_create_engine_config(&engine_cfg), MEDIA_VISION_ERROR_NONE);
                EXPECT_EQ(mv_create_source(&mv_source), MEDIA_VISION_ERROR_NONE);
        }
        ~TestBarcode()
        {
                EXPECT_EQ(mv_destroy_source(mv_source), MEDIA_VISION_ERROR_NONE);
-               EXPECT_EQ(mv_destroy_engine_config(engine_cfg),
-                                 MEDIA_VISION_ERROR_NONE);
+               EXPECT_EQ(mv_destroy_engine_config(engine_cfg), MEDIA_VISION_ERROR_NONE);
        }
        mv_engine_config_h engine_cfg;
        mv_source_h mv_source;
@@ -85,21 +82,16 @@ TEST_P(TestBarcode, Detection)
        auto image_name = BARCODE_IMG_PREFIX + get<0>(GetParam());
        auto message = get<1>(GetParam());
 
-       EXPECT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(
-                                         image_name.c_str(), mv_source),
+       EXPECT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(image_name.c_str(), mv_source),
                          MEDIA_VISION_ERROR_NONE);
 
        mv_rectangle_s roi = { { 0, 0 }, 0, 0 };
        mv_source_get_width(mv_source, (unsigned int *) &(roi.width));
	mv_source_get_height(mv_source, (unsigned int *) &(roi.height));
-       mv_engine_config_set_int_attribute(engine_cfg,
-                                                                          MV_BARCODE_DETECT_ATTR_TARGET,
-                                                                          MV_BARCODE_DETECT_ATTR_TARGET_ALL);
+       mv_engine_config_set_int_attribute(engine_cfg, MV_BARCODE_DETECT_ATTR_TARGET, MV_BARCODE_DETECT_ATTR_TARGET_ALL);
 
-       EXPECT_EQ(mv_barcode_detect(mv_source, engine_cfg, roi, barcode_detected_cb,
-                                                               (void *) message.c_str()),
+       EXPECT_EQ(mv_barcode_detect(mv_source, engine_cfg, roi, barcode_detected_cb, (void *) message.c_str()),
                          MEDIA_VISION_ERROR_NONE);
 }
 
-INSTANTIATE_TEST_CASE_P(GeneralAndSpecial, TestBarcode,
-                                               testing::ValuesIn(ReadDetPositive1()));
\ No newline at end of file
+INSTANTIATE_TEST_CASE_P(GeneralAndSpecial, TestBarcode, testing::ValuesIn(ReadDetPositive1()));
\ No newline at end of file
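
The changes in this patch are purely mechanical: spaces inside braced initializer lists, a space after C-style casts, namespace/class/struct braces moved onto their own line, and long argument lists re-wrapped to a wider column limit. As a reading aid, here is a minimal .clang-format sketch consistent with this output; every option below is inferred from the reformatted code, not quoted from the commit's actual .clang-format, which may differ:

    # Inferred options only; the file shipped in this commit may differ.
    IndentWidth: 4
    UseTab: ForContinuationAndIndentation
    ColumnLimit: 120              # long calls are joined up to ~120 columns
    Cpp11BracedListStyle: false   # {1, 40} becomes { 1, 40 }
    SpaceAfterCStyleCast: true    # (mv_colorspace_e) sel_opt
    NamespaceIndentation: None    # namespace bodies stay at column 0
    AlignEscapedNewlines: Left    # macro continuation backslashes line up
    BreakBeforeBraces: Custom
    BraceWrapping:
      AfterNamespace: true
      AfterClass: true
      AfterStruct: true
      AfterEnum: true
      AfterFunction: true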
index a2e01bd..f5e562d 100644
  * @brief  ImageHelper class definition.
  */
 
-namespace cv {
-       template<typename _Tp> class Scalar_;
-       typedef Scalar_<double> Scalar;
+namespace cv
+{
+template<typename _Tp> class Scalar_;
+typedef Scalar_<double> Scalar;
 
-       class VideoCapture;
-       class VideoWriter;
+class VideoCapture;
+class VideoWriter;
 }
 
-namespace MediaVision {
-namespace Common {
-
+namespace MediaVision
+{
+namespace Common
+{
 /**
  * @class ImageHelper
  * @brief Helper class that provides set of useful methods
  *        for image management.
  */
-class ImageHelper {
+class ImageHelper
+{
 public:
        /**
         * @brief Structure to keep information about width, height and colorspace of an image.
         */
-       struct ImageData {
-               unsigned int imageWidth;                 /**< Image width */
-               unsigned int imageHeight;                /**< Image height */
-               mv_colorspace_e imageColorspace;         /**< Image colorspace */
+       struct ImageData
+       {
+               unsigned int imageWidth; /**< Image width */
+               unsigned int imageHeight; /**< Image height */
+               mv_colorspace_e imageColorspace; /**< Image colorspace */
        };
 
        /**
@@ -71,11 +75,8 @@ public:
          * @see ImageHelper::saveImageFromBuffer()
          * @see ImageHelper::destroyLoadedBuffer()
          */
-       static int loadImageToBuffer(
-                       const char *filePath,
-                       unsigned char **pDataBuffer,
-                       unsigned long *pBufferSize,
-                       ImageData *pImageData);
+       static int loadImageToBuffer(const char *filePath, unsigned char **pDataBuffer, unsigned long *pBufferSize,
+                                                                ImageData *pImageData);
 
        static int loadImageToSource(const char *filePath, mv_source_h source);
 
@@ -93,11 +94,8 @@ public:
         *
         * @see ImageHelper::loadImageToBuffer()
         */
-       static int saveImageFromBuffer(
-                       const char *filePath,
-                       unsigned char *pDataBuffer,
-                       const ImageData& imageData,
-                       int quality = 100);
+       static int saveImageFromBuffer(const char *filePath, unsigned char *pDataBuffer, const ImageData &imageData,
+                                                                  int quality = 100);
 
        /**
         * @brief Destroys loaded buffer by loadImageToBuffer().
@@ -134,15 +132,9 @@ public:
         *                                    which will be used for rectangle drawing
         * @return @c 0 on success, otherwise a negative error value
         */
-       static int drawRectangleOnBuffer(
-                       int topLeftVertexX,
-                       int topLeftVertexY,
-                       int bottomRightVertexX,
-                       int bottomRightVertexY,
-                       int thickness,
-                       const cv::Scalar& color,
-                       const ImageData& imageData,
-                       unsigned char *pDataBuffer);
+       static int drawRectangleOnBuffer(int topLeftVertexX, int topLeftVertexY, int bottomRightVertexX,
+                                                                        int bottomRightVertexY, int thickness, const cv::Scalar &color,
+                                                                        const ImageData &imageData, unsigned char *pDataBuffer);
 
        /**
         * @brief Draws the quadrangle of specified size on the image data buffer.
@@ -159,12 +151,8 @@ public:
         *                                    which will be used for quadrangle drawing
         * @return @c 0 on success, otherwise a negative error value
         */
-       static int drawQuadrangleOnBuffer(
-                       mv_quadrangle_s location,
-                       int thickness,
-                       const cv::Scalar& color,
-                       const ImageData& imageData,
-                       unsigned char *pDataBuffer);
+       static int drawQuadrangleOnBuffer(mv_quadrangle_s location, int thickness, const cv::Scalar &color,
+                                                                         const ImageData &imageData, unsigned char *pDataBuffer);
 
        /**
         * @brief Converts image data to the image data of RGB888 colorspace.
@@ -179,10 +167,8 @@ public:
         *                            a result of the conversion
         * @return @c 0 on success, otherwise a negative error value
         */
-       static int convertBufferToRGB888(
-                       const unsigned char *pInBuffer,
-                       const ImageData& imageData,
-                       unsigned char **pOutBuffer);
+       static int convertBufferToRGB888(const unsigned char *pInBuffer, const ImageData &imageData,
+                                                                        unsigned char **pOutBuffer);
 
        /**
         * @brief Determines number of channels (components) for the colorspace.
@@ -193,9 +179,7 @@ public:
         * @param [out] pComponentsNumber    Number of components to be determined
         * @return @c 0 on success, otherwise a negative error value
         */
-       static int getNumberOfComponents(
-                       mv_colorspace_e colorspace,
-                       int *pComponentsNumber);
+       static int getNumberOfComponents(mv_colorspace_e colorspace, int *pComponentsNumber);
 
 private:
        /**
@@ -211,10 +195,7 @@ private:
         *                            data in RGB888 colorspace format
         * @return @c 0 on success, otherwise a negative error value
         */
-       static int convertY800ToRGB(
-                       const unsigned char *pInBuffer,
-                       const ImageData& imageData,
-                       unsigned char **pOutBuffer);
+       static int convertY800ToRGB(const unsigned char *pInBuffer, const ImageData &imageData, unsigned char **pOutBuffer);
 };
 
 } /* Common */
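
The header above now carries the complete ImageHelper surface on single-line signatures. As a reading aid, a minimal usage sketch of the buffer API follows, assuming the testsuite's ImageHelper.h and mv_common.h headers are available; the file path is a placeholder and error handling is reduced to return-code checks:

    #include "ImageHelper.h"
    #include <mv_common.h>

    using MediaVision::Common::ImageHelper;

    /* Sketch: load an image into a raw buffer, convert it to RGB888, clean up. */
    int convert_to_rgb888_example(void)
    {
        unsigned char *buffer = NULL;
        unsigned long bufferSize = 0;
        ImageHelper::ImageData data;

        int err = ImageHelper::loadImageToBuffer("sample.png", &buffer, &bufferSize, &data);
        if (err != MEDIA_VISION_ERROR_NONE)
            return err;

        unsigned char *rgb = NULL;
        /* convertBufferToRGB888() handles only Y800 and RGB888 inputs. */
        err = ImageHelper::convertBufferToRGB888(buffer, data, &rgb);
        if (err == MEDIA_VISION_ERROR_NONE)
            delete[] rgb; /* the helper allocates the output with new[] */

        ImageHelper::destroyLoadedBuffer(buffer);
        return err;
    }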
index fe4ce92..16869f2 100644
@@ -42,10 +42,11 @@ extern "C" {
  *
  * @since_tizen 3.0
  */
-typedef struct {
-       unsigned int image_width;                 /**< Image width */
-       unsigned int image_height;                /**< Image height */
-       mv_colorspace_e image_colorspace;         /**< Image colorspace */
+typedef struct
+{
+       unsigned int image_width; /**< Image width */
+       unsigned int image_height; /**< Image height */
+       mv_colorspace_e image_colorspace; /**< Image colorspace */
 } image_data_s;
 
 /**
@@ -65,11 +66,8 @@ typedef struct {
  * @see save_image_from_buffer()
  * @see destroy_loaded_buffer()
  */
-int load_image_to_buffer(
-               const char *file_path,
-               unsigned char **data_buffer,
-               unsigned long *buffer_size,
-               image_data_s *image_data);
+int load_image_to_buffer(const char *file_path, unsigned char **data_buffer, unsigned long *buffer_size,
+                                                image_data_s *image_data);
 
 /**
  * @brief Saves image stored into @a pDataBuffer to the file in jpeg format.
@@ -85,11 +83,8 @@ int load_image_to_buffer(
  *
  * @see load_image_to_buffer()
  */
-int save_image_from_buffer(
-               const char *file_path,
-               unsigned char *data_buffer,
-               const image_data_s *image_data,
-               int quality);
+int save_image_from_buffer(const char *file_path, unsigned char *data_buffer, const image_data_s *image_data,
+                                                  int quality);
 
 /**
  * @brief Destroys loaded buffer by load_image_to_buffer().
@@ -120,15 +115,8 @@ int destroy_loaded_buffer(unsigned char *data_buffer);
  *                             be used for rectangle drawing
  * @return @c 0 on success, otherwise a negative error value
  */
-int draw_rectangle_on_buffer(
-               int tl_vertex_x,
-               int tl_vertex_y,
-               int br_vertex_x,
-               int br_vertex_y,
-               int thickness,
-               const int rgb_color[3],
-               const image_data_s *image_data,
-               unsigned char *data_buffer);
+int draw_rectangle_on_buffer(int tl_vertex_x, int tl_vertex_y, int br_vertex_x, int br_vertex_y, int thickness,
+                                                        const int rgb_color[3], const image_data_s *image_data, unsigned char *data_buffer);
 
 /**
  * @brief Draws the quadrangle of specified size on the image data buffer.
@@ -144,12 +132,8 @@ int draw_rectangle_on_buffer(
  *                                be used for quadrangle drawing
  * @return @c 0 on success, otherwise a negative error value
  */
-int draw_quadrangle_on_buffer(
-               mv_quadrangle_s location,
-               int thickness,
-               const int rgb_color[3],
-               const image_data_s *image_data,
-               unsigned char *data_buffer);
+int draw_quadrangle_on_buffer(mv_quadrangle_s location, int thickness, const int rgb_color[3],
+                                                         const image_data_s *image_data, unsigned char *data_buffer);
 
 /**
  * @brief Converts image data to the image data of RGB888 colorspace.
@@ -164,10 +148,8 @@ int draw_quadrangle_on_buffer(
  *                            a result of the conversion
  * @return @c 0 on success, otherwise a negative error value
  */
-int convert_buffer_to_RGB888(
-               const unsigned char *in_buffer,
-               const image_data_s *image_data,
-               unsigned char **out_buffer);
+int convert_buffer_to_RGB888(const unsigned char *in_buffer, const image_data_s *image_data,
+                                                        unsigned char **out_buffer);
 
 /**
  * @brief Determines number of channels (components) for the colorspace.
@@ -178,9 +160,7 @@ int convert_buffer_to_RGB888(
  * @param [out] components_number    Number of components to be determined
  * @return @c 0 on success, otherwise a negative error value
  */
-int get_number_of_components(
-               mv_colorspace_e colorspace,
-               int *components_number);
+int get_number_of_components(mv_colorspace_e colorspace, int *components_number);
 
 #ifdef __cplusplus
 }
index 66c164f..b3f8550 100644
  * @brief  The ImageHelper class methods implementation.
  */
 
-namespace MediaVision {
-namespace Common {
-
-namespace {
-
+namespace MediaVision
+{
+namespace Common
+{
+namespace
+{
 const int OPEN_CV_CHANNELS = 3;
 const mv_colorspace_e OPEN_CV_COLOR = MEDIA_VISION_COLORSPACE_RGB888;
 const int QUADRANGLE_VERTICES = 4;
@@ -54,14 +55,10 @@ std::vector<std::string> getJPGExtensions()
 
 } /* anonymous namespace */
 
-int ImageHelper::loadImageToBuffer(
-               const char *filePath,
-               unsigned char **pDataBuffer,
-               unsigned long *pBufferSize,
-               ImageData *pImageData)
+int ImageHelper::loadImageToBuffer(const char *filePath, unsigned char **pDataBuffer, unsigned long *pBufferSize,
+                                                                  ImageData *pImageData)
 {
-       if (filePath == NULL || pDataBuffer == NULL ||
-               pBufferSize == NULL || pImageData == NULL)
+       if (filePath == NULL || pDataBuffer == NULL || pBufferSize == NULL || pImageData == NULL)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
 
        cv::Mat image;
@@ -96,18 +93,14 @@ int ImageHelper::loadImageToSource(const char *filePath, mv_source_h source)
        cv::Mat image = cv::imread(filePath);
        cv::cvtColor(image, image, CV_BGR2RGB);
 
-       if (!(mediaSource->fill(image.data, image.total() * image.elemSize(),
-                                                       image.cols, image.rows, OPEN_CV_COLOR)))
+       if (!(mediaSource->fill(image.data, image.total() * image.elemSize(), image.cols, image.rows, OPEN_CV_COLOR)))
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int ImageHelper::saveImageFromBuffer(
-               const char *filePath,
-               unsigned char *pDataBuffer,
-               const ImageData& imageData,
-               int quality)
+int ImageHelper::saveImageFromBuffer(const char *filePath, unsigned char *pDataBuffer, const ImageData &imageData,
+                                                                        int quality)
 {
        if (filePath == NULL || pDataBuffer == NULL)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -124,12 +117,9 @@ int ImageHelper::saveImageFromBuffer(
                for (size_t extNum = 0; extNum < JPG_EXTENSIONS.size(); ++extNum) {
                        if (resultFilePath.size() >= JPG_EXTENSIONS[extNum].size()) {
                                std::string givenExtension = resultFilePath.substr(
-                                               resultFilePath.length() - JPG_EXTENSIONS[extNum].size(),
-                                               JPG_EXTENSIONS[extNum].size());
+                                               resultFilePath.length() - JPG_EXTENSIONS[extNum].size(), JPG_EXTENSIONS[extNum].size());
 
-                               std::transform(
-                                       givenExtension.begin(), givenExtension.end(),
-                                       givenExtension.begin(), ::tolower);
+                               std::transform(givenExtension.begin(), givenExtension.end(), givenExtension.begin(), ::tolower);
 
                                if (givenExtension == JPG_EXTENSIONS[extNum]) {
                                        rightExtensionFlag = true;
@@ -205,8 +195,7 @@ int ImageHelper::saveImageFromBuffer(
        }
 
        const int depth = CV_8U;
-       cv::Mat cvImage(cv::Size(width, height),
-                                       CV_MAKETYPE(depth, channelsNumber), pDataBuffer);
+       cv::Mat cvImage(cv::Size(width, height), CV_MAKETYPE(depth, channelsNumber), pDataBuffer);
        cv::Mat cvBGRImage;
        cv::cvtColor(cvImage, cvBGRImage, conversionType);
 
@@ -226,75 +215,53 @@ int ImageHelper::destroyLoadedBuffer(unsigned char *pDataBuffer)
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       delete [] pDataBuffer;
+       delete[] pDataBuffer;
        pDataBuffer = NULL;
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int ImageHelper::drawRectangleOnBuffer(
-               int topLeftVertexX,
-               int topLeftVertexY,
-               int bottomRightVertexX,
-               int bottomRightVertexY,
-               int thickness,
-               const cv::Scalar& color,
-               const ImageData& imageData,
-               unsigned char *pDataBuffer)
+int ImageHelper::drawRectangleOnBuffer(int topLeftVertexX, int topLeftVertexY, int bottomRightVertexX,
+                                                                          int bottomRightVertexY, int thickness, const cv::Scalar &color,
+                                                                          const ImageData &imageData, unsigned char *pDataBuffer)
 {
        if (NULL == pDataBuffer)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
 
        cv::Mat cvImage(imageData.imageHeight, imageData.imageWidth, CV_8UC(OPEN_CV_CHANNELS), pDataBuffer);
-       cv::rectangle(
-               cvImage,
-               cv::Point(topLeftVertexX, topLeftVertexY),
-               cv::Point(bottomRightVertexX, bottomRightVertexY),
-               color,
-               thickness);
+       cv::rectangle(cvImage, cv::Point(topLeftVertexX, topLeftVertexY), cv::Point(bottomRightVertexX, bottomRightVertexY),
+                                 color, thickness);
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int ImageHelper::drawQuadrangleOnBuffer(
-               mv_quadrangle_s location,
-               int thickness,
-               const cv::Scalar& color,
-               const ImageData& imageData,
-               unsigned char *pDataBuffer)
+int ImageHelper::drawQuadrangleOnBuffer(mv_quadrangle_s location, int thickness, const cv::Scalar &color,
+                                                                               const ImageData &imageData, unsigned char *pDataBuffer)
 {
        if (NULL == pDataBuffer)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
 
        cv::Mat cvImage(imageData.imageHeight, imageData.imageWidth, CV_8UC(OPEN_CV_CHANNELS), pDataBuffer);
        for (int i = 0; i < QUADRANGLE_VERTICES; ++i) {
-               cv::line(
-                       cvImage,
-                       cv::Point(location.points[i].x, location.points[i].y),
-                       cv::Point(location.points[(i + 1) % QUADRANGLE_VERTICES].x,
-                               location.points[(i + 1) % QUADRANGLE_VERTICES].y),
-                       color,
-                       thickness);
+               cv::line(cvImage, cv::Point(location.points[i].x, location.points[i].y),
+                                cv::Point(location.points[(i + 1) % QUADRANGLE_VERTICES].x,
+                                                  location.points[(i + 1) % QUADRANGLE_VERTICES].y),
+                                color, thickness);
        }
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int ImageHelper::convertBufferToRGB888(
-               const unsigned char *pInBuffer,
-               const ImageData& imageData,
-               unsigned char **pOutBuffer)
+int ImageHelper::convertBufferToRGB888(const unsigned char *pInBuffer, const ImageData &imageData,
+                                                                          unsigned char **pOutBuffer)
 {
        /* todo: support more colorspaces: */
        switch (imageData.imageColorspace) {
        case MEDIA_VISION_COLORSPACE_Y800:
                return convertY800ToRGB(pInBuffer, imageData, pOutBuffer);
-       case MEDIA_VISION_COLORSPACE_RGB888:
-       {
+       case MEDIA_VISION_COLORSPACE_RGB888: {
                int numberOfComponents = 0;
-               getNumberOfComponents(
-                       MEDIA_VISION_COLORSPACE_RGB888, &numberOfComponents);
-               const int dataSize =
-                       imageData.imageHeight * imageData.imageWidth * numberOfComponents;
+               getNumberOfComponents(MEDIA_VISION_COLORSPACE_RGB888, &numberOfComponents);
+               const int dataSize = imageData.imageHeight * imageData.imageWidth * numberOfComponents;
                (*pOutBuffer) = new unsigned char[dataSize];
                std::memcpy(*pOutBuffer, pInBuffer, dataSize);
                return MEDIA_VISION_ERROR_NONE;
@@ -306,9 +273,7 @@ int ImageHelper::convertBufferToRGB888(
        return MEDIA_VISION_ERROR_NOT_SUPPORTED;
 }
 
-int ImageHelper::getNumberOfComponents(
-               mv_colorspace_e colorspace,
-               int *pComponentsNumber)
+int ImageHelper::getNumberOfComponents(mv_colorspace_e colorspace, int *pComponentsNumber)
 {
        /* todo: support more colorspaces: */
        switch (colorspace) {
@@ -325,10 +290,8 @@ int ImageHelper::getNumberOfComponents(
        return MEDIA_VISION_ERROR_NOT_SUPPORTED;
 }
 
-int ImageHelper::convertY800ToRGB(
-               const unsigned char *pInBuffer,
-               const ImageData& imageData,
-               unsigned char **pOutBuffer)
+int ImageHelper::convertY800ToRGB(const unsigned char *pInBuffer, const ImageData &imageData,
+                                                                 unsigned char **pOutBuffer)
 {
        if (imageData.imageColorspace != MEDIA_VISION_COLORSPACE_Y800)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -339,10 +302,10 @@ int ImageHelper::convertY800ToRGB(
        const int dataSize = inDataSize * numberOfComponents;
        (*pOutBuffer) = new unsigned char[dataSize];
        for (int i = 0; i < inDataSize; ++i) {
-               int pixelStartIndex = i*numberOfComponents;
-               (*pOutBuffer)[pixelStartIndex]   = pInBuffer[i];
-               (*pOutBuffer)[pixelStartIndex+1] = pInBuffer[i];
-               (*pOutBuffer)[pixelStartIndex+2] = pInBuffer[i];
+               int pixelStartIndex = i * numberOfComponents;
+               (*pOutBuffer)[pixelStartIndex] = pInBuffer[i];
+               (*pOutBuffer)[pixelStartIndex + 1] = pInBuffer[i];
+               (*pOutBuffer)[pixelStartIndex + 2] = pInBuffer[i];
        }
        return MEDIA_VISION_ERROR_NONE;
 }
index c4d9698..0b4db2a 100644
@@ -46,12 +46,8 @@ ImageHelper::ImageData convertToCppData(image_data_s data)
        return ret;
 }
 
-
-int load_image_to_buffer(
-       const char *file_path,
-       unsigned char **data_buffer,
-       unsigned long *buffer_size,
-       image_data_s *image_data)
+int load_image_to_buffer(const char *file_path, unsigned char **data_buffer, unsigned long *buffer_size,
+                                                image_data_s *image_data)
 {
        if (image_data == NULL)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -66,11 +62,8 @@ int load_image_to_buffer(
        return err;
 }
 
-int save_image_from_buffer(
-       const char *file_path,
-       unsigned char *data_buffer,
-       const image_data_s *image_data,
-       int quality)
+int save_image_from_buffer(const char *file_path, unsigned char *data_buffer, const image_data_s *image_data,
+                                                  int quality)
 {
        if (image_data == NULL)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -84,15 +77,8 @@ int destroy_loaded_buffer(unsigned char *data_buffer)
        return ImageHelper::destroyLoadedBuffer(data_buffer);
 }
 
-int draw_rectangle_on_buffer(
-       int tl_vertex_x,
-       int tl_vertex_y,
-       int br_vertex_x,
-       int br_vertex_y,
-       int thickness,
-       const int rgb_color[3],
-       const image_data_s *image_data,
-       unsigned char *data_buffer)
+int draw_rectangle_on_buffer(int tl_vertex_x, int tl_vertex_y, int br_vertex_x, int br_vertex_y, int thickness,
+                                                        const int rgb_color[3], const image_data_s *image_data, unsigned char *data_buffer)
 {
        if (image_data == NULL)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -101,20 +87,12 @@ int draw_rectangle_on_buffer(
 
        cv::Scalar color(rgb_color[2], rgb_color[1], rgb_color[0]);
 
-       return ImageHelper::drawRectangleOnBuffer(
-                       tl_vertex_x, tl_vertex_y,
-                       br_vertex_x, br_vertex_y,
-                       thickness,
-                       color,
-                       imageData, data_buffer);
+       return ImageHelper::drawRectangleOnBuffer(tl_vertex_x, tl_vertex_y, br_vertex_x, br_vertex_y, thickness, color,
+                                                                                         imageData, data_buffer);
 }
 
-int draw_quadrangle_on_buffer(
-               mv_quadrangle_s location,
-               int thickness,
-               const int rgb_color[3],
-               const image_data_s *image_data,
-               unsigned char *data_buffer)
+int draw_quadrangle_on_buffer(mv_quadrangle_s location, int thickness, const int rgb_color[3],
+                                                         const image_data_s *image_data, unsigned char *data_buffer)
 {
        if (image_data == NULL)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -123,18 +101,10 @@ int draw_quadrangle_on_buffer(
 
        cv::Scalar color(rgb_color[2], rgb_color[1], rgb_color[0]);
 
-       return ImageHelper::drawQuadrangleOnBuffer(
-                       location,
-                       thickness,
-                       color,
-                       imageData,
-                       data_buffer);
+       return ImageHelper::drawQuadrangleOnBuffer(location, thickness, color, imageData, data_buffer);
 }
 
-int convert_buffer_to_RGB888(
-               const unsigned char *in_buffer,
-               const image_data_s *image_data,
-               unsigned char **out_buffer)
+int convert_buffer_to_RGB888(const unsigned char *in_buffer, const image_data_s *image_data, unsigned char **out_buffer)
 {
        if (image_data == NULL)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -143,9 +113,7 @@ int convert_buffer_to_RGB888(
        return ImageHelper::convertBufferToRGB888(in_buffer, imageData, out_buffer);
 }
 
-int get_number_of_components(
-               mv_colorspace_e colorspace,
-               int *components_number)
+int get_number_of_components(mv_colorspace_e colorspace, int *components_number)
 {
        return ImageHelper::getNumberOfComponents(colorspace, components_number);
 }
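
These C wrappers map one-to-one onto the C++ helpers; the only subtlety is that the incoming rgb_color triple is reordered into OpenCV's BGR channel order before drawing. A short sketch of the drawing path, assuming the testsuite's C image-helper header is included and buffer already holds a 640x480 RGB888 image filled by load_image_to_buffer():

    /* Sketch: draw a red, 2 px thick rectangle; the geometry is made up. */
    static int draw_marker(unsigned char *buffer)
    {
        const int red[3] = { 255, 0, 0 }; /* reordered to BGR by the wrapper */
        image_data_s data = { 640, 480, MEDIA_VISION_COLORSPACE_RGB888 };

        return draw_rectangle_on_buffer(10, 10, 100, 100, 2, red, &data, buffer);
    }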
index 29a1385..44d87a0 100644
 #include <stdio.h>
 #include <stdlib.h>
 
-void print_fail_result(
-               const char *action_name,
-               int action_return_value)
+void print_fail_result(const char *action_name, int action_return_value)
 {
-       printf(TEXT_RED
-                       "Error with code %i was occurred during action '%s'"
-                       TEXT_RESET "\n",
-                       action_return_value,
-                       action_name);
+       printf(TEXT_RED "Error with code %i was occurred during action '%s'" TEXT_RESET "\n", action_return_value,
+                  action_name);
 }
 
 void print_done_result(const char *action_name)
 {
-       printf(TEXT_YELLOW
-                       "Action '%s' was finished"
-                       TEXT_RESET "\n",
-                       action_name);
+       printf(TEXT_YELLOW "Action '%s' was finished" TEXT_RESET "\n", action_name);
 }
 
 void print_success_result(const char *action_name)
 {
-       printf(TEXT_GREEN
-                       "Action '%s' was finished successfully"
-                       TEXT_RESET
-                       "\n", action_name);
+       printf(TEXT_GREEN "Action '%s' was finished successfully" TEXT_RESET "\n", action_name);
 }
 
-void print_action_result(
-               const char *action_name,
-               int action_return_value,
-               notification_type_e notification_type_e)
+void print_action_result(const char *action_name, int action_return_value, notification_type_e notification_type_e)
 {
        switch (notification_type_e) {
        case FAIL_OR_SUCCESSS:
@@ -132,11 +118,7 @@ int input_int(const char *prompt, int min_value, int max_value, int *value)
        return (*value < min_value || *value > max_value ? -1 : 0);
 }
 
-int input_double(
-               const char *prompt,
-               double min_value,
-               double max_value,
-               double *value)
+int input_double(const char *prompt, double min_value, double max_value, double *value)
 {
        printf("\n");
        printf("%s ", prompt);
@@ -154,7 +136,7 @@ int input_double(
 
 bool show_confirm_dialog(const char *title)
 {
-       const int options[2] = {1, 2};
+       const int options[2] = { 1, 2 };
        const char *names[2] = { "No", "Yes" };
 
        bool answer = false;
@@ -179,11 +161,7 @@ bool show_confirm_dialog(const char *title)
        return answer;
 }
 
-int show_menu(
-               const char *title,
-               const int *options,
-               const char **names,
-               int number_of_option)
+int show_menu(const char *title, const int *options, const char **names, int number_of_option)
 {
        if (NULL == title || NULL == options || NULL == names || 0 >= number_of_option)
                return -1;
@@ -237,35 +215,26 @@ int show_menu(
        return selection;
 }
 
-int load_mv_source_from_file(
-               const char *path_to_image,
-               mv_source_h source)
+int load_mv_source_from_file(const char *path_to_image, mv_source_h source)
 {
        unsigned char *data_buffer = NULL;
        unsigned long buffer_size = 0;
        image_data_s image_data;
 
-       int err = load_image_to_buffer(path_to_image, &data_buffer,
-                                                                       &buffer_size, &image_data);
+       int err = load_image_to_buffer(path_to_image, &data_buffer, &buffer_size, &image_data);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf("ERROR: Errors were occurred during opening file!!! code: %i\n",
-                       err);
+               printf("ERROR: Errors were occurred during opening file!!! code: %i\n", err);
                if (NULL != data_buffer)
                        destroy_loaded_buffer(data_buffer);
 
                return err;
        }
 
-       err = mv_source_fill_by_buffer(
-                       source, data_buffer,
-                       buffer_size,
-                       image_data.image_width,
-                       image_data.image_height,
-                       image_data.image_colorspace);
+       err = mv_source_fill_by_buffer(source, data_buffer, buffer_size, image_data.image_width, image_data.image_height,
+                                                                  image_data.image_colorspace);
 
        if (MEDIA_VISION_ERROR_NONE != err)
-               printf("ERROR: Errors were occurred during filling source!!! code %i\n",
-                       err);
+               printf("ERROR: Errors were occurred during filling source!!! code %i\n", err);
 
        if (NULL != data_buffer)
                destroy_loaded_buffer(data_buffer);
index 66876e0..5b0bc7c 100644
@@ -21,7 +21,8 @@
 
 #include <stddef.h>
 
-typedef enum {
+typedef enum
+{
        FAIL_OR_SUCCESSS,
        FAIL_OR_DONE
 } notification_type_e;
@@ -33,9 +34,7 @@ typedef enum {
  * @param [in] action_name           Name of action which result will be printed
  * @param [in] action_return_value   Return value of action
  */
-void print_fail_result(
-               const char *action_name,
-               int action_return_value);
+void print_fail_result(const char *action_name, int action_return_value);
 
 /**
  * @brief Prints success result of action.
@@ -61,10 +60,7 @@ void print_success_result(const char *action_name);
  * @param [in] action_return_value   Return value of action
  * @param [in] notification_type_e   Type of notification
  */
-void print_action_result(
-               const char *action_name,
-               int action_return_value,
-               notification_type_e notification_type_e);
+void print_action_result(const char *action_name, int action_return_value, notification_type_e notification_type_e);
 
 /**
  * @brief Gets string from console.
@@ -136,16 +132,9 @@ bool show_confirm_dialog(const char *title);
  * @return The selected item positive number from options array on success,
  *         otherwise a negative error value
  */
-int show_menu(
-               const char *title,
-               const int *options,
-               const char **names,
-               int number_of_option);
+int show_menu(const char *title, const int *options, const char **names, int number_of_option);
 
-int show_menu_linear(
-               const char *title,
-               const char **menu,
-               size_t len_menu);
+int show_menu_linear(const char *title, const char **menu, size_t len_menu);
 
 int show_menu_yes_or_no(const char *title);
 
@@ -157,8 +146,6 @@ int show_menu_yes_or_no(const char *title);
  * @param [in/out] source       The handler to media source which will be filled
  * @return @c 0 on success, otherwise a negative error value
  */
-int load_mv_source_from_file(
-               const char *path_to_image,
-               mv_source_h source);
+int load_mv_source_from_file(const char *path_to_image, mv_source_h source);
 
 #endif /* __MEDIA_VISION_MV_TESTSUITE_COMMON_H__ */
index 2af638b..31aa0e4 100644
 
 /* #define ROOTSTRAP_OUT // enables logs to console */
 
-#define TEXT_RED     "\x1b[31m"
-#define TEXT_GREEN   "\x1b[32m"
-#define TEXT_YELLOW  "\x1b[33m"
-#define TEXT_BLUE    "\x1b[34m"
+#define TEXT_RED "\x1b[31m"
+#define TEXT_GREEN "\x1b[32m"
+#define TEXT_YELLOW "\x1b[33m"
+#define TEXT_BLUE "\x1b[34m"
 #define TEXT_MAGENTA "\x1b[35m"
-#define TEXT_CYAN    "\x1b[36m"
-#define TEXT_RESET   "\x1b[0m"
+#define TEXT_CYAN "\x1b[36m"
+#define TEXT_RESET "\x1b[0m"
 
 #ifdef ROOTSTRAP_OUT
 
-#define LOGD(...)                                 \
-do {                                              \
-       printf("<%s:%d>", __FUNCTION__, __LINE__);    \
-       printf(TEXT_CYAN);                            \
-       printf(__VA_ARGS__);                          \
-       printf(TEXT_RESET "\n");                      \
-} while (0)
+#define LOGD(...)                                  \
+       do {                                           \
+               printf("<%s:%d>", __FUNCTION__, __LINE__); \
+               printf(TEXT_CYAN);                         \
+               printf(__VA_ARGS__);                       \
+               printf(TEXT_RESET "\n");                   \
+       } while (0)
 
-#define LOGI(...)                                 \
-do  {                                             \
-       printf("<%s:%d>", __FUNCTION__, __LINE__);    \
-       printf(TEXT_GREEN);                           \
-       printf(__VA_ARGS__);                          \
-       printf(TEXT_RESET "\n");                      \
-} while (0)
+#define LOGI(...)                                  \
+       do {                                           \
+               printf("<%s:%d>", __FUNCTION__, __LINE__); \
+               printf(TEXT_GREEN);                        \
+               printf(__VA_ARGS__);                       \
+               printf(TEXT_RESET "\n");                   \
+       } while (0)
 
-#define LOGW(...)                                 \
-do {                                              \
-       printf("<%s:%d>", __FUNCTION__, __LINE__);    \
-       printf(TEXT_YELLOW);                          \
-       printf(__VA_ARGS__);                          \
-       printf(TEXT_RESET "\n");                      \
-} while (0)
+#define LOGW(...)                                  \
+       do {                                           \
+               printf("<%s:%d>", __FUNCTION__, __LINE__); \
+               printf(TEXT_YELLOW);                       \
+               printf(__VA_ARGS__);                       \
+               printf(TEXT_RESET "\n");                   \
+       } while (0)
 
-#define LOGE(...)                                 \
-do {                                              \
-       printf("<%s:%d>", __FUNCTION__, __LINE__);    \
-       printf(TEXT_RED);                             \
-       printf(__VA_ARGS__);                          \
-       printf(TEXT_RESET "\n");                      \
-} while (0)
+#define LOGE(...)                                  \
+       do {                                           \
+               printf("<%s:%d>", __FUNCTION__, __LINE__); \
+               printf(TEXT_RED);                          \
+               printf(__VA_ARGS__);                       \
+               printf(TEXT_RESET "\n");                   \
+       } while (0)
 
 #endif
 
index 2eb7448..f95409c 100644
@@ -29,7 +29,8 @@
 
 #include <pthread.h>
 
-typedef struct _mv_video_reader_s {
+typedef struct _mv_video_reader_s
+{
        /* Main bin */
        GstElement *pl;
 
@@ -53,7 +54,8 @@ typedef struct _mv_video_reader_s {
        mv_video_reader_eos_cb eos_cb;
 } mv_video_reader_s;
 
-typedef struct _mv_video_writer_s {
+typedef struct _mv_video_writer_s
+{
        /* Main bin */
        GstElement *pl;
 
@@ -88,8 +90,7 @@ static void cb_newpad(GstElement *decodebin, GstPad *new_pad, gpointer user_data
 static GstPadProbeReturn pad_probe_data_cb(GstPad *pad, GstPadProbeInfo *info, gpointer user_data);
 
 /* video reader */
-int mv_create_video_reader(
-               mv_video_reader_h *reader)
+int mv_create_video_reader(mv_video_reader_h *reader)
 {
        mv_video_reader_s *handle = NULL;
        int err = MEDIA_VISION_ERROR_NONE;
@@ -127,8 +128,7 @@ int mv_create_video_reader(
        return err;
 }
 
-int mv_destroy_video_reader(
-               mv_video_reader_h reader)
+int mv_destroy_video_reader(mv_video_reader_h reader)
 {
        mv_video_reader_s *handle = NULL;
 
@@ -157,17 +157,12 @@ int mv_destroy_video_reader(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_video_reader_load(
-               mv_video_reader_h reader,
-               const char *path,
-               image_data_s *image_data,
-               unsigned int *fps)
+int mv_video_reader_load(mv_video_reader_h reader, const char *path, image_data_s *image_data, unsigned int *fps)
 {
        mv_video_reader_s *handle = NULL;
        GstVideoInfo info;
 
-       if (reader == NULL || path == NULL ||
-                       image_data == NULL || fps == NULL) {
+       if (reader == NULL || path == NULL || image_data == NULL || fps == NULL) {
                LOGE("NULL pointer passed");
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
        }
@@ -175,9 +170,7 @@ int mv_video_reader_load(
        handle = (mv_video_reader_s *) reader;
 
        /* Set input file location from path */
-       g_object_set(G_OBJECT(handle->filesrc),
-                       "location", path,
-                       NULL);
+       g_object_set(G_OBJECT(handle->filesrc), "location", path, NULL);
 
        /* Start playback */
        if (_mv_video_reader_state_change(handle, GST_STATE_PLAYING)) {
@@ -199,7 +192,7 @@ int mv_video_reader_load(
 
        gst_caps_unref(handle->caps);
 
-       *fps = info.fps_n/info.fps_d;
+       *fps = info.fps_n / info.fps_d;
 
        /* Fill image data */
        image_data->image_width = info.width;
@@ -241,8 +234,7 @@ int mv_video_reader_load(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_video_reader_start(
-               mv_video_reader_h reader)
+int mv_video_reader_start(mv_video_reader_h reader)
 {
        mv_video_reader_s *handle = NULL;
 
@@ -262,8 +254,7 @@ int mv_video_reader_start(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_video_reader_stop(
-               mv_video_reader_h reader)
+int mv_video_reader_stop(mv_video_reader_h reader)
 {
        mv_video_reader_s *handle = NULL;
 
@@ -283,10 +274,7 @@ int mv_video_reader_stop(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_video_reader_set_new_sample_cb(
-               mv_video_reader_h reader,
-               mv_video_reader_new_sample_cb callback,
-               void *user_data)
+int mv_video_reader_set_new_sample_cb(mv_video_reader_h reader, mv_video_reader_new_sample_cb callback, void *user_data)
 {
        mv_video_reader_s *handle = NULL;
 
@@ -305,10 +293,7 @@ int mv_video_reader_set_new_sample_cb(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_video_reader_set_eos_cb(
-               mv_video_reader_h reader,
-               mv_video_reader_eos_cb callback,
-               void *user_data)
+int mv_video_reader_set_eos_cb(mv_video_reader_h reader, mv_video_reader_eos_cb callback, void *user_data)
 {
        mv_video_reader_s *handle = NULL;
 
@@ -328,8 +313,7 @@ int mv_video_reader_set_eos_cb(
 }
 
 /* Video Writer */
-int mv_create_video_writer(
-               mv_video_writer_h *writer)
+int mv_create_video_writer(mv_video_writer_h *writer)
 {
        mv_video_writer_s *handle = NULL;
        int err = MEDIA_VISION_ERROR_NONE;
@@ -360,8 +344,7 @@ int mv_create_video_writer(
        return err;
 }
 
-int mv_destroy_video_writer(
-               mv_video_writer_h writer)
+int mv_destroy_video_writer(mv_video_writer_h writer)
 {
        mv_video_writer_s *handle = NULL;
 
@@ -386,11 +369,7 @@ int mv_destroy_video_writer(
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_video_writer_init(
-               mv_video_writer_h writer,
-               const char *path,
-               image_data_s image_data,
-               unsigned int fps)
+int mv_video_writer_init(mv_video_writer_h writer, const char *path, image_data_s image_data, unsigned int fps)
 {
        mv_video_writer_s *handle = NULL;
        unsigned int err = MEDIA_VISION_ERROR_NONE;
@@ -408,9 +387,7 @@ int mv_video_writer_init(
 
        handle->fps = fps;
 
-       g_object_set(G_OBJECT(handle->filesink),
-                       "location", path,
-                       NULL);
+       g_object_set(G_OBJECT(handle->filesink), "location", path, NULL);
 
        err = _mv_video_writer_link_internals(handle);
        if (MEDIA_VISION_ERROR_NONE != err) {
@@ -421,9 +398,7 @@ int mv_video_writer_init(
        return err;
 }
 
-int mv_video_writer_write_frame(
-               mv_video_writer_h writer,
-               unsigned char *frame)
+int mv_video_writer_write_frame(mv_video_writer_h writer, unsigned char *frame)
 {
        mv_video_writer_s *handle = NULL;
        GstMapInfo info;
@@ -436,7 +411,7 @@ int mv_video_writer_write_frame(
 
        handle = (mv_video_writer_s *) writer;
 
-       buffer =  gst_buffer_new_allocate(NULL, handle->buffer_size, NULL);
+       buffer = gst_buffer_new_allocate(NULL, handle->buffer_size, NULL);
        if (!buffer) {
                LOGE("Unable to allocate buffer for frame");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
@@ -447,8 +422,7 @@ int mv_video_writer_write_frame(
        memcpy(info.data, frame, info.size);
        gst_buffer_unmap(buffer, &info);
 
-       if (GST_FLOW_OK !=
-                       gst_app_src_push_buffer((GstAppSrc*)(handle->appsrc), buffer)) {
+       if (GST_FLOW_OK != gst_app_src_push_buffer((GstAppSrc *) (handle->appsrc), buffer)) {
                LOGE("Failed to push buffer to appsrc");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
@@ -457,8 +431,7 @@ int mv_video_writer_write_frame(
 }
 
 /* Internal functions */
-static int _mv_video_reader_create_internals(
-               mv_video_reader_s *reader)
+static int _mv_video_reader_create_internals(mv_video_reader_s *reader)
 {
        pthread_spin_init(&(reader->new_sample_cb_guard), PTHREAD_PROCESS_SHARED);
        pthread_spin_init(&(reader->eos_cb_guard), PTHREAD_PROCESS_SHARED);
@@ -471,110 +444,72 @@ static int _mv_video_reader_create_internals(
        reader->queue = gst_element_factory_make("queue", "queue");
        reader->appsink = gst_element_factory_make("appsink", "appsink");
 
-       if ((!reader->pl) ||
-                       (!reader->filesrc) ||
-                       (!reader->decodebin) ||
-                       (!reader->videoconvert) ||
-                       (!reader->queue) ||
-                       (!reader->appsink)) {
+       if ((!reader->pl) || (!reader->filesrc) || (!reader->decodebin) || (!reader->videoconvert) || (!reader->queue) ||
+               (!reader->appsink)) {
                LOGE("Unable to create video read pipeline elements");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       gst_bin_add_many(GST_BIN(reader->pl),
-                       reader->filesrc,
-                       reader->decodebin,
-                       reader->videoconvert,
-                       reader->queue,
-                       reader->appsink,
-                       NULL);
+       gst_bin_add_many(GST_BIN(reader->pl), reader->filesrc, reader->decodebin, reader->videoconvert, reader->queue,
+                                        reader->appsink, NULL);
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-static int _mv_video_reader_link_internals(
-               mv_video_reader_s *reader)
+static int _mv_video_reader_link_internals(mv_video_reader_s *reader)
 {
        GstCaps *caps = NULL;
        GstPad *pad = NULL;
 
-       if (!gst_element_link_many(reader->filesrc,
-                               reader->decodebin,
-                               NULL)) {
+       if (!gst_element_link_many(reader->filesrc, reader->decodebin, NULL)) {
                LOGE("Unable to link filesrc to decodebin");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
        /* Decodebin pad will be linked during state change */
-       g_signal_connect(reader->decodebin,
-                       "pad-added",
-                       G_CALLBACK(cb_newpad),
-                       reader);
-
-       if (!gst_element_link_many(reader->videoconvert,
-                       reader->queue, reader->appsink, NULL)) {
+       g_signal_connect(reader->decodebin, "pad-added", G_CALLBACK(cb_newpad), reader);
 
+       if (!gst_element_link_many(reader->videoconvert, reader->queue, reader->appsink, NULL)) {
                LOGE("Unable to link videocovnert-queue-appsink");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       caps = gst_caps_new_simple("video/x-raw",
-                       "format", G_TYPE_STRING, "RGB",
-                       NULL);
+       caps = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING, "RGB", NULL);
 
        gst_app_sink_set_caps(GST_APP_SINK(reader->appsink), caps);
        gst_caps_unref(caps);
 
        /* Configure appsink */
        gst_app_sink_set_emit_signals(GST_APP_SINK(reader->appsink), TRUE);
-       g_signal_connect(reader->appsink,
-                       "new-sample",
-                       G_CALLBACK(appsink_newsample),
-                       reader);
-       g_signal_connect(reader->appsink,
-                       "eos",
-                       G_CALLBACK(appsink_eos),
-                       reader);
-       g_object_set(G_OBJECT(reader->appsink),
-                       "drop", TRUE,
-                       "enable-last-sample", TRUE,
-                       "sync", FALSE,
-                       NULL);
-
+       g_signal_connect(reader->appsink, "new-sample", G_CALLBACK(appsink_newsample), reader);
+       g_signal_connect(reader->appsink, "eos", G_CALLBACK(appsink_eos), reader);
+       g_object_set(G_OBJECT(reader->appsink), "drop", TRUE, "enable-last-sample", TRUE, "sync", FALSE, NULL);
 
        /* pad probe */
        pad = gst_element_get_static_pad(reader->queue, "src");
 
-       gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER,
-                       (GstPadProbeCallback)pad_probe_data_cb, reader, NULL);
+       gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER, (GstPadProbeCallback) pad_probe_data_cb, reader, NULL);
        gst_object_unref(pad);
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-static int _mv_video_reader_state_change(
-               mv_video_reader_s *reader,
-               GstState state)
+static int _mv_video_reader_state_change(mv_video_reader_s *reader, GstState state)
 {
        mv_video_reader_s *handle = (mv_video_reader_s *) reader;
        GstStateChangeReturn state_ret = GST_STATE_CHANGE_FAILURE;
        GstState pipeline_state = GST_STATE_NULL;
 
-       state_ret = gst_element_set_state(handle->pl,
-                       state);
+       state_ret = gst_element_set_state(handle->pl, state);
 
        if (GST_STATE_CHANGE_FAILURE == state_ret) {
                LOGE("Set state failure");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       LOGI("Set state [%d], change return [%d]",
-                       state, state_ret);
+       LOGI("Set state [%d], change return [%d]", state, state_ret);
 
-       state_ret = gst_element_get_state(handle->pl,
-                       &pipeline_state,
-                       NULL,
-                       GST_CLOCK_TIME_NONE);
+       state_ret = gst_element_get_state(handle->pl, &pipeline_state, NULL, GST_CLOCK_TIME_NONE);
 
        if (GST_STATE_CHANGE_FAILURE == state_ret) {
                LOGE("get state failure");
@@ -584,8 +519,7 @@ static int _mv_video_reader_state_change(
        return MEDIA_VISION_ERROR_NONE;
 }
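
Passing GST_CLOCK_TIME_NONE makes gst_element_get_state() block until the transition settles. A sketch of the same wait with a finite timeout, an assumption rather than what this helper does:

	GstState cur = GST_STATE_NULL;
	GstStateChangeReturn ret = gst_element_get_state(handle->pl, &cur, NULL, 5 * GST_SECOND);
	if (GST_STATE_CHANGE_ASYNC == ret)
		LOGI("State change still pending after 5s, current state [%d]", cur);
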
 
-static int _mv_video_writer_create_internals(
-               mv_video_writer_s *writer)
+static int _mv_video_writer_create_internals(mv_video_writer_s *writer)
 {
        writer->pl = gst_pipeline_new(NULL);
 
@@ -597,33 +531,19 @@ static int _mv_video_writer_create_internals(
        writer->muxer = gst_element_factory_make("avimux", "muxer");
        writer->filesink = gst_element_factory_make("filesink", "filesink");
 
-       if ((!writer->pl) ||
-                       (!writer->appsrc) ||
-                       (!writer->capsfilter) ||
-                       (!writer->videoconvert) ||
-                       (!writer->encoder) ||
-                       (!writer->queue) ||
-                       (!writer->muxer) ||
-                       (!writer->filesink)) {
+       if ((!writer->pl) || (!writer->appsrc) || (!writer->capsfilter) || (!writer->videoconvert) || (!writer->encoder) ||
+               (!writer->queue) || (!writer->muxer) || (!writer->filesink)) {
                LOGE("Unable to create video read pipeline elements\n");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       gst_bin_add_many(GST_BIN(writer->pl),
-                       writer->appsrc,
-                       writer->capsfilter,
-                       writer->videoconvert,
-                       writer->encoder,
-                       writer->queue,
-                       writer->muxer,
-                       writer->filesink,
-                       NULL);
+       gst_bin_add_many(GST_BIN(writer->pl), writer->appsrc, writer->capsfilter, writer->videoconvert, writer->encoder,
+                                        writer->queue, writer->muxer, writer->filesink, NULL);
 
        return MEDIA_VISION_ERROR_NONE;
 }
 
-static int _mv_video_writer_link_internals(
-               mv_video_writer_s *writer)
+static int _mv_video_writer_link_internals(mv_video_writer_s *writer)
 {
        GstVideoInfo vinfo;
        GstCaps *caps = NULL;
@@ -659,17 +579,13 @@ static int _mv_video_writer_link_internals(
                format = "RGBA";
                break;
        default:
-               LOGE("Selected format %d is not supported",
-                               writer->image_data.image_colorspace);
+               LOGE("Selected format %d is not supported", writer->image_data.image_colorspace);
                return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
        }
 
-       caps = gst_caps_new_simple("video/x-raw",
-                               "format", G_TYPE_STRING, format,
-                               "width", G_TYPE_INT, writer->image_data.image_width,
-                               "height", G_TYPE_INT, writer->image_data.image_height,
-                               "framerate", GST_TYPE_FRACTION, writer->fps, 1,
-                               NULL);
+       caps = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING, format, "width", G_TYPE_INT,
+                                                          writer->image_data.image_width, "height", G_TYPE_INT, writer->image_data.image_height,
+                                                          "framerate", GST_TYPE_FRACTION, writer->fps, 1, NULL);
 
        if (NULL == caps) {
                LOGE("Failed to create new caps");
@@ -685,38 +601,24 @@ static int _mv_video_writer_link_internals(
 
        writer->buffer_size = vinfo.size;
 
-       g_object_set(G_OBJECT(writer->appsrc),
-                       "max-bytes", 0,
-                       "blocksize", writer->buffer_size,
-                       "stream-type", 0,
-                       "format", GST_FORMAT_BYTES,
-                       "do-timestamp", true,
-                       "is-live", true,
-                       NULL);
+       g_object_set(G_OBJECT(writer->appsrc), "max-bytes", 0, "blocksize", writer->buffer_size, "stream-type", 0, "format",
+                                GST_FORMAT_BYTES, "do-timestamp", true, "is-live", true, NULL);
 
        /* link appsrc and capsfilter */
-       if ((!gst_element_link_filtered(writer->appsrc,
-                                       writer->capsfilter,
-                                       caps))) {
+       if ((!gst_element_link_filtered(writer->appsrc, writer->capsfilter, caps))) {
                LOGE("Failed to link appsrc to capsfilter");
                gst_caps_unref(caps);
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
        gst_caps_unref(caps);
 
-       if (!gst_element_link_many(writer->capsfilter,
-                               writer->videoconvert,
-                               writer->encoder,
-                               writer->queue,
-                               writer->muxer,
-                               writer->filesink,
-                               NULL)) {
+       if (!gst_element_link_many(writer->capsfilter, writer->videoconvert, writer->encoder, writer->queue, writer->muxer,
+                                                          writer->filesink, NULL)) {
                LOGE("Unable to capsfilter to filesink");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       if (_mv_video_writer_state_change(writer,
-                               GST_STATE_PLAYING)) {
+       if (_mv_video_writer_state_change(writer, GST_STATE_PLAYING)) {
                LOGE("Unable to change video writer state");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
@@ -724,23 +626,19 @@ static int _mv_video_writer_link_internals(
        return MEDIA_VISION_ERROR_NONE;
 }
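
Because blocksize is set to the computed frame size and do-timestamp/is-live are enabled, every buffer pushed into the appsrc is treated as exactly one timestamped raw frame. A minimal sketch of feeding a single frame, where frame is assumed to point to writer->buffer_size bytes in the negotiated format:

	GstBuffer *buffer = gst_buffer_new_allocate(NULL, writer->buffer_size, NULL);
	gst_buffer_fill(buffer, 0, frame, writer->buffer_size);
	/* gst_app_src_push_buffer() takes ownership of the buffer */
	if (GST_FLOW_OK != gst_app_src_push_buffer(GST_APP_SRC(writer->appsrc), buffer))
		LOGE("Failed to push frame to appsrc");
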
 
-static int _mv_video_writer_state_change(
-               mv_video_writer_s *writer,
-               GstState state)
+static int _mv_video_writer_state_change(mv_video_writer_s *writer, GstState state)
 {
        mv_video_writer_s *handle = (mv_video_writer_s *) writer;
        GstStateChangeReturn state_ret = GST_STATE_CHANGE_FAILURE;
 
-       state_ret = gst_element_set_state(handle->pl,
-                       state);
+       state_ret = gst_element_set_state(handle->pl, state);
 
        if (GST_STATE_CHANGE_FAILURE == state_ret) {
                LOGE("Set state failure");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       LOGI("Set state [%d], change return [%d]",
-                       state, state_ret);
+       LOGI("Set state [%d], change return [%d]", state, state_ret);
 
        /* AppSrc can't go to PLAYING state until the first buffer is pushed */
 
@@ -748,9 +646,7 @@ static int _mv_video_writer_state_change(
 }
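
Until that first buffer is pushed, a state query on this pipeline reports GST_STATE_CHANGE_ASYNC rather than success, which is why the writer skips the blocking wait that the reader performs. A non-blocking probe of that condition, as a sketch:

	GstState cur = GST_STATE_NULL, pending = GST_STATE_NULL;
	/* A timeout of 0 returns immediately instead of waiting for preroll */
	if (GST_STATE_CHANGE_ASYNC == gst_element_get_state(handle->pl, &cur, &pending, 0))
		LOGI("Pipeline prerolling: current [%d], pending [%d]", cur, pending);
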
 
 /* Callbacks */
-static GstFlowReturn appsink_newsample(
-               GstAppSink *appsink,
-               gpointer user_data)
+static GstFlowReturn appsink_newsample(GstAppSink *appsink, gpointer user_data)
 {
        mv_video_reader_s *handle = NULL;
        GstSample *sample = gst_app_sink_pull_sample(appsink);
@@ -817,11 +713,7 @@ static GstFlowReturn appsink_newsample(
 
                pthread_spin_lock(&(handle->new_sample_cb_guard));
                if (handle->new_sample_cb != NULL) {
-                       handle->new_sample_cb(
-                               buffer,
-                               info.size,
-                               im_data,
-                               handle->new_sample_cb_user_data);
+                       handle->new_sample_cb(buffer, info.size, im_data, handle->new_sample_cb_user_data);
                }
                pthread_spin_unlock(&(handle->new_sample_cb_guard));
 
@@ -835,9 +727,7 @@ static GstFlowReturn appsink_newsample(
        return GST_FLOW_OK;
 }
 
-static void appsink_eos(
-               GstAppSink *appsink,
-               gpointer user_data)
+static void appsink_eos(GstAppSink *appsink, gpointer user_data)
 {
        if (user_data == NULL) {
                LOGE("NULL pointer passed");
@@ -856,10 +746,7 @@ static void appsink_eos(
        gst_pad_remove_probe(gst_element_get_static_pad(handle->queue, "src"), handle->pad_probe_id);
 }
 
-static void cb_newpad(
-               GstElement *decodebin,
-               GstPad *pad,
-               gpointer user_data)
+static void cb_newpad(GstElement *decodebin, GstPad *pad, gpointer user_data)
 {
        mv_video_reader_s *reader = (mv_video_reader_s *) user_data;
        GstStructure *str = NULL;
@@ -887,10 +774,7 @@ static void cb_newpad(
        g_object_unref(video_pad);
 }
 
-static GstPadProbeReturn pad_probe_data_cb(
-               GstPad *pad,
-               GstPadProbeInfo *info,
-               gpointer user_data)
+static GstPadProbeReturn pad_probe_data_cb(GstPad *pad, GstPadProbeInfo *info, gpointer user_data)
 {
        if (user_data == NULL)
                return GST_PAD_PROBE_PASS;
index 17574d8..d54aa09 100644 (file)
@@ -49,11 +49,8 @@ typedef void *mv_video_writer_h;
  *
  * @see mv_video_reader_set_new_sample_cb()
  */
-typedef void (*mv_video_reader_new_sample_cb) (
-               char *buffer,
-               unsigned int buffer_size,
-               image_data_s image_data,
-               void *user_data);
+typedef void (*mv_video_reader_new_sample_cb)(char *buffer, unsigned int buffer_size, image_data_s image_data,
+                                                                                         void *user_data);
 
 /**
  * @brief Called when stream from video reader is finished.
@@ -66,8 +63,7 @@ typedef void (*mv_video_reader_new_sample_cb) (
  *
  * @see mv_video_reader_set_eos_cb()
  */
-typedef void (*mv_video_reader_eos_cb) (
-               void *user_data);
+typedef void (*mv_video_reader_eos_cb)(void *user_data);
 
 /**
  * @brief Creates a video reader handle.
@@ -82,8 +78,7 @@ typedef void (*mv_video_reader_eos_cb) (
  *
  * @see mv_destroy_video_reader()
  */
-int mv_create_video_reader(
-               mv_video_reader_h *reader);
+int mv_create_video_reader(mv_video_reader_h *reader);
 
 /**
  * @brief Destroys the video reader handle and releases all its resources.
@@ -96,8 +91,7 @@ int mv_create_video_reader(
  *
  * @see mv_create_video_reader()
  */
-int mv_destroy_video_reader(
-               mv_video_reader_h reader);
+int mv_destroy_video_reader(mv_video_reader_h reader);
 
 /**
  * @brief Loads video from file.
@@ -115,11 +109,7 @@ int mv_destroy_video_reader(
  *
  * @pre Create a video reader handle by calling @ref mv_create_video_reader()
  */
-int mv_video_reader_load(
-               mv_video_reader_h reader,
-               const char *path,
-               image_data_s *image_data,
-               unsigned int *fps);
+int mv_video_reader_load(mv_video_reader_h reader, const char *path, image_data_s *image_data, unsigned int *fps);
 
 /**
  * @brief Starts reader playback.
@@ -136,8 +126,7 @@ int mv_video_reader_load(
  *
  * @post Stop reader playback by calling @ref mv_video_reader_stop()
  */
-int mv_video_reader_start(
-               mv_video_reader_h reader);
+int mv_video_reader_start(mv_video_reader_h reader);
 
 /**
  * @brief Stops reader playback.
@@ -152,8 +141,7 @@ int mv_video_reader_start(
  * @pre Create a video reader handle by calling @ref mv_create_video_reader()
  *      and call @ref mv_video_reader_load()
  */
-int mv_video_reader_stop(
-               mv_video_reader_h reader);
+int mv_video_reader_stop(mv_video_reader_h reader);
 
 /**
  * @brief Sets new sample callback to video reader.
@@ -173,10 +161,8 @@ int mv_video_reader_stop(
  * @see mv_video_reader_load()
  *
  */
-int mv_video_reader_set_new_sample_cb(
-               mv_video_reader_h reader,
-               mv_video_reader_new_sample_cb callback,
-               void *user_data);
+int mv_video_reader_set_new_sample_cb(mv_video_reader_h reader, mv_video_reader_new_sample_cb callback,
+                                                                         void *user_data);
 
 /**
  * @brief Sets end of stream callback to video reader.
@@ -196,10 +182,7 @@ int mv_video_reader_set_new_sample_cb(
  * @see mv_video_reader_load()
  *
  */
-int mv_video_reader_set_eos_cb(
-               mv_video_reader_h reader,
-               mv_video_reader_eos_cb callback,
-               void *user_data);
+int mv_video_reader_set_eos_cb(mv_video_reader_h reader, mv_video_reader_eos_cb callback, void *user_data);
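
Taken together, the reader functions above are meant to be called in a fixed order. A minimal usage sketch; the callbacks, the read_video() wrapper, the file path, and the omitted error handling are placeholders, not part of this API:

	static void on_sample(char *buffer, unsigned int size, image_data_s image_data, void *user_data)
	{
		/* consume one decoded RGB frame */
	}

	static void on_eos(void *user_data)
	{
		/* stream finished */
	}

	static int read_video(const char *path)
	{
		mv_video_reader_h reader = NULL;
		image_data_s info = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
		unsigned int fps = 0;

		mv_create_video_reader(&reader);
		mv_video_reader_set_new_sample_cb(reader, on_sample, NULL);
		mv_video_reader_set_eos_cb(reader, on_eos, NULL);
		mv_video_reader_load(reader, path, &info, &fps);
		mv_video_reader_start(reader);
		/* ... frames arrive via on_sample() until on_eos() fires ... */
		mv_video_reader_stop(reader);
		return mv_destroy_video_reader(reader);
	}
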
 
 /**
  * @brief Creates a video writer handle.
@@ -214,8 +197,7 @@ int mv_video_reader_set_eos_cb(
  *
  * @see mv_destroy_video_writer()
  */
-int mv_create_video_writer(
-               mv_video_writer_h *writer);
+int mv_create_video_writer(mv_video_writer_h *writer);
 
 /**
  * @brief Destroys the video writer handle and releases all its resources.
@@ -229,8 +211,7 @@ int mv_create_video_writer(
  *
  * @see mv_create_video_writer()
  */
-int mv_destroy_video_writer(
-               mv_video_writer_h writer);
+int mv_destroy_video_writer(mv_video_writer_h writer);
 
 /**
  * @brief Sets path and frame size for video file to be stored.
@@ -248,11 +229,7 @@ int mv_destroy_video_writer(
  *
  * @pre Create a video writer handle by calling @ref mv_create_video_writer()
  */
-int mv_video_writer_init(
-               mv_video_writer_h writer,
-               const char *path,
-               image_data_s image_data,
-               unsigned int fps);
+int mv_video_writer_init(mv_video_writer_h writer, const char *path, image_data_s image_data, unsigned int fps);
 
 /**
  * @brief   Writes video frames to the file sequentially.
@@ -270,8 +247,6 @@ int mv_video_writer_init(
  * @pre Create a video writer handle by calling @ref mv_create_video_writer()
  *      and initialize video with @ref mv_video_writer_init()
  */
-int mv_video_writer_write_frame(
-               mv_video_writer_h writer,
-               unsigned char *frame);
+int mv_video_writer_write_frame(mv_video_writer_h writer, unsigned char *frame);
 
 #endif /* __MEDIA_VISION_MV_VIDEO_HELPER_H__ */
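
The writer side mirrors this flow. A minimal usage sketch, assuming frame points to one raw frame matching image_data; the wrapper name, path, and dimensions are placeholders:

	static int write_one_frame(unsigned char *frame)
	{
		mv_video_writer_h writer = NULL;
		image_data_s image_data = { 640, 480, MEDIA_VISION_COLORSPACE_RGB888 };

		mv_create_video_writer(&writer);
		mv_video_writer_init(writer, "/tmp/output.avi", image_data, 30);
		mv_video_writer_write_frame(writer, frame); /* repeat once per frame */
		return mv_destroy_video_writer(writer);
	}
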
index b20e620..1b421fd 100644 (file)
 static bool Perform_eye_condition_recognize = false;
 static bool Perform_facial_expression_recognize = false;
 
-void eye_condition_cb(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s face_location,
-               mv_face_eye_condition_e eye_condition,
-               void *user_data)
+void eye_condition_cb(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s face_location,
+                                         mv_face_eye_condition_e eye_condition, void *user_data)
 {
        switch (eye_condition) {
        case MV_FACE_EYES_NOT_FOUND:
@@ -61,12 +57,8 @@ void eye_condition_cb(
        }
 }
 
-void face_expression_cb(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s face_location,
-               mv_face_facial_expression_e facial_expression,
-               void *user_data)
+void face_expression_cb(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s face_location,
+                                               mv_face_facial_expression_e facial_expression, void *user_data)
 {
        switch (facial_expression) {
        case MV_FACE_NEUTRAL:
@@ -83,12 +75,8 @@ void face_expression_cb(
        }
 }
 
-void on_face_detected_cb(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s *faces_locations,
-               int number_of_faces,
-               void *user_data)
+void on_face_detected_cb(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s *faces_locations,
+                                                int number_of_faces, void *user_data)
 {
        printf("%i faces were detected on the image.\n", number_of_faces);
        if (number_of_faces > 0) {
@@ -99,43 +87,35 @@ void on_face_detected_cb(
                unsigned int buf_size = 0;
                image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
                if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &out_buffer, &buf_size) ||
-                               MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
-                               MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
-                               MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
-                               user_data == NULL) {
+                       MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
+                       MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
+                       MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
+                       user_data == NULL) {
                        printf("ERROR: Creating out image is impossible.\n");
                } else {
-                       file_name = (char*)user_data;
+                       file_name = (char *) user_data;
                        is_source_data_loaded = 1;
                }
 
                int i = 0;
                for (i = 0; i < number_of_faces; ++i) {
-                       printf("\nFace %i : x - %i, y - %i, width - %i, height - %i ", i,
-                                       faces_locations[i].point.x, faces_locations[i].point.y,
-                                       faces_locations[i].width, faces_locations[i].height);
+                       printf("\nFace %i : x - %i, y - %i, width - %i, height - %i ", i, faces_locations[i].point.x,
+                                  faces_locations[i].point.y, faces_locations[i].width, faces_locations[i].height);
 
                        if (Perform_eye_condition_recognize) {
-                               if (MEDIA_VISION_ERROR_NONE != mv_face_eye_condition_recognize(
-                                                       source,
-                                                       engine_cfg,
-                                                       faces_locations[i],
-                                                       eye_condition_cb,
-                                                       user_data)) {
-                                       printf(TEXT_RED "\nEye condition recognition for %i face failed"
-                                                       TEXT_RESET "\n", i);
+                               if (MEDIA_VISION_ERROR_NONE != mv_face_eye_condition_recognize(source, engine_cfg, faces_locations[i],
+                                                                                                                                                          eye_condition_cb, user_data)) {
+                                       printf(TEXT_RED "\nEye condition recognition for face %i failed" TEXT_RESET "\n", i);
                                }
                        }
 
                        if (Perform_facial_expression_recognize) {
-                               if (MEDIA_VISION_ERROR_NONE != mv_face_facial_expression_recognize(
-                                                       source,
-                                                       engine_cfg,
-                                                       faces_locations[i],
-                                                       face_expression_cb,
-                                                       user_data)) {
+                               if (MEDIA_VISION_ERROR_NONE != mv_face_facial_expression_recognize(source, engine_cfg,
+                                                                                                                                                                  faces_locations[i],
+                                                                                                                                                                  face_expression_cb, user_data)) {
                                        printf(TEXT_RED "\nFacial expression recognition for %i "
-                                                       "face failed" TEXT_RESET "\n", i);
+                                                                       "face failed" TEXT_RESET "\n",
+                                                  i);
                                }
                        }
 
@@ -143,16 +123,12 @@ void on_face_detected_cb(
 
                        if ((is_source_data_loaded == 1) && !Perform_eye_condition_recognize) {
                                const int rectangle_thickness = 3;
-                               const int drawing_color[] = {255, 0, 0};
-                               if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer(
-                                                       faces_locations[i].point.x,
-                                                       faces_locations[i].point.y,
-                                                       faces_locations[i].point.x + faces_locations[i].width,
-                                                       faces_locations[i].point.y + faces_locations[i].height,
-                                                       rectangle_thickness,
-                                                       drawing_color,
-                                                       &image_data,
-                                                       out_buffer)) {
+                               const int drawing_color[] = { 255, 0, 0 };
+                               if (MEDIA_VISION_ERROR_NONE !=
+                                       draw_rectangle_on_buffer(faces_locations[i].point.x, faces_locations[i].point.y,
+                                                                                        faces_locations[i].point.x + faces_locations[i].width,
+                                                                                        faces_locations[i].point.y + faces_locations[i].height,
+                                                                                        rectangle_thickness, drawing_color, &image_data, out_buffer)) {
                                        continue;
                                }
                        }
@@ -160,11 +136,7 @@ void on_face_detected_cb(
 
                if (!Perform_eye_condition_recognize) {
                        if (file_name != NULL &&
-                               MEDIA_VISION_ERROR_NONE == save_image_from_buffer(
-                                                                                               file_name,
-                                                                                               out_buffer,
-                                                                                               &image_data,
-                                                                                               100)) {
+                               MEDIA_VISION_ERROR_NONE == save_image_from_buffer(file_name, out_buffer, &image_data, 100)) {
                                printf("Image was generated as %s\n", file_name);
                        } else {
                                printf("ERROR: Failed to generate output file. Check file name and permissions. \n");
@@ -175,22 +147,16 @@ void on_face_detected_cb(
        }
 }
 
-void on_face_recognized_cb(
-               mv_source_h source,
-               mv_face_recognition_model_h recognition_model,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s *face_location,
-               const int *face_label,
-               double confidence,
-               void *user_data)
+void on_face_recognized_cb(mv_source_h source, mv_face_recognition_model_h recognition_model,
+                                                  mv_engine_config_h engine_cfg, mv_rectangle_s *face_location, const int *face_label,
+                                                  double confidence, void *user_data)
 {
        if (NULL == face_location) {
-               printf(TEXT_YELLOW "No faces were recognized in the source"
-                               TEXT_RESET "\n");
+               printf(TEXT_YELLOW "No faces were recognized in the source" TEXT_RESET "\n");
        } else {
                printf(TEXT_GREEN "Face labeled %i was recognized in the source with "
-                                               "recognition confidence of %.2f"
-                                               TEXT_RESET "\n", *face_label, confidence);
+                                                 "recognition confidence of %.2f" TEXT_RESET "\n",
+                          *face_label, confidence);
        }
 }
 
@@ -206,9 +172,7 @@ int perform_detect()
        mv_source_h source;
        int err = mv_create_source(&source);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during creating the source!!! code: %i"
-                               TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors occurred while creating the source!!! code: %i" TEXT_RESET "\n", err);
 
                free(in_file_name);
 
@@ -219,9 +183,8 @@ int perform_detect()
        if (MEDIA_VISION_ERROR_NONE != err) {
                const int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
-                       printf(TEXT_RED
-                                       "ERROR: Errors were occurred during destroying the source!!! code: %i"
-                                       TEXT_RESET "\n", err2);
+                       printf(TEXT_RED "ERROR: Errors occurred while destroying the source!!! code: %i" TEXT_RESET "\n",
+                                  err2);
 
                        free(in_file_name);
 
@@ -241,26 +204,23 @@ int perform_detect()
 
        /* 3. Select Haar cascade */
        const int options[3] = { 1, 2, 3 };
-       const char *names[3] = { "haarcascade_frontalface_alt.xml",
-                                                               "haarcascade_frontalface_alt2.xml",
-                                                               "haarcascade_frontalface_alt_tree.xml"};
+       const char *names[3] = { "haarcascade_frontalface_alt.xml", "haarcascade_frontalface_alt2.xml",
+                                                        "haarcascade_frontalface_alt_tree.xml" };
 
        const int haarcascade = show_menu("Select Haarcascade:", options, names, 3);
 
        mv_engine_config_h eng_config;
        err = mv_create_engine_config(&eng_config);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during creating the engine config!!! code: %i"
-                               TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors occurred while creating the engine config!!! code: %i" TEXT_RESET "\n",
+                          err);
 
                free(out_file_name);
 
                const int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
-                       printf(TEXT_RED
-                                       "ERROR: Errors were occurred during destroying the source!!! code: %i"
-                                       TEXT_RESET "\n", err2);
+                       printf(TEXT_RED "ERROR: Errors occurred while destroying the source!!! code: %i" TEXT_RESET "\n",
+                                  err2);
                }
 
                return err;
@@ -268,22 +228,16 @@ int perform_detect()
 
        switch (haarcascade) {
        case 1:
-               mv_engine_config_set_string_attribute(
-                                               eng_config,
-                                               MV_FACE_DETECTION_MODEL_FILE_PATH,
-                                               "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml");
+               mv_engine_config_set_string_attribute(eng_config, MV_FACE_DETECTION_MODEL_FILE_PATH,
+                                                                                         "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml");
                break;
        case 2:
-               mv_engine_config_set_string_attribute(
-                                               eng_config,
-                                               MV_FACE_DETECTION_MODEL_FILE_PATH,
-                                               "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml");
+               mv_engine_config_set_string_attribute(eng_config, MV_FACE_DETECTION_MODEL_FILE_PATH,
+                                                                                         "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml");
                break;
        case 3:
-               mv_engine_config_set_string_attribute(
-                                               eng_config,
-                                               MV_FACE_DETECTION_MODEL_FILE_PATH,
-                                               "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt_tree.xml");
+               mv_engine_config_set_string_attribute(eng_config, MV_FACE_DETECTION_MODEL_FILE_PATH,
+                                                                                         "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt_tree.xml");
                break;
        default:
                printf(TEXT_YELLOW "Default Haar cascade was set.\n" TEXT_RESET);
@@ -295,23 +249,20 @@ int perform_detect()
        free(out_file_name);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during face detection!!! code: %i"
-                               TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors occurred during face detection!!! code: %i" TEXT_RESET "\n", err);
 
                int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
-                       printf(TEXT_RED
-                                       "ERROR: Errors were occurred during destroying the source!!! code: %i"
-                                       TEXT_RESET "\n", err2);
+                       printf(TEXT_RED "ERROR: Errors occurred while destroying the source!!! code: %i" TEXT_RESET "\n",
+                                  err2);
                        return err2;
                }
 
                err2 = mv_destroy_engine_config(eng_config);
                if (MEDIA_VISION_ERROR_NONE != err2) {
-                       printf(TEXT_RED
-                                       "ERROR: Errors were occurred during destroying the engine config!!! code: %i"
-                                       TEXT_RESET "\n", err2);
+                       printf(TEXT_RED "ERROR: Errors occurred while destroying the engine config!!! code: %i" TEXT_RESET
+                                                       "\n",
+                                  err2);
                        return err2;
                }
 
@@ -320,17 +271,14 @@ int perform_detect()
 
        err = mv_destroy_source(source);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during destroying the source!!! code: %i"
-                               TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors occurred while destroying the source!!! code: %i" TEXT_RESET "\n", err);
                return err;
        }
 
        err = mv_destroy_engine_config(eng_config);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during destroying the engine config!!! code: %i"
-                               TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors occurred while destroying the engine config!!! code: %i" TEXT_RESET "\n",
+                          err);
                return err;
        }
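
Stripped of menus and error handling, perform_detect() reduces to a short call sequence. A condensed sketch, assuming mv_face_detect() takes the source, engine config, callback, and user data in that order, with on_face_detected_cb as shown earlier; the detect_faces() wrapper is hypothetical:

	static int detect_faces(const char *in_file_name, char *out_file_name)
	{
		mv_source_h source = NULL;
		mv_engine_config_h cfg = NULL;

		mv_create_source(&source);
		load_mv_source_from_file(in_file_name, source);
		mv_create_engine_config(&cfg);
		mv_engine_config_set_string_attribute(cfg, MV_FACE_DETECTION_MODEL_FILE_PATH,
				"/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml");
		mv_face_detect(source, cfg, on_face_detected_cb, out_file_name);
		mv_destroy_engine_config(cfg);
		return mv_destroy_source(source);
	}
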
 
@@ -345,25 +293,18 @@ int perform_mv_face_recognize(mv_face_recognition_model_h model)
        int err = mv_create_source(&source);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during creating the source!!! code: %i"
-                               TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors occurred while creating the source!!! code: %i" TEXT_RESET "\n", err);
                return err;
        }
 
-       printf(TEXT_GREEN "HINT:" TEXT_RESET "\n"
-                       TEXT_YELLOW "To achieve appropriate accuracy of recognition,\n"
-                                               "choose images with only faces. I.e. face has to cover\n"
-                                               "approximately 95-100%% of the image (passport photos\n"
-                                               "are the best example :)). Note that if this value is\n"
-                                               "less than 95%%, accuracy can be significantly reduced.\n"
-                                               "In real code such images can be achieved by cropping\n"
-                                               "faces from images with face detection functionality.\n"
-                       TEXT_RESET);
-       while (-1 == input_string(
-                               "Input file name with the face to be recognized:",
-                               1024,
-                               &(in_file_name)))
+       printf(TEXT_GREEN "HINT:" TEXT_RESET "\n" TEXT_YELLOW "To achieve appropriate recognition accuracy,\n"
+                                         "choose images with only faces, i.e. the face has to cover\n"
+                                         "approximately 95-100%% of the image (passport photos\n"
+                                         "are the best example :)). Note that if this value is\n"
+                                         "less than 95%%, accuracy can be significantly reduced.\n"
+                                         "In real code such images can be obtained by cropping\n"
+                                         "faces from images with the face detection functionality.\n" TEXT_RESET);
+       while (-1 == input_string("Input file name with the face to be recognized:", 1024, &(in_file_name)))
                printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
 
        err = load_mv_source_from_file(in_file_name, source);
@@ -373,9 +314,8 @@ int perform_mv_face_recognize(mv_face_recognition_model_h model)
 
                const int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
-                       printf(TEXT_RED
-                                       "ERROR: Errors were occurred during destroying the source!!! code: %i"
-                                       TEXT_RESET "\n", err2);
+                       printf(TEXT_RED "ERROR: Errors occurred while destroying the source!!! code: %i" TEXT_RESET "\n",
+                                  err2);
                        return err2;
                }
 
@@ -387,15 +327,12 @@ int perform_mv_face_recognize(mv_face_recognition_model_h model)
        if (MEDIA_VISION_ERROR_NONE != err) {
                free(in_file_name);
 
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during face recognition!!! code: %i"
-                               TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors occurred during face recognition!!! code: %i" TEXT_RESET "\n", err);
 
                int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
-                       printf(TEXT_RED
-                                       "ERROR: Errors were occurred during destroying the source!!! code: %i"
-                                       TEXT_RESET "\n", err2);
+                       printf(TEXT_RED "ERROR: Errors occurred while destroying the source!!! code: %i" TEXT_RESET "\n",
+                                  err2);
                        return err2;
                }
 
@@ -407,16 +344,13 @@ int perform_mv_face_recognize(mv_face_recognition_model_h model)
        return err;
 }
 
-int add_single_example(
-               mv_face_recognition_model_h model, const char *in_file_name,
-               mv_rectangle_s *roi, int *face_label)
+int add_single_example(mv_face_recognition_model_h model, const char *in_file_name, mv_rectangle_s *roi,
+                                          int *face_label)
 {
        mv_source_h source;
        int err = mv_create_source(&source);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during creating the source!!! code: %i"
-                               TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors occurred while creating the source!!! code: %i" TEXT_RESET "\n", err);
 
                return err;
        }
@@ -425,9 +359,8 @@ int add_single_example(
        if (MEDIA_VISION_ERROR_NONE != err) {
                const int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
-                       printf(TEXT_RED
-                                       "ERROR: Errors were occurred during destroying the source!!! code: %i"
-                                       TEXT_RESET "\n", err2);
+                       printf(TEXT_RED "ERROR: Errors occurred while destroying the source!!! code: %i" TEXT_RESET "\n",
+                                  err2);
                        return err2;
                }
 
@@ -436,24 +369,19 @@ int add_single_example(
 
        if (NULL != roi && !show_confirm_dialog("Do you want to use the full image?")) {
                printf(TEXT_YELLOW "Specify the ROI as rectangle where face is located.\n"
-                                                       "Use negative values if you want to check correctness\n"
-                                                       "of error handling.\n"
-                               TEXT_RESET);
+                                                  "Use negative values if you want to check correctness\n"
+                                                  "of error handling.\n" TEXT_RESET);
 
-               while (-1 == input_int("Specify top left ROI x coordinate:",
-                                       INT_MIN, INT_MAX, &(roi->point.x)))
+               while (-1 == input_int("Specify top left ROI x coordinate:", INT_MIN, INT_MAX, &(roi->point.x)))
                        printf("Incorrect input! Try again.\n");
 
-               while (-1 == input_int("Specify top left ROI y coordinate:",
-                                       INT_MIN, INT_MAX, &(roi->point.y)))
+               while (-1 == input_int("Specify top left ROI y coordinate:", INT_MIN, INT_MAX, &(roi->point.y)))
                        printf("Incorrect input! Try again.\n");
 
-               while (-1 == input_int("Specify top left ROI width:",
-                                       INT_MIN, INT_MAX, &(roi->width)))
+               while (-1 == input_int("Specify ROI width:", INT_MIN, INT_MAX, &(roi->width)))
                        printf("Incorrect input! Try again.\n");
 
-               while (-1 == input_int("Specify top left ROI height:",
-                                       INT_MIN, INT_MAX, &(roi->height)))
+               while (-1 == input_int("Specify ROI height:", INT_MIN, INT_MAX, &(roi->height)))
                        printf("Incorrect input! Try again.\n");
        } else {
                roi = NULL;
@@ -462,63 +390,51 @@ int add_single_example(
        int real_label = 0;
        if (NULL == face_label) {
                printf(TEXT_YELLOW "Also, you has to assign label for the face in the\n"
-                                                       "image. You has assign the same labels for the same\n"
-                                                       "persons. For example, always assign label '1' for\n"
-                                                       "images with Alice's face; label '2' for Bob's faces,\n"
-                                                       "'3' for Ann's faces and so on...\n"
-                               TEXT_RESET);
+                                                  "image. You have to assign the same label for the same\n"
+                                                  "person. For example, always assign label '1' for\n"
+                                                  "images with Alice's face; label '2' for Bob's faces,\n"
+                                                  "'3' for Ann's faces and so on...\n" TEXT_RESET);
 
                face_label = &real_label;
-               while (-1 == input_int("Specify label as integer:",
-                                                               MIN_ALLOWED_LABEL,
-                                                               MAX_ALLOWED_LABEL,
-                                                               face_label)) {
-                       printf("Incorrect input! You can use %i-%i labels only. Try again.\n",
-                                       MIN_ALLOWED_LABEL,
-                                       MAX_ALLOWED_LABEL);
+               while (-1 == input_int("Specify label as integer:", MIN_ALLOWED_LABEL, MAX_ALLOWED_LABEL, face_label)) {
+                       printf("Incorrect input! You can use %i-%i labels only. Try again.\n", MIN_ALLOWED_LABEL,
+                                  MAX_ALLOWED_LABEL);
                }
        }
 
        err = mv_face_recognition_model_add(source, model, roi, *face_label);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during adding the sample image from "
-                               "[%s] to the face recognition model!!! code: %i"
-                               TEXT_RESET "\n", in_file_name, err);
+               printf(TEXT_RED "ERROR: Errors occurred while adding the sample image from "
+                                               "[%s] to the face recognition model!!! code: %i" TEXT_RESET "\n",
+                          in_file_name, err);
        }
 
        const int err2 = mv_destroy_source(source);
        if (MEDIA_VISION_ERROR_NONE != err2) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during destroying the source!!! code: %i"
-                               TEXT_RESET "\n", err2);
+               printf(TEXT_RED "ERROR: Errors occurred while destroying the source!!! code: %i" TEXT_RESET "\n", err2);
        }
 
        return err;
 }
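
add_single_example() feeds one labeled image into the model; the full train-and-recognize round trip built on it is short. A condensed sketch with hypothetical file names, wrapper name, and probe_source, assuming mv_face_recognition_model_learn() and mv_face_recognize() accept a NULL engine config for defaults:

	static int train_and_recognize(mv_source_h probe_source)
	{
		mv_face_recognition_model_h model = NULL;
		int alice = 1, bob = 2;

		mv_face_recognition_model_create(&model);
		/* NULL ROI uses the full image as the face example */
		add_single_example(model, "alice1.jpg", NULL, &alice);
		add_single_example(model, "bob1.jpg", NULL, &bob);
		mv_face_recognition_model_learn(NULL, model);

		/* probe_source: an mv_source_h loaded as in perform_detect() */
		mv_face_recognize(probe_source, model, NULL, NULL, on_face_recognized_cb, NULL);
		return mv_face_recognition_model_destroy(model);
	}
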
 
-int perform_mv_face_recognition_model_add_face_example(
-               mv_face_recognition_model_h model,
-               notification_type_e *notification_type)
+int perform_mv_face_recognition_model_add_face_example(mv_face_recognition_model_h model,
+                                                                                                          notification_type_e *notification_type)
 {
        char *in_file_name = NULL;
 
-       printf(TEXT_GREEN "HINT:" TEXT_RESET "\n"
-                       TEXT_YELLOW "To achieve appropriate accuracy of recognition,\n"
-                                               "choose images with only faces. I.e. face has to cover\n"
-                                               "approximately 95-100%% of the image (passport photos\n"
-                                               "are the best example :)). Note that if this value is\n"
-                                               "less than 95%%, accuracy can be significantly reduced.\n"
-                                               "In real code such images can be achieved by cropping\n"
-                                               "faces from images with face detection functionality.\n"
-                       TEXT_RESET);
+       printf(TEXT_GREEN "HINT:" TEXT_RESET "\n" TEXT_YELLOW "To achieve appropriate recognition accuracy,\n"
+                                         "choose images with only faces, i.e. the face has to cover\n"
+                                         "approximately 95-100%% of the image (passport photos\n"
+                                         "are the best example :)). Note that if this value is\n"
+                                         "less than 95%%, accuracy can be significantly reduced.\n"
+                                         "In real code such images can be obtained by cropping\n"
+                                         "faces from images with the face detection functionality.\n" TEXT_RESET);
 
        const bool from_dir = show_confirm_dialog("Add images from a directory?");
-       const char *input_path_msg =
-                       from_dir ? "Input path to the directory with the face images to be "
-                                               "loaded to the model:"
-                                               : "Input file name with the face to be loaded to the model:";
+       const char *input_path_msg = from_dir ? "Input path to the directory with the face images to be "
+                                                                                       "loaded to the model:" :
+                                                                                       "Input file name with the face to be loaded to the model:";
 
        while (-1 == input_string(input_path_msg, 1024, &(in_file_name)))
                printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
@@ -528,13 +444,9 @@ int perform_mv_face_recognition_model_add_face_example(
        if (from_dir) {
                *notification_type = FAIL_OR_DONE;
                int face_label = 0;
-               while (-1 == input_int("Specify label as integer:",
-                                                               MIN_ALLOWED_LABEL,
-                                                               MAX_ALLOWED_LABEL,
-                                                               &face_label)) {
-                       printf("Incorrect input! You can use %i-%i labels only. Try again.\n",
-                                       MIN_ALLOWED_LABEL,
-                                       MAX_ALLOWED_LABEL);
+               while (-1 == input_int("Specify label as integer:", MIN_ALLOWED_LABEL, MAX_ALLOWED_LABEL, &face_label)) {
+                       printf("Incorrect input! You can use %i-%i labels only. Try again.\n", MIN_ALLOWED_LABEL,
+                                  MAX_ALLOWED_LABEL);
                }
 
                DIR *dir;
@@ -554,17 +466,16 @@ int perform_mv_face_recognition_model_add_face_example(
                                if (MEDIA_VISION_ERROR_NONE != err) {
                                        printf(TEXT_RED "Failed to add example from %s. "
                                                                        "Error code: %i\n" TEXT_RESET,
-                                                                       file_path, err);
+                                                  file_path, err);
                                } else {
-                                       printf(TEXT_GREEN "Example labeled [%i] added from " TEXT_RESET
-                                                       TEXT_YELLOW "%s\n" TEXT_RESET, face_label, file_path);
+                                       printf(TEXT_GREEN "Example labeled [%i] added from " TEXT_RESET TEXT_YELLOW "%s\n" TEXT_RESET,
+                                                  face_label, file_path);
                                }
                        }
 
                        closedir(dir);
                } else {
-                       printf(TEXT_RED "Can't read from specified directory (%s)\n"
-                                       TEXT_RESET, in_file_name);
+                       printf(TEXT_RED "Can't read from specified directory (%s)\n" TEXT_RESET, in_file_name);
                }
        } else {
                *notification_type = FAIL_OR_SUCCESSS;
@@ -577,52 +488,45 @@ int perform_mv_face_recognition_model_add_face_example(
        return err;
 }
 
-int perform_mv_face_recognition_model_reset_face_examples(
-               mv_face_recognition_model_h model,
-               bool full_reset)
+int perform_mv_face_recognition_model_reset_face_examples(mv_face_recognition_model_h model, bool full_reset)
 {
-       printf(TEXT_GREEN "HINT:" TEXT_RESET "\n"
-                       TEXT_YELLOW "Reset of the examples will affect only examples has\n"
-                                               "been collected via mv_face_recognition_model_add()\n"
-                                               "function calls (i.e. through 'Add image example' menu\n"
-                                               "item). Previously learned model will be not affected,\n"
-                                               "so it is possible to recognize faces with this model\n"
-                                               "after examples reset. Reset of the examples can be\n"
-                                               "useful to erase a class of faces (i.e. all examples\n"
-                                               "related to this class) before learning the model.\n"
-                                               "Or, if it is needed to reset all collected previously\n"
-                                               "examples as an alternative to the creating the new\n"
-                                               "model.\n"
-                       TEXT_RESET);
+       printf(TEXT_GREEN "HINT:" TEXT_RESET "\n" TEXT_YELLOW "Reset of the examples will affect only examples that\n"
+                                         "have been collected via mv_face_recognition_model_add()\n"
+                                         "function calls (i.e. through the 'Add image example' menu\n"
+                                         "item). The previously learned model will not be affected,\n"
+                                         "so it is still possible to recognize faces with this model\n"
+                                         "after the examples are reset. Resetting the examples can be\n"
+                                         "useful to erase a class of faces (i.e. all examples\n"
+                                         "related to this class) before learning the model,\n"
+                                         "or to reset all previously collected examples\n"
+                                         "as an alternative to creating a new\n"
+                                         "model.\n" TEXT_RESET);
 
        int err = MEDIA_VISION_ERROR_NONE;
 
        if (full_reset) {
                err = mv_face_recognition_model_reset(model, NULL);
                if (MEDIA_VISION_ERROR_NONE != err) {
-                       printf(TEXT_RED
-                                       "ERROR: Errors were occurred during reset of all examples!!!"
-                                       " code: %i" TEXT_RESET "\n", err);
+                       printf(TEXT_RED "ERROR: Errors occurred during reset of all examples!!!"
+                                                       " code: %i" TEXT_RESET "\n",
+                                  err);
                        return err;
                }
        } else {
                int reset_label = 0;
 
-               while (-1 == input_int("Specify label for the examples to be reset:",
-                                                               MIN_ALLOWED_LABEL,
-                                                               MAX_ALLOWED_LABEL,
-                                                               &reset_label)) {
-                       printf("Incorrect input! You can use %i-%i labels only. Try again.\n",
-                                       MIN_ALLOWED_LABEL,
-                                       MAX_ALLOWED_LABEL);
+               while (-1 == input_int("Specify label for the examples to be reset:", MIN_ALLOWED_LABEL, MAX_ALLOWED_LABEL,
+                                                          &reset_label)) {
+                       printf("Incorrect input! You can use %i-%i labels only. Try again.\n", MIN_ALLOWED_LABEL,
+                                  MAX_ALLOWED_LABEL);
                }
 
                err = mv_face_recognition_model_reset(model, &reset_label);
 
                if (MEDIA_VISION_ERROR_NONE != err) {
-                       printf(TEXT_RED
-                                       "ERROR: Errors were occurred during reset of examples labeled"
-                                       " with  %i!!! code: %i" TEXT_RESET "\n", reset_label, err);
+                       printf(TEXT_RED "ERROR: Errors occurred during reset of examples labeled"
+                                                       " with %i!!! code: %i" TEXT_RESET "\n",
+                                  reset_label, err);
                        return err;
                }
        }
@@ -634,8 +538,7 @@ int perform_mv_face_recognition_model_save(mv_face_recognition_model_h model)
 {
        char *out_file_name = NULL;
 
-       while (input_string("Input file name to save the model:",
-                               1024, &(out_file_name)) == -1)
+       while (input_string("Input file name to save the model:", 1024, &(out_file_name)) == -1)
                printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
 
        const int err = mv_face_recognition_model_save(out_file_name, model);
@@ -649,8 +552,7 @@ int perform_mv_face_recognition_model_load(mv_face_recognition_model_h *model)
 {
        char *in_file_name = NULL;
 
-       while (input_string("Input file name to load model from:",
-                               1024, &(in_file_name)) == -1)
+       while (input_string("Input file name to load model from:", 1024, &(in_file_name)) == -1)
                printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
 
        const int err = mv_face_recognition_model_load(in_file_name, model);
@@ -660,51 +562,46 @@ int perform_mv_face_recognition_model_load(mv_face_recognition_model_h *model)
        return err;
 }
 
-int perform_mv_face_recognition_model_clone(
-               mv_face_recognition_model_h model_to_clone)
+int perform_mv_face_recognition_model_clone(mv_face_recognition_model_h model_to_clone)
 {
        int err = MEDIA_VISION_ERROR_NONE;
 
        mv_face_recognition_model_h cloned_model = NULL;
 
-       printf(TEXT_GREEN "Perform clone of the recognition model..."
-                       TEXT_RESET "\n");
+       printf(TEXT_GREEN "Performing clone of the recognition model..." TEXT_RESET "\n");
 
        err = mv_face_recognition_model_clone(model_to_clone, &cloned_model);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED "Errors were occurred during model clone. Error code %i"
-                               TEXT_RESET "\n", err);
+               printf(TEXT_RED "Errors occurred during model clone. Error code %i" TEXT_RESET "\n", err);
                return err;
        }
 
        printf(TEXT_YELLOW "Model cloning is done." TEXT_RESET "\n");
 
-       if (show_confirm_dialog("Save " TEXT_YELLOW "source model" TEXT_RESET
-                                                       " to file?")) {
+       if (show_confirm_dialog("Save " TEXT_YELLOW "source model" TEXT_RESET " to file?")) {
                const int serr = perform_mv_face_recognition_model_save(model_to_clone);
                if (MEDIA_VISION_ERROR_NONE != serr) {
-                       printf(TEXT_RED
-                                       "Errors were occurred when trying to save "
-                                       "source model to file. Error code %i" TEXT_RESET "\n", serr);
+                       printf(TEXT_RED "Errors occurred when trying to save "
+                                                       "source model to file. Error code %i" TEXT_RESET "\n",
+                                  serr);
                }
        }
 
-       if (show_confirm_dialog("Save " TEXT_YELLOW "destination model" TEXT_RESET
-                                                       " to file?")) {
+       if (show_confirm_dialog("Save " TEXT_YELLOW "destination model" TEXT_RESET " to file?")) {
                const int serr = perform_mv_face_recognition_model_save(cloned_model);
                if (MEDIA_VISION_ERROR_NONE != serr) {
-                       printf(TEXT_RED
-                                       "Errors were occurred when trying to save destination model "
-                                       "to file. Error code %i" TEXT_RESET "\n", serr);
+                       printf(TEXT_RED "Errors were occurred when trying to save destination model "
+                                                       "to file. Error code %i" TEXT_RESET "\n",
+                                  serr);
                }
        }
 
        if (cloned_model) {
                const int dest_err = mv_face_recognition_model_destroy(cloned_model);
                if (MEDIA_VISION_ERROR_NONE != dest_err) {
-                       printf(TEXT_RED
-                               "Errors were occurred when destroying destination model ."
-                               "Error code %i" TEXT_RESET "\n", dest_err);
+                       printf(TEXT_RED "Errors were occurred when destroying destination model. "
+                                                       "Error code %i" TEXT_RESET "\n",
+                                  dest_err);
                }
        }
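
mv_face_recognition_model_clone, as exercised above, yields an independent handle: the cleanup branch destroys the clone without touching the source. The lifetime rule in condensed form (error handling elided):

        mv_face_recognition_model_h src = NULL, dst = NULL;
        mv_face_recognition_model_create(&src);
        mv_face_recognition_model_clone(src, &dst); /* dst owns its own copy */
        mv_face_recognition_model_destroy(dst);     /* does not invalidate src */
        mv_face_recognition_model_destroy(src);
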
 
@@ -714,13 +611,12 @@ int perform_mv_face_recognition_model_clone(
 int perform_mv_face_recognition_model_learn(mv_face_recognition_model_h model)
 {
        printf(TEXT_YELLOW "Learning the model has to be performed after\n"
-                                               "adding some amount of examples to the model.\n"
-                                               "If you learn without examples, you will get useless\n"
-                                               "model, which will be unavailable to recognize. Anyway,\n"
-                                               "you can add examples and launch this method again to\n"
-                                               "get the appropriate recognition model suitable for\n"
-                                               "recognition."
-                       TEXT_RESET "\n");
+                                          "adding some amount of examples to the model.\n"
+                                          "If you learn without examples, you will get useless\n"
+                                          "model, which will be unavailable to recognize. Anyway,\n"
+                                          "you can add examples and launch this method again to\n"
+                                          "get the appropriate recognition model suitable for\n"
+                                          "recognition." TEXT_RESET "\n");
 
        printf(TEXT_GREEN "Start learning process..." TEXT_RESET "\n");
 
@@ -728,11 +624,10 @@ int perform_mv_face_recognition_model_learn(mv_face_recognition_model_h model)
 
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "Learning the model failed. Error code: %i. "
-                               "But you still can test with this model.\n"
-                               TEXT_RESET "\n", err);
+                                               "But you still can test with this model.\n" TEXT_RESET "\n",
+                          err);
        } else {
-               printf(TEXT_YELLOW "Recognition model has been learned."
-                               TEXT_RESET "\n");
+               printf(TEXT_YELLOW "Recognition model has been learned." TEXT_RESET "\n");
        }
 
        return err;
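
As the banner above warns, learning is only useful once examples are registered. A sketch of the intended order; the add/learn prototypes are assumptions taken from mv_face.h rather than from this hunk, and label 42 is arbitrary:

        mv_source_h example = NULL; /* assumed already filled from a face image */
        mv_face_recognition_model_h model = NULL;
        mv_face_recognition_model_create(&model);
        /* assumption: add(source, model, example_location, face_label) */
        mv_face_recognition_model_add(example, model, NULL, 42);
        /* assumption: learn(engine_cfg, model); NULL picks defaults */
        mv_face_recognition_model_learn(NULL, model);
        mv_face_recognition_model_destroy(model);
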
@@ -751,8 +646,7 @@ int perform_mv_face_recognition_model_query_labels(mv_face_recognition_model_h m
                return err;
        }
 
-       printf(TEXT_YELLOW "Recognition model had been learned for the following labels: "
-                       TEXT_RESET "\n" TEXT_GREEN);
+       printf(TEXT_YELLOW "Recognition model had been learned for the following labels: " TEXT_RESET "\n" TEXT_GREEN);
        for (unsigned i = 0; i < learned_labels_n; ++i)
                printf("%i, ", learned_labels[i]);
 
@@ -769,17 +663,11 @@ static int TN = 0;
 static int FN = 0;
 static double THRESHOLD = 0.75;
 
-void evaluation_cb(
-               mv_source_h source,
-               mv_face_recognition_model_h recognition_model,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s *face_location,
-               const int *face_label,
-               double confidence,
-               void *user_data)
+void evaluation_cb(mv_source_h source, mv_face_recognition_model_h recognition_model, mv_engine_config_h engine_cfg,
+                                  mv_rectangle_s *face_location, const int *face_label, double confidence, void *user_data)
 {
        if (NULL != user_data) {
-               const int real_label = *((int*)user_data);
+               const int real_label = *((int *) user_data);
                const int rec_label = (NULL != face_label ? *face_label : -1);
                if (real_label == -1) {
                        confidence >= THRESHOLD ? ++FP : ++TN;
@@ -802,8 +690,7 @@ int perform_model_evaluation(mv_face_recognition_model_h model)
        mv_face_recognition_model_query_labels(model, &learned_labels, &learned_labels_n);
 
        int i = 0;
-       printf(TEXT_YELLOW "Evaluating model had been learned for the following labels: "
-                       TEXT_RESET "\n" TEXT_GREEN);
+       printf(TEXT_YELLOW "Evaluating model had been learned for the following labels: " TEXT_RESET "\n" TEXT_GREEN);
        for (i = 0; i < learned_labels_n; ++i)
                printf("%i, ", learned_labels[i]);
 
@@ -811,7 +698,7 @@ int perform_model_evaluation(mv_face_recognition_model_h model)
 
        /* 100 directories are allowed: */
        const int max_dir_allowed = 100;
-       char (*directories)[1024] = malloc(sizeof *directories * max_dir_allowed);
+       char(*directories)[1024] = malloc(sizeof *directories * max_dir_allowed);
        int labels[max_dir_allowed];
        int unique_checks[MAX_ALLOWED_LABEL + 1];
        for (i = 0; i < MAX_ALLOWED_LABEL + 1; ++i)
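
The declaration clang-format tightened a few lines up, char (*directories)[1024], is a pointer to arrays of 1024 chars: one malloc provides max_dir_allowed fixed-width rows and one free releases them. The same idiom standalone, with illustrative sizes:

        #include <stdio.h>
        #include <stdlib.h>

        int main(void)
        {
                char (*rows)[1024] = malloc(sizeof *rows * 100); /* 100 rows of 1024 bytes */
                if (!rows)
                        return 1;
                snprintf(rows[0], sizeof rows[0], "/tmp/dir0"); /* rows[i] is a char[1024] */
                puts(rows[0]);
                free(rows); /* single free releases the whole table */
                return 0;
        }
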
@@ -819,16 +706,14 @@ int perform_model_evaluation(mv_face_recognition_model_h model)
 
        int dir_n = 0;
        int label_count = 0;
-       while (show_confirm_dialog("Add test images directory?") &&
-                       dir_n < max_dir_allowed) {
+       while (show_confirm_dialog("Add test images directory?") && dir_n < max_dir_allowed) {
                char *in_file_name = NULL;
                while (-1 == input_string("Specify path to the test images directory:", 1024, &(in_file_name)))
                        printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
 
                DIR *dir;
                if ((dir = opendir(in_file_name)) == NULL) {
-                       printf(TEXT_RED "Incorrect input! Directory %s can't be read.\n"
-                                       TEXT_RESET, in_file_name);
+                       printf(TEXT_RED "Incorrect input! Directory %s can't be read.\n" TEXT_RESET, in_file_name);
                        free(in_file_name);
                        in_file_name = NULL;
                        continue;
@@ -837,14 +722,9 @@ int perform_model_evaluation(mv_face_recognition_model_h model)
                }
 
                int face_label = 0;
-               if (-1 == input_int("Specify label as integer:",
-                                                               MIN_ALLOWED_LABEL,
-                                                               MAX_ALLOWED_LABEL,
-                                                               &face_label)) {
-                       printf(TEXT_RED "Incorrect input! You can use %i-%i labels only.\n"
-                                       TEXT_RESET,
-                                       MIN_ALLOWED_LABEL,
-                                       MAX_ALLOWED_LABEL);
+               if (-1 == input_int("Specify label as integer:", MIN_ALLOWED_LABEL, MAX_ALLOWED_LABEL, &face_label)) {
+                       printf(TEXT_RED "Incorrect input! You can use %i-%i labels only.\n" TEXT_RESET, MIN_ALLOWED_LABEL,
+                                  MAX_ALLOWED_LABEL);
                        free(in_file_name);
                        in_file_name = NULL;
                        continue;
@@ -860,7 +740,7 @@ int perform_model_evaluation(mv_face_recognition_model_h model)
 
                if (!known_label) {
                        printf(TEXT_YELLOW "Recognition model didn't learn with specified label.\n"
-                                                               "Images will be marked as unknown (-1)\n" TEXT_RESET);
+                                                          "Images will be marked as unknown (-1)\n" TEXT_RESET);
                }
 
                labels[dir_n] = known_label ? face_label : -1;
@@ -898,9 +778,7 @@ int perform_model_evaluation(mv_face_recognition_model_h model)
        int err = mv_create_source(&source);
        if (MEDIA_VISION_ERROR_NONE != err) {
                free(directories);
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during creating the source!!! code: %i"
-                               TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors were occurred during creating the source!!! code: %i" TEXT_RESET "\n", err);
                return err;
        }
 
@@ -923,38 +801,35 @@ int perform_model_evaluation(mv_face_recognition_model_h model)
                                        printf(TEXT_RED "Failed to test on example from %s. "
                                                                        "Example will not affect the evaluation. "
                                                                        "Error code: %i\n" TEXT_RESET,
-                                                                       file_path, err);
+                                                  file_path, err);
                                } else {
                                        err = mv_face_recognize(source, model, NULL, NULL, evaluation_cb, &(labels[i]));
-                               if (MEDIA_VISION_ERROR_NONE != err) {
-                                       printf(TEXT_RED "Failed to recognize on example from %s. "
+                                       if (MEDIA_VISION_ERROR_NONE != err) {
+                                               printf(TEXT_RED "Failed to recognize on example from %s. "
                                                                                "Example will not affect the evaluation. "
                                                                                "Error code: %i\n" TEXT_RESET,
-                                                                               file_path, err);
+                                                          file_path, err);
                                        }
                                }
                        }
 
                        closedir(dir);
                } else {
-                       printf(TEXT_RED "Can't read from directory [%s]\n"
-                                       TEXT_RESET, directories[i]);
+                       printf(TEXT_RED "Can't read from directory [%s]\n" TEXT_RESET, directories[i]);
                }
        }
 
        int err2 = mv_destroy_source(source);
        if (MEDIA_VISION_ERROR_NONE != err2) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during destroying the source!!! code: %i"
-                               TEXT_RESET "\n", err2);
+               printf(TEXT_RED "ERROR: Errors were occurred during destroying the source!!! code: %i" TEXT_RESET "\n", err2);
        }
 
        double accuracy = (TP + TN) / (double) (TP + FP + TN + FN);
 
-       double prec_denom = (double)(TP + FP);
+       double prec_denom = (double) (TP + FP);
        double precision = (prec_denom < 1.0) ? 0.0 : TP / prec_denom;
 
-       double recall_denom = (double)(TP + FN);
+       double recall_denom = (double) (TP + FN);
        double recall = (recall_denom < 1.0) ? 0.0 : TP / recall_denom;
 
        double f1 = 2 * precision * recall / (precision + recall);
@@ -976,13 +851,10 @@ int perform_model_evaluation(mv_face_recognition_model_h model)
 
 int perform_recognize()
 {
-       printf("\n" TEXT_YELLOW
-                       "Recognition model isn't now created.\n"
-                       "You may create it to perform positive \n"
-                       "testing, or don't create to check the \n"
-                       "functionality behaviour for uncreated model."
-                       TEXT_RESET
-                       "\n");
+       printf("\n" TEXT_YELLOW "Recognition model isn't now created.\n"
+                  "You may create it to perform positive \n"
+                  "testing, or don't create to check the \n"
+                  "functionality behaviour for uncreated model." TEXT_RESET "\n");
 
        int err = MEDIA_VISION_ERROR_NONE;
        mv_face_recognition_model_h recognition_model = NULL;
@@ -993,27 +865,19 @@ int perform_recognize()
                err = mv_face_recognition_model_create(&recognition_model);
                if (MEDIA_VISION_ERROR_NONE != err) {
                        printf(TEXT_RED "Creating the model failed. Error code: %i. "
-                                       "But you still can test with uncreated model.\n"
-                                       TEXT_RESET "\n", err);
+                                                       "But you still can test with uncreated model.\n" TEXT_RESET "\n",
+                                  err);
                } else {
-                       printf(TEXT_YELLOW "Recognition model has been created."
-                                       TEXT_RESET "\n");
+                       printf(TEXT_YELLOW "Recognition model has been created." TEXT_RESET "\n");
                }
        }
 
        int sel_opt = 0;
        const int options[11] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
-       const char *names[11] = { "Add image example",
-                                                               "Reset examples by id",
-                                                               "Reset all examples",
-                                                               "Clone the model",
-                                                               "Learn the model",
-                                                               "Show learned labels",
-                                                               "Save model to file",
-                                                               "Load model from file",
-                                                               "Recognize with model",
-                                                               "Evaluate the model",
-                                                               "Destroy model and exit" };
+       const char *names[11] = { "Add image example",  "Reset examples by id",  "Reset all examples",
+                                                         "Clone the model",    "Learn the model",               "Show learned labels",
+                                                         "Save model to file", "Load model from file",  "Recognize with model",
+                                                         "Evaluate the model", "Destroy model and exit" };
 
        while (!sel_opt) {
                sel_opt = show_menu("Select action:", options, names, 11);
@@ -1054,9 +918,7 @@ int perform_recognize()
                        if (do_create) {
                                err = mv_face_recognition_model_destroy(recognition_model);
                                if (MEDIA_VISION_ERROR_NONE != err) {
-                                       printf(TEXT_RED
-                                                               "Error with code %i was occurred during destroy"
-                                                               TEXT_RESET "\n", err);
+                                       printf(TEXT_RED "Error with code %i was occurred during destroy" TEXT_RESET "\n", err);
                                }
                        } else {
                                err = MEDIA_VISION_ERROR_NONE;
@@ -1081,8 +943,7 @@ int perform_mv_face_tracking_model_save(mv_face_tracking_model_h model)
 {
        char *out_file_name = NULL;
 
-       while (input_string("Input file name to save the model:",
-                                               1024, &(out_file_name)) == -1) {
+       while (input_string("Input file name to save the model:", 1024, &(out_file_name)) == -1) {
                printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
        }
 
@@ -1097,8 +958,7 @@ int perform_mv_face_tracking_model_load(mv_face_tracking_model_h *model)
 {
        char *in_file_name = NULL;
 
-       while (input_string("Input file name to load model from:",
-                                               1024, &(in_file_name)) == -1) {
+       while (input_string("Input file name to load model from:", 1024, &(in_file_name)) == -1) {
                printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
        }
 
@@ -1109,51 +969,46 @@ int perform_mv_face_tracking_model_load(mv_face_tracking_model_h *model)
        return err;
 }
 
-int perform_mv_face_tracking_model_clone(
-               mv_face_tracking_model_h model_to_clone)
+int perform_mv_face_tracking_model_clone(mv_face_tracking_model_h model_to_clone)
 {
        int err = MEDIA_VISION_ERROR_NONE;
 
        mv_face_tracking_model_h cloned_model = NULL;
 
-       printf(TEXT_GREEN "Perform clone of the tracking model..."
-                       TEXT_RESET "\n");
+       printf(TEXT_GREEN "Perform clone of the tracking model..." TEXT_RESET "\n");
 
        err = mv_face_tracking_model_clone(model_to_clone, &cloned_model);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED "Errors were occurred during model clone. Error code %i"
-                               TEXT_RESET "\n", err);
+               printf(TEXT_RED "Errors were occurred during model clone. Error code %i" TEXT_RESET "\n", err);
                return err;
        }
 
        printf(TEXT_YELLOW "Model cloning is done." TEXT_RESET "\n");
 
-       if (show_confirm_dialog("Save " TEXT_YELLOW "source model" TEXT_RESET
-                                                       " to file?")) {
+       if (show_confirm_dialog("Save " TEXT_YELLOW "source model" TEXT_RESET " to file?")) {
                const int serr = perform_mv_face_tracking_model_save(model_to_clone);
                if (MEDIA_VISION_ERROR_NONE != serr) {
-                       printf(TEXT_RED
-                                       "Errors were occurred when trying to save "
-                                       "source model to file. Error code %i" TEXT_RESET "\n", serr);
+                       printf(TEXT_RED "Errors were occurred when trying to save "
+                                                       "source model to file. Error code %i" TEXT_RESET "\n",
+                                  serr);
                }
        }
 
-       if (show_confirm_dialog("Save " TEXT_YELLOW "destination model" TEXT_RESET
-                                                       " to file?")) {
+       if (show_confirm_dialog("Save " TEXT_YELLOW "destination model" TEXT_RESET " to file?")) {
                const int serr = perform_mv_face_tracking_model_save(cloned_model);
                if (MEDIA_VISION_ERROR_NONE != serr) {
-                       printf(TEXT_RED
-                                       "Errors were occurred when trying to save destination model "
-                                       "to file. Error code %i" TEXT_RESET "\n", serr);
+                       printf(TEXT_RED "Errors were occurred when trying to save destination model "
+                                                       "to file. Error code %i" TEXT_RESET "\n",
+                                  serr);
                }
        }
 
        if (cloned_model) {
                const int dest_err = mv_face_tracking_model_destroy(cloned_model);
                if (MEDIA_VISION_ERROR_NONE != dest_err) {
-                       printf(TEXT_RED
-                                       "Errors were occurred when destroying destination model ."
-                                       "Error code %i" TEXT_RESET "\n", dest_err);
+                       printf(TEXT_RED "Errors were occurred when destroying destination model. "
+                                                       "Error code %i" TEXT_RESET "\n",
+                                  dest_err);
                }
        }
 
@@ -1162,68 +1017,51 @@ int perform_mv_face_tracking_model_clone(
 
 static volatile bool frame_read = false;
 
-void video_1_sample_cb(
-               char *buffer,
-               unsigned int buffer_size,
-               image_data_s image_data,
-               void *user_data)
+void video_1_sample_cb(char *buffer, unsigned int buffer_size, image_data_s image_data, void *user_data)
 {
        if (!frame_read) {
-               mv_source_h source = (mv_source_h)user_data;
+               mv_source_h source = (mv_source_h) user_data;
 
-               const int err = mv_source_fill_by_buffer(
-                                                       source,
-                                                       (unsigned char*)buffer,
-                                                       buffer_size,
-                                                       image_data.image_width,
-                                                       image_data.image_height,
-                                                       image_data.image_colorspace);
+               const int err = mv_source_fill_by_buffer(source, (unsigned char *) buffer, buffer_size, image_data.image_width,
+                                                                                                image_data.image_height, image_data.image_colorspace);
 
                if (MEDIA_VISION_ERROR_NONE != err) {
                        printf(TEXT_RED "ERROR: Errors were occurred during filling the "
-                                       "source based on the video frame! Error code: %i"
-                                       TEXT_RESET, err);
+                                                       "source based on the video frame! Error code: %i" TEXT_RESET,
+                                  err);
                }
 
                frame_read = true;
        }
 }
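
video_1_sample_cb is a first-frame grabber: the reader pushes every decoded frame through it, and the frame_read flag lets only the first frame fill the target source. A condensed sketch of the registration order used by load_source_from_first_video_frame below; the clip path is hypothetical, the info/fps types are assumed from their use in this suite, and the bare spin wait stands in for the real polling loop:

        mv_source_h first_frame = NULL;
        mv_video_reader_h reader = NULL;
        image_data_s video_info;
        unsigned int fps = 0;

        mv_create_source(&first_frame);
        mv_create_video_reader(&reader);
        mv_video_reader_set_new_sample_cb(reader, video_1_sample_cb, first_frame);
        mv_video_reader_load(reader, "/tmp/clip.mp4", &video_info, &fps);

        while (!frame_read)
                ; /* the callback flips the flag after filling first_frame */

        mv_video_reader_stop(reader);
        mv_destroy_video_reader(reader);
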
 
-void face_detected_for_tracking_cb(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               mv_rectangle_s *faces_locations,
-               int number_of_faces,
-               void *user_data)
+void face_detected_for_tracking_cb(mv_source_h source, mv_engine_config_h engine_cfg, mv_rectangle_s *faces_locations,
+                                                                  int number_of_faces, void *user_data)
 {
        if (number_of_faces < 1) {
                printf(TEXT_RED "Unfortunately, no faces were detected on the\n"
                                                "preparation frame. You has to specify bounding\n"
-                                               "quadrangles for tracking without advices."
-                               TEXT_RESET "\n");
+                                               "quadrangles for tracking without advices." TEXT_RESET "\n");
                return;
        }
 
        printf(TEXT_YELLOW "%i face(s) were detected at the preparation frame.\n"
-                                               "Following list includes information on faces bounding\n"
-                                               "boxes coordinates:"
-                       TEXT_RESET "\n", number_of_faces);
+                                          "Following list includes information on faces bounding\n"
+                                          "boxes coordinates:" TEXT_RESET "\n",
+                  number_of_faces);
 
        int idx = 0;
        while (idx < number_of_faces) {
                printf(TEXT_MAGENTA "Face %i bounding box: " TEXT_RESET "\n", ++idx);
-               printf(TEXT_CYAN "\tTop left point:     x1: %4i;  y1: %4i\n" TEXT_RESET,
-                               faces_locations[idx - 1].point.x,
-                               faces_locations[idx - 1].point.y);
+               printf(TEXT_CYAN "\tTop left point:     x1: %4i;  y1: %4i\n" TEXT_RESET, faces_locations[idx - 1].point.x,
+                          faces_locations[idx - 1].point.y);
                printf(TEXT_CYAN "\tTop right point:    x2: %4i;  y2: %4i\n" TEXT_RESET,
-                               faces_locations[idx - 1].point.x + faces_locations[idx - 1].width,
-                               faces_locations[idx - 1].point.y);
+                          faces_locations[idx - 1].point.x + faces_locations[idx - 1].width, faces_locations[idx - 1].point.y);
                printf(TEXT_CYAN "\tBottom right point: x3: %4i;  y3: %4i\n" TEXT_RESET,
-                               faces_locations[idx - 1].point.x + faces_locations[idx - 1].width,
-                               faces_locations[idx - 1].point.y + faces_locations[idx - 1].height);
-               printf(TEXT_CYAN "\tBottom right point: x4: %4i;  y4: %4i\n" TEXT_RESET,
-                               faces_locations[idx - 1].point.x,
-                               faces_locations[idx - 1].point.y + faces_locations[idx - 1].height);
+                          faces_locations[idx - 1].point.x + faces_locations[idx - 1].width,
+                          faces_locations[idx - 1].point.y + faces_locations[idx - 1].height);
+               printf(TEXT_CYAN "\tBottom left point:  x4: %4i;  y4: %4i\n" TEXT_RESET, faces_locations[idx - 1].point.x,
+                          faces_locations[idx - 1].point.y + faces_locations[idx - 1].height);
        }
 }
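
The loop above walks each detected rectangle's corners clockwise from the top left, so x4/y4 is the bottom-left corner. The corner arithmetic pulled out on its own, for an mv_rectangle_s r:

        int x1 = r.point.x;           int y1 = r.point.y;            /* top left */
        int x2 = r.point.x + r.width; int y2 = r.point.y;            /* top right */
        int x3 = r.point.x + r.width; int y3 = r.point.y + r.height; /* bottom right */
        int x4 = r.point.x;           int y4 = r.point.y + r.height; /* bottom left */
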
 
@@ -1233,22 +1071,22 @@ int load_source_from_first_video_frame(const char *video_file, mv_source_h sourc
        int err = mv_create_video_reader(&reader);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "ERROR: Errors were occurred during creating the video "
-                               "reader! Error code: %i\n" TEXT_RESET, err);
+                                               "reader! Error code: %i\n" TEXT_RESET,
+                          err);
                return err;
        }
 
-       err = mv_video_reader_set_new_sample_cb(
-                                       reader,
-                                       video_1_sample_cb,
-                                       source);
+       err = mv_video_reader_set_new_sample_cb(reader, video_1_sample_cb, source);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "ERROR: Errors were occurred during new sample "
-                               "callback set! Error code: %i\n" TEXT_RESET, err);
+                                               "callback set! Error code: %i\n" TEXT_RESET,
+                          err);
 
                const int err2 = mv_destroy_video_reader(reader);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf(TEXT_RED "ERROR: Errors were occurred during video reader "
-                                       "destroy! Error code: %i\n" TEXT_RESET, err);
+                                                       "destroy! Error code: %i\n" TEXT_RESET,
+                                  err);
                }
 
                return err;
@@ -1260,12 +1098,14 @@ int load_source_from_first_video_frame(const char *video_file, mv_source_h sourc
        err = mv_video_reader_load(reader, video_file, &video_info, &fps);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "ERROR: Errors were occurred during loading the video "
-                               "by reader! Error code: %i\n" TEXT_RESET, err);
+                                               "by reader! Error code: %i\n" TEXT_RESET,
+                          err);
 
                const int err2 = mv_destroy_video_reader(reader);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf(TEXT_RED "ERROR: Errors were occurred during video reader "
-                                       "destroy! Error code: %i\n" TEXT_RESET, err);
+                                                       "destroy! Error code: %i\n" TEXT_RESET,
+                                  err);
                }
 
                return err;
@@ -1277,13 +1117,15 @@ int load_source_from_first_video_frame(const char *video_file, mv_source_h sourc
                        int err2 = mv_video_reader_stop(reader);
                        if (MEDIA_VISION_ERROR_NONE != err2) {
                                printf(TEXT_RED "ERROR: Errors were occurred during attempt to "
-                                               "stop video reader! Error code: %i\n" TEXT_RESET, err2);
+                                                               "stop video reader! Error code: %i\n" TEXT_RESET,
+                                          err2);
                        }
 
                        err2 = mv_destroy_video_reader(reader);
                        if (MEDIA_VISION_ERROR_NONE != err2) {
                                printf(TEXT_RED "ERROR: Errors were occurred during video "
-                                               "reader destroy! Error code: %i\n" TEXT_RESET, err2);
+                                                               "reader destroy! Error code: %i\n" TEXT_RESET,
+                                          err2);
                        }
 
                        break;
@@ -1296,26 +1138,24 @@ int load_source_from_first_video_frame(const char *video_file, mv_source_h sourc
 int perform_mv_face_tracking_model_prepare(mv_face_tracking_model_h model)
 {
        printf(TEXT_YELLOW "Before any tracking session the tracking model\n"
-                                               "preparation is required. Exception is the case when\n"
-                                               "the next tracking session will be performed with the\n"
-                                               "video which is the direct continuation of the video\n"
-                                               "has been used at the previous tracking session.\n"
-                                               "Preparation has to be done with the first frame of\n"
-                                               "the video or first image from continuous image\n"
-                                               "sequence for which next tracking session plan to be\n"
-                                               "performed.\nTracking model preparation includes\n"
-                                               "specifying the location of the face to be tracked on\n"
-                                               "the first frame. Face tracking algorithm will try to\n"
-                                               "grab the face image significant features and\n"
-                                               "optionally will try to determine the background.\n"
-                                               "Actually, preparation is model-dependent and may\n"
-                                               "differs in respect to used tracking algorithm."
-                       TEXT_RESET "\n");
+                                          "preparation is required. Exception is the case when\n"
+                                          "the next tracking session will be performed with the\n"
+                                          "video which is the direct continuation of the video\n"
+                                          "has been used at the previous tracking session.\n"
+                                          "Preparation has to be done with the first frame of\n"
+                                          "the video or first image from continuous image\n"
+                                          "sequence for which next tracking session plan to be\n"
+                                          "performed.\nTracking model preparation includes\n"
+                                          "specifying the location of the face to be tracked on\n"
+                                          "the first frame. Face tracking algorithm will try to\n"
+                                          "grab the face image significant features and\n"
+                                          "optionally will try to determine the background.\n"
+                                          "Actually, preparation is model-dependent and may\n"
+                                          "differs in respect to used tracking algorithm." TEXT_RESET "\n");
 
        int sel_opt = 0;
        const int options[2] = { 1, 2 };
-       const char *names[2] = { "Prepare with the video file",
-                                                               "Prepare with the image file" };
+       const char *names[2] = { "Prepare with the video file", "Prepare with the image file" };
        bool is_video = false;
 
        while (!sel_opt) {
@@ -1337,17 +1177,14 @@ int perform_mv_face_tracking_model_prepare(mv_face_tracking_model_h model)
        int err = mv_create_source(&preparation_frame);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during creating the source!!! code: %i"
-                               TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors were occurred during creating the source!!! code: %i" TEXT_RESET "\n", err);
 
                return err;
        }
 
        char *init_frame_file_name = NULL;
        const char *prompt_str =
-                       (is_video ? "Input video file name to prepare the model:"
-                                               : "Input image file name to prepare the model:");
+                       (is_video ? "Input video file name to prepare the model:" : "Input image file name to prepare the model:");
 
        while (input_string(prompt_str, 1024, &(init_frame_file_name)) == -1)
                printf(TEXT_RED "Incorrect input! Try again.\n" TEXT_RESET);
@@ -1362,12 +1199,14 @@ int perform_mv_face_tracking_model_prepare(mv_face_tracking_model_h model)
 
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "ERROR: Errors were occurred during preparation "
-                               "frame/image load! Error code: %i\n" TEXT_RESET, err);
+                                               "frame/image load! Error code: %i\n" TEXT_RESET,
+                          err);
 
                int err2 = mv_destroy_source(preparation_frame);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf(TEXT_RED "ERROR: Errors were occurred during destroying the "
-                                       "source! Error code: %i\n" TEXT_RESET, err2);
+                                                       "source! Error code: %i\n" TEXT_RESET,
+                                  err2);
                }
 
                return err;
@@ -1377,35 +1216,33 @@ int perform_mv_face_tracking_model_prepare(mv_face_tracking_model_h model)
        err = mv_create_engine_config(&eng_config);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "ERROR: Errors were occurred during creating the "
-                               "engine config! Error code: %i\n" TEXT_RESET, err);
+                                               "engine config! Error code: %i\n" TEXT_RESET,
+                          err);
        } else {
-               err = mv_engine_config_set_string_attribute(
-                                       eng_config,
-                                       MV_FACE_DETECTION_MODEL_FILE_PATH,
-                                       "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml");
+               err = mv_engine_config_set_string_attribute(eng_config, MV_FACE_DETECTION_MODEL_FILE_PATH,
+                                                                                                       "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml");
 
                if (MEDIA_VISION_ERROR_NONE != err) {
                        printf(TEXT_RED "ERROR: Errors were occurred during setting of the "
-                                       "the 'MV_FACE_DETECTION_MODEL_FILE_PATH' attribute "
-                                       "for engine configuration! Check media-vision-config.json "
-                                       "file existence. Error code: %i" TEXT_RESET, err);
+                                                       "'MV_FACE_DETECTION_MODEL_FILE_PATH' attribute "
+                                                       "for engine configuration! Check media-vision-config.json "
+                                                       "file existence. Error code: %i" TEXT_RESET,
+                                  err);
                }
        }
 
-       err = mv_face_detect(
-                               preparation_frame,
-                               eng_config,
-                               face_detected_for_tracking_cb,
-                               NULL);
+       err = mv_face_detect(preparation_frame, eng_config, face_detected_for_tracking_cb, NULL);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "ERROR: Errors were occurred during face detection! "
-                               "Error code: %i\n" TEXT_RESET, err);
+                                               "Error code: %i\n" TEXT_RESET,
+                          err);
 
                int err2 = mv_destroy_engine_config(eng_config);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf(TEXT_RED "ERROR: Errors were occurred during destroying the "
-                                       "engine configuration! Error code: %i\n" TEXT_RESET, err2);
+                                                       "engine configuration! Error code: %i\n" TEXT_RESET,
+                                  err2);
                }
 
                return err;
@@ -1415,54 +1252,50 @@ int perform_mv_face_tracking_model_prepare(mv_face_tracking_model_h model)
 
        if (show_confirm_dialog("Do specify the face location?")) {
                printf(TEXT_YELLOW "Specify the coordinates of the quadrangle to be used\n"
-                                                       "for tracking model preparation:" TEXT_RESET "\n");
+                                                  "for tracking model preparation:" TEXT_RESET "\n");
                int idx = 0;
                char str_prompt[100];
                while (idx < 4) {
                        ++idx;
-                       snprintf(str_prompt, 100, "Specify point %i x coordinate: x%i = ",
-                                       idx - 1, idx);
-                       while (-1 == input_int(str_prompt, INT_MIN, INT_MAX,
-                                                                       &(roi.points[idx - 1].x)))
+                       snprintf(str_prompt, 100, "Specify point %i x coordinate: x%i = ", idx - 1, idx);
+                       while (-1 == input_int(str_prompt, INT_MIN, INT_MAX, &(roi.points[idx - 1].x)))
                                printf("Incorrect input! Try again.\n");
 
-                       snprintf(str_prompt, 100, "Specify point %i y coordinate: y%i = ",
-                                                               idx - 1, idx);
-                       while (-1 == input_int(str_prompt, INT_MIN, INT_MAX,
-                                                                       &(roi.points[idx - 1].y)))
+                       snprintf(str_prompt, 100, "Specify point %i y coordinate: y%i = ", idx - 1, idx);
+                       while (-1 == input_int(str_prompt, INT_MIN, INT_MAX, &(roi.points[idx - 1].y)))
                                printf("Incorrect input! Try again.\n");
                }
 
-               err = mv_face_tracking_model_prepare(
-                                       model, eng_config, preparation_frame, &roi);
+               err = mv_face_tracking_model_prepare(model, eng_config, preparation_frame, &roi);
        } else {
-               err = mv_face_tracking_model_prepare(
-                                       model, eng_config, preparation_frame, NULL);
+               err = mv_face_tracking_model_prepare(model, eng_config, preparation_frame, NULL);
        }
 
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "ERROR: Errors were occurred during the tracking model "
-                               "preparation! Error code: %i\n" TEXT_RESET, err);
+                                               "preparation! Error code: %i\n" TEXT_RESET,
+                          err);
        }
 
        const int err2 = mv_destroy_source(preparation_frame);
        if (MEDIA_VISION_ERROR_NONE != err2) {
                printf(TEXT_RED "ERROR: Errors were occurred during destroying the "
-                               "source! Error code: %i\n" TEXT_RESET, err2);
+                                               "source! Error code: %i\n" TEXT_RESET,
+                          err2);
        }
 
        int err3 = mv_destroy_engine_config(eng_config);
        if (MEDIA_VISION_ERROR_NONE != err3) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during destroying the engine config!!! code: %i"
-                               TEXT_RESET "\n", err3);
+               printf(TEXT_RED "ERROR: Errors were occurred during destroying the engine config!!! code: %i" TEXT_RESET "\n",
+                          err3);
                return err3;
        }
        eng_config = NULL;
        return err;
 }
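
End to end, preparation is: create an engine config, point MV_FACE_DETECTION_MODEL_FILE_PATH at a cascade, run mv_face_detect for a location hint, then pass the first frame (plus an optional quadrangle) to mv_face_tracking_model_prepare. A minimal sketch of the ROI variant; model, eng_config, first_frame and the detected rectangle face are assumed to be already set up as in the function above:

        mv_quadrangle_s roi;
        roi.points[0].x = face.point.x;              roi.points[0].y = face.point.y;
        roi.points[1].x = face.point.x + face.width; roi.points[1].y = face.point.y;
        roi.points[2].x = face.point.x + face.width; roi.points[2].y = face.point.y + face.height;
        roi.points[3].x = face.point.x;              roi.points[3].y = face.point.y + face.height;

        int err = mv_face_tracking_model_prepare(model, eng_config, first_frame, &roi);
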
 
-typedef struct {
+typedef struct
+{
        mv_face_tracking_model_h target;
        mv_video_writer_h writer;
        int frame_number;
@@ -1470,13 +1303,8 @@ typedef struct {
        char out_file_path[FILE_PATH_SIZE];
 } tracking_cb_data;
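
tracking_cb_data is the single user_data bundle threaded from the reader callback into track_cb below. Judging from the assignments visible in process_video_file further down, it is filled roughly as follows; the output path is hypothetical:

        tracking_cb_data cb_data;
        cb_data.target = tracking_model; /* prepared tracking model */
        cb_data.writer = writer;         /* initialized mv_video_writer_h */
        cb_data.frame_number = 0;
        cb_data.is_video = true;         /* false switches to per-frame JPEG output */
        snprintf(cb_data.out_file_path, FILE_PATH_SIZE, "/tmp/track_out");
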
 
-void track_cb(
-               mv_source_h source,
-               mv_face_tracking_model_h tracking_model,
-               mv_engine_config_h engine_cfg,
-               mv_quadrangle_s *location,
-               double confidence,
-               void *user_data)
+void track_cb(mv_source_h source, mv_face_tracking_model_h tracking_model, mv_engine_config_h engine_cfg,
+                         mv_quadrangle_s *location, double confidence, void *user_data)
 {
        int err = MEDIA_VISION_ERROR_NONE;
 
@@ -1486,14 +1314,10 @@ void track_cb(
        unsigned char *out_buffer = NULL;
        unsigned int buf_size = 0;
        image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
-       if (MEDIA_VISION_ERROR_NONE !=
-                       mv_source_get_buffer(source, &out_buffer, &buf_size) ||
-                       MEDIA_VISION_ERROR_NONE !=
-                       mv_source_get_width(source, &(image_data.image_width)) ||
-                       MEDIA_VISION_ERROR_NONE !=
-                       mv_source_get_height(source, &(image_data.image_height)) ||
-                       MEDIA_VISION_ERROR_NONE !=
-                       mv_source_get_colorspace(source, &(image_data.image_colorspace))) {
+       if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &out_buffer, &buf_size) ||
+               MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
+               MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
+               MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace))) {
                printf("ERROR: Creating out image is impossible.\n");
 
                return;
@@ -1501,51 +1325,36 @@ void track_cb(
 
        if (NULL != location) {
                if (!track_catch_face) {
-                       printf(TEXT_GREEN "Frame %i : Tracked object is appeared" TEXT_RESET "\n",
-                                       cb_data->frame_number);
+                       printf(TEXT_GREEN "Frame %i : Tracked object is appeared" TEXT_RESET "\n", cb_data->frame_number);
                        track_catch_face = true;
                } else {
-                       printf(TEXT_YELLOW "Frame %i : Tracked object is tracked" TEXT_RESET "\n",
-                                       cb_data->frame_number);
+                       printf(TEXT_YELLOW "Frame %i : Tracked object is tracked" TEXT_RESET "\n", cb_data->frame_number);
                }
 
                const int rectangle_thickness = 3;
-               const int drawing_color[] = {255, 0, 0};
-
-               printf(TEXT_YELLOW
-                               "Location: (%i,%i) -> (%i,%i) -> (%i,%i) -> (%i,%i)\n"
-                               TEXT_RESET,
-                               location->points[0].x,
-                               location->points[0].y,
-                               location->points[1].x,
-                               location->points[1].y,
-                               location->points[2].x,
-                               location->points[2].y,
-                               location->points[3].x,
-                               location->points[3].y);
+               const int drawing_color[] = { 255, 0, 0 };
+
+               printf(TEXT_YELLOW "Location: (%i,%i) -> (%i,%i) -> (%i,%i) -> (%i,%i)\n" TEXT_RESET, location->points[0].x,
+                          location->points[0].y, location->points[1].x, location->points[1].y, location->points[2].x,
+                          location->points[2].y, location->points[3].x, location->points[3].y);
                printf(TEXT_YELLOW "Track confidence: %f" TEXT_RESET "\n", confidence);
 
-               const int err = draw_quadrangle_on_buffer(
-                                                               *location,
-                                                               rectangle_thickness,
-                                                               drawing_color,
-                                                               &image_data,
-                                                               out_buffer);
+               const int err =
+                               draw_quadrangle_on_buffer(*location, rectangle_thickness, drawing_color, &image_data, out_buffer);
 
                if (MEDIA_VISION_ERROR_NONE != err) {
                        printf(TEXT_RED "ERROR: Quadrangle wasn't drew on frame buffer! "
-                                       "Error code: %i\n" TEXT_RESET, err);
+                                                       "Error code: %i\n" TEXT_RESET,
+                                  err);
 
                        return;
                }
        } else {
                if (track_catch_face) {
-                       printf(TEXT_RED "Frame %i : Tracked object is lost" TEXT_RESET "\n",
-                                       cb_data->frame_number);
+                       printf(TEXT_RED "Frame %i : Tracked object is lost" TEXT_RESET "\n", cb_data->frame_number);
                        track_catch_face = false;
                } else {
-                       printf(TEXT_YELLOW "Frame %i : Tracked object isn't detected" TEXT_RESET "\n",
-                                       cb_data->frame_number);
+                       printf(TEXT_YELLOW "Frame %i : Tracked object isn't detected" TEXT_RESET "\n", cb_data->frame_number);
                }
        }
 
@@ -1553,85 +1362,65 @@ void track_cb(
                err = mv_video_writer_write_frame(cb_data->writer, out_buffer);
                if (MEDIA_VISION_ERROR_NONE != err) {
                        printf(TEXT_RED "ERROR: Errors were occurred during writing frame #%i"
-                                       "to the result video file; code %i" TEXT_RESET "\n",
-                                       cb_data->frame_number,
-                                       err);
+                                                       " to the result video file; code %i" TEXT_RESET "\n",
+                                  cb_data->frame_number, err);
                        return;
                }
        } else {
                char out_file_name[FILE_PATH_SIZE];
-               if(snprintf(out_file_name, FILE_PATH_SIZE, "%s_%03d.jpg",
-                                       cb_data->out_file_path,
-                                       cb_data->frame_number) < 0)
-                                       {
-                                               printf("Output file name truncated.");
-                                       }
+               if (snprintf(out_file_name, FILE_PATH_SIZE, "%s_%03d.jpg", cb_data->out_file_path, cb_data->frame_number) >= FILE_PATH_SIZE) {
+                       printf("Output file name truncated.\n");
+               }
 
                err = save_image_from_buffer(out_file_name, out_buffer, &image_data, 100);
                if (MEDIA_VISION_ERROR_NONE != err) {
                        printf(TEXT_RED "ERROR: Errors were occurred during writing frame #%i"
-                                       "to the result image file; code %i" TEXT_RESET "\n",
-                                       cb_data->frame_number,
-                                       err);
+                                                       " to the result image file; code %i" TEXT_RESET "\n",
+                                  cb_data->frame_number, err);
                        return;
                }
        }
 }
 
-void track_on_sample_cb(
-               char *buffer,
-               unsigned int buffer_size,
-               image_data_s image_data,
-               void *user_data)
+void track_on_sample_cb(char *buffer, unsigned int buffer_size, image_data_s image_data, void *user_data)
 {
        mv_source_h source = NULL;
        int err = mv_create_source(&source);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "ERROR: Errors were occurred during creating the source "
-                               "based on the video frame! Error code: %i\n" TEXT_RESET, err);
+                                               "based on the video frame! Error code: %i\n" TEXT_RESET,
+                          err);
 
                return;
        }
 
-       err = mv_source_fill_by_buffer(
-                                               source,
-                                               (unsigned char*)buffer,
-                                               buffer_size,
-                                               image_data.image_width,
-                                               image_data.image_height,
-                                               image_data.image_colorspace);
+       err = mv_source_fill_by_buffer(source, (unsigned char *) buffer, buffer_size, image_data.image_width,
+                                                                  image_data.image_height, image_data.image_colorspace);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "ERROR: Errors were occurred during filling the source "
-                               "based on the video frame! Error code: %i\n" TEXT_RESET , err);
+                                               "based on the video frame! Error code: %i\n" TEXT_RESET,
+                          err);
 
                const int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
-                       printf(TEXT_RED
-                                       "ERROR: Errors were occurred during destroying the source!!! code: %i"
-                                       TEXT_RESET "\n", err2);
+                       printf(TEXT_RED "ERROR: Errors were occurred during destroying the source!!! code: %i" TEXT_RESET "\n",
+                                  err2);
                }
 
                return;
        }
 
-       tracking_cb_data *cb_data = (tracking_cb_data*)user_data;
+       tracking_cb_data *cb_data = (tracking_cb_data *) user_data;
        ++(cb_data->frame_number);
 
-       err = mv_face_track(
-                       source,
-                       cb_data->target,
-                       NULL,
-                       track_cb,
-                       false,
-                       cb_data);
+       err = mv_face_track(source, cb_data->target, NULL, track_cb, false, cb_data);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED "ERROR: Errors were occurred during tracking the face "
-                               TEXT_RESET "on the video frame #%i! Error code: %i\n",
-                               cb_data->frame_number,
-                               err);
+               printf(TEXT_RED "ERROR: Errors were occurred during tracking the face " TEXT_RESET
+                                               "on the video frame #%i! Error code: %i\n",
+                          cb_data->frame_number, err);
        }
 
        return;
@@ -1642,25 +1431,23 @@ void eos_cb(void *user_data)
 {
        printf("Video was fully processed\n");
        if (NULL == user_data) {
-               printf(TEXT_RED
-                               "ERROR: eos callback can't stop tracking process."TEXT_RESET);
+               printf(TEXT_RED "ERROR: eos callback can't stop tracking process." TEXT_RESET);
                return;
        }
 
-       pthread_mutex_unlock((pthread_mutex_t*)user_data);
+       pthread_mutex_unlock((pthread_mutex_t *) user_data);
 }
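
eos_cb is the release half of a mutex-as-gate handshake: the main thread locks the mutex before playback, blocks on a second lock while frames stream, and eos_cb unlocks it from the reader thread at end of stream. A minimal sketch, assuming block_during_tracking_mutex below is used this way; note that unlocking a default POSIX mutex from another thread is formally undefined, so a semaphore or condition variable would be the stricter choice:

        pthread_mutex_t gate = PTHREAD_MUTEX_INITIALIZER;

        pthread_mutex_lock(&gate); /* arm the gate before frames start */
        mv_video_reader_set_eos_cb(reader, eos_cb, &gate);
        /* ... mv_video_reader_load(...) starts asynchronous delivery ... */
        pthread_mutex_lock(&gate); /* blocks here until eos_cb unlocks */
        pthread_mutex_unlock(&gate);
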
 
-static inline void release_resources_for_process_video_file_function(
-       mv_video_reader_h *reader,
-       mv_video_writer_h *writer)
+static inline void release_resources_for_process_video_file_function(mv_video_reader_h *reader,
+                                                                                                                                        mv_video_writer_h *writer)
 {
        int err = MEDIA_VISION_ERROR_NONE;
        if (*reader) {
                err = mv_destroy_video_reader(*reader);
                if (MEDIA_VISION_ERROR_NONE != err) {
                        printf(TEXT_RED "ERROR: Errors were occurred during video reader"
-                                       "destroying; code %i" TEXT_RESET "\n",
-                                       err);
+                                                       " destroying; code %i" TEXT_RESET "\n",
+                                  err);
                }
                *reader = NULL;
        }
@@ -1668,26 +1455,24 @@ static inline void release_resources_for_process_video_file_function(
                err = mv_destroy_video_writer(*writer);
                if (MEDIA_VISION_ERROR_NONE != err) {
                        printf(TEXT_RED "ERROR: Errors were occurred during video writer"
-                                       "destroying; code %i" TEXT_RESET "\n",
-                                       err);
+                                                       " destroying; code %i" TEXT_RESET "\n",
+                                  err);
                }
                *writer = NULL;
        }
 }
 
-int process_video_file(
-       mv_face_tracking_model_h tracking_model,
-       const char *track_target_file_name,
-       const char *track_result_file_name)
+int process_video_file(mv_face_tracking_model_h tracking_model, const char *track_target_file_name,
+                                          const char *track_result_file_name)
 {
        mv_video_reader_h reader = NULL;
        mv_video_writer_h writer = NULL;
 
        int err = mv_create_video_reader(&reader);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during creating the video "
-                               "reader! Error code: %i" TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors were occurred during creating the video "
+                                               "reader! Error code: %i" TEXT_RESET "\n",
+                          err);
                release_resources_for_process_video_file_function(&reader, &writer);
 
                return err;
@@ -1695,9 +1480,9 @@ int process_video_file(
 
        err = mv_create_video_writer(&writer);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during creating the video "
-                               "writer! Error code: %i" TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors were occurred during creating the video "
+                                               "writer! Error code: %i" TEXT_RESET "\n",
+                          err);
                release_resources_for_process_video_file_function(&reader, &writer);
 
                return err;
@@ -1709,7 +1494,8 @@ int process_video_file(
        err = mv_video_reader_load(reader, track_target_file_name, &video_info, &fps);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "ERROR: Errors were occurred during loading the video "
-                               "by reader! Error code: %i" TEXT_RESET "\n", err);
+                                               "by reader! Error code: %i" TEXT_RESET "\n",
+                          err);
                release_resources_for_process_video_file_function(&reader, &writer);
 
                return err;
@@ -1717,14 +1503,11 @@ int process_video_file(
 
        video_info.image_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
 
-       err = mv_video_writer_init(
-                       writer,
-                       track_result_file_name,
-                       video_info,
-                       fps);
+       err = mv_video_writer_init(writer, track_result_file_name, video_info, fps);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "\nERROR: Errors were occurred during video writer"
-                               "initializing; code %i" TEXT_RESET "\n", err);
+                                               "initializing; code %i" TEXT_RESET "\n",
+                          err);
                release_resources_for_process_video_file_function(&reader, &writer);
 
                return err;
@@ -1736,14 +1519,11 @@ int process_video_file(
        cb_data.frame_number = 0;
        cb_data.is_video = true;
 
-       err = mv_video_reader_set_new_sample_cb(
-                       reader,
-                       track_on_sample_cb,
-                       &cb_data);
+       err = mv_video_reader_set_new_sample_cb(reader, track_on_sample_cb, &cb_data);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during new sample callback set!"
-                               " Error code: %i" TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors were occurred during new sample callback set!"
+                                               " Error code: %i" TEXT_RESET "\n",
+                          err);
                release_resources_for_process_video_file_function(&reader, &writer);
 
                return err;
@@ -1756,9 +1536,9 @@ int process_video_file(
        /* set end of stream callback */
        err = mv_video_reader_set_eos_cb(reader, eos_cb, &block_during_tracking_mutex);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(TEXT_RED
-                               "ERROR: Errors were occurred during setting the eos "
-                               "callback for reader! Error code: %i" TEXT_RESET "\n", err);
+               printf(TEXT_RED "ERROR: Errors were occurred during setting the eos "
+                                               "callback for reader! Error code: %i" TEXT_RESET "\n",
+                          err);
                release_resources_for_process_video_file_function(&reader, &writer);
 
                pthread_mutex_unlock(&block_during_tracking_mutex);
@@ -1770,7 +1550,8 @@ int process_video_file(
        err = mv_video_reader_start(reader);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "ERROR: Errors were occurred during starting the "
-                               "video reader! Error code: %i" TEXT_RESET "\n", err);
+                                               "video reader! Error code: %i" TEXT_RESET "\n",
+                          err);
                release_resources_for_process_video_file_function(&reader, &writer);
 
                pthread_mutex_unlock(&block_during_tracking_mutex);
@@ -1788,8 +1569,8 @@ int process_video_file(
        err = mv_video_reader_stop(reader);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf(TEXT_RED "ERROR: Errors were occurred during "
-                               "attempt to stop video reader! Error code: %i\n"
-                               TEXT_RESET, err);
+                                               "attempt to stop video reader! Error code: %i\n" TEXT_RESET,
+                          err);
        }
 
        release_resources_for_process_video_file_function(&reader, &writer);
@@ -1799,13 +1580,11 @@ int process_video_file(
 
 static int cmpstring(const void *p1, const void *p2)
 {
-       return strcmp((char * const) p1, (char * const) p2);
+       return strcmp((char *const) p1, (char *const) p2);
 }
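cmpstring() satisfies qsort's comparator contract for the frames array declared in process_image_file just below: each element is a char[FILE_PATH_SIZE], so the void pointers qsort hands the comparator already point at NUL-terminated paths. A hedged usage sketch (presumably how the suite orders frame_000.jpg, frame_001.jpg, ...):

    /* element size is the full fixed-width slot, not strlen() */
    qsort(frames, frames_counter, FILE_PATH_SIZE, cmpstring);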
 
-int process_image_file(
-       mv_face_tracking_model_h tracking_model,
-       const char *track_target_file_name,
-       const char *track_result_file_name)
+int process_image_file(mv_face_tracking_model_h tracking_model, const char *track_target_file_name,
+                                          const char *track_result_file_name)
 {
        if (track_target_file_name == NULL)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -1813,7 +1592,7 @@ int process_image_file(
        int frame_idx;
        int err = MEDIA_VISION_ERROR_NONE;
        int frames_counter = 0;
-       char (*frames)[FILE_PATH_SIZE] = malloc(0);
+       char(*frames)[FILE_PATH_SIZE] = malloc(0);
 
        struct dirent *ent_eof;
        DIR *dir = opendir(track_target_file_name);
@@ -1827,14 +1606,13 @@ int process_image_file(
                if (ent_eof->d_name[0] == '.' || strlen(ent_eof->d_name) < 4)
                        continue;
 
-               if (strcmp(".jpg", ent_eof->d_name + strlen(ent_eof->d_name) -4) != 0)
+               if (strcmp(".jpg", ent_eof->d_name + strlen(ent_eof->d_name) - 4) != 0)
                        continue;
 
                frames_counter++;
                frames = realloc(frames, frames_counter * FILE_PATH_SIZE);
 
-               snprintf(frames[frames_counter -1], FILE_PATH_SIZE, "%s/%s",
-                       track_target_file_name, ent_eof->d_name);
+               snprintf(frames[frames_counter - 1], FILE_PATH_SIZE, "%s/%s", track_target_file_name, ent_eof->d_name);
        }
        closedir(dir);
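One caveat in the scan loop above: assigning realloc()'s result straight back to frames leaks the old block if the allocation fails. A hedged, safer variant of the growth step, using the same types as the patch (MEDIA_VISION_ERROR_OUT_OF_MEMORY is an assumed choice of error code):

    char (*tmp)[FILE_PATH_SIZE] = realloc(frames, (frames_counter + 1) * FILE_PATH_SIZE);
    if (tmp == NULL) {
        free(frames);   /* the old block is still valid; release it */
        closedir(dir);
        return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
    }
    frames = tmp;
    frames_counter++;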
 
@@ -1849,7 +1627,8 @@ int process_image_file(
        err = mv_create_source(&source);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf(TEXT_RED "\nERROR: Errors(code %i) were occurred during"
-                               "mv_create_source" TEXT_RESET "\n", err);
+                                               "mv_create_source" TEXT_RESET "\n",
+                          err);
                free(frames);
                if (source)
                        mv_destroy_source(source);
@@ -1869,7 +1648,8 @@ int process_image_file(
                err = mv_source_clear(source);
                if (err != MEDIA_VISION_ERROR_NONE) {
                        printf(TEXT_RED "\nWARN: WARN(code %i) were occurred during"
-                               "mv_source_clear, but keep going" TEXT_RESET "\n", err);
+                                                       "mv_source_clear, but keep going" TEXT_RESET "\n",
+                                  err);
                }
 
                unsigned char *data_buffer = NULL;
@@ -1879,7 +1659,8 @@ int process_image_file(
                err = load_image_to_buffer(frames[frame_idx], &data_buffer, &buffer_size, &image_data);
                if (err != MEDIA_VISION_ERROR_NONE) {
                        printf(TEXT_RED "\nWARN: WARN(code %i) were occurred during"
-                               "load_image_to_media_source, but continue" TEXT_RESET "\n", err);
+                                                       "load_image_to_media_source, but continue" TEXT_RESET "\n",
+                                  err);
 
                        if (data_buffer != NULL)
                                destroy_loaded_buffer(data_buffer);
@@ -1887,31 +1668,31 @@ int process_image_file(
                        continue;
                }
 
-               err = mv_source_fill_by_buffer(source, data_buffer,
-                                               buffer_size,
-                                               image_data.image_width,
-                                               image_data.image_height,
-                                               image_data.image_colorspace);
+               err = mv_source_fill_by_buffer(source, data_buffer, buffer_size, image_data.image_width,
+                                                                          image_data.image_height, image_data.image_colorspace);
                if (data_buffer != NULL)
                        destroy_loaded_buffer(data_buffer);
 
                if (err != MEDIA_VISION_ERROR_NONE) {
                        printf(TEXT_RED "\nWARN: WARN(code %i) were occurred during"
-                               "mv_source_fill_by_buffer, but continue" TEXT_RESET "\n", err);
+                                                       "mv_source_fill_by_buffer, but continue" TEXT_RESET "\n",
+                                  err);
                        continue;
                }
 
                err = mv_face_track(source, tracking_model, NULL, track_cb, false, &cb_data);
                if (err != MEDIA_VISION_ERROR_NONE) {
                        printf(TEXT_RED "\nWARN: WARN(code %i) were occurred during"
-                               "mv_face_track, but keep going" TEXT_RESET "\n", err);
+                                                       "mv_face_track, but keep going" TEXT_RESET "\n",
+                                  err);
                }
        }
 
        err = mv_destroy_source(source);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf(TEXT_RED "\nWARN: WARN(code %i) were occurred during"
-                               "mv_destroy_source, but keep going" TEXT_RESET "\n", err);
+                                               "mv_destroy_source, but keep going" TEXT_RESET "\n",
+                          err);
        }
        free(frames);
 
@@ -1921,22 +1702,20 @@ int process_image_file(
 int perform_mv_face_track(mv_face_tracking_model_h tracking_model)
 {
        printf(TEXT_YELLOW "Before any tracking session the tracking model\n"
-                                               "preparation is required. Exception is the case when\n"
-                                               "the next tracking session will be performed with the\n"
-                                               "video which is the direct continuation of the video\n"
-                                               "has been used at the previous tracking session.\n"
-                                               "If you want to test correct tracking case, don't\n"
-                                               "forget to perform preparation before tracking."
-                       TEXT_RESET "\n");
+                                          "preparation is required. Exception is the case when\n"
+                                          "the next tracking session will be performed with the\n"
+                                          "video which is the direct continuation of the video\n"
+                                          "has been used at the previous tracking session.\n"
+                                          "If you want to test correct tracking case, don't\n"
+                                          "forget to perform preparation before tracking." TEXT_RESET "\n");
 
        char *track_target_file_name = NULL;
        char *track_result_file_name = NULL;
 
        int res = MEDIA_VISION_ERROR_NONE;
        int sel_opt = 0;
-       const int options[2] = {1, 2};
-       const char *names[2] = {"Track with the video file",
-                               "Track with the image files"};
+       const int options[2] = { 1, 2 };
+       const char *names[2] = { "Track with the video file", "Track with the image files" };
 
        bool is_video = false;
        while (!sel_opt) {
@@ -1956,31 +1735,21 @@ int perform_mv_face_track(mv_face_tracking_model_h tracking_model)
        }
 
        if (is_video) {
-               while (input_string("Input video file name to track on:",
-                                                       1024, &(track_target_file_name)) == -1)
+               while (input_string("Input video file name to track on:", 1024, &(track_target_file_name)) == -1)
                        printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
 
-               while (input_string("Input video file name to save tracking results:",
-                                                       1024, &(track_result_file_name)) == -1)
+               while (input_string("Input video file name to save tracking results:", 1024, &(track_result_file_name)) == -1)
                        printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
 
-               res =  process_video_file(
-                               tracking_model,
-                               track_target_file_name,
-                               track_result_file_name);
+               res = process_video_file(tracking_model, track_target_file_name, track_result_file_name);
        } else {
-               while (input_string("Input image file path to track on:",
-                                                       1024, &(track_target_file_name)) == -1)
+               while (input_string("Input image file path to track on:", 1024, &(track_target_file_name)) == -1)
                        printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
 
-               while (input_string("Input image file name to save tracking results:",
-                                                       1024, &(track_result_file_name)) == -1)
+               while (input_string("Input image file name to save tracking results:", 1024, &(track_result_file_name)) == -1)
                        printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
 
-               res = process_image_file(
-                               tracking_model,
-                               track_target_file_name,
-                               track_result_file_name);
+               res = process_image_file(tracking_model, track_target_file_name, track_result_file_name);
        }
 
        free(track_target_file_name);
@@ -1991,13 +1760,10 @@ int perform_mv_face_track(mv_face_tracking_model_h tracking_model)
 
 int perform_track()
 {
-       printf("\n" TEXT_YELLOW
-                       "Tracking model isn't now created.\n"
-                       "You may create it to perform positive \n"
-                       "testing, or don't create to check the \n"
-                       "functionality behaviour for uncreated model."
-                       TEXT_RESET
-                       "\n");
+       printf("\n" TEXT_YELLOW "Tracking model isn't now created.\n"
+                  "You may create it to perform positive \n"
+                  "testing, or don't create to check the \n"
+                  "functionality behaviour for uncreated model." TEXT_RESET "\n");
 
        int err = MEDIA_VISION_ERROR_NONE;
        mv_face_tracking_model_h tracking_model = NULL;
@@ -2008,22 +1774,17 @@ int perform_track()
                err = mv_face_tracking_model_create(&tracking_model);
                if (MEDIA_VISION_ERROR_NONE != err) {
                        printf(TEXT_RED "Creating the model failed. Error code: %i. "
-                                       "But you still can test with uncreated model.\n"
-                                       TEXT_RESET "\n", err);
+                                                       "But you still can test with uncreated model.\n" TEXT_RESET "\n",
+                                  err);
                } else {
-                       printf(TEXT_YELLOW "Tracking model has been created."
-                                       TEXT_RESET "\n");
+                       printf(TEXT_YELLOW "Tracking model has been created." TEXT_RESET "\n");
                }
        }
 
        int sel_opt = 0;
        const int options[6] = { 1, 2, 3, 4, 5, 6 };
-       const char *names[6] = { "Prepare the model",
-                                                               "Clone the model",
-                                                               "Save model to file",
-                                                               "Load model from file",
-                                                               "Track with model",
-                                                               "Destroy model and exit" };
+       const char *names[6] = { "Prepare the model",    "Clone the model",      "Save model to file",
+                                                        "Load model from file", "Track with model", "Destroy model and exit" };
 
        while (!sel_opt) {
                sel_opt = show_menu("Select action:", options, names, 6);
@@ -2050,9 +1811,7 @@ int perform_track()
                        if (do_create) {
                                err = mv_face_tracking_model_destroy(tracking_model);
                                if (MEDIA_VISION_ERROR_NONE != err) {
-                                       printf(TEXT_RED
-                                                       "Error with code %i was occurred during destroy"
-                                                       TEXT_RESET "\n", err);
+                                       printf(TEXT_RED "Error with code %i was occurred during destroy" TEXT_RESET "\n", err);
                                }
                        } else {
                                err = MEDIA_VISION_ERROR_NONE;
@@ -2101,12 +1860,7 @@ int main(void)
 
        int sel_opt = 0;
        const int options[6] = { 1, 2, 3, 4, 5, 6 };
-       const char *names[6] = { "Detect",
-                                                               "Track",
-                                                               "Recognize",
-                                                               "Eye condition",
-                                                               "Face expression",
-                                                               "Exit" };
+       const char *names[6] = { "Detect", "Track", "Recognize", "Eye condition", "Face expression", "Exit" };
 
        while (sel_opt == 0) {
                sel_opt = show_menu("Select action:", options, names, 6);
index a18ef5a..c564059 100644
@@ -29,7 +29,8 @@
 
 #define FILE_PATH_SIZE 1024
 
-typedef enum {
+typedef enum
+{
        SOURCE_TYPE_GENERATION,
        SOURCE_TYPE_LOADING,
        SOURCE_TYPE_CLONING,
@@ -37,7 +38,8 @@ typedef enum {
        SOURCE_TYPE_INVALID
 } source_type_e;
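The enum (and, below, struct) braces dropping onto their own line is characteristic of clang-format's per-construct brace wrapping. A sketch of the kind of .clang-format keys that would produce the output seen in this patch; these are inferred from the visible formatting, not copied from the .clang-format file the patch also touches:

    # inferred sketch, YAML
    BreakBeforeBraces: Custom
    BraceWrapping:
      AfterEnum:   true
      AfterStruct: true
    ColumnLimit: 120        # matches the re-wrapped printf literals
    UseTab: ForContinuationAndIndentation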
 
-typedef enum {
+typedef enum
+{
        OBJECT_TYPE_IMAGE_OBJECT,
        OBJECT_TYPE_IMAGE_TRACKING_MODEL,
        OBJECT_TYPE_INVALID
@@ -45,7 +47,8 @@ typedef enum {
 
 #define testing_object_maximum_label_length 300
 
-typedef struct testing_object_s {
+typedef struct testing_object_s
+{
        void *entity;
 
        char origin_label[testing_object_maximum_label_length];
@@ -65,7 +68,7 @@ void testing_object_create(testing_object_h *result)
 {
        (*result) = malloc(sizeof(testing_object));
 
-       (*result)->entity = (void*)NULL;
+       (*result)->entity = (void *) NULL;
        (*result)->object_type = OBJECT_TYPE_INVALID;
        (*result)->source_type = SOURCE_TYPE_INVALID;
        (*result)->cloning_counter = 0;
@@ -73,12 +76,8 @@ void testing_object_create(testing_object_h *result)
        (*result)->actual_label[0] = '\0';
 }
 
-void testing_object_fill(
-               testing_object_h target,
-               void *entity,
-               testing_object_type_e object_type,
-               source_type_e source_type,
-               void *source)
+void testing_object_fill(testing_object_h target, void *entity, testing_object_type_e object_type,
+                                                source_type_e source_type, void *source)
 {
        target->entity = entity;
        target->object_type = object_type;
@@ -88,43 +87,38 @@ void testing_object_fill(
        switch (source_type) {
        case SOURCE_TYPE_GENERATION: {
                if (OBJECT_TYPE_IMAGE_OBJECT == object_type) {
-                       snprintf(
-                                               target->origin_label,
-                                               testing_object_maximum_label_length,
-                                               "generated from \"%s\"",
-                                               (char*)source);
+                       snprintf(target->origin_label, testing_object_maximum_label_length, "generated from \"%s\"",
+                                        (char *) source);
                } else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == object_type) {
                        char *title = "generated from image object which is ";
                        memcpy(target->origin_label, title, strlen(title));
-                       memcpy(target->origin_label + strlen(title), ((testing_object_h)source)->actual_label,
-                               testing_object_maximum_label_length - strlen(title));
-                       int pos = MIN(strlen(title) + strlen(((testing_object_h)source)->actual_label), testing_object_maximum_label_length - 1);
+                       memcpy(target->origin_label + strlen(title), ((testing_object_h) source)->actual_label,
+                                  testing_object_maximum_label_length - strlen(title));
+                       int pos = MIN(strlen(title) + strlen(((testing_object_h) source)->actual_label),
+                                                 testing_object_maximum_label_length - 1);
                        target->origin_label[pos] = '\0';
                } else {
-                       snprintf(
-                                               target->origin_label,
-                                               testing_object_maximum_label_length,
-                                               "generated unknown type of testing object");
+                       snprintf(target->origin_label, testing_object_maximum_label_length,
+                                        "generated unknown type of testing object");
                }
 
                strncpy(target->actual_label, target->origin_label, testing_object_maximum_label_length);
                break;
        }
        case SOURCE_TYPE_LOADING: {
-               snprintf(target->origin_label, testing_object_maximum_label_length, "loaded from \"%s\"", (char*)source);
+               snprintf(target->origin_label, testing_object_maximum_label_length, "loaded from \"%s\"", (char *) source);
                strncpy(target->actual_label, target->origin_label, testing_object_maximum_label_length);
                break;
        }
        case SOURCE_TYPE_CLONING: {
-               testing_object_h source_object = (testing_object_h)source;
+               testing_object_h source_object = (testing_object_h) source;
                strncpy(target->origin_label, source_object->origin_label, testing_object_maximum_label_length);
                target->cloning_counter = source_object->cloning_counter + 1;
 
                char number_of_cloning[20];
                number_of_cloning[0] = '\0';
                if (1 < target->cloning_counter) {
-                       snprintf(number_of_cloning, 20, "%s%i%s",
-                                               "(x", target->cloning_counter, ")");
+                       snprintf(number_of_cloning, 20, "%s%i%s", "(x", target->cloning_counter, ")");
                }
 
                char type_name[20];
@@ -135,13 +129,10 @@ void testing_object_fill(
                else
                        snprintf(type_name, 20, "unknown object");
 
-               snprintf(target->actual_label, testing_object_maximum_label_length,
-                                               "%s%s%s%s%s",
-                                               "cloned ", number_of_cloning,
-                                               " from ", type_name,
-                                               " which is ");
-               memcpy(target->actual_label+strlen(target->actual_label), target->origin_label,
-                               testing_object_maximum_label_length - strlen(target->actual_label));
+               snprintf(target->actual_label, testing_object_maximum_label_length, "%s%s%s%s%s", "cloned ", number_of_cloning,
+                                " from ", type_name, " which is ");
+               memcpy(target->actual_label + strlen(target->actual_label), target->origin_label,
+                          testing_object_maximum_label_length - strlen(target->actual_label));
                break;
        }
        case SOURCE_TYPE_EMPTY: {
@@ -163,29 +154,31 @@ void testing_object_destroy(testing_object_h *target)
                int err = mv_image_object_destroy((mv_image_object_h)((*target)->entity));
                if (MEDIA_VISION_ERROR_NONE != err) {
                        printf("\nERROR: Errors were occurred during image object "
-                                               "destroying; code %i\n", err);
+                                  "destroying; code %i\n",
+                                  err);
                }
                break;
        }
        case OBJECT_TYPE_IMAGE_TRACKING_MODEL: {
-               int err = mv_image_tracking_model_destroy(
-                                       (mv_image_tracking_model_h)((*target)->entity));
+               int err = mv_image_tracking_model_destroy((mv_image_tracking_model_h)((*target)->entity));
                if (MEDIA_VISION_ERROR_NONE != err) {
                        printf("\nERROR: Errors were occurred during image tracking "
-                                               "model destroying; code %i\n", err);
+                                  "model destroying; code %i\n",
+                                  err);
                }
                break;
        }
-       case OBJECT_TYPE_INVALID :
+       case OBJECT_TYPE_INVALID:
                printf("\nERROR: Errors were occurred during image tracking "
-                                       "model destroying; OBJECT_TYPE_INVALID\n");
+                          "model destroying; OBJECT_TYPE_INVALID\n");
                break;
        }
        free(*target);
        (*target) = NULL;
 }
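Together, testing_object_create/fill/destroy implement the provenance-label lifecycle the image tests use to describe where each handle came from. A hedged usage sketch; image_object stands in for a handle produced by generate_image_object_from_file further down, and "sample.jpg" is a placeholder path:

    testing_object_h obj = NULL;
    testing_object_create(&obj);
    testing_object_fill(obj, image_object, OBJECT_TYPE_IMAGE_OBJECT,
                        SOURCE_TYPE_GENERATION, (void *) "sample.jpg");
    printf("%s\n", obj->actual_label);  /* -> generated from "sample.jpg" */
    testing_object_destroy(&obj);       /* destroys the wrapped entity, frees, NULLs */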
 
-typedef struct {
+typedef struct
+{
        mv_quadrangle_s **locations;
        unsigned int locations_size;
        unsigned int currently_number;
@@ -204,20 +197,15 @@ void destroy_recognition_result(recognition_result *result)
        free(result->locations);
 }
 
-void recognized_cb(
-               mv_source_h source,
-               mv_engine_config_h engine_cfg,
-               const mv_image_object_h *image_objects,
-               mv_quadrangle_s **locations,
-               unsigned int number_of_objects,
-               void *user_data)
+void recognized_cb(mv_source_h source, mv_engine_config_h engine_cfg, const mv_image_object_h *image_objects,
+                                  mv_quadrangle_s **locations, unsigned int number_of_objects, void *user_data)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
        if (NULL == user_data)
                return;
 
-       recognition_result *result = (recognition_result*)user_data;
+       recognition_result *result = (recognition_result *) user_data;
 
        int object_num = 0;
        for (; object_num < number_of_objects; ++object_num) {
@@ -237,11 +225,8 @@ void recognized_cb(
        MEDIA_VISION_FUNCTION_LEAVE();
 }
 
-void handle_recognition_result(
-               const recognition_result *result,
-               int number_of_objects,
-               mv_source_h *source,
-               const char *file_name)
+void handle_recognition_result(const recognition_result *result, int number_of_objects, mv_source_h *source,
+                                                          const char *file_name)
 {
        int is_source_data_loaded = 0;
 
@@ -250,10 +235,10 @@ void handle_recognition_result(
        image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
 
        if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &(out_buffer), &buffer_size) ||
-                       MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
-                       MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
-                       MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
-                       NULL == file_name) {
+               MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
+               MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
+               MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
+               NULL == file_name) {
                printf("ERROR: Creating out image is impossible.\n");
        } else {
                is_source_data_loaded = 1;
@@ -272,39 +257,31 @@ void handle_recognition_result(
 
                int point_num = 0;
                for (; point_num < 4; ++point_num) {
-                       printf("%d point - x = %d, y = %d\n", point_num + 1,
-                                       result->locations[object_num]->points[point_num].x,
-                                       result->locations[object_num]->points[point_num].y);
+                       printf("%d point - x = %d, y = %d\n", point_num + 1, result->locations[object_num]->points[point_num].x,
+                                  result->locations[object_num]->points[point_num].y);
                }
 
                if (is_source_data_loaded) {
                        const int thickness = 2;
-                       const int color[] = {0, 255, 0};
+                       const int color[] = { 0, 255, 0 };
 
-                       const int err = draw_quadrangle_on_buffer(
-                                       *(result->locations[object_num]),
-                                       thickness,
-                                       color,
-                                       &image_data,
-                                       out_buffer);
+                       const int err = draw_quadrangle_on_buffer(*(result->locations[object_num]), thickness, color, &image_data,
+                                                                                                         out_buffer);
 
                        if (MEDIA_VISION_ERROR_NONE != err)
                                printf("ERROR: Impossible to draw quadrangle\n");
                }
        }
 
-       if (save_image_from_buffer(file_name, out_buffer,
-                       &image_data, 100) != MEDIA_VISION_ERROR_NONE) {
+       if (save_image_from_buffer(file_name, out_buffer, &image_data, 100) != MEDIA_VISION_ERROR_NONE) {
                printf("\nERROR: Failed to generate output file\n");
        } else {
                printf("\nImage was generated as %s\n", file_name);
        }
 }
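The overlay-and-save flow in handle_recognition_result reduces to two test-suite helpers. A minimal sketch, assuming quad (an mv_quadrangle_s), out_buffer and image_data are already populated as above, and "result.jpg" is a stand-in path:

    const int color[] = { 0, 255, 0 };  /* green in RGB888 */
    if (draw_quadrangle_on_buffer(quad, 2 /* px thickness */, color,
                                  &image_data, out_buffer) == MEDIA_VISION_ERROR_NONE)
        save_image_from_buffer("result.jpg", out_buffer, &image_data, 100);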
 
-int generate_image_object_from_file(const char *path_to_image,
-                                               bool roi_selected,
-                                               mv_rectangle_s roi,
-                                               mv_image_object_h *result)
+int generate_image_object_from_file(const char *path_to_image, bool roi_selected, mv_rectangle_s roi,
+                                                                       mv_image_object_h *result)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
@@ -321,7 +298,8 @@ int generate_image_object_from_file(const char *path_to_image,
                int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during source "
-                                       "destroying; code %i\n", err2);
+                                  "destroying; code %i\n",
+                                  err2);
                }
 
                MEDIA_VISION_FUNCTION_LEAVE();
@@ -337,7 +315,8 @@ int generate_image_object_from_file(const char *path_to_image,
                int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during source "
-                                       "destroying; code %i\n", err2);
+                                  "destroying; code %i\n",
+                                  err2);
                }
 
                MEDIA_VISION_FUNCTION_LEAVE();
@@ -348,18 +327,21 @@ int generate_image_object_from_file(const char *path_to_image,
        err = mv_image_object_create(result);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf("ERROR: Errors were occurred during creating image object; "
-                               "code %i\n", err);
+                          "code %i\n",
+                          err);
 
                int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during source "
-                                       "destroying; code %i\n", err2);
+                                  "destroying; code %i\n",
+                                  err2);
                }
 
                err2 = mv_destroy_engine_config(config);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during engine config "
-                                       "destroying; code %i\n", err2);
+                                  "destroying; code %i\n",
+                                  err2);
                }
 
                MEDIA_VISION_FUNCTION_LEAVE();
@@ -374,24 +356,28 @@ int generate_image_object_from_file(const char *path_to_image,
 
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf("ERROR: Errors were occurred during filling image object; "
-                               "code %i\n", err);
+                          "code %i\n",
+                          err);
 
                int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during source "
-                                       "destroying; code %i\n", err2);
+                                  "destroying; code %i\n",
+                                  err2);
                }
 
                err2 = mv_image_object_destroy(*result);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during image object "
-                                       "destroying; code %i\n", err2);
+                                  "destroying; code %i\n",
+                                  err2);
                }
 
                err2 = mv_destroy_engine_config(config);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during engine config "
-                                       "destroying; code %i\n", err2);
+                                  "destroying; code %i\n",
+                                  err2);
                }
 
                *result = NULL;
@@ -404,12 +390,14 @@ int generate_image_object_from_file(const char *path_to_image,
        err = mv_destroy_source(source);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf("\nERROR: Errors were occurred during source "
-                               "destroying; code %i\n", err);
+                          "destroying; code %i\n",
+                          err);
 
                int err2 = mv_destroy_engine_config(config);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during engine config "
-                                       "destroying; code %i\n", err2);
+                                  "destroying; code %i\n",
+                                  err2);
                }
 
                MEDIA_VISION_FUNCTION_LEAVE();
@@ -420,7 +408,8 @@ int generate_image_object_from_file(const char *path_to_image,
        err = mv_destroy_engine_config(config);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf("\nERROR: Errors were occurred during engine config "
-                               "destroying; code %i\n", err);
+                          "destroying; code %i\n",
+                          err);
 
                MEDIA_VISION_FUNCTION_LEAVE();
 
@@ -431,10 +420,8 @@ int generate_image_object_from_file(const char *path_to_image,
        return err;
 }
 
-int recognize_image(const char *path_to_image,
-               const char *path_to_generated_image,
-               mv_image_object_h *targets,
-               int number_of_targets)
+int recognize_image(const char *path_to_image, const char *path_to_generated_image, mv_image_object_h *targets,
+                                       int number_of_targets)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
@@ -456,7 +443,8 @@ int recognize_image(const char *path_to_image,
                int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during source destroying; "
-                                               "code %i\n", err2);
+                                  "code %i\n",
+                                  err2);
                }
                MEDIA_VISION_FUNCTION_LEAVE();
                return err;
@@ -465,7 +453,7 @@ int recognize_image(const char *path_to_image,
        recognition_result result;
        result.currently_number = 0;
        if (0 < number_of_targets) {
-               result.locations = malloc(sizeof(mv_quadrangle_s*) * number_of_targets);
+               result.locations = malloc(sizeof(mv_quadrangle_s *) * number_of_targets);
                result.locations_size = number_of_targets;
        } else {
                result.locations = NULL;
@@ -479,15 +467,15 @@ int recognize_image(const char *path_to_image,
                int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during source destroying;"
-                                       "code %i\n", err2);
+                                  "code %i\n",
+                                  err2);
                }
                destroy_recognition_result(&result);
                MEDIA_VISION_FUNCTION_LEAVE();
                return err;
        }
 
-       err = mv_image_recognize(source, targets, number_of_targets, config,
-                                                               recognized_cb, &result);
+       err = mv_image_recognize(source, targets, number_of_targets, config, recognized_cb, &result);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf("\nERROR: Image is not recognized; code %i\n", err);
@@ -497,12 +485,14 @@ int recognize_image(const char *path_to_image,
                int err2 = mv_destroy_source(source);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during source "
-                                       "destroying; code %i\n", err2);
+                                  "destroying; code %i\n",
+                                  err2);
                }
                err2 = mv_destroy_engine_config(config);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during engine config "
-                                       "destroying; code %i\n", err2);
+                                  "destroying; code %i\n",
+                                  err2);
                }
 
                MEDIA_VISION_FUNCTION_LEAVE();
@@ -510,20 +500,19 @@ int recognize_image(const char *path_to_image,
                return err;
        }
 
-       handle_recognition_result(&result, number_of_targets, source,
-                       path_to_generated_image);
+       handle_recognition_result(&result, number_of_targets, source, path_to_generated_image);
 
        destroy_recognition_result(&result);
 
        err = mv_destroy_source(source);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf("\nERROR: Errors were occurred during source destroying; code %i\n",
-                               err);
+               printf("\nERROR: Errors were occurred during source destroying; code %i\n", err);
 
                int err2 = mv_destroy_engine_config(config);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during engine config "
-                                       "destroying; code %i\n", err2);
+                                  "destroying; code %i\n",
+                                  err2);
                }
 
                MEDIA_VISION_FUNCTION_LEAVE();
@@ -534,7 +523,8 @@ int recognize_image(const char *path_to_image,
        err = mv_destroy_engine_config(config);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf("\nERROR: Errors were occurred during engine config destroying; "
-                               "code %i\n", err);
+                          "code %i\n",
+                          err);
 
                MEDIA_VISION_FUNCTION_LEAVE();
 
@@ -611,16 +601,13 @@ int perform_recognize(mv_image_object_h *targets, int number_of_targets)
        char *path_to_image = NULL;
        char *path_to_generated_image = NULL;
 
-       while (input_string("Input file name with image for recognizing:",
-                                               1024, &path_to_image) == -1)
+       while (input_string("Input file name with image for recognizing:", 1024, &path_to_image) == -1)
                printf("Incorrect input! Try again.\n");
 
-       while (input_string("Input file name for generated image:",
-                                               1024, &path_to_generated_image) == -1)
+       while (input_string("Input file name for generated image:", 1024, &path_to_generated_image) == -1)
                printf("Incorrect input! Try again.\n");
 
-       const int err = recognize_image(path_to_image, path_to_generated_image, targets,
-                                                                       number_of_targets);
+       const int err = recognize_image(path_to_image, path_to_generated_image, targets, number_of_targets);
 
        free(path_to_image);
        free(path_to_generated_image);
@@ -639,8 +626,7 @@ int perform_load_image_object(char **path_to_object, mv_image_object_h *result)
                *result = NULL;
        }
 
-       while (input_string("Input file name with image object to be loaded:",
-                                               1024, path_to_object) == -1)
+       while (input_string("Input file name with image object to be loaded:", 1024, path_to_object) == -1)
                printf("Incorrect input! Try again.\n");
 
        int err = mv_image_object_load(*path_to_object, result);
@@ -663,8 +649,7 @@ int perform_save_image_object(mv_image_object_h object)
        int err = MEDIA_VISION_ERROR_NONE;
        char *path_to_object = NULL;
 
-       while (input_string("Input file name to be generated for image object storing:",
-                                               1024, &path_to_object) == -1)
+       while (input_string("Input file name to be generated for image object storing:", 1024, &path_to_object) == -1)
                printf("Incorrect input! Try again.\n");
 
        err = mv_image_object_save(path_to_object, object);
@@ -691,30 +676,24 @@ int perform_generate_image_object(mv_image_object_h *result, char **path_to_imag
        if (NULL == path_to_image || NULL == result)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
 
-       while (input_string("Input file name with image to be analyzed:",
-                                                       1024, path_to_image) == -1)
+       while (input_string("Input file name with image to be analyzed:", 1024, path_to_image) == -1)
                printf("Incorrect input! Try again.\n");
 
        mv_rectangle_s roi;
        const bool sel_roi = show_confirm_dialog("Select if you want to set ROI");
        if (sel_roi) {
                printf("\nInput ROI coordinates\n");
-               while (input_int("Input x coordinate:", INT_MIN, INT_MAX,
-                                       &(roi.point.x)) == -1)
+               while (input_int("Input x coordinate:", INT_MIN, INT_MAX, &(roi.point.x)) == -1)
                        printf("Incorrect input! Try again.\n");
 
-               while (input_int("Input y coordinate:", INT_MIN, INT_MAX,
-                                       &(roi.point.y)) == -1)
+               while (input_int("Input y coordinate:", INT_MIN, INT_MAX, &(roi.point.y)) == -1)
                        printf("Incorrect input! Try again.\n");
 
-               while (input_int("Input ROI width:", INT_MIN, INT_MAX,
-                                       &(roi.width)) == -1)
+               while (input_int("Input ROI width:", INT_MIN, INT_MAX, &(roi.width)) == -1)
                        printf("Incorrect input! Try again.\n");
 
-               while (input_int("Input ROI height:", INT_MIN, INT_MAX,
-                                       &(roi.height)) == -1)
+               while (input_int("Input ROI height:", INT_MIN, INT_MAX, &(roi.height)) == -1)
                        printf("Incorrect input! Try again.\n");
-
        }
 
        int err = generate_image_object_from_file(*path_to_image, sel_roi, roi, result);
@@ -747,7 +726,8 @@ int perform_clone_image_object(mv_image_object_h src, mv_image_object_h *result)
                int err2 = mv_image_object_destroy(*result);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during image object "
-                                       "destroying; code %i\n", err);
+                                  "destroying; code %i\n",
+                                  err);
                }
 
                (*result) = NULL;
@@ -760,11 +740,8 @@ int perform_clone_image_object(mv_image_object_h src, mv_image_object_h *result)
        return err;
 }
 
-int handle_tracking_video_result(
-               mv_video_writer_h writer,
-               mv_source_h frame,
-               int frame_number,
-               mv_quadrangle_s *location)
+int handle_tracking_video_result(mv_video_writer_h writer, mv_source_h frame, int frame_number,
+                                                                mv_quadrangle_s *location)
 {
        unsigned char *data_buffer = NULL;
        unsigned int buffer_size = 0;
@@ -772,67 +749,49 @@ int handle_tracking_video_result(
 
        int err = mv_source_get_buffer(frame, &data_buffer, &buffer_size);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "ERROR: Errors were occurred during getting buffer from the "
-                               "source; code %i\n",
-                               err);
+               printf("ERROR: Errors were occurred during getting buffer from the "
+                          "source; code %i\n",
+                          err);
                return err;
        }
 
        err = mv_source_get_width(frame, &image_data.image_width);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "ERROR: Errors were occurred during getting width from the "
-                               "source; code %i\n",
-                               err);
+               printf("ERROR: Errors were occurred during getting width from the "
+                          "source; code %i\n",
+                          err);
                return err;
        }
 
        err = mv_source_get_height(frame, &image_data.image_height);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "ERROR: Errors were occurred during getting height from the "
-                               "source; code %i\n",
-                               err);
+               printf("ERROR: Errors were occurred during getting height from the "
+                          "source; code %i\n",
+                          err);
                return err;
        }
 
        err = mv_source_get_colorspace(frame, &image_data.image_colorspace);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "ERROR: Errors were occurred during getting colorspace from the "
-                               "source; code %i\n",
-                               err);
+               printf("ERROR: Errors were occurred during getting colorspace from the "
+                          "source; code %i\n",
+                          err);
                return err;
        }
 
        if (location) {
-               printf(
-                               "Frame #%i: object is found."
-                               "Location: {%i, %i}; {%i, %i}; {%i, %i}; {%i, %i}.\n",
-                               frame_number,
-                               location->points[0].x,
-                               location->points[0].y,
-                               location->points[1].x,
-                               location->points[1].y,
-                               location->points[2].x,
-                               location->points[2].y,
-                               location->points[3].x,
-                               location->points[3].y);
+               printf("Frame #%i: object is found."
+                          "Location: {%i, %i}; {%i, %i}; {%i, %i}; {%i, %i}.\n",
+                          frame_number, location->points[0].x, location->points[0].y, location->points[1].x, location->points[1].y,
+                          location->points[2].x, location->points[2].y, location->points[3].x, location->points[3].y);
                const int thickness = 2;
-               const int color[] = {0, 255, 0};
-
-               err = draw_quadrangle_on_buffer(
-                               *location,
-                               thickness,
-                               color,
-                               &image_data,
-                               data_buffer);
+               const int color[] = { 0, 255, 0 };
+
+               err = draw_quadrangle_on_buffer(*location, thickness, color, &image_data, data_buffer);
                if (MEDIA_VISION_ERROR_NONE != err) {
-                       printf(
-                                       "ERROR: Errors were occurred during drawing quadrangle on "
-                                       "the frame; code %i\n",
-                                       err);
+                       printf("ERROR: Errors were occurred during drawing quadrangle on "
+                                  "the frame; code %i\n",
+                                  err);
                        return err;
                }
        } else {
@@ -842,21 +801,17 @@ int handle_tracking_video_result(
 
        err = mv_video_writer_write_frame(writer, data_buffer);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "ERROR: Errors were occurred during writing frame to the "
-                               "result video file; code %i\n",
-                               err);
+               printf("ERROR: Errors were occurred during writing frame to the "
+                          "result video file; code %i\n",
+                          err);
                return err;
        }
 
        return err;
 }
 
-int handle_tracking_image_result(
-               const char *out_file_path,
-               mv_source_h frame,
-               int frame_number,
-               mv_quadrangle_s *location)
+int handle_tracking_image_result(const char *out_file_path, mv_source_h frame, int frame_number,
+                                                                mv_quadrangle_s *location)
 {
        unsigned char *data_buffer = NULL;
        unsigned int buffer_size = 0;
@@ -864,67 +819,49 @@ int handle_tracking_image_result(
 
        int err = mv_source_get_buffer(frame, &data_buffer, &buffer_size);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "ERROR: Errors were occurred during getting buffer from the "
-                               "source; code %i\n",
-                               err);
+               printf("ERROR: Errors were occurred during getting buffer from the "
+                          "source; code %i\n",
+                          err);
                return err;
        }
 
        err = mv_source_get_width(frame, &image_data.image_width);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "ERROR: Errors were occurred during getting width from the "
-                               "source; code %i\n",
-                               err);
+               printf("ERROR: Errors were occurred during getting width from the "
+                          "source; code %i\n",
+                          err);
                return err;
        }
 
        err = mv_source_get_height(frame, &image_data.image_height);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "ERROR: Errors were occurred during getting height from the "
-                               "source; code %i\n",
-                               err);
+               printf("ERROR: Errors were occurred during getting height from the "
+                          "source; code %i\n",
+                          err);
                return err;
        }
 
        err = mv_source_get_colorspace(frame, &image_data.image_colorspace);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "ERROR: Errors were occurred during getting image colorspace from the "
-                               "source; code %i\n",
-                               err);
+               printf("ERROR: Errors were occurred during getting image colorspace from the "
+                          "source; code %i\n",
+                          err);
                return err;
        }
 
        if (location) {
-               printf(
-                               "Frame #%i: object is found."
-                               "Location: {%i, %i}; {%i, %i}; {%i, %i}; {%i, %i}.\n",
-                               frame_number,
-                               location->points[0].x,
-                               location->points[0].y,
-                               location->points[1].x,
-                               location->points[1].y,
-                               location->points[2].x,
-                               location->points[2].y,
-                               location->points[3].x,
-                               location->points[3].y);
+               printf("Frame #%i: object is found."
+                          "Location: {%i, %i}; {%i, %i}; {%i, %i}; {%i, %i}.\n",
+                          frame_number, location->points[0].x, location->points[0].y, location->points[1].x, location->points[1].y,
+                          location->points[2].x, location->points[2].y, location->points[3].x, location->points[3].y);
                const int thickness = 2;
-               const int color[] = {0, 255, 0};
-
-               err = draw_quadrangle_on_buffer(
-                               *location,
-                               thickness,
-                               color,
-                               &image_data,
-                               data_buffer);
+               const int color[] = { 0, 255, 0 };
+
+               err = draw_quadrangle_on_buffer(*location, thickness, color, &image_data, data_buffer);
                if (MEDIA_VISION_ERROR_NONE != err) {
-                       printf(
-                                       "ERROR: Errors were occurred during drawing quadrangle on "
-                                       "the frame; code %i\n",
-                                       err);
+                       printf("ERROR: Errors were occurred during drawing quadrangle on "
+                                  "the frame; code %i\n",
+                                  err);
                        return err;
                }
        } else {
@@ -933,23 +870,21 @@ int handle_tracking_image_result(
        }
 
        char out_file_name[FILE_PATH_SIZE];
-       snprintf(out_file_name, FILE_PATH_SIZE, "%s_%03d.jpg",
-                                       out_file_path,
-                                       frame_number);
+       snprintf(out_file_name, FILE_PATH_SIZE, "%s_%03d.jpg", out_file_path, frame_number);
 
        err = save_image_from_buffer(out_file_name, data_buffer, &image_data, 100);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "ERROR: Errors were occurred during writing frame to the "
-                               "result image file; code %i\n",
-                               err);
+               printf("ERROR: Errors were occurred during writing frame to the "
+                          "result image file; code %i\n",
+                          err);
                return err;
        }
 
        return err;
 }
 
-typedef struct {
+typedef struct
+{
        mv_image_tracking_model_h target;
        mv_video_writer_h writer;
        int frame_number;
@@ -957,19 +892,15 @@ typedef struct {
        char out_file_path[FILE_PATH_SIZE];
 } tracking_cb_data;
 
-void tracked_cb(
-               mv_source_h source,
-               mv_image_object_h image_object,
-               mv_engine_config_h engine_cfg,
-               mv_quadrangle_s *location,
-               void *user_data)
+void tracked_cb(mv_source_h source, mv_image_object_h image_object, mv_engine_config_h engine_cfg,
+                               mv_quadrangle_s *location, void *user_data)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
        if (NULL == user_data)
                return;
 
-       tracking_cb_data *cb_data = (tracking_cb_data*)user_data;
+       tracking_cb_data *cb_data = (tracking_cb_data *) user_data;
 
        if (cb_data->is_video)
                handle_tracking_video_result(cb_data->writer, source, cb_data->frame_number, location);
@@ -979,66 +910,49 @@ void tracked_cb(
        MEDIA_VISION_FUNCTION_LEAVE();
 }
 
-void new_frame_cb(
-               char *buffer,
-               unsigned int buffer_size,
-               image_data_s image_data,
-               void *user_data)
+void new_frame_cb(char *buffer, unsigned int buffer_size, image_data_s image_data, void *user_data)
 {
        if (NULL == user_data)
                return;
 
        mv_source_h frame = NULL;
 
-#define release_resources() \
-       if (frame) { \
-               const int err2 = mv_destroy_source(frame); \
-               if (MEDIA_VISION_ERROR_NONE != err2) { \
-                       printf( \
-                                       "\nERROR: Errors were occurred during source destroying; " \
-                                       "code %i\n", \
-                                       err2); \
-               } \
+#define release_resources()                                                   \
+       if (frame) {                                                              \
+               const int err2 = mv_destroy_source(frame);                            \
+               if (MEDIA_VISION_ERROR_NONE != err2) {                                \
+                       printf("\nERROR: Errors were occurred during source destroying; " \
+                                  "code %i\n",                                               \
+                                  err2);                                                     \
+               }                                                                     \
        }
 
-       tracking_cb_data *cb_data = (tracking_cb_data*)user_data;
+       tracking_cb_data *cb_data = (tracking_cb_data *) user_data;
 
        ++(cb_data->frame_number);
 
        int err = mv_create_source(&frame);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "\nERROR: Errors were occurred during source creating; "
-                               "code %i\n",
-                               err);
+               printf("\nERROR: Errors were occurred during source creating; "
+                          "code %i\n",
+                          err);
                release_resources();
                return;
        }
 
-       err = mv_source_fill_by_buffer(
-                       frame,
-                       (unsigned char*)buffer,
-                       buffer_size,
-                       image_data.image_width,
-                       image_data.image_height,
-                       image_data.image_colorspace);
+       err = mv_source_fill_by_buffer(frame, (unsigned char *) buffer, buffer_size, image_data.image_width,
+                                                                  image_data.image_height, image_data.image_colorspace);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf("ERROR: mv_source_h for frame is not filled; code %i\n", err);
                release_resources();
                return;
        }
 
-       err = mv_image_track(
-                       frame,
-                       cb_data->target,
-                       NULL,
-                       tracked_cb,
-                       cb_data);
+       err = mv_image_track(frame, cb_data->target, NULL, tracked_cb, cb_data);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "ERROR: Errors were occurred during tracking object on "
-                               "the video; code %i\n",
-                               err);
+               printf("ERROR: Errors were occurred during tracking object on "
+                          "the video; code %i\n",
+                          err);
                release_resources();
                return;
        }
@@ -1048,15 +962,14 @@ void new_frame_cb(
 #undef release_resources
 }
 
-void eos_frame_cb(
-               void *user_data)
+void eos_frame_cb(void *user_data)
 {
        if (NULL == user_data) {
                printf("ERROR: eos callback can't stop tracking process.");
                return;
        }
 
-       pthread_mutex_unlock((pthread_mutex_t*)user_data);
+       pthread_mutex_unlock((pthread_mutex_t *) user_data);
 }
 
 int perform_track_video(mv_image_tracking_model_h target)
@@ -1072,50 +985,47 @@ int perform_track_video(mv_image_tracking_model_h target)
        mv_video_writer_h writer = NULL;
        char *path_to_video = NULL;
        char *path_to_generated_video = NULL;
-       image_data_s image_data = {0};
+       image_data_s image_data = { 0 };
        unsigned int fps = 0;
 
-#define release_resources() \
-       int err2 = MEDIA_VISION_ERROR_NONE; \
-       if (reader) { \
-               err2 = mv_destroy_video_reader(reader); \
-               if (MEDIA_VISION_ERROR_NONE != err2) { \
-                       printf( \
-                                       "\nERROR: Errors were occurred during video reader destroying; " \
-                                       "code %i\n", \
-                                       err2); \
-               } \
-       } \
-       if (writer) { \
-               err2 = mv_destroy_video_writer(writer); \
-               if (MEDIA_VISION_ERROR_NONE != err2) { \
-                       printf( \
-                                       "\nERROR: Errors were occurred during video writer destroying; " \
-                                       "code %i\n", \
-                                       err2); \
-               } \
-       } \
-       if (path_to_video) { \
-               free(path_to_video); \
-       } \
-       if (path_to_generated_video) { \
-               free(path_to_generated_video); \
-       }
-
-       while (input_string("Input file name with video for tracking:",
-                                                       1024, &path_to_video) == -1) {
+#define release_resources()                                                         \
+       int err2 = MEDIA_VISION_ERROR_NONE;                                             \
+       if (reader) {                                                                   \
+               err2 = mv_destroy_video_reader(reader);                                     \
+               if (MEDIA_VISION_ERROR_NONE != err2) {                                      \
+                       printf("\nERROR: Errors were occurred during video reader destroying; " \
+                                  "code %i\n",                                                     \
+                                  err2);                                                           \
+               }                                                                           \
+       }                                                                               \
+       if (writer) {                                                                   \
+               err2 = mv_destroy_video_writer(writer);                                     \
+               if (MEDIA_VISION_ERROR_NONE != err2) {                                      \
+                       printf("\nERROR: Errors were occurred during video writer destroying; " \
+                                  "code %i\n",                                                     \
+                                  err2);                                                           \
+               }                                                                           \
+       }                                                                               \
+       if (path_to_video) {                                                            \
+               free(path_to_video);                                                        \
+       }                                                                               \
+       if (path_to_generated_video) {                                                  \
+               free(path_to_generated_video);                                              \
+       }
+
+       while (input_string("Input file name with video for tracking:", 1024, &path_to_video) == -1) {
                printf("Incorrect input! Try again.\n");
        }
 
-       while (input_string("Input file name for generated video:",
-                                                       1024, &path_to_generated_video) == -1) {
+       while (input_string("Input file name for generated video:", 1024, &path_to_generated_video) == -1) {
                printf("Incorrect input! Try again.\n");
        }
 
        int err = mv_create_video_reader(&reader);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf("\nERROR: Errors were occurred during video reader creating; "
-                               "code %i\n", err);
+                          "code %i\n",
+                          err);
                release_resources();
                MEDIA_VISION_FUNCTION_LEAVE();
                return err;
@@ -1123,20 +1033,15 @@ int perform_track_video(mv_image_tracking_model_h target)
 
        err = mv_create_video_writer(&writer);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "\nERROR: Errors were occurred during video writer creating; "
-                               "code %i\n",
-                               err);
+               printf("\nERROR: Errors were occurred during video writer creating; "
+                          "code %i\n",
+                          err);
                release_resources();
                MEDIA_VISION_FUNCTION_LEAVE();
                return err;
        }
 
-       err = mv_video_reader_load(
-               reader,
-               path_to_video,
-               &image_data,
-               &fps);
+       err = mv_video_reader_load(reader, path_to_video, &image_data, &fps);
        if (MEDIA_VISION_ERROR_NONE != err) {
                printf("\nERROR: Errors were occurred during video loading; code %i\n", err);
                release_resources();
@@ -1144,22 +1049,17 @@ int perform_track_video(mv_image_tracking_model_h target)
                return err;
        }
 
-       printf("Receive frame metadata: wxh - %ux%u, fps - %u, format - %d\n",
-               image_data.image_width, image_data.image_height, fps, image_data.image_colorspace);
+       printf("Receive frame metadata: wxh - %ux%u, fps - %u, format - %d\n", image_data.image_width,
+                  image_data.image_height, fps, image_data.image_colorspace);
 
        /* Temporary we accept only RGB888 */
        image_data.image_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
 
-       err = mv_video_writer_init(
-                       writer,
-                       path_to_generated_video,
-                       image_data,
-                       fps);
+       err = mv_video_writer_init(writer, path_to_generated_video, image_data, fps);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "\nERROR: Errors were occurred during video writer initializing; "
-                               "code %i\n",
-                               err);
+               printf("\nERROR: Errors were occurred during video writer initializing; "
+                          "code %i\n",
+                          err);
                release_resources();
                MEDIA_VISION_FUNCTION_LEAVE();
                return err;
@@ -1172,10 +1072,9 @@ int perform_track_video(mv_image_tracking_model_h target)
        cb_data.is_video = true;
        err = mv_video_reader_set_new_sample_cb(reader, new_frame_cb, &cb_data);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "\nERROR: Errors were occurred during set new frame callback; "
-                               "code %i\n",
-                               err);
+               printf("\nERROR: Errors were occurred during set new frame callback; "
+                          "code %i\n",
+                          err);
                release_resources();
                MEDIA_VISION_FUNCTION_LEAVE();
                return err;
@@ -1186,23 +1085,21 @@ int perform_track_video(mv_image_tracking_model_h target)
        pthread_mutex_lock(&block_during_tracking_mutex);
        err = mv_video_reader_set_eos_cb(reader, eos_frame_cb, &block_during_tracking_mutex);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "\nERROR: Errors were occurred during set new frame callback; "
-                               "code %i\n",
-                               err);
-                       release_resources();
-                       pthread_mutex_unlock(&block_during_tracking_mutex);
-                       pthread_mutex_destroy(&block_during_tracking_mutex);
-                       MEDIA_VISION_FUNCTION_LEAVE();
-                       return err;
+               printf("\nERROR: Errors were occurred during set new frame callback; "
+                          "code %i\n",
+                          err);
+               release_resources();
+               pthread_mutex_unlock(&block_during_tracking_mutex);
+               pthread_mutex_destroy(&block_during_tracking_mutex);
+               MEDIA_VISION_FUNCTION_LEAVE();
+               return err;
        }
 
        err = mv_video_reader_start(reader);
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "\nERROR: Errors were occurred during video reading starts; "
-                               "code %i\n",
-                               err);
+               printf("\nERROR: Errors were occurred during video reading starts; "
+                          "code %i\n",
+                          err);
                release_resources();
                pthread_mutex_unlock(&block_during_tracking_mutex);
                pthread_mutex_destroy(&block_during_tracking_mutex);
@@ -1226,7 +1123,7 @@ int perform_track_video(mv_image_tracking_model_h target)
 
 static int cmpstring(const void *p1, const void *p2)
 {
-       return strcmp((char * const) p1, (char * const) p2);
+       return strcmp((char *const) p1, (char *const) p2);
 }
 
 int perform_track_image(mv_image_tracking_model_h target)
@@ -1241,20 +1138,18 @@ int perform_track_image(mv_image_tracking_model_h target)
        char *path_to_image = NULL;
        char *path_to_generated_image = NULL;
 
-       while (input_string("Input path for tracking:",
-                                                       1024, &path_to_image) == -1) {
+       while (input_string("Input path for tracking:", 1024, &path_to_image) == -1) {
                printf("Incorrect input! Try again.\n");
        }
 
-       while (input_string("Input file name for generated results:",
-                                                       1024, &path_to_generated_image) == -1) {
+       while (input_string("Input file name for generated results:", 1024, &path_to_generated_image) == -1) {
                printf("Incorrect input! Try again.\n");
        }
 
        int frame_idx;
        int err = MEDIA_VISION_ERROR_NONE;
        int frames_counter = 0;
-       char (*frames)[FILE_PATH_SIZE] = malloc(0);
+       char(*frames)[FILE_PATH_SIZE] = malloc(0);
 
        struct dirent *ent_eof;
        DIR *dir = opendir(path_to_image);
@@ -1270,13 +1165,13 @@ int perform_track_image(mv_image_tracking_model_h target)
                if (ent_eof->d_name[0] == '.' || strlen(ent_eof->d_name) < 4)
                        continue;
 
-               if (strcmp(".jpg", ent_eof->d_name + strlen(ent_eof->d_name) -4) != 0)
+               if (strcmp(".jpg", ent_eof->d_name + strlen(ent_eof->d_name) - 4) != 0)
                        continue;
 
                frames_counter++;
                frames = realloc(frames, frames_counter * FILE_PATH_SIZE);
 
-               snprintf(frames[frames_counter -1], FILE_PATH_SIZE, "%s/%s", path_to_image, ent_eof->d_name);
+               snprintf(frames[frames_counter - 1], FILE_PATH_SIZE, "%s/%s", path_to_image, ent_eof->d_name);
        }
        closedir(dir);
 
@@ -1293,7 +1188,8 @@ int perform_track_image(mv_image_tracking_model_h target)
        err = mv_create_source(&source);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("\nERROR: Errors(code %i) were occurred during"
-                               "mv_create_source\n", err);
+                          "mv_create_source\n",
+                          err);
                free(frames);
                free(path_to_image);
                free(path_to_generated_image);
@@ -1322,7 +1218,8 @@ int perform_track_image(mv_image_tracking_model_h target)
                err = load_image_to_buffer(frames[frame_idx], &data_buffer, &buffer_size, &image_data);
                if (err != MEDIA_VISION_ERROR_NONE) {
                        printf("\nWARN: WARN(code %i) were occurred during"
-                               "load_image_to_media_source, but continue\n", err);
+                                  "load_image_to_media_source, but continue\n",
+                                  err);
 
                        if (data_buffer != NULL)
                                destroy_loaded_buffer(data_buffer);
@@ -1330,27 +1227,20 @@ int perform_track_image(mv_image_tracking_model_h target)
                        continue;
                }
 
-               err = mv_source_fill_by_buffer(source, data_buffer,
-                                               buffer_size,
-                                               image_data.image_width,
-                                               image_data.image_height,
-                                               image_data.image_colorspace);
+               err = mv_source_fill_by_buffer(source, data_buffer, buffer_size, image_data.image_width,
+                                                                          image_data.image_height, image_data.image_colorspace);
 
                if (data_buffer != NULL)
                        destroy_loaded_buffer(data_buffer);
 
                if (err != MEDIA_VISION_ERROR_NONE) {
                        printf("\nWARN: WARN(code %i) were occurred during"
-                               "mv_source_fill_by_buffer, but continue\n", err);
+                                  "mv_source_fill_by_buffer, but continue\n",
+                                  err);
                        continue;
                }
 
-               err = mv_image_track(source,
-                               cb_data.target,
-                               NULL,
-                               tracked_cb,
-                               &cb_data);
-
+               err = mv_image_track(source, cb_data.target, NULL, tracked_cb, &cb_data);
        }
 
        mv_destroy_source(source);
@@ -1373,19 +1263,17 @@ int perform_save_image_tracking_model(mv_image_tracking_model_h model)
        int err = MEDIA_VISION_ERROR_NONE;
        char *path_to_file = NULL;
 
-       while (input_string(
-                       "Input file name to be generated for image tracking model storing:",
-                       1024, &path_to_file) == -1) {
+       while (input_string("Input file name to be generated for image tracking model storing:", 1024, &path_to_file) ==
+                  -1) {
                printf("Incorrect input! Try again.\n");
        }
 
        err = mv_image_tracking_model_save(path_to_file, model);
 
        if (MEDIA_VISION_ERROR_NONE != err) {
-               printf(
-                               "\nError during saving the image tracking model. "
-                               "Error code is %i\n",
-                               err);
+               printf("\nError during saving the image tracking model. "
+                          "Error code is %i\n",
+                          err);
                free(path_to_file);
                return err;
        }
@@ -1399,15 +1287,12 @@ int perform_save_image_tracking_model(mv_image_tracking_model_h model)
        return err;
 }
 
-int perform_load_image_tracking_model(
-               char **path_to_file, mv_image_tracking_model_h *result)
+int perform_load_image_tracking_model(char **path_to_file, mv_image_tracking_model_h *result)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
-       while (input_string(
-                       "Input file name with image tracking model to be loaded:",
-                       1024, path_to_file) == -1) {
-                       printf("Incorrect input! Try again.\n");
+       while (input_string("Input file name with image tracking model to be loaded:", 1024, path_to_file) == -1) {
+               printf("Incorrect input! Try again.\n");
        }
 
        int err = mv_image_tracking_model_load(*path_to_file, result);
@@ -1427,9 +1312,7 @@ int perform_load_image_tracking_model(
        return err;
 }
 
-int perform_clone_image_tracking_model(
-               mv_image_tracking_model_h src,
-               mv_image_tracking_model_h *result)
+int perform_clone_image_tracking_model(mv_image_tracking_model_h src, mv_image_tracking_model_h *result)
 {
        MEDIA_VISION_FUNCTION_ENTER();
 
@@ -1441,7 +1324,8 @@ int perform_clone_image_tracking_model(
                int err2 = mv_image_tracking_model_destroy(*result);
                if (MEDIA_VISION_ERROR_NONE != err2) {
                        printf("\nERROR: Errors were occurred during tracking model "
-                               "destroying; code %i\n", err);
+                                  "destroying; code %i\n",
+                                  err);
                }
 
                (*result) = NULL;
@@ -1507,8 +1391,7 @@ int select_testing_object(GArray *testing_objects, testing_object_h *result, cha
 
        show_testing_objects(title, testing_objects);
        int sel_index = 0;
-       while (input_int("Input number of element:", 0,
-                       testing_objects->len - 1, &sel_index) == -1) {
+       while (input_int("Input number of element:", 0, testing_objects->len - 1, &sel_index) == -1) {
                printf("Incorrect input! Try again.\n");
        }
        (*result) = g_array_index(testing_objects, testing_object_h, sel_index);
@@ -1525,8 +1408,7 @@ int select_testing_object_index(GArray *testing_objects, guint *result_index, ch
        show_testing_objects(title, testing_objects);
 
        int sel_index = 0;
-       while (input_int("Input number of element:", 0,
-               testing_objects->len - 1, &sel_index) == -1) {
+       while (input_int("Input number of element:", 0, testing_objects->len - 1, &sel_index) == -1) {
                printf("Incorrect input! Try again.\n");
        }
        (*result_index) = sel_index;
@@ -1553,19 +1435,18 @@ int remove_testing_object(GArray *testing_objects, guint index)
 
 void perform_recognition_cases(GArray *image_objects)
 {
-       const char *names[] = {
-                       "Show created set of image objects",
-                       "Generate new image object from source image  (mv_image_object_fill )",
-                       "Load existed image object from file          (mv_image_object_load)",
-                       "Clone existed image object                   (mv_image_object_clone)",
-                       "Create empty image object                    (mv_image_object_create)",
-                       "Save existed image object to the file        (mv_image_object_save)",
-                       "Remove image object from created set         (mv_image_object_destroy)",
-                       "Get confidence from existed image object     (mv_image_object_get_recognition_rate)",
-                       "Recognize all image objects on image         (mv_image_recognize)",
-                       "Set label for existed image object           (mv_image_set_label_of_object)",
-                       "Get label from existed image object          (mv_image_get_label_of_object)",
-                       "Back to the main menu"};
+       const char *names[] = { "Show created set of image objects",
+                                                       "Generate new image object from source image  (mv_image_object_fill )",
+                                                       "Load existed image object from file          (mv_image_object_load)",
+                                                       "Clone existed image object                   (mv_image_object_clone)",
+                                                       "Create empty image object                    (mv_image_object_create)",
+                                                       "Save existed image object to the file        (mv_image_object_save)",
+                                                       "Remove image object from created set         (mv_image_object_destroy)",
+                                                       "Get confidence from existed image object     (mv_image_object_get_recognition_rate)",
+                                                       "Recognize all image objects on image         (mv_image_recognize)",
+                                                       "Set label for existed image object           (mv_image_set_label_of_object)",
+                                                       "Get label from existed image object          (mv_image_get_label_of_object)",
+                                                       "Back to the main menu" };
 
        int number_of_options = sizeof(names) / sizeof(names[0]);
        int options[number_of_options];
@@ -1580,12 +1461,12 @@ void perform_recognition_cases(GArray *image_objects)
 
                switch (sel_opt) {
                case 1: {
-               /* Show created set of image objects */
+                       /* Show created set of image objects */
                        show_testing_objects("Set of image objects", image_objects);
                        break;
                }
                case 2: {
-               /* Generate new image object from source image (mv_image_object_fill) */
+                       /* Generate new image object from source image (mv_image_object_fill) */
                        mv_image_object_h temporary = NULL;
                        char *path_to_image = NULL;
 
@@ -1600,9 +1481,8 @@ void perform_recognition_cases(GArray *image_objects)
 
                        testing_object_h added_object = NULL;
                        testing_object_create(&added_object);
-                       testing_object_fill(added_object, temporary,
-                                       OBJECT_TYPE_IMAGE_OBJECT, SOURCE_TYPE_GENERATION,
-                                       path_to_image);
+                       testing_object_fill(added_object, temporary, OBJECT_TYPE_IMAGE_OBJECT, SOURCE_TYPE_GENERATION,
+                                                               path_to_image);
 
                        if (NULL != path_to_image)
                                free(path_to_image);
@@ -1615,8 +1495,7 @@ void perform_recognition_cases(GArray *image_objects)
                        mv_image_object_h temporary_image_object = NULL;
                        char *path_to_object = NULL;
 
-                       err = perform_load_image_object(
-                                               &path_to_object, &temporary_image_object);
+                       err = perform_load_image_object(&path_to_object, &temporary_image_object);
 
                        if (MEDIA_VISION_ERROR_NONE != err) {
                                printf("Loading failed (error code - %i)\n", err);
@@ -1625,12 +1504,8 @@ void perform_recognition_cases(GArray *image_objects)
 
                        testing_object_h added_object = NULL;
                        testing_object_create(&added_object);
-                       testing_object_fill(
-                                               added_object,
-                                               temporary_image_object,
-                                               OBJECT_TYPE_IMAGE_OBJECT,
-                                               SOURCE_TYPE_LOADING,
-                                               path_to_object);
+                       testing_object_fill(added_object, temporary_image_object, OBJECT_TYPE_IMAGE_OBJECT, SOURCE_TYPE_LOADING,
+                                                               path_to_object);
 
                        free(path_to_object);
 
@@ -1645,24 +1520,15 @@ void perform_recognition_cases(GArray *image_objects)
                        }
 
                        testing_object_h temporary_testing_object = NULL;
-                       select_testing_object(
-                                               image_objects,
-                                               &temporary_testing_object,
-                                               "Select the object you want to clone");
+                       select_testing_object(image_objects, &temporary_testing_object, "Select the object you want to clone");
 
                        mv_image_object_h temporary_image_object = NULL;
-                       perform_clone_image_object(
-                                               temporary_testing_object->entity,
-                                               &temporary_image_object);
+                       perform_clone_image_object(temporary_testing_object->entity, &temporary_image_object);
 
                        testing_object_h added_object = NULL;
                        testing_object_create(&added_object);
-                       testing_object_fill(
-                                               added_object,
-                                               temporary_image_object,
-                                               OBJECT_TYPE_IMAGE_OBJECT,
-                                               SOURCE_TYPE_CLONING,
-                                               temporary_testing_object);
+                       testing_object_fill(added_object, temporary_image_object, OBJECT_TYPE_IMAGE_OBJECT, SOURCE_TYPE_CLONING,
+                                                               temporary_testing_object);
 
                        add_testing_object(image_objects, added_object);
                        break;
@@ -1679,12 +1545,8 @@ void perform_recognition_cases(GArray *image_objects)
 
                        testing_object_h added_object = NULL;
                        testing_object_create(&added_object);
-                       testing_object_fill(
-                                               added_object,
-                                               temporary_image_object,
-                                               OBJECT_TYPE_IMAGE_OBJECT,
-                                               SOURCE_TYPE_EMPTY,
-                                               NULL);
+                       testing_object_fill(added_object, temporary_image_object, OBJECT_TYPE_IMAGE_OBJECT, SOURCE_TYPE_EMPTY,
+                                                               NULL);
 
                        add_testing_object(image_objects, added_object);
                        printf("\nImage object successfully created\n");
@@ -1698,8 +1560,7 @@ void perform_recognition_cases(GArray *image_objects)
                        }
 
                        testing_object_h temporary_testing_object = NULL;
-                       select_testing_object(image_objects, &temporary_testing_object,
-                                               "Select the object you want to save");
+                       select_testing_object(image_objects, &temporary_testing_object, "Select the object you want to save");
                        perform_save_image_object(temporary_testing_object->entity);
                        break;
                }
@@ -1711,10 +1572,8 @@ void perform_recognition_cases(GArray *image_objects)
                        }
 
                        guint selected_index;
-                       int err = select_testing_object_index(
-                                               image_objects,
-                                               &selected_index,
-                                               "Select the object you want to remove");
+                       int err =
+                                       select_testing_object_index(image_objects, &selected_index, "Select the object you want to remove");
                        if (MEDIA_VISION_ERROR_NONE == err) {
                                remove_testing_object(image_objects, selected_index);
                                printf("\nImage object successfully removed\n");
@@ -1730,7 +1589,7 @@ void perform_recognition_cases(GArray *image_objects)
 
                        testing_object_h temporary_testing_object = NULL;
                        select_testing_object(image_objects, &temporary_testing_object,
-                                               "Select the object from which you want getting confidence");
+                                                                 "Select the object from which you want getting confidence");
                        perform_get_confidence(temporary_testing_object->entity);
                        break;
                }
@@ -1763,10 +1622,10 @@ void perform_recognition_cases(GArray *image_objects)
 
                        testing_object_h temporary_testing_object = NULL;
                        select_testing_object(image_objects, &temporary_testing_object,
-                                               "Select the object for which you want setting label");
+                                                                 "Select the object for which you want setting label");
                        perform_set_label(temporary_testing_object->entity);
                        break;
-                       }
+               }
                case 11: {
                        /* Get label from existed image object (mv_image_object_get_label) */
                        if (image_objects->len <= 0) {
@@ -1776,7 +1635,7 @@ void perform_recognition_cases(GArray *image_objects)
 
                        testing_object_h temporary_testing_object = NULL;
                        select_testing_object(image_objects, &temporary_testing_object,
-                                               "Select the object from which you want getting label");
+                                                                 "Select the object from which you want getting label");
                        perform_get_label(temporary_testing_object->entity);
                        break;
                }
@@ -1792,17 +1651,16 @@ void perform_recognition_cases(GArray *image_objects)
 
 void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models)
 {
-       const char *names[] = {
-                       "Show created set of tracking models",
-                       "Create empty tracking model              (mv_image_tracking_model_create)",
-                       "Generate model based on image object     (mv_image_tracking_model_set_target)",
-                       "Load existed tracking model from file    (mv_image_tracking_model_load)",
-                       "Clone existed tracking model             (mv_image_tracking_model_clone)",
-                       "Save existed tracking model to the file  (mv_image_tracking_model_save)",
-                       "Remove tracking model from created set   (mv_image_tracking_model_destroy)",
-                       "Refresh tracking model                   (mv_image_tracking_model_refresh)",
-                       "Track                                    (mv_image_track)",
-                       "Back to the main menu"};
+       const char *names[] = { "Show created set of tracking models",
+                                                       "Create empty tracking model              (mv_image_tracking_model_create)",
+                                                       "Generate model based on image object     (mv_image_tracking_model_set_target)",
+                                                       "Load existed tracking model from file    (mv_image_tracking_model_load)",
+                                                       "Clone existed tracking model             (mv_image_tracking_model_clone)",
+                                                       "Save existed tracking model to the file  (mv_image_tracking_model_save)",
+                                                       "Remove tracking model from created set   (mv_image_tracking_model_destroy)",
+                                                       "Refresh tracking model                   (mv_image_tracking_model_refresh)",
+                                                       "Track                                    (mv_image_track)",
+                                                       "Back to the main menu" };
 
        int number_of_options = sizeof(names) / sizeof(names[0]);
        int options[number_of_options];
@@ -1834,12 +1692,8 @@ void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models
 
                        testing_object_h added_object = NULL;
                        testing_object_create(&added_object);
-                       testing_object_fill(
-                                       added_object,
-                                       temporary_image_tracking_model,
-                                       OBJECT_TYPE_IMAGE_TRACKING_MODEL,
-                                       SOURCE_TYPE_EMPTY,
-                                       NULL);
+                       testing_object_fill(added_object, temporary_image_tracking_model, OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+                                                               SOURCE_TYPE_EMPTY, NULL);
 
                        add_testing_object(image_tracking_models, added_object);
                        printf("\nTracking model successfully created\n");
@@ -1860,32 +1714,24 @@ void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models
                        }
 
                        testing_object_h temporary_testing_object = NULL;
-                       select_testing_object(
-                                       image_objects,
-                                       &temporary_testing_object,
-                                       "Select the image object for tracking");
-
-                       err = mv_image_tracking_model_set_target(
-                                       (mv_image_object_h)(temporary_testing_object->entity),
-                                       temporary_image_tracking_model);
+                       select_testing_object(image_objects, &temporary_testing_object, "Select the image object for tracking");
+
+                       err = mv_image_tracking_model_set_target((mv_image_object_h)(temporary_testing_object->entity),
+                                                                                                        temporary_image_tracking_model);
                        if (MEDIA_VISION_ERROR_NONE != err) {
                                printf("Error: target isn't set with error code %i\n", err);
-                               int err2 = mv_image_tracking_model_destroy(
-                                               temporary_image_tracking_model);
+                               int err2 = mv_image_tracking_model_destroy(temporary_image_tracking_model);
                                if (MEDIA_VISION_ERROR_NONE != err2)
                                        printf("Error: tracking model destroying return"
-                                                       "error code %i\n", err);
+                                                  "error code %i\n",
+                                                  err);
                                break;
                        }
 
                        testing_object_h added_object = NULL;
                        testing_object_create(&added_object);
-                       testing_object_fill(
-                                       added_object,
-                                       temporary_image_tracking_model,
-                                       OBJECT_TYPE_IMAGE_TRACKING_MODEL,
-                                       SOURCE_TYPE_GENERATION,
-                                       temporary_testing_object);
+                       testing_object_fill(added_object, temporary_image_tracking_model, OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+                                                               SOURCE_TYPE_GENERATION, temporary_testing_object);
 
                        add_testing_object(image_tracking_models, added_object);
                        printf("\nTracking model successfully generated\n");
@@ -1896,8 +1742,7 @@ void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models
                        mv_image_tracking_model_h temporary_image_tracking_model = NULL;
                        char *path_to_object = NULL;
 
-                       err = perform_load_image_tracking_model(
-                                       &path_to_object, &temporary_image_tracking_model);
+                       err = perform_load_image_tracking_model(&path_to_object, &temporary_image_tracking_model);
 
                        if (MEDIA_VISION_ERROR_NONE != err) {
                                printf("Loading failed (error code - %i)\n", err);
@@ -1906,12 +1751,8 @@ void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models
 
                        testing_object_h added_object = NULL;
                        testing_object_create(&added_object);
-                       testing_object_fill(
-                                       added_object,
-                                       temporary_image_tracking_model,
-                                       OBJECT_TYPE_IMAGE_TRACKING_MODEL,
-                                       SOURCE_TYPE_LOADING,
-                                       path_to_object);
+                       testing_object_fill(added_object, temporary_image_tracking_model, OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+                                                               SOURCE_TYPE_LOADING, path_to_object);
 
                        free(path_to_object);
 
@@ -1922,31 +1763,22 @@ void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models
                case 5: {
                        /* Clone existed tracking model (mv_image_tracking_model_clone) */
                        if (image_tracking_models->len <= 0) {
-                               printf(
-                                       "\nFirstly you must create at least one image "
-                                       "tracking model.\n");
+                               printf("\nFirstly you must create at least one image "
+                                          "tracking model.\n");
                                break;
                        }
 
                        testing_object_h temporary_testing_object = NULL;
-                       select_testing_object(
-                                       image_tracking_models,
-                                       &temporary_testing_object,
-                                       "Select the tracking model you want to clone");
+                       select_testing_object(image_tracking_models, &temporary_testing_object,
+                                                                 "Select the tracking model you want to clone");
 
                        mv_image_tracking_model_h temporary_image_tracking_model = NULL;
-                       perform_clone_image_tracking_model(
-                                       temporary_testing_object->entity,
-                                       &temporary_image_tracking_model);
+                       perform_clone_image_tracking_model(temporary_testing_object->entity, &temporary_image_tracking_model);
 
                        testing_object_h added_object = NULL;
                        testing_object_create(&added_object);
-                       testing_object_fill(
-                                       added_object,
-                                       temporary_image_tracking_model,
-                                       OBJECT_TYPE_IMAGE_TRACKING_MODEL,
-                                       SOURCE_TYPE_CLONING,
-                                       temporary_testing_object);
+                       testing_object_fill(added_object, temporary_image_tracking_model, OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+                                                               SOURCE_TYPE_CLONING, temporary_testing_object);
 
                        add_testing_object(image_tracking_models, added_object);
                        break;
@@ -1954,17 +1786,14 @@ void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models
                case 6: {
                        /* Save existed tracking model to the file (mv_image_tracking_model_save) */
                        if (image_tracking_models->len <= 0) {
-                               printf(
-                                               "\nFirstly you must create at least one image "
-                                               "tracking model.\n");
-                                               break;
+                               printf("\nFirstly you must create at least one image "
+                                          "tracking model.\n");
+                               break;
                        }
 
                        testing_object_h temporary_testing_object = NULL;
-                       select_testing_object(
-                                       image_tracking_models,
-                                       &temporary_testing_object,
-                                       "Select the tracking model you want to save");
+                       select_testing_object(image_tracking_models, &temporary_testing_object,
+                                                                 "Select the tracking model you want to save");
 
                        perform_save_image_tracking_model(temporary_testing_object->entity);
                        break;
@@ -1972,17 +1801,14 @@ void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models
                case 7: {
                        /* Remove tracking model from created set (mv_image_tracking_model_destroy) */
                        if (image_tracking_models->len <= 0) {
-                               printf(
-                                               "\nFirstly you must create at least one image "
-                                               "tracking model.\n");
+                               printf("\nFirstly you must create at least one image "
+                                          "tracking model.\n");
                                break;
                        }
 
                        guint selected_index;
-                       err = select_testing_object_index(
-                                       image_tracking_models,
-                                       &selected_index,
-                                       "Select the object you want to remove");
+                       err = select_testing_object_index(image_tracking_models, &selected_index,
+                                                                                         "Select the object you want to remove");
 
                        if (MEDIA_VISION_ERROR_NONE == err) {
                                remove_testing_object(image_tracking_models, selected_index);
@@ -1993,17 +1819,14 @@ void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models
                case 8: {
                        /* Refresh tracking model (mv_image_tracking_model_refresh) */
                        if (image_tracking_models->len <= 0) {
-                               printf(
-                                               "\nFirstly you must create at least one image "
-                                               "tracking model.\n");
-                                               break;
+                               printf("\nFirstly you must create at least one image "
+                                          "tracking model.\n");
+                               break;
                        }
 
                        testing_object_h temporary_testing_object = NULL;
-                       select_testing_object(
-                                       image_tracking_models,
-                                       &temporary_testing_object,
-                                       "Select the tracking model you want to refresh");
+                       select_testing_object(image_tracking_models, &temporary_testing_object,
+                                                                 "Select the tracking model you want to refresh");
 
                        perform_refresh_image_tracking_model(temporary_testing_object->entity);
                        break;
@@ -2011,46 +1834,43 @@ void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models
                case 9: {
                        /* Track (mv_image_track) */
                        if (image_tracking_models->len <= 0) {
-                               printf(
-                                               "\nFirstly you must create at least one image "
-                                               "tracking model.\n");
+                               printf("\nFirstly you must create at least one image "
+                                          "tracking model.\n");
                                break;
                        }
 
                        testing_object_h temporary_testing_object = NULL;
-                       err = select_testing_object(
-                                       image_tracking_models,
-                                       &temporary_testing_object,
-                                       "Select the object which you want to track");
+                       err = select_testing_object(image_tracking_models, &temporary_testing_object,
+                                                                               "Select the object which you want to track");
 
                        if (MEDIA_VISION_ERROR_NONE != err) {
                                printf("\nERROR: Errors(code %i) were occurred during "
-                                       "select_testing_object\n", err);
+                                          "select_testing_object\n",
+                                          err);
                                break;
                        }
 
                        int track_opt = 0;
-                       const int track_options[2] = {1, 2};
-                       const char *track_opt_names[2] = {"Track with the video file",
-                                               "Track with the image files"};
+                       const int track_options[2] = { 1, 2 };
+                       const char *track_opt_names[2] = { "Track with the video file", "Track with the image files" };
                        while (!track_opt) {
                                track_opt = show_menu("Select actions:", track_options, track_opt_names, 2);
 
                                switch (track_opt) {
                                case 1:
-                                               perform_track_video(temporary_testing_object->entity);
-                                               break;
+                                       perform_track_video(temporary_testing_object->entity);
+                                       break;
                                case 2:
-                                               perform_track_image(temporary_testing_object->entity);
-                                               break;
+                                       perform_track_image(temporary_testing_object->entity);
+                                       break;
                                default:
-                                               track_opt = 0;
-                                               break;
+                                       track_opt = 0;
+                                       break;
                                }
                        }
                        break;
                }
-                case 10: {
+               case 10: {
                        /* Back to the main menu */
                        return;
                }
@@ -2076,10 +1896,7 @@ int main(void)
        }
 
        const int options[3] = { 1, 2, 3 };
-       const char *names[3] = {
-                       "Recognition cases",
-                       "Tracking cases",
-                       "Exit" };
+       const char *names[3] = { "Recognition cases", "Tracking cases", "Exit" };
 
        while (1) {
                char exit = 'n';
@@ -2109,8 +1926,7 @@ int main(void)
                        const char *names_last[2] = { "No", "Yes" };
 
                        while (sel_opt == 0) {
-                               sel_opt = show_menu("Are you sure?",
-                                                                               options_last, names_last, 2);
+                               sel_opt = show_menu("Are you sure?", options_last, names_last, 2);
                                switch (sel_opt) {
                                case 1:
                                        exit = 'n';
@@ -2132,17 +1948,13 @@ int main(void)
 
        guint i = 0;
        for (i = 0; i < image_objects->len; ++i) {
-               testing_object_h temp = g_array_index(
-                               image_objects,
-                               testing_object_h, i);
+               testing_object_h temp = g_array_index(image_objects, testing_object_h, i);
                testing_object_destroy(&temp);
        }
        g_array_free(image_objects, TRUE);
 
        for (i = 0; i < image_tracking_models->len; ++i) {
-               testing_object_h temp = g_array_index(
-                               image_tracking_models,
-                               testing_object_h, i);
+               testing_object_h temp = g_array_index(image_tracking_models, testing_object_h, i);
                testing_object_destroy(&temp);
        }
        g_array_free(image_tracking_models, TRUE);
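
The cleanup loops above lean on GLib's GArray, which stores elements by value: each testing_object_h pulled out with g_array_index() must be destroyed individually before g_array_free() releases the array itself. A minimal standalone sketch of the same pattern, with plain strings standing in for the testsuite's handles:

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        /* Container of owned pointers, mirroring the testsuite's usage. */
        GArray *objects = g_array_new(FALSE, FALSE, sizeof(char *));

        for (int i = 0; i < 3; ++i) {
            char *obj = g_strdup_printf("object-%d", i);
            g_array_append_val(objects, obj);
        }

        /* g_array_index() is a macro that casts the element back to its
         * stored type; each element must be freed before the array. */
        for (guint i = 0; i < objects->len; ++i) {
            char *obj = g_array_index(objects, char *, i);
            printf("destroying %s\n", obj);
            g_free(obj);
        }

        /* TRUE also releases the (already emptied) element buffer. */
        g_array_free(objects, TRUE);
        return 0;
    }

Passing TRUE to g_array_free() frees the element buffer, but the elements' own resources remain the caller's job, which is why the destroy loop runs first.
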
index 80b4e40..9311728 100644
 #include "ImageHelper.h"
 #include "mv_face_recognition.h"
 
-#define TRAIN_LIST_FILE        "/home/owner/media/res/face_recognition/res/measurement/train_list.txt"
-#define TEST_LIST_FILE "/home/owner/media/res/face_recognition/res/measurement/test_list.txt"
-#define TRAINING_IMAGE_PATH    "/home/owner/media/res/face_recognition/res/measurement/train/"
-#define TEST_IMAGE_PATH        "/home/owner/media/res/face_recognition/res/measurement/test/"
-#define MAX_TRAINING_CLASS                     20
-#define SHOT_PER_CLASS                         5
+#define TRAIN_LIST_FILE "/home/owner/media/res/face_recognition/res/measurement/train_list.txt"
+#define TEST_LIST_FILE "/home/owner/media/res/face_recognition/res/measurement/test_list.txt"
+#define TRAINING_IMAGE_PATH "/home/owner/media/res/face_recognition/res/measurement/train/"
+#define TEST_IMAGE_PATH "/home/owner/media/res/face_recognition/res/measurement/test/"
+#define MAX_TRAINING_CLASS 20
+#define SHOT_PER_CLASS 5
 
 using namespace testing;
 using namespace std;
@@ -39,7 +39,6 @@ using namespace MediaVision::Common;
 
 TEST(FaceRecognitionAccuracy, Measure)
 {
-
        ifstream train_file(TRAIN_LIST_FILE, ios::in | ios::binary);
 
        if (!train_file.is_open()) {
@@ -59,15 +58,15 @@ TEST(FaceRecognitionAccuracy, Measure)
 
        vector<string> cached_label;
 
-
        while (!train_file.eof()) {
                string filename, label, index;
 
-               train_file >> filename  >> label >> index;
+               train_file >> filename >> label >> index;
 
                const string image_path = string(TRAINING_IMAGE_PATH) + filename;
 
-               cout << "training " << image_path << " file" << " with " << label << " train cnt = " << train_cnt << endl;
+               cout << "training " << image_path << " file"
+                        << " with " << label << " train cnt = " << train_cnt << endl;
 
                mv_source_h mv_source = NULL;
 
@@ -112,14 +111,15 @@ TEST(FaceRecognitionAccuracy, Measure)
        while (!test_file.eof()) {
                string filename, label, index;
 
-               test_file >> filename  >> label >> index;
+               test_file >> filename >> label >> index;
 
                if (filename.empty() || label.empty() || index.empty())
                        break;
 
                string image_path = string(TEST_IMAGE_PATH) + filename;
 
-               cout << "inferencing " << image_path << " file" << " with " << label << " index = " << test_cnt++ << endl;
+               cout << "inferencing " << image_path << " file"
+                        << " with " << label << " index = " << test_cnt++ << endl;
 
                mv_source_h mv_source = NULL;
 
@@ -134,7 +134,7 @@ TEST(FaceRecognitionAccuracy, Measure)
                bool is_no_data = (ret == MEDIA_VISION_ERROR_NO_DATA);
                const char *out_label = NULL;
 
-               ret  = mv_face_recognition_get_label(handle, &out_label);
+               ret = mv_face_recognition_get_label(handle, &out_label);
                ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
 
                auto it = find(cached_label.begin(), cached_label.end(), label);
index f1be72d..37a278b 100644
 #include "ImageHelper.h"
 #include "mv_face_recognition.h"
 
-#define TRAINING_IMAGE_PATH    "/home/owner/media/res/face_recognition/res/test/training/"
-#define TEST_IMAGE_PATH                "/home/owner/media/res/face_recognition/res/test/test/"
-#define LABEL_FILE_PATH                "/home/owner/media/res/face_recognition/training/labels.dat"
-#define MODEL_FILE_PATH                "/home/owner/media/res/face_recognition/training/model_and_weights.ini"
-#define FV_FILE_PATH           "/home/owner/media/res/face_recognition/training/feature_vector_file.dat"
+#define TRAINING_IMAGE_PATH "/home/owner/media/res/face_recognition/res/test/training/"
+#define TEST_IMAGE_PATH "/home/owner/media/res/face_recognition/res/test/test/"
+#define LABEL_FILE_PATH "/home/owner/media/res/face_recognition/training/labels.dat"
+#define MODEL_FILE_PATH "/home/owner/media/res/face_recognition/training/model_and_weights.ini"
+#define FV_FILE_PATH "/home/owner/media/res/face_recognition/training/feature_vector_file.dat"
 
 using namespace testing;
 using namespace std;
 
 static const map<string, string> training_images = {
-       { "037830.png", "2929" },
-       { "038965.png", "2929" },
-       { "045978.png", "2929" },
-       { "050501.png", "2929" },
-       { "065899.png", "2929" },
-       { "010348.png", "7779" },
-       { "029342.png", "7779" },
-       { "035939.png", "7779" },
-       { "061310.png", "7779" },
-       { "062261.png", "7779" },
-       { "000928.png", "3448" },
-       { "008922.png", "3448" },
-       { "029633.png", "3448" },
-       { "032962.png", "3448" },
-       { "054616.png", "3448" }
+       { "037830.png", "2929" }, { "038965.png", "2929" }, { "045978.png", "2929" }, { "050501.png", "2929" },
+       { "065899.png", "2929" }, { "010348.png", "7779" }, { "029342.png", "7779" }, { "035939.png", "7779" },
+       { "061310.png", "7779" }, { "062261.png", "7779" }, { "000928.png", "3448" }, { "008922.png", "3448" },
+       { "029633.png", "3448" }, { "032962.png", "3448" }, { "054616.png", "3448" }
 };
 
 static const map<string, string> test_images = {
-       { "068468.png", "2929" },
-       { "068883.png", "2929" },
-       { "075004.png", "2929" },
-       { "078125.png", "2929" },
-       { "080649.png", "2929" },
-       { "074645.png", "7779" },
-       { "086536.png", "7779" },
-       { "089334.png", "7779" },
-       { "096514.png", "7779" },
-       { "100336.png", "7779" },
-       { "054757.png", "3448" },
-       { "064838.png", "3448" },
-       { "072749.png", "3448" },
-       { "073526.png", "3448" },
-       { "080451.png", "3448" }
+       { "068468.png", "2929" }, { "068883.png", "2929" }, { "075004.png", "2929" }, { "078125.png", "2929" },
+       { "080649.png", "2929" }, { "074645.png", "7779" }, { "086536.png", "7779" }, { "089334.png", "7779" },
+       { "096514.png", "7779" }, { "100336.png", "7779" }, { "054757.png", "3448" }, { "064838.png", "3448" },
+       { "072749.png", "3448" }, { "073526.png", "3448" }, { "080451.png", "3448" }
 };
 
 using namespace MediaVision::Common;
@@ -91,11 +69,8 @@ TEST(FaceRecognitionTest, CreateAndDestroyShouldBeOk)
 TEST(FaceRecognitionTest, InferenceAfterTrainingShouldBeOk)
 {
        mv_face_recognition_h handle;
-       vector<string> answers = {
-               "3448", "3448", "2929", "2929", "3448",
-               "3448", "7779", "2929", "2929", "3448",
-               "2929", "7779", "7779", "7779", "7779"
-       };
+       vector<string> answers = { "3448", "3448", "2929", "2929", "3448", "3448", "7779", "2929",
+                                                          "2929", "3448", "2929", "7779", "7779", "7779", "7779" };
 
        int ret = mv_face_recognition_create(&handle);
        ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
@@ -103,7 +78,7 @@ TEST(FaceRecognitionTest, InferenceAfterTrainingShouldBeOk)
        ret = mv_face_recognition_prepare(handle);
        ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
 
-       for (auto& image : training_images) {
+       for (auto &image : training_images) {
                const string image_path = string(TRAINING_IMAGE_PATH) + image.first;
                mv_source_h mv_source = NULL;
 
@@ -123,7 +98,7 @@ TEST(FaceRecognitionTest, InferenceAfterTrainingShouldBeOk)
        unsigned int image_idx = 0;
        unsigned int correct_cnt = 0;
 
-       for (auto& image : test_images) {
+       for (auto &image : test_images) {
                const string image_path = string(TEST_IMAGE_PATH) + image.first;
                mv_source_h mv_source = NULL;
 
@@ -143,7 +118,7 @@ TEST(FaceRecognitionTest, InferenceAfterTrainingShouldBeOk)
 
                const char *out_label = NULL;
 
-               ret  = mv_face_recognition_get_label(handle, &out_label);
+               ret = mv_face_recognition_get_label(handle, &out_label);
                ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
 
                string label_str(out_label);
@@ -170,7 +145,7 @@ TEST(FaceRecognitionTest, InferenceWithoutLabelShouldBeOk)
        ret = mv_face_recognition_prepare(handle);
        ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
 
-       for (auto& image : test_images) {
+       for (auto &image : test_images) {
                const string image_path = string(TEST_IMAGE_PATH) + image.first;
                mv_source_h mv_source = NULL;
 
@@ -188,7 +163,7 @@ TEST(FaceRecognitionTest, InferenceWithoutLabelShouldBeOk)
 
                const char *out_label = NULL;
 
-               ret  = mv_face_recognition_get_label(handle, &out_label);
+               ret = mv_face_recognition_get_label(handle, &out_label);
                ASSERT_NE(ret, MEDIA_VISION_ERROR_NONE);
        }
 
@@ -199,17 +174,12 @@ TEST(FaceRecognitionTest, InferenceWithoutLabelShouldBeOk)
 TEST(FaceRecognitionTest, LabelRemovalShouldBeOk)
 {
        vector<string> labels = { "3448", "2929", "7779" };
-       vector<vector<string>> answers = {
-                       { "none", "none", "2929", "2929", "none",
-                       "none", "7779", "2929", "2929", "none",
-                       "2929", "7779", "7779", "7779", "7779" },
-                       { "3448", "3448", "none", "none", "3448",
-                       "3448", "7779", "none", "none", "3448",
-                       "none", "7779", "7779", "7779", "7779" },
-                       { "3448", "3448", "2929", "2929", "3448",
-                       "3448", "none", "2929", "2929", "3448",
-                       "2929", "none", "none", "none", "none" }
-               };
+       vector<vector<string> > answers = { { "none", "none", "2929", "2929", "none", "none", "7779", "2929", "2929",
+                                                                                 "none", "2929", "7779", "7779", "7779", "7779" },
+                                                                               { "3448", "3448", "none", "none", "3448", "3448", "7779", "none", "none",
+                                                                                 "3448", "none", "7779", "7779", "7779", "7779" },
+                                                                               { "3448", "3448", "2929", "2929", "3448", "3448", "none", "2929", "2929",
+                                                                                 "3448", "2929", "none", "none", "none", "none" } };
 
        unsigned int label_idx = 0;
        mv_face_recognition_h handle;
@@ -217,13 +187,13 @@ TEST(FaceRecognitionTest, LabelRemovalShouldBeOk)
        int ret = mv_face_recognition_create(&handle);
        ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
 
-       for (auto& label : labels) {
+       for (auto &label : labels) {
                ret = mv_face_recognition_prepare(handle);
                ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
 
-               auto& answer = answers[label_idx++];
+               auto &answer = answers[label_idx++];
 
-               for (auto& image : training_images) {
+               for (auto &image : training_images) {
                        const string image_path = string(TRAINING_IMAGE_PATH) + image.first;
                        mv_source_h mv_source = NULL;
                        ret = mv_create_source(&mv_source);
@@ -247,7 +217,7 @@ TEST(FaceRecognitionTest, LabelRemovalShouldBeOk)
                unsigned int image_idx = 0;
                unsigned int correct_cnt = 0;
 
-               for (auto& image : test_images) {
+               for (auto &image : test_images) {
                        const string image_path = string(TEST_IMAGE_PATH) + image.first;
                        mv_source_h mv_source = NULL;
 
@@ -305,7 +275,7 @@ TEST(FaceRecognitionTest, RemoveAllLabelsShouldBeOk)
        ret = mv_face_recognition_prepare(handle);
        ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
 
-       for (auto& image : training_images) {
+       for (auto &image : training_images) {
                const string image_path = string(TRAINING_IMAGE_PATH) + image.first;
                mv_source_h mv_source = NULL;
                ret = mv_create_source(&mv_source);
@@ -321,7 +291,7 @@ TEST(FaceRecognitionTest, RemoveAllLabelsShouldBeOk)
                ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
        }
 
-       for (auto& label : labels) {
+       for (auto &label : labels) {
                ret = mv_face_recognition_unregister(handle, label.c_str());
                ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
        }
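
The assertions above exercise the mv_face_recognition C API end to end: create a handle, prepare it, train on labeled sources, run inference, then read the predicted label back. A condensed sketch of that flow; mv_face_recognition_register, mv_face_recognition_inference and mv_face_recognition_destroy are inferred from the test flow rather than shown verbatim in the hunks, and image decoding is elided (the tests fill the mv_source_h through their C++ ImageHelper):

    #include <stdio.h>

    #include "mv_common.h"
    #include "mv_face_recognition.h"

    /* Train one labeled face and immediately recognize it again.
     * `source` must already hold decoded image data. */
    int recognize_once(const char *label_to_train, mv_source_h source)
    {
        mv_face_recognition_h handle = NULL;
        int ret = mv_face_recognition_create(&handle);
        if (ret != MEDIA_VISION_ERROR_NONE)
            return ret;

        /* prepare must precede both training and inference */
        ret = mv_face_recognition_prepare(handle);
        if (ret == MEDIA_VISION_ERROR_NONE)
            ret = mv_face_recognition_register(handle, source, label_to_train);

        if (ret == MEDIA_VISION_ERROR_NONE)
            ret = mv_face_recognition_inference(handle, source);

        if (ret == MEDIA_VISION_ERROR_NONE) {
            const char *out_label = NULL;
            ret = mv_face_recognition_get_label(handle, &out_label);
            if (ret == MEDIA_VISION_ERROR_NONE)
                printf("recognized label: %s\n", out_label);
        }

        mv_face_recognition_destroy(handle);
        return ret;
    }

As the InferenceWithoutLabelShouldBeOk test shows, mv_face_recognition_get_label is expected to fail when nothing has been registered, so the error path above is exercised deliberately by the suite.
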
index 135081b..8220ee7 100644
 #define FILE_PATH_SIZE 1024
 
 //Image Classification
-#define IC_LABEL_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_label.txt"
-#define IC_TFLITE_WEIGHT_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite"
-#define IC_Q_LABEL_PATH \
-       "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_label.txt"
-#define IC_Q_TFLITE_WEIGHT_PATH \
-       "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_model.tflite"
+#define IC_LABEL_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_label.txt"
+#define IC_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite"
+#define IC_Q_LABEL_PATH "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_label.txt"
+#define IC_Q_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_model.tflite"
 
 /*
  * Hosted models
  */
-#define IC_LABEL_MOBILENET_V1_224_PATH\
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_mobilenet_v1_label.txt"
+#define IC_LABEL_MOBILENET_V1_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_mobilenet_v1_label.txt"
 #define IC_TFLITE_WEIGHT_MOBILENET_V1_224_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_mobilenet_v1_224x224.tflite"
+       "/usr/share/capi-media-vision/models/IC/tflite/ic_mobilenet_v1_224x224.tflite"
 #define IC_TFLITE_META_MOBILENET_V1_224_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_mobilenet_v1_224x224.json"
-#define IC_LABEL_MOBILENET_V2_224_PATH\
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_mobilenet_v2_label.txt"
+       "/usr/share/capi-media-vision/models/IC/tflite/ic_mobilenet_v1_224x224.json"
+#define IC_LABEL_MOBILENET_V2_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_mobilenet_v2_label.txt"
 #define IC_TFLITE_WEIGHT_MOBILENET_V2_224_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_mobilenet_v2_224x224.tflite"
+       "/usr/share/capi-media-vision/models/IC/tflite/ic_mobilenet_v2_224x224.tflite"
 #define IC_TFLITE_META_MOBILENET_V2_224_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_mobilenet_v2_224x224.json"
-#define IC_LABEL_DENSENET_224_PATH\
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_densenet_label.txt"
-#define IC_TFLITE_WEIGHT_DENSENET_224_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_densenet_224x224.tflite"
-#define IC_TFLITE_META_DENSENET_224_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_densenet_224x224.json"
-#define IC_LABEL_INCEPTION_RESENET_299_PATH\
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_densenet_label.txt"
+       "/usr/share/capi-media-vision/models/IC/tflite/ic_mobilenet_v2_224x224.json"
+#define IC_LABEL_DENSENET_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_densenet_label.txt"
+#define IC_TFLITE_WEIGHT_DENSENET_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_densenet_224x224.tflite"
+#define IC_TFLITE_META_DENSENET_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_densenet_224x224.json"
+#define IC_LABEL_INCEPTION_RESENET_299_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_densenet_label.txt"
 #define IC_TFLITE_WEIGHT_INCEPTION_RESENET_299_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_inception_resnet_v2_299x299.tflite"
-#define IC_LABEL_INCEPTION_V3_299_PATH\
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_inception_v3_label.txt"
+       "/usr/share/capi-media-vision/models/IC/tflite/ic_inception_resnet_v2_299x299.tflite"
+#define IC_LABEL_INCEPTION_V3_299_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_inception_v3_label.txt"
 #define IC_TFLITE_WEIGHT_INCEPTION_V3_299_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_inception_v3_299x299.tflite"
-#define IC_LABEL_INCEPTION_V4_299_PATH\
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_inception_v4_label.txt"
+       "/usr/share/capi-media-vision/models/IC/tflite/ic_inception_v3_299x299.tflite"
+#define IC_LABEL_INCEPTION_V4_299_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_inception_v4_label.txt"
 #define IC_TFLITE_WEIGHT_INCEPTION_V4_299_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_inception_v4_299x299.tflite"
-#define IC_LABEL_NASNET_224_PATH\
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_nasnet_label.txt"
-#define IC_TFLITE_WEIGHT_NASNET_224_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_nasnet_224x224.tflite"
-#define IC_TFLITE_META_NASNET_224_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_nasnet_224x224.json"
-#define IC_LABEL_MNASNET_224_PATH\
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_mnasnet_label.txt"
-#define IC_TFLITE_WEIGHT_MNASNET_224_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_mnasnet_224x224.tflite"
-#define IC_TFLITE_META_MNASNET_224_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_mnasnet_224x224.json"
-#define IC_LABEL_RESNET_V2_299_PATH\
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_resnet_v2_label.txt"
-#define IC_TFLITE_WEIGHT_RESNET_V2_299_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_resnet_v2_299x299.tflite"
-#define IC_LABEL_SQUEEZENET_224_PATH\
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_squeezenet_label.txt"
+       "/usr/share/capi-media-vision/models/IC/tflite/ic_inception_v4_299x299.tflite"
+#define IC_LABEL_NASNET_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_nasnet_label.txt"
+#define IC_TFLITE_WEIGHT_NASNET_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_nasnet_224x224.tflite"
+#define IC_TFLITE_META_NASNET_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_nasnet_224x224.json"
+#define IC_LABEL_MNASNET_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_mnasnet_label.txt"
+#define IC_TFLITE_WEIGHT_MNASNET_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_mnasnet_224x224.tflite"
+#define IC_TFLITE_META_MNASNET_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_mnasnet_224x224.json"
+#define IC_LABEL_RESNET_V2_299_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_resnet_v2_label.txt"
+#define IC_TFLITE_WEIGHT_RESNET_V2_299_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_resnet_v2_299x299.tflite"
+#define IC_LABEL_SQUEEZENET_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_squeezenet_label.txt"
 #define IC_TFLITE_WEIGHT_SQUEEZENET_224_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_squeezenet_224x224.tflite"
-#define IC_TFLITE_META_SQUEEZENET_224_PATH \
-       "/usr/share/capi-media-vision/models/IC/tflite/ic_squeezenet_224x224.json"
-
-#define IC_VIVANTE_LABEL_PATH \
-       "/usr/share/capi-media-vision/models/IC/vivante/ic_label.txt"
-#define IC_VIVANTE_WEIGHT_PATH \
-       "/usr/share/capi-media-vision/models/IC/vivante/ic_vivante_model.nb"
-#define IC_VIVANTE_CONFIG_PATH \
-       "/usr/share/capi-media-vision/models/IC/vivante/ic_vivante_model.so"
-
-#define IC_OPENCV_LABEL_CAFFE_PATH \
-       "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_label_squeezenet.txt"
-#define IC_OPENCV_WEIGHT_CAFFE_PATH \
-       "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel"
-#define IC_OPENCV_CONFIG_CAFFE_PATH \
-       "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt"
+       "/usr/share/capi-media-vision/models/IC/tflite/ic_squeezenet_224x224.tflite"
+#define IC_TFLITE_META_SQUEEZENET_224_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_squeezenet_224x224.json"
+
+#define IC_VIVANTE_LABEL_PATH "/usr/share/capi-media-vision/models/IC/vivante/ic_label.txt"
+#define IC_VIVANTE_WEIGHT_PATH "/usr/share/capi-media-vision/models/IC/vivante/ic_vivante_model.nb"
+#define IC_VIVANTE_CONFIG_PATH "/usr/share/capi-media-vision/models/IC/vivante/ic_vivante_model.so"
+
+#define IC_OPENCV_LABEL_CAFFE_PATH "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_label_squeezenet.txt"
+#define IC_OPENCV_WEIGHT_CAFFE_PATH "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel"
+#define IC_OPENCV_CONFIG_CAFFE_PATH "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt"
 
 //Object Detection
-#define OD_LABEL_PATH \
-       "/usr/share/capi-media-vision/models/OD/tflite/od_label.txt"
-#define OD_TFLITE_WEIGHT_PATH \
-       "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite"
+#define OD_LABEL_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_label.txt"
+#define OD_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite"
 
-#define OD_OPENCV_LABEL_CAFFE_PATH \
-       "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_label_mobilenetv1ssd.txt"
+#define OD_OPENCV_LABEL_CAFFE_PATH "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_label_mobilenetv1ssd.txt"
 #define OD_OPENCV_WEIGHT_CAFFE_PATH \
        "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel"
 #define OD_OPENCV_CONFIG_CAFFE_PATH \
        "/usr/share/capi-media-vision/models/OD/tflite/od_mobilenet_v1_ssd_postop_300x300.tflite"
 #define OD_TFLITE_META_MOBILENET_V1_SSD_300_PATH \
        "/usr/share/capi-media-vision/models/OD/tflite/od_mobilenet_v1_ssd_postop_300x300.json"
-#define OD_LABEL_MOBILENET_V2_SSD_320_PATH \
-       "/usr/share/capi-media-vision/models/OD/tflite/od_mobilenet_v2_ssd_label.txt"
+#define OD_LABEL_MOBILENET_V2_SSD_320_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_mobilenet_v2_ssd_label.txt"
 #define OD_TFLITE_WEIGHT_MOBILENET_V2_SSD_320_PATH \
        "/usr/share/capi-media-vision/models/OD/tflite/od_mobilenet_v2_ssd_320x320.tflite"
 #define OD_TFLITE_META_MOBILENET_V2_SSD_320_PATH \
        "/usr/share/capi-media-vision/models/OD/tflite/od_mobilenet_v2_ssd_320x320.json"
-#define OD_LABEL_QUANT_EFFICIENT_PATH \
-       "/usr/share/capi-media-vision/models/OD/tflite/od_efficientdet_label.txt"
-#define OD_TFLITE_WEIGHT_QUANT_EFFICIENT_PATH \
-       "/usr/share/capi-media-vision/models/OD/tflite/od_efficientdet.tflite"
-#define OD_TFLITE_META_QUANT_EFFICIENT_PATH \
-       "/usr/share/capi-media-vision/models/OD/tflite/od_efficientdet.json"
-#define OD_TFLITE_WEIGHT_YOLO_V5_320_PATH \
-       "/usr/share/capi-media-vision/models/OD/tflite/od_yolo_v5_320x320.tflite"
-#define OD_TFLITE_META_YOLO_V5_320_PATH \
-       "/usr/share/capi-media-vision/models/OD/tflite/od_yolo_v5_320x320.json"
-#define OD_LABLE_YOLO_V5_320_PATH \
-       "/usr/share/capi-media-vision/models/OD/tflite/od_yolo_v5_label.txt"
+#define OD_LABEL_QUANT_EFFICIENT_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_efficientdet_label.txt"
+#define OD_TFLITE_WEIGHT_QUANT_EFFICIENT_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_efficientdet.tflite"
+#define OD_TFLITE_META_QUANT_EFFICIENT_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_efficientdet.json"
+#define OD_TFLITE_WEIGHT_YOLO_V5_320_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_yolo_v5_320x320.tflite"
+#define OD_TFLITE_META_YOLO_V5_320_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_yolo_v5_320x320.json"
+#define OD_LABLE_YOLO_V5_320_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_yolo_v5_label.txt"
 
 //Face Detection
-#define FD_TFLITE_WEIGHT_PATH \
-       "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite"
+#define FD_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite"
 
-#define FD_OPENCV_WEIGHT_CAFFE_PATH \
-       "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel"
-#define FD_OPENCV_CONFIG_CAFFE_PATH \
-       "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt"
+#define FD_OPENCV_WEIGHT_CAFFE_PATH "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel"
+#define FD_OPENCV_CONFIG_CAFFE_PATH "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt"
 
 /*
  * Hosted models
        "/usr/share/capi-media-vision/models/FD/tflite/fd_mobilenet_v1_ssd_postop_300x300.tflite"
 #define FD_TFLITE_META_MOBILENET_V1_SSD_300_PATH \
        "/usr/share/capi-media-vision/models/FD/tflite/fd_mobilenet_v1_ssd_postop_300x300.json"
-#define FD_LABEL_BLAZEFACE_128_PATH \
-       "/usr/share/capi-media-vision/models/FD/tflite/fd_blazeface_front_label.txt"
+#define FD_LABEL_BLAZEFACE_128_PATH "/usr/share/capi-media-vision/models/FD/tflite/fd_blazeface_front_label.txt"
 #define FD_TFLITE_WEIGHT_BLAZEFACE_128_PATH \
        "/usr/share/capi-media-vision/models/FD/tflite/fd_blazeface_front_128x128.tflite"
 #define FD_TFLITE_META_BLAZEFACE_128_PATH \
        "/usr/share/capi-media-vision/models/FD/tflite/fd_blazeface_front_128x128.json"
 
 //Facial LandmarkDetection
-#define FLD_TFLITE_WEIGHT_PATH \
-       "/usr/share/capi-media-vision/models/FLD/tflite/fld_tflite_model1.tflite"
+#define FLD_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/FLD/tflite/fld_tflite_model1.tflite"
 
-#define FLD_OPENCV_WEIGHT_CAFFE_PATH \
-       "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel"
-#define FLD_OPENCV_CONFIG_CAFFE_PATH \
-       "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt"
+#define FLD_OPENCV_WEIGHT_CAFFE_PATH "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel"
+#define FLD_OPENCV_CONFIG_CAFFE_PATH "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt"
 
 /*
  * Hosted models
  */
-#define FLD_TFLITE_WIEGHT_TWEAKCNN_128_PATH \
-       "/usr/share/capi-media-vision/models/FLD/tflite/fld_tweakcnn_128x128.tflite"
-#define FLD_TFLITE_META_TWEAKCNN_128_PATH \
-       "/usr/share/capi-media-vision/models/FLD/tflite/fld_tweakcnn_128x128.json"
+#define FLD_TFLITE_WIEGHT_TWEAKCNN_128_PATH "/usr/share/capi-media-vision/models/FLD/tflite/fld_tweakcnn_128x128.tflite"
+#define FLD_TFLITE_META_TWEAKCNN_128_PATH "/usr/share/capi-media-vision/models/FLD/tflite/fld_tweakcnn_128x128.json"
 #define FLD_TFLITE_WIEGHT_MEDIAPIPE_192_PATH \
        "/usr/share/capi-media-vision/models/FLD/tflite/fld_mediapipe_192x192.tflite"
-#define FLD_TFLITE_META_MEDIAPIPE_192_PATH \
-       "/usr/share/capi-media-vision/models/FLD/tflite/fld_mediapipe_192x192.json"
+#define FLD_TFLITE_META_MEDIAPIPE_192_PATH "/usr/share/capi-media-vision/models/FLD/tflite/fld_mediapipe_192x192.json"
 
 //Pose Detection
-#define PLD_TFLITE_WEIGHT_PATH \
-       "/usr/share/capi-media-vision/models/PLD/tflite/pld-tflite-001.tflite"
-#define PLD_POSE_LABEL_PATH \
-       "/usr/share/capi-media-vision/models/PLD/tflite/pose-label.txt"
-#define PLD_MOTION_CAPTURE_FILE_PATH \
-       "/usr/share/capi-media-vision/models/PLD/mocap/example.bvh"
-#define PLD_MOTION_CAPTURE_MAPPING_FILE_PATH \
-       "/usr/share/capi-media-vision/models/PLD/mocap/example-mocap-mapping.txt"
+#define PLD_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/PLD/tflite/pld-tflite-001.tflite"
+#define PLD_POSE_LABEL_PATH "/usr/share/capi-media-vision/models/PLD/tflite/pose-label.txt"
+#define PLD_MOTION_CAPTURE_FILE_PATH "/usr/share/capi-media-vision/models/PLD/mocap/example.bvh"
+#define PLD_MOTION_CAPTURE_MAPPING_FILE_PATH "/usr/share/capi-media-vision/models/PLD/mocap/example-mocap-mapping.txt"
 
 /*
  * Hosted models
  */
-#define PLD_TFLITE_WEIGHT_CPM_192_PATH \
-       "/usr/share/capi-media-vision/models/PLD/tflite/pld_cpm_192x192.tflite"
-#define PLD_TFLITE_META_CPM_192_PATH \
-       "/usr/share/capi-media-vision/models/PLD/tflite/pld_cpm_192x192.json"
+#define PLD_TFLITE_WEIGHT_CPM_192_PATH "/usr/share/capi-media-vision/models/PLD/tflite/pld_cpm_192x192.tflite"
+#define PLD_TFLITE_META_CPM_192_PATH "/usr/share/capi-media-vision/models/PLD/tflite/pld_cpm_192x192.json"
 
 #define PLD_TFLITE_WEIGHT_MOBILENET_V1_POSENET_257_PATH \
        "/usr/share/capi-media-vision/models/PLD/tflite/pld_mobilenet_v1_posenet_multi_257x257.tflite"
        "/usr/share/capi-media-vision/models/PLD/tflite/pld_mobilenet_v1_posenet_multi_label.txt"
 
 // https://tfhub.dev/google/lite-model/movenet/singlepose/lightning/tflite/int8/4
-#define PLD_TFLITE_WEIGHT_INT8_MOVENET_PATH \
-       "/usr/share/capi-media-vision/models/PLD/tflite/pld_int8_movenet.tflite"
+#define PLD_TFLITE_WEIGHT_INT8_MOVENET_PATH "/usr/share/capi-media-vision/models/PLD/tflite/pld_int8_movenet.tflite"
 
 #define PLD_TFLITE_WEIGHT_MOVENET_THUNDER3_256_FLOAT32_PATH \
        "/usr/share/capi-media-vision/models/PLD/tflite/pld_movenet_thunder3_256x256.tflite"
 #define NANO_PER_MILLI ((__clock_t) 1000000)
 #define MILLI_PER_SEC ((__clock_t) 1000)
 
-int engine_config_hosted_tflite_cpu(mv_engine_config_h handle,
-                                                                       const char *tf_weight,
-                                                                       const char *meta_file)
+int engine_config_hosted_tflite_cpu(mv_engine_config_h handle, const char *tf_weight, const char *meta_file)
 {
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, tf_weight));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_TFLITE));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, tf_weight));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_TFLITE));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
        if (meta_file != NULL)
-               RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_META_FILE_PATH, meta_file));
+               RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_META_FILE_PATH, meta_file));
        return MEDIA_VISION_ERROR_NONE;
 }
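
engine_config_hosted_tflite_cpu chains its attribute setters through RET_IF_FAIL, so the first failing call short-circuits the function with that call's error code. The macro itself is defined elsewhere in the testsuite; a plausible sketch, assuming it does nothing more than propagate non-success results:

    /* Hypothetical definition; the real macro lives outside these hunks.
     * Evaluates the expression once and returns its error code from the
     * enclosing function on anything but success. */
    #define RET_IF_FAIL(expr)                        \
        do {                                         \
            int _err = (expr);                       \
            if (_err != MEDIA_VISION_ERROR_NONE)     \
                return _err;                         \
        } while (0)

With that shape, each RET_IF_FAIL line reads as "set this attribute or bail out with its error", which is why the config helpers here can end with a bare return MEDIA_VISION_ERROR_NONE.
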
 
-int engine_config_user_hosted_tflite_cpu(mv_engine_config_h handle,
-                                                                                const char *tf_weight,
-                                                                                const char *user_file,
+int engine_config_user_hosted_tflite_cpu(mv_engine_config_h handle, const char *tf_weight, const char *user_file,
                                                                                 const char *meta_file)
 {
        RET_IF_FAIL(engine_config_hosted_tflite_cpu(handle, tf_weight, meta_file));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_USER_FILE_PATH, user_file));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_USER_FILE_PATH, user_file));
        return MEDIA_VISION_ERROR_NONE;
 }
 
@@ -341,10 +277,8 @@ unsigned long gettotalmillisec(const struct timespec time)
        return time.tv_sec * MILLI_PER_SEC + time.tv_nsec / NANO_PER_MILLI;
 }
 
-void _object_detected_cb(mv_source_h source, const int number_of_objects,
-                                                const int *indices, const char **names,
-                                                const float *confidences,
-                                                const mv_rectangle_s *locations, void *user_data)
+void _object_detected_cb(mv_source_h source, const int number_of_objects, const int *indices, const char **names,
+                                                const float *confidences, const mv_rectangle_s *locations, void *user_data)
 {
        printf("In callback: %d objects\n", number_of_objects);
 
@@ -352,27 +286,23 @@ void _object_detected_cb(mv_source_h source, const int number_of_objects,
                printf("%2d\n", indices[n]);
                printf("%s\n", names[n]);
                printf("%.3f\n", confidences[n]);
-               printf("%d,%d,%d,%d\n", locations[n].point.x, locations[n].point.y,
-                          locations[n].width, locations[n].height);
+               printf("%d,%d,%d,%d\n", locations[n].point.x, locations[n].point.y, locations[n].width, locations[n].height);
        }
 }
 
-void _face_detected_cb(mv_source_h source, const int number_of_faces,
-                                          const float *confidences,
+void _face_detected_cb(mv_source_h source, const int number_of_faces, const float *confidences,
                                           const mv_rectangle_s *locations, void *user_data)
 {
        printf("In callback: %d faces\n", number_of_faces);
 
        for (int n = 0; n < number_of_faces; n++) {
                printf("%.3f\n", confidences[n]);
-               printf("%d,%d,%d,%d\n", locations[n].point.x, locations[n].point.y,
-                          locations[n].width, locations[n].height);
+               printf("%d,%d,%d,%d\n", locations[n].point.x, locations[n].point.y, locations[n].width, locations[n].height);
        }
 }
 
-void _facial_landmark_detected_cb(mv_source_h source,
-                                                                 const int number_of_landmarks,
-                                                                 const mv_point_s *locations, void *user_data)
+void _facial_landmark_detected_cb(mv_source_h source, const int number_of_landmarks, const mv_point_s *locations,
+                                                                 void *user_data)
 {
        printf("In callback, %d landmarks\n", number_of_landmarks);
        for (int n = 0; n < number_of_landmarks; n++) {
@@ -380,22 +310,21 @@ void _facial_landmark_detected_cb(mv_source_h source,
        }
 }
 
-void _pose_landmark_detected_cb(mv_source_h source,
-                                                                 mv_inference_pose_result_h pose, void *user_data)
+void _pose_landmark_detected_cb(mv_source_h source, mv_inference_pose_result_h pose, void *user_data)
 {
        int cb_number_of_poses = 0;
        int cb_number_of_landmarks = 0;
        mv_inference_pose_get_number_of_poses(pose, &cb_number_of_poses);
        mv_inference_pose_get_number_of_landmarks(pose, &cb_number_of_landmarks);
-       printf("%d pose with %d landmarks\n",cb_number_of_poses, cb_number_of_landmarks);
+       printf("%d pose with %d landmarks\n", cb_number_of_poses, cb_number_of_landmarks);
 
        mv_point_s point;
        float score;
        for (int k = 0; k < cb_number_of_poses; ++k)
-       for (int n = 0; n < cb_number_of_landmarks; n++) {
-               mv_inference_pose_get_landmark(pose, k, n, &point, &score);
-               printf("%d-%d: x[%d], y[%d] with %.4f\n", k, n, point.x, point.y, score);
-       }
+               for (int n = 0; n < cb_number_of_landmarks; n++) {
+                       mv_inference_pose_get_landmark(pose, k, n, &point, &score);
+                       printf("%d-%d: x[%d], y[%d] with %.4f\n", k, n, point.x, point.y, score);
+               }
 
        mv_pose_h poser;
        float poseScore;
@@ -405,18 +334,15 @@ void _pose_landmark_detected_cb(mv_source_h source,
                return;
        }
 
-       ret = mv_pose_set_from_file(poser,
-                                               PLD_MOTION_CAPTURE_FILE_PATH,
-                                               PLD_MOTION_CAPTURE_MAPPING_FILE_PATH);
+       ret = mv_pose_set_from_file(poser, PLD_MOTION_CAPTURE_FILE_PATH, PLD_MOTION_CAPTURE_MAPPING_FILE_PATH);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                mv_pose_destroy(poser);
                printf("Fail to mv_pose_set_from_file");
                return;
        }
 
-       ret = mv_pose_compare(poser, pose,
-                       (MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT | MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT),
-                       &poseScore);
+       ret = mv_pose_compare(poser, pose, (MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT | MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT),
+                                                 &poseScore);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                mv_pose_destroy(poser);
                printf("Fail to mv_pose_compare");
@@ -433,8 +359,7 @@ void _pose_landmark_detected_cb(mv_source_h source,
        return;
 }
 
-void _image_classified_cb(mv_source_h source, const int number_of_classes,
-                                                 const int *indices, const char **names,
+void _image_classified_cb(mv_source_h source, const int number_of_classes, const int *indices, const char **names,
                                                  const float *confidences, void *user_data)
 {
        printf("In callback: %d classes\n", number_of_classes);
@@ -468,24 +393,19 @@ int infer_task_with_img(char *img_file_name, mv_inference_h infer, int task_id)
 
        switch (task_id) {
        case TASK_IC:
-               err = mv_inference_image_classify(mvSource, infer, NULL,
-                                                                                 _image_classified_cb, NULL);
+               err = mv_inference_image_classify(mvSource, infer, NULL, _image_classified_cb, NULL);
                break;
        case TASK_OD:
-               err = mv_inference_object_detect(mvSource, infer, _object_detected_cb,
-                                                                                NULL);
+               err = mv_inference_object_detect(mvSource, infer, _object_detected_cb, NULL);
                break;
        case TASK_FD:
-               err = mv_inference_face_detect(mvSource, infer, _face_detected_cb,
-                                                                          NULL);
+               err = mv_inference_face_detect(mvSource, infer, _face_detected_cb, NULL);
                break;
        case TASK_FLD:
-               err = mv_inference_facial_landmark_detect(
-                               mvSource, infer, NULL, _facial_landmark_detected_cb, NULL);
+               err = mv_inference_facial_landmark_detect(mvSource, infer, NULL, _facial_landmark_detected_cb, NULL);
                break;
        case TASK_PLD:
-               err = mv_inference_pose_landmark_detect(
-                               mvSource, infer, NULL, _pose_landmark_detected_cb, NULL);
+               err = mv_inference_pose_landmark_detect(mvSource, infer, NULL, _pose_landmark_detected_cb, NULL);
                break;
        default:
                err = MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -510,8 +430,7 @@ int infer_task(mv_inference_h infer, int task_id)
 {
        char *in_file_name = NULL;
        /* Load media source */
-       while (input_string("Input file name to be inferred:", 1024,
-                                               &(in_file_name)) == -1)
+       while (input_string("Input file name to be inferred:", 1024, &(in_file_name)) == -1)
                printf("Incorrect input! Try again.\n");
 
        int err = infer_task_with_img(in_file_name, infer, task_id);
@@ -557,13 +476,11 @@ int perform_configure_set_model_config_path(mv_engine_config_h engine_cfg)
 {
        int err = MEDIA_VISION_ERROR_NONE;
        char *filePath = NULL;
-       while (-1 == input_string("Model configuration file path:", FILE_PATH_SIZE,
-                                                         &(filePath))) {
+       while (-1 == input_string("Model configuration file path:", FILE_PATH_SIZE, &(filePath))) {
                printf("Incorrect file path! Try again.\n");
        }
 
-       err = mv_engine_config_set_string_attribute(
-                       engine_cfg, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH, filePath);
+       err = mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH, filePath);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set model configuration file path: %s\n", filePath);
        }
@@ -578,13 +495,11 @@ int perform_configure_set_model_weights_path(mv_engine_config_h engine_cfg)
 {
        int err = MEDIA_VISION_ERROR_NONE;
        char *filePath = NULL;
-       while (-1 == input_string("Model weights file path:", FILE_PATH_SIZE,
-                                                         &(filePath))) {
+       while (-1 == input_string("Model weights file path:", FILE_PATH_SIZE, &(filePath))) {
                printf("Incorrect file path! Try again.\n");
        }
 
-       err = mv_engine_config_set_string_attribute(
-                       engine_cfg, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, filePath);
+       err = mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, filePath);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set model weights file path: %s\n", filePath);
        }
@@ -603,9 +518,8 @@ int perform_configure_set_input_data_type(mv_engine_config_h engine_cfg)
                printf("Invalid type! Try again.\n");
        }
 
-       err = mv_engine_config_set_int_attribute(
-                       engine_cfg, MV_INFERENCE_INPUT_DATA_TYPE,
-                       (mv_inference_data_type_e) dataType);
+       err = mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_DATA_TYPE,
+                                                                                        (mv_inference_data_type_e) dataType);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set input tensor data type: %d\n", dataType);
        }
@@ -617,13 +531,11 @@ int perform_configure_set_model_userfile_path(mv_engine_config_h engine_cfg)
 {
        int err = MEDIA_VISION_ERROR_NONE;
        char *filePath = NULL;
-       while (-1 == input_string("Model user file (category list) path:",
-                                                         FILE_PATH_SIZE, &(filePath))) {
+       while (-1 == input_string("Model user file (category list) path:", FILE_PATH_SIZE, &(filePath))) {
                printf("Incorrect file path! Try again.\n");
        }
 
-       err = mv_engine_config_set_string_attribute(
-                       engine_cfg, MV_INFERENCE_MODEL_USER_FILE_PATH, filePath);
+       err = mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_MODEL_USER_FILE_PATH, filePath);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set model user file path: %s\n", filePath);
        }
@@ -642,8 +554,7 @@ int perform_configure_set_model_mean_value(mv_engine_config_h engine_cfg)
                printf("Invalid value! Try again.\n");
        }
 
-       err = mv_engine_config_set_double_attribute(
-                       engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, meanValue);
+       err = mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, meanValue);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set model mean value: %f\n", meanValue);
        }
@@ -659,8 +570,7 @@ int perform_configure_set_image_scale(mv_engine_config_h engine_cfg)
                printf("Invalid value! Try again.\n");
        }
 
-       err = mv_engine_config_set_double_attribute(
-                       engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, stdValue);
+       err = mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, stdValue);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set std value: %lf\n", stdValue);
        }
@@ -676,8 +586,7 @@ int perform_configure_set_confidence_threshold(mv_engine_config_h engine_cfg)
                printf("Invalid value! Try again.\n");
        }
 
-       err = mv_engine_config_set_double_attribute(
-                       engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, threshold);
+       err = mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, threshold);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set image scale value: %lf\n", threshold);
        }
@@ -693,9 +602,8 @@ int perform_configure_set_backend(mv_engine_config_h engine_cfg)
                printf("Invalid type! Try again.\n");
        }
 
-       err = mv_engine_config_set_int_attribute(
-                       engine_cfg, MV_INFERENCE_BACKEND_TYPE,
-                       (mv_inference_backend_type_e) backendType);
+       err = mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_BACKEND_TYPE,
+                                                                                        (mv_inference_backend_type_e) backendType);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set backend type: %d\n", backendType);
        }
@@ -711,9 +619,8 @@ int perform_configure_set_target(mv_engine_config_h engine_cfg)
                printf("Invalid type! Try again.\n");
        }
 
-       err = mv_engine_config_set_int_attribute(
-                       engine_cfg, MV_INFERENCE_TARGET_TYPE,
-                       (mv_inference_target_type_e) targetType);
+       err = mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_TARGET_TYPE,
+                                                                                        (mv_inference_target_type_e) targetType);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set target type: %d\n", targetType);
        }
@@ -729,8 +636,7 @@ int perform_configure_set_tensor_width(mv_engine_config_h engine_cfg)
                printf("Invalid value! Try again.\n");
        }
 
-       err = mv_engine_config_set_int_attribute(
-                       engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, tensorW);
+       err = mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, tensorW);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set tensor width: %d\n", tensorW);
        }
@@ -746,8 +652,7 @@ int perform_configure_set_tensor_height(mv_engine_config_h engine_cfg)
                printf("Invalid value! Try again.\n");
        }
 
-       err = mv_engine_config_set_int_attribute(
-                       engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, tensorH);
+       err = mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, tensorH);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set tensor height: %d\n", tensorH);
        }
@@ -763,8 +668,7 @@ int perform_configure_set_tensor_channels(mv_engine_config_h engine_cfg)
                printf("Invalid value! Try again.\n");
        }
 
-       err = mv_engine_config_set_int_attribute(
-                       engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, tensorC);
+       err = mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, tensorC);
        if (err != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set tensor channels: %d\n", tensorC);
        }
@@ -778,18 +682,18 @@ int perform_configuration(mv_engine_config_h *engine_cfg)
 
        int sel_opt = 0;
        const char *names[] = { "Set Model Configuration",
-                                                         "Set Model Weights",
-                                                         "Set Model Data Type",
-                                                         "Set Model UserFile",
-                                                         "Set Model MeanFile",
-                                                         "Set Image Scale",
-                                                         "Set Confidence Threshold",
-                                                         "Set Backend",
-                                                         "Set Target",
-                                                         "Set InputTensor Width",
-                                                         "Set InputTensor Height",
-                                                         "Set InputTensor Channels",
-                                                         "Back" };
+                                                       "Set Model Weights",
+                                                       "Set Model Data Type",
+                                                       "Set Model UserFile",
+                                                       "Set Model MeanFile",
+                                                       "Set Image Scale",
+                                                       "Set Confidence Threshold",
+                                                       "Set Backend",
+                                                       "Set Target",
+                                                       "Set InputTensor Width",
+                                                       "Set InputTensor Height",
+                                                       "Set InputTensor Channels",
+                                                       "Back" };
 
        mv_engine_config_h handle = NULL;
        err = mv_create_engine_config(&handle);
@@ -873,33 +777,20 @@ int perform_tflite_mobilenetv1_config(mv_engine_config_h handle)
        const char *inputNodeName = "input_2";
        const char *outputNodeName[] = { "dense_3/Softmax" };
 
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                       IC_TFLITE_WEIGHT_PATH));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_USER_FILE_PATH, IC_LABEL_PATH));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 127.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.6));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_TFLITE));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
+       RET_IF_FAIL(
+                       mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, IC_TFLITE_WEIGHT_PATH));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_USER_FILE_PATH, IC_LABEL_PATH));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 127.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.6));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_TFLITE));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
        return MEDIA_VISION_ERROR_NONE;
 }
 
@@ -908,33 +799,20 @@ int perform_armnn_mobilenetv1_config(mv_engine_config_h handle)
        const char *inputNodeName = "input_2";
        const char *outputNodeName[] = { "dense_3/Softmax" };
 
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                       IC_TFLITE_WEIGHT_PATH));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_USER_FILE_PATH, IC_LABEL_PATH));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 127.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.6));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_ARMNN));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
+       RET_IF_FAIL(
+                       mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, IC_TFLITE_WEIGHT_PATH));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_USER_FILE_PATH, IC_LABEL_PATH));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 127.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.6));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_ARMNN));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
        return MEDIA_VISION_ERROR_NONE;
 }
 
@@ -943,33 +821,20 @@ int perform_one_mobilenetv1_quant_config(mv_engine_config_h handle)
        const char *inputNodeName = "input";
        const char *outputNodeName[] = { "MobilenetV1/Predictions/Reshape_1" };
 
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                       IC_Q_TFLITE_WEIGHT_PATH));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_UINT8));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_USER_FILE_PATH, IC_Q_LABEL_PATH));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 0.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 1.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.6));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_ONE));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                                                                                                         IC_Q_TFLITE_WEIGHT_PATH));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_UINT8));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_USER_FILE_PATH, IC_Q_LABEL_PATH));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 0.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 1.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.6));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_ONE));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
        return MEDIA_VISION_ERROR_NONE;
 }
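
The contrast with the float ARMNN config above is the preprocessing pair: the float model sets MV_INFERENCE_MODEL_MEAN_VALUE/STD_VALUE to 127.0/127.0 to map 8-bit pixels into roughly [-1, 1], while this quantized model sets 0.0/1.0 so uint8 input passes through unchanged. A hedged sketch of the per-pixel normalization these attributes imply (the engine applies its own implementation internally; this helper is illustrative only):

	/* normalized = (pixel - mean) / std
	 * float model: (p - 127.0) / 127.0  -> approx. [-1.0, 1.0]
	 * quant model: (p - 0.0)   / 1.0    -> raw uint8 passthrough */
	static float normalize_pixel(unsigned char p, double mean, double std)
	{
		return (float) (((double) p - mean) / std);
	}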
 
@@ -978,38 +843,25 @@ int perform_vivante_inceptionv3_config(mv_engine_config_h handle)
        const char *inputNodeName = "input";
        const char *outputNodeName[] = { "InceptionV3/Predictions/Peshape_1" };
 
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                       IC_VIVANTE_WEIGHT_PATH));
-
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
-                       IC_VIVANTE_CONFIG_PATH));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_UINT8));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_USER_FILE_PATH, IC_VIVANTE_LABEL_PATH));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 0.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 1.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.6));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_MLAPI));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_TARGET_DEVICE_TYPE,
-                       MV_INFERENCE_TARGET_DEVICE_CUSTOM));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 299));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 299));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
+       RET_IF_FAIL(
+                       mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, IC_VIVANTE_WEIGHT_PATH));
+
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+                                                                                                         IC_VIVANTE_CONFIG_PATH));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_UINT8));
+       RET_IF_FAIL(
+                       mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_USER_FILE_PATH, IC_VIVANTE_LABEL_PATH));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 0.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 1.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.6));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_MLAPI));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_DEVICE_TYPE,
+                                                                                                  MV_INFERENCE_TARGET_DEVICE_CUSTOM));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 299));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 299));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
        return MEDIA_VISION_ERROR_NONE;
 }
 
@@ -1018,37 +870,23 @@ int perform_opencv_caffe_squeezenet_config(mv_engine_config_h handle)
        const char *inputNodeName = "data";
        const char *outputNodeName[] = { "prob" };
 
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                       IC_OPENCV_WEIGHT_CAFFE_PATH));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
-                       IC_OPENCV_CONFIG_CAFFE_PATH));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_USER_FILE_PATH,
-                       IC_OPENCV_LABEL_CAFFE_PATH));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 0.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 1.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_OPENCV));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 227));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 227));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                                                                                                         IC_OPENCV_WEIGHT_CAFFE_PATH));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+                                                                                                         IC_OPENCV_CONFIG_CAFFE_PATH));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_USER_FILE_PATH,
+                                                                                                         IC_OPENCV_LABEL_CAFFE_PATH));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 0.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 1.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_OPENCV));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 227));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 227));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
        return MEDIA_VISION_ERROR_NONE;
 }
 
@@ -1099,64 +937,46 @@ int perform_image_classification()
                err = perform_vivante_inceptionv3_config(engine_cfg);
        } break;
        case 6: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, IC_TFLITE_WEIGHT_MOBILENET_V1_224_PATH,
-                               IC_LABEL_MOBILENET_V1_224_PATH,
-                               IC_TFLITE_META_MOBILENET_V1_224_PATH);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, IC_TFLITE_WEIGHT_MOBILENET_V1_224_PATH,
+                                                                                                  IC_LABEL_MOBILENET_V1_224_PATH,
+                                                                                                  IC_TFLITE_META_MOBILENET_V1_224_PATH);
        } break;
        case 7: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, IC_TFLITE_WEIGHT_MOBILENET_V2_224_PATH,
-                               IC_LABEL_MOBILENET_V2_224_PATH,
-                               IC_TFLITE_META_MOBILENET_V2_224_PATH);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, IC_TFLITE_WEIGHT_MOBILENET_V2_224_PATH,
+                                                                                                  IC_LABEL_MOBILENET_V2_224_PATH,
+                                                                                                  IC_TFLITE_META_MOBILENET_V2_224_PATH);
        } break;
        case 8: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, IC_TFLITE_WEIGHT_DENSENET_224_PATH,
-                               IC_LABEL_DENSENET_224_PATH,
-                               IC_TFLITE_META_DENSENET_224_PATH);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, IC_TFLITE_WEIGHT_DENSENET_224_PATH,
+                                                                                                  IC_LABEL_DENSENET_224_PATH, IC_TFLITE_META_DENSENET_224_PATH);
        } break;
        case 9: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, IC_TFLITE_WEIGHT_INCEPTION_RESENET_299_PATH,
-                               IC_LABEL_INCEPTION_RESENET_299_PATH,
-                               NULL);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, IC_TFLITE_WEIGHT_INCEPTION_RESENET_299_PATH,
+                                                                                                  IC_LABEL_INCEPTION_RESENET_299_PATH, NULL);
        } break;
        case 10: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, IC_TFLITE_WEIGHT_INCEPTION_V3_299_PATH,
-                               IC_LABEL_INCEPTION_V3_299_PATH,
-                               NULL);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, IC_TFLITE_WEIGHT_INCEPTION_V3_299_PATH,
+                                                                                                  IC_LABEL_INCEPTION_V3_299_PATH, NULL);
        } break;
        case 11: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, IC_TFLITE_WEIGHT_INCEPTION_V4_299_PATH,
-                               IC_LABEL_INCEPTION_V4_299_PATH,
-                               NULL);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, IC_TFLITE_WEIGHT_INCEPTION_V4_299_PATH,
+                                                                                                  IC_LABEL_INCEPTION_V4_299_PATH, NULL);
        } break;
        case 12: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, IC_TFLITE_WEIGHT_NASNET_224_PATH,
-                               IC_LABEL_NASNET_224_PATH,
-                               IC_TFLITE_META_NASNET_224_PATH);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, IC_TFLITE_WEIGHT_NASNET_224_PATH,
+                                                                                                  IC_LABEL_NASNET_224_PATH, IC_TFLITE_META_NASNET_224_PATH);
        } break;
        case 13: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, IC_TFLITE_WEIGHT_MNASNET_224_PATH,
-                               IC_LABEL_MNASNET_224_PATH,
-                               IC_TFLITE_META_MOBILENET_V2_224_PATH);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, IC_TFLITE_WEIGHT_MNASNET_224_PATH,
+                                                                                                  IC_LABEL_MNASNET_224_PATH, IC_TFLITE_META_MOBILENET_V2_224_PATH);
        } break;
        case 14: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, IC_TFLITE_WEIGHT_RESNET_V2_299_PATH,
-                               IC_LABEL_RESNET_V2_299_PATH,
-                               NULL);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, IC_TFLITE_WEIGHT_RESNET_V2_299_PATH,
+                                                                                                  IC_LABEL_RESNET_V2_299_PATH, NULL);
        } break;
        case 15: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, IC_TFLITE_WEIGHT_SQUEEZENET_224_PATH,
-                               IC_LABEL_SQUEEZENET_224_PATH,
-                               IC_TFLITE_META_SQUEEZENET_224_PATH);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, IC_TFLITE_WEIGHT_SQUEEZENET_224_PATH,
+                                                                                                  IC_LABEL_SQUEEZENET_224_PATH, IC_TFLITE_META_SQUEEZENET_224_PATH);
        } break;
        }
        if (err != MEDIA_VISION_ERROR_NONE) {
@@ -1181,38 +1001,23 @@ clean_image_engine:
 int perform_tflite_mobilenetv1ssd_config(mv_engine_config_h handle)
 {
        const char *inputNodeName = "normalized_input_image_tensor";
-       const char *outputNodeName[] = { "TFLite_Detection_PostProcess",
-                                                                        "TFLite_Detection_PostProcess:1",
-                                                                        "TFLite_Detection_PostProcess:2",
-                                                                        "TFLite_Detection_PostProcess:3" };
-
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                       OD_TFLITE_WEIGHT_PATH));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_USER_FILE_PATH, OD_LABEL_PATH));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 127.5));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_TFLITE));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 4));
+       const char *outputNodeName[] = { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1",
+                                                                        "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" };
+
+       RET_IF_FAIL(
+                       mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, OD_TFLITE_WEIGHT_PATH));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_USER_FILE_PATH, OD_LABEL_PATH));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 127.5));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_TFLITE));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 4));
        return MEDIA_VISION_ERROR_NONE;
 }
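
The four output names registered here follow the TFLite_Detection_PostProcess custom-op convention: the base tensor carries box coordinates, and the :1/:2/:3 siblings carry class indices, scores, and the valid-detection count. Annotated for reference (the comments are ours, not in the source):

	const char *outputNodeName[] = {
		"TFLite_Detection_PostProcess",   /* [N][4] boxes: ymin, xmin, ymax, xmax (normalized) */
		"TFLite_Detection_PostProcess:1", /* [N] class indices into OD_LABEL_PATH */
		"TFLite_Detection_PostProcess:2", /* [N] confidence scores */
		"TFLite_Detection_PostProcess:3"  /* scalar count of valid detections */
	};

This is why the SSD configs pass 4 as the array length to mv_engine_config_set_array_string_attribute, where the single-output classification configs pass 1.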
 
@@ -1221,75 +1026,46 @@ int perform_opencv_mobilenetv1ssd_config(mv_engine_config_h handle)
        const char *inputNodeName = "data";
        const char *outputNodeName[1] = { "detection_out" };
 
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                       OD_OPENCV_WEIGHT_CAFFE_PATH));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
-                       OD_OPENCV_CONFIG_CAFFE_PATH));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_USER_FILE_PATH,
-                       OD_OPENCV_LABEL_CAFFE_PATH));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 127.5));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_OPENCV));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                                                                                                         OD_OPENCV_WEIGHT_CAFFE_PATH));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+                                                                                                         OD_OPENCV_CONFIG_CAFFE_PATH));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_USER_FILE_PATH,
+                                                                                                         OD_OPENCV_LABEL_CAFFE_PATH));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 127.5));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_OPENCV));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
        return MEDIA_VISION_ERROR_NONE;
 }
 
 int perform_armnn_mobilenetv1ssd_config(mv_engine_config_h handle)
 {
        const char *inputNodeName = "normalized_input_image_tensor";
-       const char *outputNodeName[] = { "TFLite_Detection_PostProcess",
-                                                                        "TFLite_Detection_PostProcess:1",
-                                                                        "TFLite_Detection_PostProcess:2",
-                                                                        "TFLite_Detection_PostProcess:3" };
-
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                       OD_TFLITE_WEIGHT_PATH));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_USER_FILE_PATH, OD_LABEL_PATH));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 127.5));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_ARMNN));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 4));
+       const char *outputNodeName[] = { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1",
+                                                                        "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" };
+
+       RET_IF_FAIL(
+                       mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, OD_TFLITE_WEIGHT_PATH));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_USER_FILE_PATH, OD_LABEL_PATH));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 127.5));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_ARMNN));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 4));
        return MEDIA_VISION_ERROR_NONE;
 }
 
@@ -1297,15 +1073,13 @@ int perform_object_detection()
 {
        int err = MEDIA_VISION_ERROR_NONE;
        mv_engine_config_h engine_cfg = NULL;
-       const char *names[] = {
-               "TFLITE(CPU) + MobileNetV1+SSD",
-               "OPENCV(CPU) + MobileNetV1+SSD",
-               "ARMNN(CPU) + MobileNetV1+SSD",
-               "Hosted[o]: TFLite(cpu + MobilenetV1+SSD)",
-               "Hosted[o]: TFLite(cpu + MobilenetV2+SSD)",
-               "Hosted[o]: TFLite(Quant + EfficientDet)",
-               "Hosted[o]: TFLite(cpu + YoloV5)"
-       };
+       const char *names[] = { "TFLITE(CPU) + MobileNetV1+SSD",
+                                                       "OPENCV(CPU) + MobileNetV1+SSD",
+                                                       "ARMNN(CPU) + MobileNetV1+SSD",
+                                                       "Hosted[o]: TFLite(cpu + MobilenetV1+SSD)",
+                                                       "Hosted[o]: TFLite(cpu + MobilenetV2+SSD)",
+                                                       "Hosted[o]: TFLite(Quant + EfficientDet)",
+                                                       "Hosted[o]: TFLite(cpu + YoloV5)" };
 
        int sel_opt = show_menu_linear("Select Action:", names, ARRAY_SIZE(names));
        if (sel_opt <= 0 || sel_opt > ARRAY_SIZE(names)) {
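
show_menu_linear returns a 1-based selection index, so this guard rejects both the zero/negative cancel path and anything past the end of names. ARRAY_SIZE is presumably the usual element-count macro; a sketch, assuming the conventional definition:

	/* Element count of a true array (not valid for pointers). */
	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))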
@@ -1326,28 +1100,22 @@ int perform_object_detection()
                err = perform_armnn_mobilenetv1ssd_config(engine_cfg);
        } break;
        case 4: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, OD_TFLITE_WEIGHT_MOBILENET_V1_SSD_300_PATH,
-                               OD_LABEL_MOBILENET_V1_SSD_300_PATH,
-                               OD_TFLITE_META_MOBILENET_V1_SSD_300_PATH);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, OD_TFLITE_WEIGHT_MOBILENET_V1_SSD_300_PATH,
+                                                                                                  OD_LABEL_MOBILENET_V1_SSD_300_PATH,
+                                                                                                  OD_TFLITE_META_MOBILENET_V1_SSD_300_PATH);
        } break;
        case 5: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, OD_TFLITE_WEIGHT_MOBILENET_V2_SSD_320_PATH,
-                               OD_LABEL_MOBILENET_V2_SSD_320_PATH,
-                               OD_TFLITE_META_MOBILENET_V2_SSD_320_PATH);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, OD_TFLITE_WEIGHT_MOBILENET_V2_SSD_320_PATH,
+                                                                                                  OD_LABEL_MOBILENET_V2_SSD_320_PATH,
+                                                                                                  OD_TFLITE_META_MOBILENET_V2_SSD_320_PATH);
        } break;
        case 6: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, OD_TFLITE_WEIGHT_QUANT_EFFICIENT_PATH,
-                               OD_LABEL_QUANT_EFFICIENT_PATH,
-                               OD_TFLITE_META_QUANT_EFFICIENT_PATH);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, OD_TFLITE_WEIGHT_QUANT_EFFICIENT_PATH,
+                                                                                                  OD_LABEL_QUANT_EFFICIENT_PATH, OD_TFLITE_META_QUANT_EFFICIENT_PATH);
        } break;
        case 7: {
-               err = engine_config_user_hosted_tflite_cpu(
-                               engine_cfg, OD_TFLITE_WEIGHT_YOLO_V5_320_PATH,
-                               OD_LABLE_YOLO_V5_320_PATH,
-                               OD_TFLITE_META_YOLO_V5_320_PATH);
+               err = engine_config_user_hosted_tflite_cpu(engine_cfg, OD_TFLITE_WEIGHT_YOLO_V5_320_PATH,
+                                                                                                  OD_LABLE_YOLO_V5_320_PATH, OD_TFLITE_META_YOLO_V5_320_PATH);
        } break;
        }
        if (err != MEDIA_VISION_ERROR_NONE) {
@@ -1367,30 +1135,19 @@ clean_object_detect_engine:
 int perform_tflite_mobilenetv1ssd_face(mv_engine_config_h handle)
 {
        const char *inputNodeName = "normalized_input_image_tensor";
-       const char *outputNodeName[] = { "TFLite_Detection_PostProcess",
-                                                                        "TFLite_Detection_PostProcess:1",
-                                                                        "TFLite_Detection_PostProcess:2",
-                                                                        "TFLite_Detection_PostProcess:3" };
+       const char *outputNodeName[] = { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1",
+                                                                        "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" };
 
        RET_IF_FAIL(engine_config_hosted_tflite_cpu(handle, FD_TFLITE_WEIGHT_PATH, NULL));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 127.5));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 4));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 127.5));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 4));
        return MEDIA_VISION_ERROR_NONE;
 }
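
Unlike the fully explicit configs above, this one delegates the weight path, backend, and target selection to engine_config_hosted_tflite_cpu and only then layers the tensor-specific attributes on top. A sketch of what that helper plausibly sets, inferred from the explicit TFLite configs in this file; the meta-path attribute name is an assumption, and the real helper defined earlier in the test source may differ:

	/* Hypothetical reconstruction, not the actual helper. */
	static int engine_config_hosted_tflite_cpu_sketch(mv_engine_config_h handle, const char *weight_path,
							  const char *meta_path)
	{
		RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, weight_path));
		RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_TFLITE));
		RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
		if (meta_path) /* meta file supplies tensor shapes and node names, so callers may pass NULL */
			RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_META_FILE_PATH, meta_path));
		return MEDIA_VISION_ERROR_NONE;
	}

When a meta file is passed (as in the hosted MobilenetV1+SSD and BlazeFace cases below), the explicit tensor-size and node-name attributes become unnecessary, which is why those case arms are two-argument or three-argument one-liners.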
 
@@ -1399,70 +1156,43 @@ int perform_opencv_resnet10ssd_face(mv_engine_config_h handle)
        const char *inputNodeName = "data";
        const char *outputNodeName[] = { "detection_out" };
 
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                       FD_OPENCV_WEIGHT_CAFFE_PATH));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
-                       FD_OPENCV_CONFIG_CAFFE_PATH));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 135.7));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 1.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_OPENCV));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                                                                                                         FD_OPENCV_WEIGHT_CAFFE_PATH));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+                                                                                                         FD_OPENCV_CONFIG_CAFFE_PATH));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 135.7));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 1.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_OPENCV));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
        return MEDIA_VISION_ERROR_NONE;
 }
 
 int perform_armnn_mobilenetv1ssd_face(mv_engine_config_h handle)
 {
        const char *inputNodeName = "normalized_input_image_tensor";
-       const char *outputNodeName[] = { "TFLite_Detection_PostProcess",
-                                                                        "TFLite_Detection_PostProcess:1",
-                                                                        "TFLite_Detection_PostProcess:2",
-                                                                        "TFLite_Detection_PostProcess:3" };
-
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                       FD_TFLITE_WEIGHT_PATH));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 127.5));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_ARMNN));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 4));
+       const char *outputNodeName[] = { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1",
+                                                                        "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" };
+
+       RET_IF_FAIL(
+                       mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, FD_TFLITE_WEIGHT_PATH));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 127.5));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_ARMNN));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 4));
        return MEDIA_VISION_ERROR_NONE;
 }
 
@@ -1471,10 +1201,8 @@ int perform_face_detection()
        int err = MEDIA_VISION_ERROR_NONE;
        mv_engine_config_h engine_cfg = NULL;
        const char *names[] = {
-               "TFLite(CPU) + MobileNetV1 + SSD",
-               "OPENCV(CPU) + Resnet10 + SSD",
-               "ARMNN(CPU) + MobileNetV1 + SSD",
-               "Hosted[o]: TFLite(cpu + MobilenetV1+SSD)",
+               "TFLite(CPU) + MobileNetV1 + SSD",        "OPENCV(CPU) + Resnet10 + SSD",
+               "ARMNN(CPU) + MobileNetV1 + SSD",         "Hosted[o]: TFLite(cpu + MobilenetV1+SSD)",
                "Hosted[o]: TFLite(cpu + BlazeFace)",
        };
 
@@ -1497,14 +1225,12 @@ int perform_face_detection()
                err = perform_armnn_mobilenetv1ssd_face(engine_cfg);
        } break;
        case 4: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, FD_TFLITE_WEIGHT_MOBILENET_V1_SSD_300_PATH,
-                               FD_TFLITE_META_MOBILENET_V1_SSD_300_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, FD_TFLITE_WEIGHT_MOBILENET_V1_SSD_300_PATH,
+                                                                                         FD_TFLITE_META_MOBILENET_V1_SSD_300_PATH);
        } break;
        case 5: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, FD_TFLITE_WEIGHT_BLAZEFACE_128_PATH,
-                               FD_TFLITE_META_BLAZEFACE_128_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, FD_TFLITE_WEIGHT_BLAZEFACE_128_PATH,
+                                                                                         FD_TFLITE_META_BLAZEFACE_128_PATH);
        } break;
        }
        if (err != MEDIA_VISION_ERROR_NONE) {
@@ -1526,24 +1252,15 @@ int perform_tflite_TweakCNN(mv_engine_config_h handle)
        const char *inputNodeName = "INPUT_TENSOR_NAME";
        const char *outputNodeName[] = { "OUTPUT_TENSOR_NAME" };
 
-       RET_IF_FAIL(
-                       engine_config_hosted_tflite_cpu(handle, FLD_TFLITE_WEIGHT_PATH, NULL));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 0.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 1.0));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 128));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 128));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
+       RET_IF_FAIL(engine_config_hosted_tflite_cpu(handle, FLD_TFLITE_WEIGHT_PATH, NULL));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 0.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 1.0));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 128));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 128));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
        return MEDIA_VISION_ERROR_NONE;
 }
 
@@ -1552,32 +1269,20 @@ int perform_opencv_cnncascade(mv_engine_config_h handle)
        const char *inputNodeName = "data";
        const char *outputNodeName[] = { "Sigmoid_fc2" };
 
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                       FLD_OPENCV_WEIGHT_CAFFE_PATH));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
-                       FLD_OPENCV_CONFIG_CAFFE_PATH));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 127.5));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_OPENCV));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 128));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 128));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                                                                                                         FLD_OPENCV_WEIGHT_CAFFE_PATH));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+                                                                                                         FLD_OPENCV_CONFIG_CAFFE_PATH));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 127.5));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_OPENCV));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 128));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 128));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
        return MEDIA_VISION_ERROR_NONE;
 }
 
@@ -1608,14 +1313,12 @@ int perform_facial_landmark_detection()
                err = perform_opencv_cnncascade(engine_cfg);
        } break;
        case 3: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, FLD_TFLITE_WIEGHT_TWEAKCNN_128_PATH,
-                               FLD_TFLITE_META_TWEAKCNN_128_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, FLD_TFLITE_WIEGHT_TWEAKCNN_128_PATH,
+                                                                                         FLD_TFLITE_META_TWEAKCNN_128_PATH);
        } break;
        case 4: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, FLD_TFLITE_WIEGHT_MEDIAPIPE_192_PATH,
-                               FLD_TFLITE_META_MEDIAPIPE_192_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, FLD_TFLITE_WIEGHT_MEDIAPIPE_192_PATH,
+                                                                                         FLD_TFLITE_META_MEDIAPIPE_192_PATH);
        } break;
        }
        if (err != MEDIA_VISION_ERROR_NONE) {
@@ -1637,28 +1340,17 @@ int perform_armnn_cpm_config(mv_engine_config_h handle)
        const char *inputNodeName = "image";
        const char *outputNodeName[] = { "Convolutional_Pose_Machine/stage_5_out" };
 
-       RET_IF_FAIL(
-                       engine_config_hosted_tflite_cpu(handle, PLD_TFLITE_WEIGHT_PATH, NULL));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_MODEL_USER_FILE_PATH, PLD_POSE_LABEL_PATH));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_MEAN_VALUE, 0.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_MODEL_STD_VALUE, 1.0));
-       RET_IF_FAIL(mv_engine_config_set_double_attribute(
-                       handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 192));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 192));
-       RET_IF_FAIL(mv_engine_config_set_int_attribute(
-                       handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
-       RET_IF_FAIL(mv_engine_config_set_string_attribute(
-                       handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
-       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(
-                       handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
+       RET_IF_FAIL(engine_config_hosted_tflite_cpu(handle, PLD_TFLITE_WEIGHT_PATH, NULL));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_USER_FILE_PATH, PLD_POSE_LABEL_PATH));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE, 0.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE, 1.0));
+       RET_IF_FAIL(mv_engine_config_set_double_attribute(handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH, 192));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 192));
+       RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3));
+       RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName));
+       RET_IF_FAIL(mv_engine_config_set_array_string_attribute(handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1));
        return MEDIA_VISION_ERROR_NONE;
 }
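
engine_config_hosted_tflite_cpu() is likewise defined elsewhere in the sample. Judging from the call sites in these hunks, it is assumed to register the hosted model file and pin the TFLite backend to the CPU target, skipping the meta file when NULL is passed. A sketch under those assumptions, not the actual sample definition:

static int engine_config_hosted_tflite_cpu(mv_engine_config_h handle, const char *tf_weight, const char *meta_file)
{
	/* Register the weight file and select TFLite on CPU. */
	RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, tf_weight));
	RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_TFLITE));
	RET_IF_FAIL(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU));
	/* Models shipped with a JSON meta file pass its path; NULL skips it. */
	if (meta_file)
		RET_IF_FAIL(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_META_FILE_PATH, meta_file));
	return MEDIA_VISION_ERROR_NONE;
}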
 
@@ -1698,59 +1390,46 @@ int perform_pose_landmark_detection()
                err = perform_armnn_cpm_config(engine_cfg);
        } break;
        case 2: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, PLD_TFLITE_WEIGHT_CPM_192_PATH,
-                               PLD_TFLITE_META_CPM_192_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, PLD_TFLITE_WEIGHT_CPM_192_PATH, PLD_TFLITE_META_CPM_192_PATH);
        } break;
        case 3: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, PLD_TFLITE_WEIGHT_MOBILENET_V1_POSENET_257_PATH,
-                               PLD_TFLITE_META_MOBILENET_V1_POSENET_257_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, PLD_TFLITE_WEIGHT_MOBILENET_V1_POSENET_257_PATH,
+                                                                                         PLD_TFLITE_META_MOBILENET_V1_POSENET_257_PATH);
        } break;
        case 4: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, PLD_TFLITE_WEIGHT_INT8_MOVENET_PATH,
-                               NULL);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, PLD_TFLITE_WEIGHT_INT8_MOVENET_PATH, NULL);
        } break;
        case 5: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_THUNDER3_256_FLOAT32_PATH,
-                               PLD_TFLITE_META_MOVENET_THUNDER3_256_FLOAT32_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_THUNDER3_256_FLOAT32_PATH,
+                                                                                         PLD_TFLITE_META_MOVENET_THUNDER3_256_FLOAT32_PATH);
        } break;
        case 6: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_LIGHTNING3_192_FLOAT32_PATH,
-                               PLD_TFLITE_META_MOVENET_LIGHTNING3_192_FLOAT32_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_LIGHTNING3_192_FLOAT32_PATH,
+                                                                                         PLD_TFLITE_META_MOVENET_LIGHTNING3_192_FLOAT32_PATH);
        } break;
        case 7: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_THUNDER3_256_INT8_QUANT_PATH,
-                               PLD_TFLITE_META_MOVENET_THUNDER3_256_INT8_QUANT_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_THUNDER3_256_INT8_QUANT_PATH,
+                                                                                         PLD_TFLITE_META_MOVENET_THUNDER3_256_INT8_QUANT_PATH);
        } break;
        case 8: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_LIGHTNING3_192_INT8_QUANT_PATH,
-                               PLD_TFLITE_META_MOVENET_LIGHTNING3_192_INT8_QUANT_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_LIGHTNING3_192_INT8_QUANT_PATH,
+                                                                                         PLD_TFLITE_META_MOVENET_LIGHTNING3_192_INT8_QUANT_PATH);
        } break;
        case 9: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_THUNDER4_256_FLOAT32_PATH,
-                               PLD_TFLITE_META_MOVENET_THUNDER4_256_FLOAT32_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_THUNDER4_256_FLOAT32_PATH,
+                                                                                         PLD_TFLITE_META_MOVENET_THUNDER4_256_FLOAT32_PATH);
        } break;
        case 10: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_LIGHTNING4_192_FLOAT32_PATH,
-                               PLD_TFLITE_META_MOVENET_LIGHTNING4_192_FLOAT32_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_LIGHTNING4_192_FLOAT32_PATH,
+                                                                                         PLD_TFLITE_META_MOVENET_LIGHTNING4_192_FLOAT32_PATH);
        } break;
        case 11: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_THUNDER4_256_INT8_QUANT_PATH,
-                               PLD_TFLITE_META_MOVENET_THUNDER4_256_INT8_QUANT_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_THUNDER4_256_INT8_QUANT_PATH,
+                                                                                         PLD_TFLITE_META_MOVENET_THUNDER4_256_INT8_QUANT_PATH);
        } break;
        case 12: {
-               err = engine_config_hosted_tflite_cpu(
-                               engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_LIGHTNING4_192_INT8_QUANT_PATH,
-                               PLD_TFLITE_META_MOVENET_LIGHTNING4_192_INT8_QUANT_PATH);
+               err = engine_config_hosted_tflite_cpu(engine_cfg, PLD_TFLITE_WEIGHT_MOVENET_LIGHTNING4_192_INT8_QUANT_PATH,
+                                                                                         PLD_TFLITE_META_MOVENET_LIGHTNING4_192_INT8_QUANT_PATH);
        } break;
        }
        if (err != MEDIA_VISION_ERROR_NONE) {
@@ -1771,8 +1450,7 @@ clean_pose_engine:
 int main()
 {
        int err = MEDIA_VISION_ERROR_NONE;
-       const char *names[] = { "Image Classification", "Object Detection",
-                                                       "Face Detection", "Facial Landmark Detection",
+       const char *names[] = { "Image Classification", "Object Detection", "Face Detection", "Facial Landmark Detection",
                                                        "Pose Landmark Detection" };
 
        int sel_opt = show_menu_linear("Select Action:", names, ARRAY_SIZE(names));
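
ARRAY_SIZE here is assumed to be the usual element-count idiom for a C array (hypothetical definition, shown only so the menu call reads on its own):

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))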
index 4e405e7..47ef375 100644
@@ -13,8 +13,7 @@
        MV_CONFIG_PATH \
        "/res/inference/images/faceDetection.jpg"
 
-void _face_detected_cb(mv_source_h source, const int number_of_faces,
-                                          const float *confidences,
+void _face_detected_cb(mv_source_h source, const int number_of_faces, const float *confidences,
                                           const mv_rectangle_s *locations, void *user_data)
 {
        EXPECT_GT(number_of_faces, 0);
@@ -27,51 +26,43 @@ public:
        {
                TestInference::ConfigureInference();
 
-               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(
-                                                 IMG_FACE, mv_source),
-                                 MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_inference_face_detect(mv_source, infer, _face_detected_cb,
-                                                                                  NULL),
-                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(IMG_FACE, mv_source), MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_inference_face_detect(mv_source, infer, _face_detected_cb, NULL), MEDIA_VISION_ERROR_NONE);
        }
 };
 
 TEST_P(TestFaceDetectionTflite, MobilenetV1_SSD)
 {
-       engine_config_hosted_tflite_model(engine_cfg, FD_TFLITE_WEIGHT_MOBILENET_V1_SSD_300_PATH,
-                                                                         NULL, _use_json_parser, _target_device_type);
+       engine_config_hosted_tflite_model(engine_cfg, FD_TFLITE_WEIGHT_MOBILENET_V1_SSD_300_PATH, NULL, _use_json_parser,
+                                                                         _target_device_type);
        if (!_use_json_parser) {
                const char *inputNodeName = "normalized_input_image_tensor";
-               const char *outputNodeName[] = { "TFLite_Detection_PostProcess",
-                                                                                       "TFLite_Detection_PostProcess:1",
-                                                                                       "TFLite_Detection_PostProcess:2",
-                                                                                       "TFLite_Detection_PostProcess:3" };
+               const char *outputNodeName[] = { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1",
+                                                                                "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" };
 
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
 
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
-                                       MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME,
-                                                                                                               inputNodeName), MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
-                                                                                                                               outputNodeName, 4), MEDIA_VISION_ERROR_NONE);
+                                                                                                                         outputNodeName, 4),
+                                 MEDIA_VISION_ERROR_NONE);
        }
 
        inferenceFace();
 }
 
 INSTANTIATE_TEST_CASE_P(Prefix, TestFaceDetectionTflite,
-                                               ::testing::Values(
-                                                       ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CPU),
-                                                       ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CPU)
-                                               )
-);
\ No newline at end of file
+                                               ::testing::Values(ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CPU),
+                                                                                 ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CPU)));
\ No newline at end of file
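
ParamTypes pairs the JSON-parser flag with the target device for these parameterized cases. Its real definition lives in the test helpers; a plausible sketch of the alias and of how a fixture would unpack it (the names below are assumptions, not the actual testsuite code):

using ParamTypes = std::tuple<bool, mv_inference_target_device_e>;

class ExampleParamFixture : public TestInference, public ::testing::WithParamInterface<ParamTypes>
{
protected:
	void ApplyParams()
	{
		/* First element toggles JSON meta-file parsing, second picks the device. */
		_use_json_parser = std::get<0>(GetParam());
		_target_device_type = std::get<1>(GetParam());
	}
};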
index f528619..aa6ce88 100644
@@ -12,9 +12,8 @@
        MV_CONFIG_PATH        \
        "/res/inference/images/faceLandmark.jpg"
 
-void _facial_landmark_detected_cb(mv_source_h source,
-                                                                 const int number_of_landmarks,
-                                                                 const mv_point_s *locations, void *user_data)
+void _facial_landmark_detected_cb(mv_source_h source, const int number_of_landmarks, const mv_point_s *locations,
+                                                                 void *user_data)
 {
        EXPECT_GT(number_of_landmarks, 0);
 }
@@ -26,12 +25,9 @@ public:
        {
                TestInference::ConfigureInference();
 
-               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(
-                                                 IMG_FACE_LANDMARK, mv_source),
+               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(IMG_FACE_LANDMARK, mv_source),
                                  MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_inference_facial_landmark_detect(
-                                                 mv_source, infer, NULL, _facial_landmark_detected_cb,
-                                                 NULL),
+               ASSERT_EQ(mv_inference_facial_landmark_detect(mv_source, infer, NULL, _facial_landmark_detected_cb, NULL),
                                  MEDIA_VISION_ERROR_NONE);
        }
 };
@@ -41,54 +37,36 @@ TEST_P(TestFaceLandmarkDetectionOpenCV, CAFFE_CNNCASCADE)
        const char *inputNodeName = "data";
        const char *outputNodeName[] = { "Sigmoid_fc2" };
 
-       ASSERT_EQ(mv_engine_config_set_string_attribute(
-                                         engine_cfg, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                                         FLD_OPENCV_WEIGHT_CAFFE_PATH),
+       ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                                                                                                       FLD_OPENCV_WEIGHT_CAFFE_PATH),
                          MEDIA_VISION_ERROR_NONE);
-       ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg,
-                                                                                                MV_INFERENCE_INPUT_DATA_TYPE,
-                                                                                                MV_INFERENCE_DATA_FLOAT32),
+       ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_FLOAT32),
                          MEDIA_VISION_ERROR_NONE);
-       ASSERT_EQ(mv_engine_config_set_string_attribute(
-                                         engine_cfg, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
-                                         FLD_OPENCV_CONFIG_CAFFE_PATH),
+       ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+                                                                                                       FLD_OPENCV_CONFIG_CAFFE_PATH),
                          MEDIA_VISION_ERROR_NONE);
-       ASSERT_EQ(mv_engine_config_set_double_attribute(
-                                         engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
+       ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
                          MEDIA_VISION_ERROR_NONE);
-       ASSERT_EQ(mv_engine_config_set_double_attribute(
-                                         engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5),
+       ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5),
                          MEDIA_VISION_ERROR_NONE);
-       ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg,
-                                                                                                MV_INFERENCE_BACKEND_TYPE,
-                                                                                                MV_INFERENCE_BACKEND_OPENCV),
+       ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_OPENCV),
                          MEDIA_VISION_ERROR_NONE);
-       ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg,
-                                                                                                MV_INFERENCE_TARGET_TYPE,
-                                                                                                MV_INFERENCE_TARGET_CPU),
+       ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU),
                          MEDIA_VISION_ERROR_NONE);
-       ASSERT_EQ(mv_engine_config_set_int_attribute(
-                                         engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 128),
+       ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 128),
                          MEDIA_VISION_ERROR_NONE);
-       ASSERT_EQ(mv_engine_config_set_int_attribute(
-                                         engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 128),
+       ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 128),
                          MEDIA_VISION_ERROR_NONE);
-       ASSERT_EQ(mv_engine_config_set_int_attribute(
-                                         engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
+       ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
                          MEDIA_VISION_ERROR_NONE);
-       ASSERT_EQ(mv_engine_config_set_string_attribute(
-                                         engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
+       ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
                          MEDIA_VISION_ERROR_NONE);
-       ASSERT_EQ(mv_engine_config_set_array_string_attribute(
-                                         engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
-                                         outputNodeName, 1),
+       ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName,
+                                                                                                                 1),
                          MEDIA_VISION_ERROR_NONE);
        inferenceFaceLandmark();
 }
 
 INSTANTIATE_TEST_CASE_P(Prefix, TestFaceLandmarkDetectionOpenCV,
-                                               ::testing::Values(
-                                                       ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CPU),
-                                                       ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CPU)
-                                               )
-);
\ No newline at end of file
+                                               ::testing::Values(ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CPU),
+                                                                                 ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CPU)));
\ No newline at end of file
index fbece70..f7171bd 100644
        MV_CONFIG_PATH                     \
        "/models/IC_Q/snpe/imagenet_slim_labels.txt"
 #define IC_SNPE_WEIGHT_QUANT_INCEPTION_V3_299_PATH \
-       MV_CONFIG_PATH                                   \
+       MV_CONFIG_PATH                                 \
        "/models/IC_Q/snpe/inception_v3_quantized.dlc"
 
-void _image_classified_cb(mv_source_h source, const int number_of_classes,
-                                                 const int *indices, const char **names,
+void _image_classified_cb(mv_source_h source, const int number_of_classes, const int *indices, const char **names,
                                                  const float *confidences, void *user_data)
 {
        const std::string answer = "banana";
@@ -60,10 +59,8 @@ public:
        {
                TestInference::ConfigureInference();
 
-               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(IMG_BANANA, mv_source),
-                                                                                                                                         MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_inference_image_classify(mv_source, infer, NULL,
-                                                                                         _image_classified_cb, NULL),
+               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(IMG_BANANA, mv_source), MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_inference_image_classify(mv_source, infer, NULL, _image_classified_cb, NULL),
                                  MEDIA_VISION_ERROR_NONE);
        }
 };
@@ -75,11 +72,8 @@ public:
        {
                TestInference::ConfigureInference();
 
-               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(
-                                                 IMG_BANANA, mv_source),
-                                 MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_inference_image_classify(mv_source, infer, NULL,
-                                                                                         _image_classified_cb, NULL),
+               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(IMG_BANANA, mv_source), MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_inference_image_classify(mv_source, infer, NULL, _image_classified_cb, NULL),
                                  MEDIA_VISION_ERROR_NONE);
        }
 };
@@ -87,29 +81,29 @@ public:
 TEST_P(TestImageClassificationTflite, MobilenetV1)
 {
        engine_config_hosted_tflite_model(engine_cfg, IC_TFLITE_WEIGHT_MOBILENET_V1_224_PATH,
-                                                                         IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser,
-                                                                         _target_device_type);
+                                                                         IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser, _target_device_type);
 
        if (!_use_json_parser) {
                const char *inputNodeName = "input";
                const char *outputNodeName[] = { "MobilenetV1/Predictions/Reshape_1" };
 
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
-                                       MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME,
-                                                                                                               inputNodeName), MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
-                                                                                                                         outputNodeName, 1), MEDIA_VISION_ERROR_NONE);
+                                                                                                                         outputNodeName, 1),
+                                 MEDIA_VISION_ERROR_NONE);
        }
 
        inferenceBanana();
@@ -118,29 +112,29 @@ TEST_P(TestImageClassificationTflite, MobilenetV1)
 TEST_P(TestImageClassificationTflite, MobilenetV2)
 {
        engine_config_hosted_tflite_model(engine_cfg, IC_TFLITE_WEIGHT_MOBILENET_V2_224_PATH,
-                                                                         IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser,
-                                                                         _target_device_type);
+                                                                         IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser, _target_device_type);
 
        if (!_use_json_parser) {
                const char *inputNodeName = "input";
                const char *outputNodeName[] = { "MobilenetV2/Predictions/Reshape_1" };
 
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.01),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
-                                       MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME,
-                                                                                                               inputNodeName), MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
-                                                                                                                         outputNodeName, 1), MEDIA_VISION_ERROR_NONE);
+                                                                                                                         outputNodeName, 1),
+                                 MEDIA_VISION_ERROR_NONE);
        }
 
        inferenceBanana();
@@ -148,30 +142,30 @@ TEST_P(TestImageClassificationTflite, MobilenetV2)
 
 TEST_P(TestImageClassificationTflite, Densenet)
 {
-       engine_config_hosted_tflite_model(engine_cfg, IC_TFLITE_WEIGHT_DENSENET_224_PATH,
-                                                                         IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser,
-                                                                         _target_device_type);
+       engine_config_hosted_tflite_model(engine_cfg, IC_TFLITE_WEIGHT_DENSENET_224_PATH, IC_LABEL_MOBILENET_V1_224_PATH,
+                                                                         _use_json_parser, _target_device_type);
 
        if (!_use_json_parser) {
                const char *inputNodeName = "Placeholder";
                const char *outputNodeName[] = { "softmax_tensor" };
 
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 255.0),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
-                                       MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME,
-                                                                                                               inputNodeName), MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
-                                                                                                                         outputNodeName, 1), MEDIA_VISION_ERROR_NONE);
+                                                                                                                         outputNodeName, 1),
+                                 MEDIA_VISION_ERROR_NONE);
        }
 
        inferenceBanana();
@@ -179,30 +173,30 @@ TEST_P(TestImageClassificationTflite, Densenet)
 
 TEST_P(TestImageClassificationTflite, Nasnet)
 {
-       engine_config_hosted_tflite_model(engine_cfg, IC_TFLITE_WEIGHT_NASNET_224_PATH,
-                                                                         IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser,
-                                                                         _target_device_type);
+       engine_config_hosted_tflite_model(engine_cfg, IC_TFLITE_WEIGHT_NASNET_224_PATH, IC_LABEL_MOBILENET_V1_224_PATH,
+                                                                         _use_json_parser, _target_device_type);
 
        if (!_use_json_parser) {
                const char *inputNodeName = "input";
                const char *outputNodeName[] = { "final_layer/predictions" };
 
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
-                                       MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME,
-                                                                                                               inputNodeName), MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
-                                                                                                                         outputNodeName, 1), MEDIA_VISION_ERROR_NONE);
+                                                                                                                         outputNodeName, 1),
+                                 MEDIA_VISION_ERROR_NONE);
        }
 
        inferenceBanana();
@@ -210,30 +204,30 @@ TEST_P(TestImageClassificationTflite, Nasnet)
 
 TEST_P(TestImageClassificationTflite, MNasnet)
 {
-       engine_config_hosted_tflite_model(engine_cfg, IC_TFLITE_WEIGHT_MNASNET_224_PATH,
-                                                                         IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser,
-                                                                         _target_device_type);
+       engine_config_hosted_tflite_model(engine_cfg, IC_TFLITE_WEIGHT_MNASNET_224_PATH, IC_LABEL_MOBILENET_V1_224_PATH,
+                                                                         _use_json_parser, _target_device_type);
 
        if (!_use_json_parser) {
                const char *inputNodeName = "input";
                const char *outputNodeName[] = { "output" };
 
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 57.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
-                                       MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME,
-                                                                                                               inputNodeName), MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
-                                                                                                                         outputNodeName, 1), MEDIA_VISION_ERROR_NONE);
+                                                                                                                         outputNodeName, 1),
+                                 MEDIA_VISION_ERROR_NONE);
        }
 
        inferenceBanana();
@@ -241,30 +235,30 @@ TEST_P(TestImageClassificationTflite, MNasnet)
 
 TEST_P(TestImageClassificationTflite, Squeezenet)
 {
-       engine_config_hosted_tflite_model(engine_cfg, IC_TFLITE_WEIGHT_SQUEEZENET_224_PATH,
-                                                                         IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser,
-                                                                         _target_device_type);
+       engine_config_hosted_tflite_model(engine_cfg, IC_TFLITE_WEIGHT_SQUEEZENET_224_PATH, IC_LABEL_MOBILENET_V1_224_PATH,
+                                                                         _use_json_parser, _target_device_type);
 
        if (!_use_json_parser) {
                const char *inputNodeName = "Placeholder";
                const char *outputNodeName[] = { "softmax_tensor" };
 
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
-                                       MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME,
-                                                                                                               inputNodeName), MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
-                                                                                                                         outputNodeName, 1), MEDIA_VISION_ERROR_NONE);
+                                                                                                                         outputNodeName, 1),
+                                 MEDIA_VISION_ERROR_NONE);
        }
 
        inferenceBanana();
@@ -273,31 +267,31 @@ TEST_P(TestImageClassificationTflite, Squeezenet)
 TEST_P(TestImageClassificationTflite, QUANT_MobilenetV1)
 {
        engine_config_hosted_tflite_model(engine_cfg, IC_TFLITE_WEIGHT_QUANT_MOBILENET_V1_224_PATH,
-                                                                         IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser,
-                                                                         _target_device_type);
+                                                                         IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser, _target_device_type);
 
        if (!_use_json_parser) {
                const char *inputNodeName = "input";
                const char *outputNodeName[] = { "MobilenetV1/Predictions/Reshape_1" };
 
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_UINT8),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 0.0),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 1.0),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
-                                       MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME,
-                                                                                                               inputNodeName), MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
-                                                                                                                         outputNodeName, 1), MEDIA_VISION_ERROR_NONE);
+                                                                                                                         outputNodeName, 1),
+                                 MEDIA_VISION_ERROR_NONE);
        }
 
        inferenceBanana();
@@ -306,50 +300,44 @@ TEST_P(TestImageClassificationTflite, QUANT_MobilenetV1)
 TEST_P(TestImageClassificationSnpe, DISABLED_InceptionV3_Quantized)
 {
        engine_config_hosted_snpe_model(engine_cfg, IC_SNPE_WEIGHT_QUANT_INCEPTION_V3_299_PATH,
-                                                                       IC_LABEL_INCEPTION_V3_299_PATH, _use_json_parser,
-                                                                       _target_device_type);
+                                                                       IC_LABEL_INCEPTION_V3_299_PATH, _use_json_parser, _target_device_type);
 
        if (!_use_json_parser) {
                const char *inputNodeName = "input";
                const char *outputNodeName[] = { "output" };
 
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_UINT8),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 299),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 299),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
-                                       MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME,
-                                                                                                               inputNodeName), MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
-                                                                                                                         outputNodeName, 1), MEDIA_VISION_ERROR_NONE);
+                                                                                                                         outputNodeName, 1),
+                                 MEDIA_VISION_ERROR_NONE);
        }
 
        inferenceBanana();
 }
 
 INSTANTIATE_TEST_CASE_P(Prefix, TestImageClassificationTflite,
-                                               ::testing::Values(
-                                                       ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CPU),
-                                                       ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CPU)
-                                               )
-);
+                                               ::testing::Values(ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CPU),
+                                                                                 ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CPU)));
 
 INSTANTIATE_TEST_CASE_P(Prefix, TestImageClassificationSnpe,
-                                               ::testing::Values(
-                                                       ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CPU),
-                                                       ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_GPU),
-                                                       ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CUSTOM),
-                                                       ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CPU),
-                                                       ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_GPU),
-                                                       ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CUSTOM)
-                                               )
-);
\ No newline at end of file
+                                               ::testing::Values(ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CPU),
+                                                                                 ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_GPU),
+                                                                                 ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CUSTOM),
+                                                                                 ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CPU),
+                                                                                 ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_GPU),
+                                                                                 ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CUSTOM)));
\ No newline at end of file
index 1d66f77..df99f74 100644
@@ -2,9 +2,7 @@
 #include <image_helper.h>
 #include "test_inference_helper.hpp"
 
-TestInference::TestInference() :
-               _use_json_parser(false),
-               _target_device_type(MV_INFERENCE_TARGET_DEVICE_NONE)
+TestInference::TestInference() : _use_json_parser(false), _target_device_type(MV_INFERENCE_TARGET_DEVICE_NONE)
 {
        EXPECT_EQ(mv_create_engine_config(&engine_cfg), MEDIA_VISION_ERROR_NONE);
        EXPECT_EQ(mv_inference_create(&infer), MEDIA_VISION_ERROR_NONE);
@@ -19,14 +17,11 @@ TestInference::~TestInference()
 
 void TestInference::ConfigureInference()
 {
-       ASSERT_EQ(mv_inference_configure(infer, engine_cfg),
-                               MEDIA_VISION_ERROR_NONE);
+       ASSERT_EQ(mv_inference_configure(infer, engine_cfg), MEDIA_VISION_ERROR_NONE);
        ASSERT_EQ(mv_inference_prepare(infer), MEDIA_VISION_ERROR_NONE);
 }
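
The two calls mirror the required ordering of the Media Vision C API as the fixture uses it: all engine attributes are set first, mv_inference_configure() then binds the configuration to the inference handle, and mv_inference_prepare() loads the model. A minimal sketch of the same flow outside the fixture, using only calls that appear in this file:

mv_engine_config_h cfg;
mv_inference_h infer;
EXPECT_EQ(mv_create_engine_config(&cfg), MEDIA_VISION_ERROR_NONE);
EXPECT_EQ(mv_inference_create(&infer), MEDIA_VISION_ERROR_NONE);
/* ... set backend, device and model attributes on cfg here ... */
ASSERT_EQ(mv_inference_configure(infer, cfg), MEDIA_VISION_ERROR_NONE);
ASSERT_EQ(mv_inference_prepare(infer), MEDIA_VISION_ERROR_NONE);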
 
-void engine_config_hosted_model_config(mv_engine_config_h handle,
-                                                                          const char *tf_weight,
-                                                                          const bool use_json_parser)
+void engine_config_hosted_model_config(mv_engine_config_h handle, const char *tf_weight, const bool use_json_parser)
 {
        EXPECT_EQ(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, tf_weight),
                          MEDIA_VISION_ERROR_NONE);
@@ -36,27 +31,21 @@ void engine_config_hosted_model_config(mv_engine_config_h handle,
                meta_file_path = meta_file_path.substr(0, meta_file_path.find('.'));
                meta_file_path += std::string(".json");
 
-               EXPECT_EQ(mv_engine_config_set_string_attribute(
-                                               handle, MV_INFERENCE_MODEL_META_FILE_PATH , meta_file_path.c_str()),
-                               MEDIA_VISION_ERROR_NONE);
+               EXPECT_EQ(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_META_FILE_PATH,
+                                                                                                               meta_file_path.c_str()),
+                                 MEDIA_VISION_ERROR_NONE);
        }
 }
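
When use_json_parser is set, the meta path is derived from the weight path by cutting at the first '.' and appending ".json". Worked through on a hypothetical weight path, illustrating the string logic only:

std::string meta_file_path = "/usr/share/capi-media-vision/models/fd_tflite_model.tflite"; /* assumed example */
meta_file_path = meta_file_path.substr(0, meta_file_path.find('.')); /* -> ".../fd_tflite_model"      */
meta_file_path += std::string(".json");                              /* -> ".../fd_tflite_model.json" */

Since find('.') stops at the first dot anywhere in the string, the derivation assumes no earlier dot appears in the directory components or the basename.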
 
-void engine_config_hosted_tflite_model(mv_engine_config_h handle,
-                                                                          const char *tf_weight,
-                                                                          const char *user_file,
+void engine_config_hosted_tflite_model(mv_engine_config_h handle, const char *tf_weight, const char *user_file,
                                                                           const bool use_json_parser,
                                                                           const mv_inference_target_device_e target_device_type)
 {
        engine_config_hosted_model_config(handle, tf_weight, use_json_parser);
 
-       EXPECT_EQ(mv_engine_config_set_int_attribute(handle,
-                                                                                                MV_INFERENCE_BACKEND_TYPE,
-                                                                                                MV_INFERENCE_BACKEND_TFLITE),
+       EXPECT_EQ(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_TFLITE),
                          MEDIA_VISION_ERROR_NONE);
-       EXPECT_EQ(mv_engine_config_set_int_attribute(handle,
-                                                                                                MV_INFERENCE_TARGET_DEVICE_TYPE,
-                                                                                                target_device_type),
+       EXPECT_EQ(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_DEVICE_TYPE, target_device_type),
                          MEDIA_VISION_ERROR_NONE);
 
        if (user_file) {
@@ -65,24 +54,16 @@ void engine_config_hosted_tflite_model(mv_engine_config_h handle,
        }
 }
 
-void engine_config_hosted_snpe_model(mv_engine_config_h handle,
-                                                                        const char *tf_weight,
-                                                                        const char *user_file,
-                                                                        const bool use_json_parser,
-                                                                        const mv_inference_target_device_e target_device_type)
+void engine_config_hosted_snpe_model(mv_engine_config_h handle, const char *tf_weight, const char *user_file,
+                                                                        const bool use_json_parser, const mv_inference_target_device_e target_device_type)
 {
        engine_config_hosted_model_config(handle, tf_weight, use_json_parser);
 
-       EXPECT_EQ(mv_engine_config_set_int_attribute(handle,
-                                                                                                MV_INFERENCE_BACKEND_TYPE,
-                                                                                                MV_INFERENCE_BACKEND_SNPE),
+       EXPECT_EQ(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_SNPE),
                          MEDIA_VISION_ERROR_NONE);
-       EXPECT_EQ(mv_engine_config_set_int_attribute(handle,
-                                                                                                MV_INFERENCE_TARGET_DEVICE_TYPE,
-                                                                                                target_device_type),
+       EXPECT_EQ(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_DEVICE_TYPE, target_device_type),
                          MEDIA_VISION_ERROR_NONE);
 
-       EXPECT_EQ(mv_engine_config_set_string_attribute(
-                                         handle, MV_INFERENCE_MODEL_USER_FILE_PATH, user_file),
+       EXPECT_EQ(mv_engine_config_set_string_attribute(handle, MV_INFERENCE_MODEL_USER_FILE_PATH, user_file),
                          MEDIA_VISION_ERROR_NONE);
 }
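The hunks above are typical of the whole patch: parameter and argument lists that were previously broken one per line are re-packed to fill a wider column limit, with continuation lines aligned under the opening parenthesis. For orientation, here is a hypothetical subset of .clang-format options that would produce output like this. The authoritative settings are in the .clang-format file listed in this commit; the keys below are assumptions on my part, though all are valid clang-format 15 options:

    # Hypothetical subset; see the commit's .clang-format for the real settings.
    BasedOnStyle: LLVM
    ColumnLimit: 120
    UseTab: Always
    IndentWidth: 4
    TabWidth: 4
    BinPackArguments: true
    BinPackParameters: true
    AlignAfterOpenBracket: Align
    Cpp11BracedListStyle: false   # yields "{ 0, 255, 0 }" rather than "{0, 255, 0}"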
index 42c0c48..b658a5b 100644
        MV_CONFIG_PATH                                   \
        "/models/OD/snpe/lite2_fp32.dlc"
 
-void _object_detected_cb(mv_source_h source, const int number_of_objects,
-                                                const int *indices, const char **names,
-                                                const float *confidences,
-                                                const mv_rectangle_s *locations, void *user_data)
+void _object_detected_cb(mv_source_h source, const int number_of_objects, const int *indices, const char **names,
+                                                const float *confidences, const mv_rectangle_s *locations, void *user_data)
 {
        auto answer = static_cast<std::string *>(user_data);
        auto answer_found = false;
@@ -43,65 +41,47 @@ public:
                std::string result("Dog");
                TestInference::ConfigureInference();
 
-               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(
-                                                 IMG_DOG, mv_source),
-                                 MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_inference_object_detect(mv_source, infer,
-                                                                                        _object_detected_cb, &result),
-                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(IMG_DOG, mv_source), MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_inference_object_detect(mv_source, infer, _object_detected_cb, &result), MEDIA_VISION_ERROR_NONE);
        }
 };
 
 TEST_P(TestObjectDetectionTflite, MobilenetV1_SSD)
 {
-       engine_config_hosted_tflite_model(
-                       engine_cfg, OD_TFLITE_WEIGHT_MOBILENET_V1_SSD_300_PATH,
-                       OD_LABEL_MOBILENET_V1_SSD_300_PATH, _use_json_parser,
-                       _target_device_type);
+       engine_config_hosted_tflite_model(engine_cfg, OD_TFLITE_WEIGHT_MOBILENET_V1_SSD_300_PATH,
+                                                                         OD_LABEL_MOBILENET_V1_SSD_300_PATH, _use_json_parser, _target_device_type);
 
        if (!_use_json_parser) {
                const char *inputNodeName = "normalized_input_image_tensor";
-               const char *outputNodeName[] = { "TFLite_Detection_PostProcess",
-                                                                                "TFLite_Detection_PostProcess:1",
-                                                                                "TFLite_Detection_PostProcess:2",
-                                                                                "TFLite_Detection_PostProcess:3" };
+               const char *outputNodeName[] = { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1",
+                                                                                "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" };
 
-               ASSERT_EQ(mv_engine_config_set_double_attribute(
-                                                 engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
+               ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
                                  MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_double_attribute(
-                                                 engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5),
+               ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5),
                                  MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_double_attribute(
-                                                 engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3),
+               ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3),
                                  MEDIA_VISION_ERROR_NONE);
 
-               ASSERT_EQ(mv_engine_config_set_int_attribute(
-                                                 engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300),
+               ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300),
                                  MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_int_attribute(
-                                                 engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300),
+               ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300),
                                  MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_int_attribute(
-                                                 engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
+               ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
                                  MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_string_attribute(
-                                                 engine_cfg, MV_INFERENCE_INPUT_NODE_NAME,
-                                                 inputNodeName),
+               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
                                  MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_array_string_attribute(
-                                                 engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
-                                                 outputNodeName, 4),
+               ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
+                                                                                                                         outputNodeName, 4),
                                  MEDIA_VISION_ERROR_NONE);
        }
 
        inferenceDog();
 }
 
-INSTANTIATE_TEST_CASE_P(
-               Prefix, TestObjectDetectionTflite,
-               ::testing::Values(ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CPU),
-                                                 ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CPU)));
+INSTANTIATE_TEST_CASE_P(Prefix, TestObjectDetectionTflite,
+                                               ::testing::Values(ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CPU),
+                                                                                 ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CPU)));
 
 class TestObjectDetectionSnpe : public TestInference
 {
@@ -111,26 +91,19 @@ public:
                std::string result("dog");
                TestInference::ConfigureInference();
 
-               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(
-                                                 IMG_DOG, mv_source),
-                                 MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_inference_object_detect(mv_source, infer,
-                                                                                        _object_detected_cb, &result),
-                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(IMG_DOG, mv_source), MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_inference_object_detect(mv_source, infer, _object_detected_cb, &result), MEDIA_VISION_ERROR_NONE);
        }
 };
 
 TEST_P(TestObjectDetectionSnpe, DISABLED_EFDLite2QC)
 {
        ASSERT_TRUE(_use_json_parser);
-       engine_config_hosted_snpe_model(
-                       engine_cfg, OD_SNPE_WEIGHT_QC_EFFCIENTDET_LITE2_448_PATH,
-                       OD_LABEL_EFFICIENTDET_LITE2_448_PATH, _use_json_parser,
-                       _target_device_type);
+       engine_config_hosted_snpe_model(engine_cfg, OD_SNPE_WEIGHT_QC_EFFCIENTDET_LITE2_448_PATH,
+                                                                       OD_LABEL_EFFICIENTDET_LITE2_448_PATH, _use_json_parser, _target_device_type);
 
        inferenceDog();
 }
 
-INSTANTIATE_TEST_CASE_P(
-               Prefix, TestObjectDetectionSnpe,
-               ::testing::Values(ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CUSTOM)));
\ No newline at end of file
+INSTANTIATE_TEST_CASE_P(Prefix, TestObjectDetectionSnpe,
+                                               ::testing::Values(ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CUSTOM)));
\ No newline at end of file
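A side note on the macro visible in this file: INSTANTIATE_TEST_CASE_P is the old googletest spelling, deprecated since googletest 1.10 in favor of INSTANTIATE_TEST_SUITE_P. A formatting-only patch is right to leave it alone; if the suites were ever migrated, the call would simply rename the macro, for example:

    // Sketch only: same arguments, renamed macro (googletest >= 1.10).
    INSTANTIATE_TEST_SUITE_P(Prefix, TestObjectDetectionSnpe,
                             ::testing::Values(ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CUSTOM)));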
index 88f9918..3d04221 100644
@@ -9,13 +9,10 @@
        MV_CONFIG_PATH        \
        "/res/inference/images/poseLandmark.jpg"
 
-void _pose_landmark_detected_cb(mv_source_h source,
-                                                               mv_inference_pose_result_h pose,
-                                                               void *user_data)
+void _pose_landmark_detected_cb(mv_source_h source, mv_inference_pose_result_h pose, void *user_data)
 {
        int cb_number_of_poses = 0;
-       ASSERT_EQ(mv_inference_pose_get_number_of_poses(pose, &cb_number_of_poses),
-                         MEDIA_VISION_ERROR_NONE);
+       ASSERT_EQ(mv_inference_pose_get_number_of_poses(pose, &cb_number_of_poses), MEDIA_VISION_ERROR_NONE);
        ASSERT_EQ(cb_number_of_poses, 1);
 }
 
@@ -26,54 +23,47 @@ public:
        {
                TestInference::ConfigureInference();
 
-               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(
-                                                 IMG_POSE_LANDMARK, mv_source),
+               ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource(IMG_POSE_LANDMARK, mv_source),
                                  MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_inference_pose_landmark_detect(mv_source, infer, NULL,
-                                                                                                       _pose_landmark_detected_cb,
-                                                                                                       NULL),
+               ASSERT_EQ(mv_inference_pose_landmark_detect(mv_source, infer, NULL, _pose_landmark_detected_cb, NULL),
                                  MEDIA_VISION_ERROR_NONE);
        }
 };
 
 TEST_P(TestPoseLandmarkDetectionTflite, MobilenetV1)
 {
-       engine_config_hosted_tflite_model(engine_cfg, PLD_TFLITE_WEIGHT_MOBILENET_V1_POSENET_257_PATH,
-                                                                         NULL, _use_json_parser, _target_device_type);
+       engine_config_hosted_tflite_model(engine_cfg, PLD_TFLITE_WEIGHT_MOBILENET_V1_POSENET_257_PATH, NULL,
+                                                                         _use_json_parser, _target_device_type);
 
        if (!_use_json_parser) {
                const char *inputNodeName = "sub_2";
-               const char *outputNodeName[] = { "MobilenetV1/heatmap_2/BiasAdd",
-                                                                                       "MobilenetV1/offset_2/BiasAdd",
-                                                                                       "MobilenetV1/displacement_fwd_2/BiasAdd",
-                                                                                       "MobilenetV1/displacement_bwd_2/BiasAdd" };
+               const char *outputNodeName[] = { "MobilenetV1/heatmap_2/BiasAdd", "MobilenetV1/offset_2/BiasAdd",
+                                                                                "MobilenetV1/displacement_fwd_2/BiasAdd",
+                                                                                "MobilenetV1/displacement_bwd_2/BiasAdd" };
 
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
 
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300),
-                                       MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3),
-                                       MEDIA_VISION_ERROR_NONE);
-               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME,
-                                                                                                               inputNodeName), MEDIA_VISION_ERROR_NONE);
+                                 MEDIA_VISION_ERROR_NONE);
+               ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, inputNodeName),
+                                 MEDIA_VISION_ERROR_NONE);
                ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
-                                                                                                                               outputNodeName, 4), MEDIA_VISION_ERROR_NONE);
+                                                                                                                         outputNodeName, 4),
+                                 MEDIA_VISION_ERROR_NONE);
        }
 
-
        inferencePoseLandmark();
 }
 
 INSTANTIATE_TEST_CASE_P(Prefix, TestPoseLandmarkDetectionTflite,
-                                               ::testing::Values(
-                                                       ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CPU),
-                                                       ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CPU)
-                                               )
-);
\ No newline at end of file
+                                               ::testing::Values(ParamTypes(false, MV_INFERENCE_TARGET_DEVICE_CPU),
+                                                                                 ParamTypes(true, MV_INFERENCE_TARGET_DEVICE_CPU)));
\ No newline at end of file
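The surveillance test-suite diff that follows shows two further conventions from this patch: spaces inside braced initializer lists, and long calls that keep the format string intact while the trailing arguments wrap and align under the opening parenthesis. A minimal, self-contained C sketch of both (the names here are illustrative, not taken from the patch):

    #include <stdio.h>

    /* Spaces inside braced initializer lists (Cpp11BracedListStyle: false). */
    static const int green_color[] = { 0, 255, 0 };

    int main(void)
    {
            /* When a call exceeds the column limit, the string literal stays
             * whole and the remaining arguments align under the open paren,
             * as in the hunks below. */
            printf("Sample color is (%d, %d, %d).\n",
                   green_color[0], green_color[1], green_color[2]);
            return 0;
    }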
index 5b7810c..89987f7 100644
@@ -52,9 +52,9 @@
 static mv_surveillance_event_trigger_h is_subscribed[MAX_EVENTS_NUMBER];
 static int video_streams_ids[MAX_EVENTS_NUMBER];
 static unsigned int trigger_id_cnt = 0;
-static const int green_color[] = {0, 255, 0};
-static const int red_color[] = {0, 0, 255};
-static const int blue_color[] = {255, 0, 0};
+static const int green_color[] = { 0, 255, 0 };
+static const int red_color[] = { 0, 0, 255 };
+static const int blue_color[] = { 255, 0, 0 };
 static bool save_results_to_image = false;
 /*----------------------------------------------------*/
 /*functions*/
@@ -95,26 +95,14 @@ void turn_on_off_saving_to_image();
 /*----------------------------------------------------*/
 /* callbacks */
 
-void detect_person_appeared_cb(
-       mv_surveillance_event_trigger_h handle,
-       mv_source_h source,
-       int video_stream_id,
-       mv_surveillance_result_h event_result,
-       void *user_data);
-
-void person_recognized_cb(
-       mv_surveillance_event_trigger_h handle,
-       mv_source_h source,
-       int video_stream_id,
-       mv_surveillance_result_h event_result,
-       void *user_data);
-
-void movement_detected_cb(
-       mv_surveillance_event_trigger_h handle,
-       mv_source_h source,
-       int video_stream_id,
-       mv_surveillance_result_h event_result,
-       void *user_data);
+void detect_person_appeared_cb(mv_surveillance_event_trigger_h handle, mv_source_h source, int video_stream_id,
+                                                          mv_surveillance_result_h event_result, void *user_data);
+
+void person_recognized_cb(mv_surveillance_event_trigger_h handle, mv_source_h source, int video_stream_id,
+                                                 mv_surveillance_result_h event_result, void *user_data);
+
+void movement_detected_cb(mv_surveillance_event_trigger_h handle, mv_source_h source, int video_stream_id,
+                                                 mv_surveillance_result_h event_result, void *user_data);
 
 /*----------------------------------------------------*/
 
@@ -127,16 +115,14 @@ int main(void)
        init_is_subscribed();
 
        const int options[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
-       const char *names[8] = {
-               "Get list of supported events",
-               "Get identificators of subscribed events",
-               "Subscribe to event",
-               "Unsubscribe from event",
-               "Unsubscribe from all events",
-               "Push source",
-               "Turn on (off) saving event result to image",
-               "Exit"
-       };
+       const char *names[8] = { "Get list of supported events",
+                                                        "Get identificators of subscribed events",
+                                                        "Subscribe to event",
+                                                        "Unsubscribe from event",
+                                                        "Unsubscribe from all events",
+                                                        "Push source",
+                                                        "Turn on (off) saving event result to image",
+                                                        "Exit" };
 
        while (1) {
                char exit = 'n';
@@ -178,8 +164,7 @@ int main(void)
                        const char *names_last[2] = { "No", "Yes" };
 
                        while (sel_opt == 0) {
-                               sel_opt = show_menu("Are you sure?",
-                                                                       options_last, names_last, 2);
+                               sel_opt = show_menu("Are you sure?", options_last, names_last, 2);
                                switch (sel_opt) {
                                case 1:
                                        exit = 'n';
@@ -233,11 +218,9 @@ void print_is_subscribed()
                PRINT_Y("List of subscribed events is empty");
 }
 
-static const char *EVENT_TYPES_NAMES[MAX_EVENT_TYPE_LEN] = {
-       MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED,
-       MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED,
-       MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED
-};
+static const char *EVENT_TYPES_NAMES[MAX_EVENT_TYPE_LEN] = { MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED,
+                                                                                                                        MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED,
+                                                                                                                        MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED };
 
 static const unsigned int NUMBER_OF_TYPES = 3u;
 
@@ -261,11 +244,12 @@ bool foreach_event_type_cb(const char *event_type, void *user_data)
        PRINT_W("%s", event_type);
        PRINT_G("\tList of supported event result value names is:");
 
-       const int error = mv_surveillance_foreach_event_result_name(
-                       event_type, foreach_event_result_value_name_cb, user_data);
+       const int error =
+                       mv_surveillance_foreach_event_result_name(event_type, foreach_event_result_value_name_cb, user_data);
        if (MEDIA_VISION_ERROR_NONE != error) {
                PRINT_E("Error occurred when trying to get value names for "
-                               "event named '%s'", event_type);
+                               "event named '%s'",
+                               event_type);
                return true;
        }
        return true;
@@ -275,15 +259,13 @@ void print_supported_events()
 {
        PRINT_G("List of supported events is:");
 
-       const int error = mv_surveillance_foreach_supported_event_type(
-                       foreach_event_type_cb, NULL);
+       const int error = mv_surveillance_foreach_supported_event_type(foreach_event_type_cb, NULL);
 
        if (MEDIA_VISION_ERROR_NONE != error)
                PRINT_R("Error occurred when trying to get list of event type names \n");
 }
 
-int create_trigger_handle_by_event_name(
-       mv_surveillance_event_trigger_h *handle)
+int create_trigger_handle_by_event_name(mv_surveillance_event_trigger_h *handle)
 {
        PRINT_G("\nSelect event type:");
 
@@ -292,8 +274,7 @@ int create_trigger_handle_by_event_name(
                printf("#%d. %s\n", i, EVENT_TYPES_NAMES[i]);
 
        size_t event_id = 0ul;
-       while (input_size("Input event type (unsigned integer value):",
-                               NUMBER_OF_TYPES - 1, &event_id) == -1) {
+       while (input_size("Input event type (unsigned integer value):", NUMBER_OF_TYPES - 1, &event_id) == -1) {
                PRINT_R("Incorrect input! Try again.\n List of supported events is:");
 
                unsigned int i = 0u;
@@ -301,11 +282,11 @@ int create_trigger_handle_by_event_name(
                        printf("%d\t%s\n", i, EVENT_TYPES_NAMES[i]);
        }
 
-       const int error = mv_surveillance_event_trigger_create(
-                                               EVENT_TYPES_NAMES[event_id], handle);
+       const int error = mv_surveillance_event_trigger_create(EVENT_TYPES_NAMES[event_id], handle);
        if (MEDIA_VISION_ERROR_NONE != error) {
                PRINT_E("mv_surveillance_event_trigger_create() error!\n"
-                               "Error code: %i\n", error);
+                               "Error code: %i\n",
+                               error);
                return error;
        }
 
@@ -317,7 +298,8 @@ bool try_destroy_event_trigger(mv_surveillance_event_trigger_h trigger)
        const int error = mv_surveillance_event_trigger_destroy(trigger);
        if (MEDIA_VISION_ERROR_NONE != error) {
                PRINT_E("Error with code %d was occured when try to destroy "
-                               "event trigger.", error);
+                               "event trigger.",
+                               error);
                return false;
        }
        return true;
@@ -335,24 +317,23 @@ void subscribe_to_event()
        int error = create_trigger_handle_by_event_name(&event_trigger);
        if (MEDIA_VISION_ERROR_NONE != error) {
                PRINT_E("Error occurred when creating of event trigger. "
-                               "Error code: %i", error);
+                               "Error code: %i",
+                               error);
                try_destroy_event_trigger(event_trigger);
                return;
        }
 
        int video_stream_id = 0;
 
-       while (input_int("Input video stream identificator (integer value):",
-                               INT_MIN,
-                               INT_MAX,
-                               &video_stream_id) == -1)
+       while (input_int("Input video stream identificator (integer value):", INT_MIN, INT_MAX, &video_stream_id) == -1)
                PRINT_R("Incorrect input! Try again.");
 
        char *event_type = NULL;
        error = mv_surveillance_get_event_trigger_type(event_trigger, &event_type);
        if (MEDIA_VISION_ERROR_NONE != error) {
                PRINT_E("Error occurred when getting of event trigger type. "
-                               "Error code: %i", error);
+                               "Error code: %i",
+                               error);
                try_destroy_event_trigger(event_trigger);
                return;
        }
@@ -360,20 +341,12 @@ void subscribe_to_event()
        if (show_confirm_dialog("Would you like to set ROI (Region Of Interest)?"))
                add_roi_to_event(event_trigger);
 
-       if (strncmp(event_type,
-                       MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED,
-                       MAX_EVENT_TYPE_LEN) == 0) {
-               error = mv_surveillance_subscribe_event_trigger(
-                       event_trigger,
-                       video_stream_id,
-                       NULL,
-                       detect_person_appeared_cb,
-                       NULL);
-       } else if (strncmp(event_type,
-                                       MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED,
-                                       MAX_EVENT_TYPE_LEN) == 0) {
-                       PRINT_Y("Please create and save face recognition models\n"
-                                       "before subscribing to event. Use mv_face_test_suite.");
+       if (strncmp(event_type, MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED, MAX_EVENT_TYPE_LEN) == 0) {
+               error = mv_surveillance_subscribe_event_trigger(event_trigger, video_stream_id, NULL, detect_person_appeared_cb,
+                                                                                                               NULL);
+       } else if (strncmp(event_type, MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED, MAX_EVENT_TYPE_LEN) == 0) {
+               PRINT_Y("Please create and save face recognition models\n"
+                               "before subscribing to event. Use mv_face_test_suite.");
 
                mv_engine_config_h engine_cfg = NULL;
                error = mv_create_engine_config(&engine_cfg);
@@ -393,17 +366,14 @@ void subscribe_to_event()
                        error = mv_destroy_engine_config(engine_cfg);
                        if (error != MEDIA_VISION_ERROR_NONE)
                                PRINT_E("Failed to destroy engine configuration for event trigger."
-                                               "Error code: %i", error);
+                                               "Error code: %i",
+                                               error);
                        free(event_type);
                        return;
                }
 
-               error = mv_surveillance_subscribe_event_trigger(
-                                       event_trigger,
-                                       video_stream_id,
-                                       engine_cfg,
-                                       person_recognized_cb,
-                                       NULL);
+               error = mv_surveillance_subscribe_event_trigger(event_trigger, video_stream_id, engine_cfg,
+                                                                                                               person_recognized_cb, NULL);
 
                if (error != MEDIA_VISION_ERROR_NONE) {
                        PRINT_E("Subscription failed. Error code: %i.", error);
@@ -411,7 +381,8 @@ void subscribe_to_event()
                        error = mv_destroy_engine_config(engine_cfg);
                        if (error != MEDIA_VISION_ERROR_NONE)
                                PRINT_E("Failed to destroy engine configuration for event trigger."
-                                               "Error code: %i", error);
+                                               "Error code: %i",
+                                               error);
                        free(event_type);
                        return;
                }
@@ -419,16 +390,11 @@ void subscribe_to_event()
                error = mv_destroy_engine_config(engine_cfg);
                if (error != MEDIA_VISION_ERROR_NONE)
                        PRINT_E("Failed to destroy engine configuration for event trigger."
-                                       "Error code: %i", error);
-       } else if (strncmp(event_type,
-                               MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED,
-                               MAX_EVENT_TYPE_LEN) == 0) {
-               error = mv_surveillance_subscribe_event_trigger(
-                                       event_trigger,
-                                       video_stream_id,
-                                       NULL,
-                                       movement_detected_cb,
-                                       NULL);
+                                       "Error code: %i",
+                                       error);
+       } else if (strncmp(event_type, MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, MAX_EVENT_TYPE_LEN) == 0) {
+               error = mv_surveillance_subscribe_event_trigger(event_trigger, video_stream_id, NULL, movement_detected_cb,
+                                                                                                               NULL);
        }
 
        free(event_type);
@@ -447,13 +413,11 @@ void subscribe_to_event()
 void add_roi_to_event(mv_surveillance_event_trigger_h event_trigger)
 {
        int number_of_roi_points = 0;
-       while (input_int("Input number of ROI points (integer value >2):",
-                                       MIN_NUMBER_OF_ROI_POINTS,
-                                       MAX_NUMBER_OF_ROI_POINTS,
-                                       &number_of_roi_points) == -1)
+       while (input_int("Input number of ROI points (integer value >2):", MIN_NUMBER_OF_ROI_POINTS,
+                                        MAX_NUMBER_OF_ROI_POINTS, &number_of_roi_points) == -1)
                PRINT_R("Incorrect input! Try again.");
 
-       mv_point_s* roi = (mv_point_s*) malloc(sizeof(mv_point_s) * number_of_roi_points);
+       mv_point_s *roi = (mv_point_s *) malloc(sizeof(mv_point_s) * number_of_roi_points);
        if (roi == NULL) {
                PRINT_E("Fail to alloc roi err[%d].", MEDIA_VISION_ERROR_OUT_OF_MEMORY);
                return;
@@ -466,31 +430,22 @@ void add_roi_to_event(mv_surveillance_event_trigger_h event_trigger)
        for (; i < number_of_roi_points; ++i) {
                printf("Point %d \n", i + 1);
 
-               while (input_int("Input x (integer value):",
-                                               MIN_ROI_X_COORD,
-                                               MAX_ROI_X_COORD,
-                                               &x) == -1)
+               while (input_int("Input x (integer value):", MIN_ROI_X_COORD, MAX_ROI_X_COORD, &x) == -1)
                        PRINT_R("Incorrect input! Try again.");
 
-               while (input_int("Input y (integer value):",
-                                               MIN_ROI_Y_COORD,
-                                               MAX_ROI_Y_COORD,
-                                               &y) == -1)
+               while (input_int("Input y (integer value):", MIN_ROI_Y_COORD, MAX_ROI_Y_COORD, &y) == -1)
                        PRINT_R("Incorrect input! Try again.");
 
                roi[i].x = x;
                roi[i].y = y;
        }
 
-       const int error = mv_surveillance_set_event_trigger_roi(
-                                               event_trigger,
-                                               number_of_roi_points,
-                                               roi);
+       const int error = mv_surveillance_set_event_trigger_roi(event_trigger, number_of_roi_points, roi);
 
        if (error == MEDIA_VISION_ERROR_NONE)
                PRINT_G("ROI was successfully set")
        else
-               PRINT_R("Setting ROI failed. Please try again") ;
+               PRINT_R("Setting ROI failed. Please try again");
 
        if (roi != NULL)
                free(roi);
@@ -499,22 +454,16 @@ void add_roi_to_event(mv_surveillance_event_trigger_h event_trigger)
 void unsubscribe_from_event()
 {
        int trigger_id = 0;
-       while (input_int("Input event identificator (1-100):",
-                                       1,
-                                       MAX_EVENTS_NUMBER - 1,
-                                       &trigger_id) == -1)
+       while (input_int("Input event identificator (1-100):", 1, MAX_EVENTS_NUMBER - 1, &trigger_id) == -1)
                PRINT_R("Incorrect input! Try again.");
 
        mv_surveillance_event_trigger_h event_trigger = is_subscribed[trigger_id];
        if (NULL == event_trigger) {
-               PRINT_E("Sorry, event trigger with %i identifier wasn't subscribed.",
-                               trigger_id);
+               PRINT_E("Sorry, event trigger with %i identifier wasn't subscribed.", trigger_id);
                return;
        }
 
-       const int error = mv_surveillance_unsubscribe_event_trigger(
-                       event_trigger,
-                       video_streams_ids[trigger_id]);
+       const int error = mv_surveillance_unsubscribe_event_trigger(event_trigger, video_streams_ids[trigger_id]);
        if (error != MEDIA_VISION_ERROR_NONE) {
                PRINT_E("Error with code %d was occured in unsubscribe event.", error);
                return;
@@ -532,15 +481,11 @@ void unsubscribe_from_all_events()
        unsigned int trigger_id = 0;
        int unsubscribed_number = 0;
        for (; trigger_id < MAX_EVENTS_NUMBER; ++trigger_id) {
-               mv_surveillance_event_trigger_h event_trigger =
-                       is_subscribed[trigger_id];
+               mv_surveillance_event_trigger_h event_trigger = is_subscribed[trigger_id];
                if (NULL != event_trigger) {
-                       error = mv_surveillance_unsubscribe_event_trigger(
-                                       event_trigger,
-                                       video_streams_ids[trigger_id]);
+                       error = mv_surveillance_unsubscribe_event_trigger(event_trigger, video_streams_ids[trigger_id]);
                        if (error != MEDIA_VISION_ERROR_NONE) {
-                               PRINT_E("Error with code %d was occurred in unsubscribe event.",
-                                               error);
+                               PRINT_E("Error with code %d was occurred in unsubscribe event.", error);
                                continue;
                        }
                        ++unsubscribed_number;
@@ -553,9 +498,8 @@ void unsubscribe_from_all_events()
                }
        }
 
-       unsubscribed_number > 0 ?
-       PRINT_S("%d event(s) was successfully unsubscribed", unsubscribed_number) :
-       PRINT_Y("\nThere are no triggers can be unsubscribed.");
+       unsubscribed_number > 0 ? PRINT_S("%d event(s) was successfully unsubscribed", unsubscribed_number) :
+                                                         PRINT_Y("\nThere are no triggers can be unsubscribed.");
 }
 
 void push_source()
@@ -563,15 +507,13 @@ void push_source()
        mv_source_h source;
        int error = mv_create_source(&source);
        if (MEDIA_VISION_ERROR_NONE != error) {
-               PRINT_E("ERROR: Errors were occurred during source creating!!! Code %i" ,
-                               error);
+               PRINT_E("ERROR: Errors were occurred during source creating!!! Code %i", error);
                return;
        }
 
        char *path_to_image = NULL;
 
-       while (input_string("Input file name with image to be analyzed:",
-                                               1024, &path_to_image) == -1)
+       while (input_string("Input file name with image to be analyzed:", 1024, &path_to_image) == -1)
                PRINT_R("Incorrect input! Try again.");
 
        error = load_mv_source_from_file(path_to_image, source);
@@ -591,10 +533,7 @@ void push_source()
 
        int video_stream_id = 0;
 
-       while (input_int("Input video stream identificator (integer value):",
-                               INT_MIN,
-                               INT_MAX,
-                               &video_stream_id) == -1)
+       while (input_int("Input video stream identificator (integer value):", INT_MIN, INT_MAX, &video_stream_id) == -1)
                PRINT_R("Incorrect input! Try again.");
 
        error = mv_surveillance_push_source(source, video_stream_id);
@@ -621,18 +560,15 @@ bool fill_engine_cfg_person_recognized(mv_engine_config_h engine_cfg)
        bool ret = false;
        char *path_to_model = NULL;
 
-       while (input_string("Input file name with face recognition model:",
-                                               1024, &path_to_model) == -1)
+       while (input_string("Input file name with face recognition model:", 1024, &path_to_model) == -1)
                PRINT_R("Incorrect input! Try again.");
 
        const int error = mv_engine_config_set_string_attribute(
-                                               engine_cfg,
-                                               MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH,
-                                               path_to_model);
+                       engine_cfg, MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH, path_to_model);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                PRINT_E("Setting path to face recognition model failed, code %i", error);
-       }       else {
+       } else {
                printf("\nModel path is %s \n", path_to_model);
                ret = true;
        }
@@ -647,17 +583,12 @@ void turn_on_off_saving_to_image()
 {
        save_results_to_image = !save_results_to_image;
 
-       save_results_to_image ?
-               PRINT_Y("Save event results to image files ON.") :
-               PRINT_Y("Save event results to image files OFF.");
+       save_results_to_image ? PRINT_Y("Save event results to image files ON.") :
+                                                       PRINT_Y("Save event results to image files OFF.");
 }
 
-void detect_person_appeared_cb(
-       mv_surveillance_event_trigger_h handle,
-       mv_source_h source,
-       int video_stream_id,
-       mv_surveillance_result_h event_result,
-       void *user_data)
+void detect_person_appeared_cb(mv_surveillance_event_trigger_h handle, mv_source_h source, int video_stream_id,
+                                                          mv_surveillance_result_h event_result, void *user_data)
 {
        PRINT_G("Person appeared / disappeared event was occured");
        if (save_results_to_image)
@@ -671,12 +602,10 @@ void detect_person_appeared_cb(
        image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
 
        if (save_results_to_image &&
-                       (mv_source_get_buffer(source, &out_buffer, &buf_size) ||
-                       mv_source_get_width(source, &(image_data.image_width)) ||
-                       mv_source_get_height(source, &(image_data.image_height)) ||
-                       mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
-                       out_buffer == NULL ||
-                       buf_size == 0)) {
+               (mv_source_get_buffer(source, &out_buffer, &buf_size) ||
+                mv_source_get_width(source, &(image_data.image_width)) ||
+                mv_source_get_height(source, &(image_data.image_height)) ||
+                mv_source_get_colorspace(source, &(image_data.image_colorspace)) || out_buffer == NULL || buf_size == 0)) {
                PRINT_R("ERROR: Creating out image is impossible.");
 
                return;
@@ -689,14 +618,13 @@ void detect_person_appeared_cb(
        }
 
        int number_of_appeared_persons = 0;
-       int error = mv_surveillance_get_result_value(
-                                       event_result,
-                                       MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER,
-                                       &number_of_appeared_persons);
+       int error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER,
+                                                                                                &number_of_appeared_persons);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                PRINT_E("Error with code %d was occured in getting number of "
-                               "appeared persons.", error);
+                               "appeared persons.",
+                               error);
                if (out_buffer_copy != NULL)
                        free(out_buffer_copy);
 
@@ -705,17 +633,15 @@ void detect_person_appeared_cb(
 
        printf("\nNumber of appeared persons is %d \n", number_of_appeared_persons);
 
-       mv_rectangle_s *appeared_locations =
-               malloc(sizeof(mv_rectangle_s) * number_of_appeared_persons);
+       mv_rectangle_s *appeared_locations = malloc(sizeof(mv_rectangle_s) * number_of_appeared_persons);
 
-       error = mv_surveillance_get_result_value(
-                               event_result,
-                               MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS,
-                               appeared_locations);
+       error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS,
+                                                                                        appeared_locations);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                PRINT_E("Error with code %d was occured in getting locations of "
-                               "appeared persons.", error);
+                               "appeared persons.",
+                               error);
 
                if (appeared_locations != NULL)
                        free(appeared_locations);
@@ -728,34 +654,24 @@ void detect_person_appeared_cb(
 
        int i = 0;
        for (; i < number_of_appeared_persons; ++i) {
-               printf("Person #%d location is: x - %d, y - %d, w - %d, h - %d.\n",
-                               i,
-                               appeared_locations[i].point.x,
-                               appeared_locations[i].point.y,
-                               appeared_locations[i].width,
-                               appeared_locations[i].height);
+               printf("Person #%d location is: x - %d, y - %d, w - %d, h - %d.\n", i, appeared_locations[i].point.x,
+                          appeared_locations[i].point.y, appeared_locations[i].width, appeared_locations[i].height);
 
                if (save_results_to_image)
-                       draw_rectangle_on_buffer(
-                                               appeared_locations[i].point.x,
-                                               appeared_locations[i].point.y,
-                                               appeared_locations[i].point.x + appeared_locations[i].width,
-                                               appeared_locations[i].point.y + appeared_locations[i].height,
-                                               3,
-                                               green_color,
-                                               &image_data,
-                                               out_buffer_copy);
+                       draw_rectangle_on_buffer(appeared_locations[i].point.x, appeared_locations[i].point.y,
+                                                                        appeared_locations[i].point.x + appeared_locations[i].width,
+                                                                        appeared_locations[i].point.y + appeared_locations[i].height, 3, green_color,
+                                                                        &image_data, out_buffer_copy);
        }
 
        int number_of_tracked_persons = 0;
-       error = mv_surveillance_get_result_value(
-                               event_result,
-                               MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER,
-                               &number_of_tracked_persons);
+       error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER,
+                                                                                        &number_of_tracked_persons);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                PRINT_E("Error with code %d was occured in getting number of "
-                               "tracked persons.", error);
+                               "tracked persons.",
+                               error);
 
                if (appeared_locations != NULL)
                        free(appeared_locations);
@@ -768,17 +684,15 @@ void detect_person_appeared_cb(
 
        printf("\nNumber of tracked persons is %d \n", number_of_tracked_persons);
 
-       mv_rectangle_s *tracked_locations =
-               malloc(sizeof(mv_rectangle_s) * number_of_tracked_persons);
+       mv_rectangle_s *tracked_locations = malloc(sizeof(mv_rectangle_s) * number_of_tracked_persons);
 
-       error = mv_surveillance_get_result_value(
-                               event_result,
-                               MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS,
-                               tracked_locations);
+       error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS,
+                                                                                        tracked_locations);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                PRINT_E("Error with code %d was occured in getting locations of "
-                               "tracked persons.", error);
+                               "tracked persons.",
+                               error);
 
                if (appeared_locations != NULL)
                        free(appeared_locations);
@@ -793,34 +707,24 @@ void detect_person_appeared_cb(
        }
 
        for (i = 0; i < number_of_tracked_persons; ++i) {
-               printf("Person #%d location is: x - %d, y - %d, w - %d, h - %d.\n",
-                               i,
-                               tracked_locations[i].point.x,
-                               tracked_locations[i].point.y,
-                               tracked_locations[i].width,
-                               tracked_locations[i].height);
+               printf("Person #%d location is: x - %d, y - %d, w - %d, h - %d.\n", i, tracked_locations[i].point.x,
+                          tracked_locations[i].point.y, tracked_locations[i].width, tracked_locations[i].height);
 
                if (save_results_to_image)
-                       draw_rectangle_on_buffer(
-                                               tracked_locations[i].point.x,
-                                               tracked_locations[i].point.y,
-                                               tracked_locations[i].point.x + tracked_locations[i].width,
-                                               tracked_locations[i].point.y + tracked_locations[i].height,
-                                               3,
-                                               blue_color,
-                                               &image_data,
-                                               out_buffer_copy);
+                       draw_rectangle_on_buffer(tracked_locations[i].point.x, tracked_locations[i].point.y,
+                                                                        tracked_locations[i].point.x + tracked_locations[i].width,
+                                                                        tracked_locations[i].point.y + tracked_locations[i].height, 3, blue_color,
+                                                                        &image_data, out_buffer_copy);
        }
 
        int number_of_disappeared_persons = 0;
-       error = mv_surveillance_get_result_value(
-                               event_result,
-                               MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER,
-                               &number_of_disappeared_persons);
+       error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER,
+                                                                                        &number_of_disappeared_persons);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                PRINT_E("Error with code %d was occured in getting number of "
-                               "disappeared persons.", error);
+                               "disappeared persons.",
+                               error);
 
                if (appeared_locations != NULL)
                        free(appeared_locations);
@@ -836,17 +740,15 @@ void detect_person_appeared_cb(
 
        printf("\nNumber of disappeared persons is %d \n", number_of_disappeared_persons);
 
-       mv_rectangle_s *disappeared_locations =
-               malloc(sizeof(mv_rectangle_s) * number_of_disappeared_persons);
+       mv_rectangle_s *disappeared_locations = malloc(sizeof(mv_rectangle_s) * number_of_disappeared_persons);
 
-       error = mv_surveillance_get_result_value(
-                               event_result,
-                               MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS,
-                               disappeared_locations);
+       error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS,
+                                                                                        disappeared_locations);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                PRINT_E("Error with code %d was occured in getting locations of "
-                               "disappeared persons.", error);
+                               "disappeared persons.",
+                               error);
 
                if (appeared_locations != NULL)
                        free(appeared_locations);
@@ -864,23 +766,14 @@ void detect_person_appeared_cb(
        }
 
        for (i = 0; i < number_of_disappeared_persons; ++i) {
-               printf("Person #%d location is: x - %d, y - %d, w - %d, h - %d.\n",
-                               i,
-                               disappeared_locations[i].point.x,
-                               disappeared_locations[i].point.y,
-                               disappeared_locations[i].width,
-                               disappeared_locations[i].height);
+               printf("Person #%d location is: x - %d, y - %d, w - %d, h - %d.\n", i, disappeared_locations[i].point.x,
+                          disappeared_locations[i].point.y, disappeared_locations[i].width, disappeared_locations[i].height);
 
                if (save_results_to_image)
-                       draw_rectangle_on_buffer(
-                                               disappeared_locations[i].point.x,
-                                               disappeared_locations[i].point.y,
-                                               disappeared_locations[i].point.x + disappeared_locations[i].width,
-                                               disappeared_locations[i].point.y + disappeared_locations[i].height,
-                                               3,
-                                               red_color,
-                                               &image_data,
-                                               out_buffer_copy);
+                       draw_rectangle_on_buffer(disappeared_locations[i].point.x, disappeared_locations[i].point.y,
+                                                                        disappeared_locations[i].point.x + disappeared_locations[i].width,
+                                                                        disappeared_locations[i].point.y + disappeared_locations[i].height, 3, red_color,
+                                                                        &image_data, out_buffer_copy);
        }
 
        if (save_results_to_image)
@@ -901,12 +794,8 @@ void detect_person_appeared_cb(
                free(out_buffer_copy);
 }
 
-void person_recognized_cb(
-       mv_surveillance_event_trigger_h handle,
-       mv_source_h source,
-       int video_stream_id,
-       mv_surveillance_result_h event_result,
-       void *user_data)
+void person_recognized_cb(mv_surveillance_event_trigger_h handle, mv_source_h source, int video_stream_id,
+                                                 mv_surveillance_result_h event_result, void *user_data)
 {
        PRINT_G("Person recognized event was occurred");
        if (save_results_to_image)
@@ -914,14 +803,11 @@ void person_recognized_cb(
                                "Person recognized locations - red.");
 
        int number_of_persons = 0;
-       int error = mv_surveillance_get_result_value(
-                                       event_result,
-                                       MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER,
-                                       &number_of_persons);
+       int error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER,
+                                                                                                &number_of_persons);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
-               PRINT_E("Error with code %d was occured in getting number of persons.",
-                               error);
+               PRINT_E("Error with code %d was occured in getting number of persons.", error);
                return;
        }
 
@@ -929,14 +815,10 @@ void person_recognized_cb(
 
        mv_rectangle_s *locations = (mv_rectangle_s *) malloc(sizeof(mv_rectangle_s) * number_of_persons);
 
-       error = mv_surveillance_get_result_value(
-                               event_result,
-                               MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS,
-                               locations);
+       error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS, locations);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
-               PRINT_E("Error with code %d was occured in getting locations of persons.",
-                               error);
+               PRINT_E("Error with code %d was occured in getting locations of persons.", error);
 
                if (locations != NULL)
                        free(locations);
@@ -946,14 +828,10 @@ void person_recognized_cb(
 
        int *labels = (int *) malloc(sizeof(int) * number_of_persons);
 
-       error = mv_surveillance_get_result_value(
-                               event_result,
-                               MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS,
-                               labels);
+       error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS, labels);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
-               PRINT_E("Error with code %d was occured in getting labels of persons.",
-                               error);
+               PRINT_E("Error with code %d was occured in getting labels of persons.", error);
 
                if (locations != NULL)
                        free(locations);
@@ -966,14 +844,10 @@ void person_recognized_cb(
 
        double *confidences = (double *) malloc(sizeof(double) * number_of_persons);
 
-       error = mv_surveillance_get_result_value(
-                               event_result,
-                               MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES,
-                               confidences);
+       error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES, confidences);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
-               PRINT_E("Error with code %d was occured in getting confidences of persons.",
-                               error);
+               PRINT_E("Error with code %d was occured in getting confidences of persons.", error);
 
                if (locations != NULL)
                        free(locations);
@@ -992,12 +866,10 @@ void person_recognized_cb(
        image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
 
        if (save_results_to_image &&
-                       (mv_source_get_buffer(source, &out_buffer, &buf_size) ||
-                       mv_source_get_width(source, &(image_data.image_width)) ||
-                       mv_source_get_height(source, &(image_data.image_height)) ||
-                       mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
-                       out_buffer == NULL ||
-                       buf_size == 0)) {
+               (mv_source_get_buffer(source, &out_buffer, &buf_size) ||
+                mv_source_get_width(source, &(image_data.image_width)) ||
+                mv_source_get_height(source, &(image_data.image_height)) ||
+                mv_source_get_colorspace(source, &(image_data.image_colorspace)) || out_buffer == NULL || buf_size == 0)) {
                PRINT_R("ERROR: Creating out image is impossible.");
 
                if (locations != NULL)
@@ -1021,24 +893,16 @@ void person_recognized_cb(
        int i = 0;
        for (; i < number_of_persons; ++i) {
                printf("Person %d:\n", labels[i]);
-               printf("Location is: x - %d, y - %d, w - %d, h - %d.\n",
-                               locations[i].point.x,
-                               locations[i].point.y,
-                               locations[i].width,
-                               locations[i].height);
+               printf("Location is: x - %d, y - %d, w - %d, h - %d.\n", locations[i].point.x, locations[i].point.y,
+                          locations[i].width, locations[i].height);
                printf("Model confidence - %3.2f", confidences[i]);
                printf("\n");
 
                if (save_results_to_image)
-                       draw_rectangle_on_buffer(
-                                               locations[i].point.x,
-                                               locations[i].point.y,
-                                               locations[i].point.x + locations[i].width,
-                                               locations[i].point.y + locations[i].height,
-                                               3,
-                                               red_color,
-                                               &image_data,
-                                               out_buffer_copy);
+                       draw_rectangle_on_buffer(locations[i].point.x, locations[i].point.y,
+                                                                        locations[i].point.x + locations[i].width,
+                                                                        locations[i].point.y + locations[i].height, 3, red_color, &image_data,
+                                                                        out_buffer_copy);
        }
 
        printf("\n");
@@ -1059,12 +923,8 @@ void person_recognized_cb(
                free(out_buffer_copy);
 }
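Each error path in person_recognized_cb frees whichever of the three result arrays exist so far, each behind a NULL check. Since free(NULL) is defined as a no-op by the C standard, the guards are redundant; a sketch of a single cleanup helper that the early returns could share (free_person_results is hypothetical, not in the test):

	/* Hypothetical helper: free(NULL) is well-defined, so no NULL guards are needed. */
	static void free_person_results(mv_rectangle_s *locations, int *labels, double *confidences)
	{
		free(locations);
		free(labels);
		free(confidences);
	}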
 
-void movement_detected_cb(
-       mv_surveillance_event_trigger_h event_trigger,
-       mv_source_h source,
-       int video_stream_id,
-       mv_surveillance_result_h event_result,
-       void *user_data)
+void movement_detected_cb(mv_surveillance_event_trigger_h event_trigger, mv_source_h source, int video_stream_id,
+                                                 mv_surveillance_result_h event_result, void *user_data)
 {
        PRINT_G("Movement detected event was occured");
        if (save_results_to_image)
@@ -1072,31 +932,25 @@ void movement_detected_cb(
                                "Movement detected locations - blue.");
 
        int number_of_movement_regions = 0;
-       int error = mv_surveillance_get_result_value(
-                                       event_result,
-                                       MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS,
-                                       &number_of_movement_regions);
+       int error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS,
+                                                                                                &number_of_movement_regions);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
                PRINT_E("Error with code %d was occured in getting number of "
-                               "movement regions.", error);
+                               "movement regions.",
+                               error);
 
                return;
        }
 
        printf("\nNumber of movement regions is %d \n", number_of_movement_regions);
 
-       mv_rectangle_s *movement_regions =
-               malloc(sizeof(mv_rectangle_s) * number_of_movement_regions);
+       mv_rectangle_s *movement_regions = malloc(sizeof(mv_rectangle_s) * number_of_movement_regions);
 
-       error = mv_surveillance_get_result_value(
-                               event_result,
-                               MV_SURVEILLANCE_MOVEMENT_REGIONS,
-                               movement_regions);
+       error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_MOVEMENT_REGIONS, movement_regions);
 
        if (error != MEDIA_VISION_ERROR_NONE) {
-               PRINT_E("Error with code %d occurred in getting movement regions.",
-                               error);
+               PRINT_E("Error with code %d occurred in getting movement regions.", error);
 
                if (movement_regions != NULL)
                        free(movement_regions);
@@ -1109,12 +963,10 @@ void movement_detected_cb(
        image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
 
        if (save_results_to_image &&
-                       (mv_source_get_buffer(source, &out_buffer, &buf_size) ||
-                       mv_source_get_width(source, &(image_data.image_width)) ||
-                       mv_source_get_height(source, &(image_data.image_height)) ||
-                       mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
-                       out_buffer == NULL ||
-                       buf_size == 0)) {
+               (mv_source_get_buffer(source, &out_buffer, &buf_size) ||
+                mv_source_get_width(source, &(image_data.image_width)) ||
+                mv_source_get_height(source, &(image_data.image_height)) ||
+                mv_source_get_colorspace(source, &(image_data.image_colorspace)) || out_buffer == NULL || buf_size == 0)) {
                PRINT_R("ERROR: Creating out image is impossible.");
 
                if (movement_regions != NULL)
@@ -1131,23 +983,14 @@ void movement_detected_cb(
 
        int i = 0;
        for (; i < number_of_movement_regions; ++i) {
-               printf("Movement #%d region is: x - %d, y - %d, w - %d, h - %d.\n",
-                               i,
-                               movement_regions[i].point.x,
-                               movement_regions[i].point.y,
-                               movement_regions[i].width,
-                               movement_regions[i].height);
+               printf("Movement #%d region is: x - %d, y - %d, w - %d, h - %d.\n", i, movement_regions[i].point.x,
+                          movement_regions[i].point.y, movement_regions[i].width, movement_regions[i].height);
 
                if (save_results_to_image)
-                       draw_rectangle_on_buffer(
-                                               movement_regions[i].point.x,
-                                               movement_regions[i].point.y,
-                                               movement_regions[i].point.x + movement_regions[i].width,
-                                               movement_regions[i].point.y + movement_regions[i].height,
-                                               3,
-                                               blue_color,
-                                               &image_data,
-                                               out_buffer_copy);
+                       draw_rectangle_on_buffer(movement_regions[i].point.x, movement_regions[i].point.y,
+                                                                        movement_regions[i].point.x + movement_regions[i].width,
+                                                                        movement_regions[i].point.y + movement_regions[i].height, 3, blue_color,
+                                                                        &image_data, out_buffer_copy);
        }
 
        printf("\n");
index dd24589..244e8d1 100644 (file)
@@ -27,17 +27,14 @@ using namespace testing;
 using namespace std;
 using namespace MediaVision::Common;
 
-void _tracked_cb(mv_source_h source,
-                               mv_rectangle_s roi,
-                               void *user_data)
+void _tracked_cb(mv_source_h source, mv_rectangle_s roi, void *user_data)
 {
        printf("In callback: roi.x y width height : %d %d %d %d\n", roi.point.x, roi.point.y, roi.width, roi.height);
 }
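_tracked_cb has the shape of the ROI tracker's per-frame result callback: the tracked source plus the updated ROI rectangle. The user_data pointer is how a caller threads its own state through the callback; a sketch of that pattern (the counting variant below is a hypothetical illustration, not part of this test):

	/* Hypothetical variant: user_data carries a frame counter owned by the caller. */
	static void _counting_tracked_cb(mv_source_h source, mv_rectangle_s roi, void *user_data)
	{
		int *frame = (int *) user_data;
		printf("frame %d: roi %d %d %d %d\n", (*frame)++, roi.point.x, roi.point.y, roi.width, roi.height);
	}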
 
 int perform_tracker_configure(mv_engine_config_h engine_cfg)
 {
-       int ret = mv_engine_config_set_int_attribute(
-                       engine_cfg, MV_ROI_TRACKER_TYPE, (int)MV_ROI_TRACKER_TYPE_BALANCE);
+       int ret = mv_engine_config_set_int_attribute(engine_cfg, MV_ROI_TRACKER_TYPE, (int) MV_ROI_TRACKER_TYPE_BALANCE);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                printf("Fail to set roi tracker type");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;