From 5a95dcd9150c279776acc64a633895ca806f56bf Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Thu, 10 Sep 2015 18:08:26 +0900 Subject: [PATCH 01/16] fixed feature support Change-Id: Ifafe4eb5d510109df9a17e48c4f89c5a3d254b21 Signed-off-by: Tae-Young Chung --- src/mv_common.c | 40 ++++++++++++++++++++-------------------- src/mv_face.c | 40 ++++++++++++++++++++-------------------- src/mv_image.c | 36 ++++++++++++++++++------------------ src/mv_private.c | 24 ++---------------------- 4 files changed, 60 insertions(+), 80 deletions(-) diff --git a/src/mv_common.c b/src/mv_common.c index d11ca6b..66f51df 100644 --- a/src/mv_common.c +++ b/src/mv_common.c @@ -21,7 +21,7 @@ int mv_create_source( mv_source_h *source) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_NULL_ARG_CHECK(source); MEDIA_VISION_FUNCTION_ENTER(); @@ -34,7 +34,7 @@ int mv_create_source( int mv_destroy_source( mv_source_h source) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_FUNCTION_ENTER(); @@ -48,7 +48,7 @@ int mv_source_fill_by_media_packet( mv_source_h source, media_packet_h media_packet) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_INSTANCE_CHECK(media_packet); @@ -67,7 +67,7 @@ int mv_source_fill_by_buffer( unsigned int image_height, mv_colorspace_e image_colorspace) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_NULL_ARG_CHECK(data_buffer); @@ -83,7 +83,7 @@ int mv_source_fill_by_buffer( int mv_source_clear( mv_source_h source) { - 
MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_FUNCTION_ENTER(); @@ -98,7 +98,7 @@ int mv_source_get_buffer( unsigned char **data_buffer, unsigned int *buffer_size) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_NULL_ARG_CHECK(data_buffer); MEDIA_VISION_NULL_ARG_CHECK(buffer_size); @@ -114,7 +114,7 @@ int mv_source_get_height( mv_source_h source, unsigned int *image_height) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_NULL_ARG_CHECK(image_height); @@ -129,7 +129,7 @@ int mv_source_get_width( mv_source_h source, unsigned int *image_width) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_NULL_ARG_CHECK(image_width); @@ -144,7 +144,7 @@ int mv_source_get_colorspace( mv_source_h source, mv_colorspace_e *image_colorspace) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_NULL_ARG_CHECK(image_colorspace); @@ -158,7 +158,7 @@ int mv_source_get_colorspace( int mv_create_engine_config( mv_engine_config_h *engine_cfg) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_NULL_ARG_CHECK(engine_cfg); MEDIA_VISION_FUNCTION_ENTER(); @@ -171,7 +171,7 @@ int mv_create_engine_config( int mv_destroy_engine_config( mv_engine_config_h engine_cfg) 
{ - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(engine_cfg); MEDIA_VISION_FUNCTION_ENTER(); @@ -186,7 +186,7 @@ int mv_engine_config_set_double_attribute( const char *name, double value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(engine_cfg); MEDIA_VISION_NULL_ARG_CHECK(name); @@ -203,7 +203,7 @@ int mv_engine_config_set_int_attribute( const char *name, int value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(engine_cfg); MEDIA_VISION_NULL_ARG_CHECK(name); @@ -220,7 +220,7 @@ int mv_engine_config_set_bool_attribute( const char *name, bool value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(engine_cfg); MEDIA_VISION_NULL_ARG_CHECK(name); @@ -237,7 +237,7 @@ int mv_engine_config_set_string_attribute( const char *name, const char *value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(engine_cfg); MEDIA_VISION_NULL_ARG_CHECK(name); MEDIA_VISION_NULL_ARG_CHECK(value); @@ -255,7 +255,7 @@ int mv_engine_config_get_double_attribute( const char *name, double *value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(engine_cfg); MEDIA_VISION_NULL_ARG_CHECK(name); MEDIA_VISION_NULL_ARG_CHECK(value); @@ -273,7 +273,7 @@ int mv_engine_config_get_int_attribute( const char *name, int *value) { - 
MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(engine_cfg); MEDIA_VISION_NULL_ARG_CHECK(name); MEDIA_VISION_NULL_ARG_CHECK(value); @@ -291,7 +291,7 @@ int mv_engine_config_get_bool_attribute( const char *name, bool *value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(engine_cfg); MEDIA_VISION_NULL_ARG_CHECK(name); MEDIA_VISION_NULL_ARG_CHECK(value); @@ -309,7 +309,7 @@ int mv_engine_config_get_string_attribute( const char *name, char **value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(engine_cfg); MEDIA_VISION_NULL_ARG_CHECK(name); MEDIA_VISION_NULL_ARG_CHECK(value); @@ -326,7 +326,7 @@ int mv_engine_config_foreach_supported_attribute( mv_supported_attribute_cb callback, void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); MEDIA_VISION_NULL_ARG_CHECK(callback); MEDIA_VISION_FUNCTION_ENTER(); diff --git a/src/mv_face.c b/src/mv_face.c index bf229a9..2dbb1e7 100644 --- a/src/mv_face.c +++ b/src/mv_face.c @@ -120,7 +120,7 @@ int mv_face_detect( mv_face_detected_cb detected_cb, void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_NULL_ARG_CHECK(detected_cb); @@ -150,7 +150,7 @@ int mv_face_recognize( mv_face_recognized_cb recognized_cb, void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); 
MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_INSTANCE_CHECK(recognition_model); MEDIA_VISION_NULL_ARG_CHECK(recognized_cb); @@ -198,7 +198,7 @@ int mv_face_track( bool do_learn, void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_INSTANCE_CHECK(tracking_model); MEDIA_VISION_NULL_ARG_CHECK(tracked_cb); @@ -240,7 +240,7 @@ int mv_face_eye_condition_recognize( mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_NULL_ARG_CHECK(eye_condition_recognized_cb); @@ -279,7 +279,7 @@ int mv_face_facial_expression_recognize( mv_face_facial_expression_recognized_cb expression_recognized_cb, void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_NULL_ARG_CHECK(expression_recognized_cb); @@ -319,7 +319,7 @@ int mv_face_facial_expression_recognize( int mv_face_recognition_model_create( mv_face_recognition_model_h *recognition_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_NULL_ARG_CHECK(recognition_model); MEDIA_VISION_FUNCTION_ENTER(); @@ -343,7 +343,7 @@ int mv_face_recognition_model_create( int mv_face_recognition_model_destroy( mv_face_recognition_model_h recognition_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); 
MEDIA_VISION_INSTANCE_CHECK(recognition_model); MEDIA_VISION_FUNCTION_ENTER(); @@ -368,7 +368,7 @@ int mv_face_recognition_model_clone( mv_face_recognition_model_h src, mv_face_recognition_model_h *dst) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(src); MEDIA_VISION_NULL_ARG_CHECK(dst); @@ -394,7 +394,7 @@ int mv_face_recognition_model_save( const char *file_name, mv_face_recognition_model_h recognition_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(recognition_model); if (file_name == NULL) @@ -429,7 +429,7 @@ int mv_face_recognition_model_load( const char *file_name, mv_face_recognition_model_h *recognition_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_NULL_ARG_CHECK(recognition_model); if (file_name == NULL) @@ -466,7 +466,7 @@ int mv_face_recognition_model_add( const mv_rectangle_s *example_location, int face_label) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_INSTANCE_CHECK(recognition_model); @@ -504,7 +504,7 @@ int mv_face_recognition_model_reset( mv_face_recognition_model_h recognition_model, int *face_label) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(recognition_model); MEDIA_VISION_FUNCTION_ENTER(); @@ -533,7 +533,7 @@ int mv_face_recognition_model_learn( mv_engine_config_h engine_cfg, mv_face_recognition_model_h recognition_model) { - 
MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(recognition_model); MEDIA_VISION_FUNCTION_ENTER(); @@ -559,7 +559,7 @@ int mv_face_recognition_model_query_labels( int **labels, unsigned int *number_of_labels) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(recognition_model); MEDIA_VISION_NULL_ARG_CHECK(labels); MEDIA_VISION_NULL_ARG_CHECK(number_of_labels); @@ -585,7 +585,7 @@ int mv_face_recognition_model_query_labels( int mv_face_tracking_model_create( mv_face_tracking_model_h *tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_NULL_ARG_CHECK(tracking_model); MEDIA_VISION_FUNCTION_ENTER(); @@ -609,7 +609,7 @@ int mv_face_tracking_model_create( int mv_face_tracking_model_destroy( mv_face_tracking_model_h tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(tracking_model); MEDIA_VISION_FUNCTION_ENTER(); @@ -636,7 +636,7 @@ int mv_face_tracking_model_prepare( mv_source_h source, mv_quadrangle_s *location) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(tracking_model); MEDIA_VISION_INSTANCE_CHECK(source); @@ -669,7 +669,7 @@ int mv_face_tracking_model_clone( mv_face_tracking_model_h src, mv_face_tracking_model_h *dst) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); 
MEDIA_VISION_INSTANCE_CHECK(src); MEDIA_VISION_NULL_ARG_CHECK(dst); @@ -695,7 +695,7 @@ int mv_face_tracking_model_save( const char *file_name, mv_face_tracking_model_h tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(tracking_model); if (file_name == NULL) @@ -730,7 +730,7 @@ int mv_face_tracking_model_load( const char *file_name, mv_face_tracking_model_h *tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); MEDIA_VISION_NULL_ARG_CHECK(tracking_model); if (file_name == NULL) diff --git a/src/mv_image.c b/src/mv_image.c index f041924..5d60e98 100644 --- a/src/mv_image.c +++ b/src/mv_image.c @@ -42,7 +42,7 @@ int mv_image_recognize( mv_image_recognized_cb recognized_cb, void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_NULL_ARG_CHECK(image_objects); int object_num = 0; @@ -79,7 +79,7 @@ int mv_image_track( mv_image_tracked_cb tracked_cb, void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); MEDIA_VISION_NULL_ARG_CHECK(tracked_cb); @@ -105,7 +105,7 @@ int mv_image_track( int mv_image_object_create( mv_image_object_h *image_object) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_NULL_ARG_CHECK(image_object); MEDIA_VISION_FUNCTION_ENTER(); @@ -128,7 +128,7 @@ int mv_image_object_create( int 
mv_image_object_destroy( mv_image_object_h image_object) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(image_object); MEDIA_VISION_FUNCTION_ENTER(); @@ -154,7 +154,7 @@ int mv_image_object_fill( mv_source_h source, mv_rectangle_s *location) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(image_object); MEDIA_VISION_INSTANCE_CHECK(source); @@ -180,7 +180,7 @@ int mv_image_object_get_recognition_rate( mv_image_object_h image_object, double *recognition_rate) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(image_object); MEDIA_VISION_NULL_ARG_CHECK(recognition_rate); @@ -206,7 +206,7 @@ int mv_image_object_set_label( mv_image_object_h image_object, int label) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(image_object); MEDIA_VISION_FUNCTION_ENTER(); @@ -231,7 +231,7 @@ int mv_image_object_get_label( mv_image_object_h image_object, int *label) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(image_object); MEDIA_VISION_NULL_ARG_CHECK(label); @@ -257,7 +257,7 @@ int mv_image_object_clone( mv_image_object_h src, mv_image_object_h *dst) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(src); MEDIA_VISION_NULL_ARG_CHECK(dst); @@ -282,7 +282,7 @@ 
int mv_image_object_clone( int mv_image_object_save( const char *file_name, mv_image_object_h image_object) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(image_object); if (file_name == NULL) @@ -312,7 +312,7 @@ int mv_image_object_save( int mv_image_object_load( const char *file_name, mv_image_object_h *image_object) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_NULL_ARG_CHECK(image_object); if (file_name == NULL) @@ -342,7 +342,7 @@ int mv_image_object_load( int mv_image_tracking_model_create( mv_image_tracking_model_h *image_tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model); MEDIA_VISION_FUNCTION_ENTER(); @@ -367,7 +367,7 @@ int mv_image_tracking_model_set_target( mv_image_object_h image_object, mv_image_tracking_model_h image_tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); MEDIA_VISION_INSTANCE_CHECK(image_object); @@ -392,7 +392,7 @@ int mv_image_tracking_model_set_target( int mv_image_tracking_model_destroy( mv_image_tracking_model_h image_tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); MEDIA_VISION_FUNCTION_ENTER(); @@ -417,7 +417,7 @@ int mv_image_tracking_model_refresh( mv_image_tracking_model_h image_tracking_model, mv_engine_config_h engine_cfg) { - 
MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); MEDIA_VISION_FUNCTION_ENTER(); @@ -446,7 +446,7 @@ int mv_image_tracking_model_clone( mv_image_tracking_model_h src, mv_image_tracking_model_h *dst) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(src); MEDIA_VISION_NULL_ARG_CHECK(dst); @@ -471,7 +471,7 @@ int mv_image_tracking_model_clone( int mv_image_tracking_model_save( const char *file_name, mv_image_tracking_model_h image_tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); if (file_name == NULL) @@ -501,7 +501,7 @@ int mv_image_tracking_model_save( int mv_image_tracking_model_load( const char *file_name, mv_image_tracking_model_h *image_tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model); if (file_name == NULL) diff --git a/src/mv_private.c b/src/mv_private.c index af4e7df..290cf51 100644 --- a/src/mv_private.c +++ b/src/mv_private.c @@ -20,11 +20,6 @@ bool __mv_check_system_info_feature_supported() { - // return true is temporary added to allow correct flow for UTC - // todo: remove when vision.barcode_detection and vision.barcode_generation - // will be added - return true; - bool isBarcodeDetectionSupported = false; bool isBarcodeGenerationSupported = false; bool isFaceRecognitionSupported = false; @@ -56,11 +51,12 @@ bool __mv_check_system_info_feature_supported() const int nRetVal4 = 
system_info_get_platform_bool("http://tizen.org/feature/vision.image_recognition", &isImageRecognitionSupported); - if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) + if (nRetVal4 != SYSTEM_INFO_ERROR_NONE) { LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); return false; } + (isBarcodeDetectionSupported || isBarcodeGenerationSupported || isFaceRecognitionSupported || isImageRecognitionSupported) ? LOGI("system_info_get_platform_bool returned" @@ -78,10 +74,6 @@ bool __mv_check_system_info_feature_supported() bool __mv_barcode_detect_check_system_info_feature_supported() { - // return true is temporary added to allow correct flow for UTC - // todo: remove when vision.barcode_detection feature will be added - return true; - bool isBarcodeDetectionSupported = false; const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_detection", &isBarcodeDetectionSupported); @@ -103,10 +95,6 @@ bool __mv_barcode_detect_check_system_info_feature_supported() bool __mv_barcode_generate_check_system_info_feature_supported() { - // return true is temporary added to allow correct flow for UTC - // todo: remove when vision.barcode_generation feature will be added - return true; - bool isBarcodeGenerationSupported = false; const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_generation", &isBarcodeGenerationSupported); @@ -128,10 +116,6 @@ bool __mv_barcode_generate_check_system_info_feature_supported() bool __mv_face_check_system_info_feature_supported() { - // return true is temporary added to allow correct flow for UTC - // todo: remove when vision.face_recognition feature will be added - return true; - bool isFaceRecognitionSupported = false; const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.face_recognition", &isFaceRecognitionSupported); @@ -152,10 +136,6 @@ bool __mv_face_check_system_info_feature_supported() bool __mv_image_check_system_info_feature_supported() { - // return true is 
temporary added to allow correct flow for UTC - // todo: remove when vision.image_recognition feature will be added - return true; - bool isImageRecognitionSupported = false; const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.image_recognition", &isImageRecognitionSupported); -- 2.7.4 From 819ac85ed4161849caa46f87c88f80f96dafb857 Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Mon, 19 Oct 2015 10:39:29 +0900 Subject: [PATCH 02/16] Changed json-c to json-glib support for EngineConfig Change-Id: I9a1d5143219868a317eb33fd7c5d1c560be2e6ba Signed-off-by: Tae-Young Chung --- CMakeLists.txt | 9 ++-- media-vision-config.json | 46 ++++++++-------- mv_common/CMakeLists.txt | 5 +- mv_common/src/EngineConfig.cpp | 110 ++++++++++++++++++++++++++------------- packaging/capi-media-vision.spec | 5 +- 5 files changed, 108 insertions(+), 67 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 16f4b13..ebd2df1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -57,8 +57,6 @@ else() SET(INC_IMAGE "${PROJECT_SOURCE_DIR}/mv_image/image/include") endif() -INCLUDE_DIRECTORIES(${INC_DIR} ${INC_COMMON} ${INC_BARCODE_DETECTOR} ${INC_BARCODE_GENERATOR} ${INC_IMAGE}) - if(MEDIA_VISION_FACE_LICENSE_PORT) add_definitions(-DMEDIA_VISION_FACE_LICENSE_PORT) SET(INC_FACE "${PROJECT_SOURCE_DIR}/mv_face/face_lic/include") @@ -66,7 +64,12 @@ else() SET(INC_FACE "${PROJECT_SOURCE_DIR}/mv_face/face/include") endif() -INCLUDE_DIRECTORIES(${INC_DIR} ${INC_COMMON} ${INC_BARCODE_DETECTOR} ${INC_BARCODE_GENERATOR} ${INC_FACE}) +INCLUDE_DIRECTORIES(${INC_DIR} + ${INC_COMMON} + ${INC_BARCODE_DETECTOR} + ${INC_BARCODE_GENERATOR} + ${INC_FACE} + ${INC_IMAGE}) SET(dependents "dlog capi-media-tool capi-system-info capi-appfw-application") SET(pc_dependents "dlog") diff --git a/media-vision-config.json b/media-vision-config.json index a3d6eee..9185a1a 100644 --- a/media-vision-config.json +++ b/media-vision-config.json @@ -4,116 +4,116 @@ { "name" : 
"MV_FACE_DETECTION_MODEL_FILE_PATH", "type" : "string", - "value" : "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml", + "value" : "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml" }, { "name" : "MV_FACE_DETECTION_ROI_X", "type" : "integer", - "value" : -1, + "value" : -1 }, { "name" : "MV_FACE_DETECTION_ROI_Y", "type" : "integer", - "value" : -1, + "value" : -1 }, { "name" : "MV_FACE_DETECTION_ROI_WIDTH", "type" : "integer", - "value" : -1, + "value" : -1 }, { "name" : "MV_FACE_DETECTION_ROI_HEIGHT", "type" : "integer", - "value" : -1, + "value" : -1 }, { "name" : "MV_FACE_DETECTION_MIN_SIZE_WIDTH", "type" : "integer", - "value" : -1, + "value" : -1 }, { "name" : "MV_FACE_DETECTION_MIN_SIZE_HEIGHT", "type" : "integer", - "value" : -1, + "value" : -1 }, { "name" : "MV_BARCODE_GENERATE_ATTR_TEXT", "type" : "integer", - "value" : 0, + "value" : 0 }, { "name" : "MV_BARCODE_DETECT_ATTR_TARGET", "type" : "integer", - "value" : 0, + "value" : 0 }, { "name" : "MV_IMAGE_RECOGNITION_OBJECT_SCALE_FACTOR", "type" : "double", - "value" : 1.2, + "value" : 1.2 }, { "name" : "MV_IMAGE_RECOGNITION_OBJECT_MAX_KEYPOINTS_NUM", "type" : "integer", - "value" : 1000, + "value" : 1000 }, { "name" : "MV_IMAGE_RECOGNITION_SCENE_SCALE_FACTOR", "type" : "double", - "value" : 1.2, + "value" : 1.2 }, { "name" : "MV_IMAGE_RECOGNITION_SCENE_MAX_KEYPOINTS_NUM", "type" : "integer", - "value" : 5000, + "value" : 5000 }, { "name" : "MV_IMAGE_RECOGNITION_MIN_MATCH_NUM", "type" : "integer", - "value" : 30, + "value" : 30 }, { "name" : "MV_IMAGE_RECOGNITION_REQ_MATCH_PART", "type" : "double", - "value" : 0.05, + "value" : 0.05 }, { "name" : "MV_IMAGE_RECOGNITION_TOLERANT_MATCH_PART_ERR", "type" : "double", - "value" : 0.1, + "value" : 0.1 }, { "name" : "MV_IMAGE_TRACKING_HISTORY_AMOUNT", "type" : "integer", - "value" : 3, + "value" : 3 }, { "name" : "MV_IMAGE_TRACKING_EXPECTED_OFFSET", "type" : "double", - "value" : 0, + "value" : 0 }, { "name" : 
"MV_IMAGE_TRACKING_USE_STABLIZATION", "type" : "boolean", - "value" : false, + "value" : false }, { "name" : "MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT", "type" : "double", - "value" : 0.006, + "value" : 0.006 }, { "name" : "MV_IMAGE_TRACKING_STABLIZATION_SPEED", "type" : "double", - "value" : 2, + "value" : 2 }, { "name" : "MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION", "type" : "double", - "value" : 0.001, + "value" : 0.001 }, { "name" : "MV_FACE_RECOGNITION_MODEL_TYPE", "type" : "integer", - "value" : 3, + "value" : 3 } ] } diff --git a/mv_common/CMakeLists.txt b/mv_common/CMakeLists.txt index 64fa19a..db991e8 100644 --- a/mv_common/CMakeLists.txt +++ b/mv_common/CMakeLists.txt @@ -25,12 +25,15 @@ else() include_directories(${OpenCV_INCLUDE_DIRS}) endif() +PKG_CHECK_MODULES(JSONGLIB REQUIRED json-glib-1.0) +INCLUDE_DIRECTORIES(${JSONGLIB_INCLUDE_DIRS}) + if(FORCED_STATIC_BUILD) add_library(${PROJECT_NAME} STATIC ${MV_COMMON_INCLUDE_LIST} ${MV_COMMON_SRC_LIST}) else() add_library(${PROJECT_NAME} SHARED ${MV_COMMON_INCLUDE_LIST} ${MV_COMMON_SRC_LIST}) endif() -TARGET_LINK_LIBRARIES(${MV_COMMON_LIB_NAME} jpeg ${OpenCV_LIBS} capi-media-tool tbm json-c) +TARGET_LINK_LIBRARIES(${MV_COMMON_LIB_NAME} jpeg ${OpenCV_LIBS} capi-media-tool tbm json-glib-1.0) INSTALL(TARGETS ${PROJECT_NAME} DESTINATION ${LIB_INSTALL_DIR}) diff --git a/mv_common/src/EngineConfig.cpp b/mv_common/src/EngineConfig.cpp index 3faedf1..64bf1d9 100644 --- a/mv_common/src/EngineConfig.cpp +++ b/mv_common/src/EngineConfig.cpp @@ -18,7 +18,7 @@ #include -#include +#include /** * @file EngineConfig.cpp @@ -244,79 +244,115 @@ int EngineConfig::cacheDictionaries(bool isLazyCache, std::string configFilePath DefBoolDict.clear(); DefStrDict.clear(); - json_object *jobj = json_object_from_file(configFilePath.c_str()); + const char *conf_file = configFilePath.c_str(); + JsonParser *parser; + GError *error = NULL; - enum json_type type = json_object_get_type(jobj); - if (json_type_object != type) + parser = 
json_parser_new(); + json_parser_load_from_file(parser, conf_file, &error); + if (error) { - LOGE("Can't parse engine config file. Incorrect json markup. " - "Supported attributes can't be determined."); - json_object_put(jobj); + LOGW("Unable to parse file '%s': %s\n", conf_file, error->message); + g_error_free(error); + g_object_unref(parser); return MEDIA_VISION_ERROR_NO_DATA; } - json_object *pAttributesObj = json_object_object_get(jobj, "attributes"); - type = json_object_get_type(pAttributesObj); - if (json_type_array != type) + JsonNode *root = json_parser_get_root(parser); + if (JSON_NODE_OBJECT != json_node_get_node_type(root)) { - LOGE("Can't parse engine config file. Incorrect json markup. " - "Supported attributes can't be determined."); - json_object_put(jobj); + LOGW("Can't parse tests configuration file. " + "Incorrect json markup."); + g_object_unref(parser); return MEDIA_VISION_ERROR_NO_DATA; } - const int attrNum = json_object_array_length(pAttributesObj); + JsonObject *jobj = json_node_get_object(root); - for (int attrInd = 0; attrInd < attrNum; ++attrInd) + if (!json_object_has_member(jobj, "attributes")) { - json_object *pAttrObj = - json_object_array_get_idx(pAttributesObj, attrInd); - type = json_object_get_type(pAttrObj); - - json_object *pAttrNameObj = NULL; - json_object *pAttrTypeObj = NULL; - json_object *pAttrValueObj = NULL; - - if (json_type_object != type || - !json_object_object_get_ex(pAttrObj, "name", &pAttrNameObj) || - !json_object_object_get_ex(pAttrObj, "type", &pAttrTypeObj) || - !json_object_object_get_ex(pAttrObj, "value", &pAttrValueObj)) + LOGW("Can't parse tests configuration file. " + "No 'attributes' section."); + g_object_unref(parser); + return MEDIA_VISION_ERROR_NO_DATA; + } + + JsonNode *attr_node = + json_object_get_member(jobj, "attributes"); + + if (JSON_NODE_ARRAY != json_node_get_node_type(attr_node)) + { + LOGW("Can't parse tests configuration file. 
" + "'attributes' section isn't array."); + g_object_unref(parser); + return MEDIA_VISION_ERROR_NO_DATA; + } + + JsonArray *attr_array = json_node_get_array(attr_node); + + const guint attr_num = json_array_get_length(attr_array); + + guint attrInd = 0; + for (; attrInd < attr_num; ++attrInd) + { + JsonNode *attr_node = json_array_get_element(attr_array, attrInd); + + if (JSON_NODE_OBJECT != json_node_get_node_type(attr_node)) + { + LOGW("Attribute %u wasn't parsed from json file.", attrInd); + continue; + } + + JsonObject *attr_obj = json_node_get_object(attr_node); + + if (!json_object_has_member(attr_obj, "name") || + !json_object_has_member(attr_obj, "type") || + !json_object_has_member(attr_obj, "value")) { - LOGW("Attribute %i wasn't parsed from json file.", attrInd); + LOGW("Attribute %u wasn't parsed from json file.", attrInd); continue; } - const char *nameStr = json_object_get_string(pAttrNameObj); - const char *typeStr = json_object_get_string(pAttrTypeObj); + const char *nameStr = + (char*)json_object_get_string_member(attr_obj, "name"); + const char *typeStr = + (char*)json_object_get_string_member(attr_obj, "type"); - if (0 == strcmp("double", typeStr)) + if (NULL == nameStr || NULL == typeStr) + { + LOGW("Attribute %i wasn't parsed from json file. name and/or " + "type of the attribute are parsed as NULL.", attrInd); + continue; + } + else if (0 == strcmp("double", typeStr)) { DefDblDict[std::string(nameStr)] = - json_object_get_double(pAttrValueObj); + (double)json_object_get_double_member(attr_obj, "value"); } else if (0 == strcmp("integer", typeStr)) { DefIntDict[std::string(nameStr)] = - json_object_get_int(pAttrValueObj); + (int)json_object_get_int_member(attr_obj, "value"); } else if (0 == strcmp("boolean", typeStr)) { DefBoolDict[std::string(nameStr)] = - json_object_get_boolean(pAttrValueObj) ? true : false; + json_object_get_boolean_member(attr_obj, "value") ? 
true : false; } else if (0 == strcmp("string", typeStr)) { DefStrDict[std::string(nameStr)] = - json_object_get_string(pAttrValueObj); + (char*)json_object_get_string_member(attr_obj, "value"); } else { - LOGW("Attribute %i:%s wasn't parsed from json file. Type isn't supported.", attrInd, nameStr); + LOGW("Attribute %i:%s wasn't parsed from json file. " + "Type isn't supported.", attrInd, nameStr); continue; } } - json_object_put(jobj); + g_object_unref(parser); isCached = true; } diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index a786d8e..73a4fbb 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.2.1 +Version: 0.2.2 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 @@ -17,8 +17,7 @@ BuildRequires: pkgconfig(glib-2.0) # Change to the pkgconfig(zint) after zint package refactor BuildRequires: zint BuildRequires: zint-devel -BuildRequires: libjson -BuildRequires: libjson-devel +BuildRequires: pkgconfig(json-glib-1.0) BuildRequires: dlogutil BuildRequires: libjpeg-turbo BuildRequires: libjpeg-turbo-devel -- 2.7.4 From cd96fa4795772731a74030246c307a127dfe0eb6 Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Mon, 19 Oct 2015 15:11:02 +0900 Subject: [PATCH 03/16] Applied Tizen C coding rule Change-Id: Ica2b395e6229a60f0474fd4e451f52d1764c9dfd Signed-off-by: Tae-Young Chung --- packaging/capi-media-vision.spec | 2 +- src/mv_barcode.c | 311 ++++++------- src/mv_common.c | 398 ++++++++-------- src/mv_face.c | 964 +++++++++++++++++++-------------------- src/mv_image.c | 476 ++++++++++--------- src/mv_private.c | 146 +++--- 6 files changed, 1127 insertions(+), 1170 deletions(-) diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index 73a4fbb..adf0cee 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ 
Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.2.2 +Version: 0.2.3 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 diff --git a/src/mv_barcode.c b/src/mv_barcode.c index 5d323fe..74ebee9 100644 --- a/src/mv_barcode.c +++ b/src/mv_barcode.c @@ -50,203 +50,188 @@ */ int mv_barcode_detect( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s roi, - mv_barcode_detected_cb detect_cb, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s roi, + mv_barcode_detected_cb detect_cb, + void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_barcode_detect_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_NULL_ARG_CHECK(detect_cb); - - MEDIA_VISION_FUNCTION_ENTER(); - - int ret = MEDIA_VISION_ERROR_NONE; - unsigned int src_w = 0; - unsigned int src_h = 0; - - ret = mv_source_get_width(source, &src_w); - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("mv_source_get_width fail"); - return ret; - } - - ret = mv_source_get_height(source, &src_h); - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("mv_source_get_height fail"); - return ret; - } - - if (roi.point.x < 0 || roi.point.y < 0 || - (roi.point.x + roi.width) > src_w || - (roi.point.y + roi.height) > src_h) - { - LOGE("roi is out of area on source"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + MEDIA_VISION_SUPPORT_CHECK(__mv_barcode_detect_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_NULL_ARG_CHECK(detect_cb); + + MEDIA_VISION_FUNCTION_ENTER(); + + int ret = MEDIA_VISION_ERROR_NONE; + unsigned int src_w = 0; + unsigned int src_h = 0; + + ret = mv_source_get_width(source, &src_w); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("mv_source_get_width fail"); + return ret; + } + + ret = mv_source_get_height(source, &src_h); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("mv_source_get_height fail"); + return ret; + } 
+ + if (roi.point.x < 0 || roi.point.y < 0 || + (roi.point.x + roi.width) > src_w || + (roi.point.y + roi.height) > src_h) { + LOGE("roi is out of area on source"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } #ifdef MEDIA_VISION_BARCODE_DETECTOR_LICENSE_PORT - /* Use licensed barcode detect functionality here. */ - ret = mv_barcode_detect_lic( - source, engine_cfg, roi, detect_cb, user_data); + /* Use licensed barcode detect functionality here. */ + ret = mv_barcode_detect_lic( + source, engine_cfg, roi, detect_cb, user_data); #else - /* Use open barcode detect functionality here. */ - ret = mv_barcode_detect_open( - source, engine_cfg, roi, detect_cb, user_data); + /* Use open barcode detect functionality here. */ + ret = mv_barcode_detect_open( + source, engine_cfg, roi, detect_cb, user_data); #endif /* MEDIA_VISION_BARCODE_DETECTOR_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_barcode_generate_source( - mv_engine_config_h engine_cfg, - const char *message, - mv_barcode_type_e type, - mv_barcode_qr_mode_e qr_enc_mode, - mv_barcode_qr_ecc_e qr_ecc, - int qr_version, - mv_source_h image) + mv_engine_config_h engine_cfg, + const char *message, + mv_barcode_type_e type, + mv_barcode_qr_mode_e qr_enc_mode, + mv_barcode_qr_ecc_e qr_ecc, + int qr_version, + mv_source_h image) { - MEDIA_VISION_SUPPORT_CHECK(__mv_barcode_generate_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(message); - MEDIA_VISION_INSTANCE_CHECK(image); - - MEDIA_VISION_FUNCTION_ENTER(); - - if (type < MV_BARCODE_QR || - type >= MV_BARCODE_UNDEFINED) - { - LOGE("Not supported barcode type [%d]", type); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (type == MV_BARCODE_QR) - { - if (qr_enc_mode < MV_BARCODE_QR_MODE_NUMERIC || - qr_enc_mode >= MV_BARCODE_QR_MODE_UNAVAILABLE) - { - LOGE("Not supported QR encoding mode[%d]", qr_enc_mode); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - 
if (qr_ecc < MV_BARCODE_QR_ECC_LOW || - qr_ecc >= MV_BARCODE_QR_ECC_UNAVAILABLE) - { - LOGE("Not supported QR ECC level [%d]", qr_ecc); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (qr_version < 1 || qr_version > 40) - { - LOGE("Not supported QR version [%d]", qr_version); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - } + MEDIA_VISION_SUPPORT_CHECK(__mv_barcode_generate_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(message); + MEDIA_VISION_INSTANCE_CHECK(image); + + MEDIA_VISION_FUNCTION_ENTER(); + + if (type < MV_BARCODE_QR || + type >= MV_BARCODE_UNDEFINED) { + LOGE("Not supported barcode type [%d]", type); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (type == MV_BARCODE_QR) { + if (qr_enc_mode < MV_BARCODE_QR_MODE_NUMERIC || + qr_enc_mode >= MV_BARCODE_QR_MODE_UNAVAILABLE) { + LOGE("Not supported QR encoding mode[%d]", qr_enc_mode); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (qr_ecc < MV_BARCODE_QR_ECC_LOW || + qr_ecc >= MV_BARCODE_QR_ECC_UNAVAILABLE) { + LOGE("Not supported QR ECC level [%d]", qr_ecc); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (qr_version < 1 || qr_version > 40) { + LOGE("Not supported QR version [%d]", qr_version); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + } #ifdef MEDIA_VISION_BARCODE_GENERATOR_LICENSE_PORT - /* Use licensed barcode generate functionality here. */ - int ret = mv_barcode_generate_source_lic( - engine_cfg, message, type, qr_enc_mode, qr_ecc, qr_version, - image); + /* Use licensed barcode generate functionality here. */ + int ret = mv_barcode_generate_source_lic( + engine_cfg, message, type, qr_enc_mode, qr_ecc, qr_version, + image); #else - /* Use open barcode generate functionality here. */ - int ret = mv_barcode_generate_source_open( - engine_cfg, message, type, qr_enc_mode, qr_ecc, qr_version, - image); + /* Use open barcode generate functionality here. 
*/ + int ret = mv_barcode_generate_source_open( + engine_cfg, message, type, qr_enc_mode, qr_ecc, qr_version, + image); #endif /* MEDIA_VISION_BARCODE_GENERATOR_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_barcode_generate_image( - mv_engine_config_h engine_cfg, - const char *message, - int image_width, - int image_height, - mv_barcode_type_e type, - mv_barcode_qr_mode_e qr_enc_mode, - mv_barcode_qr_ecc_e qr_ecc, - int qr_version, - const char *image_path, - mv_barcode_image_format_e image_format) + mv_engine_config_h engine_cfg, + const char *message, + int image_width, + int image_height, + mv_barcode_type_e type, + mv_barcode_qr_mode_e qr_enc_mode, + mv_barcode_qr_ecc_e qr_ecc, + int qr_version, + const char *image_path, + mv_barcode_image_format_e image_format) { - MEDIA_VISION_SUPPORT_CHECK(__mv_barcode_generate_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(message); - - MEDIA_VISION_FUNCTION_ENTER(); - - if (image_path == NULL) - { - LOGE("image_path is NULL\n"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - if (type < MV_BARCODE_QR || - type >= MV_BARCODE_UNDEFINED) - { - LOGE("Not supported barcode type [%d]", type); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (type == MV_BARCODE_QR) - { - if (qr_enc_mode < MV_BARCODE_QR_MODE_NUMERIC || - qr_enc_mode >= MV_BARCODE_QR_MODE_UNAVAILABLE) - { - LOGE("Not supported QR encoding mode[%d]", qr_enc_mode); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (qr_ecc < MV_BARCODE_QR_ECC_LOW || - qr_ecc >= MV_BARCODE_QR_ECC_UNAVAILABLE) - { - LOGE("Not supported QR ECC level [%d]", qr_ecc); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (qr_version < 1 || qr_version > 40) - { - LOGE("Not supported QR version [%d]", qr_version); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - } - - if (image_format < MV_BARCODE_IMAGE_FORMAT_BMP || - image_format >= MV_BARCODE_IMAGE_FORMAT_NUM) - { - 
LOGE("Not supported image format [%d]", image_format); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + MEDIA_VISION_SUPPORT_CHECK(__mv_barcode_generate_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(message); + + MEDIA_VISION_FUNCTION_ENTER(); + + if (image_path == NULL) { + LOGE("image_path is NULL\n"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + if (type < MV_BARCODE_QR || + type >= MV_BARCODE_UNDEFINED) { + LOGE("Not supported barcode type [%d]", type); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (type == MV_BARCODE_QR) { + if (qr_enc_mode < MV_BARCODE_QR_MODE_NUMERIC || + qr_enc_mode >= MV_BARCODE_QR_MODE_UNAVAILABLE) { + LOGE("Not supported QR encoding mode[%d]", qr_enc_mode); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (qr_ecc < MV_BARCODE_QR_ECC_LOW || + qr_ecc >= MV_BARCODE_QR_ECC_UNAVAILABLE) { + LOGE("Not supported QR ECC level [%d]", qr_ecc); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (qr_version < 1 || qr_version > 40) { + LOGE("Not supported QR version [%d]", qr_version); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + } + + if (image_format < MV_BARCODE_IMAGE_FORMAT_BMP || + image_format >= MV_BARCODE_IMAGE_FORMAT_NUM) { + LOGE("Not supported image format [%d]", image_format); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } #ifdef MEDIA_VISION_BARCODE_GENERATOR_LICENSE_PORT - /* Use licensed barcode generate functionality here. */ - int ret = mv_barcode_generate_image_lic( - engine_cfg, message, image_width, image_height, type, - qr_enc_mode, qr_ecc, qr_version, image_path, image_format); + /* Use licensed barcode generate functionality here. */ + int ret = mv_barcode_generate_image_lic( + engine_cfg, message, image_width, image_height, type, + qr_enc_mode, qr_ecc, qr_version, image_path, image_format); #else - /* Use open barcode generate functionality here. 
*/ - int ret = mv_barcode_generate_image_open( - engine_cfg, message, image_width, image_height, type, - qr_enc_mode, qr_ecc, qr_version, image_path, image_format); + /* Use open barcode generate functionality here. */ + int ret = mv_barcode_generate_image_open( + engine_cfg, message, image_width, image_height, type, + qr_enc_mode, qr_ecc, qr_version, image_path, image_format); #endif /* MEDIA_VISION_BARCODE_GENERATOR_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } diff --git a/src/mv_common.c b/src/mv_common.c index 66f51df..24e0c43 100644 --- a/src/mv_common.c +++ b/src/mv_common.c @@ -19,320 +19,320 @@ #include "mv_common_c.h" int mv_create_source( - mv_source_h *source) + mv_source_h *source) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(source); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(source); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_create_source_c(source); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_create_source_c(source); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_destroy_source( - mv_source_h source) + mv_source_h source) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_destroy_source_c(source); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_destroy_source_c(source); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_source_fill_by_media_packet( - mv_source_h source, - media_packet_h media_packet) + mv_source_h source, + media_packet_h media_packet) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - 
MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_INSTANCE_CHECK(media_packet); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_INSTANCE_CHECK(media_packet); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_source_fill_by_media_packet_c(source, media_packet); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_source_fill_by_media_packet_c(source, media_packet); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_source_fill_by_buffer( - mv_source_h source, - unsigned char *data_buffer, - unsigned int buffer_size, - unsigned int image_width, - unsigned int image_height, - mv_colorspace_e image_colorspace) + mv_source_h source, + unsigned char *data_buffer, + unsigned int buffer_size, + unsigned int image_width, + unsigned int image_height, + mv_colorspace_e image_colorspace) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_NULL_ARG_CHECK(data_buffer); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_NULL_ARG_CHECK(data_buffer); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_source_fill_by_buffer_c( - source, data_buffer, buffer_size, image_width, image_height, - image_colorspace); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_source_fill_by_buffer_c( + source, data_buffer, buffer_size, image_width, image_height, + image_colorspace); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_source_clear( - mv_source_h source) + mv_source_h source) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = 
mv_source_clear_c(source); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_source_clear_c(source); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_source_get_buffer( - mv_source_h source, - unsigned char **data_buffer, - unsigned int *buffer_size) + mv_source_h source, + unsigned char **data_buffer, + unsigned int *buffer_size) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_NULL_ARG_CHECK(data_buffer); - MEDIA_VISION_NULL_ARG_CHECK(buffer_size); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_NULL_ARG_CHECK(data_buffer); + MEDIA_VISION_NULL_ARG_CHECK(buffer_size); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_source_get_buffer_c(source, data_buffer, buffer_size); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_source_get_buffer_c(source, data_buffer, buffer_size); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_source_get_height( - mv_source_h source, - unsigned int *image_height) + mv_source_h source, + unsigned int *image_height) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_NULL_ARG_CHECK(image_height); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_NULL_ARG_CHECK(image_height); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_source_get_height_c(source, image_height); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_source_get_height_c(source, image_height); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_source_get_width( - mv_source_h source, - unsigned int *image_width) + mv_source_h source, + unsigned int *image_width) { - 
MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_NULL_ARG_CHECK(image_width); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_NULL_ARG_CHECK(image_width); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_source_get_width_c(source, image_width); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_source_get_width_c(source, image_width); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_source_get_colorspace( - mv_source_h source, - mv_colorspace_e *image_colorspace) + mv_source_h source, + mv_colorspace_e *image_colorspace) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_NULL_ARG_CHECK(image_colorspace); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_NULL_ARG_CHECK(image_colorspace); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_source_get_colorspace_c(source, image_colorspace); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_source_get_colorspace_c(source, image_colorspace); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_create_engine_config( - mv_engine_config_h *engine_cfg) + mv_engine_config_h *engine_cfg) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(engine_cfg); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(engine_cfg); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_create_engine_config_c(engine_cfg); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_create_engine_config_c(engine_cfg); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_destroy_engine_config( - 
mv_engine_config_h engine_cfg) + mv_engine_config_h engine_cfg) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(engine_cfg); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(engine_cfg); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_destroy_engine_config_c(engine_cfg); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_destroy_engine_config_c(engine_cfg); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_engine_config_set_double_attribute( - mv_engine_config_h engine_cfg, - const char *name, - double value) + mv_engine_config_h engine_cfg, + const char *name, + double value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(engine_cfg); - MEDIA_VISION_NULL_ARG_CHECK(name); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(engine_cfg); + MEDIA_VISION_NULL_ARG_CHECK(name); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_engine_config_set_double_attribute_c( - engine_cfg, name, value); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_engine_config_set_double_attribute_c( + engine_cfg, name, value); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_engine_config_set_int_attribute( - mv_engine_config_h engine_cfg, - const char *name, - int value) + mv_engine_config_h engine_cfg, + const char *name, + int value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(engine_cfg); - MEDIA_VISION_NULL_ARG_CHECK(name); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(engine_cfg); + MEDIA_VISION_NULL_ARG_CHECK(name); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_engine_config_set_int_attribute_c( - engine_cfg, name, value); - 
MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_engine_config_set_int_attribute_c( + engine_cfg, name, value); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_engine_config_set_bool_attribute( - mv_engine_config_h engine_cfg, - const char *name, - bool value) + mv_engine_config_h engine_cfg, + const char *name, + bool value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(engine_cfg); - MEDIA_VISION_NULL_ARG_CHECK(name); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(engine_cfg); + MEDIA_VISION_NULL_ARG_CHECK(name); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_engine_config_set_bool_attribute_c( - engine_cfg, name, value); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_engine_config_set_bool_attribute_c( + engine_cfg, name, value); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_engine_config_set_string_attribute( - mv_engine_config_h engine_cfg, - const char *name, - const char *value) + mv_engine_config_h engine_cfg, + const char *name, + const char *value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(engine_cfg); - MEDIA_VISION_NULL_ARG_CHECK(name); - MEDIA_VISION_NULL_ARG_CHECK(value); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(engine_cfg); + MEDIA_VISION_NULL_ARG_CHECK(name); + MEDIA_VISION_NULL_ARG_CHECK(value); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_engine_config_set_string_attribute_c( - engine_cfg, name, value); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_engine_config_set_string_attribute_c( + engine_cfg, name, value); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_engine_config_get_double_attribute( - mv_engine_config_h engine_cfg, - const char 
*name, - double *value) + mv_engine_config_h engine_cfg, + const char *name, + double *value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(engine_cfg); - MEDIA_VISION_NULL_ARG_CHECK(name); - MEDIA_VISION_NULL_ARG_CHECK(value); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(engine_cfg); + MEDIA_VISION_NULL_ARG_CHECK(name); + MEDIA_VISION_NULL_ARG_CHECK(value); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_engine_config_get_double_attribute_c( - engine_cfg, name, value); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_engine_config_get_double_attribute_c( + engine_cfg, name, value); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_engine_config_get_int_attribute( - mv_engine_config_h engine_cfg, - const char *name, - int *value) + mv_engine_config_h engine_cfg, + const char *name, + int *value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(engine_cfg); - MEDIA_VISION_NULL_ARG_CHECK(name); - MEDIA_VISION_NULL_ARG_CHECK(value); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(engine_cfg); + MEDIA_VISION_NULL_ARG_CHECK(name); + MEDIA_VISION_NULL_ARG_CHECK(value); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_engine_config_get_int_attribute_c( - engine_cfg, name, value); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_engine_config_get_int_attribute_c( + engine_cfg, name, value); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_engine_config_get_bool_attribute( - mv_engine_config_h engine_cfg, - const char *name, - bool *value) + mv_engine_config_h engine_cfg, + const char *name, + bool *value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(engine_cfg); - 
MEDIA_VISION_NULL_ARG_CHECK(name); - MEDIA_VISION_NULL_ARG_CHECK(value); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(engine_cfg); + MEDIA_VISION_NULL_ARG_CHECK(name); + MEDIA_VISION_NULL_ARG_CHECK(value); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_engine_config_get_bool_attribute_c( - engine_cfg, name, value); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_engine_config_get_bool_attribute_c( + engine_cfg, name, value); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_engine_config_get_string_attribute( - mv_engine_config_h engine_cfg, - const char *name, - char **value) + mv_engine_config_h engine_cfg, + const char *name, + char **value) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(engine_cfg); - MEDIA_VISION_NULL_ARG_CHECK(name); - MEDIA_VISION_NULL_ARG_CHECK(value); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(engine_cfg); + MEDIA_VISION_NULL_ARG_CHECK(name); + MEDIA_VISION_NULL_ARG_CHECK(value); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_engine_config_get_string_attribute_c( - engine_cfg, name, value); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = mv_engine_config_get_string_attribute_c( + engine_cfg, name, value); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int mv_engine_config_foreach_supported_attribute( - mv_supported_attribute_cb callback, - void *user_data) + mv_supported_attribute_cb callback, + void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(callback); + MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(callback); - MEDIA_VISION_FUNCTION_ENTER(); - int ret = - mv_engine_config_foreach_supported_attribute_c(callback, user_data); - 
MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + int ret = + mv_engine_config_foreach_supported_attribute_c(callback, user_data); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } diff --git a/src/mv_face.c b/src/mv_face.c index 2dbb1e7..3bbb0f8 100644 --- a/src/mv_face.c +++ b/src/mv_face.c @@ -36,727 +36,709 @@ static const int check_source_roi_quadrangle(mv_quadrangle_s *roi, mv_source_h source) { - int ret = MEDIA_VISION_ERROR_NONE; - - if (roi) - { - int src_w = 0; - int src_h = 0; - - ret = mv_source_get_width(source, &src_w); - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("mv_source_get_width fail"); - return ret; - } - - ret = mv_source_get_height(source, &src_h); - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("mv_source_get_height fail"); - return ret; - } - - int idx = 0; - while (idx < 4) - { - if (roi->points[idx].x < 0 || roi->points[idx].y < 0 || - roi->points[idx].x > src_w || roi->points[idx].y > src_h) - { - LOGE("roi is out of area on source"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - ++idx; - } - } - - return ret; + int ret = MEDIA_VISION_ERROR_NONE; + + if (roi) { + int src_w = 0; + int src_h = 0; + + ret = mv_source_get_width(source, &src_w); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("mv_source_get_width fail"); + return ret; + } + + ret = mv_source_get_height(source, &src_h); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("mv_source_get_height fail"); + return ret; + } + + int idx = 0; + while (idx < 4) { + if (roi->points[idx].x < 0 || roi->points[idx].y < 0 || + roi->points[idx].x > src_w || roi->points[idx].y > src_h) { + LOGE("roi is out of area on source"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + ++idx; + } + } + + return ret; } static const int check_source_roi(mv_rectangle_s *roi, mv_source_h source) { - int ret = MEDIA_VISION_ERROR_NONE; - - if (roi) - { - int src_w = 0; - int src_h = 0; - - ret = mv_source_get_width(source, &src_w); - if (ret != MEDIA_VISION_ERROR_NONE) - 
{ - LOGE("mv_source_get_width fail"); - return ret; - } - - ret = mv_source_get_height(source, &src_h); - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("mv_source_get_height fail"); - return ret; - } - - if (roi->width <= 0 || roi->height <= 0) - { - LOGE("roi has negative width or height"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (roi->point.x < 0 || roi->point.y < 0 || - (roi->point.x + roi->width) > src_w || - (roi->point.y + roi->height) > src_h) - { - LOGE("roi is out of area on source"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - } - - return ret; + int ret = MEDIA_VISION_ERROR_NONE; + + if (roi) { + int src_w = 0; + int src_h = 0; + + ret = mv_source_get_width(source, &src_w); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("mv_source_get_width fail"); + return ret; + } + + ret = mv_source_get_height(source, &src_h); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("mv_source_get_height fail"); + return ret; + } + + if (roi->width <= 0 || roi->height <= 0) { + LOGE("roi has negative width or height"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (roi->point.x < 0 || roi->point.y < 0 || + (roi->point.x + roi->width) > src_w || + (roi->point.y + roi->height) > src_h) { + LOGE("roi is out of area on source"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + } + + return ret; } int mv_face_detect( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_face_detected_cb detected_cb, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_face_detected_cb detected_cb, + void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_NULL_ARG_CHECK(detected_cb); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_NULL_ARG_CHECK(detected_cb); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = 
MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_detect_lic(source, engine_cfg, detected_cb, user_data); + ret = mv_face_detect_lic(source, engine_cfg, detected_cb, user_data); - #else +#else - ret = mv_face_detect_open(source, engine_cfg, detected_cb, user_data); + ret = mv_face_detect_open(source, engine_cfg, detected_cb, user_data); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_recognize( - mv_source_h source, - mv_face_recognition_model_h recognition_model, - mv_engine_config_h engine_cfg, - mv_rectangle_s *face_location, - mv_face_recognized_cb recognized_cb, - void *user_data) + mv_source_h source, + mv_face_recognition_model_h recognition_model, + mv_engine_config_h engine_cfg, + mv_rectangle_s *face_location, + mv_face_recognized_cb recognized_cb, + void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_INSTANCE_CHECK(recognition_model); - MEDIA_VISION_NULL_ARG_CHECK(recognized_cb); - - MEDIA_VISION_FUNCTION_ENTER(); - - int ret = check_source_roi(face_location, source); - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Errors occured when check source and ROI"); - return ret; - } - - #ifdef MEDIA_VISION_FACE_LICENSE_PORT - - ret = mv_face_recognize_lic( - source, - recognition_model, - engine_cfg, - face_location, - recognized_cb, - user_data); - - #else - - ret = mv_face_recognize_open( - source, - recognition_model, - engine_cfg, - face_location, - recognized_cb, - user_data); - - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + 
MEDIA_VISION_INSTANCE_CHECK(recognition_model); + MEDIA_VISION_NULL_ARG_CHECK(recognized_cb); + + MEDIA_VISION_FUNCTION_ENTER(); + + int ret = check_source_roi(face_location, source); + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Errors occured when check source and ROI"); + return ret; + } + +#ifdef MEDIA_VISION_FACE_LICENSE_PORT + + ret = mv_face_recognize_lic( + source, + recognition_model, + engine_cfg, + face_location, + recognized_cb, + user_data); + +#else + + ret = mv_face_recognize_open( + source, + recognition_model, + engine_cfg, + face_location, + recognized_cb, + user_data); + +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ + + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_track( - mv_source_h source, - mv_face_tracking_model_h tracking_model, - mv_engine_config_h engine_cfg, - mv_face_tracked_cb tracked_cb, - bool do_learn, - void *user_data) + mv_source_h source, + mv_face_tracking_model_h tracking_model, + mv_engine_config_h engine_cfg, + mv_face_tracked_cb tracked_cb, + bool do_learn, + void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_INSTANCE_CHECK(tracking_model); - MEDIA_VISION_NULL_ARG_CHECK(tracked_cb); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_INSTANCE_CHECK(tracking_model); + MEDIA_VISION_NULL_ARG_CHECK(tracked_cb); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_track_lic( - source, - tracking_model, - engine_cfg, - tracked_cb, - do_learn, - user_data); + ret = mv_face_track_lic( + source, + tracking_model, + engine_cfg, + tracked_cb, + do_learn, + user_data); - #else +#else - ret = mv_face_track_open( - source, - tracking_model, - engine_cfg, - 
tracked_cb, - do_learn, - user_data); + ret = mv_face_track_open( + source, + tracking_model, + engine_cfg, + tracked_cb, + do_learn, + user_data); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ + #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_eye_condition_recognize( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s face_location, - mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s face_location, + mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, + void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_NULL_ARG_CHECK(eye_condition_recognized_cb); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_NULL_ARG_CHECK(eye_condition_recognized_cb); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_eye_condition_recognize_lic( - source, - engine_cfg, - face_location, - eye_condition_recognized_cb, - user_data); + ret = mv_face_eye_condition_recognize_lic( + source, + engine_cfg, + face_location, + eye_condition_recognized_cb, + user_data); - #else +#else - ret = mv_face_eye_condition_recognize_open( - source, - engine_cfg, - face_location, - eye_condition_recognized_cb, - user_data); + ret = mv_face_eye_condition_recognize_open( + source, + engine_cfg, + face_location, + eye_condition_recognized_cb, + user_data); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; 
+MEDIA_VISION_FUNCTION_LEAVE(); +return ret; } int mv_face_facial_expression_recognize( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s face_location, - mv_face_facial_expression_recognized_cb expression_recognized_cb, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s face_location, + mv_face_facial_expression_recognized_cb expression_recognized_cb, + void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_NULL_ARG_CHECK(expression_recognized_cb); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_NULL_ARG_CHECK(expression_recognized_cb); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = check_source_roi(&face_location, source); - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Errors occurred when check source and ROI"); - return ret; - } + int ret = check_source_roi(&face_location, source); + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Errors occurred when check source and ROI"); + return ret; + } - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_facial_expression_recognize_lic( - source, - engine_cfg, - face_location, - expression_recognized_cb, - user_data); + ret = mv_face_facial_expression_recognize_lic( + source, + engine_cfg, + face_location, + expression_recognized_cb, + user_data); - #else +#else - ret = mv_face_facial_expression_recognize_open( - source, - engine_cfg, - face_location, - expression_recognized_cb, - user_data); + ret = mv_face_facial_expression_recognize_open( + source, + engine_cfg, + face_location, + expression_recognized_cb, + user_data); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } 
int mv_face_recognition_model_create( - mv_face_recognition_model_h *recognition_model) + mv_face_recognition_model_h *recognition_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(recognition_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(recognition_model); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_recognition_model_create_lic(recognition_model); + ret = mv_face_recognition_model_create_lic(recognition_model); - #else +#else - ret = mv_face_recognition_model_create_open(recognition_model); + ret = mv_face_recognition_model_create_open(recognition_model); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_recognition_model_destroy( - mv_face_recognition_model_h recognition_model) + mv_face_recognition_model_h recognition_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(recognition_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(recognition_model); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_recognition_model_destroy_lic(recognition_model); + ret = mv_face_recognition_model_destroy_lic(recognition_model); - #else +#else - ret = mv_face_recognition_model_destroy_open(recognition_model); + ret = mv_face_recognition_model_destroy_open(recognition_model); - #endif /* 
MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; +MEDIA_VISION_FUNCTION_LEAVE(); +return ret; } int mv_face_recognition_model_clone( - mv_face_recognition_model_h src, - mv_face_recognition_model_h *dst) + mv_face_recognition_model_h src, + mv_face_recognition_model_h *dst) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(src); - MEDIA_VISION_NULL_ARG_CHECK(dst); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(src); + MEDIA_VISION_NULL_ARG_CHECK(dst); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_recognition_model_clone_lic(src, dst); + ret = mv_face_recognition_model_clone_lic(src, dst); - #else +#else - ret = mv_face_recognition_model_clone_open(src, dst); + ret = mv_face_recognition_model_clone_open(src, dst); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_recognition_model_save( - const char *file_name, - mv_face_recognition_model_h recognition_model) + const char *file_name, + mv_face_recognition_model_h recognition_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(recognition_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(recognition_model); - if (file_name == NULL) - { - LOGE("File name is NULL. The file name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + if (file_name == NULL) { + LOGE("File name is NULL. 
The file name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_recognition_model_save_lic( - file_name, - recognition_model); + ret = mv_face_recognition_model_save_lic( + file_name, + recognition_model); - #else +#else - ret = mv_face_recognition_model_save_open( - file_name, - recognition_model); + ret = mv_face_recognition_model_save_open( + file_name, + recognition_model); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_recognition_model_load( - const char *file_name, - mv_face_recognition_model_h *recognition_model) + const char *file_name, + mv_face_recognition_model_h *recognition_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(recognition_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(recognition_model); - if (file_name == NULL) - { - LOGE("File name is NULL. The file name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + if (file_name == NULL) { + LOGE("File name is NULL. 
The file name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_recognition_model_load_lic( - file_name, - recognition_model); + ret = mv_face_recognition_model_load_lic( + file_name, + recognition_model); - #else +#else - ret = mv_face_recognition_model_load_open( - file_name, - recognition_model); + ret = mv_face_recognition_model_load_open( + file_name, + recognition_model); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_recognition_model_add( - const mv_source_h source, - mv_face_recognition_model_h recognition_model, - const mv_rectangle_s *example_location, - int face_label) + const mv_source_h source, + mv_face_recognition_model_h recognition_model, + const mv_rectangle_s *example_location, + int face_label) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_INSTANCE_CHECK(recognition_model); - - MEDIA_VISION_FUNCTION_ENTER(); - - int ret = check_source_roi(example_location, source); - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Errors occured when check source and ROI"); - return ret; - } - - #ifdef MEDIA_VISION_FACE_LICENSE_PORT - - ret = mv_face_recognition_model_add_lic( - source, - recognition_model, - example_location, - face_label); - #else - - ret = mv_face_recognition_model_add_open( - source, - recognition_model, - example_location, - face_label); - - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + 
MEDIA_VISION_INSTANCE_CHECK(recognition_model); + + MEDIA_VISION_FUNCTION_ENTER(); + + int ret = check_source_roi(example_location, source); + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Errors occured when check source and ROI"); + return ret; + } + +#ifdef MEDIA_VISION_FACE_LICENSE_PORT + + ret = mv_face_recognition_model_add_lic( + source, + recognition_model, + example_location, + face_label); +#else + +ret = mv_face_recognition_model_add_open( + source, + recognition_model, + example_location, + face_label); + +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ + + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_recognition_model_reset( - mv_face_recognition_model_h recognition_model, - int *face_label) + mv_face_recognition_model_h recognition_model, + int *face_label) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(recognition_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(recognition_model); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_recognition_model_reset_lic( - recognition_model, - face_label); + ret = mv_face_recognition_model_reset_lic( + recognition_model, + face_label); - #else +#else - ret = mv_face_recognition_model_reset_open( - recognition_model, - face_label); +ret = mv_face_recognition_model_reset_open( + recognition_model, + face_label); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_recognition_model_learn( - mv_engine_config_h engine_cfg, - mv_face_recognition_model_h recognition_model) + mv_engine_config_h engine_cfg, + mv_face_recognition_model_h recognition_model) 
{ - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(recognition_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(recognition_model); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_recognition_model_learn_lic(engine_cfg, recognition_model); + ret = mv_face_recognition_model_learn_lic(engine_cfg, recognition_model); - #else +#else - ret = mv_face_recognition_model_learn_open(engine_cfg, recognition_model); + ret = mv_face_recognition_model_learn_open(engine_cfg, recognition_model); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_recognition_model_query_labels( - mv_face_recognition_model_h recognition_model, - int **labels, - unsigned int *number_of_labels) + mv_face_recognition_model_h recognition_model, + int **labels, + unsigned int *number_of_labels) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(recognition_model); - MEDIA_VISION_NULL_ARG_CHECK(labels); - MEDIA_VISION_NULL_ARG_CHECK(number_of_labels); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(recognition_model); + MEDIA_VISION_NULL_ARG_CHECK(labels); + MEDIA_VISION_NULL_ARG_CHECK(number_of_labels); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_recognition_model_query_labels_lic(recognition_model, labels, number_of_labels); + ret = 
mv_face_recognition_model_query_labels_lic(recognition_model, labels, number_of_labels); - #else +#else - ret = mv_face_recognition_model_query_labels_open(recognition_model, labels, number_of_labels); + ret = mv_face_recognition_model_query_labels_open(recognition_model, labels, number_of_labels); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; +MEDIA_VISION_FUNCTION_LEAVE(); +return ret; } int mv_face_tracking_model_create( - mv_face_tracking_model_h *tracking_model) + mv_face_tracking_model_h *tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(tracking_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(tracking_model); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_tracking_model_create_lic(tracking_model); + ret = mv_face_tracking_model_create_lic(tracking_model); - #else +#else - ret = mv_face_tracking_model_create_open(tracking_model); + ret = mv_face_tracking_model_create_open(tracking_model); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_tracking_model_destroy( - mv_face_tracking_model_h tracking_model) + mv_face_tracking_model_h tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(tracking_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(tracking_model); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; 
+ int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_tracking_model_destroy_lic(tracking_model); + ret = mv_face_tracking_model_destroy_lic(tracking_model); - #else +#else - ret = mv_face_tracking_model_destroy_open(tracking_model); + ret = mv_face_tracking_model_destroy_open(tracking_model); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_tracking_model_prepare( - mv_face_tracking_model_h tracking_model, - mv_engine_config_h engine_cfg, - mv_source_h source, - mv_quadrangle_s *location) + mv_face_tracking_model_h tracking_model, + mv_engine_config_h engine_cfg, + mv_source_h source, + mv_quadrangle_s *location) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(tracking_model); - MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(tracking_model); + MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = check_source_roi_quadrangle(location, source); - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Errors occured when check source and tracking start location"); - return ret; - } + int ret = check_source_roi_quadrangle(location, source); + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Errors occured when check source and tracking start location"); + return ret; + } - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_tracking_model_prepare_lic( - tracking_model, engine_cfg, source, location); + ret = mv_face_tracking_model_prepare_lic( + tracking_model, engine_cfg, source, location); - #else +#else - ret = mv_face_tracking_model_prepare_open( - tracking_model, engine_cfg, source, 
location); + ret = mv_face_tracking_model_prepare_open( + tracking_model, engine_cfg, source, location); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_tracking_model_clone( - mv_face_tracking_model_h src, - mv_face_tracking_model_h *dst) + mv_face_tracking_model_h src, + mv_face_tracking_model_h *dst) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(src); - MEDIA_VISION_NULL_ARG_CHECK(dst); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(src); + MEDIA_VISION_NULL_ARG_CHECK(dst); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_tracking_model_clone_lic(src, dst); + ret = mv_face_tracking_model_clone_lic(src, dst); - #else +#else - ret = mv_face_tracking_model_clone_open(src, dst); + ret = mv_face_tracking_model_clone_open(src, dst); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_tracking_model_save( - const char *file_name, - mv_face_tracking_model_h tracking_model) + const char *file_name, + mv_face_tracking_model_h tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(tracking_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(tracking_model); - if (file_name == NULL) - { - LOGE("File name is NULL. 
The file name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + if (file_name == NULL) { + LOGE("File name is NULL. The file name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_tracking_model_save_lic( - file_name, - tracking_model); +ret = mv_face_tracking_model_save_lic( + file_name, + tracking_model); - #else +#else - ret = mv_face_tracking_model_save_open( - file_name, - tracking_model); + ret = mv_face_tracking_model_save_open( + file_name, + tracking_model); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_face_tracking_model_load( - const char *file_name, - mv_face_tracking_model_h *tracking_model) + const char *file_name, + mv_face_tracking_model_h *tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(tracking_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(tracking_model); - if (file_name == NULL) - { - LOGE("File name is NULL. The file name has to be specifiled"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + if (file_name == NULL) { + LOGE("File name is NULL. 
The file name has to be specifiled"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int ret = MEDIA_VISION_ERROR_NONE; + int ret = MEDIA_VISION_ERROR_NONE; - #ifdef MEDIA_VISION_FACE_LICENSE_PORT +#ifdef MEDIA_VISION_FACE_LICENSE_PORT - ret = mv_face_tracking_model_load_lic( - file_name, - tracking_model); + ret = mv_face_tracking_model_load_lic( + file_name, + tracking_model); - #else +#else - ret = mv_face_tracking_model_load_open( - file_name, - tracking_model); +ret = mv_face_tracking_model_load_open( + file_name, + tracking_model); - #endif /* MEDIA_VISION_FACE_LICENSE_PORT */ +#endif /* MEDIA_VISION_FACE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } diff --git a/src/mv_image.c b/src/mv_image.c index 5d60e98..f6a4000 100644 --- a/src/mv_image.c +++ b/src/mv_image.c @@ -35,495 +35,491 @@ */ int mv_image_recognize( - mv_source_h source, - const mv_image_object_h *image_objects, - int number_of_objects, - mv_engine_config_h engine_cfg, - mv_image_recognized_cb recognized_cb, - void *user_data) + mv_source_h source, + const mv_image_object_h *image_objects, + int number_of_objects, + mv_engine_config_h engine_cfg, + mv_image_recognized_cb recognized_cb, + void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_NULL_ARG_CHECK(image_objects); - int object_num = 0; - for (; object_num < number_of_objects; ++object_num) - { - MEDIA_VISION_INSTANCE_CHECK(image_objects[object_num]); - } - MEDIA_VISION_NULL_ARG_CHECK(recognized_cb); - - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_NULL_ARG_CHECK(image_objects); + + int object_num = 0; + for (; object_num < number_of_objects; ++object_num) + 
MEDIA_VISION_INSTANCE_CHECK(image_objects[object_num]); + + MEDIA_VISION_NULL_ARG_CHECK(recognized_cb); + + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_recognize_lic(source, image_objects, - number_of_objects, engine_cfg, recognized_cb, user_data); + /* Use licensed image functionality here. */ + int ret = mv_image_recognize_lic(source, image_objects, + number_of_objects, engine_cfg, recognized_cb, user_data); #else - /* Use open image functionality here. */ - int ret = mv_image_recognize_open(source, image_objects, - number_of_objects, engine_cfg, recognized_cb, user_data); +/* Use open image functionality here. */ + int ret = mv_image_recognize_open(source, image_objects, + number_of_objects, engine_cfg, recognized_cb, user_data); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_track( - mv_source_h source, - mv_image_tracking_model_h image_tracking_model, - mv_engine_config_h engine_cfg, - mv_image_tracked_cb tracked_cb, - void *user_data) + mv_source_h source, + mv_image_tracking_model_h image_tracking_model, + mv_engine_config_h engine_cfg, + mv_image_tracked_cb tracked_cb, + void *user_data) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); - MEDIA_VISION_NULL_ARG_CHECK(tracked_cb); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); + MEDIA_VISION_NULL_ARG_CHECK(tracked_cb); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. 
*/ - int ret = mv_image_track_lic(source, image_tracking_model, engine_cfg, tracked_cb, user_data); + /* Use licensed image functionality here. */ + int ret = mv_image_track_lic(source, image_tracking_model, engine_cfg, tracked_cb, user_data); #else - /* Use open image functionality here. */ - int ret = mv_image_track_open(source, image_tracking_model, engine_cfg, tracked_cb, user_data); + /* Use open image functionality here. */ + int ret = mv_image_track_open(source, image_tracking_model, engine_cfg, tracked_cb, user_data); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_object_create( - mv_image_object_h *image_object) + mv_image_object_h *image_object) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(image_object); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(image_object); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_object_create_lic(image_object); + /* Use licensed image functionality here. */ + int ret = mv_image_object_create_lic(image_object); #else - /* Use open image functionality here. */ - int ret = mv_image_object_create_open(image_object); + /* Use open image functionality here. 
*/ + int ret = mv_image_object_create_open(image_object); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_object_destroy( - mv_image_object_h image_object) + mv_image_object_h image_object) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(image_object); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_object_destroy_lic(image_object); + /* Use licensed image functionality here. */ + int ret = mv_image_object_destroy_lic(image_object); #else - /* Use open image functionality here. */ - int ret = mv_image_object_destroy_open(image_object); + /* Use open image functionality here. */ + int ret = mv_image_object_destroy_open(image_object); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_object_fill( - mv_image_object_h image_object, - mv_engine_config_h engine_cfg, - mv_source_h source, - mv_rectangle_s *location) + mv_image_object_h image_object, + mv_engine_config_h engine_cfg, + mv_source_h source, + mv_rectangle_s *location) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(image_object); - MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. 
*/ - int ret = mv_image_object_fill_lic(image_object, engine_cfg, source, location); + /* Use licensed image functionality here. */ + int ret = mv_image_object_fill_lic(image_object, engine_cfg, source, location); #else - /* Use open image functionality here. */ - int ret = mv_image_object_fill_open(image_object, engine_cfg, source, location); + /* Use open image functionality here. */ + int ret = mv_image_object_fill_open(image_object, engine_cfg, source, location); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_object_get_recognition_rate( - mv_image_object_h image_object, - double *recognition_rate) + mv_image_object_h image_object, + double *recognition_rate) { MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(image_object); - MEDIA_VISION_NULL_ARG_CHECK(recognition_rate); + MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_NULL_ARG_CHECK(recognition_rate); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_object_get_recognition_rate_lic(image_object, recognition_rate); + /* Use licensed image functionality here. */ + int ret = mv_image_object_get_recognition_rate_lic(image_object, recognition_rate); #else - /* Use open image functionality here. */ - int ret = mv_image_object_get_recognition_rate_open(image_object, recognition_rate); + /* Use open image functionality here. 
*/ + int ret = mv_image_object_get_recognition_rate_open(image_object, recognition_rate); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_object_set_label( - mv_image_object_h image_object, - int label) + mv_image_object_h image_object, + int label) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(image_object); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_object_set_label_lic(image_object, label); + /* Use licensed image functionality here. */ + int ret = mv_image_object_set_label_lic(image_object, label); #else - /* Use open image functionality here. */ - int ret = mv_image_object_set_label_open(image_object, label); + /* Use open image functionality here. */ + int ret = mv_image_object_set_label_open(image_object, label); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_object_get_label( - mv_image_object_h image_object, - int *label) + mv_image_object_h image_object, + int *label) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(image_object); - MEDIA_VISION_NULL_ARG_CHECK(label); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_NULL_ARG_CHECK(label); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. 
*/ - int ret = mv_image_object_get_label_lic(image_object, label); + /* Use licensed image functionality here. */ + int ret = mv_image_object_get_label_lic(image_object, label); #else - /* Use open image functionality here. */ - int ret = mv_image_object_get_label_open(image_object, label); + /* Use open image functionality here. */ + int ret = mv_image_object_get_label_open(image_object, label); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_object_clone( - mv_image_object_h src, - mv_image_object_h *dst) + mv_image_object_h src, + mv_image_object_h *dst) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(src); - MEDIA_VISION_NULL_ARG_CHECK(dst); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(src); + MEDIA_VISION_NULL_ARG_CHECK(dst); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_object_clone_lic(src, dst); + /* Use licensed image functionality here. */ + int ret = mv_image_object_clone_lic(src, dst); #else - /* Use open image functionality here. */ - int ret = mv_image_object_clone_open(src, dst); + /* Use open image functionality here. 
*/ + int ret = mv_image_object_clone_open(src, dst); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_object_save( - const char *file_name, mv_image_object_h image_object) + const char *file_name, mv_image_object_h image_object) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(image_object); - if (file_name == NULL) - { - LOGE("File name is NULL. The file name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + if (file_name == NULL) { + LOGE("File name is NULL. The file name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_object_save_lic(file_name, image_object); + /* Use licensed image functionality here. */ + int ret = mv_image_object_save_lic(file_name, image_object); #else - /* Use open image functionality here. */ - int ret = mv_image_object_save_open(file_name, image_object); + /* Use open image functionality here. */ + int ret = mv_image_object_save_open(file_name, image_object); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_object_load( - const char *file_name, mv_image_object_h *image_object) + const char *file_name, mv_image_object_h *image_object) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(image_object); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(image_object); - if (file_name == NULL) - { - LOGE("file name is NULL. 
The file name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + if (file_name == NULL) { + LOGE("file name is NULL. The file name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_object_load_lic(file_name, image_object); + /* Use licensed image functionality here. */ + int ret = mv_image_object_load_lic(file_name, image_object); #else - /* Use open image functionality here. */ - int ret = mv_image_object_load_open(file_name, image_object); + /* Use open image functionality here. */ + int ret = mv_image_object_load_open(file_name, image_object); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_tracking_model_create( - mv_image_tracking_model_h *image_tracking_model) + mv_image_tracking_model_h *image_tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_tracking_model_create_lic(image_tracking_model); + /* Use licensed image functionality here. */ + int ret = mv_image_tracking_model_create_lic(image_tracking_model); #else - /* Use open image functionality here. */ - int ret = mv_image_tracking_model_create_open(image_tracking_model); + /* Use open image functionality here. 
*/ + int ret = mv_image_tracking_model_create_open(image_tracking_model); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_tracking_model_set_target( - mv_image_object_h image_object, - mv_image_tracking_model_h image_tracking_model) + mv_image_object_h image_object, + mv_image_tracking_model_h image_tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); - MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); + MEDIA_VISION_INSTANCE_CHECK(image_object); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_tracking_model_set_target_lic(image_object, image_tracking_model); + /* Use licensed image functionality here. */ + int ret = mv_image_tracking_model_set_target_lic(image_object, image_tracking_model); #else - /* Use open image functionality here. */ - int ret = mv_image_tracking_model_set_target_open(image_object, image_tracking_model); + /* Use open image functionality here. 
*/ + int ret = mv_image_tracking_model_set_target_open(image_object, image_tracking_model); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_tracking_model_destroy( - mv_image_tracking_model_h image_tracking_model) + mv_image_tracking_model_h image_tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_tracking_model_destroy_lic(image_tracking_model); + /* Use licensed image functionality here. */ + int ret = mv_image_tracking_model_destroy_lic(image_tracking_model); #else - /* Use open image functionality here. */ - int ret = mv_image_tracking_model_destroy_open(image_tracking_model); + /* Use open image functionality here. */ + int ret = mv_image_tracking_model_destroy_open(image_tracking_model); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_tracking_model_refresh( - mv_image_tracking_model_h image_tracking_model, - mv_engine_config_h engine_cfg) + mv_image_tracking_model_h image_tracking_model, + mv_engine_config_h engine_cfg) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. 
*/ - int ret = mv_image_tracking_model_refresh_lic( - image_tracking_model, - engine_cfg); + /* Use licensed image functionality here. */ + int ret = mv_image_tracking_model_refresh_lic( + image_tracking_model, + engine_cfg); #else - /* Use open image functionality here. */ - int ret = mv_image_tracking_model_refresh_open( - image_tracking_model, - engine_cfg); + /* Use open image functionality here. */ + int ret = mv_image_tracking_model_refresh_open( + image_tracking_model, + engine_cfg); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_tracking_model_clone( - mv_image_tracking_model_h src, - mv_image_tracking_model_h *dst) + mv_image_tracking_model_h src, + mv_image_tracking_model_h *dst) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(src); - MEDIA_VISION_NULL_ARG_CHECK(dst); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(src); + MEDIA_VISION_NULL_ARG_CHECK(dst); - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_tracking_model_clone_lic(src, dst); + /* Use licensed image functionality here. */ + int ret = mv_image_tracking_model_clone_lic(src, dst); #else - /* Use open image functionality here. */ - int ret = mv_image_tracking_model_clone_open(src, dst); + /* Use open image functionality here. 
*/ + int ret = mv_image_tracking_model_clone_open(src, dst); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_tracking_model_save( - const char *file_name, mv_image_tracking_model_h image_tracking_model) + const char *file_name, mv_image_tracking_model_h image_tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); - if (file_name == NULL) - { - LOGE("File name is NULL. The file name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + if (file_name == NULL) { + LOGE("File name is NULL. The file name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_tracking_model_save_lic(file_name, image_tracking_model); + /* Use licensed image functionality here. */ + int ret = mv_image_tracking_model_save_lic(file_name, image_tracking_model); #else - /* Use open image functionality here. */ - int ret = mv_image_tracking_model_save_open(file_name, image_tracking_model); + /* Use open image functionality here. 
*/ + int ret = mv_image_tracking_model_save_open(file_name, image_tracking_model); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } int mv_image_tracking_model_load( - const char *file_name, mv_image_tracking_model_h *image_tracking_model) + const char *file_name, mv_image_tracking_model_h *image_tracking_model) { - MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); - MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model); - if (file_name == NULL) - { - LOGE("File name is NULL. The file name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + if (file_name == NULL) { + LOGE("File name is NULL. The file name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); #ifdef MEDIA_VISION_IMAGE_LICENSE_PORT - /* Use licensed image functionality here. */ - int ret = mv_image_tracking_model_load_lic(file_name, image_tracking_model); + /* Use licensed image functionality here. */ + int ret = mv_image_tracking_model_load_lic(file_name, image_tracking_model); #else - /* Use open image functionality here. */ - int ret = mv_image_tracking_model_load_open(file_name, image_tracking_model); + /* Use open image functionality here. 
*/ + int ret = mv_image_tracking_model_load_open(file_name, image_tracking_model); #endif /* MEDIA_VISION_IMAGE_LICENSE_PORT */ - MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; } diff --git a/src/mv_private.c b/src/mv_private.c index 290cf51..4161510 100644 --- a/src/mv_private.c +++ b/src/mv_private.c @@ -20,98 +20,92 @@ bool __mv_check_system_info_feature_supported() { - bool isBarcodeDetectionSupported = false; - bool isBarcodeGenerationSupported = false; - bool isFaceRecognitionSupported = false; - bool isImageRecognitionSupported = false; - - const int nRetVal1 = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_detection", &isBarcodeDetectionSupported); - - if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) - { - LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); - return false; - } - - const int nRetVal2 = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_generation", &isBarcodeGenerationSupported); - - if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) - { - LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); - return false; - } - - const int nRetVal3 = system_info_get_platform_bool("http://tizen.org/feature/vision.face_recognition", &isFaceRecognitionSupported); - - if (nRetVal3 != SYSTEM_INFO_ERROR_NONE) - { - LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); - return false; - } - - const int nRetVal4 = system_info_get_platform_bool("http://tizen.org/feature/vision.image_recognition", &isImageRecognitionSupported); - - if (nRetVal4 != SYSTEM_INFO_ERROR_NONE) - { - LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); - return false; - } - - (isBarcodeDetectionSupported || isBarcodeGenerationSupported || - isFaceRecognitionSupported || isImageRecognitionSupported) ? 
- LOGI("system_info_get_platform_bool returned" - "Supported one feature among barcode detection, " - "barcode generation, face recognition, " - "and image recognition capability\n") : - LOGE("system_info_get_platform_bool returned" - "Unsupported all features of barcode detection, " - "barcode generation, face recognition, " - "and image recognition capability\n") ; - - return (isBarcodeDetectionSupported || isBarcodeGenerationSupported || - isFaceRecognitionSupported || isImageRecognitionSupported); + bool isBarcodeDetectionSupported = false; + bool isBarcodeGenerationSupported = false; + bool isFaceRecognitionSupported = false; + bool isImageRecognitionSupported = false; + + const int nRetVal1 = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_detection", &isBarcodeDetectionSupported); + + if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) { + LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); + return false; + } + + const int nRetVal2 = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_generation", &isBarcodeGenerationSupported); + + if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) { + LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); + return false; + } + + const int nRetVal3 = system_info_get_platform_bool("http://tizen.org/feature/vision.face_recognition", &isFaceRecognitionSupported); + + if (nRetVal3 != SYSTEM_INFO_ERROR_NONE) { + LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); + return false; + } + + const int nRetVal4 = system_info_get_platform_bool("http://tizen.org/feature/vision.image_recognition", &isImageRecognitionSupported); + + if (nRetVal4 != SYSTEM_INFO_ERROR_NONE) { + LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); + return false; + } + + (isBarcodeDetectionSupported || isBarcodeGenerationSupported || + isFaceRecognitionSupported || isImageRecognitionSupported) ? 
+ LOGI("system_info_get_platform_bool returned" + "Supported one feature among barcode detection, " + "barcode generation, face recognition, " + "and image recognition capability\n") : + LOGE("system_info_get_platform_bool returned" + "Unsupported all features of barcode detection, " + "barcode generation, face recognition, " + "and image recognition capability\n") ; + + return (isBarcodeDetectionSupported || isBarcodeGenerationSupported || + isFaceRecognitionSupported || isImageRecognitionSupported); } bool __mv_barcode_detect_check_system_info_feature_supported() { - bool isBarcodeDetectionSupported = false; + bool isBarcodeDetectionSupported = false; - const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_detection", &isBarcodeDetectionSupported); + const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_detection", &isBarcodeDetectionSupported); - if (nRetVal != SYSTEM_INFO_ERROR_NONE) - { - LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); - return false; - } + if (nRetVal != SYSTEM_INFO_ERROR_NONE) { + LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); + return false; + } - isBarcodeDetectionSupported ? - LOGI("system_info_get_platform_bool returned " - "Supported barcode detection feature capability\n") : - LOGE("system_info_get_platform_bool returned " - "Unsupported barcode detection feature capability\n"); + isBarcodeDetectionSupported ? 
+ LOGI("system_info_get_platform_bool returned " + "Supported barcode detection feature capability\n") : + LOGE("system_info_get_platform_bool returned " + "Unsupported barcode detection feature capability\n"); - return isBarcodeDetectionSupported; + return isBarcodeDetectionSupported; } bool __mv_barcode_generate_check_system_info_feature_supported() { - bool isBarcodeGenerationSupported = false; + bool isBarcodeGenerationSupported = false; - const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_generation", &isBarcodeGenerationSupported); + const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_generation", &isBarcodeGenerationSupported); - if (nRetVal != SYSTEM_INFO_ERROR_NONE) - { - LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); - return false; - } + if (nRetVal != SYSTEM_INFO_ERROR_NONE) { + LOGE("[%s] SYSTEM_INFO_ERROR: __FUNCTION__"); + return false; + } - isBarcodeGenerationSupported ? - LOGI("system_info_get_platform_bool returned " - "Supported barcode generation feature capability\n") : - LOGE("system_info_get_platform_bool returned " - "Unsupported barcode generation feature capability\n"); + isBarcodeGenerationSupported ? 
+ LOGI("system_info_get_platform_bool returned " + "Supported barcode generation feature capability\n") : + LOGE("system_info_get_platform_bool returned " + "Unsupported barcode generation feature capability\n"); - return isBarcodeGenerationSupported; + return isBarcodeGenerationSupported; } bool __mv_face_check_system_info_feature_supported() -- 2.7.4 From b54972ca08e90f8c31dc86c6c59ae059d9010294 Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Wed, 21 Oct 2015 21:05:38 +0900 Subject: [PATCH 04/16] Applied Tizen C++ coding rule Change-Id: Ifd9f6e2e312eea7735e17aaf3bd4c416c845fd91 Signed-off-by: Tae-Young Chung --- mv_barcode/barcode_detector/include/Barcode.h | 90 +- mv_barcode/barcode_detector/include/BarcodeUtils.h | 11 +- .../include/mv_barcode_detect_open.h | 11 +- mv_barcode/barcode_detector/src/Barcode.cpp | 187 +- mv_barcode/barcode_detector/src/BarcodeUtils.cpp | 146 +- .../src/mv_barcode_detect_open.cpp | 235 +- .../include/mv_barcode_detect_lic.h | 10 +- .../src/mv_barcode_detect_lic.c | 12 +- .../barcode_generator/include/BarcodeGenerator.h | 123 +- .../barcode_generator/include/BarcodeOptions.h | 90 +- .../include/mv_barcode_generate_open.h | 32 +- .../barcode_generator/src/BarcodeGenerator.cpp | 479 ++- .../src/mv_barcode_generate_open.cpp | 605 ++-- .../include/mv_barcode_generate_lic.h | 34 +- .../src/mv_barcode_generate_lic.c | 39 +- mv_common/include/EngineConfig.h | 262 +- mv_common/include/MediaSource.h | 196 +- mv_common/include/mv_common_c.h | 96 +- mv_common/src/EngineConfig.cpp | 436 ++- mv_common/src/MediaSource.cpp | 124 +- mv_common/src/mv_common_c.cpp | 973 +++--- mv_face/face/include/FaceDetector.h | 118 +- mv_face/face/include/FaceExpressionRecognizer.h | 52 +- mv_face/face/include/FaceEyeCondition.h | 52 +- mv_face/face/include/FaceRecognitionModel.h | 411 ++- mv_face/face/include/FaceTrackingModel.h | 241 +- mv_face/face/include/FaceUtil.h | 31 +- mv_face/face/include/TrackerMedianFlow.h | 126 +- 
mv_face/face/include/mv_face_open.h | 114 +- mv_face/face/src/FaceDetector.cpp | 111 +- mv_face/face/src/FaceExpressionRecognizer.cpp | 127 +- mv_face/face/src/FaceEyeCondition.cpp | 364 +- mv_face/face/src/FaceRecognitionModel.cpp | 823 +++-- mv_face/face/src/FaceTrackingModel.cpp | 244 +- mv_face/face/src/FaceUtil.cpp | 183 +- mv_face/face/src/TrackerMedianFlow.cpp | 652 ++-- mv_face/face/src/mv_face_open.cpp | 1705 +++++---- mv_face/face_lic/include/mv_face_lic.h | 114 +- mv_face/face_lic/src/mv_face_lic.c | 154 +- mv_image/image/include/ImageConfig.h | 133 +- mv_image/image/include/ImageContourStabilizator.h | 87 +- mv_image/image/include/ImageMathUtil.h | 25 +- mv_image/image/include/ImageObject.h | 315 +- mv_image/image/include/ImageRecognizer.h | 121 +- mv_image/image/include/ImageTracker.h | 86 +- mv_image/image/include/ImageTrackingModel.h | 300 +- mv_image/image/include/mv_image_open.h | 74 +- mv_image/image/src/ImageConfig.cpp | 95 +- mv_image/image/src/ImageContourStabilizator.cpp | 487 ++- mv_image/image/src/ImageMathUtil.cpp | 45 +- mv_image/image/src/ImageObject.cpp | 635 ++-- mv_image/image/src/ImageRecognizer.cpp | 476 ++- mv_image/image/src/ImageTracker.cpp | 572 ++-- mv_image/image/src/ImageTrackingModel.cpp | 407 ++- mv_image/image/src/mv_image_open.cpp | 1186 +++---- packaging/capi-media-vision.spec | 2 +- test/testsuites/barcode/barcode_test_suite.c | 2083 ++++++----- test/testsuites/face/face_test_suite.c | 3620 +++++++++----------- test/testsuites/image/image_test_suite.c | 3414 +++++++++--------- 59 files changed, 11324 insertions(+), 12652 deletions(-) diff --git a/mv_barcode/barcode_detector/include/Barcode.h b/mv_barcode/barcode_detector/include/Barcode.h index 6003fb1..b47cacd 100644 --- a/mv_barcode/barcode_detector/include/Barcode.h +++ b/mv_barcode/barcode_detector/include/Barcode.h @@ -22,64 +22,60 @@ #include #include -namespace MediaVision -{ -namespace Barcode -{ - +namespace MediaVision { +namespace Barcode { /** * @class 
Barcode * @brief Handle to barcode object. */ -class Barcode -{ +class Barcode { public: - /** - * @brief Barcode constructor. - * - * @since_tizen 2.4 - * @remarks create copy of bar_obj - * @param [in] barObj zbar barcode handle - * - */ - Barcode(const zbar::Symbol& barObj); + /** + * @brief Barcode constructor. + * + * @since_tizen 2.4 + * @remarks create copy of bar_obj + * @param [in] barObj zbar barcode handle + * + */ + Barcode(const zbar::Symbol& barObj); - /** - * @brief Barcode destructor. - * - * @since_tizen 2.4 - */ - ~Barcode(); + /** + * @brief Barcode destructor. + * + * @since_tizen 2.4 + */ + ~Barcode(); - /** - * @brief Gets encoded message from barcode object. - * - * @since_tizen 2.4 - * @return Encoded message - */ - std::string getMessage(void) const; + /** + * @brief Gets encoded message from barcode object. + * + * @since_tizen 2.4 + * @return Encoded message + */ + std::string getMessage(void) const; - /** - * @brief Gets the type of the barcode. - * - * @since_tizen 2.4 - * @return Enumeration value corresponding to the barcode type - */ - mv_barcode_type_e getType(void) const; + /** + * @brief Gets the type of the barcode. + * + * @since_tizen 2.4 + * @return Enumeration value corresponding to the barcode type + */ + mv_barcode_type_e getType(void) const; - /** - * @brief Calculates location of barcode handle from zbar. - * location polygon. - * - * @since_tizen 2.4 - * @param [out] location Quadrangle that contains barcode on image - * @return @c MEDIA_VISION_ERROR_NONE on success, - * otherwise a negative error value - */ - int calculateLocation(mv_quadrangle_s& location) const; + /** + * @brief Calculates location of barcode handle from zbar. + * location polygon. 
+ * + * @since_tizen 2.4 + * @param [out] location Quadrangle that contains barcode on image + * @return @c MEDIA_VISION_ERROR_NONE on success, + * otherwise a negative error value + */ + int calculateLocation(mv_quadrangle_s& location) const; private: - const zbar::Symbol *m_pBarcodeObj; ///< Pointer to zbar barcode handle + const zbar::Symbol *m_pBarcodeObj; ///< Pointer to zbar barcode handle }; } /* Barcode */ diff --git a/mv_barcode/barcode_detector/include/BarcodeUtils.h b/mv_barcode/barcode_detector/include/BarcodeUtils.h index 4dea365..d386cc0 100644 --- a/mv_barcode/barcode_detector/include/BarcodeUtils.h +++ b/mv_barcode/barcode_detector/include/BarcodeUtils.h @@ -19,15 +19,12 @@ #include "mv_common.h" -namespace zbar -{ - class Image; +namespace zbar { +class Image; } -namespace MediaVision -{ -namespace Barcode -{ +namespace MediaVision { +namespace Barcode { /** * @brief This function converts media vision image handle to zbar image handle. diff --git a/mv_barcode/barcode_detector/include/mv_barcode_detect_open.h b/mv_barcode/barcode_detector/include/mv_barcode_detect_open.h index e13d8e7..c6a3495 100644 --- a/mv_barcode/barcode_detector/include/mv_barcode_detect_open.h +++ b/mv_barcode/barcode_detector/include/mv_barcode_detect_open.h @@ -53,12 +53,11 @@ extern "C" { * @see mv_barcode_detected_cb() */ int mv_barcode_detect_open( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s roi, - mv_barcode_detected_cb detect_cb, - void *user_data); - + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s roi, + mv_barcode_detected_cb detect_cb, + void *user_data); #ifdef __cplusplus } diff --git a/mv_barcode/barcode_detector/src/Barcode.cpp b/mv_barcode/barcode_detector/src/Barcode.cpp index a35f8bd..3361610 100644 --- a/mv_barcode/barcode_detector/src/Barcode.cpp +++ b/mv_barcode/barcode_detector/src/Barcode.cpp @@ -18,130 +18,109 @@ #include -namespace MediaVision -{ -namespace Barcode -{ +namespace MediaVision { 
+namespace Barcode { Barcode::Barcode(const zbar::Symbol& barObj): - m_pBarcodeObj(new zbar::Symbol(barObj)) + m_pBarcodeObj(new zbar::Symbol(barObj)) { ; /* NULL */ } Barcode::~Barcode() { - LOGI("Delete ZBar object"); - delete m_pBarcodeObj; + LOGI("Delete ZBar object"); + delete m_pBarcodeObj; } std::string Barcode::getMessage(void) const { - LOGI("Retrieve message data from ZBar object"); - return m_pBarcodeObj->get_data(); + LOGI("Retrieve message data from ZBar object"); + return m_pBarcodeObj->get_data(); } mv_barcode_type_e Barcode::getType(void) const { - zbar::zbar_symbol_type_t barcodeType = m_pBarcodeObj->get_type(); - - switch (barcodeType) - { - case zbar::ZBAR_QRCODE: - return MV_BARCODE_QR; - - case zbar::ZBAR_UPCA: - return MV_BARCODE_UPC_A; - - case zbar::ZBAR_UPCE: - return MV_BARCODE_UPC_E; - - case zbar::ZBAR_EAN8: - return MV_BARCODE_EAN_8; - - case zbar::ZBAR_EAN13: - return MV_BARCODE_EAN_13; - - case zbar::ZBAR_CODE128: - return MV_BARCODE_CODE128; - - case zbar::ZBAR_CODE39: - return MV_BARCODE_CODE39; - - case zbar::ZBAR_I25: - return MV_BARCODE_I2_5; - - default: - LOGE("ZBar symbol colorspace is not supported by media vision"); - return MV_BARCODE_UNDEFINED; - } + zbar::zbar_symbol_type_t barcodeType = m_pBarcodeObj->get_type(); + + switch (barcodeType) { + case zbar::ZBAR_QRCODE: + return MV_BARCODE_QR; + case zbar::ZBAR_UPCA: + return MV_BARCODE_UPC_A; + case zbar::ZBAR_UPCE: + return MV_BARCODE_UPC_E; + case zbar::ZBAR_EAN8: + return MV_BARCODE_EAN_8; + case zbar::ZBAR_EAN13: + return MV_BARCODE_EAN_13; + case zbar::ZBAR_CODE128: + return MV_BARCODE_CODE128; + case zbar::ZBAR_CODE39: + return MV_BARCODE_CODE39; + case zbar::ZBAR_I25: + return MV_BARCODE_I2_5; + default: + LOGE("ZBar symbol colorspace is not supported by media vision"); + return MV_BARCODE_UNDEFINED; + } } int Barcode::calculateLocation(mv_quadrangle_s& location) const { - const int numberOfVertexes = 4; - - const int locationPolygonSize = 
m_pBarcodeObj->get_location_size(); - - //polygon location should contain at least 4 points - if (locationPolygonSize < numberOfVertexes) - { - LOGW("Can't compute location of the barcode by %i points (less then %i).", locationPolygonSize, numberOfVertexes); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - if (locationPolygonSize == numberOfVertexes) - { - for (int i = 0; i < numberOfVertexes; ++i) - { - location.points[i].x = m_pBarcodeObj->get_location_x(i); - location.points[i].y = m_pBarcodeObj->get_location_y(i); - } - - return MEDIA_VISION_ERROR_NONE; - } - - //bounding quadrangle is computing by 4 marginal points - mv_point_s first = {m_pBarcodeObj->get_location_x(0), m_pBarcodeObj->get_location_y(0)}; - - int minX = first.x; - int maxX = first.x; - int minY = first.y; - int maxY = first.y; - - for (int i = 0; i < locationPolygonSize; ++i) - { - mv_point_s current = {m_pBarcodeObj->get_location_x(i), m_pBarcodeObj->get_location_y(i)}; - if (current.x < minX) - { - minX = current.x; - } - else if (current.x > maxX) - { - maxX = current.x; - } - - if (current.y < minY) - { - minY = current.y; - } - else if (current.y > maxY) - { - maxY = current.y; - } - } - - mv_point_s bottomLeft = {minX, maxY}; - mv_point_s bottomRight = {maxX, maxY}; - mv_point_s topRight = {maxX, minY}; - mv_point_s topLeft = {minX, minY}; - - location.points[0] = topLeft; - location.points[1] = topRight; - location.points[2] = bottomRight; - location.points[3] = bottomLeft; - - return MEDIA_VISION_ERROR_NONE; + const int numberOfVertexes = 4; + + const int locationPolygonSize = m_pBarcodeObj->get_location_size(); + + /*polygon location should contain at least 4 points */ + if (locationPolygonSize < numberOfVertexes) { + LOGW("Can't compute location of the barcode by %i points (less then %i).", locationPolygonSize, numberOfVertexes); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + if (locationPolygonSize == numberOfVertexes) { + for (int i = 0; i < numberOfVertexes; ++i) { + 
location.points[i].x = m_pBarcodeObj->get_location_x(i); + location.points[i].y = m_pBarcodeObj->get_location_y(i); + } + + return MEDIA_VISION_ERROR_NONE; + } + + /* bounding quadrangle is computing by 4 marginal points */ + mv_point_s first = {m_pBarcodeObj->get_location_x(0), m_pBarcodeObj->get_location_y(0)}; + + int minX = first.x; + int maxX = first.x; + int minY = first.y; + int maxY = first.y; + + for (int i = 0; i < locationPolygonSize; ++i) { + mv_point_s current = {m_pBarcodeObj->get_location_x(i), m_pBarcodeObj->get_location_y(i)}; + if (current.x < minX) { + minX = current.x; + } else if (current.x > maxX) { + maxX = current.x; + } + + if (current.y < minY) { + minY = current.y; + } else if (current.y > maxY) { + maxY = current.y; + } + } + + mv_point_s bottomLeft = {minX, maxY}; + mv_point_s bottomRight = {maxX, maxY}; + mv_point_s topRight = {maxX, minY}; + mv_point_s topLeft = {minX, minY}; + + location.points[0] = topLeft; + location.points[1] = topRight; + location.points[2] = bottomRight; + location.points[3] = bottomLeft; + + return MEDIA_VISION_ERROR_NONE; } } /* Barcode */ diff --git a/mv_barcode/barcode_detector/src/BarcodeUtils.cpp b/mv_barcode/barcode_detector/src/BarcodeUtils.cpp index 57219eb..a9da923 100644 --- a/mv_barcode/barcode_detector/src/BarcodeUtils.cpp +++ b/mv_barcode/barcode_detector/src/BarcodeUtils.cpp @@ -21,92 +21,84 @@ #include -namespace MediaVision -{ -namespace Barcode -{ - +namespace MediaVision { +namespace Barcode { int convertSourceMV2Zbar(mv_source_h mvSource, zbar::Image& zbarSource) { - int err = MEDIA_VISION_ERROR_NONE; - unsigned char *buffer = NULL; - unsigned int height = 0; - unsigned int width = 0; - unsigned int size = 0; - mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; + int err = MEDIA_VISION_ERROR_NONE; + unsigned char *buffer = NULL; + unsigned int height = 0; + unsigned int width = 0; + unsigned int size = 0; + mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; - err = 
mv_source_get_colorspace_c(mvSource, &colorspace); - if (err != MEDIA_VISION_ERROR_NONE) - { - LOGW("Can't determine mv_source_h colorspace to convert to ZBar colorspace. Conversion failed"); - return err; - } + err = mv_source_get_colorspace_c(mvSource, &colorspace); + if (err != MEDIA_VISION_ERROR_NONE) { + LOGW("Can't determine mv_source_h colorspace to convert to ZBar colorspace. Conversion failed"); + return err; + } - switch(colorspace) - { - case MEDIA_VISION_COLORSPACE_Y800: - zbarSource.set_format("Y800"); - break; - case MEDIA_VISION_COLORSPACE_I420: - zbarSource.set_format("I420"); - break; - case MEDIA_VISION_COLORSPACE_NV12: - zbarSource.set_format("NV12"); - break; - case MEDIA_VISION_COLORSPACE_YV12: - zbarSource.set_format("YV12"); - break; - case MEDIA_VISION_COLORSPACE_NV21: - zbarSource.set_format("NV21"); - break; - case MEDIA_VISION_COLORSPACE_YUYV: - zbarSource.set_format("YUYV"); - break; - case MEDIA_VISION_COLORSPACE_UYVY: - zbarSource.set_format("UYVY"); - break; - case MEDIA_VISION_COLORSPACE_422P: - zbarSource.set_format("422P"); - break; - case MEDIA_VISION_COLORSPACE_RGB565: - zbarSource.set_format("RGBP"); - break; - case MEDIA_VISION_COLORSPACE_RGB888: - zbarSource.set_format("RGB3"); - break; - case MEDIA_VISION_COLORSPACE_RGBA: - zbarSource.set_format("RGB4"); - break; - default: - LOGE("Media vision colorspace is not supported by ZBar symbol"); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } + switch(colorspace) { + case MEDIA_VISION_COLORSPACE_Y800: + zbarSource.set_format("Y800"); + break; + case MEDIA_VISION_COLORSPACE_I420: + zbarSource.set_format("I420"); + break; + case MEDIA_VISION_COLORSPACE_NV12: + zbarSource.set_format("NV12"); + break; + case MEDIA_VISION_COLORSPACE_YV12: + zbarSource.set_format("YV12"); + break; + case MEDIA_VISION_COLORSPACE_NV21: + zbarSource.set_format("NV21"); + break; + case MEDIA_VISION_COLORSPACE_YUYV: + zbarSource.set_format("YUYV"); + break; + case MEDIA_VISION_COLORSPACE_UYVY: + 
zbarSource.set_format("UYVY"); + break; + case MEDIA_VISION_COLORSPACE_422P: + zbarSource.set_format("422P"); + break; + case MEDIA_VISION_COLORSPACE_RGB565: + zbarSource.set_format("RGBP"); + break; + case MEDIA_VISION_COLORSPACE_RGB888: + zbarSource.set_format("RGB3"); + break; + case MEDIA_VISION_COLORSPACE_RGBA: + zbarSource.set_format("RGB4"); + break; + default: + LOGE("Media vision colorspace is not supported by ZBar symbol"); + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } - err = mv_source_get_buffer_c(mvSource, &buffer, &size); - if (err != MEDIA_VISION_ERROR_NONE) - { - LOGW("Can't get mv_source_h buffer to convert to ZBar image. Conversion failed"); - return err; - } + err = mv_source_get_buffer_c(mvSource, &buffer, &size); + if (err != MEDIA_VISION_ERROR_NONE) { + LOGW("Can't get mv_source_h buffer to convert to ZBar image. Conversion failed"); + return err; + } - err = mv_source_get_height_c(mvSource, &height); - if (err != MEDIA_VISION_ERROR_NONE) - { - LOGW("Can't get mv_source_h height for conversion. Conversion failed"); - return err; - } + err = mv_source_get_height_c(mvSource, &height); + if (err != MEDIA_VISION_ERROR_NONE) { + LOGW("Can't get mv_source_h height for conversion. Conversion failed"); + return err; + } - err = mv_source_get_width_c(mvSource, &width); - if (err != MEDIA_VISION_ERROR_NONE) - { - LOGW("Can't get mv_source_h width for conversion. Conversion failed"); - return err; - } + err = mv_source_get_width_c(mvSource, &width); + if (err != MEDIA_VISION_ERROR_NONE) { + LOGW("Can't get mv_source_h width for conversion. 
Conversion failed"); + return err; + } - zbarSource.set_size(width, height); - zbarSource.set_data(buffer, size); + zbarSource.set_size(width, height); + zbarSource.set_data(buffer, size); - return err; + return err; } } /* Barcode */ diff --git a/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp b/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp index dd5e557..b2357f7 100644 --- a/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp +++ b/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp @@ -26,128 +26,117 @@ using namespace MediaVision::Barcode; int mv_barcode_detect_open( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s roi, - mv_barcode_detected_cb detect_cb, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s roi, + mv_barcode_detected_cb detect_cb, + void *user_data) { - if (!source || !detect_cb) - { - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - zbar::Image image; - int err = convertSourceMV2Zbar(source, image); - if (err != MEDIA_VISION_ERROR_NONE) - { - LOGW("convertSourceMV2Zbar failed"); - return err; - } - - zbar::Image greyImage = image.convert("Y800"); - greyImage.set_crop(roi.point.x, roi.point.y, roi.width, roi.height); - zbar::ImageScanner scanner; - - int target_val; - err = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_DETECT_ATTR_TARGET", &target_val); - if (err != MEDIA_VISION_ERROR_NONE) - { - LOGW("mv_engine_config_get_int_attribute failed"); - return err; - } - - /** - * 0 - linear barcodes and QR codes - * 1 - only linear barcodes - * 2 - only QR codes - */ - switch (target_val) - { - case 0: - scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 1); - break; - case 1: - scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 0); - scanner.set_config(zbar::ZBAR_UPCA, zbar::ZBAR_CFG_ENABLE, 1); - scanner.set_config(zbar::ZBAR_UPCE, zbar::ZBAR_CFG_ENABLE, 1); - scanner.set_config(zbar::ZBAR_EAN8, 
zbar::ZBAR_CFG_ENABLE, 1); - scanner.set_config(zbar::ZBAR_EAN13, zbar::ZBAR_CFG_ENABLE, 1); - scanner.set_config(zbar::ZBAR_CODE128, zbar::ZBAR_CFG_ENABLE, 1); - scanner.set_config(zbar::ZBAR_CODE39, zbar::ZBAR_CFG_ENABLE, 1); - scanner.set_config(zbar::ZBAR_I25, zbar::ZBAR_CFG_ENABLE, 1); - break; - case 2: - scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 0); - scanner.set_config(zbar::ZBAR_QRCODE, zbar::ZBAR_CFG_ENABLE, 1); - break; - default: - LOGW("Unavailabe target value %d", target_val); - } - - int numberOfBarcodes = scanner.scan(greyImage); - LOGI("ZBar scanner has found %i barcodes on the mv_source_h", numberOfBarcodes); - mv_quadrangle_s *barcodeLocations = NULL; - mv_barcode_type_e *types = NULL; - - if (numberOfBarcodes == 0) - { - LOGI("Call the detect callback for 0 detected barcodes"); - detect_cb(source, engine_cfg, barcodeLocations, NULL, types, numberOfBarcodes, user_data); - return MEDIA_VISION_ERROR_NONE; - } - else if (numberOfBarcodes < 0) - { - LOGW("Incorrect number of barcodes (%i), detection is terminated", numberOfBarcodes); - return MEDIA_VISION_ERROR_INTERNAL; - } - - const char **messagesArray = new const char*[numberOfBarcodes]; - barcodeLocations = new mv_quadrangle_s[numberOfBarcodes]; - types = new mv_barcode_type_e[numberOfBarcodes]; - - int i = 0; - //extract results and prepare them for callback passing - for (zbar::SymbolIterator symbol = greyImage.symbol_begin(); - symbol != greyImage.symbol_end(); - ++symbol, ++i) - { - Barcode curBarcode(*symbol); - - size_t messageLength = curBarcode.getMessage().size(); - char *curMessage = new char[messageLength + 1]; - curBarcode.getMessage().copy(curMessage, messageLength); - curMessage[messageLength] = '\0'; - messagesArray[i] = curMessage; - - types[i] = curBarcode.getType(); - - int err = curBarcode.calculateLocation(barcodeLocations[i]); - if (err != MEDIA_VISION_ERROR_NONE) - { - LOGW("Can't determine location for barcode, detection is terminated"); - for (int j = 0; 
j <= i; ++j) - { - delete[] messagesArray[j]; - } - delete[] messagesArray; - delete[] barcodeLocations; - delete[] types; - return err; - } - } - - LOGI("Call the detect callback for %i detected barcodes", numberOfBarcodes); - detect_cb(source, engine_cfg, barcodeLocations, messagesArray, types, numberOfBarcodes, user_data); - - LOGI("Clean the memory from barcodes messages, locations and types"); - for (int j = 0; j < numberOfBarcodes; ++j) - { - delete[] messagesArray[j]; - } - delete[] messagesArray; - delete[] barcodeLocations; - delete[] types; - - return MEDIA_VISION_ERROR_NONE; + if (!source || !detect_cb) { + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + zbar::Image image; + int err = convertSourceMV2Zbar(source, image); + if (err != MEDIA_VISION_ERROR_NONE) { + LOGW("convertSourceMV2Zbar failed"); + return err; + } + + zbar::Image greyImage = image.convert("Y800"); + greyImage.set_crop(roi.point.x, roi.point.y, roi.width, roi.height); + zbar::ImageScanner scanner; + + int target_val; + err = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_DETECT_ATTR_TARGET", &target_val); + if (err != MEDIA_VISION_ERROR_NONE) { + LOGW("mv_engine_config_get_int_attribute failed"); + return err; + } + + /** + * 0 - linear barcodes and QR codes + * 1 - only linear barcodes + * 2 - only QR codes + */ + switch (target_val) { + case 0: + scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 1); + break; + case 1: + scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 0); + scanner.set_config(zbar::ZBAR_UPCA, zbar::ZBAR_CFG_ENABLE, 1); + scanner.set_config(zbar::ZBAR_UPCE, zbar::ZBAR_CFG_ENABLE, 1); + scanner.set_config(zbar::ZBAR_EAN8, zbar::ZBAR_CFG_ENABLE, 1); + scanner.set_config(zbar::ZBAR_EAN13, zbar::ZBAR_CFG_ENABLE, 1); + scanner.set_config(zbar::ZBAR_CODE128, zbar::ZBAR_CFG_ENABLE, 1); + scanner.set_config(zbar::ZBAR_CODE39, zbar::ZBAR_CFG_ENABLE, 1); + scanner.set_config(zbar::ZBAR_I25, zbar::ZBAR_CFG_ENABLE, 1); + break; + case 2: + 
scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 0); + scanner.set_config(zbar::ZBAR_QRCODE, zbar::ZBAR_CFG_ENABLE, 1); + break; + default: + LOGW("Unavailabe target value %d", target_val); + } + + int numberOfBarcodes = scanner.scan(greyImage); + LOGI("ZBar scanner has found %i barcodes on the mv_source_h", numberOfBarcodes); + mv_quadrangle_s *barcodeLocations = NULL; + mv_barcode_type_e *types = NULL; + + if (numberOfBarcodes == 0) { + LOGI("Call the detect callback for 0 detected barcodes"); + detect_cb(source, engine_cfg, barcodeLocations, NULL, types, numberOfBarcodes, user_data); + return MEDIA_VISION_ERROR_NONE; + } else if (numberOfBarcodes < 0) { + LOGW("Incorrect number of barcodes (%i), detection is terminated", numberOfBarcodes); + return MEDIA_VISION_ERROR_INTERNAL; + } + + const char **messagesArray = new const char*[numberOfBarcodes]; + barcodeLocations = new mv_quadrangle_s[numberOfBarcodes]; + types = new mv_barcode_type_e[numberOfBarcodes]; + + int i = 0; + /* extract results and prepare them for callback passing */ + for (zbar::SymbolIterator symbol = greyImage.symbol_begin(); + symbol != greyImage.symbol_end(); + ++symbol, ++i) { + Barcode curBarcode(*symbol); + + size_t messageLength = curBarcode.getMessage().size(); + char *curMessage = new char[messageLength + 1]; + curBarcode.getMessage().copy(curMessage, messageLength); + curMessage[messageLength] = '\0'; + messagesArray[i] = curMessage; + + types[i] = curBarcode.getType(); + + int err = curBarcode.calculateLocation(barcodeLocations[i]); + if (err != MEDIA_VISION_ERROR_NONE) { + LOGW("Can't determine location for barcode, detection is terminated"); + for (int j = 0; j <= i; ++j) { + delete[] messagesArray[j]; + } + delete[] messagesArray; + delete[] barcodeLocations; + delete[] types; + return err; + } + } + + LOGI("Call the detect callback for %i detected barcodes", numberOfBarcodes); + detect_cb(source, engine_cfg, barcodeLocations, messagesArray, types, numberOfBarcodes, 
user_data); + + LOGI("Clean the memory from barcodes messages, locations and types"); + for (int j = 0; j < numberOfBarcodes; ++j) { + delete[] messagesArray[j]; + } + delete[] messagesArray; + delete[] barcodeLocations; + delete[] types; + + return MEDIA_VISION_ERROR_NONE; } diff --git a/mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h b/mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h index b119723..40d7163 100644 --- a/mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h +++ b/mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h @@ -50,11 +50,11 @@ extern "C" { * @see mv_barcode_detected_cb() */ int mv_barcode_detect_lic( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s roi, - mv_barcode_detected_cb detect_cb, - void *user_data); + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s roi, + mv_barcode_detected_cb detect_cb, + void *user_data); #ifdef __cplusplus } diff --git a/mv_barcode/barcode_detector_lic/src/mv_barcode_detect_lic.c b/mv_barcode/barcode_detector_lic/src/mv_barcode_detect_lic.c index 5dc9fc0..e54a69e 100644 --- a/mv_barcode/barcode_detector_lic/src/mv_barcode_detect_lic.c +++ b/mv_barcode/barcode_detector_lic/src/mv_barcode_detect_lic.c @@ -17,11 +17,11 @@ #include "mv_barcode_detect_lic.h" int mv_barcode_detect_lic( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s roi, - mv_barcode_detected_cb detect_cb, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s roi, + mv_barcode_detected_cb detect_cb, + void *user_data) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } diff --git a/mv_barcode/barcode_generator/include/BarcodeGenerator.h b/mv_barcode/barcode_generator/include/BarcodeGenerator.h index 40659ae..9fb0104 100644 --- a/mv_barcode/barcode_generator/include/BarcodeGenerator.h +++ b/mv_barcode/barcode_generator/include/BarcodeGenerator.h @@ -26,78 
+26,73 @@ * @brief This file contains the BarcodeGenerator class. */ -namespace MediaVision -{ -namespace Barcode -{ - +namespace MediaVision { +namespace Barcode { /** * @brief This class implements barcode generation. * @details 1D Barcodes and 2D QR codes are supported. * * @since_tizen 2.4 */ -class BarcodeGenerator -{ +class BarcodeGenerator { public: + /** + * @brief This method generates Barcodes image according to options. + * + * @since_tizen 2.4 + * @param [in] imageFileName Image file name which will be generated + * @param [in] imageFormat Image file format which will be generated + * @param [in] imageWidth Image file width which will be generated + * @param [in] imageHeight Image file height which will be generated + * @param [in] message Input message to be encoded + * @param [in] type Barcode type (1D barcode or 2D QR code) + * @param [in] encodingMode Encoding mode (for QR codes only) + * @param [in] correctionLevel Error correction level (for QR codes only) + * @param [in] qrVersion QR code version (1 ~ 40, 0 for 1D barcodes) + * @param [in] showText Show text or not + * @return BARCODE_ERROR_NONE from BarcodeError which is 0 if success, + * BarcodeError value otherwise + */ + static int generateBarcodeToImage( + const std::string& imageFileName, + BarcodeImageFormat imageFormat, + const int imageWidth, + const int imageHeight, + const std::string& message, + BarcodeType type, + BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_UNAVAILABLE, + BarcodeQRErrorCorrectionLevel correctionLevel = BARCODE_QR_ECC_UNAVAILABLE, + int qrVersion = 0, + int showText = 0); - /** - * @brief This method generates Barcodes image according to options. 
- * - * @since_tizen 2.4 - * @param [in] imageFileName Image file name which will be generated - * @param [in] imageFormat Image file format which will be generated - * @param [in] imageWidth Image file width which will be generated - * @param [in] imageHeight Image file height which will be generated - * @param [in] message Input message to be encoded - * @param [in] type Barcode type (1D barcode or 2D QR code) - * @param [in] encodingMode Encoding mode (for QR codes only) - * @param [in] correctionLevel Error correction level (for QR codes only) - * @param [in] qrVersion QR code version (1 ~ 40, 0 for 1D barcodes) - * @param [in] showText Show text or not - * @return BARCODE_ERROR_NONE from BarcodeError which is 0 if success, - * BarcodeError value otherwise - */ - static int generateBarcodeToImage( - const std::string& imageFileName, - BarcodeImageFormat imageFormat, - const int imageWidth, - const int imageHeight, - const std::string& message, - BarcodeType type, - BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_UNAVAILABLE, - BarcodeQRErrorCorrectionLevel correctionLevel = BARCODE_QR_ECC_UNAVAILABLE, - int qrVersion = 0, - int showText = 0); - - /** - * @brief This method generates Barcodes image buffer according to options. 
- * - * @since_tizen 2.4 - * @param [out] imageBuffer Image buffer with image to be generated - * @param [out] imageWidth Image buffer width which will be generated - * @param [out] imageHeight Image buffer height which will be generated - * @param [out] imageChannels Image buffer channels number which will be generated - * @param [in] message Input message to be encoded - * @param [in] type Barcode type (1D barcode or 2D QR code) - * @param [in] encodingMode Encoding mode (for QR codes only) - * @param [in] correctionLevel Error correction level (for QR codes only) - * @param [in] qrVersion QR code version (1 ~ 40, 0 for 1D barcodes) - * @param [in] showText Show text or not - * @return BARCODE_ERROR_NONE from BarcodeError which is 0 if success, - * BarcodeError value otherwise - */ - static int generateBarcodeToBuffer( - unsigned char **imageBuffer, - unsigned int *imageWidth, - unsigned int *imageHeight, - unsigned int *imageChannels, - const std::string& message, - BarcodeType type, - BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_UNAVAILABLE, - BarcodeQRErrorCorrectionLevel correctionLevel = BARCODE_QR_ECC_UNAVAILABLE, - int qrVersion = 0, - int showText = 0); + /** + * @brief This method generates Barcodes image buffer according to options. 
+ * + * @since_tizen 2.4 + * @param [out] imageBuffer Image buffer with image to be generated + * @param [out] imageWidth Image buffer width which will be generated + * @param [out] imageHeight Image buffer height which will be generated + * @param [out] imageChannels Image buffer channels number which will be generated + * @param [in] message Input message to be encoded + * @param [in] type Barcode type (1D barcode or 2D QR code) + * @param [in] encodingMode Encoding mode (for QR codes only) + * @param [in] correctionLevel Error correction level (for QR codes only) + * @param [in] qrVersion QR code version (1 ~ 40, 0 for 1D barcodes) + * @param [in] showText Show text or not + * @return BARCODE_ERROR_NONE from BarcodeError which is 0 if success, + * BarcodeError value otherwise + */ + static int generateBarcodeToBuffer( + unsigned char **imageBuffer, + unsigned int *imageWidth, + unsigned int *imageHeight, + unsigned int *imageChannels, + const std::string& message, + BarcodeType type, + BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_UNAVAILABLE, + BarcodeQRErrorCorrectionLevel correctionLevel = BARCODE_QR_ECC_UNAVAILABLE, + int qrVersion = 0, + int showText = 0); }; } /* Barcode */ diff --git a/mv_barcode/barcode_generator/include/BarcodeOptions.h b/mv_barcode/barcode_generator/include/BarcodeOptions.h index c643550..2ebf402 100644 --- a/mv_barcode/barcode_generator/include/BarcodeOptions.h +++ b/mv_barcode/barcode_generator/include/BarcodeOptions.h @@ -22,26 +22,22 @@ * @brief This file contains the Barcode options. */ -namespace MediaVision -{ -namespace Barcode -{ - +namespace MediaVision { +namespace Barcode { /** * @brief The Barcode type enumeration. 
* * @since_tizen 2.4 */ -enum BarcodeType -{ - BARCODE_QR = 58, - BARCODE_UPCA = 34, - BARCODE_UPCE = 37, - BARCODE_EAN8 = 13, - BARCODE_EAN13 = BARCODE_EAN8, - BARCODE_CODE39 = 8, - BARCODE_CODE128 = 20, - BARCODE_INTERLEAVE_2_5 = 3 +enum BarcodeType { + BARCODE_QR = 58, + BARCODE_UPCA = 34, + BARCODE_UPCE = 37, + BARCODE_EAN8 = 13, + BARCODE_EAN13 = BARCODE_EAN8, + BARCODE_CODE39 = 8, + BARCODE_CODE128 = 20, + BARCODE_INTERLEAVE_2_5 = 3 }; /** @@ -50,30 +46,27 @@ enum BarcodeType * @since_tizen 2.4 * @remarks This is unavailable for 1D barcodes. */ -enum BarcodeQRErrorCorrectionLevel -{ - BARCODE_QR_ECC_UNAVAILABLE = 0, - BARCODE_QR_ECC_LOW = 1, - BARCODE_QR_ECC_MEDIUM = 2, - BARCODE_QR_ECC_QUARTILE = 3, - BARCODE_QR_ECC_HIGH = 4 +enum BarcodeQRErrorCorrectionLevel { + BARCODE_QR_ECC_UNAVAILABLE = 0, + BARCODE_QR_ECC_LOW = 1, + BARCODE_QR_ECC_MEDIUM = 2, + BARCODE_QR_ECC_QUARTILE = 3, + BARCODE_QR_ECC_HIGH = 4 }; - /** * @brief The Barcode encoding mode enumeration. * * @since_tizen 2.4 * @remarks This is unavailable for 1D barcodes. 
*/ -enum BarcodeQREncodingMode -{ - BARCODE_QR_MODE_NUMERIC = 1, - BARCODE_QR_MODE_ALPHANUMERIC = 1, - BARCODE_QR_MODE_BYTE = 0, - BARCODE_QR_MODE_UTF8 = 1, - BARCODE_QR_MODE_KANJI = 3, - BARCODE_QR_MODE_UNAVAILABLE +enum BarcodeQREncodingMode { + BARCODE_QR_MODE_NUMERIC = 1, + BARCODE_QR_MODE_ALPHANUMERIC = 1, + BARCODE_QR_MODE_BYTE = 0, + BARCODE_QR_MODE_UTF8 = 1, + BARCODE_QR_MODE_KANJI = 3, + BARCODE_QR_MODE_UNAVAILABLE }; /** @@ -81,11 +74,10 @@ enum BarcodeQREncodingMode * * @since_tizen 2.4 */ -enum BarcodeImageFormat -{ - BARCODE_IMAGE_JPG, - BARCODE_IMAGE_PNG, - BARCODE_IMAGE_BMP +enum BarcodeImageFormat { + BARCODE_IMAGE_JPG, + BARCODE_IMAGE_PNG, + BARCODE_IMAGE_BMP }; /** @@ -93,8 +85,7 @@ enum BarcodeImageFormat * * @since_tizen 2.4 */ -enum BarcodeGenTextOpt -{ +enum BarcodeGenTextOpt { BARCODE_GEN_TEXT_INVISIBLE, BARCODE_GEN_TEXT_VISIBLE }; @@ -104,18 +95,17 @@ enum BarcodeGenTextOpt * * @since_tizen 2.4 */ -enum BarcodeError -{ - BARCODE_ERROR_NONE = 0, - BARCODE_WARNING_INVALID_OPTION = 2, - BARCODE_ERROR_TOO_LONG = 5, - BARCODE_ERROR_INVALID_DATA = 6, - BARCODE_ERROR_INVALID_CHECK = 7, - BARCODE_ERROR_INVALID_OPTION = 8, - BARCODE_ERROR_ENCODING_PROBLEM = 9, - BARCODE_ERROR_FILE_ACCESS = 10, - BARCODE_ERROR_MEMORY = 11, - BARCODE_ERROR_INVALID_PATH =12, +enum BarcodeError { + BARCODE_ERROR_NONE = 0, + BARCODE_WARNING_INVALID_OPTION = 2, + BARCODE_ERROR_TOO_LONG = 5, + BARCODE_ERROR_INVALID_DATA = 6, + BARCODE_ERROR_INVALID_CHECK = 7, + BARCODE_ERROR_INVALID_OPTION = 8, + BARCODE_ERROR_ENCODING_PROBLEM = 9, + BARCODE_ERROR_FILE_ACCESS = 10, + BARCODE_ERROR_MEMORY = 11, + BARCODE_ERROR_INVALID_PATH = 12, }; } /* Barcode */ diff --git a/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h b/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h index d3134ac..bb1e8b6 100644 --- a/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h +++ b/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h @@ -55,12 +55,12 @@ 
extern "C" { * @see mv_barcode_generate_image_open() */ int mv_barcode_generate_source_open(mv_engine_config_h engine_cfg, - const char *message, - mv_barcode_type_e type, - mv_barcode_qr_mode_e qr_enc_mode, - mv_barcode_qr_ecc_e qr_ecc, - int qr_version, - mv_source_h image); + const char *message, + mv_barcode_type_e type, + mv_barcode_qr_mode_e qr_enc_mode, + mv_barcode_qr_ecc_e qr_ecc, + int qr_version, + mv_source_h image); /** * @brief Generates image file with barcode. @@ -91,16 +91,16 @@ int mv_barcode_generate_source_open(mv_engine_config_h engine_cfg, * @see mv_barcode_generate_source_open() */ int mv_barcode_generate_image_open( - mv_engine_config_h engine_cfg, - const char *message, - int image_width, - int image_height, - mv_barcode_type_e type, - mv_barcode_qr_mode_e qr_enc_mode, - mv_barcode_qr_ecc_e qr_ecc, - int qr_version, - const char *image_path, - mv_barcode_image_format_e image_format); + mv_engine_config_h engine_cfg, + const char *message, + int image_width, + int image_height, + mv_barcode_type_e type, + mv_barcode_qr_mode_e qr_enc_mode, + mv_barcode_qr_ecc_e qr_ecc, + int qr_version, + const char *image_path, + mv_barcode_image_format_e image_format); #ifdef __cplusplus } diff --git a/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp b/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp index 624a3f5..522f65f 100644 --- a/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp +++ b/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp @@ -28,277 +28,256 @@ #include #include -namespace MediaVision -{ -namespace Barcode -{ - -namespace -{ +namespace MediaVision { +namespace Barcode { +namespace { int getFormatEncodingInfo( - BarcodeImageFormat imageFormat, - std::vector& extensions, - std::vector& compressionParams) + BarcodeImageFormat imageFormat, + std::vector& extensions, + std::vector& compressionParams) { - static const int PNGCompressionLevel = 3; - - compressionParams.clear(); - extensions.clear(); - - switch (imageFormat) - { - 
case BARCODE_IMAGE_PNG: - compressionParams.push_back(CV_IMWRITE_PNG_COMPRESSION); - compressionParams.push_back(PNGCompressionLevel); - extensions.push_back(".png"); - break; - case BARCODE_IMAGE_JPG: - extensions.push_back(".jpg"); - extensions.push_back(".jpeg"); - extensions.push_back(".jpe"); - break; - case BARCODE_IMAGE_BMP: - extensions.push_back(".bmp"); - extensions.push_back(".dib"); - break; - default: - return BARCODE_ERROR_INVALID_OPTION; - } - return BARCODE_ERROR_NONE; + static const int PNGCompressionLevel = 3; + + compressionParams.clear(); + extensions.clear(); + + switch (imageFormat) { + case BARCODE_IMAGE_PNG: + compressionParams.push_back(CV_IMWRITE_PNG_COMPRESSION); + compressionParams.push_back(PNGCompressionLevel); + extensions.push_back(".png"); + break; + case BARCODE_IMAGE_JPG: + extensions.push_back(".jpg"); + extensions.push_back(".jpeg"); + extensions.push_back(".jpe"); + break; + case BARCODE_IMAGE_BMP: + extensions.push_back(".bmp"); + extensions.push_back(".dib"); + break; + default: + return BARCODE_ERROR_INVALID_OPTION; + } + return BARCODE_ERROR_NONE; } int createBarcode( - const std::string& message, - BarcodeType type, - BarcodeQREncodingMode encodingMode, - BarcodeQRErrorCorrectionLevel correctionLevel, - int qrVersion, - int showText, - zint_symbol *symbol) + const std::string& message, + BarcodeType type, + BarcodeQREncodingMode encodingMode, + BarcodeQRErrorCorrectionLevel correctionLevel, + int qrVersion, + int showText, + zint_symbol *symbol) { - // set input values - symbol->symbology = type; - symbol->input_mode = encodingMode; - symbol->option_1 = correctionLevel; - symbol->option_2 = qrVersion; - symbol->scale = 1; - symbol->show_hrt = showText; - - // set default values - std::strncpy(symbol->fgcolour, "000000", 10); - std::strncpy(symbol->bgcolour, "ffffff", 10); - symbol->border_width = 1; - symbol->height = 50; - - if (type == BARCODE_QR) { - symbol->whitespace_width = 0; - } else { - symbol->whitespace_width = 
10; - } - - // create barcode - const int rotationAngle = 0; - int error = ZBarcode_Encode_and_Buffer( - symbol, - (unsigned char*)(message.c_str()), - message.length(), - rotationAngle); - - return error; + /* set input values */ + symbol->symbology = type; + symbol->input_mode = encodingMode; + symbol->option_1 = correctionLevel; + symbol->option_2 = qrVersion; + symbol->scale = 1; + symbol->show_hrt = showText; + + /* set default values */ + std::strncpy(symbol->fgcolour, "000000", 10); + std::strncpy(symbol->bgcolour, "ffffff", 10); + symbol->border_width = 1; + symbol->height = 50; + + if (type == BARCODE_QR) { + symbol->whitespace_width = 0; + } else { + symbol->whitespace_width = 10; + } + + /* create barcode */ + const int rotationAngle = 0; + int error = ZBarcode_Encode_and_Buffer( + symbol, + (unsigned char*)(message.c_str()), + message.length(), + rotationAngle); + + return error; } int writeBufferToImageFile( - zint_symbol *symbol, - const std::string& imageFileName, - BarcodeImageFormat imageFormat, - const int imageWidth, - const int imageHeight) + zint_symbol *symbol, + const std::string& imageFileName, + BarcodeImageFormat imageFormat, + const int imageWidth, + const int imageHeight) { - if (imageWidth <= 0 || imageHeight <= 0) - { - LOGE("Barcode image size is invalid: %i x %i. Terminate write to " - "the image operation", imageWidth, imageHeight); - return BARCODE_ERROR_INVALID_DATA; - } - - /* find directory */ - std::string prefix_imageFileName = imageFileName.substr(0, imageFileName.find_last_of('/')); - LOGD("prefix_path: %s", prefix_imageFileName.c_str()); - - /* check the directory is available */ - if (access(prefix_imageFileName.c_str(),F_OK)) - { - LOGE("Can't save barcode image to the path. 
The path[%s] doesn't existed.", prefix_imageFileName.c_str()); - return BARCODE_ERROR_INVALID_PATH; - } - - // check current extension - std::vector expectedExtensions; - std::vector compressionParams; - - int error = getFormatEncodingInfo(imageFormat, - expectedExtensions, compressionParams); - - if (BARCODE_ERROR_NONE != error || expectedExtensions.empty()) - { - LOGE("Image format is incorrectly specified or not supported"); - return error; - } - - bool rightExtensionFlag = false; - - std::string resultFilePath(imageFileName); - - for (size_t extNum = 0; extNum < expectedExtensions.size(); ++extNum) - { - if (resultFilePath.size() >= expectedExtensions[extNum].size()) - { - std::string givenExtension = resultFilePath.substr( - resultFilePath.length() - expectedExtensions[extNum].size(), - expectedExtensions[extNum].size()); - - std::transform( - givenExtension.begin(), givenExtension.end(), - givenExtension.begin(), ::tolower); - - if (givenExtension == expectedExtensions[extNum]) - { - rightExtensionFlag = true; - break; - } - } - } - - if (!rightExtensionFlag) - { - resultFilePath += expectedExtensions[0]; - } - - cv::Mat image(symbol->bitmap_height, symbol->bitmap_width, CV_8UC3, symbol->bitmap); - cv::resize(image, image, cv::Size(imageWidth, imageHeight), 0, 0, cv::INTER_AREA); - - error = cv::imwrite(resultFilePath, image, compressionParams) ? - BARCODE_ERROR_NONE : BARCODE_ERROR_INVALID_DATA; - - if (BARCODE_ERROR_NONE != error) - { - LOGE("Write barcode image to file %s operation failed.", - resultFilePath.c_str()); - return error; - } - - return error; + if (imageWidth <= 0 || imageHeight <= 0) { + LOGE("Barcode image size is invalid: %i x %i. 
Terminate write to " + "the image operation", imageWidth, imageHeight); + return BARCODE_ERROR_INVALID_DATA; + } + + /* find directory */ + std::string prefix_imageFileName = imageFileName.substr(0, imageFileName.find_last_of('/')); + LOGD("prefix_path: %s", prefix_imageFileName.c_str()); + + /* check the directory is available */ + if (access(prefix_imageFileName.c_str(), F_OK)) { + LOGE("Can't save barcode image to the path. The path[%s] doesn't existed.", prefix_imageFileName.c_str()); + return BARCODE_ERROR_INVALID_PATH; + } + + /* check current extension */ + std::vector expectedExtensions; + std::vector compressionParams; + + int error = getFormatEncodingInfo(imageFormat, + expectedExtensions, compressionParams); + + if (BARCODE_ERROR_NONE != error || expectedExtensions.empty()) { + LOGE("Image format is incorrectly specified or not supported"); + return error; + } + + bool rightExtensionFlag = false; + + std::string resultFilePath(imageFileName); + + for (size_t extNum = 0; extNum < expectedExtensions.size(); ++extNum) { + if (resultFilePath.size() >= expectedExtensions[extNum].size()) { + std::string givenExtension = resultFilePath.substr( + resultFilePath.length() - expectedExtensions[extNum].size(), + expectedExtensions[extNum].size()); + + std::transform( + givenExtension.begin(), givenExtension.end(), + givenExtension.begin(), ::tolower); + + if (givenExtension == expectedExtensions[extNum]) { + rightExtensionFlag = true; + break; + } + } + } + + if (!rightExtensionFlag) { + resultFilePath += expectedExtensions[0]; + } + + cv::Mat image(symbol->bitmap_height, symbol->bitmap_width, CV_8UC3, symbol->bitmap); + cv::resize(image, image, cv::Size(imageWidth, imageHeight), 0, 0, cv::INTER_AREA); + + error = cv::imwrite(resultFilePath, image, compressionParams) ? 
+ BARCODE_ERROR_NONE : BARCODE_ERROR_INVALID_DATA; + + if (BARCODE_ERROR_NONE != error) { + LOGE("Write barcode image to file %s operation failed.", + resultFilePath.c_str()); + return error; + } + + return error; } } /* anonymous namespace */ int BarcodeGenerator::generateBarcodeToImage( - const std::string& imageFileName, - BarcodeImageFormat imageFormat, - const int imageWidth, - const int imageHeight, - const std::string& message, - BarcodeType type, - BarcodeQREncodingMode encodingMode, - BarcodeQRErrorCorrectionLevel correctionLevel, - int qrVersion, - int showText) + const std::string& imageFileName, + BarcodeImageFormat imageFormat, + const int imageWidth, + const int imageHeight, + const std::string& message, + BarcodeType type, + BarcodeQREncodingMode encodingMode, + BarcodeQRErrorCorrectionLevel correctionLevel, + int qrVersion, + int showText) { - zint_symbol *symbol = ZBarcode_Create(); - - if(symbol == NULL) - { - LOGE("ZBarcode creation failed"); - - return BARCODE_ERROR_ENCODING_PROBLEM; - } - - int error = createBarcode( - message, - type, - encodingMode, - correctionLevel, - qrVersion, - showText, - symbol); - - if (error != BARCODE_ERROR_NONE) - { - LOGE("Barcode creation failed, clean memory"); - ZBarcode_Delete(symbol); - return error; - } - - error = writeBufferToImageFile( - symbol, - imageFileName, - imageFormat, - imageWidth, - imageHeight); - if (error != BARCODE_ERROR_NONE) - { - LOGE("Barcode [%s] file write fail, clean memory", imageFileName.c_str()); - } - else - { - LOGI("Barcode image [%s] is successfully generated, clean memory", imageFileName.c_str()); - } - - ZBarcode_Delete(symbol); - - return error; + zint_symbol *symbol = ZBarcode_Create(); + + if(symbol == NULL) { + LOGE("ZBarcode creation failed"); + return BARCODE_ERROR_ENCODING_PROBLEM; + } + + int error = createBarcode( + message, + type, + encodingMode, + correctionLevel, + qrVersion, + showText, + symbol); + + if (error != BARCODE_ERROR_NONE) { + LOGE("Barcode creation 
failed, clean memory"); + ZBarcode_Delete(symbol); + return error; + } + + error = writeBufferToImageFile( + symbol, + imageFileName, + imageFormat, + imageWidth, + imageHeight); + if (error != BARCODE_ERROR_NONE) { + LOGE("Barcode [%s] file write fail, clean memory", imageFileName.c_str()); + } else { + LOGI("Barcode image [%s] is successfully generated, clean memory", imageFileName.c_str()); + } + + ZBarcode_Delete(symbol); + + return error; } int BarcodeGenerator::generateBarcodeToBuffer( - unsigned char **imageBuffer, - unsigned int *imageWidth, - unsigned int *imageHeight, - unsigned int *imageChannels, - const std::string& message, - BarcodeType type, - BarcodeQREncodingMode encodingMode, - BarcodeQRErrorCorrectionLevel correctionLevel, - int qrVersion, - int showText) + unsigned char **imageBuffer, + unsigned int *imageWidth, + unsigned int *imageHeight, + unsigned int *imageChannels, + const std::string& message, + BarcodeType type, + BarcodeQREncodingMode encodingMode, + BarcodeQRErrorCorrectionLevel correctionLevel, + int qrVersion, + int showText) { - zint_symbol *symbol = ZBarcode_Create(); - - if(symbol == NULL) - { - LOGE("ZBarcode creation failed"); - - return BARCODE_ERROR_ENCODING_PROBLEM; - } - - int error = createBarcode( - message, - type, - encodingMode, - correctionLevel, - qrVersion, - showText, - symbol); - - if (error != BARCODE_ERROR_NONE) - { - LOGE("Barcode creation failed, clean memory"); - ZBarcode_Delete(symbol); - return error; - } - - // fill output buffer - *imageWidth = symbol->bitmap_width; - *imageHeight = symbol->bitmap_height; - *imageChannels = 3; - const unsigned int imageBufferSize = (*imageWidth) * (*imageHeight) * (*imageChannels); - *imageBuffer = new unsigned char [imageBufferSize]; - memmove(*imageBuffer, symbol->bitmap, imageBufferSize); - - LOGI("Barcode buffer has been successfully generated, clean memory"); - ZBarcode_Delete(symbol); - - return BARCODE_ERROR_NONE; + zint_symbol *symbol = ZBarcode_Create(); + + 
if(symbol == NULL) { + LOGE("ZBarcode creation failed"); + + return BARCODE_ERROR_ENCODING_PROBLEM; + } + + int error = createBarcode( + message, + type, + encodingMode, + correctionLevel, + qrVersion, + showText, + symbol); + + if (error != BARCODE_ERROR_NONE) { + LOGE("Barcode creation failed, clean memory"); + ZBarcode_Delete(symbol); + return error; + } + + /* fill output buffer */ + *imageWidth = symbol->bitmap_width; + *imageHeight = symbol->bitmap_height; + *imageChannels = 3; + const unsigned int imageBufferSize = (*imageWidth) * (*imageHeight) * (*imageChannels); + *imageBuffer = new unsigned char[imageBufferSize]; + memmove(*imageBuffer, symbol->bitmap, imageBufferSize); + + LOGI("Barcode buffer has been successfully generated, clean memory"); + ZBarcode_Delete(symbol); + + return BARCODE_ERROR_NONE; } } /* Barcode */ diff --git a/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp b/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp index 19657d3..44c68b2 100644 --- a/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp +++ b/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp @@ -26,353 +26,326 @@ using namespace MediaVision::Barcode; -namespace -{ - +namespace { int alphanumToUpper(std::string& strToTransform) { - std::string tempString = strToTransform; - std::transform(tempString.begin(), tempString.end(), - tempString.begin(), ::toupper); - - if (std::string::npos != tempString.find_first_not_of("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:")) - { - LOGE("Barcode message can't be converted according to support " - "alphanumeric (0..9, A..Z, space, $, %, *, +, -, ., /, :) " - "mode: %s", strToTransform.c_str()); - return BARCODE_ERROR_INVALID_DATA; - } - - LOGI("Barcode message was converted according to support alphanumeric " - "mode: %s -> %s", strToTransform.c_str(), tempString.c_str()); - strToTransform = tempString; - return BARCODE_ERROR_NONE; + std::string tempString = strToTransform; + 
std::transform(tempString.begin(), tempString.end(), + tempString.begin(), ::toupper); + + if (std::string::npos != tempString.find_first_not_of("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:")) { + LOGE("Barcode message can't be converted according to support " + "alphanumeric (0..9, A..Z, space, $, %, *, +, -, ., /, :) " + "mode: %s", strToTransform.c_str()); + return BARCODE_ERROR_INVALID_DATA; + } + + LOGI("Barcode message was converted according to support alphanumeric " + "mode: %s -> %s", strToTransform.c_str(), tempString.c_str()); + strToTransform = tempString; + return BARCODE_ERROR_NONE; } BarcodeType convertBarcodeType(mv_barcode_type_e type) { - BarcodeType barcodeType = BARCODE_QR; - switch (type) - { - case MV_BARCODE_UPC_A: - barcodeType = BARCODE_UPCA; - break; - case MV_BARCODE_UPC_E: - barcodeType = BARCODE_UPCE; - break; - case MV_BARCODE_EAN_8: - barcodeType = BARCODE_EAN8; - break; - case MV_BARCODE_EAN_13: - barcodeType = BARCODE_EAN13; - break; - case MV_BARCODE_CODE128: - barcodeType = BARCODE_CODE128; - break; - case MV_BARCODE_CODE39: - barcodeType = BARCODE_CODE39; - break; - case MV_BARCODE_I2_5: - barcodeType = BARCODE_INTERLEAVE_2_5; - break; - default: - break; - } - - LOGI("Media vision barcode type has been converted to ZInt barcode type " - "(%i -> %i)", type, barcodeType); - return barcodeType; + BarcodeType barcodeType = BARCODE_QR; + switch (type) { + case MV_BARCODE_UPC_A: + barcodeType = BARCODE_UPCA; + break; + case MV_BARCODE_UPC_E: + barcodeType = BARCODE_UPCE; + break; + case MV_BARCODE_EAN_8: + barcodeType = BARCODE_EAN8; + break; + case MV_BARCODE_EAN_13: + barcodeType = BARCODE_EAN13; + break; + case MV_BARCODE_CODE128: + barcodeType = BARCODE_CODE128; + break; + case MV_BARCODE_CODE39: + barcodeType = BARCODE_CODE39; + break; + case MV_BARCODE_I2_5: + barcodeType = BARCODE_INTERLEAVE_2_5; + break; + default: + break; + } + + LOGI("Media vision barcode type has been converted to ZInt barcode type " + "(%i -> %i)", 
type, barcodeType); + return barcodeType; } BarcodeQREncodingMode convertEncodingMode(mv_barcode_qr_mode_e mode) { - BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_ALPHANUMERIC; - - switch (mode) - { - case MV_BARCODE_QR_MODE_NUMERIC: - encodingMode = BARCODE_QR_MODE_NUMERIC; - break; - case MV_BARCODE_QR_MODE_BYTE: - encodingMode = BARCODE_QR_MODE_BYTE; - break; - case MV_BARCODE_QR_MODE_UTF8: - encodingMode = BARCODE_QR_MODE_UTF8; - break; - default: - break; - } - - LOGI("Media vision QRCode encoding mode has been converted to " - "ZInt encoding mode (%i -> %i)", mode, encodingMode); - return encodingMode; + BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_ALPHANUMERIC; + + switch (mode) { + case MV_BARCODE_QR_MODE_NUMERIC: + encodingMode = BARCODE_QR_MODE_NUMERIC; + break; + case MV_BARCODE_QR_MODE_BYTE: + encodingMode = BARCODE_QR_MODE_BYTE; + break; + case MV_BARCODE_QR_MODE_UTF8: + encodingMode = BARCODE_QR_MODE_UTF8; + break; + default: + break; + } + + LOGI("Media vision QRCode encoding mode has been converted to " + "ZInt encoding mode (%i -> %i)", mode, encodingMode); + return encodingMode; } BarcodeQRErrorCorrectionLevel convertECC(mv_barcode_qr_ecc_e ecc) { - BarcodeQRErrorCorrectionLevel ecclevel = BARCODE_QR_ECC_LOW; - - switch (ecc) - { - case MV_BARCODE_QR_ECC_MEDIUM: - ecclevel = BARCODE_QR_ECC_MEDIUM; - break; - case MV_BARCODE_QR_ECC_QUARTILE: - ecclevel = BARCODE_QR_ECC_QUARTILE; - break; - case MV_BARCODE_QR_ECC_HIGH: - ecclevel = BARCODE_QR_ECC_HIGH; - break; - default: - break; - } - - LOGI("Media vision ECC level has been converted to " - "ZInt ECC level (%i -> %i)", ecc, ecclevel); - return ecclevel; + BarcodeQRErrorCorrectionLevel ecclevel = BARCODE_QR_ECC_LOW; + + switch (ecc) { + case MV_BARCODE_QR_ECC_MEDIUM: + ecclevel = BARCODE_QR_ECC_MEDIUM; + break; + case MV_BARCODE_QR_ECC_QUARTILE: + ecclevel = BARCODE_QR_ECC_QUARTILE; + break; + case MV_BARCODE_QR_ECC_HIGH: + ecclevel = BARCODE_QR_ECC_HIGH; + break; + default: + break; 
+ } + + LOGI("Media vision ECC level has been converted to " + "ZInt ECC level (%i -> %i)", ecc, ecclevel); + return ecclevel; } int convertBarcodeError(int barcodeError) { - int mvError = MEDIA_VISION_ERROR_NONE; - - switch (barcodeError) - { - case BARCODE_WARNING_INVALID_OPTION: - mvError = MEDIA_VISION_ERROR_INVALID_PARAMETER; - break; - case BARCODE_ERROR_TOO_LONG: - mvError = MEDIA_VISION_ERROR_MSG_TOO_LONG; - break; - case BARCODE_ERROR_INVALID_DATA: - mvError = MEDIA_VISION_ERROR_INVALID_DATA; - break; - case BARCODE_ERROR_INVALID_CHECK: - mvError = MEDIA_VISION_ERROR_INVALID_PARAMETER; - break; - case BARCODE_ERROR_INVALID_OPTION: - mvError = MEDIA_VISION_ERROR_INVALID_PARAMETER; - break; - case BARCODE_ERROR_ENCODING_PROBLEM: - mvError = MEDIA_VISION_ERROR_INTERNAL; - break; - case BARCODE_ERROR_FILE_ACCESS: - mvError = MEDIA_VISION_ERROR_PERMISSION_DENIED; - break; - case BARCODE_ERROR_MEMORY: - mvError = MEDIA_VISION_ERROR_OUT_OF_MEMORY; - break; - case BARCODE_ERROR_INVALID_PATH: - mvError = MEDIA_VISION_ERROR_INVALID_PATH; - default: - break; - } - - LOGI("ZInt error code has been converted to the media vision error code " - "(%i -> (0x%08x))", barcodeError, mvError); - return mvError; + int mvError = MEDIA_VISION_ERROR_NONE; + + switch (barcodeError) { + case BARCODE_WARNING_INVALID_OPTION: + mvError = MEDIA_VISION_ERROR_INVALID_PARAMETER; + break; + case BARCODE_ERROR_TOO_LONG: + mvError = MEDIA_VISION_ERROR_MSG_TOO_LONG; + break; + case BARCODE_ERROR_INVALID_DATA: + mvError = MEDIA_VISION_ERROR_INVALID_DATA; + break; + case BARCODE_ERROR_INVALID_CHECK: + mvError = MEDIA_VISION_ERROR_INVALID_PARAMETER; + break; + case BARCODE_ERROR_INVALID_OPTION: + mvError = MEDIA_VISION_ERROR_INVALID_PARAMETER; + break; + case BARCODE_ERROR_ENCODING_PROBLEM: + mvError = MEDIA_VISION_ERROR_INTERNAL; + break; + case BARCODE_ERROR_FILE_ACCESS: + mvError = MEDIA_VISION_ERROR_PERMISSION_DENIED; + break; + case BARCODE_ERROR_MEMORY: + mvError = 
MEDIA_VISION_ERROR_OUT_OF_MEMORY; + break; + case BARCODE_ERROR_INVALID_PATH: + mvError = MEDIA_VISION_ERROR_INVALID_PATH; + default: + break; + } + + LOGI("ZInt error code has been converted to the media vision error code " + "(%i -> (0x%08x))", barcodeError, mvError); + return mvError; } BarcodeImageFormat convertImageFormat(mv_barcode_image_format_e format) { - BarcodeImageFormat imageFormat = BARCODE_IMAGE_PNG; - - switch (format) - { - case MV_BARCODE_IMAGE_FORMAT_JPG: - imageFormat = BARCODE_IMAGE_JPG; - break; - case MV_BARCODE_IMAGE_FORMAT_BMP: - imageFormat = BARCODE_IMAGE_BMP; - break; - default: - break; - } - - LOGI("Media vision image format has been converted to " - "internal image format (%i -> %i)", format, imageFormat); - return imageFormat; + BarcodeImageFormat imageFormat = BARCODE_IMAGE_PNG; + + switch (format) { + case MV_BARCODE_IMAGE_FORMAT_JPG: + imageFormat = BARCODE_IMAGE_JPG; + break; + case MV_BARCODE_IMAGE_FORMAT_BMP: + imageFormat = BARCODE_IMAGE_BMP; + break; + default: + break; + } + + LOGI("Media vision image format has been converted to " + "internal image format (%i -> %i)", format, imageFormat); + return imageFormat; } } /* anonymous namespace */ int mv_barcode_generate_source_open( - mv_engine_config_h engine_cfg, - const char *message, - mv_barcode_type_e type, - mv_barcode_qr_mode_e qr_enc_mode, - mv_barcode_qr_ecc_e qr_ecc, - int qr_version, - mv_source_h image) + mv_engine_config_h engine_cfg, + const char *message, + mv_barcode_type_e type, + mv_barcode_qr_mode_e qr_enc_mode, + mv_barcode_qr_ecc_e qr_ecc, + int qr_version, + mv_source_h image) { - std::string messageStr = std::string(message); - - if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC && - messageStr.find_first_not_of("0123456789") != std::string::npos) - { - LOGE("Barcode message can't be used according to support " - "numeric (0..9) mode: %s", messageStr.c_str()); - return MEDIA_VISION_ERROR_INVALID_DATA; - } - - int error = BARCODE_ERROR_NONE; - if (MV_BARCODE_QR 
== type && - MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode) - { - error = alphanumToUpper(messageStr); - if (BARCODE_ERROR_NONE != error) - { - return convertBarcodeError(error); - } - } - - unsigned char *imageBuffer = NULL; - unsigned int imageWidth = 0u; - unsigned int imageHeight = 0u; - unsigned int imageChannels = 0u; - - int showText = 0; - error = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_GENERATE_ATTR_TEXT", &showText); - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGW("mv_engine_config_get_int_attribute failed"); - return error; - } - - if (showText == BARCODE_GEN_TEXT_VISIBLE && type == MV_BARCODE_QR) - { - LOGW("QR code generation with visible text is not supported"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - error = BarcodeGenerator::generateBarcodeToBuffer( - &imageBuffer, - &imageWidth, - &imageHeight, - &imageChannels, - messageStr, - convertBarcodeType(type), - convertEncodingMode(qr_enc_mode), - convertECC(qr_ecc), - qr_version, - showText); - - if (error != BARCODE_ERROR_NONE) - { - LOGE("Barcode generation to the buffer failed"); - if (NULL != imageBuffer) - { - LOGI("Delete temporal buffer"); - delete[] imageBuffer; - } - return convertBarcodeError(error); - } - - const unsigned int imageBufferSize = imageWidth * imageHeight * imageChannels; - - LOGI("Barcode has been generated to the buffer: " - "Buffer size = %ui x %ui; Channels = %ui; Message = %s", - imageWidth, imageHeight, imageChannels, messageStr.c_str()); - - error = mv_source_fill_by_buffer_c( - image, - imageBuffer, - imageBufferSize, - imageWidth, - imageHeight, - MEDIA_VISION_COLORSPACE_RGB888); - - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGE("Meidiavision source fill by generated buffer failed"); - } - - if (NULL != imageBuffer) - { - LOGI("Delete temporal buffer"); - delete[] imageBuffer; - } - - return error; + std::string messageStr = std::string(message); + + if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC && + 
messageStr.find_first_not_of("0123456789") != std::string::npos) { + LOGE("Barcode message can't be used according to support " + "numeric (0..9) mode: %s", messageStr.c_str()); + return MEDIA_VISION_ERROR_INVALID_DATA; + } + + int error = BARCODE_ERROR_NONE; + if (MV_BARCODE_QR == type && + MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode) { + error = alphanumToUpper(messageStr); + if (BARCODE_ERROR_NONE != error) { + return convertBarcodeError(error); + } + } + + unsigned char *imageBuffer = NULL; + unsigned int imageWidth = 0u; + unsigned int imageHeight = 0u; + unsigned int imageChannels = 0u; + + int showText = 0; + error = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_GENERATE_ATTR_TEXT", &showText); + if (error != MEDIA_VISION_ERROR_NONE) { + LOGW("mv_engine_config_get_int_attribute failed"); + return error; + } + + if (showText == BARCODE_GEN_TEXT_VISIBLE && type == MV_BARCODE_QR) { + LOGW("QR code generation with visible text is not supported"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + error = BarcodeGenerator::generateBarcodeToBuffer( + &imageBuffer, + &imageWidth, + &imageHeight, + &imageChannels, + messageStr, + convertBarcodeType(type), + convertEncodingMode(qr_enc_mode), + convertECC(qr_ecc), + qr_version, + showText); + + if (error != BARCODE_ERROR_NONE) { + LOGE("Barcode generation to the buffer failed"); + if (NULL != imageBuffer) { + LOGI("Delete temporal buffer"); + delete[] imageBuffer; + } + return convertBarcodeError(error); + } + + const unsigned int imageBufferSize = imageWidth * imageHeight * imageChannels; + + LOGI("Barcode has been generated to the buffer: " + "Buffer size = %ui x %ui; Channels = %ui; Message = %s", + imageWidth, imageHeight, imageChannels, messageStr.c_str()); + + error = mv_source_fill_by_buffer_c( + image, + imageBuffer, + imageBufferSize, + imageWidth, + imageHeight, + MEDIA_VISION_COLORSPACE_RGB888); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Meidiavision source fill by generated 
buffer failed"); + } + + if (NULL != imageBuffer) { + LOGI("Delete temporal buffer"); + delete[] imageBuffer; + } + + return error; } int mv_barcode_generate_image_open( - mv_engine_config_h engine_cfg, - const char *message, - int image_width, - int image_height, - mv_barcode_type_e type, - mv_barcode_qr_mode_e qr_enc_mode, - mv_barcode_qr_ecc_e qr_ecc, - int qr_version, - const char *image_path, - mv_barcode_image_format_e image_format) + mv_engine_config_h engine_cfg, + const char *message, + int image_width, + int image_height, + mv_barcode_type_e type, + mv_barcode_qr_mode_e qr_enc_mode, + mv_barcode_qr_ecc_e qr_ecc, + int qr_version, + const char *image_path, + mv_barcode_image_format_e image_format) { - std::string messageStr = std::string(message); - - if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC && - messageStr.find_first_not_of("0123456789") != std::string::npos) - { - LOGE("Barcode message can't be used according to support " - "numeric (0..9) mode: %s", messageStr.c_str()); - return MEDIA_VISION_ERROR_INVALID_DATA; - } - - if (NULL == image_path) - { - LOGE("Can't save barcode image to the path[%p]. 
The path has to be specified", image_path); - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - int error = BARCODE_ERROR_NONE; - if (MV_BARCODE_QR == type && - MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode) - { - error = alphanumToUpper(messageStr); - if (BARCODE_ERROR_NONE != error) - { - return convertBarcodeError(error); - } - } - - int showText = 0; - error = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_GENERATE_ATTR_TEXT", &showText); - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGW("mv_engine_config_get_int_attribute failed"); - return error; - } - - if (showText == BARCODE_GEN_TEXT_VISIBLE && type == MV_BARCODE_QR) - { - LOGW("QR code generation with visible text is not supported"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - error = BarcodeGenerator::generateBarcodeToImage( - std::string(image_path), - convertImageFormat(image_format), - image_width, - image_height, - messageStr, - convertBarcodeType(type), - convertEncodingMode(qr_enc_mode), - convertECC(qr_ecc), - qr_version, - showText); - - if (error != BARCODE_ERROR_NONE) - { - LOGE("Barcode generation to the image file failed"); - } - else - { - LOGI("Barcode has been generated to the image: " - "Image size = %ui x %ui; Message = %s", - image_width, image_height, messageStr.c_str()); - } - - return convertBarcodeError(error); + std::string messageStr = std::string(message); + + if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC && + messageStr.find_first_not_of("0123456789") != std::string::npos) { + LOGE("Barcode message can't be used according to support " + "numeric (0..9) mode: %s", messageStr.c_str()); + return MEDIA_VISION_ERROR_INVALID_DATA; + } + + if (NULL == image_path) { + LOGE("Can't save barcode image to the path[%p]. 
The path has to be specified", image_path); + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + int error = BARCODE_ERROR_NONE; + if (MV_BARCODE_QR == type && + MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode) { + error = alphanumToUpper(messageStr); + if (BARCODE_ERROR_NONE != error) { + return convertBarcodeError(error); + } + } + + int showText = 0; + error = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_GENERATE_ATTR_TEXT", &showText); + if (error != MEDIA_VISION_ERROR_NONE) { + LOGW("mv_engine_config_get_int_attribute failed"); + return error; + } + + if (showText == BARCODE_GEN_TEXT_VISIBLE && type == MV_BARCODE_QR) { + LOGW("QR code generation with visible text is not supported"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + error = BarcodeGenerator::generateBarcodeToImage( + std::string(image_path), + convertImageFormat(image_format), + image_width, + image_height, + messageStr, + convertBarcodeType(type), + convertEncodingMode(qr_enc_mode), + convertECC(qr_ecc), + qr_version, + showText); + + if (error != BARCODE_ERROR_NONE) { + LOGE("Barcode generation to the image file failed"); + } else { + LOGI("Barcode has been generated to the image: " + "Image size = %ui x %ui; Message = %s", + image_width, image_height, messageStr.c_str()); + } + + return convertBarcodeError(error); } - diff --git a/mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h b/mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h index 2076b0c..2fbd935 100644 --- a/mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h +++ b/mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h @@ -55,13 +55,13 @@ extern "C" { * @see mv_barcode_generate_image_lic() */ int mv_barcode_generate_source_lic( - mv_engine_config_h engine_cfg, - const char *message, - mv_barcode_type_e type, - mv_barcode_qr_mode_e qr_enc_mode, - mv_barcode_qr_ecc_e qr_ecc, - int qr_version, - mv_source_h image); + mv_engine_config_h engine_cfg, + const char 
*message, + mv_barcode_type_e type, + mv_barcode_qr_mode_e qr_enc_mode, + mv_barcode_qr_ecc_e qr_ecc, + int qr_version, + mv_source_h image); /** * @brief Generates image file with barcode. @@ -89,16 +89,16 @@ int mv_barcode_generate_source_lic( * @see mv_barcode_generate_source_lic() */ int mv_barcode_generate_image_lic( - mv_engine_config_h engine_cfg, - const char *message, - int image_width, - int image_height, - mv_barcode_type_e type, - mv_barcode_qr_mode_e qr_enc_mode, - mv_barcode_qr_ecc_e qr_ecc, - int qr_version, - const char *image_path, - mv_barcode_image_format_e image_format); + mv_engine_config_h engine_cfg, + const char *message, + int image_width, + int image_height, + mv_barcode_type_e type, + mv_barcode_qr_mode_e qr_enc_mode, + mv_barcode_qr_ecc_e qr_ecc, + int qr_version, + const char *image_path, + mv_barcode_image_format_e image_format); #ifdef __cplusplus } diff --git a/mv_barcode/barcode_generator_lic/src/mv_barcode_generate_lic.c b/mv_barcode/barcode_generator_lic/src/mv_barcode_generate_lic.c index d57621f..fada9e7 100644 --- a/mv_barcode/barcode_generator_lic/src/mv_barcode_generate_lic.c +++ b/mv_barcode/barcode_generator_lic/src/mv_barcode_generate_lic.c @@ -17,29 +17,28 @@ #include "mv_barcode_generate_lic.h" int mv_barcode_generate_source_lic( - mv_engine_config_h engine_cfg, - const char *message, - mv_barcode_type_e type, - mv_barcode_qr_mode_e qr_enc_mode, - mv_barcode_qr_ecc_e qr_ecc, - int qr_version, - mv_source_h image) + mv_engine_config_h engine_cfg, + const char *message, + mv_barcode_type_e type, + mv_barcode_qr_mode_e qr_enc_mode, + mv_barcode_qr_ecc_e qr_ecc, + int qr_version, + mv_source_h image) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED;; + return MEDIA_VISION_ERROR_NOT_SUPPORTED;; } int mv_barcode_generate_image_lic( - mv_engine_config_h engine_cfg, - const char *message, - int image_width, - int image_height, - mv_barcode_type_e type, - mv_barcode_qr_mode_e qr_enc_mode, - mv_barcode_qr_ecc_e qr_ecc, - int 
qr_version, - const char *image_path, - mv_barcode_image_format_e image_format) + mv_engine_config_h engine_cfg, + const char *message, + int image_width, + int image_height, + mv_barcode_type_e type, + mv_barcode_qr_mode_e qr_enc_mode, + mv_barcode_qr_ecc_e qr_ecc, + int qr_version, + const char *image_path, + mv_barcode_image_format_e image_format) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } - diff --git a/mv_common/include/EngineConfig.h b/mv_common/include/EngineConfig.h index ac580c9..5b7e871 100644 --- a/mv_common/include/EngineConfig.h +++ b/mv_common/include/EngineConfig.h @@ -27,154 +27,150 @@ * @brief Engine Configuration class definition. */ -namespace MediaVision -{ -namespace Common -{ +namespace MediaVision { +namespace Common { typedef std::map::const_iterator DictDblConstIter; typedef std::map::const_iterator DictIntConstIter; typedef std::map::const_iterator DictBoolConstIter; typedef std::map::const_iterator DictStrConstIter; -class EngineConfig -{ +class EngineConfig { public: - /** - * @brief Engine configuration constructor. - * @details Create new engine configuration dictionary and set default - * attributes values. - * - * @since_tizen 2.4 - */ - EngineConfig(); - - /** - * @brief Engine configuration destructor. - */ - virtual ~EngineConfig(); - - /** - * @brief Sets attribute with double value. - * - * @since_tizen 2.4 - * @param [in] key The string name of the attribute - * @param [in] value The double attribute value to be set - * @return @c MEDIA_VISION_ERROR_NONE on success,\n - * otherwise a negative error value - */ - int setAttribute(const std::string& key, const double value); - - /** - * @brief Sets attribute with integer value. 
- * - * @since_tizen 2.4 - * @param [in] key The string name of the attribute - * @param [in] value The integer attribute value to be set - * @return @c MEDIA_VISION_ERROR_NONE on success,\n - * otherwise a negative error value - */ - int setAttribute(const std::string& key, const int value); - - /** - * @brief Sets attribute with boolean value. - * - * @since_tizen 2.4 - * @param [in] key The string name of the attribute - * @param [in] value The boolean attribute value to be set - * @return @c MEDIA_VISION_ERROR_NONE on success,\n - * otherwise a negative error value - */ - int setAttribute(const std::string& key, const bool value); - - /** - * @brief Sets attribute with string value. - * - * @since_tizen 2.4 - * @param [in] key The string name of the attribute - * @param [in] value The string attribute value to be set - * @return @c MEDIA_VISION_ERROR_NONE on success,\n - * otherwise a negative error value - */ - int setAttribute(const std::string& key, const std::string& value); - - /** - * @brief Gets double attribute value by attribute name. - * - * @since_tizen 2.4 - * @param [in] key The string name of the attribute - * @param [out] value r The double attribute value to be obtained - * @return @c MEDIA_VISION_ERROR_NONE on success,\n - * otherwise a negative error value - * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key - * doesn't exist in the engine configuration dictionary - */ - int getDoubleAttribute(const std::string& key, double *value) const; - - /** - * @brief Gets integer attribute value by attribute name. 
- * - * @since_tizen 2.4 - * @param [in] key The string name of the attribute - * @param [out] value The integer attribute value to be obtained - * @return @c MEDIA_VISION_ERROR_NONE on success,\n - * otherwise a negative error value - * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key - * doesn't exist in the engine configuration dictionary - */ - int getIntegerAttribute(const std::string& key, int *value) const; - - /** - * @brief Gets boolean attribute value by attribute name. - * - * @since_tizen 2.4 - * @param [in] key The string name of the attribute - * @param [out] value The boolean attribute value to be obtained - * @return @c MEDIA_VISION_ERROR_NONE on success,\n - * otherwise a negative error value - * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key - * doesn't exist in the engine configuration dictionary - */ - int getBooleanAttribute(const std::string& key, bool *value) const; - - /** - * @brief Gets string attribute value by attribute name. - * - * @since_tizen 2.4 - * @param [in] key The string name of the attribute - * @param [out] value The string attribute value to be obtained - * @return @c MEDIA_VISION_ERROR_NONE on success,\n - * otherwise a negative error value - * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key - * doesn't exist in the engine configuration dictionary - */ - int getStringAttribute(const std::string& key, std::string *value) const; + /** + * @brief Engine configuration constructor. + * @details Create new engine configuration dictionary and set default + * attributes values. + * + * @since_tizen 2.4 + */ + EngineConfig(); + + /** + * @brief Engine configuration destructor. + */ + virtual ~EngineConfig(); + + /** + * @brief Sets attribute with double value. 
+ * + * @since_tizen 2.4 + * @param [in] key The string name of the attribute + * @param [in] value The double attribute value to be set + * @return @c MEDIA_VISION_ERROR_NONE on success,\n + * otherwise a negative error value + */ + int setAttribute(const std::string& key, const double value); + + /** + * @brief Sets attribute with integer value. + * + * @since_tizen 2.4 + * @param [in] key The string name of the attribute + * @param [in] value The integer attribute value to be set + * @return @c MEDIA_VISION_ERROR_NONE on success,\n + * otherwise a negative error value + */ + int setAttribute(const std::string& key, const int value); + + /** + * @brief Sets attribute with boolean value. + * + * @since_tizen 2.4 + * @param [in] key The string name of the attribute + * @param [in] value The boolean attribute value to be set + * @return @c MEDIA_VISION_ERROR_NONE on success,\n + * otherwise a negative error value + */ + int setAttribute(const std::string& key, const bool value); + + /** + * @brief Sets attribute with string value. + * + * @since_tizen 2.4 + * @param [in] key The string name of the attribute + * @param [in] value The string attribute value to be set + * @return @c MEDIA_VISION_ERROR_NONE on success,\n + * otherwise a negative error value + */ + int setAttribute(const std::string& key, const std::string& value); + + /** + * @brief Gets double attribute value by attribute name. + * + * @since_tizen 2.4 + * @param [in] key The string name of the attribute + * @param [out] value r The double attribute value to be obtained + * @return @c MEDIA_VISION_ERROR_NONE on success,\n + * otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key + * doesn't exist in the engine configuration dictionary + */ + int getDoubleAttribute(const std::string& key, double *value) const; + + /** + * @brief Gets integer attribute value by attribute name. 
+ * + * @since_tizen 2.4 + * @param [in] key The string name of the attribute + * @param [out] value The integer attribute value to be obtained + * @return @c MEDIA_VISION_ERROR_NONE on success,\n + * otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key + * doesn't exist in the engine configuration dictionary + */ + int getIntegerAttribute(const std::string& key, int *value) const; + +/** + * @brief Gets boolean attribute value by attribute name. + * + * @since_tizen 2.4 + * @param [in] key The string name of the attribute + * @param [out] value The boolean attribute value to be obtained + * @return @c MEDIA_VISION_ERROR_NONE on success,\n + * otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key + * doesn't exist in the engine configuration dictionary + */ + int getBooleanAttribute(const std::string& key, bool *value) const; + + /** + * @brief Gets string attribute value by attribute name. 
+ * + * @since_tizen 2.4 + * @param [in] key The string name of the attribute + * @param [out] value The string attribute value to be obtained + * @return @c MEDIA_VISION_ERROR_NONE on success,\n + * otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key + * doesn't exist in the engine configuration dictionary + */ + int getStringAttribute(const std::string& key, std::string *value) const; public: - static bool setDefaultConfigFilePath(const std::string& confFilePath); + static bool setDefaultConfigFilePath(const std::string& confFilePath); - static const std::map& getDefaultDblDict(); - static const std::map& getDefaultIntDict(); - static const std::map& getDefaultBoolDict(); - static const std::map& getDefaultStrDict(); - static int cacheDictionaries( - bool isLazyCache = true, - std::string configFilePath = DefConfigFilePath); + static const std::map& getDefaultDblDict(); + static const std::map& getDefaultIntDict(); + static const std::map& getDefaultBoolDict(); + static const std::map& getDefaultStrDict(); + static int cacheDictionaries( + bool isLazyCache = true, + std::string configFilePath = DefConfigFilePath); private: - std::map m_dblDict; - std::map m_intDict; - std::map m_boolDict; - std::map m_strDict; + std::map m_dblDict; + std::map m_intDict; + std::map m_boolDict; + std::map m_strDict; private: - static std::string DefConfigFilePath; - - static std::map DefDblDict; - static std::map DefIntDict; - static std::map DefBoolDict; - static std::map DefStrDict; + static std::string DefConfigFilePath; + static std::map DefDblDict; + static std::map DefIntDict; + static std::map DefBoolDict; + static std::map DefStrDict; }; } /* Common */ diff --git a/mv_common/include/MediaSource.h b/mv_common/include/MediaSource.h index 454f69f..cc79f74 100644 --- a/mv_common/include/MediaSource.h +++ b/mv_common/include/MediaSource.h @@ -25,121 +25,115 @@ * @brief This file contains the MediaSource class. 
*/ -namespace MediaVision -{ -namespace Common -{ - +namespace MediaVision { +namespace Common { /** * @class MediaSource * @brief The Media Source container * @details It is class which contains Media Source information. This class * will be use in the Media Vision as simple image. */ -class MediaSource -{ +class MediaSource { public: + /** + * @brief Creates a MediaSource. + * @details Default parameters values of the MediaSource will be: zero for + * width, height and buffer size; NULL for buffer; + * MEDIA_VISION_COLORSPACE_INVALID for colorspace. + * + * @since_tizen 2.4 + * + * @see MediaSource::~MediaSource() + */ + MediaSource(); + + /** + * @brief Destroys the MediaSource and releases all its resources. + * + * @since_tizen 2.4 + * + * @see MediaSource::MediaSource() + */ + virtual ~MediaSource(); + + /** + * @brief Clears the MediaSource. + * @details Releases all internal resources and set parameters to default values. + * + * @since_tizen 2.4 + * + * @see MediaSource::MediaSource() + * @see MediaSource::fill() + */ + void clear(void); + + /** + * @brief Fills the MediaSource based on the buffer and metadata. + * + * @since_tizen 2.4 + * @param [in] buffer The buffer of image data + * @param [in] bufferSize The buffer size + * @param [in] width The image width + * @param [in] height The image height + * @param [in] colorspace The image colorspace + * @return true if filled process is ok. Otherwise return false. + * + * @see MediaSource::MediaSource() + * @see MediaSource::clear() + */ + bool fill(const unsigned char *buffer, unsigned int bufferSize, unsigned int + width, unsigned int height, mv_colorspace_e colorspace); + + /** + * @brief Gets data buffer of the MediaSource. + * + * @since_tizen 2.4 + * @return Pointer to the data buffer. + */ + unsigned char *getBuffer(void) const; - /** - * @brief Creates a MediaSource. 
- * @details Default parameters values of the MediaSource will be: zero for - * width, height and buffer size; NULL for buffer; - * MEDIA_VISION_COLORSPACE_INVALID for colorspace. - * - * @since_tizen 2.4 - * - * @see MediaSource::~MediaSource() - */ - MediaSource(); - - /** - * @brief Destroys the MediaSource and releases all its resources. - * - * @since_tizen 2.4 - * - * @see MediaSource::MediaSource() - */ - virtual ~MediaSource(); - - /** - * @brief Clears the MediaSource. - * @details Releases all internal resources and set parameters to default values. - * - * @since_tizen 2.4 - * - * @see MediaSource::MediaSource() - * @see MediaSource::fill() - */ - void clear(void); - - /** - * @brief Fills the MediaSource based on the buffer and metadata. - * - * @since_tizen 2.4 - * @param [in] buffer The buffer of image data - * @param [in] bufferSize The buffer size - * @param [in] width The image width - * @param [in] height The image height - * @param [in] colorspace The image colorspace - * @return true if filled process is ok. Otherwise return false. - * - * @see MediaSource::MediaSource() - * @see MediaSource::clear() - */ - bool fill(const unsigned char *buffer, unsigned int bufferSize, unsigned int - width, unsigned int height, mv_colorspace_e colorspace); - - /** - * @brief Gets data buffer of the MediaSource. - * - * @since_tizen 2.4 - * @return Pointer to the data buffer. - */ - unsigned char *getBuffer(void) const; - - /** - * @brief Gets buffer size of the MediaSource. - * - * @since_tizen 2.4 - * @return Size of data buffer. - */ - unsigned int getBufferSize(void) const; - - /** - * @brief Gets image width of the MediaSource. - * - * @since_tizen 2.4 - * @return Width of image. - */ - unsigned int getWidth(void) const; - - /** - * @brief Gets image height of the MediaSource. - * - * @since_tizen 2.4 - * @return Height of image. - */ - unsigned int getHeight(void) const; - - /** - * @brief Gets image colorspace of the MediaSource. 
- * - * @since_tizen 2.4 - * @return Colorspace of image. - */ - mv_colorspace_e getColorspace(void) const; +/** + * @brief Gets buffer size of the MediaSource. + * + * @since_tizen 2.4 + * @return Size of data buffer. + */ + unsigned int getBufferSize(void) const; + + /** + * @brief Gets image width of the MediaSource. + * + * @since_tizen 2.4 + * @return Width of image. + */ + unsigned int getWidth(void) const; + + /** + * @brief Gets image height of the MediaSource. + * + * @since_tizen 2.4 + * @return Height of image. + */ + unsigned int getHeight(void) const; + + /** + * @brief Gets image colorspace of the MediaSource. + * + * @since_tizen 2.4 + * @return Colorspace of image. + */ + mv_colorspace_e getColorspace(void) const; private: + unsigned char *m_pBuffer; /**< The data buffer */ - unsigned char *m_pBuffer; /**< The data buffer */ - - unsigned int m_bufferSize; /**< The buffer size */ + unsigned int m_bufferSize; /**< The buffer size */ - unsigned int m_width; /**< The image width */ + unsigned int m_width; /**< The image width */ - unsigned int m_height; /**< The image height */ + unsigned int m_height; /**< The image height */ - mv_colorspace_e m_colorspace; /**< The image colorspace */ + mv_colorspace_e m_colorspace; /**< The image colorspace */ }; } /* Common */ diff --git a/mv_common/include/mv_common_c.h b/mv_common/include/mv_common_c.h index a7fa1bb..9afa2d1 100644 --- a/mv_common/include/mv_common_c.h +++ b/mv_common/include/mv_common_c.h @@ -42,7 +42,7 @@ extern "C" { * @see mv_destroy_source_c() */ int mv_create_source_c( - mv_source_h *source); + mv_source_h *source); /** * @brief Destroys the source handle and releases all its resources. @@ -56,7 +56,7 @@ int mv_create_source_c( * @see mv_create_source_c() */ int mv_destroy_source_c( - mv_source_h source); + mv_source_h source); /** * @brief Fills the media source based on the media packet. 
@@ -78,8 +78,8 @@ int mv_destroy_source_c( * @see mv_destroy_source_c() */ int mv_source_fill_by_media_packet_c( - mv_source_h source, - media_packet_h media_packet); + mv_source_h source, + media_packet_h media_packet); /** * @brief Fills the media source based on the buffer and metadata. @@ -101,12 +101,12 @@ int mv_source_fill_by_media_packet_c( * @see mv_source_clear_c() */ int mv_source_fill_by_buffer_c( - mv_source_h source, - unsigned char *data_buffer, - unsigned int buffer_size, - unsigned int image_width, - unsigned int image_height, - mv_colorspace_e image_colorspace); + mv_source_h source, + unsigned char *data_buffer, + unsigned int buffer_size, + unsigned int image_width, + unsigned int image_height, + mv_colorspace_e image_colorspace); /** * @brief Clears the buffer of the media source. @@ -120,7 +120,7 @@ int mv_source_fill_by_buffer_c( * @see mv_source_fill_by_buffer_c() */ int mv_source_clear_c( - mv_source_h source); + mv_source_h source); /** * @brief Gets buffer of the media source. @@ -142,9 +142,9 @@ int mv_source_clear_c( * @see mv_source_get_colorspace_c() */ int mv_source_get_buffer_c( - mv_source_h source, - unsigned char **data_buffer, - unsigned int *buffer_size); + mv_source_h source, + unsigned char **data_buffer, + unsigned int *buffer_size); /** * @brief Gets height of the media source. @@ -161,8 +161,8 @@ int mv_source_get_buffer_c( * @see mv_source_get_buffer_c() */ int mv_source_get_height_c( - mv_source_h source, - unsigned int *image_height); + mv_source_h source, + unsigned int *image_height); /** * @brief Gets width of the media source. @@ -179,8 +179,8 @@ int mv_source_get_height_c( * @see mv_source_get_buffer_c() */ int mv_source_get_width_c( - mv_source_h source, - unsigned int *image_width); + mv_source_h source, + unsigned int *image_width); /** * @brief Gets colorspace of the media source. 
@@ -197,8 +197,8 @@ int mv_source_get_width_c( * @see mv_source_get_buffer_c() */ int mv_source_get_colorspace_c( - mv_source_h source, - mv_colorspace_e *image_colorspace); + mv_source_h source, + mv_colorspace_e *image_colorspace); /** * @brief Creates the handle to the configuration of engine. @@ -222,7 +222,7 @@ int mv_source_get_colorspace_c( * @see mv_engine_config_get_string_attribute_c() */ int mv_create_engine_config_c( - mv_engine_config_h *engine_cfg); + mv_engine_config_h *engine_cfg); /** * @brief Destroys the engine configuration handle and releases all its @@ -239,7 +239,7 @@ int mv_create_engine_config_c( * @see mv_create_engine_config_c() */ int mv_destroy_engine_config_c( - mv_engine_config_h engine_cfg); + mv_engine_config_h engine_cfg); /** * @brief Sets the double attribute to the configuration. @@ -261,9 +261,9 @@ int mv_destroy_engine_config_c( * @see mv_engine_config_set_string_attribute_c() */ int mv_engine_config_set_double_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - double value); + mv_engine_config_h engine_cfg, + const char *name, + double value); /** * @brief Sets the integer attribute to the configuration. @@ -285,9 +285,9 @@ int mv_engine_config_set_double_attribute_c( * @see mv_engine_config_set_string_attribute_c() */ int mv_engine_config_set_int_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - int value); + mv_engine_config_h engine_cfg, + const char *name, + int value); /** * @brief Sets the boolean attribute to the configuration. @@ -309,9 +309,9 @@ int mv_engine_config_set_int_attribute_c( * @see mv_engine_config_set_string_attribute_c() */ int mv_engine_config_set_bool_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - bool attribute); + mv_engine_config_h engine_cfg, + const char *name, + bool attribute); /** * @brief Sets the string attribute to the configuration. 
@@ -333,9 +333,9 @@ int mv_engine_config_set_bool_attribute_c( * @see mv_engine_config_set_bool_attribute_c() */ int mv_engine_config_set_string_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - const char *value); + mv_engine_config_h engine_cfg, + const char *name, + const char *value); /** * @brief Gets the double attribute from the configuration dictionary. @@ -359,9 +359,9 @@ int mv_engine_config_set_string_attribute_c( * @see mv_engine_config_get_string_attribute_c() */ int mv_engine_config_get_double_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - double *value); + mv_engine_config_h engine_cfg, + const char *name, + double *value); /** * @brief Gets the integer attribute from the configuration dictionary. @@ -385,9 +385,9 @@ int mv_engine_config_get_double_attribute_c( * @see mv_engine_config_get_string_attribute_c() */ int mv_engine_config_get_int_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - int *value); + mv_engine_config_h engine_cfg, + const char *name, + int *value); /** * @brief Gets the boolean attribute from the configuration dictionary. @@ -411,9 +411,9 @@ int mv_engine_config_get_int_attribute_c( * @see mv_engine_config_get_string_attribute_c() */ int mv_engine_config_get_bool_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - bool *value); + mv_engine_config_h engine_cfg, + const char *name, + bool *value); /** * @brief Gets the string attribute from the configuration dictionary. @@ -439,9 +439,9 @@ int mv_engine_config_get_bool_attribute_c( * @see mv_engine_config_get_bool_attribute_c() */ int mv_engine_config_get_string_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - char **value); + mv_engine_config_h engine_cfg, + const char *name, + char **value); /** * @brief Traverses the list of supported attribute names and types. 
@@ -482,8 +482,8 @@ int mv_engine_config_get_string_attribute_c( * @see mv_engine_config_get_string_attribute_c() */ int mv_engine_config_foreach_supported_attribute_c( - mv_supported_attribute_cb callback, - void *user_data); + mv_supported_attribute_cb callback, + void *user_data); #ifdef __cplusplus } diff --git a/mv_common/src/EngineConfig.cpp b/mv_common/src/EngineConfig.cpp index 64bf1d9..38e545f 100644 --- a/mv_common/src/EngineConfig.cpp +++ b/mv_common/src/EngineConfig.cpp @@ -25,13 +25,11 @@ * @brief Engine Configuration class methods implementation. */ -namespace MediaVision -{ -namespace Common -{ +namespace MediaVision { +namespace Common { std::string EngineConfig::DefConfigFilePath = - std::string("/usr/share/config/capi-media-vision/media-vision-config.json"); + std::string("/usr/share/config/capi-media-vision/media-vision-config.json"); std::map EngineConfig::DefDblDict; std::map EngineConfig::DefIntDict; @@ -40,323 +38,297 @@ std::map EngineConfig::DefStrDict; EngineConfig::EngineConfig() { - // Force load default attributes from configuration file - cacheDictionaries(false); - - // Insert default attribute values into creating engine configuration - m_dblDict.insert(getDefaultDblDict().begin(), getDefaultDblDict().end()); - m_intDict.insert(getDefaultIntDict().begin(), getDefaultIntDict().end()); - m_boolDict.insert(getDefaultBoolDict().begin(), getDefaultBoolDict().end()); - m_strDict.insert(getDefaultStrDict().begin(), getDefaultStrDict().end()); + // Force load default attributes from configuration file + cacheDictionaries(false); + + // Insert default attribute values into creating engine configuration + m_dblDict.insert(getDefaultDblDict().begin(), getDefaultDblDict().end()); + m_intDict.insert(getDefaultIntDict().begin(), getDefaultIntDict().end()); + m_boolDict.insert(getDefaultBoolDict().begin(), getDefaultBoolDict().end()); + m_strDict.insert(getDefaultStrDict().begin(), getDefaultStrDict().end()); } EngineConfig::~EngineConfig() { - ; 
/* NULL */ + ; /* NULL */ } int EngineConfig::setAttribute(const std::string& key, const double value) { - LOGI("Set double attribute for the engine config %p. [%s] = %f", - this, key.c_str(), value); + LOGI("Set double attribute for the engine config %p. [%s] = %f", + this, key.c_str(), value); - if (m_dblDict.find(key) == m_dblDict.end()) - { - LOGE("Double attribute [%s] can't be set because isn't supported", key.c_str()); - return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; - } + if (m_dblDict.find(key) == m_dblDict.end()) { + LOGE("Double attribute [%s] can't be set because isn't supported", key.c_str()); + return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; + } - m_dblDict[key] = value; + m_dblDict[key] = value; - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int EngineConfig::setAttribute(const std::string& key, const int value) { - LOGI("Set integer attribute for the engine config %p. [%s] = %i", - this, key.c_str(), value); + LOGI("Set integer attribute for the engine config %p. [%s] = %i", + this, key.c_str(), value); - if (m_intDict.find(key) == m_intDict.end()) - { - LOGE("Integer attribute [%s] can't be set because isn't supported", key.c_str()); - return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; - } + if (m_intDict.find(key) == m_intDict.end()) { + LOGE("Integer attribute [%s] can't be set because isn't supported", key.c_str()); + return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; + } - m_intDict[key] = value; + m_intDict[key] = value; - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int EngineConfig::setAttribute(const std::string& key, const bool value) { - LOGI("Set boolean attribute for the engine config %p. [%s] = %s", - this, key.c_str(), value ? "TRUE" : "FALSE"); + LOGI("Set boolean attribute for the engine config %p. [%s] = %s", + this, key.c_str(), value ? 
"TRUE" : "FALSE"); - if (m_boolDict.find(key) == m_boolDict.end()) - { - LOGE("Boolean attribute [%s] can't be set because isn't supported", key.c_str()); - return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; - } + if (m_boolDict.find(key) == m_boolDict.end()) { + LOGE("Boolean attribute [%s] can't be set because isn't supported", key.c_str()); + return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; + } - m_boolDict[key] = value; + m_boolDict[key] = value; - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int EngineConfig::setAttribute(const std::string& key, const std::string& value) { - LOGI("Set string attribute for the engine config %p. [%s] = %s", - this, key.c_str(), value.c_str()); + LOGI("Set string attribute for the engine config %p. [%s] = %s", + this, key.c_str(), value.c_str()); - if (m_strDict.find(key) == m_strDict.end()) - { - LOGE("String attribute [%s] can't be set because isn't supported", key.c_str()); - return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; - } + if (m_strDict.find(key) == m_strDict.end()) { + LOGE("String attribute [%s] can't be set because isn't supported", key.c_str()); + return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; + } - m_strDict[key] = value; + m_strDict[key] = value; - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int EngineConfig::getDoubleAttribute(const std::string& key, double *value) const { - DictDblConstIter dictIter = m_dblDict.find(key); - if (dictIter == m_dblDict.end()) - { - LOGE("Attempt to access to the unsupported double attribute [%s] " - "of the engine config %p", key.c_str(), this); - return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; - } + DictDblConstIter dictIter = m_dblDict.find(key); + if (dictIter == m_dblDict.end()) { + LOGE("Attempt to access to the unsupported double attribute [%s] " + "of the engine config %p", key.c_str(), this); + return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; + } - LOGD("Get double attribute from the engine config %p. 
[%s] = %f", - this, dictIter->first.c_str(), dictIter->second); + LOGD("Get double attribute from the engine config %p. [%s] = %f", + this, dictIter->first.c_str(), dictIter->second); - *value = dictIter->second; + *value = dictIter->second; - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int EngineConfig::getIntegerAttribute(const std::string& key, int *value) const { - DictIntConstIter dictIter = m_intDict.find(key); - if (dictIter == m_intDict.end()) - { - LOGE("Attempt to access to the unsupported integer attribute [%s] " - "of the engine config %p", key.c_str(), this); - return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; - } + DictIntConstIter dictIter = m_intDict.find(key); + if (dictIter == m_intDict.end()) { + LOGE("Attempt to access to the unsupported integer attribute [%s] " + "of the engine config %p", key.c_str(), this); + return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; + } - LOGD("Get integer attribute from the engine config %p. [%s] = %i", - this, dictIter->first.c_str(), dictIter->second); + LOGD("Get integer attribute from the engine config %p. [%s] = %i", + this, dictIter->first.c_str(), dictIter->second); - *value = dictIter->second; + *value = dictIter->second; - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int EngineConfig::getBooleanAttribute(const std::string& key, bool *value) const { - DictBoolConstIter dictIter = m_boolDict.find(key); - if (dictIter == m_boolDict.end()) - { - LOGE("Attempt to access to the unsupported boolean attribute [%s] " - "of the engine config %p", key.c_str(), this); - return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; - } + DictBoolConstIter dictIter = m_boolDict.find(key); + if (dictIter == m_boolDict.end()) { + LOGE("Attempt to access to the unsupported boolean attribute [%s] " + "of the engine config %p", key.c_str(), this); + return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; + } - LOGD("Get boolean attribute from the engine config %p. 
[%s] = %s", - this, dictIter->first.c_str(), dictIter->second ? "TRUE" : "FALSE"); + LOGD("Get boolean attribute from the engine config %p. [%s] = %s", + this, dictIter->first.c_str(), dictIter->second ? "TRUE" : "FALSE"); - *value = dictIter->second; + *value = dictIter->second; - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int EngineConfig::getStringAttribute(const std::string& key, std::string *value) const { - DictStrConstIter dictIter = m_strDict.find(key); - if (dictIter == m_strDict.end()) - { - LOGE("Attempt to access to the unsupported string attribute [%s] " - "of the engine config %p", key.c_str(), this); - return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; - } + DictStrConstIter dictIter = m_strDict.find(key); + if (dictIter == m_strDict.end()) { + LOGE("Attempt to access to the unsupported string attribute [%s] " + "of the engine config %p", key.c_str(), this); + return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; + } - LOGD("Get string attribute from the engine config %p. [%s] = %s", - this, dictIter->first.c_str(), dictIter->second.c_str()); + LOGD("Get string attribute from the engine config %p. 
[%s] = %s", + this, dictIter->first.c_str(), dictIter->second.c_str()); - *value = dictIter->second; + *value = dictIter->second; - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } -// static +/* + * static + */ bool EngineConfig::setDefaultConfigFilePath(const std::string& confFilePath) { - if (0 != DefConfigFilePath.compare(confFilePath)) - { - DefConfigFilePath = confFilePath; - return true; - } + if (0 != DefConfigFilePath.compare(confFilePath)) { + DefConfigFilePath = confFilePath; + return true; + } - return false; + return false; } const std::map& EngineConfig::getDefaultDblDict() { - cacheDictionaries(); + cacheDictionaries(); - return DefDblDict; + return DefDblDict; } const std::map& EngineConfig::getDefaultIntDict() { - cacheDictionaries(); + cacheDictionaries(); - return DefIntDict; + return DefIntDict; } const std::map& EngineConfig::getDefaultBoolDict() { - cacheDictionaries(); + cacheDictionaries(); - return DefBoolDict; + return DefBoolDict; } const std::map& EngineConfig::getDefaultStrDict() { - cacheDictionaries(); + cacheDictionaries(); - return DefStrDict; + return DefStrDict; } int EngineConfig::cacheDictionaries(bool isLazyCache, std::string configFilePath) { - static bool isCached = false; - if (!isLazyCache || !isCached) - { - LOGI("Start to cache default attributes from engine configuration file."); - - DefDblDict.clear(); - DefIntDict.clear(); - DefBoolDict.clear(); - DefStrDict.clear(); - - const char *conf_file = configFilePath.c_str(); - JsonParser *parser; - GError *error = NULL; - - parser = json_parser_new(); - json_parser_load_from_file(parser, conf_file, &error); - if (error) - { - LOGW("Unable to parse file '%s': %s\n", conf_file, error->message); - g_error_free(error); - g_object_unref(parser); - return MEDIA_VISION_ERROR_NO_DATA; - } - - JsonNode *root = json_parser_get_root(parser); - if (JSON_NODE_OBJECT != json_node_get_node_type(root)) - { - LOGW("Can't parse tests configuration file. 
" - "Incorrect json markup."); - g_object_unref(parser); - return MEDIA_VISION_ERROR_NO_DATA; - } - - JsonObject *jobj = json_node_get_object(root); - - if (!json_object_has_member(jobj, "attributes")) - { - LOGW("Can't parse tests configuration file. " - "No 'attributes' section."); - g_object_unref(parser); - return MEDIA_VISION_ERROR_NO_DATA; - } - - JsonNode *attr_node = - json_object_get_member(jobj, "attributes"); - - if (JSON_NODE_ARRAY != json_node_get_node_type(attr_node)) - { - LOGW("Can't parse tests configuration file. " - "'attributes' section isn't array."); - g_object_unref(parser); - return MEDIA_VISION_ERROR_NO_DATA; - } - - JsonArray *attr_array = json_node_get_array(attr_node); - - const guint attr_num = json_array_get_length(attr_array); - - guint attrInd = 0; - for (; attrInd < attr_num; ++attrInd) - { - JsonNode *attr_node = json_array_get_element(attr_array, attrInd); - - if (JSON_NODE_OBJECT != json_node_get_node_type(attr_node)) - { - LOGW("Attribute %u wasn't parsed from json file.", attrInd); - continue; - } - - JsonObject *attr_obj = json_node_get_object(attr_node); - - if (!json_object_has_member(attr_obj, "name") || - !json_object_has_member(attr_obj, "type") || - !json_object_has_member(attr_obj, "value")) - { - LOGW("Attribute %u wasn't parsed from json file.", attrInd); - continue; - } - - const char *nameStr = - (char*)json_object_get_string_member(attr_obj, "name"); - const char *typeStr = - (char*)json_object_get_string_member(attr_obj, "type"); - - if (NULL == nameStr || NULL == typeStr) - { - LOGW("Attribute %i wasn't parsed from json file. 
name and/or " - "type of the attribute are parsed as NULL.", attrInd); - continue; - } - else if (0 == strcmp("double", typeStr)) - { - DefDblDict[std::string(nameStr)] = - (double)json_object_get_double_member(attr_obj, "value"); - } - else if (0 == strcmp("integer", typeStr)) - { - DefIntDict[std::string(nameStr)] = - (int)json_object_get_int_member(attr_obj, "value"); - } - else if (0 == strcmp("boolean", typeStr)) - { - DefBoolDict[std::string(nameStr)] = - json_object_get_boolean_member(attr_obj, "value") ? true : false; - } - else if (0 == strcmp("string", typeStr)) - { - DefStrDict[std::string(nameStr)] = - (char*)json_object_get_string_member(attr_obj, "value"); - } - else - { - LOGW("Attribute %i:%s wasn't parsed from json file. " - "Type isn't supported.", attrInd, nameStr); - continue; - } - } - - g_object_unref(parser); - isCached = true; - } - - return MEDIA_VISION_ERROR_NONE; + static bool isCached = false; + if (!isLazyCache || !isCached) { + LOGI("Start to cache default attributes from engine configuration file."); + + DefDblDict.clear(); + DefIntDict.clear(); + DefBoolDict.clear(); + DefStrDict.clear(); + + const char *conf_file = configFilePath.c_str(); + JsonParser *parser; + GError *error = NULL; + + parser = json_parser_new(); + json_parser_load_from_file(parser, conf_file, &error); + if (error) { + LOGW("Unable to parse file '%s': %s\n", conf_file, error->message); + g_error_free(error); + g_object_unref(parser); + return MEDIA_VISION_ERROR_NO_DATA; + } + + JsonNode *root = json_parser_get_root(parser); + if (JSON_NODE_OBJECT != json_node_get_node_type(root)) { + LOGW("Can't parse tests configuration file. " + "Incorrect json markup."); + g_object_unref(parser); + return MEDIA_VISION_ERROR_NO_DATA; + } + + JsonObject *jobj = json_node_get_object(root); + + if (!json_object_has_member(jobj, "attributes")) { + LOGW("Can't parse tests configuration file. 
" + "No 'attributes' section."); + g_object_unref(parser); + return MEDIA_VISION_ERROR_NO_DATA; + } + + JsonNode *attr_node = + json_object_get_member(jobj, "attributes"); + + if (JSON_NODE_ARRAY != json_node_get_node_type(attr_node)) { + LOGW("Can't parse tests configuration file. " + "'attributes' section isn't array."); + g_object_unref(parser); + return MEDIA_VISION_ERROR_NO_DATA; + } + + JsonArray *attr_array = json_node_get_array(attr_node); + + const guint attr_num = json_array_get_length(attr_array); + + guint attrInd = 0; + for (; attrInd < attr_num; ++attrInd) { + JsonNode *attr_node = json_array_get_element(attr_array, attrInd); + + if (JSON_NODE_OBJECT != json_node_get_node_type(attr_node)) { + LOGW("Attribute %u wasn't parsed from json file.", attrInd); + continue; + } + + JsonObject *attr_obj = json_node_get_object(attr_node); + + if (!json_object_has_member(attr_obj, "name") || + !json_object_has_member(attr_obj, "type") || + !json_object_has_member(attr_obj, "value")) { + LOGW("Attribute %u wasn't parsed from json file.", attrInd); + continue; + } + + const char *nameStr = + (char*)json_object_get_string_member(attr_obj, "name"); + const char *typeStr = + (char*)json_object_get_string_member(attr_obj, "type"); + + if (NULL == nameStr || NULL == typeStr) { + LOGW("Attribute %i wasn't parsed from json file. name and/or " + "type of the attribute are parsed as NULL.", attrInd); + continue; + } else if (0 == strcmp("double", typeStr)) { + DefDblDict[std::string(nameStr)] = + (double)json_object_get_double_member(attr_obj, "value"); + } else if (0 == strcmp("integer", typeStr)) { + DefIntDict[std::string(nameStr)] = + (int)json_object_get_int_member(attr_obj, "value"); + } else if (0 == strcmp("boolean", typeStr)) { + DefBoolDict[std::string(nameStr)] = + json_object_get_boolean_member(attr_obj, "value") ? 
true : false; + } else if (0 == strcmp("string", typeStr)) { + DefStrDict[std::string(nameStr)] = + (char*)json_object_get_string_member(attr_obj, "value"); + } else { + LOGW("Attribute %i:%s wasn't parsed from json file. " + "Type isn't supported.", attrInd, nameStr); + continue; + } + } + + g_object_unref(parser); + isCached = true; + } + + return MEDIA_VISION_ERROR_NONE; } } /* namespace Common */ diff --git a/mv_common/src/MediaSource.cpp b/mv_common/src/MediaSource.cpp index 1288625..bca35c5 100644 --- a/mv_common/src/MediaSource.cpp +++ b/mv_common/src/MediaSource.cpp @@ -20,104 +20,100 @@ #include -namespace MediaVision -{ -namespace Common -{ - -MediaSource::MediaSource() : m_pBuffer (NULL), m_bufferSize (0), m_width (0), - m_height (0), m_colorspace (MEDIA_VISION_COLORSPACE_INVALID) +namespace MediaVision { +namespace Common { +MediaSource::MediaSource() : + m_pBuffer(NULL), + m_bufferSize(0), + m_width(0), + m_height(0), + m_colorspace(MEDIA_VISION_COLORSPACE_INVALID) { } MediaSource::~MediaSource() { - clear(); + clear(); } void MediaSource::clear(void) { - if (m_pBuffer != NULL) - { - LOGD("Delete internal buffer for media source %p", this); - delete[] m_pBuffer; - } - LOGD("Set defaults for media source %p : buffer = NULL; " - "bufferSize = 0; width = 0; height = 0; " - "colorspace = MEDIA_VISION_COLORSPACE_INVALID", this); - m_pBuffer = NULL; - m_bufferSize = 0; - m_width = 0; - m_height = 0; - m_colorspace = MEDIA_VISION_COLORSPACE_INVALID; + if (m_pBuffer != NULL) { + LOGD("Delete internal buffer for media source %p", this); + delete[] m_pBuffer; + } + LOGD("Set defaults for media source %p : buffer = NULL; " + "bufferSize = 0; width = 0; height = 0; " + "colorspace = MEDIA_VISION_COLORSPACE_INVALID", this); + m_pBuffer = NULL; + m_bufferSize = 0; + m_width = 0; + m_height = 0; + m_colorspace = MEDIA_VISION_COLORSPACE_INVALID; } bool MediaSource::fill(const unsigned char *buffer, unsigned int bufferSize, unsigned int width, unsigned int height, 
mv_colorspace_e colorspace) { - if (bufferSize == 0 || buffer == NULL) - { - return false; - } - - LOGD("Call clear() first for media source %p", this); - clear(); - - try - { - LOGD("Allocate memory for buffer in media source %p", this); - m_pBuffer = new unsigned char[bufferSize]; - } - catch(...) - { - LOGE("Memory allocating for buffer in media source %p failed!", this); - m_pBuffer = NULL; - return false; - } - - LOGD("Copy data from external buffer (%p) to the internal buffer (%p) of " - "media source %p", buffer, m_pBuffer, this); - std::memcpy(m_pBuffer, buffer, bufferSize); - - LOGD("Assign new size of the internal buffer of media source %p. " - "New size is %ui.", this, bufferSize); - m_bufferSize = bufferSize; - - LOGD("Assign new size (%ui x %ui) of the internal buffer image for " - "the media source %p", width, height, this); - m_width = width; - m_height = height; - - LOGD("Assign new colorspace (%i) of the internal buffer image for " - "the media source %p", colorspace, this); - m_colorspace = colorspace; - - return true; + if (bufferSize == 0 || buffer == NULL) { + return false; + } + + LOGD("Call clear() first for media source %p", this); + clear(); + + try { + LOGD("Allocate memory for buffer in media source %p", this); + m_pBuffer = new unsigned char[bufferSize]; + } catch(...) { + LOGE("Memory allocating for buffer in media source %p failed!", this); + m_pBuffer = NULL; + return false; + } + + LOGD("Copy data from external buffer (%p) to the internal buffer (%p) of " + "media source %p", buffer, m_pBuffer, this); + std::memcpy(m_pBuffer, buffer, bufferSize); + + LOGD("Assign new size of the internal buffer of media source %p. 
" + "New size is %ui.", this, bufferSize); + m_bufferSize = bufferSize; + + LOGD("Assign new size (%ui x %ui) of the internal buffer image for " + "the media source %p", width, height, this); + m_width = width; + m_height = height; + + LOGD("Assign new colorspace (%i) of the internal buffer image for " + "the media source %p", colorspace, this); + m_colorspace = colorspace; + + return true; } unsigned char *MediaSource::getBuffer(void) const { - return m_pBuffer; + return m_pBuffer; } unsigned int MediaSource::getBufferSize(void) const { - return m_bufferSize; + return m_bufferSize; } unsigned int MediaSource::getWidth(void) const { - return m_width; + return m_width; } unsigned int MediaSource::getHeight(void) const { - return m_height; + return m_height; } mv_colorspace_e MediaSource::getColorspace(void) const { - return m_colorspace; + return m_colorspace; } } /* Common */ diff --git a/mv_common/src/mv_common_c.cpp b/mv_common/src/mv_common_c.cpp index ececf20..9242a56 100644 --- a/mv_common/src/mv_common_c.cpp +++ b/mv_common/src/mv_common_c.cpp @@ -25,266 +25,246 @@ #include int mv_create_source_c( - mv_source_h *source_ptr) + mv_source_h *source_ptr) { - if (source_ptr == NULL) - { - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (source_ptr == NULL) { + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - LOGD("Creating media vision source"); - (*source_ptr) = ((mv_source_h)new MediaVision::Common::MediaSource()); + LOGD("Creating media vision source"); + (*source_ptr) = ((mv_source_h)new MediaVision::Common::MediaSource()); - if (*source_ptr == NULL) - { - LOGE("Failed to create media vision source"); - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } - LOGD("Media vision source [%p] has been created", *source_ptr); + if (*source_ptr == NULL) { + LOGE("Failed to create media vision source"); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } + LOGD("Media vision source [%p] has been created", *source_ptr); - return MEDIA_VISION_ERROR_NONE; + return 
MEDIA_VISION_ERROR_NONE; } int mv_destroy_source_c( - mv_source_h source) + mv_source_h source) { - if (!source) - { - LOGE("Media source can't be destroyed because handle is NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (!source) { + LOGE("Media source can't be destroyed because handle is NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - LOGD("Destroying media vision source [%p]", source); - delete ((MediaVision::Common::MediaSource*)source); - LOGD("Media vision source has been destroyed"); + LOGD("Destroying media vision source [%p]", source); + delete ((MediaVision::Common::MediaSource*)source); + LOGD("Media vision source has been destroyed"); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_source_fill_by_media_packet_c( - mv_source_h source, - media_packet_h media_packet) + mv_source_h source, + media_packet_h media_packet) { - if (!source || !media_packet) - { - LOGE("Media source can't be filled by media_packet handle because " - "one of the source or media_packet handles is NULL. 
" - "source = %p; media_packet = %p", source, media_packet); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - bool is_video = false; - int image_width = 0; - int image_height = 0; - media_format_h format = NULL; - media_format_mimetype_e mimetype = MEDIA_FORMAT_I420; - unsigned char *data_buffer = NULL; - uint64_t buffer_size = 0; - mv_colorspace_e image_colorspace = MEDIA_VISION_COLORSPACE_INVALID; - - int ret = media_packet_is_video(media_packet, &is_video); - if (ret != MEDIA_PACKET_ERROR_NONE) - { - LOGE("media_packet_is_video() failed, mv_source_h fill skipped"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (!is_video) - { - LOGE("Media packet isn't video, mv_source_h fill skipped"); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - ret = media_packet_get_format(media_packet, &format); - if (ret != MEDIA_PACKET_ERROR_NONE) - { - LOGE("media_packet_get_format() failed, mv_source_h fill skipped"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - ret = media_format_get_video_info( - format, &mimetype, &image_width, &image_height, NULL, NULL); - if (ret != MEDIA_PACKET_ERROR_NONE) - { - LOGE("media_format_get_video_info() failed, mv_source_h fill skipped"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (mimetype > MEDIA_FORMAT_H261 && mimetype <= MEDIA_FORMAT_MPEG4_ASP) - { - LOGE("Media format mimetype is not the raw video, mv_source_h fill skipped"); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - switch (mimetype) - { - case MEDIA_FORMAT_I420: - image_colorspace = MEDIA_VISION_COLORSPACE_I420; - break; - case MEDIA_FORMAT_NV12: - image_colorspace = MEDIA_VISION_COLORSPACE_NV12; - break; - case MEDIA_FORMAT_YV12: - image_colorspace = MEDIA_VISION_COLORSPACE_YV12; - break; - case MEDIA_FORMAT_NV21: - image_colorspace = MEDIA_VISION_COLORSPACE_NV21; - break; - case MEDIA_FORMAT_YUYV: - image_colorspace = MEDIA_VISION_COLORSPACE_YUYV; - break; - case MEDIA_FORMAT_UYVY: - image_colorspace = 
MEDIA_VISION_COLORSPACE_UYVY; - break; - case MEDIA_FORMAT_422P: - image_colorspace = MEDIA_VISION_COLORSPACE_422P; - break; - case MEDIA_FORMAT_RGB565: - image_colorspace = MEDIA_VISION_COLORSPACE_RGB565; - break; - case MEDIA_FORMAT_RGB888: - image_colorspace = MEDIA_VISION_COLORSPACE_RGB888; - break; - case MEDIA_FORMAT_RGBA: - image_colorspace = MEDIA_VISION_COLORSPACE_RGBA; - break; - default: - LOGE("Format of the media packet buffer is not supported by media " - "vision source (media_format_h mimetype=%i)", mimetype); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - ret = media_packet_get_buffer_data_ptr(media_packet, (void**)&data_buffer); - if (ret != MEDIA_PACKET_ERROR_NONE) - { - LOGE("media_packet_get_buffer_data_ptr() failed, mv_source_h fill skipped"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - ret = media_packet_get_buffer_size(media_packet, &buffer_size); - if (ret != MEDIA_PACKET_ERROR_NONE) - { - LOGE("media_packet_get_buffer_size() failed, mv_source_h fill skipped"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (!((MediaVision::Common::MediaSource*)source)->fill(data_buffer, - buffer_size, (unsigned int)image_width, (unsigned int)image_height, image_colorspace)) - { - LOGE("mv_source_h filling from media_packet_h failed"); - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } - - LOGD("Media source has been filled from media packet"); - return MEDIA_VISION_ERROR_NONE; + if (!source || !media_packet) { + LOGE("Media source can't be filled by media_packet handle because " + "one of the source or media_packet handles is NULL. 
" + "source = %p; media_packet = %p", source, media_packet); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + bool is_video = false; + int image_width = 0; + int image_height = 0; + media_format_h format = NULL; + media_format_mimetype_e mimetype = MEDIA_FORMAT_I420; + unsigned char *data_buffer = NULL; + uint64_t buffer_size = 0; + mv_colorspace_e image_colorspace = MEDIA_VISION_COLORSPACE_INVALID; + + int ret = media_packet_is_video(media_packet, &is_video); + if (ret != MEDIA_PACKET_ERROR_NONE) { + LOGE("media_packet_is_video() failed, mv_source_h fill skipped"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (!is_video) { + LOGE("Media packet isn't video, mv_source_h fill skipped"); + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } + + ret = media_packet_get_format(media_packet, &format); + if (ret != MEDIA_PACKET_ERROR_NONE) { + LOGE("media_packet_get_format() failed, mv_source_h fill skipped"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + ret = media_format_get_video_info( + format, &mimetype, &image_width, &image_height, NULL, NULL); + if (ret != MEDIA_PACKET_ERROR_NONE) { + LOGE("media_format_get_video_info() failed, mv_source_h fill skipped"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (mimetype > MEDIA_FORMAT_H261 && mimetype <= MEDIA_FORMAT_MPEG4_ASP) { + LOGE("Media format mimetype is not the raw video, mv_source_h fill skipped"); + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } + + switch (mimetype) { + case MEDIA_FORMAT_I420: + image_colorspace = MEDIA_VISION_COLORSPACE_I420; + break; + case MEDIA_FORMAT_NV12: + image_colorspace = MEDIA_VISION_COLORSPACE_NV12; + break; + case MEDIA_FORMAT_YV12: + image_colorspace = MEDIA_VISION_COLORSPACE_YV12; + break; + case MEDIA_FORMAT_NV21: + image_colorspace = MEDIA_VISION_COLORSPACE_NV21; + break; + case MEDIA_FORMAT_YUYV: + image_colorspace = MEDIA_VISION_COLORSPACE_YUYV; + break; + case MEDIA_FORMAT_UYVY: + image_colorspace = MEDIA_VISION_COLORSPACE_UYVY; + 
break; + case MEDIA_FORMAT_422P: + image_colorspace = MEDIA_VISION_COLORSPACE_422P; + break; + case MEDIA_FORMAT_RGB565: + image_colorspace = MEDIA_VISION_COLORSPACE_RGB565; + break; + case MEDIA_FORMAT_RGB888: + image_colorspace = MEDIA_VISION_COLORSPACE_RGB888; + break; + case MEDIA_FORMAT_RGBA: + image_colorspace = MEDIA_VISION_COLORSPACE_RGBA; + break; + default: + LOGE("Format of the media packet buffer is not supported by media " + "vision source (media_format_h mimetype=%i)", mimetype); + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } + + ret = media_packet_get_buffer_data_ptr(media_packet, (void**)&data_buffer); + if (ret != MEDIA_PACKET_ERROR_NONE) { + LOGE("media_packet_get_buffer_data_ptr() failed, mv_source_h fill skipped"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + ret = media_packet_get_buffer_size(media_packet, &buffer_size); + if (ret != MEDIA_PACKET_ERROR_NONE) { + LOGE("media_packet_get_buffer_size() failed, mv_source_h fill skipped"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (!((MediaVision::Common::MediaSource*)source)->fill(data_buffer, buffer_size, + (unsigned int)image_width, (unsigned int)image_height, image_colorspace)) { + LOGE("mv_source_h filling from media_packet_h failed"); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } + + LOGD("Media source has been filled from media packet"); + return MEDIA_VISION_ERROR_NONE; } int mv_source_fill_by_buffer_c( - mv_source_h source, - unsigned char *data_buffer, - unsigned int buffer_size, - unsigned int image_width, - unsigned int image_height, - mv_colorspace_e image_colorspace) + mv_source_h source, + unsigned char *data_buffer, + unsigned int buffer_size, + unsigned int image_width, + unsigned int image_height, + mv_colorspace_e image_colorspace) { - if (!source || buffer_size == 0 || data_buffer == NULL) - { - LOGE("Media source can't be filled by buffer because " - "one of the source or data_buffer is NULL or buffer_size = 0. 
" - "source = %p; data_buffer = %p; buffer_size = %u", - source, data_buffer, buffer_size); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (!((MediaVision::Common::MediaSource*)source)->fill(data_buffer, - buffer_size, image_width, image_height, image_colorspace)) - { - LOGE("mv_source_h filling from buffer failed"); - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } - - LOGD("Media source has been filled from buffer"); - return MEDIA_VISION_ERROR_NONE; + if (!source || buffer_size == 0 || data_buffer == NULL) { + LOGE("Media source can't be filled by buffer because " + "one of the source or data_buffer is NULL or buffer_size = 0. " + "source = %p; data_buffer = %p; buffer_size = %u", + source, data_buffer, buffer_size); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (!((MediaVision::Common::MediaSource*)source)->fill(data_buffer, + buffer_size, image_width, image_height, image_colorspace)) { + LOGE("mv_source_h filling from buffer failed"); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } + + LOGD("Media source has been filled from buffer"); + return MEDIA_VISION_ERROR_NONE; } int mv_source_clear_c( - mv_source_h source) + mv_source_h source) { - if (!source) - { - LOGE("Media source can't be cleared because source handle is NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (!source) { + LOGE("Media source can't be cleared because source handle is NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - LOGD("Clear media vision source [%p]", source); - ((MediaVision::Common::MediaSource*)source)->clear(); - LOGD("Media vision source [%p] has been cleared", source); + LOGD("Clear media vision source [%p]", source); + ((MediaVision::Common::MediaSource*)source)->clear(); + LOGD("Media vision source [%p] has been cleared", source); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_source_get_buffer_c( - mv_source_h source, - unsigned char **buffer, - unsigned int *size) + mv_source_h source, + unsigned char 
**buffer, + unsigned int *size) { - if (!source) - { - LOGE("Impossible to get buffer for NULL mv_source_h handle"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (!source) { + LOGE("Impossible to get buffer for NULL mv_source_h handle"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - LOGD("Get media vision source [%p] buffer and buffer size to be returned", source); - *buffer = ((MediaVision::Common::MediaSource*)source)->getBuffer(); - *size = ((MediaVision::Common::MediaSource*)source)->getBufferSize(); - LOGD("Media vision source [%p] buffer (%p) and buffer size (%ui) has been returned", source, buffer, *size); + LOGD("Get media vision source [%p] buffer and buffer size to be returned", source); + *buffer = ((MediaVision::Common::MediaSource*)source)->getBuffer(); + *size = ((MediaVision::Common::MediaSource*)source)->getBufferSize(); + LOGD("Media vision source [%p] buffer (%p) and buffer size (%ui) has been returned", source, buffer, *size); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_source_get_height_c( - mv_source_h source, - unsigned int *height) + mv_source_h source, + unsigned int *height) { - if (!source) - { - LOGE("Impossible to get height for NULL mv_source_h handle"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (!source) { + LOGE("Impossible to get height for NULL mv_source_h handle"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - LOGD("Get media vision source [%p] height to be returned", source); - *height = ((MediaVision::Common::MediaSource*)source)->getHeight(); - LOGD("Media vision source [%p] height (%ui) has been returned", source, *height); + LOGD("Get media vision source [%p] height to be returned", source); + *height = ((MediaVision::Common::MediaSource*)source)->getHeight(); + LOGD("Media vision source [%p] height (%ui) has been returned", source, *height); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_source_get_width_c( - mv_source_h 
source, - unsigned int *width) + mv_source_h source, + unsigned int *width) { - if (!source) - { - LOGE("Impossible to get width for NULL mv_source_h handle"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (!source) { + LOGE("Impossible to get width for NULL mv_source_h handle"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - LOGD("Get media vision source [%p] width to be returned", source); - *width = ((MediaVision::Common::MediaSource*)source)->getWidth(); - LOGD("Media vision source [%p] width (%ui) has been returned", source, *width); + LOGD("Get media vision source [%p] width to be returned", source); + *width = ((MediaVision::Common::MediaSource*)source)->getWidth(); + LOGD("Media vision source [%p] width (%ui) has been returned", source, *width); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_source_get_colorspace_c( - mv_source_h source, - mv_colorspace_e *colorspace) + mv_source_h source, + mv_colorspace_e *colorspace) { - if (!source) - { - LOGE("Impossible to get colorspace for NULL mv_source_h handle"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (!source) { + LOGE("Impossible to get colorspace for NULL mv_source_h handle"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } LOGD("Get media vision source [%p] colorspace to be returned", source); *colorspace = ((MediaVision::Common::MediaSource*)source)->getColorspace(); @@ -296,349 +276,318 @@ int mv_source_get_colorspace_c( int mv_create_engine_config_c( mv_engine_config_h *engine_cfg) { - if (engine_cfg == NULL) - { - LOGE("Impossible to create mv_engine_config_h handle"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (engine_cfg == NULL) { + LOGE("Impossible to create mv_engine_config_h handle"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + LOGD("Creating media vision engine config"); + (*engine_cfg) = ((mv_engine_config_h)new MediaVision::Common::EngineConfig()); + LOGD("Media vision engine config [%p] has been created", 
*engine_cfg); - LOGD("Creating media vision engine config"); - (*engine_cfg) = ((mv_engine_config_h)new MediaVision::Common::EngineConfig()); - LOGD("Media vision engine config [%p] has been created", *engine_cfg); + if (*engine_cfg == NULL) { + LOGE("Failed to create mv_engine_config_h handle"); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } - if (*engine_cfg == NULL) - { - LOGE("Failed to create mv_engine_config_h handle"); - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } - - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_destroy_engine_config_c( - mv_engine_config_h engine_cfg) + mv_engine_config_h engine_cfg) { - if (!engine_cfg) - { - LOGE("Impossible to destroy NULL mv_engine_config_h handle"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (!engine_cfg) { + LOGE("Impossible to destroy NULL mv_engine_config_h handle"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - LOGD("Destroying media vision engine config [%p]", engine_cfg); - delete ((MediaVision::Common::EngineConfig*)engine_cfg); - LOGD("Media vision engine config has been destroyed"); + LOGD("Destroying media vision engine config [%p]", engine_cfg); + delete ((MediaVision::Common::EngineConfig*)engine_cfg); + LOGD("Media vision engine config has been destroyed"); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_engine_config_set_double_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - double value) + mv_engine_config_h engine_cfg, + const char *name, + double value) { - if (!engine_cfg || name == NULL) - { - LOGE("Impossible to set attribute. One of the required parameters is " - "NULL. engine_cfg = %p; name = %p;", - engine_cfg, name); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute( - std::string(name), value); - - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("Failed to set attribute [%s] with value %f. 
Error code (0x%08x)", - name, value, ret); - return ret; - } - - LOGD("Attribute [%s] (value %f) has been set", name, value); - return ret; + if (!engine_cfg || name == NULL) { + LOGE("Impossible to set attribute. One of the required parameters is " + "NULL. engine_cfg = %p; name = %p;", + engine_cfg, name); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute( + std::string(name), value); + + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Failed to set attribute [%s] with value %f. Error code (0x%08x)", + name, value, ret); + return ret; + } + + LOGD("Attribute [%s] (value %f) has been set", name, value); + return ret; } int mv_engine_config_set_int_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - int value) + mv_engine_config_h engine_cfg, + const char *name, + int value) { - if (!engine_cfg || name == NULL) - { - LOGE("Impossible to set attribute. One of the required parameters is " - "NULL. engine_cfg = %p; name = %p;", - engine_cfg, name); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute( - std::string(name), value); - - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("Failed to set attribute [%s] with value %i. Error code (0x%08x)", - name, value, ret); - return ret; - } - - LOGD("Attribute [%s] (value %i) has been set", name, value); - - return ret; + if (!engine_cfg || name == NULL) { + LOGE("Impossible to set attribute. One of the required parameters is " + "NULL. engine_cfg = %p; name = %p;", + engine_cfg, name); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute( + std::string(name), value); + + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Failed to set attribute [%s] with value %i. 
Error code (0x%08x)", + name, value, ret); + return ret; + } + + LOGD("Attribute [%s] (value %i) has been set", name, value); + + return ret; } int mv_engine_config_set_bool_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - bool value) + mv_engine_config_h engine_cfg, + const char *name, + bool value) { - if (!engine_cfg || name == NULL) - { - LOGE("Impossible to set attribute. One of the required parameters is " - "NULL. engine_cfg = %p; name = %p;", - engine_cfg, name); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute( - std::string(name), value); - - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("Failed to set attribute [%s] with value %s. Error code (0x%08x)", - name, value ? "TRUE" : "FALSE", ret); - return ret; - } - - LOGD("Attribute [%s] (value %s) has been set", - name, value ? "TRUE" : "FALSE"); - return ret; + if (!engine_cfg || name == NULL) { + LOGE("Impossible to set attribute. One of the required parameters is " + "NULL. engine_cfg = %p; name = %p;", + engine_cfg, name); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute( + std::string(name), value); + + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Failed to set attribute [%s] with value %s. Error code (0x%08x)", + name, value ? "TRUE" : "FALSE", ret); + return ret; + } + + LOGD("Attribute [%s] (value %s) has been set", + name, value ? "TRUE" : "FALSE"); + return ret; } int mv_engine_config_set_string_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - const char *value) + mv_engine_config_h engine_cfg, + const char *name, + const char *value) { - if (!engine_cfg || name == NULL || value == NULL) - { - LOGE("Impossible to set attribute. One of the required parameters is " - "NULL. 
engine_cfg = %p; name = %p; value = %p;", - engine_cfg, name, value); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute( - std::string(name), std::string(value)); - - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("Failed to set attribute [%s] with value %s. Error code (0x%08x)", - name, value, ret); - return ret; - } - - LOGD("Attribute [%s] (value %s) has been set", name, value); - return ret; + if (!engine_cfg || name == NULL || value == NULL) { + LOGE("Impossible to set attribute. One of the required parameters is " + "NULL. engine_cfg = %p; name = %p; value = %p;", + engine_cfg, name, value); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute( + std::string(name), std::string(value)); + + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Failed to set attribute [%s] with value %s. Error code (0x%08x)", + name, value, ret); + return ret; + } + + LOGD("Attribute [%s] (value %s) has been set", name, value); + return ret; } int mv_engine_config_get_double_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - double *value) + mv_engine_config_h engine_cfg, + const char *name, + double *value) { - if (!engine_cfg || name == NULL || value == NULL) - { - LOGE("Impossible to get attribute. One of the required parameters is " - "NULL. engine_cfg = %p; name = %p; value = %p;", - engine_cfg, name, value); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getDoubleAttribute( - std::string(name), value); - - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("Failed to get attribute [%s]. Error code (0x%08x)", - name, ret); - return ret; - } - - LOGD("Attribute [%s] (value %f) has been gotten", - name, *value); - return ret; + if (!engine_cfg || name == NULL || value == NULL) { + LOGE("Impossible to get attribute. 
One of the required parameters is " + "NULL. engine_cfg = %p; name = %p; value = %p;", + engine_cfg, name, value); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getDoubleAttribute( + std::string(name), value); + + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Failed to get attribute [%s]. Error code (0x%08x)", + name, ret); + return ret; + } + + LOGD("Attribute [%s] (value %f) has been gotten", + name, *value); + return ret; } int mv_engine_config_get_int_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - int *value) + mv_engine_config_h engine_cfg, + const char *name, + int *value) { - if (!engine_cfg || name == NULL || value == NULL) - { - LOGE("Impossible to get attribute. One of the required parameters is " - "NULL. engine_cfg = %p; name = %p; value = %p;", - engine_cfg, name, value); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getIntegerAttribute( - std::string(name), value); - - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("Failed to get attribute [%s]. Error code (0x%08x)", - name, ret); - return ret; - } - - LOGD("Attribute [%s] (value %i) has been gotten", - name, *value); - return ret; + if (!engine_cfg || name == NULL || value == NULL) { + LOGE("Impossible to get attribute. One of the required parameters is " + "NULL. engine_cfg = %p; name = %p; value = %p;", + engine_cfg, name, value); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getIntegerAttribute( + std::string(name), value); + + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Failed to get attribute [%s]. 
Error code (0x%08x)", + name, ret); + return ret; + } + + LOGD("Attribute [%s] (value %i) has been gotten", + name, *value); + return ret; } int mv_engine_config_get_bool_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - bool *value) + mv_engine_config_h engine_cfg, + const char *name, + bool *value) { - if (!engine_cfg || name == NULL || value == NULL) - { - LOGE("Impossible to get attribute. One of the required parameters is " - "NULL. engine_cfg = %p; name = %p; value = %p;", - engine_cfg, name, value); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getBooleanAttribute( - std::string(name), value); - - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("Failed to get attribute [%s]. Error code (0x%08x)", - name, ret); - return ret; - } - - LOGD("Attribute [%s] (value %s) has been gotten", - name, *value ? "TRUE" : "FALSE"); - return ret; + if (!engine_cfg || name == NULL || value == NULL) { + LOGE("Impossible to get attribute. One of the required parameters is " + "NULL. engine_cfg = %p; name = %p; value = %p;", + engine_cfg, name, value); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getBooleanAttribute( + std::string(name), value); + + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Failed to get attribute [%s]. Error code (0x%08x)", + name, ret); + return ret; + } + + LOGD("Attribute [%s] (value %s) has been gotten", + name, *value ? "TRUE" : "FALSE"); + return ret; } int mv_engine_config_get_string_attribute_c( - mv_engine_config_h engine_cfg, - const char *name, - char **value) + mv_engine_config_h engine_cfg, + const char *name, + char **value) { - if (!engine_cfg || name == NULL || value == NULL) - { - LOGE("Impossible to get attribute. One of the required parameters is " - "NULL. 
engine_cfg = %p; name = %p; value = %p;", - engine_cfg, name, value); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - std::string attributeValue; - int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getStringAttribute( - std::string(name), &attributeValue); - - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("Failed to get attribute [%s]. Error code (0x%08x)", - name, ret); - return ret; - } - - LOGD("Convert string to char*"); - int stringSize = attributeValue.size(); - (*value) = new char[stringSize + 1]; - - if (attributeValue.copy(*value, stringSize) != attributeValue.size()) - { - LOGE("Conversion from string to char* failed"); - delete[] (*value); - (*value) = NULL; - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - (*value)[stringSize] = '\0'; - - LOGD("Attribute [%s] (value %s) has been gotten", - name, *value); - return ret; + if (!engine_cfg || name == NULL || value == NULL) { + LOGE("Impossible to get attribute. One of the required parameters is " + "NULL. engine_cfg = %p; name = %p; value = %p;", + engine_cfg, name, value); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + std::string attributeValue; + int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getStringAttribute( + std::string(name), &attributeValue); + + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Failed to get attribute [%s]. 
Error code (0x%08x)", + name, ret); + return ret; + } + + LOGD("Convert string to char*"); + int stringSize = attributeValue.size(); + (*value) = new char[stringSize + 1]; + + if (attributeValue.copy(*value, stringSize) != attributeValue.size()) { + LOGE("Conversion from string to char* failed"); + delete[] (*value); + (*value) = NULL; + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + (*value)[stringSize] = '\0'; + + LOGD("Attribute [%s] (value %s) has been gotten", + name, *value); + return ret; } int mv_engine_config_foreach_supported_attribute_c( mv_supported_attribute_cb callback, void *user_data) { - if (NULL == callback) - { - LOGE("Impossible to traverse supported by Media Vision engine " - "configuration attributes. Callback is NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - using namespace MediaVision::Common; - - int err = EngineConfig::cacheDictionaries(); - - if (MEDIA_VISION_ERROR_NONE != err) - { - LOGE("Failed to get attribute names/types. " - "Failed to cache attributes from file"); - return err; - } - - DictDblConstIter dblDictIter = EngineConfig::getDefaultDblDict().begin(); - DictIntConstIter intDictIter = EngineConfig::getDefaultIntDict().begin(); - DictBoolConstIter boolDictIter = EngineConfig::getDefaultBoolDict().begin(); - DictStrConstIter strDictIter = EngineConfig::getDefaultStrDict().begin(); - - while (dblDictIter != EngineConfig::getDefaultDblDict().end()) - { - if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE, - dblDictIter->first.c_str(), user_data)) - { - LOGD("Attribute names/types traverse has been stopped by the user"); - return MEDIA_VISION_ERROR_NONE; - } - ++dblDictIter; - } - - while (intDictIter != EngineConfig::getDefaultIntDict().end()) - { - if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER, - intDictIter->first.c_str(), user_data)) - { - LOGD("Attribute names/types traverse has been stopped by the user"); - return MEDIA_VISION_ERROR_NONE; - } - ++intDictIter; - } - - while (boolDictIter != 
EngineConfig::getDefaultBoolDict().end()) - { - if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN, - boolDictIter->first.c_str(), user_data)) - { - LOGD("Attribute names/types traverse has been stopped by the user"); - return MEDIA_VISION_ERROR_NONE; - } - ++boolDictIter; - } - - while (strDictIter != EngineConfig::getDefaultStrDict().end()) - { - if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_STRING, - strDictIter->first.c_str(), user_data)) - { - LOGD("Attribute names/types traverse has been stopped by the user"); - return MEDIA_VISION_ERROR_NONE; - } - ++strDictIter; - } - - LOGD("Attribute names/types has been gotten"); - return MEDIA_VISION_ERROR_NONE; + if (NULL == callback) { + LOGE("Impossible to traverse supported by Media Vision engine " + "configuration attributes. Callback is NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + using namespace MediaVision::Common; + + int err = EngineConfig::cacheDictionaries(); + + if (MEDIA_VISION_ERROR_NONE != err) { + LOGE("Failed to get attribute names/types. 
" + "Failed to cache attributes from file"); + return err; + } + + DictDblConstIter dblDictIter = EngineConfig::getDefaultDblDict().begin(); + DictIntConstIter intDictIter = EngineConfig::getDefaultIntDict().begin(); + DictBoolConstIter boolDictIter = EngineConfig::getDefaultBoolDict().begin(); + DictStrConstIter strDictIter = EngineConfig::getDefaultStrDict().begin(); + + while (dblDictIter != EngineConfig::getDefaultDblDict().end()) { + if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE, + dblDictIter->first.c_str(), user_data)) { + LOGD("Attribute names/types traverse has been stopped by the user"); + return MEDIA_VISION_ERROR_NONE; + } + ++dblDictIter; + } + + while (intDictIter != EngineConfig::getDefaultIntDict().end()) { + if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER, + intDictIter->first.c_str(), user_data)) { + LOGD("Attribute names/types traverse has been stopped by the user"); + return MEDIA_VISION_ERROR_NONE; + } + ++intDictIter; + } + + while (boolDictIter != EngineConfig::getDefaultBoolDict().end()) { + if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN, + boolDictIter->first.c_str(), user_data)) { + LOGD("Attribute names/types traverse has been stopped by the user"); + return MEDIA_VISION_ERROR_NONE; + } + ++boolDictIter; + } + + while (strDictIter != EngineConfig::getDefaultStrDict().end()) { + if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_STRING, + strDictIter->first.c_str(), user_data)) { + LOGD("Attribute names/types traverse has been stopped by the user"); + return MEDIA_VISION_ERROR_NONE; + } + ++strDictIter; + } + + LOGD("Attribute names/types has been gotten"); + return MEDIA_VISION_ERROR_NONE; } diff --git a/mv_face/face/include/FaceDetector.h b/mv_face/face/include/FaceDetector.h index f014a54..b9b2888 100644 --- a/mv_face/face/include/FaceDetector.h +++ b/mv_face/face/include/FaceDetector.h @@ -27,11 +27,8 @@ * detection functionality. 
*/ -namespace MediaVision -{ -namespace Face -{ - +namespace MediaVision { +namespace Face { /** * @class FaceDetector * @brief The Face Detector container. @@ -39,71 +36,68 @@ namespace Face * * @since_tizen 3.0 */ -class FaceDetector -{ +class FaceDetector { public: + /** + * @brief Creates a FaceDetector. + * + * @since_tizen 3.0 + */ + FaceDetector(); - /** - * @brief Creates a FaceDetector. - * - * @since_tizen 3.0 - */ - FaceDetector(); - - /** - * @brief Destroys the FaceDetector and releases all its resources. - * - * @since_tizen 3.0 - */ - virtual ~FaceDetector(); + /** + * @brief Destroys the FaceDetector and releases all its resources. + * + * @since_tizen 3.0 + */ + virtual ~FaceDetector(); - /** - * @brief Performs face detection functionality. - * @details Use this function to launch face detection algorithm which - * used the haarcascade set by setHaarcascadeFilepath(). - * - * @since_tizen 3.0 - * @param [in] image The image where faces will be detected - * @param [in] roi Region of image where faces will be detected - * @param [in] minSize Minimum size of faces which will be detected - * @param [out] faceLocations The result locations of detected faces. - * @return true if detect process is completely finished. Otherwise return false. - * - * @pre Set a face haarcascade by calling setHaarcascadeFilepath() - * - * @see setHaarcascadeFilepath() - */ - bool detectFaces( - const cv::Mat& image, - const cv::Rect& roi, - const cv::Size& minSize, - std::vector& faceLocations); + /** + * @brief Performs face detection functionality. + * @details Use this function to launch face detection algorithm which + * used the haarcascade set by setHaarcascadeFilepath(). + * + * @since_tizen 3.0 + * @param [in] image The image where faces will be detected + * @param [in] roi Region of image where faces will be detected + * @param [in] minSize Minimum size of faces which will be detected + * @param [out] faceLocations The result locations of detected faces. 
+ * @return true if detect process is completely finished. Otherwise return false. + * + * @pre Set a face haarcascade by calling setHaarcascadeFilepath() + * + * @see setHaarcascadeFilepath() + */ + bool detectFaces( + const cv::Mat& image, + const cv::Rect& roi, + const cv::Size& minSize, + std::vector& faceLocations); - /** - * @brief Loads haar cascade classifier for detection process. - * @details This method is mandatory for normally detecting process. - * - * @since_tizen 3.0 - * @param [in] haarcascadeFilepath The path to the file, which contains haar - * cascade classifier information for - * detection process. - * @return true if cascade is loaded from file and ready for detecting - * process. Otherwise is false. - */ - bool loadHaarcascade(const std::string& haarcascadeFilepath); + /** + * @brief Loads haar cascade classifier for detection process. + * @details This method is mandatory for normally detecting process. + * + * @since_tizen 3.0 + * @param [in] haarcascadeFilepath The path to the file, which contains haar + * cascade classifier information for + * detection process. + * @return true if cascade is loaded from file and ready for detecting + * process. Otherwise is false. + */ + bool loadHaarcascade(const std::string& haarcascadeFilepath); private: + cv::CascadeClassifier m_faceCascade; /**< Cascade classifier of the face + detecting process. */ - cv::CascadeClassifier m_faceCascade; /**< Cascade classifier of the face - detecting process. */ - - std::string m_haarcascadeFilepath; /**< Path to the file, which contains - cascade classifier information. */ + std::string m_haarcascadeFilepath; /**< Path to the file, which contains + cascade classifier information. */ - bool m_faceCascadeIsLoaded; /**< Flag to determine the state of the - m_faceCascade class. true if cascade is loaded - from file and is ready to detecting process. - Otherwise is false. */ + bool m_faceCascadeIsLoaded; /**< Flag to determine the state of the + m_faceCascade class. 
true if cascade is loaded + from file and is ready to detecting process. + Otherwise is false. */ }; } /* Face */ diff --git a/mv_face/face/include/FaceExpressionRecognizer.h b/mv_face/face/include/FaceExpressionRecognizer.h index fb445a4..284e7d9 100644 --- a/mv_face/face/include/FaceExpressionRecognizer.h +++ b/mv_face/face/include/FaceExpressionRecognizer.h @@ -22,8 +22,7 @@ #include -namespace cv -{ +namespace cv { class Mat; } @@ -33,18 +32,14 @@ namespace cv * the facial expressions recognition functionality. */ -namespace MediaVision -{ -namespace Face -{ - +namespace MediaVision { +namespace Face { /** * @brief Face expression recognition configuration. * * @since_tizen 3.0 */ -struct FaceRecognizerConfig -{ +struct FaceRecognizerConfig { FaceRecognizerConfig(); std::string mHaarcascadeFilepath; }; @@ -56,27 +51,26 @@ struct FaceRecognizerConfig * * @since_tizen 3.0 */ -class FaceExpressionRecognizer -{ +class FaceExpressionRecognizer { public: - /** - * @brief Recognizes facial expression on the image with known face location. - * - * @since_tizen 3.0 - * @param [in] grayImage The grayscale image with face - * @param [in] faceLocation The location of the face on the @a image - * @param [out] faceExpression Expression recognized for the face at - * @a faceLocation - * @param [in] config The configuration will be used for - * facial expression recognition - * - * @see MediaVision::Face::FaceRecognizerConfig - */ - static int recognizeFaceExpression( - const cv::Mat& grayImage, - const mv_rectangle_s& faceLocation, - mv_face_facial_expression_e *faceExpression, - const FaceRecognizerConfig& config = FaceRecognizerConfig()); + /** + * @brief Recognizes facial expression on the image with known face location. 
+ * + * @since_tizen 3.0 + * @param [in] grayImage The grayscale image with face + * @param [in] faceLocation The location of the face on the @a image + * @param [out] faceExpression Expression recognized for the face at + * @a faceLocation + * @param [in] config The configuration will be used for + * facial expression recognition + * + * @see MediaVision::Face::FaceRecognizerConfig + */ + static int recognizeFaceExpression( + const cv::Mat& grayImage, + const mv_rectangle_s& faceLocation, + mv_face_facial_expression_e *faceExpression, + const FaceRecognizerConfig& config = FaceRecognizerConfig()); }; } /* Face */ diff --git a/mv_face/face/include/FaceEyeCondition.h b/mv_face/face/include/FaceEyeCondition.h index 56e1038..78c0992 100644 --- a/mv_face/face/include/FaceEyeCondition.h +++ b/mv_face/face/include/FaceEyeCondition.h @@ -28,11 +28,8 @@ * eye condition recognition functionality. */ -namespace MediaVision -{ -namespace Face -{ - +namespace MediaVision { +namespace Face { /** * @class FaceEyeCondition * @brief The FaceEyeCondition implements the face @@ -40,34 +37,31 @@ namespace Face * * @since_tizen 3.0 */ -class FaceEyeCondition -{ +class FaceEyeCondition { public: - - /** - * @brief Recognizes eye condition on the image with face location. - * - * @since_tizen 3.0 - * @param [in] grayImage The image in gray scale with face where - * eye condition will be recognized - * @param [in] faceLocation The rectangle with face location - * @param [out] eyeCondition The eye condition which was recognized - * @return @c 0 on success, otherwise a negative error value - */ - static int recognizeEyeCondition( - const cv::Mat& grayImage, - mv_rectangle_s faceLocation, - mv_face_eye_condition_e *eyeCondition); + /** + * @brief Recognizes eye condition on the image with face location. 
+ * + * @since_tizen 3.0 + * @param [in] grayImage The image in gray scale with face where + * eye condition will be recognized + * @param [in] faceLocation The rectangle with face location + * @param [out] eyeCondition The eye condition which was recognized + * @return @c 0 on success, otherwise a negative error value + */ + static int recognizeEyeCondition( + const cv::Mat& grayImage, + mv_rectangle_s faceLocation, + mv_face_eye_condition_e *eyeCondition); private: + static void splitEyes( + /*[in]*/ const cv::Mat& grayImage, + /*[in]*/ mv_rectangle_s faceLocation, + /*[out]*/ cv::Mat& leftEye, + /*[out]*/ cv::Mat& rightEye); - static void splitEyes( - /*[in]*/ const cv::Mat& grayImage, - /*[in]*/ mv_rectangle_s faceLocation, - /*[out]*/ cv::Mat& leftEye, - /*[out]*/ cv::Mat& rightEye); - - static int isEyeOpen(/*[in]*/const cv::Mat& eye); + static int isEyeOpen(/*[in]*/const cv::Mat& eye); }; } /* Face */ diff --git a/mv_face/face/include/FaceRecognitionModel.h b/mv_face/face/include/FaceRecognitionModel.h index b4888f2..15232e1 100644 --- a/mv_face/face/include/FaceRecognitionModel.h +++ b/mv_face/face/include/FaceRecognitionModel.h @@ -31,54 +31,50 @@ * provides face recognition model interface. */ -namespace MediaVision -{ -namespace Face -{ - +namespace MediaVision { +namespace Face { /** * @brief Structure containing supported recognition algorithms settings. 
* * @since_tizen 3.0 */ -struct FaceRecognitionModelConfig -{ - /** - * @brief Default constructor for the @ref FaceRecognitionModelConfig - * - * @since_tizen 3.0 - */ - FaceRecognitionModelConfig(); +struct FaceRecognitionModelConfig { + /** + * @brief Default constructor for the @ref FaceRecognitionModelConfig + * + * @since_tizen 3.0 + */ + FaceRecognitionModelConfig(); - bool operator!=( - const FaceRecognitionModelConfig& other) const; + bool operator!=( + const FaceRecognitionModelConfig& other) const; - FaceRecognitionModelType mModelType; /**< - Type of the recognition algorithm */ + FaceRecognitionModelType mModelType; /**< + Type of the recognition algorithm */ - int mNumComponents; /**< How many principal components will be included - to the Eigenvectors */ + int mNumComponents; /**< How many principal components will be included + to the Eigenvectors */ - double mThreshold; /**< Minimal distance between principal components of - the model allowed */ + double mThreshold; /**< Minimal distance between principal components of + the model allowed */ - int mRadius; /**< Radius of the local features for LBHP algorithm */ + int mRadius; /**< Radius of the local features for LBHP algorithm */ - int mNeighbors; /**< How many neighboring pixels has to be analyzed - when LBHP learning applied. Usually set as - 8*radius */ + int mNeighbors; /**< How many neighboring pixels has to be analyzed + when LBHP learning applied. 
Usually set as + 8*radius */ - int mGridX; /**< X size of the spatial histogram (LBPH) */ + int mGridX; /**< X size of the spatial histogram (LBPH) */ - int mGridY; /**< Y size of the spatial histogram (LBPH) */ + int mGridY; /**< Y size of the spatial histogram (LBPH) */ - int mImgWidth; /**< Width of the image to resize the samples for - algorithms working on the samples of the same - size (Eigenfaces, Fisherfaces) */ + int mImgWidth; /**< Width of the image to resize the samples for + Eigenfaces and Fisherfaces algorithms working + on the samples of the same size */ - int mImgHeight; /**< Height of the image to resize the samples for - algorithms working on the samples of the same - size (Eigenfaces, Fisherfaces) */ + int mImgHeight; /**< Height of the image to resize the samples for + Eigenfaces and Fisherfaces algorithms working + on the samples of the same size */ }; /** @@ -88,21 +84,20 @@ struct FaceRecognitionModelConfig * * @since_tizen 3.0 */ -struct FaceRecognitionResults -{ - /** - * @brief Default constructor for the @ref FaceRecognitionResults - * - * @since_tizen 3.0 - */ - FaceRecognitionResults(); - - bool mIsRecognized; /**< The flag indication success of the - recognition */ - cv::Rect_ mFaceLocation; /**< Location of the face where face has - been recognized */ - int mFaceLabel; /**< Unique label of the face */ - double mConfidence; /**< Recognition confidence level */ +struct FaceRecognitionResults { + /** + * @brief Default constructor for the @ref FaceRecognitionResults + * + * @since_tizen 3.0 + */ + FaceRecognitionResults(); + + bool mIsRecognized; /**< The flag indication success of the + recognition */ + cv::Rect_ mFaceLocation; /**< Location of the face where face has + been recognized */ + int mFaceLabel; /**< Unique label of the face */ + double mConfidence; /**< Recognition confidence level */ }; /** @@ -111,177 +106,173 @@ struct FaceRecognitionResults * * @since_tizen 3.0 */ -class FaceRecognitionModel -{ +class 
FaceRecognitionModel { public: - - /** - * @brief Creates a FaceRecognitionModel class instance. - * - * @since_tizen 3.0 - */ - FaceRecognitionModel(); - - /** - * @brief Creates a FaceRecognitionModel class instance based on existed - * instance. - * - * @since_tizen 3.0 - * @param [in] origin The FaceRecognitionModel object that will be used - * for creation of new one - */ - FaceRecognitionModel(const FaceRecognitionModel& origin); - - /** - * @brief @ref FaceRecognitionModel copy assignment operator. - * @details Fills the information based on the @a copy - * - * @since_tizen 3.0 - * @param [in] copy @ref FaceRecognitionModel object which will be - * copied - */ - FaceRecognitionModel& operator=(const FaceRecognitionModel& copy); - - /** - * @brief Destroys the FaceRecognitionModel class instance including all - * its resources. - * - * @since_tizen 3.0 - */ - ~FaceRecognitionModel(); - - /** - * @brief Serializes FaceRecognitionModel object to the file. - * - * @since_tizen 3.0 - * @param [in] fileName The name of the file to which serialized - * FaceRecognitionModel object will be saved - * @return @c 0 on success, otherwise a negative error value - * - * @see FaceRecognitionModel::load() - */ - int save(const std::string& fileName); - - /** - * @brief Deserializes FaceRecognitionModel object from the file. 
- * - * @since_tizen 3.0 - * @param [in] fileName The name to the file from which serialized - * FaceRecognitionModel object will be deserialized - * @return @c 0 on success, otherwise a negative error value - * - * @see FaceRecognitionModel::save() - */ - int load(const std::string& fileName); - - /** - * @brief Adds face image example for face labeled by @a faceLabel - * - * @since_tizen 3.0 - * @param [in] faceImage Face image to be added to the training set - * @param [in] faceLabel Label that defines class of the face - * @return @c 0 on success, otherwise a negative error value - * - * @see FaceRecognitionModel::resetFaceExamples() - */ - int addFaceExample(const cv::Mat& faceImage, int faceLabel); - - /** - * @brief Clears the internal set of face image examples. - * - * @since_tizen 3.0 - * @remarks Internal set of face image examples contains all samples - * collected with @ref FaceRecognitionModel::addPositiveExample() - * method. - * @return @c 0 on success, otherwise a negative error value - * - * @see FaceRecognitionModel::addFaceExample() - */ - int resetFaceExamples(void); - - /** - * @brief Clears the internal set of face image examples labeled with - * @a faceLabel. - * - * @since_tizen 3.0 - * @remarks Internal set of face image examples contains all samples - * collected with @ref FaceRecognitionModel::addPositiveExample() - * method. - * @param faceLabel Unique for the model face label - * @return @c 0 on success, otherwise a negative error value - * - * @see FaceRecognitionModel::addFaceExample() - */ - int resetFaceExamples(int faceLabel); - - /** - * @brief Getter for the face labels learned by the model. - * - * @since_tizen 3.0 - * @remarks Returning vector will contain only labels had been learned by - * FaceRecognitionModel::learn() method. 
- * @return Vector of the face labels known by the model - * - * @see FaceRecognitionModel::addFaceExample() - * @see FaceRecognitionModel::learn() - */ - const std::set& getFaceLabels(void) const; - - /** - * @brief Learns recognition model based on the set of collected face image - * examples. - * - * @since_tizen 3.0 - * @param [in] config Configuration of the algorithm to be used for - * learning the model - * @return @c 0 on success, otherwise a negative error value - * - * @see FaceRecognitionModel::addFaceExample() - */ - int learn(const FaceRecognitionModelConfig& config = FaceRecognitionModelConfig()); - - /** - * @brief Recognizes faces in the image and outputs recognition results to - * the @a results structure. - * - * @since_tizen 3.0 - * @param [in] config Configuration of the algorithm to be used for - * face recognition - * @param [out] results Structure that will contain recognition results - * @return @c 0 on success, otherwise a negative error value - * - * @see FaceRecognitionModel::learn() - */ - int recognize(const cv::Mat& image, FaceRecognitionResults& results); +/** + * @brief Creates a FaceRecognitionModel class instance. + * + * @since_tizen 3.0 + */ + FaceRecognitionModel(); + + /** + * @brief Creates a FaceRecognitionModel class instance based on existed + * instance. + * + * @since_tizen 3.0 + * @param [in] origin The FaceRecognitionModel object that will be used + * for creation of new one + */ + FaceRecognitionModel(const FaceRecognitionModel& origin); + + /** + * @brief @ref FaceRecognitionModel copy assignment operator. + * @details Fills the information based on the @a copy + * + * @since_tizen 3.0 + * @param [in] copy @ref FaceRecognitionModel object which will be + * copied + */ + FaceRecognitionModel& operator=(const FaceRecognitionModel& copy); + + /** + * @brief Destroys the FaceRecognitionModel class instance including all + * its resources. 
+ * + * @since_tizen 3.0 + */ + ~FaceRecognitionModel(); + + /** + * @brief Serializes FaceRecognitionModel object to the file. + * + * @since_tizen 3.0 + * @param [in] fileName The name of the file to which serialized + * FaceRecognitionModel object will be saved + * @return @c 0 on success, otherwise a negative error value + * + * @see FaceRecognitionModel::load() + */ + int save(const std::string& fileName); + + /** + * @brief Deserializes FaceRecognitionModel object from the file. + * + * @since_tizen 3.0 + * @param [in] fileName The name to the file from which serialized + * FaceRecognitionModel object will be deserialized + * @return @c 0 on success, otherwise a negative error value + * + * @see FaceRecognitionModel::save() + */ + int load(const std::string& fileName); + + /** + * @brief Adds face image example for face labeled by @a faceLabel + * + * @since_tizen 3.0 + * @param [in] faceImage Face image to be added to the training set + * @param [in] faceLabel Label that defines class of the face + * @return @c 0 on success, otherwise a negative error value + * + * @see FaceRecognitionModel::resetFaceExamples() + */ + int addFaceExample(const cv::Mat& faceImage, int faceLabel); + + /** + * @brief Clears the internal set of face image examples. + * + * @since_tizen 3.0 + * @remarks Internal set of face image examples contains all samples + * collected with @ref FaceRecognitionModel::addPositiveExample() + * method. + * @return @c 0 on success, otherwise a negative error value + * + * @see FaceRecognitionModel::addFaceExample() + */ + int resetFaceExamples(void); + + /** + * @brief Clears the internal set of face image examples labeled with + * @a faceLabel. + * + * @since_tizen 3.0 + * @remarks Internal set of face image examples contains all samples + * collected with @ref FaceRecognitionModel::addPositiveExample() + * method. 
+ * @param faceLabel Unique for the model face label + * @return @c 0 on success, otherwise a negative error value + * + * @see FaceRecognitionModel::addFaceExample() + */ + int resetFaceExamples(int faceLabel); + + /** + * @brief Getter for the face labels learned by the model. + * + * @since_tizen 3.0 + * @remarks Returning vector will contain only labels had been learned by + * FaceRecognitionModel::learn() method. + * @return Vector of the face labels known by the model + * + * @see FaceRecognitionModel::addFaceExample() + * @see FaceRecognitionModel::learn() + */ + const std::set& getFaceLabels(void) const; + + /** + * @brief Learns recognition model based on the set of collected face image + * examples. + * + * @since_tizen 3.0 + * @param [in] config Configuration of the algorithm to be used for + * learning the model + * @return @c 0 on success, otherwise a negative error value + * + * @see FaceRecognitionModel::addFaceExample() + */ + int learn(const FaceRecognitionModelConfig& config = FaceRecognitionModelConfig()); + + /** + * @brief Recognizes faces in the image and outputs recognition results to + * the @a results structure. 
+ * + * @since_tizen 3.0 + * @param [in] config Configuration of the algorithm to be used for + * face recognition + * @param [out] results Structure that will contain recognition results + * @return @c 0 on success, otherwise a negative error value + * + * @see FaceRecognitionModel::learn() + */ + int recognize(const cv::Mat& image, FaceRecognitionResults& results); private: - - /** - * Factory method for creating of the recognition algorithm based on input - * configuration: - */ - static cv::Ptr CreateRecognitionAlgorithm( - const FaceRecognitionModelConfig& config = - FaceRecognitionModelConfig()); + /** + * Factory method for creating of the recognition algorithm based on input + * configuration: + */ + static cv::Ptr CreateRecognitionAlgorithm( + const FaceRecognitionModelConfig& config = + FaceRecognitionModelConfig()); private: + bool m_canRecognize; /**< The flag showing possibility to recognize with + the face recognition model */ - bool m_canRecognize; /**< The flag showing possibility to recognize with - the face recognition model */ - - std::map > m_faceSamples; /**< Samples of the - images which - will be used for - the learning */ + std::map > m_faceSamples; /**< Samples of the + images which + will be used for + the learning */ - FaceRecognitionModelConfig m_learnAlgorithmConfig; /**< Configuration of the - learning method */ + FaceRecognitionModelConfig m_learnAlgorithmConfig; /**< Configuration of the + learning method */ - cv::Ptr m_recognizer; /**< Recognizer associated with - the current model */ + cv::Ptr m_recognizer; /**< Recognizer associated with + the current model */ - std::set m_learnedLabels; /**< Vector of the labels had been learned - by the model */ + std::set m_learnedLabels; /**< Vector of the labels had been learned + by the model */ }; } /* Face */ diff --git a/mv_face/face/include/FaceTrackingModel.h b/mv_face/face/include/FaceTrackingModel.h index daa56c7..1fb6ccf 100644 --- a/mv_face/face/include/FaceTrackingModel.h +++ 
b/mv_face/face/include/FaceTrackingModel.h @@ -25,33 +25,29 @@ * provides face tracking model interface. */ -namespace MediaVision -{ -namespace Face -{ - +namespace MediaVision { +namespace Face { /** * @brief Structure where results of * @ref MediaVision::Face::FaceTrackingModel::track() call are stored. * * @since_tizen 3.0 */ -struct FaceTrackingResults -{ - /** - * @brief Default constructor for the @ref FaceTrackingResults - * - * @since_tizen 3.0 - */ - FaceTrackingResults(); - - bool mIsTracked; /**< The flag indication success of the - tracking */ - cv::Rect_ mFaceLocation; /**< Location of the face at the current - track iteration where face position - is predicted */ - float mConfidence; /**< Tracking confidence level - (0.0 .. 1.0) */ +struct FaceTrackingResults { + /** + * @brief Default constructor for the @ref FaceTrackingResults + * + * @since_tizen 3.0 + */ + FaceTrackingResults(); + + bool mIsTracked; /**< The flag indication success of the + tracking */ + cv::Rect_ mFaceLocation; /**< Location of the face at the current + track iteration where face position + is predicted */ + float mConfidence; /**< Tracking confidence level + (0.0 .. 1.0) */ }; /** @@ -60,113 +56,110 @@ struct FaceTrackingResults * * @since_tizen 3.0 */ -class FaceTrackingModel -{ +class FaceTrackingModel { public: - /** - * @brief Creates a FaceTrackingModel class instance. - * - * @since_tizen 3.0 - */ - FaceTrackingModel(); - - /** - * @brief Creates a FaceTrackingModel class instance based on existed - * instance. - * - * @since_tizen 3.0 - * @param [in] origin The FaceTrackingModel object that will be used - * for creation of new one - */ - FaceTrackingModel(const FaceTrackingModel& origin); - - /** - * @brief @ref FaceTrackingModel copy assignment operator. 
- * @details Fills the information based on the @a copy - * - * @since_tizen 3.0 - * @param [in] copy @ref FaceTrackingModel object which will be - * copied - */ - FaceTrackingModel& operator=(const FaceTrackingModel& copy); - - /** - * @brief Destroys the FaceTrackingModel class instance including all - * its resources. - * - * @since_tizen 3.0 - */ - ~FaceTrackingModel(); - - /** - * @brief Serializes FaceTrackingModel object to the file. - * - * @since_tizen 3.0 - * @param [in] fileName The name to the file to which serialized - * FaceTrackingModel object will be saved - * @return @c 0 on success, otherwise a negative error value - * - * @see FaceTrackingModel::load() - */ - int save(const std::string& fileName); - - /** - * @brief Deserializes FaceTrackingModel object from the file. - * - * @since_tizen 3.0 - * @param [in] fileName The name of the file from which serialized - * FaceTrackingModel object will be deserialized - * @return @c 0 on success, otherwise a negative error value - * - * @see FaceTrackingModel::save() - */ - int load(const std::string& fileName); - - /** - * @brief Prepares FaceTrackingModel object to the next tracking session. - * - * @since_tizen 3.0 - * @param [in] image First frame of the video or image sequence for - * which tracking will be started - * @return @c 0 on success, otherwise a negative error value - * - * @see FaceTrackingModel::save() - */ - int prepare(const cv::Mat& image); - - /** - * @brief Prepares FaceTrackingModel object to the next tracking session. 
- * - * @since_tizen 3.0 - * @param [in] image First frame of the video or image sequence for - * which tracking will be started - * @param [in] boundingBox Rectangular location of the face on the @a - * image - * @return @c 0 on success, otherwise a negative error value - * - * @see FaceTrackingModel::save() - */ - int prepare(const cv::Mat& image, const cv::Rect_& boundingBox); - - /** - * @brief Performs one tracking iteration for the video frame or image - * from the continuous sequence of images. - * - * @since_tizen 3.0 - * @param [in] image - * @param [out] boundingBox - */ - int track(const cv::Mat& image, FaceTrackingResults& results); + /** + * @brief Creates a FaceTrackingModel class instance. + * + * @since_tizen 3.0 + */ + FaceTrackingModel(); + + /** + * @brief Creates a FaceTrackingModel class instance based on existed + * instance. + * + * @since_tizen 3.0 + * @param [in] origin The FaceTrackingModel object that will be used + * for creation of new one + */ + FaceTrackingModel(const FaceTrackingModel& origin); + + /** + * @brief @ref FaceTrackingModel copy assignment operator. + * @details Fills the information based on the @a copy + * + * @since_tizen 3.0 + * @param [in] copy @ref FaceTrackingModel object which will be + * copied + */ + FaceTrackingModel& operator=(const FaceTrackingModel& copy); + + /** + * @brief Destroys the FaceTrackingModel class instance including all + * its resources. + * + * @since_tizen 3.0 + */ + ~FaceTrackingModel(); + + /** + * @brief Serializes FaceTrackingModel object to the file. + * + * @since_tizen 3.0 + * @param [in] fileName The name to the file to which serialized + * FaceTrackingModel object will be saved + * @return @c 0 on success, otherwise a negative error value + * + * @see FaceTrackingModel::load() + */ + int save(const std::string& fileName); + + /** + * @brief Deserializes FaceTrackingModel object from the file. 
+ * + * @since_tizen 3.0 + * @param [in] fileName The name of the file from which serialized + * FaceTrackingModel object will be deserialized + * @return @c 0 on success, otherwise a negative error value + * + * @see FaceTrackingModel::save() + */ + int load(const std::string& fileName); + + /** + * @brief Prepares FaceTrackingModel object to the next tracking session. + * + * @since_tizen 3.0 + * @param [in] image First frame of the video or image sequence for + * which tracking will be started + * @return @c 0 on success, otherwise a negative error value + * + * @see FaceTrackingModel::save() + */ + int prepare(const cv::Mat& image); + + /** + * @brief Prepares FaceTrackingModel object to the next tracking session. + * + * @since_tizen 3.0 + * @param [in] image First frame of the video or image sequence for + * which tracking will be started + * @param [in] boundingBox Rectangular location of the face on the @a + * image + * @return @c 0 on success, otherwise a negative error value + * + * @see FaceTrackingModel::save() + */ + int prepare(const cv::Mat& image, const cv::Rect_& boundingBox); + + /** + * @brief Performs one tracking iteration for the video frame or image + * from the continuous sequence of images. 
+ * + * @since_tizen 3.0 + * @param [in] image + * @param [out] boundingBox + */ + int track(const cv::Mat& image, FaceTrackingResults& results); private: + bool m_canTrack; /**< The flag showing possibility + of the tracking model to + perform track */ - bool m_canTrack; /**< The flag showing possibility - of the tracking model to - perform track */ - - cv::Ptr m_tracker; /**< Underlying OpenCV tracking - model */ - + cv::Ptr m_tracker; /**< Underlying OpenCV tracking + model */ }; } /* Face */ diff --git a/mv_face/face/include/FaceUtil.h b/mv_face/face/include/FaceUtil.h index d79757d..a6e1913 100644 --- a/mv_face/face/include/FaceUtil.h +++ b/mv_face/face/include/FaceUtil.h @@ -25,23 +25,19 @@ * @file FaceUtil.h * @brief This file contains the useful functionality for Face module. */ -namespace MediaVision -{ -namespace Face -{ - +namespace MediaVision { +namespace Face { /** * @brief Enumeration of supported learning algorithms. * * @since_tizen 3.0 */ -enum FaceRecognitionModelType -{ - MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN = 0, /**< Unknown algorithm type */ - MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES = 1, /**< Eigenfaces algorithm */ - MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES = 2, /**< Fisherfaces algorithm */ - MEDIA_VISION_FACE_MODEL_TYPE_LBPH = 3 /**< Local Binary Patterns - Histograms algorithm */ +enum FaceRecognitionModelType { + MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN = 0, /**< Unknown algorithm type */ + MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES = 1, /**< Eigenfaces algorithm */ + MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES = 2, /**< Fisherfaces algorithm */ + MEDIA_VISION_FACE_MODEL_TYPE_LBPH = 3 /**< Local Binary Patterns + Histograms algorithm */ }; /** @@ -49,14 +45,13 @@ enum FaceRecognitionModelType * * @since_tizen 3.0 */ -struct RecognitionParams -{ - RecognitionParams(FaceRecognitionModelType algType); +struct RecognitionParams { + RecognitionParams(FaceRecognitionModelType algType); - RecognitionParams(); + RecognitionParams(); - 
FaceRecognitionModelType mRecognitionAlgType; - /**< The type of the learning algorithm. */ + FaceRecognitionModelType mRecognitionAlgType; /**< The type of + the learning algorithm */ }; /** diff --git a/mv_face/face/include/TrackerMedianFlow.h b/mv_face/face/include/TrackerMedianFlow.h index 7112a14..44b46c7 100644 --- a/mv_face/face/include/TrackerMedianFlow.h +++ b/mv_face/face/include/TrackerMedianFlow.h @@ -59,95 +59,91 @@ by authors to outperform MIL). During the implementation period the code at , the courtesy of the author Arthur Amarra, was used for the reference purpose. */ -class TrackerMedianFlow : public virtual Algorithm -{ +class TrackerMedianFlow : public virtual Algorithm { public: + struct Params { + /** + * @brief TrackerMedianFlow algorithm parameters constructor + */ + Params(); + void read(const FileNode& fn); + void write(FileStorage& fs) const; - struct Params - { - /** - * @brief TrackerMedianFlow algorithm parameters constructor - */ - Params(); - void read(const FileNode& fn); - void write(FileStorage& fs) const; - - int mPointsInGrid; /**< Square root of number of keypoints used. - Increase it to trade accurateness for speed. - Default value is sensible and recommended */ + int mPointsInGrid; /**< Square root of number of keypoints used. + Increase it to trade accurateness for speed. 
+ Default value is sensible and recommended */ - Size mWindowSize; /**< Size of the search window at each pyramid level - for Lucas-Kanade optical flow search used for - tracking */ + Size mWindowSize; /**< Size of the search window at each pyramid level + for Lucas-Kanade optical flow search used for + tracking */ - int mPyrMaxLevel; /**< Number of pyramid levels for Lucas-Kanade optical - flow search used for tracking */ - }; + int mPyrMaxLevel; /**< Number of pyramid levels for Lucas-Kanade optical + flow search used for tracking */ + }; - TrackerMedianFlow(Params paramsIn = Params()); + TrackerMedianFlow(Params paramsIn = Params()); - bool copyTo(TrackerMedianFlow& copy) const; + bool copyTo(TrackerMedianFlow& copy) const; - bool init(const Mat& image, const Rect_& boundingBox); - bool update(const Mat& image, Rect_& boundingBox); + bool init(const Mat& image, const Rect_& boundingBox); + bool update(const Mat& image, Rect_& boundingBox); - bool isInited() const; + bool isInited() const; - float getLastConfidence() const; - Rect_ getLastBoundingBox() const; + float getLastConfidence() const; + Rect_ getLastBoundingBox() const; - void read(FileStorage& fn); - void write(FileStorage& fs) const; + void read(FileStorage& fn); + void write(FileStorage& fs) const; private: + bool isInit; - bool isInit; - - bool medianFlowImpl(Mat oldImage, Mat newImage, Rect_& oldBox); + bool medianFlowImpl(Mat oldImage, Mat newImage, Rect_& oldBox); - Rect_ vote( - const std::vector& oldPoints, - const std::vector& newPoints, - const Rect_& oldRect, - Point2f& mD); + Rect_ vote( + const std::vector& oldPoints, + const std::vector& newPoints, + const Rect_& oldRect, + Point2f& mD); - template - T getMedian( - std::vector& values, int size = -1); + template + T getMedian( + std::vector& values, int size = -1); - void check_FB( - std::vector newPyramid, - const std::vector& oldPoints, - const std::vector& newPoints, - std::vector& status); + void check_FB( + std::vector newPyramid, + 
const std::vector& oldPoints, + const std::vector& newPoints, + std::vector& status); - void check_NCC( - const Mat& oldImage, - const Mat& newImage, - const std::vector& oldPoints, - const std::vector& newPoints, - std::vector& status); + void check_NCC( + const Mat& oldImage, + const Mat& newImage, + const std::vector& oldPoints, + const std::vector& newPoints, + std::vector& status); - inline float l2distance(Point2f p1, Point2f p2); + inline float l2distance(Point2f p1, Point2f p2); - Params params; /**< Parameters used during tracking, see - @ref TrackerMedianFlow::Params */ + Params params; /**< Parameters used during tracking, see + @ref TrackerMedianFlow::Params */ - TermCriteria termcrit; /**< Terminating criteria for OpenCV - Lucas–Kanade optical flow algorithm used - during tracking */ + TermCriteria termcrit; /**< Terminating criteria for OpenCV + Lucas–Kanade optical flow algorithm used + during tracking */ - Rect_ m_boundingBox; /**< Tracking object bounding box */ + Rect_ m_boundingBox; /**< Tracking object bounding box */ - float m_confidence; /**< Confidence that face was tracked correctly - at the last tracking iteration */ + float m_confidence; /**< Confidence that face was tracked correctly + at the last tracking iteration */ - Mat m_image; /**< Last image for which tracking was - performed */ + Mat m_image; /**< Last image for which tracking was + performed */ - std::vector m_pyramid; /**< The pyramid had been calculated for - the previous frame (or when - initialize the model) */ + std::vector m_pyramid; /**< The pyramid had been calculated for + the previous frame or when + initialize the model */ }; } /* namespace cv */ diff --git a/mv_face/face/include/mv_face_open.h b/mv_face/face/include/mv_face_open.h index a127d5a..8346b4f 100644 --- a/mv_face/face/include/mv_face_open.h +++ b/mv_face/face/include/mv_face_open.h @@ -65,10 +65,10 @@ extern "C" { * @see mv_face_detected_cb */ int mv_face_detect_open( - mv_source_h source, - 
mv_engine_config_h engine_cfg, - mv_face_detected_cb detected_cb, - void *user_data); + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_face_detected_cb detected_cb, + void *user_data); /********************/ @@ -126,12 +126,12 @@ int mv_face_detect_open( * @see mv_face_recognized_cb */ int mv_face_recognize_open( - mv_source_h source, - mv_face_recognition_model_h recognition_model, - mv_engine_config_h engine_cfg, - mv_rectangle_s *face_location, - mv_face_recognized_cb recognized_cb, - void *user_data); + mv_source_h source, + mv_face_recognition_model_h recognition_model, + mv_engine_config_h engine_cfg, + mv_rectangle_s *face_location, + mv_face_recognized_cb recognized_cb, + void *user_data); /*****************/ @@ -190,12 +190,12 @@ int mv_face_recognize_open( * @see mv_face_tracked_cb */ int mv_face_track_open( - mv_source_h source, - mv_face_tracking_model_h tracking_model, - mv_engine_config_h engine_cfg, - mv_face_tracked_cb tracked_cb, - bool do_learn, - void *user_data); + mv_source_h source, + mv_face_tracking_model_h tracking_model, + mv_engine_config_h engine_cfg, + mv_face_tracked_cb tracked_cb, + bool do_learn, + void *user_data); /********************************/ @@ -233,11 +233,11 @@ int mv_face_track_open( * @see mv_face_eye_condition_recognized_cb */ int mv_face_eye_condition_recognize_open( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s face_location, - mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, - void *user_data); + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s face_location, + mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, + void *user_data); /************************************/ @@ -274,11 +274,11 @@ int mv_face_eye_condition_recognize_open( * @see mv_face_facial_expression_recognized_cb */ int mv_face_facial_expression_recognize_open( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s face_location, - 
mv_face_facial_expression_recognized_cb expression_recognized_cb, - void *user_data); + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s face_location, + mv_face_facial_expression_recognized_cb expression_recognized_cb, + void *user_data); /*******************************/ /* Recognition model behavior */ @@ -313,7 +313,7 @@ int mv_face_facial_expression_recognize_open( * @see mv_face_recognition_model_destroy_open() */ int mv_face_recognition_model_create_open( - mv_face_recognition_model_h *recognition_model); + mv_face_recognition_model_h *recognition_model); /** * @brief Destroys the face recognition model handle and releases all its @@ -329,7 +329,7 @@ int mv_face_recognition_model_create_open( * @see mv_face_recognition_model_create_open() */ int mv_face_recognition_model_destroy_open( - mv_face_recognition_model_h recognition_model); + mv_face_recognition_model_h recognition_model); /** * @brief Creates a copy of existed recognition model handle and clones all its @@ -353,8 +353,8 @@ int mv_face_recognition_model_destroy_open( * @see mv_face_recognition_model_create_open() */ int mv_face_recognition_model_clone_open( - mv_face_recognition_model_h src, - mv_face_recognition_model_h *dst); + mv_face_recognition_model_h src, + mv_face_recognition_model_h *dst); /** * @brief Saves recognition model to the file. @@ -387,8 +387,8 @@ int mv_face_recognition_model_clone_open( * @see mv_face_recognition_model_create_open() */ int mv_face_recognition_model_save_open( - const char *file_name, - mv_face_recognition_model_h recognition_model); + const char *file_name, + mv_face_recognition_model_h recognition_model); /** * @brief Loads recognition model from file. 
@@ -420,8 +420,8 @@ int mv_face_recognition_model_save_open( * @see mv_face_recognition_model_destroy_open() */ int mv_face_recognition_model_load_open( - const char *file_name, - mv_face_recognition_model_h *recognition_model); + const char *file_name, + mv_face_recognition_model_h *recognition_model); /** * @brief Adds face image example to be used for face recognition model learning @@ -460,10 +460,10 @@ int mv_face_recognition_model_load_open( * @see mv_face_recognition_model_learn_open() */ int mv_face_recognition_model_add_open( - const mv_source_h source, - mv_face_recognition_model_h recognition_model, - const mv_rectangle_s *example_location, - int face_label); + const mv_source_h source, + mv_face_recognition_model_h recognition_model, + const mv_rectangle_s *example_location, + int face_label); /** * @brief Remove from @a recognition_model all collected with @@ -498,8 +498,8 @@ int mv_face_recognition_model_add_open( * @see mv_face_recognition_model_learn_open() */ int mv_face_recognition_model_reset_open( - mv_face_recognition_model_h recognition_model, - const int *face_label); + mv_face_recognition_model_h recognition_model, + const int *face_label); /** * @brief Learns face recognition model. @@ -555,8 +555,8 @@ int mv_face_recognition_model_reset_open( * @see mv_face_recognize_open() */ int mv_face_recognition_model_learn_open( - mv_engine_config_h engine_cfg, - mv_face_recognition_model_h recognition_model); + mv_engine_config_h engine_cfg, + mv_face_recognition_model_h recognition_model); /** * @brief Queries labels list and number of labels had been learned by the model. 
@@ -585,9 +585,9 @@ int mv_face_recognition_model_learn_open( * @see mv_face_recognition_model_learn_open() */ int mv_face_recognition_model_query_labels_open( - mv_face_recognition_model_h recognition_model, - int **labels, - unsigned int *number_of_labels); + mv_face_recognition_model_h recognition_model, + int **labels, + unsigned int *number_of_labels); /***************************/ /* Tracking model behavior */ @@ -628,7 +628,7 @@ int mv_face_recognition_model_query_labels_open( * @see mv_face_tracking_model_load_open() */ int mv_face_tracking_model_create_open( - mv_face_tracking_model_h *tracking_model); + mv_face_tracking_model_h *tracking_model); /** * @brief Call this function to destroy the face tracking model handle and @@ -645,7 +645,7 @@ int mv_face_tracking_model_create_open( * @see mv_face_tracking_model_create_open() */ int mv_face_tracking_model_destroy_open( - mv_face_tracking_model_h tracking_model); + mv_face_tracking_model_h tracking_model); /** * @brief Call this function to initialize tracking model by the location of the @@ -689,10 +689,10 @@ int mv_face_tracking_model_destroy_open( * @see mv_face_track_open() */ int mv_face_tracking_model_prepare_open( - mv_face_tracking_model_h tracking_model, - mv_engine_config_h engine_cfg, - mv_source_h source, - mv_quadrangle_s */*location*/); + mv_face_tracking_model_h tracking_model, + mv_engine_config_h engine_cfg, + mv_source_h source, + mv_quadrangle_s */*location*/); /** * @brief Call this function to make a copy of existed tracking model handle and @@ -717,8 +717,8 @@ int mv_face_tracking_model_prepare_open( * @see mv_face_tracking_model_create_open() */ int mv_face_tracking_model_clone_open( - mv_face_tracking_model_h src, - mv_face_tracking_model_h *dst); + mv_face_tracking_model_h src, + mv_face_tracking_model_h *dst); /** * @brief Call this method to save tracking model to the file. 
@@ -747,8 +747,8 @@ int mv_face_tracking_model_clone_open( * @see mv_face_tracking_model_create_open() */ int mv_face_tracking_model_save_open( - const char *file_name, - mv_face_tracking_model_h tracking_model); + const char *file_name, + mv_face_tracking_model_h tracking_model); /** * @brief Call this method to load a tracking model from file. @@ -781,8 +781,8 @@ int mv_face_tracking_model_save_open( * @see mv_face_tracking_model_destroy_open() */ int mv_face_tracking_model_load_open( - const char *file_name, - mv_face_tracking_model_h *tracking_model); + const char *file_name, + mv_face_tracking_model_h *tracking_model); #ifdef __cplusplus } diff --git a/mv_face/face/src/FaceDetector.cpp b/mv_face/face/src/FaceDetector.cpp index 21d8195..d9b4fe5 100644 --- a/mv_face/face/src/FaceDetector.cpp +++ b/mv_face/face/src/FaceDetector.cpp @@ -16,89 +16,78 @@ #include "FaceDetector.h" -namespace MediaVision -{ -namespace Face -{ - +namespace MediaVision { +namespace Face { FaceDetector::FaceDetector() : - m_faceCascade(), - m_haarcascadeFilepath(), - m_faceCascadeIsLoaded(false) + m_faceCascade(), + m_haarcascadeFilepath(), + m_faceCascadeIsLoaded(false) { ; /* NULL */ } FaceDetector::~FaceDetector() { - ; /* NULL */ + ; /* NULL */ } bool FaceDetector::detectFaces( - const cv::Mat& image, - const cv::Rect& roi, - const cv::Size& minSize, - std::vector& faceLocations) + const cv::Mat& image, + const cv::Rect& roi, + const cv::Size& minSize, + std::vector& faceLocations) { - if (!m_faceCascadeIsLoaded) - { - return false; - } + if (!m_faceCascadeIsLoaded) { + return false; + } - faceLocations.clear(); + faceLocations.clear(); - cv::Mat intrestingRegion = image; + cv::Mat intrestingRegion = image; - bool roiIsUsed = false; - if (roi.x >= 0 && roi.y >= 0 && roi.width > 0 && roi.height > 0 && - (roi.x + roi.width) <= image.cols && (roi.y + roi.height) <= image.rows) - { - intrestingRegion = intrestingRegion(roi); - roiIsUsed = true; - } + bool roiIsUsed = false; + if (roi.x 
>= 0 && roi.y >= 0 && roi.width > 0 && roi.height > 0 && + (roi.x + roi.width) <= image.cols && + (roi.y + roi.height) <= image.rows) { + intrestingRegion = intrestingRegion(roi); + roiIsUsed = true; + } - if (minSize.width > 0 && minSize.height > 0 && - minSize.width <= image.cols && minSize.height <= image.rows) - { - m_faceCascade.detectMultiScale( - intrestingRegion, - faceLocations, - 1.1, - 3, - 0, - minSize); - } - else - { - m_faceCascade.detectMultiScale(intrestingRegion, faceLocations); - } + if (minSize.width > 0 && minSize.height > 0 && + minSize.width <= image.cols && minSize.height <= image.rows) { + m_faceCascade.detectMultiScale( + intrestingRegion, + faceLocations, + 1.1, + 3, + 0, + minSize); + } else { + m_faceCascade.detectMultiScale(intrestingRegion, faceLocations); + } - if (roiIsUsed) - { - const size_t numberOfLocations = faceLocations.size(); - for (size_t i = 0u; i < numberOfLocations; ++i) - { - faceLocations[i].x += roi.x; - faceLocations[i].y += roi.y; - } - } + if (roiIsUsed) { + const size_t numberOfLocations = faceLocations.size(); + for (size_t i = 0u; i < numberOfLocations; ++i) { + faceLocations[i].x += roi.x; + faceLocations[i].y += roi.y; + } + } - return true; + return true; } bool FaceDetector::loadHaarcascade(const std::string& haarcascadeFilepath) { + if (!m_faceCascadeIsLoaded || + m_haarcascadeFilepath != haarcascadeFilepath) { + if (!(m_faceCascadeIsLoaded = m_faceCascade.load(haarcascadeFilepath))) { + return false; + } + m_haarcascadeFilepath = haarcascadeFilepath; + } - if (!m_faceCascadeIsLoaded || m_haarcascadeFilepath != haarcascadeFilepath) - { - if (!(m_faceCascadeIsLoaded = m_faceCascade.load(haarcascadeFilepath))) - { - return false; - } - m_haarcascadeFilepath = haarcascadeFilepath; - } - - return true; + return true; } } /* Face */ diff --git a/mv_face/face/src/FaceExpressionRecognizer.cpp b/mv_face/face/src/FaceExpressionRecognizer.cpp index 51d9d05..e32ddc0 100644 --- 
a/mv_face/face/src/FaceExpressionRecognizer.cpp +++ b/mv_face/face/src/FaceExpressionRecognizer.cpp @@ -22,83 +22,74 @@ #include -namespace MediaVision -{ -namespace Face -{ - +namespace MediaVision { +namespace Face { static const int MinDetectionWidth = 30; static const int MinDetectionHeight = 30; FaceRecognizerConfig::FaceRecognizerConfig() : - mHaarcascadeFilepath( - "/usr/share/OpenCV/haarcascades/haarcascade_smile.xml") + mHaarcascadeFilepath( + "/usr/share/OpenCV/haarcascades/haarcascade_smile.xml") { - ; /* NULL */ + ; /* NULL */ } int FaceExpressionRecognizer::recognizeFaceExpression( - const cv::Mat& grayImage, - const mv_rectangle_s& faceLocation, - mv_face_facial_expression_e *faceExpression, - const FaceRecognizerConfig& config) + const cv::Mat& grayImage, + const mv_rectangle_s& faceLocation, + mv_face_facial_expression_e *faceExpression, + const FaceRecognizerConfig& config) { - if (NULL == faceExpression) - { - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - const int smileRectHeight = cvRound((float)faceLocation.height / 2); - - const cv::Rect roi( - faceLocation.point.x, - faceLocation.point.y + faceLocation.height - smileRectHeight, - faceLocation.width, - smileRectHeight); - - if (roi.width < MinDetectionWidth || - roi.height < MinDetectionHeight) - { - (*faceExpression) = MV_FACE_UNKNOWN; - return MEDIA_VISION_ERROR_NONE; - } - - if (0 > roi.x || - 0 > roi.y || - roi.x + roi.width > grayImage.cols || - roi.y + roi.height > grayImage.rows) - { - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - const cv::Mat mouthImg(grayImage, roi); - - std::vector areas; - - cv::CascadeClassifier smileClassifier; - smileClassifier.load(config.mHaarcascadeFilepath); - smileClassifier.detectMultiScale( - mouthImg, - areas, - 1.1, - 80, - cv::CASCADE_FIND_BIGGEST_OBJECT | - cv::CASCADE_DO_CANNY_PRUNING | - cv::CASCADE_SCALE_IMAGE, - cv::Size(MinDetectionWidth, MinDetectionHeight)); - - (*faceExpression) = MV_FACE_UNKNOWN; - const size_t smilesFoundSize = 
areas.size(); - if (smilesFoundSize == 0) - { - (*faceExpression) = MV_FACE_NEUTRAL; - } - else if (smilesFoundSize == 1) - { - (*faceExpression) = MV_FACE_SMILE; - } - - return MEDIA_VISION_ERROR_NONE; + if (NULL == faceExpression) { + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + const int smileRectHeight = cvRound((float)faceLocation.height / 2); + + const cv::Rect roi( + faceLocation.point.x, + faceLocation.point.y + faceLocation.height - smileRectHeight, + faceLocation.width, + smileRectHeight); + + if (roi.width < MinDetectionWidth || + roi.height < MinDetectionHeight) { + (*faceExpression) = MV_FACE_UNKNOWN; + return MEDIA_VISION_ERROR_NONE; + } + + if (0 > roi.x || + 0 > roi.y || + roi.x + roi.width > grayImage.cols || + roi.y + roi.height > grayImage.rows) { + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + const cv::Mat mouthImg(grayImage, roi); + + std::vector areas; + + cv::CascadeClassifier smileClassifier; + smileClassifier.load(config.mHaarcascadeFilepath); + smileClassifier.detectMultiScale( + mouthImg, + areas, + 1.1, + 80, + cv::CASCADE_FIND_BIGGEST_OBJECT | + cv::CASCADE_DO_CANNY_PRUNING | + cv::CASCADE_SCALE_IMAGE, + cv::Size(MinDetectionWidth, MinDetectionHeight)); + + (*faceExpression) = MV_FACE_UNKNOWN; + const size_t smilesFoundSize = areas.size(); + if (smilesFoundSize == 0) { + (*faceExpression) = MV_FACE_NEUTRAL; + } else if (smilesFoundSize == 1) { + (*faceExpression) = MV_FACE_SMILE; + } + + return MEDIA_VISION_ERROR_NONE; } } /* Face */ diff --git a/mv_face/face/src/FaceEyeCondition.cpp b/mv_face/face/src/FaceEyeCondition.cpp index 9432d1e..10d9e6e 100644 --- a/mv_face/face/src/FaceEyeCondition.cpp +++ b/mv_face/face/src/FaceEyeCondition.cpp @@ -20,209 +20,189 @@ #include -namespace MediaVision -{ -namespace Face -{ - +namespace MediaVision { +namespace Face { void FaceEyeCondition::splitEyes( - const cv::Mat& grayImage, - mv_rectangle_s faceLocation, - cv::Mat& leftEye, - cv::Mat& rightEye) + const cv::Mat& grayImage, + 
mv_rectangle_s faceLocation, + cv::Mat& leftEye, + cv::Mat& rightEye) { - leftEye = grayImage.rowRange(0, grayImage.rows / 2 - grayImage.rows / 10) - .colRange(grayImage.cols / 2 + grayImage.cols / 10, - grayImage.cols) - .clone(); - - rightEye = grayImage.rowRange(grayImage.rows / 2 + grayImage.rows / 10, - grayImage.rows) - .colRange(grayImage.cols / 2 + grayImage.cols / 10, - grayImage.cols) - .clone(); - - const cv::Rect faceRect( - faceLocation.point.x, - faceLocation.point.y, - faceLocation.width, - faceLocation.height); - - const cv::Rect eyeAreaRight( - faceRect.x + faceRect.width / 16, - (int) (faceRect.y + (faceRect.height / 4.5)), - (faceRect.width - 2 * faceRect.width / 16) / 2, - (int) (faceRect.height / 3.0)); - - const cv::Rect eyeAreaLeft( - faceRect.x + faceRect.width / 16 - + (faceRect.width - 2 * faceRect.width / 16) / 2, - (int) (faceRect.y + (faceRect.height / 4.5)), - (faceRect.width - 2 * faceRect.width / 16) / 2, - (int) (faceRect.height / 3.0)); - - const double xLeftEyeCenter = (2 * eyeAreaLeft.x + eyeAreaLeft.width) / 2.; - const double yLeftEyeCenter = (2 * eyeAreaLeft.y + eyeAreaLeft.height) / 2.; - - const double xRightEyeCenter = (2 * eyeAreaRight.x + eyeAreaRight.width) / 2.; - const double yRightEyeCenter = (2 * eyeAreaRight.y + eyeAreaRight.height) / 2.; - - const cv::Rect leftEyeRect(xLeftEyeCenter - eyeAreaLeft.width / 4, - yLeftEyeCenter - eyeAreaLeft.height / 4, - eyeAreaLeft.width / 2, - eyeAreaLeft.height / 2); - - const cv::Rect rightEyeRect(xRightEyeCenter - eyeAreaRight.width / 4, - yRightEyeCenter - eyeAreaRight.height / 4, - eyeAreaRight.width / 2, - eyeAreaRight.height / 2); - - cv::resize( - grayImage(leftEyeRect), - leftEye, - leftEye.size()); + leftEye = grayImage.rowRange(0, grayImage.rows / 2 - grayImage.rows / 10) + .colRange(grayImage.cols / 2 + grayImage.cols / 10, + grayImage.cols) + .clone(); + + rightEye = grayImage.rowRange(grayImage.rows / 2 + grayImage.rows / 10, + grayImage.rows) + 
.colRange(grayImage.cols / 2 + grayImage.cols / 10, + grayImage.cols) + .clone(); + + const cv::Rect faceRect( + faceLocation.point.x, + faceLocation.point.y, + faceLocation.width, + faceLocation.height); + + const cv::Rect eyeAreaRight( + faceRect.x + faceRect.width / 16, + (int) (faceRect.y + (faceRect.height / 4.5)), + (faceRect.width - 2 * faceRect.width / 16) / 2, + (int) (faceRect.height / 3.0)); + + const cv::Rect eyeAreaLeft( + faceRect.x + faceRect.width / 16 + + (faceRect.width - 2 * faceRect.width / 16) / 2, + (int) (faceRect.y + (faceRect.height / 4.5)), + (faceRect.width - 2 * faceRect.width / 16) / 2, + (int) (faceRect.height / 3.0)); + + const double xLeftEyeCenter = (2 * eyeAreaLeft.x + eyeAreaLeft.width) / 2.; + const double yLeftEyeCenter = (2 * eyeAreaLeft.y + eyeAreaLeft.height) / 2.; + + const double xRightEyeCenter = (2 * eyeAreaRight.x + eyeAreaRight.width) / 2.; + const double yRightEyeCenter = (2 * eyeAreaRight.y + eyeAreaRight.height) / 2.; + + const cv::Rect leftEyeRect(xLeftEyeCenter - eyeAreaLeft.width / 4, + yLeftEyeCenter - eyeAreaLeft.height / 4, + eyeAreaLeft.width / 2, + eyeAreaLeft.height / 2); + + const cv::Rect rightEyeRect(xRightEyeCenter - eyeAreaRight.width / 4, + yRightEyeCenter - eyeAreaRight.height / 4, + eyeAreaRight.width / 2, + eyeAreaRight.height / 2); + + cv::resize( + grayImage(leftEyeRect), + leftEye, + leftEye.size()); cv::resize( - grayImage(rightEyeRect), - rightEye, - rightEye.size()); + grayImage(rightEyeRect), + rightEye, + rightEye.size()); } int FaceEyeCondition::isEyeOpen(const cv::Mat& eye) { - int isOpen = MV_FACE_EYES_CLOSED; - - cv::Mat eyeEqualized; - cv::equalizeHist(eye, eyeEqualized); - - const int thresold = 8; - eyeEqualized = eyeEqualized < thresold; - - std::vector > contours; - std::vector hierarchy; - - cv::findContours( - eyeEqualized, - contours, - hierarchy, - CV_RETR_CCOMP, - CV_CHAIN_APPROX_SIMPLE); - - const size_t contoursSize = contours.size(); - - if (!contoursSize) - { - return 
MV_FACE_EYES_NOT_FOUND; - } - - const int xCenter = eyeEqualized.cols / 2; - const int yCenter = eyeEqualized.rows / 2; - const int width = eyeEqualized.cols / 2.5; - const int height = eyeEqualized.rows / 2.5; - - const cv::Rect boundThresold(xCenter - width, yCenter - height, 2 * width, 2 * height); - - const int widthHeightRatio = 3; - const double areaRatio = 0.005; - const double areaSmallRatio = 0.0005; - size_t rectanglesInsideCount = 0u; - - for (size_t i = 0; i < contoursSize; ++i) - { - const cv::Rect currentRect = cv::boundingRect(contours[i]); - const double currentArea = cv::contourArea(contours[i]); - - if (boundThresold.contains(currentRect.br()) && - boundThresold.contains(currentRect.tl()) && - currentArea > areaRatio * boundThresold.area() && - currentRect.width < widthHeightRatio * currentRect.height) - { - isOpen = MV_FACE_EYES_OPEN; - } - else if (boundThresold.contains(currentRect.br()) && - boundThresold.contains(currentRect.tl()) && - currentArea > areaSmallRatio * boundThresold.area()) - { - ++rectanglesInsideCount; - } - } - - if (rectanglesInsideCount > 8u) - { - isOpen = MV_FACE_EYES_CLOSED; - } - - return isOpen; + int isOpen = MV_FACE_EYES_CLOSED; + + cv::Mat eyeEqualized; + cv::equalizeHist(eye, eyeEqualized); + + const int thresold = 8; + eyeEqualized = eyeEqualized < thresold; + + std::vector > contours; + std::vector hierarchy; + + cv::findContours( + eyeEqualized, + contours, + hierarchy, + CV_RETR_CCOMP, + CV_CHAIN_APPROX_SIMPLE); + + const size_t contoursSize = contours.size(); + + if (!contoursSize) { + return MV_FACE_EYES_NOT_FOUND; + } + + const int xCenter = eyeEqualized.cols / 2; + const int yCenter = eyeEqualized.rows / 2; + const int width = eyeEqualized.cols / 2.5; + const int height = eyeEqualized.rows / 2.5; + + const cv::Rect boundThresold(xCenter - width, yCenter - height, 2 * width, 2 * height); + + const int widthHeightRatio = 3; + const double areaRatio = 0.005; + const double areaSmallRatio = 0.0005; + size_t 
rectanglesInsideCount = 0u; + + for (size_t i = 0; i < contoursSize; ++i) { + const cv::Rect currentRect = cv::boundingRect(contours[i]); + const double currentArea = cv::contourArea(contours[i]); + + if (boundThresold.contains(currentRect.br()) && + boundThresold.contains(currentRect.tl()) && + currentArea > areaRatio * boundThresold.area() && + currentRect.width < widthHeightRatio * currentRect.height) { + isOpen = MV_FACE_EYES_OPEN; + } else if (boundThresold.contains(currentRect.br()) && + boundThresold.contains(currentRect.tl()) && + currentArea > areaSmallRatio * boundThresold.area()) { + ++rectanglesInsideCount; + } + } + + if (rectanglesInsideCount > 8u) { + isOpen = MV_FACE_EYES_CLOSED; + } + + return isOpen; } int FaceEyeCondition::recognizeEyeCondition( - const cv::Mat& grayImage, - mv_rectangle_s faceLocation, - mv_face_eye_condition_e *eyeCondition) + const cv::Mat& grayImage, + mv_rectangle_s faceLocation, + mv_face_eye_condition_e *eyeCondition) { - if (grayImage.empty()) - { - *eyeCondition = MV_FACE_EYES_NOT_FOUND; - - LOGE("Input image is empty. Eye condition recognition failed."); - return MEDIA_VISION_ERROR_NO_DATA; - } - - if (faceLocation.height <= 0 || faceLocation.width <= 0 || - faceLocation.point.x < 0 || faceLocation.point.y < 0 || - (faceLocation.point.x + faceLocation.width) > grayImage.cols || - (faceLocation.point.y + faceLocation.height) > grayImage.rows) - { - *eyeCondition = MV_FACE_EYES_NOT_FOUND; - - LOGE("Input face location is wrong. Eye condition recognition failed."); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (NULL == eyeCondition) - { - *eyeCondition = MV_FACE_EYES_NOT_FOUND; - - LOGE("Output eye condition is NULL. 
Eye condition recognition failed."); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - // split left and right eyes - cv::Mat leftEye; - cv::Mat rightEye; - splitEyes(grayImage, faceLocation, leftEye, rightEye); - - // recognize eyes conditions - const int isOpenLeft = isEyeOpen(leftEye); - - if (isOpenLeft == MV_FACE_EYES_CLOSED) - { - *eyeCondition = MV_FACE_EYES_CLOSED; - - return MEDIA_VISION_ERROR_NONE; - } - else if (isOpenLeft == MV_FACE_EYES_NOT_FOUND) - { - *eyeCondition = MV_FACE_EYES_NOT_FOUND; - - return MEDIA_VISION_ERROR_NONE; - } - - const int isOpenRight = isEyeOpen(rightEye); - - if (isOpenRight == MV_FACE_EYES_OPEN) - { - *eyeCondition = MV_FACE_EYES_OPEN; - } - else if (isOpenRight == MV_FACE_EYES_CLOSED) - { - *eyeCondition = MV_FACE_EYES_CLOSED; - } - else - { - *eyeCondition = MV_FACE_EYES_NOT_FOUND; - } - - return MEDIA_VISION_ERROR_NONE; + if (grayImage.empty()) { + *eyeCondition = MV_FACE_EYES_NOT_FOUND; + + LOGE("Input image is empty. Eye condition recognition failed."); + return MEDIA_VISION_ERROR_NO_DATA; + } + + if (faceLocation.height <= 0 || faceLocation.width <= 0 || + faceLocation.point.x < 0 || faceLocation.point.y < 0 || + (faceLocation.point.x + faceLocation.width) > grayImage.cols || + (faceLocation.point.y + faceLocation.height) > grayImage.rows) { + *eyeCondition = MV_FACE_EYES_NOT_FOUND; + + LOGE("Input face location is wrong. Eye condition recognition failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (NULL == eyeCondition) { + *eyeCondition = MV_FACE_EYES_NOT_FOUND; + + LOGE("Output eye condition is NULL. 
Eye condition recognition failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + /* split left and right eyes */ + cv::Mat leftEye; + cv::Mat rightEye; + splitEyes(grayImage, faceLocation, leftEye, rightEye); + + /* recognize eyes conditions */ + const int isOpenLeft = isEyeOpen(leftEye); + + if (isOpenLeft == MV_FACE_EYES_CLOSED) { + *eyeCondition = MV_FACE_EYES_CLOSED; + + return MEDIA_VISION_ERROR_NONE; + } else if (isOpenLeft == MV_FACE_EYES_NOT_FOUND) { + *eyeCondition = MV_FACE_EYES_NOT_FOUND; + + return MEDIA_VISION_ERROR_NONE; + } + + const int isOpenRight = isEyeOpen(rightEye); + + if (isOpenRight == MV_FACE_EYES_OPEN) { + *eyeCondition = MV_FACE_EYES_OPEN; + } else if (isOpenRight == MV_FACE_EYES_CLOSED) { + *eyeCondition = MV_FACE_EYES_CLOSED; + } else { + *eyeCondition = MV_FACE_EYES_NOT_FOUND; + } + + return MEDIA_VISION_ERROR_NONE; } } /* Face */ diff --git a/mv_face/face/src/FaceRecognitionModel.cpp b/mv_face/face/src/FaceRecognitionModel.cpp index 1887cea..c18de17 100644 --- a/mv_face/face/src/FaceRecognitionModel.cpp +++ b/mv_face/face/src/FaceRecognitionModel.cpp @@ -26,520 +26,473 @@ #include #include -namespace MediaVision -{ -namespace Face -{ - -namespace -{ - +namespace MediaVision { +namespace Face { +namespace { int CopyOpenCVAlgorithmParameters(const cv::Ptr& srcAlg, - cv::Ptr& dstAlg) + cv::Ptr& dstAlg) { - char tempPath[1024]; - - sprintf(tempPath, "/tmp/alg_copy_%p_%p", srcAlg.obj, dstAlg.obj); - - srcAlg->save(tempPath); - dstAlg->load(tempPath); - - if (0 != remove(tempPath)) - { - LOGW("Error removing serialized FaceRecognizer in %s", tempPath); - } - - // todo: consider to uncomment this lines if OpenCV will support deep - // copy of AlgorithmInfo objects: - - /*std::vector paramNames; - srcAlg->getParams(paramNames); - size_t paramSize = paramNames.size(); - for (size_t i = 0; i < paramSize; ++i) - { - int pType = srcAlg->paramType(paramNames[i]); - - switch(pType) - { - case cv::Param::INT: - case 
cv::Param::UNSIGNED_INT: - case cv::Param::UINT64: - case cv::Param::SHORT: - case cv::Param::UCHAR: - dstAlg->set(paramNames[i], srcAlg->getInt(paramNames[i])); - break; - case cv::Param::BOOLEAN: - dstAlg->set(paramNames[i], srcAlg->getBool(paramNames[i])); - break; - case cv::Param::REAL: - case cv::Param::FLOAT: - dstAlg->set(paramNames[i], srcAlg->getDouble(paramNames[i])); - break; - case cv::Param::STRING: - dstAlg->set(paramNames[i], srcAlg->getString(paramNames[i])); - break; - case cv::Param::MAT: - dstAlg->set(paramNames[i], srcAlg->getMat(paramNames[i])); - break; - case cv::Param::MAT_VECTOR: - { - //std::vector value = srcAlg->getMatVector(paramNames[i]); - //dstAlg->info()->addParam(*(dstAlg.obj), paramNames[i].c_str(), value); - dstAlg->set(paramNames[i], srcAlg->getMatVector(paramNames[i])); - break; - } - case cv::Param::ALGORITHM: - dstAlg->set(paramNames[i], srcAlg->getAlgorithm(paramNames[i])); - break; - default: - LOGE("While copying algorothm parameters unsupported parameter " - "%s was found.", paramNames[i].c_str()); - - return MEDIA_VISION_ERROR_NOT_SUPPORTED; - break; - } - }*/ - - return MEDIA_VISION_ERROR_NONE; + char tempPath[1024]; + + sprintf(tempPath, "/tmp/alg_copy_%p_%p", srcAlg.obj, dstAlg.obj); + + srcAlg->save(tempPath); + dstAlg->load(tempPath); + + if (0 != remove(tempPath)) { + LOGW("Error removing serialized FaceRecognizer in %s", tempPath); + } + + /* todo: consider to uncomment this lines if OpenCV will support deep + / copy of AlgorithmInfo objects: */ + + /*std::vector paramNames; + srcAlg->getParams(paramNames); + size_t paramSize = paramNames.size(); + for (size_t i = 0; i < paramSize; ++i) { + int pType = srcAlg->paramType(paramNames[i]); + + switch(pType) { + case cv::Param::INT: + case cv::Param::UNSIGNED_INT: + case cv::Param::UINT64: + case cv::Param::SHORT: + case cv::Param::UCHAR: + dstAlg->set(paramNames[i], srcAlg->getInt(paramNames[i])); + break; + case cv::Param::BOOLEAN: + dstAlg->set(paramNames[i], 
srcAlg->getBool(paramNames[i])); + break; + case cv::Param::REAL: + case cv::Param::FLOAT: + dstAlg->set(paramNames[i], srcAlg->getDouble(paramNames[i])); + break; + case cv::Param::STRING: + dstAlg->set(paramNames[i], srcAlg->getString(paramNames[i])); + break; + case cv::Param::MAT: + dstAlg->set(paramNames[i], srcAlg->getMat(paramNames[i])); + break; + case cv::Param::MAT_VECTOR: + { + //std::vector value = srcAlg->getMatVector(paramNames[i]); + //dstAlg->info()->addParam(*(dstAlg.obj), paramNames[i].c_str(), value); + dstAlg->set(paramNames[i], srcAlg->getMatVector(paramNames[i])); + break; + } + case cv::Param::ALGORITHM: + dstAlg->set(paramNames[i], srcAlg->getAlgorithm(paramNames[i])); + break; + default: + LOGE("While copying algorothm parameters unsupported parameter " + "%s was found.", paramNames[i].c_str()); + + return MEDIA_VISION_ERROR_NOT_SUPPORTED; + } + }*/ + + return MEDIA_VISION_ERROR_NONE; } void ParseOpenCVLabels( - const cv::Ptr& recognizer, - std::set& outLabels) + const cv::Ptr& recognizer, + std::set& outLabels) { - if (!recognizer.empty()) - { - cv::Mat labels = recognizer->getMat("labels"); - for(int i = 0; i < labels.rows; ++i) - { - outLabels.insert(labels.at(i, 0)); - } - } + if (!recognizer.empty()) { + cv::Mat labels = recognizer->getMat("labels"); + for (int i = 0; i < labels.rows; ++i) { + outLabels.insert(labels.at(i, 0)); + } + } } } /* anonymous namespace */ FaceRecognitionModelConfig::FaceRecognitionModelConfig() : - mModelType(MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN), - mNumComponents(0), - mThreshold(DBL_MAX), - mRadius(1), - mNeighbors(8), - mGridX(8), - mGridY(8), - mImgWidth(150), - mImgHeight(150) + mModelType(MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN), + mNumComponents(0), + mThreshold(DBL_MAX), + mRadius(1), + mNeighbors(8), + mGridX(8), + mGridY(8), + mImgWidth(150), + mImgHeight(150) { - ; /* NULL */ + ; /* NULL */ } FaceRecognitionResults::FaceRecognitionResults() : - mIsRecognized(false), - mFaceLabel(-1), - 
mConfidence(0.0) + mIsRecognized(false), + mFaceLabel(-1), + mConfidence(0.0) { - ; /* NULL */ + ; /* NULL */ } bool FaceRecognitionModelConfig::operator!=( - const FaceRecognitionModelConfig& other) const + const FaceRecognitionModelConfig& other) const { - return mModelType != other.mModelType || - mNumComponents != other.mNumComponents || - mThreshold != other.mThreshold || - mRadius != other.mRadius || - mNeighbors != other.mNeighbors || - mGridX != other.mGridX || - mGridY != other.mGridY || - mImgWidth != other.mImgWidth || - mImgHeight != other.mImgHeight; + return mModelType != other.mModelType || + mNumComponents != other.mNumComponents || + mThreshold != other.mThreshold || + mRadius != other.mRadius || + mNeighbors != other.mNeighbors || + mGridX != other.mGridX || + mGridY != other.mGridY || + mImgWidth != other.mImgWidth || + mImgHeight != other.mImgHeight; } FaceRecognitionModel::FaceRecognitionModel() : - m_canRecognize(false), - m_recognizer(NULL) + m_canRecognize(false), + m_recognizer(NULL) { - ; /* NULL */ + ; /* NULL */ } FaceRecognitionModel::FaceRecognitionModel(const FaceRecognitionModel& origin) : - m_canRecognize(origin.m_canRecognize), - m_faceSamples(origin.m_faceSamples), - m_learnAlgorithmConfig(origin.m_learnAlgorithmConfig), - m_recognizer(CreateRecognitionAlgorithm(origin.m_learnAlgorithmConfig)), - m_learnedLabels(origin.m_learnedLabels) + m_canRecognize(origin.m_canRecognize), + m_faceSamples(origin.m_faceSamples), + m_learnAlgorithmConfig(origin.m_learnAlgorithmConfig), + m_recognizer(CreateRecognitionAlgorithm(origin.m_learnAlgorithmConfig)), + m_learnedLabels(origin.m_learnedLabels) { - if (!m_recognizer.empty()) - { - CopyOpenCVAlgorithmParameters(origin.m_recognizer, m_recognizer); - } + if (!m_recognizer.empty()) { + CopyOpenCVAlgorithmParameters(origin.m_recognizer, m_recognizer); + } } FaceRecognitionModel& FaceRecognitionModel::operator=( - const FaceRecognitionModel& copy) + const FaceRecognitionModel& copy) { - if (this 
!= ©) - { - m_canRecognize = copy.m_canRecognize; - m_faceSamples = copy.m_faceSamples; - m_learnAlgorithmConfig = copy.m_learnAlgorithmConfig; - m_recognizer = CreateRecognitionAlgorithm(m_learnAlgorithmConfig); - m_learnedLabels = copy.m_learnedLabels; - - if (!m_recognizer.empty()) - { - CopyOpenCVAlgorithmParameters(copy.m_recognizer, m_recognizer); - } - } - - return *this; + if (this != ©) { + m_canRecognize = copy.m_canRecognize; + m_faceSamples = copy.m_faceSamples; + m_learnAlgorithmConfig = copy.m_learnAlgorithmConfig; + m_recognizer = CreateRecognitionAlgorithm(m_learnAlgorithmConfig); + m_learnedLabels = copy.m_learnedLabels; + + if (!m_recognizer.empty()) { + CopyOpenCVAlgorithmParameters(copy.m_recognizer, m_recognizer); + } + } + + return *this; } FaceRecognitionModel::~FaceRecognitionModel() { - ; /* NULL */ + ; /* NULL */ } int FaceRecognitionModel::save(const std::string& fileName) { - if (!m_recognizer.empty()) - { - /* find directory */ - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); - - std::string filePath; - filePath += prefix_path; - filePath += fileName; - - /* check the directory is available */ - std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/')); - if (access(prefix_path_check.c_str(),F_OK)) - { - LOGE("Can't save recognition model. Path[%s] doesn't existed.", prefix_path_check.c_str()); - - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - cv::FileStorage storage(filePath, cv::FileStorage::WRITE); - if (!storage.isOpened()) - { - LOGE("Can't save recognition model. 
Write to file permission denied."); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } - - switch (m_learnAlgorithmConfig.mModelType) - { - case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES: - storage << "algorithm" << "Eigenfaces"; - break; - case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES: - storage << "algorithm" << "Fisherfaces"; - break; - case MEDIA_VISION_FACE_MODEL_TYPE_LBPH: - storage << "algorithm" << "LBPH"; - break; - default: - storage.release(); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - storage << "can_recognize" << m_canRecognize; - m_recognizer->save(storage); - - storage.release(); - } - else - { - LOGE("Attempt to save recognition model before learn"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - return MEDIA_VISION_ERROR_NONE; + if (!m_recognizer.empty()) { + /* find directory */ + std::string prefix_path = std::string(app_get_data_path()); + LOGD("prefix_path: %s", prefix_path.c_str()); + + std::string filePath; + filePath += prefix_path; + filePath += fileName; + + /* check the directory is available */ + std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/')); + if (access(prefix_path_check.c_str(), F_OK)) { + LOGE("Can't save recognition model. Path[%s] doesn't existed.", prefix_path_check.c_str()); + + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + cv::FileStorage storage(filePath, cv::FileStorage::WRITE); + if (!storage.isOpened()) { + LOGE("Can't save recognition model. 
Write to file permission denied."); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } + + switch (m_learnAlgorithmConfig.mModelType) { + case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES: + storage << "algorithm" << "Eigenfaces"; + break; + case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES: + storage << "algorithm" << "Fisherfaces"; + break; + case MEDIA_VISION_FACE_MODEL_TYPE_LBPH: + storage << "algorithm" << "LBPH"; + break; + default: + storage.release(); + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } + + storage << "can_recognize" << m_canRecognize; + m_recognizer->save(storage); + + storage.release(); + } else { + LOGE("Attempt to save recognition model before learn"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + return MEDIA_VISION_ERROR_NONE; } int FaceRecognitionModel::load(const std::string& fileName) { - /* find directory */ - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); - - std::string filePath; - filePath += prefix_path; - filePath += fileName; - - if (access(filePath.c_str(),F_OK)) - { - LOGE("Can't load face recognition model. File[%s] doesn't exist.", filePath.c_str()); - - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - cv::FileStorage storage(filePath, cv::FileStorage::READ); - if (!storage.isOpened()) - { - LOGE("Can't load recognition model. 
Read from file permission denied."); - - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } - - LOGD("Loading recognition model from file."); - - std::string algName; - int canRecognize = 0; - storage["algorithm"] >> algName; - storage["can_recognize"] >> canRecognize; - - cv::Ptr tempRecognizer; - FaceRecognitionModelConfig tempConfig; - std::set tempLearnedLabels; - - if (algName == "Eigenfaces") - { - tempRecognizer = cv::createEigenFaceRecognizer(); - tempRecognizer->load(storage); - tempConfig.mModelType = - MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES; - tempConfig.mNumComponents = - tempRecognizer->getInt("ncomponents"); - ParseOpenCVLabels(tempRecognizer, tempLearnedLabels); - } - else if (algName == "Fisherfaces") - { - tempRecognizer = cv::createFisherFaceRecognizer(); - tempRecognizer->load(storage); - tempConfig.mModelType = - MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES; - tempConfig.mNumComponents = - tempRecognizer->getInt("ncomponents"); - ParseOpenCVLabels(tempRecognizer, tempLearnedLabels); - } - else if (algName == "LBPH") - { - tempRecognizer = cv::createLBPHFaceRecognizer(); - tempRecognizer->load(storage); - tempConfig.mModelType = - MEDIA_VISION_FACE_MODEL_TYPE_LBPH; - tempConfig.mGridX = tempRecognizer->getInt("grid_x"); - tempConfig.mGridY = tempRecognizer->getInt("grid_y"); - tempConfig.mNeighbors = tempRecognizer->getInt("neighbors"); - tempConfig.mRadius = tempRecognizer->getInt("radius"); - ParseOpenCVLabels(tempRecognizer, tempLearnedLabels); - } - else - { - tempConfig = FaceRecognitionModelConfig(); - LOGE("Failed to load face recognition model from file. 
File is in " - "unsupported format"); - - storage.release(); - - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - tempConfig.mThreshold = tempRecognizer->getDouble("threshold"); - - LOGD("Recognition model of [%s] type has been loaded from file", - algName.c_str()); - - storage.release(); - - m_recognizer = tempRecognizer; - m_learnAlgorithmConfig = tempConfig; - m_canRecognize = (canRecognize == 1); - m_learnedLabels.clear(); - m_learnedLabels = tempLearnedLabels; - - return MEDIA_VISION_ERROR_NONE; + /* find directory */ + std::string prefix_path = std::string(app_get_data_path()); + LOGD("prefix_path: %s", prefix_path.c_str()); + + std::string filePath; + filePath += prefix_path; + filePath += fileName; + + if (access(filePath.c_str(), F_OK)) { + LOGE("Can't load face recognition model. File[%s] doesn't exist.", filePath.c_str()); + + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + cv::FileStorage storage(filePath, cv::FileStorage::READ); + if (!storage.isOpened()) { + LOGE("Can't load recognition model. 
Read from file permission denied."); + + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } + + LOGD("Loading recognition model from file."); + + std::string algName; + int canRecognize = 0; + storage["algorithm"] >> algName; + storage["can_recognize"] >> canRecognize; + + cv::Ptr tempRecognizer; + FaceRecognitionModelConfig tempConfig; + std::set tempLearnedLabels; + + if (algName == "Eigenfaces") { + tempRecognizer = cv::createEigenFaceRecognizer(); + tempRecognizer->load(storage); + tempConfig.mModelType = + MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES; + tempConfig.mNumComponents = + tempRecognizer->getInt("ncomponents"); + ParseOpenCVLabels(tempRecognizer, tempLearnedLabels); + } else if (algName == "Fisherfaces") { + tempRecognizer = cv::createFisherFaceRecognizer(); + tempRecognizer->load(storage); + tempConfig.mModelType = + MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES; + tempConfig.mNumComponents = + tempRecognizer->getInt("ncomponents"); + ParseOpenCVLabels(tempRecognizer, tempLearnedLabels); + } else if (algName == "LBPH") { + tempRecognizer = cv::createLBPHFaceRecognizer(); + tempRecognizer->load(storage); + tempConfig.mModelType = + MEDIA_VISION_FACE_MODEL_TYPE_LBPH; + tempConfig.mGridX = tempRecognizer->getInt("grid_x"); + tempConfig.mGridY = tempRecognizer->getInt("grid_y"); + tempConfig.mNeighbors = tempRecognizer->getInt("neighbors"); + tempConfig.mRadius = tempRecognizer->getInt("radius"); + ParseOpenCVLabels(tempRecognizer, tempLearnedLabels); + } else { + tempConfig = FaceRecognitionModelConfig(); + LOGE("Failed to load face recognition model from file. 
File is in " + "unsupported format"); + + storage.release(); + + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } + + tempConfig.mThreshold = tempRecognizer->getDouble("threshold"); + + LOGD("Recognition model of [%s] type has been loaded from file", + algName.c_str()); + + storage.release(); + + m_recognizer = tempRecognizer; + m_learnAlgorithmConfig = tempConfig; + m_canRecognize = (canRecognize == 1); + m_learnedLabels.clear(); + m_learnedLabels = tempLearnedLabels; + + return MEDIA_VISION_ERROR_NONE; } int FaceRecognitionModel::addFaceExample( - const cv::Mat& faceImage, - int faceLabel) + const cv::Mat& faceImage, + int faceLabel) { - m_faceSamples[faceLabel].push_back(faceImage); + m_faceSamples[faceLabel].push_back(faceImage); - LOGD("Added face image example for label %i for recognition model", - faceLabel); + LOGD("Added face image example for label %i for recognition model", + faceLabel); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int FaceRecognitionModel::resetFaceExamples(void) { - m_faceSamples.clear(); + m_faceSamples.clear(); - LOGD("All face image examples have been removed from recognition model"); + LOGD("All face image examples have been removed from recognition model"); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int FaceRecognitionModel::resetFaceExamples(int faceLabel) { - if (1 > m_faceSamples.erase(faceLabel)) - { - LOGD("Failed to remove face image examples for label %i. " - "No such examples", faceLabel); + if (1 > m_faceSamples.erase(faceLabel)) { + LOGD("Failed to remove face image examples for label %i. 
" + "No such examples", faceLabel); - return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; - } + return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; + } - LOGD("Face image examples for label %i have been removed from " - "recognition model", faceLabel); + LOGD("Face image examples for label %i have been removed from " + "recognition model", faceLabel); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } const std::set& FaceRecognitionModel::getFaceLabels(void) const { - return m_learnedLabels; + return m_learnedLabels; } int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config) { - bool isIncremental = false; - bool isUnisize = false; - - if (MEDIA_VISION_FACE_MODEL_TYPE_LBPH == config.mModelType) - { - isIncremental = true; - } - - if (MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES == config.mModelType || - MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == config.mModelType) - { - isUnisize = true; - } - - std::vector samples; - std::vector labels; - std::set learnedLabels; - - if (isIncremental) - { - learnedLabels.insert(m_learnedLabels.begin(), m_learnedLabels.end()); - } - - std::map >::const_iterator it = - m_faceSamples.begin(); - for (; it != m_faceSamples.end(); ++it) - { - const size_t faceClassSamplesSize = it->second.size(); - labels.insert(labels.end(), faceClassSamplesSize, it->first); - learnedLabels.insert(it->first); - - if (!isUnisize) - { - LOGD("%u examples has been added with label %i", - it->second.size(), it->first); - samples.insert(samples.end(), it->second.begin(), it->second.end()); - } - else - { - for (size_t sampleInd = 0; sampleInd < faceClassSamplesSize; ++sampleInd) - { - cv::Mat resizedSample; - cv::resize(it->second[sampleInd], - resizedSample, - cv::Size(config.mImgWidth, config.mImgHeight), - 1.0, 1.0, cv::INTER_CUBIC); - samples.push_back(resizedSample); - } - } - } - - const size_t samplesSize = samples.size(); - const size_t labelsSize = labels.size(); - - if (0 != samplesSize && samplesSize == labelsSize) - { - 
LOGD("Start to learn the model for %u samples and %u labels", - samplesSize, labelsSize); - - if (m_learnAlgorithmConfig != config || m_recognizer.empty()) - { - m_recognizer = CreateRecognitionAlgorithm(config); - } - - if (m_recognizer.empty()) - { - LOGE("Can't create recognition algorithm for recognition model. " - "Configuration is not supported by any of known algorithms."); - - return MEDIA_VISION_ERROR_NOT_SUPPORTED; - } - - isIncremental ? m_recognizer->update(samples, labels) : - m_recognizer->train(samples, labels); - m_canRecognize = true; - m_learnedLabels.clear(); - m_learnedLabels = learnedLabels; - } - else - { - LOGE("Can't create recognition algorithm for no examples. Try to add " - "some face examples before learning"); - - return MEDIA_VISION_ERROR_NO_DATA; - } - - m_learnAlgorithmConfig = config; - - LOGD("Recognition model has been learned"); - - return MEDIA_VISION_ERROR_NONE; + bool isIncremental = false; + bool isUnisize = false; + + if (MEDIA_VISION_FACE_MODEL_TYPE_LBPH == config.mModelType) { + isIncremental = true; + } + + if (MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES == config.mModelType || + MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == config.mModelType) { + isUnisize = true; + } + + std::vector samples; + std::vector labels; + std::set learnedLabels; + + if (isIncremental) { + learnedLabels.insert(m_learnedLabels.begin(), m_learnedLabels.end()); + } + + std::map >::const_iterator it = + m_faceSamples.begin(); + for (; it != m_faceSamples.end(); ++it) { + const size_t faceClassSamplesSize = it->second.size(); + labels.insert(labels.end(), faceClassSamplesSize, it->first); + learnedLabels.insert(it->first); + + if (!isUnisize) { + LOGD("%u examples has been added with label %i", + it->second.size(), it->first); + samples.insert(samples.end(), it->second.begin(), it->second.end()); + } else { + for (size_t sampleInd = 0; sampleInd < faceClassSamplesSize; ++sampleInd) { + cv::Mat resizedSample; + cv::resize(it->second[sampleInd], + 
resizedSample, + cv::Size(config.mImgWidth, config.mImgHeight), + 1.0, 1.0, cv::INTER_CUBIC); + samples.push_back(resizedSample); + } + } + } + + const size_t samplesSize = samples.size(); + const size_t labelsSize = labels.size(); + + if (0 != samplesSize && samplesSize == labelsSize) { + LOGD("Start to learn the model for %u samples and %u labels", + samplesSize, labelsSize); + + if (m_learnAlgorithmConfig != config || m_recognizer.empty()) { + m_recognizer = CreateRecognitionAlgorithm(config); + } + + if (m_recognizer.empty()) { + LOGE("Can't create recognition algorithm for recognition model. " + "Configuration is not supported by any of known algorithms."); + + return MEDIA_VISION_ERROR_NOT_SUPPORTED; + } + + isIncremental ? m_recognizer->update(samples, labels) : + m_recognizer->train(samples, labels); + m_canRecognize = true; + m_learnedLabels.clear(); + m_learnedLabels = learnedLabels; + } else { + LOGE("Can't create recognition algorithm for no examples. Try to add " + "some face examples before learning"); + + return MEDIA_VISION_ERROR_NO_DATA; + } + + m_learnAlgorithmConfig = config; + + LOGD("Recognition model has been learned"); + + return MEDIA_VISION_ERROR_NONE; } int FaceRecognitionModel::recognize(const cv::Mat& image, FaceRecognitionResults& results) { - if (!m_recognizer.empty() && m_canRecognize) - { - double absConf = 0.0; - m_recognizer->predict(image, results.mFaceLabel, absConf); - // Normalize the absolute value of the confidence - absConf = exp(7.5 - (0.05 * absConf)); - results.mConfidence = absConf / (1 + absConf); - results.mIsRecognized = true; - results.mFaceLocation = cv::Rect(0, 0, image.cols, image.rows); - } - else - { - LOGE("Attempt to recognize faces with untrained model"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - return MEDIA_VISION_ERROR_NONE; + if (!m_recognizer.empty() && m_canRecognize) { + double absConf = 0.0; + m_recognizer->predict(image, results.mFaceLabel, absConf); + /* Normalize the absolute value of 
the confidence */ + absConf = exp(7.5 - (0.05 * absConf)); + results.mConfidence = absConf / (1 + absConf); + results.mIsRecognized = true; + results.mFaceLocation = cv::Rect(0, 0, image.cols, image.rows); + } else { + LOGE("Attempt to recognize faces with untrained model"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + return MEDIA_VISION_ERROR_NONE; } cv::Ptr FaceRecognitionModel::CreateRecognitionAlgorithm( - const FaceRecognitionModelConfig& config) + const FaceRecognitionModelConfig& config) { - cv::Ptr tempRecognizer; - switch (config.mModelType) - { - case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES: - tempRecognizer = cv::createEigenFaceRecognizer( - config.mNumComponents, - config.mThreshold); - break; - case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES: - tempRecognizer = cv::createFisherFaceRecognizer( - config.mNumComponents, - config.mThreshold); - break; - case MEDIA_VISION_FACE_MODEL_TYPE_LBPH: - tempRecognizer = cv::createLBPHFaceRecognizer( - config.mRadius, - config.mNeighbors, - config.mGridX, - config.mGridY, - config.mThreshold); - break; - default: - return NULL; - } - - return tempRecognizer; + cv::Ptr tempRecognizer; + switch (config.mModelType) { + case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES: + tempRecognizer = cv::createEigenFaceRecognizer( + config.mNumComponents, + config.mThreshold); + break; + case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES: + tempRecognizer = cv::createFisherFaceRecognizer( + config.mNumComponents, + config.mThreshold); + break; + case MEDIA_VISION_FACE_MODEL_TYPE_LBPH: + tempRecognizer = cv::createLBPHFaceRecognizer( + config.mRadius, + config.mNeighbors, + config.mGridX, + config.mGridY, + config.mThreshold); + break; + default: + return NULL; + } + + return tempRecognizer; } } /* Face */ diff --git a/mv_face/face/src/FaceTrackingModel.cpp b/mv_face/face/src/FaceTrackingModel.cpp index 2c4fdd6..25fdcb8 100644 --- a/mv_face/face/src/FaceTrackingModel.cpp +++ b/mv_face/face/src/FaceTrackingModel.cpp @@ -23,194 +23,174 
@@ #include -namespace MediaVision -{ -namespace Face -{ - +namespace MediaVision { +namespace Face { FaceTrackingResults::FaceTrackingResults() : - mIsTracked(false), - mConfidence(0.f) + mIsTracked(false), + mConfidence(0.f) { - ; /* NULL */ + ; /* NULL */ } FaceTrackingModel::FaceTrackingModel() : - m_canTrack(false), - m_tracker(new cv::TrackerMedianFlow()) + m_canTrack(false), + m_tracker(new cv::TrackerMedianFlow()) { - ; /* NULL */ + ; /* NULL */ } FaceTrackingModel::FaceTrackingModel(const FaceTrackingModel& origin) : - m_canTrack(origin.m_canTrack), - m_tracker(new cv::TrackerMedianFlow()) + m_canTrack(origin.m_canTrack), + m_tracker(new cv::TrackerMedianFlow()) { - if (!origin.m_tracker.empty()) - { - origin.m_tracker->copyTo(*(m_tracker.obj)); - } + if (!origin.m_tracker.empty()) { + origin.m_tracker->copyTo(*(m_tracker.obj)); + } } FaceTrackingModel& FaceTrackingModel::operator=(const FaceTrackingModel& copy) { - if (this != ©) - { - m_canTrack = copy.m_canTrack; - m_tracker = cv::Ptr(new cv::TrackerMedianFlow()); - if (!copy.m_tracker.empty()) - { - copy.m_tracker->copyTo(*(m_tracker.obj)); - } - } - - return *this; + if (this != ©) { + m_canTrack = copy.m_canTrack; + m_tracker = cv::Ptr(new cv::TrackerMedianFlow()); + if (!copy.m_tracker.empty()) { + copy.m_tracker->copyTo(*(m_tracker.obj)); + } + } + + return *this; } FaceTrackingModel::~FaceTrackingModel() { - ; /* NULL */ + ; /* NULL */ } int FaceTrackingModel::save(const std::string& fileName) { - if (m_tracker.empty()) - { - LOGE("Can't save tracking model. No tracking algorithm is used"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } + if (m_tracker.empty()) { + LOGE("Can't save tracking model. 
No tracking algorithm is used"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); + std::string prefix_path = std::string(app_get_data_path()); + LOGD("prefix_path: %s", prefix_path.c_str()); - std::string filePath; - filePath += prefix_path; - filePath += fileName; + std::string filePath; + filePath += prefix_path; +filePath += fileName; - /* check the directory is available */ - std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/')); - if (access(prefix_path_check.c_str(),F_OK)) - { - LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefix_path_check.c_str()); + /* check the directory is available */ + std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/')); + if (access(prefix_path_check.c_str(), F_OK)) { + LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefix_path_check.c_str()); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + return MEDIA_VISION_ERROR_INVALID_PATH; + } - cv::FileStorage storage(filePath, cv::FileStorage::WRITE); - if (!storage.isOpened()) - { - LOGE("Can't save tracking model. Write to file permission denied."); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } + cv::FileStorage storage(filePath, cv::FileStorage::WRITE); + if (!storage.isOpened()) { + LOGE("Can't save tracking model. Write to file permission denied."); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } - LOGD("Storing tracking model to the file started."); + LOGD("Storing tracking model to the file started."); - storage << "canTrack" << (m_canTrack ? 1 : 0); - m_tracker->write(storage); + storage << "canTrack" << (m_canTrack ? 
1 : 0); + m_tracker->write(storage); - LOGD("Storing tracking model to the file finished."); + LOGD("Storing tracking model to the file finished."); - storage.release(); + storage.release(); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int FaceTrackingModel::load(const std::string& fileName) { - /* find directory */ - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); + /* find directory */ + std::string prefix_path = std::string(app_get_data_path()); + LOGD("prefix_path: %s", prefix_path.c_str()); - std::string filePath; - filePath += prefix_path; - filePath += fileName; + std::string filePath; + filePath += prefix_path; + filePath += fileName; - if (access(filePath.c_str(), F_OK)) - { - LOGE("Can't load face tracking model. File[%s] doesn't exist.", filePath.c_str()); + if (access(filePath.c_str(), F_OK)) { + LOGE("Can't load face tracking model. File[%s] doesn't exist.", filePath.c_str()); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + return MEDIA_VISION_ERROR_INVALID_PATH; + } - cv::FileStorage storage(filePath, cv::FileStorage::READ); - if (!storage.isOpened()) - { - LOGE("Can't load tracking model. Read from file permission denied."); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } + cv::FileStorage storage(filePath, cv::FileStorage::READ); + if (!storage.isOpened()) { + LOGE("Can't load tracking model. 
Read from file permission denied."); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } - int canTrack = 0; - storage["canTrack"] >> canTrack; - m_canTrack = (0 != canTrack); - m_tracker->read(storage); + int canTrack = 0; + storage["canTrack"] >> canTrack; + m_canTrack = (0 != canTrack); + m_tracker->read(storage); - LOGD("Loading tracking model from file."); + LOGD("Loading tracking model from file."); - storage.release(); + storage.release(); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int FaceTrackingModel::prepare(const cv::Mat& image) { - if (m_tracker.empty()) - { - LOGE("Failed to prepare tracking model. No tracking algorithm " - "is available."); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - cv::Rect_ lastBoundingBox; - if (!m_tracker->isInited()) - { - lastBoundingBox.x = 0; - lastBoundingBox.y = 0; - lastBoundingBox.width = image.cols; - lastBoundingBox.height = image.rows; - } - else - { - lastBoundingBox = m_tracker->getLastBoundingBox(); - } - - return prepare(image, lastBoundingBox); + if (m_tracker.empty()) { + LOGE("Failed to prepare tracking model. No tracking algorithm " + "is available."); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + cv::Rect_ lastBoundingBox; + if (!m_tracker->isInited()) { + lastBoundingBox.x = 0; + lastBoundingBox.y = 0; + lastBoundingBox.width = image.cols; + lastBoundingBox.height = image.rows; + } else { + lastBoundingBox = m_tracker->getLastBoundingBox(); + } + + return prepare(image, lastBoundingBox); } int FaceTrackingModel::prepare( - const cv::Mat& image, - const cv::Rect_& boundingBox) + const cv::Mat& image, + const cv::Rect_& boundingBox) { - if (m_tracker.empty()) - { - LOGE("Failed to prepare tracking model. 
No tracking algorithm " - "is available."); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - if (!m_tracker->init(image, boundingBox)) - { - LOGE("Failed to prepare tracking model."); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - m_canTrack = true; - return MEDIA_VISION_ERROR_NONE; + if (m_tracker.empty()) { + LOGE("Failed to prepare tracking model. No tracking algorithm " + "is available."); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + if (!m_tracker->init(image, boundingBox)) { + LOGE("Failed to prepare tracking model."); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + m_canTrack = true; + return MEDIA_VISION_ERROR_NONE; } int FaceTrackingModel::track(const cv::Mat& image, FaceTrackingResults& results) { - if (!m_tracker.empty() && m_canTrack) - { - results.mIsTracked = m_tracker->update(image, results.mFaceLocation); - results.mConfidence = m_tracker->getLastConfidence(); - } - else - { - LOGE("Attempt to track face with not prepared model"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - return MEDIA_VISION_ERROR_NONE; + if (!m_tracker.empty() && m_canTrack) { + results.mIsTracked = m_tracker->update(image, results.mFaceLocation); + results.mConfidence = m_tracker->getLastConfidence(); + } else { + LOGE("Attempt to track face with not prepared model"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + return MEDIA_VISION_ERROR_NONE; } } /* Face */ diff --git a/mv_face/face/src/FaceUtil.cpp b/mv_face/face/src/FaceUtil.cpp index 7d49dd3..1430fe1 100644 --- a/mv_face/face/src/FaceUtil.cpp +++ b/mv_face/face/src/FaceUtil.cpp @@ -21,117 +21,110 @@ #include #include -namespace MediaVision -{ -namespace Face -{ - +namespace MediaVision { +namespace Face { RecognitionParams::RecognitionParams(FaceRecognitionModelType algType) : - mRecognitionAlgType(algType) + mRecognitionAlgType(algType) { - ; /* NULL */ + ; /* NULL */ } RecognitionParams::RecognitionParams() : - mRecognitionAlgType(MEDIA_VISION_FACE_MODEL_TYPE_LBPH) + 
mRecognitionAlgType(MEDIA_VISION_FACE_MODEL_TYPE_LBPH) { - ; /* NULL */ + ; /* NULL */ } int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource) { - MEDIA_VISION_INSTANCE_CHECK(mvSource); + MEDIA_VISION_INSTANCE_CHECK(mvSource); - int depth = CV_8U; // Default depth. 1 byte for channel. - unsigned int channelsNumber = 0; - unsigned int width = 0, height = 0; - unsigned int bufferSize = 0; - unsigned char *buffer = NULL; + int depth = CV_8U; /* Default depth. 1 byte for channel. */ + unsigned int channelsNumber = 0; + unsigned int width = 0, height = 0; + unsigned int bufferSize = 0; + unsigned char *buffer = NULL; - mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; + mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; - MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), - "Failed to get the width."); - MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), - "Failed to get the height."); - MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), - "Failed to get the colorspace."); - MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize), - "Failed to get the buffer size."); + MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), + "Failed to get the width."); + MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), + "Failed to get the height."); + MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), + "Failed to get the colorspace."); + MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize), + "Failed to get the buffer size."); - int conversionType = -1; // Type of conversion from given colorspace to gray - switch(colorspace) - { - case MEDIA_VISION_COLORSPACE_INVALID: - LOGE("Error: mv_source has invalid colorspace."); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - case MEDIA_VISION_COLORSPACE_Y800: - channelsNumber = 1; - // Without convertion - break; - case MEDIA_VISION_COLORSPACE_I420: - channelsNumber = 1; - height *= 1.5; - 
conversionType = CV_YUV2GRAY_I420; - break; - case MEDIA_VISION_COLORSPACE_NV12: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_NV12; - break; - case MEDIA_VISION_COLORSPACE_YV12: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_YV12; - break; - case MEDIA_VISION_COLORSPACE_NV21: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_NV21; - break; - case MEDIA_VISION_COLORSPACE_YUYV: - channelsNumber = 2; - conversionType = CV_YUV2GRAY_YUYV; - break; - case MEDIA_VISION_COLORSPACE_UYVY: - channelsNumber = 2; - conversionType = CV_YUV2GRAY_UYVY; - break; - case MEDIA_VISION_COLORSPACE_422P: - channelsNumber = 2; - conversionType = CV_YUV2GRAY_Y422; - break; - case MEDIA_VISION_COLORSPACE_RGB565: - channelsNumber = 2; - conversionType = CV_BGR5652GRAY; - break; - case MEDIA_VISION_COLORSPACE_RGB888: - channelsNumber = 3; - conversionType = CV_RGB2GRAY; - break; - case MEDIA_VISION_COLORSPACE_RGBA: - channelsNumber = 4; - conversionType = CV_RGBA2GRAY; - break; - default: - LOGE("Error: mv_source has unsupported colorspace."); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } + int conversionType = -1; // Type of conversion from given colorspace to gray + switch(colorspace) { + case MEDIA_VISION_COLORSPACE_INVALID: + LOGE("Error: mv_source has invalid colorspace."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + case MEDIA_VISION_COLORSPACE_Y800: + channelsNumber = 1; + /* Without convertion */ + break; + case MEDIA_VISION_COLORSPACE_I420: + channelsNumber = 1; + height *= 1.5; + conversionType = CV_YUV2GRAY_I420; + break; + case MEDIA_VISION_COLORSPACE_NV12: + channelsNumber = 1; + height *= 1.5; + conversionType = CV_YUV2GRAY_NV12; + break; + case MEDIA_VISION_COLORSPACE_YV12: + channelsNumber = 1; + height *= 1.5; + conversionType = CV_YUV2GRAY_YV12; + break; + case MEDIA_VISION_COLORSPACE_NV21: + channelsNumber = 1; + height *= 1.5; + conversionType = CV_YUV2GRAY_NV21; + break; + case 
MEDIA_VISION_COLORSPACE_YUYV: + channelsNumber = 2; + conversionType = CV_YUV2GRAY_YUYV; + break; + case MEDIA_VISION_COLORSPACE_UYVY: + channelsNumber = 2; + conversionType = CV_YUV2GRAY_UYVY; + break; + case MEDIA_VISION_COLORSPACE_422P: + channelsNumber = 2; + conversionType = CV_YUV2GRAY_Y422; + break; + case MEDIA_VISION_COLORSPACE_RGB565: + channelsNumber = 2; + conversionType = CV_BGR5652GRAY; + break; + case MEDIA_VISION_COLORSPACE_RGB888: + channelsNumber = 3; + conversionType = CV_RGB2GRAY; + break; + case MEDIA_VISION_COLORSPACE_RGBA: + channelsNumber = 4; + conversionType = CV_RGBA2GRAY; + break; + default: + LOGE("Error: mv_source has unsupported colorspace."); + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } - if (conversionType == -1) // Without conversion - { - cvSource = cv::Mat(cv::Size(width, height), - CV_MAKETYPE(depth, channelsNumber), buffer).clone(); - } - else // Conversion - { - // Class for representation the given image as cv::Mat before conversion - cv::Mat origin(cv::Size(width, height), - CV_MAKETYPE(depth, channelsNumber), buffer); - cv::cvtColor(origin, cvSource, conversionType); - } + if (conversionType == -1) {/* Without conversion */ + cvSource = cv::Mat(cv::Size(width, height), + CV_MAKETYPE(depth, channelsNumber), buffer).clone(); + } else {/* With conversion */ + /* Class for representation the given image as cv::Mat before conversion */ + cv::Mat origin(cv::Size(width, height), + CV_MAKETYPE(depth, channelsNumber), buffer); + cv::cvtColor(origin, cvSource, conversionType); + } - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } } /* Face */ diff --git a/mv_face/face/src/TrackerMedianFlow.cpp b/mv_face/face/src/TrackerMedianFlow.cpp index a7a3b4f..ee4bc98 100644 --- a/mv_face/face/src/TrackerMedianFlow.cpp +++ b/mv_face/face/src/TrackerMedianFlow.cpp @@ -47,414 +47,388 @@ #include #include -namespace -{ - float FloatEps = 10e-6f; +namespace { + float FloatEps = 10e-6f; } /* anonymous namespace */ 
-namespace cv -{ - +namespace cv { TrackerMedianFlow::Params::Params() { - mPointsInGrid = 10; - mWindowSize = Size(3, 3); - mPyrMaxLevel = 5; + mPointsInGrid = 10; + mWindowSize = Size(3, 3); + mPyrMaxLevel = 5; } -void TrackerMedianFlow::Params::read( const cv::FileNode& fn ) +void TrackerMedianFlow::Params::read(const cv::FileNode& fn) { - mPointsInGrid = fn["pointsInGrid"]; - int winSizeHeight = fn["windowSizeHeight"]; - int winSizeWidth = fn["windowSizeWidth"]; - mWindowSize = Size(winSizeHeight, winSizeWidth); - mPyrMaxLevel = fn["pyrMaxLevel"]; + mPointsInGrid = fn["pointsInGrid"]; + int winSizeHeight = fn["windowSizeHeight"]; + int winSizeWidth = fn["windowSizeWidth"]; + mWindowSize = Size(winSizeHeight, winSizeWidth); + mPyrMaxLevel = fn["pyrMaxLevel"]; } -void TrackerMedianFlow::Params::write( cv::FileStorage& fs ) const +void TrackerMedianFlow::Params::write(cv::FileStorage& fs) const { - fs << "pointsInGrid" << mPointsInGrid; - fs << "windowSizeHeight" << mWindowSize.height; - fs << "windowSizeWidth" << mWindowSize.width; - fs << "pyrMaxLevel" << mPyrMaxLevel; + fs << "pointsInGrid" << mPointsInGrid; + fs << "windowSizeHeight" << mWindowSize.height; + fs << "windowSizeWidth" << mWindowSize.width; + fs << "pyrMaxLevel" << mPyrMaxLevel; } TrackerMedianFlow::TrackerMedianFlow(Params paramsIn) : - termcrit(TermCriteria::COUNT | TermCriteria::EPS,20,0.3), - m_confidence(0.0) + termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.3), + m_confidence(0.0) { - params = paramsIn; - isInit = false; + params = paramsIn; + isInit = false; } bool TrackerMedianFlow::copyTo(TrackerMedianFlow& copy) const { - copy.isInit = isInit; - copy.params = params; - copy.termcrit = termcrit; - copy.m_boundingBox = m_boundingBox; - copy.m_confidence = m_confidence; - m_image.copyTo(copy.m_image); - return true; + copy.isInit = isInit; + copy.params = params; + copy.termcrit = termcrit; + copy.m_boundingBox = m_boundingBox; + copy.m_confidence = m_confidence; + 
m_image.copyTo(copy.m_image); + return true; } bool TrackerMedianFlow::init(const Mat& image, const Rect_& boundingBox) { - if (image.empty()) - { - return false; - } - - image.copyTo(m_image); - buildOpticalFlowPyramid( - m_image, m_pyramid, params.mWindowSize, params.mPyrMaxLevel); - m_boundingBox = boundingBox; - - isInit = true; - return isInit; + if (image.empty()) { + return false; + } + + image.copyTo(m_image); + buildOpticalFlowPyramid( + m_image, m_pyramid, params.mWindowSize, params.mPyrMaxLevel); + m_boundingBox = boundingBox; + + isInit = true; + return isInit; } bool TrackerMedianFlow::update(const Mat& image, Rect_& boundingBox) { - if (!isInit || image.empty()) return false; - - // Handles such behaviour when preparation frame has the size - // different to the tracking frame size. In such case, we resize preparation - // frame and bounding box. Then, track as usually: - if (m_image.rows != image.rows || m_image.cols != image.cols) - { - const float xFactor = (float) image.cols / m_image.cols; - const float yFactor = (float) image.rows / m_image.rows; - - resize(m_image, m_image, Size(), xFactor, yFactor); - - m_boundingBox.x *= xFactor; - m_boundingBox.y *= yFactor; - m_boundingBox.width *= xFactor; - m_boundingBox.height *= yFactor; - } - - Mat oldImage = m_image; - - Rect_ oldBox = m_boundingBox; - if(!medianFlowImpl(oldImage, image, oldBox)) - { - return false; - } - - boundingBox = oldBox; - image.copyTo(m_image); - m_boundingBox = boundingBox; - return true; + if (!isInit || image.empty()) + return false; + + /* Handles such behaviour when preparation frame has the size + * different to the tracking frame size. In such case, we resize preparation + * frame and bounding box. 
Then, track as usually: + */ + if (m_image.rows != image.rows || m_image.cols != image.cols) { + const float xFactor = (float) image.cols / m_image.cols; + const float yFactor = (float) image.rows / m_image.rows; + + resize(m_image, m_image, Size(), xFactor, yFactor); + + m_boundingBox.x *= xFactor; + m_boundingBox.y *= yFactor; + m_boundingBox.width *= xFactor; + m_boundingBox.height *= yFactor; + } + + Mat oldImage = m_image; + + Rect_ oldBox = m_boundingBox; + if(!medianFlowImpl(oldImage, image, oldBox)) { + return false; + } + + boundingBox = oldBox; + image.copyTo(m_image); + m_boundingBox = boundingBox; + return true; } bool TrackerMedianFlow::isInited() const { - return isInit; + return isInit; } float TrackerMedianFlow::getLastConfidence() const { - return m_confidence; + return m_confidence; } Rect_ TrackerMedianFlow::getLastBoundingBox() const { - return m_boundingBox; + return m_boundingBox; } bool TrackerMedianFlow::medianFlowImpl( - Mat oldImage_gray, Mat newImage_gray, Rect_& oldBox) + Mat oldImage_gray, Mat newImage_gray, Rect_& oldBox) { - std::vector pointsToTrackOld, pointsToTrackNew; - - const float gridXStep = oldBox.width / params.mPointsInGrid; - const float gridYStep = oldBox.height / params.mPointsInGrid; - for (int i = 0; i < params.mPointsInGrid; i++) - { - for (int j = 0; j < params.mPointsInGrid; j++) - { - pointsToTrackOld.push_back( - Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j, - oldBox.y + .5f*gridYStep + 1.f*gridYStep*i)); - } - } - - std::vector status(pointsToTrackOld.size()); - std::vector errors(pointsToTrackOld.size()); - - std::vector tempPyramid; - buildOpticalFlowPyramid( - newImage_gray, - tempPyramid, - params.mWindowSize, - params.mPyrMaxLevel); - - calcOpticalFlowPyrLK(m_pyramid, - tempPyramid, - pointsToTrackOld, - pointsToTrackNew, - status, - errors, - params.mWindowSize, - params.mPyrMaxLevel, - termcrit); - - std::vector di; - for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++) - { - if (status[idx] 
== 1) - { - di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]); - } - } - - std::vector filter_status; - check_FB(tempPyramid, - pointsToTrackOld, - pointsToTrackNew, - filter_status); - check_NCC(oldImage_gray, - newImage_gray, - pointsToTrackOld, - pointsToTrackNew, - filter_status); - - for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++) - { - if (!filter_status[idx]) - { - pointsToTrackOld.erase(pointsToTrackOld.begin() + idx); - pointsToTrackNew.erase(pointsToTrackNew.begin() + idx); - filter_status.erase(filter_status.begin() + idx); - idx--; - } - } - - if (pointsToTrackOld.size() == 0 || di.size() == 0) - { - return false; - } - - Point2f mDisplacement; - Rect_ boxCandidate = - vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement); - - std::vector displacements; - for (size_t idx = 0u; idx < di.size(); idx++) - { - di[idx] -= mDisplacement; - displacements.push_back(sqrt(di[idx].ddot(di[idx]))); - } - - m_confidence = - (10.f - getMedian(displacements,(int)displacements.size())) / 10.f; - if (m_confidence <= 0.f) - { - m_confidence = 0.f; - return false; - } - - m_pyramid.swap(tempPyramid); - oldBox = boxCandidate; - return true; + std::vector pointsToTrackOld, pointsToTrackNew; + + const float gridXStep = oldBox.width / params.mPointsInGrid; + const float gridYStep = oldBox.height / params.mPointsInGrid; + for (int i = 0; i < params.mPointsInGrid; i++) { + for (int j = 0; j < params.mPointsInGrid; j++) { + pointsToTrackOld.push_back( + Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j, + oldBox.y + .5f*gridYStep + 1.f*gridYStep*i)); + } + } + + std::vector status(pointsToTrackOld.size()); + std::vector errors(pointsToTrackOld.size()); + + std::vector tempPyramid; + buildOpticalFlowPyramid( + newImage_gray, + tempPyramid, + params.mWindowSize, + params.mPyrMaxLevel); + + calcOpticalFlowPyrLK(m_pyramid, + tempPyramid, + pointsToTrackOld, + pointsToTrackNew, + status, + errors, + params.mWindowSize, + params.mPyrMaxLevel, + 
termcrit); + + std::vector di; + for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++) { + if (status[idx] == 1) { + di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]); + } + } + + std::vector filter_status; + check_FB(tempPyramid, + pointsToTrackOld, + pointsToTrackNew, + filter_status); + + check_NCC(oldImage_gray, + newImage_gray, + pointsToTrackOld, + pointsToTrackNew, + filter_status); + + for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++) { + if (!filter_status[idx]) { + pointsToTrackOld.erase(pointsToTrackOld.begin() + idx); + pointsToTrackNew.erase(pointsToTrackNew.begin() + idx); + filter_status.erase(filter_status.begin() + idx); + idx--; + } + } + + if (pointsToTrackOld.size() == 0 || di.size() == 0) { + return false; + } + + Point2f mDisplacement; + Rect_ boxCandidate = + vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement); + + std::vector displacements; + for (size_t idx = 0u; idx < di.size(); idx++) { + di[idx] -= mDisplacement; + displacements.push_back(sqrt(di[idx].ddot(di[idx]))); + } + + m_confidence = + (10.f - getMedian(displacements, (int)displacements.size())) / 10.f; + if (m_confidence <= 0.f) { + m_confidence = 0.f; + return false; + } + + m_pyramid.swap(tempPyramid); + oldBox = boxCandidate; + return true; } Rect_ TrackerMedianFlow::vote( - const std::vector& oldPoints, - const std::vector& newPoints, - const Rect_& oldRect, - Point2f& mD) + const std::vector& oldPoints, + const std::vector& newPoints, + const Rect_& oldRect, + Point2f& mD) { - Rect_ newRect; - Point2d newCenter(oldRect.x + oldRect.width/2.0, - oldRect.y + oldRect.height/2.0); - - int n = (int)oldPoints.size(); - std::vector buf(std::max( n*(n-1) / 2, 3), 0.f); - - if(oldPoints.size() == 1) - { - newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x; - newRect.y = oldRect.y+newPoints[0].y-oldPoints[0].y; - newRect.width=oldRect.width; - newRect.height=oldRect.height; - return newRect; - } - - float xshift = 0.f; - float yshift = 0.f; - 
for(int i = 0; i < n; i++) - { - buf[i] = newPoints[i].x - oldPoints[i].x; - } - - xshift = getMedian(buf, n); - newCenter.x += xshift; - for(int idx = 0; idx < n; idx++) - { - buf[idx] = newPoints[idx].y - oldPoints[idx].y; - } - - yshift = getMedian(buf, n); - newCenter.y += yshift; - mD = Point2f(xshift, yshift); - - if(oldPoints.size() == 1) - { - newRect.x = newCenter.x - oldRect.width / 2.0; - newRect.y = newCenter.y - oldRect.height / 2.0; - newRect.width = oldRect.width; - newRect.height = oldRect.height; - return newRect; - } - - float nd = 0.f; - float od = 0.f; - for (int i = 0, ctr = 0; i < n; i++) - { - for(int j = 0; j < i; j++) - { - nd = l2distance(newPoints[i], newPoints[j]); - od = l2distance(oldPoints[i], oldPoints[j]); - buf[ctr] = (od == 0.f ? 0.f : nd / od); - ctr++; - } - } - - float scale = getMedian(buf, n*(n-1) / 2); - newRect.x = newCenter.x - scale * oldRect.width / 2.f; - newRect.y = newCenter.y-scale * oldRect.height / 2.f; - newRect.width = scale * oldRect.width; - newRect.height = scale * oldRect.height; - - return newRect; + Rect_ newRect; + Point2d newCenter(oldRect.x + oldRect.width/2.0, + oldRect.y + oldRect.height/2.0); + + int n = (int)oldPoints.size(); + std::vectorbuf(std::max(n*(n-1) / 2, 3), 0.f); + + if(oldPoints.size() == 1) { + newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x; + newRect.y = oldRect.y+newPoints[0].y-oldPoints[0].y; + newRect.width = oldRect.width; + newRect.height = oldRect.height; + return newRect; + } + + float xshift = 0.f; + float yshift = 0.f; + for(int i = 0; i < n; i++) { + buf[i] = newPoints[i].x - oldPoints[i].x; + } + + xshift = getMedian(buf, n); + newCenter.x += xshift; + for(int idx = 0; idx < n; idx++) { + buf[idx] = newPoints[idx].y - oldPoints[idx].y; + } + + yshift = getMedian(buf, n); + newCenter.y += yshift; + mD = Point2f(xshift, yshift); + + if(oldPoints.size() == 1) { + newRect.x = newCenter.x - oldRect.width / 2.0; + newRect.y = newCenter.y - oldRect.height / 2.0; + newRect.width 
= oldRect.width; + newRect.height = oldRect.height; + return newRect; + } + + float nd = 0.f; + float od = 0.f; + for (int i = 0, ctr = 0; i < n; i++) { + for(int j = 0; j < i; j++) { + nd = l2distance(newPoints[i], newPoints[j]); + od = l2distance(oldPoints[i], oldPoints[j]); + buf[ctr] = (od == 0.f ? 0.f : nd / od); + ctr++; + } + } + + float scale = getMedian(buf, n*(n-1) / 2); + newRect.x = newCenter.x - scale * oldRect.width / 2.f; + newRect.y = newCenter.y-scale * oldRect.height / 2.f; + newRect.width = scale * oldRect.width; + newRect.height = scale * oldRect.height; + + return newRect; } template T TrackerMedianFlow::getMedian(std::vector& values, int size) { - if (size == -1) - { - size = (int)values.size(); - } - - std::vector copy(values.begin(), values.begin() + size); - std::sort(copy.begin(),copy.end()); - if(size%2==0) - { - return (copy[size/2-1]+copy[size/2])/((T)2.0); - } - else - { - return copy[(size - 1) / 2]; - } + if (size == -1) { + size = (int)values.size(); + } + + std::vector copy(values.begin(), values.begin() + size); + std::sort(copy.begin(), copy.end()); + if(size%2 == 0) { + return (copy[size/2-1]+copy[size/2])/((T)2.0); + } else { + return copy[(size - 1) / 2]; + } } float TrackerMedianFlow::l2distance(Point2f p1, Point2f p2) { - float dx = p1.x - p2.x; - float dy = p1.y - p2.y; - return sqrt(dx * dx + dy * dy); + float dx = p1.x - p2.x; + float dy = p1.y - p2.y; + return sqrt(dx * dx + dy * dy); } void TrackerMedianFlow::check_FB( - std::vector newPyramid, - const std::vector& oldPoints, - const std::vector& newPoints, - std::vector& status) + std::vector newPyramid, + const std::vector& oldPoints, + const std::vector& newPoints, + std::vector& status) { - if(status.size() == 0) - { - status = std::vector(oldPoints.size(), true); - } - - std::vector LKstatus(oldPoints.size()); - std::vector errors(oldPoints.size()); - std::vector FBerror(oldPoints.size()); - std::vector pointsToTrackReprojection; - - 
calcOpticalFlowPyrLK(newPyramid, - m_pyramid, - newPoints, - pointsToTrackReprojection, - LKstatus, - errors, - params.mWindowSize, - params.mPyrMaxLevel, - termcrit); - - for (size_t idx = 0u; idx < oldPoints.size(); idx++) - { - FBerror[idx] = l2distance(oldPoints[idx], pointsToTrackReprojection[idx]); - } - - float FBerrorMedian = getMedian(FBerror) + FloatEps; - for (size_t idx = 0u; idx < oldPoints.size(); idx++) - { - status[idx] = (FBerror[idx] < FBerrorMedian); - } + if(status.size() == 0) { + status = std::vector(oldPoints.size(), true); + } + + std::vector LKstatus(oldPoints.size()); + std::vector errors(oldPoints.size()); + std::vector FBerror(oldPoints.size()); + std::vector pointsToTrackReprojection; + + calcOpticalFlowPyrLK(newPyramid, + m_pyramid, + newPoints, + pointsToTrackReprojection, + LKstatus, + errors, + params.mWindowSize, + params.mPyrMaxLevel, + termcrit); + + for (size_t idx = 0u; idx < oldPoints.size(); idx++) { + FBerror[idx] = + l2distance(oldPoints[idx], pointsToTrackReprojection[idx]); + } + + float FBerrorMedian = getMedian(FBerror) + FloatEps; + for (size_t idx = 0u; idx < oldPoints.size(); idx++) { + status[idx] = (FBerror[idx] < FBerrorMedian); + } } void TrackerMedianFlow::check_NCC( - const Mat& oldImage, - const Mat& newImage, - const std::vector& oldPoints, - const std::vector& newPoints, - std::vector& status) + const Mat& oldImage, + const Mat& newImage, + const std::vector& oldPoints, + const std::vector& newPoints, + std::vector& status) { - std::vector NCC(oldPoints.size(), 0.f); - Size patch(30, 30); - Mat p1; - Mat p2; - - for (size_t idx = 0u; idx < oldPoints.size(); idx++) - { - getRectSubPix(oldImage, patch, oldPoints[idx], p1); - getRectSubPix(newImage, patch, newPoints[idx], p2); - - const int N = 900; - const float s1 = sum(p1)(0); - const float s2 = sum(p2)(0); - const float n1 = norm(p1); - const float n2 = norm(p2); - const float prod = p1.dot(p2); - const float sq1 = sqrt(n1 * n1 - s1 * s1 / N); - const float 
sq2 = sqrt(n2 * n2 - s2 * s2 / N); - NCC[idx] = (sq2==0 ? sq1 / std::abs(sq1) - : (prod - s1 * s2 / N) / sq1 / sq2); - } - - float median = getMedian(NCC) - FloatEps; - for(size_t idx = 0u; idx < oldPoints.size(); idx++) - { - status[idx] = status[idx] && (NCC[idx] > median); - } + std::vector NCC(oldPoints.size(), 0.f); + Size patch(30, 30); + Mat p1; + Mat p2; + + for (size_t idx = 0u; idx < oldPoints.size(); idx++) { + getRectSubPix(oldImage, patch, oldPoints[idx], p1); + getRectSubPix(newImage, patch, newPoints[idx], p2); + + const int N = 900; + const float s1 = sum(p1)(0); + const float s2 = sum(p2)(0); + const float n1 = norm(p1); + const float n2 = norm(p2); + const float prod = p1.dot(p2); + const float sq1 = sqrt(n1 * n1 - s1 * s1 / N); + const float sq2 = sqrt(n2 * n2 - s2 * s2 / N); + NCC[idx] = (sq2 == 0 ? sq1 / std::abs(sq1) + : (prod - s1 * s2 / N) / sq1 / sq2); + } + + float median = getMedian(NCC) - FloatEps; + for(size_t idx = 0u; idx < oldPoints.size(); idx++) { + status[idx] = status[idx] && (NCC[idx] > median); + } } -void TrackerMedianFlow::read( cv::FileStorage& fs ) +void TrackerMedianFlow::read(cv::FileStorage& fs) { - params.read(fs.root()); - float bbX = 0.f; - float bbY = 0.f; - float bbW = 0.f; - float bbH = 0.f; - fs["lastLocationX"] >> bbX; - fs["lastLocationY"] >> bbY; - fs["lastLocationW"] >> bbW; - fs["lastLocationH"] >> bbH; - m_boundingBox = Rect_(bbX, bbY, bbW, bbH); - fs["lastImage"] >> m_image; + params.read(fs.root()); + float bbX = 0.f; + float bbY = 0.f; + float bbW = 0.f; + float bbH = 0.f; + fs["lastLocationX"] >> bbX; + fs["lastLocationY"] >> bbY; + fs["lastLocationW"] >> bbW; + fs["lastLocationH"] >> bbH; + m_boundingBox = Rect_(bbX, bbY, bbW, bbH); + fs["lastImage"] >> m_image; } -void TrackerMedianFlow::write( cv::FileStorage& fs ) const +void TrackerMedianFlow::write(cv::FileStorage& fs) const { - params.write(fs); - fs << "lastLocationX" << m_boundingBox.x; - fs << "lastLocationY" << m_boundingBox.y; - fs << 
"lastLocationW" << m_boundingBox.width; - fs << "lastLocationH" << m_boundingBox.height; - fs << "lastImage" << m_image; + params.write(fs); + fs << "lastLocationX" << m_boundingBox.x; + fs << "lastLocationY" << m_boundingBox.y; + fs << "lastLocationW" << m_boundingBox.width; + fs << "lastLocationH" << m_boundingBox.height; + fs << "lastImage" << m_image; } } /* namespace cv */ diff --git a/mv_face/face/src/mv_face_open.cpp b/mv_face/face/src/mv_face_open.cpp index 41f2398..0412f85 100644 --- a/mv_face/face/src/mv_face_open.cpp +++ b/mv_face/face/src/mv_face_open.cpp @@ -34,1015 +34,918 @@ using namespace ::MediaVision::Face; static const RecognitionParams defaultRecognitionParams = RecognitionParams(); static void extractRecognitionParams( - mv_engine_config_h engine_cfg, - RecognitionParams& recognitionParams) + mv_engine_config_h engine_cfg, + RecognitionParams& recognitionParams) { - mv_engine_config_h working_cfg = NULL; - - if (NULL == engine_cfg) - { - mv_create_engine_config(&working_cfg); - } - else - { - working_cfg = engine_cfg; - } - - int algType = 0; - mv_engine_config_get_int_attribute_c( - working_cfg, - "MV_FACE_RECOGNITION_MODEL_TYPE", - &algType); - - if (0 < algType && 4 > algType) - { - recognitionParams.mRecognitionAlgType = - (FaceRecognitionModelType)algType; - } - else - { - recognitionParams.mRecognitionAlgType = - defaultRecognitionParams.mRecognitionAlgType; - } - - if (NULL == engine_cfg) - { - mv_destroy_engine_config(working_cfg); - } + mv_engine_config_h working_cfg = NULL; + + if (NULL == engine_cfg) { + mv_create_engine_config(&working_cfg); + } else { + working_cfg = engine_cfg; + } + + int algType = 0; + mv_engine_config_get_int_attribute_c( + working_cfg, + "MV_FACE_RECOGNITION_MODEL_TYPE", + &algType); + + if (0 < algType && 4 > algType) { + recognitionParams.mRecognitionAlgType = + (FaceRecognitionModelType)algType; + } else { + recognitionParams.mRecognitionAlgType = + defaultRecognitionParams.mRecognitionAlgType; + } + + if 
(NULL == engine_cfg) { + mv_destroy_engine_config(working_cfg); + } } inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst) { - dst.point.x = src.x; - dst.point.y = src.y; - dst.width = src.width; - dst.height = src.height; + dst.point.x = src.x; + dst.point.y = src.y; + dst.width = src.width; + dst.height = src.height; } int mv_face_detect_open( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_face_detected_cb detected_cb, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_face_detected_cb detected_cb, + void *user_data) { - cv::Mat image; - - int error = convertSourceMV2GrayCV(source, image); - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGE("Convertion mv_source_h to gray failed"); - return error; - } - - char *haarcascadeFilepath; - error = mv_engine_config_get_string_attribute_c( - engine_cfg, - "MV_FACE_DETECTION_MODEL_FILE_PATH", - &haarcascadeFilepath); - - //default path - std::string haarcascadeFilePathStr = - "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml"; - - if (error == MEDIA_VISION_ERROR_NONE) - { - LOGI("Haarcascade file was set as default"); - haarcascadeFilePathStr = std::string(haarcascadeFilepath); - - delete[] haarcascadeFilepath; - } - else - { - LOGE("Error occurred during face detection haarcascade file receiving." - " (%i)", error); - } - - static FaceDetector faceDetector; - - if (!faceDetector.loadHaarcascade(haarcascadeFilePathStr)) - { - LOGE("Loading Haarcascade failed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - cv::Rect roi(-1, -1, -1, -1); - error = mv_engine_config_get_int_attribute_c( - engine_cfg, - MV_FACE_DETECTION_ROI_X, - &roi.x); - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGE("Error occurred during face detection roi (x) receiving." 
- " (%i)", error); - } - - error = mv_engine_config_get_int_attribute_c( - engine_cfg, - MV_FACE_DETECTION_ROI_Y, - &roi.y); - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGE("Error occurred during face detection roi (y) receiving." - " (%i)", error); - } - - error = mv_engine_config_get_int_attribute_c( - engine_cfg, - MV_FACE_DETECTION_ROI_WIDTH, - &roi.width); - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGE("Error occurred during face detection roi (width) receiving." - " (%i)", error); - } - - error = mv_engine_config_get_int_attribute_c( - engine_cfg, - MV_FACE_DETECTION_ROI_HEIGHT, - &roi.height); - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGE("Error occurred during face detection roi (height) receiving." - " (%i)", error); - } - - cv::Size minSize(-1, -1); - error = mv_engine_config_get_int_attribute_c( - engine_cfg, - MV_FACE_DETECTION_MIN_SIZE_WIDTH, - &minSize.width); - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGE("Error occurred during face detection minimum width receiving." - " (%i)", error); - } - - error = mv_engine_config_get_int_attribute_c( - engine_cfg, - MV_FACE_DETECTION_MIN_SIZE_HEIGHT, - &minSize.height); - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGE("Error occurred during face detection minimum height receiving." 
- " (%i)", error); - } - - std::vector faceLocations; - if (!faceDetector.detectFaces(image, roi, minSize, faceLocations)) - { - LOGE("Face detection in OpenCV failed"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - static const int StartMaxResultsNumber = 50; - static std::vector results(StartMaxResultsNumber); - - const int numberOfResults = faceLocations.size(); - if (numberOfResults > StartMaxResultsNumber) - { - results.resize(numberOfResults); - } - - for(int rectNum = 0; rectNum < numberOfResults; ++rectNum) - { - convertRectCV2MV(faceLocations[rectNum], results[rectNum]); - } - - LOGI("Call the detect callback for %i detected faces", numberOfResults); - detected_cb(source, engine_cfg, results.data(), numberOfResults, user_data); - - return MEDIA_VISION_ERROR_NONE; + cv::Mat image; + + int error = convertSourceMV2GrayCV(source, image); + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Convertion mv_source_h to gray failed"); + return error; + } + + char *haarcascadeFilepath; + error = mv_engine_config_get_string_attribute_c( + engine_cfg, + "MV_FACE_DETECTION_MODEL_FILE_PATH", + &haarcascadeFilepath); + + /* default path */ + std::string haarcascadeFilePathStr = + "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml"; + + if (error == MEDIA_VISION_ERROR_NONE) { + LOGI("Haarcascade file was set as default"); + haarcascadeFilePathStr = std::string(haarcascadeFilepath); + + delete[] haarcascadeFilepath; + } else { + LOGE("Error occurred during face detection haarcascade file receiving." + " (%i)", error); + } + + static FaceDetector faceDetector; + + if (!faceDetector.loadHaarcascade(haarcascadeFilePathStr)) { + LOGE("Loading Haarcascade failed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + cv::Rect roi(-1, -1, -1, -1); + error = mv_engine_config_get_int_attribute_c( + engine_cfg, + MV_FACE_DETECTION_ROI_X, + &roi.x); + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Error occurred during face detection roi (x) receiving." 
+ " (%i)", error); + } + + error = mv_engine_config_get_int_attribute_c( + engine_cfg, + MV_FACE_DETECTION_ROI_Y, + &roi.y); + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Error occurred during face detection roi (y) receiving." + " (%i)", error); +} + + error = mv_engine_config_get_int_attribute_c( + engine_cfg, + MV_FACE_DETECTION_ROI_WIDTH, + &roi.width); + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Error occurred during face detection roi (width) receiving." + " (%i)", error); + } + + error = mv_engine_config_get_int_attribute_c( + engine_cfg, + MV_FACE_DETECTION_ROI_HEIGHT, + &roi.height); + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Error occurred during face detection roi (height) receiving." + " (%i)", error); + } + + cv::Size minSize(-1, -1); + error = mv_engine_config_get_int_attribute_c( + engine_cfg, + MV_FACE_DETECTION_MIN_SIZE_WIDTH, + &minSize.width); + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Error occurred during face detection minimum width receiving." + " (%i)", error); + } + + error = mv_engine_config_get_int_attribute_c( + engine_cfg, + MV_FACE_DETECTION_MIN_SIZE_HEIGHT, + &minSize.height); + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Error occurred during face detection minimum height receiving." 
+ " (%i)", error); + } + + std::vector faceLocations; + if (!faceDetector.detectFaces(image, roi, minSize, faceLocations)) { + LOGE("Face detection in OpenCV failed"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + static const int StartMaxResultsNumber = 50; + static std::vector results(StartMaxResultsNumber); + + const int numberOfResults = faceLocations.size(); + if (numberOfResults > StartMaxResultsNumber) { + results.resize(numberOfResults); + } + + for (int rectNum = 0; rectNum < numberOfResults; ++rectNum) { + convertRectCV2MV(faceLocations[rectNum], results[rectNum]); + } + + LOGI("Call the detect callback for %i detected faces", numberOfResults); + detected_cb(source, engine_cfg, results.data(), numberOfResults, user_data); + + return MEDIA_VISION_ERROR_NONE; } int mv_face_recognize_open( - mv_source_h source, - mv_face_recognition_model_h recognition_model, - mv_engine_config_h engine_cfg, - mv_rectangle_s *face_location, - mv_face_recognized_cb recognized_cb, - void *user_data) + mv_source_h source, + mv_face_recognition_model_h recognition_model, + mv_engine_config_h engine_cfg, + mv_rectangle_s *face_location, + mv_face_recognized_cb recognized_cb, + void *user_data) { - if (!source) - { - LOGE("Can't recognize for the NULL Media Vision source handle"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - if (!recognized_cb) - { - LOGE("Recognition failed. Can't output recognition results without " - "callback function"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - if (!recognition_model) - { - LOGE("Can't recognize for the NULL Media Vision Face recognition model"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - FaceRecognitionModel *pRecModel = static_cast(recognition_model); - - if (!pRecModel) - { - LOGE("Face recognition failed. 
Incorrect Media Vision Face recognition model handle is used"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - cv::Mat grayImage; - int ret = convertSourceMV2GrayCV(source, grayImage); - - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Convertion mv_source_h to gray failed"); - return ret; - } - - cv::Mat image; - if (NULL == face_location) - { - image = grayImage; - } - else - { - cv::Rect_ roi; - roi.x = face_location->point.x; - roi.y = face_location->point.y; - roi.width = face_location->width; - roi.height = face_location->height; - image = grayImage(roi); - } - - FaceRecognitionResults results; - - LOGD("Face recognition is started"); - - ret = pRecModel->recognize(image, results); - - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Error occurred during the recognition. Failed"); - return ret; - } - - if (!results.mIsRecognized) - { - recognized_cb( - source, - recognition_model, - engine_cfg, - NULL, - NULL, - 0.0, - user_data); - } - else - { - mv_rectangle_s location; - location.point.x = results.mFaceLocation.x; - location.point.y = results.mFaceLocation.y; - location.width = results.mFaceLocation.width; - location.height = results.mFaceLocation.height; - - if (face_location != NULL) - { - location.point.x += face_location->point.x; - location.point.y += face_location->point.y; - } - - recognized_cb( - source, - recognition_model, - engine_cfg, - &location, - &(results.mFaceLabel), - results.mConfidence, - user_data); - } - - LOGD("Face recognition is finished"); - - return ret; + if (!source) { + LOGE("Can't recognize for the NULL Media Vision source handle"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (!recognized_cb) { + LOGE("Recognition failed. 
Can't output recognition results without " + "callback function"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (!recognition_model) { + LOGE("Can't recognize for the NULL Media Vision Face recognition model"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + FaceRecognitionModel *pRecModel = static_cast(recognition_model); + + if (!pRecModel) { + LOGE("Face recognition failed. Incorrect Media Vision Face recognition model handle is used"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + cv::Mat grayImage; + int ret = convertSourceMV2GrayCV(source, grayImage); + + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Convertion mv_source_h to gray failed"); + return ret; + } + + cv::Mat image; + if (NULL == face_location) { + image = grayImage; + } else { + cv::Rect_ roi; + roi.x = face_location->point.x; + roi.y = face_location->point.y; + roi.width = face_location->width; + roi.height = face_location->height; + image = grayImage(roi); + } + + FaceRecognitionResults results; + + LOGD("Face recognition is started"); + + ret = pRecModel->recognize(image, results); + + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Error occurred during the recognition. 
Failed"); + return ret; + } + + if (!results.mIsRecognized) { + recognized_cb( + source, + recognition_model, + engine_cfg, + NULL, + NULL, + 0.0, + user_data); + } else { + mv_rectangle_s location; + location.point.x = results.mFaceLocation.x; + location.point.y = results.mFaceLocation.y; + location.width = results.mFaceLocation.width; + location.height = results.mFaceLocation.height; + + if (face_location != NULL) { + location.point.x += face_location->point.x; + location.point.y += face_location->point.y; + } + + recognized_cb( + source, + recognition_model, + engine_cfg, + &location, + &(results.mFaceLabel), + results.mConfidence, + user_data); + } + + LOGD("Face recognition is finished"); + + return ret; } int mv_face_track_open( - mv_source_h source, - mv_face_tracking_model_h tracking_model, - mv_engine_config_h engine_cfg, - mv_face_tracked_cb tracked_cb, - bool /*do_learn*/, - void *user_data) + mv_source_h source, + mv_face_tracking_model_h tracking_model, + mv_engine_config_h engine_cfg, + mv_face_tracked_cb tracked_cb, + bool /*do_learn*/, + void *user_data) { - if (!source) - { - LOGE("Can't track for the NULL Media Vision source handle"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - if (!tracked_cb) - { - LOGE("Tracking failed. Can't output tracking results without " - "callback function"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - if (!tracking_model) - { - LOGE("Can't track for the NULL Media Vision Face tracking model"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - FaceTrackingModel *pTrackModel = - static_cast(tracking_model); - - if (!pTrackModel) - { - LOGE("Face tracking failed. 
" - "Incorrect Media Vision Face tracking model handle is used"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - cv::Mat grayImage; - int ret = convertSourceMV2GrayCV(source, grayImage); - - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Convertion mv_source_h to gray failed"); - return ret; - } - - FaceTrackingResults results; - ret = pTrackModel->track(grayImage, results); - - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Tracking can't be performed. " - "Check that tracking model is prepared when tracking starts"); - return ret; - } - - if (results.mIsTracked) - { - mv_quadrangle_s predictedLocation; - predictedLocation.points[0].x = results.mFaceLocation.x; - predictedLocation.points[0].y = results.mFaceLocation.y; - predictedLocation.points[1].x = - results.mFaceLocation.x + results.mFaceLocation.width; - predictedLocation.points[1].y = results.mFaceLocation.y; - predictedLocation.points[2].x = - results.mFaceLocation.x + results.mFaceLocation.width; - predictedLocation.points[2].y = - results.mFaceLocation.y + results.mFaceLocation.height; - predictedLocation.points[3].x = results.mFaceLocation.x; - predictedLocation.points[3].y = - results.mFaceLocation.y + results.mFaceLocation.height; - tracked_cb( - source, - tracking_model, - engine_cfg, - &predictedLocation, - results.mConfidence, - user_data); - } - else - { - tracked_cb( - source, - tracking_model, - engine_cfg, - NULL, - results.mConfidence, - user_data); - } - - return ret; + if (!source) { + LOGE("Can't track for the NULL Media Vision source handle"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (!tracked_cb) { + LOGE("Tracking failed. 
Can't output tracking results without " + "callback function"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (!tracking_model) { + LOGE("Can't track for the NULL Media Vision Face tracking model"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + FaceTrackingModel *pTrackModel = + static_cast(tracking_model); + + if (!pTrackModel) { + LOGE("Face tracking failed. " + "Incorrect Media Vision Face tracking model handle is used"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + cv::Mat grayImage; + int ret = convertSourceMV2GrayCV(source, grayImage); + + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Convertion mv_source_h to gray failed"); + return ret; + } + + FaceTrackingResults results; + ret = pTrackModel->track(grayImage, results); + + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Tracking can't be performed. " + "Check that tracking model is prepared when tracking starts"); + return ret; + } + + if (results.mIsTracked) { + mv_quadrangle_s predictedLocation; + predictedLocation.points[0].x = results.mFaceLocation.x; + predictedLocation.points[0].y = results.mFaceLocation.y; + predictedLocation.points[1].x = + results.mFaceLocation.x + results.mFaceLocation.width; + predictedLocation.points[1].y = results.mFaceLocation.y; + predictedLocation.points[2].x = + results.mFaceLocation.x + results.mFaceLocation.width; + predictedLocation.points[2].y = + results.mFaceLocation.y + results.mFaceLocation.height; + predictedLocation.points[3].x = results.mFaceLocation.x; + predictedLocation.points[3].y = + results.mFaceLocation.y + results.mFaceLocation.height; + tracked_cb( + source, + tracking_model, + engine_cfg, + &predictedLocation, + results.mConfidence, + user_data); + } else { + tracked_cb( + source, + tracking_model, + engine_cfg, + NULL, + results.mConfidence, + user_data); + } + + return ret; } int mv_face_eye_condition_recognize_open( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s face_location, - 
mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s face_location, + mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, + void *user_data) { - cv::Mat image; - - int error = convertSourceMV2GrayCV(source, image); - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGE("Convertion mv_source_h to gray failed"); - return error; - } - - mv_face_eye_condition_e eye_condition; - error = FaceEyeCondition::recognizeEyeCondition( - image, - face_location, - &eye_condition); - - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGE("eye contition recognition failed"); - return error; - } - - eye_condition_recognized_cb( - source, - engine_cfg, - face_location, - eye_condition, - user_data); - - return MEDIA_VISION_ERROR_NONE; + cv::Mat image; + + int error = convertSourceMV2GrayCV(source, image); + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Convertion mv_source_h to gray failed"); + return error; + } + + mv_face_eye_condition_e eye_condition; + error = FaceEyeCondition::recognizeEyeCondition( + image, + face_location, + &eye_condition); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("eye contition recognition failed"); + return error; + } + + eye_condition_recognized_cb( + source, + engine_cfg, + face_location, + eye_condition, + user_data); + + return MEDIA_VISION_ERROR_NONE; } int mv_face_facial_expression_recognize_open( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s face_location, - mv_face_facial_expression_recognized_cb expression_recognized_cb, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s face_location, + mv_face_facial_expression_recognized_cb expression_recognized_cb, + void *user_data) { - cv::Mat image; - - int error = convertSourceMV2GrayCV(source, image); - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGE("Convertion mv_source_h to gray failed"); - return error; - } - - 
mv_face_facial_expression_e expression; - error = FaceExpressionRecognizer::recognizeFaceExpression( - image, face_location, &expression); - - if (error != MEDIA_VISION_ERROR_NONE) - { - LOGE("eye contition recognition failed"); - return error; - } - - expression_recognized_cb( - source, - engine_cfg, - face_location, - expression, - user_data); - - return MEDIA_VISION_ERROR_NONE; + cv::Mat image; + + int error = convertSourceMV2GrayCV(source, image); + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Convertion mv_source_h to gray failed"); + return error; + } + + mv_face_facial_expression_e expression; + error = FaceExpressionRecognizer::recognizeFaceExpression( + image, face_location, &expression); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("eye contition recognition failed"); + return error; + } + + expression_recognized_cb( + source, + engine_cfg, + face_location, + expression, + user_data); + + return MEDIA_VISION_ERROR_NONE; } int mv_face_recognition_model_create_open( - mv_face_recognition_model_h *recognition_model) + mv_face_recognition_model_h *recognition_model) { - if (recognition_model == NULL) - { - LOGE("Recognition model can't be created because handle pointer is NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (recognition_model == NULL) { + LOGE("Recognition model can't be created because handle pointer is NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - (*recognition_model) = - static_cast(new (std::nothrow)FaceRecognitionModel()); + (*recognition_model) = + static_cast(new (std::nothrow)FaceRecognitionModel()); - if (*recognition_model == NULL) - { - LOGE("Failed to create media vision recognition model"); - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } + if (*recognition_model == NULL) { + LOGE("Failed to create media vision recognition model"); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } - LOGD("Recognition model [%p] has been created", *recognition_model); + LOGD("Recognition model [%p] has been created", 
*recognition_model); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_face_recognition_model_destroy_open( - mv_face_recognition_model_h recognition_model) + mv_face_recognition_model_h recognition_model) { - if (!recognition_model) - { - LOGE("Recognition model can't be destroyed because handle is NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (!recognition_model) { + LOGE("Recognition model can't be destroyed because handle is NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - LOGD("Destroying media vision recognition model [%p]", recognition_model); - delete static_cast(recognition_model); - LOGD("Media vision recognition model has been destroyed"); + LOGD("Destroying media vision recognition model [%p]", recognition_model); + delete static_cast(recognition_model); + LOGD("Media vision recognition model has been destroyed"); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_face_recognition_model_clone_open( - mv_face_recognition_model_h src, - mv_face_recognition_model_h *dst) + mv_face_recognition_model_h src, + mv_face_recognition_model_h *dst) { - if (!src || !dst) - { - LOGE("Can't clone recognition model. Both source and destination" - "recognition model handles has to be not NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (!src || !dst) { + LOGE("Can't clone recognition model. 
Both source and destination" + "recognition model handles has to be not NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - (*dst) = static_cast(new (std::nothrow)FaceRecognitionModel()); + (*dst) = static_cast(new (std::nothrow)FaceRecognitionModel()); - if (*dst == NULL) - { - LOGE("Failed to create media vision recognition model"); - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } + if (*dst == NULL) { + LOGE("Failed to create media vision recognition model"); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } - LOGD("Recognition model [%p] has been created", *dst); + LOGD("Recognition model [%p] has been created", *dst); - const FaceRecognitionModel *pSrcModel = static_cast(src); - FaceRecognitionModel *pDstModel = static_cast(*dst); + const FaceRecognitionModel *pSrcModel = static_cast(src); + FaceRecognitionModel *pDstModel = static_cast(*dst); - *pDstModel = *pSrcModel; + *pDstModel = *pSrcModel; - LOGD("Media vision recognition model has been cloned"); - return MEDIA_VISION_ERROR_NONE; + LOGD("Media vision recognition model has been cloned"); + return MEDIA_VISION_ERROR_NONE; } int mv_face_recognition_model_save_open( - const char *file_name, - mv_face_recognition_model_h recognition_model) + const char *file_name, + mv_face_recognition_model_h recognition_model) { - if (!recognition_model) - { - LOGE("Can't save recognition model to the file. Handle has to be not NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (NULL == file_name) - { - LOGE("Can't save recognition model to the file. 
File name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - FaceRecognitionModel *pRecModel = static_cast(recognition_model); - const int ret = pRecModel->save(std::string(file_name)); - - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Error occurred when save recognition model to the file"); - return ret; - } - - LOGD("Media vision recognition model has been saved to the file [%s]", file_name); - return ret; + if (!recognition_model) { + LOGE("Can't save recognition model to the file. Handle has to be not NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (NULL == file_name) { + LOGE("Can't save recognition model to the file. File name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + FaceRecognitionModel *pRecModel = static_cast(recognition_model); + const int ret = pRecModel->save(std::string(file_name)); + + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Error occurred when save recognition model to the file"); + return ret; + } + + LOGD("Media vision recognition model has been saved to the file [%s]", file_name); + return ret; } int mv_face_recognition_model_load_open( - const char *file_name, - mv_face_recognition_model_h *recognition_model) + const char *file_name, + mv_face_recognition_model_h *recognition_model) { - if (!recognition_model) - { - LOGE("Can't load recognition model from the file. " - "Handle has to be not NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (NULL == file_name) - { - LOGE("Can't load recognition model from the file. 
" - "File name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - (*recognition_model) = - static_cast(new (std::nothrow)FaceRecognitionModel()); - - if (*recognition_model == NULL) - { - LOGE("Failed to create media vision recognition model"); - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } - - FaceRecognitionModel *pRecModel = - static_cast(*recognition_model); - - if (!pRecModel) - { - LOGE("Loading of the face recognition model from file failed. " - "Incorrect Media Vision Face recognition model handle is used"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - const int ret = pRecModel->load(std::string(file_name)); - - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Error occurred when loading recognition model to the file"); - return ret; - } - - LOGD("Media vision recognition model has been loaded from the file [%s]", file_name); - return ret; + if (!recognition_model) { + LOGE("Can't load recognition model from the file. " + "Handle has to be not NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (NULL == file_name) { + LOGE("Can't load recognition model from the file. " + "File name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + (*recognition_model) = + static_cast(new (std::nothrow)FaceRecognitionModel()); + + if (*recognition_model == NULL) { + LOGE("Failed to create media vision recognition model"); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } + + FaceRecognitionModel *pRecModel = + static_cast(*recognition_model); + + if (!pRecModel) { + LOGE("Loading of the face recognition model from file failed. 
" + "Incorrect Media Vision Face recognition model handle is used"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + const int ret = pRecModel->load(std::string(file_name)); + + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Error occurred when loading recognition model to the file"); + return ret; + } + + LOGD("Media vision recognition model has been loaded from the file [%s]", file_name); + return ret; } int mv_face_recognition_model_add_open( - const mv_source_h source, - mv_face_recognition_model_h recognition_model, - const mv_rectangle_s *example_location, - int face_label) + const mv_source_h source, + mv_face_recognition_model_h recognition_model, + const mv_rectangle_s *example_location, + int face_label) { - if (!source) - { - LOGE("Can't add face image example for recognition model. " - "Media Vision source handle has to be not NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (!recognition_model) - { - LOGE("Can't add face image example for recognition model. " - "Model handle has to be not NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - FaceRecognitionModel *pRecModel = - static_cast(recognition_model); - - if (!pRecModel) - { - LOGE("Add face image example to the model failed. 
" - "Incorrect Media Vision Face recognition model handle is used"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - cv::Mat image; - int ret = convertSourceMV2GrayCV(source, image); - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Convertion mv_source_h to gray failed"); - return ret; - } - - if (!example_location) - { - ret = pRecModel->addFaceExample(image, face_label); - } - else - { - cv::Rect_ roi; - roi.x = example_location->point.x; - roi.y = example_location->point.y; - roi.width = example_location->width; - roi.height = example_location->height; - ret = pRecModel->addFaceExample(image(roi).clone(), face_label); - } - - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Error occurred when adding face image example to the recognition model"); - return ret; - } - - LOGD("The face image example labeled %i has been added " - "to the Media Vision recognition model", face_label); - return ret; + if (!source) { + LOGE("Can't add face image example for recognition model. " + "Media Vision source handle has to be not NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (!recognition_model) { + LOGE("Can't add face image example for recognition model. " + "Model handle has to be not NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + FaceRecognitionModel *pRecModel = + static_cast(recognition_model); + + if (!pRecModel) { + LOGE("Add face image example to the model failed. 
" + "Incorrect Media Vision Face recognition model handle is used"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + cv::Mat image; + int ret = convertSourceMV2GrayCV(source, image); + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Convertion mv_source_h to gray failed"); + return ret; + } + + if (!example_location) { + ret = pRecModel->addFaceExample(image, face_label); + } else { + cv::Rect_ roi; + roi.x = example_location->point.x; + roi.y = example_location->point.y; + roi.width = example_location->width; + roi.height = example_location->height; + ret = pRecModel->addFaceExample(image(roi).clone(), face_label); + } + + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Error occurred when adding face image example to the recognition model"); + return ret; + } + + LOGD("The face image example labeled %i has been added " + "to the Media Vision recognition model", face_label); + return ret; } int mv_face_recognition_model_reset_open( - mv_face_recognition_model_h recognition_model, - const int *face_label) + mv_face_recognition_model_h recognition_model, + const int *face_label) { - if (!recognition_model) - { - LOGE("Can't reset positive examples for NULL recognition model"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - FaceRecognitionModel *pRecModel = - static_cast(recognition_model); - - if (!pRecModel) - { - LOGE("Loading of the face recognition model from file failed. " - "Incorrect Media Vision Face recognition model handle is used"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - int ret = (NULL != face_label ? 
- pRecModel->resetFaceExamples(*face_label) : - pRecModel->resetFaceExamples()); - - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Error occurred when reset positive examples of the recognition model"); - return ret; - } - - LOGD("The positive examples has been removed from recognition model"); - return ret; + if (!recognition_model) { + LOGE("Can't reset positive examples for NULL recognition model"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + FaceRecognitionModel *pRecModel = + static_cast(recognition_model); + + if (!pRecModel) { + LOGE("Loading of the face recognition model from file failed. " + "Incorrect Media Vision Face recognition model handle is used"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + int ret = (NULL != face_label ? + pRecModel->resetFaceExamples(*face_label) : + pRecModel->resetFaceExamples()); + + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Error occurred when reset positive examples of the recognition model"); + return ret; + } + + LOGD("The positive examples has been removed from recognition model"); + return ret; } int mv_face_recognition_model_learn_open( - mv_engine_config_h engine_cfg, - mv_face_recognition_model_h recognition_model) + mv_engine_config_h engine_cfg, + mv_face_recognition_model_h recognition_model) { - if (!recognition_model) - { - LOGE("Can't learn recognition model. Model handle has to be not NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - FaceRecognitionModel *pRecModel = - static_cast(recognition_model); - - if (!pRecModel) - { - LOGE("Learning of the face recognition model failed. 
" - "Incorrect Media Vision Face recognition model handle is used"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - RecognitionParams recognitionParams; - extractRecognitionParams(engine_cfg, recognitionParams); - FaceRecognitionModelConfig learnConfig; - learnConfig.mModelType = recognitionParams.mRecognitionAlgType; - - const int ret = pRecModel->learn(learnConfig); - - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Error occurred when learn face recognition model"); - return ret; - } - - LOGD("Face recognition model has been learned"); - return ret; + if (!recognition_model) { + LOGE("Can't learn recognition model. Model handle has to be not NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + FaceRecognitionModel *pRecModel = + static_cast(recognition_model); + + if (!pRecModel) { + LOGE("Learning of the face recognition model failed. " + "Incorrect Media Vision Face recognition model handle is used"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + RecognitionParams recognitionParams; + extractRecognitionParams(engine_cfg, recognitionParams); + FaceRecognitionModelConfig learnConfig; + learnConfig.mModelType = recognitionParams.mRecognitionAlgType; + + const int ret = pRecModel->learn(learnConfig); + + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Error occurred when learn face recognition model"); + return ret; + } + + LOGD("Face recognition model has been learned"); + return ret; } int mv_face_recognition_model_query_labels_open( - mv_face_recognition_model_h recognition_model, - int **labels, - unsigned int *number_of_labels) + mv_face_recognition_model_h recognition_model, + int **labels, + unsigned int *number_of_labels) { - if (!recognition_model) - { - LOGE("Can't get list of labels for NULL recognition model"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (NULL == labels || NULL == number_of_labels) - { - LOGE("Can't get list of labels. 
labels and number_of_labels out " - "parameters both has to be not NULL."); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - FaceRecognitionModel *pRecModel = - static_cast(recognition_model); - - if (!pRecModel) - { - LOGE("Learning of the face recognition model failed. " - "Incorrect Media Vision Face recognition model handle is used"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - const std::set& learnedLabels = pRecModel->getFaceLabels(); - *number_of_labels = learnedLabels.size(); - (*labels) = new int[*number_of_labels]; - - std::set::const_iterator it = learnedLabels.begin(); - int i = 0; - for (; it != learnedLabels.end(); ++it) - { - (*labels)[i] = *it; - ++i; - } - - LOGD("List of the labels learned by the recognition model has been retrieved"); - return MEDIA_VISION_ERROR_NONE; + if (!recognition_model) { + LOGE("Can't get list of labels for NULL recognition model"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (NULL == labels || NULL == number_of_labels) { + LOGE("Can't get list of labels. labels and number_of_labels out " + "parameters both has to be not NULL."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + FaceRecognitionModel *pRecModel = + static_cast(recognition_model); + + if (!pRecModel) { + LOGE("Learning of the face recognition model failed. 
" + "Incorrect Media Vision Face recognition model handle is used"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + const std::set& learnedLabels = pRecModel->getFaceLabels(); + *number_of_labels = learnedLabels.size(); + (*labels) = new int[*number_of_labels]; + + std::set::const_iterator it = learnedLabels.begin(); + int i = 0; + for (; it != learnedLabels.end(); ++it) { + (*labels)[i] = *it; + ++i; + } + + LOGD("List of the labels learned by the recognition model has been retrieved"); + return MEDIA_VISION_ERROR_NONE; } int mv_face_tracking_model_create_open( - mv_face_tracking_model_h *tracking_model) + mv_face_tracking_model_h *tracking_model) { - if (tracking_model == NULL) - { - LOGE("Tracking model can't be created because handle pointer is NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (tracking_model == NULL) { + LOGE("Tracking model can't be created because handle pointer is NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - (*tracking_model) = - static_cast(new (std::nothrow)FaceTrackingModel()); + (*tracking_model) = + static_cast(new (std::nothrow)FaceTrackingModel()); - if (*tracking_model == NULL) - { - LOGE("Failed to create media vision tracking model"); - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } + if (*tracking_model == NULL) { + LOGE("Failed to create media vision tracking model"); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } - LOGD("Tracking model [%p] has been created", *tracking_model); + LOGD("Tracking model [%p] has been created", *tracking_model); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_face_tracking_model_destroy_open( - mv_face_tracking_model_h tracking_model) + mv_face_tracking_model_h tracking_model) { - if (!tracking_model) - { - LOGE("Tracking model can't be destroyed because handle is NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (!tracking_model) { + LOGE("Tracking model can't be destroyed because handle is NULL"); + return 
MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - LOGD("Destroying media vision tracking model [%p]", tracking_model); - delete static_cast(tracking_model); - LOGD("Media vision tracking model has been destroyed"); + LOGD("Destroying media vision tracking model [%p]", tracking_model); + delete static_cast(tracking_model); + LOGD("Media vision tracking model has been destroyed"); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_face_tracking_model_prepare_open( - mv_face_tracking_model_h tracking_model, - mv_engine_config_h /*engine_cfg*/, - mv_source_h source, - mv_quadrangle_s *location) + mv_face_tracking_model_h tracking_model, + mv_engine_config_h /*engine_cfg*/, + mv_source_h source, + mv_quadrangle_s *location) { - if (!tracking_model) - { - LOGE("Can't prepare tracking model. Handle has to be not NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (!source) - { - LOGE("Can't prepare tracking model. " - "Media Vision source handle has to be not NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - FaceTrackingModel *pTrackModel = - static_cast(tracking_model); - - if (!pTrackModel) - { - LOGE("Preparation of the face tracking model failed. " - "Incorrect Media Vision Face tracking model handle is used"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - cv::Mat image; - int ret = convertSourceMV2GrayCV(source, image); - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Convertion mv_source_h to gray failed"); - return ret; - } - - cv::Rect_ roi; - if (!location) - { - ret = pTrackModel->prepare(image); - } - else - { - int minX = image.cols; - int minY = image.rows; - int maxX = 0.0; - int maxY = 0.0; - for (unsigned i = 0; i < 4; ++i) - { - minX = minX > location->points[i].x ? location->points[i].x : minX; - minY = minY > location->points[i].y ? location->points[i].y : minY; - maxX = maxX < location->points[i].x ? location->points[i].x : maxX; - maxY = maxY < location->points[i].y ? 
location->points[i].y : maxY; - } - - roi.x = minX; - roi.y = minY; - roi.width = maxX - minX; - roi.height = maxY - minY; - ret = pTrackModel->prepare(image, roi); - } - - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Error occurred when prepare face tracking model"); - return ret; - } - - LOGD("Face tracking model has been prepared"); - - return ret; + if (!tracking_model) { + LOGE("Can't prepare tracking model. Handle has to be not NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (!source) { + LOGE("Can't prepare tracking model. " + "Media Vision source handle has to be not NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + FaceTrackingModel *pTrackModel = + static_cast(tracking_model); + + if (!pTrackModel) { + LOGE("Preparation of the face tracking model failed. " + "Incorrect Media Vision Face tracking model handle is used"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + cv::Mat image; + int ret = convertSourceMV2GrayCV(source, image); + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Convertion mv_source_h to gray failed"); + return ret; + } + + cv::Rect_ roi; + if (!location) { + ret = pTrackModel->prepare(image); + } else { + int minX = image.cols; + int minY = image.rows; + int maxX = 0.0; + int maxY = 0.0; + for (unsigned i = 0; i < 4; ++i) { + minX = minX > location->points[i].x ? location->points[i].x : minX; + minY = minY > location->points[i].y ? location->points[i].y : minY; + maxX = maxX < location->points[i].x ? location->points[i].x : maxX; + maxY = maxY < location->points[i].y ? 
location->points[i].y : maxY; + } + + roi.x = minX; + roi.y = minY; + roi.width = maxX - minX; + roi.height = maxY - minY; + ret = pTrackModel->prepare(image, roi); + } + + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Error occurred when prepare face tracking model"); + return ret; + } + + LOGD("Face tracking model has been prepared"); + + return ret; } int mv_face_tracking_model_clone_open( - mv_face_tracking_model_h src, - mv_face_tracking_model_h *dst) + mv_face_tracking_model_h src, + mv_face_tracking_model_h *dst) { - if (!src || !dst) - { - LOGE("Can't clone tracking model. Both source and destination" - "tracking model handles has to be not NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (!src || !dst) { + LOGE("Can't clone tracking model. Both source and destination" + "tracking model handles has to be not NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - (*dst) = static_cast(new (std::nothrow)FaceTrackingModel()); + (*dst) = static_cast(new (std::nothrow)FaceTrackingModel()); - if (*dst == NULL) - { - LOGE("Failed to create media vision tracking model"); - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } + if (*dst == NULL) { + LOGE("Failed to create media vision tracking model"); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } - LOGD("Tracking model [%p] has been created", *dst); + LOGD("Tracking model [%p] has been created", *dst); - const FaceTrackingModel *pSrcModel = static_cast(src); - FaceTrackingModel *pDstModel = static_cast(*dst); + const FaceTrackingModel *pSrcModel = static_cast(src); + FaceTrackingModel *pDstModel = static_cast(*dst); - *pDstModel = *pSrcModel; + *pDstModel = *pSrcModel; - LOGD("Media vision tracking model has been cloned"); + LOGD("Media vision tracking model has been cloned"); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_face_tracking_model_save_open( - const char *file_name, - mv_face_tracking_model_h tracking_model) + const char *file_name, + mv_face_tracking_model_h 
tracking_model) { - if (!tracking_model) - { - LOGE("Can't save tracking model to the file. " - "Handle has to be not NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (NULL == file_name) - { - LOGE("Can't save tracking model to the file. " - "File name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - FaceTrackingModel *pTrackModel = static_cast(tracking_model); - - if (!pTrackModel) - { - LOGE("Saving of the face tracking model to file failed. " - "Incorrect Media Vision Face tracking model handle is used"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - const int ret = pTrackModel->save(std::string(file_name)); - - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Error occurred when save tracking model to the file"); - return ret; - } - - LOGD("Media vision tracking model has been saved to the file [%s]", file_name); - - return ret; + if (!tracking_model) { + LOGE("Can't save tracking model to the file. " + "Handle has to be not NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (NULL == file_name) { + LOGE("Can't save tracking model to the file. " + "File name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + FaceTrackingModel *pTrackModel = static_cast(tracking_model); + + if (!pTrackModel) { + LOGE("Saving of the face tracking model to file failed. 
" + "Incorrect Media Vision Face tracking model handle is used"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + const int ret = pTrackModel->save(std::string(file_name)); + + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Error occurred when save tracking model to the file"); + return ret; + } + + LOGD("Media vision tracking model has been saved to the file [%s]", file_name); + + return ret; } int mv_face_tracking_model_load_open( - const char *file_name, - mv_face_tracking_model_h *tracking_model) + const char *file_name, + mv_face_tracking_model_h *tracking_model) { - if (!tracking_model) - { - LOGE("Can't load tracking model from the file. " - "Handle has to be not NULL"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (NULL == file_name) - { - LOGE("Can't load tracking model from the file. " - "File name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - (*tracking_model) = - static_cast(new (std::nothrow)FaceTrackingModel()); - - if (*tracking_model == NULL) - { - LOGE("Failed to create media vision tracking model"); - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } - - FaceTrackingModel *pTrackModel = - static_cast(*tracking_model); - - if (!pTrackModel) - { - LOGE("Loading of the face tracking model from file failed. " - "Incorrect Media Vision Face tracking model handle is used"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - const int ret = pTrackModel->load(std::string(file_name)); - - if (MEDIA_VISION_ERROR_NONE != ret) - { - LOGE("Error occurred when save recognition model to the file"); - return ret; - } - - LOGD("Media vision recognition model has been loaded from the file [%s]", file_name); - - return ret; + if (!tracking_model) { + LOGE("Can't load tracking model from the file. " + "Handle has to be not NULL"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (NULL == file_name) { + LOGE("Can't load tracking model from the file. 
" + "File name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + (*tracking_model) = + static_cast(new (std::nothrow)FaceTrackingModel()); + + if (*tracking_model == NULL) { + LOGE("Failed to create media vision tracking model"); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } + + FaceTrackingModel *pTrackModel = + static_cast(*tracking_model); + + if (!pTrackModel) { + LOGE("Loading of the face tracking model from file failed. " + "Incorrect Media Vision Face tracking model handle is used"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + const int ret = pTrackModel->load(std::string(file_name)); + + if (MEDIA_VISION_ERROR_NONE != ret) { + LOGE("Error occurred when save recognition model to the file"); + return ret; + } + + LOGD("Media vision recognition model has been loaded from the file [%s]", file_name); + + return ret; } diff --git a/mv_face/face_lic/include/mv_face_lic.h b/mv_face/face_lic/include/mv_face_lic.h index dec74b4..42a39af 100644 --- a/mv_face/face_lic/include/mv_face_lic.h +++ b/mv_face/face_lic/include/mv_face_lic.h @@ -62,10 +62,10 @@ extern "C" { * @see mv_face_detected_cb */ int mv_face_detect_lic( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_face_detected_cb detected_cb, - void *user_data); + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_face_detected_cb detected_cb, + void *user_data); /********************/ @@ -120,12 +120,12 @@ int mv_face_detect_lic( * @see mv_face_recognized_cb */ int mv_face_recognize_lic( - mv_source_h source, - mv_face_recognition_model_h recognition_model, - mv_engine_config_h engine_cfg, - mv_rectangle_s *face_location, - mv_face_recognized_cb recognized_cb, - void *user_data); + mv_source_h source, + mv_face_recognition_model_h recognition_model, + mv_engine_config_h engine_cfg, + mv_rectangle_s *face_location, + mv_face_recognized_cb recognized_cb, + void *user_data); /*****************/ @@ -181,12 +181,12 @@ int mv_face_recognize_lic( * @see 
mv_face_tracked_cb */ int mv_face_track_lic( - mv_source_h source, - mv_face_tracking_model_h tracking_model, - mv_engine_config_h engine_cfg, - mv_face_tracked_cb tracked_cb, - bool do_learn, - void *user_data); + mv_source_h source, + mv_face_tracking_model_h tracking_model, + mv_engine_config_h engine_cfg, + mv_face_tracked_cb tracked_cb, + bool do_learn, + void *user_data); /********************************/ @@ -222,11 +222,11 @@ int mv_face_track_lic( * @see mv_face_eye_condition_recognized_cb */ int mv_face_eye_condition_recognize_lic( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s face_location, - mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, - void *user_data); + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s face_location, + mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, + void *user_data); /************************************/ @@ -261,11 +261,11 @@ int mv_face_eye_condition_recognize_lic( * @see mv_face_facial_expression_recognized_cb */ int mv_face_facial_expression_recognize_lic( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s face_location, - mv_face_facial_expression_recognized_cb expression_recognized_cb, - void *user_data); + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s face_location, + mv_face_facial_expression_recognized_cb expression_recognized_cb, + void *user_data); /*******************************/ /* Recognition model behavior */ @@ -307,7 +307,7 @@ int mv_face_facial_expression_recognize_lic( * @see mv_face_recognition_model_destroy_lic() */ int mv_face_recognition_model_create_lic( - mv_face_recognition_model_h *recognition_model); + mv_face_recognition_model_h *recognition_model); /** * @brief Destroys the face recognition model handle and releases all its @@ -326,7 +326,7 @@ int mv_face_recognition_model_create_lic( * @see mv_face_recognition_model_create_lic() */ int mv_face_recognition_model_destroy_lic( 
- mv_face_recognition_model_h recognition_model); + mv_face_recognition_model_h recognition_model); /** * @brief Creates a copy of existed recognition model handle and clones all its @@ -350,8 +350,8 @@ int mv_face_recognition_model_destroy_lic( * @see mv_face_recognition_model_create_lic() */ int mv_face_recognition_model_clone_lic( - mv_face_recognition_model_h src, - mv_face_recognition_model_h *dst); + mv_face_recognition_model_h src, + mv_face_recognition_model_h *dst); /** * @brief Saves recognition model to the file. @@ -381,8 +381,8 @@ int mv_face_recognition_model_clone_lic( * @see mv_face_recognition_model_create_lic() */ int mv_face_recognition_model_save_lic( - const char *file_name, - mv_face_recognition_model_h recognition_model); + const char *file_name, + mv_face_recognition_model_h recognition_model); /** * @brief Loads recognition model from file. @@ -413,8 +413,8 @@ int mv_face_recognition_model_save_lic( * @see mv_face_recognition_model_create_lic() */ int mv_face_recognition_model_load_lic( - const char *file_name, - mv_face_recognition_model_h *recognition_model); + const char *file_name, + mv_face_recognition_model_h *recognition_model); /** * @brief Adds face image example to be used for face recognition model learning @@ -455,10 +455,10 @@ int mv_face_recognition_model_load_lic( * @see mv_face_recognition_model_learn_lic() */ int mv_face_recognition_model_add_lic( - const mv_source_h source, - mv_face_recognition_model_h recognition_model, - const mv_rectangle_s *example_location, - int face_label); + const mv_source_h source, + mv_face_recognition_model_h recognition_model, + const mv_rectangle_s *example_location, + int face_label); /** * @brief Remove from @a recognition_model all collected with @@ -493,8 +493,8 @@ int mv_face_recognition_model_add_lic( * @see mv_face_recognition_model_learn_lic() */ int mv_face_recognition_model_reset_lic( - mv_face_recognition_model_h recognition_model, - const int *face_label); + 
mv_face_recognition_model_h recognition_model, + const int *face_label); /** * @brief Learns face recognition model. @@ -549,8 +549,8 @@ int mv_face_recognition_model_reset_lic( * @see mv_face_recognize_lic() */ int mv_face_recognition_model_learn_lic( - mv_engine_config_h engine_cfg, - mv_face_recognition_model_h recognition_model); + mv_engine_config_h engine_cfg, + mv_face_recognition_model_h recognition_model); /** * @brief Queries labels list and number of labels had been learned by the model. @@ -580,9 +580,9 @@ int mv_face_recognition_model_learn_lic( * @see mv_face_recognition_model_learn_lic() */ int mv_face_recognition_model_query_labels_lic( - mv_face_recognition_model_h recognition_model, - int **labels, - unsigned int *number_of_labels); + mv_face_recognition_model_h recognition_model, + int **labels, + unsigned int *number_of_labels); /***************************/ /* Tracking model behavior */ @@ -622,7 +622,7 @@ int mv_face_recognition_model_query_labels_lic( * @see mv_face_tracking_model_load_lic() */ int mv_face_tracking_model_create_lic( - mv_face_tracking_model_h *tracking_model); + mv_face_tracking_model_h *tracking_model); /** * @brief Calls this function to destroy the face tracking model handle and @@ -641,7 +641,7 @@ int mv_face_tracking_model_create_lic( * @see mv_face_tracking_model_create_lic() */ int mv_face_tracking_model_destroy_lic( - mv_face_tracking_model_h tracking_model); + mv_face_tracking_model_h tracking_model); /** * @brief Calls this function to initialize tracking model by the location of the @@ -686,10 +686,10 @@ int mv_face_tracking_model_destroy_lic( * @see mv_face_track_lic() */ int mv_face_tracking_model_prepare_lic( - mv_face_tracking_model_h tracking_model, - mv_engine_config_h engine_cfg, - mv_source_h source, - mv_quadrangle_s *location); + mv_face_tracking_model_h tracking_model, + mv_engine_config_h engine_cfg, + mv_source_h source, + mv_quadrangle_s *location); /** * @brief Calls this function to make a copy of 
existed tracking model handle and @@ -712,8 +712,8 @@ int mv_face_tracking_model_prepare_lic( * @see mv_face_tracking_model_create_lic() */ int mv_face_tracking_model_clone_lic( - mv_face_tracking_model_h src, - mv_face_tracking_model_h *dst); + mv_face_tracking_model_h src, + mv_face_tracking_model_h *dst); /** * @brief Calls this method to save tracking model to the file. @@ -739,8 +739,8 @@ int mv_face_tracking_model_clone_lic( * @see mv_face_tracking_model_create_lic() */ int mv_face_tracking_model_save_lic( - const char *file_name, - mv_face_tracking_model_h tracking_model); + const char *file_name, + mv_face_tracking_model_h tracking_model); /** * @brief Calls this method to load a tracking model from file. @@ -768,8 +768,8 @@ int mv_face_tracking_model_save_lic( * @see mv_face_tracking_model_create_lic() */ int mv_face_tracking_model_load_lic( - const char *file_name, - mv_face_tracking_model_h *tracking_model); + const char *file_name, + mv_face_tracking_model_h *tracking_model); #ifdef __cplusplus } diff --git a/mv_face/face_lic/src/mv_face_lic.c b/mv_face/face_lic/src/mv_face_lic.c index 00a516e..e37f367 100644 --- a/mv_face/face_lic/src/mv_face_lic.c +++ b/mv_face/face_lic/src/mv_face_lic.c @@ -21,12 +21,12 @@ /******************/ int mv_face_detect_lic( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_face_detected_cb detected_cb, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_face_detected_cb detected_cb, + void *user_data) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } @@ -35,14 +35,14 @@ int mv_face_detect_lic( /********************/ int mv_face_recognize_lic( - mv_source_h source, - mv_face_recognition_model_h recognition_model, - mv_engine_config_h engine_cfg, - mv_rectangle_s *face_location, - mv_face_recognized_cb recognized_cb, - void *user_data) + mv_source_h source, + mv_face_recognition_model_h recognition_model, + mv_engine_config_h engine_cfg, + 
mv_rectangle_s *face_location, + mv_face_recognized_cb recognized_cb, + void *user_data) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } @@ -51,14 +51,14 @@ int mv_face_recognize_lic( /*****************/ int mv_face_track_lic( - mv_source_h source, - mv_face_tracking_model_h tracking_model, - mv_engine_config_h engine_cfg, - mv_face_tracked_cb tracked_cb, - bool do_learn, - void *user_data) + mv_source_h source, + mv_face_tracking_model_h tracking_model, + mv_engine_config_h engine_cfg, + mv_face_tracked_cb tracked_cb, + bool do_learn, + void *user_data) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } @@ -67,13 +67,13 @@ int mv_face_track_lic( /********************************/ int mv_face_eye_condition_recognize_lic( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s face_location, - mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s face_location, + mv_face_eye_condition_recognized_cb eye_condition_recognized_cb, + void *user_data) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } @@ -82,13 +82,13 @@ int mv_face_eye_condition_recognize_lic( /************************************/ int mv_face_facial_expression_recognize_lic( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s face_location, - mv_face_facial_expression_recognized_cb expression_recognized_cb, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s face_location, + mv_face_facial_expression_recognized_cb expression_recognized_cb, + void *user_data) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } @@ -97,67 +97,67 @@ int mv_face_facial_expression_recognize_lic( /******************************/ int mv_face_recognition_model_create_lic( - 
mv_face_recognition_model_h *recognition_model) + mv_face_recognition_model_h *recognition_model) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_recognition_model_destroy_lic( - mv_face_recognition_model_h recognition_model) + mv_face_recognition_model_h recognition_model) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_recognition_model_clone_lic( - mv_face_recognition_model_h src, - mv_face_recognition_model_h *dst) + mv_face_recognition_model_h src, + mv_face_recognition_model_h *dst) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_recognition_model_save_lic( - const char *file_name, - mv_face_recognition_model_h recognition_model) + const char *file_name, + mv_face_recognition_model_h recognition_model) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_recognition_model_load_lic( - const char *file_name, - mv_face_recognition_model_h recognition_model_h) + const char *file_name, + mv_face_recognition_model_h recognition_model_h) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_recognition_model_add_lic( - const mv_source_h source, - mv_face_recognition_model_h recognition_model, - const mv_rectangle_s *example_location, - int face_label) + const mv_source_h source, + mv_face_recognition_model_h recognition_model, + const mv_rectangle_s *example_location, + int face_label) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_recognition_model_reset_lic( - mv_face_recognition_model_h recognition_model, - const int *face_label) + mv_face_recognition_model_h recognition_model, + const int *face_label) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_recognition_model_learn_lic( - mv_engine_config_h 
engine_cfg, - mv_face_recognition_model_h recognition_model) + mv_engine_config_h engine_cfg, + mv_face_recognition_model_h recognition_model) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_recognition_model_query_labels_lic( - mv_face_recognition_model_h recognition_model, - int **labels, - unsigned int *number_of_labels) + mv_face_recognition_model_h recognition_model, + int **labels, + unsigned int *number_of_labels) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } @@ -166,43 +166,43 @@ int mv_face_recognition_model_query_labels_lic( /***************************/ int mv_face_tracking_model_create_lic( - mv_face_tracking_model_h *tracking_model) + mv_face_tracking_model_h *tracking_model) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_tracking_model_destroy_lic( - mv_face_tracking_model_h tracking_model) + mv_face_tracking_model_h tracking_model) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_tracking_model_prepare_lic( - mv_face_tracking_model_h tracking_model, - mv_engine_config_h engine_cfg, - mv_source_h source, - mv_quadrangle_s *location) + mv_face_tracking_model_h tracking_model, + mv_engine_config_h engine_cfg, + mv_source_h source, + mv_quadrangle_s *location) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_tracking_model_clone_lic( - mv_face_tracking_model_h src, - mv_face_tracking_model_h *dst) + mv_face_tracking_model_h src, + mv_face_tracking_model_h *dst) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_tracking_model_save_lic( - const char *file_name, - mv_face_tracking_model_h tracking_model) + const char *file_name, + mv_face_tracking_model_h tracking_model) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return 
MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_face_tracking_model_load_lic( - const char *file_name, - mv_face_tracking_model_h tracking_model) + const char *file_name, + mv_face_tracking_model_h tracking_model) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } diff --git a/mv_image/image/include/ImageConfig.h b/mv_image/image/include/ImageConfig.h index 2d43430..8f1b348 100644 --- a/mv_image/image/include/ImageConfig.h +++ b/mv_image/image/include/ImageConfig.h @@ -24,28 +24,24 @@ * @brief This file contains Image Module utility. */ -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { /** * @brief Contains parameters for features extracting from image objects. * * @since_tizen 3.0 */ -struct FeaturesExtractingParams -{ - FeaturesExtractingParams( - double scaleFactor, - int maximumFeaturesNumber); +struct FeaturesExtractingParams { + FeaturesExtractingParams( + double scaleFactor, + int maximumFeaturesNumber); - FeaturesExtractingParams(); + FeaturesExtractingParams(); - double mScaleFactor; /**< Recognition scale factor for the ORB detector. */ + double mScaleFactor; /**< Recognition scale factor for the ORB detector. */ - int mMaximumFeaturesNumber; /**< Maximum number of features, which will be - extracted from object image. */ + int mMaximumFeaturesNumber; /**< Maximum number of features, which will be + extracted from object image. */ }; /** @@ -53,24 +49,23 @@ struct FeaturesExtractingParams * * @since_tizen 3.0 */ -struct RecognitionParams -{ - RecognitionParams( - int minMatchesNumber, - double requiredMatchesPart, - double allowableMatchesPartError); +struct RecognitionParams { + RecognitionParams( + int minMatchesNumber, + double requiredMatchesPart, + double allowableMatchesPartError); - RecognitionParams(); + RecognitionParams(); - int mMinMatchesNumber; /**< The minimum matches number, which - will be taken into account for image objects recognition. 
*/ + int mMinMatchesNumber; /**< The minimum matches number, which + will be taken into account for image objects recognition. */ - double mRequiredMatchesPart; /**< The part of matches, which will be taken - into account for image objects recognition. Too low value will - result in unsustainable behavior, but effect of object overlapping - will be reduced. Value can be from 0 to 1.*/ + double mRequiredMatchesPart; /**< The part of matches, which will be taken + into account for image objects recognition. Too low value will + result in unsustainable behavior, but effect of object overlapping + will be reduced. Value can be from 0 to 1.*/ - double mAllowableMatchesPartError; /**< Allowable error of matches number. */ + double mAllowableMatchesPartError; /**< Allowable error of matches number. */ }; /** @@ -79,29 +74,31 @@ struct RecognitionParams * * @since_tizen 3.0 */ -struct StabilizationParams -{ - StabilizationParams( - int historyAmount, - double allowableShift, - double stabilizationSpeed, - double stabilizationAcceleration); - - StabilizationParams(); - - int mHistoryAmount; /**< Number of previous recognition results, which - will influence the stabilization. */ - - double mAllowableShift; /**< Relative value of maximum shift per one frame, - which will be ignored by stabilization (relative to the object size - in the current frame). */ - - double mStabilizationSpeed; /**< Start speed with which the object will be - stabilized. */ - - double mStabilizationAcceleration; /**< Acceleration with which the object - will be stabilized. (relative to the distance from current location - to stabilized location). Value can be from 0 to 1.*/ +struct StabilizationParams { + StabilizationParams( + int historyAmount, + double allowableShift, + double stabilizationSpeed, + double stabilizationAcceleration); + + StabilizationParams(); + + int mHistoryAmount; /**< Number of previous recognition results, which + will influence the stabilization. 
*/ + + double mAllowableShift; /**< Relative value of maximum shift per one frame, + which will be ignored by stabilization. + It is relative to the object size + in the current frame. */ + + double mStabilizationSpeed; /**< Start speed with which the object will be + stabilized. */ + + double mStabilizationAcceleration; /**< Acceleration with which the object + will be stabilized. + It is relative to the distance from + current location to stabilized location. + Value can be from 0 to 1.*/ }; /** @@ -109,28 +106,28 @@ struct StabilizationParams * * @since_tizen 3.0 */ -struct TrackingParams -{ - TrackingParams( - FeaturesExtractingParams framesFeaturesExtractingParams, - RecognitionParams recognitionParams, - StabilizationParams stabilizationParams, - double expectedOffset); +struct TrackingParams { + TrackingParams( + FeaturesExtractingParams framesFeaturesExtractingParams, + RecognitionParams recognitionParams, + StabilizationParams stabilizationParams, + double expectedOffset); - TrackingParams(); + TrackingParams(); - FeaturesExtractingParams mFramesFeaturesExtractingParams; /**< Parameters - for extracting features from frames. */ + FeaturesExtractingParams mFramesFeaturesExtractingParams; /**< Parameters + for extracting features + from frames. */ - RecognitionParams mRecognitionParams; /**< Parameters for intermediate - recognition. */ + RecognitionParams mRecognitionParams; /**< Parameters for intermediate + recognition. */ - StabilizationParams mStabilizationParams; /**< Parameters for contour - stabilization during tracking. */ + StabilizationParams mStabilizationParams; /**< Parameters for contour + stabilization during tracking. */ - double mExpectedOffset; /**< Relative offset value, for which expected the - object offset. (relative to the object size in the current - frame). */ + double mExpectedOffset; /**< Relative offset value, for which expected the + object offset. The value is relative to + the object size in the current frame. 
*/ }; } /* Image */ diff --git a/mv_image/image/include/ImageContourStabilizator.h b/mv_image/image/include/ImageContourStabilizator.h index 1fae797..4d159cd 100644 --- a/mv_image/image/include/ImageContourStabilizator.h +++ b/mv_image/image/include/ImageContourStabilizator.h @@ -27,11 +27,8 @@ * during tracking. */ -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { /** * @class ImageContourStabilizator * @brief This class contains functionality for image contour stabilization @@ -39,61 +36,57 @@ namespace Image * * @since_tizen 3.0 */ -class ImageContourStabilizator -{ +class ImageContourStabilizator { public: - - /** - * @brief @ref ImageContourStabilizator default constructor. - * - * @since_tizen 3.0 - */ - ImageContourStabilizator(); - - /** - * @brief Stabilizes @a contour. - * - * @since_tizen 3.0 - * @remarks Call this function alternately for each contour from sequence - * @param [in,out] contour @ref contour, which will be stabilized - * @param [in] params configuration parameters - * @return true if contour is stabilized, otherwise return false - */ - bool stabilize( - std::vector& contour, - const StabilizationParams& params); - - /** - * @brief Resets stabilization process. - * - * @since_tizen 3.0 - * @remarks Call it before starting track on the new sequence of contours. - */ - void reset(void); + /** + * @brief @ref ImageContourStabilizator default constructor. + * + * @since_tizen 3.0 + */ + ImageContourStabilizator(); + + /** + * @brief Stabilizes @a contour. + * + * @since_tizen 3.0 + * @remarks Call this function alternately for each contour from sequence + * @param [in,out] contour @ref contour, which will be stabilized + * @param [in] params configuration parameters + * @return true if contour is stabilized, otherwise return false + */ + bool stabilize( + std::vector& contour, + const StabilizationParams& params); + + /** + * @brief Resets stabilization process. 
+ * + * @since_tizen 3.0 + * @remarks Call it before starting track on the new sequence of contours. + */ + void reset(void); private: - - std::vector computeStabilizedQuadrangleContour(void); + std::vector computeStabilizedQuadrangleContour(void); private: + static const size_t MovingHistoryAmount = 3u; - static const size_t MovingHistoryAmount = 3u; - - std::vector m_speeds; + std::vector m_speeds; - std::vector m_currentCornersSpeed; + std::vector m_currentCornersSpeed; - std::deque > m_movingHistory; + std::deque > m_movingHistory; - std::vector m_lastStabilizedContour; + std::vector m_lastStabilizedContour; - size_t m_currentHistoryAmount; + size_t m_currentHistoryAmount; - int m_tempContourIndex; + int m_tempContourIndex; - std::vector m_priorities; + std::vector m_priorities; - bool m_isPrepared; + bool m_isPrepared; }; } /* Image */ diff --git a/mv_image/image/include/ImageMathUtil.h b/mv_image/image/include/ImageMathUtil.h index ebc95d7..f839ac9 100644 --- a/mv_image/image/include/ImageMathUtil.h +++ b/mv_image/image/include/ImageMathUtil.h @@ -24,15 +24,12 @@ * @brief This file contains math utility for Image Module. */ -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { const size_t MinimumNumberOfFeatures = 4u; /* Minimum number of features - when perspective transform - parameters calculation - have sense */ + when perspective transform + parameters calculation + have sense */ const size_t NumberOfQuadrangleCorners = 4u; /* Number of quadrangle corneres */ @@ -45,8 +42,8 @@ const size_t NumberOfQuadrangleCorners = 4u; /* Number of quadrangle corneres */ * @return distance between two points */ float getDistance( - const cv::Point2f& point1, - const cv::Point2f& point2); + const cv::Point2f& point1, + const cv::Point2f& point2); /** * @brief Calculates area of triangle. 
@@ -58,9 +55,9 @@ float getDistance( * @return area of triangle */ float getTriangleArea( - const cv::Point2f& point1, - const cv::Point2f& point2, - const cv::Point2f& point3); + const cv::Point2f& point1, + const cv::Point2f& point2, + const cv::Point2f& point3); /** * @brief Calculates area of quadrangle. @@ -70,7 +67,7 @@ float getTriangleArea( * @return area of quadrangle */ float getQuadrangleArea( - const cv::Point2f points[NumberOfQuadrangleCorners]); + const cv::Point2f points[NumberOfQuadrangleCorners]); } /* Image */ } /* MediaVision */ diff --git a/mv_image/image/include/ImageObject.h b/mv_image/image/include/ImageObject.h index a494554..4e33e55 100644 --- a/mv_image/image/include/ImageObject.h +++ b/mv_image/image/include/ImageObject.h @@ -26,11 +26,8 @@ * @brief This file contains the @ref ImageObject class. */ -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { /** * @class ImageObject * @brief This class contains the image information, which will @@ -38,180 +35,174 @@ namespace Image * * @since_tizen 3.0 */ -class ImageObject -{ - +class ImageObject { public: - - /** - * @brief @ref ImageObject default constructor. - * - * @since_tizen 3.0 - */ - ImageObject(); - - /** - * @brief @ref ImageObject constructor based on image. - * - * @since_tizen 3.0 - * @remarks Detects keypoints and extracts features from image and creates - * new @ref ImageObject - * @param [in] image The image for which instance of @ref ImageObject - * will be created - * @param [in] params Features extracting parameters - */ - ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params); - - /** - * @brief @ref ImageObject copy constructor. - * @details Creates copy of @ref ImageObject - * - * @since_tizen 3.0 - * @param [in] copy @ref ImageObject which will be copied - */ - ImageObject(const ImageObject& copy); - - /** - * @brief @ref ImageObject copy assignment operator. 
- * @details Fills the information based on the @a copy - * - * @since_tizen 3.0 - * @param [in] copy @ref ImageObject which will be copied - * - */ - ImageObject& operator=(const ImageObject& copy); - - /** - * @brief @ref ImageObject destructor. - * - * @since_tizen 3.0 - */ - virtual ~ImageObject(); - - /** - * @brief Fills @ref ImageObject class based on image. - * @details Detects keypoints and extracts features from image and creates - * new @ref ImageObject - * - * @since_tizen 3.0 - * @param [in] image The image for which instance of @ref ImageObject - * will be created - * @param [in] params Features extracting parameters - */ - void fill(const cv::Mat& image, const FeaturesExtractingParams& params); - - /** - * @brief Fills @ref ImageObject class based on image. - * @details Detects keypoints and extracts features from image and creates - * new @ref ImageObject - * - * @since_tizen 3.0 - * @param [in] image The image for which instance of @ref - * ImageObject will be created - * @param [in] boundingBox Bounding box of the object being analyzed in - * the @a image - * @param [in] params Features extracting parameters - * @return @a true on success, otherwise a @a false value - * @retval true Successful - * @retval false Invalid ROI (bounding box) - */ - bool fill( - const cv::Mat& image, - const cv::Rect& boundingBox, - const FeaturesExtractingParams& params); - - /** - * @brief Gets a value that determines how well an @ref ImageObject can be recognized. - * @details Confidence can be from 0 to 1. If the recognition rate is 0 object can - * not be recognized - * - * @since_tizen 3.0 - * @return A value that determines how well an @ref ImageObject can be recognized. - */ - float getRecognitionRate(void) const; - - /** - * @brief Check whether the object is filled. - * @details Image object is empty if it wasn't filled. - * - * @since_tizen 3.0 - * @remarks Empty object can not be recognized or tracked. 
Fill the object - * by using corresponding constructor or function @ref fill() to - * make image object valid. Also you can load image object which is - * not empty by using @ref load(). - * @return @c false if object is filled, otherwise return @c true - */ - bool isEmpty() const; - - /** - * @brief Sets a label for the image object. - * - * @since_tizen 3.0 - * @param [in] label The label which will be assigned to the image object - */ - void setLabel(int label); - - /** - * @brief Gets a label of object. - * - * @since_tizen 3.0 - * @param [out] label The label of image object - * @return @c true if object is labeled, otherwise return @c false - */ - bool getLabel(int& label) const; - - /** - * @brief Stores the @ref ImageObject in a file. - * - * @since_tizen 3.0 - * @param [in] fileName File name which will be generated - * @return @a 0 on success, otherwise a negative error value - */ - int save(const char *fileName) const; - - /** - * @brief Loads the @ref ImageObject from the file. - * - * @since_tizen 3.0 - * @param [in] fileName File name from which will be loaded an @ref ImageObject - * @return @a 0 on success, otherwise a negative error value - */ - int load(const char *fileName); + /** + * @brief @ref ImageObject default constructor. + * + * @since_tizen 3.0 + */ + ImageObject(); + + /** + * @brief @ref ImageObject constructor based on image. + * + * @since_tizen 3.0 + * @remarks Detects keypoints and extracts features from image and creates + * new @ref ImageObject + * @param [in] image The image for which instance of @ref ImageObject + * will be created + * @param [in] params Features extracting parameters + */ + ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params); + + /** + * @brief @ref ImageObject copy constructor. 
+ * @details Creates copy of @ref ImageObject + * + * @since_tizen 3.0 + * @param [in] copy @ref ImageObject which will be copied + */ + ImageObject(const ImageObject& copy); + + /** + * @brief @ref ImageObject copy assignment operator. + * @details Fills the information based on the @a copy + * + * @since_tizen 3.0 + * @param [in] copy @ref ImageObject which will be copied + * + */ + ImageObject& operator=(const ImageObject& copy); + + /** + * @brief @ref ImageObject destructor. + * + * @since_tizen 3.0 + */ + virtual ~ImageObject(); + + /** + * @brief Fills @ref ImageObject class based on image. + * @details Detects keypoints and extracts features from image and creates + * new @ref ImageObject + * + * @since_tizen 3.0 + * @param [in] image The image for which instance of @ref ImageObject + * will be created + * @param [in] params Features extracting parameters + */ + void fill(const cv::Mat& image, const FeaturesExtractingParams& params); + + /** + * @brief Fills @ref ImageObject class based on image. + * @details Detects keypoints and extracts features from image and creates + * new @ref ImageObject + * + * @since_tizen 3.0 + * @param [in] image The image for which instance of @ref + * ImageObject will be created + * @param [in] boundingBox Bounding box of the object being analyzed in + * the @a image + * @param [in] params Features extracting parameters + * @return @a true on success, otherwise a @a false value + * @retval true Successful + * @retval false Invalid ROI (bounding box) + */ + bool fill( + const cv::Mat& image, + const cv::Rect& boundingBox, + const FeaturesExtractingParams& params); + + /** + * @brief Gets a value that determines how well an @ref ImageObject can be recognized. + * @details Confidence can be from 0 to 1. If the recognition rate is 0 object can + * not be recognized + * + * @since_tizen 3.0 + * @return A value that determines how well an @ref ImageObject can be recognized. 
+ */ + float getRecognitionRate(void) const; + + /** + * @brief Check whether the object is filled. + * @details Image object is empty if it wasn't filled. + * + * @since_tizen 3.0 + * @remarks Empty object can not be recognized or tracked. Fill the object + * by using corresponding constructor or function @ref fill() to + * make image object valid. Also you can load image object which is + * not empty by using @ref load(). + * @return @c false if object is filled, otherwise return @c true + */ + bool isEmpty() const; + + /** + * @brief Sets a label for the image object. + * + * @since_tizen 3.0 + * @param [in] label The label which will be assigned to the image object + */ + void setLabel(int label); + + /** + * @brief Gets a label of object. + * + * @since_tizen 3.0 + * @param [out] label The label of image object + * @return @c true if object is labeled, otherwise return @c false + */ + bool getLabel(int& label) const; + + /** + * @brief Stores the @ref ImageObject in a file. + * + * @since_tizen 3.0 + * @param [in] fileName File name which will be generated + * @return @a 0 on success, otherwise a negative error value + */ + int save(const char *fileName) const; + + /** + * @brief Loads the @ref ImageObject from the file. 
+ * + * @since_tizen 3.0 + * @param [in] fileName File name from which will be loaded an @ref ImageObject + * @return @a 0 on success, otherwise a negative error value + */ + int load(const char *fileName); private: - - static const int MinWidth = 5; - static const int MinHeight = 5; + static const int MinWidth = 5; + static const int MinHeight = 5; private: + void extractFeatures( + const cv::Mat& image, + const FeaturesExtractingParams& params); - void extractFeatures( - const cv::Mat& image, - const FeaturesExtractingParams& params); - - void computeRecognitionRate(const cv::Mat& image); + void computeRecognitionRate(const cv::Mat& image); private: + bool m_isEmpty; - bool m_isEmpty; - - bool m_isLabeled; + bool m_isLabeled; - int m_label; + int m_label; - std::vector m_boundingContour; + std::vector m_boundingContour; - std::vector m_objectKeypoints; + std::vector m_objectKeypoints; - cv::Mat m_objectDescriptors; + cv::Mat m_objectDescriptors; - float m_recognitionRate; + float m_recognitionRate; - friend class ImageRecognizer; + friend class ImageRecognizer; - friend std::ostream& operator << (std::ostream& os, const ImageObject& obj); + friend std::ostream& operator << (std::ostream& os, const ImageObject& obj); - friend std::istream& operator >> (std::istream& is, ImageObject& obj); + friend std::istream& operator >> (std::istream& is, ImageObject& obj); }; } /* Image */ diff --git a/mv_image/image/include/ImageRecognizer.h b/mv_image/image/include/ImageRecognizer.h index 8494e3a..5117a27 100644 --- a/mv_image/image/include/ImageRecognizer.h +++ b/mv_image/image/include/ImageRecognizer.h @@ -28,86 +28,79 @@ * @brief This file contains functionality for image object recognition. */ -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { /** * @class ImageRecognizer * @brief This class contains functionality for image object recognition. 
* * @since_tizen 3.0 */ -class ImageRecognizer -{ +class ImageRecognizer { public: - - /** - * @brief @ref ImageRecognizer constructor based on scene image. - * - * @since_tizen 3.0 - * @param [in] sceneImage The scene in which image objects will be recognized - * @param [in] params Scene features extracting parameters - */ - ImageRecognizer(const cv::Mat& sceneImage, - const FeaturesExtractingParams& params); - - /** - * @brief @ref ImageRecognizer constructor based on thes scene @ref ImageObject. - * - * @since_tizen 3.0 - * @param [in] scene The scene for which the objects will be recognized by - * calling method recognize() - */ - ImageRecognizer(const ImageObject& scene); - - /** - * @brief @ref ImageRecognizer destructor. - * - * @since_tizen 3.0 - */ - virtual ~ImageRecognizer(); - - /** - * @brief Recognizes the @a target on the scene. - * - * @since_tizen 3.0 - * @param [in] target @ref ImageObject, which will be recognized - * @param [in] params Recognition parameters - * @param [out] contour The result contour of @a target object on the scene - * @return true if object is found on the scene, otherwise return false - */ - bool recognize( - const ImageObject& target, - const RecognitionParams& params, - std::vector& contour) const; + /** + * @brief @ref ImageRecognizer constructor based on scene image. + * + * @since_tizen 3.0 + * @param [in] sceneImage The scene in which image objects will be recognized + * @param [in] params Scene features extracting parameters + */ + ImageRecognizer(const cv::Mat& sceneImage, + const FeaturesExtractingParams& params); + + /** + * @brief @ref ImageRecognizer constructor based on thes scene @ref ImageObject. + * + * @since_tizen 3.0 + * @param [in] scene The scene for which the objects will be recognized by + * calling method recognize() + */ + ImageRecognizer(const ImageObject& scene); + + /** + * @brief @ref ImageRecognizer destructor. 
+ * + * @since_tizen 3.0 + */ + virtual ~ImageRecognizer(); + + /** + * @brief Recognizes the @a target on the scene. + * + * @since_tizen 3.0 + * @param [in] target @ref ImageObject, which will be recognized + * @param [in] params Recognition parameters + * @param [out] contour The result contour of @a target object on the scene + * @return true if object is found on the scene, otherwise return false + */ + bool recognize( + const ImageObject& target, + const RecognitionParams& params, + std::vector& contour) const; private: + ImageRecognizer(); - ImageRecognizer(); + bool findHomophraphyMatrix( + const ImageObject& target, + const RecognitionParams& params, + cv::Mat& homophraphyMatrix) const; - bool findHomophraphyMatrix( - const ImageObject& target, - const RecognitionParams& params, - cv::Mat& homophraphyMatrix) const; + size_t matchesSelection( + std::vector& examples, + unsigned int filterAmount, unsigned int allowableError) const; - size_t matchesSelection( - std::vector& examples, - unsigned int filterAmount, unsigned int allowableError) const; + float computeLinearSupportElement( + const std::vector& examples, + int requiredNumber, int leftLimit, int rightLimit) const; - float computeLinearSupportElement( - const std::vector& examples, - int requiredNumber, int leftLimit, int rightLimit) const; - - static bool isPossibleQuadrangleCorners( - const cv::Point2f corners[NumberOfQuadrangleCorners]); + static bool isPossibleQuadrangleCorners( + const cv::Point2f corners[NumberOfQuadrangleCorners]); private: + ImageObject m_scene; - ImageObject m_scene; - - cv::BFMatcher m_matcher; + cv::BFMatcher m_matcher; }; } /* Image */ diff --git a/mv_image/image/include/ImageTracker.h b/mv_image/image/include/ImageTracker.h index 2bfd5b1..ea577f4 100644 --- a/mv_image/image/include/ImageTracker.h +++ b/mv_image/image/include/ImageTracker.h @@ -26,75 +26,65 @@ * @brief This file contains functionality for image object tracking. 
*/ -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { class ImageRecognizer; class ImageTrackingModel; - /** * @class ImageTracker * @brief This class contains functionality for image object tracking. * * @since_tizen 3.0 */ -class ImageTracker -{ +class ImageTracker { private: + struct RecognitionInfo { + cv::Mat mFrame; - struct RecognitionInfo - { - cv::Mat mFrame; - - RecognitionParams mRecognitionParams; + RecognitionParams mRecognitionParams; - FeaturesExtractingParams mSceneFeaturesExtractingParams; + FeaturesExtractingParams mSceneFeaturesExtractingParams; - ImageTrackingModel *mpTarget; - }; + ImageTrackingModel *mpTarget; + }; - static void *recognitionThreadFunc(void *recognitionInfo); + static void *recognitionThreadFunc(void *recognitionInfo); public: - - /** - * @brief @ref ImageTracker constructor based on tracking algorithm - * parameters. - * - * @since_tizen 3.0 - * @param [in] trackingParams Parameters for image objects tracking - */ - ImageTracker(const TrackingParams& trackingParams); - - /** - * @brief Tracks the @a target for the video stream consisting of frames. - * - * @since_tizen 3.0 - * @remarks Call this function alternately for each frame - * @param [in] frame Current frame of the video stream - * @param [in,out] target @ref ImageTrackingModel, which will be tracked - */ - void track(const cv::Mat& frame, ImageTrackingModel& target); + /** + * @brief @ref ImageTracker constructor based on tracking algorithm + * parameters. + * + * @since_tizen 3.0 + * @param [in] trackingParams Parameters for image objects tracking + */ + ImageTracker(const TrackingParams& trackingParams); + + /** + * @brief Tracks the @a target for the video stream consisting of frames. 
+ * + * @since_tizen 3.0 + * @remarks Call this function alternately for each frame + * @param [in] frame Current frame of the video stream + * @param [in,out] target @ref ImageTrackingModel, which will be tracked + */ + void track(const cv::Mat& frame, ImageTrackingModel& target); private: + void trackDetectedObject( + const cv::Mat& frame, + ImageTrackingModel& target); - void trackDetectedObject( - const cv::Mat& frame, - ImageTrackingModel& target); - - void trackUndetectedObject( - const cv::Mat& frame, - ImageTrackingModel& target); + void trackUndetectedObject( + const cv::Mat& frame, + ImageTrackingModel& target); - cv::Rect computeExpectedArea( - const ImageTrackingModel& target, - const cv::Size& frameSize); + cv::Rect computeExpectedArea( + const ImageTrackingModel& target, + const cv::Size& frameSize); private: - - TrackingParams m_trackingParams; + TrackingParams m_trackingParams; }; } /* Image */ diff --git a/mv_image/image/include/ImageTrackingModel.h b/mv_image/image/include/ImageTrackingModel.h index 2f55c2d..2c07b99 100644 --- a/mv_image/image/include/ImageTrackingModel.h +++ b/mv_image/image/include/ImageTrackingModel.h @@ -33,184 +33,176 @@ * @brief This file contains the @ref ImageTrackingModel class. */ -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { class ImageContourStabilizator; - /** * @class ImageTrackingModel * @brief This class contains the tracking functionality for image objects. * * @since_tizen 3.0 */ -class ImageTrackingModel -{ +class ImageTrackingModel { private: - /** - * @brief @ref ImageTrackingModel state enumeration. - * - * @since_tizen 3.0 - */ - enum State - { - Invalid, /**< Invalid tracking model can not be tracked. Set not - empty image object as target by using function - @ref setTarget() to make tracking model valid, also - you can load valid tracking model by using @ref load() */ - Undetected, /**< The object was not recognized on the last frame. 
Ready - for further recognition */ - Appeared, /**< The object was recognized on one of the last frames - after its absence */ - Tracked, /**< The object was recognized on the last frame. Its - location can be obtained by calling method getLocation() */ - InProcess /**< The object is in the recognition process */ - }; + /** + * @brief @ref ImageTrackingModel state enumeration. + * + * @since_tizen 3.0 + */ + enum State { + Invalid, /**< Invalid tracking model can not be tracked. Set not + empty image object as target by using function + @ref setTarget() to make tracking model valid, also + you can load valid tracking model by using @ref load() */ + Undetected, /**< The object was not recognized on the last frame. Ready + for further recognition */ + Appeared, /**< The object was recognized on one of the last frames + after its absence */ + Tracked, /**< The object was recognized on the last frame. Its + location can be obtained by calling method getLocation() */ + InProcess /**< The object is in the recognition process */ + }; public: - - /** - * @brief @ref ImageTrackingModel default constructor - * - * @since_tizen 3.0 - */ - ImageTrackingModel(); - - /** - * @brief @ref ImageTrackingModel constructor based on tracking algorithm - * parameters. - * - * @since_tizen 3.0 - * @param[in] recognitionObject @ref ImageObject which will be tracked - */ - ImageTrackingModel(const ImageObject& recognitionObject); - - /** - * @brief @ref ImageTrackingModel copy constructor. - * @details Creates copy of @ref ImageTrackingModel - * - * @since_tizen 3.0 - * @param [in] copy @ref ImageTrackingModel which will be copied - */ - ImageTrackingModel(const ImageTrackingModel& copy); - - /** - * @brief @ref ImageTrackingModel destructor. - * - * @since_tizen 3.0 - */ - ~ImageTrackingModel(); - - /** - * @brief Sets @ref ImageObject as target which will be tracked. 
- * - * @since_tizen 3.0 - * @param [in] target @ref ImageObject which will be tracked - */ - void setTarget(const ImageObject& target); - - /** - * @brief Checks whether the tracking model is valid for tracking. - * @details Image tracking model is valid if its target is set and not empty. - * - * @since_tizen 3.0 - * @remarks Invalid tracking model can not be tracked. Set not empty target - * by using corresponding constructor or function @ref setTarget() - * to make tracking model valid. Also you can load valid tracking - * model by using @ref load(). - * @return @c true if tracking model is valid, otherwise return @c false - */ - bool isValid() const; - - /** - * @brief Refreshes tracking model. - * - * @since_tizen 3.0 - * @remarks Call it before starting track on the new video stream. - */ - void refresh(void); - - /** - * @brief @ref ImageTrackingModel copy assignment operator. - * @details Fills the information based on the @a copy - * - * @since_tizen 3.0 - * @param [in] copy @ref ImageTrackingModel which will be copied - */ - ImageTrackingModel& operator=(const ImageTrackingModel& copy); - - /** - * @brief Stores the @ref ImageTrackingModel in a file. - * - * @since_tizen 3.0 - * @param [in] filepath File name which will be generated - * @return @a 0 on success, otherwise a negative error value - */ - int save(const char *filepath) const; - - /** - * @brief Loads the @ref ImageTrackingModel from the file. - * - * @since_tizen 3.0 - * @param [in] filepath File name from which will be loaded a model - * @return @a 0 on success, otherwise a negative error value - */ - int load(const char *filepath); - - /** - * @brief Checks state of the @ref ImageTrackingModel. - * - * @since_tizen 3.0 - * @return @a true if object was detected on the last processed frame, - * otherwise a @a false value - */ - bool isDetected() const; - - /** - * @brief Gets last location of the @ref ImageTrackingModel. 
- * - * @since_tizen 3.0 - * @return Last detected location - */ - std::vector getLastlocation() const; + /** + * @brief @ref ImageTrackingModel default constructor + * + * @since_tizen 3.0 + */ + ImageTrackingModel(); + + /** + * @brief @ref ImageTrackingModel constructor based on tracking algorithm + * parameters. + * + * @since_tizen 3.0 + * @param[in] recognitionObject @ref ImageObject which will be tracked + */ + ImageTrackingModel(const ImageObject& recognitionObject); + + /** + * @brief @ref ImageTrackingModel copy constructor. + * @details Creates copy of @ref ImageTrackingModel + * + * @since_tizen 3.0 + * @param [in] copy @ref ImageTrackingModel which will be copied + */ + ImageTrackingModel(const ImageTrackingModel& copy); + + /** + * @brief @ref ImageTrackingModel destructor. + * + * @since_tizen 3.0 + */ + ~ImageTrackingModel(); + + /** + * @brief Sets @ref ImageObject as target which will be tracked. + * + * @since_tizen 3.0 + * @param [in] target @ref ImageObject which will be tracked + */ + void setTarget(const ImageObject& target); + + /** + * @brief Checks whether the tracking model is valid for tracking. + * @details Image tracking model is valid if its target is set and not empty. + * + * @since_tizen 3.0 + * @remarks Invalid tracking model can not be tracked. Set not empty target + * by using corresponding constructor or function @ref setTarget() + * to make tracking model valid. Also you can load valid tracking + * model by using @ref load(). + * @return @c true if tracking model is valid, otherwise return @c false + */ + bool isValid() const; + + /** + * @brief Refreshes tracking model. + * + * @since_tizen 3.0 + * @remarks Call it before starting track on the new video stream. + */ + void refresh(void); + + /** + * @brief @ref ImageTrackingModel copy assignment operator. 
+ * @details Fills the information based on the @a copy + * + * @since_tizen 3.0 + * @param [in] copy @ref ImageTrackingModel which will be copied + */ + ImageTrackingModel& operator=(const ImageTrackingModel& copy); + + /** + * @brief Stores the @ref ImageTrackingModel in a file. + * + * @since_tizen 3.0 + * @param [in] filepath File name which will be generated + * @return @a 0 on success, otherwise a negative error value + */ + int save(const char *filepath) const; + + /** + * @brief Loads the @ref ImageTrackingModel from the file. + * + * @since_tizen 3.0 + * @param [in] filepath File name from which will be loaded a model + * @return @a 0 on success, otherwise a negative error value + */ + int load(const char *filepath); + + /** + * @brief Checks state of the @ref ImageTrackingModel. + * + * @since_tizen 3.0 + * @return @a true if object was detected on the last processed frame, + * otherwise a @a false value + */ + bool isDetected() const; + + /** + * @brief Gets last location of the @ref ImageTrackingModel. 
+ * + * @since_tizen 3.0 + * @return Last detected location + */ + std::vector getLastlocation() const; private: + ImageObject m_recognitionObject; - ImageObject m_recognitionObject; - - ImageContourStabilizator m_stabilizator; + ImageContourStabilizator m_stabilizator; - std::vector m_lastLocation; + std::vector m_lastLocation; - State m_state; + State m_state; - pthread_t m_recognitionThread; + pthread_t m_recognitionThread; - mutable pthread_mutex_t m_globalGuard; + mutable pthread_mutex_t m_globalGuard; - mutable pthread_spinlock_t m_lastLocationGuard; + mutable pthread_spinlock_t m_lastLocationGuard; - mutable pthread_spinlock_t m_stateGuard; + mutable pthread_spinlock_t m_stateGuard; - friend std::ostream& operator << ( - std::ostream& os, - const ImageTrackingModel::State& state); + friend std::ostream& operator << ( + std::ostream& os, + const ImageTrackingModel::State& state); - friend std::istream& operator >> ( - std::istream& is, - ImageTrackingModel::State& state); + friend std::istream& operator >> ( + std::istream& is, + ImageTrackingModel::State& state); - friend std::ostream& operator << ( - std::ostream& os, - const ImageTrackingModel& obj); + friend std::ostream& operator << ( + std::ostream& os, + const ImageTrackingModel& obj); - friend std::istream& operator >> ( - std::istream& is, - ImageTrackingModel& obj); + friend std::istream& operator >> ( + std::istream& is, + ImageTrackingModel& obj); - friend class ImageTracker; + friend class ImageTracker; }; } /* Image */ diff --git a/mv_image/image/include/mv_image_open.h b/mv_image/image/include/mv_image_open.h index f6128fd..a73df98 100644 --- a/mv_image/image/include/mv_image_open.h +++ b/mv_image/image/include/mv_image_open.h @@ -77,12 +77,12 @@ extern "C" { * @see mv_engine_config_h */ int mv_image_recognize_open( - mv_source_h source, - const mv_image_object_h *image_objects, - int number_of_objects, - mv_engine_config_h engine_cfg, - mv_image_recognized_cb recognized_cb, - void *user_data); 
+ mv_source_h source, + const mv_image_object_h *image_objects, + int number_of_objects, + mv_engine_config_h engine_cfg, + mv_image_recognized_cb recognized_cb, + void *user_data); /*************************/ /* Image object tracking */ @@ -140,11 +140,11 @@ int mv_image_recognize_open( * @see mv_image_tracking_model_destroy_open() */ int mv_image_track_open( - mv_source_h source, - mv_image_tracking_model_h image_tracking_model, - mv_engine_config_h engine_cfg, - mv_image_tracked_cb tracked_cb, - void *user_data); + mv_source_h source, + mv_image_tracking_model_h image_tracking_model, + mv_engine_config_h engine_cfg, + mv_image_tracked_cb tracked_cb, + void *user_data); /**************************/ /* Image object behaviour */ @@ -165,7 +165,7 @@ int mv_image_track_open( * @see mv_image_object_destroy_open() */ int mv_image_object_create_open( - mv_image_object_h *image_object); + mv_image_object_h *image_object); /** * @brief Destroys the image object. @@ -179,7 +179,7 @@ int mv_image_object_create_open( * @see mv_image_object_create_open() */ int mv_image_object_destroy_open( - mv_image_object_h image_object); + mv_image_object_h image_object); /** * @brief Fills the image object. @@ -219,10 +219,10 @@ int mv_image_object_destroy_open( * @see mv_engine_config_h */ int mv_image_object_fill_open( - mv_image_object_h image_object, - mv_engine_config_h engine_cfg, - mv_source_h source, - mv_rectangle_s *location); + mv_image_object_h image_object, + mv_engine_config_h engine_cfg, + mv_source_h source, + mv_rectangle_s *location); /** * @brief Gets a value that determines how well an image object can be recognized. @@ -255,8 +255,8 @@ int mv_image_object_fill_open( * @see mv_engine_config_h */ int mv_image_object_get_recognition_rate_open( - mv_image_object_h image_object, - double *recognition_rate); + mv_image_object_h image_object, + double *recognition_rate); /** * @brief Sets a label for the image object. 
@@ -281,8 +281,8 @@ int mv_image_object_get_recognition_rate_open( * @see mv_image_object_destroy_open() */ int mv_image_object_set_label_open( - mv_image_object_h image_object, - int label); + mv_image_object_h image_object, + int label); /** * @brief Gets a label of image object. @@ -309,8 +309,8 @@ int mv_image_object_set_label_open( * @see mv_image_object_destroy_open() */ int mv_image_object_get_label_open( - mv_image_object_h image_object, - int *label); + mv_image_object_h image_object, + int *label); /** * @brief Clones the image object. @@ -330,8 +330,8 @@ int mv_image_object_get_label_open( * @see mv_image_object_destroy_open() */ int mv_image_object_clone_open( - mv_image_object_h src, - mv_image_object_h *dst); + mv_image_object_h src, + mv_image_object_h *dst); /** * @brief Saves the image object. @@ -351,7 +351,7 @@ int mv_image_object_clone_open( * @see mv_image_object_destroy_open() */ int mv_image_object_save_open( - const char *file_name, mv_image_object_h image_object); + const char *file_name, mv_image_object_h image_object); /** * @brief Loads an image object from the file. @@ -377,7 +377,7 @@ int mv_image_object_save_open( * @see mv_image_object_destroy_open() */ int mv_image_object_load_open( - const char *file_name, mv_image_object_h *image_object); + const char *file_name, mv_image_object_h *image_object); /**********************************/ /* Image tracking model behaviour */ @@ -398,7 +398,7 @@ int mv_image_object_load_open( * @see mv_image_tracking_model_destroy_open() */ int mv_image_tracking_model_create_open( - mv_image_tracking_model_h *image_tracking_model); + mv_image_tracking_model_h *image_tracking_model); /** * @brief Sets target of image tracking model. 
@@ -431,8 +431,8 @@ int mv_image_tracking_model_create_open( * @see mv_image_tracking_model_destroy_open() */ int mv_image_tracking_model_set_target_open( - mv_image_object_h image_object, - mv_image_tracking_model_h image_tracking_model); + mv_image_object_h image_object, + mv_image_tracking_model_h image_tracking_model); /** * @brief Destroys the image tracking model. @@ -449,7 +449,7 @@ int mv_image_tracking_model_set_target_open( * @see mv_image_tracking_model_create_open() */ int mv_image_tracking_model_destroy_open( - mv_image_tracking_model_h image_tracking_model); + mv_image_tracking_model_h image_tracking_model); /** * @brief Refreshes the state of image tracking model. @@ -480,8 +480,8 @@ int mv_image_tracking_model_destroy_open( * @see mv_image_tracking_model_destroy_open() */ int mv_image_tracking_model_refresh_open( - mv_image_tracking_model_h image_tracking_model, - mv_engine_config_h engine_cfg); + mv_image_tracking_model_h image_tracking_model, + mv_engine_config_h engine_cfg); /** * @brief Clones the image tracking model. @@ -499,8 +499,8 @@ int mv_image_tracking_model_refresh_open( * @see mv_image_tracking_model_destroy_open() */ int mv_image_tracking_model_clone_open( - mv_image_tracking_model_h src, - mv_image_tracking_model_h *dst); + mv_image_tracking_model_h src, + mv_image_tracking_model_h *dst); /** * @brief Saves the image tracking model. @@ -526,7 +526,7 @@ int mv_image_tracking_model_clone_open( * @see mv_image_tracking_model_destroy_open() */ int mv_image_tracking_model_save_open( - const char *file_name, mv_image_tracking_model_h image_tracking_model); + const char *file_name, mv_image_tracking_model_h image_tracking_model); /** * @brief Loads an image tracking model from the file. 
@@ -552,7 +552,7 @@ int mv_image_tracking_model_save_open( * @see mv_image_tracking_model_destroy_open() */ int mv_image_tracking_model_load_open( - const char *file_name, mv_image_tracking_model_h *image_tracking_model); + const char *file_name, mv_image_tracking_model_h *image_tracking_model); #ifdef __cplusplus } diff --git a/mv_image/image/src/ImageConfig.cpp b/mv_image/image/src/ImageConfig.cpp index 47fdaef..a058965 100644 --- a/mv_image/image/src/ImageConfig.cpp +++ b/mv_image/image/src/ImageConfig.cpp @@ -16,36 +16,33 @@ #include "ImageConfig.h" -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { FeaturesExtractingParams::FeaturesExtractingParams( - double scaleFactor, - int maximumFeaturesNumber) : - mScaleFactor(scaleFactor), - mMaximumFeaturesNumber(maximumFeaturesNumber) + double scaleFactor, + int maximumFeaturesNumber) : + mScaleFactor(scaleFactor), + mMaximumFeaturesNumber(maximumFeaturesNumber) { - ; /* NULL */ + ; /* NULL */ } FeaturesExtractingParams::FeaturesExtractingParams() : - mScaleFactor(1.2), - mMaximumFeaturesNumber(800) + mScaleFactor(1.2), + mMaximumFeaturesNumber(800) { - ; /* NULL */ + ; /* NULL */ } RecognitionParams::RecognitionParams( - int minMatchesNumber, - double requiredMatchesPart, - double allowableMatchesPartError) : - mMinMatchesNumber(minMatchesNumber), - mRequiredMatchesPart(requiredMatchesPart), - mAllowableMatchesPartError(allowableMatchesPartError) + int minMatchesNumber, + double requiredMatchesPart, + double allowableMatchesPartError) : + mMinMatchesNumber(minMatchesNumber), + mRequiredMatchesPart(requiredMatchesPart), + mAllowableMatchesPartError(allowableMatchesPartError) { - ; /* NULL */ + ; /* NULL */ } RecognitionParams::RecognitionParams() : @@ -53,51 +50,51 @@ RecognitionParams::RecognitionParams() : mRequiredMatchesPart(1.0), mAllowableMatchesPartError(0.0) { - ; /* NULL */ + ; /* NULL */ } StabilizationParams::StabilizationParams( - int historyAmount, - double 
allowableShift, - double stabilizationSpeed, - double stabilizationAcceleration) : - mHistoryAmount(historyAmount), - mAllowableShift(allowableShift), - mStabilizationSpeed(stabilizationSpeed), - mStabilizationAcceleration(stabilizationAcceleration) + int historyAmount, + double allowableShift, + double stabilizationSpeed, + double stabilizationAcceleration) : + mHistoryAmount(historyAmount), + mAllowableShift(allowableShift), + mStabilizationSpeed(stabilizationSpeed), + mStabilizationAcceleration(stabilizationAcceleration) { - ; /* NULL */ + ; /* NULL */ } StabilizationParams::StabilizationParams() : - mHistoryAmount(1), - mAllowableShift(0.0), - mStabilizationSpeed(0.0), - mStabilizationAcceleration(1.0) + mHistoryAmount(1), + mAllowableShift(0.0), + mStabilizationSpeed(0.0), + mStabilizationAcceleration(1.0) { - ; /* NULL */ + ; /* NULL */ } TrackingParams::TrackingParams( - FeaturesExtractingParams framesFeaturesExtractingParams, - RecognitionParams recognitionParams, - StabilizationParams stabilizationParams, - double expectedOffset) : - mFramesFeaturesExtractingParams(framesFeaturesExtractingParams), - mRecognitionParams(recognitionParams), - mStabilizationParams(stabilizationParams), - mExpectedOffset(expectedOffset) + FeaturesExtractingParams framesFeaturesExtractingParams, + RecognitionParams recognitionParams, + StabilizationParams stabilizationParams, + double expectedOffset) : + mFramesFeaturesExtractingParams(framesFeaturesExtractingParams), + mRecognitionParams(recognitionParams), + mStabilizationParams(stabilizationParams), + mExpectedOffset(expectedOffset) { - ; /* NULL */ + ; /* NULL */ } TrackingParams::TrackingParams() : - mFramesFeaturesExtractingParams(), - mRecognitionParams(), - mStabilizationParams(), - mExpectedOffset(0.0) + mFramesFeaturesExtractingParams(), + mRecognitionParams(), + mStabilizationParams(), + mExpectedOffset(0.0) { - ; /* NULL */ + ; /* NULL */ } } /* Image */ diff --git a/mv_image/image/src/ImageContourStabilizator.cpp 
b/mv_image/image/src/ImageContourStabilizator.cpp index 1c48659..2b6dddc 100644 --- a/mv_image/image/src/ImageContourStabilizator.cpp +++ b/mv_image/image/src/ImageContourStabilizator.cpp @@ -19,281 +19,250 @@ #include "mv_private.h" -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { ImageContourStabilizator::ImageContourStabilizator() : - m_movingHistory(MovingHistoryAmount), - m_priorities(MovingHistoryAmount) + m_movingHistory(MovingHistoryAmount), + m_priorities(MovingHistoryAmount) { - reset(); - - // increasing the stabilization rate - m_speeds.push_back(0.3f); - m_speeds.push_back(0.4f); - m_speeds.push_back(0.5f); - m_speeds.push_back(0.6f); - m_speeds.push_back(0.8f); - m_speeds.push_back(1.f); - - // calculation of priorities for positions in the moving history - for (size_t i = 0u; i < MovingHistoryAmount; ++i) - { - // linear dependence on the elapsed time - m_priorities[i] = (i + 1) / ((MovingHistoryAmount + 1) * MovingHistoryAmount / 2.0f); - } + reset(); + + /* increasing the stabilization rate */ + m_speeds.push_back(0.3f); + m_speeds.push_back(0.4f); + m_speeds.push_back(0.5f); + m_speeds.push_back(0.6f); + m_speeds.push_back(0.8f); + m_speeds.push_back(1.f); + + /* calculation of priorities for positions in the moving history */ + for (size_t i = 0u; i < MovingHistoryAmount; ++i) { + /* linear dependence on the elapsed time */ + m_priorities[i] = (i + 1) / ((MovingHistoryAmount + 1) * MovingHistoryAmount / 2.0f); + } } void ImageContourStabilizator::reset(void) { - m_isPrepared = false; - m_tempContourIndex = -1; - m_currentHistoryAmount = 0; + m_isPrepared = false; + m_tempContourIndex = -1; + m_currentHistoryAmount = 0; - LOGI("Outlier is detected."); + LOGI("Outlier is detected."); } bool ImageContourStabilizator::stabilize( - std::vector& contour, - const StabilizationParams& /*params*/) + std::vector& contour, + const StabilizationParams& /*params*/) { - // current implementation stabilizes quadrangles 
only - if (contour.size() != NumberOfQuadrangleCorners) - { - LOGW("Not stabilized. Empty contour."); - - return false; - } - - m_currentCornersSpeed.resize(contour.size(), 0); - - if (contour[0].x == contour[1].x && contour[0].y == contour[1].y) - { - LOGW("Not stabilized. Invalid contour."); - - return false; - } - - if (m_lastStabilizedContour.empty()) - { - m_lastStabilizedContour = contour; - } - - std::vector stabilizedState; - - // history amount < 2 it's no sense - if (MovingHistoryAmount >= 2) - { - // first sample - if (m_tempContourIndex == -1) - { - m_movingHistory[1] = contour; - m_tempContourIndex = 1; - m_currentHistoryAmount = 1; - - LOGI("Not stabilized. Too small moving history. (the first one)"); - - return false; - } - - // too short moving history - if (m_currentHistoryAmount < MovingHistoryAmount - 1) - { - ++m_currentHistoryAmount; - ++m_tempContourIndex; - m_movingHistory[m_tempContourIndex] = contour; - - LOGI("Not stabilized. Too small moving history."); - - return false; - } - - // saving into moving history - m_movingHistory.pop_front(); - m_movingHistory.push_back(contour); - - if (!m_isPrepared) - { - m_lastStabilizedContour = m_movingHistory[MovingHistoryAmount - 2]; - - LOGI("Not stabilized. Too small moving history. 
(the last one)"); - - m_isPrepared = true; - } - - // stabilization - stabilizedState = computeStabilizedQuadrangleContour(); - - if (stabilizedState.empty()) - { - stabilizedState = m_lastStabilizedContour; - } - } - else - { - stabilizedState = m_lastStabilizedContour; - } - - const float tolerantShift = getQuadrangleArea(contour.data()) * 0.00006f + 1.3f; - - const size_t contourSize = stabilizedState.size(); - for (size_t i = 0u; i < contourSize; ++i) - { - if (fabs(getDistance(stabilizedState[i], contour[i])) > tolerantShift) - { - const float dirX = m_lastStabilizedContour[i].x - contour[i].x; - const float dirY = m_lastStabilizedContour[i].y - contour[i].y; - - const float speedX = dirX * m_speeds[m_currentCornersSpeed[i]]; - const float speedY = dirY * m_speeds[m_currentCornersSpeed[i]]; - - // final moving - m_lastStabilizedContour[i].x -= speedX; - m_lastStabilizedContour[i].y -= speedY; - - if (m_currentCornersSpeed[i] < m_speeds.size() - 1) - { - ++m_currentCornersSpeed[i]; - } - } - else - { - m_currentCornersSpeed[i] = 0; - } - } + /* current implementation stabilizes quadrangles only */ + if (contour.size() != NumberOfQuadrangleCorners) { + LOGW("Not stabilized. Empty contour."); + + return false; + } + + m_currentCornersSpeed.resize(contour.size(), 0); + + if (contour[0].x == contour[1].x && contour[0].y == contour[1].y) { + LOGW("Not stabilized. Invalid contour."); + + return false; + } + + if (m_lastStabilizedContour.empty()) { + m_lastStabilizedContour = contour; + } + + std::vector stabilizedState; + + /* history amount < 2 it's no sense */ + if (MovingHistoryAmount >= 2) { + /* first sample */ + if (m_tempContourIndex == -1) { + m_movingHistory[1] = contour; + m_tempContourIndex = 1; + m_currentHistoryAmount = 1; + + LOGI("Not stabilized. Too small moving history. 
(the first one)"); + + return false; + } + + /* too short moving history */ + if (m_currentHistoryAmount < MovingHistoryAmount - 1) { + ++m_currentHistoryAmount; + ++m_tempContourIndex; + m_movingHistory[m_tempContourIndex] = contour; + + LOGI("Not stabilized. Too small moving history."); + + return false; + } + + /* saving into moving history */ + m_movingHistory.pop_front(); + m_movingHistory.push_back(contour); + + if (!m_isPrepared) { + m_lastStabilizedContour = m_movingHistory[MovingHistoryAmount - 2]; + + LOGI("Not stabilized. Too small moving history. (the last one)"); + + m_isPrepared = true; + } + + /* stabilization */ + stabilizedState = computeStabilizedQuadrangleContour(); + + if (stabilizedState.empty()) { + stabilizedState = m_lastStabilizedContour; + } + } else { + stabilizedState = m_lastStabilizedContour; + } + + const float tolerantShift = getQuadrangleArea(contour.data()) * 0.00006f + 1.3f; + + const size_t contourSize = stabilizedState.size(); + for (size_t i = 0u; i < contourSize; ++i) { + if (fabs(getDistance(stabilizedState[i], contour[i])) > tolerantShift) { + const float dirX = m_lastStabilizedContour[i].x - contour[i].x; + const float dirY = m_lastStabilizedContour[i].y - contour[i].y; + + const float speedX = dirX * m_speeds[m_currentCornersSpeed[i]]; + const float speedY = dirY * m_speeds[m_currentCornersSpeed[i]]; + + /* final moving */ + m_lastStabilizedContour[i].x -= speedX; + m_lastStabilizedContour[i].y -= speedY; + + if (m_currentCornersSpeed[i] < m_speeds.size() - 1) { + ++m_currentCornersSpeed[i]; + } + } else { + m_currentCornersSpeed[i] = 0; + } + } + + /* m_lastStabilizedContour = stabilizedState; */ + contour = m_lastStabilizedContour; + + LOGI("Contour successfully stabilized."); - // m_lastStabilizedContour = stabilizedState; - contour = m_lastStabilizedContour; - - LOGI("Contour successfully stabilized."); - - return true; + return true; } std::vector ImageContourStabilizator::computeStabilizedQuadrangleContour(void) { - 
// final contour - std::vector stabilizedState( - NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f)); - - // calculation the direction of contour corners to a new location - std::vector directions( - NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f)); - - // computing expected directions and outliers searching - bool expressiveTime = false; - float summPriorityWithoutToLastPos[NumberOfQuadrangleCorners]; - float priorityToLastPos[NumberOfQuadrangleCorners]; - std::vector directionsToLastPos(NumberOfQuadrangleCorners); - for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j) - { - // calculation the moving directions and computing average direction - std::vector trackDirections(MovingHistoryAmount - 1); - cv::Point2f averageDirections(0.f, 0.f); - - for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) - { - averageDirections.x += (trackDirections[i].x = - m_movingHistory[i+1][j].x - m_movingHistory[i][j].x) / - (MovingHistoryAmount - 1); - - averageDirections.y += (trackDirections[i].y = - m_movingHistory[i+1][j].y - m_movingHistory[i][j].y) / - (MovingHistoryAmount - 1); - } - - // calculation a deviations and select outlier - std::vector directionDistances(MovingHistoryAmount - 1); - float maxDistance = 0.f, prevMaxDistance = 0.f; - int idxWithMaxDistance = 0; - int numExpressiveDirection = -1; - for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) - { - directionDistances[i] = getDistance( - trackDirections[i], - averageDirections); - - if (directionDistances[i] > prevMaxDistance) - { - if (directionDistances[i] > maxDistance) - { - prevMaxDistance = maxDistance; - maxDistance = directionDistances[i]; - idxWithMaxDistance = i; - } - else - { - prevMaxDistance = directionDistances[i]; - } - } - } - - // check outlier - if (0.6f * maxDistance > prevMaxDistance) - { - LOGI("Outlier is detected."); - - numExpressiveDirection = idxWithMaxDistance; - } - - // final direction computing - float summPriority = 0.f; - for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) 
- { - if ((int)i != numExpressiveDirection) - { - directions[j].x += trackDirections[i].x * m_priorities[i]; - directions[j].y += trackDirections[i].y * m_priorities[i]; - summPriority += m_priorities[i]; - } - } - if (numExpressiveDirection == MovingHistoryAmount - 1) - { - expressiveTime = true; - } - - summPriorityWithoutToLastPos[j] = summPriority; - priorityToLastPos[j] = m_priorities[MovingHistoryAmount - 1]; - - directions[j].x -= directionsToLastPos[j].x = - (m_lastStabilizedContour[j].x - - m_movingHistory[MovingHistoryAmount - 1][j].x) * - priorityToLastPos[j]; - - directions[j].y -= directionsToLastPos[j].y = - (m_lastStabilizedContour[j].y - - m_movingHistory[MovingHistoryAmount - 1][j].y) * - priorityToLastPos[j]; - - summPriority += priorityToLastPos[j]; - - directions[j].x /= summPriority; - directions[j].y /= summPriority; - } - - // final corners computing - for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j) - { - if (expressiveTime) - { - directions[j].x *= (summPriorityWithoutToLastPos[j] + - priorityToLastPos[j]); - directions[j].x -= directionsToLastPos[j].x; - directions[j].x /= summPriorityWithoutToLastPos[j]; - - directions[j].y *= (summPriorityWithoutToLastPos[j] + - priorityToLastPos[j]); - directions[j].y -= directionsToLastPos[j].y; - directions[j].y /= summPriorityWithoutToLastPos[j]; - } - - stabilizedState[j].x = m_lastStabilizedContour[j].x + directions[j].x; - stabilizedState[j].y = m_lastStabilizedContour[j].y + directions[j].y; - } - - return stabilizedState; + /* final contour */ + std::vector stabilizedState( + NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f)); + + /* calculation the direction of contour corners to a new location */ + std::vector directions( + NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f)); + + /* computing expected directions and outliers searching */ + bool expressiveTime = false; + float summPriorityWithoutToLastPos[NumberOfQuadrangleCorners]; + float priorityToLastPos[NumberOfQuadrangleCorners]; + 
std::vector directionsToLastPos(NumberOfQuadrangleCorners); + for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j) { + /* calculation the moving directions and computing average direction */ + std::vector trackDirections(MovingHistoryAmount - 1); + cv::Point2f averageDirections(0.f, 0.f); + + for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) { + averageDirections.x += (trackDirections[i].x = + m_movingHistory[i+1][j].x - m_movingHistory[i][j].x) / + (MovingHistoryAmount - 1); + + averageDirections.y += (trackDirections[i].y = + m_movingHistory[i+1][j].y - m_movingHistory[i][j].y) / + (MovingHistoryAmount - 1); + } + + /* calculation a deviations and select outlier */ + std::vector directionDistances(MovingHistoryAmount - 1); + float maxDistance = 0.f, prevMaxDistance = 0.f; + int idxWithMaxDistance = 0; + int numExpressiveDirection = -1; + for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) { + directionDistances[i] = getDistance( + trackDirections[i], + averageDirections); + + if (directionDistances[i] > prevMaxDistance) { + if (directionDistances[i] > maxDistance) { + prevMaxDistance = maxDistance; + maxDistance = directionDistances[i]; + idxWithMaxDistance = i; + } else { + prevMaxDistance = directionDistances[i]; + } + } + } + + /* check outlier */ + if (0.6f * maxDistance > prevMaxDistance) { + LOGI("Outlier is detected."); + + numExpressiveDirection = idxWithMaxDistance; + } + + /* final direction computing */ + float summPriority = 0.f; + for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) { + if ((int)i != numExpressiveDirection) { + directions[j].x += trackDirections[i].x * m_priorities[i]; + directions[j].y += trackDirections[i].y * m_priorities[i]; + summPriority += m_priorities[i]; + } + } + + if (numExpressiveDirection == MovingHistoryAmount - 1) { + expressiveTime = true; + } + + summPriorityWithoutToLastPos[j] = summPriority; + priorityToLastPos[j] = m_priorities[MovingHistoryAmount - 1]; + + directions[j].x -= directionsToLastPos[j].x = + 
(m_lastStabilizedContour[j].x - + m_movingHistory[MovingHistoryAmount - 1][j].x) * + priorityToLastPos[j]; + + directions[j].y -= directionsToLastPos[j].y = + (m_lastStabilizedContour[j].y - + m_movingHistory[MovingHistoryAmount - 1][j].y) * + priorityToLastPos[j]; + + summPriority += priorityToLastPos[j]; + + directions[j].x /= summPriority; + directions[j].y /= summPriority; + } + + /* final corners computing */ + for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j) { + if (expressiveTime) { + directions[j].x *= (summPriorityWithoutToLastPos[j] + + priorityToLastPos[j]); + directions[j].x -= directionsToLastPos[j].x; + directions[j].x /= summPriorityWithoutToLastPos[j]; + + directions[j].y *= (summPriorityWithoutToLastPos[j] + + priorityToLastPos[j]); + directions[j].y -= directionsToLastPos[j].y; + directions[j].y /= summPriorityWithoutToLastPos[j]; + } + + stabilizedState[j].x = m_lastStabilizedContour[j].x + directions[j].x; + stabilizedState[j].y = m_lastStabilizedContour[j].y + directions[j].y; + } + + return stabilizedState; } } /* Image */ diff --git a/mv_image/image/src/ImageMathUtil.cpp b/mv_image/image/src/ImageMathUtil.cpp index 8bf5ba8..0da2dbc 100644 --- a/mv_image/image/src/ImageMathUtil.cpp +++ b/mv_image/image/src/ImageMathUtil.cpp @@ -16,43 +16,40 @@ #include "ImageMathUtil.h" -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { float getDistance( - const cv::Point2f& point1, - const cv::Point2f& point2) + const cv::Point2f& point1, + const cv::Point2f& point2) { - return sqrt( - (point1.x - point2.x) * (point1.x - point2.x) + - (point1.y - point2.y) * (point1.y - point2.y)); + return sqrt( + (point1.x - point2.x) * (point1.x - point2.x) + + (point1.y - point2.y) * (point1.y - point2.y)); } float getTriangleArea( - const cv::Point2f& point1, - const cv::Point2f& point2, - const cv::Point2f& point3) + const cv::Point2f& point1, + const cv::Point2f& point2, + const cv::Point2f& point3) { - float 
distances[3]; + float distances[3]; - distances[0] = getDistance(point1, point2); - distances[1] = getDistance(point2, point3); - distances[2] = getDistance(point3, point1); + distances[0] = getDistance(point1, point2); + distances[1] = getDistance(point2, point3); + distances[2] = getDistance(point3, point1); - const float semiperimeter = (distances[0] + distances[1] + distances[2]) / 2.0f; + const float semiperimeter = (distances[0] + distances[1] + distances[2]) / 2.0f; - return sqrt(semiperimeter * - (semiperimeter - distances[0]) * - (semiperimeter - distances[1]) * - (semiperimeter - distances[2])); + return sqrt(semiperimeter * + (semiperimeter - distances[0]) * + (semiperimeter - distances[1]) * + (semiperimeter - distances[2])); } float getQuadrangleArea(const cv::Point2f points[NumberOfQuadrangleCorners]) { - return getTriangleArea(points[0], points[1], points[2]) + - getTriangleArea(points[0], points[3], points[2]); + return getTriangleArea(points[0], points[1], points[2]) + + getTriangleArea(points[0], points[3], points[2]); } } /* Image */ diff --git a/mv_image/image/src/ImageObject.cpp b/mv_image/image/src/ImageObject.cpp index 531ec62..a562605 100644 --- a/mv_image/image/src/ImageObject.cpp +++ b/mv_image/image/src/ImageObject.cpp @@ -29,446 +29,417 @@ #include #include -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { ImageObject::ImageObject() : - m_isEmpty(true), - m_isLabeled(false), - m_label(0), - m_recognitionRate(0.f) + m_isEmpty(true), + m_isLabeled(false), + m_label(0), + m_recognitionRate(0.f) { - ; /* NULL */ + ; /* NULL */ } ImageObject::ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params) : - m_isEmpty(true), - m_isLabeled(false), - m_label(0), - m_recognitionRate(0.f) + m_isEmpty(true), + m_isLabeled(false), + m_label(0), + m_recognitionRate(0.f) { - fill(image, params); + fill(image, params); } ImageObject::ImageObject(const ImageObject& copy) : - 
m_isEmpty(copy.m_isEmpty), - m_isLabeled(copy.m_isLabeled), - m_label(copy.m_label), - m_boundingContour(copy.m_boundingContour), - m_objectKeypoints(copy.m_objectKeypoints), - m_objectDescriptors(copy.m_objectDescriptors.clone()), - m_recognitionRate(copy.m_recognitionRate) + m_isEmpty(copy.m_isEmpty), + m_isLabeled(copy.m_isLabeled), + m_label(copy.m_label), + m_boundingContour(copy.m_boundingContour), + m_objectKeypoints(copy.m_objectKeypoints), + m_objectDescriptors(copy.m_objectDescriptors.clone()), + m_recognitionRate(copy.m_recognitionRate) { - ; /* NULL */ + ; /* NULL */ } ImageObject& ImageObject::operator=(const ImageObject& copy) { - if (this != ©) - { - m_isEmpty = copy.m_isEmpty; - m_isLabeled = copy.m_isLabeled; - m_label = copy.m_label; - m_boundingContour = copy.m_boundingContour; - m_objectKeypoints = copy.m_objectKeypoints; - m_objectDescriptors = copy.m_objectDescriptors.clone(); - m_recognitionRate = copy.m_recognitionRate; - } - return *this; + if (this != ©) { + m_isEmpty = copy.m_isEmpty; + m_isLabeled = copy.m_isLabeled; + m_label = copy.m_label; + m_boundingContour = copy.m_boundingContour; + m_objectKeypoints = copy.m_objectKeypoints; + m_objectDescriptors = copy.m_objectDescriptors.clone(); + m_recognitionRate = copy.m_recognitionRate; + } + return *this; } ImageObject::~ImageObject() { - ; /* NULL */ + ; /* NULL */ } void ImageObject::fill(const cv::Mat& image, const FeaturesExtractingParams& params) { - m_isEmpty = false; - m_boundingContour.resize(NumberOfQuadrangleCorners); + m_isEmpty = false; + m_boundingContour.resize(NumberOfQuadrangleCorners); - m_boundingContour[0].x = 0.f; - m_boundingContour[0].y = 0.f; + m_boundingContour[0].x = 0.f; + m_boundingContour[0].y = 0.f; - m_boundingContour[1].x = image.cols; - m_boundingContour[1].y = 0.f; + m_boundingContour[1].x = image.cols; + m_boundingContour[1].y = 0.f; - m_boundingContour[2].x = image.cols; - m_boundingContour[2].y = image.rows; + m_boundingContour[2].x = image.cols; + 
m_boundingContour[2].y = image.rows; - m_boundingContour[3].x = 0.f; - m_boundingContour[3].y = image.rows; + m_boundingContour[3].x = 0.f; + m_boundingContour[3].y = image.rows; - extractFeatures(image, params); + extractFeatures(image, params); - computeRecognitionRate(image); + computeRecognitionRate(image); - LOGI("[%s] Image object is filled.", __FUNCTION__); + LOGI("[%s] Image object is filled.", __FUNCTION__); } bool ImageObject::fill(const cv::Mat& image, const cv::Rect& boundingBox, - const FeaturesExtractingParams& params) + const FeaturesExtractingParams& params) { - if ((0 > boundingBox.x) || (0 >= boundingBox.width) || - (0 > boundingBox.y) || (0 >= boundingBox.height) || - (image.cols < (boundingBox.x + boundingBox.width)) || - (image.rows < (boundingBox.y + boundingBox.height))) - { - LOGE("[%s] Invalid ROI.", __FUNCTION__); - return false; - } + if ((0 > boundingBox.x) || (0 >= boundingBox.width) || + (0 > boundingBox.y) || (0 >= boundingBox.height) || + (image.cols < (boundingBox.x + boundingBox.width)) || + (image.rows < (boundingBox.y + boundingBox.height))) { + LOGE("[%s] Invalid ROI.", __FUNCTION__); + return false; + } - m_isEmpty = false; - m_boundingContour.resize(NumberOfQuadrangleCorners); + m_isEmpty = false; + m_boundingContour.resize(NumberOfQuadrangleCorners); - m_boundingContour[0].x = 0.f; - m_boundingContour[0].y = 0.f; + m_boundingContour[0].x = 0.f; + m_boundingContour[0].y = 0.f; - m_boundingContour[1].x = boundingBox.width; - m_boundingContour[1].y = 0.f; + m_boundingContour[1].x = boundingBox.width; + m_boundingContour[1].y = 0.f; - m_boundingContour[2].x = boundingBox.width; - m_boundingContour[2].y = boundingBox.height; + m_boundingContour[2].x = boundingBox.width; + m_boundingContour[2].y = boundingBox.height; - m_boundingContour[3].x = 0.f; - m_boundingContour[3].y = boundingBox.height; + m_boundingContour[3].x = 0.f; + m_boundingContour[3].y = boundingBox.height; - cv::Mat objectImage(image, boundingBox); + cv::Mat 
objectImage(image, boundingBox); - extractFeatures(objectImage, params); + extractFeatures(objectImage, params); - computeRecognitionRate(image); + computeRecognitionRate(image); - LOGI("[%s] Image object is filled.", __FUNCTION__); + LOGI("[%s] Image object is filled.", __FUNCTION__); - return true; + return true; } void ImageObject::extractFeatures(const cv::Mat& image, - const FeaturesExtractingParams& params) + const FeaturesExtractingParams& params) { - cv::ORB orb(params.mMaximumFeaturesNumber, params.mScaleFactor); - - if (image.cols < MinWidth || image.rows < MinHeight) - { - LOGW("[%s] Area is too small, recognition rate is 0.", __FUNCTION__); - m_objectKeypoints.clear(); - m_objectDescriptors = cv::Mat(); - } - else - { - orb.detect(image, m_objectKeypoints); - orb.compute(image, m_objectKeypoints, m_objectDescriptors); - } + cv::ORB orb(params.mMaximumFeaturesNumber, params.mScaleFactor); + + if (image.cols < MinWidth || image.rows < MinHeight) { + LOGW("[%s] Area is too small, recognition rate is 0.", __FUNCTION__); + m_objectKeypoints.clear(); + m_objectDescriptors = cv::Mat(); + } else { + orb.detect(image, m_objectKeypoints); + orb.compute(image, m_objectKeypoints, m_objectDescriptors); + } } void ImageObject::computeRecognitionRate(const cv::Mat& image) { - const size_t numberOfKeypoints = m_objectKeypoints.size(); - - // it is impossible to calculate the perspective transformation parameters - // if number of key points less than MinimumNumberOfFeatures (4) - if (numberOfKeypoints < MinimumNumberOfFeatures) - { - m_recognitionRate = 0.f; - return; - } - - static const size_t xCellsNumber = 10u; - static const size_t yCellsNumber = 10u; - - cv::Mat cells[xCellsNumber][yCellsNumber]; - size_t accumulationCounter[xCellsNumber][yCellsNumber]; - - const size_t cellWidth = image.cols / xCellsNumber; - const size_t cellHeight = image.rows / yCellsNumber; - - for (size_t x = 0u; x < xCellsNumber; ++x) - { - for (size_t y = 0u; y < yCellsNumber; ++y) - { - 
cells[x][y] = image(cv::Rect( - x * cellWidth, - y * cellHeight, - cellWidth, - cellHeight)); - - accumulationCounter[x][y] = 0; - } - } - - for (size_t i = 0u; i < numberOfKeypoints; ++i) - { - size_t xCellIdx = m_objectKeypoints[i].pt.x / cellWidth; - if (xCellIdx >= xCellsNumber) - { - xCellIdx = xCellsNumber - 1; - } - size_t yCellIdx = m_objectKeypoints[i].pt.y / cellHeight; - if (yCellIdx >= yCellsNumber) - { - yCellIdx = yCellsNumber - 1; - } - ++(accumulationCounter[xCellIdx][yCellIdx]); - } - - const float exceptedNumber = numberOfKeypoints / - (float)(xCellsNumber * yCellsNumber); - - float distributedEvaluation = 0.f; - - for (size_t x = 0u; x < xCellsNumber; ++x) - { - for (size_t y = 0u; y < yCellsNumber; ++y) - { - distributedEvaluation += (accumulationCounter[x][y] - exceptedNumber) * - (accumulationCounter[x][y] - exceptedNumber) / exceptedNumber; - } - } - - float maximumDistributedEvaluation = (xCellsNumber * yCellsNumber - 1) * - exceptedNumber; - - maximumDistributedEvaluation += (numberOfKeypoints - exceptedNumber) * - (numberOfKeypoints - exceptedNumber) / exceptedNumber; - - distributedEvaluation = 1 - - (distributedEvaluation / maximumDistributedEvaluation); - - // Exponentiation to find an approximate confidence value based on the - // number of key points on the image. 
- const float cardinalityEvaluation = pow(-0.9, numberOfKeypoints - 3) + 1.0f; - - m_recognitionRate = - distributedEvaluation * - cardinalityEvaluation; + const size_t numberOfKeypoints = m_objectKeypoints.size(); + + /* it is impossible to calculate the perspective transformation parameters + * if number of key points less than MinimumNumberOfFeatures (4) + */ + if (numberOfKeypoints < MinimumNumberOfFeatures) { + m_recognitionRate = 0.f; + return; + } + + static const size_t xCellsNumber = 10u; + static const size_t yCellsNumber = 10u; + + cv::Mat cells[xCellsNumber][yCellsNumber]; + size_t accumulationCounter[xCellsNumber][yCellsNumber]; + + const size_t cellWidth = image.cols / xCellsNumber; + const size_t cellHeight = image.rows / yCellsNumber; + + for (size_t x = 0u; x < xCellsNumber; ++x) { + for (size_t y = 0u; y < yCellsNumber; ++y) { + cells[x][y] = image(cv::Rect( + x * cellWidth, + y * cellHeight, + cellWidth, + cellHeight)); + + accumulationCounter[x][y] = 0; + } + } + + for (size_t i = 0u; i < numberOfKeypoints; ++i) { + size_t xCellIdx = m_objectKeypoints[i].pt.x / cellWidth; + if (xCellIdx >= xCellsNumber) { + xCellIdx = xCellsNumber - 1; + } + size_t yCellIdx = m_objectKeypoints[i].pt.y / cellHeight; + if (yCellIdx >= yCellsNumber) { + yCellIdx = yCellsNumber - 1; + } + ++(accumulationCounter[xCellIdx][yCellIdx]); + } + + const float exceptedNumber = numberOfKeypoints / + (float)(xCellsNumber * yCellsNumber); + + float distributedEvaluation = 0.f; + + for (size_t x = 0u; x < xCellsNumber; ++x) { + for (size_t y = 0u; y < yCellsNumber; ++y) { + distributedEvaluation += (accumulationCounter[x][y] - exceptedNumber) * + (accumulationCounter[x][y] - exceptedNumber) / exceptedNumber; + } + } + + float maximumDistributedEvaluation = (xCellsNumber * yCellsNumber - 1) * + exceptedNumber; + + maximumDistributedEvaluation += (numberOfKeypoints - exceptedNumber) * + (numberOfKeypoints - exceptedNumber) / exceptedNumber; + + distributedEvaluation = 1 - + 
(distributedEvaluation / maximumDistributedEvaluation); + + /* Exponentiation to find an approximate confidence value based on the + * number of key points on the image. + */ + const float cardinalityEvaluation = pow(-0.9, numberOfKeypoints - 3) + 1.0f; + + m_recognitionRate = + distributedEvaluation * + cardinalityEvaluation; } float ImageObject::getRecognitionRate(void) const { - return m_recognitionRate; + return m_recognitionRate; } bool ImageObject::isEmpty() const { - return m_isEmpty; + return m_isEmpty; } void ImageObject::setLabel(int label) { - m_isLabeled = true; - m_label = label; + m_isLabeled = true; + m_label = label; } bool ImageObject::getLabel(int& label) const { - if (!m_isLabeled) - { - LOGW("[%s] Image hasn't label.", __FUNCTION__); - return false; - } - label = m_label; - return true; + if (!m_isLabeled) { + LOGW("[%s] Image hasn't label.", __FUNCTION__); + return false; + } + label = m_label; + return true; } int ImageObject::save(const char *fileName) const { - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); + std::string prefix_path = std::string(app_get_data_path()); + LOGD("prefix_path: %s", prefix_path.c_str()); - std::string filePath; - filePath += prefix_path; - filePath += fileName; + std::string filePath; + filePath += prefix_path; + filePath += fileName; - /* check the directory is available */ - std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/')); - if (access(prefix_path_check.c_str(), F_OK)) - { - LOGE("Can't save image object. Path[%s] doesn't existed.", prefix_path_check.c_str()); + /* check the directory is available */ + std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/')); + if (access(prefix_path_check.c_str(), F_OK)) { + LOGE("Can't save image object. 
Path[%s] doesn't existed.", prefix_path_check.c_str()); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + return MEDIA_VISION_ERROR_INVALID_PATH; + } - std::ofstream out; + std::ofstream out; - out.open(filePath.c_str()); + out.open(filePath.c_str()); - if (!out.is_open()) - { - LOGE("[%s] Can't create/open file.", __FUNCTION__); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } + if (!out.is_open()) { + LOGE("[%s] Can't create/open file.", __FUNCTION__); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } - out<<(*this); + out << (*this); - out.close(); - LOGI("[%s] Image object is saved.", __FUNCTION__); + out.close(); + LOGI("[%s] Image object is saved.", __FUNCTION__); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int ImageObject::load(const char *fileName) { - /* find directory */ - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); + /* find directory */ + std::string prefix_path = std::string(app_get_data_path()); + LOGD("prefix_path: %s", prefix_path.c_str()); - std::string filePath; - filePath += prefix_path; - filePath += fileName; + std::string filePath; + filePath += prefix_path; + filePath += fileName; - if (access(filePath.c_str(), F_OK)) - { - LOGE("Can't load image object model. Path[%s] doesn't existed.", filePath.c_str()); + if (access(filePath.c_str(), F_OK)) { + LOGE("Can't load image object model. 
Path[%s] doesn't existed.", filePath.c_str()); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + return MEDIA_VISION_ERROR_INVALID_PATH; + } - std::ifstream in; - in.open(filePath.c_str()); + std::ifstream in; + in.open(filePath.c_str()); - if (!in.is_open()) - { - LOGE("[%s] Can't open file.", __FUNCTION__); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } + if (!in.is_open()) { + LOGE("[%s] Can't open file.", __FUNCTION__); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } - in>>(*this); + in >> (*this); - if (!in.good()) - { - LOGE("[%s] Unexpected end of file.", __FUNCTION__); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } + if (!in.good()) { + LOGE("[%s] Unexpected end of file.", __FUNCTION__); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } - in.close(); - LOGI("[%s] Image object is loaded.", __FUNCTION__); + in.close(); + LOGI("[%s] Image object is loaded.", __FUNCTION__); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } std::ostream& operator << (std::ostream& os, const ImageObject& obj) { - os<(descriptorNum, featureNum)<<' '; - } - } - - return os; + os << std::setprecision(7); + + os << obj.m_isEmpty << '\n'; + os << obj.m_isLabeled << '\n'; + os << obj.m_label << '\n'; + + os << obj.m_boundingContour.size() << '\n'; + for (size_t pointNum = 0u; pointNum < obj.m_boundingContour.size(); ++pointNum) { + os << obj.m_boundingContour[pointNum].x << ' '; + os << obj.m_boundingContour[pointNum].y << '\n'; + } + + os << obj.m_objectKeypoints.size() << '\n'; + for (size_t keypointNum = 0u; keypointNum < obj.m_objectKeypoints.size(); ++keypointNum) { + os << obj.m_objectKeypoints[keypointNum].pt.x << ' '; + os << obj.m_objectKeypoints[keypointNum].pt.y << ' '; + os << obj.m_objectKeypoints[keypointNum].size << ' '; + os << obj.m_objectKeypoints[keypointNum].response << ' '; + os << obj.m_objectKeypoints[keypointNum].angle << ' '; + os << obj.m_objectKeypoints[keypointNum].octave << ' '; + os << 
obj.m_objectKeypoints[keypointNum].class_id << '\n'; + } + + os << obj.m_objectDescriptors.rows << ' '; + os << obj.m_objectDescriptors.cols << ' '; + os << obj.m_objectDescriptors.type() << '\n'; + for (int descriptorNum = 0; descriptorNum < obj.m_objectDescriptors.rows; + ++descriptorNum) { + for (int featureNum = 0; featureNum < obj.m_objectDescriptors.cols; + ++featureNum, os << '\n') { + os << (int)obj.m_objectDescriptors.at(descriptorNum, featureNum) << ' '; + } + } + + return os; } std::istream& operator >> (std::istream& is, ImageObject& obj) { - size_t numberOfContourPoints = 0u; - size_t numberOfKeyPoints = 0u; - int rows = 0, cols = 0; - int descriptorType = 0; + size_t numberOfContourPoints = 0u; + size_t numberOfKeyPoints = 0u; + int rows = 0, cols = 0; + int descriptorType = 0; - ImageObject temporal; + ImageObject temporal; #define MEDIA_VISION_CHECK_IFSTREAM \ - if (!is.good()) \ - { \ - return is; \ - } - - is>>temporal.m_isEmpty; - MEDIA_VISION_CHECK_IFSTREAM - is>>temporal.m_isLabeled; - MEDIA_VISION_CHECK_IFSTREAM - is>>temporal.m_label; - MEDIA_VISION_CHECK_IFSTREAM - - is>>numberOfContourPoints; - MEDIA_VISION_CHECK_IFSTREAM - - temporal.m_boundingContour.resize(numberOfContourPoints); - for (size_t pointNum = 0; pointNum < temporal.m_boundingContour.size(); ++pointNum) - { - is>>temporal.m_boundingContour[pointNum].x; - MEDIA_VISION_CHECK_IFSTREAM - is>>temporal.m_boundingContour[pointNum].y; - MEDIA_VISION_CHECK_IFSTREAM - } - - is>>numberOfKeyPoints; - temporal.m_objectKeypoints.resize(numberOfKeyPoints); - for (size_t keypointNum = 0; keypointNum < temporal.m_objectKeypoints.size(); ++keypointNum) - { - is>>temporal.m_objectKeypoints[keypointNum].pt.x; - MEDIA_VISION_CHECK_IFSTREAM - is>>temporal.m_objectKeypoints[keypointNum].pt.y; - MEDIA_VISION_CHECK_IFSTREAM - is>>temporal.m_objectKeypoints[keypointNum].size; - MEDIA_VISION_CHECK_IFSTREAM - is>>temporal.m_objectKeypoints[keypointNum].response; - MEDIA_VISION_CHECK_IFSTREAM - 
is>>temporal.m_objectKeypoints[keypointNum].angle; - MEDIA_VISION_CHECK_IFSTREAM - is>>temporal.m_objectKeypoints[keypointNum].octave; - MEDIA_VISION_CHECK_IFSTREAM - is>>temporal.m_objectKeypoints[keypointNum].class_id; - MEDIA_VISION_CHECK_IFSTREAM - } - - is>>rows; - MEDIA_VISION_CHECK_IFSTREAM - is>>cols; - MEDIA_VISION_CHECK_IFSTREAM - is>>descriptorType; - MEDIA_VISION_CHECK_IFSTREAM - temporal.m_objectDescriptors = cv::Mat(rows, cols, descriptorType); - int value = 0; - for (int descriptorNum = 0; descriptorNum < temporal.m_objectDescriptors.rows; ++descriptorNum) - { - for (int featureNum = 0; featureNum < temporal.m_objectDescriptors.cols; ++featureNum) - { - is>>value; - MEDIA_VISION_CHECK_IFSTREAM - temporal.m_objectDescriptors.at(descriptorNum, featureNum) = (uchar)value; - } - } + if (!is.good()) { \ + return is; \ + } + + is >> temporal.m_isEmpty; + MEDIA_VISION_CHECK_IFSTREAM + is >> temporal.m_isLabeled; + MEDIA_VISION_CHECK_IFSTREAM + is >> temporal.m_label; + MEDIA_VISION_CHECK_IFSTREAM + + is >> numberOfContourPoints; + MEDIA_VISION_CHECK_IFSTREAM + + temporal.m_boundingContour.resize(numberOfContourPoints); + for (size_t pointNum = 0; pointNum < temporal.m_boundingContour.size(); ++pointNum) { + is >> temporal.m_boundingContour[pointNum].x; + MEDIA_VISION_CHECK_IFSTREAM + is >> temporal.m_boundingContour[pointNum].y; + MEDIA_VISION_CHECK_IFSTREAM + } + + is >> numberOfKeyPoints; + temporal.m_objectKeypoints.resize(numberOfKeyPoints); + for (size_t keypointNum = 0; keypointNum < temporal.m_objectKeypoints.size(); ++keypointNum) { + is >> temporal.m_objectKeypoints[keypointNum].pt.x; + MEDIA_VISION_CHECK_IFSTREAM + is >> temporal.m_objectKeypoints[keypointNum].pt.y; + MEDIA_VISION_CHECK_IFSTREAM + is >> temporal.m_objectKeypoints[keypointNum].size; + MEDIA_VISION_CHECK_IFSTREAM + is >> temporal.m_objectKeypoints[keypointNum].response; + MEDIA_VISION_CHECK_IFSTREAM + is >> temporal.m_objectKeypoints[keypointNum].angle; + MEDIA_VISION_CHECK_IFSTREAM 
+ is >> temporal.m_objectKeypoints[keypointNum].octave; + MEDIA_VISION_CHECK_IFSTREAM + is >> temporal.m_objectKeypoints[keypointNum].class_id; + MEDIA_VISION_CHECK_IFSTREAM + } + + is >> rows; + MEDIA_VISION_CHECK_IFSTREAM + is >> cols; + MEDIA_VISION_CHECK_IFSTREAM + is >> descriptorType; + MEDIA_VISION_CHECK_IFSTREAM + temporal.m_objectDescriptors = cv::Mat(rows, cols, descriptorType); + int value = 0; + for (int descriptorNum = 0; descriptorNum < temporal.m_objectDescriptors.rows; ++descriptorNum) { + for (int featureNum = 0; featureNum < temporal.m_objectDescriptors.cols; ++featureNum) { + is >> value; + MEDIA_VISION_CHECK_IFSTREAM + temporal.m_objectDescriptors.at(descriptorNum, featureNum) = (uchar)value; + } + } #undef MEDIA_VISION_CHECK_IFSTREAM - obj = temporal; + obj = temporal; - return is; + return is; } } /* Image */ diff --git a/mv_image/image/src/ImageRecognizer.cpp b/mv_image/image/src/ImageRecognizer.cpp index c921555..73dd335 100644 --- a/mv_image/image/src/ImageRecognizer.cpp +++ b/mv_image/image/src/ImageRecognizer.cpp @@ -19,293 +19,263 @@ #include "mv_private.h" -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { ImageRecognizer::ImageRecognizer( - const cv::Mat& sceneImage, - const FeaturesExtractingParams& params) : - m_scene(sceneImage, params) + const cv::Mat& sceneImage, + const FeaturesExtractingParams& params) : + m_scene(sceneImage, params) { - ; /* NULL */ + ; /* NULL */ } ImageRecognizer::ImageRecognizer(const ImageObject& scene) : - m_scene(scene) + m_scene(scene) { - ; /* NULL */ + ; /* NULL */ } ImageRecognizer::~ImageRecognizer() { - ; /* NULL */ + ; /* NULL */ } bool ImageRecognizer::recognize( - const ImageObject& target, - const RecognitionParams& params, - std::vector& contour) const + const ImageObject& target, + const RecognitionParams& params, + std::vector& contour) const { - cv::Mat homophraphyMatrix; - - contour.clear(); - - if (MinimumNumberOfFeatures > 
target.m_objectKeypoints.size()) - { - LOGW("[%s] Image object can't be recognized (Recognition rate is too small).", __FUNCTION__); - return false; - } - if (MinimumNumberOfFeatures > m_scene.m_objectKeypoints.size()) - { - LOGW("[%s] Scene image can't be analyzed (Too few features for recognition).", __FUNCTION__); - return false; - } - - if(!findHomophraphyMatrix(target, params, homophraphyMatrix)) - { - LOGE("[%s] Can't match the features.", __FUNCTION__); - return false; - } - - cv::perspectiveTransform(target.m_boundingContour, contour, homophraphyMatrix); - - if (target.m_boundingContour.size() == NumberOfQuadrangleCorners) - { - if (!isPossibleQuadrangleCorners(contour.data())) - { - LOGI("[%s] Image object isn't recognized.", __FUNCTION__); - contour.clear(); - return false; - } - } - - LOGI("[%s] Image object is recognized.", __FUNCTION__); - return true; + cv::Mat homophraphyMatrix; + + contour.clear(); + + if (MinimumNumberOfFeatures > target.m_objectKeypoints.size()) { + LOGW("[%s] Image object can't be recognized (Recognition rate is too small).", __FUNCTION__); + return false; + } + + if (MinimumNumberOfFeatures > m_scene.m_objectKeypoints.size()) { + LOGW("[%s] Scene image can't be analyzed (Too few features for recognition).", __FUNCTION__); + return false; + } + + if(!findHomophraphyMatrix(target, params, homophraphyMatrix)) { + LOGE("[%s] Can't match the features.", __FUNCTION__); + return false; + } + + cv::perspectiveTransform(target.m_boundingContour, contour, homophraphyMatrix); + + if (target.m_boundingContour.size() == NumberOfQuadrangleCorners) { + if (!isPossibleQuadrangleCorners(contour.data())) { + LOGI("[%s] Image object isn't recognized.", __FUNCTION__); + contour.clear(); + return false; + } + } + + LOGI("[%s] Image object is recognized.", __FUNCTION__); + return true; } bool ImageRecognizer::findHomophraphyMatrix( - const ImageObject& target, - const RecognitionParams& params, - cv::Mat& homophraphyMatrix) const + const ImageObject& 
target, + const RecognitionParams& params, + cv::Mat& homophraphyMatrix) const { - std::vector matches; - - m_matcher.match(target.m_objectDescriptors, m_scene.m_objectDescriptors, matches); - - size_t matchesNumber = matches.size(); - - if (MinimumNumberOfFeatures > matchesNumber) - { - LOGE("[%s] Can't match the features.", __FUNCTION__); - return false; - } - - size_t requiredMatchesNumber = - params.mRequiredMatchesPart * matchesNumber; - - size_t allowableMatchesNumberError = - params.mAllowableMatchesPartError * requiredMatchesNumber; - - if (matchesNumber - allowableMatchesNumberError > - (size_t)params.mMinMatchesNumber && - requiredMatchesNumber + allowableMatchesNumberError < - matchesNumber) - { - if (requiredMatchesNumber - allowableMatchesNumberError < - (size_t)params.mMinMatchesNumber) - { - if (requiredMatchesNumber + allowableMatchesNumberError > - (size_t)params.mMinMatchesNumber) - { - requiredMatchesNumber = ((size_t)params.mMinMatchesNumber + - requiredMatchesNumber + allowableMatchesNumberError) / 2; - - allowableMatchesNumberError = requiredMatchesNumber- - (size_t)params.mMinMatchesNumber + - allowableMatchesNumberError; - } - else - { - const size_t minimalAllowableMatchesNumberError = 2u; - - requiredMatchesNumber = params.mMinMatchesNumber + - minimalAllowableMatchesNumberError; - - allowableMatchesNumberError = minimalAllowableMatchesNumberError; - } - } - - const size_t filterAmount = matchesSelection(matches, - requiredMatchesNumber, - allowableMatchesNumberError); - - if (filterAmount >= MinimumNumberOfFeatures) - { - matches.resize(filterAmount); - } - else - { - LOGW("[%s] Wrong filtration of feature matches.", __FUNCTION__); - } - - matchesNumber = matches.size(); - } - - std::vector objectPoints(matchesNumber); - std::vector scenePoints(matchesNumber); - - for (size_t matchIdx = 0; matchIdx < matchesNumber; ++matchIdx) - { - objectPoints[matchIdx] = - target.m_objectKeypoints[matches[matchIdx].queryIdx].pt; - - 
scenePoints[matchIdx] = - m_scene.m_objectKeypoints[matches[matchIdx].trainIdx].pt; - } - - homophraphyMatrix = cv::findHomography(objectPoints, scenePoints, CV_RANSAC); - - return true; + std::vector matches; + + m_matcher.match(target.m_objectDescriptors, m_scene.m_objectDescriptors, matches); + + size_t matchesNumber = matches.size(); + + if (MinimumNumberOfFeatures > matchesNumber) { + LOGE("[%s] Can't match the features.", __FUNCTION__); + return false; + } + + size_t requiredMatchesNumber = + params.mRequiredMatchesPart * matchesNumber; + + size_t allowableMatchesNumberError = + params.mAllowableMatchesPartError * requiredMatchesNumber; + + if ((matchesNumber - allowableMatchesNumberError) > + (size_t)params.mMinMatchesNumber && + (requiredMatchesNumber + allowableMatchesNumberError) < + matchesNumber) { + if ((requiredMatchesNumber - allowableMatchesNumberError) < + (size_t)params.mMinMatchesNumber) { + if ((requiredMatchesNumber + allowableMatchesNumberError) > + (size_t)params.mMinMatchesNumber) { + requiredMatchesNumber = ((size_t)params.mMinMatchesNumber + + requiredMatchesNumber + allowableMatchesNumberError) / 2; + + allowableMatchesNumberError = requiredMatchesNumber- + (size_t)params.mMinMatchesNumber + + allowableMatchesNumberError; + } else { + const size_t minimalAllowableMatchesNumberError = 2u; + + requiredMatchesNumber = params.mMinMatchesNumber + + minimalAllowableMatchesNumberError; + + allowableMatchesNumberError = minimalAllowableMatchesNumberError; + } + } + + const size_t filterAmount = matchesSelection(matches, + requiredMatchesNumber, + allowableMatchesNumberError); + + if (filterAmount >= MinimumNumberOfFeatures) { + matches.resize(filterAmount); + } else { + LOGW("[%s] Wrong filtration of feature matches.", __FUNCTION__); + } + + matchesNumber = matches.size(); + } + + std::vector objectPoints(matchesNumber); + std::vector scenePoints(matchesNumber); + + for (size_t matchIdx = 0; matchIdx < matchesNumber; ++matchIdx) { + 
objectPoints[matchIdx] = + target.m_objectKeypoints[matches[matchIdx].queryIdx].pt; + + scenePoints[matchIdx] = + m_scene.m_objectKeypoints[matches[matchIdx].trainIdx].pt; + } + + homophraphyMatrix = cv::findHomography(objectPoints, scenePoints, CV_RANSAC); + + return true; } size_t ImageRecognizer::matchesSelection( - std::vector& examples, - unsigned int filterAmount, unsigned int allowableError) const + std::vector& examples, + unsigned int filterAmount, unsigned int allowableError) const { - size_t sizeOfExamples = examples.size(); - - if ((filterAmount + allowableError) > sizeOfExamples) - { - return examples.size(); - } - - int startLeftLimit = 0; - int startRightLimit = sizeOfExamples - 1; - - int leftLimit = startLeftLimit; - int rightLimit = startRightLimit; - - int requiredNumber = filterAmount; - - float supportElement = 0.f; - - while (true) - { - if (leftLimit >= rightLimit) - { - if (leftLimit < (requiredNumber - (int)allowableError)) - { - leftLimit = requiredNumber + (int)allowableError; - } - - break; - } - - supportElement = computeLinearSupportElement(examples, requiredNumber, - leftLimit, rightLimit); - - // Iteration similar quicksort - while (true) - { - // Search the leftmost element which have bigger confidence than support element - while (examples[leftLimit].distance <= supportElement && - leftLimit < startRightLimit) - { - ++leftLimit; - } - - // Search the rightmost element which have smaller confidence than support element - while (examples[rightLimit].distance >= supportElement && - rightLimit >= startLeftLimit) - { - --rightLimit; - } - - if (leftLimit >= rightLimit) - { - break; - } - - // Swap - std::swap(examples[leftLimit], examples[rightLimit]); - } - if (abs(filterAmount - leftLimit) <= (int)allowableError) - { - break; - } - if ((int)filterAmount > leftLimit) - { - requiredNumber -= leftLimit - startLeftLimit; - - rightLimit = startRightLimit; - startLeftLimit = leftLimit; - } - else - { - leftLimit = startLeftLimit; - 
startRightLimit = rightLimit; - } - } - - return (size_t)leftLimit; + size_t sizeOfExamples = examples.size(); + + if ((filterAmount + allowableError) > sizeOfExamples) { + return examples.size(); + } + + int startLeftLimit = 0; + int startRightLimit = sizeOfExamples - 1; + + int leftLimit = startLeftLimit; + int rightLimit = startRightLimit; + + int requiredNumber = filterAmount; + + float supportElement = 0.f; + + while (true) { + if (leftLimit >= rightLimit) { + if (leftLimit < (requiredNumber - (int)allowableError)) { + leftLimit = requiredNumber + (int)allowableError; + } + + break; + } + + supportElement = computeLinearSupportElement(examples, requiredNumber, + leftLimit, rightLimit); + + /* Iteration similar quicksort */ + while (true) { + /* Search the leftmost element + *which have bigger confidence than support element + */ + while (examples[leftLimit].distance <= supportElement && + leftLimit < startRightLimit) { + ++leftLimit; + } + + /* Search the rightmost element + *which have smaller confidence than support element + */ + while (examples[rightLimit].distance >= supportElement && + rightLimit >= startLeftLimit) { + --rightLimit; + } + + if (leftLimit >= rightLimit) { + break; + } + + /* Swap */ + std::swap(examples[leftLimit], examples[rightLimit]); + } + if (abs(filterAmount - leftLimit) <= (int)allowableError) { + break; + } + if ((int)filterAmount > leftLimit) { + requiredNumber -= leftLimit - startLeftLimit; + + rightLimit = startRightLimit; + startLeftLimit = leftLimit; + } else { + leftLimit = startLeftLimit; + startRightLimit = rightLimit; + } + } + + return (size_t)leftLimit; } float ImageRecognizer::computeLinearSupportElement(const std::vector& examples, - int requiredNumber, int leftLimit, int rightLimit) const + int requiredNumber, int leftLimit, int rightLimit) const { - int sizeOfExamples = rightLimit - leftLimit + 1; - - if (sizeOfExamples <= 1) - { - return examples[leftLimit].distance; - } - - float minValue = 
examples[leftLimit].distance; - float maxValue = examples[leftLimit].distance; - - // Finding the maximum and minimum values - for (int i = leftLimit + 1; i <= rightLimit; ++i) - { - if (minValue > examples[i].distance) - { - minValue = examples[i].distance; - } - else if (maxValue < examples[i].distance) - { - maxValue = examples[i].distance; - } - } - - // Linear approximation. f(x) = k*x + b - // f(sizeOfExamples) = maxValue; f(1) = minValue; - const float b = (maxValue - minValue * sizeOfExamples) / (1 - sizeOfExamples); - const float k = minValue - b; - - // Calculation of the support element - return k * requiredNumber + b; + int sizeOfExamples = rightLimit - leftLimit + 1; + + if (sizeOfExamples <= 1) { + return examples[leftLimit].distance; + } + + float minValue = examples[leftLimit].distance; + float maxValue = examples[leftLimit].distance; + + /* Finding the maximum and minimum values */ + for (int i = leftLimit + 1; i <= rightLimit; ++i) { + if (minValue > examples[i].distance) { + minValue = examples[i].distance; + } else if (maxValue < examples[i].distance) { + maxValue = examples[i].distance; + } + } + + /* Linear approximation. 
f(x) = k*x + b + * f(sizeOfExamples) = maxValue; f(1) = minValue; + */ + const float b = (maxValue - minValue * sizeOfExamples) / (1 - sizeOfExamples); + const float k = minValue - b; + + /* Calculation of the support element */ + return k * requiredNumber + b; } bool ImageRecognizer::isPossibleQuadrangleCorners( - const cv::Point2f corners[NumberOfQuadrangleCorners]) + const cv::Point2f corners[NumberOfQuadrangleCorners]) { - static const float Epsilon = cv::TermCriteria::EPS; - static const float MinSizeOfDetectedArea = 30.f; + static const float Epsilon = cv::TermCriteria::EPS; + static const float MinSizeOfDetectedArea = 30.f; - const float firstSemiArea = getTriangleArea(corners[0], corners[2], corners[1]) + - getTriangleArea(corners[0], corners[2], corners[3]); + const float firstSemiArea = getTriangleArea(corners[0], corners[2], corners[1]) + + getTriangleArea(corners[0], corners[2], corners[3]); - const float secondSemiArea = getTriangleArea(corners[1], corners[3], corners[2]) + - getTriangleArea(corners[1], corners[3], corners[0]); + const float secondSemiArea = getTriangleArea(corners[1], corners[3], corners[2]) + + getTriangleArea(corners[1], corners[3], corners[0]); - if (Epsilon < fabs(firstSemiArea - secondSemiArea) || - MinSizeOfDetectedArea > (firstSemiArea + secondSemiArea)) - { - return false; - } + if (Epsilon < fabs(firstSemiArea - secondSemiArea) || + MinSizeOfDetectedArea > (firstSemiArea + secondSemiArea)) { + return false; + } - return true; + return true; } } /* Image */ diff --git a/mv_image/image/src/ImageTracker.cpp b/mv_image/image/src/ImageTracker.cpp index 9c114f5..400205c 100644 --- a/mv_image/image/src/ImageTracker.cpp +++ b/mv_image/image/src/ImageTracker.cpp @@ -25,347 +25,307 @@ #include -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { ImageTracker::ImageTracker(const TrackingParams& trackingParams) : - m_trackingParams(trackingParams) + m_trackingParams(trackingParams) { - ; /* NULL */ 
+ ; /* NULL */ } void ImageTracker::track(const cv::Mat& frame, ImageTrackingModel& target) { - ImageTrackingModel::State currentState = ImageTrackingModel::Undetected; - - while (pthread_mutex_trylock(&target.m_globalGuard) != 0) - { - pthread_spin_lock(&target.m_stateGuard); - currentState = target.m_state; - pthread_spin_unlock(&target.m_stateGuard); - - if (ImageTrackingModel::InProcess == currentState) - { - LOGI("[%s] Calling is skipped. Object is recognizing.", __FUNCTION__); - return; - } - } - - pthread_spin_lock(&target.m_stateGuard); - currentState = target.m_state; - pthread_spin_unlock(&target.m_stateGuard); - - if (ImageTrackingModel::Invalid == currentState) - { - pthread_mutex_unlock(&target.m_globalGuard); - LOGE("[%s] Tracking model is invalid.", __FUNCTION__); - return; - } - - switch (target.m_state) - { - case ImageTrackingModel::Appeared: - case ImageTrackingModel::Tracked: - { - pthread_spin_lock(&target.m_stateGuard); - target.m_state = ImageTrackingModel::InProcess; - pthread_spin_unlock(&target.m_stateGuard); - - trackDetectedObject(frame, target); - break; - } - case ImageTrackingModel::Undetected: - { - pthread_spin_lock(&target.m_stateGuard); - target.m_state = ImageTrackingModel::InProcess; - pthread_spin_unlock(&target.m_stateGuard); - - trackUndetectedObject(frame, target); - - // Recognition thread is started. Don't use target here, just exit! - return; - } - case ImageTrackingModel::InProcess: - default: - { - // Abnormal behaviour: - // tracking model state is InProcess but globalGuard is not locked - LOGE("[%s] Abnormal behaviour. 
Tracking model status is" - "\"InProgress\" but it is not in progress.", __FUNCTION__); - - pthread_spin_lock(&target.m_stateGuard); - if (target.m_recognitionObject.isEmpty()) - { - target.m_state = ImageTrackingModel::Invalid; - LOGI("[%s] Tracking model status is changed on \"Invalid\"", __FUNCTION__); - } - else - { - target.m_state = ImageTrackingModel::Undetected; - LOGI("[%s] Tracking model status is changed on \"Undetected\"", __FUNCTION__); - } - pthread_spin_unlock(&target.m_stateGuard); - - pthread_mutex_unlock(&target.m_globalGuard); - break; - } - } + ImageTrackingModel::State currentState = ImageTrackingModel::Undetected; + + while (pthread_mutex_trylock(&target.m_globalGuard) != 0) { + pthread_spin_lock(&target.m_stateGuard); + currentState = target.m_state; + pthread_spin_unlock(&target.m_stateGuard); + + if (ImageTrackingModel::InProcess == currentState) { + LOGI("[%s] Calling is skipped. Object is recognizing.", __FUNCTION__); + return; + } + } + + pthread_spin_lock(&target.m_stateGuard); + currentState = target.m_state; + pthread_spin_unlock(&target.m_stateGuard); + + if (ImageTrackingModel::Invalid == currentState) { + pthread_mutex_unlock(&target.m_globalGuard); + LOGE("[%s] Tracking model is invalid.", __FUNCTION__); + return; + } + + switch (target.m_state) { + case ImageTrackingModel::Appeared: + case ImageTrackingModel::Tracked: { + pthread_spin_lock(&target.m_stateGuard); + target.m_state = ImageTrackingModel::InProcess; + pthread_spin_unlock(&target.m_stateGuard); + + trackDetectedObject(frame, target); + break; + } + case ImageTrackingModel::Undetected: { + pthread_spin_lock(&target.m_stateGuard); + target.m_state = ImageTrackingModel::InProcess; + pthread_spin_unlock(&target.m_stateGuard); + + trackUndetectedObject(frame, target); + + /* Recognition thread is started. Don't use target here, just exit! 
*/ + return; + } + case ImageTrackingModel::InProcess: + default: { + /* Abnormal behaviour: + * tracking model state is InProcess but globalGuard is not locked + */ + LOGE("[%s] Abnormal behaviour. Tracking model status is" + "\"InProgress\" but it is not in progress.", __FUNCTION__); + + pthread_spin_lock(&target.m_stateGuard); + if (target.m_recognitionObject.isEmpty()) { + target.m_state = ImageTrackingModel::Invalid; + LOGI("[%s] Tracking model status is changed on \"Invalid\"", __FUNCTION__); + } else { + target.m_state = ImageTrackingModel::Undetected; + LOGI("[%s] Tracking model status is changed on \"Undetected\"", __FUNCTION__); + } + pthread_spin_unlock(&target.m_stateGuard); + + pthread_mutex_unlock(&target.m_globalGuard); + break; + } + } } void ImageTracker::trackDetectedObject( - const cv::Mat& frame, - ImageTrackingModel& target) + const cv::Mat& frame, + ImageTrackingModel& target) { - cv::Rect expectedArea = computeExpectedArea(target, frame.size()); - - std::vector resultContour; - - ImageRecognizer recognizer( - frame(expectedArea), - m_trackingParams.mFramesFeaturesExtractingParams); - - const bool isRecognized = recognizer.recognize( - target.m_recognitionObject, - m_trackingParams.mRecognitionParams, - resultContour); - - if (isRecognized) - { - for (size_t pointIdx = 0; pointIdx < resultContour.size(); ++pointIdx) - { - resultContour[pointIdx].x += expectedArea.x; - resultContour[pointIdx].y += expectedArea.y; - } - - if (m_trackingParams.mStabilizationParams.mHistoryAmount > 0) - { - target.m_stabilizator.stabilize( - resultContour, - m_trackingParams.mStabilizationParams); - } - - target.m_stabilizator.stabilize( - resultContour, - m_trackingParams.mStabilizationParams); - - pthread_spin_lock(&target.m_lastLocationGuard); - target.m_lastLocation = resultContour; - pthread_spin_unlock(&target.m_lastLocationGuard); - - pthread_spin_lock(&target.m_stateGuard); - target.m_state = ImageTrackingModel::Tracked; - 
pthread_spin_unlock(&target.m_stateGuard); - - LOGI("[%s] Object is successfully tracked.", __FUNCTION__); - } - else - { - target.m_stabilizator.reset(); - - pthread_spin_lock(&target.m_stateGuard); - target.m_state = ImageTrackingModel::Undetected; - pthread_spin_unlock(&target.m_stateGuard); - - LOGI("[%s] Object is lost.", __FUNCTION__); - } - - pthread_mutex_unlock(&target.m_globalGuard); + cv::Rect expectedArea = computeExpectedArea(target, frame.size()); + + std::vector resultContour; + + ImageRecognizer recognizer( + frame(expectedArea), + m_trackingParams.mFramesFeaturesExtractingParams); + + const bool isRecognized = recognizer.recognize( + target.m_recognitionObject, + m_trackingParams.mRecognitionParams, + resultContour); + + if (isRecognized) { + for (size_t pointIdx = 0; pointIdx < resultContour.size(); ++pointIdx) { + resultContour[pointIdx].x += expectedArea.x; + resultContour[pointIdx].y += expectedArea.y; + } + + if (m_trackingParams.mStabilizationParams.mHistoryAmount > 0) { + target.m_stabilizator.stabilize( + resultContour, + m_trackingParams.mStabilizationParams); + } + + target.m_stabilizator.stabilize( + resultContour, + m_trackingParams.mStabilizationParams); + + pthread_spin_lock(&target.m_lastLocationGuard); + target.m_lastLocation = resultContour; + pthread_spin_unlock(&target.m_lastLocationGuard); + + pthread_spin_lock(&target.m_stateGuard); + target.m_state = ImageTrackingModel::Tracked; + pthread_spin_unlock(&target.m_stateGuard); + + LOGI("[%s] Object is successfully tracked.", __FUNCTION__); + } else { + target.m_stabilizator.reset(); + + pthread_spin_lock(&target.m_stateGuard); + target.m_state = ImageTrackingModel::Undetected; + pthread_spin_unlock(&target.m_stateGuard); + + LOGI("[%s] Object is lost.", __FUNCTION__); + } + + pthread_mutex_unlock(&target.m_globalGuard); } void *ImageTracker::recognitionThreadFunc(void *recognitionInfo) { - if (NULL == recognitionInfo) - { - return NULL; - } + if (NULL == recognitionInfo) { + 
return NULL; + } - RecognitionInfo *recogInfo = (RecognitionInfo*)recognitionInfo; + RecognitionInfo *recogInfo = (RecognitionInfo*)recognitionInfo; - std::vector resultContour; + std::vector resultContour; - ImageRecognizer recognizer( - recogInfo->mFrame, - recogInfo->mSceneFeaturesExtractingParams); + ImageRecognizer recognizer( + recogInfo->mFrame, + recogInfo->mSceneFeaturesExtractingParams); - bool isRecognized = recognizer.recognize( - recogInfo->mpTarget->m_recognitionObject, - recogInfo->mRecognitionParams, - resultContour); + bool isRecognized = recognizer.recognize( + recogInfo->mpTarget->m_recognitionObject, + recogInfo->mRecognitionParams, + resultContour); - if (isRecognized) - { - recogInfo->mpTarget->m_stabilizator.reset(); + if (isRecognized) { + recogInfo->mpTarget->m_stabilizator.reset(); - pthread_spin_lock(&(recogInfo->mpTarget->m_lastLocationGuard)); - recogInfo->mpTarget->m_lastLocation = resultContour; - pthread_spin_unlock(&(recogInfo->mpTarget->m_lastLocationGuard)); + pthread_spin_lock(&(recogInfo->mpTarget->m_lastLocationGuard)); + recogInfo->mpTarget->m_lastLocation = resultContour; + pthread_spin_unlock(&(recogInfo->mpTarget->m_lastLocationGuard)); - pthread_spin_lock(&(recogInfo->mpTarget->m_stateGuard)); - recogInfo->mpTarget->m_state = ImageTrackingModel::Appeared; - pthread_spin_unlock(&(recogInfo->mpTarget->m_stateGuard)); - } - else - { - pthread_spin_lock(&(recogInfo->mpTarget->m_stateGuard)); - recogInfo->mpTarget->m_state = ImageTrackingModel::Undetected; - pthread_spin_unlock(&(recogInfo->mpTarget->m_stateGuard)); - } + pthread_spin_lock(&(recogInfo->mpTarget->m_stateGuard)); + recogInfo->mpTarget->m_state = ImageTrackingModel::Appeared; + pthread_spin_unlock(&(recogInfo->mpTarget->m_stateGuard)); + } else { + pthread_spin_lock(&(recogInfo->mpTarget->m_stateGuard)); + recogInfo->mpTarget->m_state = ImageTrackingModel::Undetected; + pthread_spin_unlock(&(recogInfo->mpTarget->m_stateGuard)); + } - 
recogInfo->mpTarget->m_recognitionThread = 0; + recogInfo->mpTarget->m_recognitionThread = 0; - pthread_mutex_unlock(&(recogInfo->mpTarget->m_globalGuard)); + pthread_mutex_unlock(&(recogInfo->mpTarget->m_globalGuard)); - delete recogInfo; + delete recogInfo; - return NULL; + return NULL; } void ImageTracker::trackUndetectedObject( - const cv::Mat& frame, - ImageTrackingModel& target) + const cv::Mat& frame, + ImageTrackingModel& target) { - RecognitionInfo *recognitionInfo = new RecognitionInfo; - - recognitionInfo->mFrame = frame.clone(); - recognitionInfo->mpTarget = ⌖ - - recognitionInfo->mRecognitionParams = - m_trackingParams.mRecognitionParams; - recognitionInfo->mSceneFeaturesExtractingParams = - m_trackingParams.mFramesFeaturesExtractingParams; - - if (target.m_recognitionThread) - { - // Abnormal behaviour: - // Recognition thread isn't finished but guardian mutex is unlocked - LOGE("[%s] Abnormal behaviour. Recognition thread isn't finished but" - "guardian mutex is unlocked.", __FUNCTION__); - - LOGI("[%s] Try to wait recognition thread.", __FUNCTION__); - pthread_join(target.m_recognitionThread, NULL); - target.m_recognitionThread = 0; - LOGI("[%s] Recognition thread is finished.", __FUNCTION__); - } - - const int err = pthread_create( - &target.m_recognitionThread, - NULL, - recognitionThreadFunc, - recognitionInfo); - - if (0 == err) - { - LOGI("[%s] Recognition thread is started.", __FUNCTION__); - // Recognition thread is started. Don't use target here, just exit! 
- return; - } - LOGE("[%s] Recognition thread creation is failed.", __FUNCTION__); - - pthread_spin_lock(&target.m_stateGuard); - if (target.m_recognitionObject.isEmpty()) - { - target.m_state = ImageTrackingModel::Invalid; - LOGI("[%s] Tracking model status is changed on \"Invalid\"", __FUNCTION__); - } - else - { - target.m_state = ImageTrackingModel::Undetected; - LOGI("[%s] Tracking model status is changed on \"Undetected\"", __FUNCTION__); - } - pthread_spin_unlock(&target.m_stateGuard); - - pthread_mutex_unlock(&target.m_globalGuard); + RecognitionInfo *recognitionInfo = new RecognitionInfo; + + recognitionInfo->mFrame = frame.clone(); + recognitionInfo->mpTarget = ⌖ + + recognitionInfo->mRecognitionParams = + m_trackingParams.mRecognitionParams; + recognitionInfo->mSceneFeaturesExtractingParams = + m_trackingParams.mFramesFeaturesExtractingParams; + + if (target.m_recognitionThread) { + /* Abnormal behaviour: + * Recognition thread isn't finished but guardian mutex is unlocked + */ + LOGE("[%s] Abnormal behaviour. Recognition thread isn't finished but" + "guardian mutex is unlocked.", __FUNCTION__); + + LOGI("[%s] Try to wait recognition thread.", __FUNCTION__); + pthread_join(target.m_recognitionThread, NULL); + target.m_recognitionThread = 0; + LOGI("[%s] Recognition thread is finished.", __FUNCTION__); + } + + const int err = pthread_create( + &target.m_recognitionThread, + NULL, + recognitionThreadFunc, + recognitionInfo); + + if (0 == err) { + LOGI("[%s] Recognition thread is started.", __FUNCTION__); + /* Recognition thread is started. Don't use target here, just exit! 
*/ + return; + } + LOGE("[%s] Recognition thread creation is failed.", __FUNCTION__); + + pthread_spin_lock(&target.m_stateGuard); + if (target.m_recognitionObject.isEmpty()) { + target.m_state = ImageTrackingModel::Invalid; + LOGI("[%s] Tracking model status is changed on \"Invalid\"", __FUNCTION__); + } else { + target.m_state = ImageTrackingModel::Undetected; + LOGI("[%s] Tracking model status is changed on \"Undetected\"", __FUNCTION__); + } + pthread_spin_unlock(&target.m_stateGuard); + + pthread_mutex_unlock(&target.m_globalGuard); } cv::Rect ImageTracker::computeExpectedArea( const ImageTrackingModel& target, const cv::Size& frameSize) { - if (target.m_state == ImageTrackingModel::Appeared) - { - LOGI("[%s] Expected area for appeared object is full frame.", __FUNCTION__); - return cv::Rect(0, 0, frameSize.width, frameSize.height); - } - - if (target.m_lastLocation.empty()) - { - LOGW("[%s] Can't compute expected area for object without last" - "location.",__FUNCTION__); - return cv::Rect(0, 0, 0, 0); - } - - cv::Point2f ltCorner(target.m_lastLocation[0]); - cv::Point2f rbCorner(target.m_lastLocation[0]); - - const size_t contourPointsNumber = target.m_lastLocation.size(); - - for (size_t pointNum = 1; pointNum < contourPointsNumber; ++pointNum) - { - if (ltCorner.x > target.m_lastLocation[pointNum].x) - { - ltCorner.x = target.m_lastLocation[pointNum].x; - } - else if (rbCorner.x < target.m_lastLocation[pointNum].x) - { - rbCorner.x = target.m_lastLocation[pointNum].x; - } - - if (ltCorner.y > target.m_lastLocation[pointNum].y) - { - ltCorner.y = target.m_lastLocation[pointNum].y; - } - else if (rbCorner.y < target.m_lastLocation[pointNum].y) - { - rbCorner.y = target.m_lastLocation[pointNum].y; - } - } - - cv::Point2f center( - (ltCorner.x + rbCorner.x) / 2.0f, - (ltCorner.y + rbCorner.y) / 2.0f); - - cv::Size2f halfSize( - (center.x - ltCorner.x) * (1 + m_trackingParams.mExpectedOffset), - (center.y - ltCorner.y) * (1 + m_trackingParams.mExpectedOffset)); 
- - - cv::Rect expectedArea( - center.x - halfSize.width, center.y - halfSize.height, - halfSize.width * 2, halfSize.height * 2); - - if (expectedArea.x < 0) - { - expectedArea.width += expectedArea.x; - expectedArea.x = 0; - } - - if (expectedArea.y < 0) - { - expectedArea.height += expectedArea.y; - expectedArea.y = 0; - } - - if (expectedArea.x + expectedArea.width > frameSize.width) - { - expectedArea.width = frameSize.width - expectedArea.x; - } - - if (expectedArea.y + expectedArea.height > frameSize.height) - { - expectedArea.height = frameSize.height - expectedArea.y; - } - - if (expectedArea.width <= 0 || expectedArea.height <= 0) - { - expectedArea.x = 0; - expectedArea.y = 0; - expectedArea.width = 0; - expectedArea.height = 0; - } - - return expectedArea; + if (target.m_state == ImageTrackingModel::Appeared) { + LOGI("[%s] Expected area for appeared object is full frame.", __FUNCTION__); + return cv::Rect(0, 0, frameSize.width, frameSize.height); + } + + if (target.m_lastLocation.empty()) { + LOGW("[%s] Can't compute expected area for object without last" + "location.", __FUNCTION__); + return cv::Rect(0, 0, 0, 0); + } + + cv::Point2f ltCorner(target.m_lastLocation[0]); + cv::Point2f rbCorner(target.m_lastLocation[0]); + + const size_t contourPointsNumber = target.m_lastLocation.size(); + + for (size_t pointNum = 1; pointNum < contourPointsNumber; ++pointNum) { + if (ltCorner.x > target.m_lastLocation[pointNum].x) { + ltCorner.x = target.m_lastLocation[pointNum].x; + } else if (rbCorner.x < target.m_lastLocation[pointNum].x) { + rbCorner.x = target.m_lastLocation[pointNum].x; + } + + if (ltCorner.y > target.m_lastLocation[pointNum].y) { + ltCorner.y = target.m_lastLocation[pointNum].y; + } else if (rbCorner.y < target.m_lastLocation[pointNum].y) { + rbCorner.y = target.m_lastLocation[pointNum].y; + } + } + + cv::Point2f center( + (ltCorner.x + rbCorner.x) / 2.0f, + (ltCorner.y + rbCorner.y) / 2.0f); + + cv::Size2f halfSize( + (center.x - ltCorner.x) * 
(1 + m_trackingParams.mExpectedOffset), + (center.y - ltCorner.y) * (1 + m_trackingParams.mExpectedOffset)); + + cv::Rect expectedArea( + center.x - halfSize.width, center.y - halfSize.height, + halfSize.width * 2, halfSize.height * 2); + + if (expectedArea.x < 0) { + expectedArea.width += expectedArea.x; + expectedArea.x = 0; + } + + if (expectedArea.y < 0) { + expectedArea.height += expectedArea.y; + expectedArea.y = 0; + } + + if (expectedArea.x + expectedArea.width > frameSize.width) { + expectedArea.width = frameSize.width - expectedArea.x; + } + + if (expectedArea.y + expectedArea.height > frameSize.height) { + expectedArea.height = frameSize.height - expectedArea.y; + } + + if (expectedArea.width <= 0 || expectedArea.height <= 0) { + expectedArea.x = 0; + expectedArea.y = 0; + expectedArea.width = 0; + expectedArea.height = 0; + } + + return expectedArea; } } /* Image */ diff --git a/mv_image/image/src/ImageTrackingModel.cpp b/mv_image/image/src/ImageTrackingModel.cpp index e0a75c9..014a629 100644 --- a/mv_image/image/src/ImageTrackingModel.cpp +++ b/mv_image/image/src/ImageTrackingModel.cpp @@ -24,242 +24,227 @@ #include #include -namespace MediaVision -{ -namespace Image -{ - +namespace MediaVision { +namespace Image { ImageTrackingModel::ImageTrackingModel() : - m_recognitionObject(), - m_lastLocation(0), - m_state(Invalid), - m_recognitionThread(0) + m_recognitionObject(), + m_lastLocation(0), + m_state(Invalid), + m_recognitionThread(0) { - pthread_mutex_init(&m_globalGuard, NULL); - pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED); - pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED); + pthread_mutex_init(&m_globalGuard, NULL); + pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED); + pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED); } ImageTrackingModel::ImageTrackingModel(const ImageObject& recognitionObject) : - m_recognitionObject(recognitionObject), - m_lastLocation(0), - m_state(Invalid), - 
m_recognitionThread(0) + m_recognitionObject(recognitionObject), + m_lastLocation(0), + m_state(Invalid), + m_recognitionThread(0) { - if (!recognitionObject.isEmpty()) - { - m_state = Undetected; - } - pthread_mutex_init(&m_globalGuard, NULL); - pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED); - pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED); + if (!recognitionObject.isEmpty()) { + m_state = Undetected; + } + pthread_mutex_init(&m_globalGuard, NULL); + pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED); + pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED); } ImageTrackingModel::ImageTrackingModel(const ImageTrackingModel& copy) : - m_recognitionThread(0) + m_recognitionThread(0) { - pthread_mutex_init(&m_globalGuard, NULL); - pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED); - pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED); + pthread_mutex_init(&m_globalGuard, NULL); + pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED); + pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED); - *this = copy; + *this = copy; } ImageTrackingModel::~ImageTrackingModel() { - if (m_recognitionThread) - { - pthread_join(m_recognitionThread, NULL); - } - - pthread_mutex_destroy(&m_globalGuard); - pthread_spin_destroy(&m_lastLocationGuard); - pthread_spin_destroy(&m_stateGuard); + if (m_recognitionThread) { + pthread_join(m_recognitionThread, NULL); + } + + pthread_mutex_destroy(&m_globalGuard); + pthread_spin_destroy(&m_lastLocationGuard); + pthread_spin_destroy(&m_stateGuard); } void ImageTrackingModel::setTarget(const ImageObject& target) { - pthread_mutex_lock(&m_globalGuard); + pthread_mutex_lock(&m_globalGuard); - pthread_spin_lock(&m_stateGuard); - m_state = target.isEmpty() ? Invalid : Undetected; - pthread_spin_unlock(&m_stateGuard); + pthread_spin_lock(&m_stateGuard); + m_state = target.isEmpty() ? 
Invalid : Undetected; + pthread_spin_unlock(&m_stateGuard); - pthread_spin_lock(&m_lastLocationGuard); - m_lastLocation.clear(); - pthread_spin_unlock(&m_lastLocationGuard); + pthread_spin_lock(&m_lastLocationGuard); + m_lastLocation.clear(); + pthread_spin_unlock(&m_lastLocationGuard); - LOGI("[%s] Target is set into tracking model.", __FUNCTION__); + LOGI("[%s] Target is set into tracking model.", __FUNCTION__); - m_recognitionObject = target; + m_recognitionObject = target; - pthread_mutex_unlock(&m_globalGuard); + pthread_mutex_unlock(&m_globalGuard); } void ImageTrackingModel::refresh(void) { - pthread_mutex_lock(&m_globalGuard); + pthread_mutex_lock(&m_globalGuard); - pthread_spin_lock(&m_stateGuard); - m_state = m_recognitionObject.isEmpty() ? Invalid : Undetected; - pthread_spin_unlock(&m_stateGuard); + pthread_spin_lock(&m_stateGuard); + m_state = m_recognitionObject.isEmpty() ? Invalid : Undetected; + pthread_spin_unlock(&m_stateGuard); - pthread_spin_lock(&m_lastLocationGuard); - m_lastLocation.clear(); - pthread_spin_unlock(&m_lastLocationGuard); + pthread_spin_lock(&m_lastLocationGuard); + m_lastLocation.clear(); + pthread_spin_unlock(&m_lastLocationGuard); - LOGI("[%s] Image tracking model is refreshed.", __FUNCTION__); + LOGI("[%s] Image tracking model is refreshed.", __FUNCTION__); - pthread_mutex_unlock(&m_globalGuard); + pthread_mutex_unlock(&m_globalGuard); } bool ImageTrackingModel::isValid() const { - bool result = false; + bool result = false; - pthread_spin_lock(&m_stateGuard); - result = (m_state != Invalid); - pthread_spin_unlock(&m_stateGuard); + pthread_spin_lock(&m_stateGuard); + result = (m_state != Invalid); + pthread_spin_unlock(&m_stateGuard); - return result; + return result; } ImageTrackingModel& ImageTrackingModel::operator=(const ImageTrackingModel& copy) { - if (this != ©) - { - pthread_mutex_t *higherMutex = &m_globalGuard; - pthread_mutex_t *lowerMutex = ©.m_globalGuard; - - if (higherMutex < lowerMutex) - { - 
std::swap(higherMutex, lowerMutex); - } - - pthread_mutex_lock(higherMutex); - pthread_mutex_lock(lowerMutex); - - m_recognitionObject = copy.m_recognitionObject; - - pthread_spin_lock(&m_lastLocationGuard); - m_lastLocation = copy.m_lastLocation; - pthread_spin_unlock(&m_lastLocationGuard); - - if (copy.m_state == InProcess) - { - pthread_spin_lock(&m_stateGuard); - m_state = m_recognitionObject.isEmpty() ? Invalid : Undetected; - pthread_spin_unlock(&m_stateGuard); - } - else - { - pthread_spin_lock(&m_stateGuard); - m_state = copy.m_state; - pthread_spin_unlock(&m_stateGuard); - } - - pthread_mutex_unlock(lowerMutex); - pthread_mutex_unlock(higherMutex); - } - - return *this; + if (this != ©) { + pthread_mutex_t *higherMutex = &m_globalGuard; + pthread_mutex_t *lowerMutex = ©.m_globalGuard; + + if (higherMutex < lowerMutex) { + std::swap(higherMutex, lowerMutex); + } + + pthread_mutex_lock(higherMutex); + pthread_mutex_lock(lowerMutex); + + m_recognitionObject = copy.m_recognitionObject; + + pthread_spin_lock(&m_lastLocationGuard); + m_lastLocation = copy.m_lastLocation; + pthread_spin_unlock(&m_lastLocationGuard); + + if (copy.m_state == InProcess) { + pthread_spin_lock(&m_stateGuard); + m_state = m_recognitionObject.isEmpty() ? 
Invalid : Undetected; + pthread_spin_unlock(&m_stateGuard); + } else { + pthread_spin_lock(&m_stateGuard); + m_state = copy.m_state; + pthread_spin_unlock(&m_stateGuard); + } + + pthread_mutex_unlock(lowerMutex); + pthread_mutex_unlock(higherMutex); + } + + return *this; } int ImageTrackingModel::save(const char *fileName) const { - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); + std::string prefix_path = std::string(app_get_data_path()); + LOGD("prefix_path: %s", prefix_path.c_str()); - std::string filePath; - filePath += prefix_path; - filePath += fileName; + std::string filePath; + filePath += prefix_path; + filePath += fileName; - /* check the directory is available */ - std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/')); - if (access(prefix_path_check.c_str(),F_OK)) - { - LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefix_path_check.c_str()); + /* check the directory is available */ + std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/')); + if (access(prefix_path_check.c_str(), F_OK)) { + LOGE("Can't save tracking model. 
Path[%s] doesn't existed.", prefix_path_check.c_str()); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + return MEDIA_VISION_ERROR_INVALID_PATH; + } - std::ofstream out; - out.open(filePath.c_str()); + std::ofstream out; + out.open(filePath.c_str()); - if (!out.is_open()) - { - LOGE("[%s] Can't create/open file.", __FUNCTION__); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } + if (!out.is_open()) { + LOGE("[%s] Can't create/open file.", __FUNCTION__); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } - out<<(*this); + out << (*this); - out.close(); - LOGI("[%s] Image tracking model is saved.", __FUNCTION__); + out.close(); + LOGI("[%s] Image tracking model is saved.", __FUNCTION__); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int ImageTrackingModel::load(const char *fileName) { - /* find directory */ - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); + /* find directory */ + std::string prefix_path = std::string(app_get_data_path()); + LOGD("prefix_path: %s", prefix_path.c_str()); - std::string filePath; - filePath += prefix_path; - filePath += fileName; + std::string filePath; + filePath += prefix_path; + filePath += fileName; - if (access(filePath.c_str(),F_OK)) - { - LOGE("Can't load tracking model. Path[%s] doesn't existed.", filePath.c_str()); + if (access(filePath.c_str(), F_OK)) { + LOGE("Can't load tracking model. 
Path[%s] doesn't existed.", filePath.c_str()); - return MEDIA_VISION_ERROR_INVALID_PATH; - } + return MEDIA_VISION_ERROR_INVALID_PATH; + } - std::ifstream in; - in.open(filePath.c_str()); + std::ifstream in; + in.open(filePath.c_str()); - if (!in.is_open()) - { - LOGE("[%s] Can't open file.", __FUNCTION__); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } + if (!in.is_open()) { + LOGE("[%s] Can't open file.", __FUNCTION__); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } - in>>(*this); + in >> (*this); - if (!in.good()) - { - LOGE("[%s] Unexpected end of file.", __FUNCTION__); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } + if (!in.good()) { + LOGE("[%s] Unexpected end of file.", __FUNCTION__); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } - in.close(); - LOGI("[%s] Image tracking model is loaded.", __FUNCTION__); + in.close(); + LOGI("[%s] Image tracking model is loaded.", __FUNCTION__); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } bool ImageTrackingModel::isDetected() const { - bool result = false; + bool result = false; - pthread_spin_lock(&m_stateGuard); - result = (m_state == Tracked); - pthread_spin_unlock(&m_stateGuard); + pthread_spin_lock(&m_stateGuard); + result = (m_state == Tracked); + pthread_spin_unlock(&m_stateGuard); - return result; + return result; } std::vector ImageTrackingModel::getLastlocation() const { - std::vector result; + std::vector result; - pthread_spin_lock(&m_lastLocationGuard); - result = m_lastLocation; - pthread_spin_unlock(&m_lastLocationGuard); + pthread_spin_lock(&m_lastLocationGuard); + result = m_lastLocation; + pthread_spin_unlock(&m_lastLocationGuard); - return result; + return result; } #define STATE_UNSEEN_IO_ID 0 @@ -267,34 +252,28 @@ std::vector ImageTrackingModel::getLastlocation() const std::ostream& operator << (std::ostream& os, const ImageTrackingModel::State& state) { - if (ImageTrackingModel::Tracked == state) - { - os<> (std::istream& is, ImageTrackingModel::State& state) 
{ - int stateId = -1; + int stateId = -1; - is>>stateId; + is >> stateId; - if (STATE_VISIBLE_IO_ID == stateId) - { - state = ImageTrackingModel::Tracked; - } - else - { - state = ImageTrackingModel::Undetected; - } + if (STATE_VISIBLE_IO_ID == stateId) { + state = ImageTrackingModel::Tracked; + } else { + state = ImageTrackingModel::Undetected; + } - return is; + return is; } #undef STATE_UNSEEN_IO_ID @@ -302,63 +281,59 @@ std::istream& operator >> (std::istream& is, ImageTrackingModel::State& state) std::ostream& operator << (std::ostream& os, const ImageTrackingModel& obj) { - os<> (std::istream& is, ImageTrackingModel& obj) { #define MEDIA_VISION_CHECK_IFSTREAM \ - if (!is.good()) \ - { \ - return is; \ - } + if (!is.good()) { \ + return is; \ + } - ImageTrackingModel temporal; + ImageTrackingModel temporal; - is>>obj.m_recognitionObject; - MEDIA_VISION_CHECK_IFSTREAM + is >> obj.m_recognitionObject; + MEDIA_VISION_CHECK_IFSTREAM - size_t lastLocationAmount = 0u; - is>>lastLocationAmount; - MEDIA_VISION_CHECK_IFSTREAM + size_t lastLocationAmount = 0u; + is >> lastLocationAmount; + MEDIA_VISION_CHECK_IFSTREAM - temporal.m_lastLocation.resize(lastLocationAmount); - for (size_t pointNum = 0u; pointNum < lastLocationAmount; ++pointNum) - { - is>>temporal.m_lastLocation[pointNum].x; - MEDIA_VISION_CHECK_IFSTREAM - is>>temporal.m_lastLocation[pointNum].y; - MEDIA_VISION_CHECK_IFSTREAM - } + temporal.m_lastLocation.resize(lastLocationAmount); + for (size_t pointNum = 0u; pointNum < lastLocationAmount; ++pointNum) { + is >> temporal.m_lastLocation[pointNum].x; + MEDIA_VISION_CHECK_IFSTREAM + is >> temporal.m_lastLocation[pointNum].y; + MEDIA_VISION_CHECK_IFSTREAM + } - is>>temporal.m_state; - MEDIA_VISION_CHECK_IFSTREAM + is >> temporal.m_state; + MEDIA_VISION_CHECK_IFSTREAM - if (temporal.m_recognitionObject.isEmpty()) - { - temporal.m_state = ImageTrackingModel::Invalid; - } + if (temporal.m_recognitionObject.isEmpty()) { + temporal.m_state = 
ImageTrackingModel::Invalid; + } - obj = temporal; + obj = temporal; - return is; + return is; } } /* Image */ diff --git a/mv_image/image/src/mv_image_open.cpp b/mv_image/image/src/mv_image_open.cpp index 1d351a4..8c81168 100644 --- a/mv_image/image/src/mv_image_open.cpp +++ b/mv_image/image/src/mv_image_open.cpp @@ -26,759 +26,697 @@ #include -namespace -{ - +namespace { const MediaVision::Image::FeaturesExtractingParams - defaultObjectFeaturesExtractingParams(1.2, 1000); + defaultObjectFeaturesExtractingParams(1.2, 1000); const MediaVision::Image::FeaturesExtractingParams - defaultSceneFeaturesExtractingParams(1.2, 5000); + defaultSceneFeaturesExtractingParams(1.2, 5000); const MediaVision::Image::RecognitionParams - defaultRecognitionParams(15, 0.33, 0.1); + defaultRecognitionParams(15, 0.33, 0.1); const MediaVision::Image::StabilizationParams - defaultStabilizationParams(3, 0.006, 2, 0.001); + defaultStabilizationParams(3, 0.006, 2, 0.001); const MediaVision::Image::TrackingParams - defaultTrackingParams( - defaultSceneFeaturesExtractingParams, - defaultRecognitionParams, - defaultStabilizationParams, - 0.0); + defaultTrackingParams( + defaultSceneFeaturesExtractingParams, + defaultRecognitionParams, + defaultStabilizationParams, + 0.0); void extractTargetFeaturesExtractingParams( - mv_engine_config_h engine_cfg, - MediaVision::Image::FeaturesExtractingParams& featuresExtractingParams) + mv_engine_config_h engine_cfg, + MediaVision::Image::FeaturesExtractingParams& featuresExtractingParams) { - mv_engine_config_h working_cfg = NULL; - - if (NULL == engine_cfg) - { - mv_create_engine_config(&working_cfg); - } - else - { - working_cfg = engine_cfg; - } - - featuresExtractingParams = defaultObjectFeaturesExtractingParams; - - mv_engine_config_get_double_attribute_c( - working_cfg, - "MV_IMAGE_RECOGNITION_OBJECT_SCALE_FACTOR", - &featuresExtractingParams.mScaleFactor); - - mv_engine_config_get_int_attribute_c( - working_cfg, - 
"MV_IMAGE_RECOGNITION_OBJECT_MAX_KEYPOINTS_NUM", - &featuresExtractingParams.mMaximumFeaturesNumber); - - if (NULL == engine_cfg) - { - mv_destroy_engine_config(working_cfg); - } + mv_engine_config_h working_cfg = NULL; + + if (NULL == engine_cfg) { + mv_create_engine_config(&working_cfg); + } else { + working_cfg = engine_cfg; + } + + featuresExtractingParams = defaultObjectFeaturesExtractingParams; + + mv_engine_config_get_double_attribute_c( + working_cfg, + "MV_IMAGE_RECOGNITION_OBJECT_SCALE_FACTOR", + &featuresExtractingParams.mScaleFactor); + + mv_engine_config_get_int_attribute_c( + working_cfg, + "MV_IMAGE_RECOGNITION_OBJECT_MAX_KEYPOINTS_NUM", + &featuresExtractingParams.mMaximumFeaturesNumber); + + if (NULL == engine_cfg) { + mv_destroy_engine_config(working_cfg); + } } void extractSceneFeaturesExtractingParams( - mv_engine_config_h engine_cfg, - MediaVision::Image::FeaturesExtractingParams& featuresExtractingParams) + mv_engine_config_h engine_cfg, + MediaVision::Image::FeaturesExtractingParams& featuresExtractingParams) { - mv_engine_config_h working_cfg = NULL; - - if (NULL == engine_cfg) - { - mv_create_engine_config(&working_cfg); - } - else - { - working_cfg = engine_cfg; - } - - featuresExtractingParams = defaultSceneFeaturesExtractingParams; - - mv_engine_config_get_double_attribute_c( - working_cfg, - "MV_IMAGE_RECOGNITION_SCENE_SCALE_FACTOR", - &featuresExtractingParams.mScaleFactor); - - mv_engine_config_get_int_attribute_c( - working_cfg, - "MV_IMAGE_RECOGNITION_SCENE_MAX_KEYPOINTS_NUM", - &featuresExtractingParams.mMaximumFeaturesNumber); - - if (NULL == engine_cfg) - { - mv_destroy_engine_config(working_cfg); - } + mv_engine_config_h working_cfg = NULL; + + if (NULL == engine_cfg) { + mv_create_engine_config(&working_cfg); + } else { + working_cfg = engine_cfg; + } + + featuresExtractingParams = defaultSceneFeaturesExtractingParams; + + mv_engine_config_get_double_attribute_c( + working_cfg, + "MV_IMAGE_RECOGNITION_SCENE_SCALE_FACTOR", + 
&featuresExtractingParams.mScaleFactor); + + mv_engine_config_get_int_attribute_c( + working_cfg, + "MV_IMAGE_RECOGNITION_SCENE_MAX_KEYPOINTS_NUM", + &featuresExtractingParams.mMaximumFeaturesNumber); + + if (NULL == engine_cfg) { + mv_destroy_engine_config(working_cfg); + } } void extractRecognitionParams( - mv_engine_config_h engine_cfg, - MediaVision::Image::RecognitionParams& recognitionParams) + mv_engine_config_h engine_cfg, + MediaVision::Image::RecognitionParams& recognitionParams) { - mv_engine_config_h working_cfg = NULL; - - if (NULL == engine_cfg) - { - mv_create_engine_config(&working_cfg); - } - else - { - working_cfg = engine_cfg; - } - - recognitionParams = defaultRecognitionParams; - - mv_engine_config_get_int_attribute_c( - working_cfg, - "MV_IMAGE_RECOGNITION_MIN_MATCH_NUM", - &recognitionParams.mMinMatchesNumber); - - mv_engine_config_get_double_attribute_c( - working_cfg, - "MV_IMAGE_RECOGNITION_REQ_MATCH_PART", - &recognitionParams.mRequiredMatchesPart); - - mv_engine_config_get_double_attribute_c( - working_cfg, - "MV_IMAGE_RECOGNITION_TOLERANT_MATCH_PART_ERR", - &recognitionParams.mAllowableMatchesPartError); - - if (NULL == engine_cfg) - { - mv_destroy_engine_config(working_cfg); - } + mv_engine_config_h working_cfg = NULL; + + if (NULL == engine_cfg) { + mv_create_engine_config(&working_cfg); + } else { + working_cfg = engine_cfg; + } + + recognitionParams = defaultRecognitionParams; + + mv_engine_config_get_int_attribute_c( + working_cfg, + "MV_IMAGE_RECOGNITION_MIN_MATCH_NUM", + &recognitionParams.mMinMatchesNumber); + + mv_engine_config_get_double_attribute_c( + working_cfg, + "MV_IMAGE_RECOGNITION_REQ_MATCH_PART", + &recognitionParams.mRequiredMatchesPart); + + mv_engine_config_get_double_attribute_c( + working_cfg, + "MV_IMAGE_RECOGNITION_TOLERANT_MATCH_PART_ERR", + &recognitionParams.mAllowableMatchesPartError); + + if (NULL == engine_cfg) { + mv_destroy_engine_config(working_cfg); + } } void extractStabilizationParams( - 
mv_engine_config_h engine_cfg, - MediaVision::Image::StabilizationParams& stabilizationParams) + mv_engine_config_h engine_cfg, + MediaVision::Image::StabilizationParams& stabilizationParams) { - mv_engine_config_h working_cfg = NULL; - - if (NULL == engine_cfg) - { - mv_create_engine_config(&working_cfg); - } - else - { - working_cfg = engine_cfg; - } - - stabilizationParams = defaultStabilizationParams; - - bool useStabilization = true; - mv_engine_config_get_bool_attribute_c( - working_cfg, - "MV_IMAGE_TRACKING_USE_STABLIZATION", - &useStabilization); - - if (!useStabilization) - { - stabilizationParams.mHistoryAmount = 0; - if (NULL == engine_cfg) - { - mv_destroy_engine_config(working_cfg); - } - return; - } - - mv_engine_config_get_int_attribute_c( - working_cfg, - "MV_IMAGE_TRACKING_HISTORY_AMOUNT", - &stabilizationParams.mHistoryAmount); - - mv_engine_config_get_double_attribute_c( - working_cfg, - "MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT", - &stabilizationParams.mAllowableShift); - - mv_engine_config_get_double_attribute_c( - working_cfg, - "MV_IMAGE_TRACKING_STABLIZATION_SPEED", - &stabilizationParams.mStabilizationSpeed); - - mv_engine_config_get_double_attribute_c( - working_cfg, - "MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION", - &stabilizationParams.mStabilizationAcceleration); - - if (NULL == engine_cfg) - { - mv_destroy_engine_config(working_cfg); - } + mv_engine_config_h working_cfg = NULL; + + if (NULL == engine_cfg) { + mv_create_engine_config(&working_cfg); + } else { + working_cfg = engine_cfg; + } + + stabilizationParams = defaultStabilizationParams; + + bool useStabilization = true; + mv_engine_config_get_bool_attribute_c( + working_cfg, + "MV_IMAGE_TRACKING_USE_STABLIZATION", + &useStabilization); + + if (!useStabilization) { + stabilizationParams.mHistoryAmount = 0; + if (NULL == engine_cfg) { + mv_destroy_engine_config(working_cfg); + } + return; + } + + mv_engine_config_get_int_attribute_c( + working_cfg, + 
"MV_IMAGE_TRACKING_HISTORY_AMOUNT", + &stabilizationParams.mHistoryAmount); + + mv_engine_config_get_double_attribute_c( + working_cfg, + "MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT", + &stabilizationParams.mAllowableShift); + + mv_engine_config_get_double_attribute_c( + working_cfg, + "MV_IMAGE_TRACKING_STABLIZATION_SPEED", + &stabilizationParams.mStabilizationSpeed); + + mv_engine_config_get_double_attribute_c( + working_cfg, + "MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION", + &stabilizationParams.mStabilizationAcceleration); + + if (NULL == engine_cfg) { + mv_destroy_engine_config(working_cfg); + } } void extractTrackingParams( - mv_engine_config_h engine_cfg, - MediaVision::Image::TrackingParams& trackingParams) + mv_engine_config_h engine_cfg, + MediaVision::Image::TrackingParams& trackingParams) { - mv_engine_config_h working_cfg = NULL; - - if (NULL == engine_cfg) - { - mv_create_engine_config(&working_cfg); - } - else - { - working_cfg = engine_cfg; - } - - trackingParams = defaultTrackingParams; - - extractSceneFeaturesExtractingParams( - working_cfg, - trackingParams.mFramesFeaturesExtractingParams); - - extractRecognitionParams( - working_cfg, - trackingParams.mRecognitionParams); - - extractStabilizationParams( - working_cfg, - trackingParams.mStabilizationParams); - - mv_engine_config_get_double_attribute_c( - working_cfg, - "MV_IMAGE_TRACKING_EXPECTED_OFFSET", - &trackingParams.mExpectedOffset); - - if (NULL == engine_cfg) - { - mv_destroy_engine_config(working_cfg); - } + mv_engine_config_h working_cfg = NULL; + + if (NULL == engine_cfg) { + mv_create_engine_config(&working_cfg); + } else { + working_cfg = engine_cfg; + } + + trackingParams = defaultTrackingParams; + + extractSceneFeaturesExtractingParams( + working_cfg, + trackingParams.mFramesFeaturesExtractingParams); + + extractRecognitionParams( + working_cfg, + trackingParams.mRecognitionParams); + + extractStabilizationParams( + working_cfg, + trackingParams.mStabilizationParams); + + 
mv_engine_config_get_double_attribute_c( + working_cfg, + "MV_IMAGE_TRACKING_EXPECTED_OFFSET", + &trackingParams.mExpectedOffset); + + if (NULL == engine_cfg) { + mv_destroy_engine_config(working_cfg); + } } int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource) { - MEDIA_VISION_INSTANCE_CHECK(mvSource); - - int depth = CV_8U; // Default depth. 1 byte for channel. - unsigned int channelsNumber = 0u; - unsigned int width = 0u, height = 0u; - unsigned int bufferSize = 0u; - unsigned char *buffer = NULL; - - mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; - - MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), - "Failed to get the width."); - MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), - "Failed to get the height."); - MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), - "Failed to get the colorspace."); - MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize), - "Failed to get the buffer size."); - - int conversionType = -1; // Type of conversion from given colorspace to gray - switch(colorspace) - { - case MEDIA_VISION_COLORSPACE_INVALID: - LOGE("Error: mv_source has invalid colorspace."); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - case MEDIA_VISION_COLORSPACE_Y800: - channelsNumber = 1; - // Without convertion - break; - case MEDIA_VISION_COLORSPACE_I420: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_I420; - break; - case MEDIA_VISION_COLORSPACE_NV12: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_NV12; - break; - case MEDIA_VISION_COLORSPACE_YV12: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_YV12; - break; - case MEDIA_VISION_COLORSPACE_NV21: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_NV21; - break; - case MEDIA_VISION_COLORSPACE_YUYV: - channelsNumber = 2; - conversionType = CV_YUV2GRAY_YUYV; - break; - case MEDIA_VISION_COLORSPACE_UYVY: - channelsNumber = 2; - 
conversionType = CV_YUV2GRAY_UYVY; - break; - case MEDIA_VISION_COLORSPACE_422P: - channelsNumber = 2; - conversionType = CV_YUV2GRAY_Y422; - break; - case MEDIA_VISION_COLORSPACE_RGB565: - channelsNumber = 2; - conversionType = CV_BGR5652GRAY; - break; - case MEDIA_VISION_COLORSPACE_RGB888: - channelsNumber = 3; - conversionType = CV_RGB2GRAY; - break; - case MEDIA_VISION_COLORSPACE_RGBA: - channelsNumber = 4; - conversionType = CV_RGBA2GRAY; - break; - default: - LOGE("Error: mv_source has unsupported colorspace."); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - if (conversionType == -1) // Without conversion - { - cvSource = cv::Mat(cv::Size(width, height), - CV_MAKETYPE(depth, channelsNumber), buffer).clone(); - } - else // Conversion - { - // Class for representation the given image as cv::Mat before conversion - cv::Mat origin(cv::Size(width, height), - CV_MAKETYPE(depth, channelsNumber), buffer); - cv::cvtColor(origin, cvSource, conversionType); - } - - return MEDIA_VISION_ERROR_NONE; + MEDIA_VISION_INSTANCE_CHECK(mvSource); + + int depth = CV_8U; // Default depth. 1 byte for channel. 
+ unsigned int channelsNumber = 0u; + unsigned int width = 0u, height = 0u; + unsigned int bufferSize = 0u; + unsigned char *buffer = NULL; + + mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; + + MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), + "Failed to get the width."); + MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), + "Failed to get the height."); + MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), + "Failed to get the colorspace."); + MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize), + "Failed to get the buffer size."); + + int conversionType = -1; /* Type of conversion from given colorspace to gray */ + switch(colorspace) { + case MEDIA_VISION_COLORSPACE_INVALID: + LOGE("Error: mv_source has invalid colorspace."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + case MEDIA_VISION_COLORSPACE_Y800: + channelsNumber = 1; + /* Without convertion */ + break; + case MEDIA_VISION_COLORSPACE_I420: + channelsNumber = 1; + height *= 1.5; + conversionType = CV_YUV2GRAY_I420; + break; + case MEDIA_VISION_COLORSPACE_NV12: + channelsNumber = 1; + height *= 1.5; + conversionType = CV_YUV2GRAY_NV12; + break; + case MEDIA_VISION_COLORSPACE_YV12: + channelsNumber = 1; + height *= 1.5; + conversionType = CV_YUV2GRAY_YV12; + break; + case MEDIA_VISION_COLORSPACE_NV21: + channelsNumber = 1; + height *= 1.5; + conversionType = CV_YUV2GRAY_NV21; + break; + case MEDIA_VISION_COLORSPACE_YUYV: + channelsNumber = 2; + conversionType = CV_YUV2GRAY_YUYV; + break; + case MEDIA_VISION_COLORSPACE_UYVY: + channelsNumber = 2; + conversionType = CV_YUV2GRAY_UYVY; + break; + case MEDIA_VISION_COLORSPACE_422P: + channelsNumber = 2; + conversionType = CV_YUV2GRAY_Y422; + break; + case MEDIA_VISION_COLORSPACE_RGB565: + channelsNumber = 2; + conversionType = CV_BGR5652GRAY; + break; + case MEDIA_VISION_COLORSPACE_RGB888: + channelsNumber = 3; + conversionType = CV_RGB2GRAY; + break; + case 
MEDIA_VISION_COLORSPACE_RGBA: + channelsNumber = 4; + conversionType = CV_RGBA2GRAY; + break; + default: + LOGE("Error: mv_source has unsupported colorspace."); + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } + + if (conversionType == -1) {/* Without conversion */ + cvSource = cv::Mat(cv::Size(width, height), + CV_MAKETYPE(depth, channelsNumber), buffer).clone(); + } else {/* With conversion */ + /* Class for representation the given image as cv::Mat before conversion */ + cv::Mat origin(cv::Size(width, height), + CV_MAKETYPE(depth, channelsNumber), buffer); + cv::cvtColor(origin, cvSource, conversionType); + } + + return MEDIA_VISION_ERROR_NONE; } } /* anonymous namespace */ int mv_image_recognize_open( - mv_source_h source, - const mv_image_object_h *image_objects, - int number_of_objects, - mv_engine_config_h engine_cfg, - mv_image_recognized_cb recognized_cb, - void *user_data) + mv_source_h source, + const mv_image_object_h *image_objects, + int number_of_objects, + mv_engine_config_h engine_cfg, + mv_image_recognized_cb recognized_cb, + void *user_data) { - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_NULL_ARG_CHECK(image_objects); - for (int objectNum = 0; objectNum < number_of_objects; ++objectNum) - { - MEDIA_VISION_INSTANCE_CHECK(image_objects[objectNum]); - } - MEDIA_VISION_NULL_ARG_CHECK(recognized_cb); - - cv::Mat scene; - MEDIA_VISION_ASSERT( - convertSourceMV2GrayCV(source, scene), - "Failed to convert mv_source."); - - MediaVision::Image::FeaturesExtractingParams featuresExtractingParams; - extractSceneFeaturesExtractingParams(engine_cfg, featuresExtractingParams); - - MediaVision::Image::RecognitionParams recognitionParams; - extractRecognitionParams(engine_cfg, recognitionParams); - - MediaVision::Image::ImageRecognizer recognizer(scene, - featuresExtractingParams); - - mv_quadrangle_s *resultLocations[number_of_objects]; - - for (int objectNum = 0; objectNum < number_of_objects; ++objectNum) - { - std::vector resultContour; - bool 
isRecognized = recognizer.recognize( - *((MediaVision::Image::ImageObject*)image_objects[objectNum]), - recognitionParams, resultContour); - if (isRecognized && (resultContour.size() == - MediaVision::Image::NumberOfQuadrangleCorners)) - { - resultLocations[objectNum] = new mv_quadrangle_s; - for (size_t pointNum = 0u; - pointNum < MediaVision::Image::NumberOfQuadrangleCorners; - ++pointNum) - { - resultLocations[objectNum]->points[pointNum].x = - resultContour[pointNum].x; - resultLocations[objectNum]->points[pointNum].y = - resultContour[pointNum].y; - } - } - else - { - resultLocations[objectNum] = NULL; - } - } - - recognized_cb( - source, - engine_cfg, - image_objects, - resultLocations, - number_of_objects, - user_data); - - for (int objectNum = 0; objectNum < number_of_objects; ++objectNum) - { - if (resultLocations[objectNum] != NULL) - { - delete resultLocations[objectNum]; - resultLocations[objectNum] = NULL; - } - } - - return MEDIA_VISION_ERROR_NONE; + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_NULL_ARG_CHECK(image_objects); + for (int objectNum = 0; objectNum < number_of_objects; ++objectNum) { + MEDIA_VISION_INSTANCE_CHECK(image_objects[objectNum]); + } + MEDIA_VISION_NULL_ARG_CHECK(recognized_cb); + + cv::Mat scene; + MEDIA_VISION_ASSERT( + convertSourceMV2GrayCV(source, scene), + "Failed to convert mv_source."); + + MediaVision::Image::FeaturesExtractingParams featuresExtractingParams; + extractSceneFeaturesExtractingParams(engine_cfg, featuresExtractingParams); + + MediaVision::Image::RecognitionParams recognitionParams; + extractRecognitionParams(engine_cfg, recognitionParams); + + MediaVision::Image::ImageRecognizer recognizer(scene, + featuresExtractingParams); + + mv_quadrangle_s *resultLocations[number_of_objects]; + + for (int objectNum = 0; objectNum < number_of_objects; ++objectNum) { + std::vector resultContour; + bool isRecognized = recognizer.recognize( + *((MediaVision::Image::ImageObject*)image_objects[objectNum]), + 
recognitionParams, resultContour); + if (isRecognized && (resultContour.size() == + MediaVision::Image::NumberOfQuadrangleCorners)) { + resultLocations[objectNum] = new mv_quadrangle_s; + for (size_t pointNum = 0u; + pointNum < MediaVision::Image::NumberOfQuadrangleCorners; + ++pointNum) { + resultLocations[objectNum]->points[pointNum].x = + resultContour[pointNum].x; + resultLocations[objectNum]->points[pointNum].y = + resultContour[pointNum].y; + } + } else { + resultLocations[objectNum] = NULL; + } + } + + recognized_cb( + source, + engine_cfg, + image_objects, + resultLocations, + number_of_objects, + user_data); + + for (int objectNum = 0; objectNum < number_of_objects; ++objectNum) { + if (resultLocations[objectNum] != NULL) { + delete resultLocations[objectNum]; + resultLocations[objectNum] = NULL; + } + } + + return MEDIA_VISION_ERROR_NONE; } int mv_image_track_open( - mv_source_h source, - mv_image_tracking_model_h image_tracking_model, - mv_engine_config_h engine_cfg, - mv_image_tracked_cb tracked_cb, - void *user_data) + mv_source_h source, + mv_image_tracking_model_h image_tracking_model, + mv_engine_config_h engine_cfg, + mv_image_tracked_cb tracked_cb, + void *user_data) { - MEDIA_VISION_INSTANCE_CHECK(source); - MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); - MEDIA_VISION_NULL_ARG_CHECK(tracked_cb); - - if (!((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->isValid()) - { - LOGE("[%s] Image tracking model is invalid.", __FUNCTION__); - return MEDIA_VISION_ERROR_INVALID_DATA; - } - - MediaVision::Image::TrackingParams trackingParams; - extractTrackingParams(engine_cfg, trackingParams); - - cv::Mat frame; - MEDIA_VISION_ASSERT( - convertSourceMV2GrayCV(source, frame), - "Failed to convert mv_source."); - - MediaVision::Image::ImageTracker tracker(trackingParams); - - MediaVision::Image::ImageTrackingModel *trackingModel = - (MediaVision::Image::ImageTrackingModel*)image_tracking_model; - - tracker.track(frame, *trackingModel); - - 
std::vector resultContour = trackingModel->getLastlocation(); - - if (trackingModel->isDetected() && - MediaVision::Image::NumberOfQuadrangleCorners == resultContour.size()) - { - mv_quadrangle_s result; - for (size_t pointNum = 0u; - pointNum < MediaVision::Image::NumberOfQuadrangleCorners; - ++pointNum) - { - result.points[pointNum].x = resultContour[pointNum].x; - result.points[pointNum].y = resultContour[pointNum].y; - } - tracked_cb(source, image_tracking_model, engine_cfg, &result, user_data); - } - else - { - tracked_cb(source, image_tracking_model, engine_cfg, NULL, user_data); - } - - return MEDIA_VISION_ERROR_NONE; + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); + MEDIA_VISION_NULL_ARG_CHECK(tracked_cb); + + if (!((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->isValid()) { + LOGE("[%s] Image tracking model is invalid.", __FUNCTION__); + return MEDIA_VISION_ERROR_INVALID_DATA; + } + + MediaVision::Image::TrackingParams trackingParams; + extractTrackingParams(engine_cfg, trackingParams); + + cv::Mat frame; + MEDIA_VISION_ASSERT( + convertSourceMV2GrayCV(source, frame), + "Failed to convert mv_source."); + + MediaVision::Image::ImageTracker tracker(trackingParams); + + MediaVision::Image::ImageTrackingModel *trackingModel = + (MediaVision::Image::ImageTrackingModel*)image_tracking_model; + + tracker.track(frame, *trackingModel); + + std::vector resultContour = trackingModel->getLastlocation(); + + if (trackingModel->isDetected() && + MediaVision::Image::NumberOfQuadrangleCorners == resultContour.size()) { + mv_quadrangle_s result; + for (size_t pointNum = 0u; + pointNum < MediaVision::Image::NumberOfQuadrangleCorners; + ++pointNum) { + result.points[pointNum].x = resultContour[pointNum].x; + result.points[pointNum].y = resultContour[pointNum].y; + } + tracked_cb(source, image_tracking_model, engine_cfg, &result, user_data); + } else { + tracked_cb(source, image_tracking_model, engine_cfg, NULL, 
user_data); + } + + return MEDIA_VISION_ERROR_NONE; } int mv_image_object_create_open( - mv_image_object_h *image_object) + mv_image_object_h *image_object) { - MEDIA_VISION_NULL_ARG_CHECK(image_object); + MEDIA_VISION_NULL_ARG_CHECK(image_object); - (*image_object) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject(); - if (*image_object == NULL) - { - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } + (*image_object) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject(); + if (*image_object == NULL) { + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_image_object_destroy_open( - mv_image_object_h image_object) + mv_image_object_h image_object) { - MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_INSTANCE_CHECK(image_object); - delete (MediaVision::Image::ImageObject*)image_object; + delete (MediaVision::Image::ImageObject*)image_object; - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_image_object_fill_open( - mv_image_object_h image_object, - mv_engine_config_h engine_cfg, - mv_source_h source, - mv_rectangle_s *location) + mv_image_object_h image_object, + mv_engine_config_h engine_cfg, + mv_source_h source, + mv_rectangle_s *location) { - MEDIA_VISION_INSTANCE_CHECK(image_object); - MEDIA_VISION_INSTANCE_CHECK(source); - - cv::Mat image; - MEDIA_VISION_ASSERT( - convertSourceMV2GrayCV(source, image), - "Failed to convert mv_source."); - - MediaVision::Image::FeaturesExtractingParams featuresExtractingParams; - extractTargetFeaturesExtractingParams(engine_cfg, featuresExtractingParams); - - if (NULL == location) - { - ((MediaVision::Image::ImageObject*)image_object)->fill(image, - featuresExtractingParams); - } - else - { - if (!((MediaVision::Image::ImageObject*)image_object)->fill(image, - cv::Rect(location->point.x, location->point.y, - location->width, location->height), - featuresExtractingParams)) - { - // Wrong 
ROI (bounding box) - LOGE("[%s] Wrong ROI.", __FUNCTION__); - return MEDIA_VISION_ERROR_INVALID_DATA; - } - } - - return MEDIA_VISION_ERROR_NONE; + MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_INSTANCE_CHECK(source); + + cv::Mat image; + MEDIA_VISION_ASSERT( + convertSourceMV2GrayCV(source, image), + "Failed to convert mv_source."); + + MediaVision::Image::FeaturesExtractingParams featuresExtractingParams; + extractTargetFeaturesExtractingParams(engine_cfg, featuresExtractingParams); + + if (NULL == location) { + ((MediaVision::Image::ImageObject*)image_object)->fill(image, + featuresExtractingParams); + } else { + if (!((MediaVision::Image::ImageObject*)image_object)->fill(image, + cv::Rect(location->point.x, location->point.y, + location->width, location->height), + featuresExtractingParams)) { + /* Wrong ROI (bounding box) */ + LOGE("[%s] Wrong ROI.", __FUNCTION__); + return MEDIA_VISION_ERROR_INVALID_DATA; + } + } + + return MEDIA_VISION_ERROR_NONE; } int mv_image_object_get_recognition_rate_open( - mv_image_object_h image_object, - double *recognition_rate) + mv_image_object_h image_object, + double *recognition_rate) { - MEDIA_VISION_INSTANCE_CHECK(image_object); - MEDIA_VISION_NULL_ARG_CHECK(recognition_rate); + MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_NULL_ARG_CHECK(recognition_rate); - (*recognition_rate) = - ((MediaVision::Image::ImageObject*)image_object)->getRecognitionRate(); + (*recognition_rate) = + ((MediaVision::Image::ImageObject*)image_object)->getRecognitionRate(); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_image_object_set_label_open( - mv_image_object_h image_object, - int label) + mv_image_object_h image_object, + int label) { - MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_INSTANCE_CHECK(image_object); - ((MediaVision::Image::ImageObject*)image_object)->setLabel(label); + ((MediaVision::Image::ImageObject*)image_object)->setLabel(label); - return 
MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_image_object_get_label_open( - mv_image_object_h image_object, - int *label) + mv_image_object_h image_object, + int *label) { - MEDIA_VISION_INSTANCE_CHECK(image_object); - MEDIA_VISION_NULL_ARG_CHECK(label); + MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_NULL_ARG_CHECK(label); - if (!((MediaVision::Image::ImageObject*)image_object)->getLabel(*label)) - { - LOGW("[%s] Image object haven't a label.", __FUNCTION__); - return MEDIA_VISION_ERROR_NO_DATA; - } + if (!((MediaVision::Image::ImageObject*)image_object)->getLabel(*label)) { + LOGW("[%s] Image object haven't a label.", __FUNCTION__); + return MEDIA_VISION_ERROR_NO_DATA; + } - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_image_object_clone_open( - mv_image_object_h src, - mv_image_object_h *dst) + mv_image_object_h src, + mv_image_object_h *dst) { - MEDIA_VISION_INSTANCE_CHECK(src); - MEDIA_VISION_NULL_ARG_CHECK(dst); + MEDIA_VISION_INSTANCE_CHECK(src); + MEDIA_VISION_NULL_ARG_CHECK(dst); - (*dst) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject(); - if (*dst == NULL) - { - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } + (*dst) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject(); + if (*dst == NULL) { + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } - *(MediaVision::Image::ImageObject*)(*dst) = - *(MediaVision::Image::ImageObject*)src; + *(MediaVision::Image::ImageObject*)(*dst) = + *(MediaVision::Image::ImageObject*)src; - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_image_object_save_open( - const char *file_name, mv_image_object_h image_object) + const char *file_name, mv_image_object_h image_object) { - MEDIA_VISION_INSTANCE_CHECK(image_object); - - if (file_name == NULL) - { - LOGE("File name is NULL. 
The file name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - int ret = ((MediaVision::Image::ImageObject*)image_object)->save(file_name); - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("Fail to save image object."); - return ret; - } - - return MEDIA_VISION_ERROR_NONE; + MEDIA_VISION_INSTANCE_CHECK(image_object); + + if (file_name == NULL) { + LOGE("File name is NULL. The file name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + int ret = ((MediaVision::Image::ImageObject*)image_object)->save(file_name); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Fail to save image object."); + return ret; + } + + return MEDIA_VISION_ERROR_NONE; } int mv_image_object_load_open( - const char *file_name, mv_image_object_h *image_object) + const char *file_name, mv_image_object_h *image_object) { - MEDIA_VISION_NULL_ARG_CHECK(image_object); - - if (file_name == NULL) - { - LOGE("File name is NULL. The file name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - (*image_object) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject(); - if (*image_object == NULL) - { - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } - - int ret = ((MediaVision::Image::ImageObject*)(*image_object))->load(file_name); - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("Fail to save image object."); - return ret; - } - - return MEDIA_VISION_ERROR_NONE; + MEDIA_VISION_NULL_ARG_CHECK(image_object); + + if (file_name == NULL) { + LOGE("File name is NULL. 
The file name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + (*image_object) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject(); + if (*image_object == NULL) { + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } + + int ret = ((MediaVision::Image::ImageObject*)(*image_object))->load(file_name); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Fail to save image object."); + return ret; + } + + return MEDIA_VISION_ERROR_NONE; } int mv_image_tracking_model_create_open( - mv_image_tracking_model_h *image_tracking_model) + mv_image_tracking_model_h *image_tracking_model) { - MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model); + MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model); - (*image_tracking_model) = (mv_image_tracking_model_h) - new (std::nothrow)MediaVision::Image::ImageTrackingModel(); - if (*image_tracking_model == NULL) - { - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } + (*image_tracking_model) = (mv_image_tracking_model_h) + new (std::nothrow)MediaVision::Image::ImageTrackingModel(); + if (*image_tracking_model == NULL) { + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_image_tracking_model_set_target_open( - mv_image_object_h image_object, - mv_image_tracking_model_h image_tracking_model) + mv_image_object_h image_object, + mv_image_tracking_model_h image_tracking_model) { - MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); - MEDIA_VISION_INSTANCE_CHECK(image_object); + MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); + MEDIA_VISION_INSTANCE_CHECK(image_object); - if (((MediaVision::Image::ImageObject*)image_object)->isEmpty()) - { - LOGE("[%s] Target is empty and can't be set as target of tracking" - "model.", __FUNCTION__); - return MEDIA_VISION_ERROR_INVALID_DATA; - } + if (((MediaVision::Image::ImageObject*)image_object)->isEmpty()) { + LOGE("[%s] Target is empty and can't be set as target of tracking" + "model.", __FUNCTION__); + 
return MEDIA_VISION_ERROR_INVALID_DATA; + } - ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->setTarget( - *(MediaVision::Image::ImageObject*)image_object); + ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->setTarget( + *(MediaVision::Image::ImageObject*)image_object); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_image_tracking_model_destroy_open( - mv_image_tracking_model_h image_tracking_model) + mv_image_tracking_model_h image_tracking_model) { - MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); + MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); - delete (MediaVision::Image::ImageTrackingModel*)image_tracking_model; + delete (MediaVision::Image::ImageTrackingModel*)image_tracking_model; - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_image_tracking_model_refresh_open( - mv_image_tracking_model_h image_tracking_model, - mv_engine_config_h /*engine_cfg*/) + mv_image_tracking_model_h image_tracking_model, + mv_engine_config_h /*engine_cfg*/) { - MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); + MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); - if (!((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->isValid()) - { - LOGE("[%s] Image tracking model is invalid.", __FUNCTION__); - return MEDIA_VISION_ERROR_INVALID_DATA; - } + if (!((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->isValid()) { + LOGE("[%s] Image tracking model is invalid.", __FUNCTION__); + return MEDIA_VISION_ERROR_INVALID_DATA; + } - ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->refresh(); + ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->refresh(); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_image_tracking_model_clone_open( - mv_image_tracking_model_h src, - mv_image_tracking_model_h *dst) + mv_image_tracking_model_h src, + mv_image_tracking_model_h *dst) { - MEDIA_VISION_INSTANCE_CHECK(src); - 
MEDIA_VISION_NULL_ARG_CHECK(dst); + MEDIA_VISION_INSTANCE_CHECK(src); + MEDIA_VISION_NULL_ARG_CHECK(dst); - (*dst) = (mv_image_tracking_model_h)new (std::nothrow)MediaVision::Image::ImageTrackingModel(); - if (*dst == NULL) - { - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } + (*dst) = (mv_image_tracking_model_h)new (std::nothrow)MediaVision::Image::ImageTrackingModel(); + if (*dst == NULL) { + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } - *(MediaVision::Image::ImageObject*)(*dst) = *(MediaVision::Image::ImageObject*)src; + *(MediaVision::Image::ImageObject*)(*dst) = *(MediaVision::Image::ImageObject*)src; - LOGD("Image tracking model has been successfully cloned"); - return MEDIA_VISION_ERROR_NONE; + LOGD("Image tracking model has been successfully cloned"); + return MEDIA_VISION_ERROR_NONE; } int mv_image_tracking_model_save_open( - const char *file_name, mv_image_tracking_model_h image_tracking_model) + const char *file_name, mv_image_tracking_model_h image_tracking_model) { - MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); - - if (file_name == NULL) - { - LOGE("File name is NULL. The file name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - int ret = ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->save(file_name); - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("Failed to save image tracking model"); - return ret; - } - - LOGD("Image tracking model has been successfully saved"); - return MEDIA_VISION_ERROR_NONE; + MEDIA_VISION_INSTANCE_CHECK(image_tracking_model); + + if (file_name == NULL) { + LOGE("File name is NULL. 
The file name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + int ret = ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->save(file_name); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Failed to save image tracking model"); + return ret; + } + + LOGD("Image tracking model has been successfully saved"); + return MEDIA_VISION_ERROR_NONE; } int mv_image_tracking_model_load_open( - const char *file_name, mv_image_tracking_model_h *image_tracking_model) + const char *file_name, mv_image_tracking_model_h *image_tracking_model) { - MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model); - - if (file_name == NULL) - { - LOGE("File path is NULL. The file name has to be specified"); - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - (*image_tracking_model) = - (mv_image_tracking_model_h) new (std::nothrow)MediaVision::Image::ImageTrackingModel(); - - if (*image_tracking_model == NULL) - { - return MEDIA_VISION_ERROR_OUT_OF_MEMORY; - } - - int ret = ((MediaVision::Image::ImageTrackingModel*)(*image_tracking_model))->load(file_name); - if (ret != MEDIA_VISION_ERROR_NONE) - { - LOGE("Failed to load image tracking model"); - return ret; - } - - LOGD("Image tracking model has been successfully loaded"); - return MEDIA_VISION_ERROR_NONE; + MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model); + + if (file_name == NULL) { + LOGE("File path is NULL. 
The file name has to be specified"); + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + (*image_tracking_model) = (mv_image_tracking_model_h) + new (std::nothrow)MediaVision::Image::ImageTrackingModel(); + + if (*image_tracking_model == NULL) { + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } + + int ret = ((MediaVision::Image::ImageTrackingModel*)(*image_tracking_model))->load(file_name); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Failed to load image tracking model"); + return ret; + } + + LOGD("Image tracking model has been successfully loaded"); + return MEDIA_VISION_ERROR_NONE; } diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index adf0cee..9d68686 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.2.3 +Version: 0.2.4 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 diff --git a/test/testsuites/barcode/barcode_test_suite.c b/test/testsuites/barcode/barcode_test_suite.c index c9d2980..b4e8606 100644 --- a/test/testsuites/barcode/barcode_test_suite.c +++ b/test/testsuites/barcode/barcode_test_suite.c @@ -28,1252 +28,1133 @@ #include #include -typedef struct -{ - mv_barcode_type_e type; - mv_barcode_qr_ecc_e ecc; - mv_barcode_qr_mode_e mode; - int version; - size_t width; - size_t height; - mv_barcode_image_format_e out_image_format; - mv_colorspace_e colorspace; - char *message; - char *file_name; - char *out_file_name; - unsigned char *out_buffer_ptr; +typedef struct { + mv_barcode_type_e type; + mv_barcode_qr_ecc_e ecc; + mv_barcode_qr_mode_e mode; + int version; + size_t width; + size_t height; + mv_barcode_image_format_e out_image_format; + mv_colorspace_e colorspace; + char *message; + char *file_name; + char *out_file_name; + unsigned char *out_buffer_ptr; } barcode_model_s; -typedef enum -{ - MV_TS_GENERATE_TO_IMAGE_FCN, - MV_TS_GENERATE_TO_SOURCE_FCN +typedef 
enum { + MV_TS_GENERATE_TO_IMAGE_FCN, + MV_TS_GENERATE_TO_SOURCE_FCN } generation_fcn_e; int convert_rgb_to(unsigned char *src_buffer, unsigned char **dst_buffer, - image_data_s image_data, mv_colorspace_e dst_colorspace, - unsigned long *cvt_buffer_size) + image_data_s image_data, mv_colorspace_e dst_colorspace, + unsigned long *cvt_buffer_size) { - enum PixelFormat pixel_format = PIX_FMT_NONE; - - MEDIA_VISION_FUNCTION_ENTER(); - - switch (dst_colorspace) - { - case MEDIA_VISION_COLORSPACE_Y800: - pixel_format = PIX_FMT_GRAY8; - break; - case MEDIA_VISION_COLORSPACE_I420: - pixel_format = PIX_FMT_YUV420P; - break; - case MEDIA_VISION_COLORSPACE_NV12: - pixel_format = PIX_FMT_NV12; - break; - case MEDIA_VISION_COLORSPACE_YV12: - pixel_format = PIX_FMT_YUV420P; // the same as I420 with inversed U and V - break; - case MEDIA_VISION_COLORSPACE_NV21: - pixel_format = PIX_FMT_NV21; - break; - case MEDIA_VISION_COLORSPACE_YUYV: - pixel_format = PIX_FMT_YUYV422; - break; - case MEDIA_VISION_COLORSPACE_UYVY: - pixel_format = PIX_FMT_UYVY422; - break; - case MEDIA_VISION_COLORSPACE_422P: - pixel_format = PIX_FMT_YUV422P; - break; - case MEDIA_VISION_COLORSPACE_RGB565: - pixel_format = PIX_FMT_RGB565BE; - break; - case MEDIA_VISION_COLORSPACE_RGBA: - pixel_format = PIX_FMT_RGBA; - break; - case MEDIA_VISION_COLORSPACE_RGB888: - *cvt_buffer_size = image_data.image_width * image_data.image_height * 3; - (*dst_buffer) = (unsigned char*)malloc(*cvt_buffer_size); - memcpy(*dst_buffer, src_buffer, *cvt_buffer_size); - - MEDIA_VISION_FUNCTION_LEAVE(); - return MEDIA_VISION_ERROR_NONE; - default: - MEDIA_VISION_FUNCTION_LEAVE(); - return MEDIA_VISION_ERROR_NOT_SUPPORTED; - } - - AVPicture src_picture; - AVPicture dst_picture; - - avpicture_fill(&src_picture, (uint8_t*)src_buffer, PIX_FMT_RGB24, - image_data.image_width, image_data.image_height); - - avpicture_alloc(&dst_picture, pixel_format, - image_data.image_width, image_data.image_height); - - struct SwsContext *context = 
sws_getContext( - image_data.image_width, image_data.image_height, PIX_FMT_RGB24, - image_data.image_width, image_data.image_height, pixel_format, - SWS_FAST_BILINEAR, 0, 0, 0); - - sws_scale(context, (const uint8_t* const*)src_picture.data, - src_picture.linesize, 0, image_data.image_height, - dst_picture.data, dst_picture.linesize); - - *cvt_buffer_size = avpicture_get_size(pixel_format, - image_data.image_width, image_data.image_height); - (*dst_buffer) = (unsigned char*)malloc(*cvt_buffer_size); - memcpy(*dst_buffer, dst_picture.data[0], *cvt_buffer_size); - - avpicture_free(&dst_picture); - - MEDIA_VISION_FUNCTION_LEAVE(); - - return MEDIA_VISION_ERROR_NONE; + enum PixelFormat pixel_format = PIX_FMT_NONE; + + MEDIA_VISION_FUNCTION_ENTER(); + + switch (dst_colorspace) { + case MEDIA_VISION_COLORSPACE_Y800: + pixel_format = PIX_FMT_GRAY8; + break; + case MEDIA_VISION_COLORSPACE_I420: + pixel_format = PIX_FMT_YUV420P; + break; + case MEDIA_VISION_COLORSPACE_NV12: + pixel_format = PIX_FMT_NV12; + break; + case MEDIA_VISION_COLORSPACE_YV12: + /* the same as I420 with inversed U and V */ + pixel_format = PIX_FMT_YUV420P; + break; + case MEDIA_VISION_COLORSPACE_NV21: + pixel_format = PIX_FMT_NV21; + break; + case MEDIA_VISION_COLORSPACE_YUYV: + pixel_format = PIX_FMT_YUYV422; + break; + case MEDIA_VISION_COLORSPACE_UYVY: + pixel_format = PIX_FMT_UYVY422; + break; + case MEDIA_VISION_COLORSPACE_422P: + pixel_format = PIX_FMT_YUV422P; + break; + case MEDIA_VISION_COLORSPACE_RGB565: + pixel_format = PIX_FMT_RGB565BE; + break; + case MEDIA_VISION_COLORSPACE_RGBA: + pixel_format = PIX_FMT_RGBA; + break; + case MEDIA_VISION_COLORSPACE_RGB888: + *cvt_buffer_size = image_data.image_width * image_data.image_height * 3; + (*dst_buffer) = (unsigned char*)malloc(*cvt_buffer_size); + memcpy(*dst_buffer, src_buffer, *cvt_buffer_size); + + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_NONE; + default: + MEDIA_VISION_FUNCTION_LEAVE(); + return 
MEDIA_VISION_ERROR_NOT_SUPPORTED; + } + + AVPicture src_picture; + AVPicture dst_picture; + + avpicture_fill(&src_picture, (uint8_t*)src_buffer, PIX_FMT_RGB24, + image_data.image_width, image_data.image_height); + + avpicture_alloc(&dst_picture, pixel_format, + image_data.image_width, image_data.image_height); + + struct SwsContext *context = sws_getContext( + image_data.image_width, image_data.image_height, PIX_FMT_RGB24, + image_data.image_width, image_data.image_height, pixel_format, + SWS_FAST_BILINEAR, 0, 0, 0); + + sws_scale(context, (const uint8_t * const *)src_picture.data, + src_picture.linesize, 0, image_data.image_height, + dst_picture.data, dst_picture.linesize); + + *cvt_buffer_size = avpicture_get_size(pixel_format, + image_data.image_width, image_data.image_height); + (*dst_buffer) = (unsigned char*)malloc(*cvt_buffer_size); + memcpy(*dst_buffer, dst_picture.data[0], *cvt_buffer_size); + + avpicture_free(&dst_picture); + + MEDIA_VISION_FUNCTION_LEAVE(); + + return MEDIA_VISION_ERROR_NONE; } int find_min_x(const mv_quadrangle_s *quadrangle, int *minX) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - if (NULL == quadrangle) - { - MEDIA_VISION_FUNCTION_LEAVE(); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (NULL == quadrangle) { + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - *minX = quadrangle->points[0].x; - *minX = quadrangle->points[1].x < *minX ? quadrangle->points[1].x : *minX; - *minX = quadrangle->points[2].x < *minX ? quadrangle->points[2].x : *minX; - *minX = quadrangle->points[3].x < *minX ? quadrangle->points[3].x : *minX; + *minX = quadrangle->points[0].x; + *minX = quadrangle->points[1].x < *minX ? quadrangle->points[1].x : *minX; + *minX = quadrangle->points[2].x < *minX ? quadrangle->points[2].x : *minX; + *minX = quadrangle->points[3].x < *minX ? 
quadrangle->points[3].x : *minX; - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int find_min_y(const mv_quadrangle_s *quadrangle, int *minY) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - if (NULL == quadrangle) - { - MEDIA_VISION_FUNCTION_LEAVE(); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (NULL == quadrangle) { + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - *minY = quadrangle->points[0].y; - *minY = quadrangle->points[1].y < *minY ? quadrangle->points[1].y : *minY; - *minY = quadrangle->points[2].y < *minY ? quadrangle->points[2].y : *minY; - *minY = quadrangle->points[3].y < *minY ? quadrangle->points[3].y : *minY; + *minY = quadrangle->points[0].y; + *minY = quadrangle->points[1].y < *minY ? quadrangle->points[1].y : *minY; + *minY = quadrangle->points[2].y < *minY ? quadrangle->points[2].y : *minY; + *minY = quadrangle->points[3].y < *minY ? quadrangle->points[3].y : *minY; - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int find_max_x(const mv_quadrangle_s *quadrangle, int *maxX) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - if (NULL == quadrangle) - { - MEDIA_VISION_FUNCTION_LEAVE(); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (NULL == quadrangle) { + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - *maxX = quadrangle->points[0].x; - *maxX = quadrangle->points[1].x > *maxX ? quadrangle->points[1].x : *maxX; - *maxX = quadrangle->points[2].x > *maxX ? quadrangle->points[2].x : *maxX; - *maxX = quadrangle->points[3].x > *maxX ? quadrangle->points[3].x : *maxX; + *maxX = quadrangle->points[0].x; + *maxX = quadrangle->points[1].x > *maxX ? quadrangle->points[1].x : *maxX; + *maxX = quadrangle->points[2].x > *maxX ? 
quadrangle->points[2].x : *maxX; + *maxX = quadrangle->points[3].x > *maxX ? quadrangle->points[3].x : *maxX; - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int find_max_y(const mv_quadrangle_s *quadrangle, int *maxY) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - if (NULL == quadrangle) - { - MEDIA_VISION_FUNCTION_LEAVE(); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (NULL == quadrangle) { + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - *maxY = quadrangle->points[0].y; - *maxY = quadrangle->points[1].y > *maxY ? quadrangle->points[1].y : *maxY; - *maxY = quadrangle->points[2].y > *maxY ? quadrangle->points[2].y : *maxY; - *maxY = quadrangle->points[3].y > *maxY ? quadrangle->points[3].y : *maxY; + *maxY = quadrangle->points[0].y; + *maxY = quadrangle->points[1].y > *maxY ? quadrangle->points[1].y : *maxY; + *maxY = quadrangle->points[2].y > *maxY ? quadrangle->points[2].y : *maxY; + *maxY = quadrangle->points[3].y > *maxY ? 
quadrangle->points[3].y : *maxY; - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } bool _mv_engine_config_supported_attribute(mv_config_attribute_type_e attribute_type, - const char *attribute_name, void *user_data) + const char *attribute_name, void *user_data) { - printf("Callback call for engine configuration attribute\n"); - - if (user_data == NULL) - { - return false; - } - - mv_engine_config_h mv_engine_config = (mv_engine_config_h *)user_data; - - int int_value = 0; - double double_value = 0.0; - bool bool_value = false; - char str_value[1024]; - switch (attribute_type) - { - case MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE: - if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE == - mv_engine_config_get_double_attribute( - mv_engine_config, attribute_name, &double_value)) - { - printf("Default double attribute %s wasn't set in engine\n", - attribute_name); - return false; - } - printf("Default double attribute %s was set to %f in engine\n", - attribute_name, double_value); - break; - case MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER: - if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE == - mv_engine_config_get_int_attribute( - mv_engine_config, attribute_name, &int_value)) - { - printf("Default integer attribute %s wasn't set in engine\n", - attribute_name); - return false; - } - printf("Default interget attribute %s was set to %d in engine\n", - attribute_name, int_value); - break; - case MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN: - if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE == - mv_engine_config_get_bool_attribute( - mv_engine_config, attribute_name, &bool_value)) - { - printf("Default bool attribute %s wasn't set in engine\n", - attribute_name); - return false; - } - printf("Default bool attribute %s was set to %s in engine\n", - attribute_name, bool_value ? 
"TRUE" : "FALSE"); - break; - case MV_ENGINE_CONFIG_ATTR_TYPE_STRING: - if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE == - mv_engine_config_get_string_attribute( - mv_engine_config, attribute_name, &str_value)) - { - printf("Default string ttribute %s wasn't set in engine\n", - attribute_name); - return false; - } - printf("Default string attribute %s was set to %s in engine\n", - attribute_name, str_value); - break; - default: - printf("Not supported attribute type\n"); - return false; - } - - - return true; + printf("Callback call for engine configuration attribute\n"); + + if (user_data == NULL) + return false; + + mv_engine_config_h mv_engine_config = (mv_engine_config_h *)user_data; + + int int_value = 0; + double double_value = 0.0; + bool bool_value = false; + char str_value[1024]; + switch (attribute_type) { + case MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE: + if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE == + mv_engine_config_get_double_attribute( + mv_engine_config, attribute_name, &double_value)) { + printf("Default double attribute %s wasn't set in engine\n", + attribute_name); + return false; + } + printf("Default double attribute %s was set to %f in engine\n", + attribute_name, double_value); + break; + case MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER: + if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE == + mv_engine_config_get_int_attribute( + mv_engine_config, attribute_name, &int_value)) { + printf("Default integer attribute %s wasn't set in engine\n", + attribute_name); + return false; + } + printf("Default interget attribute %s was set to %d in engine\n", + attribute_name, int_value); + break; + case MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN: + if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE == + mv_engine_config_get_bool_attribute( + mv_engine_config, attribute_name, &bool_value)) { + printf("Default bool attribute %s wasn't set in engine\n", + attribute_name); + return false; + } + printf("Default bool attribute %s was set to %s in engine\n", + attribute_name, bool_value ? 
"TRUE" : "FALSE"); + break; + case MV_ENGINE_CONFIG_ATTR_TYPE_STRING: + if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE == + mv_engine_config_get_string_attribute( + mv_engine_config, attribute_name, &str_value)) { + printf("Default string ttribute %s wasn't set in engine\n", + attribute_name); + return false; + } + printf("Default string attribute %s was set to %s in engine\n", + attribute_name, str_value); + break; + default: + printf("Not supported attribute type\n"); + return false; + } + + return true; } void barcode_detected_cb( - mv_source_h source, - mv_engine_config_h engine_cfg, - const mv_quadrangle_s *barcodes_locations, - const char *messages[], - const mv_barcode_type_e *types, - int number_of_barcodes, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + const mv_quadrangle_s *barcodes_locations, + const char *messages[], + const mv_barcode_type_e *types, + int number_of_barcodes, + void *user_data) { - MEDIA_VISION_FUNCTION_ENTER(); - - printf("%i barcodes were detected on the image.\n", number_of_barcodes); - if (number_of_barcodes > 0) - { - int is_source_data_loaded = 0; - - char *file_name = NULL; - unsigned char *out_buffer = NULL; - unsigned char *draw_buffer = NULL; - unsigned int buf_size = 0; - image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID }; - // Check Media Vision source: - if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &out_buffer, &buf_size) || - MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) || - MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) || - MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) || - user_data == NULL) - { - printf("ERROR: Creating out image is impossible.\n"); - } - else - { - file_name = ((barcode_model_s *)user_data)->out_file_name; - draw_buffer = ((barcode_model_s *)user_data)->out_buffer_ptr; - image_data.image_colorspace = 
MEDIA_VISION_COLORSPACE_RGB888; - is_source_data_loaded = 1; - } - - int i = 0; - for (i = 0; i < number_of_barcodes; ++i) - { - const char *cur_message = messages[i]; - mv_barcode_type_e cur_type = types[i]; - const char *str_type = NULL; - switch (cur_type) - { - case MV_BARCODE_QR: - str_type = "QR"; - break; - case MV_BARCODE_UPC_A: - str_type = "UPC-A"; - break; - case MV_BARCODE_UPC_E: - str_type = "UPC-E"; - break; - case MV_BARCODE_EAN_8: - case MV_BARCODE_EAN_13: - str_type = "EAN-8/13"; - break; - case MV_BARCODE_CODE128: - str_type = "CODE128"; - break; - case MV_BARCODE_CODE39: - str_type = "CODE39"; - break; - case MV_BARCODE_I2_5: - str_type = "I25"; - break; - default: - str_type = "Undetected"; - break; - } - printf("\tBarcode %i : type is %s\n", i, str_type); - if (cur_message != NULL) - { - printf("\t message is %s\n", cur_message); - } - else - { - printf("\t message wasn't detected\n"); - } - - if (is_source_data_loaded == 1) - { - int minX = 0; - int minY = 0; - int maxX = 0; - int maxY = 0; - if (MEDIA_VISION_ERROR_NONE != find_min_x(&barcodes_locations[i], &minX) || - MEDIA_VISION_ERROR_NONE != find_min_y(&barcodes_locations[i], &minY) || - MEDIA_VISION_ERROR_NONE != find_max_x(&barcodes_locations[i], &maxX) || - MEDIA_VISION_ERROR_NONE != find_max_y(&barcodes_locations[i], &maxY)) - { - continue; - } - - const int rectangle_thickness = 6; - const int drawing_color[] = {255, 0, 0}; - if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer( - minX, - minY, - maxX, - maxY, - drawing_color, - rectangle_thickness, - &image_data, - draw_buffer)) - { - continue; - } - } - } - - if (file_name != NULL && - MEDIA_VISION_ERROR_NONE == save_image_from_buffer(file_name, draw_buffer, &image_data, 100)) - { - printf("Image was generated as %s\n", file_name); - } - else - { - printf("ERROR: Failed to generate output file. Check file name and permissions. 
\n"); - } - - printf("\n"); - } - - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + + printf("%i barcodes were detected on the image.\n", number_of_barcodes); + if (number_of_barcodes > 0) { + int is_source_data_loaded = 0; + + char *file_name = NULL; + unsigned char *out_buffer = NULL; + unsigned char *draw_buffer = NULL; + unsigned int buf_size = 0; + image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID }; + /* Check Media Vision source: */ + if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &out_buffer, &buf_size) || + MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) || + MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) || + MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) || + user_data == NULL) { + printf("ERROR: Creating out image is impossible.\n"); + } else { + file_name = ((barcode_model_s *)user_data)->out_file_name; + draw_buffer = ((barcode_model_s *)user_data)->out_buffer_ptr; + image_data.image_colorspace = MEDIA_VISION_COLORSPACE_RGB888; + is_source_data_loaded = 1; + } + + int i = 0; + for (i = 0; i < number_of_barcodes; ++i) { + const char *cur_message = messages[i]; + mv_barcode_type_e cur_type = types[i]; + const char *str_type = NULL; + switch (cur_type) { + case MV_BARCODE_QR: + str_type = "QR"; + break; + case MV_BARCODE_UPC_A: + str_type = "UPC-A"; + break; + case MV_BARCODE_UPC_E: + str_type = "UPC-E"; + break; + case MV_BARCODE_EAN_8: + case MV_BARCODE_EAN_13: + str_type = "EAN-8/13"; + break; + case MV_BARCODE_CODE128: + str_type = "CODE128"; + break; + case MV_BARCODE_CODE39: + str_type = "CODE39"; + break; + case MV_BARCODE_I2_5: + str_type = "I25"; + break; + default: + str_type = "Undetected"; + break; + } + printf("\tBarcode %i : type is %s\n", i, str_type); + if (cur_message != NULL) + printf("\t message is %s\n", cur_message); + else + printf("\t message wasn't detected\n"); + + if 
(is_source_data_loaded == 1) { + int minX = 0; + int minY = 0; + int maxX = 0; + int maxY = 0; + if (MEDIA_VISION_ERROR_NONE != find_min_x(&barcodes_locations[i], &minX) || + MEDIA_VISION_ERROR_NONE != find_min_y(&barcodes_locations[i], &minY) || + MEDIA_VISION_ERROR_NONE != find_max_x(&barcodes_locations[i], &maxX) || + MEDIA_VISION_ERROR_NONE != find_max_y(&barcodes_locations[i], &maxY)) { + continue; + } + + const int rectangle_thickness = 6; + const int drawing_color[] = {255, 0, 0}; + if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer( + minX, + minY, + maxX, + maxY, + drawing_color, + rectangle_thickness, + &image_data, + draw_buffer)) { + continue; + } + } + } + + if (file_name != NULL && + MEDIA_VISION_ERROR_NONE == save_image_from_buffer(file_name, draw_buffer, &image_data, 100)) { + printf("Image was generated as %s\n", file_name); + } else { + printf("ERROR: Failed to generate output file. Check file name and permissions. \n"); + } + + printf("\n"); + } + + MEDIA_VISION_FUNCTION_LEAVE(); } int generate_barcode_to_image(barcode_model_s model) { - MEDIA_VISION_FUNCTION_ENTER(); - - if (model.message == NULL || - model.file_name == NULL) - { - MEDIA_VISION_FUNCTION_LEAVE(); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - LOGI("Call the mv_barcode_generate_image() function"); - - const int err = mv_barcode_generate_image( - NULL, - model.message, - model.width, - model.height, - model.type, - model.mode, - model.ecc, - model.version, - model.file_name, - model.out_image_format); - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; + MEDIA_VISION_FUNCTION_ENTER(); + + if (model.message == NULL || + model.file_name == NULL) { + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + LOGI("Call the mv_barcode_generate_image() function"); + + const int err = mv_barcode_generate_image( + NULL, + model.message, + model.width, + model.height, + model.type, + model.mode, + model.ecc, + model.version, + model.file_name, + 
model.out_image_format); + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; } int generate_barcode_to_source(barcode_model_s model) { - MEDIA_VISION_FUNCTION_ENTER(); - - if (model.message == NULL || - model.file_name == NULL) - { - MEDIA_VISION_FUNCTION_LEAVE(); - - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - LOGI("mv_source_h creation started"); - - mv_source_h source = NULL; - int err = mv_create_source(&source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Error occurred when trying to create Media Vision " - "source. Error code: %i\n", err); - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; - } - - LOGI("mv_source_h creation finished"); - - LOGI("Call the mv_barcode_generate_source() function"); - - err = mv_barcode_generate_source( - NULL, - model.message, - model.type, - model.mode, - model.ecc, - model.version, - source); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Error occurred during generation barcode to the " - "Media Vision source. Error code: %i\n", err); - - const int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("ERROR: Error occurred when try to destroy Media Vision source." - "Error code: %i\n", err2); - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; - } - - unsigned char *data_buffer = NULL; - unsigned int buffer_size = 0; - unsigned int image_width = 0; - unsigned int image_height = 0; - mv_colorspace_e image_colorspace = MEDIA_VISION_COLORSPACE_INVALID; - - bool is_source_corrupted = false; - err = mv_source_get_buffer(source, &data_buffer, &buffer_size); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Error occurred when trying to get buffer from " - "Media Vision source. Error code: %i\n", err); - is_source_corrupted = true; - } - - err = mv_source_get_width(source, &image_width); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Error occurred when trying to get width of " - "Media Vision source. 
Error code: %i\n", err); - is_source_corrupted = true; - } - - err = mv_source_get_height(source, &image_height); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Error occurred when trying to get height of " - "Media Vision source. Error code: %i\n", err); - is_source_corrupted = true; - } - - err = mv_source_get_colorspace(source, &image_colorspace); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Error occurred when trying to get colorspace of " - "Media Vision source. Error code: %i\n", err); - is_source_corrupted = true; - } - - if (is_source_corrupted) - { - err = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Error occurred when trying to destroy Media Vision " - "source. Error code: %i\n", err); - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return MEDIA_VISION_ERROR_INTERNAL; - } - - const image_data_s image_data = { image_width, image_height, image_colorspace }; - - char *jpeg_file_name = ""; - if (0 == strcmp(model.file_name + strlen(model.file_name) - 4, ".jpg") || - 0 == strcmp(model.file_name + strlen(model.file_name) - 5, ".jpeg")) - { - jpeg_file_name = (char*)malloc(strlen(model.file_name) + 1); - strcpy(jpeg_file_name, model.file_name); - jpeg_file_name[strlen(model.file_name)] = '\0'; - } - else - { - jpeg_file_name = (char*)malloc(strlen(model.file_name) + 5); - strcpy(jpeg_file_name, model.file_name); - strcpy(jpeg_file_name + strlen(model.file_name), ".jpg"); - jpeg_file_name[strlen(model.file_name) + 4] = '\0'; - } - - save_image_from_buffer(jpeg_file_name, data_buffer, &image_data, 100); - - free(jpeg_file_name); - - const int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("ERROR: Error occurred when try to destroy Media Vision source." 
- "Error code: %i\n", err2); - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; + MEDIA_VISION_FUNCTION_ENTER(); + + if (model.message == NULL || + model.file_name == NULL) { + MEDIA_VISION_FUNCTION_LEAVE(); + + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + LOGI("mv_source_h creation started"); + + mv_source_h source = NULL; + int err = mv_create_source(&source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Error occurred when trying to create Media Vision " + "source. Error code: %i\n", err); + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; + } + + LOGI("mv_source_h creation finished"); + + LOGI("Call the mv_barcode_generate_source() function"); + + err = mv_barcode_generate_source( + NULL, + model.message, + model.type, + model.mode, + model.ecc, + model.version, + source); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Error occurred during generation barcode to the " + "Media Vision source. Error code: %i\n", err); + + const int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("ERROR: Error occurred when try to destroy Media Vision source." + "Error code: %i\n", err2); + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; + } + + unsigned char *data_buffer = NULL; + unsigned int buffer_size = 0; + unsigned int image_width = 0; + unsigned int image_height = 0; + mv_colorspace_e image_colorspace = MEDIA_VISION_COLORSPACE_INVALID; + + bool is_source_corrupted = false; + err = mv_source_get_buffer(source, &data_buffer, &buffer_size); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Error occurred when trying to get buffer from " + "Media Vision source. Error code: %i\n", err); + is_source_corrupted = true; + } + + err = mv_source_get_width(source, &image_width); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Error occurred when trying to get width of " + "Media Vision source. 
Error code: %i\n", err); + is_source_corrupted = true; + } + + err = mv_source_get_height(source, &image_height); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Error occurred when trying to get height of " + "Media Vision source. Error code: %i\n", err); + is_source_corrupted = true; + } + + err = mv_source_get_colorspace(source, &image_colorspace); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Error occurred when trying to get colorspace of " + "Media Vision source. Error code: %i\n", err); + is_source_corrupted = true; + } + + if (is_source_corrupted) { + err = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Error occurred when trying to destroy Media Vision " + "source. Error code: %i\n", err); + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return MEDIA_VISION_ERROR_INTERNAL; + } + + const image_data_s image_data = { image_width, image_height, image_colorspace }; + + char *jpeg_file_name = ""; + if (0 == strcmp(model.file_name + strlen(model.file_name) - 4, ".jpg") || + 0 == strcmp(model.file_name + strlen(model.file_name) - 5, ".jpeg")) { + jpeg_file_name = (char*)malloc(strlen(model.file_name) + 1); + strcpy(jpeg_file_name, model.file_name); + jpeg_file_name[strlen(model.file_name)] = '\0'; + } else { + jpeg_file_name = (char*)malloc(strlen(model.file_name) + 5); + strcpy(jpeg_file_name, model.file_name); + strcpy(jpeg_file_name + strlen(model.file_name), ".jpg"); + jpeg_file_name[strlen(model.file_name) + 4] = '\0'; + } + + save_image_from_buffer(jpeg_file_name, data_buffer, &image_data, 100); + + free(jpeg_file_name); + + const int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("ERROR: Error occurred when try to destroy Media Vision source." 
+ "Error code: %i\n", err2); + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; } int detect_barcode(barcode_model_s model, mv_rectangle_s roi) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - unsigned char *data_buffer = NULL; - unsigned long buffer_size = 0; - image_data_s image_data; + unsigned char *data_buffer = NULL; + unsigned long buffer_size = 0; + image_data_s image_data; - int err = load_image_to_buffer( - model.file_name, &data_buffer, &buffer_size, &image_data); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Errors were occurred during opening the file!!! code: %i\n", err); + int err = load_image_to_buffer( + model.file_name, &data_buffer, &buffer_size, &image_data); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Errors were occurred during opening the file!!! code: %i\n", err); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } + return err; + } - unsigned char *converted_buffer = NULL; - unsigned long converted_buffer_size = 0; - err = convert_rgb_to(data_buffer, &converted_buffer, image_data, model.colorspace, &converted_buffer_size); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Can't convert to the selected colorspace!!! code: %i\n", err); + unsigned char *converted_buffer = NULL; + unsigned long converted_buffer_size = 0; + err = convert_rgb_to(data_buffer, &converted_buffer, image_data, model.colorspace, &converted_buffer_size); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Can't convert to the selected colorspace!!! 
code: %i\n", err); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } + return err; + } - model.out_buffer_ptr = data_buffer; + model.out_buffer_ptr = data_buffer; - mv_engine_config_h mv_engine_config; - err = mv_create_engine_config(&mv_engine_config); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Errors were occurred during creating the media engine config: %i\n", err); - } + mv_engine_config_h mv_engine_config; + err = mv_create_engine_config(&mv_engine_config); + if (MEDIA_VISION_ERROR_NONE != err) + printf("ERROR: Errors were occurred during creating the media engine config: %i\n", err); - mv_engine_config_foreach_supported_attribute(_mv_engine_config_supported_attribute, mv_engine_config); + mv_engine_config_foreach_supported_attribute(_mv_engine_config_supported_attribute, mv_engine_config); - mv_engine_config_set_int_attribute(mv_engine_config, MV_BARCODE_DETECT_ATTR_TARGET, MV_BARCODE_DETECT_ATTR_TARGET_2D_BARCODE); + mv_engine_config_set_int_attribute(mv_engine_config, MV_BARCODE_DETECT_ATTR_TARGET, MV_BARCODE_DETECT_ATTR_TARGET_2D_BARCODE); - mv_source_h source; - err = mv_create_source(&source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Errors were occurred during creating the source!!! code: %i\n", err); + mv_source_h source; + err = mv_create_source(&source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Errors were occurred during creating the source!!! code: %i\n", err); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } + return err; + } - err = mv_source_fill_by_buffer(source, converted_buffer, converted_buffer_size, - image_data.image_width, image_data.image_height, model.colorspace); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Errors were occurred during filling the source!!! 
code: %i\n", err); + err = mv_source_fill_by_buffer(source, converted_buffer, converted_buffer_size, + image_data.image_width, image_data.image_height, model.colorspace); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Errors were occurred during filling the source!!! code: %i\n", err); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } + return err; + } - if (converted_buffer != NULL) - { - free(converted_buffer); - } + if (converted_buffer != NULL) + free(converted_buffer); - err = mv_barcode_detect(source, mv_engine_config, roi, barcode_detected_cb, &model); + err = mv_barcode_detect(source, mv_engine_config, roi, barcode_detected_cb, &model); - if (data_buffer != NULL) - { - free(data_buffer); - } + if (data_buffer != NULL) + free(data_buffer); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Errors were occurred during barcode detection!!! code: %i\n", err); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Errors were occurred during barcode detection!!! code: %i\n", err); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } + return err; + } - err = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Errors were occurred during destroying the source!!! code: %i\n", err); - } + err = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err) + printf("ERROR: Errors were occurred during destroying the source!!! code: %i\n", err); - err = mv_destroy_engine_config(mv_engine_config); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Error were occurred during destroying the source!!! code: %i\n", err); - } + err = mv_destroy_engine_config(mv_engine_config); + if (MEDIA_VISION_ERROR_NONE != err) + printf("ERROR: Error were occurred during destroying the source!!! 
code: %i\n", err); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; + return err; } int input_string(const char *prompt, size_t max_len, char **string) { - MEDIA_VISION_FUNCTION_ENTER(); - - printf("\n"); - printf("%s ", prompt); - - if (scanf("\n") != 0) - { - MEDIA_VISION_FUNCTION_LEAVE(); - return -1; - } - - char buffer[max_len]; - int last_char = 0; - buffer[last_char] = '\0'; - buffer[sizeof(buffer) - 1] = ~'\0'; - if (fgets(buffer, sizeof(buffer), stdin) == NULL) - { - MEDIA_VISION_FUNCTION_LEAVE(); - return -1; - } - size_t real_string_len = strlen(buffer); - buffer[real_string_len - 1] = '\0'; - *string = (char*)malloc(real_string_len * sizeof(char)); - strcpy(*string, buffer); - - size_t str_len = strlen(*string); - - MEDIA_VISION_FUNCTION_LEAVE(); - - return str_len; + MEDIA_VISION_FUNCTION_ENTER(); + + printf("\n"); + printf("%s ", prompt); + + if (scanf("\n") != 0) { + MEDIA_VISION_FUNCTION_LEAVE(); + return -1; + } + + char buffer[max_len]; + int last_char = 0; + buffer[last_char] = '\0'; + buffer[sizeof(buffer) - 1] = ~'\0'; + if (fgets(buffer, sizeof(buffer), stdin) == NULL) { + MEDIA_VISION_FUNCTION_LEAVE(); + return -1; + } + size_t real_string_len = strlen(buffer); + buffer[real_string_len - 1] = '\0'; + *string = (char*)malloc(real_string_len * sizeof(char)); + strcpy(*string, buffer); + + size_t str_len = strlen(*string); + + MEDIA_VISION_FUNCTION_LEAVE(); + + return str_len; } int input_size(const char *prompt, size_t max_size, size_t *size) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - printf("\n"); - printf("%s ", prompt); + printf("\n"); + printf("%s ", prompt); - if (scanf("%20zu", size) == 0) - { - if (scanf("%*[^\n]%*c") != 0) - { - printf("ERROR: Reading the input line error.\n"); - MEDIA_VISION_FUNCTION_LEAVE(); - return -1; - } - printf("ERROR: Incorrect input.\n"); - MEDIA_VISION_FUNCTION_LEAVE(); - return -1; - } + if (scanf("%20zu", size) == 0) { + if (scanf("%*[^\n]%*c") != 0) 
{ + printf("ERROR: Reading the input line error.\n"); + MEDIA_VISION_FUNCTION_LEAVE(); + return -1; + } + printf("ERROR: Incorrect input.\n"); + MEDIA_VISION_FUNCTION_LEAVE(); + return -1; + } - int ret = (*size > max_size ? -1 : 0); + int ret = (*size > max_size ? -1 : 0); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int input_int(const char *prompt, int min_value, int max_value, int *value) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - printf("\n"); - printf("%s ", prompt); + printf("\n"); + printf("%s ", prompt); - if (scanf("%20i", value) == 0) - { - if (scanf("%*[^\n]%*c") != 0) - { - printf("ERROR: Reading the input line error.\n"); - MEDIA_VISION_FUNCTION_LEAVE(); - return -1; - } - printf("ERROR: Incorrect input.\n"); - MEDIA_VISION_FUNCTION_LEAVE(); - return -1; - } + if (scanf("%20i", value) == 0) { + if (scanf("%*[^\n]%*c") != 0) { + printf("ERROR: Reading the input line error.\n"); + MEDIA_VISION_FUNCTION_LEAVE(); + return -1; + } + printf("ERROR: Incorrect input.\n"); + MEDIA_VISION_FUNCTION_LEAVE(); + return -1; + } - int ret = (*value < min_value || *value > max_value ? -1 : 0); + int ret = (*value < min_value || *value > max_value ? -1 : 0); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return ret; + return ret; } int show_menu(const char *title, const int *options, const char **names, int cnt) { - MEDIA_VISION_FUNCTION_ENTER(); - - printf("***************************\n"); - printf("* %23s *\n", title); - printf("*-------------------------*\n"); - int i = 0; - for (i = 0; i < cnt; ++i) - { - printf("* %2i. 
%19s *\n", options[i], names[i]); - } - printf("***************************\n\n"); - int selection = 0; - printf("Your choise: "); - if (scanf("%20i", &selection) == 0) - { - if (scanf("%*[^\n]%*c") != 0) - { - printf("ERROR: Reading the input line error.\n"); - MEDIA_VISION_FUNCTION_LEAVE(); - return -1; - } - printf("ERROR: Incorrect input.\n"); - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return selection; + MEDIA_VISION_FUNCTION_ENTER(); + + printf("***************************\n"); + printf("* %23s *\n", title); + printf("*-------------------------*\n"); + int i = 0; + for (i = 0; i < cnt; ++i) + printf("* %2i. %19s *\n", options[i], names[i]); + + printf("***************************\n\n"); + int selection = 0; + printf("Your choise: "); + if (scanf("%20i", &selection) == 0) { + if (scanf("%*[^\n]%*c") != 0) { + printf("ERROR: Reading the input line error.\n"); + MEDIA_VISION_FUNCTION_LEAVE(); + return -1; + } + printf("ERROR: Incorrect input.\n"); + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return selection; } mv_barcode_type_e select_type(void) { - mv_barcode_type_e selected_type = MV_BARCODE_UNDEFINED; - int sel_opt = 0; - const int options[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }; - const char *names[8] = { "qr", "upca", "upce", "ean8", "ean13", "code39", "code128", "interleave25" }; - - MEDIA_VISION_FUNCTION_ENTER(); - - while (sel_opt == 0) - { - sel_opt = show_menu("Select barcode type:", options, names, 8); - - switch (sel_opt) - { - case 1: - selected_type = MV_BARCODE_QR; - break; - case 2: - selected_type = MV_BARCODE_UPC_A; - break; - case 3: - selected_type = MV_BARCODE_UPC_E; - break; - case 4: - selected_type = MV_BARCODE_EAN_8; - break; - case 5: - selected_type = MV_BARCODE_EAN_13; - break; - case 6: - selected_type = MV_BARCODE_CODE39; - break; - case 7: - selected_type = MV_BARCODE_CODE128; - break; - case 8: - selected_type = MV_BARCODE_I2_5; - break; - default: - sel_opt = 0; - break; - } - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return selected_type; 
+ mv_barcode_type_e selected_type = MV_BARCODE_UNDEFINED; + int sel_opt = 0; + const int options[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }; + const char *names[8] = { "qr", "upca", "upce", "ean8", "ean13", "code39", "code128", "interleave25" }; + + MEDIA_VISION_FUNCTION_ENTER(); + + while (sel_opt == 0) { + sel_opt = show_menu("Select barcode type:", options, names, 8); + + switch (sel_opt) { + case 1: + selected_type = MV_BARCODE_QR; + break; + case 2: + selected_type = MV_BARCODE_UPC_A; + break; + case 3: + selected_type = MV_BARCODE_UPC_E; + break; + case 4: + selected_type = MV_BARCODE_EAN_8; + break; + case 5: + selected_type = MV_BARCODE_EAN_13; + break; + case 6: + selected_type = MV_BARCODE_CODE39; + break; + case 7: + selected_type = MV_BARCODE_CODE128; + break; + case 8: + selected_type = MV_BARCODE_I2_5; + break; + default: + sel_opt = 0; + break; + } + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return selected_type; } mv_barcode_qr_mode_e select_mode(void) { - mv_barcode_qr_mode_e selected_mode = MV_BARCODE_QR_MODE_UNAVAILABLE; - int sel_opt = 0; - const int options[4] = { 1, 2, 3, 4 }; - const char *names[4] = { "numeric", "alphanumeric", "byte", "utf8" }; - - MEDIA_VISION_FUNCTION_ENTER(); - - while (sel_opt == 0) - { - sel_opt = show_menu("Select encoding mode:", options, names, 4); - switch (sel_opt) - { - case 1: - selected_mode = MV_BARCODE_QR_MODE_NUMERIC; - break; - case 2: - selected_mode = MV_BARCODE_QR_MODE_ALPHANUMERIC; - break; - case 3: - selected_mode = MV_BARCODE_QR_MODE_BYTE; - break; - case 4: - selected_mode = MV_BARCODE_QR_MODE_UTF8; - break; - default: - sel_opt = 0; - break; - } - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return selected_mode; + mv_barcode_qr_mode_e selected_mode = MV_BARCODE_QR_MODE_UNAVAILABLE; + int sel_opt = 0; + const int options[4] = { 1, 2, 3, 4 }; + const char *names[4] = { "numeric", "alphanumeric", "byte", "utf8" }; + + MEDIA_VISION_FUNCTION_ENTER(); + + while (sel_opt == 0) { + sel_opt = show_menu("Select encoding 
mode:", options, names, 4); + switch (sel_opt) { + case 1: + selected_mode = MV_BARCODE_QR_MODE_NUMERIC; + break; + case 2: + selected_mode = MV_BARCODE_QR_MODE_ALPHANUMERIC; + break; + case 3: + selected_mode = MV_BARCODE_QR_MODE_BYTE; + break; + case 4: + selected_mode = MV_BARCODE_QR_MODE_UTF8; + break; + default: + sel_opt = 0; + break; + } + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return selected_mode; } mv_barcode_qr_ecc_e select_ecc(void) { - mv_barcode_qr_ecc_e selected_ecc = MV_BARCODE_QR_ECC_UNAVAILABLE; - int sel_opt = 0; - const int options[4] = { 1, 2, 3, 4 }; - const char *names[4] = { "low", "medium", "quartile", "high" }; - - MEDIA_VISION_FUNCTION_ENTER(); - - while (sel_opt == 0) - { - sel_opt = show_menu("Select ECC level:", options, names, 4); - switch (sel_opt) - { - case 1: - selected_ecc = MV_BARCODE_QR_ECC_LOW; - break; - case 2: - selected_ecc = MV_BARCODE_QR_ECC_MEDIUM; - break; - case 3: - selected_ecc = MV_BARCODE_QR_ECC_QUARTILE; - break; - case 4: - selected_ecc = MV_BARCODE_QR_ECC_HIGH; - break; - default: - sel_opt = 0; - break; - } - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return selected_ecc; + mv_barcode_qr_ecc_e selected_ecc = MV_BARCODE_QR_ECC_UNAVAILABLE; + int sel_opt = 0; + const int options[4] = { 1, 2, 3, 4 }; + const char *names[4] = { "low", "medium", "quartile", "high" }; + + MEDIA_VISION_FUNCTION_ENTER(); + + while (sel_opt == 0) { + sel_opt = show_menu("Select ECC level:", options, names, 4); + switch (sel_opt) { + case 1: + selected_ecc = MV_BARCODE_QR_ECC_LOW; + break; + case 2: + selected_ecc = MV_BARCODE_QR_ECC_MEDIUM; + break; + case 3: + selected_ecc = MV_BARCODE_QR_ECC_QUARTILE; + break; + case 4: + selected_ecc = MV_BARCODE_QR_ECC_HIGH; + break; + default: + sel_opt = 0; + break; + } + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return selected_ecc; } int select_version(void) { - MEDIA_VISION_FUNCTION_ENTER(); - - int sel_opt = 0; - while (sel_opt == 0) - { - const int options[2] = {1, 40}; - const char *names[2] 
= { "1..", "..40" }; - sel_opt = show_menu("Select QR version:", options, names, 2); - if (sel_opt < 1 || sel_opt > 40) - { - sel_opt = 0; - } - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return sel_opt; + MEDIA_VISION_FUNCTION_ENTER(); + + int sel_opt = 0; + while (sel_opt == 0) { + const int options[2] = {1, 40}; + const char *names[2] = { "1..", "..40" }; + sel_opt = show_menu("Select QR version:", options, names, 2); + if (sel_opt < 1 || sel_opt > 40) + sel_opt = 0; + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return sel_opt; } generation_fcn_e select_gen_function(void) { - generation_fcn_e ret_fcn_type = MV_TS_GENERATE_TO_IMAGE_FCN; - int sel_opt = 0; - const int options[2] = { 1, 2 }; - const char *names[2] = { "Generate to file", "Generate to source" }; - - MEDIA_VISION_FUNCTION_ENTER(); - - while (sel_opt == 0) - { - sel_opt = show_menu("Select API function:", options, names, 2); - switch (sel_opt) - { - case 1: - ret_fcn_type = MV_TS_GENERATE_TO_IMAGE_FCN; - break; - case 2: - ret_fcn_type = MV_TS_GENERATE_TO_SOURCE_FCN; - break; - default: - sel_opt = 0; - break; - } - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return ret_fcn_type; + generation_fcn_e ret_fcn_type = MV_TS_GENERATE_TO_IMAGE_FCN; + int sel_opt = 0; + const int options[2] = { 1, 2 }; + const char *names[2] = { "Generate to file", "Generate to source" }; + + MEDIA_VISION_FUNCTION_ENTER(); + + while (sel_opt == 0) { + sel_opt = show_menu("Select API function:", options, names, 2); + switch (sel_opt) { + case 1: + ret_fcn_type = MV_TS_GENERATE_TO_IMAGE_FCN; + break; + case 2: + ret_fcn_type = MV_TS_GENERATE_TO_SOURCE_FCN; + break; + default: + sel_opt = 0; + break; + } + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return ret_fcn_type; } mv_barcode_image_format_e select_file_format(void) { - mv_barcode_image_format_e image_format = MV_BARCODE_IMAGE_FORMAT_JPG; - int sel_opt = 0; - const int options[3] = { 1, 2, 3 }; - const char *names[3] = { "BMP", "JPG", "PNG" }; - - MEDIA_VISION_FUNCTION_ENTER(); - - 
while (sel_opt == 0) - { - sel_opt = show_menu("Select file format:", options, names, 3); - switch (sel_opt) - { - case 1: - image_format = MV_BARCODE_IMAGE_FORMAT_BMP; - break; - case 2: - image_format = MV_BARCODE_IMAGE_FORMAT_JPG; - break; - case 3: - image_format = MV_BARCODE_IMAGE_FORMAT_PNG; - break; - default: - sel_opt = 0; - break; - } - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return image_format; + mv_barcode_image_format_e image_format = MV_BARCODE_IMAGE_FORMAT_JPG; + int sel_opt = 0; + const int options[3] = { 1, 2, 3 }; + const char *names[3] = { "BMP", "JPG", "PNG" }; + + MEDIA_VISION_FUNCTION_ENTER(); + + while (sel_opt == 0) { + sel_opt = show_menu("Select file format:", options, names, 3); + switch (sel_opt) { + case 1: + image_format = MV_BARCODE_IMAGE_FORMAT_BMP; + break; + case 2: + image_format = MV_BARCODE_IMAGE_FORMAT_JPG; + break; + case 3: + image_format = MV_BARCODE_IMAGE_FORMAT_PNG; + break; + default: + sel_opt = 0; + break; + } + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return image_format; } int perform_detect() { - MEDIA_VISION_FUNCTION_ENTER(); - - barcode_model_s detect_model = { - MV_BARCODE_UNDEFINED, - MV_BARCODE_QR_ECC_UNAVAILABLE, - MV_BARCODE_QR_MODE_UNAVAILABLE, - 0, 0, 0, - MV_BARCODE_IMAGE_FORMAT_PNG, - MEDIA_VISION_COLORSPACE_INVALID, - NULL, NULL, NULL, NULL }; - - while (input_string("Input file name to be analyzed:", 1024, &(detect_model.file_name)) == -1) - { - printf("Incorrect input! Try again.\n"); - } - LOGI("Barcode input image has been specified"); - - mv_rectangle_s roi = { {0, 0}, 0, 0 }; - - while (input_int("Input x coordinate for ROI top left vertex:", 0, 10000, &(roi.point.x)) == -1) - { - printf("Incorrect input! Try again.\n"); - } - - while (input_int("Input y coordinate for ROI top left vertex:", 0, 10000, &(roi.point.y)) == -1) - { - printf("Incorrect input! Try again.\n"); - } - - while (input_int("Input ROI width:", 0, 10000, &(roi.width)) == -1) - { - printf("Incorrect input! 
Try again.\n"); - } - - while (input_int("Input ROI height:", 0, 10000, &(roi.height)) == -1) - { - printf("Incorrect input! Try again.\n"); - } - LOGI("Region of interest (ROI) to detect barcode into has been specified"); - - while (input_string("Input file name to be generated:", 1024, &(detect_model.out_file_name)) == -1) - { - printf("Incorrect input! Try again.\n"); - } - LOGI("Barcode output image has been specified"); - - const int options[11] = { MEDIA_VISION_COLORSPACE_Y800, - MEDIA_VISION_COLORSPACE_I420, - MEDIA_VISION_COLORSPACE_NV12, - MEDIA_VISION_COLORSPACE_YV12, - MEDIA_VISION_COLORSPACE_NV21, - MEDIA_VISION_COLORSPACE_YUYV, - MEDIA_VISION_COLORSPACE_UYVY, - MEDIA_VISION_COLORSPACE_422P, - MEDIA_VISION_COLORSPACE_RGB565, - MEDIA_VISION_COLORSPACE_RGB888, - MEDIA_VISION_COLORSPACE_RGBA }; - const char *names[11] = { "Y800", "I420", "NV12", "YV12", "NV21", - "YUYV", "UYVY", "422P", "RGB565", - "RGB888", "RGBA" }; - - while (true) - { - int sel_opt = show_menu("Select colorspace to test detector on:", options, names, 11); - if (sel_opt < MEDIA_VISION_COLORSPACE_Y800 || - sel_opt > MEDIA_VISION_COLORSPACE_RGBA) - { - continue; - } - detect_model.colorspace = (mv_colorspace_e)sel_opt; - LOGI("User selection is %i", sel_opt); - break; - } - - int err = detect_barcode(detect_model, roi); - - if (detect_model.file_name != NULL) - { - free(detect_model.file_name); - } - - if (detect_model.out_file_name != NULL) - { - free(detect_model.out_file_name); - } - - if (err != MEDIA_VISION_ERROR_NONE) - { - LOGE("Barcode detection failed with error code (0x%08x)", err); - } - - - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; + MEDIA_VISION_FUNCTION_ENTER(); + + barcode_model_s detect_model = { + MV_BARCODE_UNDEFINED, + MV_BARCODE_QR_ECC_UNAVAILABLE, + MV_BARCODE_QR_MODE_UNAVAILABLE, + 0, 0, 0, + MV_BARCODE_IMAGE_FORMAT_PNG, + MEDIA_VISION_COLORSPACE_INVALID, + NULL, NULL, NULL, NULL }; + + while (input_string("Input file name to be analyzed:", 1024, 
&(detect_model.file_name)) == -1) + printf("Incorrect input! Try again.\n"); + + LOGI("Barcode input image has been specified"); + + mv_rectangle_s roi = { {0, 0}, 0, 0 }; + + while (input_int("Input x coordinate for ROI top left vertex:", 0, 10000, &(roi.point.x)) == -1) + printf("Incorrect input! Try again.\n"); + + while (input_int("Input y coordinate for ROI top left vertex:", 0, 10000, &(roi.point.y)) == -1) + printf("Incorrect input! Try again.\n"); + + while (input_int("Input ROI width:", 0, 10000, &(roi.width)) == -1) + printf("Incorrect input! Try again.\n"); + + while (input_int("Input ROI height:", 0, 10000, &(roi.height)) == -1) + printf("Incorrect input! Try again.\n"); + + LOGI("Region of interest (ROI) to detect barcode into has been specified"); + + while (input_string("Input file name to be generated:", 1024, &(detect_model.out_file_name)) == -1) + printf("Incorrect input! Try again.\n"); + + LOGI("Barcode output image has been specified"); + + const int options[11] = { MEDIA_VISION_COLORSPACE_Y800, + MEDIA_VISION_COLORSPACE_I420, + MEDIA_VISION_COLORSPACE_NV12, + MEDIA_VISION_COLORSPACE_YV12, + MEDIA_VISION_COLORSPACE_NV21, + MEDIA_VISION_COLORSPACE_YUYV, + MEDIA_VISION_COLORSPACE_UYVY, + MEDIA_VISION_COLORSPACE_422P, + MEDIA_VISION_COLORSPACE_RGB565, + MEDIA_VISION_COLORSPACE_RGB888, + MEDIA_VISION_COLORSPACE_RGBA }; + const char *names[11] = { "Y800", "I420", "NV12", "YV12", "NV21", + "YUYV", "UYVY", "422P", "RGB565", + "RGB888", "RGBA" }; + + while (true) { + int sel_opt = show_menu("Select colorspace to test detector on:", options, names, 11); + if (sel_opt < MEDIA_VISION_COLORSPACE_Y800 || + sel_opt > MEDIA_VISION_COLORSPACE_RGBA) { + continue; + } + detect_model.colorspace = (mv_colorspace_e)sel_opt; + LOGI("User selection is %i", sel_opt); + break; + } + + int err = detect_barcode(detect_model, roi); + + if (detect_model.file_name != NULL) + free(detect_model.file_name); + + if (detect_model.out_file_name != NULL) + 
free(detect_model.out_file_name); + + if (err != MEDIA_VISION_ERROR_NONE) + LOGE("Barcode detection failed with error code (0x%08x)", err); + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; } int perform_generate(void) { - MEDIA_VISION_FUNCTION_ENTER(); - - barcode_model_s generate_model = { - MV_BARCODE_UNDEFINED, - MV_BARCODE_QR_ECC_UNAVAILABLE, - MV_BARCODE_QR_MODE_UNAVAILABLE, - 0, 0, 0, - MV_BARCODE_IMAGE_FORMAT_PNG, - MEDIA_VISION_COLORSPACE_INVALID, - NULL, NULL, NULL, NULL }; - - generation_fcn_e gen_fcn = select_gen_function(); - generate_model.type = select_type(); - LOGI("Barcode type has been selected"); - - if (generate_model.type == MV_BARCODE_QR) - { - generate_model.mode = select_mode(); - LOGI("Barcode encoding mode has been selected"); - generate_model.ecc = select_ecc(); - LOGI("Barcode ecc level has been selected"); - generate_model.version = select_version(); - LOGI("Barcode version has been selected"); - } - - if (gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN) - { - generate_model.out_image_format = select_file_format(); - LOGI("Barcode output image format has been selected"); - } - - while (input_string("Input message:", 7089, &generate_model.message) == -1) - { - printf("Incorrect input! Try again.\n"); - } - LOGI("Barcode message has been specified"); - - while (input_string("Input file name:", 1024, &generate_model.file_name) == -1) - { - printf("Incorrect input! Try again.\n"); - } - LOGI("Barcode output file name has been specified"); - - if (gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN) - { - while (input_size("Input image width:", 10000, &generate_model.width) == -1) - { - printf("Incorrect input! Try again.\n"); - } - LOGI("Barcode output file width has been specified"); - - while (input_size("Input image height:", 10000, &generate_model.height) == -1) - { - printf("Incorrect input! Try again.\n"); - } - LOGI("Barcode output file height has been specified"); - } - - const int err = - gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN ? 
- generate_barcode_to_image(generate_model) : - generate_barcode_to_source(generate_model); - - if (generate_model.message != NULL) - { - free(generate_model.message); - } - - if (generate_model.file_name != NULL) - { - free(generate_model.file_name); - } - - if (err != MEDIA_VISION_ERROR_NONE) - { - LOGE("Barcode generation failed with error code (0x%08x)", err); - printf("ERROR: Errors were occurred during barcode generation!!!\n"); - MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } - - LOGI("Barcode output file has been generated"); - printf("\nBarcode image was successfully generated.\n"); - - MEDIA_VISION_FUNCTION_LEAVE(); - - return 0; + MEDIA_VISION_FUNCTION_ENTER(); + + barcode_model_s generate_model = { + MV_BARCODE_UNDEFINED, + MV_BARCODE_QR_ECC_UNAVAILABLE, + MV_BARCODE_QR_MODE_UNAVAILABLE, + 0, 0, 0, + MV_BARCODE_IMAGE_FORMAT_PNG, + MEDIA_VISION_COLORSPACE_INVALID, + NULL, NULL, NULL, NULL }; + + generation_fcn_e gen_fcn = select_gen_function(); + generate_model.type = select_type(); + LOGI("Barcode type has been selected"); + + if (generate_model.type == MV_BARCODE_QR) { + generate_model.mode = select_mode(); + LOGI("Barcode encoding mode has been selected"); + generate_model.ecc = select_ecc(); + LOGI("Barcode ecc level has been selected"); + generate_model.version = select_version(); + LOGI("Barcode version has been selected"); + } + + if (gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN) { + generate_model.out_image_format = select_file_format(); + LOGI("Barcode output image format has been selected"); + } + + while (input_string("Input message:", 7089, &generate_model.message) == -1) + printf("Incorrect input! Try again.\n"); + + LOGI("Barcode message has been specified"); + + while (input_string("Input file name:", 1024, &generate_model.file_name) == -1) + printf("Incorrect input! 
Try again.\n"); + + LOGI("Barcode output file name has been specified"); + + if (gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN) { + while (input_size("Input image width:", 10000, &generate_model.width) == -1) + printf("Incorrect input! Try again.\n"); + + LOGI("Barcode output file width has been specified"); + + while (input_size("Input image height:", 10000, &generate_model.height) == -1) + printf("Incorrect input! Try again.\n"); + + LOGI("Barcode output file height has been specified"); + } + + const int err = + gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN ? + generate_barcode_to_image(generate_model) : + generate_barcode_to_source(generate_model); + + if (generate_model.message != NULL) + free(generate_model.message); + + if (generate_model.file_name != NULL) + free(generate_model.file_name); + + if (err != MEDIA_VISION_ERROR_NONE) { + LOGE("Barcode generation failed with error code (0x%08x)", err); + printf("ERROR: Errors were occurred during barcode generation!!!\n"); + MEDIA_VISION_FUNCTION_LEAVE(); + return err; + } + + LOGI("Barcode output file has been generated"); + printf("\nBarcode image was successfully generated.\n"); + + MEDIA_VISION_FUNCTION_LEAVE(); + + return 0; } int main(void) { - LOGI("Media Vision Testsuite is launched."); - - int err = MEDIA_VISION_ERROR_NONE; - - int sel_opt = 0; - const int options[2] = { 1, 2 }; - const char *names[2] = { "Generate", "Detect" }; - - while (sel_opt == 0) - { - sel_opt = show_menu("Select action:", options, names, 2); - switch (sel_opt) - { - case 1: - LOGI("Start the barcode generation flow"); - err = perform_generate(); - break; - case 2: - LOGI("Start the barcode detection flow"); - err = perform_detect(); - break; - default: - sel_opt = 0; - continue; - } - - int do_another = 0; - - if (err != MEDIA_VISION_ERROR_NONE) - { - printf("ERROR: Action is finished with error code: %i\n", err); - } - - sel_opt = 0; - const int options_last[2] = { 1, 2 }; - const char *names_last[2] = { "YES", "NO" }; - - while (sel_opt == 
0) - { - sel_opt = show_menu("Perform another action?", options_last, names_last, 2); - switch (sel_opt) - { - case 1: - do_another = 1; - break; - case 2: - do_another = 0; - break; - default: - sel_opt = 0; - break; - } - } - LOGI("User selection is %i", sel_opt); - - sel_opt = (do_another == 1 ? 0 : sel_opt); - } - - LOGI("Media Vision Testsuite is closed."); - - return err; + LOGI("Media Vision Testsuite is launched."); + + int err = MEDIA_VISION_ERROR_NONE; + + int sel_opt = 0; + const int options[2] = { 1, 2 }; + const char *names[2] = { "Generate", "Detect" }; + + while (sel_opt == 0) { + sel_opt = show_menu("Select action:", options, names, 2); + switch (sel_opt) { + case 1: + LOGI("Start the barcode generation flow"); + err = perform_generate(); + break; + case 2: + LOGI("Start the barcode detection flow"); + err = perform_detect(); + break; + default: + sel_opt = 0; + continue; + } + + int do_another = 0; + + if (err != MEDIA_VISION_ERROR_NONE) + printf("ERROR: Action is finished with error code: %i\n", err); + + sel_opt = 0; + const int options_last[2] = { 1, 2 }; + const char *names_last[2] = { "YES", "NO" }; + + while (sel_opt == 0) { + sel_opt = show_menu("Perform another action?", options_last, names_last, 2); + switch (sel_opt) { + case 1: + do_another = 1; + break; + case 2: + do_another = 0; + break; + default: + sel_opt = 0; + break; + } + } + LOGI("User selection is %i", sel_opt); + + sel_opt = (do_another == 1 ? 
0 : sel_opt); + } + + LOGI("Media Vision Testsuite is closed."); + + return err; } diff --git a/test/testsuites/face/face_test_suite.c b/test/testsuites/face/face_test_suite.c index c26fce7..e7ca50a 100644 --- a/test/testsuites/face/face_test_suite.c +++ b/test/testsuites/face/face_test_suite.c @@ -40,816 +40,727 @@ static bool Perform_eye_condition_recognize = false; static bool Perform_facial_expression_recognize = false; void eye_condition_cb( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s face_location, - mv_face_eye_condition_e eye_condition, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s face_location, + mv_face_eye_condition_e eye_condition, + void *user_data) { - switch (eye_condition) - { - case MV_FACE_EYES_NOT_FOUND: - printf("Eyes not found"); - break; - case MV_FACE_EYES_OPEN: - printf("Eyes are open"); - break; - case MV_FACE_EYES_CLOSED: - printf("Eyes are closed"); - break; - } + switch (eye_condition) { + case MV_FACE_EYES_NOT_FOUND: + printf("Eyes not found"); + break; + case MV_FACE_EYES_OPEN: + printf("Eyes are open"); + break; + case MV_FACE_EYES_CLOSED: + printf("Eyes are closed"); + break; + } } void face_expression_cb( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s face_location, - mv_face_facial_expression_e facial_expression, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s face_location, + mv_face_facial_expression_e facial_expression, + void *user_data) { - switch (facial_expression) - { - case MV_FACE_NEUTRAL: - printf("Face expression is neutral"); - break; - case MV_FACE_SMILE: - printf("Face expression is smiling"); - break; - case MV_FACE_UNKNOWN: - printf("Face expression isn't recognized"); - break; - } + switch (facial_expression) { + case MV_FACE_NEUTRAL: + printf("Face expression is neutral"); + break; + case MV_FACE_SMILE: + printf("Face expression is smiling"); + break; + case 
MV_FACE_UNKNOWN: + printf("Face expression isn't recognized"); + break; + } } void on_face_detected_cb( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s *faces_locations, - int number_of_faces, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s *faces_locations, + int number_of_faces, + void *user_data) { - printf("%i faces were detected on the image.\n", number_of_faces); - if (number_of_faces > 0) - { - int is_source_data_loaded = 0; - - char *file_name = NULL; - unsigned char *out_buffer = NULL; - unsigned int buf_size = 0; - image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID }; - if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &out_buffer, &buf_size) || - MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) || - MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) || - MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) || - user_data == NULL) - { - printf("ERROR: Creating out image is impossible.\n"); - } - else - { - file_name = (char*)user_data; - is_source_data_loaded = 1; - } - - int i = 0; - for (i = 0; i < number_of_faces; ++i) - { - printf("\Face %i : x - %i, y - %i, width - %i, height - %i ", i, - faces_locations[i].point.x, faces_locations[i].point.y, - faces_locations[i].width, faces_locations[i].height); - - if (Perform_eye_condition_recognize) - { - if (MEDIA_VISION_ERROR_NONE != mv_face_eye_condition_recognize( - source, - engine_cfg, - faces_locations[i], - eye_condition_cb, - user_data)) - { - printf(TEXT_RED "\nEye condition recognition for %i face failed" - TEXT_RESET "\n", i); - } - } - - if (Perform_facial_expression_recognize) - { - if (MEDIA_VISION_ERROR_NONE != mv_face_facial_expression_recognize( - source, - engine_cfg, - faces_locations[i], - face_expression_cb, - user_data)) - { - printf(TEXT_RED "\nFacial expression recognition for %i " - "face 
failed" TEXT_RESET "\n", i); - } - } - - printf("\n"); - - if ((is_source_data_loaded == 1) && !Perform_eye_condition_recognize) - { - const int rectangle_thickness = 3; - const int drawing_color[] = {255, 0, 0}; - if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer( - faces_locations[i].point.x, - faces_locations[i].point.y, - faces_locations[i].point.x + faces_locations[i].width, - faces_locations[i].point.y + faces_locations[i].height, - rectangle_thickness, - drawing_color, - &image_data, - out_buffer)) - { - continue; - } - } - } - - if (!Perform_eye_condition_recognize) - { - if (file_name != NULL && - MEDIA_VISION_ERROR_NONE == save_image_from_buffer( - file_name, - out_buffer, - &image_data, - 100)) - { - printf("Image was generated as %s\n", file_name); - } - else - { - printf("ERROR: Failed to generate output file. Check file name and permissions. \n"); - } - } - - printf("\n"); - } + printf("%i faces were detected on the image.\n", number_of_faces); + if (number_of_faces > 0) { + int is_source_data_loaded = 0; + + char *file_name = NULL; + unsigned char *out_buffer = NULL; + unsigned int buf_size = 0; + image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID }; + if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &out_buffer, &buf_size) || + MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) || + MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) || + MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) || + user_data == NULL) { + printf("ERROR: Creating out image is impossible.\n"); + } else { + file_name = (char*)user_data; + is_source_data_loaded = 1; + } + + int i = 0; + for (i = 0; i < number_of_faces; ++i) { + printf("\Face %i : x - %i, y - %i, width - %i, height - %i ", i, + faces_locations[i].point.x, faces_locations[i].point.y, + faces_locations[i].width, faces_locations[i].height); + + if 
(Perform_eye_condition_recognize) { + if (MEDIA_VISION_ERROR_NONE != mv_face_eye_condition_recognize( + source, + engine_cfg, + faces_locations[i], + eye_condition_cb, + user_data)) { + printf(TEXT_RED "\nEye condition recognition for %i face failed" + TEXT_RESET "\n", i); + } + } + + if (Perform_facial_expression_recognize) { + if (MEDIA_VISION_ERROR_NONE != mv_face_facial_expression_recognize( + source, + engine_cfg, + faces_locations[i], + face_expression_cb, + user_data)) { + printf(TEXT_RED "\nFacial expression recognition for %i " + "face failed" TEXT_RESET "\n", i); + } + } + + printf("\n"); + + if ((is_source_data_loaded == 1) && !Perform_eye_condition_recognize) { + const int rectangle_thickness = 3; + const int drawing_color[] = {255, 0, 0}; + if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer( + faces_locations[i].point.x, + faces_locations[i].point.y, + faces_locations[i].point.x + faces_locations[i].width, + faces_locations[i].point.y + faces_locations[i].height, + rectangle_thickness, + drawing_color, + &image_data, + out_buffer)) { + continue; + } + } + } + + if (!Perform_eye_condition_recognize) { + if (file_name != NULL && + MEDIA_VISION_ERROR_NONE == save_image_from_buffer( + file_name, + out_buffer, + &image_data, + 100)) { + printf("Image was generated as %s\n", file_name); + } else { + printf("ERROR: Failed to generate output file. Check file name and permissions. 
\n"); + } + } + + printf("\n"); + } } void on_face_recognized_cb( - mv_source_h source, - mv_face_recognition_model_h recognition_model, - mv_engine_config_h engine_cfg, - mv_rectangle_s *face_location, - const int *face_label, - double confidence, - void *user_data) + mv_source_h source, + mv_face_recognition_model_h recognition_model, + mv_engine_config_h engine_cfg, + mv_rectangle_s *face_location, + const int *face_label, + double confidence, + void *user_data) { - if (NULL == face_location) - { - printf(TEXT_YELLOW "No faces were recognized in the source" - TEXT_RESET "\n"); - } - else - { - printf(TEXT_GREEN "Face labeled %i was recognized in the source with " - "recognition confidence of %.2f" - TEXT_RESET "\n", *face_label, confidence); - } + if (NULL == face_location) { + printf(TEXT_YELLOW "No faces were recognized in the source" + TEXT_RESET "\n"); + } else { + printf(TEXT_GREEN "Face labeled %i was recognized in the source with " + "recognition confidence of %.2f" + TEXT_RESET "\n", *face_label, confidence); + } } int perform_detect() { - char *in_file_name = NULL; - char *out_file_name = NULL; - - // 1. Loading media source - while (input_string("Input file name to be analyzed:", 1024, &(in_file_name)) == -1) - { - printf("Incorrect input! Try again.\n"); - } - - mv_source_h source; - int err = mv_create_source(&source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during creating the source!!! code: %i" - TEXT_RESET "\n", err); - - free(in_file_name); - - return err; - } - - err = load_mv_source_from_file(in_file_name, source); - if (MEDIA_VISION_ERROR_NONE != err) - { - const int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED - "ERROR: Errors were occurred during destroying the source!!! code: %i" - TEXT_RESET "\n", err2); - - free(in_file_name); - - return err2; - } - - free(in_file_name); - - return err; - } - - free(in_file_name); - - // 2. 
Select output file to be generated - while (input_string("Input file name to be generated:", 1024, &(out_file_name)) == -1) - { - printf("Incorrect input! Try again.\n"); - } - - // 3. Select Haar cascade - const int options[3] = { 1, 2, 3 }; - const char *names[3] = { "haarcascade_frontalface_alt.xml", - "haarcascade_frontalface_alt2.xml", - "haarcascade_frontalface_alt_tree.xml"}; - - const int haarcascade = show_menu("Select Haarcascade:", options, names, 3); - - mv_engine_config_h eng_config; - err = mv_create_engine_config(&eng_config); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during creating the engine config!!! code: %i" - TEXT_RESET "\n", err); - - free(out_file_name); - - return err; - } - - switch (haarcascade) - { - case 1: - mv_engine_config_set_string_attribute( - eng_config, - MV_FACE_DETECTION_MODEL_FILE_PATH, - "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"); - break; - case 2: - mv_engine_config_set_string_attribute( - eng_config, - MV_FACE_DETECTION_MODEL_FILE_PATH, - "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml"); - break; - case 3: - mv_engine_config_set_string_attribute( - eng_config, - MV_FACE_DETECTION_MODEL_FILE_PATH, - "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt_tree.xml"); - break; - default: - printf(TEXT_YELLOW "Default Haar cascade was set.\n" TEXT_RESET); - } - - // 4. Perform detect - err = mv_face_detect(source, eng_config, on_face_detected_cb, out_file_name); - - free(out_file_name); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during face detection!!! code: %i" - TEXT_RESET "\n", err); - - int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED - "ERROR: Errors were occurred during destroying the source!!! 
code: %i" - TEXT_RESET "\n", err2); - return err2; - } - - err2 = mv_destroy_engine_config(eng_config); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED - "ERROR: Errors were occurred during destroying the engine config!!! code: %i" - TEXT_RESET "\n", err2); - return err2; - } - - return err; - } - - err = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during destroying the source!!! code: %i" - TEXT_RESET "\n", err); - return err; - } - - err = mv_destroy_engine_config(eng_config); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during destroying the engine config!!! code: %i" - TEXT_RESET "\n", err); - return err; - } - - return err; + char *in_file_name = NULL; + char *out_file_name = NULL; + + /* 1. Loading media source */ + while (input_string("Input file name to be analyzed:", 1024, &(in_file_name)) == -1) + printf("Incorrect input! Try again.\n"); + + mv_source_h source; + int err = mv_create_source(&source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during creating the source!!! code: %i" + TEXT_RESET "\n", err); + + free(in_file_name); + + return err; + } + + err = load_mv_source_from_file(in_file_name, source); + if (MEDIA_VISION_ERROR_NONE != err) { + const int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED + "ERROR: Errors were occurred during destroying the source!!! code: %i" + TEXT_RESET "\n", err2); + + free(in_file_name); + + return err2; + } + + free(in_file_name); + + return err; + } + + free(in_file_name); + + /* 2. Select output file to be generated */ + while (input_string("Input file name to be generated:", 1024, &(out_file_name)) == -1) + printf("Incorrect input! Try again.\n"); + + /* 3. 
Select Haar cascade */ + const int options[3] = { 1, 2, 3 }; + const char *names[3] = { "haarcascade_frontalface_alt.xml", + "haarcascade_frontalface_alt2.xml", + "haarcascade_frontalface_alt_tree.xml"}; + + const int haarcascade = show_menu("Select Haarcascade:", options, names, 3); + + mv_engine_config_h eng_config; + err = mv_create_engine_config(&eng_config); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during creating the engine config!!! code: %i" + TEXT_RESET "\n", err); + + free(out_file_name); + + return err; + } + + switch (haarcascade) { + case 1: + mv_engine_config_set_string_attribute( + eng_config, + MV_FACE_DETECTION_MODEL_FILE_PATH, + "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"); + break; + case 2: + mv_engine_config_set_string_attribute( + eng_config, + MV_FACE_DETECTION_MODEL_FILE_PATH, + "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml"); + break; + case 3: + mv_engine_config_set_string_attribute( + eng_config, + MV_FACE_DETECTION_MODEL_FILE_PATH, + "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt_tree.xml"); + break; + default: + printf(TEXT_YELLOW "Default Haar cascade was set.\n" TEXT_RESET); + } + + /* 4. Perform detect */ + err = mv_face_detect(source, eng_config, on_face_detected_cb, out_file_name); + + free(out_file_name); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during face detection!!! code: %i" + TEXT_RESET "\n", err); + + int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED + "ERROR: Errors were occurred during destroying the source!!! code: %i" + TEXT_RESET "\n", err2); + return err2; + } + + err2 = mv_destroy_engine_config(eng_config); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED + "ERROR: Errors were occurred during destroying the engine config!!! 
code: %i" + TEXT_RESET "\n", err2); + return err2; + } + + return err; + } + + err = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during destroying the source!!! code: %i" + TEXT_RESET "\n", err); + return err; + } + + err = mv_destroy_engine_config(eng_config); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during destroying the engine config!!! code: %i" + TEXT_RESET "\n", err); + return err; + } + + return err; } int perform_mv_face_recognize(mv_face_recognition_model_h model) { - char *in_file_name = NULL; - - mv_source_h source = NULL; - int err = mv_create_source(&source); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during creating the source!!! code: %i" - TEXT_RESET "\n", err); - return err; - } - - printf(TEXT_GREEN "HINT:" TEXT_RESET "\n" - TEXT_YELLOW "To achieve appropriate accuracy of recognition,\n" - "choose images with only faces. I.e. face has to cover\n" - "approximately 95-100%% of the image (passport photos\n" - "are the best example :)). Note that if this value is\n" - "less than 95%, accuracy can be significantly reduced.\n" - "In real code such images can be achieved by cropping\n" - "faces from images with face detection functionality.\n" - TEXT_RESET); - while (-1 == input_string( - "Input file name with the face to be recognized:", - 1024, - &(in_file_name))) - { - printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); - } - - err = load_mv_source_from_file(in_file_name, source); - - if (MEDIA_VISION_ERROR_NONE != err) - { - free(in_file_name); - - const int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED - "ERROR: Errors were occurred during destroying the source!!! 
code: %i" - TEXT_RESET "\n", err2); - return err2; - } - - return err; - } - - err = mv_face_recognize(source, model, NULL, NULL, on_face_recognized_cb, NULL); - - if (MEDIA_VISION_ERROR_NONE != err) - { - free(in_file_name); - - printf(TEXT_RED - "ERROR: Errors were occurred during face recognition!!! code: %i" - TEXT_RESET "\n", err); - - int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED - "ERROR: Errors were occurred during destroying the source!!! code: %i" - TEXT_RESET "\n", err2); - return err2; - } - - return err; - } - - free(in_file_name); - - return err; + char *in_file_name = NULL; + + mv_source_h source = NULL; + int err = mv_create_source(&source); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during creating the source!!! code: %i" + TEXT_RESET "\n", err); + return err; + } + + printf(TEXT_GREEN "HINT:" TEXT_RESET "\n" + TEXT_YELLOW "To achieve appropriate accuracy of recognition,\n" + "choose images with only faces. I.e. face has to cover\n" + "approximately 95-100%% of the image (passport photos\n" + "are the best example :)). Note that if this value is\n" + "less than 95%, accuracy can be significantly reduced.\n" + "In real code such images can be achieved by cropping\n" + "faces from images with face detection functionality.\n" + TEXT_RESET); + while (-1 == input_string( + "Input file name with the face to be recognized:", + 1024, + &(in_file_name))) { + printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); + } + + err = load_mv_source_from_file(in_file_name, source); + + if (MEDIA_VISION_ERROR_NONE != err) { + free(in_file_name); + + const int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED + "ERROR: Errors were occurred during destroying the source!!! 
code: %i" + TEXT_RESET "\n", err2); + return err2; + } + + return err; + } + + err = mv_face_recognize(source, model, NULL, NULL, on_face_recognized_cb, NULL); + + if (MEDIA_VISION_ERROR_NONE != err) { + free(in_file_name); + + printf(TEXT_RED + "ERROR: Errors were occurred during face recognition!!! code: %i" + TEXT_RESET "\n", err); + + int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED + "ERROR: Errors were occurred during destroying the source!!! code: %i" + TEXT_RESET "\n", err2); + return err2; + } + + return err; + } + + free(in_file_name); + + return err; } int add_single_example( - mv_face_recognition_model_h model, const char *in_file_name, - mv_rectangle_s *roi, int *face_label) + mv_face_recognition_model_h model, const char *in_file_name, + mv_rectangle_s *roi, int *face_label) { - mv_source_h source; - int err = mv_create_source(&source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during creating the source!!! code: %i" - TEXT_RESET "\n", err); - - return err; - } - - err = load_mv_source_from_file(in_file_name, source); - if (MEDIA_VISION_ERROR_NONE != err) - { - const int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED - "ERROR: Errors were occurred during destroying the source!!! code: %i" - TEXT_RESET "\n", err2); - return err2; - } - - return err; - } - - if (NULL != roi && !show_confirm_dialog("Do you want to use full image?")) - { - printf(TEXT_YELLOW "Specify the ROI as rectangle where face is located.\n" - "Use negative values if you want to check correctness\n" - "of error handling.\n" - TEXT_RESET); - - while (-1 == input_int("Specify top left ROI x coordinate:", - INT_MIN, INT_MAX, &(roi->point.x))) - { - printf("Incorrect input! Try again.\n"); - } - - while (-1 == input_int("Specify top left ROI y coordinate:", - INT_MIN, INT_MAX, &(roi->point.y))) - { - printf("Incorrect input! 
Try again.\n"); - } - - while (-1 == input_int("Specify top left ROI width:", - INT_MIN, INT_MAX, &(roi->width))) - { - printf("Incorrect input! Try again.\n"); - } - - while (-1 == input_int("Specify top left ROI height:", - INT_MIN, INT_MAX, &(roi->height))) - { - printf("Incorrect input! Try again.\n"); - } - } - else - { - roi = NULL; - } - - int real_label = 0; - if (NULL == face_label) - { - printf(TEXT_YELLOW "Also, you has to assign label for the face in the\n" - "image. You has assign the same labels for the same\n" - "persons. For example, always assign label '1' for\n" - "images with Alice's face; label '2' for Bob's faces,\n" - "'3' for Ann's faces and so on...\n" - TEXT_RESET); - - face_label = &real_label; - while (-1 == input_int("Specify label as integer:", - MIN_ALLOWED_LABEL, - MAX_ALLOWED_LABEL, - face_label)) - { - printf("Incorrect input! You can use %i-%i labels only. Try again.\n", - MIN_ALLOWED_LABEL, - MAX_ALLOWED_LABEL); - } - } - - err = mv_face_recognition_model_add(source, model, roi, *face_label); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during adding the sample image from " - "[%s] to the face recognition model!!! code: %i" - TEXT_RESET "\n", in_file_name, err); - } - - const int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED - "ERROR: Errors were occurred during destroying the source!!! code: %i" - TEXT_RESET "\n", err2); - } - - return err; + mv_source_h source; + int err = mv_create_source(&source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during creating the source!!! code: %i" + TEXT_RESET "\n", err); + + return err; + } + + err = load_mv_source_from_file(in_file_name, source); + if (MEDIA_VISION_ERROR_NONE != err) { + const int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED + "ERROR: Errors were occurred during destroying the source!!! 
code: %i" + TEXT_RESET "\n", err2); + return err2; + } + + return err; + } + + if (NULL != roi && !show_confirm_dialog("Do you want to use full image?")) { + printf(TEXT_YELLOW "Specify the ROI as rectangle where face is located.\n" + "Use negative values if you want to check correctness\n" + "of error handling.\n" + TEXT_RESET); + + while (-1 == input_int("Specify top left ROI x coordinate:", + INT_MIN, INT_MAX, &(roi->point.x))) { + printf("Incorrect input! Try again.\n"); + } + + while (-1 == input_int("Specify top left ROI y coordinate:", + INT_MIN, INT_MAX, &(roi->point.y))) { + printf("Incorrect input! Try again.\n"); + } + + while (-1 == input_int("Specify top left ROI width:", + INT_MIN, INT_MAX, &(roi->width))) { + printf("Incorrect input! Try again.\n"); + } + + while (-1 == input_int("Specify top left ROI height:", + INT_MIN, INT_MAX, &(roi->height))) { + printf("Incorrect input! Try again.\n"); + } + } else { + roi = NULL; + } + + int real_label = 0; + if (NULL == face_label) { + printf(TEXT_YELLOW "Also, you has to assign label for the face in the\n" + "image. You has assign the same labels for the same\n" + "persons. For example, always assign label '1' for\n" + "images with Alice's face; label '2' for Bob's faces,\n" + "'3' for Ann's faces and so on...\n" + TEXT_RESET); + + face_label = &real_label; + while (-1 == input_int("Specify label as integer:", + MIN_ALLOWED_LABEL, + MAX_ALLOWED_LABEL, + face_label)) { + printf("Incorrect input! You can use %i-%i labels only. Try again.\n", + MIN_ALLOWED_LABEL, + MAX_ALLOWED_LABEL); + } + } + + err = mv_face_recognition_model_add(source, model, roi, *face_label); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during adding the sample image from " + "[%s] to the face recognition model!!! 
code: %i" + TEXT_RESET "\n", in_file_name, err); + } + + const int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED + "ERROR: Errors were occurred during destroying the source!!! code: %i" + TEXT_RESET "\n", err2); + } + + return err; } int perform_mv_face_recognition_model_add_face_example( - mv_face_recognition_model_h model, - notification_type_e *notification_type) + mv_face_recognition_model_h model, + notification_type_e *notification_type) { - char *in_file_name = NULL; - - printf(TEXT_GREEN "HINT:" TEXT_RESET "\n" - TEXT_YELLOW "To achieve appropriate accuracy of recognition,\n" - "choose images with only faces. I.e. face has to cover\n" - "approximately 95-100%% of the image (passport photos\n" - "are the best example :)). Note that if this value is\n" - "less than 95%, accuracy can be significantly reduced.\n" - "In real code such images can be achieved by cropping\n" - "faces from images with face detection functionality.\n" - TEXT_RESET); - - const bool from_dir = show_confirm_dialog("Do add images from directory?"); - const char *input_path_msg = - from_dir ? "Input path to the directory with the face images to be " - "loaded to the model:" - : "Input file name with the face to be loaded to the model:"; - - while (-1 == input_string(input_path_msg, 1024, &(in_file_name))) - { - printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); - } - - int err = MEDIA_VISION_ERROR_NONE; - - if (from_dir) - { - *notification_type = FAIL_OR_DONE; - int face_label = 0; - while (-1 == input_int("Specify label as integer:", - MIN_ALLOWED_LABEL, - MAX_ALLOWED_LABEL, - &face_label)) - { - printf("Incorrect input! You can use %i-%i labels only. 
Try again.\n", - MIN_ALLOWED_LABEL, - MAX_ALLOWED_LABEL); - } - - DIR *dir; - struct dirent *ent; - if ((dir = opendir(in_file_name)) != NULL) - { - char file_path[1024] = ""; - - // Traverses all the files and directories within source directory - while ((ent = readdir(dir)) != NULL) - { - // Determine current entry name - const char *file_name = ent->d_name; - - // If current entry is directory, or hidden object, skip the step: - if (file_name[0] == '.') - { - continue; - } - - sprintf(file_path, "%s/%s", in_file_name, file_name); - err = add_single_example(model, file_path, NULL, &face_label); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "Failed to add example from %s. " - "Error code: %i\n" TEXT_RESET, - file_path, err); - } - else - { - printf(TEXT_GREEN "Example labeled [%i] added from " TEXT_RESET - TEXT_YELLOW "%s\n" TEXT_RESET, face_label, file_path); - } - } - - closedir(dir); - } - else - { - printf(TEXT_RED "Can't read from specified directory (%s)\n" - TEXT_RESET, in_file_name); - } - } - else - { - *notification_type = FAIL_OR_SUCCESSS; - mv_rectangle_s roi; - err = add_single_example(model, in_file_name, &roi, NULL); - } - - free(in_file_name); - - return err; + char *in_file_name = NULL; + + printf(TEXT_GREEN "HINT:" TEXT_RESET "\n" + TEXT_YELLOW "To achieve appropriate accuracy of recognition,\n" + "choose images with only faces. I.e. face has to cover\n" + "approximately 95-100%% of the image (passport photos\n" + "are the best example :)). Note that if this value is\n" + "less than 95%, accuracy can be significantly reduced.\n" + "In real code such images can be achieved by cropping\n" + "faces from images with face detection functionality.\n" + TEXT_RESET); + + const bool from_dir = show_confirm_dialog("Do add images from directory?"); + const char *input_path_msg = + from_dir ? 
"Input path to the directory with the face images to be " + "loaded to the model:" + : "Input file name with the face to be loaded to the model:"; + + while (-1 == input_string(input_path_msg, 1024, &(in_file_name))) + printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); + + int err = MEDIA_VISION_ERROR_NONE; + + if (from_dir) { + *notification_type = FAIL_OR_DONE; + int face_label = 0; + while (-1 == input_int("Specify label as integer:", + MIN_ALLOWED_LABEL, + MAX_ALLOWED_LABEL, + &face_label)) { + printf("Incorrect input! You can use %i-%i labels only. Try again.\n", + MIN_ALLOWED_LABEL, + MAX_ALLOWED_LABEL); + } + + DIR *dir; + struct dirent *ent; + if ((dir = opendir(in_file_name)) != NULL) { + char file_path[1024] = ""; + + /* Traverses all the files and directories within source directory */ + while ((ent = readdir(dir)) != NULL) { + /* Determine current entry name */ + const char *file_name = ent->d_name; + + /* If current entry is directory, or hidden object, skip the step: */ + if (file_name[0] == '.') + continue; + + sprintf(file_path, "%s/%s", in_file_name, file_name); + err = add_single_example(model, file_path, NULL, &face_label); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "Failed to add example from %s. 
" + "Error code: %i\n" TEXT_RESET, + file_path, err); + } else { + printf(TEXT_GREEN "Example labeled [%i] added from " TEXT_RESET + TEXT_YELLOW "%s\n" TEXT_RESET, face_label, file_path); + } + } + + closedir(dir); + } else { + printf(TEXT_RED "Can't read from specified directory (%s)\n" + TEXT_RESET, in_file_name); + } + } else { + *notification_type = FAIL_OR_SUCCESSS; + mv_rectangle_s roi; + err = add_single_example(model, in_file_name, &roi, NULL); + } + + free(in_file_name); + + return err; } int perform_mv_face_recognition_model_reset_face_examples( - mv_face_recognition_model_h model, - bool full_reset) + mv_face_recognition_model_h model, + bool full_reset) { - printf(TEXT_GREEN "HINT:" TEXT_RESET "\n" - TEXT_YELLOW "Reset of the examples will affect only examples has\n" - "been collected via mv_face_recognition_model_add()\n" - "function calls (i.e. through 'Add image example' menu\n" - "item). Previously learned model will be not affected,\n" - "so it is possible to recognize faces with this model\n" - "after examples reset. Reset of the examples can be\n" - "useful to erase a class of faces (i.e. all examples\n" - "related to this class) before learning the model.\n" - "Or, if it is needed to reset all collected previously\n" - "examples as an alternative to the creating the new\n" - "model.\n" - TEXT_RESET); - - int err = MEDIA_VISION_ERROR_NONE; - - if (full_reset) - { - err = mv_face_recognition_model_reset(model, NULL); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during reset of all examples!!!" - " code: %i" TEXT_RESET "\n", err); - return err; - } - } - else - { - int reset_label = 0; - - while (-1 == input_int("Specify label for the examples to be reset:", - MIN_ALLOWED_LABEL, - MAX_ALLOWED_LABEL, - &reset_label)) - { - printf("Incorrect input! You can use %i-%i labels only. 
Try again.\n", - MIN_ALLOWED_LABEL, - MAX_ALLOWED_LABEL); - } - - err = mv_face_recognition_model_reset(model, &reset_label); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during reset of examples labeled" - " with %i!!! code: %i" TEXT_RESET "\n", reset_label, err); - return err; - } - } - - return err; + printf(TEXT_GREEN "HINT:" TEXT_RESET "\n" + TEXT_YELLOW "Reset of the examples will affect only examples has\n" + "been collected via mv_face_recognition_model_add()\n" + "function calls (i.e. through 'Add image example' menu\n" + "item). Previously learned model will be not affected,\n" + "so it is possible to recognize faces with this model\n" + "after examples reset. Reset of the examples can be\n" + "useful to erase a class of faces (i.e. all examples\n" + "related to this class) before learning the model.\n" + "Or, if it is needed to reset all collected previously\n" + "examples as an alternative to the creating the new\n" + "model.\n" + TEXT_RESET); + + int err = MEDIA_VISION_ERROR_NONE; + + if (full_reset) { + err = mv_face_recognition_model_reset(model, NULL); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during reset of all examples!!!" + " code: %i" TEXT_RESET "\n", err); + return err; + } + } else { + int reset_label = 0; + + while (-1 == input_int("Specify label for the examples to be reset:", + MIN_ALLOWED_LABEL, + MAX_ALLOWED_LABEL, + &reset_label)) { + printf("Incorrect input! You can use %i-%i labels only. Try again.\n", + MIN_ALLOWED_LABEL, + MAX_ALLOWED_LABEL); + } + + err = mv_face_recognition_model_reset(model, &reset_label); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during reset of examples labeled" + " with %i!!! 
code: %i" TEXT_RESET "\n", reset_label, err); + return err; + } + } + + return err; } int perform_mv_face_recognition_model_save(mv_face_recognition_model_h model) { - char *out_file_name = NULL; + char *out_file_name = NULL; - while (input_string("Input file name to save the model:", - 1024, &(out_file_name)) == -1) - { - printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); - } + while (input_string("Input file name to save the model:", + 1024, &(out_file_name)) == -1) { + printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); + } - const int err = mv_face_recognition_model_save(out_file_name, model); + const int err = mv_face_recognition_model_save(out_file_name, model); - free(out_file_name); + free(out_file_name); - return err; + return err; } int perform_mv_face_recognition_model_load(mv_face_recognition_model_h *model) { - char *in_file_name = NULL; + char *in_file_name = NULL; - while (input_string("Input file name to load model from:", - 1024, &(in_file_name)) == -1) - { - printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); - } + while (input_string("Input file name to load model from:", + 1024, &(in_file_name)) == -1) { + printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); + } - const int err = mv_face_recognition_model_load(in_file_name,model); + const int err = mv_face_recognition_model_load(in_file_name, model); - free(in_file_name); + free(in_file_name); - return err; + return err; } int perform_mv_face_recognition_model_clone( - mv_face_recognition_model_h model_to_clone) + mv_face_recognition_model_h model_to_clone) { - int err = MEDIA_VISION_ERROR_NONE; - - mv_face_recognition_model_h cloned_model = NULL; - - printf(TEXT_GREEN "Perform clone of the recognition model..." - TEXT_RESET "\n"); - - err = mv_face_recognition_model_clone(model_to_clone, &cloned_model); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "Errors were occurred during model clone. 
Error code %i" - TEXT_RESET "\n", err); - return err; - } - - printf(TEXT_YELLOW "Model cloning is done." TEXT_RESET "\n"); - - if (show_confirm_dialog("Save " TEXT_YELLOW "source model" TEXT_RESET - " to file?")) - { - const int serr = perform_mv_face_recognition_model_save(model_to_clone); - if (MEDIA_VISION_ERROR_NONE != serr) - { - printf(TEXT_RED - "Errors were occurred when trying to save " - "source model to file. Error code %i" TEXT_RESET "\n", serr); - } - } - - if (show_confirm_dialog("Save " TEXT_YELLOW "destination model" TEXT_RESET - " to file?")) - { - const int serr = perform_mv_face_recognition_model_save(cloned_model); - if (MEDIA_VISION_ERROR_NONE != serr) - { - printf(TEXT_RED - "Errors were occurred when trying to save destination model " - "to file. Error code %i" TEXT_RESET "\n", serr); - } - } - - if (cloned_model) - { - const int dest_err = mv_face_recognition_model_destroy(cloned_model); - if (MEDIA_VISION_ERROR_NONE != dest_err) - { - printf(TEXT_RED - "Errors were occurred when destroying destination model ." - "Error code %i" TEXT_RESET "\n", dest_err); - } - } - - return err; + int err = MEDIA_VISION_ERROR_NONE; + + mv_face_recognition_model_h cloned_model = NULL; + + printf(TEXT_GREEN "Perform clone of the recognition model..." + TEXT_RESET "\n"); + + err = mv_face_recognition_model_clone(model_to_clone, &cloned_model); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "Errors were occurred during model clone. Error code %i" + TEXT_RESET "\n", err); + return err; + } + + printf(TEXT_YELLOW "Model cloning is done." TEXT_RESET "\n"); + + if (show_confirm_dialog("Save " TEXT_YELLOW "source model" TEXT_RESET + " to file?")) { + const int serr = perform_mv_face_recognition_model_save(model_to_clone); + if (MEDIA_VISION_ERROR_NONE != serr) { + printf(TEXT_RED + "Errors were occurred when trying to save " + "source model to file. 
Error code %i" TEXT_RESET "\n", serr); + } + } + + if (show_confirm_dialog("Save " TEXT_YELLOW "destination model" TEXT_RESET + " to file?")) { + const int serr = perform_mv_face_recognition_model_save(cloned_model); + if (MEDIA_VISION_ERROR_NONE != serr) { + printf(TEXT_RED + "Errors were occurred when trying to save destination model " + "to file. Error code %i" TEXT_RESET "\n", serr); + } + } + + if (cloned_model) { + const int dest_err = mv_face_recognition_model_destroy(cloned_model); + if (MEDIA_VISION_ERROR_NONE != dest_err) { + printf(TEXT_RED + "Errors were occurred when destroying destination model ." + "Error code %i" TEXT_RESET "\n", dest_err); + } + } + + return err; } int perform_mv_face_recognition_model_learn(mv_face_recognition_model_h model) { - printf(TEXT_YELLOW "Learning the model has to be performed after\n" - "adding some amount of examples to the model.\n" - "If you learn without examples, you will get useless\n" - "model, which will be unavailable to recognize. Anyway,\n" - "you can add examples and launch this method again to\n" - "get the appropriate recognition model suitable for\n" - "recognition." - TEXT_RESET "\n"); - - printf(TEXT_GREEN "Start learning process..." TEXT_RESET "\n"); - - const int err = mv_face_recognition_model_learn(NULL, model); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "Learning the model failed. Error code: %i. " - "But you still can test with this model.\n" - TEXT_RESET "\n", err); - } - else - { - printf(TEXT_YELLOW "Recognition model has been learned." - TEXT_RESET "\n"); - } - - return err; + printf(TEXT_YELLOW "Learning the model has to be performed after\n" + "adding some amount of examples to the model.\n" + "If you learn without examples, you will get useless\n" + "model, which will be unavailable to recognize. Anyway,\n" + "you can add examples and launch this method again to\n" + "get the appropriate recognition model suitable for\n" + "recognition." 
+ TEXT_RESET "\n"); + + printf(TEXT_GREEN "Start learning process..." TEXT_RESET "\n"); + + const int err = mv_face_recognition_model_learn(NULL, model); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "Learning the model failed. Error code: %i. " + "But you still can test with this model.\n" + TEXT_RESET "\n", err); + } else { + printf(TEXT_YELLOW "Recognition model has been learned." + TEXT_RESET "\n"); + } + + return err; } int perform_mv_face_recognition_model_query_labels(mv_face_recognition_model_h model) { - int *learned_labels = NULL; - int learned_labels_n = 0; + int *learned_labels = NULL; + int learned_labels_n = 0; - const int err = mv_face_recognition_model_query_labels(model, &learned_labels, &learned_labels_n); + const int err = mv_face_recognition_model_query_labels(model, &learned_labels, &learned_labels_n); - if (MEDIA_VISION_ERROR_NONE != err) - { - free(learned_labels); + if (MEDIA_VISION_ERROR_NONE != err) { + free(learned_labels); - return err; - } + return err; + } - int i = 0; - printf(TEXT_YELLOW "Recognition model had been learned for the following labels: " - TEXT_RESET "\n" TEXT_GREEN); - for (i = 0; i < learned_labels_n; ++i) - { - printf("%i, ", learned_labels[i]); - } - printf(TEXT_RESET "\n"); + int i = 0; + printf(TEXT_YELLOW "Recognition model had been learned for the following labels: " + TEXT_RESET "\n" TEXT_GREEN); + for (i = 0; i < learned_labels_n; ++i) + printf("%i, ", learned_labels[i]); - free(learned_labels); + printf(TEXT_RESET "\n"); - return MEDIA_VISION_ERROR_NONE; + free(learned_labels); + + return MEDIA_VISION_ERROR_NONE; } static int TP = 0; @@ -859,777 +770,683 @@ static int FN = 0; static double THRESHOLD = 0.75; void evaluation_cb( - mv_source_h source, - mv_face_recognition_model_h recognition_model, - mv_engine_config_h engine_cfg, - mv_rectangle_s *face_location, - const int *face_label, - double confidence, - void *user_data) + mv_source_h source, + mv_face_recognition_model_h recognition_model, + 
mv_engine_config_h engine_cfg, + mv_rectangle_s *face_location, + const int *face_label, + double confidence, + void *user_data) { - if (NULL != user_data) - { - const int real_label = *((int*)user_data); - const int rec_label = (NULL != face_label ? *face_label : -1); - if (real_label == -1) - { - confidence >= THRESHOLD ? ++FP : ++TN; - } - else if (real_label == rec_label) - { - confidence >= THRESHOLD ? ++TP : ++FN; - } - else - { - if (confidence >= THRESHOLD) { ++FP; } - ++FN; - } - } + if (NULL != user_data) { + const int real_label = *((int*)user_data); + const int rec_label = (NULL != face_label ? *face_label : -1); + if (real_label == -1) { + confidence >= THRESHOLD ? ++FP : ++TN; + } else if (real_label == rec_label) { + confidence >= THRESHOLD ? ++TP : ++FN; + } else { + if (confidence >= THRESHOLD) + ++FP; + + ++FN; + } + } } int perform_model_evaluation(mv_face_recognition_model_h model) { - int *learned_labels = NULL; - int learned_labels_n = 0; - - mv_face_recognition_model_query_labels(model, &learned_labels, &learned_labels_n); - - int i = 0; - - printf(TEXT_YELLOW "Evaluating model had been learned for the following labels: " - TEXT_RESET "\n" TEXT_GREEN); - for (i = 0; i < learned_labels_n; ++i) - { - printf("%i, ", learned_labels[i]); - } - printf(TEXT_RESET "\n"); - - // 100 directories are allowed: - const int max_dir_allowed = 100; - char (*directories)[1024] = malloc(sizeof *directories * max_dir_allowed); - int labels[max_dir_allowed]; - int unique_checks[MAX_ALLOWED_LABEL + 1]; - for (i = 0; i < MAX_ALLOWED_LABEL + 1; ++i) - { - unique_checks[i] = 0; - } - - int dir_n = 0; - int label_count = 0; - while (show_confirm_dialog("Add test images directory?") && - dir_n < max_dir_allowed) - { - char *in_file_name = NULL; - while (-1 == input_string("Specify path to the test images directory:", 1024, &(in_file_name))) - { - printf(TEXT_RED "Incorrect input! Try again." 
TEXT_RESET "\n"); - } - - DIR *dir; - if ((dir = opendir(in_file_name)) == NULL) - { - printf(TEXT_RED "Incorrect input! Directory %s can't be read.\n" - TEXT_RESET, in_file_name); - free(in_file_name); - in_file_name = NULL; - continue; - } - else - { - closedir(dir); - } - - int face_label = 0; - if (-1 == input_int("Specify label as integer:", - MIN_ALLOWED_LABEL, - MAX_ALLOWED_LABEL, - &face_label)) - { - printf(TEXT_RED "Incorrect input! You can use %i-%i labels only.\n" - TEXT_RESET, - MIN_ALLOWED_LABEL, - MAX_ALLOWED_LABEL); - free(in_file_name); - in_file_name = NULL; - continue; - } - - bool known_label = false; - for (i = 0; i < learned_labels_n; ++i) - { - if (learned_labels[i] == face_label) - { - known_label = true; - break; - } - } - - if (!known_label) - { - printf(TEXT_YELLOW "Recognition model didn't learn with specified label.\n" - "Images will be marked as unknown (-1)\n" TEXT_RESET); - } - - labels[dir_n] = known_label ? face_label : -1; - strcpy(directories[dir_n], in_file_name); - label_count += (0 == unique_checks[face_label] ? 1 : 0); - if (labels[dir_n] >= 0) - { - unique_checks[labels[dir_n]] += 1; - } - - free(in_file_name); - - ++dir_n; - - printf(TEXT_GREEN "Current test set for %i unique labels:\n" TEXT_RESET, label_count); - for (i = 0; i < dir_n; ++i) - { - printf(TEXT_YELLOW "Label %i: " TEXT_RESET "%s\n", labels[i], directories[i]); - } - } - - free(learned_labels); - - int rec_threshold = 0; - while (-1 == input_int("Specify recognition confidence threshold (0-100%):", 0, 100, &rec_threshold)) - { - printf(TEXT_RED "Incorrect input! You can use 0-100 values only." TEXT_RESET "\n"); - } - THRESHOLD = (double) rec_threshold / 100.0; - - TP = 0; - FP = 0; - TN = 0; - FN = 0; - - mv_source_h source = NULL; - int err = mv_create_source(&source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during creating the source!!! 
code: %i" - TEXT_RESET "\n", err); - return err; - } - - for (i = 0; i < dir_n; ++i) - { - DIR *dir; - struct dirent *ent; - printf("Processing %s...\n", directories[i]); - if ((dir = opendir(directories[i])) != NULL) - { - char file_path[1024] = ""; - - // Traverses all the files and directories within source directory - while ((ent = readdir(dir)) != NULL) - { - // Determine current entry name - const char *file_name = ent->d_name; - - // If current entry is directory, or hidden object, skip the step: - if (file_name[0] == '.') - { - continue; - } - - sprintf(file_path, "%s/%s", directories[i], file_name); - err = load_mv_source_from_file(file_path, source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "Failed to test on example from %s. " - "Example will not affect the evaluation. " - "Error code: %i.\n" TEXT_RESET, - file_path, err); - } - else - { - err = mv_face_recognize(source, model, NULL, NULL, evaluation_cb, &(labels[i])); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "Failed to recognize on example from %s. " - "Example will not affect the evaluation. " - "Error code: %i\n" TEXT_RESET, - file_path, err); - } - } - } - - closedir(dir); - } - else - { - printf(TEXT_RED "Can't read from directory [%s]\n" - TEXT_RESET, directories[i]); - } - } - - int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED - "ERROR: Errors were occurred during destroying the source!!! 
code: %i" - TEXT_RESET "\n", err2); - } - } - - double accuracy = (TP + TN) / (double) (TP + FP + TN + FN); - double precision = TP / (double) (TP + FP); - double recall = TP / (double) (TP + FN); - double f1 = 2 * precision * recall / (precision + recall); - - printf(TEXT_GREEN "Evaluation results:\n" TEXT_RESET); - printf(TEXT_YELLOW "\tTRUE POSITIVE : " TEXT_RESET "%5i\n", TP); - printf(TEXT_YELLOW "\tFALSE POSITIVE : " TEXT_RESET "%5i\n", FP); - printf(TEXT_YELLOW "\tTRUE NEGATIVE : " TEXT_RESET "%5i\n", TN); - printf(TEXT_YELLOW "\tFALSE NEGATIVE : " TEXT_RESET "%5i\n", FN); - printf(TEXT_YELLOW "\tAccuracy : " TEXT_RESET "%f\n", accuracy); - printf(TEXT_YELLOW "\tPrecision : " TEXT_RESET "%f\n", precision); - printf(TEXT_YELLOW "\tRecall : " TEXT_RESET "%f\n", recall); - printf(TEXT_YELLOW "\tF1 score : " TEXT_RESET "%f\n", f1); - - free(directories); - - return err; + int *learned_labels = NULL; + int learned_labels_n = 0; + + mv_face_recognition_model_query_labels(model, &learned_labels, &learned_labels_n); + + int i = 0; + + printf(TEXT_YELLOW "Evaluating model had been learned for the following labels: " + TEXT_RESET "\n" TEXT_GREEN); + for (i = 0; i < learned_labels_n; ++i) + printf("%i, ", learned_labels[i]); + + printf(TEXT_RESET "\n"); + + /* 100 directories are allowed: */ + const int max_dir_allowed = 100; + char (*directories)[1024] = malloc(sizeof *directories * max_dir_allowed); + int labels[max_dir_allowed]; + int unique_checks[MAX_ALLOWED_LABEL + 1]; + for (i = 0; i < MAX_ALLOWED_LABEL + 1; ++i) + unique_checks[i] = 0; + + int dir_n = 0; + int label_count = 0; + while (show_confirm_dialog("Add test images directory?") && + dir_n < max_dir_allowed) { + char *in_file_name = NULL; + while (-1 == input_string("Specify path to the test images directory:", 1024, &(in_file_name))) + printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); + + DIR *dir; + if ((dir = opendir(in_file_name)) == NULL) { + printf(TEXT_RED "Incorrect input! 
Directory %s can't be read.\n" + TEXT_RESET, in_file_name); + free(in_file_name); + in_file_name = NULL; + continue; + } else { + closedir(dir); + } + + int face_label = 0; + if (-1 == input_int("Specify label as integer:", + MIN_ALLOWED_LABEL, + MAX_ALLOWED_LABEL, + &face_label)) { + printf(TEXT_RED "Incorrect input! You can use %i-%i labels only.\n" + TEXT_RESET, + MIN_ALLOWED_LABEL, + MAX_ALLOWED_LABEL); + free(in_file_name); + in_file_name = NULL; + continue; + } + + bool known_label = false; + for (i = 0; i < learned_labels_n; ++i) { + if (learned_labels[i] == face_label) { + known_label = true; + break; + } + } + + if (!known_label) { + printf(TEXT_YELLOW "Recognition model didn't learn with specified label.\n" + "Images will be marked as unknown (-1)\n" TEXT_RESET); + } + + labels[dir_n] = known_label ? face_label : -1; + strcpy(directories[dir_n], in_file_name); + label_count += (0 == unique_checks[face_label] ? 1 : 0); + if (labels[dir_n] >= 0) + unique_checks[labels[dir_n]] += 1; + + free(in_file_name); + + ++dir_n; + + printf(TEXT_GREEN "Current test set for %i unique labels:\n" TEXT_RESET, label_count); + for (i = 0; i < dir_n; ++i) + printf(TEXT_YELLOW "Label %i: " TEXT_RESET "%s\n", labels[i], directories[i]); + } + + free(learned_labels); + + int rec_threshold = 0; + while (-1 == input_int("Specify recognition confidence threshold (0-100%):", 0, 100, &rec_threshold)) + printf(TEXT_RED "Incorrect input! You can use 0-100 values only." TEXT_RESET "\n"); + + THRESHOLD = (double) rec_threshold / 100.0; + + TP = 0; + FP = 0; + TN = 0; + FN = 0; + + mv_source_h source = NULL; + int err = mv_create_source(&source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during creating the source!!! 
code: %i" + TEXT_RESET "\n", err); + return err; + } + + for (i = 0; i < dir_n; ++i) { + DIR *dir; + struct dirent *ent; + printf("Processing %s...\n", directories[i]); + if ((dir = opendir(directories[i])) != NULL) { + char file_path[1024] = ""; + + /* Traverses all the files and directories within source directory */ + while ((ent = readdir(dir)) != NULL) { + /* Determine current entry name */ + const char *file_name = ent->d_name; + + /* If current entry is directory, or hidden object, skip the step: */ + if (file_name[0] == '.') + continue; + + sprintf(file_path, "%s/%s", directories[i], file_name); + err = load_mv_source_from_file(file_path, source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "Failed to test on example from %s. " + "Example will not affect the evaluation. " + "Error code: %i.\n" TEXT_RESET, + file_path, err); + } else { + err = mv_face_recognize(source, model, NULL, NULL, evaluation_cb, &(labels[i])); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "Failed to recognize on example from %s. " + "Example will not affect the evaluation. " + "Error code: %i\n" TEXT_RESET, + file_path, err); + } + } + } + + closedir(dir); + } else { + printf(TEXT_RED "Can't read from directory [%s]\n" + TEXT_RESET, directories[i]); + } + } + + int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED + "ERROR: Errors were occurred during destroying the source!!! 
code: %i" + TEXT_RESET "\n", err2); + } + } + + double accuracy = (TP + TN) / (double) (TP + FP + TN + FN); + double precision = TP / (double) (TP + FP); + double recall = TP / (double) (TP + FN); + double f1 = 2 * precision * recall / (precision + recall); + + printf(TEXT_GREEN "Evaluation results:\n" TEXT_RESET); + printf(TEXT_YELLOW "\tTRUE POSITIVE : " TEXT_RESET "%5i\n", TP); + printf(TEXT_YELLOW "\tFALSE POSITIVE : " TEXT_RESET "%5i\n", FP); + printf(TEXT_YELLOW "\tTRUE NEGATIVE : " TEXT_RESET "%5i\n", TN); + printf(TEXT_YELLOW "\tFALSE NEGATIVE : " TEXT_RESET "%5i\n", FN); + printf(TEXT_YELLOW "\tAccuracy : " TEXT_RESET "%f\n", accuracy); + printf(TEXT_YELLOW "\tPrecision : " TEXT_RESET "%f\n", precision); + printf(TEXT_YELLOW "\tRecall : " TEXT_RESET "%f\n", recall); + printf(TEXT_YELLOW "\tF1 score : " TEXT_RESET "%f\n", f1); + + free(directories); + + return err; } int perform_recognize() { - printf("\n" TEXT_YELLOW - "Recognition model isn't now created.\n" - "You may create it to perform positive \n" - "testing, or don't create to check the \n" - "functionality behaviour for uncreated model." - TEXT_RESET - "\n"); - - int err = MEDIA_VISION_ERROR_NONE; - mv_face_recognition_model_h recognition_model = NULL; - const bool do_create = show_confirm_dialog("Do Create Recognition Model?"); - if (do_create) - { - printf(TEXT_GREEN "Creating recognition model..." TEXT_RESET "\n"); - - err = mv_face_recognition_model_create(&recognition_model); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "Creating the model failed. Error code: %i. " - "But you still can test with uncreated model.\n" - TEXT_RESET "\n", err); - } - else - { - printf(TEXT_YELLOW "Recognition model has been created." 
- TEXT_RESET "\n"); - } - } - - int sel_opt = 0; - const int options[11] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 }; - const char *names[11] = { "Add image example", - "Reset examples by id", - "Reset all examples", - "Clone the model", - "Learn the model", - "Show learned labels", - "Save model to file", - "Load model from file", - "Recognize with model", - "Evaluate the model", - "Destroy model and exit" }; - - while(!sel_opt) - { - sel_opt = show_menu("Select action:", options, names, 11); - notification_type_e notification_type = FAIL_OR_SUCCESSS; - - switch (sel_opt) - { - case 1: - err = perform_mv_face_recognition_model_add_face_example(recognition_model, ¬ification_type); - break; - case 2: - err = perform_mv_face_recognition_model_reset_face_examples(recognition_model, false); - break; - case 3: - err = perform_mv_face_recognition_model_reset_face_examples(recognition_model, true); - break; - case 4: - err = perform_mv_face_recognition_model_clone(recognition_model); - break; - case 5: - err = perform_mv_face_recognition_model_learn(recognition_model); - break; - case 6: - err = perform_mv_face_recognition_model_query_labels(recognition_model); - break; - case 7: - err = perform_mv_face_recognition_model_save(recognition_model); - break; - case 8: - err = perform_mv_face_recognition_model_load(&recognition_model); - break; - case 9: - err = perform_mv_face_recognize(recognition_model); - break; - case 10: - err = perform_model_evaluation(recognition_model); - break; - case 11: - if (do_create) - { - err = mv_face_recognition_model_destroy(recognition_model); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "Error with code %i was occurred during destoy" - TEXT_RESET "\n", err); - } - - return err; - } - else - { - return MEDIA_VISION_ERROR_NONE; - } - default: - sel_opt = 0; - printf("ERROR: Incorrect option was selected.\n"); - continue; - } - - print_action_result(names[sel_opt - 1], err, notification_type); - - sel_opt = 0; - } + printf("\n" 
TEXT_YELLOW + "Recognition model isn't now created.\n" + "You may create it to perform positive \n" + "testing, or don't create to check the \n" + "functionality behaviour for uncreated model." + TEXT_RESET + "\n"); + + int err = MEDIA_VISION_ERROR_NONE; + mv_face_recognition_model_h recognition_model = NULL; + const bool do_create = show_confirm_dialog("Do Create Recognition Model?"); + if (do_create) { + printf(TEXT_GREEN "Creating recognition model..." TEXT_RESET "\n"); + + err = mv_face_recognition_model_create(&recognition_model); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "Creating the model failed. Error code: %i. " + "But you still can test with uncreated model.\n" + TEXT_RESET "\n", err); + } else { + printf(TEXT_YELLOW "Recognition model has been created." + TEXT_RESET "\n"); + } + } + + int sel_opt = 0; + const int options[11] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 }; + const char *names[11] = { "Add image example", + "Reset examples by id", + "Reset all examples", + "Clone the model", + "Learn the model", + "Show learned labels", + "Save model to file", + "Load model from file", + "Recognize with model", + "Evaluate the model", + "Destroy model and exit" }; + + while (!sel_opt) { + sel_opt = show_menu("Select action:", options, names, 11); + notification_type_e notification_type = FAIL_OR_SUCCESSS; + + switch (sel_opt) { + case 1: + err = perform_mv_face_recognition_model_add_face_example(recognition_model, ¬ification_type); + break; + case 2: + err = perform_mv_face_recognition_model_reset_face_examples(recognition_model, false); + break; + case 3: + err = perform_mv_face_recognition_model_reset_face_examples(recognition_model, true); + break; + case 4: + err = perform_mv_face_recognition_model_clone(recognition_model); + break; + case 5: + err = perform_mv_face_recognition_model_learn(recognition_model); + break; + case 6: + err = perform_mv_face_recognition_model_query_labels(recognition_model); + break; + case 7: + err = 
perform_mv_face_recognition_model_save(recognition_model); + break; + case 8: + err = perform_mv_face_recognition_model_load(&recognition_model); + break; + case 9: + err = perform_mv_face_recognize(recognition_model); + break; + case 10: + err = perform_model_evaluation(recognition_model); + break; + case 11: + if (do_create) { + err = mv_face_recognition_model_destroy(recognition_model); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "Error with code %i was occurred during destoy" + TEXT_RESET "\n", err); + } + + return err; + } else { + return MEDIA_VISION_ERROR_NONE; + } + default: + sel_opt = 0; + printf("ERROR: Incorrect option was selected.\n"); + continue; + } + + print_action_result(names[sel_opt - 1], err, notification_type); + + sel_opt = 0; + } } int perform_mv_face_tracking_model_save(mv_face_tracking_model_h model) { - char *out_file_name = NULL; + char *out_file_name = NULL; - while (input_string("Input file name to save the model:", - 1024, &(out_file_name)) == -1) - { - printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); - } + while (input_string("Input file name to save the model:", + 1024, &(out_file_name)) == -1) { + printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); + } - const int err = mv_face_tracking_model_save(out_file_name, model); + const int err = mv_face_tracking_model_save(out_file_name, model); - free(out_file_name); + free(out_file_name); - return err; + return err; } int perform_mv_face_tracking_model_load(mv_face_tracking_model_h *model) { - char *in_file_name = NULL; + char *in_file_name = NULL; - while (input_string("Input file name to load model from:", - 1024, &(in_file_name)) == -1) - { - printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); - } + while (input_string("Input file name to load model from:", + 1024, &(in_file_name)) == -1) { + printf(TEXT_RED "Incorrect input! Try again." 
TEXT_RESET "\n"); + } - const int err = mv_face_tracking_model_load(in_file_name, model); + const int err = mv_face_tracking_model_load(in_file_name, model); - free(in_file_name); + free(in_file_name); - return err; + return err; } int perform_mv_face_tracking_model_clone( - mv_face_tracking_model_h model_to_clone) + mv_face_tracking_model_h model_to_clone) { - int err = MEDIA_VISION_ERROR_NONE; - - mv_face_tracking_model_h cloned_model = NULL; - - printf(TEXT_GREEN "Perform clone of the tracking model..." - TEXT_RESET "\n"); - - err = mv_face_tracking_model_clone(model_to_clone, &cloned_model); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "Errors were occurred during model clone. Error code %i" - TEXT_RESET "\n", err); - return err; - } - - printf(TEXT_YELLOW "Model cloning is done." TEXT_RESET "\n"); - - if (show_confirm_dialog("Save " TEXT_YELLOW "source model" TEXT_RESET - " to file?")) - { - const int serr = perform_mv_face_tracking_model_save(model_to_clone); - if (MEDIA_VISION_ERROR_NONE != serr) - { - printf(TEXT_RED - "Errors were occurred when trying to save " - "source model to file. Error code %i" TEXT_RESET "\n", serr); - } - } - - if (show_confirm_dialog("Save " TEXT_YELLOW "destination model" TEXT_RESET - " to file?")) - { - const int serr = perform_mv_face_tracking_model_save(cloned_model); - if (MEDIA_VISION_ERROR_NONE != serr) - { - printf(TEXT_RED - "Errors were occurred when trying to save destination model " - "to file. Error code %i" TEXT_RESET "\n", serr); - } - } - - if (cloned_model) - { - const int dest_err = mv_face_tracking_model_destroy(cloned_model); - if (MEDIA_VISION_ERROR_NONE != dest_err) - { - printf(TEXT_RED - "Errors were occurred when destroying destination model ." - "Error code %i" TEXT_RESET "\n", dest_err); - } - } - - return err; + int err = MEDIA_VISION_ERROR_NONE; + + mv_face_tracking_model_h cloned_model = NULL; + + printf(TEXT_GREEN "Perform clone of the tracking model..." 
+ TEXT_RESET "\n"); + + err = mv_face_tracking_model_clone(model_to_clone, &cloned_model); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "Errors were occurred during model clone. Error code %i" + TEXT_RESET "\n", err); + return err; + } + + printf(TEXT_YELLOW "Model cloning is done." TEXT_RESET "\n"); + + if (show_confirm_dialog("Save " TEXT_YELLOW "source model" TEXT_RESET + " to file?")) { + const int serr = perform_mv_face_tracking_model_save(model_to_clone); + if (MEDIA_VISION_ERROR_NONE != serr) { + printf(TEXT_RED + "Errors were occurred when trying to save " + "source model to file. Error code %i" TEXT_RESET "\n", serr); + } + } + + if (show_confirm_dialog("Save " TEXT_YELLOW "destination model" TEXT_RESET + " to file?")) { + const int serr = perform_mv_face_tracking_model_save(cloned_model); + if (MEDIA_VISION_ERROR_NONE != serr) { + printf(TEXT_RED + "Errors were occurred when trying to save destination model " + "to file. Error code %i" TEXT_RESET "\n", serr); + } + } + + if (cloned_model) { + const int dest_err = mv_face_tracking_model_destroy(cloned_model); + if (MEDIA_VISION_ERROR_NONE != dest_err) { + printf(TEXT_RED + "Errors were occurred when destroying destination model ." + "Error code %i" TEXT_RESET "\n", dest_err); + } + } + + return err; } static volatile bool frame_read = false; void video_1_sample_cb( - char *buffer, - unsigned int buffer_size, - image_data_s image_data, - void *user_data) + char *buffer, + unsigned int buffer_size, + image_data_s image_data, + void *user_data) { - if (!frame_read) - { - mv_source_h source = (mv_source_h)user_data; - - const int err = mv_source_fill_by_buffer( - source, - buffer, - buffer_size, - image_data.image_width, - image_data.image_height, - image_data.image_colorspace); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during filling the " - "source based on the video frame! 
Error code: %i" - TEXT_RESET, err); - } - - frame_read = true; - } + if (!frame_read) { + mv_source_h source = (mv_source_h)user_data; + + const int err = mv_source_fill_by_buffer( + source, + buffer, + buffer_size, + image_data.image_width, + image_data.image_height, + image_data.image_colorspace); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during filling the " + "source based on the video frame! Error code: %i" + TEXT_RESET, err); + } + + frame_read = true; + } } void face_detected_for_tracking_cb( - mv_source_h source, - mv_engine_config_h engine_cfg, - mv_rectangle_s *faces_locations, - int number_of_faces, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s *faces_locations, + int number_of_faces, + void *user_data) { - if (number_of_faces < 1) - { - printf(TEXT_RED "Unfortunatly, no faces were detected on the\n" - "preparation frame. You has to specify bounding\n" - "quadrangles for tracking without advices." 
- TEXT_RESET "\n"); - return; - } - - printf(TEXT_YELLOW "%i face(s) were detected at the preparation frame.\n" - "Following list includes information on faces bounding\n" - "boxes coordinates:" - TEXT_RESET "\n", number_of_faces); - - int idx = 0; - while (idx < number_of_faces) - { - printf(TEXT_MAGENTA "Face %i bounding box: " TEXT_RESET "\n", ++idx); - printf(TEXT_CYAN "\tTop left point: x1: %4i; y1: %4i\n" TEXT_RESET, - faces_locations[idx - 1].point.x, - faces_locations[idx - 1].point.y); - printf(TEXT_CYAN "\tTop right point: x2: %4i; y2: %4i\n" TEXT_RESET, - faces_locations[idx - 1].point.x + faces_locations[idx - 1].width, - faces_locations[idx - 1].point.y); - printf(TEXT_CYAN "\tBottom right point: x3: %4i; y3: %4i\n" TEXT_RESET, - faces_locations[idx - 1].point.x + faces_locations[idx - 1].width, - faces_locations[idx - 1].point.y + faces_locations[idx - 1].height); - printf(TEXT_CYAN "\tBottom right point: x4: %4i; y4: %4i\n" TEXT_RESET, - faces_locations[idx - 1].point.x, - faces_locations[idx - 1].point.y + faces_locations[idx - 1].height); - } + if (number_of_faces < 1) { + printf(TEXT_RED "Unfortunatly, no faces were detected on the\n" + "preparation frame. You has to specify bounding\n" + "quadrangles for tracking without advices." 
+ TEXT_RESET "\n"); + return; + } + + printf(TEXT_YELLOW "%i face(s) were detected at the preparation frame.\n" + "Following list includes information on faces bounding\n" + "boxes coordinates:" + TEXT_RESET "\n", number_of_faces); + + int idx = 0; + while (idx < number_of_faces) { + printf(TEXT_MAGENTA "Face %i bounding box: " TEXT_RESET "\n", ++idx); + printf(TEXT_CYAN "\tTop left point: x1: %4i; y1: %4i\n" TEXT_RESET, + faces_locations[idx - 1].point.x, + faces_locations[idx - 1].point.y); + printf(TEXT_CYAN "\tTop right point: x2: %4i; y2: %4i\n" TEXT_RESET, + faces_locations[idx - 1].point.x + faces_locations[idx - 1].width, + faces_locations[idx - 1].point.y); + printf(TEXT_CYAN "\tBottom right point: x3: %4i; y3: %4i\n" TEXT_RESET, + faces_locations[idx - 1].point.x + faces_locations[idx - 1].width, + faces_locations[idx - 1].point.y + faces_locations[idx - 1].height); + printf(TEXT_CYAN "\tBottom right point: x4: %4i; y4: %4i\n" TEXT_RESET, + faces_locations[idx - 1].point.x, + faces_locations[idx - 1].point.y + faces_locations[idx - 1].height); + } } int load_source_from_first_video_frame(const char *video_file, mv_source_h source) { - mv_video_reader_h reader = NULL; - int err = mv_create_video_reader(&reader); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during creating the video " - "reader! Error code: %i\n" TEXT_RESET, err); - return err; - } - - err = mv_video_reader_set_new_sample_cb( - reader, - video_1_sample_cb, - source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during new sample " - "callback set! Error code: %i\n" TEXT_RESET, err); - - const int err2 = mv_destroy_video_reader(reader); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED "ERROR: Errors were occurred during video reader " - "destroy! 
Error code: %i\n" TEXT_RESET, err); - } - - return err; - } - - frame_read = false; - image_data_s video_info; - unsigned int fps; - err = mv_video_reader_load(reader, video_file, &video_info, &fps); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during loading the video " - "by reader! Error code: %i\n" TEXT_RESET, err); - - const int err2 = mv_destroy_video_reader(reader); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED "ERROR: Errors were occurred during video reader " - "destroy! Error code: %i\n" TEXT_RESET, err); - } - - return err; - } - - //wait for the video reading thread - while (true) - { - if (frame_read) - { - int err2 = mv_video_reader_stop(reader); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED "ERROR: Errors were occurred during attempt to " - "stop video reader! Error code: %i\n" TEXT_RESET, err2); - } - - err2 = mv_destroy_video_reader(reader); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED "ERROR: Errors were occurred during video " - "reader destroy! Error code: %i\n" TEXT_RESET, err2); - } - - break; - } - } - - return MEDIA_VISION_ERROR_NONE; + mv_video_reader_h reader = NULL; + int err = mv_create_video_reader(&reader); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during creating the video " + "reader! Error code: %i\n" TEXT_RESET, err); + return err; + } + + err = mv_video_reader_set_new_sample_cb( + reader, + video_1_sample_cb, + source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during new sample " + "callback set! Error code: %i\n" TEXT_RESET, err); + + const int err2 = mv_destroy_video_reader(reader); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED "ERROR: Errors were occurred during video reader " + "destroy! 
Error code: %i\n" TEXT_RESET, err); + } + + return err; + } + + frame_read = false; + image_data_s video_info; + unsigned int fps; + err = mv_video_reader_load(reader, video_file, &video_info, &fps); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during loading the video " + "by reader! Error code: %i\n" TEXT_RESET, err); + + const int err2 = mv_destroy_video_reader(reader); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED "ERROR: Errors were occurred during video reader " + "destroy! Error code: %i\n" TEXT_RESET, err); + } + + return err; + } + + /* wait for the video reading thread */ + while (true) { + if (frame_read) { + int err2 = mv_video_reader_stop(reader); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED "ERROR: Errors were occurred during attempt to " + "stop video reader! Error code: %i\n" TEXT_RESET, err2); + } + + err2 = mv_destroy_video_reader(reader); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED "ERROR: Errors were occurred during video " + "reader destroy! Error code: %i\n" TEXT_RESET, err2); + } + + break; + } + } + + return MEDIA_VISION_ERROR_NONE; } int perform_mv_face_tracking_model_prepare(mv_face_tracking_model_h model) { - printf(TEXT_YELLOW "Before any tracking session the tracking model\n" - "preparation is required. Exception is the case when\n" - "the next tracking session will be performed with the\n" - "video which is the direct continuation of the video\n" - "has been used at the previous tracking session.\n" - "Preparation has to be done with the first frame of\n" - "the video or first image from continuous image\n" - "sequence for which next tracking session plan to be\n" - "performed.\nTracking model preparation includes\n" - "specifying the location of the face to be tracked on\n" - "the first frame. 
Face tracking algorithm will try to\n" - "grab the face image significant features and\n" - "optionally will try to determine the background.\n" - "Actually, preparation is model-dependent and may\n" - "differs in respect to used tracking algorithm." - TEXT_RESET "\n"); - - int sel_opt = 0; - const int options[2] = { 1, 2 }; - const char *names[2] = { "Prepare with the video file", - "Prepare with the image file" }; - - bool is_video = false; - - while(!sel_opt) - { - sel_opt = show_menu("Select action:", options, names, 2); - switch (sel_opt) - { - case 1: - is_video = true; - break; - case 2: - is_video = false; - break; - default: - sel_opt = 0; - continue; - } - } - - mv_source_h preparation_frame = NULL; - int err = mv_create_source(&preparation_frame); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during creating the source!!! code: %i" - TEXT_RESET "\n", err); - - return err; - } - - char *init_frame_file_name = NULL; - const char *prompt_str = - (is_video ? "Input video file name to prepare the model:" - : "Input image file name to prepare the model:"); - - while (input_string(prompt_str, 1024, &(init_frame_file_name)) == -1) - { - printf(TEXT_RED "Incorrect input! Try again.\n" TEXT_RESET); - } - - if (is_video) - { - err = load_source_from_first_video_frame(init_frame_file_name, preparation_frame); - } - else - { - err = load_mv_source_from_file(init_frame_file_name, preparation_frame); - } - - free(init_frame_file_name); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during preparation " - "frame/image load! Error code: %i\n" TEXT_RESET, err); - - int err2 = mv_destroy_source(preparation_frame); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED "ERROR: Errors were occurred during destroying the " - "source! 
Error code: %i\n" TEXT_RESET, err2); - } - - return err; - } - - mv_engine_config_h eng_config = NULL; - err = mv_create_engine_config(&eng_config); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during creating the " - "engine config! Error code: %i\n" TEXT_RESET, err); - } - else - { - err = mv_engine_config_set_string_attribute( - eng_config, - MV_FACE_DETECTION_MODEL_FILE_PATH, - "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml"); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during setting of the " - "the 'MV_FACE_DETECTION_MODEL_FILE_PATH' attribute " - "for engine configuration! Check media-vision-config.json " - "file existence. Error code: %i" TEXT_RESET, err); - } - } - - err = mv_face_detect( - preparation_frame, - eng_config, - face_detected_for_tracking_cb, - NULL); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during face detection! " - "Error code: %i\n" TEXT_RESET, err); - - int err2 = mv_destroy_engine_config(eng_config); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED "ERROR: Errors were occurred during destroying the " - "engine configuration! Error code: %i\n" TEXT_RESET, err2); - } - - return err; - } - - mv_quadrangle_s roi; - - if (show_confirm_dialog("Do specify the face location?")) - { - printf(TEXT_YELLOW "Specify the coordinates of the quadrangle to be used\n" - "for tracking model preparation:" TEXT_RESET "\n"); - int idx = 0; - char str_prompt[100]; - while (idx < 4) - { - ++idx; - sprintf(str_prompt, "Specify point %i x coordinate: x%i = ", - idx - 1, idx); - while (-1 == input_int(str_prompt, INT_MIN, INT_MAX, - &(roi.points[idx - 1].x))) - { - printf("Incorrect input! 
Try again.\n"); - } - sprintf(str_prompt, "Specify point %i y coordinate: y%i = ", - idx - 1, idx); - while (-1 == input_int(str_prompt, INT_MIN, INT_MAX, - &(roi.points[idx - 1].y))) - { - printf("Incorrect input! Try again.\n"); - } - } - - err = mv_face_tracking_model_prepare( - model, eng_config, preparation_frame, &roi); - } - else - { - err = mv_face_tracking_model_prepare( - model, eng_config, preparation_frame, NULL); - } - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during the tracking model " - "preparation! Error code: %i\n" TEXT_RESET, err); - } - - const int err2 = mv_destroy_source(preparation_frame); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED "ERROR: Errors were occurred during destroying the " - "source! Error code: %i\n" TEXT_RESET, err2); - } - - return err; + printf(TEXT_YELLOW "Before any tracking session the tracking model\n" + "preparation is required. Exception is the case when\n" + "the next tracking session will be performed with the\n" + "video which is the direct continuation of the video\n" + "has been used at the previous tracking session.\n" + "Preparation has to be done with the first frame of\n" + "the video or first image from continuous image\n" + "sequence for which next tracking session plan to be\n" + "performed.\nTracking model preparation includes\n" + "specifying the location of the face to be tracked on\n" + "the first frame. Face tracking algorithm will try to\n" + "grab the face image significant features and\n" + "optionally will try to determine the background.\n" + "Actually, preparation is model-dependent and may\n" + "differs in respect to used tracking algorithm." 
+ TEXT_RESET "\n"); + + int sel_opt = 0; + const int options[2] = { 1, 2 }; + const char *names[2] = { "Prepare with the video file", + "Prepare with the image file" }; + bool is_video = false; + + while (!sel_opt) { + sel_opt = show_menu("Select action:", options, names, 2); + switch (sel_opt) { + case 1: + is_video = true; + break; + case 2: + is_video = false; + break; + default: + sel_opt = 0; + continue; + } + } + + mv_source_h preparation_frame = NULL; + int err = mv_create_source(&preparation_frame); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during creating the source!!! code: %i" + TEXT_RESET "\n", err); + + return err; + } + + char *init_frame_file_name = NULL; + const char *prompt_str = + (is_video ? "Input video file name to prepare the model:" + : "Input image file name to prepare the model:"); + + while (input_string(prompt_str, 1024, &(init_frame_file_name)) == -1) + printf(TEXT_RED "Incorrect input! Try again.\n" TEXT_RESET); + + if (is_video) + err = load_source_from_first_video_frame(init_frame_file_name, preparation_frame); + else + err = load_mv_source_from_file(init_frame_file_name, preparation_frame); + + free(init_frame_file_name); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during preparation " + "frame/image load! Error code: %i\n" TEXT_RESET, err); + + int err2 = mv_destroy_source(preparation_frame); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED "ERROR: Errors were occurred during destroying the " + "source! Error code: %i\n" TEXT_RESET, err2); + } + + return err; + } + + mv_engine_config_h eng_config = NULL; + err = mv_create_engine_config(&eng_config); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during creating the " + "engine config! 
Error code: %i\n" TEXT_RESET, err); + } else { + err = mv_engine_config_set_string_attribute( + eng_config, + MV_FACE_DETECTION_MODEL_FILE_PATH, + "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml"); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during setting of the " + "the 'MV_FACE_DETECTION_MODEL_FILE_PATH' attribute " + "for engine configuration! Check media-vision-config.json " + "file existence. Error code: %i" TEXT_RESET, err); + } + } + + err = mv_face_detect( + preparation_frame, + eng_config, + face_detected_for_tracking_cb, + NULL); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during face detection! " + "Error code: %i\n" TEXT_RESET, err); + + int err2 = mv_destroy_engine_config(eng_config); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED "ERROR: Errors were occurred during destroying the " + "engine configuration! Error code: %i\n" TEXT_RESET, err2); + } + + return err; + } + + mv_quadrangle_s roi; + + if (show_confirm_dialog("Do specify the face location?")) { + printf(TEXT_YELLOW "Specify the coordinates of the quadrangle to be used\n" + "for tracking model preparation:" TEXT_RESET "\n"); + int idx = 0; + char str_prompt[100]; + while (idx < 4) { + ++idx; + sprintf(str_prompt, "Specify point %i x coordinate: x%i = ", + idx - 1, idx); + while (-1 == input_int(str_prompt, INT_MIN, INT_MAX, + &(roi.points[idx - 1].x))) { + printf("Incorrect input! Try again.\n"); + } + sprintf(str_prompt, "Specify point %i y coordinate: y%i = ", + idx - 1, idx); + while (-1 == input_int(str_prompt, INT_MIN, INT_MAX, + &(roi.points[idx - 1].y))) { + printf("Incorrect input! 
Try again.\n"); + } + } + + err = mv_face_tracking_model_prepare( + model, eng_config, preparation_frame, &roi); + } else { + err = mv_face_tracking_model_prepare( + model, eng_config, preparation_frame, NULL); + } + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during the tracking model " + "preparation! Error code: %i\n" TEXT_RESET, err); + } + + const int err2 = mv_destroy_source(preparation_frame); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED "ERROR: Errors were occurred during destroying the " + "source! Error code: %i\n" TEXT_RESET, err2); + } + + return err; } static char *track_output_dir = NULL; @@ -1637,510 +1454,463 @@ static char *track_output_dir = NULL; static int track_frame_counter = 0; void track_cb( - mv_source_h source, - mv_face_tracking_model_h tracking_model, - mv_engine_config_h engine_cfg, - mv_quadrangle_s *location, - double confidence, - void *user_data) + mv_source_h source, + mv_face_tracking_model_h tracking_model, + mv_engine_config_h engine_cfg, + mv_quadrangle_s *location, + double confidence, + void *user_data) { - static bool track_catch_face = false; - - ++track_frame_counter; - - unsigned char *out_buffer = NULL; - unsigned int buf_size = 0; - image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID }; - if (MEDIA_VISION_ERROR_NONE != - mv_source_get_buffer(source, &out_buffer, &buf_size) || - MEDIA_VISION_ERROR_NONE != - mv_source_get_width(source, &(image_data.image_width)) || - MEDIA_VISION_ERROR_NONE != - mv_source_get_height(source, &(image_data.image_height)) || - MEDIA_VISION_ERROR_NONE != - mv_source_get_colorspace(source, &(image_data.image_colorspace))) - { - printf("ERROR: Creating out image is impossible.\n"); - - return; - } - - if (NULL != location) - { - if (!track_catch_face) - { - printf(TEXT_GREEN "Frame %i : Tracked object is appeared" TEXT_RESET "\n", - track_frame_counter); - track_catch_face = true; - } - else - { - printf(TEXT_YELLOW "Frame %i 
: Tracked object is tracked" TEXT_RESET "\n", - track_frame_counter); - } - - const int rectangle_thickness = 3; - const int drawing_color[] = {255, 0, 0}; - - printf(TEXT_YELLOW - "Location: (%i,%i) -> (%i,%i) -> (%i,%i) -> (%i,%i)\n" - TEXT_RESET, - location->points[0].x, - location->points[0].y, - location->points[1].x, - location->points[1].y, - location->points[2].x, - location->points[2].y, - location->points[3].x, - location->points[3].y); - printf(TEXT_YELLOW "Track confidence: %f" TEXT_RESET "\n", confidence); - - const int err = draw_quadrangle_on_buffer( - *location, - rectangle_thickness, - drawing_color, - &image_data, - out_buffer); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Quadrangle wasn't drew on frame buffer! " - "Error code: %i\n" TEXT_RESET, err); - - return; - } - } - else - { - if (track_catch_face) - { - printf(TEXT_RED "Frame %i : Tracked object is lost" TEXT_RESET "\n", - track_frame_counter); - track_catch_face = false; - } - else - { - printf(TEXT_YELLOW "Frame %i : Tracked object isn't detected" TEXT_RESET "\n", - track_frame_counter); - } - } - - char file_path[1024]; - sprintf(file_path, "%s/%05d.jpg", track_output_dir, track_frame_counter); - if (MEDIA_VISION_ERROR_NONE == save_image_from_buffer( - file_path, out_buffer, &image_data, 100)) - { - printf("Frame %i was outputted as %s\n", track_frame_counter, file_path); - } - else - { - printf(TEXT_RED "ERROR: Failed to generate output file %s. 
" - "Check file name and permissions.\n" TEXT_RESET, file_path); - } + static bool track_catch_face = false; + + ++track_frame_counter; + + unsigned char *out_buffer = NULL; + unsigned int buf_size = 0; + image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID }; + if (MEDIA_VISION_ERROR_NONE != + mv_source_get_buffer(source, &out_buffer, &buf_size) || + MEDIA_VISION_ERROR_NONE != + mv_source_get_width(source, &(image_data.image_width)) || + MEDIA_VISION_ERROR_NONE != + mv_source_get_height(source, &(image_data.image_height)) || + MEDIA_VISION_ERROR_NONE != + mv_source_get_colorspace(source, &(image_data.image_colorspace))) { + printf("ERROR: Creating out image is impossible.\n"); + + return; + } + + if (NULL != location) { + if (!track_catch_face) { + printf(TEXT_GREEN "Frame %i : Tracked object is appeared" TEXT_RESET "\n", + track_frame_counter); + track_catch_face = true; + } else { + printf(TEXT_YELLOW "Frame %i : Tracked object is tracked" TEXT_RESET "\n", + track_frame_counter); + } + + const int rectangle_thickness = 3; + const int drawing_color[] = {255, 0, 0}; + + printf(TEXT_YELLOW + "Location: (%i,%i) -> (%i,%i) -> (%i,%i) -> (%i,%i)\n" + TEXT_RESET, + location->points[0].x, + location->points[0].y, + location->points[1].x, + location->points[1].y, + location->points[2].x, + location->points[2].y, + location->points[3].x, + location->points[3].y); + printf(TEXT_YELLOW "Track confidence: %f" TEXT_RESET "\n", confidence); + + const int err = draw_quadrangle_on_buffer( + *location, + rectangle_thickness, + drawing_color, + &image_data, + out_buffer); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Quadrangle wasn't drew on frame buffer! 
" + "Error code: %i\n" TEXT_RESET, err); + + return; + } + } else { + if (track_catch_face) { + printf(TEXT_RED "Frame %i : Tracked object is lost" TEXT_RESET "\n", + track_frame_counter); + track_catch_face = false; + } else { + printf(TEXT_YELLOW "Frame %i : Tracked object isn't detected" TEXT_RESET "\n", + track_frame_counter); + } + } + + char file_path[1024]; + sprintf(file_path, "%s/%05d.jpg", track_output_dir, track_frame_counter); + if (MEDIA_VISION_ERROR_NONE == save_image_from_buffer( + file_path, out_buffer, &image_data, 100)) { + printf("Frame %i was outputted as %s\n", track_frame_counter, file_path); + } else { + printf(TEXT_RED "ERROR: Failed to generate output file %s. " + "Check file name and permissions.\n" TEXT_RESET, file_path); + } } void track_on_sample_cb( - char *buffer, - unsigned int buffer_size, - image_data_s image_data, - void *user_data) + char *buffer, + unsigned int buffer_size, + image_data_s image_data, + void *user_data) { - mv_source_h source = NULL; - int err = mv_create_source(&source); + mv_source_h source = NULL; + int err = mv_create_source(&source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during creating the source " - "based on the video frame! Error code: %i\n" TEXT_RESET, err); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during creating the source " + "based on the video frame! Error code: %i\n" TEXT_RESET, err); - return; - } + return; + } - err = mv_source_fill_by_buffer( - source, - buffer, - buffer_size, - image_data.image_width, - image_data.image_height, - image_data.image_colorspace); + err = mv_source_fill_by_buffer( + source, + buffer, + buffer_size, + image_data.image_width, + image_data.image_height, + image_data.image_colorspace); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during filling the source " - "based on the video frame! 
Error code: %i\n" TEXT_RESET , err); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during filling the source " + "based on the video frame! Error code: %i\n" TEXT_RESET , err); - return; - } + return; + } - mv_face_tracking_model_h tracking_model = - (mv_face_tracking_model_h)user_data; + mv_face_tracking_model_h tracking_model = + (mv_face_tracking_model_h)user_data; - err = mv_face_track(source, tracking_model, NULL, track_cb, false, NULL); + err = mv_face_track(source, tracking_model, NULL, track_cb, false, NULL); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during tracking the face " - TEXT_RESET "on the video frame! Error code: %i\n", err); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during tracking the face " + TEXT_RESET "on the video frame! Error code: %i\n", err); - return; - } + return; + } } -// end of stream callback +/* end of stream callback */ void eos_cb(void *user_data) { - printf("Video was fully processed\n"); - if (NULL == user_data) - { - printf(TEXT_RED - "ERROR: eos callback can't stop tracking process."TEXT_RESET); - return; - } - - pthread_mutex_unlock((pthread_mutex_t*)user_data); + printf("Video was fully processed\n"); + if (NULL == user_data) { + printf(TEXT_RED + "ERROR: eos callback can't stop tracking process."TEXT_RESET); + return; + } + + pthread_mutex_unlock((pthread_mutex_t*)user_data); } int generate_image_sequence( - mv_face_tracking_model_h tracking_model, - const char *track_target_file_name) + mv_face_tracking_model_h tracking_model, + const char *track_target_file_name) { - mv_video_reader_h reader = NULL; - int err = mv_create_video_reader(&reader); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during creating the video " - "reader! 
Error code: %i" TEXT_RESET "\n", err); - return err; - } - - image_data_s video_info; - unsigned int fps; - // init_frame_file_name - err = mv_video_reader_load(reader, track_target_file_name, &video_info, &fps); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during loading the video " - "by reader! Error code: %i" TEXT_RESET "\n", err); - - const int err2 = mv_destroy_video_reader(reader); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED "ERROR: Errors were occurred during video reader " - "destroy! Error code: %i" TEXT_RESET "\n", err); - } - - return err; - } - - err = mv_video_reader_set_new_sample_cb( - reader, - track_on_sample_cb, - tracking_model); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during new sample callback set!" - " Error code: %i" TEXT_RESET "\n", err); - - const int err2 = mv_destroy_video_reader(reader); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED "ERROR: Errors were occurred during video reader " - "destroy! Error code: %i" TEXT_RESET "\n", err); - } - - return err; - } - - pthread_mutex_t block_during_tracking_mutex; - pthread_mutex_init(&block_during_tracking_mutex, NULL); - pthread_mutex_lock(&block_during_tracking_mutex); - - // set end of stream callback - err = mv_video_reader_set_eos_cb(reader, eos_cb, &block_during_tracking_mutex); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "ERROR: Errors were occurred during setting the eos " - "callback for reader! Error code: %i" TEXT_RESET "\n", err); - - const int err2 = mv_destroy_video_reader(reader); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED - "ERROR: Errors were occurred during video reader destroy!" 
- " Error code: %i" TEXT_RESET "\n", err); - } - - pthread_mutex_unlock(&block_during_tracking_mutex); - pthread_mutex_destroy(&block_during_tracking_mutex); - - return err; - } - - err = mv_video_reader_start(reader); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during starting the " - "video reader! Error code: %i" TEXT_RESET "\n", err); - - const int err2 = mv_destroy_video_reader(reader); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf(TEXT_RED - "ERROR: Errors were occurred during video reader destroy!" - " Error code: %i" TEXT_RESET "\n", err); - } - - pthread_mutex_unlock(&block_during_tracking_mutex); - pthread_mutex_destroy(&block_during_tracking_mutex); - - return err; - } - - //wait for the video reading thread - - pthread_mutex_lock(&block_during_tracking_mutex); - pthread_mutex_unlock(&block_during_tracking_mutex); - pthread_mutex_destroy(&block_during_tracking_mutex); - - err = mv_video_reader_stop(reader); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during " - "attempt to stop video reader! Error code: %i\n" - TEXT_RESET, err); - } - - err = mv_destroy_video_reader(reader); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "ERROR: Errors were occurred during video " - "reader destroy! Error code: %i\n" TEXT_RESET, err); - } - - return MEDIA_VISION_ERROR_NONE; + mv_video_reader_h reader = NULL; + int err = mv_create_video_reader(&reader); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during creating the video " + "reader! Error code: %i" TEXT_RESET "\n", err); + return err; + } + + image_data_s video_info; + unsigned int fps; + /* init_frame_file_name */ + err = mv_video_reader_load(reader, track_target_file_name, &video_info, &fps); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during loading the video " + "by reader! 
Error code: %i" TEXT_RESET "\n", err); + + const int err2 = mv_destroy_video_reader(reader); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED "ERROR: Errors were occurred during video reader " + "destroy! Error code: %i" TEXT_RESET "\n", err); + } + + return err; + } + + err = mv_video_reader_set_new_sample_cb( + reader, + track_on_sample_cb, + tracking_model); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during new sample callback set!" + " Error code: %i" TEXT_RESET "\n", err); + + const int err2 = mv_destroy_video_reader(reader); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED "ERROR: Errors were occurred during video reader " + "destroy! Error code: %i" TEXT_RESET "\n", err); + } + + return err; + } + + pthread_mutex_t block_during_tracking_mutex; + pthread_mutex_init(&block_during_tracking_mutex, NULL); + pthread_mutex_lock(&block_during_tracking_mutex); + + /* set end of stream callback */ + err = mv_video_reader_set_eos_cb(reader, eos_cb, &block_during_tracking_mutex); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "ERROR: Errors were occurred during setting the eos " + "callback for reader! Error code: %i" TEXT_RESET "\n", err); + + const int err2 = mv_destroy_video_reader(reader); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED + "ERROR: Errors were occurred during video reader destroy!" + " Error code: %i" TEXT_RESET "\n", err); + } + + pthread_mutex_unlock(&block_during_tracking_mutex); + pthread_mutex_destroy(&block_during_tracking_mutex); + + return err; + } + + err = mv_video_reader_start(reader); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during starting the " + "video reader! Error code: %i" TEXT_RESET "\n", err); + + const int err2 = mv_destroy_video_reader(reader); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf(TEXT_RED + "ERROR: Errors were occurred during video reader destroy!" 
+ " Error code: %i" TEXT_RESET "\n", err); + } + + pthread_mutex_unlock(&block_during_tracking_mutex); + pthread_mutex_destroy(&block_during_tracking_mutex); + + return err; + } + + /* wait for the video reading thread */ + + pthread_mutex_lock(&block_during_tracking_mutex); + pthread_mutex_unlock(&block_during_tracking_mutex); + pthread_mutex_destroy(&block_during_tracking_mutex); + + err = mv_video_reader_stop(reader); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during " + "attempt to stop video reader! Error code: %i\n" + TEXT_RESET, err); + } + + err = mv_destroy_video_reader(reader); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "ERROR: Errors were occurred during video " + "reader destroy! Error code: %i\n" TEXT_RESET, err); + } + + return MEDIA_VISION_ERROR_NONE; } int perform_mv_face_track(mv_face_tracking_model_h tracking_model) { - printf(TEXT_YELLOW "Before any tracking session the tracking model\n" - "preparation is required. Exception is the case when\n" - "the next tracking session will be performed with the\n" - "video which is the direct continuation of the video\n" - "has been used at the previous tracking session.\n" - "If you want to test correct tracking case, don't\n" - "forget to perform preparation before tracking." - TEXT_RESET "\n"); - - char *track_target_file_name = NULL; - - while (input_string("Input video file name to track on:", - 1024, &(track_target_file_name)) == -1) - { - printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); - } - - while (input_string("Input directory to save tracking results:", - 1024, &(track_output_dir)) == -1) - { - printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); - } - - track_frame_counter = 0; - - return generate_image_sequence(tracking_model, track_target_file_name); + printf(TEXT_YELLOW "Before any tracking session the tracking model\n" + "preparation is required. 
Exception is the case when\n" + "the next tracking session will be performed with the\n" + "video which is the direct continuation of the video\n" + "has been used at the previous tracking session.\n" + "If you want to test correct tracking case, don't\n" + "forget to perform preparation before tracking." + TEXT_RESET "\n"); + + char *track_target_file_name = NULL; + + while (input_string("Input video file name to track on:", + 1024, &(track_target_file_name)) == -1) { + printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); + } + + while (input_string("Input directory to save tracking results:", + 1024, &(track_output_dir)) == -1) { + printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n"); + } + + track_frame_counter = 0; + + return generate_image_sequence(tracking_model, track_target_file_name); } int perform_track() { - printf("\n" TEXT_YELLOW - "Tracking model isn't now created.\n" - "You may create it to perform positive \n" - "testing, or don't create to check the \n" - "functionality behaviour for uncreated model." - TEXT_RESET - "\n"); - - int err = MEDIA_VISION_ERROR_NONE; - mv_face_tracking_model_h tracking_model = NULL; - const bool do_create = show_confirm_dialog("Do Create Tracking Model?"); - if (do_create) - { - printf(TEXT_GREEN "Creating tracking model..." TEXT_RESET "\n"); - - err = mv_face_tracking_model_create(&tracking_model); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED "Creating the model failed. Error code: %i. " - "But you still can test with uncreated model.\n" - TEXT_RESET "\n", err); - } - else - { - printf(TEXT_YELLOW "Tracking model has been created." 
- TEXT_RESET "\n"); - } - } - - int sel_opt = 0; - const int options[6] = { 1, 2, 3, 4, 5, 6 }; - const char *names[6] = { "Prepare the model", - "Clone the model", - "Save model to file", - "Load model from file", - "Track with model", - "Destroy model and exit" }; - - while(!sel_opt) - { - sel_opt = show_menu("Select action:", options, names, 6); - notification_type_e notification_type = FAIL_OR_SUCCESSS; - - switch (sel_opt) - { - case 1: - err = perform_mv_face_tracking_model_prepare(tracking_model); - break; - case 2: - err = perform_mv_face_tracking_model_clone(tracking_model); - break; - case 3: - err = perform_mv_face_tracking_model_save(tracking_model); - break; - case 4: - err = perform_mv_face_tracking_model_load(&tracking_model); - break; - case 5: - err = perform_mv_face_track(tracking_model); - notification_type = FAIL_OR_DONE; - break; - case 6: - if (do_create) - { - err = mv_face_tracking_model_destroy(tracking_model); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf(TEXT_RED - "Error with code %i was occurred during destroy" - TEXT_RESET "\n", err); - } - - return err; - } - else - { - return MEDIA_VISION_ERROR_NONE; - } - default: - sel_opt = 0; - printf("ERROR: Incorrect input.\n"); - continue; - } - - print_action_result(names[sel_opt - 1], err, notification_type); - - sel_opt = 0; - } + printf("\n" TEXT_YELLOW + "Tracking model isn't now created.\n" + "You may create it to perform positive \n" + "testing, or don't create to check the \n" + "functionality behaviour for uncreated model." + TEXT_RESET + "\n"); + + int err = MEDIA_VISION_ERROR_NONE; + mv_face_tracking_model_h tracking_model = NULL; + const bool do_create = show_confirm_dialog("Do Create Tracking Model?"); + if (do_create) { + printf(TEXT_GREEN "Creating tracking model..." TEXT_RESET "\n"); + + err = mv_face_tracking_model_create(&tracking_model); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED "Creating the model failed. Error code: %i. 
" + "But you still can test with uncreated model.\n" + TEXT_RESET "\n", err); + } else { + printf(TEXT_YELLOW "Tracking model has been created." + TEXT_RESET "\n"); + } + } + + int sel_opt = 0; + const int options[6] = { 1, 2, 3, 4, 5, 6 }; + const char *names[6] = { "Prepare the model", + "Clone the model", + "Save model to file", + "Load model from file", + "Track with model", + "Destroy model and exit" }; + + while (!sel_opt) { + sel_opt = show_menu("Select action:", options, names, 6); + notification_type_e notification_type = FAIL_OR_SUCCESSS; + + switch (sel_opt) { + case 1: + err = perform_mv_face_tracking_model_prepare(tracking_model); + break; + case 2: + err = perform_mv_face_tracking_model_clone(tracking_model); + break; + case 3: + err = perform_mv_face_tracking_model_save(tracking_model); + break; + case 4: + err = perform_mv_face_tracking_model_load(&tracking_model); + break; + case 5: + err = perform_mv_face_track(tracking_model); + notification_type = FAIL_OR_DONE; + break; + case 6: + if (do_create) { + err = mv_face_tracking_model_destroy(tracking_model); + if (MEDIA_VISION_ERROR_NONE != err) { + printf(TEXT_RED + "Error with code %i was occurred during destroy" + TEXT_RESET "\n", err); + } + + return err; + } else { + return MEDIA_VISION_ERROR_NONE; + } + default: + sel_opt = 0; + printf("ERROR: Incorrect input.\n"); + continue; + } + + print_action_result(names[sel_opt - 1], err, notification_type); + + sel_opt = 0; + } } int perform_eye_condition_recognize() { - Perform_eye_condition_recognize = true; + Perform_eye_condition_recognize = true; - const int err = perform_detect(); + const int err = perform_detect(); - Perform_eye_condition_recognize = false; + Perform_eye_condition_recognize = false; - return err; + return err; } int perform_face_expression_recognize() { - Perform_facial_expression_recognize = true; + Perform_facial_expression_recognize = true; - const int err = perform_detect(); + const int err = perform_detect(); - 
Perform_facial_expression_recognize = false; + Perform_facial_expression_recognize = false; - return err; + return err; } int main(void) { - int err = MEDIA_VISION_ERROR_NONE; - - int sel_opt = 0; - const int options[6] = { 1, 2, 3, 4, 5, 6 }; - const char *names[6] = { "Detect", - "Track", - "Recognize", - "Eye condition", - "Face expression", - "Exit" }; - - while (sel_opt == 0) - { - sel_opt = show_menu("Select action:", options, names, 6); - switch (sel_opt) - { - case 1: - err = perform_detect(); - break; - case 2: - err = perform_track(); - break; - case 3: - err = perform_recognize(); - break; - case 4: - err = perform_eye_condition_recognize(); - break; - case 5: - err = perform_face_expression_recognize(); - break; - case 6: - return 0; - default: - sel_opt = 0; - printf("Invalid option.\n"); - continue; - } - - int do_another = 0; - - if (err != MEDIA_VISION_ERROR_NONE) - { - printf("ERROR: Action is finished with error code: %i\n", err); - } - - sel_opt = 0; - const int options_last[2] = { 1, 2 }; - const char *names_last[2] = { "YES", "NO" }; - - while (sel_opt == 0) - { - sel_opt = show_menu("Perform another action?", options_last, names_last, 2); - - switch (sel_opt) - { - case 1: - do_another = 1; - break; - case 2: - do_another = 0; - break; - default: - sel_opt = 0; - printf("Invalid option.\n"); - break; - } - } - - sel_opt = (do_another == 1 ? 
0 : sel_opt); - } - - return 0; + int err = MEDIA_VISION_ERROR_NONE; + + int sel_opt = 0; + const int options[6] = { 1, 2, 3, 4, 5, 6 }; + const char *names[6] = { "Detect", + "Track", + "Recognize", + "Eye condition", + "Face expression", + "Exit" }; + + while (sel_opt == 0) { + sel_opt = show_menu("Select action:", options, names, 6); + switch (sel_opt) { + case 1: + err = perform_detect(); + break; + case 2: + err = perform_track(); + break; + case 3: + err = perform_recognize(); + break; + case 4: + err = perform_eye_condition_recognize(); + break; + case 5: + err = perform_face_expression_recognize(); + break; + case 6: + return 0; + default: + sel_opt = 0; + printf("Invalid option.\n"); + continue; + } + + int do_another = 0; + + if (err != MEDIA_VISION_ERROR_NONE) + printf("ERROR: Action is finished with error code: %i\n", err); + + sel_opt = 0; + const int options_last[2] = { 1, 2 }; + const char *names_last[2] = { "YES", "NO" }; + + while (sel_opt == 0) { + sel_opt = show_menu("Perform another action?", options_last, names_last, 2); + + switch (sel_opt) { + case 1: + do_another = 1; + break; + case 2: + do_another = 0; + break; + default: + sel_opt = 0; + printf("Invalid option.\n"); + break; + } + } + + sel_opt = (do_another == 1 ? 
0 : sel_opt); + } + + return 0; } diff --git a/test/testsuites/image/image_test_suite.c b/test/testsuites/image/image_test_suite.c index a0d1a11..1cd9ba7 100644 --- a/test/testsuites/image/image_test_suite.c +++ b/test/testsuites/image/image_test_suite.c @@ -26,2051 +26,1827 @@ #include -typedef enum -{ - SOURCE_TYPE_GENERATION, - SOURCE_TYPE_LOADING, - SOURCE_TYPE_CLONING, - SOURCE_TYPE_EMPTY, - SOURCE_TYPE_INVALID +typedef enum { + SOURCE_TYPE_GENERATION, + SOURCE_TYPE_LOADING, + SOURCE_TYPE_CLONING, + SOURCE_TYPE_EMPTY, + SOURCE_TYPE_INVALID } source_type_e; -typedef enum -{ - OBJECT_TYPE_IMAGE_OBJECT, - OBJECT_TYPE_IMAGE_TRACKING_MODEL, - OBJECT_TYPE_INVALID +typedef enum { + OBJECT_TYPE_IMAGE_OBJECT, + OBJECT_TYPE_IMAGE_TRACKING_MODEL, + OBJECT_TYPE_INVALID } testing_object_type_e; #define testing_object_maximum_label_length 300 -typedef struct testing_object_s -{ - void *entity; +typedef struct testing_object_s { + void *entity; - char origin_label[testing_object_maximum_label_length]; + char origin_label[testing_object_maximum_label_length]; - char actual_label[testing_object_maximum_label_length]; + char actual_label[testing_object_maximum_label_length]; - testing_object_type_e object_type; + testing_object_type_e object_type; - source_type_e source_type; + source_type_e source_type; - int cloning_counter; + int cloning_counter; } testing_object; typedef testing_object *testing_object_h; void testing_object_create(testing_object_h *result) { - (*result) = malloc(sizeof(testing_object)); - - (*result)->entity = (void*)NULL; - (*result)->object_type = OBJECT_TYPE_INVALID; - (*result)->source_type = SOURCE_TYPE_INVALID; - (*result)->cloning_counter = 0; - (*result)->origin_label[0] = '\0'; - (*result)->actual_label[0] = '\0'; + (*result) = malloc(sizeof(testing_object)); + + (*result)->entity = (void*)NULL; + (*result)->object_type = OBJECT_TYPE_INVALID; + (*result)->source_type = SOURCE_TYPE_INVALID; + (*result)->cloning_counter = 0; + 
(*result)->origin_label[0] = '\0'; + (*result)->actual_label[0] = '\0'; } void testing_object_fill( - testing_object_h target, - void *entity, - testing_object_type_e object_type, - source_type_e source_type, - void *source) + testing_object_h target, + void *entity, + testing_object_type_e object_type, + source_type_e source_type, + void *source) { - target->entity = entity; - target->object_type = object_type; - target->source_type = source_type; - target->cloning_counter = 0; - - switch (source_type) - { - case SOURCE_TYPE_GENERATION: - { - if (OBJECT_TYPE_IMAGE_OBJECT == object_type) - { - sprintf( - target->origin_label, - "generated from \"%s\"", - (char*)source); - } - else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == object_type) - { - sprintf( - target->origin_label, - "generated from image object which is %s", - ((testing_object_h)source)->actual_label); - } - else - { - sprintf( - target->origin_label, - "generated unknown type of testing object"); - } - - strcpy(target->actual_label, target->origin_label); - break; - } - case SOURCE_TYPE_LOADING: - { - sprintf(target->origin_label, "loaded from \"%s\"", (char*)source); - strcpy(target->actual_label, target->origin_label); - break; - } - case SOURCE_TYPE_CLONING: - { - testing_object_h source_object = (testing_object_h)source; - strcpy(target->origin_label, source_object->origin_label); - target->cloning_counter = source_object->cloning_counter + 1; - - char number_of_cloning[10]; - number_of_cloning[0] = '\0'; - if (1 < target->cloning_counter) - { - sprintf(number_of_cloning, "%s%i%s", - "(x", target->cloning_counter, ")"); - } - - char type_name[20]; - if (OBJECT_TYPE_IMAGE_OBJECT == object_type) - { - sprintf(type_name, "image object"); - } - else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == object_type) - { - sprintf(type_name, "tracking model"); - } - else - { - sprintf(type_name, "unknown object"); - } - sprintf(target->actual_label, "%s%s%s%s%s%s", - "cloned ", number_of_cloning, - " from ", type_name, - " 
which is ", target->origin_label); - break; - } - case SOURCE_TYPE_EMPTY: - { - strcpy(target->origin_label, "created an empty"); - strcpy(target->actual_label, target->origin_label); - break; - } - default: - { - strcpy(target->origin_label, "having unknown source"); - break; - } - } + target->entity = entity; + target->object_type = object_type; + target->source_type = source_type; + target->cloning_counter = 0; + + switch (source_type) { + case SOURCE_TYPE_GENERATION: { + if (OBJECT_TYPE_IMAGE_OBJECT == object_type) { + sprintf( + target->origin_label, + "generated from \"%s\"", + (char*)source); + } else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == object_type) { + sprintf( + target->origin_label, + "generated from image object which is %s", + ((testing_object_h)source)->actual_label); + } else { + sprintf( + target->origin_label, + "generated unknown type of testing object"); + } + + strcpy(target->actual_label, target->origin_label); + break; + } + case SOURCE_TYPE_LOADING: { + sprintf(target->origin_label, "loaded from \"%s\"", (char*)source); + strcpy(target->actual_label, target->origin_label); + break; + } + case SOURCE_TYPE_CLONING: { + testing_object_h source_object = (testing_object_h)source; + strcpy(target->origin_label, source_object->origin_label); + target->cloning_counter = source_object->cloning_counter + 1; + + char number_of_cloning[10]; + number_of_cloning[0] = '\0'; + if (1 < target->cloning_counter) { + sprintf(number_of_cloning, "%s%i%s", + "(x", target->cloning_counter, ")"); + } + + char type_name[20]; + if (OBJECT_TYPE_IMAGE_OBJECT == object_type) + sprintf(type_name, "image object"); + else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == object_type) + sprintf(type_name, "tracking model"); + else + sprintf(type_name, "unknown object"); + + sprintf(target->actual_label, "%s%s%s%s%s%s", + "cloned ", number_of_cloning, + " from ", type_name, + " which is ", target->origin_label); + break; + } + case SOURCE_TYPE_EMPTY: { + strcpy(target->origin_label, 
"created an empty"); + strcpy(target->actual_label, target->origin_label); + break; + } + default: { + strcpy(target->origin_label, "having unknown source"); + break; + } + } } void testing_object_destroy(testing_object_h *target) { - switch ((*target)->object_type) - { - case OBJECT_TYPE_IMAGE_OBJECT: - { - int err = mv_image_object_destroy((mv_image_object_h)((*target)->entity)); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nERROR: Errors were occurred during image object " - "destroying; code %i\n", err); - } - break; - } - case OBJECT_TYPE_IMAGE_TRACKING_MODEL: - { - int err = mv_image_tracking_model_destroy( - (mv_image_tracking_model_h)((*target)->entity)); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nERROR: Errors were occurred during image tracking " - "model destroying; code %i\n", err); - } - break; - } - } - free(*target); - (*target) = NULL; + switch ((*target)->object_type) { + case OBJECT_TYPE_IMAGE_OBJECT: { + int err = mv_image_object_destroy((mv_image_object_h)((*target)->entity)); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nERROR: Errors were occurred during image object " + "destroying; code %i\n", err); + } + break; + } + case OBJECT_TYPE_IMAGE_TRACKING_MODEL: { + int err = mv_image_tracking_model_destroy( + (mv_image_tracking_model_h)((*target)->entity)); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nERROR: Errors were occurred during image tracking " + "model destroying; code %i\n", err); + } + break; + } + } + free(*target); + (*target) = NULL; } -typedef struct -{ - mv_quadrangle_s **locations; - unsigned int locations_size; - unsigned int currently_number; +typedef struct { + mv_quadrangle_s **locations; + unsigned int locations_size; + unsigned int currently_number; } recognition_result; void destroy_recognition_result(recognition_result *result) { - if (result->locations_size == 0) - { - return; - } - - int i = 0; - for (; i < result->locations_size; ++i) - { - if (NULL != result->locations[i]) - { - 
free(result->locations[i]); - } - } - free(result->locations); + if (result->locations_size == 0) + return; + + int i = 0; + for (; i < result->locations_size; ++i) { + if (NULL != result->locations[i]) + free(result->locations[i]); + } + free(result->locations); } void recognized_cb( - mv_source_h source, - mv_engine_config_h engine_cfg, - const mv_image_object_h *image_objects, - mv_quadrangle_s **locations, - unsigned int number_of_objects, - void *user_data) + mv_source_h source, + mv_engine_config_h engine_cfg, + const mv_image_object_h *image_objects, + mv_quadrangle_s **locations, + unsigned int number_of_objects, + void *user_data) { - MEDIA_VISION_FUNCTION_ENTER(); - - if (NULL == user_data) - { - return; - } - - recognition_result *result = (recognition_result*)user_data; - - int object_num = 0; - for(; object_num < number_of_objects; ++object_num) - { - if (result->currently_number >= result->locations_size) - { - return; - } - - if (NULL == locations[object_num]) - { - result->locations[result->currently_number] = NULL; - } - else - { - result->locations[result->currently_number] = malloc(sizeof(mv_quadrangle_s)); - *(result->locations[result->currently_number]) = *(locations[object_num]); - } - - ++result->currently_number; - } - - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_ENTER(); + + if (NULL == user_data) + return; + + recognition_result *result = (recognition_result*)user_data; + + int object_num = 0; + for (; object_num < number_of_objects; ++object_num) { + if (result->currently_number >= result->locations_size) + return; + + if (NULL == locations[object_num]) { + result->locations[result->currently_number] = NULL; + } else { + result->locations[result->currently_number] = malloc(sizeof(mv_quadrangle_s)); + *(result->locations[result->currently_number]) = *(locations[object_num]); + } + + ++result->currently_number; + } + + MEDIA_VISION_FUNCTION_LEAVE(); } void handle_recognition_result( - const recognition_result *result, - int 
number_of_objects, - mv_source_h *source, - char *file_name) + const recognition_result *result, + int number_of_objects, + mv_source_h *source, + char *file_name) { - int is_source_data_loaded = 0; - - unsigned char *out_buffer = NULL; - int buffer_size = 0; - image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID }; - - if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &(out_buffer), &buffer_size) || - MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) || - MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) || - MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) || - NULL == file_name) - { - printf("ERROR: Creating out image is impossible.\n"); - } - else - { - is_source_data_loaded = 1; - } - - int object_num = 0; - - - for (; object_num < number_of_objects; ++object_num) - { - if (NULL == result->locations[object_num]) - { - printf("\nImage #%i is not recognized\n", object_num); - continue; - } - - printf("\nImage #%i is recognized\n", object_num); - printf("Recognized image coordinates:\n"); - - int point_num = 0; - for (; point_num < 4; ++point_num) - { - printf("%d point - x = %d, y = %d\n", point_num + 1, - result->locations[object_num]->points[point_num].x, - result->locations[object_num]->points[point_num].y); - } - - if (is_source_data_loaded) - { - const int thickness = 2; - const int color[] = {0, 255, 0}; - - const int err = draw_quadrangle_on_buffer( - *(result->locations[object_num]), - thickness, - color, - &image_data, - out_buffer); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Impossible to draw quadrangle\n"); - } - } - } - if (save_image_from_buffer(file_name, out_buffer, - &image_data, 100) != MEDIA_VISION_ERROR_NONE) - { - printf("\nERROR: Failed to generate output file\n"); - } - else - { - printf("\nImage was generated as %s\n", file_name); - } + int is_source_data_loaded = 0; + + unsigned char 
*out_buffer = NULL; + int buffer_size = 0; + image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID }; + + if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &(out_buffer), &buffer_size) || + MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) || + MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) || + MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) || + NULL == file_name) { + printf("ERROR: Creating out image is impossible.\n"); + } else { + is_source_data_loaded = 1; + } + + int object_num = 0; + + for (; object_num < number_of_objects; ++object_num) { + if (NULL == result->locations[object_num]) { + printf("\nImage #%i is not recognized\n", object_num); + continue; + } + + printf("\nImage #%i is recognized\n", object_num); + printf("Recognized image coordinates:\n"); + + int point_num = 0; + for (; point_num < 4; ++point_num) { + printf("%d point - x = %d, y = %d\n", point_num + 1, + result->locations[object_num]->points[point_num].x, + result->locations[object_num]->points[point_num].y); + } + + if (is_source_data_loaded) { + const int thickness = 2; + const int color[] = {0, 255, 0}; + + const int err = draw_quadrangle_on_buffer( + *(result->locations[object_num]), + thickness, + color, + &image_data, + out_buffer); + + if (MEDIA_VISION_ERROR_NONE != err) + printf("ERROR: Impossible to draw quadrangle\n"); + } + } + + if (save_image_from_buffer(file_name, out_buffer, + &image_data, 100) != MEDIA_VISION_ERROR_NONE) { + printf("\nERROR: Failed to generate output file\n"); + } else { + printf("\nImage was generated as %s\n", file_name); + } } int generate_image_object_from_file(const char *path_to_image, - bool roi_selected, - mv_rectangle_s roi, - mv_image_object_h *result) + bool roi_selected, + mv_rectangle_s roi, + mv_image_object_h *result) { - MEDIA_VISION_FUNCTION_ENTER(); - - mv_source_h source; - int err = 
mv_create_source(&source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Errors were occurred during source creating!!! code %i\n", err); - } - - err = load_mv_source_from_file(path_to_image, source); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: image is not loaded; code %i\n", err); - - int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during source " - "destroying; code %i\n", err2); - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; - } - - mv_engine_config_h config; - err = mv_create_engine_config(&config); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: engine configuration is not created; code %i\n", err); - - int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during source " - "destroying; code %i\n", err2); - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; - } - - err = mv_image_object_create(result); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Errors were occurred during creating image object; " - "code %i\n", err); - - int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during source " - "destroying; code %i\n", err2); - } - - err2 = mv_destroy_engine_config(config); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during engine config " - "destroying; code %i\n", err2); - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; - } - - if (roi_selected) - { - err = mv_image_object_fill(*result, config, source, &roi); - } - else - { - err = mv_image_object_fill(*result, config, source, NULL); - } - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Errors were occurred during filling image object; " - "code %i\n", err); - - int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during 
source " - "destroying; code %i\n", err2); - } - - err2 = mv_image_object_destroy(*result); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during image object " - "destroying; code %i\n", err2); - } - - err2 = mv_destroy_engine_config(config); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during engine config " - "destroying; code %i\n", err2); - } - - *result = NULL; - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; - } - - err = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nERROR: Errors were occurred during source " - "destroying; code %i\n", err); - - int err2 = mv_destroy_engine_config(config); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during engine config " - "destroying; code %i\n", err2); - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; - } - - err = mv_destroy_engine_config(config); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nERROR: Errors were occurred during engine config " - "destroying; code %i\n", err); - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; - } - - MEDIA_VISION_FUNCTION_LEAVE(); - return err; + MEDIA_VISION_FUNCTION_ENTER(); + + mv_source_h source; + int err = mv_create_source(&source); + if (MEDIA_VISION_ERROR_NONE != err) + printf("ERROR: Errors were occurred during source creating!!! 
code %i\n", err); + + err = load_mv_source_from_file(path_to_image, source); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: image is not loaded; code %i\n", err); + + int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during source " + "destroying; code %i\n", err2); + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; + } + + mv_engine_config_h config; + err = mv_create_engine_config(&config); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: engine configuration is not created; code %i\n", err); + + int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during source " + "destroying; code %i\n", err2); + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; + } + + err = mv_image_object_create(result); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Errors were occurred during creating image object; " + "code %i\n", err); + + int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during source " + "destroying; code %i\n", err2); + } + + err2 = mv_destroy_engine_config(config); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during engine config " + "destroying; code %i\n", err2); + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; + } + + if (roi_selected) + err = mv_image_object_fill(*result, config, source, &roi); + else + err = mv_image_object_fill(*result, config, source, NULL); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Errors were occurred during filling image object; " + "code %i\n", err); + + int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during source " + "destroying; code %i\n", err2); + } + + err2 = mv_image_object_destroy(*result); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were 
occurred during image object " + "destroying; code %i\n", err2); + } + + err2 = mv_destroy_engine_config(config); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during engine config " + "destroying; code %i\n", err2); + } + + *result = NULL; + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; + } + + err = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nERROR: Errors were occurred during source " + "destroying; code %i\n", err); + + int err2 = mv_destroy_engine_config(config); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during engine config " + "destroying; code %i\n", err2); + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; + } + + err = mv_destroy_engine_config(config); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nERROR: Errors were occurred during engine config " + "destroying; code %i\n", err); + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; + } + + MEDIA_VISION_FUNCTION_LEAVE(); + return err; } int recognize_image(const char *path_to_image, - const char *path_to_generated_image, - mv_image_object_h *targets, - int number_of_targets) + const char *path_to_generated_image, + mv_image_object_h *targets, + int number_of_targets) { - MEDIA_VISION_FUNCTION_ENTER(); - - - if (NULL == targets) - { - printf("\nYou must create at least one model for recognition\n"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - mv_source_h source; - int err = mv_create_source(&source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nERROR: Errors were occurred during source creating; code %i\n", err); - return err; - } - - err = load_mv_source_from_file(path_to_image, source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: image is not loaded; code %i\n", err); - int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during source destroying; " - "code %i\n", err2); - } - 
MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } - - recognition_result result; - result.currently_number = 0; - if (0 < number_of_targets) - { - result.locations = malloc(sizeof(mv_quadrangle_s*) * number_of_targets); - result.locations_size = number_of_targets; - } - else - { - result.locations = NULL; - result.locations_size = 0; - } - - mv_engine_config_h config; - err = mv_create_engine_config(&config); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: engine configuration is not created; code %i\n", err); - int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during source destroying;" - "code %i\n", err2); - } - MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } - - err = mv_image_recognize(source, targets, number_of_targets, config, - recognized_cb, &result); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nERROR: Image is not recognized; code %i\n", err); - - destroy_recognition_result(&result); - - int err2 = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during source " - "destroying; code %i\n", err2); - } - err2 = mv_destroy_engine_config(config); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during engine config " - "destroying; code %i\n", err2); - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; - } - - handle_recognition_result(&result, number_of_targets, source, - path_to_generated_image); - - destroy_recognition_result(&result); - - err = mv_destroy_source(source); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nERROR: Errors were occurred during source destroying; code %i\n", - err); - - int err2 = mv_destroy_engine_config(config); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during engine config " - "destroying; code %i\n", err2); - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; - } - - err = 
mv_destroy_engine_config(config); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nERROR: Errors were occurred during engine config destroying; " - "code %i\n", err); - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; - } - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; + MEDIA_VISION_FUNCTION_ENTER(); + + if (NULL == targets) { + printf("\nYou must create at least one model for recognition\n"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + mv_source_h source; + int err = mv_create_source(&source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nERROR: Errors were occurred during source creating; code %i\n", err); + return err; + } + + err = load_mv_source_from_file(path_to_image, source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: image is not loaded; code %i\n", err); + int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during source destroying; " + "code %i\n", err2); + } + MEDIA_VISION_FUNCTION_LEAVE(); + return err; + } + + recognition_result result; + result.currently_number = 0; + if (0 < number_of_targets) { + result.locations = malloc(sizeof(mv_quadrangle_s*) * number_of_targets); + result.locations_size = number_of_targets; + } else { + result.locations = NULL; + result.locations_size = 0; + } + + mv_engine_config_h config; + err = mv_create_engine_config(&config); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: engine configuration is not created; code %i\n", err); + int err2 = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during source destroying;" + "code %i\n", err2); + } + MEDIA_VISION_FUNCTION_LEAVE(); + return err; + } + + err = mv_image_recognize(source, targets, number_of_targets, config, + recognized_cb, &result); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nERROR: Image is not recognized; code %i\n", err); + + destroy_recognition_result(&result); + + int err2 = 
mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during source " + "destroying; code %i\n", err2); + } + err2 = mv_destroy_engine_config(config); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during engine config " + "destroying; code %i\n", err2); + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; + } + + handle_recognition_result(&result, number_of_targets, source, + path_to_generated_image); + + destroy_recognition_result(&result); + + err = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nERROR: Errors were occurred during source destroying; code %i\n", + err); + + int err2 = mv_destroy_engine_config(config); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during engine config " + "destroying; code %i\n", err2); + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; + } + + err = mv_destroy_engine_config(config); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nERROR: Errors were occurred during engine config destroying; " + "code %i\n", err); + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; + } + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; } int perform_get_confidence(mv_image_object_h target) { - if (NULL == target) - { - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - double confidence = 0; - const int err = mv_image_object_get_recognition_rate(target, &confidence); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nError: confidence hasn't been received with error code %i\n", err); - return err; - } - - printf("\nConfidence has been successfully received. 
Its value equal %f.\n", confidence); - - return MEDIA_VISION_ERROR_NONE; + if (NULL == target) + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + + double confidence = 0; + const int err = mv_image_object_get_recognition_rate(target, &confidence); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nError: confidence hasn't been received with error code %i\n", err); + return err; + } + + printf("\nConfidence has been successfully received. Its value equal %f.\n", confidence); + + return MEDIA_VISION_ERROR_NONE; } int perform_set_label(mv_image_object_h target) { - if (NULL == target) - { - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (NULL == target) + return MEDIA_VISION_ERROR_INVALID_PARAMETER; - int label = 0; + int label = 0; - while (input_int("Input label (int):", INT_MIN, INT_MAX, - &label) == -1) - { - printf("Incorrect input! Try again.\n"); - } + while (input_int("Input label (int):", INT_MIN, INT_MAX, &label) == -1) + printf("Incorrect input! Try again.\n"); - const int err = mv_image_object_set_label(target, label); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nError: the label hasn't been set with error code %i\n", err); - return err; - } + const int err = mv_image_object_set_label(target, label); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nError: the label hasn't been set with error code %i\n", err); + return err; + } - printf("\nLabel has been successfully set.\n"); + printf("\nLabel has been successfully set.\n"); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int perform_get_label(mv_image_object_h target) { - if (NULL == target) - { - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - int label = 0; - const int err = mv_image_object_get_label(target, &label); - if (MEDIA_VISION_ERROR_NO_DATA == err) - { - printf("\nSelected image object haven't label.\n"); - return MEDIA_VISION_ERROR_NONE; - } - else if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nError: label hasn't been received with error code 
%i\n", err); - return err; - } - - printf("\nLabel has been successfully received. Its equal %i.\n", label); - - return MEDIA_VISION_ERROR_NONE; + if (NULL == target) + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + + int label = 0; + const int err = mv_image_object_get_label(target, &label); + if (MEDIA_VISION_ERROR_NO_DATA == err) { + printf("\nSelected image object haven't label.\n"); + return MEDIA_VISION_ERROR_NONE; + } else if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nError: label hasn't been received with error code %i\n", err); + return err; + } + + printf("\nLabel has been successfully received. Its equal %i.\n", label); + + return MEDIA_VISION_ERROR_NONE; } int perform_recognize(mv_image_object_h *targets, int number_of_targets) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - char *path_to_image = NULL; - char *path_to_generated_image = NULL; + char *path_to_image = NULL; + char *path_to_generated_image = NULL; - while (input_string("Input file name with image for recognizing:", - 1024, &path_to_image) == -1) - { - printf("Incorrect input! Try again.\n"); - } + while (input_string("Input file name with image for recognizing:", + 1024, &path_to_image) == -1) { + printf("Incorrect input! Try again.\n"); + } - while (input_string("Input file name for generated image:", - 1024, &path_to_generated_image) == -1) - { - printf("Incorrect input! Try again.\n"); - } + while (input_string("Input file name for generated image:", + 1024, &path_to_generated_image) == -1) { + printf("Incorrect input! 
Try again.\n"); + } - const int err = recognize_image(path_to_image, path_to_generated_image, targets, - number_of_targets); + const int err = recognize_image(path_to_image, path_to_generated_image, targets, + number_of_targets); - free(path_to_image); - free(path_to_generated_image); + free(path_to_image); + free(path_to_generated_image); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; + return err; } int perform_load_image_object(char **path_to_object, mv_image_object_h *result) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - if (NULL != (*result)) - { - mv_image_object_destroy(*result); - *result = NULL; - } + if (NULL != (*result)) { + mv_image_object_destroy(*result); + *result = NULL; + } - while (input_string("Input file name with image object to be loaded:", - 1024, path_to_object) == -1) - { - printf("Incorrect input! Try again.\n"); - } + while (input_string("Input file name with image object to be loaded:", + 1024, path_to_object) == -1) { + printf("Incorrect input! 
Try again.\n"); + } - int err = mv_image_object_load(result, *path_to_object); + int err = mv_image_object_load(result, *path_to_object); - if (MEDIA_VISION_ERROR_NONE != err && NULL != (*result)) - { - printf("Error: object isn't loaded with error code %i\n", err); - return err; - } + if (MEDIA_VISION_ERROR_NONE != err && NULL != (*result)) { + printf("Error: object isn't loaded with error code %i\n", err); + return err; + } - printf("\nObject successfully loaded\n"); + printf("\nObject successfully loaded\n"); - MEDIA_VISION_FUNCTION_LEAVE(); - return err; + MEDIA_VISION_FUNCTION_LEAVE(); + return err; } int perform_save_image_object(mv_image_object_h object) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int err = MEDIA_VISION_ERROR_NONE; - char *path_to_object = NULL; + int err = MEDIA_VISION_ERROR_NONE; + char *path_to_object = NULL; - while (input_string("Input file name to be generated for image object storing:", - 1024, &path_to_object) == -1) - { - printf("Incorrect input! Try again.\n"); - } + while (input_string("Input file name to be generated for image object storing:", + 1024, &path_to_object) == -1) { + printf("Incorrect input! Try again.\n"); + } - err = mv_image_object_save(path_to_object, object); + err = mv_image_object_save(path_to_object, object); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nError during saving the image object. Error code is %i\n", err); - free(path_to_object); - return err; - } + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nError during saving the image object. 
Error code is %i\n", err); + free(path_to_object); + return err; + } - printf("\nObject successfully saved\n"); + printf("\nObject successfully saved\n"); - free(path_to_object); + free(path_to_object); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; + return err; } int perform_generate_image_object(mv_image_object_h *result, char **path_to_image) { - MEDIA_VISION_FUNCTION_ENTER(); - - if (NULL == path_to_image || NULL == result) - { - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - while (input_string("Input file name with image to be analyzed:", - 1024, path_to_image) == -1) - { - printf("Incorrect input! Try again.\n"); - } - - mv_rectangle_s roi; - const bool sel_roi = show_confirm_dialog("Select if you want to set ROI"); - if (sel_roi) - { - printf("\nInput ROI coordinates\n"); - while (input_int("Input x coordinate:", INT_MIN, INT_MAX, - &(roi.point.x)) == -1) - { - printf("Incorrect input! Try again.\n"); - } - - while (input_int("Input y coordinate:", INT_MIN, INT_MAX, - &(roi.point.y)) == -1) - { - printf("Incorrect input! Try again.\n"); - } - - while (input_int("Input ROI width:", INT_MIN, INT_MAX, - &(roi.width)) == -1) - { - printf("Incorrect input! Try again.\n"); - } - - while (input_int("Input ROI height:", INT_MIN, INT_MAX, - &(roi.height)) == -1) - { - printf("Incorrect input! Try again.\n"); - } - - } - - int err = generate_image_object_from_file(*path_to_image, sel_roi, roi, result); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nError in generation image object. 
Error code is %i\n", err); - - if (NULL != (*result)) - { - mv_image_object_destroy(*result); - (*result) = NULL; - } - - return err; - } - - printf("\nObject successfully generated\n"); - - MEDIA_VISION_FUNCTION_LEAVE(); - - return err; + MEDIA_VISION_FUNCTION_ENTER(); + + if (NULL == path_to_image || NULL == result) + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + + while (input_string("Input file name with image to be analyzed:", + 1024, path_to_image) == -1) { + printf("Incorrect input! Try again.\n"); + } + + mv_rectangle_s roi; + const bool sel_roi = show_confirm_dialog("Select if you want to set ROI"); + if (sel_roi) { + printf("\nInput ROI coordinates\n"); + while (input_int("Input x coordinate:", INT_MIN, INT_MAX, + &(roi.point.x)) == -1) { + printf("Incorrect input! Try again.\n"); + } + + while (input_int("Input y coordinate:", INT_MIN, INT_MAX, + &(roi.point.y)) == -1) { + printf("Incorrect input! Try again.\n"); + } + + while (input_int("Input ROI width:", INT_MIN, INT_MAX, + &(roi.width)) == -1) { + printf("Incorrect input! Try again.\n"); + } + + while (input_int("Input ROI height:", INT_MIN, INT_MAX, + &(roi.height)) == -1) { + printf("Incorrect input! Try again.\n"); + } + + } + + int err = generate_image_object_from_file(*path_to_image, sel_roi, roi, result); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nError in generation image object. 
Error code is %i\n", err); + + if (NULL != (*result)) { + mv_image_object_destroy(*result); + (*result) = NULL; + } + + return err; + } + + printf("\nObject successfully generated\n"); + + MEDIA_VISION_FUNCTION_LEAVE(); + + return err; } int perform_clone_image_object(mv_image_object_h src, mv_image_object_h *result) { - int err = mv_image_object_clone(src, result); + int err = mv_image_object_clone(src, result); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nError: object isn't cloned with error code %i\n", err); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nError: object isn't cloned with error code %i\n", err); - int err2 = mv_image_object_destroy(*result); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during image object " - "destroying; code %i\n", err); - } + int err2 = mv_image_object_destroy(*result); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during image object " + "destroying; code %i\n", err); + } - (*result) = NULL; + (*result) = NULL; - return err; - } + return err; + } - printf("\nObject successfully cloned\n"); + printf("\nObject successfully cloned\n"); - return err; + return err; } int handle_tracking_result( - mv_video_writer_h writer, - mv_source_h frame, - int frame_number, - mv_quadrangle_s *location) + mv_video_writer_h writer, + mv_source_h frame, + int frame_number, + mv_quadrangle_s *location) { - unsigned char *data_buffer = NULL; - unsigned int buffer_size = 0; - image_data_s image_data; - - int err = mv_source_get_buffer(frame, &data_buffer, &buffer_size); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "ERROR: Errors were occurred during getting buffer from the " - "source; code %i\n", - err); - return err; - } - - err = mv_source_get_width(frame, &image_data.image_width); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "ERROR: Errors were occurred during getting width from the " - "source; code %i\n", - err); - return err; - } - - err = 
mv_source_get_height(frame, &image_data.image_height); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "ERROR: Errors were occurred during getting height from the " - "source; code %i\n", - err); - return err; - } - - if (location) - { - printf( - "Frame #%i: object is found." - "Location: {%i, %i}; {%i, %i}; {%i, %i}; {%i, %i}.\n", - frame_number, - location->points[0].x, - location->points[0].y, - location->points[1].x, - location->points[1].y, - location->points[2].x, - location->points[2].y, - location->points[3].x, - location->points[3].y); - const int thickness = 2; - const int color[] = {0, 255, 0}; - - err = draw_quadrangle_on_buffer( - *location, - thickness, - color, - &image_data, - data_buffer); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "ERROR: Errors were occurred during drawing quadrangle on " - "the frame; code %i\n", - err); - return err; - } - } - else - { - usleep(1000000); - printf("Frame #%i: object isn't found.\n", frame_number); - } - - err = mv_video_writer_write_frame(writer, data_buffer); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "ERROR: Errors were occurred during writing frame to the " - "result video file; code %i\n", - err); - return err; - } - - return err; + unsigned char *data_buffer = NULL; + unsigned int buffer_size = 0; + image_data_s image_data; + + int err = mv_source_get_buffer(frame, &data_buffer, &buffer_size); + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "ERROR: Errors were occurred during getting buffer from the " + "source; code %i\n", + err); + return err; + } + + err = mv_source_get_width(frame, &image_data.image_width); + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "ERROR: Errors were occurred during getting width from the " + "source; code %i\n", + err); + return err; + } + + err = mv_source_get_height(frame, &image_data.image_height); + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "ERROR: Errors were occurred during getting height from the " + "source; code %i\n", + 
err); + return err; + } + + if (location) { + printf( + "Frame #%i: object is found." + "Location: {%i, %i}; {%i, %i}; {%i, %i}; {%i, %i}.\n", + frame_number, + location->points[0].x, + location->points[0].y, + location->points[1].x, + location->points[1].y, + location->points[2].x, + location->points[2].y, + location->points[3].x, + location->points[3].y); + const int thickness = 2; + const int color[] = {0, 255, 0}; + + err = draw_quadrangle_on_buffer( + *location, + thickness, + color, + &image_data, + data_buffer); + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "ERROR: Errors were occurred during drawing quadrangle on " + "the frame; code %i\n", + err); + return err; + } + } else { + usleep(1000000); + printf("Frame #%i: object isn't found.\n", frame_number); + } + + err = mv_video_writer_write_frame(writer, data_buffer); + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "ERROR: Errors were occurred during writing frame to the " + "result video file; code %i\n", + err); + return err; + } + + return err; } -typedef struct -{ - mv_image_tracking_model_h target; - mv_video_writer_h writer; - int frame_number; +typedef struct { + mv_image_tracking_model_h target; + mv_video_writer_h writer; + int frame_number; } tracking_cb_data; void tracked_cb( - mv_source_h source, - mv_image_object_h image_object, - mv_engine_config_h engine_cfg, - mv_quadrangle_s *location, - void *user_data) + mv_source_h source, + mv_image_object_h image_object, + mv_engine_config_h engine_cfg, + mv_quadrangle_s *location, + void *user_data) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - if (NULL == user_data) - { - return; - } + if (NULL == user_data) + return; - tracking_cb_data *cb_data = (tracking_cb_data*)user_data; + tracking_cb_data *cb_data = (tracking_cb_data*)user_data; - handle_tracking_result(cb_data->writer, source, cb_data->frame_number, location); + handle_tracking_result(cb_data->writer, source, cb_data->frame_number, location); - 
MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); } void new_frame_cb( - char *buffer, - unsigned int buffer_size, - image_data_s image_data, - void *user_data) + char *buffer, + unsigned int buffer_size, + image_data_s image_data, + void *user_data) { - if (NULL == user_data) - { - return; - } + if (NULL == user_data) + return; - mv_source_h frame = NULL; + mv_source_h frame = NULL; #define release_resources() \ - if (frame) \ - { \ - const int err2 = mv_destroy_source(frame); \ - if (MEDIA_VISION_ERROR_NONE != err2) \ - { \ - printf( \ - "\nERROR: Errors were occurred during source destroying; " \ - "code %i\n", \ - err2); \ - } \ - } - - tracking_cb_data *cb_data = (tracking_cb_data*)user_data; - - ++(cb_data->frame_number); - - int err = mv_create_source(&frame); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "\nERROR: Errors were occurred during source creating; " - "code %i\n", - err); - release_resources(); - return; - } - - err = mv_source_fill_by_buffer( - frame, - buffer, - buffer_size, - image_data.image_width, - image_data.image_height, - image_data.image_colorspace); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: mv_source_h for frame is not filled; code %i\n", err); - release_resources(); - return; - } - - err = mv_image_track( - frame, - cb_data->target, - NULL, - tracked_cb, - cb_data); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "ERROR: Errors were occurred during tracking object on " - "the video; code %i\n", - err); - release_resources(); - return; - } - - release_resources() + if (frame) { \ + const int err2 = mv_destroy_source(frame); \ + if (MEDIA_VISION_ERROR_NONE != err2) { \ + printf( \ + "\nERROR: Errors were occurred during source destroying; " \ + "code %i\n", \ + err2); \ + } \ + } + + tracking_cb_data *cb_data = (tracking_cb_data*)user_data; + + ++(cb_data->frame_number); + + int err = mv_create_source(&frame); + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "\nERROR: Errors were occurred 
during source creating; " + "code %i\n", + err); + release_resources(); + return; + } + + err = mv_source_fill_by_buffer( + frame, + buffer, + buffer_size, + image_data.image_width, + image_data.image_height, + image_data.image_colorspace); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: mv_source_h for frame is not filled; code %i\n", err); + release_resources(); + return; + } + + err = mv_image_track( + frame, + cb_data->target, + NULL, + tracked_cb, + cb_data); + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "ERROR: Errors were occurred during tracking object on " + "the video; code %i\n", + err); + release_resources(); + return; + } + + release_resources() #undef release_resources() } void eos_frame_cb( - void *user_data) + void *user_data) { - if (NULL == user_data) - { - printf("ERROR: eos callback can't stop tracking process."); - return; - } + if (NULL == user_data) { + printf("ERROR: eos callback can't stop tracking process."); + return; + } - pthread_mutex_unlock((pthread_mutex_t*)user_data); + pthread_mutex_unlock((pthread_mutex_t*)user_data); } int perform_track(mv_image_tracking_model_h target) { - if (NULL == target) - { - printf("\nTarget is invalid. It is impossible to track of this target.\n"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (NULL == target) { + printf("\nTarget is invalid. 
It is impossible to track of this target.\n"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - mv_video_reader_h reader = NULL; - mv_video_writer_h writer = NULL; - char *path_to_video = NULL; - char *path_to_generated_video = NULL; - image_data_s image_data = {0}; - unsigned int fps = 0; + mv_video_reader_h reader = NULL; + mv_video_writer_h writer = NULL; + char *path_to_video = NULL; + char *path_to_generated_video = NULL; + image_data_s image_data = {0}; + unsigned int fps = 0; #define release_resources() \ - int err2 = MEDIA_VISION_ERROR_NONE; \ - if (reader) \ - { \ - err2 = mv_destroy_video_reader(reader); \ - if (MEDIA_VISION_ERROR_NONE != err2) \ - { \ - printf( \ - "\nERROR: Errors were occurred during video reader destroying; " \ - "code %i\n", \ - err2); \ - } \ - } \ - if (writer) \ - { \ - err2 = mv_destroy_video_writer(writer); \ - if (MEDIA_VISION_ERROR_NONE != err2) \ - { \ - printf( \ - "\nERROR: Errors were occurred during video writer destroying; " \ - "code %i\n", \ - err2); \ - } \ - } \ - if (path_to_video) \ - { \ - free(path_to_video); \ - } \ - if (path_to_generated_video) \ - { \ - free(path_to_generated_video); \ - } - - while (input_string("Input file name with video for tracking:", - 1024, &path_to_video) == -1) - { - printf("Incorrect input! Try again.\n"); - } - - while (input_string("Input file name for generated video:", - 1024, &path_to_generated_video) == -1) - { - printf("Incorrect input! 
Try again.\n"); - } - - int err = mv_create_video_reader(&reader); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nERROR: Errors were occurred during video reader creating; " - "code %i\n", err); - release_resources(); - MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } - - err = mv_create_video_writer(&writer); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "\nERROR: Errors were occurred during video writer creating; " - "code %i\n", - err); - release_resources(); - MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } - - err = mv_video_reader_load( - reader, - path_to_video, - &image_data, - &fps); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nERROR: Errors were occurred during video loading; code %i\n", err); - release_resources(); - MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } - - printf("Receive frame metadata: wxh - %ux%u, fps - %u, format - %d\n", - image_data.image_width, image_data.image_height, fps, image_data.image_colorspace); - - // Temporary we accept only RGB888 - image_data.image_colorspace = MEDIA_VISION_COLORSPACE_RGB888; - - err = mv_video_writer_init( - writer, - path_to_generated_video, - image_data, - fps); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "\nERROR: Errors were occurred during video writer initializing; " - "code %i\n", - err); - release_resources(); - MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } - - tracking_cb_data cb_data; - cb_data.target = target; - cb_data.writer = writer; - cb_data.frame_number = 0; - err = mv_video_reader_set_new_sample_cb(reader, new_frame_cb, &cb_data); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "\nERROR: Errors were occurred during set new frame callback; " - "code %i\n", - err); - release_resources(); - MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } - - pthread_mutex_t block_during_tracking_mutex; - pthread_mutex_init(&block_during_tracking_mutex, NULL); - pthread_mutex_lock(&block_during_tracking_mutex); - err = mv_video_reader_set_eos_cb(reader, 
eos_frame_cb, &block_during_tracking_mutex); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "\nERROR: Errors were occurred during set new frame callback; " - "code %i\n", - err); - release_resources(); - pthread_mutex_unlock(&block_during_tracking_mutex); - pthread_mutex_destroy(&block_during_tracking_mutex); - MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } - - err = mv_video_reader_start(reader); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "\nERROR: Errors were occurred during video reading starts; " - "code %i\n", - err); - release_resources(); - pthread_mutex_unlock(&block_during_tracking_mutex); - pthread_mutex_destroy(&block_during_tracking_mutex); - MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } - - pthread_mutex_lock(&block_during_tracking_mutex); - pthread_mutex_unlock(&block_during_tracking_mutex); - pthread_mutex_destroy(&block_during_tracking_mutex); - release_resources(); + int err2 = MEDIA_VISION_ERROR_NONE; \ + if (reader) { \ + err2 = mv_destroy_video_reader(reader); \ + if (MEDIA_VISION_ERROR_NONE != err2) { \ + printf( \ + "\nERROR: Errors were occurred during video reader destroying; " \ + "code %i\n", \ + err2); \ + } \ + } \ + if (writer) { \ + err2 = mv_destroy_video_writer(writer); \ + if (MEDIA_VISION_ERROR_NONE != err2) { \ + printf( \ + "\nERROR: Errors were occurred during video writer destroying; " \ + "code %i\n", \ + err2); \ + } \ + } \ + if (path_to_video) { \ + free(path_to_video); \ + } \ + if (path_to_generated_video) { \ + free(path_to_generated_video); \ + } + + while (input_string("Input file name with video for tracking:", + 1024, &path_to_video) == -1) { + printf("Incorrect input! Try again.\n"); + } + + while (input_string("Input file name for generated video:", + 1024, &path_to_generated_video) == -1) { + printf("Incorrect input! 
Try again.\n"); + } + + int err = mv_create_video_reader(&reader); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nERROR: Errors were occurred during video reader creating; " + "code %i\n", err); + release_resources(); + MEDIA_VISION_FUNCTION_LEAVE(); + return err; + } + + err = mv_create_video_writer(&writer); + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "\nERROR: Errors were occurred during video writer creating; " + "code %i\n", + err); + release_resources(); + MEDIA_VISION_FUNCTION_LEAVE(); + return err; + } + + err = mv_video_reader_load( + reader, + path_to_video, + &image_data, + &fps); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nERROR: Errors were occurred during video loading; code %i\n", err); + release_resources(); + MEDIA_VISION_FUNCTION_LEAVE(); + return err; + } + + printf("Receive frame metadata: wxh - %ux%u, fps - %u, format - %d\n", + image_data.image_width, image_data.image_height, fps, image_data.image_colorspace); + + /* Temporary we accept only RGB888 */ + image_data.image_colorspace = MEDIA_VISION_COLORSPACE_RGB888; + + err = mv_video_writer_init( + writer, + path_to_generated_video, + image_data, + fps); + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "\nERROR: Errors were occurred during video writer initializing; " + "code %i\n", + err); + release_resources(); + MEDIA_VISION_FUNCTION_LEAVE(); + return err; + } + + tracking_cb_data cb_data; + cb_data.target = target; + cb_data.writer = writer; + cb_data.frame_number = 0; + err = mv_video_reader_set_new_sample_cb(reader, new_frame_cb, &cb_data); + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "\nERROR: Errors were occurred during set new frame callback; " + "code %i\n", + err); + release_resources(); + MEDIA_VISION_FUNCTION_LEAVE(); + return err; + } + + pthread_mutex_t block_during_tracking_mutex; + pthread_mutex_init(&block_during_tracking_mutex, NULL); + pthread_mutex_lock(&block_during_tracking_mutex); + err = mv_video_reader_set_eos_cb(reader, eos_frame_cb, 
&block_during_tracking_mutex); + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "\nERROR: Errors were occurred during set new frame callback; " + "code %i\n", + err); + release_resources(); + pthread_mutex_unlock(&block_during_tracking_mutex); + pthread_mutex_destroy(&block_during_tracking_mutex); + MEDIA_VISION_FUNCTION_LEAVE(); + return err; + } + + err = mv_video_reader_start(reader); + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "\nERROR: Errors were occurred during video reading starts; " + "code %i\n", + err); + release_resources(); + pthread_mutex_unlock(&block_during_tracking_mutex); + pthread_mutex_destroy(&block_during_tracking_mutex); + MEDIA_VISION_FUNCTION_LEAVE(); + return err; + } + + pthread_mutex_lock(&block_during_tracking_mutex); + pthread_mutex_unlock(&block_during_tracking_mutex); + pthread_mutex_destroy(&block_during_tracking_mutex); + release_resources(); #undef release_resources() - printf("\nTracking process is finished\n"); + printf("\nTracking process is finished\n"); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; + return err; } int perform_save_image_tracking_model(mv_image_tracking_model_h model) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int err = MEDIA_VISION_ERROR_NONE; - char *path_to_file = NULL; + int err = MEDIA_VISION_ERROR_NONE; + char *path_to_file = NULL; - while (input_string( - "Input file name to be generated for image tracking model storing:", - 1024, - &path_to_file) == -1) - { - printf("Incorrect input! Try again.\n"); - } + while (input_string( + "Input file name to be generated for image tracking model storing:", + 1024, &path_to_file) == -1) { + printf("Incorrect input! Try again.\n"); + } - err = mv_image_tracking_model_save(path_to_file, model); + err = mv_image_tracking_model_save(path_to_file, model); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf( - "\nError during saving the image tracking model. 
" - "Error code is %i\n", - err); - free(path_to_file); - return err; - } + if (MEDIA_VISION_ERROR_NONE != err) { + printf( + "\nError during saving the image tracking model. " + "Error code is %i\n", + err); + free(path_to_file); + return err; + } - printf("\nTracking model successfully saved\n"); + printf("\nTracking model successfully saved\n"); - free(path_to_file); + free(path_to_file); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; + return err; } int perform_load_image_tracking_model( - char **path_to_file, mv_image_tracking_model_h *result) + char **path_to_file, mv_image_tracking_model_h *result) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - while (input_string( - "Input file name with image tracking model to be loaded:", - 1024, - path_to_file) == -1) - { - printf("Incorrect input! Try again.\n"); - } + while (input_string( + "Input file name with image tracking model to be loaded:", + 1024, path_to_file) == -1) { + printf("Incorrect input! 
Try again.\n"); + } - int err = mv_image_tracking_model_load(*path_to_file, result); + int err = mv_image_tracking_model_load(*path_to_file, result); - if (MEDIA_VISION_ERROR_NONE != err && NULL != (*result)) - { - printf("Error: tracking model isn't loaded with error code %i\n", err); + if (MEDIA_VISION_ERROR_NONE != err && NULL != (*result)) { + printf("Error: tracking model isn't loaded with error code %i\n", err); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } + return err; + } - printf("\nTracking model successfully loaded\n"); + printf("\nTracking model successfully loaded\n"); - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; + return err; } int perform_clone_image_tracking_model( - mv_image_tracking_model_h src, - mv_image_tracking_model_h *result) + mv_image_tracking_model_h src, + mv_image_tracking_model_h *result) { - MEDIA_VISION_FUNCTION_ENTER(); + MEDIA_VISION_FUNCTION_ENTER(); - int err = mv_image_tracking_model_clone(src, result); + int err = mv_image_tracking_model_clone(src, result); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nError: tracking model isn't cloned with error code %i\n", err); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nError: tracking model isn't cloned with error code %i\n", err); - int err2 = mv_image_tracking_model_destroy(*result); - if (MEDIA_VISION_ERROR_NONE != err2) - { - printf("\nERROR: Errors were occurred during tracking model " - "destroying; code %i\n", err); - } + int err2 = mv_image_tracking_model_destroy(*result); + if (MEDIA_VISION_ERROR_NONE != err2) { + printf("\nERROR: Errors were occurred during tracking model " + "destroying; code %i\n", err); + } - (*result) = NULL; + (*result) = NULL; - MEDIA_VISION_FUNCTION_LEAVE(); + MEDIA_VISION_FUNCTION_LEAVE(); - return err; - } + return err; + } - printf("\nTracking model successfully cloned\n"); + printf("\nTracking model successfully cloned\n"); - MEDIA_VISION_FUNCTION_LEAVE(); + 
MEDIA_VISION_FUNCTION_LEAVE(); - return err; + return err; } int perform_refresh_image_tracking_model(mv_image_tracking_model_h target) { - if (NULL == target) - { - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (NULL == target) + return MEDIA_VISION_ERROR_INVALID_PARAMETER; - const int err = mv_image_tracking_model_refresh(target, NULL); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("\nError: tracking model isn't refreshed with error code %i\n", err); - return err; - } + const int err = mv_image_tracking_model_refresh(target, NULL); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("\nError: tracking model isn't refreshed with error code %i\n", err); + return err; + } - printf("\nTracking model is successfully refreshed.\n"); + printf("\nTracking model is successfully refreshed.\n"); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } void show_testing_objects(const char *title, GArray *testing_objects) { - printf("\n"); - int i = 0; - if (1 > testing_objects->len) - { - printf("There are no created objects.\n"); - } - else - { - printf("%s:\n", title); - printf("-------------------------------------------------------------------------------------\n"); - for (i = 0; i < testing_objects->len; ++i) - { - testing_object_h temp = g_array_index (testing_objects, testing_object_h, i); - if (OBJECT_TYPE_IMAGE_OBJECT == temp->object_type) - { - printf("Image object "); - } - else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == temp->object_type) - { - printf("Image tracking model "); - } - else - { - printf("Unknown testing object "); - } - printf("#%i. 
%s\n", i, temp->actual_label); - } - printf("-------------------------------------------------------------------------------------\n"); - } + printf("\n"); + int i = 0; + if (1 > testing_objects->len) { + printf("There are no created objects.\n"); + } else { + printf("%s:\n", title); + printf("-------------------------------------------------------------------------------------\n"); + for (i = 0; i < testing_objects->len; ++i) { + testing_object_h temp = g_array_index(testing_objects, testing_object_h, i); + if (OBJECT_TYPE_IMAGE_OBJECT == temp->object_type) + printf("Image object "); + else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == temp->object_type) + printf("Image tracking model "); + else + printf("Unknown testing object "); + + printf("#%i. %s\n", i, temp->actual_label); + } + printf("-------------------------------------------------------------------------------------\n"); + } } int select_testing_object(GArray *testing_objects, testing_object_h *result, char *title) { - if (0 == testing_objects->len) - { - printf("Firstly you must create at least one testing object.\n"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - show_testing_objects(title, testing_objects); - int sel_index = 0; - while (input_int("Input number of element:", 0, - testing_objects->len - 1, &sel_index) == -1) - { - printf("Incorrect input! Try again.\n"); - } - (*result) = g_array_index(testing_objects, testing_object_h, sel_index); - return MEDIA_VISION_ERROR_NONE; + if (0 == testing_objects->len) { + printf("Firstly you must create at least one testing object.\n"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + show_testing_objects(title, testing_objects); + int sel_index = 0; + while (input_int("Input number of element:", 0, + testing_objects->len - 1, &sel_index) == -1) { + printf("Incorrect input! 
Try again.\n"); + } + (*result) = g_array_index(testing_objects, testing_object_h, sel_index); + return MEDIA_VISION_ERROR_NONE; } int select_testing_object_index(GArray *testing_objects, guint *result_index, char *title) { - if (0 == testing_objects->len) - { - printf("Firstly you must create at least one testing object.\n"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - show_testing_objects(title, testing_objects); - - int sel_index = 0; - while (input_int("Input number of element:", 0, - testing_objects->len - 1, &sel_index) == -1) - { - printf("Incorrect input! Try again.\n"); - } - (*result_index) = sel_index; - return MEDIA_VISION_ERROR_NONE; + if (0 == testing_objects->len) { + printf("Firstly you must create at least one testing object.\n"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + show_testing_objects(title, testing_objects); + + int sel_index = 0; + while (input_int("Input number of element:", 0, + testing_objects->len - 1, &sel_index) == -1) { + printf("Incorrect input! 
Try again.\n"); + } + (*result_index) = sel_index; + return MEDIA_VISION_ERROR_NONE; } int add_testing_object(GArray *testing_objects, testing_object_h object) { - if (NULL == object) - { - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - g_array_append_val(testing_objects, object); - return MEDIA_VISION_ERROR_NONE; + if (NULL == object) + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + + g_array_append_val(testing_objects, object); + return MEDIA_VISION_ERROR_NONE; } int remove_testing_object(GArray *testing_objects, guint index) { - if (index >= testing_objects->len) - { - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - g_array_remove_index(testing_objects, index); - return MEDIA_VISION_ERROR_NONE; + if (index >= testing_objects->len) + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + + g_array_remove_index(testing_objects, index); + return MEDIA_VISION_ERROR_NONE; } void perform_recognition_cases(GArray *image_objects) { - const char *names[] = { - "Show created set of image objects", - "Generate new image object from source image (mv_image_object_fill )", - "Load existed image object from file (mv_image_object_load)", - "Clone existed image object (mv_image_object_clone)", - "Create empty image object (mv_image_object_create)", - "Save existed image object to the file (mv_image_object_save)", - "Remove image object from created set (mv_image_object_destroy)", - "Get confidence from existed image object (mv_image_object_get_recognition_rate)", - "Recognize all image objects on image (mv_image_recognize)", - "Set label for existed image object (mv_image_set_label_of_object)", - "Get label from existed image object (mv_image_get_label_of_object)", - "Back to the main menu"}; - - int number_of_options = sizeof(names) / sizeof(names[0]); - int options[number_of_options]; - int index = 0; - for (; index < number_of_options; ++index) - { - options[index] = index + 1; - } - - while (1) - { - int err = MEDIA_VISION_ERROR_NONE; - - int sel_opt = show_menu("Select 
action:", options, names, number_of_options); - - switch (sel_opt) - { - case 1: // Show created set of image objects - { - show_testing_objects("Set of image objects", image_objects); - break; - } - case 2: // Generate new image object from source image (mv_image_object_fill) - { - mv_image_object_h temporary = NULL; - char *path_to_image = NULL; - - err = perform_generate_image_object(&temporary, &path_to_image); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("Generation failed (error code - %i)\n", err); - if (NULL != path_to_image) - { - free(path_to_image); - } - break; - } - - testing_object_h added_object; - testing_object_create(&added_object); - testing_object_fill(added_object, temporary, - OBJECT_TYPE_IMAGE_OBJECT, SOURCE_TYPE_GENERATION, path_to_image); - - if (NULL != path_to_image) - { - free(path_to_image); - } - - add_testing_object(image_objects, added_object); - break; - } - case 3: // Load existed image object from file (mv_image_object_load) - { - mv_image_object_h temporary_image_object = NULL; - char *path_to_object = NULL; - - err = perform_load_image_object( - &path_to_object, &temporary_image_object); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("Loading failed (error code - %i)\n", err); - break; - } - - testing_object_h added_object = NULL; - testing_object_create(&added_object); - testing_object_fill( - added_object, - temporary_image_object, - OBJECT_TYPE_IMAGE_OBJECT, - SOURCE_TYPE_LOADING, - path_to_object); - - free(path_to_object); - - add_testing_object(image_objects, added_object); - break; - } - case 4: // Clone existed image object (mv_image_object_clone) - { - if (image_objects->len <= 0) - { - printf("\nFirstly you must create at least one image object.\n"); - break; - } - - testing_object_h temporary_testing_object = NULL; - select_testing_object( - image_objects, - &temporary_testing_object, - "Select the object you want to clone"); - - mv_image_object_h temporary_image_object = NULL; - 
perform_clone_image_object( - temporary_testing_object->entity, - &temporary_image_object); - - testing_object_h added_object = NULL; - testing_object_create(&added_object); - testing_object_fill( - added_object, - temporary_image_object, - OBJECT_TYPE_IMAGE_OBJECT, - SOURCE_TYPE_CLONING, - temporary_testing_object); - - add_testing_object(image_objects, added_object); - break; - } - case 5: // Create empty image object (mv_image_object_create) - { - mv_image_object_h temporary_image_object = NULL; - int err = mv_image_object_create(&temporary_image_object); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: image object creation is failed with code %i\n", err); - break; - } - - testing_object_h added_object = NULL; - testing_object_create(&added_object); - testing_object_fill( - added_object, - temporary_image_object, - OBJECT_TYPE_IMAGE_OBJECT, - SOURCE_TYPE_EMPTY, - NULL); - - add_testing_object(image_objects, added_object); - printf("\nImage object successfully created\n"); - break; - } - case 6: // Save existed image object to the file (mv_image_object_save) - { - if (image_objects->len <= 0) - { - printf("\nFirstly you must create at least one image object.\n"); - break; - } - - testing_object_h temporary_testing_object = NULL; - select_testing_object(image_objects, &temporary_testing_object, - "Select the object you want to save"); - perform_save_image_object(temporary_testing_object->entity); - break; - } - case 7: // Remove image object from created set (mv_image_object_destroy) - { - if (image_objects->len <= 0) - { - printf("\nFirstly you must create at least one image object.\n"); - break; - } - - guint selected_index; - int err = select_testing_object_index( - image_objects, - &selected_index, - "Select the object you want to remove"); - if (MEDIA_VISION_ERROR_NONE == err) - { - remove_testing_object(image_objects, selected_index); - printf("\nImage object successfully removed\n"); - } - break; - } - case 8: // Get confidence from existed 
image object (mv_image_object_get_recognition_rate) - { - if (image_objects->len <= 0) - { - printf("\nFirstly you must create at least one image object.\n"); - break; - } - - testing_object_h temporary_testing_object = NULL; - select_testing_object(image_objects, &temporary_testing_object, - "Select the object from which you want getting confidence"); - perform_get_confidence(temporary_testing_object->entity); - break; - } - case 9: // Recognize all image objects on image (mv_image_recognize) - { - if (image_objects->len <= 0) - { - printf("\nFirstly you must create at least one image object.\n"); - break; - } - - mv_image_object_h *objects_pool = malloc(sizeof(mv_image_object_h) * image_objects->len); - int index = 0; - for (;index < image_objects->len; ++index) - { - objects_pool[index] = g_array_index(image_objects, testing_object_h, index)->entity; - } - perform_recognize(objects_pool, image_objects->len); - free(objects_pool); - break; - } - case 10: // Set label for existed image object (mv_image_object_set_label) - { - if (image_objects->len <= 0) - { - printf("\nFirstly you must create at least one image object.\n"); - break; - } - - testing_object_h temporary_testing_object = NULL; - select_testing_object(image_objects, &temporary_testing_object, - "Select the object for which you want setting label"); - perform_set_label(temporary_testing_object->entity); - break; - } - case 11: // Get label from existed image object (mv_image_object_get_label) - { - if (image_objects->len <= 0) - { - printf("\nFirstly you must create at least one image object.\n"); - break; - } - - testing_object_h temporary_testing_object = NULL; - select_testing_object(image_objects, &temporary_testing_object, - "Select the object from which you want getting label"); - perform_get_label(temporary_testing_object->entity); - break; - } - case 12: // Back to the main menu - { - return; - } - default: - printf("Invalid option.\n"); - } - } + const char *names[] = { + "Show created set of 
image objects", + "Generate new image object from source image (mv_image_object_fill )", + "Load existed image object from file (mv_image_object_load)", + "Clone existed image object (mv_image_object_clone)", + "Create empty image object (mv_image_object_create)", + "Save existed image object to the file (mv_image_object_save)", + "Remove image object from created set (mv_image_object_destroy)", + "Get confidence from existed image object (mv_image_object_get_recognition_rate)", + "Recognize all image objects on image (mv_image_recognize)", + "Set label for existed image object (mv_image_set_label_of_object)", + "Get label from existed image object (mv_image_get_label_of_object)", + "Back to the main menu"}; + + int number_of_options = sizeof(names) / sizeof(names[0]); + int options[number_of_options]; + int index = 0; + for (; index < number_of_options; ++index) + options[index] = index + 1; + + while (1) { + int err = MEDIA_VISION_ERROR_NONE; + + int sel_opt = show_menu("Select action:", options, names, number_of_options); + + switch (sel_opt) { + case 1: { + /* Show created set of image objects */ + show_testing_objects("Set of image objects", image_objects); + break; + } + case 2: { + /* Generate new image object from source image (mv_image_object_fill) */ + mv_image_object_h temporary = NULL; + char *path_to_image = NULL; + + err = perform_generate_image_object(&temporary, &path_to_image); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("Generation failed (error code - %i)\n", err); + if (NULL != path_to_image) + free(path_to_image); + + break; + } + + testing_object_h added_object; + testing_object_create(&added_object); + testing_object_fill(added_object, temporary, + OBJECT_TYPE_IMAGE_OBJECT, SOURCE_TYPE_GENERATION, path_to_image); + + if (NULL != path_to_image) + free(path_to_image); + + add_testing_object(image_objects, added_object); + break; + } + case 3: { + /* Load existed image object from file (mv_image_object_load) */ + mv_image_object_h 
temporary_image_object = NULL; + char *path_to_object = NULL; + + err = perform_load_image_object( + &path_to_object, &temporary_image_object); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf("Loading failed (error code - %i)\n", err); + break; + } + + testing_object_h added_object = NULL; + testing_object_create(&added_object); + testing_object_fill( + added_object, + temporary_image_object, + OBJECT_TYPE_IMAGE_OBJECT, + SOURCE_TYPE_LOADING, + path_to_object); + + free(path_to_object); + + add_testing_object(image_objects, added_object); + break; + } + case 4: { + /* Clone existed image object (mv_image_object_clone) */ + if (image_objects->len <= 0) { + printf("\nFirstly you must create at least one image object.\n"); + break; + } + + testing_object_h temporary_testing_object = NULL; + select_testing_object( + image_objects, + &temporary_testing_object, + "Select the object you want to clone"); + + mv_image_object_h temporary_image_object = NULL; + perform_clone_image_object( + temporary_testing_object->entity, + &temporary_image_object); + + testing_object_h added_object = NULL; + testing_object_create(&added_object); + testing_object_fill( + added_object, + temporary_image_object, + OBJECT_TYPE_IMAGE_OBJECT, + SOURCE_TYPE_CLONING, + temporary_testing_object); + + add_testing_object(image_objects, added_object); + break; + } + case 5: { + /* Create empty image object (mv_image_object_create) */ + mv_image_object_h temporary_image_object = NULL; + int err = mv_image_object_create(&temporary_image_object); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: image object creation is failed with code %i\n", err); + break; + } + + testing_object_h added_object = NULL; + testing_object_create(&added_object); + testing_object_fill( + added_object, + temporary_image_object, + OBJECT_TYPE_IMAGE_OBJECT, + SOURCE_TYPE_EMPTY, + NULL); + + add_testing_object(image_objects, added_object); + printf("\nImage object successfully created\n"); + break; + } + case 6: { + 
/* Save existed image object to the file (mv_image_object_save) */ + if (image_objects->len <= 0) { + printf("\nFirstly you must create at least one image object.\n"); + break; + } + + testing_object_h temporary_testing_object = NULL; + select_testing_object(image_objects, &temporary_testing_object, + "Select the object you want to save"); + perform_save_image_object(temporary_testing_object->entity); + break; + } + case 7: { + /* Remove image object from created set (mv_image_object_destroy) */ + if (image_objects->len <= 0) { + printf("\nFirstly you must create at least one image object.\n"); + break; + } + + guint selected_index; + int err = select_testing_object_index( + image_objects, + &selected_index, + "Select the object you want to remove"); + if (MEDIA_VISION_ERROR_NONE == err) { + remove_testing_object(image_objects, selected_index); + printf("\nImage object successfully removed\n"); + } + break; + } + case 8: { + /* Get confidence from existed image object (mv_image_object_get_recognition_rate) */ + if (image_objects->len <= 0) { + printf("\nFirstly you must create at least one image object.\n"); + break; + } + + testing_object_h temporary_testing_object = NULL; + select_testing_object(image_objects, &temporary_testing_object, + "Select the object from which you want getting confidence"); + perform_get_confidence(temporary_testing_object->entity); + break; + } + case 9: { + /* Recognize all image objects on image (mv_image_recognize) */ + if (image_objects->len <= 0) { + printf("\nFirstly you must create at least one image object.\n"); + break; + } + + mv_image_object_h *objects_pool = malloc(sizeof(mv_image_object_h) * image_objects->len); + int index = 0; + for (; index < image_objects->len; ++index) + objects_pool[index] = g_array_index(image_objects, testing_object_h, index)->entity; + + perform_recognize(objects_pool, image_objects->len); + free(objects_pool); + break; + } + case 10: { + /* Set label for existed image object 
(mv_image_object_set_label) */ + if (image_objects->len <= 0) { + printf("\nFirstly you must create at least one image object.\n"); + break; + } + + testing_object_h temporary_testing_object = NULL; + select_testing_object(image_objects, &temporary_testing_object, + "Select the object for which you want setting label"); + perform_set_label(temporary_testing_object->entity); + break; + } + case 11: { + /* Get label from existed image object (mv_image_object_get_label) */ + if (image_objects->len <= 0) { + printf("\nFirstly you must create at least one image object.\n"); + break; + } + + testing_object_h temporary_testing_object = NULL; + select_testing_object(image_objects, &temporary_testing_object, + "Select the object from which you want getting label"); + perform_get_label(temporary_testing_object->entity); + break; + } + case 12: { + /* Back to the main menu */ + return; + } + default: + printf("Invalid option.\n"); + } + } } void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models) { - const char *names[] = { - "Show created set of tracking models", - "Create empty tracking model (mv_image_tracking_model_create)", - "Generate model based on image object (mv_image_tracking_model_set_target)", - "Load existed tracking model from file (mv_image_tracking_model_load)", - "Clone existed tracking model (mv_image_tracking_model_clone)", - "Save existed tracking model to the file (mv_image_tracking_model_save)", - "Remove tracking model from created set (mv_image_tracking_model_destroy)", - "Refresh tracking model (mv_image_tracking_model_refresh)", - "Track (mv_image_track)", - "Back to the main menu"}; - - int number_of_options = sizeof(names) / sizeof(names[0]); - int options[number_of_options]; - int index = 0; - for (; index < number_of_options; ++index) - { - options[index] = index + 1; - } - - while (1) - { - int err = MEDIA_VISION_ERROR_NONE; - - int sel_opt = show_menu("Select action:", options, names, number_of_options); - - switch 
(sel_opt) - { - case 1: // Show created set of tracking models - { - show_testing_objects("Set of image tracking models", image_tracking_models); - break; - } - case 2: // Create empty tracking model (mv_image_tracking_model_create) - { - mv_image_tracking_model_h temporary_image_tracking_model = NULL; - - int err = mv_image_tracking_model_create(&temporary_image_tracking_model); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: tracking model creation is failed with code %i\n", err); - break; - } - - testing_object_h added_object = NULL; - testing_object_create(&added_object); - testing_object_fill( - added_object, - temporary_image_tracking_model, - OBJECT_TYPE_IMAGE_TRACKING_MODEL, - SOURCE_TYPE_EMPTY, - NULL); - - add_testing_object(image_tracking_models, added_object); - printf("\nTracking model successfully created\n"); - break; - } - case 3: // Generate model based on image object (mv_image_tracking_model_set_target) - { - if (image_objects->len <= 0) - { - printf("\nFirstly you must create at least one image object.\n"); - break; - } - - mv_image_tracking_model_h temporary_image_tracking_model = NULL; - err = mv_image_tracking_model_create(&temporary_image_tracking_model); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("Error: tracking model isn't created with error code %i\n", err); - break; - } - - testing_object_h temporary_testing_object = NULL; - select_testing_object( - image_objects, - &temporary_testing_object, - "Select the image object for tracking"); - - err = mv_image_tracking_model_set_target( - (mv_image_object_h)(temporary_testing_object->entity), - temporary_image_tracking_model); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("Error: target isn't set with error code %i\n", err); - break; - } - - testing_object_h added_object = NULL; - testing_object_create(&added_object); - testing_object_fill( - added_object, - temporary_image_tracking_model, - OBJECT_TYPE_IMAGE_TRACKING_MODEL, - SOURCE_TYPE_GENERATION, - 
temporary_testing_object); - - add_testing_object(image_tracking_models, added_object); - printf("\nTracking model successfully generated\n"); - break; - } - case 4: // Load existed tracking model from file (mv_image_tracking_model_load) - { - mv_image_tracking_model_h temporary_image_tracking_model = NULL; - char *path_to_object = NULL; - - err = perform_load_image_tracking_model( - &path_to_object, &temporary_image_tracking_model); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("Loading failed (error code - %i)\n", err); - break; - } - - testing_object_h added_object = NULL; - testing_object_create(&added_object); - testing_object_fill( - added_object, - temporary_image_tracking_model, - OBJECT_TYPE_IMAGE_TRACKING_MODEL, - SOURCE_TYPE_LOADING, - path_to_object); - - free(path_to_object); - - add_testing_object(image_tracking_models, added_object); - break; - } - case 5: // Clone existed tracking model (mv_image_tracking_model_clone) - { - if (image_tracking_models->len <= 0) - { - printf( - "\nFirstly you must create at least one image " - "tracking model.\n"); - break; - } - - testing_object_h temporary_testing_object = NULL; - select_testing_object( - image_tracking_models, - &temporary_testing_object, - "Select the tracking model you want to clone"); - - mv_image_tracking_model_h temporary_image_tracking_model = NULL; - perform_clone_image_tracking_model( - temporary_testing_object->entity, - &temporary_image_tracking_model); - - testing_object_h added_object = NULL; - testing_object_create(&added_object); - testing_object_fill( - added_object, - temporary_image_tracking_model, - OBJECT_TYPE_IMAGE_TRACKING_MODEL, - SOURCE_TYPE_CLONING, - temporary_testing_object); - - add_testing_object(image_tracking_models, added_object); - break; - } - case 6: // Save existed tracking model to the file (mv_image_tracking_model_save) - { - if (image_tracking_models->len <= 0) - { - printf( - "\nFirstly you must create at least one image " - "tracking model.\n"); - 
break; - } - - testing_object_h temporary_testing_object = NULL; - select_testing_object( - image_tracking_models, - &temporary_testing_object, - "Select the tracking model you want to save"); - - perform_save_image_tracking_model(temporary_testing_object->entity); - break; - } - case 7: // Remove tracking model from created set (mv_image_tracking_model_destroy) - { - if (image_tracking_models->len <= 0) - { - printf( - "\nFirstly you must create at least one image " - "tracking model.\n"); - break; - } - - guint selected_index; - err = select_testing_object_index( - image_tracking_models, - &selected_index, - "Select the object you want to remove"); - - if (MEDIA_VISION_ERROR_NONE == err) - { - remove_testing_object(image_tracking_models, selected_index); - printf("\nTracking model successfully removed\n"); - } - break; - } - case 8: // Refresh tracking model (mv_image_tracking_model_refresh) - { - if (image_tracking_models->len <= 0) - { - printf( - "\nFirstly you must create at least one image " - "tracking model.\n"); - break; - } - - testing_object_h temporary_testing_object = NULL; - select_testing_object( - image_tracking_models, - &temporary_testing_object, - "Select the tracking model you want to refresh"); - - perform_refresh_image_tracking_model(temporary_testing_object->entity); - break; - } - case 9: // Track (mv_image_track) - { - if (image_tracking_models->len <= 0) - { - printf( - "\nFirstly you must create at least one image " - "tracking model.\n"); - break; - } - - testing_object_h temporary_testing_object = NULL; - err = select_testing_object( - image_tracking_models, - &temporary_testing_object, - "Select the object which you want to track on video"); - - if (MEDIA_VISION_ERROR_NONE == err) - { - perform_track(temporary_testing_object->entity); - } - break; - } - case 10: // Back to the main menu - { - return; - } - } - } + const char *names[] = { + "Show created set of tracking models", + "Create empty tracking model 
(mv_image_tracking_model_create)", + "Generate model based on image object (mv_image_tracking_model_set_target)", + "Load existed tracking model from file (mv_image_tracking_model_load)", + "Clone existed tracking model (mv_image_tracking_model_clone)", + "Save existed tracking model to the file (mv_image_tracking_model_save)", + "Remove tracking model from created set (mv_image_tracking_model_destroy)", + "Refresh tracking model (mv_image_tracking_model_refresh)", + "Track (mv_image_track)", + "Back to the main menu"}; + + int number_of_options = sizeof(names) / sizeof(names[0]); + int options[number_of_options]; + int index = 0; + for (; index < number_of_options; ++index) + options[index] = index + 1; + + while (1) { + int err = MEDIA_VISION_ERROR_NONE; + + int sel_opt = show_menu("Select action:", options, names, number_of_options); + + switch (sel_opt) { + case 1: { + /* Show created set of tracking models */ + show_testing_objects("Set of image tracking models", image_tracking_models); + break; + } + case 2: { + /* Create empty tracking model (mv_image_tracking_model_create) */ + mv_image_tracking_model_h temporary_image_tracking_model = NULL; + + int err = mv_image_tracking_model_create(&temporary_image_tracking_model); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: tracking model creation is failed with code %i\n", err); + break; + } + + testing_object_h added_object = NULL; + testing_object_create(&added_object); + testing_object_fill( + added_object, + temporary_image_tracking_model, + OBJECT_TYPE_IMAGE_TRACKING_MODEL, + SOURCE_TYPE_EMPTY, + NULL); + + add_testing_object(image_tracking_models, added_object); + printf("\nTracking model successfully created\n"); + break; + } + case 3: { + /* Generate model based on image object (mv_image_tracking_model_set_target) */ + if (image_objects->len <= 0) { + printf("\nFirstly you must create at least one image object.\n"); + break; + } + + mv_image_tracking_model_h temporary_image_tracking_model = 
NULL; + err = mv_image_tracking_model_create(&temporary_image_tracking_model); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("Error: tracking model isn't created with error code %i\n", err); + break; + } + + testing_object_h temporary_testing_object = NULL; + select_testing_object( + image_objects, + &temporary_testing_object, + "Select the image object for tracking"); + + err = mv_image_tracking_model_set_target( + (mv_image_object_h)(temporary_testing_object->entity), + temporary_image_tracking_model); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("Error: target isn't set with error code %i\n", err); + break; + } + + testing_object_h added_object = NULL; + testing_object_create(&added_object); + testing_object_fill( + added_object, + temporary_image_tracking_model, + OBJECT_TYPE_IMAGE_TRACKING_MODEL, + SOURCE_TYPE_GENERATION, + temporary_testing_object); + + add_testing_object(image_tracking_models, added_object); + printf("\nTracking model successfully generated\n"); + break; + } + case 4: { + /* Load existed tracking model from file (mv_image_tracking_model_load) */ + mv_image_tracking_model_h temporary_image_tracking_model = NULL; + char *path_to_object = NULL; + + err = perform_load_image_tracking_model( + &path_to_object, &temporary_image_tracking_model); + + if (MEDIA_VISION_ERROR_NONE != err) { + printf("Loading failed (error code - %i)\n", err); + break; + } + + testing_object_h added_object = NULL; + testing_object_create(&added_object); + testing_object_fill( + added_object, + temporary_image_tracking_model, + OBJECT_TYPE_IMAGE_TRACKING_MODEL, + SOURCE_TYPE_LOADING, + path_to_object); + + free(path_to_object); + + add_testing_object(image_tracking_models, added_object); + break; + } + case 5: { + /* Clone existed tracking model (mv_image_tracking_model_clone) */ + if (image_tracking_models->len <= 0) { + printf( + "\nFirstly you must create at least one image " + "tracking model.\n"); + break; + } + + testing_object_h temporary_testing_object = 
NULL; + select_testing_object( + image_tracking_models, + &temporary_testing_object, + "Select the tracking model you want to clone"); + + mv_image_tracking_model_h temporary_image_tracking_model = NULL; + perform_clone_image_tracking_model( + temporary_testing_object->entity, + &temporary_image_tracking_model); + + testing_object_h added_object = NULL; + testing_object_create(&added_object); + testing_object_fill( + added_object, + temporary_image_tracking_model, + OBJECT_TYPE_IMAGE_TRACKING_MODEL, + SOURCE_TYPE_CLONING, + temporary_testing_object); + + add_testing_object(image_tracking_models, added_object); + break; + } + case 6: { + /* Save existed tracking model to the file (mv_image_tracking_model_save) */ + if (image_tracking_models->len <= 0) { + printf( + "\nFirstly you must create at least one image " + "tracking model.\n"); + break; + } + + testing_object_h temporary_testing_object = NULL; + select_testing_object( + image_tracking_models, + &temporary_testing_object, + "Select the tracking model you want to save"); + + perform_save_image_tracking_model(temporary_testing_object->entity); + break; + } + case 7: { + /* Remove tracking model from created set (mv_image_tracking_model_destroy) */ + if (image_tracking_models->len <= 0) { + printf( + "\nFirstly you must create at least one image " + "tracking model.\n"); + break; + } + + guint selected_index; + err = select_testing_object_index( + image_tracking_models, + &selected_index, + "Select the object you want to remove"); + + if (MEDIA_VISION_ERROR_NONE == err) { + remove_testing_object(image_tracking_models, selected_index); + printf("\nTracking model successfully removed\n"); + } + break; + } + case 8: { + /* Refresh tracking model (mv_image_tracking_model_refresh) */ + if (image_tracking_models->len <= 0) { + printf( + "\nFirstly you must create at least one image " + "tracking model.\n"); + break; + } + + testing_object_h temporary_testing_object = NULL; + select_testing_object( + 
image_tracking_models, + &temporary_testing_object, + "Select the tracking model you want to refresh"); + + perform_refresh_image_tracking_model(temporary_testing_object->entity); + break; + } + case 9: { + /* Track (mv_image_track) */ + if (image_tracking_models->len <= 0) { + printf( + "\nFirstly you must create at least one image " + "tracking model.\n"); + break; + } + + testing_object_h temporary_testing_object = NULL; + err = select_testing_object( + image_tracking_models, + &temporary_testing_object, + "Select the object which you want to track on video"); + + if (MEDIA_VISION_ERROR_NONE == err) + perform_track(temporary_testing_object->entity); + + break; + } + case 10: { + /* Back to the main menu */ + return; + } + } + } } int main(void) { - LOGI("Image Media Vision Testsuite is launched."); - - GArray *image_objects = g_array_new(FALSE, FALSE, sizeof(testing_object_h)); - GArray *image_tracking_models = g_array_new(FALSE, FALSE,sizeof(testing_object_h)); - - const int options[3] = { 1, 2, 3 }; - const char *names[3] = { - "Recognition cases", - "Tracking cases", - "Exit" }; - - mv_image_object_h current_object = NULL; - - while(1) - { - char exit = 'n'; - int sel_opt = show_menu("Select action:", options, names, 3); - switch (sel_opt) - { - case 1: // Recognition cases - perform_recognition_cases(image_objects); - break; - case 2: // Tracking cases - perform_tracking_cases(image_objects, image_tracking_models); - break; - case 3: // Exit - exit = 'y'; - break; - default: - printf("Invalid option.\n"); - sel_opt = 0; - continue; - } - if ('y' == exit) - { - sel_opt = 0; - const int options_last[2] = { 1, 2 }; - const char *names_last[2] = { "No", "Yes" }; - - while (sel_opt == 0) - { - sel_opt = show_menu("Are you sure?", - options_last, names_last, 2); - switch (sel_opt) - { - case 1: - exit = 'n'; - break; - case 2: - exit = 'y'; - break; - default: - printf("Invalid option. 
Back to the main menu."); - sel_opt = 0; - break; - } - } - - if ('y' == exit) - { - break; - } - } - - } - - guint i = 0; - for (i = 0; i < image_objects->len; ++i) - { - testing_object_h temp = g_array_index( - image_objects, - testing_object_h, - i); - testing_object_destroy(&temp); - } - g_array_free(image_objects, TRUE); - - for (i = 0; i < image_tracking_models->len; ++i) - { - testing_object_h temp = g_array_index( - image_tracking_models, - testing_object_h, - i); - testing_object_destroy(&temp); - } - g_array_free(image_tracking_models, TRUE); - - LOGI("Image Media Vision Testsuite is closed"); - - return 0; + LOGI("Image Media Vision Testsuite is launched."); + + GArray *image_objects = g_array_new(FALSE, FALSE, sizeof(testing_object_h)); + GArray *image_tracking_models = g_array_new(FALSE, FALSE, sizeof(testing_object_h)); + + const int options[3] = { 1, 2, 3 }; + const char *names[3] = { + "Recognition cases", + "Tracking cases", + "Exit" }; + + mv_image_object_h current_object = NULL; + + while (1) { + char exit = 'n'; + int sel_opt = show_menu("Select action:", options, names, 3); + switch (sel_opt) { + case 1: + /* Recognition cases */ + perform_recognition_cases(image_objects); + break; + case 2: + /* Tracking cases */ + perform_tracking_cases(image_objects, image_tracking_models); + break; + case 3: + /* Exit */ + exit = 'y'; + break; + default: + printf("Invalid option.\n"); + sel_opt = 0; + continue; + } + + if ('y' == exit) { + sel_opt = 0; + const int options_last[2] = { 1, 2 }; + const char *names_last[2] = { "No", "Yes" }; + + while (sel_opt == 0) { + sel_opt = show_menu("Are you sure?", + options_last, names_last, 2); + switch (sel_opt) { + case 1: + exit = 'n'; + break; + case 2: + exit = 'y'; + break; + default: + printf("Invalid option. 
Back to the main menu."); + sel_opt = 0; + break; + } + } + + if ('y' == exit) + break; + } + } + + guint i = 0; + for (i = 0; i < image_objects->len; ++i) { + testing_object_h temp = g_array_index( + image_objects, + testing_object_h, i); + testing_object_destroy(&temp); + } + g_array_free(image_objects, TRUE); + + for (i = 0; i < image_tracking_models->len; ++i) { + testing_object_h temp = g_array_index( + image_tracking_models, + testing_object_h, i); + testing_object_destroy(&temp); + } + g_array_free(image_tracking_models, TRUE); + + LOGI("Image Media Vision Testsuite is closed"); + + return 0; } -- 2.7.4 From ee608e5cbcbae6c8ee6258ca68e7f0b42c1b68a8 Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Mon, 9 Nov 2015 16:40:39 +0900 Subject: [PATCH 05/16] Updated Image recognition and tracking functionality, and Fixed face and image save/load functionality Change-Id: I239f59dbda12f66a8f47c438a9a5ef49e3e2f578 Signed-off-by: Tae-Young Chung --- CMakeLists.txt | 6 +- include/mv_image.h | 25 +- media-vision-config.json | 8 +- mv_face/face/src/FaceRecognitionModel.cpp | 84 ++-- mv_face/face/src/FaceTrackingModel.cpp | 35 +- mv_image/image/CMakeLists.txt | 4 +- .../image/include/Features/BasicExtractorFactory.h | 40 ++ mv_image/image/include/Features/FeatureExtractor.h | 79 ++++ .../include/Features/FeatureExtractorFactory.h | 37 ++ mv_image/image/include/Features/FeatureMatcher.h | 73 ++++ mv_image/image/include/Features/FeaturePack.h | 79 ++++ .../image/include/Features/ORBExtractorFactory.h | 55 +++ mv_image/image/include/ImageConfig.h | 82 +++- mv_image/image/include/ImageMathUtil.h | 42 +- mv_image/image/include/ImageTracker.h | 93 ----- .../image/include/{ => Recognition}/ImageObject.h | 56 +-- .../include/{ => Recognition}/ImageRecognizer.h | 27 +- mv_image/image/include/Tracking/AsyncTracker.h | 131 ++++++ mv_image/image/include/Tracking/CascadeTracker.h | 132 ++++++ .../include/Tracking/FeatureSubstitutionTracker.h | 108 +++++ .../{ => 
Tracking}/ImageContourStabilizator.h | 34 +- .../include/{ => Tracking}/ImageTrackingModel.h | 117 ++---- mv_image/image/include/Tracking/MFTracker.h | 151 +++++++ mv_image/image/include/Tracking/ObjectTracker.h | 80 ++++ .../include/Tracking/RecognitionBasedTracker.h | 93 +++++ .../image/src/Features/BasicExtractorFactory.cpp | 48 +++ mv_image/image/src/Features/FeatureExtractor.cpp | 140 +++++++ .../image/src/Features/FeatureExtractorFactory.cpp | 28 ++ mv_image/image/src/Features/FeatureMatcher.cpp | 244 +++++++++++ mv_image/image/src/Features/FeaturePack.cpp | 58 +++ .../image/src/Features/ORBExtractorFactory.cpp | 145 +++++++ mv_image/image/src/ImageConfig.cpp | 31 +- mv_image/image/src/ImageMathUtil.cpp | 90 ++++- mv_image/image/src/ImageObject.cpp | 446 --------------------- mv_image/image/src/ImageTracker.cpp | 332 --------------- mv_image/image/src/ImageTrackingModel.cpp | 340 ---------------- mv_image/image/src/Recognition/ImageObject.cpp | 376 +++++++++++++++++ .../src/{ => Recognition}/ImageRecognizer.cpp | 121 +++--- mv_image/image/src/Tracking/AsyncTracker.cpp | 184 +++++++++ mv_image/image/src/Tracking/CascadeTracker.cpp | 195 +++++++++ .../src/Tracking/FeatureSubstitutionTracker.cpp | 132 ++++++ .../{ => Tracking}/ImageContourStabilizator.cpp | 186 ++++++--- mv_image/image/src/Tracking/ImageTrackingModel.cpp | 362 +++++++++++++++++ mv_image/image/src/Tracking/MFTracker.cpp | 410 +++++++++++++++++++ mv_image/image/src/Tracking/ObjectTracker.cpp | 27 ++ .../image/src/Tracking/RecognitionBasedTracker.cpp | 77 ++++ mv_image/image/src/mv_image_open.cpp | 224 +++++++---- packaging/capi-media-vision.spec | 2 +- 48 files changed, 4216 insertions(+), 1653 deletions(-) create mode 100644 mv_image/image/include/Features/BasicExtractorFactory.h create mode 100644 mv_image/image/include/Features/FeatureExtractor.h create mode 100644 mv_image/image/include/Features/FeatureExtractorFactory.h create mode 100644 mv_image/image/include/Features/FeatureMatcher.h 
create mode 100644 mv_image/image/include/Features/FeaturePack.h create mode 100644 mv_image/image/include/Features/ORBExtractorFactory.h delete mode 100644 mv_image/image/include/ImageTracker.h rename mv_image/image/include/{ => Recognition}/ImageObject.h (82%) rename mv_image/image/include/{ => Recognition}/ImageRecognizer.h (81%) create mode 100644 mv_image/image/include/Tracking/AsyncTracker.h create mode 100644 mv_image/image/include/Tracking/CascadeTracker.h create mode 100644 mv_image/image/include/Tracking/FeatureSubstitutionTracker.h rename mv_image/image/include/{ => Tracking}/ImageContourStabilizator.h (68%) rename mv_image/image/include/{ => Tracking}/ImageTrackingModel.h (60%) create mode 100644 mv_image/image/include/Tracking/MFTracker.h create mode 100644 mv_image/image/include/Tracking/ObjectTracker.h create mode 100644 mv_image/image/include/Tracking/RecognitionBasedTracker.h create mode 100644 mv_image/image/src/Features/BasicExtractorFactory.cpp create mode 100644 mv_image/image/src/Features/FeatureExtractor.cpp create mode 100644 mv_image/image/src/Features/FeatureExtractorFactory.cpp create mode 100644 mv_image/image/src/Features/FeatureMatcher.cpp create mode 100644 mv_image/image/src/Features/FeaturePack.cpp create mode 100644 mv_image/image/src/Features/ORBExtractorFactory.cpp delete mode 100644 mv_image/image/src/ImageObject.cpp delete mode 100644 mv_image/image/src/ImageTracker.cpp delete mode 100644 mv_image/image/src/ImageTrackingModel.cpp create mode 100644 mv_image/image/src/Recognition/ImageObject.cpp rename mv_image/image/src/{ => Recognition}/ImageRecognizer.cpp (69%) create mode 100644 mv_image/image/src/Tracking/AsyncTracker.cpp create mode 100644 mv_image/image/src/Tracking/CascadeTracker.cpp create mode 100644 mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp rename mv_image/image/src/{ => Tracking}/ImageContourStabilizator.cpp (61%) create mode 100644 mv_image/image/src/Tracking/ImageTrackingModel.cpp create mode 
100644 mv_image/image/src/Tracking/MFTracker.cpp create mode 100644 mv_image/image/src/Tracking/ObjectTracker.cpp create mode 100644 mv_image/image/src/Tracking/RecognitionBasedTracker.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index ebd2df1..8452d0a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -104,12 +104,12 @@ ADD_SUBDIRECTORY(mv_face) aux_source_directory(src SOURCES) ADD_LIBRARY(${fw_name} SHARED ${SOURCES}) -TARGET_LINK_LIBRARIES(${fw_name} ${${fw_name}_LDFLAGS} - ${MV_COMMON_LIB_NAME} +TARGET_LINK_LIBRARIES(${fw_name} ${MV_COMMON_LIB_NAME} ${MV_BARCODE_DETECTOR_LIB_NAME} ${MV_BARCODE_GENERATOR_LIB_NAME} ${MV_IMAGE_LIB_NAME} - ${MV_FACE_LIB_NAME}) + ${MV_FACE_LIB_NAME} + ${${fw_name}_LDFLAGS}) SET_TARGET_PROPERTIES(${fw_name} PROPERTIES diff --git a/include/mv_image.h b/include/mv_image.h index 5fa9cf6..5efb9b6 100644 --- a/include/mv_image.h +++ b/include/mv_image.h @@ -150,7 +150,7 @@ extern "C" { /** * @brief Defines MV_IMAGE_TRACKING_EXPECTED_OFFSET to set the expected tracking * offset attribute of the engine configuration. - * @detials Relative offset value, for which the object offset is + * @details Relative offset value, for which the object offset is * expected (relative to the object size in the current frame). * Value is a double and the defalut is 0 * @@ -162,7 +162,7 @@ extern "C" { /** * @brief Defines MV_IMAGE_TRACKING_USE_STABLIZATION to enable the contour - * stabilization during tracking process. + * stabilization during tracking process. Default value is true. * * @since_tizen 3.0 * @see mv_engine_config_set_bool_attribute() @@ -172,11 +172,14 @@ extern "C" { /** * @brief Defines MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT to set the - * tolerant shift for the tracking stabilization attribute of the engine - * configuration. - * @details Relative value of maximum shift per one frame which will be ignored by - * stabilization (relative to the object size in the current frame). 
- * Value is a double and the defalut is 0.006 + * relative tolerant shift for the tracking stabilization attribute of + * the engine configuration. + * @details It is component of tolerant shift which will be ignored by + * stabilization process. (this value is relative to the object size in + * the current frame). Tolerant shift will be computed like R * S + C, + * where R - value set to MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT, + * S - area of object location on frame, C - constant value equal 1.3. + * Value is a double * * @since_tizen 3.0 * @see mv_engine_config_set_double_attribute() @@ -189,7 +192,7 @@ extern "C" { * speed of the tracking stabilization attribute of the engine * configuration. * @details Start speed will be used for image stabilization. Value is a double - * and the defalut is 2 + * and the defalut is 0.3 * @since_tizen 3.0 * @see mv_engine_config_set_double_attribute() * @see mv_engine_config_get_double_attribute() @@ -202,7 +205,7 @@ extern "C" { * configuration. * @details Acceleration will be used for image stabilization (relative to * the distance from current location to stabilized location). - * Value is double from 0 to 1 and the defalut is 0.001 + * Value is double from 0 to 1 and the defalut is 0.1 * * @since_tizen 3.0 * @see mv_engine_config_set_double_attribute() @@ -222,7 +225,7 @@ extern "C" { * @since_tizen 3.0 * @remarks Values @a source, @a engine_cfg, @a image_objects, and @a number_of_objects * are the same as values of input parameters of @ref mv_image_recognize(). - * @remarks @locations are valid only inside callback. + * @remarks @a locations are valid only inside callback. * @param [in] source The handle to the source image on which the * recognition was carried out * @param [in] engine_cfg The handle to the configuration of engine @@ -326,7 +329,7 @@ int mv_image_recognize( * but @a location will be NULL. 
* @remarks Handles @a image_tracking_model, @a source and @a engine_cfg the * same as input parameters of @ref mv_image_track(). - * @remarks @location pointer is valid only inside callback + * @remarks @a location pointer is valid only inside callback * @param [in] source The handle to the source image on which * the tracking was carried out * @param [in] image_tracking_model The handle to the image tracking model diff --git a/media-vision-config.json b/media-vision-config.json index 9185a1a..2f0b46c 100644 --- a/media-vision-config.json +++ b/media-vision-config.json @@ -93,22 +93,22 @@ { "name" : "MV_IMAGE_TRACKING_USE_STABLIZATION", "type" : "boolean", - "value" : false + "value" : true }, { "name" : "MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT", "type" : "double", - "value" : 0.006 + "value" : 0.00006 }, { "name" : "MV_IMAGE_TRACKING_STABLIZATION_SPEED", "type" : "double", - "value" : 2 + "value" : 0.3 }, { "name" : "MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION", "type" : "double", - "value" : 0.001 + "value" : 0.1 }, { "name" : "MV_FACE_RECOGNITION_MODEL_TYPE", diff --git a/mv_face/face/src/FaceRecognitionModel.cpp b/mv_face/face/src/FaceRecognitionModel.cpp index c18de17..1c0c55c 100644 --- a/mv_face/face/src/FaceRecognitionModel.cpp +++ b/mv_face/face/src/FaceRecognitionModel.cpp @@ -16,11 +16,11 @@ #include "FaceRecognitionModel.h" -#include - #include "mv_private.h" #include "mv_common.h" +#include + #include #include @@ -29,6 +29,10 @@ namespace MediaVision { namespace Face { namespace { + +unsigned int DefaultUnisizeWidth = 200; +unsigned int DefaultUnisizeHeight = 200; + int CopyOpenCVAlgorithmParameters(const cv::Ptr& srcAlg, cv::Ptr& dstAlg) { @@ -116,8 +120,8 @@ FaceRecognitionModelConfig::FaceRecognitionModelConfig() : mNeighbors(8), mGridX(8), mGridY(8), - mImgWidth(150), - mImgHeight(150) + mImgWidth(DefaultUnisizeWidth), + mImgHeight(DefaultUnisizeHeight) { ; /* NULL */ } @@ -189,18 +193,20 @@ FaceRecognitionModel::~FaceRecognitionModel() int 
FaceRecognitionModel::save(const std::string& fileName) { if (!m_recognizer.empty()) { - /* find directory */ - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); std::string filePath; - filePath += prefix_path; - filePath += fileName; + char *cPath = app_get_data_path(); + if (NULL == cPath) + filePath = fileName; + else + filePath = std::string(cPath) + fileName; + + std::string prefixPath = filePath.substr(0, filePath.find_last_of('/')); + LOGD("prefixPath: %s", prefixPath.c_str()); /* check the directory is available */ - std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/')); - if (access(prefix_path_check.c_str(), F_OK)) { - LOGE("Can't save recognition model. Path[%s] doesn't existed.", prefix_path_check.c_str()); + if (access(prefixPath.c_str(), F_OK)) { + LOGE("Can't save recognition model. Path[%s] doesn't existed.", prefixPath.c_str()); return MEDIA_VISION_ERROR_INVALID_PATH; } @@ -214,9 +220,13 @@ int FaceRecognitionModel::save(const std::string& fileName) switch (m_learnAlgorithmConfig.mModelType) { case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES: storage << "algorithm" << "Eigenfaces"; + storage << "resizeW" << m_learnAlgorithmConfig.mImgWidth; + storage << "resizeH" << m_learnAlgorithmConfig.mImgHeight; break; case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES: storage << "algorithm" << "Fisherfaces"; + storage << "resizeW" << m_learnAlgorithmConfig.mImgWidth; + storage << "resizeH" << m_learnAlgorithmConfig.mImgHeight; break; case MEDIA_VISION_FACE_MODEL_TYPE_LBPH: storage << "algorithm" << "LBPH"; @@ -240,16 +250,15 @@ int FaceRecognitionModel::save(const std::string& fileName) int FaceRecognitionModel::load(const std::string& fileName) { - /* find directory */ - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); - std::string filePath; - filePath += prefix_path; - filePath += fileName; + char *cPath = 
app_get_data_path(); + if (NULL == cPath) + filePath = fileName; + else + filePath = std::string(cPath) + fileName; if (access(filePath.c_str(), F_OK)) { - LOGE("Can't load face recognition model. File[%s] doesn't exist.", filePath.c_str()); + LOGE("Can't load face recognition model. File[%s] doesn't existed.", filePath.c_str()); return MEDIA_VISION_ERROR_INVALID_PATH; } @@ -274,6 +283,8 @@ int FaceRecognitionModel::load(const std::string& fileName) if (algName == "Eigenfaces") { tempRecognizer = cv::createEigenFaceRecognizer(); + storage["resizeW"] >> tempConfig.mImgWidth; + storage["resizeH"] >> tempConfig.mImgHeight; tempRecognizer->load(storage); tempConfig.mModelType = MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES; @@ -282,6 +293,8 @@ int FaceRecognitionModel::load(const std::string& fileName) ParseOpenCVLabels(tempRecognizer, tempLearnedLabels); } else if (algName == "Fisherfaces") { tempRecognizer = cv::createFisherFaceRecognizer(); + storage["resizeW"] >> tempConfig.mImgWidth; + storage["resizeH"] >> tempConfig.mImgHeight; tempRecognizer->load(storage); tempConfig.mModelType = MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES; @@ -404,7 +417,7 @@ int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config) cv::resize(it->second[sampleInd], resizedSample, cv::Size(config.mImgWidth, config.mImgHeight), - 1.0, 1.0, cv::INTER_CUBIC); + 0.0, 0.0, cv::INTER_CUBIC); samples.push_back(resizedSample); } } @@ -451,11 +464,32 @@ int FaceRecognitionModel::recognize(const cv::Mat& image, FaceRecognitionResults { if (!m_recognizer.empty() && m_canRecognize) { double absConf = 0.0; - m_recognizer->predict(image, results.mFaceLabel, absConf); - /* Normalize the absolute value of the confidence */ - absConf = exp(7.5 - (0.05 * absConf)); - results.mConfidence = absConf / (1 + absConf); - results.mIsRecognized = true; + if ((MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES == m_learnAlgorithmConfig.mModelType || + MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == 
m_learnAlgorithmConfig.mModelType) && + (image.cols != m_learnAlgorithmConfig.mImgWidth || + image.rows != m_learnAlgorithmConfig.mImgHeight)) { + cv::Mat predictionImg( + m_learnAlgorithmConfig.mImgWidth, + m_learnAlgorithmConfig.mImgHeight, + CV_8UC1); + cv::resize(image, predictionImg, predictionImg.size()); + m_recognizer->predict(predictionImg, results.mFaceLabel, absConf); + + if (-1 != results.mFaceLabel) { + results.mConfidence = 1.0; + results.mIsRecognized = true; + } else { + results.mConfidence = 0.0; + results.mIsRecognized = false; + } + } else { + m_recognizer->predict(image, results.mFaceLabel, absConf); + /* Normalize the absolute value of the confidence */ + absConf = exp(7.5 - (0.05 * absConf)); + results.mConfidence = absConf / (1 + absConf); + results.mIsRecognized = true; + } + results.mFaceLocation = cv::Rect(0, 0, image.cols, image.rows); } else { LOGE("Attempt to recognize faces with untrained model"); diff --git a/mv_face/face/src/FaceTrackingModel.cpp b/mv_face/face/src/FaceTrackingModel.cpp index 25fdcb8..5feeb2a 100644 --- a/mv_face/face/src/FaceTrackingModel.cpp +++ b/mv_face/face/src/FaceTrackingModel.cpp @@ -16,11 +16,11 @@ #include "FaceTrackingModel.h" -#include - #include "mv_private.h" #include "mv_common.h" +#include + #include namespace MediaVision { @@ -73,17 +73,19 @@ int FaceTrackingModel::save(const std::string& fileName) return MEDIA_VISION_ERROR_INVALID_OPERATION; } - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); - std::string filePath; - filePath += prefix_path; -filePath += fileName; + char *cPath = app_get_data_path(); + if (NULL == cPath) + filePath = fileName; + else + filePath = std::string(cPath) + fileName; + + std::string prefixPath = filePath.substr(0, filePath.find_last_of('/')); + LOGD("prefixPath: %s", prefixPath.c_str()); /* check the directory is available */ - std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/')); - if 
(access(prefix_path_check.c_str(), F_OK)) { - LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefix_path_check.c_str()); + if (access(prefixPath.c_str(), F_OK)) { + LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefixPath.c_str()); return MEDIA_VISION_ERROR_INVALID_PATH; } @@ -108,16 +110,15 @@ filePath += fileName; int FaceTrackingModel::load(const std::string& fileName) { - /* find directory */ - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); - std::string filePath; - filePath += prefix_path; - filePath += fileName; + char *cPath = app_get_data_path(); + if (NULL == cPath) + filePath = fileName; + else + filePath = std::string(cPath) + fileName; if (access(filePath.c_str(), F_OK)) { - LOGE("Can't load face tracking model. File[%s] doesn't exist.", filePath.c_str()); + LOGE("Can't load face tracking model. File[%s] doesn't existed.", filePath.c_str()); return MEDIA_VISION_ERROR_INVALID_PATH; } diff --git a/mv_image/image/CMakeLists.txt b/mv_image/image/CMakeLists.txt index 0269712..739d5f2 100644 --- a/mv_image/image/CMakeLists.txt +++ b/mv_image/image/CMakeLists.txt @@ -11,8 +11,8 @@ include_directories("${INC_DIR}") include_directories("${PROJECT_SOURCE_DIR}/include") include_directories("${PROJECT_SOURCE_DIR}/src") -file(GLOB MV_IMAGE_INC_LIST "${PROJECT_SOURCE_DIR}/include/*.h") -file(GLOB MV_IMAGE_SRC_LIST "${PROJECT_SOURCE_DIR}/src/*.cpp") +file(GLOB_RECURSE MV_IMAGE_INC_LIST "${PROJECT_SOURCE_DIR}/include/*.h") +file(GLOB_RECURSE MV_IMAGE_SRC_LIST "${PROJECT_SOURCE_DIR}/src/*.cpp") find_package(OpenCV REQUIRED core highgui imgproc objdetect features2d calib3d) if(NOT OpenCV_FOUND) diff --git a/mv_image/image/include/Features/BasicExtractorFactory.h b/mv_image/image/include/Features/BasicExtractorFactory.h new file mode 100644 index 0000000..bbfc824 --- /dev/null +++ b/mv_image/image/include/Features/BasicExtractorFactory.h @@ -0,0 +1,40 @@ +/** + * Copyright (c) 2015 
Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __IMAGEFEATURES_BASICEXTRACTORFACTORY_H__ +#define __IMAGEFEATURES_BASICEXTRACTORFACTORY_H__ + +#include "Features/FeatureExtractorFactory.h" + +namespace MediaVision { +namespace Image { + +class BasicExtractorFactory : public FeatureExtractorFactory { +public: + BasicExtractorFactory(KeypointType keypointsType, DescriptorType descType); + + virtual cv::Ptr buildFeatureExtractor(); + +private: + KeypointType m_kpType; + + DescriptorType m_descType; +}; + +} /* Image */ +} /* MediaVision */ + +#endif /* __IMAGEFEATURES_BASICEXTRACTORFACTORY_H__ */ diff --git a/mv_image/image/include/Features/FeatureExtractor.h b/mv_image/image/include/Features/FeatureExtractor.h new file mode 100644 index 0000000..ae55503 --- /dev/null +++ b/mv_image/image/include/Features/FeatureExtractor.h @@ -0,0 +1,79 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __IMAGEFEATURES_FEATUREEXTRACTOR_H__ +#define __IMAGEFEATURES_FEATUREEXTRACTOR_H__ + +#include "ImageConfig.h" + +#include "Features/FeaturePack.h" + +namespace cv { +class FeatureDetector; +class DescriptorExtractor; +} + +namespace MediaVision { +namespace Image { +/** + * @class FeatureExtractor + * @brief Class contains functionality to extract features from an image + * + * @since_tizen 3.0 + */ +class FeatureExtractor { +public: + FeatureExtractor(); + + void setFeatureDetector( + const cv::Ptr detector, + KeypointType keypointType); + + void setDescriptorExtractor( + cv::Ptr extractor, + DescriptorType descriptorType); + + void setRecognitionRateMetric( + float (*computeRecognitionRate)( + const cv::Mat&, + const std::vector&)); + + bool extract( + const cv::Mat& image, + FeaturePack& result, + const std::vector& roi = std::vector()); + +private: + static const cv::Size MinSize; + +private: + KeypointType m_kpType; + + cv::Ptr m_detector; + + DescriptorType m_descType; + + cv::Ptr m_extractor; + + float (*m_computeRecognitionRate)( + const cv::Mat&, + const std::vector&); +}; + +} /* Image */ +} /* MediaVision */ + +#endif /* __IMAGEFEATURES_FEATUREEXTRACTOR_H__ */ diff --git a/mv_image/image/include/Features/FeatureExtractorFactory.h b/mv_image/image/include/Features/FeatureExtractorFactory.h new file mode 100644 index 0000000..d421478 --- /dev/null +++ b/mv_image/image/include/Features/FeatureExtractorFactory.h @@ -0,0 +1,37 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __IMAGEFEATURES_FEATUREEXTRACTORFACTORY_H__ +#define __IMAGEFEATURES_FEATUREEXTRACTORFACTORY_H__ + +#include "Features/FeatureExtractor.h" + +#include + +namespace MediaVision { +namespace Image { + +class FeatureExtractorFactory { +public: + virtual ~FeatureExtractorFactory(); + + virtual cv::Ptr buildFeatureExtractor() = 0; +}; + +} /* Image */ +} /* MediaVision */ + +#endif /* __IMAGEFEATURES_FEATUREEXTRACTORFACTORY_H__ */ diff --git a/mv_image/image/include/Features/FeatureMatcher.h b/mv_image/image/include/Features/FeatureMatcher.h new file mode 100644 index 0000000..f3c2463 --- /dev/null +++ b/mv_image/image/include/Features/FeatureMatcher.h @@ -0,0 +1,73 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __IMAGEFEATURES_FEATUREMATCHER_H__ +#define __IMAGEFEATURES_FEATUREMATCHER_H__ + +#include "Features/FeaturePack.h" + +#include + +namespace MediaVision { +namespace Image { + +class FeatureMatcher { +public: + enum MatchError{ + InvalidFeaturePackFrom, + InvalidFeaturePackTo, + DisparateTypes, + MatchesNotFound, + Success + }; + +public: + FeatureMatcher( + float affectingPart = 1.f, + float tolerantError = 0.f, + size_t minimumMatchesNumber = 0u); + + MatchError match( + const FeaturePack& from, + const FeaturePack& to, + cv::Mat& homophraphyMatrix) const; + + float getAffectingPart() const; + + void setAffectingPart(float affectingPart); + + float getTolerantError() const; + + void setTolerantError(float tolerantError); + + size_t getMinimumMatchesNumber() const; + + void setMinimumMatchesNumber(size_t minimumMatchesNumber); + +private: + cv::BFMatcher m_matcher; + + float m_affectingPart; + + float m_tolerantError; + + size_t m_minimumMatchesNumber; +}; + +} /* Image */ +} /* MediaVision */ + +#endif /* __IMAGEFEATURES_FEATUREMATCHER_H__ */ diff --git a/mv_image/image/include/Features/FeaturePack.h b/mv_image/image/include/Features/FeaturePack.h new file mode 100644 index 0000000..a100ba6 --- /dev/null +++ b/mv_image/image/include/Features/FeaturePack.h @@ -0,0 +1,79 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __IMAGEFEATURES_FEATUREPACK_H__ +#define __IMAGEFEATURES_FEATUREPACK_H__ + +#include "ImageConfig.h" + +#include +#include + +namespace cv { +class KeyPoint; +} + +namespace MediaVision { +namespace Image { +/** + * @class FeaturePack + * @brief This class contains information about features and can be used for + * recognition. + * + * @since_tizen 3.0 + */ +class FeaturePack { +public: + /** + * @brief @ref FeaturePack default constructor. + * + * @since_tizen 3.0 + */ + FeaturePack(); + + /** + * @brief @ref FeaturePack copy constructor. + * @details Creates copy of @ref FeaturePack + * + * @since_tizen 3.0 + * @param [in] copy @ref FeaturePack which will be copied + */ + FeaturePack(const FeaturePack& copy); + + /** + * @brief @ref FeaturePack copy assignment operator. + * @details Fills the information based on the @a copy + * + * @since_tizen 3.0 + * @param [in] copy @ref FeaturePack which will be copied + */ + FeaturePack& operator= (const FeaturePack& copy); + + KeypointType m_keypointsType; + + std::vector m_objectKeypoints; + + DescriptorType m_descriptorsType; + + cv::Mat m_objectDescriptors; + + float m_recognitionRate; +}; + +} /* Image */ +} /* MediaVision */ + +#endif /* __IMAGEFEATURES_FEATUREPACK_H__ */ diff --git a/mv_image/image/include/Features/ORBExtractorFactory.h b/mv_image/image/include/Features/ORBExtractorFactory.h new file mode 100644 index 0000000..50f6ad6 --- /dev/null +++ b/mv_image/image/include/Features/ORBExtractorFactory.h @@ -0,0 +1,55 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __IMAGEFEATURES_ORBEXTRACTORFACTORY_H__ +#define __IMAGEFEATURES_ORBEXTRACTORFACTORY_H__ + +#include "Features/FeatureExtractorFactory.h" + +namespace MediaVision { +namespace Image { + +class ORBExtractorFactory : public FeatureExtractorFactory { +public: + ORBExtractorFactory( + float scaleFactor = 1.2f, + size_t maximumFeaturesNumber = 800u); + + virtual cv::Ptr buildFeatureExtractor(); + + float getScaleFactor() const; + + void setScaleFactor(float scaleFactor); + + size_t getMaximumFeaturesNumber() const; + + void setMaximumFeaturesNumber(size_t maximumFeaturesNumber); + +private: + static float computeRecognitionRate( + const cv::Mat&, + const std::vector&); + + float m_scaleFactor; /**< Recognition scale factor for the ORB detector. */ + + size_t m_maximumFeaturesNumber; /**< Maximum number of features, which will + be extracted from object image. */ +}; + +} /* Image */ +} /* MediaVision */ + +#endif /* __IMAGEFEATURES_ORBEXTRACTORFACTORY_H__ */ diff --git a/mv_image/image/include/ImageConfig.h b/mv_image/image/include/ImageConfig.h index 8f1b348..6caa166 100644 --- a/mv_image/image/include/ImageConfig.h +++ b/mv_image/image/include/ImageConfig.h @@ -14,37 +14,77 @@ * limitations under the License. */ -#ifndef __IMAGEUTIL_H__ -#define __IMAGEUTIL_H__ +#ifndef __IMAGECONFIG_H__ +#define __IMAGECONFIG_H__ -#include +#include /** - * @file ImageUtil.h - * @brief This file contains Image Module utility. + * @file ImageConfig.h + * @brief This file contains Image Module Configuration. 
*/ namespace MediaVision { namespace Image { /** + * @brief Keypoint's type enumeration. + * + * @since_tizen 3.0 + */ +enum KeypointType { + KT_INVALID = -1, /**< Undefined keypoint's type */ + KT_ORB, /**< Oriented FAST keypoint's type */ + KT_GFTT, /**< Keypoint's type of good features to track */ + KT_SIZE /**< Number of keypoint's types */ +}; + +const std::string KeypointNames[KT_SIZE] = { + [KT_ORB] = "ORB", + [KT_GFTT] = "GFTT" +}; + +/* + * @brief Descriptor's type enumeration. + * + * @since_tizen 3.0 + */ +enum DescriptorType { + DT_INVALID = -1, /**< Undefined descriptor's type */ + DT_ORB, /**< Rotated BRIEF descriptor's type */ + DT_BRIEF, /**< Descriptor's type of binary robust independent + elementary features */ + DT_SIZE /**< Number of descriptor's types */ +}; + +const std::string DescriptorNames[DT_SIZE] = { + [DT_ORB] = "ORB", + [DT_BRIEF] = "BRIEF" +}; + +/** * @brief Contains parameters for features extracting from image objects. * * @since_tizen 3.0 */ struct FeaturesExtractingParams { - FeaturesExtractingParams( - double scaleFactor, - int maximumFeaturesNumber); FeaturesExtractingParams(); - double mScaleFactor; /**< Recognition scale factor for the ORB detector. */ + KeypointType mKeypointType; /**< Keypoint's type. */ + + DescriptorType mDescriptorType; /**< Descriptor's type. */ - int mMaximumFeaturesNumber; /**< Maximum number of features, which will be - extracted from object image. */ + union { /**< Extracting parameters for concretes algorithms */ + struct { /**< Extracting parameters for ORB algorithm. */ + double mScaleFactor; /**< Recognition scale factor for the ORB detector. */ + int mMaximumFeaturesNumber; /**< Maximum number of features, + which will be extracted from object image.*/ + } ORB; + }; }; /** + * @class RecognitionParams * @brief Contains parameters for image objects recognition. 
* * @since_tizen 3.0 @@ -53,7 +93,7 @@ struct RecognitionParams { RecognitionParams( int minMatchesNumber, double requiredMatchesPart, - double allowableMatchesPartError); + double tolerantMatchesPartError); RecognitionParams(); @@ -65,10 +105,11 @@ struct RecognitionParams { result in unsustainable behavior, but effect of object overlapping will be reduced. Value can be from 0 to 1.*/ - double mAllowableMatchesPartError; /**< Allowable error of matches number. */ + double mTolerantMatchesPartError; /**< Tolerant error of matches number. */ }; /** + * @class StabilizationParams * @brief Contains parameters for contour stabilization during tracking of image * objects. * @@ -76,21 +117,29 @@ struct RecognitionParams { */ struct StabilizationParams { StabilizationParams( + bool isEnabled, int historyAmount, - double allowableShift, + double tolerantShift, + double tolerantShiftExtra, double stabilizationSpeed, double stabilizationAcceleration); StabilizationParams(); + bool mIsEnabled; /**< Flag that specifies whether to use the stabilization. */ + int mHistoryAmount; /**< Number of previous recognition results, which will influence the stabilization. */ - double mAllowableShift; /**< Relative value of maximum shift per one frame, + double mTolerantShift; /**< Relative value of maximum shift per one frame, which will be ignored by stabilization. It is relative to the object size in the current frame. */ + double mTolerantShiftExtra; /**< Constant value which will be added to + maximum shift per one frame, + which will be ignored by stabilization. */ + double mStabilizationSpeed; /**< Start speed with which the object will be stabilized. */ @@ -102,6 +151,7 @@ struct StabilizationParams { }; /** + * @calss TrackingParams * @brief Contains parameters for image objects tracking. 
* * @since_tizen 3.0 @@ -133,4 +183,4 @@ struct TrackingParams { } /* Image */ } /* MediaVision */ -#endif /* __IMAGEUTIL_H__ */ +#endif /* __IMAGECONFIG_H__ */ diff --git a/mv_image/image/include/ImageMathUtil.h b/mv_image/image/include/ImageMathUtil.h index f839ac9..41cdb0c 100644 --- a/mv_image/image/include/ImageMathUtil.h +++ b/mv_image/image/include/ImageMathUtil.h @@ -14,13 +14,13 @@ * limitations under the License. */ -#ifndef __MATHUTIL_H__ -#define __MATHUTIL_H__ +#ifndef __IMAGEMATHUTIL_H__ +#define __IMAGEMATHUTIL_H__ #include /** - * @file MathUtil.h + * @file ImageMathUtil.h * @brief This file contains math utility for Image Module. */ @@ -69,7 +69,41 @@ float getTriangleArea( float getQuadrangleArea( const cv::Point2f points[NumberOfQuadrangleCorners]); +/** + * @brief Checks point on the accessory region. + * + * @since_tizen 3.0 + * @param [in] point Point which will be checked on the accessory region + * @param [in] region Contour of region + * @return true if point is inside the region, otherwise return false + */ +bool checkAccessory( + const cv::Point2f& point, + const std::vector& region); + +/** + * @brief Cuts a rectangle according to the maximum size. + * @details From the rectangle will remain only the part which is inside the + * rectangle from {0,0} to @a maxSize + * + * @since_tizen 3.0 + * @param [in] rectange Rectangle which will be cut + * @param [in] maxSize Maximum values of needed rectangle + */ +void catRect(cv::Rect& rectange, const cv::Size& maxSize); + +/** + * @brief Resizes a region. 
+ * + * @since_tizen 3.0 + * @param [in] roi Contour of region which will be resized + * @param [in] scalingCoefficient Scaling coefficient + */ +std::vector contourResize( + const std::vector& roi, + float scalingCoefficient); + } /* Image */ } /* MediaVision */ -#endif /* __MATHUTIL_H__ */ +#endif /* __IMAGEMATHUTIL_H__ */ diff --git a/mv_image/image/include/ImageTracker.h b/mv_image/image/include/ImageTracker.h deleted file mode 100644 index ea577f4..0000000 --- a/mv_image/image/include/ImageTracker.h +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __IMAGETRACKER_H__ -#define __IMAGETRACKER_H__ - -#include "ImageConfig.h" - -#include - -/** - * @file ImageTracker.h - * @brief This file contains functionality for image object tracking. - */ - -namespace MediaVision { -namespace Image { -class ImageRecognizer; -class ImageTrackingModel; -/** - * @class ImageTracker - * @brief This class contains functionality for image object tracking. 
- * - * @since_tizen 3.0 - */ -class ImageTracker { -private: - struct RecognitionInfo { - cv::Mat mFrame; - - RecognitionParams mRecognitionParams; - - FeaturesExtractingParams mSceneFeaturesExtractingParams; - - ImageTrackingModel *mpTarget; - }; - - static void *recognitionThreadFunc(void *recognitionInfo); - -public: - /** - * @brief @ref ImageTracker constructor based on tracking algorithm - * parameters. - * - * @since_tizen 3.0 - * @param [in] trackingParams Parameters for image objects tracking - */ - ImageTracker(const TrackingParams& trackingParams); - - /** - * @brief Tracks the @a target for the video stream consisting of frames. - * - * @since_tizen 3.0 - * @remarks Call this function alternately for each frame - * @param [in] frame Current frame of the video stream - * @param [in,out] target @ref ImageTrackingModel, which will be tracked - */ - void track(const cv::Mat& frame, ImageTrackingModel& target); - -private: - void trackDetectedObject( - const cv::Mat& frame, - ImageTrackingModel& target); - - void trackUndetectedObject( - const cv::Mat& frame, - ImageTrackingModel& target); - - cv::Rect computeExpectedArea( - const ImageTrackingModel& target, - const cv::Size& frameSize); - -private: - TrackingParams m_trackingParams; -}; - -} /* Image */ -} /* MediaVision */ - -#endif /* __IMAGETRACKER_H__ */ diff --git a/mv_image/image/include/ImageObject.h b/mv_image/image/include/Recognition/ImageObject.h similarity index 82% rename from mv_image/image/include/ImageObject.h rename to mv_image/image/include/Recognition/ImageObject.h index 4e33e55..e8bc67a 100644 --- a/mv_image/image/include/ImageObject.h +++ b/mv_image/image/include/Recognition/ImageObject.h @@ -19,6 +19,8 @@ #include "ImageConfig.h" +#include "Features/FeaturePack.h" + #include /** @@ -88,31 +90,15 @@ public: * new @ref ImageObject * * @since_tizen 3.0 - * @param [in] image The image for which instance of @ref ImageObject - * will be created - * @param [in] params Features extracting 
parameters - */ - void fill(const cv::Mat& image, const FeaturesExtractingParams& params); - - /** - * @brief Fills @ref ImageObject class based on image. - * @details Detects keypoints and extracts features from image and creates - * new @ref ImageObject - * - * @since_tizen 3.0 * @param [in] image The image for which instance of @ref * ImageObject will be created - * @param [in] boundingBox Bounding box of the object being analyzed in - * the @a image * @param [in] params Features extracting parameters - * @return @a true on success, otherwise a @a false value - * @retval true Successful - * @retval false Invalid ROI (bounding box) + * @param [in] roi Region of interested object on the @a image */ - bool fill( - const cv::Mat& image, - const cv::Rect& boundingBox, - const FeaturesExtractingParams& params); + void fill( + const cv::Mat& image, + const FeaturesExtractingParams& params, + const std::vector& roi = std::vector()); /** * @brief Gets a value that determines how well an @ref ImageObject can be recognized. @@ -138,6 +124,14 @@ public: bool isEmpty() const; /** + * @brief Sets a contour for the image object. + * + * @since_tizen 3.0 + * @param [in] contour The contour which will be used with @ref ImageObject + */ + void setContour(const std::vector& contour); + + /** * @brief Sets a label for the image object. 
* * @since_tizen 3.0 @@ -173,17 +167,17 @@ public: int load(const char *fileName); private: - static const int MinWidth = 5; - static const int MinHeight = 5; - -private: void extractFeatures( const cv::Mat& image, - const FeaturesExtractingParams& params); - - void computeRecognitionRate(const cv::Mat& image); + const FeaturesExtractingParams& params, + const std::vector& roi); private: + + FeaturesExtractingParams m_featureExtractingParams; + + FeaturePack m_features; + bool m_isEmpty; bool m_isLabeled; @@ -192,12 +186,6 @@ private: std::vector m_boundingContour; - std::vector m_objectKeypoints; - - cv::Mat m_objectDescriptors; - - float m_recognitionRate; - friend class ImageRecognizer; friend std::ostream& operator << (std::ostream& os, const ImageObject& obj); diff --git a/mv_image/image/include/ImageRecognizer.h b/mv_image/image/include/Recognition/ImageRecognizer.h similarity index 81% rename from mv_image/image/include/ImageRecognizer.h rename to mv_image/image/include/Recognition/ImageRecognizer.h index 5117a27..37d7e23 100644 --- a/mv_image/image/include/ImageRecognizer.h +++ b/mv_image/image/include/Recognition/ImageRecognizer.h @@ -19,7 +19,8 @@ #include "ImageMathUtil.h" #include "ImageConfig.h" -#include "ImageObject.h" + +#include "Recognition/ImageObject.h" #include @@ -39,17 +40,7 @@ namespace Image { class ImageRecognizer { public: /** - * @brief @ref ImageRecognizer constructor based on scene image. - * - * @since_tizen 3.0 - * @param [in] sceneImage The scene in which image objects will be recognized - * @param [in] params Scene features extracting parameters - */ - ImageRecognizer(const cv::Mat& sceneImage, - const FeaturesExtractingParams& params); - - /** - * @brief @ref ImageRecognizer constructor based on thes scene @ref ImageObject. + * @brief @ref ImageRecognizer constructor based on the scene @ref ImageObject. 
* * @since_tizen 3.0 * @param [in] scene The scene for which the objects will be recognized by @@ -70,13 +61,17 @@ public: * @since_tizen 3.0 * @param [in] target @ref ImageObject, which will be recognized * @param [in] params Recognition parameters - * @param [out] contour The result contour of @a target object on the scene + * @param [out] contour The result contour of @a target object on the + * scene + * @param [out] ignoreFactor Scaling factor of area near the contour + * of object which will be ignored * @return true if object is found on the scene, otherwise return false */ bool recognize( const ImageObject& target, const RecognitionParams& params, - std::vector& contour) const; + std::vector& contour, + float ignoreFactor = 0.f) const; private: ImageRecognizer(); @@ -84,7 +79,8 @@ private: bool findHomophraphyMatrix( const ImageObject& target, const RecognitionParams& params, - cv::Mat& homophraphyMatrix) const; + cv::Mat& homophraphyMatrix, + float ignoreFactor) const; size_t matchesSelection( std::vector& examples, @@ -98,6 +94,7 @@ private: const cv::Point2f corners[NumberOfQuadrangleCorners]); private: + /* TODO: Replace to cv::Ptr */ ImageObject m_scene; cv::BFMatcher m_matcher; diff --git a/mv_image/image/include/Tracking/AsyncTracker.h b/mv_image/image/include/Tracking/AsyncTracker.h new file mode 100644 index 0000000..890f655 --- /dev/null +++ b/mv_image/image/include/Tracking/AsyncTracker.h @@ -0,0 +1,131 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __IMAGETRACKING_ASYNCTRACKER_H__ +#define __IMAGETRACKING_ASYNCTRACKER_H__ + +#include "Tracking/ObjectTracker.h" + +#include + +namespace MediaVision { +namespace Image { +/** + * @class AsyncTracker + * @brief Tracker is based on the another tracker and extends the + * functionality for asynchronous use + * + * @since_tizen 3.0 + */ +class AsyncTracker : public ObjectTracker { +public: + /** + * @brief @ref AsyncTracker copy constructor. + * + * @since_tizen 3.0 + * @param [in] copy @ref AsyncTracker which will be copied + */ + AsyncTracker(const AsyncTracker& copy); + + /** + * @brief @ref AsyncTracker constructor based on the another tracker. + * + * @since_tizen 3.0 + * @remarks You guarantee that frame will not be changed while tracking is + * working if @a copyingPolicy value equal false. + * @param [in] baseTracker Tracker which will be aggregated + * @param [in] copyingPolicy Flag that determines whether the frame is + * copied inside @ref track() function + */ + AsyncTracker(cv::Ptr baseTracker, bool copyingPolicy = true); + + /** + * @brief @ref AsyncTracker destructor + * + * @since_tizen 3.0 + */ + virtual ~AsyncTracker(); + + /** + * @brief Tracks the target for the video stream consisting of frames. + * + * @since_tizen 3.0 + * @remarks Call this function alternately for each frame + * @param [in] frame Current frame of the video stream + * @param [out] result Result contour + * @return true if object is tracked, otherwise return false + */ + virtual bool track(const cv::Mat& frame, std::vector& result); + + /** + * @brief Provides the current location of a target. 
+ * + * @since_tizen 3.0 + * @param [in] location Current location of a target + */ + virtual void reinforcement(const std::vector& location); + + /* + * @brief Creates a copy of itself + * + * @since_tizen 3.0 + * @return clone + */ + virtual cv::Ptr clone() const; + + bool wait(); + + bool isRun(); + + bool isUpdated(std::vector& result); + + bool getResult(std::vector& location); + +private: + AsyncTracker& operator= (const AsyncTracker& copy); + + bool baseTrack(std::vector& result); + + static void *asyncTrack(void *data); + +private: + cv::Ptr m_baseTracker; + + cv::Mat m_frame; + + std::vector m_result; + + bool m_isRun; + + bool m_isUpdated; + + bool m_copyingPolicy; + + pthread_t m_thread; + + mutable pthread_mutex_t m_globalGuard; + + mutable pthread_spinlock_t m_resultGuard; + + mutable pthread_spinlock_t m_isRunGuard; + + mutable pthread_spinlock_t m_isUpdatedGuard; +}; + +} /* Image */ +} /* MediaVision */ + +#endif /* __IMAGETRACKING_ASYNCTRACKER_H__ */ diff --git a/mv_image/image/include/Tracking/CascadeTracker.h b/mv_image/image/include/Tracking/CascadeTracker.h new file mode 100644 index 0000000..4ac0ec3 --- /dev/null +++ b/mv_image/image/include/Tracking/CascadeTracker.h @@ -0,0 +1,132 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __IMAGETRACKING_CASCADETRACKER_H__ +#define __IMAGETRACKING_CASCADETRACKER_H__ + +#include "Tracking/ObjectTracker.h" + +#include + +#include + +namespace MediaVision { +namespace Image { +/** + * @class CascadeTracker + * @brief Tracker is based on the another trackers and combines the results. + * + * @since_tizen 3.0 + */ +class CascadeTracker : public ObjectTracker { +public: + /** + * @brief @ref CascadeTracker default constructor + * + * @since_tizen 3.0 + * @param [in] minimumArea Minimum detected area + */ + CascadeTracker(float minimumArea = 2000); + + /** + * @brief @ref CascadeTracker copy constructor. + * + * @since_tizen 3.0 + * @param [in] copy @ref CascadeTracker which will be copied + */ + CascadeTracker(const CascadeTracker& copy); + + /** + * @brief @ref CascadeTracker destructor + * + * @since_tizen 3.0 + */ + virtual ~CascadeTracker(); + + /** + * @brief Tracks the target for the video stream consisting of frames. + * + * @since_tizen 3.0 + * @remarks Call this function alternately for each frame + * @param [in] frame Current frame of the video stream + * @param [out] result Result contour + * @return true if object is tracked, otherwise return false + */ + virtual bool track(const cv::Mat& frame, std::vector& result); + + /** + * @brief Provides the current location of a target. + * + * @since_tizen 3.0 + * @param [in] location Current location of a target + */ + virtual void reinforcement(const std::vector& location); + + /* + * @brief Creates a copy of itself + * + * @since_tizen 3.0 + * @return clone + */ + virtual cv::Ptr clone() const; + + /** + * @brief Assignment operator. 
+ * + * @since_tizen 3.0 + * @param [in] copy @ref CascadeTracker which will be copied + * @return itself + */ + virtual CascadeTracker& operator=(const CascadeTracker& copy); + + bool enableTracker(cv::Ptr tracker, float priority); + + bool disableTracker(cv::Ptr tracker); + +private: + void internalReinforcement(); + + bool mergeResults(std::vector& result) const; + +private: + struct TrackerInfo { + TrackerInfo(cv::Ptr, float); + + bool operator<(const TrackerInfo&) const; + + bool operator==(const TrackerInfo&) const; + + bool operator!=(const TrackerInfo&) const; + + cv::Ptr mTracker; + + float mPriority; + + mutable std::vector mResult; + }; + + /* don't use m_trackers.find() because + operator==() and operator<() are independent + TODO: Fix it with aggregator or something like that */ + std::set m_trackers; + + float m_minimumArea; +}; + +} /* Image */ +} /* MediaVision */ + +#endif /* __IMAGETRACKING_CASCADETRACKER_H__ */ diff --git a/mv_image/image/include/Tracking/FeatureSubstitutionTracker.h b/mv_image/image/include/Tracking/FeatureSubstitutionTracker.h new file mode 100644 index 0000000..010ca89 --- /dev/null +++ b/mv_image/image/include/Tracking/FeatureSubstitutionTracker.h @@ -0,0 +1,108 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __IMAGETRACKING_FEATURESUBSTITUTIONTRACKER_H__ +#define __IMAGETRACKING_FEATURESUBSTITUTIONTRACKER_H__ + +#include "Tracking/ObjectTracker.h" + +#include "Recognition/ImageObject.h" + +namespace MediaVision { +namespace Image { +/** + * @class FeatureSubstitutionTracker + * @brief Tracker uses feature substitution. + * + * @since_tizen 3.0 + */ +class FeatureSubstitutionTracker : public ObjectTracker { +public: + /** + * @brief @ref FeatureSubstitutionTracker constructor. + * + * @since_tizen 3.0 + * @param [in] featuresExtractingParams Parameters of feature extracting + * @param [in] recognitionParams Parameters of recognition + * @param [in] expectedOffset Expected offset + * @param [in] sceneScalingFactor Additional area around target + * contour on the frame which will + * be use for recognition + * (recommended value is 1 - 1.5) + * @param [in] objectScalingFactor Additional area near the contour + * of object which will be ignored by + * recognition + * (recommended value is 0.5 - 1) + */ + FeatureSubstitutionTracker( + const FeaturesExtractingParams& featuresExtractingParams, + const RecognitionParams& recognitionParams, + float expectedOffset, + float sceneScalingFactor = 1.2f, + float objectScalingFactor = 0.85f); + + /** + * @brief Tracks the target for the video stream consisting of frames. + * + * @since_tizen 3.0 + * @remarks Call this function alternately for each frame + * @param [in] frame Current frame of the video stream + * @param [out] result Result contour + * @return true if object is tracked, otherwise return false + */ + virtual bool track(const cv::Mat& frame, std::vector& result); + + /** + * @brief Provides the current location of a target. 
+ * + * @since_tizen 3.0 + * @param [in] location Current location of a target + */ + virtual void reinforcement(const std::vector& location); + + /* + * @brief Creates a copy of itself + * + * @since_tizen 3.0 + * @return clone + */ + virtual cv::Ptr clone() const; + +private: + std::vector computeExpectedArea(); + +private: + bool m_isInit; + + cv::Ptr m_target; + + std::vector m_location; + + FeaturesExtractingParams m_featureExtractingParams; + + RecognitionParams m_recogParams; + + float m_expectedOffset; + + float m_sceneScalingFactor; + + float m_objectScalingFactor; +}; + +} /* Image */ +} /* MediaVision */ + +#endif /* __IMAGETRACKING_FEATURESUBSTITUTIONTRACKER_H__ */ diff --git a/mv_image/image/include/ImageContourStabilizator.h b/mv_image/image/include/Tracking/ImageContourStabilizator.h similarity index 68% rename from mv_image/image/include/ImageContourStabilizator.h rename to mv_image/image/include/Tracking/ImageContourStabilizator.h index 4d159cd..0d1e295 100644 --- a/mv_image/image/include/ImageContourStabilizator.h +++ b/mv_image/image/include/Tracking/ImageContourStabilizator.h @@ -39,6 +39,20 @@ namespace Image { class ImageContourStabilizator { public: /** + * @brief Enumeration for stabilization return value + * + * @since_tizen 3.0 + */ + enum StabilizationError { + Successfully, /**< Contour is stabilized. */ + TooShortMovingHistory, /**< Too short moving history, it's normal + behavior, you can continue to call + stabilization in order to accumulate it. */ + InvalidSettings, /**< Invalid settings. */ + UnsupportedContourType /**< Unsupported contour type. */ + }; + + /** * @brief @ref ImageContourStabilizator default constructor. 
* * @since_tizen 3.0 @@ -52,11 +66,15 @@ public: * @remarks Call this function alternately for each contour from sequence * @param [in,out] contour @ref contour, which will be stabilized * @param [in] params configuration parameters - * @return true if contour is stabilized, otherwise return false + * @return Successfully if contour is stabilized, otherwise return error + * @retval #Successfully Contour is stabilized + * @retval #TooShortMovingHistory Too short moving history + * @retval #InvalidSettings Invalid settings + * @retval #UnsupportedContourType Unsupported contour type */ - bool stabilize( - std::vector& contour, - const StabilizationParams& params); + StabilizationError stabilize( + std::vector& contour, + const StabilizationParams& params); /** * @brief Resets stabilization process. @@ -67,10 +85,14 @@ public: void reset(void); private: + bool updateSettings(const StabilizationParams& params); + std::vector computeStabilizedQuadrangleContour(void); private: - static const size_t MovingHistoryAmount = 3u; + float m_tolerantShift; + + float m_tolerantShiftExtra; std::vector m_speeds; @@ -80,6 +102,8 @@ private: std::vector m_lastStabilizedContour; + size_t m_historyAmount; + size_t m_currentHistoryAmount; int m_tempContourIndex; diff --git a/mv_image/image/include/ImageTrackingModel.h b/mv_image/image/include/Tracking/ImageTrackingModel.h similarity index 60% rename from mv_image/image/include/ImageTrackingModel.h rename to mv_image/image/include/Tracking/ImageTrackingModel.h index 2c07b99..5a97174 100644 --- a/mv_image/image/include/ImageTrackingModel.h +++ b/mv_image/image/include/Tracking/ImageTrackingModel.h @@ -17,16 +17,10 @@ #ifndef __IMAGETRACKINGMODEL_H__ #define __IMAGETRACKINGMODEL_H__ -#include "ImageObject.h" +#include "Recognition/ImageObject.h" -#include "ImageContourStabilizator.h" - -#include - -#include - -#include -#include +#include "Tracking/ObjectTracker.h" +#include "Tracking/ImageContourStabilizator.h" /** * @file 
ImageTrackingModel.h @@ -35,7 +29,6 @@ namespace MediaVision { namespace Image { -class ImageContourStabilizator; /** * @class ImageTrackingModel * @brief This class contains the tracking functionality for image objects. @@ -43,26 +36,6 @@ class ImageContourStabilizator; * @since_tizen 3.0 */ class ImageTrackingModel { -private: - /** - * @brief @ref ImageTrackingModel state enumeration. - * - * @since_tizen 3.0 - */ - enum State { - Invalid, /**< Invalid tracking model can not be tracked. Set not - empty image object as target by using function - @ref setTarget() to make tracking model valid, also - you can load valid tracking model by using @ref load() */ - Undetected, /**< The object was not recognized on the last frame. Ready - for further recognition */ - Appeared, /**< The object was recognized on one of the last frames - after its absence */ - Tracked, /**< The object was recognized on the last frame. Its - location can be obtained by calling method getLocation() */ - InProcess /**< The object is in the recognition process */ - }; - public: /** * @brief @ref ImageTrackingModel default constructor @@ -72,15 +45,6 @@ public: ImageTrackingModel(); /** - * @brief @ref ImageTrackingModel constructor based on tracking algorithm - * parameters. - * - * @since_tizen 3.0 - * @param[in] recognitionObject @ref ImageObject which will be tracked - */ - ImageTrackingModel(const ImageObject& recognitionObject); - - /** * @brief @ref ImageTrackingModel copy constructor. * @details Creates copy of @ref ImageTrackingModel * @@ -90,13 +54,6 @@ public: ImageTrackingModel(const ImageTrackingModel& copy); /** - * @brief @ref ImageTrackingModel destructor. - * - * @since_tizen 3.0 - */ - ~ImageTrackingModel(); - - /** * @brief Sets @ref ImageObject as target which will be tracked. * * @since_tizen 3.0 @@ -118,6 +75,17 @@ public: bool isValid() const; /** + * @brief Tracks the target for the video stream consisting of frames. 
+ * + * @since_tizen 3.0 + * @remarks Call this function alternately for each frame + * @param [in] frame Current frame of the video stream + * @param [out] result Result contour + * @return true if target is tracked, otherwise return false + */ + bool track(const cv::Mat& frame, std::vector& result); + + /** * @brief Refreshes tracking model. * * @since_tizen 3.0 @@ -149,52 +117,10 @@ public: * @since_tizen 3.0 * @param [in] filepath File name from which will be loaded a model * @return @a 0 on success, otherwise a negative error value - */ - int load(const char *filepath); - - /** - * @brief Checks state of the @ref ImageTrackingModel. - * - * @since_tizen 3.0 - * @return @a true if object was detected on the last processed frame, - * otherwise a @a false value */ - bool isDetected() const; - - /** - * @brief Gets last location of the @ref ImageTrackingModel. - * - * @since_tizen 3.0 - * @return Last detected location - */ - std::vector getLastlocation() const; - -private: - ImageObject m_recognitionObject; - - ImageContourStabilizator m_stabilizator; - - std::vector m_lastLocation; - - State m_state; - - pthread_t m_recognitionThread; - - mutable pthread_mutex_t m_globalGuard; - - mutable pthread_spinlock_t m_lastLocationGuard; - - mutable pthread_spinlock_t m_stateGuard; - - friend std::ostream& operator << ( - std::ostream& os, - const ImageTrackingModel::State& state); - - friend std::istream& operator >> ( - std::istream& is, - ImageTrackingModel::State& state); + int load(const char *filepath); - friend std::ostream& operator << ( + friend std::ostream& operator << ( std::ostream& os, const ImageTrackingModel& obj); @@ -202,7 +128,16 @@ private: std::istream& is, ImageTrackingModel& obj); - friend class ImageTracker; +private: + ImageObject m_target; + + cv::Ptr m_tracker; + + ImageContourStabilizator m_stabilizator; + + std::vector m_location; + + StabilizationParams m_stabilizationParams; }; } /* Image */ diff --git 
a/mv_image/image/include/Tracking/MFTracker.h b/mv_image/image/include/Tracking/MFTracker.h new file mode 100644 index 0000000..90652e4 --- /dev/null +++ b/mv_image/image/include/Tracking/MFTracker.h @@ -0,0 +1,151 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __IMAGETRACKING_MFTRACKER_H__ +#define __IMAGETRACKING_MFTRACKER_H__ + +#include "Tracking/ObjectTracker.h" + +namespace MediaVision { +namespace Image { +/** + * @class MFTracker + * @brief Median Flow tracker implementation. + * + * @since_tizen 3.0 + */ +class MFTracker : public ObjectTracker { +public: + struct Params { + /** + * @brief TrackerMedianFlow algorithm parameters constructor + */ + Params(); + + int mPointsInGrid; /**< Square root of number of used keypoints. + Increase it to trade accurateness for speed. + Default value is sensible and recommended */ + + cv::Size mWindowSize; /**< Size of the search window at each pyramid level + for Lucas-Kanade optical flow search used for + tracking */ + + int mPyrMaxLevel; /**< Number of pyramid levels for Lucas-Kanade optical + flow search used for tracking */ + + /* TODO: add lifetime*/ + /*time_t mLifetime;*/ /**< Time of tracking without reinforcement. */ + }; + + /** + * @brief @ref MFTracker constructor based on tracking algorithm parameters. 
+ * + * @since_tizen 3.0 + * @param [in] params Parameters for objects tracking + */ + MFTracker(Params params = Params()); + + /** + * @brief Tracks the target for the video stream consisting of frames. + * + * @since_tizen 3.0 + * @remarks Call this function alternately for each frame + * @param [in] frame Current frame of the video stream + * @param [out] result Result contour + * @return true if object is tracked, otherwise return false + */ + virtual bool track(const cv::Mat& frame, std::vector& result); + + /** + * @brief Provides the current location of a target. + * + * @since_tizen 3.0 + * @param [in] location Current location of a target + */ + virtual void reinforcement(const std::vector& location); + + /** + * @brief Creates a copy of itself + * + * @since_tizen 3.0 + * @return clone + */ + virtual cv::Ptr clone() const; + +private: + bool isInited() const; + + bool init(const cv::Mat& image); + + bool update(const cv::Mat& image); + + float getLastConfidence() const; + + cv::Rect_ getLastBoundingBox() const; + + bool medianFlowImpl(cv::Mat oldImage, cv::Mat newImage, cv::Rect_& oldBox); + + cv::Rect_ vote( + const std::vector& oldPoints, + const std::vector& newPoints, + const cv::Rect_& oldRect, + cv::Point2f& mD); + + void check_FB( + std::vector newPyramid, + const std::vector& oldPoints, + const std::vector& newPoints, + std::vector& status); + + void check_NCC( + const cv::Mat& oldImage, + const cv::Mat& newImage, + const std::vector& oldPoints, + const std::vector& newPoints, + std::vector& status); + +private: + bool m_isInit; /**< Flag is used to determine the model + initialization */ + + Params m_params; /**< Parameters used during tracking, see + @ref TrackerMedianFlow::Params */ + + cv::TermCriteria m_termcrit; /**< Terminating criteria for OpenCV + Lucas–Kanade optical flow algorithm used + during tracking */ + + std::vector m_startLocation; /**< Tracking object start + location with relative values + to the bounding box */ + + cv::Rect_ 
m_boundingBox; /**< Tracking object bounding box */ + + float m_confidence; /**< Confidence that object was tracked + correctly at the last tracking iteration */ + + cv::Mat m_image; /**< Last image for which tracking was + performed */ + + std::vector m_pyramid; /**< The pyramid had been calculated for + the previous frame (or when + initialize the model) */ +}; + +} /* Image */ +} /* MediaVision */ + +#endif /* __IMAGETRACKING_MFTRACKER_H__ */ diff --git a/mv_image/image/include/Tracking/ObjectTracker.h b/mv_image/image/include/Tracking/ObjectTracker.h new file mode 100644 index 0000000..77e884e --- /dev/null +++ b/mv_image/image/include/Tracking/ObjectTracker.h @@ -0,0 +1,80 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __IMAGETRACKING_OBJECTTRACKER_H__ +#define __IMAGETRACKING_OBJECTTRACKER_H__ + +#include + +namespace MediaVision { +namespace Image { +/** + * @class ObjectTracker + * @brief Basic object tracker. + * + * @since_tizen 3.0 + */ +class ObjectTracker { +public: + /** + * @brief @ref ObjectTracker destructor + * + * @since_tizen 3.0 + */ + virtual ~ObjectTracker(); + + /** + * @brief Tracks the target for the video stream consisting of frames. 
+ * + * @since_tizen 3.0 + * @remarks Call this function alternately for each frame + * @param [in] frame Current frame of the video stream + * @param [out] result Result contour + * @return true if object is tracked, otherwise return false + */ + virtual bool track(const cv::Mat& frame, std::vector& result) = 0; + + /** + * @brief Provides the current location of a target. + * + * @since_tizen 3.0 + * @param [in] location Current location of a target + */ + virtual void reinforcement(const std::vector& location) = 0; + + /* + * @brief Creates a copy of itself + * + * @since_tizen 3.0 + * @return clone + */ + virtual cv::Ptr clone() const = 0; + +private: + /** + * @brief Assignment operator for the base class @ref ObjectTracker. + * + * @since_tizen 3.0 + * @param [in] copy @ref ObjectTracker which will be copied + * @return itself + */ + ObjectTracker& operator=(const ObjectTracker& copy); +}; + +} /* Image */ +} /* MediaVision */ + +#endif /* __IMAGETRACKING_OBJECTTRACKER_H__ */ diff --git a/mv_image/image/include/Tracking/RecognitionBasedTracker.h b/mv_image/image/include/Tracking/RecognitionBasedTracker.h new file mode 100644 index 0000000..8106860 --- /dev/null +++ b/mv_image/image/include/Tracking/RecognitionBasedTracker.h @@ -0,0 +1,93 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __IMAGETRACKING_RECOGNITIONBASEDTRACKER_H__ +#define __IMAGETRACKING_RECOGNITIONBASEDTRACKER_H__ + +#include "Tracking/ObjectTracker.h" + +#include "Recognition/ImageObject.h" + +namespace MediaVision { +namespace Image { +/** + * @class FeatureSubstitutionTracker + * @brief Tracker uses recognition of target on the entire frame. + * + * @since_tizen 3.0 + */ +class RecognitionBasedTracker : public ObjectTracker { +public: + /** + * @brief @ref RecognitionBasedTracker constructor. + * + * @since_tizen 3.0 + * @param [in] target Target of recognition + * @param [in] sceneFeaturesExtractingParams Parameters of feature + * extracting from frames + * @param [in] recognitionParams Parameters of recognition + */ + RecognitionBasedTracker( + const ImageObject& target, + const FeaturesExtractingParams& sceneFeaturesExtractingParams, + const RecognitionParams& recognitionParams); + + /** + * @brief @ref RecognitionBasedTracker destructor + * + * @since_tizen 3.0 + */ + virtual ~RecognitionBasedTracker(); + + /** + * @brief Tracks the target for the video stream consisting of frames. + * + * @since_tizen 3.0 + * @remarks Call this function alternately for each frame + * @param [in] frame Current frame of the video stream + * @param [out] result Result contour + * @return true if object is tracked, otherwise return false + */ + virtual bool track(const cv::Mat& frame, std::vector& result); + + /** + * @brief Provides the current location of a target. 
+ * + * @since_tizen 3.0 + * @param [in] location Current location of a target + */ + virtual void reinforcement(const std::vector& location); + + /* + * @brief Creates a copy of itself + * + * @since_tizen 3.0 + * @return clone + */ + virtual cv::Ptr clone() const; + +private: + ImageObject m_target; + + FeaturesExtractingParams m_sceneFeatureExtractingParams; + + RecognitionParams m_recogParams; +}; + +} /* Image */ +} /* MediaVision */ + +#endif /* __IMAGETRACKING_RECOGNITIONBASEDTRACKER_H__ */ diff --git a/mv_image/image/src/Features/BasicExtractorFactory.cpp b/mv_image/image/src/Features/BasicExtractorFactory.cpp new file mode 100644 index 0000000..09285da --- /dev/null +++ b/mv_image/image/src/Features/BasicExtractorFactory.cpp @@ -0,0 +1,48 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Features/BasicExtractorFactory.h" + +#include + +namespace MediaVision { +namespace Image { +BasicExtractorFactory::BasicExtractorFactory( + KeypointType keypointsType, + DescriptorType descType) : + m_kpType(keypointsType), + m_descType(descType) +{ +} + +cv::Ptr BasicExtractorFactory::buildFeatureExtractor() +{ + cv::Ptr featureExtractor(new FeatureExtractor()); + + cv::Ptr detector = + cv::FeatureDetector::create(KeypointNames[m_kpType]); + + cv::Ptr extractor = + cv::DescriptorExtractor::create(DescriptorNames[m_descType]); + + featureExtractor->setFeatureDetector(detector, m_kpType); + featureExtractor->setDescriptorExtractor(extractor, m_descType); + + return featureExtractor; +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/Features/FeatureExtractor.cpp b/mv_image/image/src/Features/FeatureExtractor.cpp new file mode 100644 index 0000000..be9224b --- /dev/null +++ b/mv_image/image/src/Features/FeatureExtractor.cpp @@ -0,0 +1,140 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Features/FeatureExtractor.h" + +#include "ImageMathUtil.h" + +#include + +namespace MediaVision { +namespace Image { +const cv::Size FeatureExtractor::MinSize = cv::Size(5, 5); + +FeatureExtractor::FeatureExtractor() : + m_kpType(KT_INVALID), + m_detector(), + m_descType(DT_INVALID), + m_extractor(), + m_computeRecognitionRate(NULL) +{ +} + +void FeatureExtractor::setFeatureDetector( + const cv::Ptr detector, + KeypointType keypointType) +{ + m_detector = detector; + m_kpType = keypointType; +} + +void FeatureExtractor::setDescriptorExtractor( + cv::Ptr extractor, + DescriptorType descriptorType) +{ + m_extractor = extractor; + m_descType = descriptorType; +} + +void FeatureExtractor::setRecognitionRateMetric( + float (*computeRecognitionRate)( + const cv::Mat&, + const std::vector&)) +{ + m_computeRecognitionRate = computeRecognitionRate; +} + +bool FeatureExtractor::extract( + const cv::Mat& image, + FeaturePack& result, + const std::vector& roi) +{ + if (m_detector.empty() || m_extractor.empty()) + return false; + + cv::Rect boundingBox; + + if (roi.empty()) { + boundingBox.x = 0; + boundingBox.y = 0; + boundingBox.width = image.cols; + boundingBox.height = image.rows; + } else { + if (roi.size() < 3) + return false; + + boundingBox = cv::boundingRect(roi); + catRect(boundingBox, image.size()); + } + + if (boundingBox.width < MinSize.width || boundingBox.height < MinSize.height) + return false; + + result.m_objectKeypoints.clear(); + + std::vector keypoints; + + m_detector->detect( + image(boundingBox), + keypoints); + + result.m_objectKeypoints = keypoints; + if (!roi.empty()) { + const size_t numberOfKeypoints = keypoints.size(); + result.m_objectKeypoints.resize(numberOfKeypoints); + for (size_t i = 0; i < numberOfKeypoints; ++i) { + result.m_objectKeypoints[i].pt.x += boundingBox.x; + result.m_objectKeypoints[i].pt.y += boundingBox.y; + } + } + + if (!roi.empty()) { + /* TODO: Ecode roi to reduce the boundary effect. 
Provide new parameter + / for this action cause roi is a bounding contour for the object. */ + + for (size_t i = 0; i < result.m_objectKeypoints.size(); ++i) { + if (!checkAccessory(result.m_objectKeypoints[i].pt, roi)) { + result.m_objectKeypoints.erase(result.m_objectKeypoints.begin() + i); + --i; + } + } + } + + m_extractor->compute( + image, + result.m_objectKeypoints, + result.m_objectDescriptors); + + if (NULL != m_computeRecognitionRate) { + result.m_recognitionRate = m_computeRecognitionRate( + image(boundingBox), + keypoints); + } else { + /* Default recognition rate metric */ + if (result.m_objectKeypoints.size() < MinimumNumberOfFeatures) + result.m_recognitionRate = 0.f; + else + result.m_recognitionRate = 0.5f; + } + + result.m_keypointsType = m_kpType; + result.m_descriptorsType = m_descType; + + return true; +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/Features/FeatureExtractorFactory.cpp b/mv_image/image/src/Features/FeatureExtractorFactory.cpp new file mode 100644 index 0000000..be022d5 --- /dev/null +++ b/mv_image/image/src/Features/FeatureExtractorFactory.cpp @@ -0,0 +1,28 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Features/FeatureExtractorFactory.h" + +namespace MediaVision { +namespace Image { + +FeatureExtractorFactory::~FeatureExtractorFactory() +{ + ; /* NULL */ +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/Features/FeatureMatcher.cpp b/mv_image/image/src/Features/FeatureMatcher.cpp new file mode 100644 index 0000000..dbf72df --- /dev/null +++ b/mv_image/image/src/Features/FeatureMatcher.cpp @@ -0,0 +1,244 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Features/FeatureMatcher.h" + +#include "ImageMathUtil.h" + +#include + +namespace MediaVision { +namespace Image { + +namespace { +float computeLinearSupportElement( + const std::vector& examples, + int requiredNumber, + int leftLimit, + int rightLimit) +{ + int sizeOfExamples = rightLimit - leftLimit + 1; + + if (sizeOfExamples <= 1) + return examples[leftLimit].distance; + + float minValue = examples[leftLimit].distance; + float maxValue = examples[leftLimit].distance; + + /* Finding the maximum and minimum values */ + for (int i = leftLimit + 1; i <= rightLimit; ++i) { + if (minValue > examples[i].distance) + minValue = examples[i].distance; + else if (maxValue < examples[i].distance) + maxValue = examples[i].distance; + } + + /* Linear approximation. 
f(x) = k*x + b */ + /* f(sizeOfExamples) = maxValue; f(1) = minValue; */ + const float b = (maxValue - minValue * sizeOfExamples) / (1 - sizeOfExamples); + const float k = minValue - b; + + /* Calculation of the support element */ + return k * requiredNumber + b; +} + +size_t matchesSelection( + std::vector& examples, + size_t filterAmount, + size_t allowableError) +{ + size_t sizeOfExamples = examples.size(); + + if ((filterAmount + allowableError) > sizeOfExamples) + return sizeOfExamples; + + int startLeftLimit = 0; + int startRightLimit = sizeOfExamples - 1; + + int leftLimit = startLeftLimit; + int rightLimit = startRightLimit; + + int requiredNumber = filterAmount; + + float supportElement = 0.f; + + while (true) { + if (leftLimit >= rightLimit) { + if (leftLimit < (requiredNumber - (int)allowableError)) + leftLimit = requiredNumber + (int)allowableError; + + break; + } + + supportElement = computeLinearSupportElement(examples, requiredNumber, + leftLimit, rightLimit); + + /* Iteration similar quicksort */ + while (true) { + /* Search the leftmost element which have bigger confidence than support element */ + while (examples[leftLimit].distance <= supportElement && + leftLimit < startRightLimit) { + ++leftLimit; + } + + /* Search the rightmost element which have smaller confidence than support element */ + while (examples[rightLimit].distance >= supportElement && + rightLimit >= startLeftLimit) { + --rightLimit; + } + + if (leftLimit >= rightLimit) + break; + + /* Swap */ + std::swap(examples[leftLimit], examples[rightLimit]); + } + + if (abs(filterAmount - leftLimit) <= (int)allowableError) + break; + + if ((int)filterAmount > leftLimit) { + requiredNumber -= leftLimit - startLeftLimit; + + rightLimit = startRightLimit; + startLeftLimit = leftLimit; + } else { + leftLimit = startLeftLimit; + startRightLimit = rightLimit; + } + } + + return (size_t)leftLimit; +} + +} /* anonymous namespace */ + +FeatureMatcher::FeatureMatcher( + float affectingPart, + float 
tolerantError, + size_t minimumMatchesNumber) +{ + setAffectingPart(affectingPart); + setTolerantError(tolerantError); + setMinimumMatchesNumber(minimumMatchesNumber); +} + +FeatureMatcher::MatchError FeatureMatcher::match( + const FeaturePack& from, + const FeaturePack& to, + cv::Mat& homophraphyMatrix) const +{ + if (MinimumNumberOfFeatures > from.m_objectKeypoints.size()) + return InvalidFeaturePackFrom; + + if (MinimumNumberOfFeatures > to.m_objectKeypoints.size()) + return InvalidFeaturePackTo; + + if (from.m_descriptorsType != to.m_descriptorsType) + return DisparateTypes; + + std::vector matches; + + m_matcher.match(from.m_objectDescriptors, to.m_objectDescriptors, matches); + + size_t matchesNumber = matches.size(); + + if (MinimumNumberOfFeatures > matchesNumber) + return MatchesNotFound; + + size_t requiredMatchesNumber = m_affectingPart * matchesNumber; + size_t allowableMatchesNumberError = m_tolerantError * requiredMatchesNumber; + + if (matchesNumber - allowableMatchesNumberError > MinimumNumberOfFeatures && + requiredMatchesNumber + allowableMatchesNumberError < matchesNumber) { + if (requiredMatchesNumber - allowableMatchesNumberError < + m_minimumMatchesNumber) { + if (requiredMatchesNumber + allowableMatchesNumberError > + m_minimumMatchesNumber) { + requiredMatchesNumber = (requiredMatchesNumber + + m_minimumMatchesNumber + allowableMatchesNumberError) / 2; + + allowableMatchesNumberError = requiredMatchesNumber - + m_minimumMatchesNumber + allowableMatchesNumberError; + } else { + const size_t minimalAllowableMatchesNumberError = 2u; + + requiredMatchesNumber = minimalAllowableMatchesNumberError + + m_minimumMatchesNumber; + + allowableMatchesNumberError = minimalAllowableMatchesNumberError; + } + } + + const size_t filterAmount = matchesSelection( + matches, + requiredMatchesNumber, + allowableMatchesNumberError); + + if (filterAmount >= MinimumNumberOfFeatures) + matches.resize(filterAmount); + + matchesNumber = matches.size(); + } + + 
std::vector objectPoints(matchesNumber); + std::vector scenePoints(matchesNumber); + + for (size_t matchIdx = 0; matchIdx < matchesNumber; ++matchIdx) { + objectPoints[matchIdx] = + from.m_objectKeypoints[matches[matchIdx].queryIdx].pt; + + scenePoints[matchIdx] = + to.m_objectKeypoints[matches[matchIdx].trainIdx].pt; + } + + homophraphyMatrix = cv::findHomography(objectPoints, scenePoints, CV_RANSAC); + + return Success; +} + +float FeatureMatcher::getAffectingPart() const +{ + return m_affectingPart; +} + +void FeatureMatcher::setAffectingPart(float affectingPart) +{ + m_affectingPart = std::max(0.f, std::min(1.f, affectingPart)); +} + +float FeatureMatcher::getTolerantError() const +{ + return m_tolerantError; +} + +void FeatureMatcher::setTolerantError(float tolerantError) +{ + m_affectingPart = std::max(0.f, std::min(1.f, tolerantError)); +} + +size_t FeatureMatcher::getMinimumMatchesNumber() const +{ + return m_minimumMatchesNumber; +} + +void FeatureMatcher::setMinimumMatchesNumber(size_t minimumMatchesNumber) +{ + m_minimumMatchesNumber = minimumMatchesNumber; +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/Features/FeaturePack.cpp b/mv_image/image/src/Features/FeaturePack.cpp new file mode 100644 index 0000000..61364f5 --- /dev/null +++ b/mv_image/image/src/Features/FeaturePack.cpp @@ -0,0 +1,58 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Features/FeaturePack.h" + +#include + +namespace MediaVision { +namespace Image { + +FeaturePack::FeaturePack() : + m_keypointsType(KT_INVALID), + m_objectKeypoints(), + m_descriptorsType(DT_INVALID), + m_objectDescriptors(), + m_recognitionRate(0.f) +{ + ; /* NULL */ +} + +FeaturePack::FeaturePack(const FeaturePack& copy) : + m_keypointsType(copy.m_keypointsType), + m_objectKeypoints(copy.m_objectKeypoints), + m_descriptorsType(copy.m_descriptorsType), + m_objectDescriptors(copy.m_objectDescriptors.clone()), + m_recognitionRate(copy.m_recognitionRate) +{ + ; /* NULL */ +} + +FeaturePack& FeaturePack::operator= (const FeaturePack& copy) +{ + if (this != &copy) { + m_keypointsType = copy.m_keypointsType; + m_objectKeypoints = copy.m_objectKeypoints; + m_descriptorsType = copy.m_descriptorsType; + m_objectDescriptors = copy.m_objectDescriptors.clone(); + m_recognitionRate = copy.m_recognitionRate; + } + + return *this; +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/Features/ORBExtractorFactory.cpp b/mv_image/image/src/Features/ORBExtractorFactory.cpp new file mode 100644 index 0000000..2ac5d81 --- /dev/null +++ b/mv_image/image/src/Features/ORBExtractorFactory.cpp @@ -0,0 +1,145 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Features/ORBExtractorFactory.h" + +#include "ImageMathUtil.h" + +#include + +namespace MediaVision { +namespace Image { +ORBExtractorFactory::ORBExtractorFactory( + float scaleFactor, + size_t maximumFeaturesNumber) +{ + setScaleFactor(scaleFactor); + setMaximumFeaturesNumber(maximumFeaturesNumber); +} + +cv::Ptr ORBExtractorFactory::buildFeatureExtractor() +{ + cv::Ptr featureExtractor(new FeatureExtractor()); + + cv::Ptr detector( + new cv::ORB( + m_maximumFeaturesNumber, + m_scaleFactor)); + + cv::Ptr extractor = detector; + + featureExtractor->setFeatureDetector(detector, KT_ORB); + featureExtractor->setDescriptorExtractor(extractor, DT_ORB); + featureExtractor->setRecognitionRateMetric(computeRecognitionRate); + + return featureExtractor; +} + +float ORBExtractorFactory::getScaleFactor() const +{ + return m_scaleFactor; +} + +void ORBExtractorFactory::setScaleFactor(float scaleFactor) +{ + m_scaleFactor = scaleFactor; +} + +size_t ORBExtractorFactory::getMaximumFeaturesNumber() const +{ + return m_scaleFactor; +} + +void ORBExtractorFactory::setMaximumFeaturesNumber(size_t maximumFeaturesNumber) +{ + m_maximumFeaturesNumber = maximumFeaturesNumber; +} + +float ORBExtractorFactory::computeRecognitionRate( + const cv::Mat& image, + const std::vector& keypoints) +{ + const size_t numberOfKeypoints = keypoints.size(); + + /* it is impossible to calculate the perspective transformation parameters + * if number of key points less than MinimumNumberOfFeatures (4) */ + if (numberOfKeypoints < MinimumNumberOfFeatures) + return 0.f; + + static const size_t xCellsNumber = 10u; + static const size_t yCellsNumber = 10u; + + cv::Mat cells[xCellsNumber][yCellsNumber]; + size_t accumulationCounter[xCellsNumber][yCellsNumber]; + + const size_t cellWidth = image.cols / xCellsNumber; + const size_t cellHeight = image.rows / yCellsNumber; + + for (size_t x = 0u; x < xCellsNumber; ++x) { + for (size_t y = 0u; y < yCellsNumber; ++y) { + cells[x][y] = 
image(cv::Rect( + x * cellWidth, + y * cellHeight, + cellWidth, + cellHeight)); + + accumulationCounter[x][y] = 0; + } + } + + for (size_t i = 0u; i < numberOfKeypoints; ++i) { + size_t xCellIdx = keypoints[i].pt.x / cellWidth; + if (xCellIdx >= xCellsNumber) + xCellIdx = xCellsNumber - 1; + + size_t yCellIdx = keypoints[i].pt.y / cellHeight; + if (yCellIdx >= yCellsNumber) + yCellIdx = yCellsNumber - 1; + + ++(accumulationCounter[xCellIdx][yCellIdx]); + } + + const float exceptedNumber = numberOfKeypoints / + (float)(xCellsNumber * yCellsNumber); + + float distributedEvaluation = 0.f; + + for (size_t x = 0u; x < xCellsNumber; ++x) { + for (size_t y = 0u; y < yCellsNumber; ++y) { + distributedEvaluation += (accumulationCounter[x][y] - exceptedNumber) * + (accumulationCounter[x][y] - exceptedNumber) / exceptedNumber; + } + } + + float maximumDistributedEvaluation = (xCellsNumber * yCellsNumber - 1) * + exceptedNumber; + + maximumDistributedEvaluation += (numberOfKeypoints - exceptedNumber) * + (numberOfKeypoints - exceptedNumber) / exceptedNumber; + + distributedEvaluation = 1 - + (distributedEvaluation / maximumDistributedEvaluation); + + /* Exponentiation to find an approximate confidence value based on the + * number of key points on the image. 
*/ + const float cardinalityEvaluation = pow(-0.9, numberOfKeypoints - 3) + 1.0f; + + /* Result metric */ + return distributedEvaluation * cardinalityEvaluation; +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/ImageConfig.cpp b/mv_image/image/src/ImageConfig.cpp index a058965..0f31d66 100644 --- a/mv_image/image/src/ImageConfig.cpp +++ b/mv_image/image/src/ImageConfig.cpp @@ -18,18 +18,9 @@ namespace MediaVision { namespace Image { -FeaturesExtractingParams::FeaturesExtractingParams( - double scaleFactor, - int maximumFeaturesNumber) : - mScaleFactor(scaleFactor), - mMaximumFeaturesNumber(maximumFeaturesNumber) -{ - ; /* NULL */ -} - FeaturesExtractingParams::FeaturesExtractingParams() : - mScaleFactor(1.2), - mMaximumFeaturesNumber(800) + mKeypointType(KT_INVALID), + mDescriptorType(DT_INVALID) { ; /* NULL */ } @@ -37,10 +28,10 @@ FeaturesExtractingParams::FeaturesExtractingParams() : RecognitionParams::RecognitionParams( int minMatchesNumber, double requiredMatchesPart, - double allowableMatchesPartError) : + double tolerantMatchesPartError) : mMinMatchesNumber(minMatchesNumber), mRequiredMatchesPart(requiredMatchesPart), - mAllowableMatchesPartError(allowableMatchesPartError) + mTolerantMatchesPartError(tolerantMatchesPartError) { ; /* NULL */ } @@ -48,18 +39,22 @@ RecognitionParams::RecognitionParams( RecognitionParams::RecognitionParams() : mMinMatchesNumber(0), mRequiredMatchesPart(1.0), - mAllowableMatchesPartError(0.0) + mTolerantMatchesPartError(0.0) { ; /* NULL */ } StabilizationParams::StabilizationParams( + bool isEnabled, int historyAmount, - double allowableShift, + double tolerantShift, + double tolerantShiftExtra, double stabilizationSpeed, double stabilizationAcceleration) : + mIsEnabled(isEnabled), mHistoryAmount(historyAmount), - mAllowableShift(allowableShift), + mTolerantShift(tolerantShift), + mTolerantShiftExtra(tolerantShiftExtra), mStabilizationSpeed(stabilizationSpeed), 
mStabilizationAcceleration(stabilizationAcceleration) { @@ -67,8 +62,10 @@ StabilizationParams::StabilizationParams( } StabilizationParams::StabilizationParams() : + mIsEnabled(false), mHistoryAmount(1), - mAllowableShift(0.0), + mTolerantShift(0.0), + mTolerantShiftExtra(0.0), mStabilizationSpeed(0.0), mStabilizationAcceleration(1.0) { diff --git a/mv_image/image/src/ImageMathUtil.cpp b/mv_image/image/src/ImageMathUtil.cpp index 0da2dbc..f8d7890 100644 --- a/mv_image/image/src/ImageMathUtil.cpp +++ b/mv_image/image/src/ImageMathUtil.cpp @@ -40,10 +40,15 @@ float getTriangleArea( const float semiperimeter = (distances[0] + distances[1] + distances[2]) / 2.0f; - return sqrt(semiperimeter * + const float res2x = semiperimeter * (semiperimeter - distances[0]) * (semiperimeter - distances[1]) * - (semiperimeter - distances[2])); + (semiperimeter - distances[2]); + + if (res2x < 0.f) + return 0.f; + + return sqrt(res2x); } float getQuadrangleArea(const cv::Point2f points[NumberOfQuadrangleCorners]) @@ -52,5 +57,86 @@ float getQuadrangleArea(const cv::Point2f points[NumberOfQuadrangleCorners]) getTriangleArea(points[0], points[3], points[2]); } +bool checkAccessory( + const cv::Point2f& point, + const std::vector& region) +{ + if (region.size() < 3) + return false; + + bool insideFlag = false; + const size_t numberOfContourPoints = region.size(); + + for (size_t i = 0u, j = numberOfContourPoints - 1; i < numberOfContourPoints; j = i++) { + if (((region[i].y > point.y) != (region[j].y > point.y)) && + ((float) point.x < (float) + (region[j].x - region[i].x) * (point.y - region[i].y) / + (region[j].y - region[i].y) + region[i].x)) { + insideFlag = !insideFlag; + } + } + + return insideFlag; +} + +void catRect(cv::Rect& rectange, const cv::Size& maxSize) +{ + if (rectange.width < 0) { + rectange.x += rectange.width; + rectange.width *= -1; + } + + if (rectange.height < 0) { + rectange.y += rectange.height; + rectange.height *= -1; + } + + if (rectange.x > maxSize.width || 
rectange.y > maxSize.height) { + rectange.x = 0; + rectange.y = 0; + rectange.width = 0; + rectange.height = 0; + return; + } + + if (rectange.x < 0) { + rectange.width += rectange.x; + rectange.x = 0; + } + + if (rectange.y < 0) { + rectange.height += rectange.y; + rectange.y = 0; + } + + if (rectange.x + rectange.width > maxSize.width) + rectange.width = maxSize.width - rectange.x; + + if (rectange.y + rectange.height > maxSize.height) + rectange.height = maxSize.height - rectange.y; +} + +std::vector contourResize( + const std::vector& roi, + float scalingCoefficient) +{ + const size_t numberOfContourPoints = roi.size(); + cv::Point2f centre(0, 0); + for (size_t i = 0; i < numberOfContourPoints; ++i) { + centre.x += roi[i].x; + centre.y += roi[i].y; + } + centre.x /= numberOfContourPoints; + centre.y /= numberOfContourPoints; + + std::vector result(numberOfContourPoints); + for (size_t i = 0; i < numberOfContourPoints; ++i) { + result[i].x = (roi[i].x - centre.x) * scalingCoefficient + centre.x; + result[i].y = (roi[i].y - centre.y) * scalingCoefficient + centre.y; + } + + return result; +} + } /* Image */ } /* MediaVision */ diff --git a/mv_image/image/src/ImageObject.cpp b/mv_image/image/src/ImageObject.cpp deleted file mode 100644 index a562605..0000000 --- a/mv_image/image/src/ImageObject.cpp +++ /dev/null @@ -1,446 +0,0 @@ -/** - * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "ImageObject.h" - -#include "ImageMathUtil.h" - -#include - -#include "mv_private.h" -#include "mv_common.h" - -#include -#include - -#include -#include - -namespace MediaVision { -namespace Image { -ImageObject::ImageObject() : - m_isEmpty(true), - m_isLabeled(false), - m_label(0), - m_recognitionRate(0.f) -{ - ; /* NULL */ -} - -ImageObject::ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params) : - m_isEmpty(true), - m_isLabeled(false), - m_label(0), - m_recognitionRate(0.f) -{ - fill(image, params); -} - -ImageObject::ImageObject(const ImageObject& copy) : - m_isEmpty(copy.m_isEmpty), - m_isLabeled(copy.m_isLabeled), - m_label(copy.m_label), - m_boundingContour(copy.m_boundingContour), - m_objectKeypoints(copy.m_objectKeypoints), - m_objectDescriptors(copy.m_objectDescriptors.clone()), - m_recognitionRate(copy.m_recognitionRate) -{ - ; /* NULL */ -} - -ImageObject& ImageObject::operator=(const ImageObject& copy) -{ - if (this != ©) { - m_isEmpty = copy.m_isEmpty; - m_isLabeled = copy.m_isLabeled; - m_label = copy.m_label; - m_boundingContour = copy.m_boundingContour; - m_objectKeypoints = copy.m_objectKeypoints; - m_objectDescriptors = copy.m_objectDescriptors.clone(); - m_recognitionRate = copy.m_recognitionRate; - } - return *this; -} - -ImageObject::~ImageObject() -{ - ; /* NULL */ -} - -void ImageObject::fill(const cv::Mat& image, const FeaturesExtractingParams& params) -{ - m_isEmpty = false; - m_boundingContour.resize(NumberOfQuadrangleCorners); - - m_boundingContour[0].x = 0.f; - m_boundingContour[0].y = 0.f; - - m_boundingContour[1].x = image.cols; - m_boundingContour[1].y = 0.f; - - m_boundingContour[2].x = image.cols; - m_boundingContour[2].y = image.rows; - - m_boundingContour[3].x = 0.f; - m_boundingContour[3].y = image.rows; - - extractFeatures(image, params); - - computeRecognitionRate(image); - - LOGI("[%s] Image object is filled.", __FUNCTION__); -} - -bool ImageObject::fill(const cv::Mat& image, const 
cv::Rect& boundingBox, - const FeaturesExtractingParams& params) -{ - if ((0 > boundingBox.x) || (0 >= boundingBox.width) || - (0 > boundingBox.y) || (0 >= boundingBox.height) || - (image.cols < (boundingBox.x + boundingBox.width)) || - (image.rows < (boundingBox.y + boundingBox.height))) { - LOGE("[%s] Invalid ROI.", __FUNCTION__); - return false; - } - - m_isEmpty = false; - m_boundingContour.resize(NumberOfQuadrangleCorners); - - m_boundingContour[0].x = 0.f; - m_boundingContour[0].y = 0.f; - - m_boundingContour[1].x = boundingBox.width; - m_boundingContour[1].y = 0.f; - - m_boundingContour[2].x = boundingBox.width; - m_boundingContour[2].y = boundingBox.height; - - m_boundingContour[3].x = 0.f; - m_boundingContour[3].y = boundingBox.height; - - cv::Mat objectImage(image, boundingBox); - - extractFeatures(objectImage, params); - - computeRecognitionRate(image); - - LOGI("[%s] Image object is filled.", __FUNCTION__); - - return true; -} - -void ImageObject::extractFeatures(const cv::Mat& image, - const FeaturesExtractingParams& params) -{ - cv::ORB orb(params.mMaximumFeaturesNumber, params.mScaleFactor); - - if (image.cols < MinWidth || image.rows < MinHeight) { - LOGW("[%s] Area is too small, recognition rate is 0.", __FUNCTION__); - m_objectKeypoints.clear(); - m_objectDescriptors = cv::Mat(); - } else { - orb.detect(image, m_objectKeypoints); - orb.compute(image, m_objectKeypoints, m_objectDescriptors); - } -} - -void ImageObject::computeRecognitionRate(const cv::Mat& image) -{ - const size_t numberOfKeypoints = m_objectKeypoints.size(); - - /* it is impossible to calculate the perspective transformation parameters - * if number of key points less than MinimumNumberOfFeatures (4) - */ - if (numberOfKeypoints < MinimumNumberOfFeatures) { - m_recognitionRate = 0.f; - return; - } - - static const size_t xCellsNumber = 10u; - static const size_t yCellsNumber = 10u; - - cv::Mat cells[xCellsNumber][yCellsNumber]; - size_t 
accumulationCounter[xCellsNumber][yCellsNumber]; - - const size_t cellWidth = image.cols / xCellsNumber; - const size_t cellHeight = image.rows / yCellsNumber; - - for (size_t x = 0u; x < xCellsNumber; ++x) { - for (size_t y = 0u; y < yCellsNumber; ++y) { - cells[x][y] = image(cv::Rect( - x * cellWidth, - y * cellHeight, - cellWidth, - cellHeight)); - - accumulationCounter[x][y] = 0; - } - } - - for (size_t i = 0u; i < numberOfKeypoints; ++i) { - size_t xCellIdx = m_objectKeypoints[i].pt.x / cellWidth; - if (xCellIdx >= xCellsNumber) { - xCellIdx = xCellsNumber - 1; - } - size_t yCellIdx = m_objectKeypoints[i].pt.y / cellHeight; - if (yCellIdx >= yCellsNumber) { - yCellIdx = yCellsNumber - 1; - } - ++(accumulationCounter[xCellIdx][yCellIdx]); - } - - const float exceptedNumber = numberOfKeypoints / - (float)(xCellsNumber * yCellsNumber); - - float distributedEvaluation = 0.f; - - for (size_t x = 0u; x < xCellsNumber; ++x) { - for (size_t y = 0u; y < yCellsNumber; ++y) { - distributedEvaluation += (accumulationCounter[x][y] - exceptedNumber) * - (accumulationCounter[x][y] - exceptedNumber) / exceptedNumber; - } - } - - float maximumDistributedEvaluation = (xCellsNumber * yCellsNumber - 1) * - exceptedNumber; - - maximumDistributedEvaluation += (numberOfKeypoints - exceptedNumber) * - (numberOfKeypoints - exceptedNumber) / exceptedNumber; - - distributedEvaluation = 1 - - (distributedEvaluation / maximumDistributedEvaluation); - - /* Exponentiation to find an approximate confidence value based on the - * number of key points on the image. 
- */ - const float cardinalityEvaluation = pow(-0.9, numberOfKeypoints - 3) + 1.0f; - - m_recognitionRate = - distributedEvaluation * - cardinalityEvaluation; -} - -float ImageObject::getRecognitionRate(void) const -{ - return m_recognitionRate; -} - -bool ImageObject::isEmpty() const -{ - return m_isEmpty; -} - -void ImageObject::setLabel(int label) -{ - m_isLabeled = true; - m_label = label; -} - -bool ImageObject::getLabel(int& label) const -{ - if (!m_isLabeled) { - LOGW("[%s] Image hasn't label.", __FUNCTION__); - return false; - } - label = m_label; - return true; -} - -int ImageObject::save(const char *fileName) const -{ - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); - - std::string filePath; - filePath += prefix_path; - filePath += fileName; - - /* check the directory is available */ - std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/')); - if (access(prefix_path_check.c_str(), F_OK)) { - LOGE("Can't save image object. Path[%s] doesn't existed.", prefix_path_check.c_str()); - - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - std::ofstream out; - - out.open(filePath.c_str()); - - if (!out.is_open()) { - LOGE("[%s] Can't create/open file.", __FUNCTION__); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } - - out << (*this); - - out.close(); - LOGI("[%s] Image object is saved.", __FUNCTION__); - - return MEDIA_VISION_ERROR_NONE; -} - -int ImageObject::load(const char *fileName) -{ - /* find directory */ - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); - - std::string filePath; - filePath += prefix_path; - filePath += fileName; - - if (access(filePath.c_str(), F_OK)) { - LOGE("Can't load image object model. 
Path[%s] doesn't existed.", filePath.c_str()); - - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - std::ifstream in; - in.open(filePath.c_str()); - - if (!in.is_open()) { - LOGE("[%s] Can't open file.", __FUNCTION__); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } - - in >> (*this); - - if (!in.good()) { - LOGE("[%s] Unexpected end of file.", __FUNCTION__); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } - - in.close(); - LOGI("[%s] Image object is loaded.", __FUNCTION__); - - return MEDIA_VISION_ERROR_NONE; -} - -std::ostream& operator << (std::ostream& os, const ImageObject& obj) -{ - os << std::setprecision(7); - - os << obj.m_isEmpty << '\n'; - os << obj.m_isLabeled << '\n'; - os << obj.m_label << '\n'; - - os << obj.m_boundingContour.size() << '\n'; - for (size_t pointNum = 0u; pointNum < obj.m_boundingContour.size(); ++pointNum) { - os << obj.m_boundingContour[pointNum].x << ' '; - os << obj.m_boundingContour[pointNum].y << '\n'; - } - - os << obj.m_objectKeypoints.size() << '\n'; - for (size_t keypointNum = 0u; keypointNum < obj.m_objectKeypoints.size(); ++keypointNum) { - os << obj.m_objectKeypoints[keypointNum].pt.x << ' '; - os << obj.m_objectKeypoints[keypointNum].pt.y << ' '; - os << obj.m_objectKeypoints[keypointNum].size << ' '; - os << obj.m_objectKeypoints[keypointNum].response << ' '; - os << obj.m_objectKeypoints[keypointNum].angle << ' '; - os << obj.m_objectKeypoints[keypointNum].octave << ' '; - os << obj.m_objectKeypoints[keypointNum].class_id << '\n'; - } - - os << obj.m_objectDescriptors.rows << ' '; - os << obj.m_objectDescriptors.cols << ' '; - os << obj.m_objectDescriptors.type() << '\n'; - for (int descriptorNum = 0; descriptorNum < obj.m_objectDescriptors.rows; - ++descriptorNum) { - for (int featureNum = 0; featureNum < obj.m_objectDescriptors.cols; - ++featureNum, os << '\n') { - os << (int)obj.m_objectDescriptors.at(descriptorNum, featureNum) << ' '; - } - } - - return os; -} - -std::istream& operator >> (std::istream& is, 
ImageObject& obj) -{ - size_t numberOfContourPoints = 0u; - size_t numberOfKeyPoints = 0u; - int rows = 0, cols = 0; - int descriptorType = 0; - - ImageObject temporal; - -#define MEDIA_VISION_CHECK_IFSTREAM \ - if (!is.good()) { \ - return is; \ - } - - is >> temporal.m_isEmpty; - MEDIA_VISION_CHECK_IFSTREAM - is >> temporal.m_isLabeled; - MEDIA_VISION_CHECK_IFSTREAM - is >> temporal.m_label; - MEDIA_VISION_CHECK_IFSTREAM - - is >> numberOfContourPoints; - MEDIA_VISION_CHECK_IFSTREAM - - temporal.m_boundingContour.resize(numberOfContourPoints); - for (size_t pointNum = 0; pointNum < temporal.m_boundingContour.size(); ++pointNum) { - is >> temporal.m_boundingContour[pointNum].x; - MEDIA_VISION_CHECK_IFSTREAM - is >> temporal.m_boundingContour[pointNum].y; - MEDIA_VISION_CHECK_IFSTREAM - } - - is >> numberOfKeyPoints; - temporal.m_objectKeypoints.resize(numberOfKeyPoints); - for (size_t keypointNum = 0; keypointNum < temporal.m_objectKeypoints.size(); ++keypointNum) { - is >> temporal.m_objectKeypoints[keypointNum].pt.x; - MEDIA_VISION_CHECK_IFSTREAM - is >> temporal.m_objectKeypoints[keypointNum].pt.y; - MEDIA_VISION_CHECK_IFSTREAM - is >> temporal.m_objectKeypoints[keypointNum].size; - MEDIA_VISION_CHECK_IFSTREAM - is >> temporal.m_objectKeypoints[keypointNum].response; - MEDIA_VISION_CHECK_IFSTREAM - is >> temporal.m_objectKeypoints[keypointNum].angle; - MEDIA_VISION_CHECK_IFSTREAM - is >> temporal.m_objectKeypoints[keypointNum].octave; - MEDIA_VISION_CHECK_IFSTREAM - is >> temporal.m_objectKeypoints[keypointNum].class_id; - MEDIA_VISION_CHECK_IFSTREAM - } - - is >> rows; - MEDIA_VISION_CHECK_IFSTREAM - is >> cols; - MEDIA_VISION_CHECK_IFSTREAM - is >> descriptorType; - MEDIA_VISION_CHECK_IFSTREAM - temporal.m_objectDescriptors = cv::Mat(rows, cols, descriptorType); - int value = 0; - for (int descriptorNum = 0; descriptorNum < temporal.m_objectDescriptors.rows; ++descriptorNum) { - for (int featureNum = 0; featureNum < temporal.m_objectDescriptors.cols; 
++featureNum) { - is >> value; - MEDIA_VISION_CHECK_IFSTREAM - temporal.m_objectDescriptors.at(descriptorNum, featureNum) = (uchar)value; - } - } - -#undef MEDIA_VISION_CHECK_IFSTREAM - - obj = temporal; - - return is; -} - -} /* Image */ -} /* MediaVision */ diff --git a/mv_image/image/src/ImageTracker.cpp b/mv_image/image/src/ImageTracker.cpp deleted file mode 100644 index 400205c..0000000 --- a/mv_image/image/src/ImageTracker.cpp +++ /dev/null @@ -1,332 +0,0 @@ -/** - * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "ImageTracker.h" - -#include "ImageMathUtil.h" -#include "ImageRecognizer.h" -#include "ImageTrackingModel.h" -#include "ImageContourStabilizator.h" - -#include "mv_private.h" - -#include - -namespace MediaVision { -namespace Image { -ImageTracker::ImageTracker(const TrackingParams& trackingParams) : - m_trackingParams(trackingParams) -{ - ; /* NULL */ -} - -void ImageTracker::track(const cv::Mat& frame, ImageTrackingModel& target) -{ - ImageTrackingModel::State currentState = ImageTrackingModel::Undetected; - - while (pthread_mutex_trylock(&target.m_globalGuard) != 0) { - pthread_spin_lock(&target.m_stateGuard); - currentState = target.m_state; - pthread_spin_unlock(&target.m_stateGuard); - - if (ImageTrackingModel::InProcess == currentState) { - LOGI("[%s] Calling is skipped. 
Object is recognizing.", __FUNCTION__); - return; - } - } - - pthread_spin_lock(&target.m_stateGuard); - currentState = target.m_state; - pthread_spin_unlock(&target.m_stateGuard); - - if (ImageTrackingModel::Invalid == currentState) { - pthread_mutex_unlock(&target.m_globalGuard); - LOGE("[%s] Tracking model is invalid.", __FUNCTION__); - return; - } - - switch (target.m_state) { - case ImageTrackingModel::Appeared: - case ImageTrackingModel::Tracked: { - pthread_spin_lock(&target.m_stateGuard); - target.m_state = ImageTrackingModel::InProcess; - pthread_spin_unlock(&target.m_stateGuard); - - trackDetectedObject(frame, target); - break; - } - case ImageTrackingModel::Undetected: { - pthread_spin_lock(&target.m_stateGuard); - target.m_state = ImageTrackingModel::InProcess; - pthread_spin_unlock(&target.m_stateGuard); - - trackUndetectedObject(frame, target); - - /* Recognition thread is started. Don't use target here, just exit! */ - return; - } - case ImageTrackingModel::InProcess: - default: { - /* Abnormal behaviour: - * tracking model state is InProcess but globalGuard is not locked - */ - LOGE("[%s] Abnormal behaviour. 
Tracking model status is" - "\"InProgress\" but it is not in progress.", __FUNCTION__); - - pthread_spin_lock(&target.m_stateGuard); - if (target.m_recognitionObject.isEmpty()) { - target.m_state = ImageTrackingModel::Invalid; - LOGI("[%s] Tracking model status is changed on \"Invalid\"", __FUNCTION__); - } else { - target.m_state = ImageTrackingModel::Undetected; - LOGI("[%s] Tracking model status is changed on \"Undetected\"", __FUNCTION__); - } - pthread_spin_unlock(&target.m_stateGuard); - - pthread_mutex_unlock(&target.m_globalGuard); - break; - } - } -} - -void ImageTracker::trackDetectedObject( - const cv::Mat& frame, - ImageTrackingModel& target) -{ - cv::Rect expectedArea = computeExpectedArea(target, frame.size()); - - std::vector resultContour; - - ImageRecognizer recognizer( - frame(expectedArea), - m_trackingParams.mFramesFeaturesExtractingParams); - - const bool isRecognized = recognizer.recognize( - target.m_recognitionObject, - m_trackingParams.mRecognitionParams, - resultContour); - - if (isRecognized) { - for (size_t pointIdx = 0; pointIdx < resultContour.size(); ++pointIdx) { - resultContour[pointIdx].x += expectedArea.x; - resultContour[pointIdx].y += expectedArea.y; - } - - if (m_trackingParams.mStabilizationParams.mHistoryAmount > 0) { - target.m_stabilizator.stabilize( - resultContour, - m_trackingParams.mStabilizationParams); - } - - target.m_stabilizator.stabilize( - resultContour, - m_trackingParams.mStabilizationParams); - - pthread_spin_lock(&target.m_lastLocationGuard); - target.m_lastLocation = resultContour; - pthread_spin_unlock(&target.m_lastLocationGuard); - - pthread_spin_lock(&target.m_stateGuard); - target.m_state = ImageTrackingModel::Tracked; - pthread_spin_unlock(&target.m_stateGuard); - - LOGI("[%s] Object is successfully tracked.", __FUNCTION__); - } else { - target.m_stabilizator.reset(); - - pthread_spin_lock(&target.m_stateGuard); - target.m_state = ImageTrackingModel::Undetected; - 
pthread_spin_unlock(&target.m_stateGuard); - - LOGI("[%s] Object is lost.", __FUNCTION__); - } - - pthread_mutex_unlock(&target.m_globalGuard); -} - -void *ImageTracker::recognitionThreadFunc(void *recognitionInfo) -{ - if (NULL == recognitionInfo) { - return NULL; - } - - RecognitionInfo *recogInfo = (RecognitionInfo*)recognitionInfo; - - std::vector resultContour; - - ImageRecognizer recognizer( - recogInfo->mFrame, - recogInfo->mSceneFeaturesExtractingParams); - - bool isRecognized = recognizer.recognize( - recogInfo->mpTarget->m_recognitionObject, - recogInfo->mRecognitionParams, - resultContour); - - if (isRecognized) { - recogInfo->mpTarget->m_stabilizator.reset(); - - pthread_spin_lock(&(recogInfo->mpTarget->m_lastLocationGuard)); - recogInfo->mpTarget->m_lastLocation = resultContour; - pthread_spin_unlock(&(recogInfo->mpTarget->m_lastLocationGuard)); - - pthread_spin_lock(&(recogInfo->mpTarget->m_stateGuard)); - recogInfo->mpTarget->m_state = ImageTrackingModel::Appeared; - pthread_spin_unlock(&(recogInfo->mpTarget->m_stateGuard)); - } else { - pthread_spin_lock(&(recogInfo->mpTarget->m_stateGuard)); - recogInfo->mpTarget->m_state = ImageTrackingModel::Undetected; - pthread_spin_unlock(&(recogInfo->mpTarget->m_stateGuard)); - } - - recogInfo->mpTarget->m_recognitionThread = 0; - - pthread_mutex_unlock(&(recogInfo->mpTarget->m_globalGuard)); - - delete recogInfo; - - return NULL; -} - -void ImageTracker::trackUndetectedObject( - const cv::Mat& frame, - ImageTrackingModel& target) -{ - RecognitionInfo *recognitionInfo = new RecognitionInfo; - - recognitionInfo->mFrame = frame.clone(); - recognitionInfo->mpTarget = ⌖ - - recognitionInfo->mRecognitionParams = - m_trackingParams.mRecognitionParams; - recognitionInfo->mSceneFeaturesExtractingParams = - m_trackingParams.mFramesFeaturesExtractingParams; - - if (target.m_recognitionThread) { - /* Abnormal behaviour: - * Recognition thread isn't finished but guardian mutex is unlocked - */ - LOGE("[%s] Abnormal 
behaviour. Recognition thread isn't finished but" - "guardian mutex is unlocked.", __FUNCTION__); - - LOGI("[%s] Try to wait recognition thread.", __FUNCTION__); - pthread_join(target.m_recognitionThread, NULL); - target.m_recognitionThread = 0; - LOGI("[%s] Recognition thread is finished.", __FUNCTION__); - } - - const int err = pthread_create( - &target.m_recognitionThread, - NULL, - recognitionThreadFunc, - recognitionInfo); - - if (0 == err) { - LOGI("[%s] Recognition thread is started.", __FUNCTION__); - /* Recognition thread is started. Don't use target here, just exit! */ - return; - } - LOGE("[%s] Recognition thread creation is failed.", __FUNCTION__); - - pthread_spin_lock(&target.m_stateGuard); - if (target.m_recognitionObject.isEmpty()) { - target.m_state = ImageTrackingModel::Invalid; - LOGI("[%s] Tracking model status is changed on \"Invalid\"", __FUNCTION__); - } else { - target.m_state = ImageTrackingModel::Undetected; - LOGI("[%s] Tracking model status is changed on \"Undetected\"", __FUNCTION__); - } - pthread_spin_unlock(&target.m_stateGuard); - - pthread_mutex_unlock(&target.m_globalGuard); -} - -cv::Rect ImageTracker::computeExpectedArea( - const ImageTrackingModel& target, - const cv::Size& frameSize) -{ - if (target.m_state == ImageTrackingModel::Appeared) { - LOGI("[%s] Expected area for appeared object is full frame.", __FUNCTION__); - return cv::Rect(0, 0, frameSize.width, frameSize.height); - } - - if (target.m_lastLocation.empty()) { - LOGW("[%s] Can't compute expected area for object without last" - "location.", __FUNCTION__); - return cv::Rect(0, 0, 0, 0); - } - - cv::Point2f ltCorner(target.m_lastLocation[0]); - cv::Point2f rbCorner(target.m_lastLocation[0]); - - const size_t contourPointsNumber = target.m_lastLocation.size(); - - for (size_t pointNum = 1; pointNum < contourPointsNumber; ++pointNum) { - if (ltCorner.x > target.m_lastLocation[pointNum].x) { - ltCorner.x = target.m_lastLocation[pointNum].x; - } else if (rbCorner.x < 
target.m_lastLocation[pointNum].x) { - rbCorner.x = target.m_lastLocation[pointNum].x; - } - - if (ltCorner.y > target.m_lastLocation[pointNum].y) { - ltCorner.y = target.m_lastLocation[pointNum].y; - } else if (rbCorner.y < target.m_lastLocation[pointNum].y) { - rbCorner.y = target.m_lastLocation[pointNum].y; - } - } - - cv::Point2f center( - (ltCorner.x + rbCorner.x) / 2.0f, - (ltCorner.y + rbCorner.y) / 2.0f); - - cv::Size2f halfSize( - (center.x - ltCorner.x) * (1 + m_trackingParams.mExpectedOffset), - (center.y - ltCorner.y) * (1 + m_trackingParams.mExpectedOffset)); - - cv::Rect expectedArea( - center.x - halfSize.width, center.y - halfSize.height, - halfSize.width * 2, halfSize.height * 2); - - if (expectedArea.x < 0) { - expectedArea.width += expectedArea.x; - expectedArea.x = 0; - } - - if (expectedArea.y < 0) { - expectedArea.height += expectedArea.y; - expectedArea.y = 0; - } - - if (expectedArea.x + expectedArea.width > frameSize.width) { - expectedArea.width = frameSize.width - expectedArea.x; - } - - if (expectedArea.y + expectedArea.height > frameSize.height) { - expectedArea.height = frameSize.height - expectedArea.y; - } - - if (expectedArea.width <= 0 || expectedArea.height <= 0) { - expectedArea.x = 0; - expectedArea.y = 0; - expectedArea.width = 0; - expectedArea.height = 0; - } - - return expectedArea; -} - -} /* Image */ -} /* MediaVision */ diff --git a/mv_image/image/src/ImageTrackingModel.cpp b/mv_image/image/src/ImageTrackingModel.cpp deleted file mode 100644 index 014a629..0000000 --- a/mv_image/image/src/ImageTrackingModel.cpp +++ /dev/null @@ -1,340 +0,0 @@ -/** - * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "ImageTrackingModel.h" - -#include - -#include "mv_private.h" -#include "mv_common.h" - -#include -#include - -namespace MediaVision { -namespace Image { -ImageTrackingModel::ImageTrackingModel() : - m_recognitionObject(), - m_lastLocation(0), - m_state(Invalid), - m_recognitionThread(0) -{ - pthread_mutex_init(&m_globalGuard, NULL); - pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED); - pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED); -} - -ImageTrackingModel::ImageTrackingModel(const ImageObject& recognitionObject) : - m_recognitionObject(recognitionObject), - m_lastLocation(0), - m_state(Invalid), - m_recognitionThread(0) -{ - if (!recognitionObject.isEmpty()) { - m_state = Undetected; - } - pthread_mutex_init(&m_globalGuard, NULL); - pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED); - pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED); -} - -ImageTrackingModel::ImageTrackingModel(const ImageTrackingModel& copy) : - m_recognitionThread(0) -{ - pthread_mutex_init(&m_globalGuard, NULL); - pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED); - pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED); - - *this = copy; -} - -ImageTrackingModel::~ImageTrackingModel() -{ - if (m_recognitionThread) { - pthread_join(m_recognitionThread, NULL); - } - - pthread_mutex_destroy(&m_globalGuard); - pthread_spin_destroy(&m_lastLocationGuard); - pthread_spin_destroy(&m_stateGuard); -} - -void ImageTrackingModel::setTarget(const ImageObject& target) -{ - pthread_mutex_lock(&m_globalGuard); - 
- pthread_spin_lock(&m_stateGuard); - m_state = target.isEmpty() ? Invalid : Undetected; - pthread_spin_unlock(&m_stateGuard); - - pthread_spin_lock(&m_lastLocationGuard); - m_lastLocation.clear(); - pthread_spin_unlock(&m_lastLocationGuard); - - LOGI("[%s] Target is set into tracking model.", __FUNCTION__); - - m_recognitionObject = target; - - pthread_mutex_unlock(&m_globalGuard); -} - -void ImageTrackingModel::refresh(void) -{ - pthread_mutex_lock(&m_globalGuard); - - pthread_spin_lock(&m_stateGuard); - m_state = m_recognitionObject.isEmpty() ? Invalid : Undetected; - pthread_spin_unlock(&m_stateGuard); - - pthread_spin_lock(&m_lastLocationGuard); - m_lastLocation.clear(); - pthread_spin_unlock(&m_lastLocationGuard); - - LOGI("[%s] Image tracking model is refreshed.", __FUNCTION__); - - pthread_mutex_unlock(&m_globalGuard); -} - -bool ImageTrackingModel::isValid() const -{ - bool result = false; - - pthread_spin_lock(&m_stateGuard); - result = (m_state != Invalid); - pthread_spin_unlock(&m_stateGuard); - - return result; -} - -ImageTrackingModel& ImageTrackingModel::operator=(const ImageTrackingModel& copy) -{ - if (this != ©) { - pthread_mutex_t *higherMutex = &m_globalGuard; - pthread_mutex_t *lowerMutex = ©.m_globalGuard; - - if (higherMutex < lowerMutex) { - std::swap(higherMutex, lowerMutex); - } - - pthread_mutex_lock(higherMutex); - pthread_mutex_lock(lowerMutex); - - m_recognitionObject = copy.m_recognitionObject; - - pthread_spin_lock(&m_lastLocationGuard); - m_lastLocation = copy.m_lastLocation; - pthread_spin_unlock(&m_lastLocationGuard); - - if (copy.m_state == InProcess) { - pthread_spin_lock(&m_stateGuard); - m_state = m_recognitionObject.isEmpty() ? 
Invalid : Undetected; - pthread_spin_unlock(&m_stateGuard); - } else { - pthread_spin_lock(&m_stateGuard); - m_state = copy.m_state; - pthread_spin_unlock(&m_stateGuard); - } - - pthread_mutex_unlock(lowerMutex); - pthread_mutex_unlock(higherMutex); - } - - return *this; -} - -int ImageTrackingModel::save(const char *fileName) const -{ - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); - - std::string filePath; - filePath += prefix_path; - filePath += fileName; - - /* check the directory is available */ - std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/')); - if (access(prefix_path_check.c_str(), F_OK)) { - LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefix_path_check.c_str()); - - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - std::ofstream out; - out.open(filePath.c_str()); - - if (!out.is_open()) { - LOGE("[%s] Can't create/open file.", __FUNCTION__); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } - - out << (*this); - - out.close(); - LOGI("[%s] Image tracking model is saved.", __FUNCTION__); - - return MEDIA_VISION_ERROR_NONE; -} - -int ImageTrackingModel::load(const char *fileName) -{ - /* find directory */ - std::string prefix_path = std::string(app_get_data_path()); - LOGD("prefix_path: %s", prefix_path.c_str()); - - std::string filePath; - filePath += prefix_path; - filePath += fileName; - - if (access(filePath.c_str(), F_OK)) { - LOGE("Can't load tracking model. 
Path[%s] doesn't existed.", filePath.c_str()); - - return MEDIA_VISION_ERROR_INVALID_PATH; - } - - std::ifstream in; - in.open(filePath.c_str()); - - if (!in.is_open()) { - LOGE("[%s] Can't open file.", __FUNCTION__); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } - - in >> (*this); - - if (!in.good()) { - LOGE("[%s] Unexpected end of file.", __FUNCTION__); - return MEDIA_VISION_ERROR_PERMISSION_DENIED; - } - - in.close(); - LOGI("[%s] Image tracking model is loaded.", __FUNCTION__); - - return MEDIA_VISION_ERROR_NONE; -} - -bool ImageTrackingModel::isDetected() const -{ - bool result = false; - - pthread_spin_lock(&m_stateGuard); - result = (m_state == Tracked); - pthread_spin_unlock(&m_stateGuard); - - return result; -} - -std::vector ImageTrackingModel::getLastlocation() const -{ - std::vector result; - - pthread_spin_lock(&m_lastLocationGuard); - result = m_lastLocation; - pthread_spin_unlock(&m_lastLocationGuard); - - return result; -} - -#define STATE_UNSEEN_IO_ID 0 -#define STATE_VISIBLE_IO_ID 1 - -std::ostream& operator << (std::ostream& os, const ImageTrackingModel::State& state) -{ - if (ImageTrackingModel::Tracked == state) { - os << STATE_VISIBLE_IO_ID; - } else { - os << STATE_UNSEEN_IO_ID; - } - - return os; -} - -std::istream& operator >> (std::istream& is, ImageTrackingModel::State& state) -{ - int stateId = -1; - - is >> stateId; - - if (STATE_VISIBLE_IO_ID == stateId) { - state = ImageTrackingModel::Tracked; - } else { - state = ImageTrackingModel::Undetected; - } - - return is; -} - -#undef STATE_UNSEEN_IO_ID -#undef STATE_VISIBLE_IO_ID - -std::ostream& operator << (std::ostream& os, const ImageTrackingModel& obj) -{ - os << std::setprecision(7); - - pthread_mutex_lock(&obj.m_globalGuard); - - os << obj.m_recognitionObject; - - os << obj.m_lastLocation.size(); - for (size_t pointNum = 0u; pointNum < obj.m_lastLocation.size(); ++pointNum) { - os << ' ' << obj.m_lastLocation[pointNum].x << ' ' << obj.m_lastLocation[pointNum].y; - } - os << 
'\n'; - - os << obj.m_state << '\n'; - - pthread_mutex_unlock(&obj.m_globalGuard); - - return os; -} - -std::istream& operator >> (std::istream& is, ImageTrackingModel& obj) -{ -#define MEDIA_VISION_CHECK_IFSTREAM \ - if (!is.good()) { \ - return is; \ - } - - ImageTrackingModel temporal; - - is >> obj.m_recognitionObject; - MEDIA_VISION_CHECK_IFSTREAM - - size_t lastLocationAmount = 0u; - is >> lastLocationAmount; - MEDIA_VISION_CHECK_IFSTREAM - - temporal.m_lastLocation.resize(lastLocationAmount); - for (size_t pointNum = 0u; pointNum < lastLocationAmount; ++pointNum) { - is >> temporal.m_lastLocation[pointNum].x; - MEDIA_VISION_CHECK_IFSTREAM - is >> temporal.m_lastLocation[pointNum].y; - MEDIA_VISION_CHECK_IFSTREAM - } - - is >> temporal.m_state; - MEDIA_VISION_CHECK_IFSTREAM - - if (temporal.m_recognitionObject.isEmpty()) { - temporal.m_state = ImageTrackingModel::Invalid; - } - - obj = temporal; - - return is; -} - -} /* Image */ -} /* MediaVision */ diff --git a/mv_image/image/src/Recognition/ImageObject.cpp b/mv_image/image/src/Recognition/ImageObject.cpp new file mode 100644 index 0000000..ac6569f --- /dev/null +++ b/mv_image/image/src/Recognition/ImageObject.cpp @@ -0,0 +1,376 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Recognition/ImageObject.h" + +#include "ImageMathUtil.h" + +#include "Features/FeatureExtractor.h" +#include "Features/BasicExtractorFactory.h" +#include "Features/ORBExtractorFactory.h" + +#include "mv_private.h" +#include "mv_common.h" + +#include + +#include +#include + +#include +#include + +namespace MediaVision { +namespace Image { +ImageObject::ImageObject() : + m_features(), + m_isEmpty(true), + m_isLabeled(false), + m_label(0) +{ + ; /* NULL */ +} + +ImageObject::ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params) : + m_featureExtractingParams(), + m_features(), + m_isEmpty(true), + m_isLabeled(false), + m_label(0) +{ + fill(image, params); +} + +ImageObject::ImageObject(const ImageObject& copy) : + m_featureExtractingParams(copy.m_featureExtractingParams), + m_features(copy.m_features), + m_isEmpty(copy.m_isEmpty), + m_isLabeled(copy.m_isLabeled), + m_label(copy.m_label), + m_boundingContour(copy.m_boundingContour) +{ + ; /* NULL */ +} + +ImageObject& ImageObject::operator=(const ImageObject& copy) +{ + if (this != ©) { + m_isEmpty = copy.m_isEmpty; + m_isLabeled = copy.m_isLabeled; + m_label = copy.m_label; + m_boundingContour = copy.m_boundingContour; + + m_features = copy.m_features; + } + + return *this; +} + +ImageObject::~ImageObject() +{ + ; /* NULL */ +} + +void ImageObject::fill( + const cv::Mat& image, + const FeaturesExtractingParams& params, + const std::vector& roi) +{ + m_isEmpty = false; + + if (!roi.empty()) { + m_boundingContour = roi; + } else { + m_boundingContour.resize(NumberOfQuadrangleCorners); + + m_boundingContour[0].x = 0.f; + m_boundingContour[0].y = 0.f; + + m_boundingContour[1].x = image.cols; + m_boundingContour[1].y = 0.f; + + m_boundingContour[2].x = image.cols; + m_boundingContour[2].y = image.rows; + + m_boundingContour[3].x = 0.f; + m_boundingContour[3].y = image.rows; + } + + extractFeatures(image, params, m_boundingContour); + + m_featureExtractingParams = params; + + LOGI("[%s] 
Image object is filled.", __FUNCTION__); +} + +float ImageObject::getRecognitionRate(void) const +{ + return m_features.m_recognitionRate; +} + +void ImageObject::extractFeatures( + const cv::Mat& image, + const FeaturesExtractingParams& params, + const std::vector& roi) +{ + /* TODO: It is advisable to consider the distribution of functional */ + + cv::Ptr extractor; + + if (params.mKeypointType == KT_ORB && + params.mDescriptorType == DT_ORB) { + ORBExtractorFactory extractorFactory; + + extractorFactory.setScaleFactor((float)params.ORB.mScaleFactor); + extractorFactory.setMaximumFeaturesNumber(params.ORB.mMaximumFeaturesNumber); + + extractor = extractorFactory.buildFeatureExtractor(); + } else { + BasicExtractorFactory extractorFactory( + params.mKeypointType, + params.mDescriptorType); + + extractor = extractorFactory.buildFeatureExtractor(); + } + + if (!extractor.empty()) + extractor->extract(image, m_features, roi); +} + +bool ImageObject::isEmpty() const +{ + return (m_features.m_objectKeypoints.empty() || + m_features.m_objectDescriptors.empty()); +} + +void ImageObject::setContour(const std::vector& contour) +{ + m_boundingContour = contour; +} + +void ImageObject::setLabel(int label) +{ + m_isLabeled = true; + m_label = label; +} + +bool ImageObject::getLabel(int& label) const +{ + if (!m_isLabeled) { + LOGW("[%s] Image hasn't label.", __FUNCTION__); + return false; + } + label = m_label; + return true; +} + +int ImageObject::save(const char *fileName) const +{ + std::string filePath; + char *cPath = app_get_data_path(); + if (NULL == cPath) + filePath = fileName; + else + filePath = std::string(cPath) + std::string(fileName); + + std::string prefixPath = filePath.substr(0, filePath.find_last_of('/')); + LOGD("prefixPath: %s", prefixPath.c_str()); + + /* check the directory is available */ + if (access(prefixPath.c_str(), F_OK)) { + LOGE("Can't save image object. 
Path[%s] doesn't existed.", filePath.c_str()); + + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + std::ofstream out; + + out.open(filePath.c_str()); + + if (!out.is_open()) { + LOGE("[%s] Can't create/open file.", __FUNCTION__); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } + + out<<(*this); + + out.close(); + LOGI("[%s] Image object is saved.", __FUNCTION__); + + return MEDIA_VISION_ERROR_NONE; +} + +int ImageObject::load(const char *fileName) +{ + std::string filePath; + char *cPath = app_get_data_path(); + if (NULL == cPath) + filePath = fileName; + else + filePath = std::string(cPath) + std::string(fileName); + + if (access(filePath.c_str(), F_OK)) { + LOGE("Can't load image object model. Path[%s] doesn't existed.", filePath.c_str()); + + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + std::ifstream in; + in.open(filePath.c_str()); + + if (!in.is_open()) { + LOGE("[%s] Can't open file.", __FUNCTION__); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } + + in>>(*this); + + if (!in.good()) { + /* TODO: Provide another error code */ + LOGE("[%s] Unexpected end of file.", __FUNCTION__); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } + + in.close(); + LOGI("[%s] Image object is loaded.", __FUNCTION__); + + return MEDIA_VISION_ERROR_NONE; +} + +std::ostream& operator << (std::ostream& os, const ImageObject& obj) +{ + os<( + descriptorNum, + featureNum)<<' '; + } + } + + return os; +} + +std::istream& operator >> (std::istream& is, ImageObject& obj) +{ + size_t numberOfContourPoints = 0u; + size_t numberOfKeypoints = 0u; + int rows = 0, cols = 0; + int descriptorType = 0; + + ImageObject temporal; + +#define MEDIA_VISION_CHECK_IFSTREAM \ + if (!is.good()) { \ + return is; \ + } + + is>>temporal.m_isEmpty; + MEDIA_VISION_CHECK_IFSTREAM + is>>temporal.m_isLabeled; + MEDIA_VISION_CHECK_IFSTREAM + is>>temporal.m_label; + MEDIA_VISION_CHECK_IFSTREAM + + is>>numberOfContourPoints; + MEDIA_VISION_CHECK_IFSTREAM + + 
temporal.m_boundingContour.resize(numberOfContourPoints); + for (size_t pointNum = 0; pointNum < numberOfContourPoints; ++pointNum) { + is>>temporal.m_boundingContour[pointNum].x; + MEDIA_VISION_CHECK_IFSTREAM + is>>temporal.m_boundingContour[pointNum].y; + MEDIA_VISION_CHECK_IFSTREAM + } + + is>>numberOfKeypoints; + temporal.m_features.m_objectKeypoints.resize(numberOfKeypoints); + for (size_t keypointNum = 0; keypointNum < numberOfKeypoints; ++keypointNum) { + is>>temporal.m_features.m_objectKeypoints[keypointNum].pt.x; + MEDIA_VISION_CHECK_IFSTREAM + is>>temporal.m_features.m_objectKeypoints[keypointNum].pt.y; + MEDIA_VISION_CHECK_IFSTREAM + is>>temporal.m_features.m_objectKeypoints[keypointNum].size; + MEDIA_VISION_CHECK_IFSTREAM + is>>temporal.m_features.m_objectKeypoints[keypointNum].response; + MEDIA_VISION_CHECK_IFSTREAM + is>>temporal.m_features.m_objectKeypoints[keypointNum].angle; + MEDIA_VISION_CHECK_IFSTREAM + is>>temporal.m_features.m_objectKeypoints[keypointNum].octave; + MEDIA_VISION_CHECK_IFSTREAM + is>>temporal.m_features.m_objectKeypoints[keypointNum].class_id; + MEDIA_VISION_CHECK_IFSTREAM + } + + is>>rows; + MEDIA_VISION_CHECK_IFSTREAM + is>>cols; + MEDIA_VISION_CHECK_IFSTREAM + is>>descriptorType; + MEDIA_VISION_CHECK_IFSTREAM + temporal.m_features.m_objectDescriptors = cv::Mat(rows, cols, descriptorType); + int value = 0; + for (int descriptorNum = 0; descriptorNum < rows; ++descriptorNum) { + for (int featureNum = 0; featureNum < cols; ++featureNum) { + is>>value; + MEDIA_VISION_CHECK_IFSTREAM + + temporal.m_features.m_objectDescriptors.at(descriptorNum, featureNum) = + (uchar)value; + } + } + +#undef MEDIA_VISION_CHECK_IFSTREAM + + obj = temporal; + + return is; +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/ImageRecognizer.cpp b/mv_image/image/src/Recognition/ImageRecognizer.cpp similarity index 69% rename from mv_image/image/src/ImageRecognizer.cpp rename to mv_image/image/src/Recognition/ImageRecognizer.cpp 
index 73dd335..ebfc386 100644 --- a/mv_image/image/src/ImageRecognizer.cpp +++ b/mv_image/image/src/Recognition/ImageRecognizer.cpp @@ -14,23 +14,15 @@ * limitations under the License. */ -#include "ImageRecognizer.h" -#include "ImageObject.h" +#include "Recognition/ImageRecognizer.h" +#include "Recognition/ImageObject.h" #include "mv_private.h" namespace MediaVision { namespace Image { -ImageRecognizer::ImageRecognizer( - const cv::Mat& sceneImage, - const FeaturesExtractingParams& params) : - m_scene(sceneImage, params) -{ - ; /* NULL */ -} - ImageRecognizer::ImageRecognizer(const ImageObject& scene) : - m_scene(scene) + m_scene(scene) { ; /* NULL */ } @@ -43,23 +35,24 @@ ImageRecognizer::~ImageRecognizer() bool ImageRecognizer::recognize( const ImageObject& target, const RecognitionParams& params, - std::vector& contour) const + std::vector& contour, + float ignoreFactor) const { cv::Mat homophraphyMatrix; contour.clear(); - if (MinimumNumberOfFeatures > target.m_objectKeypoints.size()) { + if (MinimumNumberOfFeatures > target.m_features.m_objectKeypoints.size()) { LOGW("[%s] Image object can't be recognized (Recognition rate is too small).", __FUNCTION__); return false; } - if (MinimumNumberOfFeatures > m_scene.m_objectKeypoints.size()) { + if (MinimumNumberOfFeatures > m_scene.m_features.m_objectKeypoints.size()) { LOGW("[%s] Scene image can't be analyzed (Too few features for recognition).", __FUNCTION__); return false; } - if(!findHomophraphyMatrix(target, params, homophraphyMatrix)) { + if(!findHomophraphyMatrix(target, params, homophraphyMatrix, ignoreFactor)) { LOGE("[%s] Can't match the features.", __FUNCTION__); return false; } @@ -81,11 +74,15 @@ bool ImageRecognizer::recognize( bool ImageRecognizer::findHomophraphyMatrix( const ImageObject& target, const RecognitionParams& params, - cv::Mat& homophraphyMatrix) const + cv::Mat& homophraphyMatrix, + float ignoreFactor) const { std::vector matches; - m_matcher.match(target.m_objectDescriptors, 
m_scene.m_objectDescriptors, matches); + m_matcher.match( + target.m_features.m_objectDescriptors, + m_scene.m_features.m_objectDescriptors, + matches); size_t matchesNumber = matches.size(); @@ -98,18 +95,18 @@ bool ImageRecognizer::findHomophraphyMatrix( params.mRequiredMatchesPart * matchesNumber; size_t allowableMatchesNumberError = - params.mAllowableMatchesPartError * requiredMatchesNumber; - - if ((matchesNumber - allowableMatchesNumberError) > - (size_t)params.mMinMatchesNumber && - (requiredMatchesNumber + allowableMatchesNumberError) < - matchesNumber) { - if ((requiredMatchesNumber - allowableMatchesNumberError) < - (size_t)params.mMinMatchesNumber) { - if ((requiredMatchesNumber + allowableMatchesNumberError) > + params.mTolerantMatchesPartError * requiredMatchesNumber; + + if (matchesNumber - allowableMatchesNumberError > + (size_t)params.mMinMatchesNumber && + requiredMatchesNumber + allowableMatchesNumberError < + matchesNumber) { + if (requiredMatchesNumber - allowableMatchesNumberError < (size_t)params.mMinMatchesNumber) { + if (requiredMatchesNumber + allowableMatchesNumberError > + (size_t)params.mMinMatchesNumber) { requiredMatchesNumber = ((size_t)params.mMinMatchesNumber + - requiredMatchesNumber + allowableMatchesNumberError) / 2; + requiredMatchesNumber + allowableMatchesNumberError) / 2; allowableMatchesNumberError = requiredMatchesNumber- (size_t)params.mMinMatchesNumber + @@ -128,13 +125,12 @@ bool ImageRecognizer::findHomophraphyMatrix( requiredMatchesNumber, allowableMatchesNumberError); - if (filterAmount >= MinimumNumberOfFeatures) { + if (filterAmount >= MinimumNumberOfFeatures) matches.resize(filterAmount); - } else { + else LOGW("[%s] Wrong filtration of feature matches.", __FUNCTION__); - } - matchesNumber = matches.size(); + matchesNumber = matches.size(); } std::vector objectPoints(matchesNumber); @@ -142,12 +138,29 @@ bool ImageRecognizer::findHomophraphyMatrix( for (size_t matchIdx = 0; matchIdx < matchesNumber; ++matchIdx) { 
objectPoints[matchIdx] = - target.m_objectKeypoints[matches[matchIdx].queryIdx].pt; + target.m_features.m_objectKeypoints[matches[matchIdx].queryIdx].pt; scenePoints[matchIdx] = - m_scene.m_objectKeypoints[matches[matchIdx].trainIdx].pt; + m_scene.m_features.m_objectKeypoints[matches[matchIdx].trainIdx].pt; + } + + if (ignoreFactor > FLT_EPSILON) { + const std::vector significantArea = contourResize( + target.m_boundingContour, + ignoreFactor); + + for (size_t matchIdx = 0; matchIdx < objectPoints.size(); ++matchIdx) { + if (!checkAccessory(objectPoints[matchIdx], significantArea)) { + objectPoints.erase(objectPoints.begin() + matchIdx); + scenePoints.erase(scenePoints.begin() + matchIdx); + --matchIdx; + } + } } + if (objectPoints.size() < MinimumNumberOfFeatures) + return false; + homophraphyMatrix = cv::findHomography(objectPoints, scenePoints, CV_RANSAC); return true; @@ -159,9 +172,8 @@ size_t ImageRecognizer::matchesSelection( { size_t sizeOfExamples = examples.size(); - if ((filterAmount + allowableError) > sizeOfExamples) { + if ((filterAmount + allowableError) > sizeOfExamples) return examples.size(); - } int startLeftLimit = 0; int startRightLimit = sizeOfExamples - 1; @@ -175,44 +187,39 @@ size_t ImageRecognizer::matchesSelection( while (true) { if (leftLimit >= rightLimit) { - if (leftLimit < (requiredNumber - (int)allowableError)) { + if (leftLimit < (requiredNumber - (int)allowableError)) leftLimit = requiredNumber + (int)allowableError; - } break; } supportElement = computeLinearSupportElement(examples, requiredNumber, - leftLimit, rightLimit); + leftLimit, rightLimit); /* Iteration similar quicksort */ while (true) { - /* Search the leftmost element - *which have bigger confidence than support element - */ + /* Search the leftmost element which have bigger confidence than support element */ while (examples[leftLimit].distance <= supportElement && - leftLimit < startRightLimit) { + leftLimit < startRightLimit) { ++leftLimit; } - /* Search the 
rightmost element - *which have smaller confidence than support element - */ + /* Search the rightmost element which have smaller confidence than support element */ while (examples[rightLimit].distance >= supportElement && - rightLimit >= startLeftLimit) { + rightLimit >= startLeftLimit) { --rightLimit; } - if (leftLimit >= rightLimit) { + if (leftLimit >= rightLimit) break; - } /* Swap */ std::swap(examples[leftLimit], examples[rightLimit]); } - if (abs(filterAmount - leftLimit) <= (int)allowableError) { + + if (abs(filterAmount - leftLimit) <= (int)allowableError) break; - } + if ((int)filterAmount > leftLimit) { requiredNumber -= leftLimit - startLeftLimit; @@ -248,9 +255,8 @@ float ImageRecognizer::computeLinearSupportElement(const std::vector } } - /* Linear approximation. f(x) = k*x + b - * f(sizeOfExamples) = maxValue; f(1) = minValue; - */ + /* Linear approximation. f(x) = k*x + b */ + /* f(sizeOfExamples) = maxValue; f(1) = minValue; */ const float b = (maxValue - minValue * sizeOfExamples) / (1 - sizeOfExamples); const float k = minValue - b; @@ -261,8 +267,10 @@ float ImageRecognizer::computeLinearSupportElement(const std::vector bool ImageRecognizer::isPossibleQuadrangleCorners( const cv::Point2f corners[NumberOfQuadrangleCorners]) { - static const float Epsilon = cv::TermCriteria::EPS; - static const float MinSizeOfDetectedArea = 30.f; + static const float Epsilon = 0.1f; + + /* TODO: move the MinSizeOfDetectedArea out of the ImageRecognizer */ + static const float MinSizeOfDetectedArea = 64.f; const float firstSemiArea = getTriangleArea(corners[0], corners[2], corners[1]) + getTriangleArea(corners[0], corners[2], corners[3]); @@ -271,9 +279,8 @@ bool ImageRecognizer::isPossibleQuadrangleCorners( getTriangleArea(corners[1], corners[3], corners[0]); if (Epsilon < fabs(firstSemiArea - secondSemiArea) || - MinSizeOfDetectedArea > (firstSemiArea + secondSemiArea)) { + MinSizeOfDetectedArea > (firstSemiArea + secondSemiArea)) return false; - } return true; 
} diff --git a/mv_image/image/src/Tracking/AsyncTracker.cpp b/mv_image/image/src/Tracking/AsyncTracker.cpp new file mode 100644 index 0000000..5ae18ae --- /dev/null +++ b/mv_image/image/src/Tracking/AsyncTracker.cpp @@ -0,0 +1,184 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Tracking/AsyncTracker.h" + +namespace MediaVision { +namespace Image { +AsyncTracker::AsyncTracker(const AsyncTracker& copy) : + m_baseTracker(copy.m_baseTracker.obj->clone()), + m_result(copy.m_result), + m_isRun(false), + m_isUpdated(copy.m_isUpdated), + m_copyingPolicy(copy.m_copyingPolicy), + m_thread(0) +{ + pthread_mutex_init(&m_globalGuard, NULL); + pthread_spin_init(&m_resultGuard, PTHREAD_PROCESS_SHARED); + pthread_spin_init(&m_isRunGuard, PTHREAD_PROCESS_SHARED); + pthread_spin_init(&m_isUpdatedGuard, PTHREAD_PROCESS_SHARED); +} + +AsyncTracker::AsyncTracker( + cv::Ptr baseTracker, + bool copyingPolicy) : + m_baseTracker(baseTracker), + m_result(), + m_isRun(false), + m_isUpdated(false), + m_copyingPolicy(copyingPolicy), + m_thread(0) +{ + pthread_mutex_init(&m_globalGuard, NULL); + pthread_spin_init(&m_resultGuard, PTHREAD_PROCESS_SHARED); + pthread_spin_init(&m_isRunGuard, PTHREAD_PROCESS_SHARED); + pthread_spin_init(&m_isUpdatedGuard, PTHREAD_PROCESS_SHARED); +} + +AsyncTracker::~AsyncTracker() +{ + if(isRun()) + pthread_join(m_thread, NULL); + + 
pthread_mutex_destroy(&m_globalGuard); + pthread_spin_destroy(&m_resultGuard); + pthread_spin_destroy(&m_isRunGuard); + pthread_spin_destroy(&m_isUpdatedGuard); +} + +bool AsyncTracker::track( + const cv::Mat& frame, + std::vector& result) +{ + while (pthread_mutex_trylock(&m_globalGuard) != 0) { + return getResult(result); + } + + pthread_spin_lock(&m_isRunGuard); + m_isRun = true; + pthread_spin_unlock(&m_isRunGuard); + + if (m_copyingPolicy) + m_frame = frame.clone(); + else + m_frame = frame; + + const int err = pthread_create(&m_thread, NULL, asyncTrack, this); + + if (0 == err) + return getResult(result); + + pthread_spin_lock(&m_isRunGuard); + m_isRun = false; + pthread_spin_unlock(&m_isRunGuard); + + pthread_mutex_unlock(&m_globalGuard); + + return getResult(result); +} + +void AsyncTracker::reinforcement(const std::vector& location) +{ + /* TODO: Unsafe. Need to redesign. */ + m_baseTracker->reinforcement(location); + + pthread_spin_lock(&m_resultGuard); + m_result = location; + pthread_spin_unlock(&m_resultGuard); +} + +cv::Ptr AsyncTracker::clone() const +{ + return cv::Ptr(new AsyncTracker(*this)); +} + +bool AsyncTracker::baseTrack(std::vector& result) +{ + return m_baseTracker->track(m_frame, result); +} + +void *AsyncTracker::asyncTrack(void *data) +{ + AsyncTracker *tracker = reinterpret_cast(data); + + std::vector result; + tracker->baseTrack(result); + + pthread_spin_lock(&tracker->m_resultGuard); + tracker->m_result = result; + pthread_spin_unlock(&tracker->m_resultGuard); + + pthread_spin_lock(&tracker->m_isUpdatedGuard); + tracker->m_isUpdated = true; + pthread_spin_unlock(&tracker->m_isUpdatedGuard); + + pthread_mutex_unlock(&tracker->m_globalGuard); + + pthread_spin_lock(&tracker->m_isRunGuard); + tracker->m_isRun = false; + pthread_spin_unlock(&tracker->m_isRunGuard); + + return NULL; +} + +bool AsyncTracker::wait() +{ + if(isRun()) { + pthread_join(m_thread, NULL); + return true; + } + return false; +} + +bool AsyncTracker::isRun() +{ + 
bool result = false; + + pthread_spin_lock(&m_isRunGuard); + result = m_isRun; + pthread_spin_unlock(&m_isRunGuard); + + return result; +} + +bool AsyncTracker::isUpdated(std::vector& result) +{ + bool isUpdated = false; + + getResult(result); + + pthread_spin_lock(&m_isUpdatedGuard); + isUpdated = m_isUpdated; + m_isUpdated = false; + pthread_spin_unlock(&m_isUpdatedGuard); + + return isUpdated; +} + +bool AsyncTracker::getResult(std::vector& result) +{ + bool isTracked = false; + + pthread_spin_lock(&m_resultGuard); + isTracked = !m_result.empty(); + result = m_result; + pthread_spin_unlock(&m_resultGuard); + + return isTracked; +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/Tracking/CascadeTracker.cpp b/mv_image/image/src/Tracking/CascadeTracker.cpp new file mode 100644 index 0000000..ed56f09 --- /dev/null +++ b/mv_image/image/src/Tracking/CascadeTracker.cpp @@ -0,0 +1,195 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Tracking/CascadeTracker.h" +#include "Tracking/AsyncTracker.h" + +#include "ImageMathUtil.h" + +namespace MediaVision { +namespace Image { +CascadeTracker::CascadeTracker(float minimumArea) : + m_trackers(), + m_minimumArea(minimumArea) +{ + ; /* NULL */ +} + +CascadeTracker::CascadeTracker(const CascadeTracker& copy) : + m_trackers(), + m_minimumArea(copy.m_minimumArea) +{ + *this = copy; +} + +CascadeTracker::~CascadeTracker() +{ + ; /* NULL */ +} + +bool CascadeTracker::track(const cv::Mat& frame, std::vector& result) +{ + internalReinforcement(); + + std::set::iterator it = m_trackers.begin(); + + for (; it != m_trackers.end(); ++it) { + if (!it->mTracker.obj->track(frame, it->mResult)) { + it->mResult.clear(); + } + } + + return mergeResults(result); +} + +void CascadeTracker::reinforcement(const std::vector& location) +{ + std::set::iterator it = m_trackers.begin(); + + for (; it != m_trackers.end(); ++it) + it->mTracker.obj->reinforcement(location); +} + +cv::Ptr CascadeTracker::clone() const +{ + return cv::Ptr(new CascadeTracker(*this)); +} + +CascadeTracker& CascadeTracker::operator=(const CascadeTracker& copy) +{ + if (this != ©) { + this->m_minimumArea = copy.m_minimumArea; + this->m_trackers.clear(); + + std::set::iterator it = copy.m_trackers.begin(); + for (; it != copy.m_trackers.end(); ++it) { + TrackerInfo temp(it->mTracker.obj->clone(), it->mPriority); + temp.mResult = it->mResult; + + m_trackers.insert(temp); + } + } + + return *this; +} + +bool CascadeTracker::enableTracker(cv::Ptr tracker, float priority) +{ + TrackerInfo temp(tracker, priority); + + std::set::iterator it = + std::find(m_trackers.begin(), m_trackers.end(), temp); + + if (it != m_trackers.end()) + m_trackers.erase(it); + + return m_trackers.insert(temp).second; +} + +bool CascadeTracker::disableTracker(cv::Ptr tracker) +{ + TrackerInfo target(tracker, 0); + + std::set::iterator it = + std::find(m_trackers.begin(), m_trackers.end(), target); + + if (it == 
m_trackers.end()) + return false; + + m_trackers.erase(it); + + return true; +} + +void CascadeTracker::internalReinforcement() +{ + std::set::iterator it1 = m_trackers.begin(); + for (; it1 != m_trackers.end(); ++it1) { + bool isUpdated = true; + + /* TODO: Redesign without dynamic_cast */ + AsyncTracker *asyncView = dynamic_cast(it1->mTracker.obj); + if (NULL != asyncView) + isUpdated = asyncView->isUpdated(it1->mResult); + + if (!it1->mResult.empty() && isUpdated) { + const size_t numberOfContourPoints = it1->mResult.size(); + std::vector checkedArea(numberOfContourPoints); + for (size_t i = 0; i < numberOfContourPoints; ++i) { + checkedArea[i].x = it1->mResult[i].x; + checkedArea[i].y = it1->mResult[i].y; + } + + if (getQuadrangleArea(checkedArea.data()) < m_minimumArea) { + it1->mResult = std::vector(0); + it1->mTracker.obj->reinforcement(it1->mResult); + } + + float priority = it1->mPriority; + std::set::iterator it2 = m_trackers.begin(); + + for (; it2 != m_trackers.end(); ++it2) { + if (it1 != it2 && + priority > it2->mPriority) { + it2->mTracker.obj->reinforcement(it1->mResult); + } + } + } + } +} + +bool CascadeTracker::mergeResults(std::vector& result) const +{ + result.clear(); + + std::set::iterator it = m_trackers.begin(); + + float resPriotiry = 0.f; + for (; it != m_trackers.end(); ++it) { + if (result.empty() || resPriotiry > it->mPriority) { + resPriotiry = it->mPriority; + result = it->mResult; + } + } + + return !(result.empty()); +} + +CascadeTracker::TrackerInfo::TrackerInfo(cv::Ptr tracker, float priority) : + mTracker(tracker), + mPriority(priority), + mResult() +{ + ; /* NULL */ +} + +bool CascadeTracker::TrackerInfo::operator<(const TrackerInfo& second) const +{ + return (this->mPriority < second.mPriority); +} + +bool CascadeTracker::TrackerInfo::operator==(const TrackerInfo& second) const +{ + return (this->mTracker == second.mTracker); +} + +bool CascadeTracker::TrackerInfo::operator!=(const TrackerInfo& second) const +{ + return 
!(*this == second); +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp b/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp new file mode 100644 index 0000000..eaf8bef --- /dev/null +++ b/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp @@ -0,0 +1,132 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Tracking/FeatureSubstitutionTracker.h" + +#include "Recognition/ImageRecognizer.h" + +namespace MediaVision { +namespace Image { +FeatureSubstitutionTracker::FeatureSubstitutionTracker( + const FeaturesExtractingParams& featuresExtractingParams, + const RecognitionParams& recognitionParams, + float expectedOffset, + float sceneScalingFactor, + float objectScalingFactor) : + m_isInit(false), + m_target(), + m_location(), + m_featureExtractingParams(featuresExtractingParams), + m_recogParams(recognitionParams), + m_expectedOffset(expectedOffset), + m_sceneScalingFactor(sceneScalingFactor), + m_objectScalingFactor(objectScalingFactor) +{ + ; /* NULL */ +} + +bool FeatureSubstitutionTracker::track( + const cv::Mat& frame, + std::vector& result) +{ + std::vector contour; + size_t numberOfContourPoints = m_location.size(); + contour.resize(numberOfContourPoints); + for(size_t i = 0u; i < numberOfContourPoints; ++i) { + contour[i].x = m_location[i].x; + contour[i].y = m_location[i].y; + } + + if 
(!m_isInit) { + if (m_location.empty()) { + return false; + } else { + m_target = new ImageObject; + m_target->fill( + frame, + m_featureExtractingParams, + contourResize(contour, m_objectScalingFactor)); + m_target->setContour(contour); + m_isInit = true; + result = m_location; + return true; + } + } + + cv::Ptr sceneImageObject = new ImageObject; + + sceneImageObject->fill(frame, m_featureExtractingParams, computeExpectedArea()); + + ImageRecognizer recognizer(*sceneImageObject.obj); + + const bool isTracked = + recognizer.recognize( + *m_target.obj, + m_recogParams, + contour, + m_objectScalingFactor); + + if (isTracked) { + numberOfContourPoints = contour.size(); + m_location.resize(numberOfContourPoints); + for(size_t i = 0u; i < numberOfContourPoints; ++i) { + m_location[i].x = (int)contour[i].x; + m_location[i].y = (int)contour[i].y; + } + + result = m_location; + m_target = sceneImageObject; + m_target->setContour(contour); + } else { + m_location.clear(); + m_isInit = false; + } + + return isTracked; +} + +void FeatureSubstitutionTracker::reinforcement(const std::vector& location) +{ + m_isInit = false; + + if (location.size() < 3) { + m_location.clear(); + return; + } + + m_location = location; +} + +cv::Ptr FeatureSubstitutionTracker::clone() const +{ + return cv::Ptr(new FeatureSubstitutionTracker(*this)); +} + +std::vector FeatureSubstitutionTracker::computeExpectedArea() +{ + std::vector contour; + const size_t numberOfContourPoints = m_location.size(); + contour.resize(numberOfContourPoints); + for(size_t i = 0u; i < numberOfContourPoints; ++i) { + contour[i].x = m_location[i].x; + contour[i].y = m_location[i].y; + } + + return contourResize(contour, m_sceneScalingFactor); +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/ImageContourStabilizator.cpp b/mv_image/image/src/Tracking/ImageContourStabilizator.cpp similarity index 61% rename from mv_image/image/src/ImageContourStabilizator.cpp rename to 
mv_image/image/src/Tracking/ImageContourStabilizator.cpp index 2b6dddc..00a25a0 100644 --- a/mv_image/image/src/ImageContourStabilizator.cpp +++ b/mv_image/image/src/Tracking/ImageContourStabilizator.cpp @@ -14,32 +14,19 @@ * limitations under the License. */ -#include "ImageContourStabilizator.h" #include "ImageMathUtil.h" +#include "Tracking/ImageContourStabilizator.h" + #include "mv_private.h" namespace MediaVision { namespace Image { ImageContourStabilizator::ImageContourStabilizator() : - m_movingHistory(MovingHistoryAmount), - m_priorities(MovingHistoryAmount) + m_movingHistory(), + m_priorities() { reset(); - - /* increasing the stabilization rate */ - m_speeds.push_back(0.3f); - m_speeds.push_back(0.4f); - m_speeds.push_back(0.5f); - m_speeds.push_back(0.6f); - m_speeds.push_back(0.8f); - m_speeds.push_back(1.f); - - /* calculation of priorities for positions in the moving history */ - for (size_t i = 0u; i < MovingHistoryAmount; ++i) { - /* linear dependence on the elapsed time */ - m_priorities[i] = (i + 1) / ((MovingHistoryAmount + 1) * MovingHistoryAmount / 2.0f); - } } void ImageContourStabilizator::reset(void) @@ -47,57 +34,58 @@ void ImageContourStabilizator::reset(void) m_isPrepared = false; m_tempContourIndex = -1; m_currentHistoryAmount = 0; - - LOGI("Outlier is detected."); + m_historyAmount = 0; + m_movingHistory.clear(); } -bool ImageContourStabilizator::stabilize( +ImageContourStabilizator::StabilizationError ImageContourStabilizator::stabilize( std::vector& contour, - const StabilizationParams& /*params*/) + const StabilizationParams& params) { + if (!updateSettings(params)) { + LOGW("Not stabilized. Invalid settings."); + + return InvalidSettings; + } + /* current implementation stabilizes quadrangles only */ if (contour.size() != NumberOfQuadrangleCorners) { - LOGW("Not stabilized. Empty contour."); + LOGW("Not stabilized. 
Unsupported contour type."); - return false; + return UnsupportedContourType; } m_currentCornersSpeed.resize(contour.size(), 0); - if (contour[0].x == contour[1].x && contour[0].y == contour[1].y) { - LOGW("Not stabilized. Invalid contour."); - - return false; - } - - if (m_lastStabilizedContour.empty()) { + if (m_lastStabilizedContour.empty()) m_lastStabilizedContour = contour; - } std::vector stabilizedState; /* history amount < 2 it's no sense */ - if (MovingHistoryAmount >= 2) { + if (m_historyAmount >= 2) { /* first sample */ if (m_tempContourIndex == -1) { - m_movingHistory[1] = contour; + m_movingHistory.push_back(contour); + m_movingHistory.push_back(contour); + m_tempContourIndex = 1; - m_currentHistoryAmount = 1; + m_currentHistoryAmount = 2; LOGI("Not stabilized. Too small moving history. (the first one)"); - return false; + return TooShortMovingHistory; } /* too short moving history */ - if (m_currentHistoryAmount < MovingHistoryAmount - 1) { + if (m_currentHistoryAmount < m_historyAmount) { ++m_currentHistoryAmount; ++m_tempContourIndex; - m_movingHistory[m_tempContourIndex] = contour; + m_movingHistory.push_back(contour); - LOGI("Not stabilized. Too small moving history."); + LOGI("Not stabilized. Too short moving history."); - return false; + return TooShortMovingHistory; } /* saving into moving history */ @@ -105,7 +93,7 @@ bool ImageContourStabilizator::stabilize( m_movingHistory.push_back(contour); if (!m_isPrepared) { - m_lastStabilizedContour = m_movingHistory[MovingHistoryAmount - 2]; + m_lastStabilizedContour = m_movingHistory[m_historyAmount - 2]; LOGI("Not stabilized. Too small moving history. 
(the last one)"); @@ -115,14 +103,14 @@ bool ImageContourStabilizator::stabilize( /* stabilization */ stabilizedState = computeStabilizedQuadrangleContour(); - if (stabilizedState.empty()) { + if (stabilizedState.empty()) stabilizedState = m_lastStabilizedContour; - } } else { stabilizedState = m_lastStabilizedContour; } - const float tolerantShift = getQuadrangleArea(contour.data()) * 0.00006f + 1.3f; + const float tolerantShift = getQuadrangleArea(contour.data()) * + m_tolerantShift + m_tolerantShiftExtra; const size_t contourSize = stabilizedState.size(); for (size_t i = 0u; i < contourSize; ++i) { @@ -150,6 +138,77 @@ bool ImageContourStabilizator::stabilize( LOGI("Contour successfully stabilized."); + return Successfully; +} + +bool ImageContourStabilizator::updateSettings(const StabilizationParams& params) +{ + if (params.mHistoryAmount < 1) + return false; + + m_tolerantShift = (float)params.mTolerantShift; + m_tolerantShiftExtra = (float)params.mTolerantShiftExtra; + + if (m_historyAmount != (size_t)params.mHistoryAmount) { + m_historyAmount = (size_t)params.mHistoryAmount; + + m_priorities.resize(m_historyAmount); + + /* calculation of priorities for positions in the moving history */ + for (size_t i = 0u; i < m_historyAmount; ++i) { + /* linear dependence on the elapsed time */ + m_priorities[i] = ((i + 1) * 2.0f) / + ((m_historyAmount + 1) * m_historyAmount); + } + } + + while (m_historyAmount > (size_t)params.mHistoryAmount) { + m_movingHistory.pop_front(); + --m_historyAmount; + } + + if ((size_t)params.mHistoryAmount > m_historyAmount) { + /* TODO: save current moving history */ + + m_tempContourIndex = -1; + m_historyAmount = (size_t)params.mHistoryAmount; + m_movingHistory.clear(); + } + + bool speedIsValid = false; + if (m_speeds.size() > 1) { + const static float Epsilon = 0.0001f; + if (fabs(m_speeds[0] - params.mStabilizationSpeed) < Epsilon && + fabs((m_speeds[1] - m_speeds[0]) - + params.mStabilizationAcceleration) < Epsilon) { + speedIsValid 
= true; + } + } + + if (!speedIsValid) { + m_speeds.clear(); + + int speedsSize = (int)((1 - params.mStabilizationSpeed) / + params.mStabilizationAcceleration) + 1; + + if (speedsSize < 1) { + m_speeds.push_back(1.0f); + } else { + static const int MaxSpeedsSize = 25; + + if (speedsSize > MaxSpeedsSize) + speedsSize = MaxSpeedsSize; + + float speed = std::max(0.f, + std::min((float)params.mStabilizationSpeed, 1.0f)); + + for (int i = 0; i < speedsSize; ++i) { + m_speeds.push_back(speed); + speed += params.mStabilizationAcceleration; + } + } + } + return true; } @@ -157,11 +216,11 @@ std::vector ImageContourStabilizator::computeStabilizedQuadrangleCo { /* final contour */ std::vector stabilizedState( - NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f)); + NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f)); /* calculation the direction of contour corners to a new location */ std::vector directions( - NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f)); + NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f)); /* computing expected directions and outliers searching */ bool expressiveTime = false; @@ -170,25 +229,25 @@ std::vector ImageContourStabilizator::computeStabilizedQuadrangleCo std::vector directionsToLastPos(NumberOfQuadrangleCorners); for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j) { /* calculation the moving directions and computing average direction */ - std::vector trackDirections(MovingHistoryAmount - 1); + std::vector trackDirections(m_historyAmount - 1); cv::Point2f averageDirections(0.f, 0.f); - for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) { + for (size_t i = 0u; i < m_historyAmount - 1; ++i) { averageDirections.x += (trackDirections[i].x = m_movingHistory[i+1][j].x - m_movingHistory[i][j].x) / - (MovingHistoryAmount - 1); + (m_historyAmount - 1); averageDirections.y += (trackDirections[i].y = m_movingHistory[i+1][j].y - m_movingHistory[i][j].y) / - (MovingHistoryAmount - 1); + (m_historyAmount - 1); } /* calculation a deviations and select 
outlier */ - std::vector directionDistances(MovingHistoryAmount - 1); + std::vector directionDistances(m_historyAmount - 1); float maxDistance = 0.f, prevMaxDistance = 0.f; int idxWithMaxDistance = 0; int numExpressiveDirection = -1; - for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) { + for (size_t i = 0u; i < m_historyAmount - 1; ++i) { directionDistances[i] = getDistance( trackDirections[i], averageDirections); @@ -213,7 +272,7 @@ std::vector ImageContourStabilizator::computeStabilizedQuadrangleCo /* final direction computing */ float summPriority = 0.f; - for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) { + for (size_t i = 0u; i < m_historyAmount - 1; ++i) { if ((int)i != numExpressiveDirection) { directions[j].x += trackDirections[i].x * m_priorities[i]; directions[j].y += trackDirections[i].y * m_priorities[i]; @@ -221,22 +280,21 @@ std::vector ImageContourStabilizator::computeStabilizedQuadrangleCo } } - if (numExpressiveDirection == MovingHistoryAmount - 1) { - expressiveTime = true; - } + if (numExpressiveDirection == (int)(m_historyAmount - 1)) + expressiveTime = true; summPriorityWithoutToLastPos[j] = summPriority; - priorityToLastPos[j] = m_priorities[MovingHistoryAmount - 1]; + priorityToLastPos[j] = m_priorities[m_historyAmount - 1]; directions[j].x -= directionsToLastPos[j].x = - (m_lastStabilizedContour[j].x - - m_movingHistory[MovingHistoryAmount - 1][j].x) * - priorityToLastPos[j]; + (m_lastStabilizedContour[j].x - + m_movingHistory[m_historyAmount - 1][j].x) * + priorityToLastPos[j]; directions[j].y -= directionsToLastPos[j].y = - (m_lastStabilizedContour[j].y - - m_movingHistory[MovingHistoryAmount - 1][j].y) * - priorityToLastPos[j]; + (m_lastStabilizedContour[j].y - + m_movingHistory[m_historyAmount - 1][j].y) * + priorityToLastPos[j]; summPriority += priorityToLastPos[j]; @@ -248,12 +306,12 @@ std::vector ImageContourStabilizator::computeStabilizedQuadrangleCo for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j) { if 
(expressiveTime) { directions[j].x *= (summPriorityWithoutToLastPos[j] + - priorityToLastPos[j]); + priorityToLastPos[j]); directions[j].x -= directionsToLastPos[j].x; directions[j].x /= summPriorityWithoutToLastPos[j]; directions[j].y *= (summPriorityWithoutToLastPos[j] + - priorityToLastPos[j]); + priorityToLastPos[j]); directions[j].y -= directionsToLastPos[j].y; directions[j].y /= summPriorityWithoutToLastPos[j]; } diff --git a/mv_image/image/src/Tracking/ImageTrackingModel.cpp b/mv_image/image/src/Tracking/ImageTrackingModel.cpp new file mode 100644 index 0000000..6240ea8 --- /dev/null +++ b/mv_image/image/src/Tracking/ImageTrackingModel.cpp @@ -0,0 +1,362 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Tracking/ImageTrackingModel.h" + +#include "Tracking/CascadeTracker.h" +#include "Tracking/RecognitionBasedTracker.h" +#include "Tracking/FeatureSubstitutionTracker.h" +#include "Tracking/AsyncTracker.h" +#include "Tracking/MFTracker.h" + +#include "mv_private.h" +#include "mv_common.h" + +#include + +#include +#include + +namespace MediaVision { +namespace Image { +ImageTrackingModel::ImageTrackingModel() : + m_target(), + m_tracker(), + m_stabilizator(), + m_location(), + m_stabilizationParams() +{ + ; /* NULL */ +} + +ImageTrackingModel::ImageTrackingModel(const ImageTrackingModel& copy) : + m_target(copy.m_target), + m_tracker(copy.m_tracker.empty()? 
NULL: copy.m_tracker->clone()), + m_stabilizator(copy.m_stabilizator), + m_location(copy.m_location), + m_stabilizationParams(copy.m_stabilizationParams) +{ + ; /* NULL */ +} + +void ImageTrackingModel::setTarget(const ImageObject& target) +{ + /* TODO: Here are all the settings. + * This can be transferred to configuration file. + * + * Parameters of recognition based tracker + */ + + FeaturesExtractingParams orbFeatureExtractingParams; + + orbFeatureExtractingParams.mKeypointType = KT_ORB; + orbFeatureExtractingParams.mDescriptorType = DT_ORB; + orbFeatureExtractingParams.ORB.mMaximumFeaturesNumber = 5000; + orbFeatureExtractingParams.ORB.mScaleFactor = 1.15; + + RecognitionParams orbRecogParams; + + orbRecogParams.mMinMatchesNumber = 70; + orbRecogParams.mRequiredMatchesPart = 0.005; + orbRecogParams.mTolerantMatchesPartError = 0.1; + + /* Parameters of feature substitution tracker */ + + FeaturesExtractingParams gfttWbriefFeatureExtractingParams; + + gfttWbriefFeatureExtractingParams.mKeypointType = KT_GFTT; + gfttWbriefFeatureExtractingParams.mDescriptorType = DT_BRIEF; + + RecognitionParams gfttWbriefRecogParams; + + gfttWbriefRecogParams.mMinMatchesNumber = 30; + gfttWbriefRecogParams.mRequiredMatchesPart = 0.05; + gfttWbriefRecogParams.mTolerantMatchesPartError = 0.1; + + const float expectedOffset = 1.0f; + + /* Parameters of median flow tracker */ + + MFTracker::Params medianflowTrackingParams; + + medianflowTrackingParams.mPointsInGrid = 10; + medianflowTrackingParams.mWindowSize = cv::Size(16, 16); + medianflowTrackingParams.mPyrMaxLevel = 16; + + /* Parameters of cascade tracker */ + + const float recognitionBasedTrackerPriotity = 1.0f; + const float featureSubstitutionTrackerPriotity = 0.6f; + const float medianFlowTrackerPriotity = 0.1f; + + /* Parameters of stabilization */ + + m_stabilizationParams.mIsEnabled = true; + m_stabilizationParams.mHistoryAmount = 3; + m_stabilizationParams.mTolerantShift = 0.00006; + 
m_stabilizationParams.mTolerantShiftExtra = 1.3; + m_stabilizationParams.mStabilizationSpeed = 0.3; + m_stabilizationParams.mStabilizationAcceleration = 0.1; + + /* Parameters definition is finished */ + + /* Creating a basic tracker which will have other trackers */ + + cv::Ptr mainTracker = new CascadeTracker; + + /* Adding asynchronous recognition based tracker */ + + cv::Ptr recogTracker = + new RecognitionBasedTracker( + target, + orbFeatureExtractingParams, + orbRecogParams); + + cv::Ptr asyncRecogTracker = + new AsyncTracker( + recogTracker, + true); + + mainTracker->enableTracker( + asyncRecogTracker, + recognitionBasedTrackerPriotity); + + /* Adding asynchronous feature substitution based tracker */ + + cv::Ptr substitutionTracker = + new FeatureSubstitutionTracker( + gfttWbriefFeatureExtractingParams, + gfttWbriefRecogParams, + expectedOffset); + + cv::Ptr asyncSubstitutionTracker = + new AsyncTracker( + substitutionTracker, + true); + + mainTracker->enableTracker( + asyncSubstitutionTracker, + featureSubstitutionTrackerPriotity); + + /* Adding median flow tracker */ + + cv::Ptr mfTracker = new MFTracker(medianflowTrackingParams); + + mainTracker->enableTracker( + mfTracker, + medianFlowTrackerPriotity); + + m_tracker = mainTracker; + m_target = target; +} + +bool ImageTrackingModel::isValid() const +{ + return !(m_target.isEmpty()); +} + +bool ImageTrackingModel::track(const cv::Mat& frame, std::vector& result) +{ + result.clear(); + + if (m_tracker.empty()) + return false; + + if (!(m_tracker->track(frame, m_location))) { + m_stabilizator.reset(); + return false; + } + + const size_t numberOfContourPoints = m_location.size(); + std::vector stabilizedContour(numberOfContourPoints); + for (size_t i = 0; i < numberOfContourPoints; ++i) { + stabilizedContour[i].x = (float)m_location[i].x; + stabilizedContour[i].y = (float)m_location[i].y; + } + + m_stabilizator.stabilize(stabilizedContour, m_stabilizationParams); + for (size_t i = 0; i < 
numberOfContourPoints; ++i) { + m_location[i].x = (int)stabilizedContour[i].x; + m_location[i].y = (int)stabilizedContour[i].y; + } + + result = m_location; + + return true; +} + +void ImageTrackingModel::refresh(void) +{ + m_location.clear(); +} + +ImageTrackingModel& ImageTrackingModel::operator=(const ImageTrackingModel& copy) +{ + if (this != ©) { + m_target = copy.m_target; + if (!copy.m_tracker.empty()) + m_tracker = copy.m_tracker->clone(); + else + m_tracker.release(); + + m_stabilizator = copy.m_stabilizator; + m_location = copy.m_location; + m_stabilizationParams = copy.m_stabilizationParams; + } + + return *this; +} + +int ImageTrackingModel::save(const char *filepath) const +{ + std::string filePath; + char *cPath = app_get_data_path(); + if (NULL == cPath) + filePath = std::string(filepath); + else + filePath = std::string(cPath) + std::string(filepath); + + std::string prefixPath = filePath.substr(0, filePath.find_last_of('/')); + LOGD("prefixPath: %s", prefixPath.c_str()); + + /* check the directory is available */ + if (access(prefixPath.c_str(),F_OK)) { + LOGE("Can't save tracking model. Path[%s] doesn't existed.", filePath.c_str()); + + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + std::ofstream out; + out.open(filePath.c_str()); + + if (!out.is_open()) { + LOGE("[%s] Can't create/open file.", __FUNCTION__); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } + + out<<(*this); + + out.close(); + LOGI("[%s] Image tracking model is saved.", __FUNCTION__); + + return MEDIA_VISION_ERROR_NONE; +} + +int ImageTrackingModel::load(const char *filepath) +{ + std::string filePath; + char *cPath = app_get_data_path(); + if (NULL == cPath) + filePath = std::string(filepath); + else + filePath = std::string(cPath) + std::string(filepath); + + if (access(filePath.c_str(),F_OK)) { + LOGE("Can't load tracking model. 
Path[%s] doesn't existed.", filepath); + + return MEDIA_VISION_ERROR_INVALID_PATH; + } + + std::ifstream in; + in.open(filePath.c_str()); + + if (!in.is_open()) { + LOGE("[%s] Can't open file.", __FUNCTION__); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } + + in>>(*this); + + if (!in.good()) { + LOGE("[%s] Unexpected end of file.", __FUNCTION__); + return MEDIA_VISION_ERROR_PERMISSION_DENIED; + } + + in.close(); + LOGI("[%s] Image tracking model is loaded.", __FUNCTION__); + + return MEDIA_VISION_ERROR_NONE; +} + +std::ostream& operator << (std::ostream& os, const ImageTrackingModel& obj) +{ + os<> (std::istream& is, ImageTrackingModel& obj) +{ +#define MEDIA_VISION_CHECK_IFSTREAM \ + if (!is.good()) { \ + return is; \ + } + + ImageObject target; + std::vector location; + + is>>target; + MEDIA_VISION_CHECK_IFSTREAM + + StabilizationParams params; + is>>params.mIsEnabled; + is>>params.mHistoryAmount; + is>>params.mStabilizationSpeed; + is>>params.mStabilizationAcceleration; + is>>params.mTolerantShift; + is>>params.mTolerantShiftExtra; + + size_t numberOfContourPoints = 0u; + is>>numberOfContourPoints; + MEDIA_VISION_CHECK_IFSTREAM + + location.resize(numberOfContourPoints); + for (size_t pointNum = 0u; pointNum < numberOfContourPoints; ++pointNum) { + is>>location[pointNum].x; + MEDIA_VISION_CHECK_IFSTREAM + is>>location[pointNum].y; + MEDIA_VISION_CHECK_IFSTREAM + } + +#undef MEDIA_VISION_CHECK_IFSTREAM + + obj.m_stabilizationParams = params; + obj.m_location = location; + if (!(target.isEmpty())) { + obj.setTarget(target); + obj.m_tracker->reinforcement(location); + } + + return is; +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/Tracking/MFTracker.cpp b/mv_image/image/src/Tracking/MFTracker.cpp new file mode 100644 index 0000000..9dc20cc --- /dev/null +++ b/mv_image/image/src/Tracking/MFTracker.cpp @@ -0,0 +1,410 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Tracking/MFTracker.h" + +#include + +namespace MediaVision { +namespace Image { +namespace { + const float FloatEps = 10e-6f; + + template + T getMedian(std::vector& values, int size = -1) { + if (size == -1) + size = (int)values.size(); + + std::vector copy(values.begin(), values.begin() + size); + std::sort(copy.begin(),copy.end()); + if(size%2==0) + return (copy[size/2-1]+copy[size/2])/((T)2.0); + else + return copy[(size - 1) / 2]; + + } + + inline float l2distance(cv::Point2f p1, cv::Point2f p2) { + const float dx = p1.x - p2.x; + const float dy = p1.y - p2.y; + return sqrtf(dx * dx + dy * dy); + } +} /* anonymous namespace */ + +MFTracker::Params::Params() +{ + mPointsInGrid = 10; + mWindowSize = cv::Size(3, 3); + mPyrMaxLevel = 5; +} + +MFTracker::MFTracker(Params params) : + m_isInit(false), + m_params(params), + m_termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.3), + m_confidence(0.0f) +{ +} + +bool MFTracker::track(const cv::Mat& frame, std::vector& result) +{ + result.clear(); + + if (!m_isInit) { + if (m_startLocation.empty()) + return false; + + if (!init(frame)) + return false; + } else { + if (!update(frame)) { + m_isInit = false; + m_startLocation.clear(); + return false; + } + } + + const size_t numberOfContourPoints = m_startLocation.size(); + result.resize(numberOfContourPoints); + + for (size_t i = 0; i < numberOfContourPoints; ++i) { + result[i].x = (int)(m_boundingBox.x + + 
m_startLocation[i].x * m_boundingBox.width); + result[i].y = (int)(m_boundingBox.y + + m_startLocation[i].y * m_boundingBox.height); + } + + return true; +} + +void MFTracker::reinforcement(const std::vector& location) +{ + m_isInit = false; + + if (location.size() < 3) { + m_startLocation.clear(); + m_boundingBox.x = 0; + m_boundingBox.y = 0; + m_boundingBox.width = 0; + m_boundingBox.height = 0; + + return; + } + + const cv::Rect_& boundingBox = cv::boundingRect(location); + m_boundingBox = boundingBox; + + const size_t numberOfContourPoints = location.size(); + m_startLocation.resize(numberOfContourPoints); + for (size_t i = 0; i < numberOfContourPoints; ++i) { + m_startLocation[i].x = (location[i].x - boundingBox.x) / boundingBox.width; + m_startLocation[i].y = (location[i].y - boundingBox.y) / boundingBox.height; + } +} + +cv::Ptr MFTracker::clone() const +{ + return cv::Ptr(new MFTracker(*this)); +} + +bool MFTracker::init(const cv::Mat& image) +{ + if (image.empty()) + return false; + + image.copyTo(m_image); + buildOpticalFlowPyramid( + m_image, + m_pyramid, + m_params.mWindowSize, + m_params.mPyrMaxLevel); + + m_isInit = true; + return m_isInit; +} + +bool MFTracker::update(const cv::Mat& image) +{ + if (!m_isInit || image.empty()) + return false; + + /* Handles such behaviour when preparation frame has the size + * different to the tracking frame size. In such case, we resize preparation + *frame and bounding box. 
Then, track as usually: + */ + if (m_image.rows != image.rows || m_image.cols != image.cols) { + const float xFactor = (float) image.cols / m_image.cols; + const float yFactor = (float) image.rows / m_image.rows; + + resize(m_image, m_image, cv::Size(), xFactor, yFactor); + + m_boundingBox.x *= xFactor; + m_boundingBox.y *= yFactor; + m_boundingBox.width *= xFactor; + m_boundingBox.height *= yFactor; + } + + cv::Mat oldImage = m_image; + + cv::Rect_ oldBox = m_boundingBox; + if(!medianFlowImpl(oldImage, image, oldBox)) + return false; + + image.copyTo(m_image); + m_boundingBox = oldBox; + + return true; +} + +bool MFTracker::isInited() const +{ + return m_isInit; +} + +float MFTracker::getLastConfidence() const +{ + return m_confidence; +} + +cv::Rect_ MFTracker::getLastBoundingBox() const +{ + return m_boundingBox; +} + +bool MFTracker::medianFlowImpl( + cv::Mat oldImage_gray, cv::Mat newImage_gray, cv::Rect_& oldBox) +{ + std::vector pointsToTrackOld, pointsToTrackNew; + + const float gridXStep = oldBox.width / m_params.mPointsInGrid; + const float gridYStep = oldBox.height / m_params.mPointsInGrid; + for (int i = 0; i < m_params.mPointsInGrid; i++) { + for (int j = 0; j < m_params.mPointsInGrid; j++) { + pointsToTrackOld.push_back( + cv::Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j, + oldBox.y + .5f*gridYStep + 1.f*gridYStep*i)); + } + } + + const size_t numberOfPointsToTrackOld = pointsToTrackOld.size(); + std::vector status(numberOfPointsToTrackOld); + std::vector errors(numberOfPointsToTrackOld); + + std::vector tempPyramid; + cv::buildOpticalFlowPyramid( + newImage_gray, + tempPyramid, + m_params.mWindowSize, + m_params.mPyrMaxLevel); + + cv::calcOpticalFlowPyrLK(m_pyramid, + tempPyramid, + pointsToTrackOld, + pointsToTrackNew, + status, + errors, + m_params.mWindowSize, + m_params.mPyrMaxLevel, + m_termcrit); + + std::vector di; + for (size_t idx = 0u; idx < numberOfPointsToTrackOld; idx++) { + if (status[idx] == 1) + 
di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]); + } + + std::vector filter_status; + check_FB(tempPyramid, + pointsToTrackOld, + pointsToTrackNew, + filter_status); + + check_NCC(oldImage_gray, + newImage_gray, + pointsToTrackOld, + pointsToTrackNew, + filter_status); + + for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++) { + if (!filter_status[idx]) { + pointsToTrackOld.erase(pointsToTrackOld.begin() + idx); + pointsToTrackNew.erase(pointsToTrackNew.begin() + idx); + filter_status.erase(filter_status.begin() + idx); + idx--; + } + } + + if (pointsToTrackOld.empty() || di.empty()) + return false; + + cv::Point2f mDisplacement; + cv::Rect_ boxCandidate = + vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement); + + std::vector displacements; + for (size_t idx = 0u; idx < di.size(); idx++) { + di[idx] -= mDisplacement; + displacements.push_back(sqrt(di[idx].ddot(di[idx]))); + } + + m_confidence = + (10.f - getMedian(displacements,(int)displacements.size())) / 10.f; + + if (m_confidence < 0.f) { + m_confidence = 0.f; + return false; + } + + m_pyramid.swap(tempPyramid); + oldBox = boxCandidate; + return true; +} + +cv::Rect_ MFTracker::vote( + const std::vector& oldPoints, + const std::vector& newPoints, + const cv::Rect_& oldRect, + cv::Point2f& mD) +{ + cv::Rect_ newRect; + cv::Point2f newCenter( + oldRect.x + oldRect.width / 2.f, + oldRect.y + oldRect.height / 2.f); + + const int n = (int)oldPoints.size(); + std::vector buf(std::max( n*(n-1) / 2, 3), 0.f); + + if(oldPoints.size() == 1) { + newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x; + newRect.y = oldRect.y+newPoints[0].y-oldPoints[0].y; + newRect.width=oldRect.width; + newRect.height=oldRect.height; + + return newRect; + } + + float xshift = 0.f; + float yshift = 0.f; + for(int i = 0; i < n; i++) + buf[i] = newPoints[i].x - oldPoints[i].x; + + xshift = getMedian(buf, n); + newCenter.x += xshift; + for(int idx = 0; idx < n; idx++) + buf[idx] = newPoints[idx].y - 
oldPoints[idx].y; + + yshift = getMedian(buf, n); + newCenter.y += yshift; + mD = cv::Point2f(xshift, yshift); + + if(oldPoints.size() == 1) { + newRect.x = newCenter.x - oldRect.width / 2.f; + newRect.y = newCenter.y - oldRect.height / 2.f; + newRect.width = oldRect.width; + newRect.height = oldRect.height; + + return newRect; + } + + float nd = 0.f; + float od = 0.f; + for (int i = 0, ctr = 0; i < n; i++) { + for(int j = 0; j < i; j++) { + nd = l2distance(newPoints[i], newPoints[j]); + od = l2distance(oldPoints[i], oldPoints[j]); + buf[ctr] = (od == 0.f ? 0.f : nd / od); + ctr++; + } + } + + float scale = getMedian(buf, n*(n-1) / 2); + newRect.x = newCenter.x - scale * oldRect.width / 2.f; + newRect.y = newCenter.y-scale * oldRect.height / 2.f; + newRect.width = scale * oldRect.width; + newRect.height = scale * oldRect.height; + + return newRect; +} + +void MFTracker::check_FB( + std::vector newPyramid, + const std::vector& oldPoints, + const std::vector& newPoints, + std::vector& status) +{ + const size_t numberOfOldPoints = oldPoints.size(); + + if(status.empty()) + status = std::vector(numberOfOldPoints, true); + + std::vector LKstatus(numberOfOldPoints); + std::vector errors(numberOfOldPoints); + std::vector FBerror(numberOfOldPoints); + std::vector pointsToTrackReprojection; + + calcOpticalFlowPyrLK(newPyramid, + m_pyramid, + newPoints, + pointsToTrackReprojection, + LKstatus, + errors, + m_params.mWindowSize, + m_params.mPyrMaxLevel, + m_termcrit); + + for (size_t idx = 0u; idx < numberOfOldPoints; idx++) + FBerror[idx] = l2distance(oldPoints[idx], pointsToTrackReprojection[idx]); + + float FBerrorMedian = getMedian(FBerror) + FloatEps; + for (size_t idx = 0u; idx < numberOfOldPoints; idx++) + status[idx] = (FBerror[idx] < FBerrorMedian); +} + +void MFTracker::check_NCC( + const cv::Mat& oldImage, + const cv::Mat& newImage, + const std::vector& oldPoints, + const std::vector& newPoints, + std::vector& status) +{ + std::vector NCC(oldPoints.size(), 0.f); + 
cv::Size patch(30, 30); + cv::Mat p1; + cv::Mat p2; + + for (size_t idx = 0u; idx < oldPoints.size(); idx++) { + getRectSubPix(oldImage, patch, oldPoints[idx], p1); + getRectSubPix(newImage, patch, newPoints[idx], p2); + + const int N = 900; + const float s1 = sum(p1)(0); + const float s2 = sum(p2)(0); + const float n1 = norm(p1); + const float n2 = norm(p2); + const float prod = p1.dot(p2); + const float sq1 = sqrt(n1 * n1 - s1 * s1 / N); + const float sq2 = sqrt(n2 * n2 - s2 * s2 / N); + NCC[idx] = (sq2==0 ? sq1 / std::abs(sq1) + : (prod - s1 * s2 / N) / sq1 / sq2); + } + + float median = getMedian(NCC) - FloatEps; + for(size_t idx = 0u; idx < oldPoints.size(); idx++) + status[idx] = status[idx] && (NCC[idx] > median); +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/Tracking/ObjectTracker.cpp b/mv_image/image/src/Tracking/ObjectTracker.cpp new file mode 100644 index 0000000..4e73730 --- /dev/null +++ b/mv_image/image/src/Tracking/ObjectTracker.cpp @@ -0,0 +1,27 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Tracking/ObjectTracker.h" + +namespace MediaVision { +namespace Image { +ObjectTracker::~ObjectTracker() +{ + ; /* NULL */ +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/Tracking/RecognitionBasedTracker.cpp b/mv_image/image/src/Tracking/RecognitionBasedTracker.cpp new file mode 100644 index 0000000..218ac16 --- /dev/null +++ b/mv_image/image/src/Tracking/RecognitionBasedTracker.cpp @@ -0,0 +1,77 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Tracking/RecognitionBasedTracker.h" + +#include "Recognition/ImageRecognizer.h" + +namespace MediaVision { +namespace Image { +RecognitionBasedTracker::RecognitionBasedTracker( + const ImageObject& target, + const FeaturesExtractingParams& sceneFeaturesExtractingParams, + const RecognitionParams& recognitionParams) : + m_target(target), + m_sceneFeatureExtractingParams(sceneFeaturesExtractingParams), + m_recogParams(recognitionParams) +{ + ; /* NULL */ +} + +RecognitionBasedTracker::~RecognitionBasedTracker() +{ + ; /* NULL */ +} + +bool RecognitionBasedTracker::track( + const cv::Mat& frame, + std::vector& result) +{ + result.clear(); + + ImageObject scene(frame, m_sceneFeatureExtractingParams); + + ImageRecognizer recognizer(scene); + + std::vector contour; + + bool isRecognized = recognizer.recognize(m_target, m_recogParams, contour); + + if (isRecognized) { + size_t numberOfContourPoints = contour.size(); + result.resize(numberOfContourPoints); + for(size_t i = 0u; i < numberOfContourPoints; ++i) { + result[i].x = (int)contour[i].x; + result[i].y = (int)contour[i].y; + } + } + + return isRecognized; +} + +void RecognitionBasedTracker::reinforcement(const std::vector& location) +{ + ; /* The tracker is based on the recognition on the entire image. 
+ *The reinforcement does not make a sense.*/ +} + +cv::Ptr RecognitionBasedTracker::clone() const +{ + return cv::Ptr(new RecognitionBasedTracker(*this)); +} + +} /* Image */ +} /* MediaVision */ diff --git a/mv_image/image/src/mv_image_open.cpp b/mv_image/image/src/mv_image_open.cpp index 8c81168..df17707 100644 --- a/mv_image/image/src/mv_image_open.cpp +++ b/mv_image/image/src/mv_image_open.cpp @@ -19,32 +19,106 @@ #include "mv_private.h" #include "mv_common_c.h" -#include "ImageObject.h" -#include "ImageRecognizer.h" -#include "ImageTrackingModel.h" -#include "ImageTracker.h" +#include "ImageConfig.h" +#include "Features/FeatureExtractor.h" +#include "Features/ORBExtractorFactory.h" +#include "Recognition/ImageObject.h" +#include "Recognition/ImageRecognizer.h" +#include "Tracking/ImageTrackingModel.h" #include namespace { -const MediaVision::Image::FeaturesExtractingParams - defaultObjectFeaturesExtractingParams(1.2, 1000); +class DefaultConfiguration { +public: + static const DefaultConfiguration& getInstance(); -const MediaVision::Image::FeaturesExtractingParams - defaultSceneFeaturesExtractingParams(1.2, 5000); + MediaVision::Image::FeaturesExtractingParams getObjectFeaturesExtractingParams() const; -const MediaVision::Image::RecognitionParams - defaultRecognitionParams(15, 0.33, 0.1); + MediaVision::Image::FeaturesExtractingParams getSceneFeaturesExtractingParams() const; -const MediaVision::Image::StabilizationParams - defaultStabilizationParams(3, 0.006, 2, 0.001); + MediaVision::Image::RecognitionParams getRecognitionParams() const; -const MediaVision::Image::TrackingParams - defaultTrackingParams( - defaultSceneFeaturesExtractingParams, - defaultRecognitionParams, - defaultStabilizationParams, - 0.0); + MediaVision::Image::StabilizationParams getStabilizationParams() const; + + MediaVision::Image::TrackingParams getTrackingParams() const; + +private: + DefaultConfiguration(); + +private: + static DefaultConfiguration instance; + + 
MediaVision::Image::FeaturesExtractingParams m_objectFeaturesExtractingParams; + + MediaVision::Image::FeaturesExtractingParams m_sceneFeaturesExtractingParams; + + MediaVision::Image::RecognitionParams m_recognitionParams; + + MediaVision::Image::StabilizationParams m_stabilizationParams; + + MediaVision::Image::TrackingParams m_trackingParams; +}; + +DefaultConfiguration DefaultConfiguration::instance; + +DefaultConfiguration::DefaultConfiguration() : + m_objectFeaturesExtractingParams(), + m_sceneFeaturesExtractingParams(), + m_recognitionParams(15, 0.33, 0.1), + m_stabilizationParams(true, 3, 0.00006, 1.3, 2, 0.001), + m_trackingParams() +{ + m_objectFeaturesExtractingParams.mKeypointType = MediaVision::Image::KT_ORB; + m_objectFeaturesExtractingParams.mDescriptorType = MediaVision::Image::DT_ORB; + m_objectFeaturesExtractingParams.ORB.mScaleFactor = 1.2; + m_objectFeaturesExtractingParams.ORB.mMaximumFeaturesNumber = 1000; + + m_sceneFeaturesExtractingParams.mKeypointType = MediaVision::Image::KT_ORB; + m_sceneFeaturesExtractingParams.mDescriptorType = MediaVision::Image::DT_ORB; + m_sceneFeaturesExtractingParams.ORB.mScaleFactor = 1.2; + m_sceneFeaturesExtractingParams.ORB.mMaximumFeaturesNumber = 5000; + + m_trackingParams.mFramesFeaturesExtractingParams = m_sceneFeaturesExtractingParams; + m_trackingParams.mRecognitionParams = m_recognitionParams; + m_trackingParams.mStabilizationParams = m_stabilizationParams; + m_trackingParams.mExpectedOffset = 0.0; +} + +const DefaultConfiguration& DefaultConfiguration::getInstance() +{ + return instance; +} + +MediaVision::Image::FeaturesExtractingParams +DefaultConfiguration::getObjectFeaturesExtractingParams() const +{ + return m_objectFeaturesExtractingParams; +} + +MediaVision::Image::FeaturesExtractingParams +DefaultConfiguration::getSceneFeaturesExtractingParams() const +{ + return m_sceneFeaturesExtractingParams; +} + +MediaVision::Image::RecognitionParams +DefaultConfiguration::getRecognitionParams() const +{ + 
return m_recognitionParams; +} + +MediaVision::Image::StabilizationParams +DefaultConfiguration::getStabilizationParams() const +{ + return m_stabilizationParams; +} + +MediaVision::Image::TrackingParams +DefaultConfiguration::getTrackingParams() const +{ + return m_trackingParams; +} void extractTargetFeaturesExtractingParams( mv_engine_config_h engine_cfg, @@ -58,17 +132,18 @@ void extractTargetFeaturesExtractingParams( working_cfg = engine_cfg; } - featuresExtractingParams = defaultObjectFeaturesExtractingParams; + featuresExtractingParams = + DefaultConfiguration::getInstance().getObjectFeaturesExtractingParams(); mv_engine_config_get_double_attribute_c( working_cfg, - "MV_IMAGE_RECOGNITION_OBJECT_SCALE_FACTOR", - &featuresExtractingParams.mScaleFactor); + MV_IMAGE_RECOGNITION_OBJECT_SCALE_FACTOR, + &featuresExtractingParams.ORB.mScaleFactor); mv_engine_config_get_int_attribute_c( working_cfg, - "MV_IMAGE_RECOGNITION_OBJECT_MAX_KEYPOINTS_NUM", - &featuresExtractingParams.mMaximumFeaturesNumber); + MV_IMAGE_RECOGNITION_OBJECT_MAX_KEYPOINTS_NUM, + &featuresExtractingParams.ORB.mMaximumFeaturesNumber); if (NULL == engine_cfg) { mv_destroy_engine_config(working_cfg); @@ -87,17 +162,18 @@ void extractSceneFeaturesExtractingParams( working_cfg = engine_cfg; } - featuresExtractingParams = defaultSceneFeaturesExtractingParams; + featuresExtractingParams = + DefaultConfiguration::getInstance().getSceneFeaturesExtractingParams(); mv_engine_config_get_double_attribute_c( working_cfg, - "MV_IMAGE_RECOGNITION_SCENE_SCALE_FACTOR", - &featuresExtractingParams.mScaleFactor); + MV_IMAGE_RECOGNITION_SCENE_SCALE_FACTOR, + &featuresExtractingParams.ORB.mScaleFactor); mv_engine_config_get_int_attribute_c( working_cfg, - "MV_IMAGE_RECOGNITION_SCENE_MAX_KEYPOINTS_NUM", - &featuresExtractingParams.mMaximumFeaturesNumber); + MV_IMAGE_RECOGNITION_SCENE_MAX_KEYPOINTS_NUM, + &featuresExtractingParams.ORB.mMaximumFeaturesNumber); if (NULL == engine_cfg) { 
mv_destroy_engine_config(working_cfg); @@ -116,22 +192,23 @@ void extractRecognitionParams( working_cfg = engine_cfg; } - recognitionParams = defaultRecognitionParams; + recognitionParams = + DefaultConfiguration::getInstance().getRecognitionParams(); mv_engine_config_get_int_attribute_c( working_cfg, - "MV_IMAGE_RECOGNITION_MIN_MATCH_NUM", + MV_IMAGE_RECOGNITION_MIN_MATCH_NUM, &recognitionParams.mMinMatchesNumber); mv_engine_config_get_double_attribute_c( working_cfg, - "MV_IMAGE_RECOGNITION_REQ_MATCH_PART", + MV_IMAGE_RECOGNITION_REQ_MATCH_PART, &recognitionParams.mRequiredMatchesPart); mv_engine_config_get_double_attribute_c( working_cfg, - "MV_IMAGE_RECOGNITION_TOLERANT_MATCH_PART_ERR", - &recognitionParams.mAllowableMatchesPartError); + MV_IMAGE_RECOGNITION_TOLERANT_MATCH_PART_ERR, + &recognitionParams.mTolerantMatchesPartError); if (NULL == engine_cfg) { mv_destroy_engine_config(working_cfg); @@ -150,40 +227,32 @@ void extractStabilizationParams( working_cfg = engine_cfg; } - stabilizationParams = defaultStabilizationParams; + stabilizationParams = + DefaultConfiguration::getInstance().getStabilizationParams(); - bool useStabilization = true; mv_engine_config_get_bool_attribute_c( working_cfg, - "MV_IMAGE_TRACKING_USE_STABLIZATION", - &useStabilization); - - if (!useStabilization) { - stabilizationParams.mHistoryAmount = 0; - if (NULL == engine_cfg) { - mv_destroy_engine_config(working_cfg); - } - return; - } + MV_IMAGE_TRACKING_USE_STABLIZATION, + &stabilizationParams.mIsEnabled); mv_engine_config_get_int_attribute_c( working_cfg, - "MV_IMAGE_TRACKING_HISTORY_AMOUNT", + MV_IMAGE_TRACKING_HISTORY_AMOUNT, &stabilizationParams.mHistoryAmount); mv_engine_config_get_double_attribute_c( working_cfg, - "MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT", - &stabilizationParams.mAllowableShift); + MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT, + &stabilizationParams.mTolerantShift); mv_engine_config_get_double_attribute_c( working_cfg, - 
"MV_IMAGE_TRACKING_STABLIZATION_SPEED", + MV_IMAGE_TRACKING_STABLIZATION_SPEED, &stabilizationParams.mStabilizationSpeed); mv_engine_config_get_double_attribute_c( working_cfg, - "MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION", + MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION, &stabilizationParams.mStabilizationAcceleration); if (NULL == engine_cfg) { @@ -203,7 +272,8 @@ void extractTrackingParams( working_cfg = engine_cfg; } - trackingParams = defaultTrackingParams; + trackingParams = + DefaultConfiguration::getInstance().getTrackingParams(); extractSceneFeaturesExtractingParams( working_cfg, @@ -219,7 +289,7 @@ void extractTrackingParams( mv_engine_config_get_double_attribute_c( working_cfg, - "MV_IMAGE_TRACKING_EXPECTED_OFFSET", + MV_IMAGE_TRACKING_EXPECTED_OFFSET, &trackingParams.mExpectedOffset); if (NULL == engine_cfg) { @@ -344,11 +414,12 @@ int mv_image_recognize_open( MediaVision::Image::FeaturesExtractingParams featuresExtractingParams; extractSceneFeaturesExtractingParams(engine_cfg, featuresExtractingParams); + MediaVision::Image::ImageObject sceneImageObject(scene, featuresExtractingParams); + MediaVision::Image::RecognitionParams recognitionParams; extractRecognitionParams(engine_cfg, recognitionParams); - MediaVision::Image::ImageRecognizer recognizer(scene, - featuresExtractingParams); + MediaVision::Image::ImageRecognizer recognizer(sceneImageObject); mv_quadrangle_s *resultLocations[number_of_objects]; @@ -415,16 +486,13 @@ int mv_image_track_open( convertSourceMV2GrayCV(source, frame), "Failed to convert mv_source."); - MediaVision::Image::ImageTracker tracker(trackingParams); - MediaVision::Image::ImageTrackingModel *trackingModel = (MediaVision::Image::ImageTrackingModel*)image_tracking_model; - tracker.track(frame, *trackingModel); - - std::vector resultContour = trackingModel->getLastlocation(); + std::vector resultContour; + const bool isTracked = trackingModel->track(frame, resultContour); - if (trackingModel->isDetected() && + if (isTracked && 
MediaVision::Image::NumberOfQuadrangleCorners == resultContour.size()) { mv_quadrangle_s result; for (size_t pointNum = 0u; @@ -478,22 +546,30 @@ int mv_image_object_fill_open( convertSourceMV2GrayCV(source, image), "Failed to convert mv_source."); + std::vector roi; + if (NULL != location) { + roi.resize(4); + + roi[0].x = location->point.x; + roi[0].y = location->point.y; + + roi[1].x = roi[0].x + location->width; + roi[1].y = roi[0].y; + + roi[2].x = roi[1].x; + roi[2].y = roi[1].y + location->height; + + roi[3].x = roi[0].x; + roi[3].y = roi[2].y; + } + MediaVision::Image::FeaturesExtractingParams featuresExtractingParams; extractTargetFeaturesExtractingParams(engine_cfg, featuresExtractingParams); - if (NULL == location) { - ((MediaVision::Image::ImageObject*)image_object)->fill(image, - featuresExtractingParams); - } else { - if (!((MediaVision::Image::ImageObject*)image_object)->fill(image, - cv::Rect(location->point.x, location->point.y, - location->width, location->height), - featuresExtractingParams)) { - /* Wrong ROI (bounding box) */ - LOGE("[%s] Wrong ROI.", __FUNCTION__); - return MEDIA_VISION_ERROR_INVALID_DATA; - } - } + static_cast(image_object)->fill( + image, + featuresExtractingParams, + roi); return MEDIA_VISION_ERROR_NONE; } @@ -668,7 +744,7 @@ int mv_image_tracking_model_clone_open( return MEDIA_VISION_ERROR_OUT_OF_MEMORY; } - *(MediaVision::Image::ImageObject*)(*dst) = *(MediaVision::Image::ImageObject*)src; + *(MediaVision::Image::ImageTrackingModel*)(*dst) = *(MediaVision::Image::ImageTrackingModel*)src; LOGD("Image tracking model has been successfully cloned"); return MEDIA_VISION_ERROR_NONE; diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index 9d68686..db59b53 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.2.4 +Version: 0.2.5 Release: 0 Group: Multimedia/Framework 
License: Apache-2.0 and BSD-2.0 -- 2.7.4 From 18434240eefdbcf2727d3f07a399a36dbd8caa64 Mon Sep 17 00:00:00 2001 From: SeokHoon Lee Date: Tue, 1 Dec 2015 16:40:12 +0900 Subject: [PATCH 06/16] MediaVision Surveillance feature : initial version Signed-off-by: SeokHoon Lee Change-Id: I4f6596a891c7bda729b70bd026026c224974fdc3 --- AUTHORS | 2 +- CMakeLists.txt | 18 +- doc/mediavision_doc.h | 126 ++- include/mv_surveillance.h | 1154 ++++++++++++++++++++ include/mv_surveillance_private.h | 48 + media-vision-config.json | 15 + mv_surveillance/CMakeLists.txt | 8 + mv_surveillance/surveillance/CMakeLists.txt | 26 + mv_surveillance/surveillance/include/EventDefs.h | 71 ++ .../surveillance/include/EventManager.h | 190 ++++ mv_surveillance/surveillance/include/EventResult.h | 59 + .../surveillance/include/EventTrigger.h | 227 ++++ .../include/EventTriggerMovementDetection.h | 159 +++ .../include/EventTriggerPersonAppearance.h | 211 ++++ .../include/EventTriggerPersonRecognition.h | 192 ++++ mv_surveillance/surveillance/include/HoGDetector.h | 194 ++++ .../surveillance/include/SurveillanceHelper.h | 70 ++ mv_surveillance/surveillance/include/mv_absdiff.h | 56 + .../surveillance/include/mv_apply_mask.h | 56 + .../surveillance/include/mv_mask_buffer.h | 55 + .../surveillance/include/mv_surveillance_open.h | 194 ++++ mv_surveillance/surveillance/src/EventManager.cpp | 410 +++++++ mv_surveillance/surveillance/src/EventTrigger.cpp | 197 ++++ .../src/EventTriggerMovementDetection.cpp | 290 +++++ .../src/EventTriggerPersonAppearance.cpp | 460 ++++++++ .../src/EventTriggerPersonRecognition.cpp | 397 +++++++ mv_surveillance/surveillance/src/HoGDetector.cpp | 1006 +++++++++++++++++ .../surveillance/src/SurveillanceHelper.cpp | 200 ++++ mv_surveillance/surveillance/src/mv_absdiff.c | 81 ++ mv_surveillance/surveillance/src/mv_apply_mask.c | 77 ++ mv_surveillance/surveillance/src/mv_mask_buffer.c | 89 ++ .../surveillance/src/mv_surveillance_open.cpp | 140 +++ 
mv_surveillance/surveillance_lic/CMakeLists.txt | 25 + .../surveillance_lic/include/mv_surveillance_lic.h | 187 ++++ .../surveillance_lic/src/mv_surveillance_lic.c | 63 ++ packaging/capi-media-vision.spec | 13 +- src/mv_surveillance.c | 363 ++++++ test/testsuites/CMakeLists.txt | 1 + test/testsuites/surveillance/CMakeLists.txt | 31 + .../surveillance/surveillance_test_suite.c | 1137 +++++++++++++++++++ 40 files changed, 8292 insertions(+), 6 deletions(-) create mode 100644 include/mv_surveillance.h create mode 100644 include/mv_surveillance_private.h create mode 100644 mv_surveillance/CMakeLists.txt create mode 100644 mv_surveillance/surveillance/CMakeLists.txt create mode 100644 mv_surveillance/surveillance/include/EventDefs.h create mode 100644 mv_surveillance/surveillance/include/EventManager.h create mode 100644 mv_surveillance/surveillance/include/EventResult.h create mode 100644 mv_surveillance/surveillance/include/EventTrigger.h create mode 100644 mv_surveillance/surveillance/include/EventTriggerMovementDetection.h create mode 100644 mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h create mode 100644 mv_surveillance/surveillance/include/EventTriggerPersonRecognition.h create mode 100644 mv_surveillance/surveillance/include/HoGDetector.h create mode 100644 mv_surveillance/surveillance/include/SurveillanceHelper.h create mode 100644 mv_surveillance/surveillance/include/mv_absdiff.h create mode 100644 mv_surveillance/surveillance/include/mv_apply_mask.h create mode 100644 mv_surveillance/surveillance/include/mv_mask_buffer.h create mode 100644 mv_surveillance/surveillance/include/mv_surveillance_open.h create mode 100644 mv_surveillance/surveillance/src/EventManager.cpp create mode 100644 mv_surveillance/surveillance/src/EventTrigger.cpp create mode 100644 mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp create mode 100644 mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp create mode 100644 
mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp create mode 100644 mv_surveillance/surveillance/src/HoGDetector.cpp create mode 100644 mv_surveillance/surveillance/src/SurveillanceHelper.cpp create mode 100644 mv_surveillance/surveillance/src/mv_absdiff.c create mode 100644 mv_surveillance/surveillance/src/mv_apply_mask.c create mode 100644 mv_surveillance/surveillance/src/mv_mask_buffer.c create mode 100644 mv_surveillance/surveillance/src/mv_surveillance_open.cpp create mode 100644 mv_surveillance/surveillance_lic/CMakeLists.txt create mode 100644 mv_surveillance/surveillance_lic/include/mv_surveillance_lic.h create mode 100644 mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c create mode 100644 src/mv_surveillance.c create mode 100644 test/testsuites/surveillance/CMakeLists.txt create mode 100644 test/testsuites/surveillance/surveillance_test_suite.c diff --git a/AUTHORS b/AUTHORS index 14ccbda..c096057 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,4 +1,3 @@ -ByungWook Jang Tae-Young Chung Oleg Kopysov Ievgen Vagin @@ -6,3 +5,4 @@ Anton Artyukh Yaroslav Zatsikha Sergii Rudenko SeokHoon Lee +Heechul Jeon diff --git a/CMakeLists.txt b/CMakeLists.txt index 8452d0a..2ec1ac7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -19,6 +19,8 @@ option(MEDIA_VISION_IMAGE_LICENSE_PORT "Turn on building of licensed port of the image module (if OFF - open port will be built)." OFF) option(MEDIA_VISION_FACE_LICENSE_PORT "Turn on building of licensed port of the face module (if OFF - open port will be built)." OFF) +option(MEDIA_VISION_SURVEILLANCE_LICENSE_PORT + "Turn on building of licensed port of the surveillance module (if OFF - open port will be built)." 
OFF) set(MV_COMMON_LIB_NAME "mv_common") set(MV_BARCODE_DETECTOR_LIB_NAME "mv_barcode_detector" CACHE STRING @@ -29,6 +31,8 @@ set(MV_IMAGE_LIB_NAME "mv_image" CACHE STRING "Name of the library will be built for image module (without extension).") set(MV_FACE_LIB_NAME "mv_face" CACHE STRING "Name of the library will be built for barcode generating module (without extension).") +set(MV_SURVEILLANCE_LIB_NAME "mv_surveillance" CACHE STRING + "Name of the library will be built for surveillance module (without extension).") SET(INC_DIR "${PROJECT_SOURCE_DIR}/include") @@ -64,12 +68,20 @@ else() SET(INC_FACE "${PROJECT_SOURCE_DIR}/mv_face/face/include") endif() +if(MEDIA_VISION_SURVEILLANCE_LICENSE_PORT) + add_definitions(-DMEDIA_VISION_SURVEILLANCE_LICENSE_PORT) + SET(INC_SURVEILLANCE "${PROJECT_SOURCE_DIR}/mv_surveillance/surveillance_lic/include") +else() + SET(INC_SURVEILLANCE "${PROJECT_SOURCE_DIR}/mv_surveillance/surveillance/include") +endif() + INCLUDE_DIRECTORIES(${INC_DIR} ${INC_COMMON} ${INC_BARCODE_DETECTOR} ${INC_BARCODE_GENERATOR} ${INC_FACE} - ${INC_IMAGE}) + ${INC_IMAGE} + ${INC_SURVEILLANCE}) SET(dependents "dlog capi-media-tool capi-system-info capi-appfw-application") SET(pc_dependents "dlog") @@ -100,6 +112,7 @@ ADD_SUBDIRECTORY(mv_common) ADD_SUBDIRECTORY(mv_barcode) ADD_SUBDIRECTORY(mv_image) ADD_SUBDIRECTORY(mv_face) +ADD_SUBDIRECTORY(mv_surveillance) aux_source_directory(src SOURCES) ADD_LIBRARY(${fw_name} SHARED ${SOURCES}) @@ -109,7 +122,8 @@ TARGET_LINK_LIBRARIES(${fw_name} ${MV_COMMON_LIB_NAME} ${MV_BARCODE_GENERATOR_LIB_NAME} ${MV_IMAGE_LIB_NAME} ${MV_FACE_LIB_NAME} - ${${fw_name}_LDFLAGS}) + ${${fw_name}_LDFLAGS} + ${MV_SURVEILLANCE_LIB_NAME}) SET_TARGET_PROPERTIES(${fw_name} PROPERTIES diff --git a/doc/mediavision_doc.h b/doc/mediavision_doc.h index b7ab18d..8402b85 100644 --- a/doc/mediavision_doc.h +++ b/doc/mediavision_doc.h @@ -24,13 +24,16 @@ * * Face detection, recognition, and tracking;\n * * Barcode detection and generation;\n * * 
Flat Image detection, recognition and tracking;\n - * * Flat Image features extraction. + * * Flat Image features extraction;\n + * * Surveillance: movement detection, person appearance/disappearance, + * person recognition. * * @defgroup CAPI_MEDIA_VISION_COMMON_MODULE Media Vision Common * @ingroup CAPI_MEDIA_VISION_MODULE * @brief Common functions and enumerations used in * @ref CAPI_MEDIA_VISION_FACE_MODULE, - * @ref CAPI_MEDIA_VISION_IMAGE_MODULE and + * @ref CAPI_MEDIA_VISION_IMAGE_MODULE, + * @ref CAPI_MEDIA_VISION_SURVEILLANCE_MODULE and * @ref CAPI_MEDIA_VISION_BARCODE_MODULE submodules. * @section CAPI_MEDIA_VISION_COMMON_MODULE_HEADER Required Header * \#include @@ -247,6 +250,125 @@ * For QR codes it is possible to specify error correction code and encoding * mode (see @ref mv_barcode_qr_mode_e). Generation to file supports several * formats (see @ref mv_barcode_image_format_e). + * + * @defgroup CAPI_MEDIA_VISION_SURVEILLANCE_MODULE Media Vision Surveillance + * @ingroup CAPI_MEDIA_VISION_MODULE + * @brief Video surveillance module. + * @section CAPI_MEDIA_VISION_SURVEILLANCE_MODULE_HEADER Required Header + * \#include + * + * @section CAPI_MEDIA_VISION_SURVEILLANCE_MODULE_FEATURE Related Features + * This API is related with the following features:\n + * - http://tizen.org/feature/vision.image_recognition\n + * - http://tizen.org/feature/vision.face_recognition\n + * + * It is recommended to design feature related codes in your application for + * reliability.\n + * You can check if a device supports the related features for this API by using + * @ref CAPI_SYSTEM_SYSTEM_INFO_MODULE, thereby controlling the procedure of + * your application.\n + * To ensure your application is only running on the device with specific + * features, please define the features in your manifest file using the manifest + * editor in the SDK.\n + * More details on featuring your application can be found from + * + * Feature Element. 
+ * + * + * @section CAPI_MEDIA_VISION_SURVEILLANCE_MODULE_OVERVIEW Overview + * @ref CAPI_MEDIA_VISION_SURVEILLANCE_MODULE provides functionality can be + * utilized for creation of video surveillance systems. The main idea underlying + * surveillance is event subscription model. By default, supported event types + * are described in @ref CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES section. + * @ref mv_surveillance_subscribe_event_trigger() function has to be used to + * create subscription to the particular event trigger. Triggers are handled by + * @ref mv_surveillance_event_trigger_h type. Such type handlers can be created + * with @ref mv_surveillance_event_trigger_create() function and destroyed with + * @ref mv_surveillance_event_trigger_destroy() function. Once event trigger + * subscription is created, corresponding @ref mv_surveillance_event_occurred_cb + * callback will be invoked each time when event is detected, i.e. trigger is + * activated. @ref mv_surveillance_result_h event detection result handler will + * be passed to the callback together with identifier of the video stream where + * event was detected and @ref mv_source_h handler containing frame in which + * detection was performed. It is possible to retrieve specific to event type + * result values using @ref mv_surveillance_result_h handler. In the + * @ref mv_surveillance_get_result_value() function documentation can be found + * detailed description of result values retrieving. Following table contains + * general events and corresponding event detection results can be obtained by + * this approach: + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
+ * <table>
+ * <tr>
+ *     <th>Event</th>
+ *     <th>Event result values</th>
+ * </tr>
+ * <tr>
+ *     <td>@ref MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED</td>
+ *     <td>@ref MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS;<br>
+ *         @ref MV_SURVEILLANCE_MOVEMENT_REGIONS</td>
+ * </tr>
+ * <tr>
+ *     <td>@ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED</td>
+ *     <td>@ref MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER;<br>
+ *         @ref MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER;<br>
+ *         @ref MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER;<br>
+ *         @ref MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS;<br>
+ *         @ref MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS;<br>
+ *         @ref MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS</td>
+ * </tr>
+ * <tr>
+ *     <td>@ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED</td>
+ *     <td>@ref MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER;<br>
+ *         @ref MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS;<br>
+ *         @ref MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS;<br>
+ * @ref MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES + *
+ * Before subscription of the event trigger with + * @ref mv_surveillance_subscribe_event_trigger() call it is possible to create + * @ref mv_engine_config_h handle and configurate following attributes: + * - @ref MV_SURVEILLANCE_SKIP_FRAMES_COUNT to setup number of frames will be + ignored by event trigger; + * - @ref MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD to specify sensitivity of + * the @ref MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED event detection; + * - @ref MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH to specify the file + * where face recognition model to be used for recognition is stored. + * + * Created engine config has to be used as a parameter of + * @ref mv_surveillance_subscribe_event_trigger() to apply the configuration. If + * NULL will be passed instead of valid @ref mv_engine_config_h handle, then + * default attribute values will be used for subsriptions. + * To make surveillance system work with video sequences + * @ref mv_surveillance_push_source() function has to + * be used for each frame in the sequence in the correct order. Multiple video + * sources can be supported by the system. To distinguish different video + * sources unique stream identifier has to be assigned to each subscription. + * Then, particular identifier can be passed as a parameter to the + * @ref mv_surveillance_push_source() function. After pushing the source to the + * surveillance system, it will notify all triggers which were subscribed to + * process frames coming from video stream which source has been pushed. + * If trigger(s) is(are) activated on the source, then corresponding callback(s) + * of @ref mv_surveillance_event_occurred_cb type will be called. + * Additionally, region where event detection will be performed by the triggers + * can be set with @ref mv_surveillance_set_event_trigger_roi() function and + * gotten with @ref mv_surveillance_get_event_trigger_roi(). 
ROI is specified + * independently for the each event trigger, so it is possible to detect events + * of different types in the different parts of the incoming frames. + * Event trigger subscription can be stopped any time using + * @ref mv_surveillance_unsubscribe_event_trigger() function. Additionally, + * @ref mv_surveillance_foreach_supported_event_type() and + * @ref mv_surveillance_foreach_event_result_name() functions can be found + * useful if it is required to obtain supported event types list or result + * value names list dynamically. + * + * @defgroup CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES Media Vision Surveillance Event Types + * @ingroup CAPI_MEDIA_VISION_SURVEILLANCE_MODULE + * @brief Event types supported by the Surveillance module. */ #endif /* __TIZEN_MEDIAVISION_DOC_H__ */ diff --git a/include/mv_surveillance.h b/include/mv_surveillance.h new file mode 100644 index 0000000..f17c77b --- /dev/null +++ b/include/mv_surveillance.h @@ -0,0 +1,1154 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __TIZEN_MEDIAVISION_SURVEILLANCE_H__ +#define __TIZEN_MEDIAVISION_SURVEILLANCE_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @file mv_surveillance.h + * @brief This file contains the Media Vision Surveillance API. 
+ */ + +/** + * @addtogroup CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES + * @{ + */ + +/** + * @brief Name of the movement detection event type. + * @details This is common event for a movement detection. When this event + * occurs @ref mv_surveillance_result_h allowed from callback can be + * used to get number of regions where movement has been detected and + * their positions. Out parameters (can be accessed in the + * @ref mv_surveillance_event_occurred_cb callback using + * @ref mv_surveillance_get_result_value() function): + * * @ref MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS - the number + * of regions where movement has been detected;\n + * * @ref MV_SURVEILLANCE_MOVEMENT_REGIONS - the set + * of rectangular regions where movement has been detected. + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED \ + "MV_SURVEILLANCE_EVENT_MOVEMENT_DETECTED" + +/** + * @brief Name of the event result value that contains number of regions where + * movement was detected. + * @details This event result value can be accessed after event triggers of + * @ref MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED type activation. 
+ * Result value is of @c size_t type, so has to be casted as in the + * following example: + * @code{.c} + * void event_occurred_cb(mv_surveillance_event_trigger_h trigger, + * mv_source_h source, + * int video_stream_id, + * mv_surveillance_result_h event_result, + * void *user_data) + * { + * const char *event_type = NULL; + * int err = mv_surveillance_get_event_trigger_type(trigger, + * event_type); + * if (MEDIA_VISION_ERROR_NONE != err) return; + * + * if (0 == strncmp(event_type, + * MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, + * 255)) + * { + * size_t move_regions_num = 0; + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS, + * &move_regions_num); + * if (MEDIA_VISION_ERROR_NONE != err) + * return; + * + * // Do something with number of regions where movement + * // was detected... + * } + * } + * @endcode + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS \ + "NUMBER_OF_MOVEMENT_REGIONS" + +/** + * @brief Name of the event result value that contains rectangular regions where + * movement was detected. + * @details This event result value can be accessed after event triggers of + * @ref MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED type activation. 
+ * Result value is of @a mv_rectangle_s array type, so has to be casted + * as in the following example: + * @code{.c} + * void event_occurred_cb(mv_surveillance_event_trigger_h trigger, + * mv_source_h source, + * int video_stream_id, + * mv_surveillance_result_h event_result, + * void *user_data) + * { + * const char *event_type = NULL; + * int err = mv_surveillance_get_event_trigger_type(trigger, + * event_type); + * if (MEDIA_VISION_ERROR_NONE != err) return; + * + * if (0 == strncmp(event_type, + * MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, + * 255)) + * { + * size_t move_regions_num = 0; + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS, + * &move_regions_num); + * + * if (MEDIA_VISION_ERROR_NONE != err || 0 == move_regions_num) + * return; + * + * mv_rectangle_s *regions = + * (mv_rectangle_s*) + * malloc(sizeof(mv_rectangle_s) * move_regions_num); + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_MOVEMENT_REGIONS, + * regions); + * + * // Do something with movement regions... + * + * free (regions); + * } + * } + * @endcode + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_MOVEMENT_REGIONS "MOVEMENT_REGIONS" + +/** + * @brief Name of the person appearance/disappearance event type. + * @details This is common event for a person appearing (disappearing). + * The result will be: + * * a number of persons, which were appeared and their positions;\n + * * a number of persons, which were tracked and their positions;\n + * * a number of persons, which were disappeared and their last + * positions.\n + * For the first time when a source is loaded, a result should contain + * all detected persons (detection will be made using face detection + * API). Next time when the source should be loaded, the previously + * detected persons will be tracked and new persons will be detected. + * For the previously detected persons their locations will be updated. 
+ * Out parameters (can be accessed in the + * @ref mv_surveillance_event_occurred_cb callback using + * @ref mv_surveillance_get_result_value() function): + * * @ref MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER - the number + * of persons which were appeared;\n + * * @ref MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS - the locations + * of persons which were appeared;\n + * * @ref MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER - the number + * of persons which were tracked;\n + * * @ref MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS - the locations + * of persons which were tracked;\n + * * @ref MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER - the number + * of persons which were disappeared;\n + * * @ref MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS - the locations + * of persons which were disappeared. + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED \ + "MV_SURVEILLANCE_EVENT_PERSON_APPEARED_DISAPEARED" + +/** + * @brief Name of the event result value that contains number + * of persons that have been appeared. + * @details This event result value can be accessed after event triggers of + * @ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED type + * activation. 
Result value is of @c size_t type, so has to be casted + * as in the following example: + * @code{.c} + * void event_occurred_cb(mv_surveillance_event_trigger_h trigger, + * mv_source_h source, + * int video_stream_id, + * mv_surveillance_result_h event_result, + * void *user_data) + * { + * const char *event_type = NULL; + * int err = mv_surveillance_get_event_trigger_type(trigger, + * event_type); + * if (MEDIA_VISION_ERROR_NONE != err) return; + * + * if (0 == strncmp(event_type, + * MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED, + * 255)) + * { + * size_t appear_person_num = 0; + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER, + * &appear_person_num); + * if (MEDIA_VISION_ERROR_NONE != err) + * return; + * + * // Do something with number of appeared persons... + * } + * } + * @endcode + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER \ + "NUMBER_OF_APPEARED_PERSONS" + +/** + * @brief Name of the event result value that contains number + * of persons that have been disappeared. + * @details This event result value can be accessed after event triggers of + * @ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED type + * activation. 
Result value is of @c size_t type, so has to be casted + * as in the following example: + * @code{.c} + * void event_occurred_cb(mv_surveillance_event_trigger_h trigger, + * mv_source_h source, + * int video_stream_id, + * mv_surveillance_result_h event_result, + * void *user_data) + * { + * const char *event_type = NULL; + * int err = mv_surveillance_get_event_trigger_type(trigger, + * event_type); + * if (MEDIA_VISION_ERROR_NONE != err) return; + * + * if (0 == strncmp(event_type, + * MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED, + * 255)) + * { + * size_t disappear_person_num = 0; + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER, + * &disappear_person_num); + * if (MEDIA_VISION_ERROR_NONE != err) + * return; + * + * // Do something with number of disappeared persons... + * } + * } + * @endcode + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER \ + "NUMBER_OF_DISAPPEARED_PERSONS" + +/** + * @brief Name of the event result value that contains number + * of persons that have been tracked. + * @details This event result value can be accessed after event triggers of + * @ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED type + * activation. 
Result value is of @c size_t type, so has to be casted + * as in the following example: + * @code{.c} + * void event_occurred_cb(mv_surveillance_event_trigger_h trigger, + * mv_source_h source, + * int video_stream_id, + * mv_surveillance_result_h event_result, + * void *user_data) + * { + * const char *event_type = NULL; + * int err = mv_surveillance_get_event_trigger_type(trigger, + * event_type); + * if (MEDIA_VISION_ERROR_NONE != err) return; + * + * if (0 == strncmp(event_type, + * MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED, + * 255)) + * { + * size_t tracked_person_num = 0; + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER, + * &tracked_person_num); + * if (MEDIA_VISION_ERROR_NONE != err) + * return; + * + * // Do something with number of tracked persons... + * } + * } + * @endcode + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER \ + "NUMBER_OF_TRACKED_PERSONS" + +/** + * @brief Name of the event result value that contains a set of rectangular + * locations where appearances of the persons were detected. + * @details This event result value can be accessed after event triggers of + * @ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED type + * activation. 
Result value is of @a mv_rectangle_s array type, so has + * to be casted as in the following example: + * @code{.c} + * void event_occurred_cb(mv_surveillance_event_trigger_h trigger, + * mv_source_h source, + * int video_stream_id, + * mv_surveillance_result_h event_result, + * void *user_data) + * { + * const char *event_type = NULL; + * int err = mv_surveillance_get_event_trigger_type(trigger, + * event_type); + * if (MEDIA_VISION_ERROR_NONE != err) return; + * + * if (0 == strncmp(event_type, + * MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED, + * 255)) + * { + * size_t appear_person_num = 0; + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER, + * &appear_person_num); + * + * if (MEDIA_VISION_ERROR_NONE != err || + * 0 == appear_person_num) return; + * + * mv_rectangle_s *appear_locations = + * (mv_rectangle_s*) + * malloc(sizeof(mv_rectangle_s) * appear_person_num); + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS, + * appear_locations); + * + * // Do something with locations where persons were + * // appeared... + * + * free (appear_locations); + * } + * } + * @endcode + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS \ + "APPEARED_PERSONS_LOCATIONS" + +/** + * @brief Name of the event result value that contains a set of rectangular + * locations where disappearances of the persons were detected. + * @details This event result value can be accessed after event triggers of + * @ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED type + * activation. 
Result value is of @a mv_rectangle_s array type, so has + * to be casted as in the following example: + * @code{.c} + * void event_occurred_cb(mv_surveillance_event_trigger_h trigger, + * mv_source_h source, + * int video_stream_id, + * mv_surveillance_result_h event_result, + * void *user_data) + * { + * const char *event_type = NULL; + * int err = mv_surveillance_get_event_trigger_type(trigger, + * event_type); + * if (MEDIA_VISION_ERROR_NONE != err) return; + * + * if (0 == strncmp(event_type, + * MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED, + * 255)) + * { + * size_t disappear_person_num = 0; + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER, + * &disappear_person_num); + * + * if (MEDIA_VISION_ERROR_NONE != err || + * 0 == disappear_person_num) return; + * + * mv_rectangle_s *disappear_locations = + * (mv_rectangle_s*) + * malloc(sizeof(mv_rectangle_s) * disappear_person_num); + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS, + * disappear_locations); + * + * // Do something with locations where persons were + * // disappeared... + * + * free (disappear_locations); + * } + * } + * @endcode + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS \ + "DISAPPEARED_PERSONS_LOCATIONS" + +/** + * @brief Name of the event result value that contains a set of rectangular + * locations where persons were tracked. + * @details This event result value can be accessed after event triggers of + * @ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED type + * activation. 
Result value is of @a mv_rectangle_s array type, so has + * to be casted as in the following example: + * @code{.c} + * void event_occurred_cb(mv_surveillance_event_trigger_h trigger, + * mv_source_h source, + * int video_stream_id, + * mv_surveillance_result_h event_result, + * void *user_data) + * { + * const char *event_type = NULL; + * int err = mv_surveillance_get_event_trigger_type(trigger, + * event_type); + * if (MEDIA_VISION_ERROR_NONE != err) return; + * + * if (0 == strncmp(event_type, + * MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED, + * 255)) + * { + * size_t tracked_person_num = 0; + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER, + * &tracked_person_num); + * + * if (MEDIA_VISION_ERROR_NONE != err || 0 == tracked_person_num) + * return; + * + * mv_rectangle_s *track_locations = + * (mv_rectangle_s*) + * malloc(sizeof(mv_rectangle_s) * tracked_person_num); + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS, + * track_locations); + * + * // Do something with locations where persons were tracked... + * + * free (track_locations); + * } + * } + * @endcode + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS \ + "TRACKED_PERSONS_LOCATIONS" + +/** + * @brief Name of the person recognition event type. + * + * @details This is common event for a person recognizing. The result will be + * a number of persons, which were recognized, their positions (face + * locations), labels and confidences of the recognition models (see + * documentation for @ref mv_face_recognize() in + * @ref CAPI_MEDIA_VISION_FACE_MODULE). When one subscribes to this + * event, the engine configuration must be filled by path to the saved + * face recognition model. These path should be set using + * @ref mv_engine_config_set_string_attribute() as attribute named + * @ref MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH. 
+ * See documentation for face recognition in + * @ref CAPI_MEDIA_VISION_FACE_MODULE and for engine configuration in + * @ref CAPI_MEDIA_VISION_COMMON_MODULE for details. Out parameters + * (can be accessed in the @ref mv_surveillance_event_occurred_cb + * callback using @ref mv_surveillance_get_result_value() function): + * * @ref MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER - the number + * of persons which were recognized;\n + * * @ref MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS - the locations + * of persons which were recognized;\n + * * @ref MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS - the labels of + * persons which were recognized;\n + * * @ref MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES - + * the confidences values that persons were recognized correctly. + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED \ + "MV_SURVEILLANCE_EVENT_PERSON_RECOGNIZED" + +/** + * @brief Name of the event result value that contains number of locations where + * faces were recognized. + * @details This event result value can be accessed after event triggers of + * @ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED type activation. 
+ * Result value is of @c size_t type, so has to be casted as in + * the following example: + * @code{.c} + * void event_occurred_cb(mv_surveillance_event_trigger_h trigger, + * mv_source_h source, + * int video_stream_id, + * mv_surveillance_result_h event_result, + * void *user_data) + * { + * const char *event_type = NULL; + * int err = mv_surveillance_get_event_trigger_type(trigger, + * event_type); + * if (MEDIA_VISION_ERROR_NONE != err) return; + * + * if (0 == strncmp(event_type, + * MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED, + * 255)) + * { + * size_t rec_person_num = 0; + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER, + * &rec_person_num); + * if (MEDIA_VISION_ERROR_NONE != err) + * return; + * + * // Do something with number of recognized persons... + * } + * } + * @endcode + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER \ + "NUMBER_OF_PERSONS" + +/** + * @brief Name of the event result value that contains a set of rectangular + * locations where person faces were recognized. + * @details This event result value can be accessed after event triggers of + * @ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED type activation. 
+ * Result value is of @a mv_rectangle_s array type, so has to be casted + * as in the following example: + * @code{.c} + * void event_occurred_cb(mv_surveillance_event_trigger_h trigger, + * mv_source_h source, + * int video_stream_id, + * mv_surveillance_result_h event_result, + * void *user_data) + * { + * const char *event_type = NULL; + * int err = mv_surveillance_get_event_trigger_type(trigger, + * event_type); + * if (MEDIA_VISION_ERROR_NONE != err) return; + * + * if (0 == strncmp(event_type, + * MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED, + * 255)) + * { + * size_t rec_person_num = 0; + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER, + * &rec_person_num); + * if (MEDIA_VISION_ERROR_NONE != err || 0 == rec_person_num) + * return; + * + * mv_rectangle_s *locations = + * (mv_rectangle_s*) + * malloc(sizeof(mv_rectangle_s) * rec_person_num); + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS, + * locations); + * + * // Do something with locations... + * + * free (locations); + * } + * } + * @endcode + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS \ + "PERSONS_LOCATIONS" + +/** + * @brief Name of the event result value that contains a set of labels that + * correspond to the recognized persons. + * @details This event result value can be accessed after event triggers of + * @ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED type activation. 
+ * Result value is of integers array type, so has to be casted + * as in the following example: + * @code{.c} + * void event_occurred_cb(mv_surveillance_event_trigger_h trigger, + * mv_source_h source, + * int video_stream_id, + * mv_surveillance_result_h event_result, + * void *user_data) + * { + * const char *event_type = NULL; + * int err = mv_surveillance_get_event_trigger_type(trigger, + * event_type); + * if (MEDIA_VISION_ERROR_NONE != err) return; + * + * if (0 == strncmp(event_type, + * MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED, + * 255)) + * { + * size_t rec_person_num = 0; + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER, + * &rec_person_num); + * if (MEDIA_VISION_ERROR_NONE != err || 0 == rec_person_num) + * return; + * + * int *labels = + * (int*)malloc(sizeof(int) * rec_person_num); + * err = mv_surveillance_get_result_value( + * event_result, + * MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS, + * labels); + * + * // Do something with labels... + * + * free (labels); + * } + * } + * @endcode + * + * @since_tizen 3.0 + */ +#define MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS \ + "PERSONS_LABELS" + +/** + * @brief Name of the event result value that contains a set of confidence + * values that correspond to the recognized persons. + * @details This event result value can be accessed after event triggers of + * @ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED type activation. 
+ * Result value is of doubles array type, so has to be casted
+ * as in the following example:
+ * @code{.c}
+ * void event_occurred_cb(mv_surveillance_event_trigger_h trigger,
+ * mv_source_h source,
+ * int video_stream_id,
+ * mv_surveillance_result_h event_result,
+ * void *user_data)
+ * {
+ * const char *event_type = NULL;
+ * int err = mv_surveillance_get_event_trigger_type(trigger,
+ * event_type);
+ * if (MEDIA_VISION_ERROR_NONE != err) return;
+ *
+ * if (0 == strncmp(event_type,
+ * MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED,
+ * 255))
+ * {
+ * size_t rec_person_num = 0;
+ * err = mv_surveillance_get_result_value(
+ * event_result,
+ * MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER,
+ * &rec_person_num);
+ * if (MEDIA_VISION_ERROR_NONE != err || 0 == rec_person_num)
+ * return;
+ *
+ * double *confidences =
+ * (double*)malloc(sizeof(double) * rec_person_num);
+ * err = mv_surveillance_get_result_value(
+ * event_result,
+ * MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES,
+ * confidences);
+ *
+ * // Do something with confidences...
+ *
+ * free (confidences);
+ * }
+ * }
+ * @endcode
+ *
+ * @since_tizen 3.0
+ */
+#define MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES \
+ "PERSONS_CONFIDENCES"
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup CAPI_MEDIA_VISION_SURVEILLANCE_MODULE
+ * @{
+ */
+
+/**
+ * @brief Defines MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH to set face
+ * recognition model file path. It is an attribute of the engine
+ * configuration.
+ * @details This value HAS TO BE set in engine configuration before subscription
+ * on @ref MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED event trigger
+ *
+ * @since_tizen 3.0
+ * @see mv_engine_config_set_string_attribute()
+ * @see mv_engine_config_get_string_attribute()
+ */
+#define MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH \
+ "MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH"
+
+/**
+ * @brief Defines MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD to set movement
+ * detection threshold.
It is an attribute of the engine configuration. + * @details This value might be set in engine configuration before subscription + * on @ref MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED event trigger + * to specify sensitivity of the movement detector. This value has to + * be integer in 0..255 range where 255 means that no movements will + * be detected, and 0 means that all frame changes will be interpreted + * as movements. Default value is 10. + * + * @since_tizen 3.0 + * @see mv_engine_config_set_int_attribute() + * @see mv_engine_config_get_int_attribute() + */ +#define MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD \ + "MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD" + +/** + * @brief Defines MV_SURVEILLANCE_SKIP_FRAMES_COUNT to set how many frames + * will be skipped during push source. It is an attribute of the engine + * configuration. + * @details This integer value might be set in engine configuration to specify + * number of @ref mv_surveillance_push_source() function calls will be + * ignored by subscription of the event trigger. Default value is 0. + * It means that no frames will be skipped and all + * @ref mv_surveillance_push_source() function calls will be processed. + * + * @since_tizen 3.0 + * @see mv_engine_config_set_int_attribute() + * @see mv_engine_config_get_int_attribute() + */ +#define MV_SURVEILLANCE_SKIP_FRAMES_COUNT "MV_SURVEILLANCE_SKIP_FRAMES_COUNT" + +/** + * @brief The handle to event trigger. + * + * @since_tizen 3.0 + * @remarks See supported event types and their descriptions in + * @ref CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES documentation + * section. + * Also the list of supported events can be obtained using + * @ref mv_surveillance_foreach_supported_event_type() function + */ +typedef void *mv_surveillance_event_trigger_h; + +/** + * @brief The handle to event trigger activation result. + * @details Result is a handle to the output values which are specific for each event. 
+ * See the output values names in the event types descriptions located + * in @ref CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES documentation + * section. + * Result values can be gotten by @ref mv_surveillance_get_result_value() + * function one by one in order specified in the event description ( + * the same order of event value names is supported by + * @ref mv_surveillance_foreach_event_result_name() function). + * This pointer will be destroyed when + * @ref mv_surveillance_event_occurred_cb passed. + * + * @since_tizen 3.0 + */ +typedef void *mv_surveillance_result_h; + +/** + * @brief Called when event trigger activation has been detected. + * + * @since_tizen 3.0 + * @remarks Handle @a event_result is valid only inside callback + * @param [in] trigger The event trigger handle + * @param [in] source The handle to the media source + * @param [in] video_stream_id The identifier of the video source where event + * has been detected + * @param [in] event_result The event result passed from the + * @ref mv_surveillance_subscribe_event_trigger() + * @param [in] user_data The user data passed from the + * @ref mv_surveillance_subscribe_event_trigger() + * function + * + * @pre Callback can be invoked only after + * @ref mv_surveillance_subscribe_event_trigger() + * was called for particular event trigger. + * + * @see mv_surveillance_subscribe_event_trigger() + * @see mv_surveillance_unsubscribe_event_trigger() + */ +typedef void (*mv_surveillance_event_occurred_cb)( + mv_surveillance_event_trigger_h trigger, + mv_source_h source, + int video_stream_id, + mv_surveillance_result_h event_result, + void *user_data); + +/** + * @brief Called to get the information once for each supported event type. 
+ *
+ * @since_tizen 3.0
+ * @remarks Don't release memory of @a event_type
+ * @param [in] event_type Character string containing name of the event type
+ * @param [in] user_data The user data passed from the
+ * @ref mv_surveillance_foreach_supported_event_type()
+ * function
+ * @return @c true to continue with the next iteration of the loop, \n
+ * otherwise @c false to break out of the loop
+ *
+ * @pre mv_surveillance_foreach_supported_event_type() will invoke this callback
+ * @see mv_surveillance_foreach_supported_event_type()
+ */
+typedef bool (*mv_surveillance_event_type_cb)(
+ const char *event_type,
+ void *user_data);
+
+/**
+ * @brief Called to get the result name from the triggered event.
+ *
+ * @since_tizen 3.0
+ * @remarks Don't release memory of @a name
+ * @param [in] name Character string containing the name of value that
+ * can be obtained from @ref mv_surveillance_result_h
+ * handle by @ref mv_surveillance_get_result_value()
+ * function
+ * @param [in] user_data The user data passed from the
+ * @ref mv_surveillance_foreach_event_result_name()
+ * function
+ * @return @c true to continue with the next iteration of the loop, \n
+ * otherwise @c false to break out of the loop
+ *
+ * @pre mv_surveillance_foreach_event_result_name() will invoke this
+ * callback
+ * @see mv_surveillance_foreach_event_result_name()
+ */
+typedef bool (*mv_surveillance_event_result_name_cb)(
+ const char *name,
+ void *user_data);
+
+/**
+ * @brief Creates surveillance event trigger handle.
+ * + * @since_tizen 3.0 + * @remarks List of supported event types can be obtained by + * @ref mv_surveillance_foreach_supported_event_type function + * @remarks You must release @a trigger by using + * @ref mv_surveillance_event_trigger_destroy() + * @param [in] event_type Name of the event type to be supported by the + * @a trigger + * @param [out] trigger A new handle to the event trigger + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @see mv_surveillance_event_trigger_destroy() + * @see mv_surveillance_foreach_supported_event_type() + */ +int mv_surveillance_event_trigger_create( + const char *event_type, + mv_surveillance_event_trigger_h *trigger); + +/** + * @brief Destroys the surveillance event trigger handle and releases all its + * resources. + * + * @since_tizen 3.0 + * @param [in] trigger The handle to the event trigger to be destroyed + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @see mv_surveillance_event_trigger_create + */ +int mv_surveillance_event_trigger_destroy( + mv_surveillance_event_trigger_h trigger); + +/** + * @brief Gets the surveillance event trigger type as character string. 
+ *
+ * @since_tizen 3.0
+ * @remarks The @a event_type should be freed using free()
+ * @param [in] trigger The handle to the event trigger
+ * @param [out] event_type The pointer to the character string which will be
+ * filled by textual name of the event type
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Event trigger has to be created by
+ * @ref mv_surveillance_event_trigger_create() function
+ */
+int mv_surveillance_get_event_trigger_type(
+ mv_surveillance_event_trigger_h trigger,
+ char **event_type);
+
+/**
+ * @brief Sets ROI (Region Of Interest) to the event trigger.
+ * @details When ROI is set for the event trigger, then event check for this
+ * @a trigger will be performed only inside the polygonal region
+ * determined by @a roi parameter.
+ * If this method has never been called for the @a trigger, then event
+ * will be checked for the whole input frame (event check is performed
+ * for each @ref mv_surveillance_push_source() function call).
+ * It is possible to change the ROI between
+ * @ref mv_surveillance_push_source() calls.
+ * + * @since_tizen 3.0 + * @param [in] trigger The handle to the event trigger + * @param [in] number_of_points The number of ROI points + * @param [in] roi The input array with ROI points + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @pre Event trigger has to be created by + * @ref mv_surveillance_event_trigger_create() function + * + * @see mv_surveillance_event_trigger_h + * @see mv_surveillance_get_event_trigger_roi() + */ +int mv_surveillance_set_event_trigger_roi( + mv_surveillance_event_trigger_h trigger, + int number_of_points, + mv_point_s *roi); + +/** + * @brief Gets ROI (Region Of Interest) from the event trigger. + * + * @since_tizen 3.0 + * @remark If @ref mv_surveillance_set_event_trigger_roi() has been never + * called for @a trigger, then @a number_of_points output value will be + * zero and @a roi pointer will be not changed. + * @param [in] trigger The handle to the event trigger + * @param [out] number_of_points The number of ROI points + * @param [out] roi The output array with ROI points + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @pre Event trigger has to be created by + * @ref mv_surveillance_event_trigger_create() function + * + * @post Memory for @a roi array must be released + * + * @see mv_surveillance_event_trigger_h + * @see mv_surveillance_set_event_trigger_roi() + */ +int mv_surveillance_get_event_trigger_roi( + mv_surveillance_event_trigger_h trigger, + int *number_of_points, + mv_point_s **roi); + +/** + * @brief Subscribes @a trigger to process sources pushed from video identified + * by @a video_stream_id. 
+ * @details When @a trigger is subscribed, then each time when function + * @ref mv_surveillance_push_source() is called for @a video_stream_id, + * event occurrence is checked. If this check is successful, + * @a callback is invoked. Details on occurred event can be obtained + * using @ref mv_surveillance_result_h handle from @a callback. + * + * @since_tizen 3.0 + * @remarks Use @ref mv_surveillance_unsubscribe_event_trigger() function for + * the same @a trigger and @a video_stream_id parameters to stop + * subscription. + * @param [in] trigger The handle to the event trigger activating + * calls of the @a callback function + * @param [in] video_stream_id The identifier of the video stream for which + * event trigger activation will be checked + * @param [in] engine_cfg The engine configuration of the event + * @param [in] callback Callback to be called each time when event + * occurrence is detected + * @param [in] user_data The user data to be passed to the @a callback + * function + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @post @ref mv_surveillance_event_occurred_cb will be called each time + * @a trigger is activated after @ref mv_surveillance_push_source() call + * + * @see mv_surveillance_event_trigger_h + * @see mv_surveillance_unsubscribe_event_trigger() + * @see mv_surveillance_push_source() + */ +int mv_surveillance_subscribe_event_trigger( + mv_surveillance_event_trigger_h trigger, + int video_stream_id, + mv_engine_config_h engine_cfg, + mv_surveillance_event_occurred_cb callback, + void *user_data); + +/** + * @brief Unsubscribes @a trigger from the event and stop calling @a callback. + * + * @since_tizen 3.0 + * @remarks To start handling trigger activation use + @ref mv_surveillance_subscribe_event_trigger(). 
+ * @param [in] trigger The handle to the event trigger for which + * subscription will be stopped + * @param [in] video_stream_id The identifier of the video source for which + * subscription will be stopped + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @pre To stop subscription it has to be created earlier with + * @ref mv_surveillance_subscribe_event_trigger() function + * + * @see mv_surveillance_event_trigger_h + * @see mv_surveillance_subscribe_event_trigger() + */ +int mv_surveillance_unsubscribe_event_trigger( + mv_surveillance_event_trigger_h trigger, + int video_stream_id); + +/** + * @brief Pushes source to the surveillance system to detect events. + * @details mv_surveillance_event_occurred_cb() will be called when any + * subscribing event detected. + * + * @since_tizen 3.0 + * @remarks @ref mv_surveillance_set_event_trigger_roi() function can be used + * to specify the polygon region where event can be detected only + * @param [in] source The handle to the media source + * @param [in] video_stream_id The identifier of video stream from which + * @a source is coming + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @pre To receive surveillance results, some event triggers has to be + * subscribed by @ref mv_surveillance_subscribe_event_trigger() function + * before @ref mv_surveillance_push_source() calls + * @pre Before calling of this method @a source has to be correctly filled. 
+ * @ref mv_source_fill_by_media_packet(), @ref mv_source_fill_by_buffer() + * functions can be used to fill @a source + * + * @see mv_surveillance_event_trigger_h + * @see mv_surveillance_event_occurred_cb + * @see mv_surveillance_subscribe_event_trigger() + * @see mv_surveillance_unsubscribe_event_trigger() + */ +int mv_surveillance_push_source( + mv_source_h source, + int video_stream_id); + +/** + * @brief Starts traversing through list of supported event types. + * + * @since_tizen 3.0 + * @remarks Supported event types and their descriptions can be found in + * @ref CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES documentation + * section + * @param [in] callback The callback function to be called for each + * supported event type + * @param [in] user_data The user data to be passed to the @a callback + * function + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @see mv_surveillance_event_type_cb + * @see mv_surveillance_foreach_event_result_name() + */ +int mv_surveillance_foreach_supported_event_type( + mv_surveillance_event_type_cb callback, + void *user_data); + +/** + * @brief Starts traversing through list of supported event result value names. + * + * @since_tizen 3.0 + * @remarks Supported event types, event result value names and their + * descriptions can be found in + * @ref CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES documentation + * section + * @param [in] event_type The name of the event type for which result value + * names will be passed to the @a callback. Can be + * set @c NULL. 
If set @c NULL then all supported + * event result value names will be traversed + * @param [in] callback The callback function to be called for each + * supported event result value name + * @param [in] user_data The user data to be passed to the @a callback + * function + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @see mv_surveillance_event_result_name_cb + * @see mv_surveillance_foreach_supported_event_type() + * @see mv_surveillance_get_result_value() + */ +int mv_surveillance_foreach_event_result_name( + const char *event_type, + mv_surveillance_event_result_name_cb callback, + void *user_data); + +/** + * @brief Gets result value. + * @details See the output values names in the event types descriptions located + * in @ref CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES documentation + * section. + * + * @since_tizen 3.0 + * @remarks The name can be obtained by + * @ref mv_surveillance_foreach_event_result_name function + * @param [in] result The handle to the event result + * @param [in] name The name of the value to be gotten + * @param [in, out] value The pointer to variable which will be filled + * by result value. 
To find the type of @a value + * please refer to the + * @ref CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES + * documentation section + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @pre Memory for value has to be allocated + * + * @see mv_surveillance_event_trigger_h + * @see mv_surveillance_event_occurred_cb + * @see mv_surveillance_subscribe_event_trigger() + * @see mv_surveillance_unsubscribe_event_trigger() + * @see mv_surveillance_foreach_supported_event_type() + * @see mv_surveillance_foreach_event_result_name() + */ +int mv_surveillance_get_result_value( + mv_surveillance_result_h result, + const char *name, + void *value); + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __TIZEN_MEDIAVISION_SURVEILLANCE_H__ */ diff --git a/include/mv_surveillance_private.h b/include/mv_surveillance_private.h new file mode 100644 index 0000000..4dc4bad --- /dev/null +++ b/include/mv_surveillance_private.h @@ -0,0 +1,48 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __TIZEN_MEDIAVISION_SURVEILLANCE_PRIVATE_H__ +#define __TIZEN_MEDIAVISION_SURVEILLANCE_PRIVATE_H__ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "mv_common.h" + +/** + * @brief Event trigger structure. + * @details See supported event types and their descriptions in + * @ref CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES documentation + * section. + * Also the list of supported events can be obtained using + * @ref mv_surveillance_foreach_supported_event_type() function + * + * @since_tizen 3.0 + */ +typedef struct +{ + unsigned int trigger_id; /**< Unique event trigger identifier */ + const char *event_type; /**< Type of the event */ + int number_of_roi_points; /**< Number of ROI (Region of interest) points */ + mv_point_s *roi; /**< ROI points array */ +} mv_surveillance_event_trigger_s; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __TIZEN_MEDIAVISION_SURVEILLANCE_PRIVATE_H__ */ diff --git a/media-vision-config.json b/media-vision-config.json index 2f0b46c..207e575 100644 --- a/media-vision-config.json +++ b/media-vision-config.json @@ -114,6 +114,21 @@ "name" : "MV_FACE_RECOGNITION_MODEL_TYPE", "type" : "integer", "value" : 3 + }, + { + "name" : "MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH", + "type" : "string", + "value" : "" + }, + { + "name" : "MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD", + "type" : "integer", + "value" : 10 + }, + { + "name" : "MV_SURVEILLANCE_SKIP_FRAMES_COUNT", + "type" : "integer", + "value" : 0 } ] } diff --git a/mv_surveillance/CMakeLists.txt b/mv_surveillance/CMakeLists.txt new file mode 100644 index 0000000..e6a0a44 --- /dev/null +++ b/mv_surveillance/CMakeLists.txt @@ -0,0 +1,8 @@ +project(mv_surveillance_port) +cmake_minimum_required(VERSION 2.6) + +if(MEDIA_VISION_SURVEILLANCE_LICENSE_PORT) + add_subdirectory(${PROJECT_SOURCE_DIR}/surveillance_lic) # Licensed port +else() + add_subdirectory(${PROJECT_SOURCE_DIR}/surveillance) # Open port +endif() diff --git 
a/mv_surveillance/surveillance/CMakeLists.txt b/mv_surveillance/surveillance/CMakeLists.txt new file mode 100644 index 0000000..b7b5f03 --- /dev/null +++ b/mv_surveillance/surveillance/CMakeLists.txt @@ -0,0 +1,26 @@ +project(${MV_SURVEILLANCE_LIB_NAME}) +cmake_minimum_required(VERSION 2.6) + +set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG) + +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${LIB_INSTALL_DIR}) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${LIB_INSTALL_DIR}) +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) + +include_directories("${INC_DIR}") +include_directories("${PROJECT_SOURCE_DIR}/include") +include_directories("${PROJECT_SOURCE_DIR}/src") + +file(GLOB MV_SURVEILLANCE_INC_LIST "${PROJECT_SOURCE_DIR}/include/*.h") +file(GLOB MV_SURVEILLANCE_SRC_LIST "${PROJECT_SOURCE_DIR}/src/*.cpp" + "${PROJECT_SOURCE_DIR}/src/*.c") + +if(FORCED_STATIC_BUILD) + add_library(${PROJECT_NAME} STATIC ${MV_SURVEILLANCE_INC_LIST} ${MV_SURVEILLANCE_SRC_LIST}) +else() + add_library(${PROJECT_NAME} SHARED ${MV_SURVEILLANCE_INC_LIST} ${MV_SURVEILLANCE_SRC_LIST}) +endif() + +target_link_libraries(${PROJECT_NAME} ${MV_COMMON_LIB_NAME} ${MV_FACE_LIB_NAME}) + +INSTALL(TARGETS ${PROJECT_NAME} DESTINATION ${LIB_INSTALL_DIR}) diff --git a/mv_surveillance/surveillance/include/EventDefs.h b/mv_surveillance/surveillance/include/EventDefs.h new file mode 100644 index 0000000..9d92ade --- /dev/null +++ b/mv_surveillance/surveillance/include/EventDefs.h @@ -0,0 +1,71 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MEDIA_VISION_EVENT_DEFS_H__ +#define __MEDIA_VISION_EVENT_DEFS_H__ + +/** + * @file EventDefs.h + * @brief This file contains definitions for event triggers. + */ + +#include + +#include +#include +#include +#include + +#include + +namespace mediavision { +namespace surveillance { + +typedef std::map > EventTypesMap; +typedef EventTypesMap::iterator EventTypesMapIter; +typedef EventTypesMap::const_iterator EventTypesMapConstIter; + +typedef std::vector StringVector; +typedef StringVector::iterator StringIter; +typedef StringVector::const_iterator StringConstIter; + +typedef std::vector IntVector; +typedef IntVector::iterator IntIter; +typedef IntVector::const_iterator IntConstIter; + +typedef std::vector DoubleVector; +typedef DoubleVector::iterator DoubleIter; +typedef DoubleVector::const_iterator DoubleConstIter; + +typedef std::vector MVRectangles; +typedef MVRectangles::iterator MVRectanglesIter; +typedef MVRectangles::const_iterator MVRectanglesConstIter; + +typedef std::vector CVRectangles; +typedef CVRectangles::iterator CVRectanglesIter; +typedef CVRectangles::const_iterator CVRectanglesConstIter; + +typedef std::vector MVPoints; +typedef MVPoints::iterator MVPointsIter; +typedef MVPoints::const_iterator MVPointsConstIter; + +typedef std::vector CVPoints; +typedef std::vector Contours; + +} /* surveillance */ +} /* mediavision */ + +#endif /* __MEDIA_VISION_EVENT_DEFS_H__ */ diff --git a/mv_surveillance/surveillance/include/EventManager.h b/mv_surveillance/surveillance/include/EventManager.h new file mode 100644 index 
0000000..fd13f52 --- /dev/null +++ b/mv_surveillance/surveillance/include/EventManager.h @@ -0,0 +1,190 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MEDIA_VISION_EVENT_MANAGER_H__ +#define __MEDIA_VISION_EVENT_MANAGER_H__ + +/** + * @file EventManager.h + * @brief This file contains functionality for event manager. + */ + +#include "EventTrigger.h" +#include "EventDefs.h" + +namespace mediavision { +namespace surveillance { + +class EventManager; + +/** + * @class EventManagerDestroyer + * @brief This class contains event manager destroyer functionality. + * + * @since_tizen 3.0 + */ +class EventManagerDestroyer { +public: + + /** + * @brief Default destructor. + * + * @since_tizen 3.0 + */ + ~EventManagerDestroyer(); + + /** + * @brief Initializes pointer to EventManager which will be destroyed. + * + * @since_tizen 3.0 + * @param [in] pointer The pointer to EventManager which will be destroyed + */ + void initialize(EventManager *pointer); + +private: + + EventManager *__pInstance; +}; + +/** + * @class EventManager + * @brief This class contains event manager functionality. + * + * @since_tizen 3.0 + */ + +class EventManager { +public: + + /** + * @brief Gets EventManager instance. + * + * @since_tizen 3.0 + */ + static EventManager& getInstance(); + + /** + * @brief Registers event. 
+ *
+ * @since_tizen 3.0
+ * @param [in] eventTrigger The event trigger to be registered (NULL if internal)
+ * @param [in] triggerId Unique event trigger identifier to be registered
+ * @param [in] eventType Type of the event
+ * @param [in] videoStreamId Video stream identifier
+ * @param [in] engineCfg The engine configuration for event trigger
+ * @param [in] callback The callback to be called when the event occurs
+ * @param [in] user_data The user data to be passed to the callback function
+ * @param [in] numberOfPoints The number of ROI points
+ * @param [in] roi The input array with ROI points
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ int registerEvent(
+ mv_surveillance_event_trigger_h eventTrigger,
+ long int triggerId,
+ const char *eventType,
+ int videoStreamId,
+ mv_engine_config_h engineCfg,
+ mv_surveillance_event_occurred_cb callback,
+ void *user_data,
+ int numberOfPoints,
+ mv_point_s *roi);
+
+ /**
+ * @brief Unregisters event.
+ *
+ * @since_tizen 3.0
+ * @param [in] triggerId Unique event trigger identifier to be
+ * unregistered
+ * @param [in] videoStreamId Video stream identifier for which event
+ * will be unregistered
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ int unregisterEvent(long int triggerId, int videoStreamId);
+
+ /**
+ * @brief Pushes media source to run event triggers.
+ *
+ * @since_tizen 3.0
+ * @param [in] source The media source to be pushed
+ * @param [in] videoStreamId The video stream identifier for media source
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ int pushSource(mv_source_h source, int videoStreamId);
+
+ /**
+ * @brief Gets supported event types.
+ *
+ * @since_tizen 3.0
+ * @param [out] eventTypes The supported event types
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ static int getSupportedEventTypes(StringVector& eventTypes);
+
+ /**
+ * @brief Gets all supported event result value names.
+ * + * @since_tizen 3.0 + * @param [out] eventResValNames The supported event result value names + * @return @c 0 on success, otherwise a negative error value + */ + static int getSupportedEventResultValueNames(StringVector& eventResValNames); + + /** + * @brief Gets supported event result value names for an event type. + * + * @since_tizen 3.0 + * @param [in] eventTypeName The name of the event type to return + * result value names for + * @param [out] eventResValNames The supported event result value names + * @return @c 0 on success, otherwise a negative error value + */ + static int getSupportedEventResultValueNames( + const std::string& eventTypeName, + StringVector& eventResValNames); + +private: + + EventManager(); + + EventManager(const EventManager&); + + EventManager& operator=(EventManager&); + + ~EventManager(); + + static void setSupportedEventTypes(); + + EventTriggersIter isTriggerExists(EventTrigger *trigger, int videoStreamId); + + friend class EventManagerDestroyer; + +private: + + static EventManager *__pInstance; + + static EventManagerDestroyer Destroyer; + + static EventTypesMap SupportedEventTypes; + +private: + + EventTriggersMap __eventTriggers; +}; + +} /* surveillance */ +} /* mediavision */ + +#endif /* __MEDIA_VISION_EVENT_MANAGER_H__ */ diff --git a/mv_surveillance/surveillance/include/EventResult.h b/mv_surveillance/surveillance/include/EventResult.h new file mode 100644 index 0000000..7ce8f45 --- /dev/null +++ b/mv_surveillance/surveillance/include/EventResult.h @@ -0,0 +1,59 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEDIA_VISION_EVENT_RESULT_H__
+#define __MEDIA_VISION_EVENT_RESULT_H__
+
+/**
+ * @file EventResult.h
+ * @brief This file contains interface for event result.
+ */
+
+namespace mediavision {
+namespace surveillance {
+
+/**
+ * @class EventResult
+ * @brief This class contains event result interface.
+ *
+ * @since_tizen 3.0
+ */
+class EventResult {
+public:
+	/**
+	 * @brief Default destructor.
+	 *
+	 * @since_tizen 3.0
+	 */
+	virtual ~EventResult() {}
+
+	/**
+	 * @brief Gets result value.
+	 *
+	 * @since_tizen 3.0
+	 * @param [in] valueName The name of the value to be gotten
+	 * @param [in, out] value The pointer to variable which will be filled
+	 * by result value
+	 * @return @c 0 on success, otherwise a negative error value
+	 */
+	virtual int getResultValue(const char *valueName, void *value) const = 0;
+};
+
+
+} /* surveillance */
+} /* mediavision */
+
+#endif /* __MEDIA_VISION_EVENT_RESULT_H__ */
diff --git a/mv_surveillance/surveillance/include/EventTrigger.h b/mv_surveillance/surveillance/include/EventTrigger.h
new file mode 100644
index 0000000..d585195
--- /dev/null
+++ b/mv_surveillance/surveillance/include/EventTrigger.h
@@ -0,0 +1,227 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MEDIA_VISION_EVENT_TRIGGER_H__ +#define __MEDIA_VISION_EVENT_TRIGGER_H__ + +/** + * @file EventTrigger.h + * @brief This file contains interface for event trigger. + */ + +#include "EventDefs.h" + +#include +#include + +#include +#include +#include + +namespace mediavision { +namespace surveillance { + +/** + * @class EventTrigger + * @brief This class contains event trigger interface. + * + * @since_tizen 3.0 + */ +class EventTrigger { +public: + /** + * @brief Default constructor. + * + * @since_tizen 3.0 + * @param [in] eventTrigger The event trigger to be register (NULL if internal) + * @param [in] triggerId Unique event trigger identifier to be register + * @param [in] videoStreamId Video stream identifier + * @param [in] callback The callback to be called if event will be occured + * @param [in] user_data The user data to be passed to the callback function + * @param [in] numberOfPoints The number of ROI points + * @param [in] roi The intput array with ROI points + */ + EventTrigger( + mv_surveillance_event_trigger_h eventTrigger, + long int triggerId, + int videoStreamId, + mv_surveillance_event_occurred_cb callback, + void *userData, + int numberOfPoints, + mv_point_s *roi); + + /** + * @brief Default destructor. + * + * @since_tizen 3.0 + */ + virtual ~EventTrigger(); + + /** + * @brief Parses engine configuration. 
+ * + * @since_tizen 3.0 + * @param [in] engineConfig The engine configuration to be parsed + * @return @c 0 on success, otherwise a negative error value + */ + virtual int parseEngineConfig(mv_engine_config_h engineConfig) = 0; + + /** + * @brief Pushes media source. + * + * @since_tizen 3.0 + * @param [in] source The media source to be parsed + * @param [in] graySource The media source converted to gray scale + * @param [in] grayImage The converted to gray scale source + * @return @c 0 on success, otherwise a negative error value + */ + virtual int pushSource( + mv_source_h source, + mv_source_h graySource, + const cv::Mat& grayImage) = 0; + + /** + * @brief Gets event type. + * + * @since_tizen 3.0 + * @return string with event type + */ + virtual std::string getEventType() const = 0; + + /** + * @brief Gets video stream identifier of event trigger. + * + * @since_tizen 3.0 + * @return video stream identifier + */ + int getVideoStreamId() const; + + /** + * @brief Checks if callback with the identifier is subscribed. + * + * @since_tizen 3.0 + * @return true if suscribed, false otherwise + */ + bool isCallbackSubscribed(long int triggerId) const; + + /** + * @brief Subscibes callback with unique identifier. + * + * @since_tizen 3.0 + * @param [in] eventTrigger The event trigger to be register (NULL if internal) + * @param [in] triggerId Unique event trigger identifier to be subscribed + * @param [in] callback The callback to be called if event will be occured + * @param [in] user_data The user data to be passed to the callback function + * @param [in] numberOfPoints The number of ROI points + * @param [in] roi The intput array with ROI points + * @return @c true on success, false otherwise + */ + bool subscribeCallback( + mv_surveillance_event_trigger_h eventTrigger, + long int triggerId, + mv_surveillance_event_occurred_cb callback, + void *userData, + int numberOfPoints, + mv_point_s *roi); + + /** + * @brief Unsubscibes callback with unique identifier. 
+ *
+ * @since_tizen 3.0
+ * @param [in] triggerId Unique event trigger identifier to be unsubscribed
+ * @return @c true on success, false otherwise
+ */
+	bool unsubscribeCallback(long int triggerId);
+
+	/**
+	 * @brief Checks if there are no subscribed callbacks.
+	 *
+	 * @since_tizen 3.0
+	 * @return @c true if no callbacks are subscribed, false otherwise
+	 */
+	bool isCallbacksEmpty() const;
+
+	/**
+	 * @brief Applies ROI (Region Of Interest) to input image.
+	 *
+	 * @since_tizen 3.0
+	 * @param [in, out] image The input image where ROI will be applied
+	 * @param [in] imageWidth The input image width
+	 * @param [in] imageHeight The input image height
+	 * @param [in] scalePoints True if ROI points must be scaled, false otherwise
+	 * @param [in] scaleX The scale for X ROI point coordinate
+	 * @param [in] scaleY The scale for Y ROI point coordinate
+	 * @return @c 0 on success, otherwise a negative error value
+	 */
+	int applyROIToImage(
+		unsigned char *image,
+		int imageWidth,
+		int imageHeight,
+		bool scalePoints = false,
+		int scaleX = 1,
+		int scaleY = 1);
+
+	/**
+	 * @brief Comparison operator for equal case.
+	 *
+	 * @since_tizen 3.0
+	 * @return true if event trigger is equal to other, false otherwise
+	 */
+	virtual bool operator==(const EventTrigger& other) const;
+
+	/**
+	 * @brief Comparison operator for not equal case.
+ * + * @since_tizen 3.0 + * @return true if event trigger is not equal to other, false otherwise + */ + virtual bool operator!=(const EventTrigger& other) const; + +protected: + struct CallbackData { + mv_surveillance_event_trigger_h eventTrigger; + + mv_surveillance_event_occurred_cb callback; + + void *userData; + }; + + typedef std::map CallbackDataMap; + typedef CallbackDataMap::const_iterator CallbackDataMapConstIter; + typedef CallbackDataMap::iterator CallbackDataMapIter; + + typedef std::pair CallbackDataPair; + +protected: + static long int InternalTriggersCounter; + +protected: + int __videoStreamId; + + MVPoints __roi; + + CallbackDataMap __callbackDataMap; +}; + +typedef std::list EventTriggers; +typedef std::map EventTriggersMap; +typedef EventTriggers::const_iterator EventTriggersConstIter; +typedef EventTriggers::iterator EventTriggersIter; + +} /* surveillance */ +} /* mediavision */ + +#endif /* __MEDIA_VISION_EVENT_TRIGGER_H__ */ diff --git a/mv_surveillance/surveillance/include/EventTriggerMovementDetection.h b/mv_surveillance/surveillance/include/EventTriggerMovementDetection.h new file mode 100644 index 0000000..e3b57a9 --- /dev/null +++ b/mv_surveillance/surveillance/include/EventTriggerMovementDetection.h @@ -0,0 +1,159 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __MEDIA_VISION_EVENT_TRIGGER_MOVEMENT_DETECTION_H__ +#define __MEDIA_VISION_EVENT_TRIGGER_MOVEMENT_DETECTION_H__ + +/** + * @file EventTriggerMovementDetection.h + * @brief This file contains interface for movement detection events. + */ + +#include "EventTrigger.h" + +#include "EventResult.h" +#include "EventDefs.h" + +#include + +namespace mediavision { +namespace surveillance { + +/** + * @class EventResultMovementDetection + * @brief This class contains movement detection event results. + * + * @since_tizen 3.0 + */ +class EventResultMovementDetection : public EventResult { +public: + /** + * @brief Gets result value. + * + * @since_tizen 3.0 + * @param [in] valueName The name of the value to be gotten + * @param [in, out] value The pointer to variable which will be filled + * by result value + * @return @c 0 on success, otherwise a negative error value + */ + virtual int getResultValue(const char *valueName, void *value) const; + +public: + MVRectangles __movementRegions; /**< Regions where movements were detected */ + + cv::Mat __grayImage; /** Current gray image (only for internal usage) */ +}; + +/** + * @class EventTriggerMovementDetection + * @brief This class contains movement detection events. + * + * @since_tizen 3.0 + */ +class EventTriggerMovementDetection : public EventTrigger { +public: + /** + * @brief Default constructor. 
+ * + * @since_tizen 3.0 + * @param [in] eventTrigger The event trigger to be register (NULL if internal) + * @param [in] triggerId Unique event trigger identifier to be register + * @param [in] videoStreamId Video stream identifier + * @param [in] callback The callback to be called if event will be occured + * @param [in] user_data The user data to be passed to the callback function + * @param [in] numberOfPoints The number of ROI points + * @param [in] roi The intput array with ROI points + */ + EventTriggerMovementDetection( + mv_surveillance_event_trigger_h eventTrigger, + long int triggerId, + int videoStreamId, + mv_surveillance_event_occurred_cb callback, + void *userData, + int numberOfPoints, + mv_point_s *roi); + + /** + * @brief Default destructor. + * + * @since_tizen 3.0 + */ + virtual ~EventTriggerMovementDetection(); + + /** + * @brief Parses engine configuration. + * + * @since_tizen 3.0 + * @param [in] engineConfig The engine configuration to be parsed + * @return @c 0 on success, otherwise a negative error value + */ + virtual int parseEngineConfig(mv_engine_config_h engineConfig); + + /** + * @brief Pushes media source. + * + * @since_tizen 3.0 + * @param [in] source The media source to be parsed + * @param [in] graySource The media source converted to gray scale + * @param [in] grayImage The converted to gray scale source + * @return @c 0 on success, otherwise a negative error value + */ + virtual int pushSource( + mv_source_h source, + mv_source_h graySource, + const cv::Mat& grayImage); + + /** + * @brief Gets event type. + * + * @since_tizen 3.0 + * @return string with event type + */ + virtual std::string getEventType() const; + + /** + * @brief Comparison operator for equal case. + * + * @since_tizen 3.0 + * @return true if event trigger is equal to other, false otherwise + */ + virtual bool operator==(const EventTriggerMovementDetection& other) const; + + /** + * @brief Comparison operator for not equal case. 
+ * + * @since_tizen 3.0 + * @return true if event trigger is not equal to other, false otherwise + */ + virtual bool operator!=(const EventTriggerMovementDetection& other) const; + +private: + static const cv::Mat __ERODE_KERNEL; + + static const cv::Mat __DILATE_KERNEL; + +private: + cv::Mat __previousImage; + + EventResultMovementDetection *__eventResult; + + int __diffThreshold; +}; + +} /* surveillance */ +} /* mediavision */ + +#endif /* __MEDIA_VISION_EVENT_TRIGGER_MOVEMENT_DETECTION_H__ */ diff --git a/mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h b/mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h new file mode 100644 index 0000000..7aeecff --- /dev/null +++ b/mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h @@ -0,0 +1,211 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MEDIA_VISION_EVENT_TRIGGER_PERSON_APPEARANCE_H__ +#define __MEDIA_VISION_EVENT_TRIGGER_PERSON_APPEARANCE_H__ + +/** + * @file EventTriggerPersonAppearance.h + * @brief This file contains interface for person appeared / disapeared events. 
+ */ + +#include "EventTrigger.h" + +#include "EventResult.h" +#include "EventDefs.h" +#include "HoGDetector.h" + +#include + +namespace mediavision { +namespace surveillance { + +/** + * @class EventResultPersonAppearance + * @brief This class contains person appeared / disapeared event results. + * + * @since_tizen 3.0 + */ +class EventResultPersonAppearance : public EventResult { +public: + /** + * @brief Gets result value. + * + * @since_tizen 3.0 + * @param [in] valueName The name of the value to be gotten + * @param [in, out] value The pointer to variable which will be filled + * by result value + * @return @c 0 on success, otherwise a negative error value + */ + virtual int getResultValue(const char *valueName, void *value) const; + +public: + MVRectangles __appearedLocations; /**< Locations of persons which were appeared + first time*/ + + MVRectangles __trackedLocations; /**< Locations of persons which were tracked + from previous frame*/ + + MVRectangles __disappearedLocations; /**< Locations of persons which were + disappeared */ +}; + +/** + * @class EventTriggerPersonAppearance + * @brief This class contains person appeared / disapeared events. + * + * @since_tizen 3.0 + */ +class EventTriggerPersonAppearance : public EventTrigger { +public: + /** + * @brief Default constructor. 
+ * + * @since_tizen 3.0 + * @param [in] eventTrigger The event trigger to be register (NULL if internal) + * @param [in] triggerId Unique event trigger identifier to be register + * @param [in] videoStreamId Video stream identifier + * @param [in] callback The callback to be called if event will be occured + * @param [in] user_data The user data to be passed to the callback function + * @param [in] numberOfPoints The number of ROI points + * @param [in] roi The intput array with ROI points + */ + EventTriggerPersonAppearance( + mv_surveillance_event_trigger_h eventTrigger, + long int triggerId, + int videoStreamId, + mv_surveillance_event_occurred_cb callback, + void *userData, + int numberOfPoints, + mv_point_s *roi); + + /** + * @brief Default destructor. + * + * @since_tizen 3.0 + */ + virtual ~EventTriggerPersonAppearance(); + + /** + * @brief Parses engine configuration. + * + * @since_tizen 3.0 + * @param [in] engineConfig The engine configuration to be parsed + * @return @c 0 on success, otherwise a negative error value + */ + virtual int parseEngineConfig(mv_engine_config_h engineConfig); + + /** + * @brief Pushes media source. + * + * @since_tizen 3.0 + * @param [in] source The media source to be parsed + * @param [in] graySource The media source converted to gray scale + * @param [in] grayImage The converted to gray scale source + * @return @c 0 on success, otherwise a negative error value + */ + virtual int pushSource( + mv_source_h source, + mv_source_h graySource, + const cv::Mat& grayImage); + + /** + * @brief Gets event type. + * + * @since_tizen 3.0 + * @return string with event type + */ + virtual std::string getEventType() const; + + /** + * @brief Comparison operator for equal case. + * + * @since_tizen 3.0 + * @return true if event trigger is equal to other, false otherwise + */ + virtual bool operator==(const EventTriggerPersonAppearance& other) const; + + /** + * @brief Comparison operator for not equal case. 
+ * + * @since_tizen 3.0 + * @return true if event trigger is not equal to other, false otherwise + */ + virtual bool operator!=(const EventTriggerPersonAppearance& other) const; + +private: + static void movementDetectedCB( + mv_surveillance_event_trigger_h event_trigger, + mv_source_h source, + int video_stream_id, + mv_surveillance_result_h event_result, + void *user_data); + +private: + + void runCallbacks(mv_source_h source); + +private: + + struct TrackedRectangle { + cv::Rect rect; + + int framesCount; + + TrackedRectangle(cv::Rect _rect, int _framesCount) + { + rect = _rect; + framesCount = _framesCount; + } + }; + + typedef std::list TrackedRectangles; + typedef TrackedRectangles::const_iterator TrackedRectanglesConstIter; + typedef TrackedRectangles::iterator TrackedRectanglesIter; + +private: + int __skipFramesCount; + + int __frameCounter; /**< Counts frames on which detection has not be launched */ + + long int __movementDetectedEventId; + + float __factorX; + + float __factorY; + + cv::Rect __rectToDetect; + + cv::Rect __rectToDetectPrevious; + + TrackedRectangles __trackedRects; + + CVRectangles __appearedRects; + + CVRectangles __disappearedRects; + + modifiedcv::HOGDescriptor __hogClassifier; /**< Classifier to be used for full body + person detection */ + + MVRectangles __detectedLocations; + + EventResultPersonAppearance *__eventResult; +}; + +} /* surveillance */ +} /* mediavision */ + +#endif /* __MEDIA_VISION_EVENT_TRIGGER_PERSON_APPEARANCE_H__ */ diff --git a/mv_surveillance/surveillance/include/EventTriggerPersonRecognition.h b/mv_surveillance/surveillance/include/EventTriggerPersonRecognition.h new file mode 100644 index 0000000..cd3448e --- /dev/null +++ b/mv_surveillance/surveillance/include/EventTriggerPersonRecognition.h @@ -0,0 +1,192 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance 
with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MEDIA_VISION_EVENT_TRIGGER_PERSON_RECOGNITION_H__ +#define __MEDIA_VISION_EVENT_TRIGGER_PERSON_RECOGNITION_H__ + +/** + * @file EventTriggerPersonRecognition.h + * @brief This file contains interface for person recognized events. + */ + +#include + +#include "EventTrigger.h" + +#include "EventResult.h" +#include "EventDefs.h" + +#include "EventTriggerPersonAppearance.h" + +namespace mediavision { +namespace surveillance { + +/** + * @class EventResultPersonRecogniton + * @brief This class contains person recognized event results. + * + * @since_tizen 3.0 + */ +class EventResultPersonRecognition : public EventResult { +public: + /** + * @brief Gets result value. + * + * @since_tizen 3.0 + * @param [in] valueName The name of the value to be gotten + * @param [in, out] value The pointer to variable which will be filled + * by result value + * @return @c 0 on success, otherwise a negative error value + */ + virtual int getResultValue(const char *valueName, void *value) const; + +public: + MVRectangles __locations; /**< Persons locations */ + + IntVector __faceLabels; /**< Persons face lables */ + + DoubleVector __confidences; /**< Persons face recognition confidences */ +}; + +/** + * @class EventTriggerPersonRecognition + * @brief This class contains person recognized events. + * + * @since_tizen 3.0 + */ +class EventTriggerPersonRecognition : public EventTrigger { +public: + /** + * @brief Default constructor. 
+ * + * @since_tizen 3.0 + * @param [in] eventTrigger The event trigger to be register (NULL if internal) + * @param [in] triggerId Unique event trigger identifier to be register + * @param [in] videoStreamId Video stream identifier + * @param [in] callback The callback to be called if event will be occured + * @param [in] user_data The user data to be passed to the callback function + * @param [in] numberOfPoints The number of ROI points + * @param [in] roi The intput array with ROI points + */ + EventTriggerPersonRecognition( + mv_surveillance_event_trigger_h eventTrigger, + long int triggerId, + int videoStreamId, + mv_surveillance_event_occurred_cb callback, + void *userData, + int numberOfPoints, + mv_point_s *roi); + + /** + * @brief Default destructor. + * + * @since_tizen 3.0 + */ + virtual ~EventTriggerPersonRecognition(); + + /** + * @brief Parses engine configuration. + * + * @since_tizen 3.0 + * @param [in] engineConfig The engine configuration to be parsed + * @return @c 0 on success, otherwise a negative error value + */ + virtual int parseEngineConfig(mv_engine_config_h engineConfig); + + /** + * @brief Pushes media source. + * + * @since_tizen 3.0 + * @param [in] source The media source to be parsed + * @param [in] graySource The media source converted to gray scale + * @param [in] grayImage The converted to gray scale source + * @return @c 0 on success, otherwise a negative error value + */ + virtual int pushSource( + mv_source_h source, + mv_source_h graySource, + const cv::Mat& grayImage); + + /** + * @brief Gets event type. + * + * @since_tizen 3.0 + * @return string with event type + */ + virtual std::string getEventType() const; + + /** + * @brief Comparison operator for equal case. + * + * @since_tizen 3.0 + * @return true if event trigger is equal to other, false otherwise + */ + virtual bool operator==(const EventTriggerPersonRecognition& other) const; + + /** + * @brief Comparison operator for not equal case. 
+ * + * @since_tizen 3.0 + * @return true if event trigger is not equal to other, false otherwise + */ + virtual bool operator!=(const EventTriggerPersonRecognition& other) const; + + /** + * @brief Sets event results. + * + * @since_tizen 3.0 + * @param [in] faceLocation The location of the face recognized on @a source. + * @param [in] faceLabel The label that identifies face which was + * recognized in the @a source. + * @param [in] confidence The confidence of the @a recognition_model + * that face has been recognized correctly + * (value from 0.0 to 1.0). + * @return @c 0 on success, otherwise a negative error value + */ + void setEventResults( + mv_rectangle_s faceLocation, + int faceLabel, + double confidence); + +private: + mv_face_recognition_model_h __faceRecognitionModel; + + mv_source_h __lastFrame; + + EventResultPersonRecognition *__eventResult; + +private: + static void faceDetectedCB( + mv_source_h source, + mv_engine_config_h engine_cfg, + mv_rectangle_s *faces_locations, + int number_of_faces, + void *user_data); + + static void faceRecognizedCB( + mv_source_h source, + mv_face_recognition_model_h recognition_model, + mv_engine_config_h engine_cfg, + mv_rectangle_s *face_location, + const int *face_label, + double confidence, + void *user_data); +}; + +} /* surveillance */ +} /* mediaVision */ + +#endif /* __MEDIA_VISION_EVENT_TRIGGER_PERSON_RECOGNITION_H__ */ diff --git a/mv_surveillance/surveillance/include/HoGDetector.h b/mv_surveillance/surveillance/include/HoGDetector.h new file mode 100644 index 0000000..d4bb400 --- /dev/null +++ b/mv_surveillance/surveillance/include/HoGDetector.h @@ -0,0 +1,194 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __MEDIA_VISION_HOGDETECTOR_H__ +#define __MEDIA_VISION_HOGDETECTOR_H__ + +/** + * @file HOGDetector.h + * @brief This file contains structure of HOG detector. + */ + +#include "opencv2/core/core.hpp" +#include "opencv2/objdetect/objdetect.hpp" + +#include + +namespace modifiedcv { + +using namespace cv; + +struct HOGDescriptor { + enum { L2Hys = 0 }; + enum { DEFAULT_NLEVELS = 64 }; + + // default constructor + HOGDescriptor() : + winSize(64, 128), + blockSize(16, 16), + blockStride(8, 8), + cellSize(8, 8), + nbins(9), + derivAperture(1), + winSigma(-1), + histogramNormType(HOGDescriptor::L2Hys), + L2HysThreshold(0.2), + gammaCorrection(true), + nlevels(HOGDescriptor::DEFAULT_NLEVELS) + {} + + // constructor + HOGDescriptor( + Size _winSize, + Size _blockSize, + Size _blockStride, + Size _cellSize, + int _nbins, + int _derivAperture = 1, + double _winSigma = -1., + int _histogramNormType = L2Hys, + double _L2HysThreshold = 0.2, + bool _gammaCorrection = false, + int _nlevels = DEFAULT_NLEVELS) : + winSize(_winSize), + blockSize(_blockSize), + blockStride(_blockStride), + cellSize(_cellSize), + nbins(_nbins), + derivAperture(_derivAperture), + winSigma(_winSigma), + histogramNormType(_histogramNormType), + L2HysThreshold(_L2HysThreshold), + gammaCorrection(_gammaCorrection), + nlevels(_nlevels) + {} + + // default destructor + virtual ~HOGDescriptor() {} + + size_t getDescriptorSize() const; + + bool checkDetectorSize() const; + + double getWinSigma() const; + + virtual void setSVMDetector(InputArray _svmdetector); + + virtual void compute( + const Mat& img, + CV_OUT vector& descriptors, + Size winStride = Size(), + Size padding = Size(), + const vector& locations = vector()) const; + + //with found weights output + virtual void detect( + const Mat& img, + CV_OUT vector& foundLocations, + CV_OUT vector& weights, + double hitThreshold = 0., + Size winStride = Size(), + Size padding = Size(), + const vector& searchLocations = vector()) const; + + 
//without found weights output + virtual void detect( + const Mat& img, + CV_OUT vector& foundLocations, + double hitThreshold = 0., + Size winStride = Size(), + Size padding = Size(), + const vector& searchLocations = vector()) const; + + //with result weights output + virtual void detectMultiScale( + const Mat& img, + CV_OUT vector& foundLocations, + CV_OUT vector& foundWeights, + double hitThreshold = 0, + Size winStride = Size(), + Size padding = Size(), + double scale = 1.05, + double finalThreshold = 2.0, + bool useMeanshiftGrouping = false) const; + + //without found weights output + virtual void detectMultiScale( + const Mat& img, + CV_OUT vector& foundLocations, + double hitThreshold = 0., + Size winStride = Size(), + Size padding = Size(), + double scale = 1.05, + double finalThreshold = 2.0, + bool useMeanshiftGrouping = false) const; + + virtual void computeGradient( + const Mat& img, + CV_OUT Mat& grad, + CV_OUT Mat& angleOfs, + Size paddingTL = Size(), + Size paddingBR = Size()) const; + + void groupRectangles( + vector& rectList, + vector& weights, + int groupThreshold, + double eps) const; + + Size winSize; + Size blockSize; + Size blockStride; + Size cellSize; + int nbins; + int derivAperture; + double winSigma; + int histogramNormType; + double L2HysThreshold; + bool gammaCorrection; + vector svmDetector; + int nlevels; +}; + +} /* modifiedcv */ + +#endif /* __MEDIA_VISION_HOGDETECTOR_H__ */ diff --git a/mv_surveillance/surveillance/include/SurveillanceHelper.h b/mv_surveillance/surveillance/include/SurveillanceHelper.h new file mode 100644 index 0000000..ffb6302 --- /dev/null +++ b/mv_surveillance/surveillance/include/SurveillanceHelper.h @@ -0,0 +1,70 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEDIA_VISION_SURVEILLANCE_HELPER_H__
+#define __MEDIA_VISION_SURVEILLANCE_HELPER_H__
+
+/**
+ * @file SurveillanceHelper.h
+ * @brief This file contains interface for surveillance helper.
+ */
+
+#include 
+
+#include 
+
+namespace mediavision {
+namespace surveillance {
+
+/**
+ * @class SurveillanceHelper
+ * @brief This class contains surveillance helper interface (common class for
+ * surveillance module).
+ *
+ * @since_tizen 3.0
+ */
+class SurveillanceHelper {
+public:
+ /**
+ * @brief Converts mediavision source to cv::Mat in gray scale.
+ *
+ * @since_tizen 3.0
+ * @param [in] mvSource The input media source handle
+ * @param [out] cvSource The output matrix with gray scaled image
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ static int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource);
+
+#ifdef ENABLE_NEON
+ /**
+ * @brief Converts mediavision source to cv::Mat in gray scale with NEON.
+ * @details Works only with RGB color space
+ *
+ * @since_tizen 3.0
+ * @param [in] mvSource The input media source handle
+ * @param [out] cvSource The output matrix with gray scaled image
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ static int convertSourceMVRGB2GrayCVNeon(mv_source_h mvSource, cv::Mat& cvSource);
+#endif
+
+};
+
+
+} /* surveillance */
+} /* mediavision */
+
+#endif /* __MEDIA_VISION_SURVEILLANCE_HELPER_H__ */
diff --git a/mv_surveillance/surveillance/include/mv_absdiff.h b/mv_surveillance/surveillance/include/mv_absdiff.h
new file mode 100644
index 0000000..1ad0a8a
--- /dev/null
+++ b/mv_surveillance/surveillance/include/mv_absdiff.h
@@ -0,0 +1,56 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEDIA_VISION_MV_ABSDIFF_H__
+#define __MEDIA_VISION_MV_ABSDIFF_H__
+
+#include 
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @brief Absolute difference between two buffers.
+ * @details Works only with grayscale buffers.
+ *
+ * @since_tizen 3.0
+ * @remarks If NEON is enabled (ENABLE_NEON flag), then @a width has to be
+ * multiple of 16
+ * @param [in] src1 The first input buffer.
+ * @param [in] src2 The second input buffer.
+ * @param [in] width The ROI width. Must be the multiple of 16.
+ * @param [in] height The ROI height.
+ * @param [in] stride The stride.
+ * @param [out] dst The output buffer. + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + */ +int mv_absdiff( + uint8_t *__restrict__ src1, + uint8_t *__restrict__ src2, + int width, + int height, + int stride, + uint8_t *__restrict__ dst); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __MEDIA_VISION_MV_ABSDIFF_H__ */ + diff --git a/mv_surveillance/surveillance/include/mv_apply_mask.h b/mv_surveillance/surveillance/include/mv_apply_mask.h new file mode 100644 index 0000000..a639c03 --- /dev/null +++ b/mv_surveillance/surveillance/include/mv_apply_mask.h @@ -0,0 +1,56 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MEDIA_VISION_MV_APPLY_MASK_H__ +#define __MEDIA_VISION_MV_APPLY_MASK_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @brief Applies a binary mask to the input buffer. + * @details Works only with grayscale buffers. + * + * @since_tizen 3.0 + * @remarks If NEON is enabled (ENABLE_NEON flag), then @a width has to be + * multiple of 16 + * @param [in] src_buffer The source buffer. + * @param [in] mask The mask, which should contain only values of 0 or 255. + * @param [in] width The image width. + * @param [in] height The image height. + * @param [in] stride The stride. 
+ * @param [out] dst_buffer The destination buffer.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ */
+int mv_apply_mask(
+ uint8_t *src_buffer,
+ uint8_t *__restrict mask,
+ int width,
+ int height,
+ int stride,
+ uint8_t *dst_buffer);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __MEDIA_VISION_MV_APPLY_MASK_H__ */
+
diff --git a/mv_surveillance/surveillance/include/mv_mask_buffer.h b/mv_surveillance/surveillance/include/mv_mask_buffer.h
new file mode 100644
index 0000000..abc690f
--- /dev/null
+++ b/mv_surveillance/surveillance/include/mv_mask_buffer.h
@@ -0,0 +1,55 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEDIA_VISION_MV_MASK_BUFFER_H__
+#define __MEDIA_VISION_MV_MASK_BUFFER_H__
+
+#include "mv_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @brief Gets mask buffer from buffer with known size.
+ * @details Mask buffer values: 0 outside polygon and 255 inside polygon.
+ *
+ * @since_tizen 3.0
+ * @param [in] buffer_width The buffer width
+ * @param [in] buffer_height The buffer height
+ * @param [in] polygon The array with polygon
+ * @param [in] points_number The size of array with polygon
+ * @param [out] mask_buffer The output mask buffer.
mask_buffer size is + * the same as the buffer size in media source + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * + * @post Free memory for mask_buffer. + */ +int mv_get_mask_buffer( + unsigned int buffer_width, + unsigned int buffer_height, + mv_point_s *polygon, + unsigned int points_number, + unsigned char **mask_buffer); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __MEDIA_VISION_MV_MASK_BUFFER_H__ */ + diff --git a/mv_surveillance/surveillance/include/mv_surveillance_open.h b/mv_surveillance/surveillance/include/mv_surveillance_open.h new file mode 100644 index 0000000..e70c90b --- /dev/null +++ b/mv_surveillance/surveillance/include/mv_surveillance_open.h @@ -0,0 +1,194 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __MEDIA_VISION_SURVEILLANCE_OPEN_H__ +#define __MEDIA_VISION_SURVEILLANCE_OPEN_H__ + +#include "mv_surveillance.h" +#include "mv_surveillance_private.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @file mv_surveillance_open.h + * @brief This file contains the Media Vision surveillance API + */ + +/** + * @brief Allows to subscribe to the event and start calling @a callback + * each time when the @a source is pushed using + * @ref mv_surveillance_push_source_open() and event is detected. + * + * @since_tizen 3.0 + * @remarks To stop handling triggering use + * @ref mv_surveillance_unsubscribe_event_trigger_open(). + * @param [in] event_trigger The event trigger activating calls of the + * @a callback function + * @param [in] video_stream_id The identifier of the video stream for which + * event trigger activation will be checked + * @param [in] engine_cfg The engine configuration of the event + * @param [in] callback Callback to be called each time when event + * occurrence is detected + * @param [in] user_data The user data to be passed to the callback function + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @post @ref mv_surveillance_event_occurred_cb + * + * @see mv_surveillance_event_trigger_s + * @see mv_surveillance_unsubscribe_event_trigger_open() + */ +int mv_surveillance_subscribe_event_trigger_open( + mv_surveillance_event_trigger_h event_trigger, + int video_stream_id, + mv_engine_config_h engine_cfg, + mv_surveillance_event_occurred_cb callback, + void *user_data); + +/** + * @brief Allows to unsubscribe from the event and stop calling @a callback. + * + * @since_tizen 3.0 + * @remarks To start handling trigger activation use + @ref mv_surveillance_subscribe_event_trigger_open(). 
+ * @param [in] event_trigger The event trigger for which subscription will
+ * be stopped
+ * @param [in] video_stream_id The identifier of the video source for which
+ * subscription will be stopped
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @post @ref mv_surveillance_event_occurred_cb
+ *
+ * @see mv_surveillance_event_trigger_s
+ * @see mv_surveillance_subscribe_event_trigger_open()
+ */
+int mv_surveillance_unsubscribe_event_trigger_open(
+ mv_surveillance_event_trigger_h event_trigger,
+ int video_stream_id);
+
+/**
+ * @brief Allows to push source to the event trigger and start calling @a callback.
+ *
+ * @since_tizen 3.0
+ * @param [in] source The handle to the media source
+ * @param [in] video_stream_id The video stream, which will be updated
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @see mv_surveillance_event_trigger_s
+ * @see mv_surveillance_event_occurred_cb
+ * @see mv_surveillance_subscribe_event_trigger_open()
+ * @see mv_surveillance_unsubscribe_event_trigger_open()
+ */
+int mv_surveillance_push_source_open(
+ mv_source_h source,
+ int video_stream_id);
+
+/**
+ * @brief Starts traversing through list of supported event types.
+ * + * @since_tizen 3.0 + * @remarks Supported event types and their descriptions can be found in + * @ref CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES documentation + * section + * @param [in] callback The callback function to be called for each + * supported event type + * @param [in] user_data The user data to be passed to the @a callback + * function + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @see mv_surveillance_event_type_cb + * @see mv_surveillance_foreach_event_result_value_name_open() + */ +int mv_surveillance_foreach_event_type_open( + mv_surveillance_event_type_cb callback, + void *user_data); + +/** + * @brief Starts traversing through list of supported event result value names. + * + * @since_tizen 3.0 + * @remarks Supported event types, event result value names and their + * descriptions can be found in + * @ref CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES documentation + * section + * @param [in] event_type The name of the event type for which result value + * names will be passed to the @a callback. Can be + * set @c NULL. 
If set @c NULL then all supported + * event result value names will be traversed + * @param [in] callback The callback function to be called for each + * supported event result value name + * @param [in] user_data The user data to be passed to the @a callback + * function + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @see mv_surveillance_event_result_value_name_cb + * @see mv_surveillance_foreach_event_type_open() + * @see mv_surveillance_get_result_value_open() + */ +int mv_surveillance_foreach_event_result_value_name_open( + const char *event_type, + mv_surveillance_event_result_name_cb callback, + void *user_data); + +/** + * @brief Gets result value. + * @details See the output values names in the event types descriptions located + * in /usr/share/config/capi-media-vision/surveillance-event-types.txt. 
+ * + * @since_tizen 3.0 + * @param [in] result The event result + * @param [in] value_name The name of the value to be gotten + * @param [in, out] value The pointer to variable which will be filled + * by result value + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @pre Memory for value has to be allocated + * + * @see mv_surveillance_event_trigger_s + * @see mv_surveillance_event_occurred_cb + * @see mv_surveillance_subscribe_event_trigger_open() + * @see mv_surveillance_unsubscribe_event_trigger_open() + * @see mv_surveillance_query_events_open() + */ +int mv_surveillance_get_result_value_open( + mv_surveillance_result_h result, + const char *value_name, + void *value); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __MEDIA_VISION_SURVEILLANCE_OPEN_H__ */ diff --git a/mv_surveillance/surveillance/src/EventManager.cpp b/mv_surveillance/surveillance/src/EventManager.cpp new file mode 100644 index 0000000..0a3a05b --- /dev/null +++ b/mv_surveillance/surveillance/src/EventManager.cpp @@ -0,0 +1,410 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "EventManager.h" + +#include "SurveillanceHelper.h" + +#include "EventTriggerPersonAppearance.h" +#include "EventTriggerPersonRecognition.h" +#include "EventTriggerMovementDetection.h" + +#include + +namespace mediavision { +namespace surveillance { + +static const int MAX_VALUE_NAME_LENGTH = 255; + +EventManager *EventManager::__pInstance = 0; +EventManagerDestroyer EventManager::Destroyer; +EventTypesMap EventManager::SupportedEventTypes; + +EventManagerDestroyer::~EventManagerDestroyer() +{ + delete __pInstance; +} + +void EventManagerDestroyer::initialize(EventManager* pointer) +{ + __pInstance = pointer; +} + +EventManager& EventManager::getInstance() +{ + if(!__pInstance) { + __pInstance = new EventManager(); + Destroyer.initialize(__pInstance); + setSupportedEventTypes(); + } + + return *__pInstance; +} + +void EventManager::setSupportedEventTypes() +{ + /* Add supported event types here */ + SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED] + .push_back(MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS); + SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED] + .push_back(MV_SURVEILLANCE_MOVEMENT_REGIONS); + + SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED] + .push_back(MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER); + SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED] + .push_back(MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER); + SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED] + .push_back(MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER); + SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED] + .push_back(MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS); + SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED] + .push_back(MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS); + SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED] + .push_back(MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS); + + 
SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED] + .push_back(MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER); + SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED] + .push_back(MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS); + SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED] + .push_back(MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS); + SupportedEventTypes[MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED] + .push_back(MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES); +} + +EventManager::EventManager() +{ + ; /* NULL */ +} + +EventManager::~EventManager() +{ + ; /* NULL */ +} + +int EventManager::registerEvent( + mv_surveillance_event_trigger_h eventTrigger, + long int triggerId, + const char *eventType, + int videoStreamId, + mv_engine_config_h engineCfg, + mv_surveillance_event_occurred_cb callback, + void *user_data, + int numberOfPoints, + mv_point_s *roi) +{ + if (NULL == callback || NULL == eventType) { + LOGE("Input event trigger or callback is NULL. Event registering failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + EventTriggersConstIter iter = __eventTriggers[videoStreamId].begin(); + + for (; iter != __eventTriggers[videoStreamId].end(); ++iter) { + if ((*iter)->isCallbackSubscribed(triggerId)) { + LOGE("Callback with id %d is already subscribed. " + "Event registering failed.", triggerId); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + } + + /* Add appropriate event trigger here */ + if (strncmp(eventType, MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED, + MAX_VALUE_NAME_LENGTH) == 0) { + EventTriggerPersonAppearance* trigger = + new EventTriggerPersonAppearance( + eventTrigger, + triggerId, + videoStreamId, + callback, + user_data, + numberOfPoints, + roi); + const int error = trigger->parseEngineConfig(engineCfg); + + if (error != MEDIA_VISION_ERROR_NONE) { + delete trigger; + + LOGE("Input engine configuration is wrong. 
Event registering failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + EventTriggersIter iter2 = isTriggerExists(trigger, videoStreamId); + + if (iter2 != __eventTriggers[videoStreamId].end()) { + (*iter2)->subscribeCallback( + eventTrigger, + triggerId, + callback, + user_data, + numberOfPoints, + roi); + + delete trigger; + } else { + __eventTriggers[videoStreamId].push_back(trigger); + } + } else if (strncmp(eventType, MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED, + MAX_VALUE_NAME_LENGTH) == 0) { + EventTriggerPersonRecognition* trigger = + new EventTriggerPersonRecognition( + eventTrigger, + triggerId, + videoStreamId, + callback, + user_data, + numberOfPoints, + roi); + + const int error = trigger->parseEngineConfig(engineCfg); + if (error != MEDIA_VISION_ERROR_NONE) { + delete trigger; + + LOGE("Input engine configuration is wrong. Event registering failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + EventTriggersIter iter2 = isTriggerExists(trigger, videoStreamId); + + if (iter2 != __eventTriggers[videoStreamId].end()) { + (*iter2)->subscribeCallback( + eventTrigger, + triggerId, + callback, + user_data, + numberOfPoints, + roi); + + delete trigger; + } else { + __eventTriggers[videoStreamId].push_back(trigger); + } + } else if (strncmp(eventType, MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, + MAX_VALUE_NAME_LENGTH) == 0) { + EventTriggerMovementDetection* trigger = + new EventTriggerMovementDetection( + eventTrigger, + triggerId, + videoStreamId, + callback, + user_data, + numberOfPoints, + roi); + + const int error = trigger->parseEngineConfig(engineCfg); + + if (error != MEDIA_VISION_ERROR_NONE) { + delete trigger; + + LOGE("Input engine configuration is wrong. 
Event registering failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + EventTriggersIter iter2 = isTriggerExists(trigger, videoStreamId); + + if (iter2 != __eventTriggers[videoStreamId].end()) { + (*iter2)->subscribeCallback( + eventTrigger, + triggerId, + callback, + user_data, + numberOfPoints, + roi); + + delete trigger; + } else { + __eventTriggers[videoStreamId].push_back(trigger); + } + } else { + LOGE("Input event trigger has wrong type. Event registering failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + return MEDIA_VISION_ERROR_NONE; +} + +int EventManager::unregisterEvent(long int triggerId, int videoStreamId) +{ + EventTriggersIter iter = __eventTriggers[videoStreamId].begin(); + + while (iter != __eventTriggers[videoStreamId].end()) { + if ((*iter)->unsubscribeCallback(triggerId)) { + if ((*iter)->isCallbacksEmpty()) { + delete *iter; + __eventTriggers[videoStreamId].erase(iter); + } + + return MEDIA_VISION_ERROR_NONE; + } + + ++iter; + } + + if (iter == __eventTriggers[videoStreamId].end()) { + LOGE("Event trigger doesn't exist. Event unregistering failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + return MEDIA_VISION_ERROR_NONE; +} + +int EventManager::pushSource(mv_source_h source, int videoStreamId) +{ + if (NULL == source) { + LOGE("Input source is NULL. Push source failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (__eventTriggers[videoStreamId].empty()) { + LOGE("There are no events yet. 
Push source failed."); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + unsigned int width = 0; + unsigned int height = 0; + mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; + + MEDIA_VISION_ASSERT(mv_source_get_width(source, &width), + "Failed to get the width."); + MEDIA_VISION_ASSERT(mv_source_get_height(source, &height), + "Failed to get the height."); + MEDIA_VISION_ASSERT(mv_source_get_colorspace(source, &colorspace), + "Failed to get the colorspace."); + + cv::Mat grayImage; + + int error = MEDIA_VISION_ERROR_NONE; + +#ifdef ENABLE_NEON + if (colorspace == MEDIA_VISION_COLORSPACE_RGB888 && (width * height % 8) == 0) + error = SurveillanceHelper::convertSourceMVRGB2GrayCVNeon(source, grayImage); + else +#endif /* ENABLE_NEON */ + error = SurveillanceHelper::convertSourceMV2GrayCV(source, grayImage); + + if (error != MEDIA_VISION_ERROR_NONE || grayImage.empty()) { + LOGE("Media source conversion failed."); + return error; + } + + mv_source_h graySource; + error = mv_create_source(&graySource); + if (MEDIA_VISION_ERROR_NONE != error) { + LOGE("Errors were occurred during source creating %i", error); + return error; + } + + error = mv_source_fill_by_buffer( + graySource, + grayImage.data, + grayImage.cols * grayImage.rows, + grayImage.cols, + grayImage.rows, + MEDIA_VISION_COLORSPACE_Y800); + + if (MEDIA_VISION_ERROR_NONE != error) { + mv_destroy_source(graySource); + LOGE("Errors were occurred during source filling %i", error); + return error; + } + + EventTriggersConstIter iter = __eventTriggers[videoStreamId].begin(); + + for (; iter != __eventTriggers[videoStreamId].end(); ++iter) { + error = (*iter)->pushSource(source, graySource, grayImage); + + if (error != MEDIA_VISION_ERROR_NONE) + LOGE("Push source failed for event ", (*iter)->getEventType().c_str()); + } + + error = mv_destroy_source(graySource); + + if (MEDIA_VISION_ERROR_NONE != error) + LOGE("Errors were occurred during gray source destroying %i", error); + + return 
MEDIA_VISION_ERROR_NONE; +} + +int EventManager::getSupportedEventTypes(StringVector& eventTypes) +{ + eventTypes.clear(); + + if (!__pInstance) + setSupportedEventTypes(); + + EventTypesMapConstIter etIter = SupportedEventTypes.begin(); + while (etIter != SupportedEventTypes.end()) { + eventTypes.push_back(etIter->first); + ++etIter; + } + + return MEDIA_VISION_ERROR_NONE; +} + +int EventManager::getSupportedEventResultValueNames( + StringVector& eventResValNames) +{ + eventResValNames.clear(); + + if (!__pInstance) + setSupportedEventTypes(); + + EventTypesMapConstIter etIter = SupportedEventTypes.begin(); + while (etIter != SupportedEventTypes.end()) { + eventResValNames.insert( + eventResValNames.end(), + etIter->second.begin(), + etIter->second.end()); + ++etIter; + } + + return MEDIA_VISION_ERROR_NONE; +} + +int EventManager::getSupportedEventResultValueNames( + const std::string& eventTypeName, + StringVector& eventResValNames) +{ + eventResValNames.clear(); + + if (!__pInstance) + setSupportedEventTypes(); + + EventTypesMapConstIter etIter = SupportedEventTypes.find(eventTypeName); + if (etIter == SupportedEventTypes.end()) + return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; + + eventResValNames = etIter->second; + + return MEDIA_VISION_ERROR_NONE; +} + +EventTriggersIter EventManager::isTriggerExists( + EventTrigger* trigger, + int videoStreamId) +{ + EventTriggersIter iter = __eventTriggers[videoStreamId].begin(); + + for (; iter != __eventTriggers[videoStreamId].end(); ++iter) + if (*(*iter) == *trigger) + return iter; + + return iter; +} + +} /* surveillance */ +} /* mediavision */ + diff --git a/mv_surveillance/surveillance/src/EventTrigger.cpp b/mv_surveillance/surveillance/src/EventTrigger.cpp new file mode 100644 index 0000000..d6b4cec --- /dev/null +++ b/mv_surveillance/surveillance/src/EventTrigger.cpp @@ -0,0 +1,197 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 
(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "EventTrigger.h" + +#include +#include +#include + +namespace mediavision { +namespace surveillance { + +long int EventTrigger::InternalTriggersCounter = -1l; + +EventTrigger::EventTrigger( + mv_surveillance_event_trigger_h eventTrigger, + long int triggerId, + int videoStreamId, + mv_surveillance_event_occurred_cb callback, + void *userData, + int numberOfPoints, + mv_point_s *roi): + __videoStreamId(videoStreamId), + __roi(numberOfPoints) +{ + CallbackData callbackData; + callbackData.eventTrigger = eventTrigger; + callbackData.callback = callback; + callbackData.userData = userData; + + __callbackDataMap.insert(CallbackDataPair(triggerId, callbackData)); + + for (int i = 0; i < numberOfPoints; ++i) + __roi[i] = roi[i]; +} + +EventTrigger::~EventTrigger() +{ + ; /* NULL */ +} + +int EventTrigger::getVideoStreamId() const +{ + return __videoStreamId; +} + +bool EventTrigger::isCallbackSubscribed(long int triggerId) const +{ + return __callbackDataMap.find(triggerId) != __callbackDataMap.end(); +} + +bool EventTrigger::subscribeCallback( + mv_surveillance_event_trigger_h eventTrigger, + long int triggerId, + mv_surveillance_event_occurred_cb callback, + void *userData, + int numberOfPoints, + mv_point_s *roi) +{ + if (isCallbackSubscribed(triggerId)) { + LOGE("Callback with id %d is already subscribed. 
" + "Callback subscribing failed.", triggerId); + return false; + } + + CallbackData callbackData; + callbackData.eventTrigger = eventTrigger; + callbackData.callback = callback; + callbackData.userData = userData; + + __callbackDataMap.insert(CallbackDataPair(triggerId, callbackData)); + + /* TODO: implement support of multiple ROI */ + __roi.clear(); + __roi.resize(numberOfPoints); + + for (int i = 0; i < numberOfPoints; ++i) + __roi[i] = roi[i]; + + return true; +} + +bool EventTrigger::unsubscribeCallback(long int triggerId) +{ + CallbackDataMapIter iter = __callbackDataMap.find(triggerId); + + if (iter == __callbackDataMap.end()) { + LOGE("Callback with id %d was not subscribed. " + "Callback unsubscribing failed.", triggerId); + return false; + } + + iter->second.callback = NULL; + iter->second.userData = NULL; + __callbackDataMap.erase(iter); + + return true; +} + +bool EventTrigger::isCallbacksEmpty() const +{ + return __callbackDataMap.empty(); +} + +int EventTrigger::applyROIToImage( + unsigned char *image, + int imageWidth, + int imageHeight, + bool scalePoints, + int scaleX, + int scaleY) +{ + const size_t roiSize = __roi.size(); + + if (roiSize >= 3) { + MVPoints scaledPoints = __roi; + + if (scalePoints) + for (size_t i = 0u; i < roiSize; ++i) { + scaledPoints[i].x /= scaleX; + scaledPoints[i].y /= scaleY; + } + + unsigned char *maskBuffer = NULL; + + int error = mv_get_mask_buffer( + imageWidth, + imageHeight, + scaledPoints.data(), + (int) roiSize, + &maskBuffer); + + if (error != MEDIA_VISION_ERROR_NONE || maskBuffer == NULL) { + if (maskBuffer != NULL) + delete maskBuffer; + + LOGE("Getting mask buffer failed."); + return error; + } + + error = mv_apply_mask( + image, + maskBuffer, + imageWidth / 16 * 16, + imageHeight, + imageWidth, + image); + + delete maskBuffer; + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Applying mask buffer failed."); + return error; + } + } + + return MEDIA_VISION_ERROR_NONE; +} + +bool 
EventTrigger::operator==(const EventTrigger& other) const +{ + const std::string currentEventType = this->getEventType(); + const std::string otherEventType = other.getEventType(); + + if (__videoStreamId != other.__videoStreamId || + currentEventType.compare(otherEventType) != 0 || + __roi.size() != other.__roi.size()) + return false; + + size_t size = __roi.size(); + for (size_t i = 0; i < size; ++i) + if (__roi[i].x != other.__roi[i].x || __roi[i].y != other.__roi[i].y) + return false; + + return true; +} + +bool EventTrigger::operator!=(const EventTrigger& other) const +{ + return !(*this == other); +} + +} /* surveillance */ +} /* mediavision */ diff --git a/mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp b/mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp new file mode 100644 index 0000000..cc6ee91 --- /dev/null +++ b/mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp @@ -0,0 +1,290 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "EventTriggerMovementDetection.h" + +#include "EventDefs.h" +#include "mv_absdiff.h" +#include "SurveillanceHelper.h" + +#include + +#include "opencv2/highgui/highgui.hpp" + +namespace mediavision { +namespace surveillance { + +static const int DEFAULT_DIFF_THRESHOLD = 10; + +static const int MAX_VALUE_NAME_LENGTH = 255; + +const cv::Mat EventTriggerMovementDetection::__ERODE_KERNEL = + cv::getStructuringElement(cv::MORPH_RECT, cv::Size(4, 4)); + +const cv::Mat EventTriggerMovementDetection::__DILATE_KERNEL = + cv::getStructuringElement(cv::MORPH_RECT, cv::Size(24, 24)); + +static const cv::Rect DEFAULT_RECT = cv::Rect(0, 0, 0, 0); + +namespace { + +inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst) +{ + dst.point.x = src.x; + dst.point.y = src.y; + dst.width = src.width; + dst.height = src.height; +} + +void mergeOverlappedRects(CVRectangles& rects) +{ + const size_t rectsSize = rects.size(); + + for (size_t i = 0; i < rectsSize; ++i) { + const int area1 = rects[i].area(); + + for (size_t j = i + 1; j < rectsSize; ++j) { + const int area2 = rects[j].area(); + const int intersectionArea = (rects[i] & rects[j]).area(); + + if (intersectionArea != 0 && + intersectionArea > std::min(area1, area2) / 2) { + rects[i] |= rects[j]; + rects[j] = DEFAULT_RECT; + } + } + } +} + +} /* anonymous namespace */ + +int EventResultMovementDetection::getResultValue( + const char *valueName, + void *value) const +{ + if (valueName == NULL) { + LOGE("Invalid pointer for value name. Getting result value failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (value == NULL) { + LOGE("Invalid pointer for value. 
Getting result value failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (strncmp(valueName, MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS, + MAX_VALUE_NAME_LENGTH) == 0) { + size_t *const numberOfDetectedMovements = (size_t*) value; + *numberOfDetectedMovements = __movementRegions.size(); + } else if (strncmp(valueName, MV_SURVEILLANCE_MOVEMENT_REGIONS, + MAX_VALUE_NAME_LENGTH) == 0) { + mv_rectangle_s *const movementsRegions = (mv_rectangle_s*) value; + + const size_t numberOfDetectedMovements = __movementRegions.size(); + + for (size_t i = 0u; i < numberOfDetectedMovements; ++i) { + movementsRegions[i] = __movementRegions[i]; + } + } else { + LOGE("This value name doesn't exist. Getting result value failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + return MEDIA_VISION_ERROR_NONE; +} + +EventTriggerMovementDetection::EventTriggerMovementDetection( + mv_surveillance_event_trigger_h eventTrigger, + long int triggerId, + int videoStreamId, + mv_surveillance_event_occurred_cb callback, + void *userData, + int numberOfPoints, + mv_point_s *roi) : EventTrigger( + eventTrigger, + triggerId, + videoStreamId, + callback, + userData, + numberOfPoints, + roi), + __previousImage(), + __eventResult(new EventResultMovementDetection()), + __diffThreshold(DEFAULT_DIFF_THRESHOLD) +{ + ; /* NULL */ +} + +EventTriggerMovementDetection::~EventTriggerMovementDetection() +{ + delete __eventResult; +} + +int EventTriggerMovementDetection::parseEngineConfig(mv_engine_config_h engineConfig) +{ + if (NULL == engineConfig) { + LOGI("Default value for movement detection threshold was set."); + return MEDIA_VISION_ERROR_NONE; + } + + const int error = mv_engine_config_get_int_attribute( + engineConfig, + MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD, + &__diffThreshold); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Getting movement detection threshold from engine configuration failed."); + return error; + } + + return MEDIA_VISION_ERROR_NONE; +} + +int 
EventTriggerMovementDetection::pushSource( + mv_source_h source, + mv_source_h graySource, + const cv::Mat& grayImage) +{ + if (source == NULL || graySource == NULL || grayImage.empty()) { + LOGE("Media source is NULL. Pushing source failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + __eventResult->__movementRegions.clear(); + + int error = MEDIA_VISION_ERROR_NONE; + + if (__previousImage.empty()) { + __previousImage = grayImage.clone(); + + LOGI("Previous media source is empty. Push next source."); + return error; + } + + cv::Mat image = grayImage.clone(); + + const int bufSize = image.cols * image.rows * sizeof(uint8_t); + uint8_t *diffBuffer = (uint8_t*) malloc(bufSize * sizeof(uint8_t)); + memset(diffBuffer, 0, bufSize); + + error = mv_absdiff( + image.data, + __previousImage.data, + image.cols, + image.rows, + image.cols, + diffBuffer); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Absolute difference calculation failed. Pushing source failed."); + return error; + } + + error = applyROIToImage(diffBuffer, image.cols, image.rows); + + if (error != MEDIA_VISION_ERROR_NONE || image.empty()) { + LOGE("Applying ROI failed with error %d.", error); + return error; + } + + cv::Mat imgDiff = cv::Mat(cv::Size(image.cols, image.rows), + CV_8UC1, diffBuffer); + + cv::erode(imgDiff, imgDiff, __ERODE_KERNEL); + cv::dilate(imgDiff, imgDiff, __DILATE_KERNEL); + + cv::threshold(imgDiff, imgDiff, __diffThreshold, 255, CV_THRESH_BINARY); + + Contours contours; + + cv::findContours(imgDiff, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); + + free(diffBuffer); + + const size_t contoursSize = contours.size(); + CVRectangles rects(contoursSize); + + for (size_t i = 0u; i < contoursSize; ++i) + rects[i] = cv::boundingRect(cv::Mat(contours[i])); + + mergeOverlappedRects(rects); + + const size_t roiSize = __roi.size(); + CVPoints roi(roiSize); + + cv::Rect roiRect(0, 0, imgDiff.cols, imgDiff.rows); + + if (roiSize >= 3u) { + for (size_t i = 0u; i < roiSize; 
++i) + roi[i] = cv::Point(__roi[i].x, __roi[i].y); + + roiRect = cv::boundingRect(roi); + } + + const size_t rectsSize = rects.size(); + for (size_t i = 0u; i < rectsSize; ++i) + if (rects[i] != DEFAULT_RECT && + roiRect.contains(rects[i].tl()) && + roiRect.contains(rects[i].br())) { + mv_rectangle_s rectMV; + convertRectCV2MV(rects[i], rectMV); + + __eventResult->__movementRegions.push_back(rectMV); + } + + __previousImage = image; + __eventResult->__grayImage = __previousImage; + + // Don't invoke the callback if movement wasn't detected at the frame + if (__eventResult->__movementRegions.size() > 0) { + CallbackDataMapConstIter iter = __callbackDataMap.begin(); + + for (; iter != __callbackDataMap.end(); ++iter) { + mv_surveillance_event_occurred_cb callback = iter->second.callback; + callback( + iter->second.eventTrigger, + source, + __videoStreamId, + __eventResult, + iter->second.userData); + } + } + + return MEDIA_VISION_ERROR_NONE; +} + +std::string EventTriggerMovementDetection::getEventType() const +{ + return MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED; +} + +bool EventTriggerMovementDetection::operator==(const EventTriggerMovementDetection& other) const +{ + if (EventTrigger::operator !=(other)) + return false; + + /* TODO: compare private values if necessary */ + + return true; +} + +bool EventTriggerMovementDetection::operator!=(const EventTriggerMovementDetection& other) const +{ + return !(*this == other); +} + +} /* surveillance */ +} /* mediavision */ diff --git a/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp b/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp new file mode 100644 index 0000000..ca14d45 --- /dev/null +++ b/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp @@ -0,0 +1,460 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "EventTriggerPersonAppearance.h" + +#include "EventManager.h" +#include "FaceDetector.h" +#include "SurveillanceHelper.h" +#include "EventTriggerMovementDetection.h" + +#include "opencv2/opencv.hpp" +#include "opencv2/highgui/highgui.hpp" + +#include + +#include +#include + +namespace mediavision { +namespace surveillance { + +using namespace cv; + +static const int MAX_VALUE_NAME_LENGHT = 255; + +static const int DEFAULT_SKIP_FRAMES_COUNT = 6; + +static const int DEFAULT_FRAME_WIDTH = 640; + +static const int DEFAULT_FRAME_HEIGHT = 480; + +static const cv::Size DEFAULT_FRAME_SIZE(DEFAULT_FRAME_WIDTH, DEFAULT_FRAME_HEIGHT); + +static const cv::Rect ALL_IMAGE_RECT(0, 0, DEFAULT_FRAME_WIDTH, DEFAULT_FRAME_HEIGHT); + +static const cv::Size DEFAULT_DETECTION_STEPS = cv::Size(8, 8); + +static const std::vector DEFAULT_SVM_PEOPLE_DETECTOR = + cv::HOGDescriptor::getDefaultPeopleDetector(); + +namespace { + +inline void convertRectMV2CV(const mv_rectangle_s& src, cv::Rect& dst) +{ + dst.x = src.point.x; + dst.y = src.point.y; + dst.width = src.width; + dst.height = src.height; +} + +inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst) +{ + dst.point.x = src.x; + dst.point.y = src.y; + dst.width = src.width; + dst.height = src.height; +} + +} /* Anonymous namespace*/ + +int EventResultPersonAppearance::getResultValue(const char *valueName, + void *value) const +{ + if (valueName == NULL) { + LOGE("Invalid pointer for value name. 
Getting result value failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (value == NULL) { + LOGE("Invalid pointer for value. Getting result value failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER, + MAX_VALUE_NAME_LENGHT) == 0) { + size_t * const numberOfAppearedPersons = (size_t*) value; + *numberOfAppearedPersons = __appearedLocations.size(); + } + else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS, + MAX_VALUE_NAME_LENGHT) == 0) { + mv_rectangle_s * const appearedLocations = (mv_rectangle_s*) value; + + const size_t numberOfAppearedPersons = __appearedLocations.size(); + + for (size_t i = 0u; i < numberOfAppearedPersons; ++i) + appearedLocations[i] = __appearedLocations[i]; + } + else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER, + MAX_VALUE_NAME_LENGHT) == 0) { + size_t * const numberOfTrackedPersons = (size_t*) value; + *numberOfTrackedPersons = __trackedLocations.size(); + } + else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS, + MAX_VALUE_NAME_LENGHT) == 0) { + mv_rectangle_s * const trackedLocations = (mv_rectangle_s*) value; + + const size_t numberOfTrackedPersons = __trackedLocations.size(); + + for (size_t i = 0u; i < numberOfTrackedPersons; ++i) + trackedLocations[i] = __trackedLocations[i]; + } + else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER, + MAX_VALUE_NAME_LENGHT) == 0) { + size_t * const numberOfDisappearedPersons = (size_t*) value; + *numberOfDisappearedPersons = __disappearedLocations.size(); + } + else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS, + MAX_VALUE_NAME_LENGHT) == 0) { + mv_rectangle_s * const disappearedLocations = (mv_rectangle_s*) value; + + const size_t numberOfDisappearedPersons = __disappearedLocations.size(); + + for (size_t i = 0u; i < numberOfDisappearedPersons; ++i) + disappearedLocations[i] = __disappearedLocations[i]; + } + 
else { + LOGE("This value name doesn't exist. Getting result value failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + return MEDIA_VISION_ERROR_NONE; +} + +EventTriggerPersonAppearance::EventTriggerPersonAppearance( + mv_surveillance_event_trigger_h eventTrigger, long int triggerId, + int videoStreamId, mv_surveillance_event_occurred_cb callback, + void *userData, int numberOfPoints, mv_point_s *roi) : + EventTrigger(eventTrigger, triggerId, videoStreamId, callback, userData, + numberOfPoints, roi), __skipFramesCount(DEFAULT_SKIP_FRAMES_COUNT), + __frameCounter(0), __movementDetectedEventId(InternalTriggersCounter--), + __factorX(1.f), __factorY(1.f), __rectToDetect(ALL_IMAGE_RECT), + __rectToDetectPrevious(ALL_IMAGE_RECT), __trackedRects(), + __appearedRects(), __disappearedRects(), __hogClassifier(), + __eventResult(new EventResultPersonAppearance()) +{ + __hogClassifier.setSVMDetector(DEFAULT_SVM_PEOPLE_DETECTOR); + + EventManager::getInstance().registerEvent( + NULL, __movementDetectedEventId, + MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, videoStreamId, + NULL, movementDetectedCB, this, numberOfPoints, roi); +} + +EventTriggerPersonAppearance::~EventTriggerPersonAppearance() +{ + EventManager::getInstance().unregisterEvent(__movementDetectedEventId, + __videoStreamId); + + delete __eventResult; +} + +int EventTriggerPersonAppearance::parseEngineConfig( + mv_engine_config_h engineConfig) +{ + if (NULL == engineConfig) { + LOGI("Default value for frame skip count was set."); + return MEDIA_VISION_ERROR_NONE; + } + + const int error = mv_engine_config_get_int_attribute(engineConfig, + MV_SURVEILLANCE_SKIP_FRAMES_COUNT, &__skipFramesCount); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Getting frame skip count from engine configuration failed."); + return error; + } + + return MEDIA_VISION_ERROR_NONE; +} + +int EventTriggerPersonAppearance::pushSource(mv_source_h source, + mv_source_h graySource, const cv::Mat& grayImage) +{ + if (source == 
NULL || graySource == NULL || grayImage.empty()) { + LOGE("Media source is NULL. Pushing source failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + return MEDIA_VISION_ERROR_NONE; +} + +std::string EventTriggerPersonAppearance::getEventType() const +{ + return MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED; +} + +bool EventTriggerPersonAppearance::operator==( + const EventTriggerPersonAppearance& other) const +{ + if (EventTrigger::operator !=(other)) + return false; + + /* TODO: compare private values if necessary */ + + return true; +} + +bool EventTriggerPersonAppearance::operator!=( + const EventTriggerPersonAppearance& other) const +{ + return !(*this == other); +} + +void EventTriggerPersonAppearance::movementDetectedCB( + mv_surveillance_event_trigger_h /*event_trigger*/, mv_source_h source, + int /*video_stream_id*/, mv_surveillance_result_h event_result, + void *user_data) +{ + EventTriggerPersonAppearance *trigger = + (EventTriggerPersonAppearance*) user_data; + + /* 1. Get input image in grayscale and resize it */ + EventResultMovementDetection *result = + static_cast(event_result); + + cv::Mat resizedImage; + cv::resize(result->__grayImage, resizedImage, DEFAULT_FRAME_SIZE); + + int error = trigger->applyROIToImage(resizedImage.data, resizedImage.cols, + resizedImage.rows, true, DEFAULT_FRAME_WIDTH, DEFAULT_FRAME_HEIGHT); + + if (error != MEDIA_VISION_ERROR_NONE || resizedImage.empty()) { + trigger->runCallbacks(source); + LOGE("Applying ROI failed with error %d.", error); + return; + } + + trigger->__factorX = (float) DEFAULT_FRAME_WIDTH / result->__grayImage.cols; + trigger->__factorY = (float) DEFAULT_FRAME_HEIGHT / result->__grayImage.rows; + + /* 2. 
Get detected movement regions */ + const size_t numberOfMovementRegions = result->__movementRegions.size(); + CVRectangles movementRegions(numberOfMovementRegions); + + for (size_t i = 0u; i < numberOfMovementRegions; ++i) { + convertRectMV2CV(result->__movementRegions[i], movementRegions[i]); + movementRegions[i].x *= trigger->__factorX; + movementRegions[i].y *= trigger->__factorY; + movementRegions[i].width *= trigger->__factorX; + movementRegions[i].height *= trigger->__factorY; + } + + /* 3. Calculate rectangle where person will be detect */ + if (movementRegions.empty()) { + trigger->__rectToDetect = ALL_IMAGE_RECT; + } else { + trigger->__rectToDetect = movementRegions[0]; + + for (size_t j = 1u; j < numberOfMovementRegions; ++j) + trigger->__rectToDetect |= movementRegions[j]; + + if (trigger->__rectToDetect.width + < trigger->__hogClassifier.winSize.width + || trigger->__rectToDetect.height + < trigger->__hogClassifier.winSize.height) + trigger->__rectToDetect |= trigger->__rectToDetectPrevious; + } + + trigger->__rectToDetect &= ALL_IMAGE_RECT; + + /* 4. Perform Hog detector or try to track using movement regions */ + if ((trigger->__skipFramesCount == 0 || + trigger->__frameCounter % trigger->__skipFramesCount == 0) && + (trigger->__rectToDetect != ALL_IMAGE_RECT)) { + /* 4.1 Perform Hog detector */ + TrackedRectanglesConstIter iter = trigger->__trackedRects.begin(); + for (; iter != trigger->__trackedRects.end(); ++iter) + trigger->__rectToDetect |= iter->rect; + + // Slightly extend detection area... 
+ const int xShift = .25f * trigger->__rectToDetect.width; + const int yShift = .25f * trigger->__rectToDetect.height; + trigger->__rectToDetect.x -= xShift / 2; + trigger->__rectToDetect.y -= yShift / 2; + trigger->__rectToDetect.width += xShift; + trigger->__rectToDetect.height += yShift; + trigger->__rectToDetect &= ALL_IMAGE_RECT; + // and fit it to the HOG cell size + const int xRest = trigger->__rectToDetect.width % 8; + const int yRest = trigger->__rectToDetect.height % 8; + trigger->__rectToDetect.x += xRest / 2; + trigger->__rectToDetect.y += yRest / 2; + trigger->__rectToDetect.width -= xRest; + trigger->__rectToDetect.height -= yRest; + + CVRectangles hogRects; + + trigger->__hogClassifier.detectMultiScale( + resizedImage(trigger->__rectToDetect), hogRects, 0, + DEFAULT_DETECTION_STEPS, cv::Size(32, 32), 1.059, 2); + + const size_t hogRectsSize = hogRects.size(); + + for (size_t i = 0u; i < hogRectsSize; ++i) { + hogRects[i].x += trigger->__rectToDetect.x; + hogRects[i].y += trigger->__rectToDetect.y; + } + + std::vector trackChecks(hogRectsSize, false); + TrackedRectanglesIter trackRectIter = trigger->__trackedRects.begin(); + for (; trackRectIter != trigger->__trackedRects.end(); + ++trackRectIter) { + size_t bestArea = 0; + size_t bestIdx = 0; + for (size_t idx = 0u; idx < hogRectsSize; ++idx) { + if (trackChecks[idx]) + continue; + const size_t curArea = + (hogRects[idx] & trackRectIter->rect).area(); + if (bestArea < curArea) { + bestArea = curArea; + bestIdx = idx; + } + } + if (bestArea > 10) { + trackChecks[bestIdx] = true; + trackRectIter->rect = hogRects[bestIdx]; + } + } + + trigger->__appearedRects.clear(); + for (size_t idx = 0u; idx < hogRectsSize; ++idx) + if (!trackChecks[idx]) + trigger->__appearedRects.push_back(hogRects[idx]); + } + else { + /* 4.2 Try to track */ + CVRectanglesConstIter appearedIter = trigger->__appearedRects.begin(); + for (; appearedIter != trigger->__appearedRects.end(); ++appearedIter) + 
trigger->__trackedRects.push_back( + TrackedRectangle(*appearedIter, 7)); + + trigger->__appearedRects.clear(); + + TrackedRectanglesIter iter = trigger->__trackedRects.begin(); + while (iter != trigger->__trackedRects.end()) { + bool tracked = false; + + for (size_t j = 0u; j < numberOfMovementRegions; ++j) { + cv::Rect rect = iter->rect; + if ((rect & movementRegions[j]).area() != 0 && + movementRegions[j].area() <= 3 * rect.area() / 2) { + cv::Rect r1 = rect | movementRegions[j]; + const int dx = r1.width - rect.width; + const int dy = r1.height - rect.height; + + if (r1.x < movementRegions[j].x) + r1.x += dx; + else if (r1.x > movementRegions[j].x) + r1.x -= dx; + + if (r1.y < movementRegions[j].y) + r1.y += dy; + else if (r1.y > movementRegions[j].y) + r1.y -= dy; + + r1.height = rect.height; + r1.width = rect.width; + + iter->rect = r1; + + tracked = true; + } + } + + if (tracked) + ++iter; + else { + if (iter->framesCount == 0) { + trigger->__disappearedRects.push_back(iter->rect); + iter = trigger->__trackedRects.erase(iter); + } + else { + --(iter->framesCount); + ++iter; + } + } + } + } + + trigger->__rectToDetectPrevious = trigger->__rectToDetect; + ++trigger->__frameCounter; + + /* 5. 
Update event result and run callbacks */ + trigger->runCallbacks(source); + + trigger->__disappearedRects.clear(); +} + +void EventTriggerPersonAppearance::runCallbacks(mv_source_h source) +{ + __eventResult->__appearedLocations.clear(); + __eventResult->__disappearedLocations.clear(); + __eventResult->__trackedLocations.clear(); + + const size_t appearedLocationsSize = __appearedRects.size(); + __eventResult->__appearedLocations.resize(appearedLocationsSize); + + for (size_t i = 0u; i < appearedLocationsSize; ++i) { + convertRectCV2MV(__appearedRects[i], + __eventResult->__appearedLocations[i]); + __eventResult->__appearedLocations[i].point.x /= __factorX; + __eventResult->__appearedLocations[i].point.y /= __factorY; + __eventResult->__appearedLocations[i].width /= __factorX; + __eventResult->__appearedLocations[i].height /= __factorY; + } + + const size_t disappearedLocationsSize = __disappearedRects.size(); + __eventResult->__disappearedLocations.resize(disappearedLocationsSize); + + for (size_t i = 0u; i < disappearedLocationsSize; ++i) { + convertRectCV2MV(__disappearedRects[i], + __eventResult->__disappearedLocations[i]); + __eventResult->__disappearedLocations[i].point.x /= __factorX; + __eventResult->__disappearedLocations[i].point.y /= __factorY; + __eventResult->__disappearedLocations[i].width /= __factorX; + __eventResult->__disappearedLocations[i].height /= __factorY; + } + + const size_t trackedLocationsSize = __trackedRects.size(); + __eventResult->__trackedLocations.resize(trackedLocationsSize); + + TrackedRectanglesConstIter trackedIter = __trackedRects.begin(); + for (size_t i = 0u; i < trackedLocationsSize; ++i, ++trackedIter) { + convertRectCV2MV(trackedIter->rect, + __eventResult->__trackedLocations[i]); + __eventResult->__trackedLocations[i].point.x /= __factorX; + __eventResult->__trackedLocations[i].point.y /= __factorY; + __eventResult->__trackedLocations[i].width /= __factorX; + __eventResult->__trackedLocations[i].height /= __factorY; + } 
+ + // Don't invoke the callback if no appearance, disappearance or tracking + if (appearedLocationsSize > 0 || disappearedLocationsSize > 0 + || trackedLocationsSize > 0) { + CallbackDataMapConstIter iter = __callbackDataMap.begin(); + + for (; iter != __callbackDataMap.end(); ++iter) { + mv_surveillance_event_occurred_cb callback = iter->second.callback; + callback(iter->second.eventTrigger, source, __videoStreamId, + __eventResult, iter->second.userData); + } + } +} + +} /* surveillance */ +} /* mediavision */ diff --git a/mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp b/mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp new file mode 100644 index 0000000..7b28709 --- /dev/null +++ b/mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp @@ -0,0 +1,397 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "EventTriggerPersonRecognition.h" +#include "EventManager.h" + +#include +#include +#include + +#include + +namespace mediavision { +namespace surveillance { + +static const int MAX_VALUE_NAME_LENGHT = 255; + +namespace { + +template +std::string numberToString(T Number) +{ + std::ostringstream ss; + ss << Number; + return ss.str(); +} + +} /* Anonymous namespace*/ + +int EventResultPersonRecognition::getResultValue( + const char *valueName, + void *value) const +{ + if (valueName == NULL) { + LOGE("Invalid pointer for value name. Getting result value failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (value == NULL) { + LOGE("Invalid pointer for value. Getting result value failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + const size_t numberOfPersons = __locations.size(); + + if (strncmp(valueName, + MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER, + MAX_VALUE_NAME_LENGHT) == 0) { + size_t *outNumberOfPersons = (size_t*) value; + *outNumberOfPersons = numberOfPersons; + } else if (strncmp(valueName, + MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS, + MAX_VALUE_NAME_LENGHT) == 0) { + mv_rectangle_s *locations = (mv_rectangle_s*) value; + + for (size_t i = 0; i < numberOfPersons; ++i) + locations[i] = __locations[i]; + } else if (strncmp(valueName, + MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS, + MAX_VALUE_NAME_LENGHT) == 0) { + int *labels = (int*) value; + + for (size_t i = 0; i < numberOfPersons; ++i) + labels[i] = __faceLabels[i]; + } else if (strncmp(valueName, + MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES, + MAX_VALUE_NAME_LENGHT) == 0) { + double *confidences = (double*) value; + + for (size_t i = 0; i < numberOfPersons; ++i) + confidences[i] = __confidences[i]; + } else { + LOGE("This value name doesn't exist. 
Getting result value failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + return MEDIA_VISION_ERROR_NONE; +} + +EventTriggerPersonRecognition::EventTriggerPersonRecognition( + mv_surveillance_event_trigger_h eventTrigger, + long int triggerId, + int videoStreamId, + mv_surveillance_event_occurred_cb callback, + void *userData, + int numberOfPoints, + mv_point_s *roi) : EventTrigger(eventTrigger, + triggerId, + videoStreamId, + callback, + userData, + numberOfPoints, + roi), + __faceRecognitionModel(NULL), + __lastFrame(NULL), + __eventResult(new EventResultPersonRecognition()) +{ + ; /* NULL */ +} + +EventTriggerPersonRecognition::~EventTriggerPersonRecognition() +{ + if (NULL != __faceRecognitionModel) { + const int err = mv_face_recognition_model_destroy(__faceRecognitionModel); + if (MEDIA_VISION_ERROR_NONE != err) + LOGE("Error while trying to delete face recognition model when " + "event trigger had been destroyed. Error code: %i.", err); + } + + delete __eventResult; +} + +int EventTriggerPersonRecognition::parseEngineConfig(mv_engine_config_h engineConfig) +{ + if (engineConfig == NULL) { + LOGE("Engine configuration is NULL. 
Parsing failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + char *modelPath = NULL; + + int error = mv_engine_config_get_string_attribute( + engineConfig, + MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH, + &modelPath); + + if (error != MEDIA_VISION_ERROR_NONE) { + if (modelPath != NULL) + delete[] modelPath; + + LOGE("Getting recognition model from engine configuration failed."); + + return error; + } + + mv_face_recognition_model_h recognitionModel = NULL; + + error = mv_face_recognition_model_load(modelPath, &recognitionModel); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Loading recognition model from file %s failed.", + modelPath); + + if (modelPath != NULL) + delete[] modelPath; + + return error; + } + + if (NULL != __faceRecognitionModel) { + error = mv_face_recognition_model_destroy(__faceRecognitionModel); + if (MEDIA_VISION_ERROR_NONE != error) { + LOGE("Error while trying to delete old face recognition model when " + "new model is trying to be loaded. Error code: %i.", error); + } + } + + __faceRecognitionModel = recognitionModel; + + if (NULL == __faceRecognitionModel) { + LOGE("Failed to load face recognition model. Check %s attribute of the " + "engine config.", MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH); + + if (modelPath != NULL) + delete[] modelPath; + + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (modelPath != NULL) + delete[] modelPath; + + return MEDIA_VISION_ERROR_NONE; +} + +int EventTriggerPersonRecognition::pushSource( + mv_source_h source, + mv_source_h graySource, + const cv::Mat& grayImage) +{ + if (source == NULL || graySource == NULL || grayImage.empty()) { + LOGE("Media source is NULL. 
Pushing source failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + __lastFrame = source; + + __eventResult->__locations.clear(); + __eventResult->__faceLabels.clear(); + __eventResult->__confidences.clear(); + + unsigned char *data_buffer = NULL; + unsigned int buffer_size = 0; + mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; + unsigned int width = 0; + unsigned int height = 0; + + int error = mv_source_get_buffer(graySource, &data_buffer, &buffer_size); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Operation with media source failed with error %d.", error); + return error; + } + + error = mv_source_get_colorspace(graySource, &colorspace); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Operation with media source failed with error %d.", error); + return error; + } + + error = mv_source_get_width(graySource, &width); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Operation with media source failed with error %d.", error); + return error; + } + + error = mv_source_get_height(graySource, &height); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Operation with media source failed with error %d.", error); + return error; + } + + if (buffer_size != width * height) + { + // Unexcepted behaviour + LOGE("Grayscale source interpretation failed."); + return MEDIA_VISION_ERROR_INTERNAL; + } + + mv_source_h sourceCopy = NULL; + + error = mv_create_source(&sourceCopy); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Operation with media source failed with error %d.", error); + return error; + } + + error = mv_source_fill_by_buffer(sourceCopy, data_buffer, buffer_size, + width, height, colorspace); + + if (error != MEDIA_VISION_ERROR_NONE) { + mv_destroy_source(sourceCopy); + LOGE("Operation with media source failed with error %d.", error); + return error; + } + + error = mv_source_get_buffer(sourceCopy, &data_buffer, &buffer_size); + + if (error != MEDIA_VISION_ERROR_NONE) { + mv_destroy_source(sourceCopy); + LOGE("Operation 
with media source failed with error %d.", error); + return error; + } + + error = applyROIToImage(data_buffer, width, height); + + if (error != MEDIA_VISION_ERROR_NONE) { + mv_destroy_source(sourceCopy); + LOGE("Applying ROI failed with error %d.", error); + return error; + } + + error = mv_face_detect(sourceCopy, NULL, faceDetectedCB, this); + + if (MEDIA_VISION_ERROR_NONE != error) { + mv_destroy_source(sourceCopy); + LOGE("Errors were occurred during face detecting %i", error); + return error; + } + + error = mv_destroy_source(sourceCopy); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Operation with media source failed with error %d.", error); + return error; + } + + return error; +} + +std::string EventTriggerPersonRecognition::getEventType() const +{ + return MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED; +} + +bool EventTriggerPersonRecognition::operator==(const EventTriggerPersonRecognition& other) const +{ + if (EventTrigger::operator !=(other)) + return false; + + /* TODO: compare private values if necessary */ + + return true; +} + +bool EventTriggerPersonRecognition::operator!=(const EventTriggerPersonRecognition& other) const +{ + return !(*this == other); +} + +void EventTriggerPersonRecognition::setEventResults( + mv_rectangle_s faceLocation, + int faceLabel, + double confidence) +{ + __eventResult->__locations.push_back(faceLocation); + __eventResult->__faceLabels.push_back(faceLabel); + __eventResult->__confidences.push_back(confidence); +} + +void EventTriggerPersonRecognition::faceDetectedCB( + mv_source_h source, + mv_engine_config_h /*engine_cfg*/, + mv_rectangle_s *faces_locations, + int number_of_faces, + void *user_data) +{ + if (NULL == user_data) { + LOGE("Invalid user data passed"); + return; + } + + EventTriggerPersonRecognition *recognitionTrigger = + (EventTriggerPersonRecognition*)user_data; + + int location_idx = 0; + for (; location_idx < number_of_faces; ++location_idx) { + LOGI("Start surveillance face recognition"); + + const 
int error = mv_face_recognize( + source, + recognitionTrigger->__faceRecognitionModel, + NULL, + &faces_locations[location_idx], + faceRecognizedCB, + recognitionTrigger); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGW("Face recognition for one model failed. Continue"); + continue; + } + + LOGI("Face has been successfully recognized"); + } +} + +void EventTriggerPersonRecognition::faceRecognizedCB( + mv_source_h source, + mv_face_recognition_model_h /*recognition_model*/, + mv_engine_config_h /*engine_cfg*/, + mv_rectangle_s *face_location, + const int *face_label, + double confidence, + void *user_data) +{ + if (face_location == NULL || face_label == NULL) { + LOGI("Face wasn't recognized"); + return; + } + + EventTriggerPersonRecognition *trigger = + (EventTriggerPersonRecognition*) user_data; + + trigger->setEventResults(*face_location, *face_label, confidence); + + CallbackDataMapConstIter iter = trigger->__callbackDataMap.begin(); + + for (; iter != trigger->__callbackDataMap.end(); ++iter) { + mv_surveillance_event_occurred_cb callback = iter->second.callback; + callback( + iter->second.eventTrigger, + trigger->__lastFrame, + trigger->__videoStreamId, + trigger->__eventResult, + iter->second.userData); + } +} + +} /* surveillance */ +} /* mediavision */ diff --git a/mv_surveillance/surveillance/src/HoGDetector.cpp b/mv_surveillance/surveillance/src/HoGDetector.cpp new file mode 100644 index 0000000..4d1ea0c --- /dev/null +++ b/mv_surveillance/surveillance/src/HoGDetector.cpp @@ -0,0 +1,1006 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ +#include +#include "HoGDetector.h" +#include "opencv2/imgproc/imgproc.hpp" +#include + +#ifdef ENABLE_NEON +#include +#endif + +#ifdef ENABLE_OMP +#include +#define NCORES 4 +static int coreids[NCORES] = {1, 2, 3, 4}; +#endif + +/****************************************************************************************\ + The code below is implementation of HOG (Histogram-of-Oriented Gradients) + descriptor and object detection, introduced by Navneet Dalal and Bill Triggs. + + The computed feature vectors are compatible with the + INRIA Object Detection and Localization Toolkit + (http://pascal.inrialpes.fr/soft/olt/) +\****************************************************************************************/ + +namespace modifiedcv { + +class ParallelLoopBodyWrapper { +public: + ParallelLoopBodyWrapper(const cv::ParallelLoopBody& _body, const cv::Range& _r) { + body = &_body; + wholeRange = _r; + nstripes = cvRound(wholeRange.end - wholeRange.start); + } + void operator()(const cv::Range& sr) const { + cv::Range r; + r.start = (int)(wholeRange.start + + ((uint64)sr.start*(wholeRange.end - wholeRange.start) + nstripes/2)/nstripes); + r.end = sr.end >= nstripes ? 
wholeRange.end : (int)(wholeRange.start + + ((uint64)sr.end*(wholeRange.end - wholeRange.start) + nstripes/2)/nstripes); + (*body)(r); + } + cv::Range stripeRange() const { + return cv::Range(0, nstripes); + } + +protected: + const cv::ParallelLoopBody* body; + cv::Range wholeRange; + int nstripes; +}; + +void parallel_for_(const cv::Range& range, const cv::ParallelLoopBody& body) +{ +#if defined ENABLE_OMP + ParallelLoopBodyWrapper pbody(body, range); + cv::Range stripeRange = pbody.stripeRange(); + int i = 0; + #pragma omp parallel for private(i) num_threads(NCORES) + for (i = stripeRange.start; i < stripeRange.end; ++i) { + cpu_set_t mask; + CPU_ZERO(&mask); + CPU_SET(coreids[i % 4], &mask); + + if (sched_setaffinity (0, sizeof(mask), &mask) == -1) { + printf("Could not set CPU Affinity, continuing..."); + } + + pbody(Range(i, i + 1)); + } +#else + cv::parallel_for_(range, body); +#endif +} + +size_t HOGDescriptor::getDescriptorSize() const +{ + return (size_t)nbins* + (blockSize.width/cellSize.width)* + (blockSize.height/cellSize.height)* + ((winSize.width - blockSize.width)/blockStride.width + 1)* + ((winSize.height - blockSize.height)/blockStride.height + 1); +} + +double HOGDescriptor::getWinSigma() const +{ + return winSigma >= 0 ? 
winSigma : (blockSize.width + blockSize.height)/8.; +} + +bool HOGDescriptor::checkDetectorSize() const +{ + size_t detectorSize = svmDetector.size(), descriptorSize = getDescriptorSize(); + return detectorSize == 0 || + detectorSize == descriptorSize || + detectorSize == descriptorSize + 1; +} + +void HOGDescriptor::setSVMDetector(InputArray _svmDetector) +{ + _svmDetector.getMat().convertTo(svmDetector, CV_32F); + CV_Assert(checkDetectorSize()); +} + +void HOGDescriptor::computeGradient(const Mat& img, Mat& grad, Mat& qangle, + Size paddingTL, Size paddingBR) const +{ + CV_Assert(img.type() == CV_8U); + + Size gradsize(img.cols + paddingTL.width + paddingBR.width, + img.rows + paddingTL.height + paddingBR.height); + grad.create(gradsize, CV_32FC2); /* */ + qangle.create(gradsize, CV_8UC2); /* [0..nbins-1] - quantized gradient orientation */ + Size wholeSize; + Point roiofs; + img.locateROI(wholeSize, roiofs); + + int i, x, y; + /* int cn = img.channels(); */ + + Mat_ _lut(1, 256); + const float* lut = &_lut(0, 0); + + if ( gammaCorrection ) + for ( i = 0; i < 256; i++ ) + _lut(0, i) = std::sqrt((float)i); + else + for ( i = 0; i < 256; i++ ) + _lut(0, i) = (float)i; + + AutoBuffer mapbuf(gradsize.width + gradsize.height + 4); + int* xmap = (int*)mapbuf + 1; + int* ymap = xmap + gradsize.width + 2; + + const int borderType = (int)cv::BORDER_REFLECT_101; + + for ( x = -1; x < gradsize.width + 1; x++ ) + xmap[x] = cv::borderInterpolate(x - paddingTL.width + roiofs.x, + wholeSize.width, borderType) - roiofs.x; + for ( y = -1; y < gradsize.height + 1; y++ ) + ymap[y] = cv::borderInterpolate(y - paddingTL.height + roiofs.y, + wholeSize.height, borderType) - roiofs.y; + + /* x- & y- derivatives for the whole row */ + int width = gradsize.width; + AutoBuffer _dbuf(width*4); + float* dbuf = _dbuf; + Mat Dx(1, width, CV_32F, dbuf); + Mat Dy(1, width, CV_32F, dbuf + width); + Mat Mag(1, width, CV_32F, dbuf + width*2); + Mat Angle(1, width, CV_32F, dbuf + width*3); + + int 
_nbins = nbins; + float angleScale = (float)(_nbins/CV_PI); + + for ( y = 0; y < gradsize.height; y++ ) { + const uchar* imgPtr = img.data + img.step*ymap[y]; + const uchar* prevPtr = img.data + img.step*ymap[y-1]; + const uchar* nextPtr = img.data + img.step*ymap[y+1]; + float* gradPtr = (float*)grad.ptr(y); + uchar* qanglePtr = (uchar*)qangle.ptr(y); + + for (x = 0; x < width; x++) { + int x1 = xmap[x]; + dbuf[x] = (float)(lut[imgPtr[xmap[x+1]]] - lut[imgPtr[xmap[x-1]]]); + dbuf[width + x] = (float)(lut[nextPtr[x1]] - lut[prevPtr[x1]]); + } + + cartToPolar(Dx, Dy, Mag, Angle, false); + + for (x = 0; x < width; x++) { + float mag = dbuf[x+width*2], angle = dbuf[x+width*3]*angleScale - 0.5f; + int hidx = cvFloor(angle); + angle -= hidx; + gradPtr[x*2] = mag*(1.f - angle); + gradPtr[x*2+1] = mag*angle; + + if ( hidx < 0 ) + hidx += _nbins; + else if ( hidx >= _nbins ) + hidx -= _nbins; + assert((unsigned)hidx < (unsigned)_nbins); + + qanglePtr[x*2] = (uchar)hidx; + hidx++; + hidx &= hidx < _nbins ? 
-1 : 0; + qanglePtr[x*2+1] = (uchar)hidx; + } + } +} + + +struct HOGCache { + struct BlockData { + BlockData() : histOfs(0), imgOffset() {} + int histOfs; + Point imgOffset; + }; + + struct PixData { + size_t gradOfs, qangleOfs; + int histOfs[4]; + float histWeights[4]; + float gradWeight; + }; + + HOGCache(); + HOGCache(const HOGDescriptor* descriptor, + const Mat& img, Size paddingTL, Size paddingBR, + bool useCache, Size cacheStride); + virtual ~HOGCache() {}; + virtual void init(const HOGDescriptor* descriptor, + const Mat& img, Size paddingTL, Size paddingBR, + bool useCache, Size cacheStride); + + Size windowsInImage(Size imageSize, Size winStride) const; + Rect getWindow(Size imageSize, Size winStride, int idx) const; + + const float* getBlock(Point pt, float* buf); + virtual void normalizeBlockHistogram(float* histogram) const; + + vector pixData; + vector blockData; + + bool useCache; + vector ymaxCached; + Size winSize, cacheStride; + Size nblocks, ncells; + int blockHistogramSize; + int count1, count2, count4; + Point imgoffset; + Mat_ blockCache; + Mat_ blockCacheFlags; + + Mat grad, qangle; + const HOGDescriptor* descriptor; +}; + + +HOGCache::HOGCache() +{ + useCache = false; + blockHistogramSize = count1 = count2 = count4 = 0; + descriptor = 0; +} + +HOGCache::HOGCache(const HOGDescriptor* _descriptor, + const Mat& _img, Size _paddingTL, Size _paddingBR, + bool _useCache, Size _cacheStride) +{ + init(_descriptor, _img, _paddingTL, _paddingBR, _useCache, _cacheStride); +} + +void HOGCache::init(const HOGDescriptor* _descriptor, + const Mat& _img, Size _paddingTL, Size _paddingBR, + bool _useCache, Size _cacheStride) +{ + descriptor = _descriptor; + cacheStride = _cacheStride; + useCache = _useCache; + + descriptor->computeGradient(_img, grad, qangle, _paddingTL, _paddingBR); + imgoffset = _paddingTL; + + winSize = descriptor->winSize; + Size blockSize = descriptor->blockSize; + Size blockStride = descriptor->blockStride; + Size cellSize = 
descriptor->cellSize; + int i, j, nbins = descriptor->nbins; + int rawBlockSize = blockSize.width*blockSize.height; + + nblocks = Size((winSize.width - blockSize.width)/blockStride.width + 1, + (winSize.height - blockSize.height)/blockStride.height + 1); + ncells = Size(blockSize.width/cellSize.width, blockSize.height/cellSize.height); + blockHistogramSize = ncells.width*ncells.height*nbins; + + if ( useCache ) { + Size cacheSize((grad.cols - blockSize.width)/cacheStride.width+1, + (winSize.height/cacheStride.height)+1); + blockCache.create(cacheSize.height, cacheSize.width*blockHistogramSize); + blockCacheFlags.create(cacheSize); + size_t cacheRows = blockCache.rows; + ymaxCached.resize(cacheRows); + for (size_t ii = 0; ii < cacheRows; ii++ ) + ymaxCached[ii] = -1; + } + + Mat_ weights(blockSize); + float sigma = (float)descriptor->getWinSigma(); + float scale = 1.f/(sigma*sigma*2); + + float blockHalfHeight = blockSize.height*0.5f; + float blockHalfWidth = blockSize.width*0.5f; + for (i = 0; i < blockSize.height; i++) + for (j = 0; j < blockSize.width; j++) { + float di = i - blockHalfHeight; + float dj = j - blockHalfWidth; + weights(i, j) = std::exp(-(di*di + dj*dj)*scale); + } + + blockData.resize(nblocks.width*nblocks.height); + pixData.resize(rawBlockSize*3); + + /* + * Initialize 2 lookup tables, pixData & blockData. + * Here is why: + * + * The detection algorithm runs in 4 nested loops (at each pyramid layer): + * loop over the windows within the input image + * loop over the blocks within each window + * loop over the cells within each block + * loop over the pixels in each cell + * + * As each of the loops runs over a 2-dimensional array, + * we could get 8(!) nested loops in total, which is very-very slow. + * + * To speed the things up, we do the following: + * 1. loop over windows is unrolled in the HOGDescriptor::{compute|detect} methods; + * inside we compute the current search window using getWindow() method. 
+ * Yes, it involves some overhead (function call + couple of divisions), + * but it's tiny in fact. + * 2. loop over the blocks is also unrolled. Inside we use pre-computed blockData[j] + * to set up gradient and histogram pointers. + * 3. loops over cells and pixels in each cell are merged + * (since there is no overlap between cells, each pixel in the block is processed once) + * and also unrolled. Inside we use PixData[k] to access the gradient values and + * update the histogram + */ + + count1 = count2 = count4 = 0; + for ( j = 0; j < blockSize.width; j++ ) + for ( i = 0; i < blockSize.height; i++ ) { + PixData* data = 0; + float cellX = (j+0.5f)/cellSize.width - 0.5f; + float cellY = (i+0.5f)/cellSize.height - 0.5f; + int icellX0 = cvFloor(cellX); + int icellY0 = cvFloor(cellY); + int icellX1 = icellX0 + 1, icellY1 = icellY0 + 1; + cellX -= icellX0; + cellY -= icellY0; + + if ( (unsigned)icellX0 < (unsigned)ncells.width && + (unsigned)icellX1 < (unsigned)ncells.width ) { + if ( (unsigned)icellY0 < (unsigned)ncells.height && + (unsigned)icellY1 < (unsigned)ncells.height ) { + data = &pixData[rawBlockSize*2 + (count4++)]; + data->histOfs[0] = (icellX0*ncells.height + icellY0)*nbins; + data->histWeights[0] = (1.f - cellX)*(1.f - cellY); + data->histOfs[1] = (icellX1*ncells.height + icellY0)*nbins; + data->histWeights[1] = cellX*(1.f - cellY); + data->histOfs[2] = (icellX0*ncells.height + icellY1)*nbins; + data->histWeights[2] = (1.f - cellX)*cellY; + data->histOfs[3] = (icellX1*ncells.height + icellY1)*nbins; + data->histWeights[3] = cellX*cellY; + } else { + data = &pixData[rawBlockSize + (count2++)]; + if ( (unsigned)icellY0 < (unsigned)ncells.height ) { + icellY1 = icellY0; + cellY = 1.f - cellY; + } + data->histOfs[0] = (icellX0*ncells.height + icellY1)*nbins; + data->histWeights[0] = (1.f - cellX)*cellY; + data->histOfs[1] = (icellX1*ncells.height + icellY1)*nbins; + data->histWeights[1] = cellX*cellY; + data->histOfs[2] = data->histOfs[3] = 0; + 
data->histWeights[2] = data->histWeights[3] = 0; + } + } else { + if ( (unsigned)icellX0 < (unsigned)ncells.width ) { + icellX1 = icellX0; + cellX = 1.f - cellX; + } + + if ( (unsigned)icellY0 < (unsigned)ncells.height && + (unsigned)icellY1 < (unsigned)ncells.height ) { + data = &pixData[rawBlockSize + (count2++)]; + data->histOfs[0] = (icellX1*ncells.height + icellY0)*nbins; + data->histWeights[0] = cellX*(1.f - cellY); + data->histOfs[1] = (icellX1*ncells.height + icellY1)*nbins; + data->histWeights[1] = cellX*cellY; + data->histOfs[2] = data->histOfs[3] = 0; + data->histWeights[2] = data->histWeights[3] = 0; + } else { + data = &pixData[count1++]; + if ( (unsigned)icellY0 < (unsigned)ncells.height ) { + icellY1 = icellY0; + cellY = 1.f - cellY; + } + data->histOfs[0] = (icellX1*ncells.height + icellY1)*nbins; + data->histWeights[0] = cellX*cellY; + data->histOfs[1] = data->histOfs[2] = data->histOfs[3] = 0; + data->histWeights[1] = data->histWeights[2] = data->histWeights[3] = 0; + } + } + data->gradOfs = (grad.cols*i + j)*2; + data->qangleOfs = (qangle.cols*i + j)*2; + data->gradWeight = weights(i, j); + } + + assert(count1 + count2 + count4 == rawBlockSize); + /* defragment pixData */ + for ( j = 0; j < count2; j++ ) + pixData[j + count1] = pixData[j + rawBlockSize]; + for ( j = 0; j < count4; j++ ) + pixData[j + count1 + count2] = pixData[j + rawBlockSize*2]; + count2 += count1; + count4 += count2; + + /* initialize blockData */ + for ( j = 0; j < nblocks.width; j++ ) + for ( i = 0; i < nblocks.height; i++ ) { + BlockData& data = blockData[j*nblocks.height + i]; + data.histOfs = (j*nblocks.height + i)*blockHistogramSize; + data.imgOffset = Point(j*blockStride.width, i*blockStride.height); + } +} + + +const float* HOGCache::getBlock(Point pt, float* buf) +{ + float* blockHist = buf; + assert(descriptor != 0); + + Size blockSize = descriptor->blockSize; + pt += imgoffset; + + CV_Assert( (unsigned)pt.x <= (unsigned)(grad.cols - blockSize.width) && + 
(unsigned)pt.y <= (unsigned)(grad.rows - blockSize.height) ); + + if ( useCache ) { + CV_Assert(pt.x % cacheStride.width == 0 && + pt.y % cacheStride.height == 0); + Point cacheIdx(pt.x/cacheStride.width, + (pt.y/cacheStride.height) % blockCache.rows); + if ( pt.y != ymaxCached[cacheIdx.y] ) { + Mat_ cacheRow = blockCacheFlags.row(cacheIdx.y); + cacheRow = (uchar)0; + ymaxCached[cacheIdx.y] = pt.y; + } + + blockHist = &blockCache[cacheIdx.y][cacheIdx.x*blockHistogramSize]; + uchar& computedFlag = blockCacheFlags(cacheIdx.y, cacheIdx.x); + if ( computedFlag != 0 ) + return blockHist; + computedFlag = (uchar)1; /* set it at once, before actual computing */ + } + + int k, C1 = count1, C2 = count2, C4 = count4; + const float* gradPtr = (const float*)(grad.data + grad.step*pt.y) + pt.x*2; + const uchar* qanglePtr = qangle.data + qangle.step*pt.y + pt.x*2; + + CV_Assert(blockHist != 0); + for ( k = 0; k < blockHistogramSize; k++ ) + blockHist[k] = 0.f; + + const PixData* _pixData = &pixData[0]; + + for ( k = 0; k < C1; k++ ) { + const PixData& pk = _pixData[k]; + const float* a = gradPtr + pk.gradOfs; + float w = pk.gradWeight*pk.histWeights[0]; + const uchar* h = qanglePtr + pk.qangleOfs; + int h0 = h[0], h1 = h[1]; + float* hist = blockHist + pk.histOfs[0]; + float t0 = hist[h0] + a[0]*w; + float t1 = hist[h1] + a[1]*w; + hist[h0] = t0; + hist[h1] = t1; + } + + for ( ; k < C2; k++ ) { + const PixData& pk = _pixData[k]; + const float* a = gradPtr + pk.gradOfs; + float w, t0, t1, a0 = a[0], a1 = a[1]; + const uchar* h = qanglePtr + pk.qangleOfs; + int h0 = h[0], h1 = h[1]; + + float* hist = blockHist + pk.histOfs[0]; + w = pk.gradWeight*pk.histWeights[0]; + t0 = hist[h0] + a0*w; + t1 = hist[h1] + a1*w; + hist[h0] = t0; + hist[h1] = t1; + + hist = blockHist + pk.histOfs[1]; + w = pk.gradWeight*pk.histWeights[1]; + t0 = hist[h0] + a0*w; + t1 = hist[h1] + a1*w; + hist[h0] = t0; + hist[h1] = t1; + } + + for ( ; k < C4; k++ ) { + const PixData& pk = _pixData[k]; + const 
float* a = gradPtr + pk.gradOfs; + float w, t0, t1, a0 = a[0], a1 = a[1]; + const uchar* h = qanglePtr + pk.qangleOfs; + int h0 = h[0], h1 = h[1]; + + float* hist = blockHist + pk.histOfs[0]; + w = pk.gradWeight*pk.histWeights[0]; + t0 = hist[h0] + a0*w; + t1 = hist[h1] + a1*w; + hist[h0] = t0; + hist[h1] = t1; + + hist = blockHist + pk.histOfs[1]; + w = pk.gradWeight*pk.histWeights[1]; + t0 = hist[h0] + a0*w; + t1 = hist[h1] + a1*w; + hist[h0] = t0; + hist[h1] = t1; + + hist = blockHist + pk.histOfs[2]; + w = pk.gradWeight*pk.histWeights[2]; + t0 = hist[h0] + a0*w; + t1 = hist[h1] + a1*w; + hist[h0] = t0; + hist[h1] = t1; + + hist = blockHist + pk.histOfs[3]; + w = pk.gradWeight*pk.histWeights[3]; + t0 = hist[h0] + a0*w; + t1 = hist[h1] + a1*w; + hist[h0] = t0; + hist[h1] = t1; + } + + normalizeBlockHistogram(blockHist); + + return blockHist; +} + +void HOGCache::normalizeBlockHistogram(float* _hist) const +{ +#ifdef ENABLE_NEON + /* NEON vector for loading the histogram to the memory */ + float32x4_t hist_v; + /* Initialize the accumulator for summation storing */ + float32x4_t acc = vdupq_n_f32(0.f); +#endif + + /* Histogram pointer in the memory */ + float *hist_ptr = &_hist[0]; + /* Variable to store values of summations */ + float sum = 0.f; + size_t sz = blockHistogramSize; + +#ifdef ENABLE_NEON + for (; sz != 0u; sz -= 4u) { + hist_v = vld1q_f32(hist_ptr); + acc = vmlaq_f32(acc, hist_v, hist_v); + hist_ptr += 4; + } + + sum += vgetq_lane_f32(acc, 0) + vgetq_lane_f32(acc, 1) + + vgetq_lane_f32(acc, 2) + vgetq_lane_f32(acc, 3); + + /* Reset accumulator */ + acc = vdupq_n_f32(0.f); + + sz = blockHistogramSize; + hist_ptr = &_hist[0]; +#else + for (size_t i = 0; i < sz; ++i) + sum += hist_ptr[i] * hist_ptr[i]; +#endif + + float scale = 1.f / (std::sqrt(sum) + sz * 0.1f); + sum = 0.f; + +#ifdef ENABLE_NEON + float32x4_t thres_v = vdupq_n_f32((float)descriptor->L2HysThreshold); + + for (; sz != 0; sz -= 4) { + /* Find minimal value among threshold and histogram 
value, accumulate + * this value squared */ + hist_v = vminq_f32(vmulq_n_f32(vld1q_f32(hist_ptr), scale), thres_v); + acc = vmlaq_f32(acc, hist_v, hist_v); + /* Update histograms in memory according with found min values */ + vst1q_f32(hist_ptr, hist_v); + hist_ptr += 4; + } + + sum += vgetq_lane_f32(acc, 0) + vgetq_lane_f32(acc, 1) + + vgetq_lane_f32(acc, 2) + vgetq_lane_f32(acc, 3); + +#else + float thresh = (float)descriptor->L2HysThreshold; + for (size_t i = 0; i < sz; ++i) { + hist_ptr[i] = std::min(hist_ptr[i] * scale, thresh); + sum += hist_ptr[i] * hist_ptr[i]; + } +#endif + + scale = 1.f / (std::sqrt(sum) + 1e-3f); + +#ifdef ENABLE_NEON + sz = blockHistogramSize; + hist_ptr = &_hist[0]; + + /* Scale histogram (normalize): */ + for (; sz != 0; sz -= 4) { + vst1q_f32(hist_ptr, vmulq_n_f32(vld1q_f32(hist_ptr), scale)); + hist_ptr += 4; + } +#else + for (size_t i = 0; i < sz; i++ ) + hist_ptr[i] *= scale; +#endif +} + + +Size HOGCache::windowsInImage(Size imageSize, Size winStride) const +{ + return Size((imageSize.width - winSize.width)/winStride.width + 1, + (imageSize.height - winSize.height)/winStride.height + 1); +} + +Rect HOGCache::getWindow(Size imageSize, Size winStride, int idx) const +{ + int nwindowsX = (imageSize.width - winSize.width)/winStride.width + 1; + int y = idx / nwindowsX; + int x = idx - nwindowsX*y; + return Rect( x*winStride.width, y*winStride.height, winSize.width, winSize.height ); +} + + +void HOGDescriptor::compute(const Mat& img, vector& descriptors, + Size winStride, Size padding, + const vector& locations) const +{ + if ( winStride == Size() ) + winStride = cellSize; + Size cacheStride(gcd(winStride.width, blockStride.width), + gcd(winStride.height, blockStride.height)); + size_t nwindows = locations.size(); + padding.width = (int)alignSize(std::max(padding.width, 0), cacheStride.width); + padding.height = (int)alignSize(std::max(padding.height, 0), cacheStride.height); + Size paddedImgSize(img.cols + padding.width*2, img.rows 
+ padding.height*2); + + HOGCache cache(this, img, padding, padding, nwindows == 0, cacheStride); + + if ( !nwindows ) + nwindows = cache.windowsInImage(paddedImgSize, winStride).area(); + + const HOGCache::BlockData* blockData = &cache.blockData[0]; + + int nblocks = cache.nblocks.area(); + int blockHistogramSize = cache.blockHistogramSize; + size_t dsize = getDescriptorSize(); + descriptors.resize(dsize*nwindows); + + for ( size_t i = 0; i < nwindows; i++ ) { + float* descriptor = &descriptors[i*dsize]; + + Point pt0; + if ( !locations.empty() ) { + pt0 = locations[i]; + if ( pt0.x < -padding.width || pt0.x > img.cols + padding.width - winSize.width || + pt0.y < -padding.height || pt0.y > img.rows + padding.height - winSize.height ) + continue; + } else { + pt0 = cache.getWindow(paddedImgSize, winStride, (int)i).tl() - Point(padding); + CV_Assert(pt0.x % cacheStride.width == 0 && pt0.y % cacheStride.height == 0); + } + + for ( int j = 0; j < nblocks; j++ ) { + const HOGCache::BlockData& bj = blockData[j]; + Point pt = pt0 + bj.imgOffset; + + float* dst = descriptor + bj.histOfs; + const float* src = cache.getBlock(pt, dst); + if ( src != dst ) + for ( int k = 0; k < blockHistogramSize; k++ ) + dst[k] = src[k]; + } + } +} + + +void HOGDescriptor::detect(const Mat& img, + vector& hits, vector& weights, double hitThreshold, + Size winStride, Size padding, const vector& locations) const +{ + hits.clear(); + if ( svmDetector.empty() ) + return; + + if ( winStride == Size() ) + winStride = cellSize; + Size cacheStride(gcd(winStride.width, blockStride.width), + gcd(winStride.height, blockStride.height)); + size_t nwindows = locations.size(); + padding.width = (int)alignSize(std::max(padding.width, 0), cacheStride.width); + padding.height = (int)alignSize(std::max(padding.height, 0), cacheStride.height); + Size paddedImgSize(img.cols + padding.width*2, img.rows + padding.height*2); + + HOGCache cache(this, img, padding, padding, nwindows == 0, cacheStride); + + if ( 
!nwindows ) + nwindows = cache.windowsInImage(paddedImgSize, winStride).area(); + + const HOGCache::BlockData* blockData = &cache.blockData[0]; + + int nblocks = cache.nblocks.area(); + int blockHistogramSize = cache.blockHistogramSize; + size_t dsize = getDescriptorSize(); + + double rho = svmDetector.size() > dsize ? svmDetector[dsize] : 0; + vector blockHist(blockHistogramSize); + + for ( size_t i = 0; i < nwindows; i++ ) { + Point pt0; + if ( !locations.empty() ) { + pt0 = locations[i]; + if ( pt0.x < -padding.width || pt0.x > img.cols + padding.width - winSize.width || + pt0.y < -padding.height || pt0.y > img.rows + padding.height - winSize.height ) + continue; + } else { + pt0 = cache.getWindow(paddedImgSize, winStride, (int)i).tl() - Point(padding); + CV_Assert(pt0.x % cacheStride.width == 0 && pt0.y % cacheStride.height == 0); + } + double s = rho; + const float* svmVec = &svmDetector[0]; + int j, k; + + for ( j = 0; j < nblocks; j++, svmVec += blockHistogramSize ) { + const HOGCache::BlockData& bj = blockData[j]; + Point pt = pt0 + bj.imgOffset; + + const float* vec = cache.getBlock(pt, &blockHist[0]); +#ifdef ENABLE_NEON + float32x4_t vec_v; /* NEON feature vector */ + float32x4_t svm_v; /* NEON SVM feature weights */ + float32x4_t acc = vdupq_n_f32(0.f); /* NEON partial sum */ + for ( k = 0; k <= blockHistogramSize - 4; k += 4 ) { + vec_v = vld1q_f32(vec + k); + svm_v = vld1q_f32(svmVec + k); + acc = vmlaq_f32(acc, vec_v, svm_v); + } + + s += vgetq_lane_f32(acc, 0) + vgetq_lane_f32(acc, 1) + + vgetq_lane_f32(acc, 2) + vgetq_lane_f32(acc, 3); + +#else + for ( k = 0; k <= blockHistogramSize - 4; k += 4 ) + s += vec[k]*svmVec[k] + vec[k+1]*svmVec[k+1] + + vec[k+2]*svmVec[k+2] + vec[k+3]*svmVec[k+3]; +#endif + for ( ; k < blockHistogramSize; k++ ) + s += vec[k]*svmVec[k]; + } + + if ( s >= hitThreshold ) { + hits.push_back(pt0); + weights.push_back(s); + } + } +} + +void HOGDescriptor::detect(const Mat& img, vector& hits, double hitThreshold, + Size 
winStride, Size padding, const vector& locations) const +{ + vector weightsV; + detect(img, hits, weightsV, hitThreshold, winStride, padding, locations); +} + +class HOGInvoker : public ParallelLoopBody { + public: + HOGInvoker(const HOGDescriptor* _hog, const Mat& _img, + double _hitThreshold, Size _winStride, Size _padding, + const double* _levelScale, std::vector * _vec, Mutex* _mtx, + std::vector* _weights = 0, std::vector* _scales = 0) { + hog = _hog; + img = _img; + hitThreshold = _hitThreshold; + winStride = _winStride; + padding = _padding; + levelScale = _levelScale; + vec = _vec; + weights = _weights; + scales = _scales; + mtx = _mtx; + } + + void operator()(const Range& range) const { + int i, i1 = range.start, i2 = range.end; + double minScale = i1 > 0 ? levelScale[i1] : i2 > 1 ? levelScale[i1+1] : std::max(img.cols, img.rows); + Size maxSz(cvCeil(img.cols/minScale), cvCeil(img.rows/minScale)); + Mat smallerImgBuf(maxSz, img.type()); + vector locations; + vector hitsWeights; + + Size wholeSize; + Point offset; + img.locateROI(wholeSize, offset); + + for ( i = i1; i < i2; i++ ) { + double scale = levelScale[i]; + Size sz(cvRound(img.cols/scale), cvRound(img.rows/scale)); + Mat smallerImg(sz, img.type(), smallerImgBuf.data); + if (sz == img.size()) + smallerImg = Mat(sz, img.type(), img.data, img.step); + else + resize(img, smallerImg, sz); + hog->detect(smallerImg, locations, hitsWeights, hitThreshold, winStride, padding); + + Size scaledWinSize = Size(cvRound(hog->winSize.width*scale), cvRound(hog->winSize.height*scale)); + + mtx->lock(); + for ( size_t j = 0; j < locations.size(); j++ ) { + vec->push_back(Rect(cvRound(locations[j].x*scale), + cvRound(locations[j].y*scale), + scaledWinSize.width, scaledWinSize.height)); + if (scales) { + scales->push_back(scale); + } + } + mtx->unlock(); + + if (weights && (!hitsWeights.empty())) { + mtx->lock(); + for (size_t j = 0; j < locations.size(); j++) { + weights->push_back(hitsWeights[j]); + } + mtx->unlock(); 
+ } + } + } + + const HOGDescriptor* hog; + Mat img; + double hitThreshold; + Size winStride; + Size padding; + const double* levelScale; + std::vector* vec; + std::vector* weights; + std::vector* scales; + Mutex* mtx; +}; + + +void HOGDescriptor::detectMultiScale( + const Mat& img, vector& foundLocations, vector& foundWeights, + double hitThreshold, Size winStride, Size padding, + double scale0, double finalThreshold, bool useMeanshiftGrouping) const +{ + double scale = 1.; + int levels = 0; + + vector levelScale; + for ( levels = 0; levels < nlevels; levels++ ) { + levelScale.push_back(scale); + if ( cvRound(img.cols/scale) < winSize.width || + cvRound(img.rows/scale) < winSize.height || + scale0 <= 1 ) + break; + scale *= scale0; + } + levels = std::max(levels, 1); + levelScale.resize(levels); + + std::vector allCandidates; + std::vector tempScales; + std::vector tempWeights; + std::vector foundScales; + Mutex mtx; + + modifiedcv::parallel_for_(Range(0, (int)levelScale.size()), + HOGInvoker(this, img, hitThreshold, winStride, padding, &levelScale[0], &allCandidates, &mtx, &tempWeights, &tempScales)); + + std::copy(tempScales.begin(), tempScales.end(), back_inserter(foundScales)); + foundLocations.clear(); + std::copy(allCandidates.begin(), allCandidates.end(), back_inserter(foundLocations)); + foundWeights.clear(); + std::copy(tempWeights.begin(), tempWeights.end(), back_inserter(foundWeights)); + + if ( useMeanshiftGrouping ) { + groupRectangles_meanshift(foundLocations, foundWeights, foundScales, finalThreshold, winSize); + } else { + groupRectangles(foundLocations, foundWeights, (int)finalThreshold, 0.2); + } +} + +void HOGDescriptor::detectMultiScale(const Mat& img, vector& foundLocations, + double hitThreshold, Size winStride, Size padding, + double scale0, double finalThreshold, bool useMeanshiftGrouping) const +{ + vector foundWeights; + detectMultiScale(img, foundLocations, foundWeights, hitThreshold, winStride, + padding, scale0, finalThreshold, 
useMeanshiftGrouping); +} + +void HOGDescriptor::groupRectangles(vector& rectList, vector& weights, int groupThreshold, double eps) const +{ + if ( groupThreshold <= 0 || rectList.empty() ) { + return; + } + + CV_Assert(rectList.size() == weights.size()); + + vector labels; + int nclasses = partition(rectList, labels, SimilarRects(eps)); + + vector > rrects(nclasses); + vector numInClass(nclasses, 0); + vector foundWeights(nclasses, DBL_MIN); + int i, j, nlabels = (int)labels.size(); + + for ( i = 0; i < nlabels; i++ ) { + int cls = labels[i]; + rrects[cls].x += rectList[i].x; + rrects[cls].y += rectList[i].y; + rrects[cls].width += rectList[i].width; + rrects[cls].height += rectList[i].height; + foundWeights[cls] = max(foundWeights[cls], weights[i]); + numInClass[cls]++; + } + + for ( i = 0; i < nclasses; i++ ) { + /* find the average of all ROI in the cluster */ + cv::Rect_ r = rrects[i]; + double s = 1.0/numInClass[i]; + rrects[i] = cv::Rect_(cv::saturate_cast(r.x*s), + cv::saturate_cast(r.y*s), + cv::saturate_cast(r.width*s), + cv::saturate_cast(r.height*s)); + } + + rectList.clear(); + weights.clear(); + + for ( i = 0; i < nclasses; i++ ) { + cv::Rect r1 = rrects[i]; + int n1 = numInClass[i]; + double w1 = foundWeights[i]; + if ( n1 <= groupThreshold ) + continue; + /* filter out small rectangles inside large rectangles */ + for ( j = 0; j < nclasses; j++ ) { + int n2 = numInClass[j]; + + if ( j == i || n2 <= groupThreshold ) + continue; + + cv::Rect r2 = rrects[j]; + + int dx = cv::saturate_cast(r2.width * eps); + int dy = cv::saturate_cast(r2.height * eps); + + if ( r1.x >= r2.x - dx && + r1.y >= r2.y - dy && + r1.x + r1.width <= r2.x + r2.width + dx && + r1.y + r1.height <= r2.y + r2.height + dy && + (n2 > std::max(3, n1) || n1 < 3) ) + break; + } + + if ( j == nclasses ) { + rectList.push_back(r1); + weights.push_back(w1); + } + } +} +} diff --git a/mv_surveillance/surveillance/src/SurveillanceHelper.cpp 
b/mv_surveillance/surveillance/src/SurveillanceHelper.cpp new file mode 100644 index 0000000..bbd92e9 --- /dev/null +++ b/mv_surveillance/surveillance/src/SurveillanceHelper.cpp @@ -0,0 +1,200 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "SurveillanceHelper.h" + +#include + +#include "opencv2/highgui/highgui.hpp" + +namespace mediavision { +namespace surveillance { + +int SurveillanceHelper::convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource) +{ + MEDIA_VISION_INSTANCE_CHECK(mvSource); + + int depth = CV_8U; /* Default depth. 1 byte per channel. 
*/ + unsigned int channelsNumber = 0; + unsigned int width = 0, height = 0; + unsigned int bufferSize = 0; + unsigned char *buffer = NULL; + + mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; + + MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), + "Failed to get the width."); + MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), + "Failed to get the height."); + MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), + "Failed to get the colorspace."); + MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize), + "Failed to get the buffer size."); + + int conversionType = -1; /* Type of conversion from given colorspace to gray */ + switch(colorspace) { + case MEDIA_VISION_COLORSPACE_INVALID: + LOGE("Error: mv_source has invalid colorspace."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + case MEDIA_VISION_COLORSPACE_Y800: + channelsNumber = 1; + /* Without convertion */ + break; + case MEDIA_VISION_COLORSPACE_I420: + channelsNumber = 1; + height *= 1.5; + conversionType = CV_YUV2GRAY_I420; + break; + case MEDIA_VISION_COLORSPACE_NV12: + channelsNumber = 1; + height *= 1.5; + conversionType = CV_YUV2GRAY_NV12; + break; + case MEDIA_VISION_COLORSPACE_YV12: + channelsNumber = 1; + height *= 1.5; + conversionType = CV_YUV2GRAY_YV12; + break; + case MEDIA_VISION_COLORSPACE_NV21: + channelsNumber = 1; + height *= 1.5; + conversionType = CV_YUV2GRAY_NV21; + break; + case MEDIA_VISION_COLORSPACE_YUYV: + channelsNumber = 2; + conversionType = CV_YUV2GRAY_YUYV; + break; + case MEDIA_VISION_COLORSPACE_UYVY: + channelsNumber = 2; + conversionType = CV_YUV2GRAY_UYVY; + break; + case MEDIA_VISION_COLORSPACE_422P: + channelsNumber = 2; + conversionType = CV_YUV2GRAY_Y422; + break; + case MEDIA_VISION_COLORSPACE_RGB565: + channelsNumber = 2; + conversionType = CV_BGR5652GRAY; + break; + case MEDIA_VISION_COLORSPACE_RGB888: + channelsNumber = 3; + conversionType = CV_RGB2GRAY; + break; + case 
MEDIA_VISION_COLORSPACE_RGBA: + channelsNumber = 4; + conversionType = CV_RGBA2GRAY; + break; + default: + LOGE("Error: mv_source has unsupported colorspace."); + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } + + if (conversionType == -1) { /* Without conversion */ + cvSource = cv::Mat(cv::Size(width, height), + CV_MAKETYPE(depth, channelsNumber), buffer).clone(); + } else { /* Conversion */ + /* Class for representation the given image as cv::Mat before conversion */ + cv::Mat origin(cv::Size(width, height), + CV_MAKETYPE(depth, channelsNumber), buffer); + cv::cvtColor(origin, cvSource, conversionType); + } + + return MEDIA_VISION_ERROR_NONE; +} + +#ifdef ENABLE_NEON +int SurveillanceHelper::convertSourceMVRGB2GrayCVNeon( + mv_source_h mvSource, + cv::Mat& cvSource) +{ + MEDIA_VISION_INSTANCE_CHECK(mvSource); + + const int depth = CV_8U; /* Default depth. 1 byte per channel. */ + unsigned int width = 0, height = 0; + unsigned int bufferSize = 0; + unsigned char *src = NULL; + + mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; + + MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), + "Failed to get the width."); + MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), + "Failed to get the height."); + MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), + "Failed to get the colorspace."); + MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &src, &bufferSize), + "Failed to get the buffer size."); + + if (colorspace != MEDIA_VISION_COLORSPACE_RGB888) { + LOGE("Error: mv_source has unsupported colorspace."); + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } + + cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(depth, 1)); + const unsigned int cvSourceSize = width * height; + +#if defined(__aarch64__) + asm volatile ("lsr %2, %2, #3 \n" + "# channel multimpliers: \n" + "mov w4, #28 \n" + "mov w5, #151 \n" + "mov w6, #77 \n" + "dup v3.8b, w4 \n" + "dup v4.8b, w5 \n" + "dup v5.8b, w6 \n" + ".loop: \n" + "# load 8 
pixels: \n" + "ld3 {v0.8b,v1.8b,v2.8b}, [%0],#24 \n" + "# conversion: \n" + "umull v7.8h, v0.8b, v3.8b \n" + "umlal v7.8h, v1.8b, v4.8b \n" + "umlal v7.8h, v2.8b, v5.8b \n" + "# shift and store: \n" + "shrn v6.8b, v7.8h, #8 \n" + "st1 {v6.8b}, [%1],#8 \n" + "subs %2, %2, #1 \n" + "bne .loop \n"::"r" (src), "r" (cvSource.data), "r" (cvSourceSize) + :"memory", "w4", "w5", "w6"); +#else + asm volatile ("lsr %2, %2, #3 \n" + "# channel multimpliers: \n" + "mov r4, #77 \n" + "mov r5, #151 \n" + "mov r6, #28 \n" + "vdup.8 d3, r4 \n" + "vdup.8 d4, r5 \n" + "vdup.8 d5, r6 \n" + ".loop: \n" + "# load 8 pixels: \n" + "vld3.8 {d0-d2}, [%0]! \n" + "# conversion: \n" + "vmull.u8 q7, d0, d3 \n" + "vmlal.u8 q7, d1, d4 \n" + "vmlal.u8 q7, d2, d5 \n" + "# shift and store: \n" + "vshrn.u16 d6, q7, #8 \n" + "vst1.8 {d6}, [%1]! \n" + "subs %2, %2, #1 \n" + "bne .loop \n"::"r" (src), "r" (cvSource.data), "r" (cvSourceSize) + :"memory", "r4", "r5", "r6"); +#endif + + return MEDIA_VISION_ERROR_NONE; +} +#endif + +} /* surveillance */ +} /* mediavision */ diff --git a/mv_surveillance/surveillance/src/mv_absdiff.c b/mv_surveillance/surveillance/src/mv_absdiff.c new file mode 100644 index 0000000..9e122e6 --- /dev/null +++ b/mv_surveillance/surveillance/src/mv_absdiff.c @@ -0,0 +1,81 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "mv_absdiff.h" + +#include "mv_common.h" +#include "mv_private.h" + +#ifdef ENABLE_NEON +#include +#endif + +int mv_absdiff( + uint8_t *__restrict__ src1, + uint8_t *__restrict__ src2, + int width, + int height, + int stride, + uint8_t *__restrict__ dst) +{ + if (src1 == NULL || src2 == NULL || width <= 0 || height <= 0 || + stride <= 0 || dst == NULL) { + LOGE("Wrong input parameter. Aplpying mask failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + int column = 0; + int row = 0; + const int padding = stride - width; + +#ifdef ENABLE_NEON + const int batch_size = 16; + const int batch_columns_count = width / batch_size; +#endif + + for (; row < height; ++row) { +#ifdef ENABLE_NEON + for (column = 0; column < batch_columns_count; ++column) { + uint8x16_t gray1 = vld1q_u8 (src1); + uint8x16_t gray2 = vld1q_u8 (src2); + + uint8x16_t dst_temp = vabdq_u8(gray1, gray2); + + vst1q_u8 (dst, dst_temp); + + src1 += batch_size; + src2 += batch_size; + dst += batch_size; + } +#else + for (column = 0; column < width; ++column) { + uint8_t gray1 = *src1; + uint8_t gray2 = *src2; + + (*dst) = abs((*src1) - (*src2)); + + ++src1; + ++src2; + ++dst; + } +#endif + src1 += padding; + src2 += padding; + dst += padding; + } + + return MEDIA_VISION_ERROR_NONE; +} diff --git a/mv_surveillance/surveillance/src/mv_apply_mask.c b/mv_surveillance/surveillance/src/mv_apply_mask.c new file mode 100644 index 0000000..f156d07 --- /dev/null +++ b/mv_surveillance/surveillance/src/mv_apply_mask.c @@ -0,0 +1,77 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mv_apply_mask.h" + +#include "mv_common.h" +#include "mv_private.h" + +#ifdef ENABLE_NEON +#include +#endif + +int mv_apply_mask( + uint8_t *src_buffer, + uint8_t *__restrict mask, + int width, + int height, + int stride, + uint8_t *dst_buffer) +{ + if (src_buffer == NULL || mask == NULL || width <= 0 || height <= 0 || + stride <= 0 || dst_buffer == NULL) { + LOGE("Wrong input parameter. Aplpying mask failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + const int padding = stride - width; + +#ifdef ENABLE_NEON + const int batch_size = 16; + const int batch_columns_count = width / batch_size; +#endif + + int row = 0; + int column = 0; + for (; row < height; ++row) { +#ifdef ENABLE_NEON + for (column = 0; column < batch_columns_count; ++column) { + uint8x16_t src_v = vld1q_u8(src_buffer); + uint8x16_t mask_v = vld1q_u8(mask); + + uint8x16_t dst_v = vandq_u8(src_v, mask_v); + + vst1q_u8(dst_buffer, dst_v); + + dst_buffer += batch_size; + src_buffer += batch_size; + mask += batch_size; + } +#else + for (column = 0; column < width; ++column) { + (*dst_buffer) = ((*src_buffer) & (*mask)); + ++dst_buffer; + ++src_buffer; + ++mask; + } +#endif + dst_buffer += padding; + src_buffer += padding; + mask += padding; + } + + return MEDIA_VISION_ERROR_NONE; +} diff --git a/mv_surveillance/surveillance/src/mv_mask_buffer.c b/mv_surveillance/surveillance/src/mv_mask_buffer.c new file mode 100644 index 0000000..f1d50ef --- /dev/null +++ b/mv_surveillance/surveillance/src/mv_mask_buffer.c @@ -0,0 +1,89 @@ +/** + * Copyright (c) 2015 
Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mv_mask_buffer.h" + +#include "mv_common.h" +#include "mv_private.h" + +#include + +int mv_get_mask_buffer( + unsigned int buffer_width, + unsigned int buffer_height, + mv_point_s *polygon, + unsigned int points_number, + unsigned char **mask_buffer) +{ + if (buffer_width == 0u || buffer_height == 0u || + polygon == NULL|| points_number == 0u || mask_buffer == NULL) { + LOGE("Wrong input parameter. 
Getting mask buffer failed."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + const unsigned int buffer_size = buffer_width * buffer_height; + + *mask_buffer = (unsigned char*) malloc(sizeof(unsigned char) * buffer_size); + + unsigned int i = 0u; + unsigned int j = 0u; + unsigned int k = 0u; + + int max_x = polygon[0].x; + int max_y = polygon[0].y; + int min_x = polygon[0].x; + int min_y = polygon[0].y; + + for (k = 1u; k < points_number; ++k) { + if (polygon[k].x > max_x) { + max_x = polygon[k].x; + } else if (polygon[k].x < min_x) { + min_x = polygon[k].x; + } + + if (polygon[k].y > max_y) { + max_y = polygon[k].y; + } else if (polygon[k].y < min_y) { + min_y = polygon[k].y; + } + } + + + for (k = 0u; k < buffer_size; ++k) { + bool inside_polygon = false; + + const int test_x = (int) k % buffer_width; + const int test_y = (int) k / buffer_width; + + if (test_x > max_x || test_x < min_x || test_y > max_y || test_y < min_y) { + (*mask_buffer)[k] = 0; + continue; + } + + for (i = 0u, j = points_number - 1; i < points_number; j = i++) { + if (((polygon[i].y > test_y) != (polygon[j].y > test_y)) && + ((float) test_x < (float) (polygon[j].x - polygon[i].x) * + (test_y - polygon[i].y) / + (polygon[j].y - polygon[i].y) + + polygon[i].x)) { + inside_polygon = !inside_polygon; + } + } + inside_polygon ? ((*mask_buffer)[k] = 255) : ((*mask_buffer)[k] = 0); + } + + return MEDIA_VISION_ERROR_NONE; +} diff --git a/mv_surveillance/surveillance/src/mv_surveillance_open.cpp b/mv_surveillance/surveillance/src/mv_surveillance_open.cpp new file mode 100644 index 0000000..4d4c328 --- /dev/null +++ b/mv_surveillance/surveillance/src/mv_surveillance_open.cpp @@ -0,0 +1,140 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mv_surveillance_open.h" + +#include "EventManager.h" +#include "EventResult.h" + +#include + +using namespace mediavision::surveillance; + +int mv_surveillance_subscribe_event_trigger_open( + mv_surveillance_event_trigger_h event_trigger, + int video_stream_id, + mv_engine_config_h engine_cfg, + mv_surveillance_event_occurred_cb callback, + void *user_data) +{ + mv_surveillance_event_trigger_s *handle = + (mv_surveillance_event_trigger_s *)event_trigger; + + return EventManager::getInstance().registerEvent( + event_trigger, + static_cast(handle->trigger_id), + handle->event_type, + video_stream_id, + engine_cfg, + callback, + user_data, + handle->number_of_roi_points, + handle->roi); +} + +int mv_surveillance_unsubscribe_event_trigger_open( + mv_surveillance_event_trigger_h event_trigger, + int video_stream_id) +{ + mv_surveillance_event_trigger_s *handle = + (mv_surveillance_event_trigger_s *)event_trigger; + + return EventManager::getInstance().unregisterEvent( + static_cast(handle->trigger_id), + video_stream_id); +} + +int mv_surveillance_push_source_open( + mv_source_h source, + int video_stream_id) +{ + MEDIA_VISION_INSTANCE_CHECK(source); + + return EventManager::getInstance().pushSource(source, video_stream_id); +} + +int mv_surveillance_foreach_event_type_open( + mv_surveillance_event_type_cb callback, + void *user_data) +{ + StringVector eventTypes; + const int error = EventManager::getInstance().getSupportedEventTypes(eventTypes); + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Query events failed due to internal 
issues. Error code: %i", + error); + return error; + } + + StringConstIter eventIter = eventTypes.begin(); + while (eventIter != eventTypes.end()) { + if (!callback((*eventIter).c_str(), user_data)) { + break; + } + ++eventIter; + } + + return MEDIA_VISION_ERROR_NONE; +} + +int mv_surveillance_foreach_event_result_value_name_open( + const char *event_type, + mv_surveillance_event_result_name_cb callback, + void *user_data) +{ + StringVector eventResultValueNames; + + int error = MEDIA_VISION_ERROR_NONE; + + if (NULL == event_type) { + error = EventManager::getInstance().getSupportedEventResultValueNames( + eventResultValueNames); + } else { + error = EventManager::getInstance().getSupportedEventResultValueNames( + event_type, + eventResultValueNames); + } + + if (error != MEDIA_VISION_ERROR_NONE) { + LOGE("Query result value names failed due to internal issues. " + "Error code: %i", error); + return error; + } + + StringConstIter ervnIter = eventResultValueNames.begin(); + while (ervnIter != eventResultValueNames.end()) { + if (!callback((*ervnIter).c_str(), user_data)) { + break; + } + ++ervnIter; + } + + return MEDIA_VISION_ERROR_NONE; +} + +int mv_surveillance_get_result_value_open( + mv_surveillance_result_h result, + const char *value_name, + void *value) +{ + MEDIA_VISION_INSTANCE_CHECK(result); + MEDIA_VISION_NULL_ARG_CHECK(value_name); + MEDIA_VISION_NULL_ARG_CHECK(value); + + EventResult *eventResult = (EventResult*) result; + + return eventResult->getResultValue(value_name, value); +} diff --git a/mv_surveillance/surveillance_lic/CMakeLists.txt b/mv_surveillance/surveillance_lic/CMakeLists.txt new file mode 100644 index 0000000..266b94a --- /dev/null +++ b/mv_surveillance/surveillance_lic/CMakeLists.txt @@ -0,0 +1,25 @@ +project(${MV_SURVEILLANCE_LIB_NAME}) +cmake_minimum_required(VERSION 2.6) + +set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG) + +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) 
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) + +include_directories("${INC_DIR}") +include_directories("${PROJECT_SOURCE_DIR}/include") +include_directories("${PROJECT_SOURCE_DIR}/src") + +file(GLOB MV_SURVEILLANCE_INC_LIST "${PROJECT_SOURCE_DIR}/include/*.h") +file(GLOB MV_SURVEILLANCE_SRC_LIST "${PROJECT_SOURCE_DIR}/src/*.c") + +if(FORCED_STATIC_BUILD) + add_library(${PROJECT_NAME} STATIC ${MV_SURVEILLANCE_INC_LIST} ${MV_SURVEILLANCE_SRC_LIST}) +else() + add_library(${PROJECT_NAME} SHARED ${MV_SURVEILLANCE_INC_LIST} ${MV_SURVEILLANCE_SRC_LIST}) +endif() + +target_link_libraries(${PROJECT_NAME} ${MV_COMMON_LIB_NAME}) + +INSTALL(TARGETS ${PROJECT_NAME} DESTINATION lib) diff --git a/mv_surveillance/surveillance_lic/include/mv_surveillance_lic.h b/mv_surveillance/surveillance_lic/include/mv_surveillance_lic.h new file mode 100644 index 0000000..16bd4fc --- /dev/null +++ b/mv_surveillance/surveillance_lic/include/mv_surveillance_lic.h @@ -0,0 +1,187 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __TIZEN_MEDIAVISION_SURVEILLANCE_LIC_H__ +#define __TIZEN_MEDIAVISION_SURVEILLANCE_LIC_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @file mv_surveillance_lic.h + * @brief This file contains the Media Vision surveillance API + */ + +/** + * @brief Allows to subscribe to the event and start calling @a callback + * each time when the @a source is pushed using + * @ref mv_surveillance_push_source_lic() and event is detected. + * + * @since_tizen 3.0 + * @remarks To stop handling triggering use + * @ref mv_surveillance_unsubscribe_event_trigger_lic(). + * @param [in] event_trigger The event trigger activating calls of the + * @a callback function + * @param [in] engine_cfg The engine configuration of the event + * @param [in] callback Callback to be called each time when event + * occurrence is detected + * @param [in] user_data The user data to be passed to the callback function + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @post @ref mv_surveillance_event_occurred_cb + * + * @see mv_surveillance_event_trigger_s + * @see mv_surveillance_unsubscribe_event_trigger_lic() + */ +int mv_surveillance_subscribe_event_trigger_lic( + mv_surveillance_event_trigger_h event_trigger, + mv_engine_config_h engine_cfg, + mv_surveillance_event_occurred_cb callback, + void *user_data); + +/** + * @brief Allows to unsubscribe from the event and stop calling @a callback. + * + * @since_tizen 3.0 + * @remarks To start handling trigger activation use + @ref mv_surveillance_subscribe_event_trigger_lic(). 
+ * @param [in] event_trigger The event trigger for which subscription will be + * stopped + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @post @ref mv_surveillance_event_occurred_cb + * + * @see mv_surveillance_event_trigger_s + * @see mv_surveillance_subscribe_event_trigger_lic() + */ +int mv_surveillance_unsubscribe_event_trigger_lic( + mv_surveillance_event_trigger_h event_trigger); + +/** + * @brief Allows to push source to the event trigger and start calling @a callback. + * + * @since_tizen 3.0 + * @param [in] source The handle to the media source + * @param [in] video_stream_id The video stream, wthich will be updated + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @see mv_surveillance_event_trigger_s + * @see mv_surveillance_event_occurred_cb + * @see mv_surveillance_subscribe_event_trigger_lic() + * @see mv_surveillance_unsubscribe_event_trigger_lic() + */ +int mv_surveillance_push_source_lic( + mv_source_h source, + int video_stream_id); + +/** + * @brief Starts traversing through list of supported event types. 
+ * + * @since_tizen 3.0 + * @remarks Supported event types and their descriptions can be found in + * @ref CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES documentation + * section + * @param [in] callback The callback function to be called for each + * supported event type + * @param [in] user_data The user data to be passed to the @a callback + * function + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @see mv_surveillance_event_type_cb + * @see mv_surveillance_foreach_event_result_value_name_lic() + */ +int mv_surveillance_foreach_event_type_lic( + mv_surveillance_event_type_cb callback, + void *user_data); + +/** + * @brief Starts traversing through list of supported event result value names. + * + * @since_tizen 3.0 + * @remarks Supported event types, event result value names and their + * descriptions can be found in + * @ref CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES documentation + * section + * @param [in] event_type The name of the event type for which result value + * names will be passed to the @a callback. Can be + * set @c NULL. 
If set @c NULL then all supported + * event result value names will be traversed + * @param [in] callback The callback function to be called for each + * supported event result value name + * @param [in] user_data The user data to be passed to the @a callback + * function + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @see mv_surveillance_event_result_value_name_cb + * @see mv_surveillance_foreach_event_type_lic() + * @see mv_surveillance_get_result_value_lic() + */ +int mv_surveillance_foreach_event_result_value_name_lic( + const char *event_type, + mv_surveillance_event_result_value_name_cb callback, + void *user_data); + +/** + * @brief Gets result value. + * @details See the output values names in the event types descriptions located + * in /usr/share/config/capi-media-vision/surveillance-event-types.txt. 
+ * + * @since_tizen 3.0 + * @param [in] result The handle to the event result + * @param [in] value_name The name of the value to be gotten + * @param [in, out] value The pointer to variable which will be filled + * by result value + * @return @c 0 on success, otherwise a negative error value + * @retval #MEDIA_VISION_ERROR_NONE Successful + * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported + * + * @pre Memory for value has to be allocated + * + * @see mv_surveillance_event_trigger_s + * @see mv_surveillance_event_occurred_cb + * @see mv_surveillance_subscribe_event_trigger_lic() + * @see mv_surveillance_unsubscribe_event_trigger_lic() + * @see mv_surveillance_query_events_lic() + */ +int mv_surveillance_get_result_value_lic( + mv_surveillance_result_h result, + const char *value_name, + void *value); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __TIZEN_MEDIAVISION_SURVEILLANCE_LIC_H__ */ diff --git a/mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c b/mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c new file mode 100644 index 0000000..95acb63 --- /dev/null +++ b/mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c @@ -0,0 +1,63 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "mv_surveillance_lic.h" + +int mv_surveillance_subscribe_event_trigger_lic( + mv_surveillance_event_trigger_h event_trigger, + mv_engine_config_h engine_cfg, + mv_surveillance_event_occurred_cb callback, + void *user_data) +{ + return MEDIA_VISION_ERROR_NOT_SUPPORTED; +} + + +int mv_surveillance_unsubscribe_event_trigger_lic( + mv_surveillance_event_trigger_h event_trigger) +{ + return MEDIA_VISION_ERROR_NOT_SUPPORTED; +} + +int mv_surveillance_push_source_lic( + mv_source_h source, + int video_stream_id) +{ + return MEDIA_VISION_ERROR_NOT_SUPPORTED; +} + +int mv_surveillance_foreach_event_type_lic( + mv_surveillance_event_type_cb callback, + void *user_data) +{ + return MEDIA_VISION_ERROR_NOT_SUPPORTED; +} + +int mv_surveillance_foreach_event_result_value_name_lic( + const char *event_type, + mv_surveillance_event_result_value_name_cb callback, + void *user_data) +{ + return MEDIA_VISION_ERROR_NOT_SUPPORTED; +} + +int mv_surveillance_get_result_value_lic( + mv_surveillance_result_h result, + const char *value_name, + void *value) +{ + return MEDIA_VISION_ERROR_NOT_SUPPORTED; +} diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index db59b53..d7eac4a 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.2.5 +Version: 0.3.0 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 @@ -49,6 +49,17 @@ export CFLAGS="$CFLAGS -DTIZEN_DEBUG_ENABLE" export CXXFLAGS="$CXXFLAGS -DTIZEN_DEBUG_ENABLE" export FFLAGS="$FFLAGS -DTIZEN_DEBUG_ENABLE" %endif + +%ifarch %{arm} +export CFLAGS="$CFLAGS -DENABLE_NEON" +export CXXFLAGS="$CXXFLAGS -DENABLE_NEON" +%endif + +%ifarch %{aarch64} +export CFLAGS="$CFLAGS -DENABLE_NEON" +export CXXFLAGS="$CXXFLAGS -DENABLE_NEON" +%endif + MAJORVER=`echo %{version} | awk 'BEGIN {FS="."}{print $1}'` %cmake . 
-DFULLVER=%{version} -DMAJORVER=${MAJORVER} diff --git a/src/mv_surveillance.c b/src/mv_surveillance.c new file mode 100644 index 0000000..48aa269 --- /dev/null +++ b/src/mv_surveillance.c @@ -0,0 +1,363 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mv_surveillance.h" + +#include "mv_surveillance_private.h" +#include "mv_private.h" + +#ifdef MEDIA_VISION_SURVEILLANCE_LICENSE_PORT + +/* Include headers of licensed surveillance module here. */ +#include "mv_surveillance_lic.h" + +#else + +/* Include headers of open surveillance module here. */ +#include "mv_surveillance_open.h" + +#endif /* MEDIA_VISION_SURVEILLANCE_LICENSE_PORT */ + +/** + * @file mv_surveillance.c + * @brief This file contains the porting layer for Media Vision surveillance module. 
+ */ + +static size_t __mv_surveillance_id_counter = 0; + +int mv_surveillance_event_trigger_create( + const char *event_type, + mv_surveillance_event_trigger_h * trigger) +{ + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(event_type); + MEDIA_VISION_NULL_ARG_CHECK(trigger); + MEDIA_VISION_FUNCTION_ENTER(); + + mv_surveillance_event_trigger_s *handle = + (mv_surveillance_event_trigger_s *) malloc( + sizeof(mv_surveillance_event_trigger_s)); + if (NULL == handle) { + LOGE("[%s] malloc fail", __func__); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } + + memset(handle, 0, sizeof(mv_surveillance_event_trigger_s)); + + // default values: + handle->trigger_id = ++__mv_surveillance_id_counter; + handle->event_type = strndup(event_type, 255); + handle->number_of_roi_points = 0; + handle->roi = NULL; + + *trigger = (mv_surveillance_event_trigger_h) handle; + + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_NONE; +} + +int mv_surveillance_event_trigger_destroy( + mv_surveillance_event_trigger_h trigger) +{ + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(trigger); + MEDIA_VISION_FUNCTION_ENTER(); + + mv_surveillance_event_trigger_s *handle = + (mv_surveillance_event_trigger_s *) trigger; + free(handle->event_type); + free(handle->roi); + free((mv_surveillance_event_trigger_s *) trigger); + + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_NONE; +} + +int mv_surveillance_get_event_trigger_type( + mv_surveillance_event_trigger_h trigger, + char **event_type) +{ + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(trigger); + 
MEDIA_VISION_NULL_ARG_CHECK(event_type); + MEDIA_VISION_FUNCTION_ENTER(); + + mv_surveillance_event_trigger_s *handle = + (mv_surveillance_event_trigger_s *)trigger; + *event_type = strndup(handle->event_type, 255); + + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_NONE; +} + +int mv_surveillance_set_event_trigger_roi( + mv_surveillance_event_trigger_h trigger, + int number_of_points, + mv_point_s *roi) +{ + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(trigger); + MEDIA_VISION_NULL_ARG_CHECK(roi); + MEDIA_VISION_FUNCTION_ENTER(); + + mv_surveillance_event_trigger_s *handle = + (mv_surveillance_event_trigger_s *)trigger; + + handle->number_of_roi_points = number_of_points; + handle->roi = (mv_point_s*) malloc(sizeof(mv_point_s) * number_of_points); + + if (NULL == handle->roi) { + LOGE("[%s] malloc fail", __func__); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } + + int i = 0; + for (; i < number_of_points; ++i) { + handle->roi[i].x = roi[i].x; + handle->roi[i].y = roi[i].y; + } + + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_NONE; +} + +int mv_surveillance_get_event_trigger_roi( + mv_surveillance_event_trigger_h trigger, + int *number_of_points, + mv_point_s ** roi) +{ + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(trigger); + MEDIA_VISION_NULL_ARG_CHECK(number_of_points); + MEDIA_VISION_NULL_ARG_CHECK(roi); + MEDIA_VISION_FUNCTION_ENTER(); + + mv_surveillance_event_trigger_s *handle = + (mv_surveillance_event_trigger_s *) trigger; + + *number_of_points = handle->number_of_roi_points; + if (0 == *number_of_points) + { + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_NONE; + } + *roi = (mv_point_s *) malloc( + sizeof(mv_point_s) * 
handle->number_of_roi_points); + + int i = 0; + for (; i < handle->number_of_roi_points; ++i) { + (*roi)[i].x = handle->roi[i].x; + (*roi)[i].y = handle->roi[i].y; + } + + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_NONE; +} + +int mv_surveillance_subscribe_event_trigger( + mv_surveillance_event_trigger_h event_trigger, + int video_stream_id, + mv_engine_config_h engine_cfg, + mv_surveillance_event_occurred_cb callback, + void *user_data) +{ + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(event_trigger); + MEDIA_VISION_NULL_ARG_CHECK(callback); + MEDIA_VISION_FUNCTION_ENTER(); + +#ifdef MEDIA_VISION_SURVEILLANCE_LICENSE_PORT + + /* Use licensed surveillance functionality here. */ + const int ret = mv_surveillance_subscribe_event_trigger_lic( + event_trigger, + video_stream_id, + engine_cfg, + callback, + user_data); + +#else + + /* Use open surveillance functionality here. */ + const int ret = mv_surveillance_subscribe_event_trigger_open( + event_trigger, + video_stream_id, + engine_cfg, + callback, + user_data); + +#endif /* MEDIA_VISION_SURVEILLANCE_LICENSE_PORT */ + + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; +} + +int mv_surveillance_unsubscribe_event_trigger( + mv_surveillance_event_trigger_h event_trigger, + int video_stream_id) +{ + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(event_trigger); + MEDIA_VISION_FUNCTION_ENTER(); + +#ifdef MEDIA_VISION_SURVEILLANCE_LICENSE_PORT + + /* Use licensed surveillance functionality here. */ + const int ret = mv_surveillance_unsubscribe_event_trigger_lic( + event_trigger, + video_stream_id); + +#else + + /* Use open surveillance functionality here. 
*/ + const int ret = mv_surveillance_unsubscribe_event_trigger_open( + event_trigger, + video_stream_id); + +#endif /* MEDIA_VISION_SURVEILLANCE_LICENSE_PORT */ + + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; +} + +int mv_surveillance_push_source( + mv_source_h source, + int video_stream_id) +{ + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_FUNCTION_ENTER(); + +#ifdef MEDIA_VISION_SURVEILLANCE_LICENSE_PORT + + /* Use licensed surveillance functionality here. */ + const int ret = mv_surveillance_push_source_lic(source, video_stream_id); + +#else + + /* Use open surveillance functionality here. */ + const int ret = mv_surveillance_push_source_open(source, video_stream_id); + +#endif /* MEDIA_VISION_SURVEILLANCE_LICENSE_PORT */ + + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; +} + +int mv_surveillance_foreach_supported_event_type( + mv_surveillance_event_type_cb callback, + void *user_data) +{ + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(callback); + MEDIA_VISION_FUNCTION_ENTER(); + +#ifdef MEDIA_VISION_SURVEILLANCE_LICENSE_PORT + + /* Use licensed surveillance functionality here. */ + const int ret = mv_surveillance_foreach_event_type_lic( + callback, + user_data); + +#else + + /* Use open surveillance functionality here. 
*/ + const int ret = mv_surveillance_foreach_event_type_open( + callback, + user_data); + +#endif /* MEDIA_VISION_SURVEILLANCE_LICENSE_PORT */ + + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; +} + +int mv_surveillance_foreach_event_result_name( + const char *event_type, + mv_surveillance_event_result_name_cb callback, + void *user_data) +{ + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_NULL_ARG_CHECK(event_type); + MEDIA_VISION_NULL_ARG_CHECK(callback); + MEDIA_VISION_FUNCTION_ENTER(); + +#ifdef MEDIA_VISION_SURVEILLANCE_LICENSE_PORT + + /* Use licensed surveillance functionality here. */ + const int ret = mv_surveillance_foreach_event_result_value_name_lic( + event_type, + callback, + user_data); + +#else + + /* Use open surveillance functionality here. */ + const int ret = mv_surveillance_foreach_event_result_value_name_open( + event_type, + callback, + user_data); + +#endif /* MEDIA_VISION_SURVEILLANCE_LICENSE_PORT */ + + MEDIA_VISION_FUNCTION_LEAVE(); + + return ret; +} + +int mv_surveillance_get_result_value( + mv_surveillance_result_h result, + const char *value_name, + void *value) +{ + MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported()); + MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported()); + MEDIA_VISION_INSTANCE_CHECK(result); + MEDIA_VISION_NULL_ARG_CHECK(value_name); + MEDIA_VISION_NULL_ARG_CHECK(value); + MEDIA_VISION_FUNCTION_ENTER(); + +#ifdef MEDIA_VISION_SURVEILLANCE_LICENSE_PORT + + /* Use licensed surveillance functionality here. */ + const int ret = mv_surveillance_get_result_value_lic( + result, + value_name, + value); + +#else + + /* Use open surveillance functionality here. 
*/ + const int ret = mv_surveillance_get_result_value_open( + result, + value_name, + value); + +#endif /* MEDIA_VISION_SURVEILLANCE_LICENSE_PORT */ + + MEDIA_VISION_FUNCTION_LEAVE(); + return ret; +} diff --git a/test/testsuites/CMakeLists.txt b/test/testsuites/CMakeLists.txt index 16a2eb6..fdb2d20 100644 --- a/test/testsuites/CMakeLists.txt +++ b/test/testsuites/CMakeLists.txt @@ -11,3 +11,4 @@ add_subdirectory(${PROJECT_SOURCE_DIR}/common) add_subdirectory(${PROJECT_SOURCE_DIR}/barcode) add_subdirectory(${PROJECT_SOURCE_DIR}/face) add_subdirectory(${PROJECT_SOURCE_DIR}/image) +add_subdirectory(${PROJECT_SOURCE_DIR}/surveillance) \ No newline at end of file diff --git a/test/testsuites/surveillance/CMakeLists.txt b/test/testsuites/surveillance/CMakeLists.txt new file mode 100644 index 0000000..80ac1ac --- /dev/null +++ b/test/testsuites/surveillance/CMakeLists.txt @@ -0,0 +1,31 @@ +project(mv_surveillance_test_suite) +cmake_minimum_required(VERSION 2.6) + +set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG) + +if(NOT SKIP_WARNINGS) + set(CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra -Werror") +endif() + +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${LIB_INSTALL_DIR}) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${LIB_INSTALL_DIR}) +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) + +include_directories(${PROJECT_SOURCE_DIR}) +include_directories(${MV_CAPI_MEDIA_VISION_INC_DIR}) +include_directories(${INC_TS_COMMON}) +include_directories(${INC_VIDEO_HELPER}) +include_directories(${INC_IMAGE_HELPER}) + +file(GLOB MV_SURVEILLANCE_TS_INC_LIST "${PROJECT_SOURCE_DIR}/*.h") +file(GLOB MV_SURVEILLANCE_TS_SRC_LIST "${PROJECT_SOURCE_DIR}/*.c") + +add_executable(${PROJECT_NAME} ${MV_SURVEILLANCE_TS_SRC_LIST} + ${MV_SURVEILLANCE_TS_INC_LIST} + ${MV_CAPI_MEDIA_VISION_INC_LIST}) + +target_link_libraries(${PROJECT_NAME} capi-media-vision + mv_testsuite_common + mv_image_helper) + +install(TARGETS ${PROJECT_NAME} 
DESTINATION ${testsuites_dir}) diff --git a/test/testsuites/surveillance/surveillance_test_suite.c b/test/testsuites/surveillance/surveillance_test_suite.c new file mode 100644 index 0000000..6057121 --- /dev/null +++ b/test/testsuites/surveillance/surveillance_test_suite.c @@ -0,0 +1,1137 @@ +/** + * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mv_testsuite_common.h" +#include "image_helper.h" + +#include "mv_log_cfg.h" + +#include "mv_private.h" +#include "mv_surveillance.h" + +#include <limits.h> + +#define MAX_EVENTS_NUMBER 101 + +#define MAX_EVENT_TYPE_LEN 255 + +#define MIN_NUMBER_OF_ROI_POINTS 3 +#define MAX_NUMBER_OF_ROI_POINTS 100 + +#define MIN_ROI_X_COORD 0 +#define MAX_ROI_X_COORD 10000 +#define MIN_ROI_Y_COORD 0 +#define MAX_ROI_Y_COORD 10000 + +/*----------------------------------------------------*/ + +#define PRINT_R(MSG) printf(TEXT_RED MSG "\n" TEXT_RESET) +#define PRINT_Y(MSG) printf(TEXT_YELLOW MSG "\n" TEXT_RESET) +#define PRINT_G(MSG) printf(TEXT_GREEN MSG "\n" TEXT_RESET); + +#define PRINT_E(MSG, ERR) printf(TEXT_RED MSG "\n" TEXT_RESET, ERR) +#define PRINT_W(MSG, WARN) printf(TEXT_YELLOW MSG "\n" TEXT_RESET, WARN) +#define PRINT_S(MSG, RES) printf(TEXT_GREEN MSG "\n" TEXT_RESET, RES) + +/*----------------------------------------------------*/ +/* static */ +static mv_surveillance_event_trigger_h is_subscribed[MAX_EVENTS_NUMBER]; +static int 
video_streams_ids[MAX_EVENTS_NUMBER]; +static unsigned int trigger_id_cnt = 0; +static const int green_color[] = {0, 255, 0}; +static const int red_color[] = {0, 0, 255}; +static const int blue_color[] = {255, 0, 0}; +static bool save_results_to_image = false; +/*----------------------------------------------------*/ +/*functions*/ + +/* initializes is_subscribed by false */ +void init_is_subscribed(); + +/* prints identificators of subscribed events */ +void print_is_subscribed(); + +/* prints names of all available event types */ +void print_supported_events(); + +/* select event name from all available event types and creates trigger handle */ +int create_trigger_handle_by_event_name(mv_surveillance_event_trigger_h *handle); + +/* subscribes event */ +void subscribe_to_event(); + +/* adds ROI to event */ +void add_roi_to_event(mv_surveillance_event_trigger_h event_trigger); + +/* unsubscribes from event */ +void unsubscribe_from_event(); + +/* unsubscribes from all event */ +void unsubscribe_from_all_events(); + +/* pushes media source to event manager */ +void push_source(); + +/* fills engine configuration for person recognized event */ +bool fill_engine_cfg_person_recognized(mv_engine_config_h engine_cfg); + +/* Turn on (off) saving event results to image file */ +void turn_on_off_saving_to_image(); + +/*----------------------------------------------------*/ +/* callbacks */ + +void detect_person_appeared_cb( + mv_surveillance_event_trigger_h handle, + mv_source_h source, + int video_stream_id, + mv_surveillance_result_h event_result, + void *user_data); + +void person_recognized_cb( + mv_surveillance_event_trigger_h handle, + mv_source_h source, + int video_stream_id, + mv_surveillance_result_h event_result, + void *user_data); + +void movement_detected_cb( + mv_surveillance_event_trigger_h handle, + mv_source_h source, + int video_stream_id, + mv_surveillance_result_h event_result, + void *user_data); + 
+/*----------------------------------------------------*/ + +int main(void) +{ + LOGI("Surveillance Media Vision Testsuite is launched."); + + PRINT_W("Maximal number of events is %d", MAX_EVENTS_NUMBER - 1); + + init_is_subscribed(); + + const int options[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }; + const char *names[8] = { + "Get list of supported events", + "Get identificators of subscribed events", + "Subscribe to event", + "Unsubscribe from event", + "Unsubscribe from all events", + "Push source", + "Turn on (off) saving event result to image", + "Exit" + }; + + while(1) { + char exit = 'n'; + int sel_opt = show_menu("Select action:", options, names, 8); + switch (sel_opt) { + case 1: /* Get list of supported events */ + print_supported_events(); + break; + case 2: /* Get identificators of subscribed events */ + print_is_subscribed(); + break; + case 3: /* Subscribe to event */ + subscribe_to_event(); + break; + case 4: /* Unsubscribe from event */ + unsubscribe_from_event(); + break; + case 5: /* Unsubscribe from all events */ + unsubscribe_from_all_events(); + break; + case 6: /* Push source */ + push_source(); + break; + case 7: /* Save event results to image */ + turn_on_off_saving_to_image(); + break; + case 8: /* Exit */ + exit = 'y'; + break; + default: + PRINT_R("Invalid option."); + sel_opt = 0; + continue; + } + + if ('y' == exit) { + sel_opt = 0; + const int options_last[2] = { 1, 2 }; + const char *names_last[2] = { "No", "Yes" }; + + while (sel_opt == 0) { + sel_opt = show_menu("Are you sure?", + options_last, names_last, 2); + switch (sel_opt) { + case 1: + exit = 'n'; + break; + case 2: + exit = 'y'; + break; + default: + PRINT_R("Invalid option. 
Back to the main menu."); + sel_opt = 0; + break; + } + } + + if ('y' == exit) { + unsubscribe_from_all_events(); + break; + } + } + } + + LOGI("Surveillance Media Vision Testsuite is closed"); + + return 0; +} + +void init_is_subscribed() +{ + int i = 0; + for (; i < MAX_EVENTS_NUMBER; ++i) { + is_subscribed[i] = NULL; + video_streams_ids[i] = -1; + } +} + +void print_is_subscribed() +{ + PRINT_Y("List of subscribed events identificators:"); + + bool is_empty = true; + + int i = 0; + for (; i < MAX_EVENTS_NUMBER; ++i) { + if (NULL != is_subscribed[i]) { + printf("%d ", i); + is_empty = false; + } + } + + if (is_empty) + PRINT_Y("List of subscribed events is empty"); +} + +static const char *EVENT_TYPES_NAMES[MAX_EVENT_TYPE_LEN] = { + MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, + MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED, + MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED +}; + +static const unsigned int NUMBER_OF_TYPES = 3u; + +bool foreach_event_result_value_name_cb(const char *value_name, void *user_data) +{ + if (NULL == value_name) { + PRINT_R("\tError occurred. Value name is NULL"); + return true; + } + PRINT_W("%s", value_name); + return true; +} + +bool foreach_event_type_cb(const char *event_type, void *user_data) +{ + if (NULL == event_type) { + PRINT_R("Error occurred. 
Event type name is NULL"); + return true; + } + + PRINT_W("%s", event_type); + PRINT_G("\tList of supported event result value names is:"); + + const int error = mv_surveillance_foreach_event_result_name( + event_type, foreach_event_result_value_name_cb, user_data); + if (MEDIA_VISION_ERROR_NONE != error) { + PRINT_E("Error occurred when trying to get value names for " + "event named '%s'", event_type); + return true; + } + return true; +} + +void print_supported_events() +{ + PRINT_G("List of supported events is:"); + + const int error = mv_surveillance_foreach_supported_event_type( + foreach_event_type_cb, NULL); + + if (MEDIA_VISION_ERROR_NONE != error) + PRINT_R("Error occurred when trying to get list of event type names \n"); +} + +int create_trigger_handle_by_event_name( + mv_surveillance_event_trigger_h *handle) +{ + PRINT_G("\nSelect event type:"); + + unsigned int i = 0u; + for (; i < NUMBER_OF_TYPES; ++i) + printf("#%d. %s\n", i, EVENT_TYPES_NAMES[i]); + + unsigned int event_id = 0u; + while (input_size("Input event type (unsigned integer value):", + NUMBER_OF_TYPES - 1, &event_id) == -1) { + PRINT_R("Incorrect input! 
Try again.\n List of supported events is:"); + + unsigned int i = 0u; + for (; i < NUMBER_OF_TYPES; ++i) + printf("%d\t%s\n", i, EVENT_TYPES_NAMES[i]); + } + + const int error = mv_surveillance_event_trigger_create( + EVENT_TYPES_NAMES[event_id], handle); + if (MEDIA_VISION_ERROR_NONE != error) { + PRINT_E("mv_surveillance_event_trigger_create() error!\n" + "Error code: %i\n", error); + return error; + } + + return MEDIA_VISION_ERROR_NONE; +} + +bool try_destroy_event_trigger(mv_surveillance_event_trigger_h trigger) +{ + const int error = mv_surveillance_event_trigger_destroy(trigger); + if (MEDIA_VISION_ERROR_NONE != error) { + PRINT_E("Error with code %d was occured when try to destroy " + "event trigger.", error); + return false; + } + return true; +} + +void subscribe_to_event() +{ + if (++trigger_id_cnt >= MAX_EVENTS_NUMBER) { + PRINT_R("Maximal value of event trigger id is reached. " + "Subscription impossible"); + return; + } + + mv_surveillance_event_trigger_h event_trigger = NULL; + int error = create_trigger_handle_by_event_name(&event_trigger); + if (MEDIA_VISION_ERROR_NONE != error) { + PRINT_E("Error occurred when creating of event trigger. " + "Error code: %i", error); + try_destroy_event_trigger(event_trigger); + return; + } + + int video_stream_id = 0; + + while (input_int("Input video stream identificator (integer value):", + INT_MIN, + INT_MAX, + &video_stream_id) == -1) + PRINT_R("Incorrect input! Try again."); + + char *event_type = NULL; + error = mv_surveillance_get_event_trigger_type(event_trigger, &event_type); + if (MEDIA_VISION_ERROR_NONE != error) { + PRINT_E("Error occurred when getting of event trigger type. 
" + "Error code: %i", error); + try_destroy_event_trigger(event_trigger); + return; + } + + if (show_confirm_dialog("Would you like to set ROI (Region Of Interest)?")) + add_roi_to_event(event_trigger); + + if (strncmp(event_type, + MV_SURVEILLANCE_EVENT_TYPE_PERSON_APPEARED_DISAPPEARED, + MAX_EVENT_TYPE_LEN) == 0) { + error = mv_surveillance_subscribe_event_trigger( + event_trigger, + video_stream_id, + NULL, + detect_person_appeared_cb, + NULL); + } else if (strncmp(event_type, + MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED, + MAX_EVENT_TYPE_LEN) == 0) { + PRINT_Y("Please create and save face recognition models\n" + "before subscribing to event. Use mv_face_test_suite."); + + mv_engine_config_h engine_cfg = NULL; + error = mv_create_engine_config(&engine_cfg); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_R("Failed to create engine configuration for event trigger."); + try_destroy_event_trigger(event_trigger); + free(event_type); + return; + } + + const bool is_filled = fill_engine_cfg_person_recognized(engine_cfg); + + if (!is_filled) { + PRINT_R("Failed to fill engine configuration for event trigger."); + try_destroy_event_trigger(event_trigger); + if (mv_destroy_engine_config(engine_cfg) != MEDIA_VISION_ERROR_NONE) + PRINT_E("Failed to destroy engine configuration for event trigger.", + error); + free(event_type); + return; + } + + error = mv_surveillance_subscribe_event_trigger( + event_trigger, + video_stream_id, + engine_cfg, + person_recognized_cb, + NULL); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Subscription failed. 
Error code: %i.", error); + try_destroy_event_trigger(event_trigger); + if (mv_destroy_engine_config(engine_cfg) != MEDIA_VISION_ERROR_NONE) + PRINT_E("Failed to destroy engine configuration for event trigger.", + error); + free(event_type); + return; + } + + if (mv_destroy_engine_config(engine_cfg) != MEDIA_VISION_ERROR_NONE) + PRINT_E("Failed to destroy engine configuration for event trigger.", + error); + } else if (strncmp(event_type, + MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, + MAX_EVENT_TYPE_LEN) == 0) { + error = mv_surveillance_subscribe_event_trigger( + event_trigger, + video_stream_id, + NULL, + movement_detected_cb, + NULL); + } + + free(event_type); + + if (MEDIA_VISION_ERROR_NONE != error) { + PRINT_E("Error with code %d was occurred in subscribe event.", error); + try_destroy_event_trigger(event_trigger); + return; + } + + is_subscribed[trigger_id_cnt] = event_trigger; + video_streams_ids[trigger_id_cnt] = video_stream_id; + PRINT_S("Event trigger %i has been successfully subscribed", trigger_id_cnt); +} + +void add_roi_to_event(mv_surveillance_event_trigger_h event_trigger) +{ + int number_of_roi_points = 0; + while (input_int("Input number of ROI points (integer value >2):", + MIN_NUMBER_OF_ROI_POINTS, + MAX_NUMBER_OF_ROI_POINTS, + &number_of_roi_points) == -1) + PRINT_R("Incorrect input! Try again."); + + mv_point_s* roi = (mv_point_s*) malloc(sizeof(mv_point_s) * number_of_roi_points); + + int x = 0; + int y = 0; + + int i = 0; + for (; i < number_of_roi_points; ++i) { + printf("Point %d \n", i + 1); + + while (input_int("Input x (integer value):", + MIN_ROI_X_COORD, + MAX_ROI_X_COORD, + &x) == -1) + PRINT_R("Incorrect input! Try again."); + + while (input_int("Input y (integer value):", + MIN_ROI_Y_COORD, + MAX_ROI_Y_COORD, + &y) == -1) + PRINT_R("Incorrect input! 
Try again."); + + roi[i].x = x; + roi[i].y = y; + } + + const int error = mv_surveillance_set_event_trigger_roi( + event_trigger, + number_of_roi_points, + roi); + + if (error == MEDIA_VISION_ERROR_NONE) + PRINT_G("ROI was successfully set") + else + PRINT_R("Setting ROI failed. Please try again") ; + + if (roi != NULL) + free(roi); +} + +void unsubscribe_from_event() +{ + int trigger_id = 0; + while (input_int("Input event identificator (1-100):", + 1, + MAX_EVENTS_NUMBER - 1, + &trigger_id) == -1) + PRINT_R("Incorrect input! Try again."); + + mv_surveillance_event_trigger_h event_trigger = is_subscribed[trigger_id]; + if (NULL == event_trigger) { + PRINT_E("Sorry, event trigger with %i identifier wasn't subscribed.", + trigger_id); + return; + } + + const int error = mv_surveillance_unsubscribe_event_trigger( + event_trigger, + video_streams_ids[trigger_id]); + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occured in unsubscribe event.", error); + return; + } + + try_destroy_event_trigger(event_trigger); + is_subscribed[trigger_id] = NULL; + video_streams_ids[trigger_id] = -1; + PRINT_S("Event with id %d was successfully unsubscribed", trigger_id); +} + +void unsubscribe_from_all_events() +{ + int error = MEDIA_VISION_ERROR_NONE; + unsigned int trigger_id = 0; + int unsubscribed_number = 0; + for (; trigger_id < MAX_EVENTS_NUMBER; ++trigger_id) { + mv_surveillance_event_trigger_h event_trigger = + is_subscribed[trigger_id]; + if (NULL != event_trigger) { + error = mv_surveillance_unsubscribe_event_trigger( + event_trigger, + video_streams_ids[trigger_id]); + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occurred in unsubscribe event.", + error); + continue; + } + ++unsubscribed_number; + + PRINT_S("Event with id %d was successfully unsubscribed", trigger_id); + + try_destroy_event_trigger(event_trigger); + is_subscribed[trigger_id] = NULL; + video_streams_ids[trigger_id] = -1; + } + } + + unsubscribed_number 
> 0 ? + PRINT_S("%d event(s) was successfully unsubscribed", unsubscribed_number): + PRINT_Y("\nThere are no triggers can be unsubscribed."); +} + +void push_source() +{ + mv_source_h source; + int error = mv_create_source(&source); + if (MEDIA_VISION_ERROR_NONE != error) { + PRINT_E("ERROR: Errors were occurred during source creating!!! Code %i" , + error); + return; + } + + char *path_to_image = NULL; + + while (input_string("Input file name with image to be analyzed:", + 1024, &path_to_image) == -1) + PRINT_R("Incorrect input! Try again."); + + error = load_mv_source_from_file(path_to_image, source); + if (MEDIA_VISION_ERROR_NONE != error) { + PRINT_E("Errors were occurred during source loading, code %i", error); + return; + } + + if (path_to_image != NULL) + free(path_to_image); + + int video_stream_id = 0; + + while (input_int("Input video stream identificator (integer value):", + INT_MIN, + INT_MAX, + &video_stream_id) == -1) + PRINT_R("Incorrect input! Try again."); + + error = mv_surveillance_push_source(source, video_stream_id); + if (MEDIA_VISION_ERROR_NONE != error) { + PRINT_E("Errors were occurred during source pushing, code %i", error); + return; + } + + error = mv_destroy_source(source); + if (MEDIA_VISION_ERROR_NONE != error) { + PRINT_E("Errors were occurred during source destroying, code %i", error); + return; + } + + PRINT_G("Media source was successfully pushed"); +} + +bool fill_engine_cfg_person_recognized(mv_engine_config_h engine_cfg) +{ + char *path_to_model = NULL; + + while (input_string("Input file name with face recognition model:", + 1024, &path_to_model) == -1) + PRINT_R("Incorrect input! 
Try again."); + + const int error = mv_engine_config_set_string_attribute( + engine_cfg, + MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH, + path_to_model); + + if (error != MEDIA_VISION_ERROR_NONE) + PRINT_E("Setting path to face recognition model failed, code %i", + error); + else + printf("\nModel path is %s \n", path_to_model); + + if (path_to_model != NULL) + free(path_to_model); + + return true; +} + +void turn_on_off_saving_to_image() +{ + save_results_to_image = !save_results_to_image; + + save_results_to_image ? + PRINT_Y("Save event results to image files ON."): + PRINT_Y("Save event results to image files OFF."); +} + +void detect_person_appeared_cb( + mv_surveillance_event_trigger_h handle, + mv_source_h source, + int video_stream_id, + mv_surveillance_result_h event_result, + void *user_data) +{ + PRINT_G("Person appeared / disappeared event was occured"); + if (save_results_to_image) + PRINT_G("Output image will be saved to /tmp/person_app.jpg.\n" + "Appeared locations - green;\n" + "Tracked locations - blue;\n" + "Disappeared locations - red."); + + unsigned char *out_buffer = NULL; + unsigned int buf_size = 0; + image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID }; + + if (save_results_to_image && + (mv_source_get_buffer(source, &out_buffer, &buf_size) || + mv_source_get_width(source, &(image_data.image_width)) || + mv_source_get_height(source, &(image_data.image_height)) || + mv_source_get_colorspace(source, &(image_data.image_colorspace)) || + out_buffer == NULL || + buf_size == 0)) + { + PRINT_R("ERROR: Creating out image is impossible."); + + return; + } + + unsigned char *out_buffer_copy = NULL; + if (save_results_to_image) { + out_buffer_copy = (unsigned char *) malloc(buf_size); + memcpy(out_buffer_copy, out_buffer, buf_size); + } + + int number_of_appeared_persons = 0; + int error = mv_surveillance_get_result_value( + event_result, + MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER, + &number_of_appeared_persons); + + if (error != 
MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occured in getting number of " + "appeared persons.", error); + if (out_buffer_copy != NULL) + free(out_buffer_copy); + + return; + } + + printf("\nNumber of appeared persons is %d \n", number_of_appeared_persons); + + mv_rectangle_s *appeared_locations = + malloc(sizeof(mv_rectangle_s) * number_of_appeared_persons); + + error = mv_surveillance_get_result_value( + event_result, + MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS, + appeared_locations); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occured in getting locations of " + "appeared persons.", error); + + if (appeared_locations != NULL) + free(appeared_locations); + + if (out_buffer_copy != NULL) + free(out_buffer_copy); + + return; + } + + int i = 0; + for (; i < number_of_appeared_persons; ++i) { + printf("Person #%d location is: x - %d, y - %d, w - %d, h - %d.\n", + i, + appeared_locations[i].point.x, + appeared_locations[i].point.y, + appeared_locations[i].width, + appeared_locations[i].height); + + if (save_results_to_image) + draw_rectangle_on_buffer( + appeared_locations[i].point.x, + appeared_locations[i].point.y, + appeared_locations[i].point.x + appeared_locations[i].width, + appeared_locations[i].point.y + appeared_locations[i].height, + 3, + green_color, + &image_data, + out_buffer_copy); + } + + int number_of_tracked_persons = 0; + error = mv_surveillance_get_result_value( + event_result, + MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER, + &number_of_tracked_persons); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occured in getting number of " + "tracked persons.", error); + + if (appeared_locations != NULL) + free(appeared_locations); + + if (out_buffer_copy != NULL) + free(out_buffer_copy); + + return; + } + + printf("\nNumber of tracked persons is %d \n", number_of_tracked_persons); + + mv_rectangle_s *tracked_locations = + malloc(sizeof(mv_rectangle_s) * 
number_of_tracked_persons); + + error = mv_surveillance_get_result_value( + event_result, + MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS, + tracked_locations); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occured in getting locations of " + "tracked persons.", error); + + if (appeared_locations != NULL) + free(appeared_locations); + + if (tracked_locations != NULL) + free(tracked_locations); + + if (out_buffer_copy != NULL) + free(out_buffer_copy); + + return; + } + + for (i = 0; i < number_of_tracked_persons; ++i) { + printf("Person #%d location is: x - %d, y - %d, w - %d, h - %d.\n", + i, + tracked_locations[i].point.x, + tracked_locations[i].point.y, + tracked_locations[i].width, + tracked_locations[i].height); + + if (save_results_to_image) + draw_rectangle_on_buffer( + tracked_locations[i].point.x, + tracked_locations[i].point.y, + tracked_locations[i].point.x + tracked_locations[i].width, + tracked_locations[i].point.y + tracked_locations[i].height, + 3, + blue_color, + &image_data, + out_buffer_copy); + } + + int number_of_disappeared_persons = 0; + error = mv_surveillance_get_result_value( + event_result, + MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER, + &number_of_disappeared_persons); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occured in getting number of " + "disappeared persons.", error); + + if (appeared_locations != NULL) + free(appeared_locations); + + if (tracked_locations != NULL) + free(tracked_locations); + + if (out_buffer_copy != NULL) + free(out_buffer_copy); + + return; + } + + printf("\nNumber of disappeared persons is %d \n", number_of_disappeared_persons); + + mv_rectangle_s *disappeared_locations = + malloc(sizeof(mv_rectangle_s) * number_of_disappeared_persons); + + error = mv_surveillance_get_result_value( + event_result, + MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS, + disappeared_locations); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was 
occured in getting locations of " + "disappeared persons.", error); + + if (appeared_locations != NULL) + free(appeared_locations); + + if (tracked_locations != NULL) + free(tracked_locations); + + if (disappeared_locations != NULL) + free(disappeared_locations); + + if (out_buffer_copy != NULL) + free(out_buffer_copy); + + return; + } + + for (i = 0; i < number_of_disappeared_persons; ++i) { + printf("Person #%d location is: x - %d, y - %d, w - %d, h - %d.\n", + i, + disappeared_locations[i].point.x, + disappeared_locations[i].point.y, + disappeared_locations[i].width, + disappeared_locations[i].height); + + if (save_results_to_image) + draw_rectangle_on_buffer( + disappeared_locations[i].point.x, + disappeared_locations[i].point.y, + disappeared_locations[i].point.x + disappeared_locations[i].width, + disappeared_locations[i].point.y + disappeared_locations[i].height, + 3, + red_color, + &image_data, + out_buffer_copy); + } + + if (save_results_to_image) + save_image_from_buffer("/tmp/person_app.jpg", out_buffer_copy, &image_data, 100); + + printf("\n"); + + if (appeared_locations != NULL) + free(appeared_locations); + + if (tracked_locations != NULL) + free(tracked_locations); + + if (disappeared_locations != NULL) + free(disappeared_locations); + + if (out_buffer_copy != NULL) + free(out_buffer_copy); +} + +void person_recognized_cb( + mv_surveillance_event_trigger_h handle, + mv_source_h source, + int video_stream_id, + mv_surveillance_result_h event_result, + void *user_data) +{ + PRINT_G("Person recognized event was occurred"); + if (save_results_to_image) + PRINT_G("Output image will be saved to /tmp/person_rec.jpg.\n" + "Person recognized locations - red."); + + int number_of_persons = 0; + int error = mv_surveillance_get_result_value( + event_result, + MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER, + &number_of_persons); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occured in getting number of persons.", + error); + return; + 
} + + printf("\nNumber of persons is %d \n\n", number_of_persons); + + mv_rectangle_s *locations = malloc(sizeof(mv_rectangle_s) * number_of_persons); + + error = mv_surveillance_get_result_value( + event_result, + MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS, + locations); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occured in getting locations of persons.", + error); + + if (locations != NULL) + free(locations); + + return; + } + + int *labels = malloc(sizeof(int) * number_of_persons); + + error = mv_surveillance_get_result_value( + event_result, + MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS, + labels); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occured in getting labels of persons.", + error); + + if (locations != NULL) + free(locations); + + if (labels != NULL) + free(labels); + + return; + } + + double *confidences = malloc(sizeof(double) * number_of_persons); + + error = mv_surveillance_get_result_value( + event_result, + MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES, + confidences); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occured in getting confidences of persons.", + error); + + if (locations != NULL) + free(locations); + + if (labels != NULL) + free(labels); + + if (confidences != NULL) + free(confidences); + + return; + } + + unsigned char *out_buffer = NULL; + unsigned int buf_size = 0; + image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID }; + + if (save_results_to_image && + (mv_source_get_buffer(source, &out_buffer, &buf_size) || + mv_source_get_width(source, &(image_data.image_width)) || + mv_source_get_height(source, &(image_data.image_height)) || + mv_source_get_colorspace(source, &(image_data.image_colorspace)) || + out_buffer == NULL || + buf_size == 0)) + { + PRINT_R("ERROR: Creating out image is impossible."); + + return; + } + + unsigned char *out_buffer_copy = NULL; + if (save_results_to_image) { + out_buffer_copy = 
(unsigned char *) malloc(buf_size); + memcpy(out_buffer_copy, out_buffer, buf_size); + } + + int i = 0; + for (; i < number_of_persons; ++i) { + printf("Person %d:\n", labels[i]); + printf("Location is: x - %d, y - %d, w - %d, h - %d.\n", + locations[i].point.x, + locations[i].point.y, + locations[i].width, + locations[i].height); + printf("Model confidence - %3.2f", confidences[i]); + printf("\n"); + + if (save_results_to_image) + draw_rectangle_on_buffer( + locations[i].point.x, + locations[i].point.y, + locations[i].point.x + locations[i].width, + locations[i].point.y + locations[i].height, + 3, + red_color, + &image_data, + out_buffer_copy); + } + + printf("\n"); + + if (save_results_to_image) + save_image_from_buffer("/tmp/person_rec.jpg", out_buffer_copy, &image_data, 100); + + if (locations != NULL) + free(locations); + + if (labels != NULL) + free(labels); + + if (confidences != NULL) + free(confidences); + + if (out_buffer_copy != NULL) + free(out_buffer_copy); +} + +void movement_detected_cb( + mv_surveillance_event_trigger_h event_trigger, + mv_source_h source, + int video_stream_id, + mv_surveillance_result_h event_result, + void *user_data) +{ + PRINT_G("Movement detected event was occured"); + if (save_results_to_image) + PRINT_G("Output image will be saved to /tmp/move_detect.jpg.\n" + "Movement detected locations - blue."); + + int number_of_movement_regions = 0; + int error = mv_surveillance_get_result_value( + event_result, + MV_SURVEILLANCE_MOVEMENT_NUMBER_OF_REGIONS, + &number_of_movement_regions); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occured in getting number of " + "movement regions.", error); + + return; + } + + printf("\nNumber of movement regions is %d \n", number_of_movement_regions); + + mv_rectangle_s *movement_regions = + malloc(sizeof(mv_rectangle_s) * number_of_movement_regions); + + error = mv_surveillance_get_result_value( + event_result, + MV_SURVEILLANCE_MOVEMENT_REGIONS, + 
movement_regions); + + if (error != MEDIA_VISION_ERROR_NONE) { + PRINT_E("Error with code %d was occured in getting movement regions.", + error); + + if (movement_regions != NULL) + free(movement_regions); + + return; + } + + unsigned char *out_buffer = NULL; + unsigned int buf_size = 0; + image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID }; + + if (save_results_to_image && + (mv_source_get_buffer(source, &out_buffer, &buf_size) || + mv_source_get_width(source, &(image_data.image_width)) || + mv_source_get_height(source, &(image_data.image_height)) || + mv_source_get_colorspace(source, &(image_data.image_colorspace)) || + out_buffer == NULL || + buf_size == 0)) + { + PRINT_R("ERROR: Creating out image is impossible."); + + if (movement_regions != NULL) + free(movement_regions); + + return; + } + + unsigned char *out_buffer_copy = NULL; + if (save_results_to_image) { + out_buffer_copy = (unsigned char *) malloc(buf_size); + memcpy(out_buffer_copy, out_buffer, buf_size); + } + + int i = 0; + for (; i < number_of_movement_regions; ++i) { + printf("Movement #%d region is: x - %d, y - %d, w - %d, h - %d.\n", + i, + movement_regions[i].point.x, + movement_regions[i].point.y, + movement_regions[i].width, + movement_regions[i].height); + + if (save_results_to_image) + draw_rectangle_on_buffer( + movement_regions[i].point.x, + movement_regions[i].point.y, + movement_regions[i].point.x + movement_regions[i].width, + movement_regions[i].point.y + movement_regions[i].height, + 3, + blue_color, + &image_data, + out_buffer_copy); + } + + printf("\n"); + + if (save_results_to_image) + save_image_from_buffer("/tmp/move_detect.jpg", out_buffer_copy, &image_data, 100); + + if (movement_regions != NULL) + free(movement_regions); + + if (out_buffer_copy != NULL) + free(out_buffer_copy); +} -- 2.7.4 From f17f579e5ff90af5193047584801729e12836a74 Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Wed, 2 Dec 2015 17:29:35 +0900 Subject: [PATCH 07/16] Fixed Svace critical 
issues Change-Id: Idf44668d33bc345037f048730402b1797c2e110a Signed-off-by: Tae-Young Chung --- mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp | 5 +++++ mv_face/face/src/FaceEyeCondition.cpp | 2 -- packaging/capi-media-vision.spec | 2 +- test/testsuites/barcode/barcode_test_suite.c | 2 +- test/testsuites/common/image_helper/include/ImageHelper.h | 14 ++++++++++++++ test/testsuites/common/image_helper/include/image_helper.h | 12 ++++++++++++ test/testsuites/common/image_helper/src/ImageHelper.cpp | 12 ++++++++++++ test/testsuites/common/image_helper/src/image_helper.cpp | 5 +++++ .../common/testsuite_common/mv_testsuite_common.c | 4 ++-- test/testsuites/face/face_test_suite.c | 3 ++- 10 files changed, 54 insertions(+), 7 deletions(-) diff --git a/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp b/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp index b2357f7..d6dd4f8 100644 --- a/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp +++ b/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp @@ -44,6 +44,11 @@ int mv_barcode_detect_open( } zbar::Image greyImage = image.convert("Y800"); + if (!greyImage.get_data()) { + LOGE("fail to image convert by zbar"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + greyImage.set_crop(roi.point.x, roi.point.y, roi.width, roi.height); zbar::ImageScanner scanner; diff --git a/mv_face/face/src/FaceEyeCondition.cpp b/mv_face/face/src/FaceEyeCondition.cpp index 10d9e6e..53c835b 100644 --- a/mv_face/face/src/FaceEyeCondition.cpp +++ b/mv_face/face/src/FaceEyeCondition.cpp @@ -168,8 +168,6 @@ int FaceEyeCondition::recognizeEyeCondition( } if (NULL == eyeCondition) { - *eyeCondition = MV_FACE_EYES_NOT_FOUND; - LOGE("Output eye condition is NULL. 
Eye condition recognition failed."); return MEDIA_VISION_ERROR_INVALID_PARAMETER; } diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index d7eac4a..90fc7f2 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.3.0 +Version: 0.3.1 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 diff --git a/test/testsuites/barcode/barcode_test_suite.c b/test/testsuites/barcode/barcode_test_suite.c index b4e8606..9389461 100644 --- a/test/testsuites/barcode/barcode_test_suite.c +++ b/test/testsuites/barcode/barcode_test_suite.c @@ -610,7 +610,7 @@ int detect_barcode(barcode_model_s model, mv_rectangle_s roi) err = mv_barcode_detect(source, mv_engine_config, roi, barcode_detected_cb, &model); if (data_buffer != NULL) - free(data_buffer); + destroy_loaded_buffer(data_buffer); if (MEDIA_VISION_ERROR_NONE != err) { printf("ERROR: Errors were occurred during barcode detection!!! code: %i\n", err); diff --git a/test/testsuites/common/image_helper/include/ImageHelper.h b/test/testsuites/common/image_helper/include/ImageHelper.h index e9eabcc..179484f 100644 --- a/test/testsuites/common/image_helper/include/ImageHelper.h +++ b/test/testsuites/common/image_helper/include/ImageHelper.h @@ -76,6 +76,7 @@ public: * @return @c 0 on success, otherwise a negative error value * * @see ImageHelper::saveImageFromBuffer() + * @see ImageHelper::destroyLoadedBuffer() */ static int loadImageToBuffer( const char *filePath, @@ -103,6 +104,19 @@ public: const ImageData& imageData, int quality = 100); + /** + * @brief Destroys loaded buffer by loadImageToBuffer(). 
+ * + * @since_tizen 3.0 + * @param [out] pDataBuffer The buffer of unsigned chars where image data + * will be stored + * @return @c 0 on success, otherwise a negative error value + * + * @see ImageHelper::loadImageToBuffer() + */ + static int destroyLoadedBuffer( + unsigned char *pDataBuffer); + /** * @brief Draws the rectangle of specified size on the image data buffer. * diff --git a/test/testsuites/common/image_helper/include/image_helper.h b/test/testsuites/common/image_helper/include/image_helper.h index bef34be..480c883 100644 --- a/test/testsuites/common/image_helper/include/image_helper.h +++ b/test/testsuites/common/image_helper/include/image_helper.h @@ -66,6 +66,7 @@ typedef struct * @return @c 0 on success, otherwise a negative error value * * @see save_image_from_buffer() + * @see destroy_loaded_buffer() */ int load_image_to_buffer( const char *file_path, @@ -94,6 +95,17 @@ int save_image_from_buffer( int quality); /** + * @brief Destroys loaded buffer by load_image_to_buffer(). + * + * @since_tizen 3.0 + * @param [in] data_buffer Data buffer to be deallocated + * @return @c 0 on success, otherwise a negative error value + * + * @see load_image_to_buffer() + */ +int destroy_loaded_buffer(unsigned char *data_buffer); + +/** * @brief Draws the rectangle of specified size on the image data buffer. 
* * @since_tizen 3.0 diff --git a/test/testsuites/common/image_helper/src/ImageHelper.cpp b/test/testsuites/common/image_helper/src/ImageHelper.cpp index e6b0a54..67ac386 100644 --- a/test/testsuites/common/image_helper/src/ImageHelper.cpp +++ b/test/testsuites/common/image_helper/src/ImageHelper.cpp @@ -220,6 +220,18 @@ int ImageHelper::saveImageFromBuffer( return MEDIA_VISION_ERROR_NONE; } +int ImageHelper::destroyLoadedBuffer(unsigned char *pDataBuffer) +{ + if (!pDataBuffer) { + return MEDIA_VISION_ERROR_NONE; + } + + delete [] pDataBuffer; + pDataBuffer = NULL; + + return MEDIA_VISION_ERROR_NONE; +} + int ImageHelper::drawRectangleOnBuffer( int topLeftVertexX, int topLeftVertexY, diff --git a/test/testsuites/common/image_helper/src/image_helper.cpp b/test/testsuites/common/image_helper/src/image_helper.cpp index 369f703..ecebee1 100644 --- a/test/testsuites/common/image_helper/src/image_helper.cpp +++ b/test/testsuites/common/image_helper/src/image_helper.cpp @@ -85,6 +85,11 @@ int save_image_from_buffer( return ImageHelper::saveImageFromBuffer(file_path, data_buffer, imageData, quality); } +int destroy_loaded_buffer(unsigned char *data_buffer) +{ + return ImageHelper::destroyLoadedBuffer(data_buffer); +} + int draw_rectangle_on_buffer( int tl_vertex_x, int tl_vertex_y, diff --git a/test/testsuites/common/testsuite_common/mv_testsuite_common.c b/test/testsuites/common/testsuite_common/mv_testsuite_common.c index bf02a2a..8e229c5 100644 --- a/test/testsuites/common/testsuite_common/mv_testsuite_common.c +++ b/test/testsuites/common/testsuite_common/mv_testsuite_common.c @@ -295,7 +295,7 @@ int load_mv_source_from_file( err); if (NULL != data_buffer) { - free(data_buffer); + destroy_loaded_buffer(data_buffer); } return err; @@ -316,7 +316,7 @@ int load_mv_source_from_file( if (NULL != data_buffer) { - free(data_buffer); + destroy_loaded_buffer(data_buffer); } return err; diff --git a/test/testsuites/face/face_test_suite.c b/test/testsuites/face/face_test_suite.c 
index e7ca50a..9da7554 100644 --- a/test/testsuites/face/face_test_suite.c +++ b/test/testsuites/face/face_test_suite.c @@ -865,7 +865,7 @@ int perform_model_evaluation(mv_face_recognition_model_h model) } labels[dir_n] = known_label ? face_label : -1; - strcpy(directories[dir_n], in_file_name); + snprintf(directories[dir_n], 1024, "%s", in_file_name); label_count += (0 == unique_checks[face_label] ? 1 : 0); if (labels[dir_n] >= 0) unique_checks[labels[dir_n]] += 1; @@ -895,6 +895,7 @@ int perform_model_evaluation(mv_face_recognition_model_h model) mv_source_h source = NULL; int err = mv_create_source(&source); if (MEDIA_VISION_ERROR_NONE != err) { + free(directories); printf(TEXT_RED "ERROR: Errors were occurred during creating the source!!! code: %i" TEXT_RESET "\n", err); -- 2.7.4 From 973b06c23ed5871967646e3451a3d33c8c9decb2 Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Mon, 7 Dec 2015 18:43:12 +0900 Subject: [PATCH 08/16] Fixed a bug and remained Svace issues in barcode, face, image modules 1. Fixed the bug of setTolerantError() 2. 
Fixed remained Svace issues Change-Id: I8db590814dbd28b32cfadcc3c0f0ed6c89113aab Signed-off-by: Tae-Young Chung --- mv_face/face/src/FaceEyeCondition.cpp | 8 ++-- mv_face/face/src/FaceRecognitionModel.cpp | 2 +- mv_image/image/src/Features/FeatureMatcher.cpp | 2 +- .../src/Tracking/ImageContourStabilizator.cpp | 2 + mv_image/image/src/mv_image_open.cpp | 11 ++++- packaging/capi-media-vision.spec | 2 +- test/testsuites/barcode/barcode_test_suite.c | 54 ++++++++++++++++++++-- .../common/testsuite_common/mv_testsuite_common.c | 6 ++- test/testsuites/face/face_test_suite.c | 32 +++++++------ test/testsuites/image/image_test_suite.c | 34 ++++++++------ 10 files changed, 111 insertions(+), 42 deletions(-) diff --git a/mv_face/face/src/FaceEyeCondition.cpp b/mv_face/face/src/FaceEyeCondition.cpp index 53c835b..2203940 100644 --- a/mv_face/face/src/FaceEyeCondition.cpp +++ b/mv_face/face/src/FaceEyeCondition.cpp @@ -64,13 +64,13 @@ void FaceEyeCondition::splitEyes( const double xRightEyeCenter = (2 * eyeAreaRight.x + eyeAreaRight.width) / 2.; const double yRightEyeCenter = (2 * eyeAreaRight.y + eyeAreaRight.height) / 2.; - const cv::Rect leftEyeRect(xLeftEyeCenter - eyeAreaLeft.width / 4, - yLeftEyeCenter - eyeAreaLeft.height / 4, + const cv::Rect leftEyeRect((int)(xLeftEyeCenter - (double)eyeAreaLeft.width / 4), + (int)(yLeftEyeCenter - (double)eyeAreaLeft.height / 4), eyeAreaLeft.width / 2, eyeAreaLeft.height / 2); - const cv::Rect rightEyeRect(xRightEyeCenter - eyeAreaRight.width / 4, - yRightEyeCenter - eyeAreaRight.height / 4, + const cv::Rect rightEyeRect((int)(xRightEyeCenter - (double)eyeAreaRight.width / 4), + (int)(yRightEyeCenter - (double)eyeAreaRight.height / 4), eyeAreaRight.width / 2, eyeAreaRight.height / 2); diff --git a/mv_face/face/src/FaceRecognitionModel.cpp b/mv_face/face/src/FaceRecognitionModel.cpp index 1c0c55c..394e067 100644 --- a/mv_face/face/src/FaceRecognitionModel.cpp +++ b/mv_face/face/src/FaceRecognitionModel.cpp @@ -38,7 +38,7 @@ int 
CopyOpenCVAlgorithmParameters(const cv::Ptr& srcAlg, { char tempPath[1024]; - sprintf(tempPath, "/tmp/alg_copy_%p_%p", srcAlg.obj, dstAlg.obj); + snprintf(tempPath, 1024, "/tmp/alg_copy_%p_%p", srcAlg.obj, dstAlg.obj); srcAlg->save(tempPath); dstAlg->load(tempPath); diff --git a/mv_image/image/src/Features/FeatureMatcher.cpp b/mv_image/image/src/Features/FeatureMatcher.cpp index dbf72df..0d00d10 100644 --- a/mv_image/image/src/Features/FeatureMatcher.cpp +++ b/mv_image/image/src/Features/FeatureMatcher.cpp @@ -227,7 +227,7 @@ float FeatureMatcher::getTolerantError() const void FeatureMatcher::setTolerantError(float tolerantError) { - m_affectingPart = std::max(0.f, std::min(1.f, tolerantError)); + m_tolerantError = std::max(0.f, std::min(1.f, tolerantError)); } size_t FeatureMatcher::getMinimumMatchesNumber() const diff --git a/mv_image/image/src/Tracking/ImageContourStabilizator.cpp b/mv_image/image/src/Tracking/ImageContourStabilizator.cpp index 00a25a0..a745cec 100644 --- a/mv_image/image/src/Tracking/ImageContourStabilizator.cpp +++ b/mv_image/image/src/Tracking/ImageContourStabilizator.cpp @@ -31,6 +31,8 @@ ImageContourStabilizator::ImageContourStabilizator() : void ImageContourStabilizator::reset(void) { + m_tolerantShift = 0.0f; + m_tolerantShiftExtra = 0.0f; m_isPrepared = false; m_tempContourIndex = -1; m_currentHistoryAmount = 0; diff --git a/mv_image/image/src/mv_image_open.cpp b/mv_image/image/src/mv_image_open.cpp index df17707..53d46bb 100644 --- a/mv_image/image/src/mv_image_open.cpp +++ b/mv_image/image/src/mv_image_open.cpp @@ -411,6 +411,8 @@ int mv_image_recognize_open( convertSourceMV2GrayCV(source, scene), "Failed to convert mv_source."); + int ret = MEDIA_VISION_ERROR_NONE; + MediaVision::Image::FeaturesExtractingParams featuresExtractingParams; extractSceneFeaturesExtractingParams(engine_cfg, featuresExtractingParams); @@ -431,6 +433,11 @@ int mv_image_recognize_open( if (isRecognized && (resultContour.size() == 
MediaVision::Image::NumberOfQuadrangleCorners)) { resultLocations[objectNum] = new mv_quadrangle_s; + if (resultLocations[objectNum] == NULL) { + ret = MEDIA_VISION_ERROR_OUT_OF_MEMORY; + goto ErrorExit; + } + for (size_t pointNum = 0u; pointNum < MediaVision::Image::NumberOfQuadrangleCorners; ++pointNum) { @@ -452,6 +459,8 @@ int mv_image_recognize_open( number_of_objects, user_data); +ErrorExit: + for (int objectNum = 0; objectNum < number_of_objects; ++objectNum) { if (resultLocations[objectNum] != NULL) { delete resultLocations[objectNum]; @@ -459,7 +468,7 @@ int mv_image_recognize_open( } } - return MEDIA_VISION_ERROR_NONE; + return ret; } int mv_image_track_open( diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index 90fc7f2..3b29759 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.3.1 +Version: 0.3.2 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 diff --git a/test/testsuites/barcode/barcode_test_suite.c b/test/testsuites/barcode/barcode_test_suite.c index 9389461..52f845f 100644 --- a/test/testsuites/barcode/barcode_test_suite.c +++ b/test/testsuites/barcode/barcode_test_suite.c @@ -443,8 +443,15 @@ int generate_barcode_to_source(barcode_model_s model) LOGI("Call the mv_barcode_generate_source() function"); + mv_engine_config_h mv_engine_config; + err = mv_create_engine_config(&mv_engine_config); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Errors were occurred during creating the media engine " + "config: %i\n", err); + } + err = mv_barcode_generate_source( - NULL, + mv_engine_config, model.message, model.type, model.mode, @@ -462,6 +469,12 @@ int generate_barcode_to_source(barcode_model_s model) "Error code: %i\n", err2); } + const int err3 = mv_destroy_engine_config(mv_engine_config); + if (MEDIA_VISION_ERROR_NONE != err3) { + printf("ERROR: Errors 
were occurred during destroying the media engine " + "config: %i\n", err3); + } + MEDIA_VISION_FUNCTION_LEAVE(); return err; @@ -509,6 +522,12 @@ int generate_barcode_to_source(barcode_model_s model) "source. Error code: %i\n", err); } + err = mv_destroy_engine_config(mv_engine_config); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Errors were occurred during destroying the media engine " + "config: %i\n", err); + } + MEDIA_VISION_FUNCTION_LEAVE(); return MEDIA_VISION_ERROR_INTERNAL; @@ -520,12 +539,26 @@ int generate_barcode_to_source(barcode_model_s model) if (0 == strcmp(model.file_name + strlen(model.file_name) - 4, ".jpg") || 0 == strcmp(model.file_name + strlen(model.file_name) - 5, ".jpeg")) { jpeg_file_name = (char*)malloc(strlen(model.file_name) + 1); - strcpy(jpeg_file_name, model.file_name); + if (jpeg_file_name == NULL) { + mv_destroy_source(source); + mv_destroy_engine_config(mv_engine_config); + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } + + strncpy(jpeg_file_name, model.file_name, strlen(model.file_name) + 1); jpeg_file_name[strlen(model.file_name)] = '\0'; } else { jpeg_file_name = (char*)malloc(strlen(model.file_name) + 5); - strcpy(jpeg_file_name, model.file_name); - strcpy(jpeg_file_name + strlen(model.file_name), ".jpg"); + if (jpeg_file_name == NULL) { + mv_destroy_source(source); + mv_destroy_engine_config(mv_engine_config); + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } + + strncpy(jpeg_file_name, model.file_name, strlen(model.file_name) + 5); + strncpy(jpeg_file_name + strlen(model.file_name), ".jpg", 5); jpeg_file_name[strlen(model.file_name) + 4] = '\0'; } @@ -539,6 +572,12 @@ int generate_barcode_to_source(barcode_model_s model) "Error code: %i\n", err2); } + const int err3 = mv_destroy_engine_config(mv_engine_config); + if (MEDIA_VISION_ERROR_NONE != err3) { + printf("ERROR: Errors were occurred during destroying the media engine " + "config: %i\n", err); + } 
+ MEDIA_VISION_FUNCTION_LEAVE(); return err; @@ -656,7 +695,12 @@ int input_string(const char *prompt, size_t max_len, char **string) size_t real_string_len = strlen(buffer); buffer[real_string_len - 1] = '\0'; *string = (char*)malloc(real_string_len * sizeof(char)); - strcpy(*string, buffer); + if (*string == NULL) { + MEDIA_VISION_FUNCTION_LEAVE(); + return -1; + } + + strncpy(*string, buffer, real_string_len); size_t str_len = strlen(*string); diff --git a/test/testsuites/common/testsuite_common/mv_testsuite_common.c b/test/testsuites/common/testsuite_common/mv_testsuite_common.c index 8e229c5..6ebd567 100644 --- a/test/testsuites/common/testsuite_common/mv_testsuite_common.c +++ b/test/testsuites/common/testsuite_common/mv_testsuite_common.c @@ -103,7 +103,11 @@ int input_string(const char *prompt, size_t max_len, char **string) size_t real_string_len = strlen(buffer); buffer[real_string_len - 1] = '\0'; *string = (char*)malloc(real_string_len * sizeof(char)); - strcpy(*string, buffer); + if (*string == NULL) { + return -1; + } + + strncpy(*string, buffer, real_string_len); return strlen(*string); } diff --git a/test/testsuites/face/face_test_suite.c b/test/testsuites/face/face_test_suite.c index 9da7554..6afbf5a 100644 --- a/test/testsuites/face/face_test_suite.c +++ b/test/testsuites/face/face_test_suite.c @@ -545,7 +545,7 @@ int perform_mv_face_recognition_model_add_face_example( if (file_name[0] == '.') continue; - sprintf(file_path, "%s/%s", in_file_name, file_name); + snprintf(file_path, 1024, "%s/%s", in_file_name, file_name); err = add_single_example(model, file_path, NULL, &face_label); if (MEDIA_VISION_ERROR_NONE != err) { @@ -918,7 +918,7 @@ int perform_model_evaluation(mv_face_recognition_model_h model) if (file_name[0] == '.') continue; - sprintf(file_path, "%s/%s", directories[i], file_name); + snprintf(file_path, 1024, "%s/%s", directories[i], file_name); err = load_mv_source_from_file(file_path, source); if (MEDIA_VISION_ERROR_NONE != err) { 
printf(TEXT_RED "Failed to test on example from %s. " @@ -1056,11 +1056,10 @@ int perform_recognize() "Error with code %i was occurred during destoy" TEXT_RESET "\n", err); } - - return err; } else { - return MEDIA_VISION_ERROR_NONE; + err = MEDIA_VISION_ERROR_NONE; } + break; default: sel_opt = 0; printf("ERROR: Incorrect option was selected.\n"); @@ -1069,8 +1068,12 @@ int perform_recognize() print_action_result(names[sel_opt - 1], err, notification_type); - sel_opt = 0; + if (sel_opt != 11) { + sel_opt = 0; + } } + + return err; } int perform_mv_face_tracking_model_save(mv_face_tracking_model_h model) @@ -1415,13 +1418,13 @@ int perform_mv_face_tracking_model_prepare(mv_face_tracking_model_h model) char str_prompt[100]; while (idx < 4) { ++idx; - sprintf(str_prompt, "Specify point %i x coordinate: x%i = ", + snprintf(str_prompt, 100, "Specify point %i x coordinate: x%i = ", idx - 1, idx); while (-1 == input_int(str_prompt, INT_MIN, INT_MAX, &(roi.points[idx - 1].x))) { printf("Incorrect input! 
Try again.\n"); } - sprintf(str_prompt, "Specify point %i y coordinate: y%i = ", + snprintf(str_prompt, 100, "Specify point %i y coordinate: y%i = ", idx - 1, idx); while (-1 == input_int(str_prompt, INT_MIN, INT_MAX, &(roi.points[idx - 1].y))) { @@ -1533,7 +1536,7 @@ void track_cb( } char file_path[1024]; - sprintf(file_path, "%s/%05d.jpg", track_output_dir, track_frame_counter); + snprintf(file_path, 1024, "%s/%05d.jpg", track_output_dir, track_frame_counter); if (MEDIA_VISION_ERROR_NONE == save_image_from_buffer( file_path, out_buffer, &image_data, 100)) { printf("Frame %i was outputted as %s\n", track_frame_counter, file_path); @@ -1806,11 +1809,10 @@ int perform_track() "Error with code %i was occurred during destroy" TEXT_RESET "\n", err); } - - return err; } else { - return MEDIA_VISION_ERROR_NONE; + err = MEDIA_VISION_ERROR_NONE; } + break; default: sel_opt = 0; printf("ERROR: Incorrect input.\n"); @@ -1819,8 +1821,12 @@ int perform_track() print_action_result(names[sel_opt - 1], err, notification_type); - sel_opt = 0; + if (sel_opt != 6) { + sel_opt = 0; + } } + + return err; } int perform_eye_condition_recognize() diff --git a/test/testsuites/image/image_test_suite.c b/test/testsuites/image/image_test_suite.c index 1cd9ba7..4e35f81 100644 --- a/test/testsuites/image/image_test_suite.c +++ b/test/testsuites/image/image_test_suite.c @@ -85,62 +85,66 @@ void testing_object_fill( switch (source_type) { case SOURCE_TYPE_GENERATION: { if (OBJECT_TYPE_IMAGE_OBJECT == object_type) { - sprintf( + snprintf( target->origin_label, + testing_object_maximum_label_length, "generated from \"%s\"", (char*)source); } else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == object_type) { - sprintf( + snprintf( target->origin_label, + testing_object_maximum_label_length, "generated from image object which is %s", ((testing_object_h)source)->actual_label); } else { - sprintf( + snprintf( target->origin_label, + testing_object_maximum_label_length, "generated unknown type of testing 
object"); } - strcpy(target->actual_label, target->origin_label); + strncpy(target->actual_label, target->origin_label, testing_object_maximum_label_length); break; } case SOURCE_TYPE_LOADING: { - sprintf(target->origin_label, "loaded from \"%s\"", (char*)source); - strcpy(target->actual_label, target->origin_label); + snprintf(target->origin_label, testing_object_maximum_label_length, "loaded from \"%s\"", (char*)source); + strncpy(target->actual_label, target->origin_label, testing_object_maximum_label_length); break; } case SOURCE_TYPE_CLONING: { testing_object_h source_object = (testing_object_h)source; - strcpy(target->origin_label, source_object->origin_label); + strncpy(target->origin_label, source_object->origin_label, testing_object_maximum_label_length); target->cloning_counter = source_object->cloning_counter + 1; char number_of_cloning[10]; number_of_cloning[0] = '\0'; if (1 < target->cloning_counter) { - sprintf(number_of_cloning, "%s%i%s", + snprintf(number_of_cloning, 10, "%s%i%s", "(x", target->cloning_counter, ")"); } char type_name[20]; if (OBJECT_TYPE_IMAGE_OBJECT == object_type) - sprintf(type_name, "image object"); + snprintf(type_name, 20, "image object"); else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == object_type) - sprintf(type_name, "tracking model"); + snprintf(type_name, 20, "tracking model"); else - sprintf(type_name, "unknown object"); + snprintf(type_name, 20, "unknown object"); - sprintf(target->actual_label, "%s%s%s%s%s%s", + snprintf(target->actual_label, testing_object_maximum_label_length, + "%s%s%s%s%s%s", "cloned ", number_of_cloning, " from ", type_name, " which is ", target->origin_label); break; } case SOURCE_TYPE_EMPTY: { - strcpy(target->origin_label, "created an empty"); - strcpy(target->actual_label, target->origin_label); + strncpy(target->origin_label, "created an empty", testing_object_maximum_label_length); + strncpy(target->actual_label, target->origin_label, testing_object_maximum_label_length); break; } default: { - 
strcpy(target->origin_label, "having unknown source"); + strncpy(target->origin_label, "having unknown source", testing_object_maximum_label_length); break; } } -- 2.7.4 From c27a8f0ad3f289652bc75c97e7039b34d365708b Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Thu, 10 Dec 2015 17:31:37 +0900 Subject: [PATCH 09/16] Applied Coding Rule Change-Id: I6b558e8580af98f113175455c26d6f1decc5580b Signed-off-by: Tae-Young Chung --- mv_image/image_lic/src/mv_image_lic.c | 119 +- mv_surveillance/surveillance/src/mv_mask_buffer.c | 12 +- .../surveillance_lic/src/mv_surveillance_lic.c | 43 +- packaging/capi-media-vision.spec | 2 +- src/mv_surveillance.c | 10 +- .../common/testsuite_common/mv_testsuite_common.c | 467 +++---- .../common/video_helper/mv_video_helper.c | 1440 ++++++++++---------- test/testsuites/face/face_test_suite.c | 6 +- .../surveillance/surveillance_test_suite.c | 17 +- 9 files changed, 1004 insertions(+), 1112 deletions(-) diff --git a/mv_image/image_lic/src/mv_image_lic.c b/mv_image/image_lic/src/mv_image_lic.c index 356e563..00e898b 100644 --- a/mv_image/image_lic/src/mv_image_lic.c +++ b/mv_image/image_lic/src/mv_image_lic.c @@ -17,134 +17,133 @@ #include "mv_image_lic.h" int mv_image_recognize_lic( - mv_source_h source, - const mv_image_object_h *image_objects, - int number_of_objects, - mv_engine_config_h engine_cfg, - mv_image_recognized_cb recognized_cb, - void *user_data) + mv_source_h source, + const mv_image_object_h *image_objects, + int number_of_objects, + mv_engine_config_h engine_cfg, + mv_image_recognized_cb recognized_cb, + void *user_data) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_track_lic( - mv_source_h source, - mv_image_tracking_model_h image_tracking_model, - mv_engine_config_h engine_cfg, - mv_image_tracked_cb tracked_cb, - void *user_data) + mv_source_h source, + mv_image_tracking_model_h image_tracking_model, + mv_engine_config_h engine_cfg, + mv_image_tracked_cb 
tracked_cb, + void *user_data) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_object_create_lic( - mv_image_object_h *image_object) + mv_image_object_h *image_object) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } - int mv_image_object_destroy_lic( - mv_image_object_h image_object) + mv_image_object_h image_object) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_object_fill_lic( - mv_image_object_h image_object, - mv_engine_config_h engine_cfg, - mv_source_h source, - mv_rectangle_s *location) + mv_image_object_h image_object, + mv_engine_config_h engine_cfg, + mv_source_h source, + mv_rectangle_s *location) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_object_get_recognition_rate_lic( - mv_image_object_h image_object, - double *recognition_rate) + mv_image_object_h image_object, + double *recognition_rate) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_object_set_label_lic( - mv_image_object_h image_object, - int label) + mv_image_object_h image_object, + int label) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_object_get_label_lic( - mv_image_object_h image_object, - int *label) + mv_image_object_h image_object, + int *label) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_object_clone_lic( - mv_image_object_h src, - mv_image_object_h *dst) + mv_image_object_h src, + mv_image_object_h *dst) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_object_save_lic( - mv_image_object_h image_object, - const char *file_name) + mv_image_object_h image_object, + const char *file_name) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return 
MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_object_load_lic( - mv_image_object_h image_object, - const char *file_name) + mv_image_object_h image_object, + const char *file_name) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_tracking_model_create_lic( - mv_image_tracking_model_h *image_tracking_model) + mv_image_tracking_model_h *image_tracking_model) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_tracking_model_set_target_lic( - mv_image_object_h image_object, - mv_image_tracking_model_h image_tracking_model) + mv_image_object_h image_object, + mv_image_tracking_model_h image_tracking_model) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_tracking_model_destroy_lic( - mv_image_tracking_model_h image_tracking_model) + mv_image_tracking_model_h image_tracking_model) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_tracking_model_refresh_lic( - mv_image_tracking_model_h image_tracking_model, - mv_engine_config_h engine_cfg) + mv_image_tracking_model_h image_tracking_model, + mv_engine_config_h engine_cfg) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_tracking_model_clone_lic( - mv_image_tracking_model_h src, - mv_image_tracking_model_h *dst) + mv_image_tracking_model_h src, + mv_image_tracking_model_h *dst) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_tracking_model_save_lic( - mv_image_tracking_model_h image_tracking_model, - const char *file_name) + mv_image_tracking_model_h image_tracking_model, + const char *file_name) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_image_tracking_model_load_lic( - mv_image_tracking_model_h image_tracking_model, - const char *file_name) + 
mv_image_tracking_model_h image_tracking_model, + const char *file_name) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } diff --git a/mv_surveillance/surveillance/src/mv_mask_buffer.c b/mv_surveillance/surveillance/src/mv_mask_buffer.c index f1d50ef..af06e61 100644 --- a/mv_surveillance/surveillance/src/mv_mask_buffer.c +++ b/mv_surveillance/surveillance/src/mv_mask_buffer.c @@ -29,7 +29,7 @@ int mv_get_mask_buffer( unsigned char **mask_buffer) { if (buffer_width == 0u || buffer_height == 0u || - polygon == NULL|| points_number == 0u || mask_buffer == NULL) { + polygon == NULL || points_number == 0u || mask_buffer == NULL) { LOGE("Wrong input parameter. Getting mask buffer failed."); return MEDIA_VISION_ERROR_INVALID_PARAMETER; } @@ -48,17 +48,15 @@ int mv_get_mask_buffer( int min_y = polygon[0].y; for (k = 1u; k < points_number; ++k) { - if (polygon[k].x > max_x) { + if (polygon[k].x > max_x) max_x = polygon[k].x; - } else if (polygon[k].x < min_x) { + else if (polygon[k].x < min_x) min_x = polygon[k].x; - } - if (polygon[k].y > max_y) { + if (polygon[k].y > max_y) max_y = polygon[k].y; - } else if (polygon[k].y < min_y) { + else if (polygon[k].y < min_y) min_y = polygon[k].y; - } } diff --git a/mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c b/mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c index 95acb63..299a87d 100644 --- a/mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c +++ b/mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c @@ -17,47 +17,46 @@ #include "mv_surveillance_lic.h" int mv_surveillance_subscribe_event_trigger_lic( - mv_surveillance_event_trigger_h event_trigger, - mv_engine_config_h engine_cfg, - mv_surveillance_event_occurred_cb callback, - void *user_data) + mv_surveillance_event_trigger_h event_trigger, + mv_engine_config_h engine_cfg, + mv_surveillance_event_occurred_cb callback, + void *user_data) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return 
MEDIA_VISION_ERROR_NOT_SUPPORTED; } - int mv_surveillance_unsubscribe_event_trigger_lic( - mv_surveillance_event_trigger_h event_trigger) + mv_surveillance_event_trigger_h event_trigger) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_surveillance_push_source_lic( - mv_source_h source, - int video_stream_id) + mv_source_h source, + int video_stream_id) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_surveillance_foreach_event_type_lic( - mv_surveillance_event_type_cb callback, - void *user_data) + mv_surveillance_event_type_cb callback, + void *user_data) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_surveillance_foreach_event_result_value_name_lic( - const char *event_type, - mv_surveillance_event_result_value_name_cb callback, - void *user_data) + const char *event_type, + mv_surveillance_event_result_value_name_cb callback, + void *user_data) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } int mv_surveillance_get_result_value_lic( - mv_surveillance_result_h result, - const char *value_name, - void *value) + mv_surveillance_result_h result, + const char *value_name, + void *value) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED; + return MEDIA_VISION_ERROR_NOT_SUPPORTED; } diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index 3b29759..11304f8 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.3.2 +Version: 0.3.3 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 diff --git a/src/mv_surveillance.c b/src/mv_surveillance.c index 48aa269..461ea4d 100644 --- a/src/mv_surveillance.c +++ b/src/mv_surveillance.c @@ -58,7 +58,7 @@ int mv_surveillance_event_trigger_create( memset(handle, 0, 
sizeof(mv_surveillance_event_trigger_s)); - // default values: + /* default values: */ handle->trigger_id = ++__mv_surveillance_id_counter; handle->event_type = strndup(event_type, 255); handle->number_of_roi_points = 0; @@ -102,8 +102,8 @@ int mv_surveillance_get_event_trigger_type( (mv_surveillance_event_trigger_s *)trigger; *event_type = strndup(handle->event_type, 255); - MEDIA_VISION_FUNCTION_LEAVE(); - return MEDIA_VISION_ERROR_NONE; + MEDIA_VISION_FUNCTION_LEAVE(); + return MEDIA_VISION_ERROR_NONE; } int mv_surveillance_set_event_trigger_roi( @@ -154,11 +154,11 @@ int mv_surveillance_get_event_trigger_roi( (mv_surveillance_event_trigger_s *) trigger; *number_of_points = handle->number_of_roi_points; - if (0 == *number_of_points) - { + if (0 == *number_of_points) { MEDIA_VISION_FUNCTION_LEAVE(); return MEDIA_VISION_ERROR_NONE; } + *roi = (mv_point_s *) malloc( sizeof(mv_point_s) * handle->number_of_roi_points); diff --git a/test/testsuites/common/testsuite_common/mv_testsuite_common.c b/test/testsuites/common/testsuite_common/mv_testsuite_common.c index 6ebd567..c5fb3fc 100644 --- a/test/testsuites/common/testsuite_common/mv_testsuite_common.c +++ b/test/testsuites/common/testsuite_common/mv_testsuite_common.c @@ -22,306 +22,261 @@ #include #include - void print_fail_result( - const char *action_name, - int action_return_value) + const char *action_name, + int action_return_value) { - printf(TEXT_RED - "Error with code %i was occurred during action '%s'" - TEXT_RESET "\n", - action_return_value, - action_name); + printf(TEXT_RED + "Error with code %i was occurred during action '%s'" + TEXT_RESET "\n", + action_return_value, + action_name); } void print_done_result(const char *action_name) { - printf(TEXT_YELLOW - "Action '%s' was finished" - TEXT_RESET "\n", - action_name); + printf(TEXT_YELLOW + "Action '%s' was finished" + TEXT_RESET "\n", + action_name); } void print_success_result(const char *action_name) { - printf(TEXT_GREEN - "Action '%s' was finished 
successfully" - TEXT_RESET - "\n", action_name); + printf(TEXT_GREEN + "Action '%s' was finished successfully" + TEXT_RESET + "\n", action_name); } void print_action_result( - const char *action_name, - int action_return_value, - notification_type_e notification_type_e) + const char *action_name, + int action_return_value, + notification_type_e notification_type_e) { - switch(notification_type_e) - { - case FAIL_OR_SUCCESSS: - if (MEDIA_VISION_ERROR_NONE != action_return_value) - { - print_fail_result(action_name, action_return_value); - } - else - { - print_success_result(action_name); - } - break; - case FAIL_OR_DONE: - if (MEDIA_VISION_ERROR_NONE != action_return_value) - { - print_fail_result(action_name, action_return_value); - } - else - { - print_done_result(action_name); - } - break; - default: - print_done_result(action_name); - } + switch (notification_type_e) { + case FAIL_OR_SUCCESSS: + if (MEDIA_VISION_ERROR_NONE != action_return_value) + print_fail_result(action_name, action_return_value); + else + print_success_result(action_name); + + break; + case FAIL_OR_DONE: + if (MEDIA_VISION_ERROR_NONE != action_return_value) + print_fail_result(action_name, action_return_value); + else + print_done_result(action_name); + + break; + default: + print_done_result(action_name); + } } int input_string(const char *prompt, size_t max_len, char **string) { - printf("\n"); - printf("%s ", prompt); - - if (scanf("\n") != 0) - { - return -1; - } - - char buffer[max_len]; - int last_char = 0; - buffer[last_char] = '\0'; - buffer[sizeof(buffer) - 1] = ~'\0'; - if (NULL == fgets(buffer, sizeof(buffer), stdin)) - { - return -1; - } - size_t real_string_len = strlen(buffer); - buffer[real_string_len - 1] = '\0'; - *string = (char*)malloc(real_string_len * sizeof(char)); - if (*string == NULL) { + printf("\n"); + printf("%s ", prompt); + + if (scanf("\n") != 0) return -1; - } - strncpy(*string, buffer, real_string_len); + char buffer[max_len]; + int last_char = 0; + 
buffer[last_char] = '\0'; + buffer[sizeof(buffer) - 1] = ~'\0'; + if (NULL == fgets(buffer, sizeof(buffer), stdin)) + return -1; - return strlen(*string); + size_t real_string_len = strlen(buffer); + buffer[real_string_len - 1] = '\0'; + *string = (char*)malloc(real_string_len * sizeof(char)); + if (*string == NULL) + return -1; + + strncpy(*string, buffer, real_string_len); + + return strlen(*string); } int input_size(const char *prompt, size_t max_size, size_t *size) { - printf("\n"); - printf("%s ", prompt); - - if (scanf("%20zu", size) == 0) - { - if (scanf("%*[^\n]%*c") != 0) - { - printf("ERROR: Reading the input line error.\n"); - return -1; - } - printf("ERROR: Incorrect input.\n"); - return -1; - } - scanf("%*[^\n]%*c"); - - return (*size > max_size ? -1 : 0); + printf("\n"); + printf("%s ", prompt); + + if (scanf("%20zu", size) == 0) { + if (scanf("%*[^\n]%*c") != 0) { + printf("ERROR: Reading the input line error.\n"); + return -1; + } + printf("ERROR: Incorrect input.\n"); + return -1; + } + scanf("%*[^\n]%*c"); + + return (*size > max_size ? -1 : 0); } int input_int(const char *prompt, int min_value, int max_value, int *value) { - printf("\n"); - printf("%s ", prompt); - - if (scanf("%20i", value) == 0) - { - if (scanf("%*[^\n]%*c") != 0) - { - printf("ERROR: Reading the input line error.\n"); - return -1; - } - printf("ERROR: Incorrect input.\n"); - return -1; - } - scanf("%*[^\n]%*c"); - - return (*value < min_value || *value > max_value ? -1 : 0); + printf("\n"); + printf("%s ", prompt); + + if (scanf("%20i", value) == 0) { + if (scanf("%*[^\n]%*c") != 0) { + printf("ERROR: Reading the input line error.\n"); + return -1; + } + printf("ERROR: Incorrect input.\n"); + return -1; + } + scanf("%*[^\n]%*c"); + + return (*value < min_value || *value > max_value ? 
-1 : 0); } int input_double( - const char *prompt, - double min_value, - double max_value, - double *value) + const char *prompt, + double min_value, + double max_value, + double *value) { - printf("\n"); - printf("%s ", prompt); - - if (scanf("%20lf", value) == 0) - { - if (scanf("%*[^\n]%*c") != 0) - { - printf("ERROR: Reading the input line error.\n"); - return -1; - } - printf("ERROR: Incorrect input.\n"); - return -1; - } - scanf("%*[^\n]%*c"); - - return (*value < min_value || *value > max_value ? -1 : 0); + printf("\n"); + printf("%s ", prompt); + + if (scanf("%20lf", value) == 0) { + if (scanf("%*[^\n]%*c") != 0) { + printf("ERROR: Reading the input line error.\n"); + return -1; + } + printf("ERROR: Incorrect input.\n"); + return -1; + } + scanf("%*[^\n]%*c"); + + return (*value < min_value || *value > max_value ? -1 : 0); } bool show_confirm_dialog(const char *title) { - const int options[2] = {1, 2}; - const char *names[2] = { "No", "Yes" }; - - bool answer = false; - - int sel = -1; - while (sel == -1) - { - sel = show_menu(title, options, names, 2); - switch (sel) - { - case 1: - answer = false; - break; - case 2: - answer = true; - break; - default: - sel = -1; - printf("ERROR: Incorrect input.\n"); - continue; - } - } - - return answer; + const int options[2] = {1, 2}; + const char *names[2] = { "No", "Yes" }; + + bool answer = false; + + int sel = -1; + while (sel == -1) { + sel = show_menu(title, options, names, 2); + switch (sel) { + case 1: + answer = false; + break; + case 2: + answer = true; + break; + default: + sel = -1; + printf("ERROR: Incorrect input.\n"); + continue; + } + } + + return answer; } int show_menu( - const char *title, - const int *options, - const char **names, - int number_of_option) + const char *title, + const int *options, + const char **names, + int number_of_option) { - if (NULL == title || NULL == options || NULL == names || 0 >= number_of_option) - { - return -1; - } - - int number_size = 1; - - int tn_counter = 
number_of_option; - while(tn_counter/=10) - { - ++number_size; - } - - int max_len = strlen(title) - number_size - 2; - - int i = 0; - for (i = 0; i < number_of_option; ++i) - { - const int temp_len = strlen(names[i]); - if (max_len < temp_len) - { - max_len = temp_len; - } - } - - const int full_size = number_size + 2 + max_len; - - printf("\n**"); - for (i = 0; i < full_size; ++i) - { - printf("*"); - } - printf("**\n"); - - printf("* %-*s *\n", full_size, title); - - printf("*-"); - for (i = 0; i < full_size; ++i) - { - printf("-"); - } - printf("-*\n"); - - for (i = 0; i < number_of_option; ++i) - { - printf("* %0*i. %-*s *\n", number_size, options[i], max_len, names[i]); - } - - printf("**"); - for (i = 0; i < full_size; ++i) - { - printf("*"); - } - printf("**\n\n"); - - int selection = 0; - printf("Your choice: "); - if (scanf("%25i", &selection) == 0) - { - if (scanf("%*[^\n]%*c") != 0) - { - printf("ERROR: Reading the input line error.\n"); - } - - printf("ERROR: Incorrect input.\n"); - return -1; - } - scanf("%*[^\n]%*c"); - - return selection; + if (NULL == title || NULL == options || NULL == names || 0 >= number_of_option) + return -1; + + int number_size = 1; + + int tn_counter = number_of_option; + while (tn_counter /= 10) + ++number_size; + + int max_len = strlen(title) - number_size - 2; + + int i = 0; + for (i = 0; i < number_of_option; ++i) { + const int temp_len = strlen(names[i]); + if (max_len < temp_len) + max_len = temp_len; + } + + const int full_size = number_size + 2 + max_len; + + printf("\n**"); + for (i = 0; i < full_size; ++i) + printf("*"); + printf("**\n"); + + printf("* %-*s *\n", full_size, title); + + printf("*-"); + for (i = 0; i < full_size; ++i) + printf("-"); + printf("-*\n"); + + for (i = 0; i < number_of_option; ++i) + printf("* %0*i. 
%-*s *\n", number_size, options[i], max_len, names[i]); + + printf("**"); + for (i = 0; i < full_size; ++i) + printf("*"); + printf("**\n\n"); + + int selection = 0; + printf("Your choice: "); + if (scanf("%25i", &selection) == 0) { + if (scanf("%*[^\n]%*c") != 0) + printf("ERROR: Reading the input line error.\n"); + + printf("ERROR: Incorrect input.\n"); + return -1; + } + scanf("%*[^\n]%*c"); + + return selection; } int load_mv_source_from_file( - const char *path_to_image, - mv_source_h source) + const char *path_to_image, + mv_source_h source) { - unsigned char *data_buffer = NULL; - unsigned long buffer_size = 0; - image_data_s image_data; - - int err = load_image_to_buffer(path_to_image, &data_buffer, - &buffer_size, &image_data); - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Errors were occurred during opening file!!! code: %i\n", - err); - if (NULL != data_buffer) - { + unsigned char *data_buffer = NULL; + unsigned long buffer_size = 0; + image_data_s image_data; + + int err = load_image_to_buffer(path_to_image, &data_buffer, + &buffer_size, &image_data); + if (MEDIA_VISION_ERROR_NONE != err) { + printf("ERROR: Errors were occurred during opening file!!! code: %i\n", + err); + if (NULL != data_buffer) destroy_loaded_buffer(data_buffer); - } - - return err; - } - - err = mv_source_fill_by_buffer( - source, data_buffer, - buffer_size, - image_data.image_width, - image_data.image_height, - image_data.image_colorspace); - - if (MEDIA_VISION_ERROR_NONE != err) - { - printf("ERROR: Errors were occurred during filling source!!! code %i\n", - err); - } - - if (NULL != data_buffer) - { - destroy_loaded_buffer(data_buffer); - } - - return err; + + return err; + } + + err = mv_source_fill_by_buffer( + source, data_buffer, + buffer_size, + image_data.image_width, + image_data.image_height, + image_data.image_colorspace); + + if (MEDIA_VISION_ERROR_NONE != err) + printf("ERROR: Errors were occurred during filling source!!! 
code %i\n", + err); + + if (NULL != data_buffer) + destroy_loaded_buffer(data_buffer); + + return err; } diff --git a/test/testsuites/common/video_helper/mv_video_helper.c b/test/testsuites/common/video_helper/mv_video_helper.c index 7674d91..c59a81c 100644 --- a/test/testsuites/common/video_helper/mv_video_helper.c +++ b/test/testsuites/common/video_helper/mv_video_helper.c @@ -28,46 +28,44 @@ #include -typedef struct _mv_video_reader_s -{ - /* Main bin */ - GstElement *pl; +typedef struct _mv_video_reader_s { + /* Main bin */ + GstElement *pl; - /* Pipeline structure */ - GstElement *filesrc; - GstElement *decodebin; - GstElement *videoconvert; - GstElement *appsink; + /* Pipeline structure */ + GstElement *filesrc; + GstElement *decodebin; + GstElement *videoconvert; + GstElement *appsink; - void *new_sample_cb_user_data; - void *eos_cb_user_data; + void *new_sample_cb_user_data; + void *eos_cb_user_data; - GstCaps *caps; + GstCaps *caps; - pthread_spinlock_t new_sample_cb_guard; - pthread_spinlock_t eos_cb_guard; + pthread_spinlock_t new_sample_cb_guard; + pthread_spinlock_t eos_cb_guard; - mv_video_reader_new_sample_cb new_sample_cb; - mv_video_reader_eos_cb eos_cb; + mv_video_reader_new_sample_cb new_sample_cb; + mv_video_reader_eos_cb eos_cb; } mv_video_reader_s; -typedef struct _mv_video_writer_s -{ - /* Main bin */ - GstElement *pl; - - /* Pipeline structure */ - GstElement *appsrc; - GstElement *capsfilter; - GstElement *videoconvert; - GstElement *encoder; - GstElement *queue; - GstElement *muxer; - GstElement *filesink; - - image_data_s image_data; - unsigned int fps; - unsigned int buffer_size; +typedef struct _mv_video_writer_s { + /* Main bin */ + GstElement *pl; + + /* Pipeline structure */ + GstElement *appsrc; + GstElement *capsfilter; + GstElement *videoconvert; + GstElement *encoder; + GstElement *queue; + GstElement *muxer; + GstElement *filesink; + + image_data_s image_data; + unsigned int fps; + unsigned int buffer_size; } mv_video_writer_s; 
/* video reader internal funcitons */ @@ -86,841 +84,789 @@ static void cb_newpad(GstElement *decodebin, GstPad *new_pad, gpointer user_data /* video reader */ int mv_create_video_reader( - mv_video_reader_h *reader) + mv_video_reader_h *reader) { - mv_video_reader_s *handle = NULL; - int err = MEDIA_VISION_ERROR_NONE; - - if (reader == NULL) - { - LOGE("NULL pointer passed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - gst_init(NULL, NULL); - - handle = (mv_video_reader_s *) malloc(sizeof(mv_video_reader_s)); - if (!handle) - { - LOGE("Not enough memory"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - memset(handle, 0, sizeof(mv_video_reader_s)); - - err = _mv_video_reader_create_internals(handle); - if (MEDIA_VISION_ERROR_NONE != err) - { - LOGE("Failed to create internals"); - free(handle); - return err; - } - - err = _mv_video_reader_link_internals(handle); - if (MEDIA_VISION_ERROR_NONE != err) - { - LOGE("Failed to link internals"); - free(handle); - return err; - } - - *reader = (mv_video_reader_s *) handle; - - return err; + mv_video_reader_s *handle = NULL; + int err = MEDIA_VISION_ERROR_NONE; + + if (reader == NULL) { + LOGE("NULL pointer passed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + gst_init(NULL, NULL); + + handle = (mv_video_reader_s *) malloc(sizeof(mv_video_reader_s)); + if (!handle) { + LOGE("Not enough memory"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + memset(handle, 0, sizeof(mv_video_reader_s)); + + err = _mv_video_reader_create_internals(handle); + if (MEDIA_VISION_ERROR_NONE != err) { + LOGE("Failed to create internals"); + free(handle); + return err; + } + + err = _mv_video_reader_link_internals(handle); + if (MEDIA_VISION_ERROR_NONE != err) { + LOGE("Failed to link internals"); + free(handle); + return err; + } + + *reader = (mv_video_reader_s *) handle; + + return err; } int mv_destroy_video_reader( - mv_video_reader_h reader) + mv_video_reader_h reader) { - mv_video_reader_s *handle = NULL; + 
mv_video_reader_s *handle = NULL; + + if (reader == NULL) { + LOGE("NULL pointer passed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - if (reader == NULL) - { - LOGE("NULL pointer passed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + handle = (mv_video_reader_s *) reader; - handle = (mv_video_reader_s *) reader; + if (handle->caps && GST_OBJECT_REFCOUNT(handle->caps)) + gst_caps_unref(handle->caps); - if (handle->caps && GST_OBJECT_REFCOUNT(handle->caps)) - { - gst_caps_unref(handle->caps); - } + if (handle->pl) + gst_object_unref(handle->pl); - if (handle->pl) - { - gst_object_unref(handle->pl); - } - handle->pl = NULL; + handle->pl = NULL; - pthread_spin_destroy(&(handle->new_sample_cb_guard)); - pthread_spin_destroy(&(handle->eos_cb_guard)); + pthread_spin_destroy(&(handle->new_sample_cb_guard)); + pthread_spin_destroy(&(handle->eos_cb_guard)); - LOGD("video reader destroyed %p", handle); + LOGD("video reader destroyed %p", handle); - free(handle); + free(handle); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_video_reader_load( - mv_video_reader_h reader, - const char *path, - image_data_s *image_data, - unsigned int *fps) + mv_video_reader_h reader, + const char *path, + image_data_s *image_data, + unsigned int *fps) { - mv_video_reader_s *handle = NULL; - GstVideoInfo info; - - if (reader == NULL || path == NULL || - image_data == NULL || fps == NULL) - { - LOGE("NULL pointer passed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - handle = (mv_video_reader_s *) reader; - - /* Set input file location from path */ - g_object_set(G_OBJECT(handle->filesrc), - "location", path, - NULL); - - /* Start playback */ - if (_mv_video_reader_state_change(handle, GST_STATE_PLAYING)) - { - LOGE("Unable to change state"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - if (_mv_video_reader_state_change(handle, GST_STATE_PAUSED)) - { - LOGE("Unable to change state"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; 
- } - - if (handle->caps == NULL) - { - LOGE("Unable to get caps from decodebin"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - gst_video_info_from_caps(&info, handle->caps); - gst_caps_unref(handle->caps); - - *fps = info.fps_n/info.fps_d; - - /* Fill image data */ - image_data->image_width = info.width; - image_data->image_height = info.height; - - /* Look to : - * http://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-base-libs/html/gst-plugins-base-libs-gstvideo.html#GstVideoFormat */ - switch(GST_VIDEO_FORMAT_INFO_FORMAT(info.finfo)) - { - case(GST_VIDEO_FORMAT_GRAY8): - image_data->image_colorspace = MEDIA_VISION_COLORSPACE_Y800; - break; - case(GST_VIDEO_FORMAT_I420): - image_data->image_colorspace = MEDIA_VISION_COLORSPACE_I420; - break; - case(GST_VIDEO_FORMAT_NV12): - image_data->image_colorspace = MEDIA_VISION_COLORSPACE_NV12; - break; - case(GST_VIDEO_FORMAT_YV12): - image_data->image_colorspace = MEDIA_VISION_COLORSPACE_YV12; - break; - case(GST_VIDEO_FORMAT_NV21): - image_data->image_colorspace = MEDIA_VISION_COLORSPACE_NV21; - break; - case(GST_VIDEO_FORMAT_YUY2): - image_data->image_colorspace = MEDIA_VISION_COLORSPACE_YUYV; - break; - case(GST_VIDEO_FORMAT_UYVY): - image_data->image_colorspace = MEDIA_VISION_COLORSPACE_UYVY; - break; - case(GST_VIDEO_FORMAT_RGB): - image_data->image_colorspace = MEDIA_VISION_COLORSPACE_RGB888; - break; - case(GST_VIDEO_FORMAT_RGBA): - image_data->image_colorspace = MEDIA_VISION_COLORSPACE_RGBA; - break; - default: - LOGE("Video pixel format is not supported\n"); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - return MEDIA_VISION_ERROR_NONE; + mv_video_reader_s *handle = NULL; + GstVideoInfo info; + + if (reader == NULL || path == NULL || + image_data == NULL || fps == NULL) { + LOGE("NULL pointer passed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + handle = (mv_video_reader_s *) reader; + + /* Set input file location from path */ + 
g_object_set(G_OBJECT(handle->filesrc), + "location", path, + NULL); + + /* Start playback */ + if (_mv_video_reader_state_change(handle, GST_STATE_PLAYING)) { + LOGE("Unable to change state"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + if (_mv_video_reader_state_change(handle, GST_STATE_PAUSED)) { + LOGE("Unable to change state"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + if (handle->caps == NULL) { + LOGE("Unable to get caps from decodebin"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + gst_video_info_from_caps(&info, handle->caps); + gst_caps_unref(handle->caps); + + *fps = info.fps_n/info.fps_d; + + /* Fill image data */ + image_data->image_width = info.width; + image_data->image_height = info.height; + + /* Look to : + * http://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-base-libs/html/gst-plugins-base-libs-gstvideo.html#GstVideoFormat */ + switch (GST_VIDEO_FORMAT_INFO_FORMAT(info.finfo)) { + case(GST_VIDEO_FORMAT_GRAY8): + image_data->image_colorspace = MEDIA_VISION_COLORSPACE_Y800; + break; + case(GST_VIDEO_FORMAT_I420): + image_data->image_colorspace = MEDIA_VISION_COLORSPACE_I420; + break; + case(GST_VIDEO_FORMAT_NV12): + image_data->image_colorspace = MEDIA_VISION_COLORSPACE_NV12; + break; + case(GST_VIDEO_FORMAT_YV12): + image_data->image_colorspace = MEDIA_VISION_COLORSPACE_YV12; + break; + case(GST_VIDEO_FORMAT_NV21): + image_data->image_colorspace = MEDIA_VISION_COLORSPACE_NV21; + break; + case(GST_VIDEO_FORMAT_YUY2): + image_data->image_colorspace = MEDIA_VISION_COLORSPACE_YUYV; + break; + case(GST_VIDEO_FORMAT_UYVY): + image_data->image_colorspace = MEDIA_VISION_COLORSPACE_UYVY; + break; + case(GST_VIDEO_FORMAT_RGB): + image_data->image_colorspace = MEDIA_VISION_COLORSPACE_RGB888; + break; + case(GST_VIDEO_FORMAT_RGBA): + image_data->image_colorspace = MEDIA_VISION_COLORSPACE_RGBA; + break; + default: + LOGE("Video pixel format is not supported\n"); + return 
MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } + + return MEDIA_VISION_ERROR_NONE; } int mv_video_reader_start( - mv_video_reader_h reader) + mv_video_reader_h reader) { - mv_video_reader_s *handle = NULL; + mv_video_reader_s *handle = NULL; - if (reader == NULL) - { - LOGE("NULL pointer passed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (reader == NULL) { + LOGE("NULL pointer passed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - handle = (mv_video_reader_s *) reader; + handle = (mv_video_reader_s *) reader; - /* Start playback */ - if (_mv_video_reader_state_change(handle, GST_STATE_PLAYING)) - { - LOGE("Unable to change state"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } + /* Start playback */ + if (_mv_video_reader_state_change(handle, GST_STATE_PLAYING)) { + LOGE("Unable to change state"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_video_reader_stop( - mv_video_reader_h reader) + mv_video_reader_h reader) { - mv_video_reader_s *handle = NULL; + mv_video_reader_s *handle = NULL; - if (reader == NULL) - { - LOGE("NULL pointer passed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (reader == NULL) { + LOGE("NULL pointer passed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - handle = (mv_video_reader_s *) reader; + handle = (mv_video_reader_s *) reader; - /* Stop playback (NULL or READY) */ - if (_mv_video_reader_state_change(handle, GST_STATE_NULL)) - { - LOGE("Unable to change state"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } + /* Stop playback (NULL or READY) */ + if (_mv_video_reader_state_change(handle, GST_STATE_NULL)) { + LOGE("Unable to change state"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_video_reader_set_new_sample_cb( - mv_video_reader_h reader, - mv_video_reader_new_sample_cb callback, - void *user_data) + mv_video_reader_h 
reader, + mv_video_reader_new_sample_cb callback, + void *user_data) { - mv_video_reader_s *handle = NULL; + mv_video_reader_s *handle = NULL; - if (reader == NULL || callback == NULL) - { - LOGE("NULL pointer passed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (reader == NULL || callback == NULL) { + LOGE("NULL pointer passed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - handle = (mv_video_reader_s *) reader; + handle = (mv_video_reader_s *) reader; - pthread_spin_lock(&(handle->new_sample_cb_guard)); - handle->new_sample_cb = callback; - handle->new_sample_cb_user_data = user_data; - pthread_spin_unlock(&(handle->new_sample_cb_guard)); + pthread_spin_lock(&(handle->new_sample_cb_guard)); + handle->new_sample_cb = callback; + handle->new_sample_cb_user_data = user_data; + pthread_spin_unlock(&(handle->new_sample_cb_guard)); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_video_reader_set_eos_cb( - mv_video_reader_h reader, - mv_video_reader_eos_cb callback, - void *user_data) + mv_video_reader_h reader, + mv_video_reader_eos_cb callback, + void *user_data) { - mv_video_reader_s *handle = NULL; + mv_video_reader_s *handle = NULL; - if (reader == NULL || callback == NULL) - { - LOGE("NULL pointer passed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (reader == NULL || callback == NULL) { + LOGE("NULL pointer passed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - handle = (mv_video_reader_s *) reader; + handle = (mv_video_reader_s *) reader; - pthread_spin_lock(&(handle->eos_cb_guard)); - handle->eos_cb = callback; - handle->eos_cb_user_data = user_data; - pthread_spin_unlock(&(handle->eos_cb_guard)); + pthread_spin_lock(&(handle->eos_cb_guard)); + handle->eos_cb = callback; + handle->eos_cb_user_data = user_data; + pthread_spin_unlock(&(handle->eos_cb_guard)); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } /* Video Writer */ int mv_create_video_writer( - mv_video_writer_h 
*writer) + mv_video_writer_h *writer) { - mv_video_writer_s *handle = NULL; - int err = MEDIA_VISION_ERROR_NONE; - - if (writer == NULL) - { - LOGE("NULL pointer passed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - gst_init(NULL, NULL); - - handle = (mv_video_writer_s *) malloc(sizeof(mv_video_writer_s)); - if (!handle) - { - LOGE("Not enough memory"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - memset(handle, 0, sizeof(mv_video_writer_s)); - - err = _mv_video_writer_create_internals(handle); - if (MEDIA_VISION_ERROR_NONE != err) - { - LOGE("Failed to create internals"); - free(handle); - return err; - } - - *writer = (mv_video_writer_s *) handle; - - return err; + mv_video_writer_s *handle = NULL; + int err = MEDIA_VISION_ERROR_NONE; + + if (writer == NULL) { + LOGE("NULL pointer passed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + gst_init(NULL, NULL); + + handle = (mv_video_writer_s *) malloc(sizeof(mv_video_writer_s)); + if (!handle) { + LOGE("Not enough memory"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + memset(handle, 0, sizeof(mv_video_writer_s)); + + err = _mv_video_writer_create_internals(handle); + if (MEDIA_VISION_ERROR_NONE != err) { + LOGE("Failed to create internals"); + free(handle); + return err; + } + + *writer = (mv_video_writer_s *) handle; + + return err; } int mv_destroy_video_writer( - mv_video_writer_h writer) + mv_video_writer_h writer) { - mv_video_writer_s *handle = NULL; + mv_video_writer_s *handle = NULL; + + if (writer == NULL) { + LOGE("NULL pointer passed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - if (writer == NULL) - { - LOGE("NULL pointer passed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + handle = (mv_video_writer_s *) writer; - handle = (mv_video_writer_s *) writer; + _mv_video_writer_state_change(writer, GST_STATE_NULL); - _mv_video_writer_state_change(writer, GST_STATE_NULL); + if (handle->pl) + gst_object_unref(handle->pl); - if (handle->pl) - { - 
gst_object_unref(handle->pl); - } - handle->pl = NULL; + handle->pl = NULL; - LOGD("video writer destroyed %p", handle); + LOGD("video writer destroyed %p", handle); - free(handle); + free(handle); - return MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } int mv_video_writer_init( - mv_video_writer_h writer, - const char *path, - image_data_s image_data, - unsigned int fps) + mv_video_writer_h writer, + const char *path, + image_data_s image_data, + unsigned int fps) { - mv_video_writer_s *handle = NULL; - unsigned int err = MEDIA_VISION_ERROR_NONE; + mv_video_writer_s *handle = NULL; + unsigned int err = MEDIA_VISION_ERROR_NONE; - if (writer == NULL) - { - LOGE("NULL pointer passed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + if (writer == NULL) { + LOGE("NULL pointer passed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } - handle = (mv_video_writer_s *) writer; + handle = (mv_video_writer_s *) writer; - handle->image_data.image_width = image_data.image_width; - handle->image_data.image_height = image_data.image_height; - handle->image_data.image_colorspace = image_data.image_colorspace; + handle->image_data.image_width = image_data.image_width; + handle->image_data.image_height = image_data.image_height; + handle->image_data.image_colorspace = image_data.image_colorspace; - handle->fps = fps; + handle->fps = fps; - g_object_set(G_OBJECT(handle->filesink), - "location", path, - NULL); + g_object_set(G_OBJECT(handle->filesink), + "location", path, + NULL); - err = _mv_video_writer_link_internals(handle); - if (MEDIA_VISION_ERROR_NONE != err) - { - LOGE("Failed to link internals"); - return err; - } + err = _mv_video_writer_link_internals(handle); + if (MEDIA_VISION_ERROR_NONE != err) { + LOGE("Failed to link internals"); + return err; + } - return err; + return err; } int mv_video_writer_write_frame( - mv_video_writer_h writer, - unsigned char *frame) + mv_video_writer_h writer, + unsigned char *frame) { - mv_video_writer_s *handle = 
NULL; - GstMapInfo info; - GstBuffer *buffer = NULL; - - if (writer == NULL || frame == NULL) - { - LOGE("NULL pointer passed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - handle = (mv_video_writer_s *) writer; - - buffer = gst_buffer_new_allocate(NULL, handle->buffer_size, NULL); - if (!buffer) - { - LOGE("Unable to allocate buffer for frame"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - LOGD("Copying input frame to buffer and pushing to appsrc"); - gst_buffer_map(buffer, &info, GST_MAP_READWRITE); - memcpy(info.data, frame, info.size); - gst_buffer_unmap(buffer, &info); - - if (GST_FLOW_OK != - gst_app_src_push_buffer(handle->appsrc, buffer)) - { - LOGE("Failed to push buffer to appsrc"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - return MEDIA_VISION_ERROR_NONE; + mv_video_writer_s *handle = NULL; + GstMapInfo info; + GstBuffer *buffer = NULL; + + if (writer == NULL || frame == NULL) { + LOGE("NULL pointer passed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + handle = (mv_video_writer_s *) writer; + + buffer = gst_buffer_new_allocate(NULL, handle->buffer_size, NULL); + if (!buffer) { + LOGE("Unable to allocate buffer for frame"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + LOGD("Copying input frame to buffer and pushing to appsrc"); + gst_buffer_map(buffer, &info, GST_MAP_READWRITE); + memcpy(info.data, frame, info.size); + gst_buffer_unmap(buffer, &info); + + if (GST_FLOW_OK != + gst_app_src_push_buffer(handle->appsrc, buffer)) { + LOGE("Failed to push buffer to appsrc"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + return MEDIA_VISION_ERROR_NONE; } /* Internal functions */ static int _mv_video_reader_create_internals( - mv_video_reader_s *reader) + mv_video_reader_s *reader) { - pthread_spin_init(&(reader->new_sample_cb_guard), PTHREAD_PROCESS_SHARED); - pthread_spin_init(&(reader->eos_cb_guard), PTHREAD_PROCESS_SHARED); - - reader->pl = gst_pipeline_new(NULL); - - reader->filesrc = 
gst_element_factory_make("filesrc", "filesrc"); - reader->decodebin = gst_element_factory_make("decodebin", "decoder"); - reader->videoconvert = gst_element_factory_make("videoconvert", "convert"); - reader->appsink = gst_element_factory_make("appsink", "appsink"); - - if ((!reader->pl) || - (!reader->filesrc) || - (!reader->decodebin) || - (!reader->videoconvert) || - (!reader->appsink)) - { - LOGE("Unable to create video read pipeline elements"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - gst_bin_add_many(GST_BIN(reader->pl), - reader->filesrc, - reader->decodebin, - reader->videoconvert, - reader->appsink, - NULL); - - return MEDIA_VISION_ERROR_NONE; + pthread_spin_init(&(reader->new_sample_cb_guard), PTHREAD_PROCESS_SHARED); + pthread_spin_init(&(reader->eos_cb_guard), PTHREAD_PROCESS_SHARED); + + reader->pl = gst_pipeline_new(NULL); + + reader->filesrc = gst_element_factory_make("filesrc", "filesrc"); + reader->decodebin = gst_element_factory_make("decodebin", "decoder"); + reader->videoconvert = gst_element_factory_make("videoconvert", "convert"); + reader->appsink = gst_element_factory_make("appsink", "appsink"); + + if ((!reader->pl) || + (!reader->filesrc) || + (!reader->decodebin) || + (!reader->videoconvert) || + (!reader->appsink)) { + LOGE("Unable to create video read pipeline elements"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + gst_bin_add_many(GST_BIN(reader->pl), + reader->filesrc, + reader->decodebin, + reader->videoconvert, + reader->appsink, + NULL); + + return MEDIA_VISION_ERROR_NONE; } static int _mv_video_reader_link_internals( - mv_video_reader_s *reader) + mv_video_reader_s *reader) { - GstCaps *caps = NULL; - - if (!gst_element_link_many(reader->filesrc, - reader->decodebin, - NULL)) - { - LOGE("Unable to link filesrc to decodebin"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - /* Decodebin pad will be linked during state change */ - g_signal_connect(reader->decodebin, - "pad-added", - 
G_CALLBACK(cb_newpad), - reader); - - if (!gst_element_link_many(reader->videoconvert, - reader->appsink, NULL)) - { - LOGE("Unable to link filesrc to decodebin"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - caps = gst_caps_new_simple("video/x-raw", - "format", G_TYPE_STRING, "RGB", - NULL); - - gst_app_sink_set_caps(GST_APP_SINK(reader->appsink), caps); - gst_caps_unref(caps); - - /* Configure appsink */ - gst_app_sink_set_emit_signals(GST_APP_SINK(reader->appsink), TRUE); - g_signal_connect(reader->appsink, - "new-sample", - G_CALLBACK(appsink_newsample), - reader); - g_signal_connect(reader->appsink, - "eos", - G_CALLBACK(appsink_eos), - reader); - g_object_set(G_OBJECT(reader->appsink), - "drop", TRUE, - "enable-last-sample", TRUE, - "sync", FALSE, - NULL); - - return MEDIA_VISION_ERROR_NONE; + GstCaps *caps = NULL; + + if (!gst_element_link_many(reader->filesrc, + reader->decodebin, + NULL)) { + LOGE("Unable to link filesrc to decodebin"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + /* Decodebin pad will be linked during state change */ + g_signal_connect(reader->decodebin, + "pad-added", + G_CALLBACK(cb_newpad), + reader); + + if (!gst_element_link_many(reader->videoconvert, + reader->appsink, NULL)) { + LOGE("Unable to link filesrc to decodebin"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + caps = gst_caps_new_simple("video/x-raw", + "format", G_TYPE_STRING, "RGB", + NULL); + + gst_app_sink_set_caps(GST_APP_SINK(reader->appsink), caps); + gst_caps_unref(caps); + + /* Configure appsink */ + gst_app_sink_set_emit_signals(GST_APP_SINK(reader->appsink), TRUE); + g_signal_connect(reader->appsink, + "new-sample", + G_CALLBACK(appsink_newsample), + reader); + g_signal_connect(reader->appsink, + "eos", + G_CALLBACK(appsink_eos), + reader); + g_object_set(G_OBJECT(reader->appsink), + "drop", TRUE, + "enable-last-sample", TRUE, + "sync", FALSE, + NULL); + + return MEDIA_VISION_ERROR_NONE; } static int _mv_video_reader_state_change( - 
mv_video_reader_s *reader, - GstState state) + mv_video_reader_s *reader, + GstState state) { - mv_video_reader_s *handle = (mv_video_reader_s *) reader; - GstStateChangeReturn state_ret = GST_STATE_CHANGE_FAILURE; - GstState pipeline_state = GST_STATE_NULL; - - state_ret = gst_element_set_state(handle->pl, - state); - - if (GST_STATE_CHANGE_FAILURE == state_ret) - { - LOGE("Set state failure"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - LOGI("Set state [%d], change return [%d]", - state, state_ret); - - state_ret = gst_element_get_state(handle->pl, - &pipeline_state, - NULL, - GST_CLOCK_TIME_NONE); - - if (GST_STATE_CHANGE_FAILURE == state_ret) - { - LOGE("get state failure"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - return MEDIA_VISION_ERROR_NONE; + mv_video_reader_s *handle = (mv_video_reader_s *) reader; + GstStateChangeReturn state_ret = GST_STATE_CHANGE_FAILURE; + GstState pipeline_state = GST_STATE_NULL; + + state_ret = gst_element_set_state(handle->pl, + state); + + if (GST_STATE_CHANGE_FAILURE == state_ret) { + LOGE("Set state failure"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + LOGI("Set state [%d], change return [%d]", + state, state_ret); + + state_ret = gst_element_get_state(handle->pl, + &pipeline_state, + NULL, + GST_CLOCK_TIME_NONE); + + if (GST_STATE_CHANGE_FAILURE == state_ret) { + LOGE("get state failure"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + return MEDIA_VISION_ERROR_NONE; } static int _mv_video_writer_create_internals( - mv_video_writer_s *writer) + mv_video_writer_s *writer) { - writer->pl = gst_pipeline_new(NULL); - - writer->appsrc = gst_element_factory_make("appsrc", "appsrc"); - writer->capsfilter = gst_element_factory_make("capsfilter", NULL); - writer->videoconvert = gst_element_factory_make("videoconvert", "videoconvert"); - writer->encoder = gst_element_factory_make("avenc_mpeg4", "encoder"); - writer->queue = gst_element_factory_make("queue", "queue"); - writer->muxer = 
gst_element_factory_make("avmux_avi", "muxer"); - writer->filesink = gst_element_factory_make("filesink", "filesink"); - - if ((!writer->pl) || - (!writer->appsrc) || - (!writer->capsfilter) || - (!writer->videoconvert) || - (!writer->encoder) || - (!writer->queue) || - (!writer->muxer) || - (!writer->filesink)) - { - LOGE("Unable to create video read pipeline elements\n"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - gst_bin_add_many(GST_BIN(writer->pl), - writer->appsrc, - writer->capsfilter, - writer->videoconvert, - writer->encoder, - writer->queue, - writer->muxer, - writer->filesink, - NULL); - - return MEDIA_VISION_ERROR_NONE; + writer->pl = gst_pipeline_new(NULL); + + writer->appsrc = gst_element_factory_make("appsrc", "appsrc"); + writer->capsfilter = gst_element_factory_make("capsfilter", NULL); + writer->videoconvert = gst_element_factory_make("videoconvert", "videoconvert"); + writer->encoder = gst_element_factory_make("avenc_mpeg4", "encoder"); + writer->queue = gst_element_factory_make("queue", "queue"); + writer->muxer = gst_element_factory_make("avmux_avi", "muxer"); + writer->filesink = gst_element_factory_make("filesink", "filesink"); + + if ((!writer->pl) || + (!writer->appsrc) || + (!writer->capsfilter) || + (!writer->videoconvert) || + (!writer->encoder) || + (!writer->queue) || + (!writer->muxer) || + (!writer->filesink)) { + LOGE("Unable to create video read pipeline elements\n"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + gst_bin_add_many(GST_BIN(writer->pl), + writer->appsrc, + writer->capsfilter, + writer->videoconvert, + writer->encoder, + writer->queue, + writer->muxer, + writer->filesink, + NULL); + + return MEDIA_VISION_ERROR_NONE; } static int _mv_video_writer_link_internals( - mv_video_writer_s *writer) + mv_video_writer_s *writer) { - GstVideoInfo vinfo; - GstCaps *caps = NULL; - char format[6] = {0}; - - /* Convert from mv_colorspace to GstVideoFormat */ - switch(writer->image_data.image_colorspace) - { - 
case(MEDIA_VISION_COLORSPACE_Y800): - strncpy(format, "GRAY8", 5); - break; - case(MEDIA_VISION_COLORSPACE_I420): - strncpy(format, "I420", 4); - break; - case(MEDIA_VISION_COLORSPACE_NV12): - strncpy(format, "NV12", 4); - break; - case(MEDIA_VISION_COLORSPACE_YV12): - strncpy(format, "YV12", 4); - break; - case(MEDIA_VISION_COLORSPACE_NV21): - strncpy(format, "NV21", 4); - break; - case(MEDIA_VISION_COLORSPACE_YUYV): - strncpy(format, "YUY2", 4); - break; - case(MEDIA_VISION_COLORSPACE_UYVY): - strncpy(format, "UYVY", 4); - break; - case(MEDIA_VISION_COLORSPACE_RGB888): - strncpy(format, "RGB", 3); - break; - case(MEDIA_VISION_COLORSPACE_RGBA): - strncpy(format, "RGBA", 4); - break; - default: - LOGE("Selected format %d is not supported", - writer->image_data.image_colorspace); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - caps = gst_caps_new_simple("video/x-raw", - "format", G_TYPE_STRING, format, - "width", G_TYPE_INT, writer->image_data.image_width, - "height", G_TYPE_INT, writer->image_data.image_height, - "framerate", GST_TYPE_FRACTION, writer->fps, 1, - NULL); - - if (NULL == caps) - { - LOGE("Failed to create new caps"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - /* This is the simpler way to get buffer size */ - if (!gst_video_info_from_caps(&vinfo, caps)) - { - LOGE("Unable to set buffer size"); - gst_caps_unref(caps); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - writer->buffer_size = vinfo.size; - - /* link appsrc and capsfilter */ - if ((!gst_element_link_filtered(writer->appsrc, - writer->capsfilter, - caps))) - { - LOGE("Failed to link appsrc to capsfilter"); - gst_caps_unref(caps); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - gst_caps_unref(caps); - - if (!gst_element_link_many(writer->capsfilter, - writer->videoconvert, - writer->encoder, - writer->queue, - writer->muxer, - writer->filesink, - NULL)) - { - LOGE("Unable to capsfilter to filesink"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - 
g_object_set(G_OBJECT(writer->appsrc), - "max-bytes", 0, - "blocksize", writer->buffer_size, - "stream-type", 0, - "format", GST_FORMAT_BYTES, - NULL); - - if (_mv_video_writer_state_change(writer, - GST_STATE_PLAYING)) - { - LOGE("Unable to change video writer state"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - return MEDIA_VISION_ERROR_NONE; + GstVideoInfo vinfo; + GstCaps *caps = NULL; + char format[6] = {0}; + + /* Convert from mv_colorspace to GstVideoFormat */ + switch (writer->image_data.image_colorspace) { + case(MEDIA_VISION_COLORSPACE_Y800): + strncpy(format, "GRAY8", 5); + break; + case(MEDIA_VISION_COLORSPACE_I420): + strncpy(format, "I420", 4); + break; + case(MEDIA_VISION_COLORSPACE_NV12): + strncpy(format, "NV12", 4); + break; + case(MEDIA_VISION_COLORSPACE_YV12): + strncpy(format, "YV12", 4); + break; + case(MEDIA_VISION_COLORSPACE_NV21): + strncpy(format, "NV21", 4); + break; + case(MEDIA_VISION_COLORSPACE_YUYV): + strncpy(format, "YUY2", 4); + break; + case(MEDIA_VISION_COLORSPACE_UYVY): + strncpy(format, "UYVY", 4); + break; + case(MEDIA_VISION_COLORSPACE_RGB888): + strncpy(format, "RGB", 3); + break; + case(MEDIA_VISION_COLORSPACE_RGBA): + strncpy(format, "RGBA", 4); + break; + default: + LOGE("Selected format %d is not supported", + writer->image_data.image_colorspace); + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } + + caps = gst_caps_new_simple("video/x-raw", + "format", G_TYPE_STRING, format, + "width", G_TYPE_INT, writer->image_data.image_width, + "height", G_TYPE_INT, writer->image_data.image_height, + "framerate", GST_TYPE_FRACTION, writer->fps, 1, + NULL); + + if (NULL == caps) { + LOGE("Failed to create new caps"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + /* This is the simpler way to get buffer size */ + if (!gst_video_info_from_caps(&vinfo, caps)) { + LOGE("Unable to set buffer size"); + gst_caps_unref(caps); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + writer->buffer_size = vinfo.size; + + 
/* link appsrc and capsfilter */ + if ((!gst_element_link_filtered(writer->appsrc, + writer->capsfilter, + caps))) { + LOGE("Failed to link appsrc to capsfilter"); + gst_caps_unref(caps); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + gst_caps_unref(caps); + + if (!gst_element_link_many(writer->capsfilter, + writer->videoconvert, + writer->encoder, + writer->queue, + writer->muxer, + writer->filesink, + NULL)) { + LOGE("Unable to capsfilter to filesink"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + g_object_set(G_OBJECT(writer->appsrc), + "max-bytes", 0, + "blocksize", writer->buffer_size, + "stream-type", 0, + "format", GST_FORMAT_BYTES, + NULL); + + if (_mv_video_writer_state_change(writer, + GST_STATE_PLAYING)) { + LOGE("Unable to change video writer state"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + return MEDIA_VISION_ERROR_NONE; } static int _mv_video_writer_state_change( - mv_video_writer_s *writer, - GstState state) + mv_video_writer_s *writer, + GstState state) { - mv_video_writer_s *handle = (mv_video_writer_s *) writer; - GstStateChangeReturn state_ret = GST_STATE_CHANGE_FAILURE; - GstState pipeline_state = GST_STATE_NULL; + mv_video_writer_s *handle = (mv_video_writer_s *) writer; + GstStateChangeReturn state_ret = GST_STATE_CHANGE_FAILURE; + GstState pipeline_state = GST_STATE_NULL; - state_ret = gst_element_set_state(handle->pl, - state); + state_ret = gst_element_set_state(handle->pl, + state); - if (GST_STATE_CHANGE_FAILURE == state_ret) - { - LOGE("Set state failure"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } + if (GST_STATE_CHANGE_FAILURE == state_ret) { + LOGE("Set state failure"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } - LOGI("Set state [%d], change return [%d]", - state, state_ret); + LOGI("Set state [%d], change return [%d]", + state, state_ret); - /* AppSrc can't go to PLAYING state before buffer is not pushed */ + /* AppSrc can't go to PLAYING state before buffer is not pushed */ - return 
MEDIA_VISION_ERROR_NONE; + return MEDIA_VISION_ERROR_NONE; } /* Callbacks */ static GstFlowReturn appsink_newsample( - GstAppSink *appsink, - gpointer user_data) + GstAppSink *appsink, + gpointer user_data) { - mv_video_reader_s *handle = NULL; - GstSample *sample = gst_app_sink_pull_sample(appsink); - - if (user_data == NULL) - { - LOGE("NULL pointer passed"); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - if (sample != NULL) - { - handle = (mv_video_reader_s *) user_data; - GstVideoInfo vinfo; - GstMapInfo info = GST_MAP_INFO_INIT; - GstBuffer *buf = gst_sample_get_buffer(sample); - GstCaps *caps = gst_sample_get_caps(sample); - image_data_s im_data; - char *buffer = NULL; - unsigned int buffer_size = 0; - - LOGD("Received sample from appsink"); - - /* map buffer */ - gst_buffer_map(buf, &info, GST_MAP_READ); - buffer = (char *) info.data; - - /* Fill image data */ - gst_video_info_from_caps(&vinfo, caps); - im_data.image_width = vinfo.width; - im_data.image_height = vinfo.height; - - /* Look to : - * http://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-base-libs/html/gst-plugins-base-libs-gstvideo.html#GstVideoFormat */ - switch(GST_VIDEO_FORMAT_INFO_FORMAT(vinfo.finfo)) - { - case(GST_VIDEO_FORMAT_GRAY8): - im_data.image_colorspace = MEDIA_VISION_COLORSPACE_Y800; - break; - case(GST_VIDEO_FORMAT_I420): - im_data.image_colorspace = MEDIA_VISION_COLORSPACE_I420; - break; - case(GST_VIDEO_FORMAT_NV12): - im_data.image_colorspace = MEDIA_VISION_COLORSPACE_NV12; - break; - case(GST_VIDEO_FORMAT_YV12): - im_data.image_colorspace = MEDIA_VISION_COLORSPACE_YV12; - break; - case(GST_VIDEO_FORMAT_NV21): - im_data.image_colorspace = MEDIA_VISION_COLORSPACE_NV21; - break; - case(GST_VIDEO_FORMAT_YUY2): - im_data.image_colorspace = MEDIA_VISION_COLORSPACE_YUYV; - break; - case(GST_VIDEO_FORMAT_UYVY): - im_data.image_colorspace = MEDIA_VISION_COLORSPACE_UYVY; - break; - case(GST_VIDEO_FORMAT_RGB): - im_data.image_colorspace = 
MEDIA_VISION_COLORSPACE_RGB888; - break; - case(GST_VIDEO_FORMAT_RGBA): - im_data.image_colorspace = MEDIA_VISION_COLORSPACE_RGBA; - break; - default: - LOGE("Video pixel format is not supported\n"); - - gst_buffer_unmap(buf, &info); - gst_sample_unref(sample); - return GST_FLOW_ERROR; - } - - pthread_spin_lock(&(handle->new_sample_cb_guard)); - if (handle->new_sample_cb != NULL) - { - handle->new_sample_cb( - buffer, - info.size, - im_data, - handle->new_sample_cb_user_data); - } - pthread_spin_unlock(&(handle->new_sample_cb_guard)); - - gst_buffer_unmap(buf, &info); - gst_sample_unref(sample); - } - else - { - LOGE("Failed to pull sample from appsink"); - return GST_FLOW_ERROR; - } - - return GST_FLOW_OK; + mv_video_reader_s *handle = NULL; + GstSample *sample = gst_app_sink_pull_sample(appsink); + + if (user_data == NULL) { + LOGE("NULL pointer passed"); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + + if (sample != NULL) { + handle = (mv_video_reader_s *) user_data; + GstVideoInfo vinfo; + GstMapInfo info = GST_MAP_INFO_INIT; + GstBuffer *buf = gst_sample_get_buffer(sample); + GstCaps *caps = gst_sample_get_caps(sample); + image_data_s im_data; + char *buffer = NULL; + unsigned int buffer_size = 0; + + LOGD("Received sample from appsink"); + + /* map buffer */ + gst_buffer_map(buf, &info, GST_MAP_READ); + buffer = (char *) info.data; + + /* Fill image data */ + gst_video_info_from_caps(&vinfo, caps); + im_data.image_width = vinfo.width; + im_data.image_height = vinfo.height; + + /* Look to : + * http://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-base-libs/html/gst-plugins-base-libs-gstvideo.html#GstVideoFormat */ + switch (GST_VIDEO_FORMAT_INFO_FORMAT(vinfo.finfo)) { + case(GST_VIDEO_FORMAT_GRAY8): + im_data.image_colorspace = MEDIA_VISION_COLORSPACE_Y800; + break; + case(GST_VIDEO_FORMAT_I420): + im_data.image_colorspace = MEDIA_VISION_COLORSPACE_I420; + break; + case(GST_VIDEO_FORMAT_NV12): + im_data.image_colorspace = 
MEDIA_VISION_COLORSPACE_NV12; + break; + case(GST_VIDEO_FORMAT_YV12): + im_data.image_colorspace = MEDIA_VISION_COLORSPACE_YV12; + break; + case(GST_VIDEO_FORMAT_NV21): + im_data.image_colorspace = MEDIA_VISION_COLORSPACE_NV21; + break; + case(GST_VIDEO_FORMAT_YUY2): + im_data.image_colorspace = MEDIA_VISION_COLORSPACE_YUYV; + break; + case(GST_VIDEO_FORMAT_UYVY): + im_data.image_colorspace = MEDIA_VISION_COLORSPACE_UYVY; + break; + case(GST_VIDEO_FORMAT_RGB): + im_data.image_colorspace = MEDIA_VISION_COLORSPACE_RGB888; + break; + case(GST_VIDEO_FORMAT_RGBA): + im_data.image_colorspace = MEDIA_VISION_COLORSPACE_RGBA; + break; + default: + LOGE("Video pixel format is not supported\n"); + + gst_buffer_unmap(buf, &info); + gst_sample_unref(sample); + return GST_FLOW_ERROR; + } + + pthread_spin_lock(&(handle->new_sample_cb_guard)); + if (handle->new_sample_cb != NULL) { + handle->new_sample_cb( + buffer, + info.size, + im_data, + handle->new_sample_cb_user_data); + } + pthread_spin_unlock(&(handle->new_sample_cb_guard)); + + gst_buffer_unmap(buf, &info); + gst_sample_unref(sample); + } else { + LOGE("Failed to pull sample from appsink"); + return GST_FLOW_ERROR; + } + + return GST_FLOW_OK; } static void appsink_eos( - GstAppSink *appsink, - gpointer user_data) + GstAppSink *appsink, + gpointer user_data) { - if (user_data == NULL) - { - LOGE("NULL pointer passed"); - return; - } - - mv_video_reader_s *handle = (mv_video_reader_s *) user_data; - - /* EOS callback to terminate reading */ - pthread_spin_lock(&(handle->eos_cb_guard)); - if (handle->eos_cb != NULL) - { - handle->eos_cb(handle->eos_cb_user_data); - } - pthread_spin_unlock(&(handle->eos_cb_guard)); + if (user_data == NULL) { + LOGE("NULL pointer passed"); + return; + } + + mv_video_reader_s *handle = (mv_video_reader_s *) user_data; + + /* EOS callback to terminate reading */ + pthread_spin_lock(&(handle->eos_cb_guard)); + if (handle->eos_cb != NULL) + handle->eos_cb(handle->eos_cb_user_data); + + 
pthread_spin_unlock(&(handle->eos_cb_guard)); } static void cb_newpad( - GstElement *decodebin, - GstPad *pad, - gpointer user_data) + GstElement *decodebin, + GstPad *pad, + gpointer user_data) { - mv_video_reader_s *reader = (mv_video_reader_s *) user_data; - GstStructure *str = NULL; - GstCaps *caps = NULL; - GstPad *video_pad = NULL; - - LOGI("Received pad from decodebin. Linking"); - video_pad = gst_element_get_static_pad(reader->videoconvert, "sink"); - if (GST_PAD_IS_LINKED(video_pad)) - { - LOGI("Already linked"); - g_object_unref(video_pad); - return; - } - - /* Check for pad is video */ - reader->caps = gst_pad_query_caps(pad, NULL); - str = gst_caps_get_structure(reader->caps, 0); - if (!g_strrstr(gst_structure_get_name(str), "video")) - { - LOGI("Not a video pad"); - gst_object_unref(video_pad); - return; - } - - gst_pad_link(pad, video_pad); - g_object_unref(video_pad); + mv_video_reader_s *reader = (mv_video_reader_s *) user_data; + GstStructure *str = NULL; + GstCaps *caps = NULL; + GstPad *video_pad = NULL; + + LOGI("Received pad from decodebin. 
Linking"); + video_pad = gst_element_get_static_pad(reader->videoconvert, "sink"); + if (GST_PAD_IS_LINKED(video_pad)) { + LOGI("Already linked"); + g_object_unref(video_pad); + return; + } + + /* Check for pad is video */ + reader->caps = gst_pad_query_caps(pad, NULL); + str = gst_caps_get_structure(reader->caps, 0); + if (!g_strrstr(gst_structure_get_name(str), "video")) { + LOGI("Not a video pad"); + gst_object_unref(video_pad); + return; + } + + gst_pad_link(pad, video_pad); + g_object_unref(video_pad); } diff --git a/test/testsuites/face/face_test_suite.c b/test/testsuites/face/face_test_suite.c index 6afbf5a..eb6665f 100644 --- a/test/testsuites/face/face_test_suite.c +++ b/test/testsuites/face/face_test_suite.c @@ -1068,9 +1068,8 @@ int perform_recognize() print_action_result(names[sel_opt - 1], err, notification_type); - if (sel_opt != 11) { + if (sel_opt != 11) sel_opt = 0; - } } return err; @@ -1821,9 +1820,8 @@ int perform_track() print_action_result(names[sel_opt - 1], err, notification_type); - if (sel_opt != 6) { + if (sel_opt != 6) sel_opt = 0; - } } return err; diff --git a/test/testsuites/surveillance/surveillance_test_suite.c b/test/testsuites/surveillance/surveillance_test_suite.c index 6057121..b89301a 100644 --- a/test/testsuites/surveillance/surveillance_test_suite.c +++ b/test/testsuites/surveillance/surveillance_test_suite.c @@ -137,7 +137,7 @@ int main(void) "Exit" }; - while(1) { + while (1) { char exit = 'n'; int sel_opt = show_menu("Select action:", options, names, 8); switch (sel_opt) { @@ -367,7 +367,7 @@ void subscribe_to_event() video_stream_id, NULL, detect_person_appeared_cb, - NULL); + NULL); } else if (strncmp(event_type, MV_SURVEILLANCE_EVENT_TYPE_PERSON_RECOGNIZED, MAX_EVENT_TYPE_LEN) == 0) { @@ -546,7 +546,7 @@ void unsubscribe_from_all_events() } unsubscribed_number > 0 ? 
- PRINT_S("%d event(s) was successfully unsubscribed", unsubscribed_number): + PRINT_S("%d event(s) was successfully unsubscribed", unsubscribed_number) : PRINT_Y("\nThere are no triggers can be unsubscribed."); } @@ -628,7 +628,7 @@ void turn_on_off_saving_to_image() save_results_to_image = !save_results_to_image; save_results_to_image ? - PRINT_Y("Save event results to image files ON."): + PRINT_Y("Save event results to image files ON.") : PRINT_Y("Save event results to image files OFF."); } @@ -656,8 +656,7 @@ void detect_person_appeared_cb( mv_source_get_height(source, &(image_data.image_height)) || mv_source_get_colorspace(source, &(image_data.image_colorspace)) || out_buffer == NULL || - buf_size == 0)) - { + buf_size == 0)) { PRINT_R("ERROR: Creating out image is impossible."); return; @@ -978,8 +977,7 @@ void person_recognized_cb( mv_source_get_height(source, &(image_data.image_height)) || mv_source_get_colorspace(source, &(image_data.image_colorspace)) || out_buffer == NULL || - buf_size == 0)) - { + buf_size == 0)) { PRINT_R("ERROR: Creating out image is impossible."); return; @@ -1087,8 +1085,7 @@ void movement_detected_cb( mv_source_get_height(source, &(image_data.image_height)) || mv_source_get_colorspace(source, &(image_data.image_colorspace)) || out_buffer == NULL || - buf_size == 0)) - { + buf_size == 0)) { PRINT_R("ERROR: Creating out image is impossible."); if (movement_regions != NULL) -- 2.7.4 From 97aa7477e98a4ae40576ad49fb30960b5947d66d Mon Sep 17 00:00:00 2001 From: SeokHoon Lee Date: Thu, 10 Dec 2015 16:06:44 +0900 Subject: [PATCH 10/16] Change path of saved/loaded model data Signed-off-by: SeokHoon Lee Change-Id: Ie7db6b92fede50dd083755b36e1e8ef5a29cff43 --- CMakeLists.txt | 2 +- include/mv_face.h | 32 ++++++++++++++++------ include/mv_image.h | 24 ++++++++++------ mv_face/face/src/FaceRecognitionModel.cpp | 16 +++-------- mv_face/face/src/FaceTrackingModel.cpp | 16 +++-------- mv_image/image/src/Recognition/ImageObject.cpp | 16 +++-------- 
mv_image/image/src/Tracking/ImageTrackingModel.cpp | 16 +++-------- packaging/capi-media-vision.spec | 3 +- 8 files changed, 58 insertions(+), 67 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2ec1ac7..49c3ed3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -83,7 +83,7 @@ INCLUDE_DIRECTORIES(${INC_DIR} ${INC_IMAGE} ${INC_SURVEILLANCE}) -SET(dependents "dlog capi-media-tool capi-system-info capi-appfw-application") +SET(dependents "dlog capi-media-tool capi-system-info") SET(pc_dependents "dlog") INCLUDE(FindPkgConfig) diff --git a/include/mv_face.h b/include/mv_face.h index 2b6d235..e0f765b 100644 --- a/include/mv_face.h +++ b/include/mv_face.h @@ -661,10 +661,12 @@ int mv_face_recognition_model_clone( * This examples can be removed by * @ref mv_face_recognition_model_reset() function if * it is needed to clear the memory. - * @remarks @a recognition_model is saved to the application's data directory. + * @remarks @a recognition_model is saved to the absolute path directory. + * Use @ref app_get_data_path for the private app storage path or + * @ref app_get_shared_data_path for the app shared storage path. * After model is saved to the file, it can be loaded from this file * by @ref mv_face_recognition_model_load() function. - * @param [in] file_name Name of the file to save the model + * @param [in] file_name Name of the path/file to save the model * @param [in] recognition_model The handle to the recognition model to be * saved to the file * @return @c 0 on success, otherwise a negative error value @@ -683,6 +685,8 @@ int mv_face_recognition_model_clone( * * @see mv_face_recognition_model_load() * @see mv_face_recognition_model_create() + * @see app_get_data_path() + * @see app_get_shared_data_path() */ int mv_face_recognition_model_save( const char *file_name, @@ -699,10 +703,12 @@ int mv_face_recognition_model_save( * @ref mv_face_recognition_model_reset() function. 
It * is recommended to clear the memory if learning algorithm doesn't * support reinforcement learning. - * @remarks @a recognition_model is loaded from the application's data directory. + * @remarks @a recognition_model is loaded from the absolute path directory. + * Use @ref app_get_data_path for the private app storage path or + * @ref app_get_shared_data_path for the app shared storage path. * @a recognition_model must be destroyed using * @ref mv_face_recognition_model_destroy(). - * @param [in] file_name Name of file to load the model + * @param [in] file_name Name of path/file to load the model * @param [out] recognition_model The handle to the recognition model * to be loaded from the file * @return @c 0 on success, otherwise a negative error value @@ -719,6 +725,8 @@ int mv_face_recognition_model_save( * * @see mv_face_recognition_model_save() * @see mv_face_recognition_model_destroy() + * @see app_get_data_path() + * @see app_get_shared_data_path() */ int mv_face_recognition_model_load( const char *file_name, @@ -1033,10 +1041,12 @@ int mv_face_tracking_model_clone( * @brief Calls this method to save tracking model to the file. * * @since_tizen 3.0 - * @remarks @ tracking_model is saved to the application's data directory. + * @remarks @a tracking_model is saved to the absolute path directory. + * Use @ref app_get_data_path for the private app storage path or + * @ref app_get_shared_data_path for the app shared storage path. * After model is saved to the file, it can be loaded from this file * with @ref mv_face_tracking_model_load() function. 
- * @param [in] file_name Name of the file to save the model + * @param [in] file_name Name of the path/file to save the model * @param [in] tracking_model The handle to the tracking model to be * saved to the file * @return @c 0 on success, otherwise a negative error value @@ -1055,6 +1065,8 @@ int mv_face_tracking_model_clone( * * @see mv_face_tracking_model_load() * @see mv_face_tracking_model_create() + * @see app_get_data_path() + * @see app_get_shared_data_path() */ int mv_face_tracking_model_save( const char *file_name, @@ -1064,10 +1076,12 @@ int mv_face_tracking_model_save( * @brief Calls this method to load a tracking model from file. * * @since_tizen 3.0 - * @remarks @a tracking_model is loaded from the application's data directory. + * @remarks @a tracking_model is loaded from the absolute path directory. + * Use @ref app_get_data_path for the private app storage path or + * @ref app_get_shared_data_path for the app shared storage path. * @a tracking_model must be destroyed using * @ref mv_face_tracking_model_destroy. - * @param [in] file_name Name of file to load the model + * @param [in] file_name Name of path/file to load the model * @param [out] tracking_model The handle to the tracking model to be * loaded from file * @return @c 0 on success, otherwise a negative error value @@ -1089,6 +1103,8 @@ int mv_face_tracking_model_save( * * @see mv_face_tracking_model_save() * @see mv_face_tracking_model_destroy() + * @see app_get_data_path() + * @see app_get_shared_data_path() */ int mv_face_tracking_model_load( const char *file_name, diff --git a/include/mv_image.h b/include/mv_image.h index 5efb9b6..a1a3d34 100644 --- a/include/mv_image.h +++ b/include/mv_image.h @@ -617,8 +617,10 @@ int mv_image_object_clone( * @brief Saves the image object. * * @since_tizen 3.0 - * @remarks @a image_object is saved to the application's data directory. 
- * @param [in] file_name Name of the file to save the image object + * @remarks @a image_object is saved to the absolute path directory. + * Use @ref app_get_data_path for the private app storage path or + * @ref app_get_shared_data_path for the app shared storage path. + * @param [in] file_name Name of the file to path/save the image object * @param [in] image_object The handle to the image object which will be saved * @return @c 0 on success, otherwise a negative error value * @retval #MEDIA_VISION_ERROR_NONE Successful @@ -642,10 +644,12 @@ int mv_image_object_save( * @brief Loads an image object from the file. * * @since_tizen 3.0 - * @remarks @a image_object is loaded from the application's data directory. + * @remarks @a image_object is loaded from the absolute path directory. + * Use @ref app_get_data_path for the private app storage path or + * @ref app_get_shared_data_path for the app shared storage path. * @a image_object must be destroyed using * @ref mv_image_object_destroy(). - * @param [in] file_name Name of file to load the image object + * @param [in] file_name Name of path/file to load the image object * @param [out] image_object The handle to the image object which will be * filled * @return @c 0 on success, otherwise a negative error value @@ -799,8 +803,10 @@ int mv_image_tracking_model_clone( * @brief Saves the image tracking model. * * @since_tizen 3.0 - * @remarks @a image_tracking_model is saved to the application's data directory. - * @param [in] file_name Name of file to save the model + * @remarks @a image_tracking_model is saved to the absolute path directory. + * Use @ref app_get_data_path for the private app storage path or + * @ref app_get_shared_data_path for the app shared storage path. 
+ * @param [in] file_name Name of path/file to save the model * @param [in] image_tracking_model The handle to the image tracking model * to be saved * @return @c 0 on success, otherwise a negative error value @@ -826,10 +832,12 @@ int mv_image_tracking_model_save( * @brief Loads an image tracking model from the file. * * @since_tizen 3.0 - * @remarks @a image_tracking_model is loaded from the application's data directory. + * @remarks @a image_tracking_model is loaded from the absolute path directory. + * Use @ref app_get_data_path for the private app storage path or + * @ref app_get_shared_data_path for the app shared storage path. * @a image_tracking_model must be destroyed using * @ref mv_image_tracking_model_destroy. - * @param [in] file_name Name of file to load model + * @param [in] file_name Name of path/file to load model * @param [out] image_tracking_model The handle to the image tracking * model to be filled * @return @c 0 on success, otherwise a negative error value diff --git a/mv_face/face/src/FaceRecognitionModel.cpp b/mv_face/face/src/FaceRecognitionModel.cpp index 394e067..7a42cff 100644 --- a/mv_face/face/src/FaceRecognitionModel.cpp +++ b/mv_face/face/src/FaceRecognitionModel.cpp @@ -19,8 +19,6 @@ #include "mv_private.h" #include "mv_common.h" -#include - #include #include @@ -195,11 +193,8 @@ int FaceRecognitionModel::save(const std::string& fileName) if (!m_recognizer.empty()) { std::string filePath; - char *cPath = app_get_data_path(); - if (NULL == cPath) - filePath = fileName; - else - filePath = std::string(cPath) + fileName; + + filePath = fileName; std::string prefixPath = filePath.substr(0, filePath.find_last_of('/')); LOGD("prefixPath: %s", prefixPath.c_str()); @@ -251,11 +246,8 @@ int FaceRecognitionModel::save(const std::string& fileName) int FaceRecognitionModel::load(const std::string& fileName) { std::string filePath; - char *cPath = app_get_data_path(); - if (NULL == cPath) - filePath = fileName; - else - filePath = 
std::string(cPath) + fileName; + + filePath = fileName; if (access(filePath.c_str(), F_OK)) { LOGE("Can't load face recognition model. File[%s] doesn't existed.", filePath.c_str()); diff --git a/mv_face/face/src/FaceTrackingModel.cpp b/mv_face/face/src/FaceTrackingModel.cpp index 5feeb2a..cebbb69 100644 --- a/mv_face/face/src/FaceTrackingModel.cpp +++ b/mv_face/face/src/FaceTrackingModel.cpp @@ -19,8 +19,6 @@ #include "mv_private.h" #include "mv_common.h" -#include - #include namespace MediaVision { @@ -74,11 +72,8 @@ int FaceTrackingModel::save(const std::string& fileName) } std::string filePath; - char *cPath = app_get_data_path(); - if (NULL == cPath) - filePath = fileName; - else - filePath = std::string(cPath) + fileName; + + filePath = fileName; std::string prefixPath = filePath.substr(0, filePath.find_last_of('/')); LOGD("prefixPath: %s", prefixPath.c_str()); @@ -111,11 +106,8 @@ int FaceTrackingModel::save(const std::string& fileName) int FaceTrackingModel::load(const std::string& fileName) { std::string filePath; - char *cPath = app_get_data_path(); - if (NULL == cPath) - filePath = fileName; - else - filePath = std::string(cPath) + fileName; + + filePath = fileName; if (access(filePath.c_str(), F_OK)) { LOGE("Can't load face tracking model. 
File[%s] doesn't existed.", filePath.c_str()); diff --git a/mv_image/image/src/Recognition/ImageObject.cpp b/mv_image/image/src/Recognition/ImageObject.cpp index ac6569f..d9f99af 100644 --- a/mv_image/image/src/Recognition/ImageObject.cpp +++ b/mv_image/image/src/Recognition/ImageObject.cpp @@ -25,8 +25,6 @@ #include "mv_private.h" #include "mv_common.h" -#include - #include #include @@ -180,11 +178,8 @@ bool ImageObject::getLabel(int& label) const int ImageObject::save(const char *fileName) const { std::string filePath; - char *cPath = app_get_data_path(); - if (NULL == cPath) - filePath = fileName; - else - filePath = std::string(cPath) + std::string(fileName); + + filePath = fileName; std::string prefixPath = filePath.substr(0, filePath.find_last_of('/')); LOGD("prefixPath: %s", prefixPath.c_str()); @@ -216,11 +211,8 @@ int ImageObject::save(const char *fileName) const int ImageObject::load(const char *fileName) { std::string filePath; - char *cPath = app_get_data_path(); - if (NULL == cPath) - filePath = fileName; - else - filePath = std::string(cPath) + std::string(fileName); + + filePath = fileName; if (access(filePath.c_str(), F_OK)) { LOGE("Can't load image object model. 
Path[%s] doesn't existed.", filePath.c_str()); diff --git a/mv_image/image/src/Tracking/ImageTrackingModel.cpp b/mv_image/image/src/Tracking/ImageTrackingModel.cpp index 6240ea8..b9a89ce 100644 --- a/mv_image/image/src/Tracking/ImageTrackingModel.cpp +++ b/mv_image/image/src/Tracking/ImageTrackingModel.cpp @@ -25,8 +25,6 @@ #include "mv_private.h" #include "mv_common.h" -#include - #include #include @@ -223,11 +221,8 @@ ImageTrackingModel& ImageTrackingModel::operator=(const ImageTrackingModel& copy int ImageTrackingModel::save(const char *filepath) const { std::string filePath; - char *cPath = app_get_data_path(); - if (NULL == cPath) - filePath = std::string(filepath); - else - filePath = std::string(cPath) + std::string(filepath); + + filePath = std::string(filepath); std::string prefixPath = filePath.substr(0, filePath.find_last_of('/')); LOGD("prefixPath: %s", prefixPath.c_str()); @@ -258,11 +253,8 @@ int ImageTrackingModel::save(const char *filepath) const int ImageTrackingModel::load(const char *filepath) { std::string filePath; - char *cPath = app_get_data_path(); - if (NULL == cPath) - filePath = std::string(filepath); - else - filePath = std::string(cPath) + std::string(filepath); + + filePath = std::string(filepath); if (access(filePath.c_str(),F_OK)) { LOGE("Can't load tracking model. 
Path[%s] doesn't existed.", filepath); diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index 11304f8..32d97bc 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,13 +1,12 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.3.3 +Version: 0.3.4 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 Source0: %{name}-%{version}.tar.gz BuildRequires: cmake BuildRequires: pkgconfig(capi-media-tool) -BuildRequires: pkgconfig(capi-appfw-application) BuildRequires: pkgconfig(libtbm) BuildRequires: pkgconfig(dlog) BuildRequires: pkgconfig(capi-system-info) -- 2.7.4 From 61ac1bf60cd0151af2b1d7e259b382ffd3c87ec9 Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Mon, 28 Dec 2015 10:08:17 +0900 Subject: [PATCH 11/16] Fixed incompatibility heap free Change-Id: I59ae33f87e76a85769013ff7c92b200b447e0073 Signed-off-by: Tae-Young Chung --- mv_face/face/src/mv_face_open.cpp | 2 +- mv_surveillance/surveillance/src/EventTrigger.cpp | 9 ++++++--- packaging/capi-media-vision.spec | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/mv_face/face/src/mv_face_open.cpp b/mv_face/face/src/mv_face_open.cpp index 0412f85..917924e 100644 --- a/mv_face/face/src/mv_face_open.cpp +++ b/mv_face/face/src/mv_face_open.cpp @@ -726,7 +726,7 @@ int mv_face_recognition_model_query_labels_open( const std::set& learnedLabels = pRecModel->getFaceLabels(); *number_of_labels = learnedLabels.size(); - (*labels) = new int[*number_of_labels]; + (*labels) = (int*)malloc(sizeof(int) * (*number_of_labels)); std::set::const_iterator it = learnedLabels.begin(); int i = 0; diff --git a/mv_surveillance/surveillance/src/EventTrigger.cpp b/mv_surveillance/surveillance/src/EventTrigger.cpp index d6b4cec..c2fec7c 100644 --- a/mv_surveillance/surveillance/src/EventTrigger.cpp +++ b/mv_surveillance/surveillance/src/EventTrigger.cpp @@ -144,8 +144,10 @@ int 
EventTrigger::applyROIToImage( &maskBuffer); if (error != MEDIA_VISION_ERROR_NONE || maskBuffer == NULL) { - if (maskBuffer != NULL) - delete maskBuffer; + if (maskBuffer != NULL) { + free(maskBuffer); + maskBuffer = NULL; + } LOGE("Getting mask buffer failed."); return error; @@ -159,7 +161,8 @@ int EventTrigger::applyROIToImage( imageWidth, image); - delete maskBuffer; + free(maskBuffer); + maskBuffer = NULL; if (error != MEDIA_VISION_ERROR_NONE) { LOGE("Applying mask buffer failed."); diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index 32d97bc..d7261c8 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.3.4 +Version: 0.3.5 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 -- 2.7.4 From efbda6c1245893a928180ea573bd4222bc730e25 Mon Sep 17 00:00:00 2001 From: SeokHoon Lee Date: Mon, 28 Dec 2015 11:12:18 +0900 Subject: [PATCH 12/16] modify feature link and refactoring Signed-off-by: SeokHoon Lee Change-Id: I3021c9b6081812f178d5ff78ad92387b4cad0763 --- CMakeLists.txt | 6 ++---- doc/mediavision_doc.h | 19 ++++++++----------- media-vision-config.json | 3 ++- 3 files changed, 12 insertions(+), 16 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 49c3ed3..5898b98 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -9,8 +9,6 @@ SET(PREFIX ${CMAKE_INSTALL_PREFIX}) # Configure for porting layer: -# Configure for porting layer: - option(MEDIA_VISION_BARCODE_DETECTOR_LICENSE_PORT "Turn on building of licensed port of the barcode detecting module (if OFF - open port will be built)." 
OFF) option(MEDIA_VISION_BARCODE_GENERATOR_LICENSE_PORT @@ -117,12 +115,12 @@ ADD_SUBDIRECTORY(mv_surveillance) aux_source_directory(src SOURCES) ADD_LIBRARY(${fw_name} SHARED ${SOURCES}) -TARGET_LINK_LIBRARIES(${fw_name} ${MV_COMMON_LIB_NAME} +TARGET_LINK_LIBRARIES(${fw_name} ${${fw_name}_LDFLAGS} + ${MV_COMMON_LIB_NAME} ${MV_BARCODE_DETECTOR_LIB_NAME} ${MV_BARCODE_GENERATOR_LIB_NAME} ${MV_IMAGE_LIB_NAME} ${MV_FACE_LIB_NAME} - ${${fw_name}_LDFLAGS} ${MV_SURVEILLANCE_LIB_NAME}) SET_TARGET_PROPERTIES(${fw_name} diff --git a/doc/mediavision_doc.h b/doc/mediavision_doc.h index 8402b85..3458f3c 100644 --- a/doc/mediavision_doc.h +++ b/doc/mediavision_doc.h @@ -70,7 +70,7 @@ * features, please define the features in your manifest file using the manifest * editor in the SDK.\n * More details on featuring your application can be found from - * + * * Feature Element. * * @@ -80,8 +80,6 @@ * @section CAPI_MEDIA_VISION_FACE_MODULE_HEADER Required Header * \#include * - * @section CAPI_MEDIA_VISION_FACE_MODULE_OVERVIEW Overview - * * @section CAPI_MEDIA_VISION_FACE_MODULE_FEATURE Related Features * This API is related with the following features:\n * - http://tizen.org/feature/vision.face_recognition\n @@ -95,10 +93,11 @@ * features, please define the features in your manifest file using the manifest * editor in the SDK.\n * More details on featuring your application can be found from - * + * * Feature Element. * * + * @section CAPI_MEDIA_VISION_FACE_MODULE_OVERVIEW Overview * @ref CAPI_MEDIA_VISION_FACE_MODULE contains @ref mv_face_detect() function * to detect faces on @ref mv_source_h, and @ref mv_face_detected_cb callback * to process detected faces. 
Also it contains @ref mv_face_recognize() function @@ -128,7 +127,7 @@ * function can be used.\n * Module contains function * @ref mv_face_track() which performs tracking on @ref mv_source_h for - * @ref mv_face_tracking_model_h and @ref mv_face_on_tracked_cb which process + * @ref mv_face_tracking_model_h and @ref mv_face_tracked_cb which process * tracked face. Tracking model should be created with * @ref mv_face_tracking_model_create() and destroyed with * @ref mv_face_tracking_model_destroy(). Tracking model should be prepared @@ -152,8 +151,6 @@ * @section CCAPI_MEDIA_VISION_IMAGE_MODULE_HEADER Required Header * \#include * - * @section CAPI_MEDIA_VISION_IMAGE_MODULE_OVERVIEW Overview - * * @section CAPI_MEDIA_VISION_IMAGE_MODULE_FEATURE Related Features * This API is related with the following features:\n * - http://tizen.org/feature/vision.image_recognition\n @@ -167,10 +164,11 @@ * features, please define the features in your manifest file using the manifest * editor in the SDK.\n * More details on featuring your application can be found from - * + * * Feature Element. * * + * @section CAPI_MEDIA_VISION_IMAGE_MODULE_OVERVIEW Overview * @ref CAPI_MEDIA_VISION_IMAGE_MODULE contains @ref mv_image_recognize() * function to recognize images on @ref mv_source_h, and @ref * mv_image_recognized_cb callback to process recognition result. Module @@ -202,8 +200,6 @@ * @section CAPI_MEDIA_VISION_BARCODE_MODULE_HEADER Required Header * \#include * - * @section CAPI_MEDIA_VISION_BARCODE_MODULE_OVERVIEW Overview - * * @section CAPI_MEDIA_VISION_BARCODE_MODULE_FEATURE Related Features * This API is related with the following features:\n * - http://tizen.org/feature/vision.barcode_detection\n @@ -218,10 +214,11 @@ * features, please define the features in your manifest file using the manifest * editor in the SDK.\n * More details on featuring your application can be found from - * + * * Feature Element. 
* * + * @section CAPI_MEDIA_VISION_BARCODE_MODULE_OVERVIEW Overview * @ref CAPI_MEDIA_VISION_BARCODE_MODULE contains two submodules:\n * * Detection submodule, * * Generation submodule.\n diff --git a/media-vision-config.json b/media-vision-config.json index 207e575..2830a12 100644 --- a/media-vision-config.json +++ b/media-vision-config.json @@ -36,7 +36,8 @@ "type" : "integer", "value" : -1 }, - { "name" : "MV_BARCODE_GENERATE_ATTR_TEXT", + { + "name" : "MV_BARCODE_GENERATE_ATTR_TEXT", "type" : "integer", "value" : 0 }, -- 2.7.4 From 68fee1fd89593908e8652e792fb6eba2dadcb8de Mon Sep 17 00:00:00 2001 From: SeokHoon Lee Date: Thu, 14 Jan 2016 15:07:17 +0900 Subject: [PATCH 13/16] - Set default value for TEXT_ATTR in barcode generate - Fix parameter for draw_rectangle_on_buffer in test suite Signed-off-by: SeokHoon Lee Change-Id: I8c139164bf9b9c74e84489879575594e0f68d4da --- .../src/mv_barcode_generate_open.cpp | 19 +++++++++++-------- packaging/capi-media-vision.spec | 2 +- test/testsuites/barcode/barcode_test_suite.c | 4 ++-- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp b/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp index 44c68b2..444ceb3 100644 --- a/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp +++ b/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp @@ -316,15 +316,18 @@ int mv_barcode_generate_image_open( } int showText = 0; - error = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_GENERATE_ATTR_TEXT", &showText); - if (error != MEDIA_VISION_ERROR_NONE) { - LOGW("mv_engine_config_get_int_attribute failed"); - return error; - } - if (showText == BARCODE_GEN_TEXT_VISIBLE && type == MV_BARCODE_QR) { - LOGW("QR code generation with visible text is not supported"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; + if (engine_cfg != NULL) { + error = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_GENERATE_ATTR_TEXT", 
&showText); + if (error != MEDIA_VISION_ERROR_NONE) { + LOGW("mv_engine_config_get_int_attribute failed"); + return error; + } + + if (showText == BARCODE_GEN_TEXT_VISIBLE && type == MV_BARCODE_QR) { + LOGW("QR code generation with visible text is not supported"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } } error = BarcodeGenerator::generateBarcodeToImage( diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index d7261c8..d53e0ba 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.3.5 +Version: 0.3.6 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 diff --git a/test/testsuites/barcode/barcode_test_suite.c b/test/testsuites/barcode/barcode_test_suite.c index 52f845f..1c69eac 100644 --- a/test/testsuites/barcode/barcode_test_suite.c +++ b/test/testsuites/barcode/barcode_test_suite.c @@ -357,15 +357,15 @@ void barcode_detected_cb( continue; } - const int rectangle_thickness = 6; const int drawing_color[] = {255, 0, 0}; + if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer( minX, minY, maxX, maxY, + 6, drawing_color, - rectangle_thickness, &image_data, draw_buffer)) { continue; -- 2.7.4 From 78a087a1bf3dd81011b68b976fe560d3ebf9424f Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Mon, 1 Feb 2016 14:05:01 +0900 Subject: [PATCH 14/16] Remove libjpeg-turbo and libjpeg-turbo-devel dependency Change-Id: Ic9206d3813ab1378ac64057780cc44ec5a8815a4 Signed-off-by: Tae-Young Chung --- mv_common/CMakeLists.txt | 2 +- packaging/capi-media-vision.spec | 4 +- test/testsuites/common/image_helper/CMakeLists.txt | 2 +- .../common/image_helper/include/ImageHelper.h | 33 ---------------- .../common/image_helper/include/image_helper.h | 33 ---------------- .../common/image_helper/src/ImageHelper.cpp | 44 ---------------------- .../common/image_helper/src/image_helper.cpp | 14 ------- 7 
files changed, 3 insertions(+), 129 deletions(-) diff --git a/mv_common/CMakeLists.txt b/mv_common/CMakeLists.txt index db991e8..05e8310 100644 --- a/mv_common/CMakeLists.txt +++ b/mv_common/CMakeLists.txt @@ -34,6 +34,6 @@ else() add_library(${PROJECT_NAME} SHARED ${MV_COMMON_INCLUDE_LIST} ${MV_COMMON_SRC_LIST}) endif() -TARGET_LINK_LIBRARIES(${MV_COMMON_LIB_NAME} jpeg ${OpenCV_LIBS} capi-media-tool tbm json-glib-1.0) +TARGET_LINK_LIBRARIES(${MV_COMMON_LIB_NAME} ${OpenCV_LIBS} capi-media-tool tbm json-glib-1.0) INSTALL(TARGETS ${PROJECT_NAME} DESTINATION ${LIB_INSTALL_DIR}) diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index d53e0ba..f2c445a 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,7 +1,7 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API Version: 0.3.6 -Release: 0 +Release: 1 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 Source0: %{name}-%{version}.tar.gz @@ -18,8 +18,6 @@ BuildRequires: zint BuildRequires: zint-devel BuildRequires: pkgconfig(json-glib-1.0) BuildRequires: dlogutil -BuildRequires: libjpeg-turbo -BuildRequires: libjpeg-turbo-devel BuildRequires: pkgconfig(libavcodec) BuildRequires: pkgconfig(libavformat) BuildRequires: pkgconfig(libswscale) diff --git a/test/testsuites/common/image_helper/CMakeLists.txt b/test/testsuites/common/image_helper/CMakeLists.txt index a89a88a..523bef7 100644 --- a/test/testsuites/common/image_helper/CMakeLists.txt +++ b/test/testsuites/common/image_helper/CMakeLists.txt @@ -32,6 +32,6 @@ else() add_library(${PROJECT_NAME} SHARED ${MV_IMAGE_HELPER_INCLUDE_LIST} ${MV_IMAGE_HELPER_SRC_LIST}) endif() -TARGET_LINK_LIBRARIES(${PROJECT_NAME} jpeg ${OpenCV_LIBS}) +TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${OpenCV_LIBS}) INSTALL(TARGETS ${PROJECT_NAME} DESTINATION ${LIB_INSTALL_DIR}) diff --git a/test/testsuites/common/image_helper/include/ImageHelper.h 
b/test/testsuites/common/image_helper/include/ImageHelper.h index 179484f..43109e4 100644 --- a/test/testsuites/common/image_helper/include/ImageHelper.h +++ b/test/testsuites/common/image_helper/include/ImageHelper.h @@ -22,8 +22,6 @@ #include #include -#include - /** * @file ImageHelper.h * @brief ImageHelper class definition. @@ -173,37 +171,6 @@ public: unsigned char *pDataBuffer); /** - * @brief Convers libjpeg colorspace to the Tizen 'image util' colorspace. - * - * @since_tizen 3.0 - * @param [in] inColorspace The libjpeg colorspace to be converted. - * @param [out] pOutColorspace The Tizen 'image util' colorspace that - * will be obtained after conversion - * @return @c 0 on success, otherwise a negative error value - * - * @see ImageHelper::convertMVColorspaceToJpeglibColorspace() - */ - static int convertJpeglibColorspaceToMVColorspace( - J_COLOR_SPACE inColorspace, - mv_colorspace_e *pOutColorspace); - - /** - * @brief Convers libjpeg colorspace to the Tizen image util colorspace. - * - * @since_tizen 3.0 - * @param [in] inColorspace The Tizen 'image util' colorspace to be - * converted - * @param [out] pOutColorspace The libjpeg colorspace that will be - * obtained after conversion - * @return @c 0 on success, otherwise a negative error value - * - * @see ImageHelper::convertJpeglibColorspaceToMVColorspace() - */ - static int convertMVColorspaceToJpeglibColorspace( - mv_colorspace_e inColorspace, - J_COLOR_SPACE *pOutColorspace); - - /** * @brief Converts image data to the image data of RGB888 colorspace. 
* * @since_tizen 3.0 diff --git a/test/testsuites/common/image_helper/include/image_helper.h b/test/testsuites/common/image_helper/include/image_helper.h index 480c883..f74c163 100644 --- a/test/testsuites/common/image_helper/include/image_helper.h +++ b/test/testsuites/common/image_helper/include/image_helper.h @@ -31,8 +31,6 @@ extern "C" { #include #endif -#include - /** * @file image_helper.h * @brief Helper functions that provides set of useful methods for image management @@ -155,37 +153,6 @@ int draw_quadrangle_on_buffer( unsigned char *data_buffer); /** - * @brief Converts libjpeg colorspace to the Tizen 'image util' colorspace. - * - * @since_tizen 3.0 - * @param [in] in_colorspace The libjpeg colorspace to be converted. - * @param [out] out_colorspace The Tizen 'image util' colorspace that - * will be obtained after conversion - * @return @c 0 on success, otherwise a negative error value - * - * @see convert_mv_colorspace_to_jpeglib_colorspace() - */ -int convert_jpeglib_colorspace_to_mv_colorspace( - J_COLOR_SPACE in_colorspace, - mv_colorspace_e *out_colorspace); - -/** - * @brief Converts libjpeg colorspace to the Tizen image util colorspace. - * - * @since_tizen 3.0 - * @param [in] in_colorspace The Tizen 'image util' colorspace to be - * converted - * @param [out] out_colorspace The libjpeg colorspace that will be - * obtained after conversion - * @return @c 0 on success, otherwise a negative error value - * - * @see convert_jpeglib_colorspace_to_mv_colorspace() - */ -int convert_mv_colorspace_to_jpeglib_colorspace( - mv_colorspace_e in_colorspace, - J_COLOR_SPACE *out_colorspace); - -/** * @brief Converts image data to the image data of RGB888 colorspace. 
* * @since_tizen 3.0 diff --git a/test/testsuites/common/image_helper/src/ImageHelper.cpp b/test/testsuites/common/image_helper/src/ImageHelper.cpp index 67ac386..d362316 100644 --- a/test/testsuites/common/image_helper/src/ImageHelper.cpp +++ b/test/testsuites/common/image_helper/src/ImageHelper.cpp @@ -284,50 +284,6 @@ int ImageHelper::drawQuadrangleOnBuffer( return MEDIA_VISION_ERROR_NONE; } -int ImageHelper::convertJpeglibColorspaceToMVColorspace( - J_COLOR_SPACE inColorspace, - mv_colorspace_e *pOutColorspace) -{ - if (pOutColorspace == NULL) - { - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } - - switch(inColorspace) - { - case JCS_GRAYSCALE: - (*pOutColorspace) = MEDIA_VISION_COLORSPACE_Y800; - return MEDIA_VISION_ERROR_NONE; - case JCS_RGB: - (*pOutColorspace) = MEDIA_VISION_COLORSPACE_RGB888; - return MEDIA_VISION_ERROR_NONE; - default: - return MEDIA_VISION_ERROR_NOT_SUPPORTED; - } - - return MEDIA_VISION_ERROR_NONE; -} - -int ImageHelper::convertMVColorspaceToJpeglibColorspace( - mv_colorspace_e inColorspace, - J_COLOR_SPACE *pOutColorspace) -{ - // todo: support more colorspaces: - switch (inColorspace) - { - case MEDIA_VISION_COLORSPACE_Y800: - (*pOutColorspace) = JCS_GRAYSCALE; - return MEDIA_VISION_ERROR_NONE; - case MEDIA_VISION_COLORSPACE_RGB888: - (*pOutColorspace) = JCS_RGB; - return MEDIA_VISION_ERROR_NONE; - default: - return MEDIA_VISION_ERROR_NOT_SUPPORTED; - } - - return MEDIA_VISION_ERROR_NOT_SUPPORTED; -} - int ImageHelper::convertBufferToRGB888( const unsigned char *pInBuffer, const ImageData& imageData, diff --git a/test/testsuites/common/image_helper/src/image_helper.cpp b/test/testsuites/common/image_helper/src/image_helper.cpp index ecebee1..825cd7a 100644 --- a/test/testsuites/common/image_helper/src/image_helper.cpp +++ b/test/testsuites/common/image_helper/src/image_helper.cpp @@ -141,20 +141,6 @@ int draw_quadrangle_on_buffer( data_buffer); } -int convert_jpeglib_colorspace_to_mv_colorspace( - J_COLOR_SPACE in_colorspace, - 
mv_colorspace_e *out_colorspace) -{ - return ImageHelper::convertJpeglibColorspaceToMVColorspace(in_colorspace, out_colorspace); -} - -int convert_mv_colorspace_to_jpeglib_colorspace( - mv_colorspace_e in_colorspace, - J_COLOR_SPACE *out_colorspace) -{ - return ImageHelper::convertMVColorspaceToJpeglibColorspace(in_colorspace, out_colorspace); -} - int convert_buffer_to_RGB888( const unsigned char *in_buffer, const image_data_s *image_data, -- 2.7.4 From 909d9c2cf026e129a1b4bb62487f37fe394e87af Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Wed, 3 Feb 2016 17:07:46 +0900 Subject: [PATCH 15/16] [face] Fixed error of tracking with a video file in mv_face_test_suite Change-Id: I0d16415d51ea5842bac4042e67b87d6fb1475c8f Signed-off-by: Tae-Young Chung --- packaging/capi-media-vision.spec | 2 +- .../common/video_helper/mv_video_helper.c | 51 +++++++++++++++++++--- 2 files changed, 46 insertions(+), 7 deletions(-) diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index f2c445a..a6b0f47 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,7 +1,7 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API Version: 0.3.6 -Release: 1 +Release: 2 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 Source0: %{name}-%{version}.tar.gz diff --git a/test/testsuites/common/video_helper/mv_video_helper.c b/test/testsuites/common/video_helper/mv_video_helper.c index c59a81c..1bf0ca2 100644 --- a/test/testsuites/common/video_helper/mv_video_helper.c +++ b/test/testsuites/common/video_helper/mv_video_helper.c @@ -36,12 +36,14 @@ typedef struct _mv_video_reader_s { GstElement *filesrc; GstElement *decodebin; GstElement *videoconvert; + GstElement *queue; GstElement *appsink; void *new_sample_cb_user_data; void *eos_cb_user_data; GstCaps *caps; + gulong pad_probe_id; pthread_spinlock_t new_sample_cb_guard; pthread_spinlock_t eos_cb_guard; @@ -82,6 +84,8 @@ static void appsink_eos(GstAppSink 
*appsink, gpointer user_data); static GstFlowReturn appsink_newsample(GstAppSink *appsink, gpointer user_data); static void cb_newpad(GstElement *decodebin, GstPad *new_pad, gpointer user_data); +static GstPadProbeReturn pad_probe_data_cb (GstPad *pad, GstPadProbeInfo *info, gpointer user_data); + /* video reader */ int mv_create_video_reader( mv_video_reader_h *reader) @@ -191,6 +195,7 @@ int mv_video_reader_load( } gst_video_info_from_caps(&info, handle->caps); + gst_caps_unref(handle->caps); *fps = info.fps_n/info.fps_d; @@ -464,12 +469,14 @@ static int _mv_video_reader_create_internals( reader->filesrc = gst_element_factory_make("filesrc", "filesrc"); reader->decodebin = gst_element_factory_make("decodebin", "decoder"); reader->videoconvert = gst_element_factory_make("videoconvert", "convert"); + reader->queue = gst_element_factory_make("queue", "queue"); reader->appsink = gst_element_factory_make("appsink", "appsink"); if ((!reader->pl) || (!reader->filesrc) || (!reader->decodebin) || (!reader->videoconvert) || + (!reader->queue) || (!reader->appsink)) { LOGE("Unable to create video read pipeline elements"); return MEDIA_VISION_ERROR_INVALID_OPERATION; @@ -479,6 +486,7 @@ static int _mv_video_reader_create_internals( reader->filesrc, reader->decodebin, reader->videoconvert, + reader->queue, reader->appsink, NULL); @@ -489,6 +497,7 @@ static int _mv_video_reader_link_internals( mv_video_reader_s *reader) { GstCaps *caps = NULL; + GstPad *pad = NULL; if (!gst_element_link_many(reader->filesrc, reader->decodebin, @@ -504,8 +513,10 @@ static int _mv_video_reader_link_internals( reader); if (!gst_element_link_many(reader->videoconvert, - reader->appsink, NULL)) { - LOGE("Unable to link filesrc to decodebin"); + reader->queue, + reader->appsink, + NULL)) { + LOGE("Unable to link videocovnert-queue-appsink"); return MEDIA_VISION_ERROR_INVALID_OPERATION; } @@ -532,6 +543,14 @@ static int _mv_video_reader_link_internals( "sync", FALSE, NULL); + + /* pad probe */ + pad 
= gst_element_get_static_pad(reader->queue, "src"); + + gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER, + (GstPadProbeCallback)pad_probe_data_cb, reader, NULL); + gst_object_unref(pad); + return MEDIA_VISION_ERROR_NONE; } @@ -795,7 +814,6 @@ static GstFlowReturn appsink_newsample( break; default: LOGE("Video pixel format is not supported\n"); - gst_buffer_unmap(buf, &info); gst_sample_unref(sample); return GST_FLOW_ERROR; @@ -838,6 +856,9 @@ static void appsink_eos( handle->eos_cb(handle->eos_cb_user_data); pthread_spin_unlock(&(handle->eos_cb_guard)); + + + gst_pad_remove_probe(gst_element_get_static_pad(handle->queue, "src"), handle->pad_probe_id); } static void cb_newpad( @@ -859,14 +880,32 @@ static void cb_newpad( } /* Check for pad is video */ - reader->caps = gst_pad_query_caps(pad, NULL); - str = gst_caps_get_structure(reader->caps, 0); + caps = gst_pad_query_caps(pad, NULL); + str = gst_caps_get_structure(caps, 0); if (!g_strrstr(gst_structure_get_name(str), "video")) { LOGI("Not a video pad"); gst_object_unref(video_pad); return; } - + gst_caps_unref(caps); gst_pad_link(pad, video_pad); g_object_unref(video_pad); } + +static GstPadProbeReturn pad_probe_data_cb ( + GstPad *pad, + GstPadProbeInfo *info, + gpointer user_data) +{ + if (user_data == NULL) { + return GST_PAD_PROBE_PASS; + } + mv_video_reader_s *reader = (mv_video_reader_s *) user_data; + + if (reader->caps == NULL) { + reader->caps = gst_pad_get_current_caps(pad); + reader->pad_probe_id = GST_PAD_PROBE_INFO_ID(info); + } + + return GST_PAD_PROBE_OK; +} -- 2.7.4 From 91df4c68c5c499da10e0be9af82a2ca2745bca68 Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Mon, 15 Feb 2016 18:23:47 +0900 Subject: [PATCH 16/16] Remove app_get_shared_data_path() usage from remarks app_get_shared_data_path() API is deprecated since Tizen 3.0 Change-Id: I5df660cb5f7933e01b3857e282b7989b3d983897 Signed-off-by: Tae-Young Chung --- include/mv_face.h | 16 ++++------------ include/mv_image.h | 12 ++++--------
packaging/capi-media-vision.spec | 4 ++-- 3 files changed, 10 insertions(+), 22 deletions(-) diff --git a/include/mv_face.h b/include/mv_face.h index e0f765b..18e4772 100644 --- a/include/mv_face.h +++ b/include/mv_face.h @@ -662,8 +662,7 @@ int mv_face_recognition_model_clone( * @ref mv_face_recognition_model_reset() function if * it is needed to clear the memory. * @remarks @a recognition_model is saved to the absolute path directory. - * Use @ref app_get_data_path for the private app storage path or - * @ref app_get_shared_data_path for the app shared storage path. + * Use @ref app_get_data_path for the private app storage path. * After model is saved to the file, it can be loaded from this file * by @ref mv_face_recognition_model_load() function. * @param [in] file_name Name of the path/file to save the model @@ -686,7 +685,6 @@ int mv_face_recognition_model_clone( * @see mv_face_recognition_model_load() * @see mv_face_recognition_model_create() * @see app_get_data_path() - * @see app_get_shared_data_path() */ int mv_face_recognition_model_save( const char *file_name, @@ -704,8 +702,7 @@ int mv_face_recognition_model_save( * is recommended to clear the memory if learning algorithm doesn't * support reinforcement learning. * @remarks @a recognition_model is loaded from the absolute path directory. - * Use @ref app_get_data_path for the private app storage path or - * @ref app_get_shared_data_path for the app shared storage path. + * Use @ref app_get_data_path for the private app storage path. * @a recognition_model must be destroyed using * @ref mv_face_recognition_model_destroy(). 
* @param [in] file_name Name of path/file to load the model @@ -726,7 +723,6 @@ int mv_face_recognition_model_save( * @see mv_face_recognition_model_save() * @see mv_face_recognition_model_destroy() * @see app_get_data_path() - * @see app_get_shared_data_path() */ int mv_face_recognition_model_load( const char *file_name, @@ -1042,8 +1038,7 @@ int mv_face_tracking_model_clone( * * @since_tizen 3.0 * @remarks @a tracking_model is saved to the absolute path directory. - * Use @ref app_get_data_path for the private app storage path or - * @ref app_get_shared_data_path for the app shared storage path. + * Use @ref app_get_data_path for the private app storage path. * After model is saved to the file, it can be loaded from this file * with @ref mv_face_tracking_model_load() function. * @param [in] file_name Name of the path/file to save the model @@ -1066,7 +1061,6 @@ int mv_face_tracking_model_clone( * @see mv_face_tracking_model_load() * @see mv_face_tracking_model_create() * @see app_get_data_path() - * @see app_get_shared_data_path() */ int mv_face_tracking_model_save( const char *file_name, @@ -1077,8 +1071,7 @@ int mv_face_tracking_model_save( * * @since_tizen 3.0 * @remarks @a tracking_model is loaded from the absolute path directory. - * Use @ref app_get_data_path for the private app storage path or - * @ref app_get_shared_data_path for the app shared storage path. + * Use @ref app_get_data_path for the private app storage path. * @a tracking_model must be destroyed using * @ref mv_face_tracking_model_destroy. 
* @param [in] file_name Name of path/file to load the model @@ -1104,7 +1097,6 @@ int mv_face_tracking_model_save( * @see mv_face_tracking_model_save() * @see mv_face_tracking_model_destroy() * @see app_get_data_path() - * @see app_get_shared_data_path() */ int mv_face_tracking_model_load( const char *file_name, diff --git a/include/mv_image.h b/include/mv_image.h index a1a3d34..ec18dcd 100644 --- a/include/mv_image.h +++ b/include/mv_image.h @@ -618,8 +618,7 @@ int mv_image_object_clone( * * @since_tizen 3.0 * @remarks @a image_object is saved to the absolute path directory. - * Use @ref app_get_data_path for the private app storage path or - * @ref app_get_shared_data_path for the app shared storage path. + * Use @ref app_get_data_path for the private app storage path. * @param [in] file_name Name of the file to path/save the image object * @param [in] image_object The handle to the image object which will be saved * @return @c 0 on success, otherwise a negative error value @@ -645,8 +644,7 @@ int mv_image_object_save( * * @since_tizen 3.0 * @remarks @a image_object is loaded from the absolute path directory. - * Use @ref app_get_data_path for the private app storage path or - * @ref app_get_shared_data_path for the app shared storage path. + * Use @ref app_get_data_path for the private app storage path. * @a image_object must be destroyed using * @ref mv_image_object_destroy(). * @param [in] file_name Name of path/file to load the image object @@ -804,8 +802,7 @@ int mv_image_tracking_model_clone( * * @since_tizen 3.0 * @remarks @a image_tracking_model is saved to the absolute path directory. - * Use @ref app_get_data_path for the private app storage path or - * @ref app_get_shared_data_path for the app shared storage path. + * Use @ref app_get_data_path for the private app storage path. 
* @param [in] file_name Name of path/file to save the model * @param [in] image_tracking_model The handle to the image tracking model * to be saved @@ -833,8 +830,7 @@ int mv_image_tracking_model_save( * * @since_tizen 3.0 * @remarks @a image_tracking_model is loaded from the absolute path directory. - * Use @ref app_get_data_path for the private app storage path or - * @ref app_get_shared_data_path for the app shared storage path. + * Use @ref app_get_data_path for the private app storage path. * @a image_tracking_model must be destroyed using * @ref mv_image_tracking_model_destroy. * @param [in] file_name Name of path/file to load model diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index a6b0f47..55aae32 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,7 +1,7 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.3.6 -Release: 2 +Version: 0.3.7 +Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-2.0 Source0: %{name}-%{version}.tar.gz -- 2.7.4