From 841690fce01fd3f5d96f50e71765eb801b45d850 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Tue, 5 Jul 2022 15:06:49 +0900 Subject: [PATCH 01/16] mv_machine_learning: update description [Issue type] cleanup Updated header and doc file description for face recognition CAPI, which just reflects the feedback from API REVIEW step. Change-Id: Id17eef55aafbd6a4c077b712fa5e8fe2a279f729 Signed-off-by: Inki Dae --- doc/mediavision_doc.h | 58 ++++++++++++++++++++++++++++++++++++++++++- include/mv_face_recognition.h | 2 ++ 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/doc/mediavision_doc.h b/doc/mediavision_doc.h index 7691efd..4678ceb 100644 --- a/doc/mediavision_doc.h +++ b/doc/mediavision_doc.h @@ -28,7 +28,8 @@ * * Surveillance: movement detection, person appearance/disappearance, * person recognition. * * Inference: Image classification, object detection, - * face detection and facial landmark detection;\n + * face detection, facial landmark detection and face recognition;\n + * * Training: Face recognition;\n * * * @defgroup CAPI_MEDIA_VISION_COMMON_MODULE Media Vision Common @@ -411,6 +412,61 @@ * mv_inference_facial_landmark_detect() functionalities to detect faces and their landmark * on #mv_source_h, and callbacks mv_inference_face_detected_cb() and * mv_inference_facial_landmark_detected_cb() to process detection results. + * + * @defgroup CAPI_MEDIA_VISION_FACE_RECOGNITION_MODULE Media Vision Face Recognition + * @ingroup CAPI_MEDIA_VISION_MODULE + * @brief Face recognition. 
+ * @section CAPI_MEDIA_VISION_FACE_RECOGNITION_MODULE_HEADER Required Header + * \#include <mv_face_recognition.h> + * + * @section CAPI_MEDIA_VISION_FACE_RECOGNITION_MODULE_FEATURE Related Features + * This API is related with the following features:\n + * - %http://tizen.org/feature/vision.face_recognition\n + * + * It is recommended to design feature related codes in your application for + * reliability.\n + * You can check if a device supports the related features for this API by using + * @ref CAPI_SYSTEM_SYSTEM_INFO_MODULE, thereby controlling the procedure of + * your application.\n + * To ensure your application is only running on the device with specific + * features, please define the features in your manifest file using the manifest + * editor in the SDK.\n + * More details on featuring your application can be found from + * + * Feature Element. + * + * + * @section CAPI_MEDIA_VISION_FACE_RECOGNITION_MODULE_OVERVIEW Overview + * @ref CAPI_MEDIA_VISION_FACE_RECOGNITION_MODULE contains mv_face_recognition_register() function + * to train a face on #mv_source_h, and mv_face_recognition_unregister() function to remove + * all face data related to a given label. Also it contains mv_face_recognition_inference() function + * which performs face recognition on a face on #mv_source_h. User can get a proper label string through + * mv_face_recognition_get_label() function after calling mv_face_recognition_inference() function + * as a recognized result. + * + * A training example + * First of all, a face recognition handle should be created by mv_face_recognition_create() function + * and destroyed with mv_face_recognition_destroy() function, and some resources - such as backbone model to extract + * feature vector and loading the label/feature vector database files - should be prepared with + * mv_face_recognition_prepare() function, and then a given face image and its label string should be registered + * with mv_face_recognition_register() function.
+ * + * An inference example + * First of all, a face recognition handle should be created by mv_face_recognition_create() function + * and destroyed with mv_face_recognition_destroy() function, and some resources - such as backbone model to extract + * feature vector and loading the label/feature vector database files - should be prepared with + * mv_face_recognition_prepare() function, and mv_face_recognition_inference() function should be called + * to request the inference to face recognition framework of Mediavision, and then mv_face_recognition_get_label() + * function should be called to get a label as a recognized result. + * + * A label removing example + * First of all, a face recognition handle should be created by mv_face_recognition_create() function + * and destroyed with mv_face_recognition_destroy() function, and some resources - such as backbone model to extract + * feature vector and loading the label/feature vector database files - should be prepared with + * mv_face_recognition_prepare() function, and mv_face_recognition_unregister() function should be called + * to delete face data related to a given label string. + * + * For more details, please refer to test/testsuites/machine_learning/face_recognition/test_face_recognition.cpp */ #endif /* __TIZEN_MEDIAVISION_DOC_H__ */ diff --git a/include/mv_face_recognition.h b/include/mv_face_recognition.h index 9cece89..233c9ac 100644 --- a/include/mv_face_recognition.h +++ b/include/mv_face_recognition.h @@ -170,6 +170,8 @@ int mv_face_recognition_inference(mv_face_recognition_h handle, mv_source_h sour * * @since_tizen 7.0 * + * @remarks The @a out_label must NOT be released using free() + * * @param[in] handle The handle to the face recognition object. * @param[out] out_label The array pointer for the label name to be stored. * This function returns memory pointer containing actual label string to @a out_label.
-- 2.7.4 From 6dfe0dc63f55a21b20addc006c145b20c431cd91 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Wed, 6 Jul 2022 11:09:11 +0900 Subject: [PATCH 02/16] common: code refactoring to EngineConfig.cpp [Issue type] code refactoring Did code refactoring to EngineConfig.cpp file. What this refactoring does, - change singleton way to OOP one. - drop redundant functions. - rename cacheDictionaries() to loadDictionaries() with its argument cleanup. The purpose of this refactoring is for service based multiple models support because config data of global config file will be moved to each config file for each service, and EngineConfig class will be used commonly by each service to manage its own config file. Change-Id: I22cf241cc250a213507e87727bb86b01e929cde2 Signed-off-by: Inki Dae --- mv_common/include/EngineConfig.h | 57 +++----- mv_common/src/EngineConfig.cpp | 294 ++++++++++++++++----------------- mv_common/src/mv_common_c.cpp | 31 +++-- 3 files changed, 161 insertions(+), 221 deletions(-) diff --git a/mv_common/include/EngineConfig.h b/mv_common/include/EngineConfig.h index e8c5655..365e5fb 100644 --- a/mv_common/include/EngineConfig.h +++ b/mv_common/include/EngineConfig.h @@ -23,6 +23,8 @@ #include "mv_common.h" +#define MV_ENGINE_CONFIG_FILE_NAME "media-vision-config.json" + /** * @file EngineConfig.h * @brief Engine Configuration class definition.
@@ -31,28 +33,28 @@ namespace MediaVision { namespace Common { -typedef std::map::const_iterator DictDblConstIter; -typedef std::map::const_iterator DictIntConstIter; -typedef std::map::const_iterator DictBoolConstIter; -typedef std::map::const_iterator DictStrConstIter; - -typedef std::map>::const_iterator DictVecStrConstIter; +using DictDblConstIter = std::map::const_iterator; +using DictIntConstIter = std::map::const_iterator; +using DictBoolConstIter = std::map::const_iterator; +using DictStrConstIter = std::map::const_iterator; +using DictVecStrConstIter = std::map>::const_iterator; class EngineConfig { public: /** * @brief Engine configuration constructor. * @details Create new engine configuration dictionary and set default - * attributes values. + * attributes values with a given config file path. * - * @since_tizen @if MOBILE 2.4 @else 3.0 @endif + * @since_tizen 7.0 + * @param [in] config_file_path A full path of config file. (Optional) */ - EngineConfig(); + explicit EngineConfig(std::string config_file_path = std::string(MV_CONFIG_PATH) + std::string(MV_ENGINE_CONFIG_FILE_NAME)); /** * @brief Engine configuration destructor. */ - virtual ~EngineConfig(); + ~EngineConfig(); /** * @brief Sets attribute with double value. 
@@ -174,33 +176,20 @@ public: */ int getStringAttribute(const std::string& key, std::vector *value) const; -public: - static bool setDefaultConfigFilePath(const std::string& confFilePath); - - static const std::map& getDefaultDblDict(); - static const std::map& getDefaultIntDict(); - static const std::map& getDefaultBoolDict(); - static const std::map& getDefaultStrDict(); - static const std::map>& getDefaultVecStrDict(); - static int cacheDictionaries( - bool isLazyCache = true, - std::string configFilePath = DefConfigFilePath); - -private: - std::map m_dblDict; - std::map m_intDict; - std::map m_boolDict; - std::map m_strDict; - std::map> m_vecStrDict; + const std::map& getDefaultDblDict(); + const std::map& getDefaultIntDict(); + const std::map& getDefaultBoolDict(); + const std::map& getDefaultStrDict(); + const std::map>& getDefaultVecStrDict(); private: - static std::string DefConfigFilePath; + std::map __dblDict; + std::map __intDict; + std::map __boolDict; + std::map __strDict; + std::map> __vecStrDict; - static std::map DefDblDict; - static std::map DefIntDict; - static std::map DefBoolDict; - static std::map DefStrDict; - static std::map> DefVecStrDict; + int loadDictionaries(std::string& config_file_path); }; } /* Common */ diff --git a/mv_common/src/EngineConfig.cpp b/mv_common/src/EngineConfig.cpp index 686295d..bbcdfdd 100644 --- a/mv_common/src/EngineConfig.cpp +++ b/mv_common/src/EngineConfig.cpp @@ -20,8 +20,6 @@ #include -#define MV_ENGINE_CONFIG_FILE_NAME "media-vision-config.json" - /** * @file EngineConfig.cpp * @brief Engine Configuration class methods implementation. 
@@ -30,30 +28,14 @@ namespace MediaVision { namespace Common { -std::string EngineConfig::DefConfigFilePath; - -std::map EngineConfig::DefDblDict; -std::map EngineConfig::DefIntDict; -std::map EngineConfig::DefBoolDict; -std::map EngineConfig::DefStrDict; -std::map> EngineConfig::DefVecStrDict; - -EngineConfig::EngineConfig() +EngineConfig::EngineConfig(std::string config_file_path) { - DefConfigFilePath = MV_CONFIG_PATH; - DefConfigFilePath += MV_ENGINE_CONFIG_FILE_NAME; - - LOGI("Default Engine config file location is %s", DefConfigFilePath.c_str()); + LOGI("Default Engine config file location is %s", config_file_path.c_str()); /* Force load default attributes from configuration file */ - cacheDictionaries(false); - - /* Insert default attribute values into creating engine configuration */ - m_dblDict.insert(getDefaultDblDict().begin(), getDefaultDblDict().end()); - m_intDict.insert(getDefaultIntDict().begin(), getDefaultIntDict().end()); - m_boolDict.insert(getDefaultBoolDict().begin(), getDefaultBoolDict().end()); - m_strDict.insert(getDefaultStrDict().begin(), getDefaultStrDict().end()); - m_vecStrDict.insert(getDefaultVecStrDict().begin(), getDefaultVecStrDict().end()); + int ret = loadDictionaries(config_file_path); + if (ret != MEDIA_VISION_ERROR_NONE) + throw static_cast(ret); } EngineConfig::~EngineConfig() @@ -66,12 +48,12 @@ int EngineConfig::setAttribute(const std::string& key, const double value) LOGI("Set double attribute for the engine config %p. [%s] = %f", this, key.c_str(), value); - if (m_dblDict.find(key) == m_dblDict.end()) { + if (__dblDict.find(key) == __dblDict.end()) { LOGE("Double attribute [%s] can't be set because isn't supported", key.c_str()); return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; } - m_dblDict[key] = value; + __dblDict[key] = value; return MEDIA_VISION_ERROR_NONE; } @@ -81,12 +63,12 @@ int EngineConfig::setAttribute(const std::string& key, const int value) LOGI("Set integer attribute for the engine config %p. 
[%s] = %i", this, key.c_str(), value); - if (m_intDict.find(key) == m_intDict.end()) { + if (__intDict.find(key) == __intDict.end()) { LOGE("Integer attribute [%s] can't be set because isn't supported", key.c_str()); return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; } - m_intDict[key] = value; + __intDict[key] = value; return MEDIA_VISION_ERROR_NONE; } @@ -96,12 +78,12 @@ int EngineConfig::setAttribute(const std::string& key, const bool value) LOGI("Set boolean attribute for the engine config %p. [%s] = %s", this, key.c_str(), value ? "TRUE" : "FALSE"); - if (m_boolDict.find(key) == m_boolDict.end()) { + if (__boolDict.find(key) == __boolDict.end()) { LOGE("Boolean attribute [%s] can't be set because isn't supported", key.c_str()); return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; } - m_boolDict[key] = value; + __boolDict[key] = value; return MEDIA_VISION_ERROR_NONE; } @@ -111,12 +93,12 @@ int EngineConfig::setAttribute(const std::string& key, const std::string& value) LOGI("Set string attribute for the engine config %p. 
[%s] = %s", this, key.c_str(), value.c_str()); - if (m_strDict.find(key) == m_strDict.end()) { + if (__strDict.find(key) == __strDict.end()) { LOGE("String attribute [%s] can't be set because isn't supported", key.c_str()); return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; } - m_strDict[key] = value; + __strDict[key] = value; return MEDIA_VISION_ERROR_NONE; } @@ -126,20 +108,20 @@ int EngineConfig::setAttribute(const std::string& key, const std::vector *value) const { - DictVecStrConstIter dictIter = m_vecStrDict.find(key); - if (dictIter == m_vecStrDict.end()) { + DictVecStrConstIter dictIter = __vecStrDict.find(key); + if (dictIter == __vecStrDict.end()) { LOGE("Attempt to access to the unsupported vector attribute [%s] of string " "of the engine config %p", key.c_str(), this); return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE; @@ -222,177 +204,143 @@ int EngineConfig::getStringAttribute(const std::string& key, std::vector& EngineConfig::getDefaultDblDict() { - cacheDictionaries(); - - return DefDblDict; + return __dblDict; } const std::map& EngineConfig::getDefaultIntDict() { - cacheDictionaries(); - - return DefIntDict; + return __intDict; } const std::map& EngineConfig::getDefaultBoolDict() { - cacheDictionaries(); - - return DefBoolDict; + return __boolDict; } const std::map& EngineConfig::getDefaultStrDict() { - cacheDictionaries(); - - return DefStrDict; + return __strDict; } const std::map>& EngineConfig::getDefaultVecStrDict() { - cacheDictionaries(); - - return DefVecStrDict; + return __vecStrDict; } -int EngineConfig::cacheDictionaries(bool isLazyCache, std::string configFilePath) +int EngineConfig::loadDictionaries(std::string& config_file_path) { - static bool isCached = false; - if (!isLazyCache || !isCached) { - LOGI("Start to cache default attributes from engine configuration file."); - - DefDblDict.clear(); - DefIntDict.clear(); - DefBoolDict.clear(); - DefStrDict.clear(); - DefVecStrDict.clear(); - - const char *conf_file = configFilePath.c_str(); - 
JsonParser *parser; - GError *error = NULL; - - parser = json_parser_new(); - json_parser_load_from_file(parser, conf_file, &error); - if (error) { - LOGW("Unable to parse file '%s': %s\n", conf_file, error->message); - g_error_free(error); - g_object_unref(parser); - return MEDIA_VISION_ERROR_NO_DATA; - } + LOGI("Start to cache default attributes from engine configuration file."); + + __dblDict.clear(); + __intDict.clear(); + __boolDict.clear(); + __strDict.clear(); + __vecStrDict.clear(); + + const char *conf_file = config_file_path.c_str(); + GError *error = NULL; + JsonParser *parser = json_parser_new(); + + json_parser_load_from_file(parser, conf_file, &error); + if (error) { + LOGW("Unable to parse file '%s': %s\n", conf_file, error->message); + g_error_free(error); + g_object_unref(parser); + return MEDIA_VISION_ERROR_NO_DATA; + } - JsonNode *root = json_parser_get_root(parser); - if (JSON_NODE_OBJECT != json_node_get_node_type(root)) { - LOGW("Can't parse tests configuration file. " - "Incorrect json markup."); - g_object_unref(parser); - return MEDIA_VISION_ERROR_NO_DATA; - } + JsonNode *root = json_parser_get_root(parser); + if (JSON_NODE_OBJECT != json_node_get_node_type(root)) { + LOGW("Can't parse tests configuration file. " + "Incorrect json markup."); + g_object_unref(parser); + return MEDIA_VISION_ERROR_NO_DATA; + } - JsonObject *jobj = json_node_get_object(root); + JsonObject *jobj = json_node_get_object(root); - if (!json_object_has_member(jobj, "attributes")) { - LOGW("Can't parse tests configuration file. " - "No 'attributes' section."); - g_object_unref(parser); - return MEDIA_VISION_ERROR_NO_DATA; - } + if (!json_object_has_member(jobj, "attributes")) { + LOGW("Can't parse tests configuration file. 
" + "No 'attributes' section."); + g_object_unref(parser); + return MEDIA_VISION_ERROR_NO_DATA; + } - JsonNode *attr_node = - json_object_get_member(jobj, "attributes"); + JsonNode *attr_node = json_object_get_member(jobj, "attributes"); - if (JSON_NODE_ARRAY != json_node_get_node_type(attr_node)) { - LOGW("Can't parse tests configuration file. " - "'attributes' section isn't array."); - g_object_unref(parser); - return MEDIA_VISION_ERROR_NO_DATA; - } + if (JSON_NODE_ARRAY != json_node_get_node_type(attr_node)) { + LOGW("Can't parse tests configuration file. " + "'attributes' section isn't array."); + g_object_unref(parser); + return MEDIA_VISION_ERROR_NO_DATA; + } - JsonArray *attr_array = json_node_get_array(attr_node); + JsonArray *attr_array = json_node_get_array(attr_node); + const guint attr_num = json_array_get_length(attr_array); + guint attrInd = 0; - const guint attr_num = json_array_get_length(attr_array); + for (; attrInd < attr_num; ++attrInd) { + JsonNode *attr_node = json_array_get_element(attr_array, attrInd); - guint attrInd = 0; - for (; attrInd < attr_num; ++attrInd) { - JsonNode *attr_node = json_array_get_element(attr_array, attrInd); + if (JSON_NODE_OBJECT != json_node_get_node_type(attr_node)) { + LOGW("Attribute %u wasn't parsed from json file.", attrInd); + continue; + } - if (JSON_NODE_OBJECT != json_node_get_node_type(attr_node)) { - LOGW("Attribute %u wasn't parsed from json file.", attrInd); - continue; - } + JsonObject *attr_obj = json_node_get_object(attr_node); - JsonObject *attr_obj = json_node_get_object(attr_node); + if (!json_object_has_member(attr_obj, "name") || + !json_object_has_member(attr_obj, "type") || + !json_object_has_member(attr_obj, "value")) { + LOGW("Attribute %u wasn't parsed from json file.", attrInd); + continue; + } - if (!json_object_has_member(attr_obj, "name") || - !json_object_has_member(attr_obj, "type") || - !json_object_has_member(attr_obj, "value")) { - LOGW("Attribute %u wasn't parsed from json file.", 
attrInd); - continue; - } + const char *nameStr = (const char*)json_object_get_string_member(attr_obj, "name"); + const char *typeStr = (const char*)json_object_get_string_member(attr_obj, "type"); - const char *nameStr = - (const char*)json_object_get_string_member(attr_obj, "name"); - const char *typeStr = - (const char*)json_object_get_string_member(attr_obj, "type"); + if (NULL == nameStr || NULL == typeStr) { + LOGW("Attribute %i wasn't parsed from json file. name and/or " + "type of the attribute are parsed as NULL.", attrInd); + continue; + } - if (NULL == nameStr || NULL == typeStr) { - LOGW("Attribute %i wasn't parsed from json file. name and/or " - "type of the attribute are parsed as NULL.", attrInd); + if (0 == strcmp("double", typeStr)) { + __dblDict[std::string(nameStr)] = (double)json_object_get_double_member(attr_obj, "value"); + } else if (0 == strcmp("integer", typeStr)) { + __intDict[std::string(nameStr)] = (int)json_object_get_int_member(attr_obj, "value"); + } else if (0 == strcmp("boolean", typeStr)) { + __boolDict[std::string(nameStr)] = json_object_get_boolean_member(attr_obj, "value") ? true : false; + } else if (0 == strcmp("string", typeStr)) { + __strDict[std::string(nameStr)] = (char*)json_object_get_string_member(attr_obj, "value"); + } else if (0 == strcmp("array", typeStr)) { + const char *subTypeStr = (const char*)json_object_get_string_member(attr_obj, "subtype"); + + if (NULL == subTypeStr) continue; - } else if (0 == strcmp("double", typeStr)) { - DefDblDict[std::string(nameStr)] = - (double)json_object_get_double_member(attr_obj, "value"); - } else if (0 == strcmp("integer", typeStr)) { - DefIntDict[std::string(nameStr)] = - (int)json_object_get_int_member(attr_obj, "value"); - } else if (0 == strcmp("boolean", typeStr)) { - DefBoolDict[std::string(nameStr)] = - json_object_get_boolean_member(attr_obj, "value") ? 
true : false; - } else if (0 == strcmp("string", typeStr)) { - DefStrDict[std::string(nameStr)] = - (char*)json_object_get_string_member(attr_obj, "value"); - } else if (0 == strcmp("array", typeStr)) { - const char *subTypeStr = (const char*)json_object_get_string_member(attr_obj, "subtype"); - - if (NULL == subTypeStr) - continue; - - if (0 == strcmp("string", subTypeStr)) { - JsonArray *attr_array = json_object_get_array_member(attr_obj, "value"); - std::vector defaultVecStr; - for (unsigned int item = 0; item < json_array_get_length(attr_array); ++item) { - defaultVecStr.push_back(std::string(json_array_get_string_element(attr_array, item))); - } - DefVecStrDict[std::string(nameStr)] = defaultVecStr; + if (0 == strcmp("string", subTypeStr)) { + JsonArray *attr_array = json_object_get_array_member(attr_obj, "value"); + std::vector defaultVecStr; + + for (unsigned int item = 0; item < json_array_get_length(attr_array); ++item) { + defaultVecStr.push_back(std::string(json_array_get_string_element(attr_array, item))); } - //TO-DO: add other subtypes - } else { - LOGW("Attribute %i:%s wasn't parsed from json file. " - "Type isn't supported.", attrInd, nameStr); - continue; + __vecStrDict[std::string(nameStr)] = defaultVecStr; + } + //TO-DO: add other subtypes + } else { + LOGW("Attribute %i:%s wasn't parsed from json file. 
" + "Type isn't supported.", attrInd, nameStr); + continue; } - - g_object_unref(parser); - isCached = true; } + g_object_unref(parser); + return MEDIA_VISION_ERROR_NONE; } diff --git a/mv_common/src/mv_common_c.cpp b/mv_common/src/mv_common_c.cpp index 28ba838..bc6bbf5 100644 --- a/mv_common/src/mv_common_c.cpp +++ b/mv_common/src/mv_common_c.cpp @@ -22,6 +22,7 @@ #include #include +#include #include #include #include @@ -701,21 +702,17 @@ int mv_engine_config_foreach_supported_attribute_c( } using namespace MediaVision::Common; + std::unique_ptr config; - int err = EngineConfig::cacheDictionaries(); - - if (MEDIA_VISION_ERROR_NONE != err) { - LOGE("Failed to get attribute names/types. " - "Failed to cache attributes from file"); - return err; + try { + config = std::make_unique(); + } catch(int& exception) { + return exception; } - DictDblConstIter dblDictIter = EngineConfig::getDefaultDblDict().begin(); - DictIntConstIter intDictIter = EngineConfig::getDefaultIntDict().begin(); - DictBoolConstIter boolDictIter = EngineConfig::getDefaultBoolDict().begin(); - DictStrConstIter strDictIter = EngineConfig::getDefaultStrDict().begin(); + DictDblConstIter dblDictIter = config->getDefaultDblDict().begin(); - while (dblDictIter != EngineConfig::getDefaultDblDict().end()) { + while (dblDictIter != config->getDefaultDblDict().end()) { if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE, dblDictIter->first.c_str(), user_data)) { LOGD("Attribute names/types traverse has been stopped by the user"); @@ -724,7 +721,9 @@ int mv_engine_config_foreach_supported_attribute_c( ++dblDictIter; } - while (intDictIter != EngineConfig::getDefaultIntDict().end()) { + DictIntConstIter intDictIter = config->getDefaultIntDict().begin(); + + while (intDictIter != config->getDefaultIntDict().end()) { if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER, intDictIter->first.c_str(), user_data)) { LOGD("Attribute names/types traverse has been stopped by the user"); @@ -733,7 +732,9 @@ int 
mv_engine_config_foreach_supported_attribute_c( ++intDictIter; } - while (boolDictIter != EngineConfig::getDefaultBoolDict().end()) { + DictBoolConstIter boolDictIter = config->getDefaultBoolDict().begin(); + + while (boolDictIter != config->getDefaultBoolDict().end()) { if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN, boolDictIter->first.c_str(), user_data)) { LOGD("Attribute names/types traverse has been stopped by the user"); @@ -742,7 +743,9 @@ int mv_engine_config_foreach_supported_attribute_c( ++boolDictIter; } - while (strDictIter != EngineConfig::getDefaultStrDict().end()) { + DictStrConstIter strDictIter = config->getDefaultStrDict().begin(); + + while (strDictIter != config->getDefaultStrDict().end()) { if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_STRING, strDictIter->first.c_str(), user_data)) { LOGD("Attribute names/types traverse has been stopped by the user"); -- 2.7.4 From 26dcb23156aa94cb79968a7c9ac01678ff2c49fc Mon Sep 17 00:00:00 2001 From: Kwanghoon Son Date: Fri, 8 Jul 2022 00:21:23 -0400 Subject: [PATCH 03/16] common: Clean duplicate code [Issue type] clean up clean up convertSourceMV2GrayCV Change-Id: I9f8427bdc47771ccbe0e4dbe61b4ceab8a3e7730 Signed-off-by: Kwanghoon Son --- mv_barcode/barcode_detector/include/BarcodeUtils.h | 11 -- mv_barcode/barcode_detector/src/BarcodeUtils.cpp | 97 ----------------- .../src/mv_barcode_detect_open.cpp | 3 +- mv_common/include/CommonUtils.h | 41 +++++++ mv_common/src/CommonUtils.cpp | 121 +++++++++++++++++++++ mv_face/face/include/FaceUtil.h | 10 -- mv_face/face/src/FaceUtil.cpp | 92 ---------------- mv_face/face/src/mv_face_open.cpp | 16 +-- mv_image/image/src/mv_image_open.cpp | 111 +------------------ .../surveillance/include/SurveillanceHelper.h | 9 -- mv_surveillance/surveillance/src/EventManager.cpp | 3 +- .../surveillance/src/SurveillanceHelper.cpp | 92 ---------------- 12 files changed, 179 insertions(+), 427 deletions(-) create mode 100644 mv_common/include/CommonUtils.h create mode 100644 
mv_common/src/CommonUtils.cpp diff --git a/mv_barcode/barcode_detector/include/BarcodeUtils.h b/mv_barcode/barcode_detector/include/BarcodeUtils.h index 4b06ae5..2d1392b 100644 --- a/mv_barcode/barcode_detector/include/BarcodeUtils.h +++ b/mv_barcode/barcode_detector/include/BarcodeUtils.h @@ -39,17 +39,6 @@ namespace Barcode { */ int convertSourceMV2Zbar(mv_source_h mvSource, zbar::Image& zbarSource); -/** - * @brief This function converts media vision image handle to cv::Mat with gray color. - * - * @since_tizen 6.5 - * @param [in] mvSource Media vision image handle - * @param [out] cv::Mat - * @return @c MEDIA_VISION_ERROR_NONE on success, - otherwise a negative error value - */ -int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource); - } /* Barcode */ } /* MediaVision */ diff --git a/mv_barcode/barcode_detector/src/BarcodeUtils.cpp b/mv_barcode/barcode_detector/src/BarcodeUtils.cpp index 332ddaf..807a92b 100644 --- a/mv_barcode/barcode_detector/src/BarcodeUtils.cpp +++ b/mv_barcode/barcode_detector/src/BarcodeUtils.cpp @@ -105,102 +105,5 @@ int convertSourceMV2Zbar(mv_source_h mvSource, zbar::Image& zbarSource) } // LCOV_EXCL_STOP -int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource) -{ - MEDIA_VISION_INSTANCE_CHECK(mvSource); - - int depth = CV_8U; // Default depth. 1 byte for channel. 
- unsigned int channelsNumber = 0u; - unsigned int width = 0u, height = 0u; - unsigned int bufferSize = 0u; - unsigned char *buffer = NULL; - - mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; - - MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), - "Failed to get the width."); - MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), - "Failed to get the height."); - MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), - "Failed to get the colorspace."); - MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize), - "Failed to get the buffer size."); - - int conversionType = -1; /* Type of conversion from given colorspace to gray */ - switch(colorspace) { - case MEDIA_VISION_COLORSPACE_Y800: - channelsNumber = 1; - /* Without convertion */ - break; - case MEDIA_VISION_COLORSPACE_I420: - channelsNumber = 1; - height *= 1.5; - conversionType = cv::COLOR_YUV2GRAY_I420; - break; - case MEDIA_VISION_COLORSPACE_NV12: - channelsNumber = 1; - height *= 1.5; - conversionType = cv::COLOR_YUV2GRAY_NV12; - break; - case MEDIA_VISION_COLORSPACE_YV12: - channelsNumber = 1; - height *= 1.5; - conversionType = cv::COLOR_YUV2GRAY_YV12; - break; - case MEDIA_VISION_COLORSPACE_NV21: - channelsNumber = 1; - height *= 1.5; - conversionType = cv::COLOR_YUV2GRAY_NV21; - break; - case MEDIA_VISION_COLORSPACE_YUYV: - channelsNumber = 2; - conversionType = cv::COLOR_YUV2GRAY_YUYV; - break; - case MEDIA_VISION_COLORSPACE_UYVY: - channelsNumber = 2; - conversionType = cv::COLOR_YUV2GRAY_UYVY; - break; - case MEDIA_VISION_COLORSPACE_422P: - channelsNumber = 2; - conversionType = cv::COLOR_YUV2GRAY_Y422; - break; - case MEDIA_VISION_COLORSPACE_RGB565: - channelsNumber = 2; - conversionType = cv::COLOR_BGR5652GRAY; - break; - case MEDIA_VISION_COLORSPACE_RGB888: - channelsNumber = 3; - conversionType = cv::COLOR_RGB2GRAY; - break; - case MEDIA_VISION_COLORSPACE_RGBA: - channelsNumber = 4; - conversionType = cv::COLOR_RGBA2GRAY; - break; - 
default: - LOGE("Error: mv_source has unsupported colorspace."); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - if (conversionType == -1) {/* Without conversion */ - cvSource = cv::Mat(cv::Size(width, height), - CV_MAKETYPE(depth, channelsNumber), buffer).clone(); - } else {/* With conversion */ - /* Class for representation the given image as cv::Mat before conversion */ - cv::Mat origin; - - try { - origin = cv::Mat(cv::Size(width, height), - CV_MAKETYPE(depth, channelsNumber), buffer); - - cv::cvtColor(origin, cvSource, conversionType); - } catch (const cv::Exception &e) { - LOGE("Failed to cvtColor with %s", e.what()); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - } - - return MEDIA_VISION_ERROR_NONE; -} - } /* Barcode */ } /* MediaVision */ diff --git a/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp b/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp index fd5ef27..c2a7e83 100644 --- a/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp +++ b/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp @@ -24,6 +24,7 @@ #include #include #include +#include using namespace MediaVision::Barcode; @@ -145,7 +146,7 @@ int mv_barcode_detect_open( cv::Mat graySource; cv::Mat rotMat, rotBuffer; - err = convertSourceMV2GrayCV(source, graySource); + err = MediaVision::Common::convertSourceMV2GrayCV(source, graySource); if (err != MEDIA_VISION_ERROR_NONE) { LOGE("Failed to convertSourceMV2GrayCV[%d]", err); return err; diff --git a/mv_common/include/CommonUtils.h b/mv_common/include/CommonUtils.h new file mode 100644 index 0000000..9d81649 --- /dev/null +++ b/mv_common/include/CommonUtils.h @@ -0,0 +1,41 @@ +/** + * Copyright (c) 2022 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MEDIA_VISION_COMMON_UTILS_H__ +#define __MEDIA_VISION_COMMON_UTILS_H__ + +#include +#include +#include + +namespace MediaVision { +namespace Common { + +/** + * @brief This function converts media vision image handle to cv::Mat with gray color. + * + * @since_tizen 6.5 + * @param [in] mvSource Media vision image handle + * @param [out] cv::Mat + * @return @c MEDIA_VISION_ERROR_NONE on success, + otherwise a negative error value + */ +int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource); + +} /* Common */ +} /* MediaVision */ + +#endif /* __MEDIA_VISION_COMMON_UTIL_H__ */ diff --git a/mv_common/src/CommonUtils.cpp b/mv_common/src/CommonUtils.cpp new file mode 100644 index 0000000..52e6bea --- /dev/null +++ b/mv_common/src/CommonUtils.cpp @@ -0,0 +1,121 @@ +/** + * Copyright (c) 2022 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include + +namespace MediaVision { +namespace Common { + + +int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource) +{ + MEDIA_VISION_INSTANCE_CHECK(mvSource); + + int depth = CV_8U; // Default depth. 1 byte for channel. + unsigned int channelsNumber = 0u; + unsigned int width = 0u, height = 0u; + unsigned int bufferSize = 0u; + unsigned char *buffer = NULL; + + mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; + + MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), + "Failed to get the width."); + MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), + "Failed to get the height."); + MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), + "Failed to get the colorspace."); + MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize), + "Failed to get the buffer size."); + + int conversionType; + switch(colorspace) { + case MEDIA_VISION_COLORSPACE_Y800: + channelsNumber = 1; + conversionType = -1; /* Type of conversion from given colorspace to gray */ + /* Without convertion */ + break; + case MEDIA_VISION_COLORSPACE_I420: + channelsNumber = 1; + height *= 1.5; + conversionType = cv::COLOR_YUV2GRAY_I420; + break; + case MEDIA_VISION_COLORSPACE_NV12: + channelsNumber = 1; + height *= 1.5; + conversionType = cv::COLOR_YUV2GRAY_NV12; + break; + case MEDIA_VISION_COLORSPACE_YV12: + channelsNumber = 1; + height *= 1.5; + conversionType = cv::COLOR_YUV2GRAY_YV12; + break; + case MEDIA_VISION_COLORSPACE_NV21: + channelsNumber = 1; + height *= 1.5; + conversionType = cv::COLOR_YUV2GRAY_NV21; + break; + case MEDIA_VISION_COLORSPACE_YUYV: + channelsNumber = 2; + conversionType = cv::COLOR_YUV2GRAY_YUYV; + break; + case MEDIA_VISION_COLORSPACE_UYVY: + channelsNumber = 2; + conversionType = cv::COLOR_YUV2GRAY_UYVY; + break; + case MEDIA_VISION_COLORSPACE_422P: + channelsNumber = 2; + conversionType = cv::COLOR_YUV2GRAY_Y422; + break; + case MEDIA_VISION_COLORSPACE_RGB565: + channelsNumber = 
2; + conversionType = cv::COLOR_BGR5652GRAY; + break; + case MEDIA_VISION_COLORSPACE_RGB888: + channelsNumber = 3; + conversionType = cv::COLOR_RGB2GRAY; + break; + case MEDIA_VISION_COLORSPACE_RGBA: + channelsNumber = 4; + conversionType = cv::COLOR_RGBA2GRAY; + break; + default: + LOGE("Error: mv_source has unsupported colorspace."); + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } + + if (conversionType == -1) {/* Without conversion */ + cvSource = cv::Mat(cv::Size(width, height), + CV_MAKETYPE(depth, channelsNumber), buffer).clone(); + } else {/* With conversion */ + /* Class for representation the given image as cv::Mat before conversion */ + try { + cv::Mat origin = cv::Mat(cv::Size(width, height), + CV_MAKETYPE(depth, channelsNumber), buffer); + + cv::cvtColor(origin, cvSource, conversionType); + } catch (const cv::Exception &e) { + LOGE("Failed to cvtColor with %s", e.what()); + return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; + } + } + + return MEDIA_VISION_ERROR_NONE; +} +} /* Common */ +} /* MediaVision */ diff --git a/mv_face/face/include/FaceUtil.h b/mv_face/face/include/FaceUtil.h index bd2cd63..0bbe521 100644 --- a/mv_face/face/include/FaceUtil.h +++ b/mv_face/face/include/FaceUtil.h @@ -54,16 +54,6 @@ struct RecognitionParams { the learning algorithm */ }; -/** - * @brief Converts mv_source_h to cv::Mat class with grayscale type. - * - * @since_tizen 3.0 - * @param [in] mvSource The handle to the image from Media Vision API. - * @param [out] cvSource The cv::Mat class, which will be filled. 
- * @return @c 0 on success, otherwise a negative error value - */ -int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource); - } /* Face */ } /* MediaVision */ diff --git a/mv_face/face/src/FaceUtil.cpp b/mv_face/face/src/FaceUtil.cpp index 31a2a9e..1d89744 100644 --- a/mv_face/face/src/FaceUtil.cpp +++ b/mv_face/face/src/FaceUtil.cpp @@ -35,97 +35,5 @@ RecognitionParams::RecognitionParams() : ; /* NULL */ } -int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource) -{ - MEDIA_VISION_INSTANCE_CHECK(mvSource); - - int depth = CV_8U; /* Default depth. 1 byte for channel. */ - unsigned int channelsNumber = 0; - unsigned int width = 0, height = 0; - unsigned int bufferSize = 0; - unsigned char *buffer = NULL; - - mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; - - MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), - "Failed to get the width."); - MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), - "Failed to get the height."); - MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), - "Failed to get the colorspace."); - MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize), - "Failed to get the buffer size."); - - int conversionType = -1; // Type of conversion from given colorspace to gray - switch(colorspace) { - case MEDIA_VISION_COLORSPACE_INVALID: - LOGE("Error: mv_source has invalid colorspace."); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - case MEDIA_VISION_COLORSPACE_Y800: - channelsNumber = 1; - /* Without convertion */ - break; - case MEDIA_VISION_COLORSPACE_I420: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_I420; - break; - case MEDIA_VISION_COLORSPACE_NV12: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_NV12; - break; - case MEDIA_VISION_COLORSPACE_YV12: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_YV12; - break; - case MEDIA_VISION_COLORSPACE_NV21: - channelsNumber = 1; - height *= 1.5; - 
conversionType = CV_YUV2GRAY_NV21; - break; - case MEDIA_VISION_COLORSPACE_YUYV: - channelsNumber = 2; - conversionType = CV_YUV2GRAY_YUYV; - break; - case MEDIA_VISION_COLORSPACE_UYVY: - channelsNumber = 2; - conversionType = CV_YUV2GRAY_UYVY; - break; - case MEDIA_VISION_COLORSPACE_422P: - channelsNumber = 2; - conversionType = CV_YUV2GRAY_Y422; - break; - case MEDIA_VISION_COLORSPACE_RGB565: - channelsNumber = 2; - conversionType = CV_BGR5652GRAY; - break; - case MEDIA_VISION_COLORSPACE_RGB888: - channelsNumber = 3; - conversionType = CV_RGB2GRAY; - break; - case MEDIA_VISION_COLORSPACE_RGBA: - channelsNumber = 4; - conversionType = CV_RGBA2GRAY; - break; - default: - LOGE("Error: mv_source has unsupported colorspace."); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - if (conversionType == -1) {/* Without conversion */ - cvSource = cv::Mat(cv::Size(width, height), - CV_MAKETYPE(depth, channelsNumber), buffer).clone(); - } else {/* With conversion */ - /* Class for representation the given image as cv::Mat before conversion */ - cv::Mat origin(cv::Size(width, height), - CV_MAKETYPE(depth, channelsNumber), buffer); - cv::cvtColor(origin, cvSource, conversionType); - } - - return MEDIA_VISION_ERROR_NONE; -} - } /* Face */ } /* MediaVision */ diff --git a/mv_face/face/src/mv_face_open.cpp b/mv_face/face/src/mv_face_open.cpp index fb4496b..ce297d3 100644 --- a/mv_face/face/src/mv_face_open.cpp +++ b/mv_face/face/src/mv_face_open.cpp @@ -29,6 +29,8 @@ #include #include +#include + using namespace ::MediaVision::Face; static const RecognitionParams DEFAULT_RECOGNITION_PARAMS = RecognitionParams(); @@ -80,7 +82,7 @@ int mv_face_detect_open( { cv::Mat image; - int error = convertSourceMV2GrayCV(source, image); + int error = MediaVision::Common::convertSourceMV2GrayCV(source, image); if (error != MEDIA_VISION_ERROR_NONE) { LOGE("Convertion mv_source_h to gray failed"); return error; @@ -224,7 +226,7 @@ int mv_face_recognize_open( FaceRecognitionModel *pRecModel 
= static_cast(recognition_model); cv::Mat grayImage; - int ret = convertSourceMV2GrayCV(source, grayImage); + int ret = MediaVision::Common::convertSourceMV2GrayCV(source, grayImage); if (MEDIA_VISION_ERROR_NONE != ret) { LOGE("Convertion mv_source_h to gray failed"); @@ -318,7 +320,7 @@ int mv_face_track_open( static_cast(tracking_model); cv::Mat grayImage; - int ret = convertSourceMV2GrayCV(source, grayImage); + int ret = MediaVision::Common::convertSourceMV2GrayCV(source, grayImage); if (MEDIA_VISION_ERROR_NONE != ret) { LOGE("Convertion mv_source_h to gray failed"); @@ -377,7 +379,7 @@ int mv_face_eye_condition_recognize_open( { cv::Mat image; - int error = convertSourceMV2GrayCV(source, image); + int error = MediaVision::Common::convertSourceMV2GrayCV(source, image); if (error != MEDIA_VISION_ERROR_NONE) { LOGE("Convertion mv_source_h to gray failed"); return error; @@ -413,7 +415,7 @@ int mv_face_facial_expression_recognize_open( { cv::Mat image; - int error = convertSourceMV2GrayCV(source, image); + int error = MediaVision::Common::convertSourceMV2GrayCV(source, image); if (error != MEDIA_VISION_ERROR_NONE) { LOGE("Convertion mv_source_h to gray failed"); return error; @@ -594,7 +596,7 @@ int mv_face_recognition_model_add_open( static_cast(recognition_model); cv::Mat image; - int ret = convertSourceMV2GrayCV(source, image); + int ret = MediaVision::Common::convertSourceMV2GrayCV(source, image); if (MEDIA_VISION_ERROR_NONE != ret) { LOGE("Convertion mv_source_h to gray failed"); return ret; @@ -770,7 +772,7 @@ int mv_face_tracking_model_prepare_open( static_cast(tracking_model); cv::Mat image; - int ret = convertSourceMV2GrayCV(source, image); + int ret = MediaVision::Common::convertSourceMV2GrayCV(source, image); if (MEDIA_VISION_ERROR_NONE != ret) { LOGE("Convertion mv_source_h to gray failed"); return ret; diff --git a/mv_image/image/src/mv_image_open.cpp b/mv_image/image/src/mv_image_open.cpp index 25d19b2..3ba7dd4 100644 --- 
a/mv_image/image/src/mv_image_open.cpp +++ b/mv_image/image/src/mv_image_open.cpp @@ -25,7 +25,7 @@ #include "Recognition/ImageObject.h" #include "Recognition/ImageRecognizer.h" #include "Tracking/ImageTrackingModel.h" - +#include #include #include @@ -299,109 +299,6 @@ void extractTrackingParams( mv_destroy_engine_config(working_cfg); } -int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource) -{ - MEDIA_VISION_INSTANCE_CHECK(mvSource); - - int depth = CV_8U; // Default depth. 1 byte for channel. - unsigned int channelsNumber = 0u; - unsigned int width = 0u, height = 0u; - unsigned int bufferSize = 0u; - unsigned char *buffer = NULL; - - mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; - - MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), - "Failed to get the width."); - MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), - "Failed to get the height."); - MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), - "Failed to get the colorspace."); - MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize), - "Failed to get the buffer size."); - - int conversionType = -1; /* Type of conversion from given colorspace to gray */ - switch(colorspace) { - case MEDIA_VISION_COLORSPACE_INVALID: - LOGE("Error: mv_source has invalid colorspace."); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - case MEDIA_VISION_COLORSPACE_Y800: - channelsNumber = 1; - /* Without convertion */ - break; - case MEDIA_VISION_COLORSPACE_I420: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_I420; - break; - case MEDIA_VISION_COLORSPACE_NV12: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_NV12; - break; - case MEDIA_VISION_COLORSPACE_YV12: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_YV12; - break; - case MEDIA_VISION_COLORSPACE_NV21: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_NV21; - break; - case MEDIA_VISION_COLORSPACE_YUYV: - 
channelsNumber = 2; - conversionType = CV_YUV2GRAY_YUYV; - break; - case MEDIA_VISION_COLORSPACE_UYVY: - channelsNumber = 2; - conversionType = CV_YUV2GRAY_UYVY; - break; - case MEDIA_VISION_COLORSPACE_422P: - channelsNumber = 2; - conversionType = CV_YUV2GRAY_Y422; - break; - case MEDIA_VISION_COLORSPACE_RGB565: - channelsNumber = 2; - conversionType = CV_BGR5652GRAY; - break; - case MEDIA_VISION_COLORSPACE_RGB888: - channelsNumber = 3; - conversionType = CV_RGB2GRAY; - break; - case MEDIA_VISION_COLORSPACE_RGBA: - channelsNumber = 4; - conversionType = CV_RGBA2GRAY; - break; - default: - LOGE("Error: mv_source has unsupported colorspace."); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - if (conversionType == -1) {/* Without conversion */ - cvSource = cv::Mat(cv::Size(width, height), - CV_MAKETYPE(depth, channelsNumber), buffer).clone(); - } else {/* With conversion */ - /* Class for representation the given image as cv::Mat before conversion */ - cv::Mat origin; - - try { - origin = cv::Mat(cv::Size(width, height), - CV_MAKETYPE(depth, channelsNumber), buffer); - } catch (cv::Exception &e) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - try { - cv::cvtColor(origin, cvSource, conversionType); - } catch (cv::Exception &e) { - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - } - - return MEDIA_VISION_ERROR_NONE; -} - } /* anonymous namespace */ int mv_image_recognize_open( @@ -420,7 +317,7 @@ int mv_image_recognize_open( cv::Mat scene; MEDIA_VISION_ASSERT( - convertSourceMV2GrayCV(source, scene), + MediaVision::Common::convertSourceMV2GrayCV(source, scene), "Failed to convert mv_source."); int ret = MEDIA_VISION_ERROR_NONE; @@ -504,7 +401,7 @@ int mv_image_track_open( cv::Mat frame; MEDIA_VISION_ASSERT( - convertSourceMV2GrayCV(source, frame), + MediaVision::Common::convertSourceMV2GrayCV(source, frame), "Failed to convert mv_source."); MediaVision::Image::ImageTrackingModel *trackingModel = @@ -563,7 +460,7 @@ int 
mv_image_object_fill_open( cv::Mat image; MEDIA_VISION_ASSERT( - convertSourceMV2GrayCV(source, image), + MediaVision::Common::convertSourceMV2GrayCV(source, image), "Failed to convert mv_source."); std::vector roi; diff --git a/mv_surveillance/surveillance/include/SurveillanceHelper.h b/mv_surveillance/surveillance/include/SurveillanceHelper.h index 9e0de18..62fb5b7 100644 --- a/mv_surveillance/surveillance/include/SurveillanceHelper.h +++ b/mv_surveillance/surveillance/include/SurveillanceHelper.h @@ -38,15 +38,6 @@ namespace surveillance { */ class SurveillanceHelper { public: - /** - * @brief Converts mediavision source to cv::Mat in gray scale. - * - * @since_tizen 3.0 - * @param [in] mvSource The input media source handle - * @param [out] cvSource The outut matrix with gray scaled image - * @return @c 0 on success, otherwise a negative error value - */ - static int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource); #ifdef ENABLE_NEON /** diff --git a/mv_surveillance/surveillance/src/EventManager.cpp b/mv_surveillance/surveillance/src/EventManager.cpp index b5d186c..d6879f4 100644 --- a/mv_surveillance/surveillance/src/EventManager.cpp +++ b/mv_surveillance/surveillance/src/EventManager.cpp @@ -23,6 +23,7 @@ #include "EventTriggerMovementDetection.h" #include "mv_private.h" +#include namespace mediavision { namespace surveillance { @@ -300,7 +301,7 @@ int EventManager::pushSource(mv_source_h source, int videoStreamId) error = SurveillanceHelper::convertSourceMVRGB2GrayCVNeon(source, grayImage); else #endif /* ENABLE_NEON */ - error = SurveillanceHelper::convertSourceMV2GrayCV(source, grayImage); + error = MediaVision::Common::convertSourceMV2GrayCV(source, grayImage); if (error != MEDIA_VISION_ERROR_NONE || grayImage.empty()) { LOGE("Media source conversion failed."); diff --git a/mv_surveillance/surveillance/src/SurveillanceHelper.cpp b/mv_surveillance/surveillance/src/SurveillanceHelper.cpp index e971071..98e2360 100644 --- 
a/mv_surveillance/surveillance/src/SurveillanceHelper.cpp +++ b/mv_surveillance/surveillance/src/SurveillanceHelper.cpp @@ -25,98 +25,6 @@ namespace mediavision { namespace surveillance { -int SurveillanceHelper::convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource) -{ - MEDIA_VISION_INSTANCE_CHECK(mvSource); - - int depth = CV_8U; /* Default depth. 1 byte per channel. */ - unsigned int channelsNumber = 0; - unsigned int width = 0, height = 0; - unsigned int bufferSize = 0; - unsigned char *buffer = NULL; - - mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID; - - MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width), - "Failed to get the width."); - MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height), - "Failed to get the height."); - MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace), - "Failed to get the colorspace."); - MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize), - "Failed to get the buffer size."); - - int conversionType = -1; /* Type of conversion from given colorspace to gray */ - switch(colorspace) { - case MEDIA_VISION_COLORSPACE_INVALID: - LOGE("Error: mv_source has invalid colorspace."); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - case MEDIA_VISION_COLORSPACE_Y800: - channelsNumber = 1; - /* Without convertion */ - break; - case MEDIA_VISION_COLORSPACE_I420: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_I420; - break; - case MEDIA_VISION_COLORSPACE_NV12: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_NV12; - break; - case MEDIA_VISION_COLORSPACE_YV12: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_YV12; - break; - case MEDIA_VISION_COLORSPACE_NV21: - channelsNumber = 1; - height *= 1.5; - conversionType = CV_YUV2GRAY_NV21; - break; - case MEDIA_VISION_COLORSPACE_YUYV: - channelsNumber = 2; - conversionType = CV_YUV2GRAY_YUYV; - break; - case MEDIA_VISION_COLORSPACE_UYVY: - channelsNumber = 2; - 
conversionType = CV_YUV2GRAY_UYVY; - break; - case MEDIA_VISION_COLORSPACE_422P: - channelsNumber = 2; - conversionType = CV_YUV2GRAY_Y422; - break; - case MEDIA_VISION_COLORSPACE_RGB565: - channelsNumber = 2; - conversionType = CV_BGR5652GRAY; - break; - case MEDIA_VISION_COLORSPACE_RGB888: - channelsNumber = 3; - conversionType = CV_RGB2GRAY; - break; - case MEDIA_VISION_COLORSPACE_RGBA: - channelsNumber = 4; - conversionType = CV_RGBA2GRAY; - break; - default: - LOGE("Error: mv_source has unsupported colorspace."); - return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; - } - - if (conversionType == -1) { /* Without conversion */ - cvSource = cv::Mat(cv::Size(width, height), - CV_MAKETYPE(depth, channelsNumber), buffer).clone(); - } else { /* Conversion */ - /* Class for representation the given image as cv::Mat before conversion */ - cv::Mat origin(cv::Size(width, height), - CV_MAKETYPE(depth, channelsNumber), buffer); - cv::cvtColor(origin, cvSource, conversionType); - } - - return MEDIA_VISION_ERROR_NONE; -} - #ifdef ENABLE_NEON int SurveillanceHelper::convertSourceMVRGB2GrayCVNeon( mv_source_h mvSource, -- 2.7.4 From ae2cf9ed5ab42dc81206f36fdc390b1ff11ea127 Mon Sep 17 00:00:00 2001 From: Kwanghoon Son Date: Thu, 14 Jul 2022 01:38:05 -0400 Subject: [PATCH 04/16] mv_common: Add buffer size check [Issue type] : Bug fix Change-Id: Iee755ff4d2871a5c1a809b3ec4eed0a73555008c Reported-by: TSEVEN-2231 Signed-off-by: Kwanghoon Son --- mv_common/src/CommonUtils.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mv_common/src/CommonUtils.cpp b/mv_common/src/CommonUtils.cpp index 52e6bea..4b21714 100644 --- a/mv_common/src/CommonUtils.cpp +++ b/mv_common/src/CommonUtils.cpp @@ -99,6 +99,12 @@ int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource) return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; } + if (bufferSize < width * height * channelsNumber) { + LOGE("bufferSize : %u is too small for image w: %u, h: %u, c: %u", + bufferSize, width, height, 
channelsNumber); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; + } + if (conversionType == -1) {/* Without conversion */ cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(depth, channelsNumber), buffer).clone(); -- 2.7.4 From 3d67a1fe7429275c7efb89d41d0295babb52d3cb Mon Sep 17 00:00:00 2001 From: Hyunsoo Park Date: Mon, 4 Jul 2022 18:19:11 +0900 Subject: [PATCH 05/16] mv_roi_tracker: move logic to class subfuntion [Issue Type] Code Refactorying - Make logics class subfunction which is possible. Change-Id: I10c934756dd7483db0ff1e20ec0efd8662c17e6c Signed-off-by: Hyunsoo Park --- include/mv_roi_tracker.h | 106 ++++++++++++------- include/mv_roi_tracker_type.h | 13 +-- media-vision-config.json | 5 + mv_roi_tracker/roi_tracker/include/ROITracker.h | 14 ++- .../roi_tracker/include/mv_roi_tracker_open.h | 82 ++++++--------- mv_roi_tracker/roi_tracker/src/ROITracker.cpp | 116 +++++++++++++++++---- mv_roi_tracker/roi_tracker/src/mv_roi_tracker.c | 55 ++++------ .../roi_tracker/src/mv_roi_tracker_open.cpp | 76 +++++--------- test/testsuites/tracker/test_tracker.cpp | 41 ++++++-- 9 files changed, 290 insertions(+), 218 deletions(-) diff --git a/include/mv_roi_tracker.h b/include/mv_roi_tracker.h index 998f7b6..6a90bd5 100644 --- a/include/mv_roi_tracker.h +++ b/include/mv_roi_tracker.h @@ -35,6 +35,22 @@ extern "C" { */ /** + * @brief Defines #MV_ROI_TRACKER_TYPE to set the type used + * for tracker type attribute of the engine configuration. + * @details Switches between SPEED, BALANCED, or ACCURACY\n + * #MV_ROI_TRACKER_TYPE_ACCURACY,\n + * #MV_ROI_TRACKER_TYPE_BALANCE,\n + * #MV_ROI_TRACKER_TYPE_SPEED.\n + * + * The default type is MV_ROI_TRACKER_TYPE_BALANCE. + * + * @since_tizen 7.0 + * @see mv_engine_config_set_int_attribute() + * @see mv_engine_config_get_int_attribute() + */ +#define MV_ROI_TRACKER_TYPE "MV_ROI_TRACKER_TYPE" + +/** * @brief Creates tracker handle. * @details Use this function to create a tracker handle. 
* @@ -70,41 +86,68 @@ int mv_roi_tracker_create(mv_roi_tracker_h *handle); int mv_roi_tracker_destroy(mv_roi_tracker_h handle); /** - * @brief Sets initial ROI coordinates to tracker object handle. + * @brief Configures the attributes of the roi tracker. + * @details Use this function to configure the attributes of the roi tracker + * which is set to @a engine_config. * * @since_tizen 7.0 * - * @param[in] handle The tracker handle to set coordinates. - * @param[in] x The x coordinate to set initial ROI to be tracked - * @param[in] y The y coordinate to set initial ROI to be tracked - * @param[in] width The width to set initial ROI to be tracked - * @param[in] height The height to set initial ROI to be tracked + * @param[in] handle The handle to the roi tracker + * @param[in] engine_config The handle to the configuration of + * engine. * * @return @c 0 on success, otherwise a negative error value * @retval #MEDIA_VISION_ERROR_NONE Successful * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter - * - * @see mv_roi_tracker_create() + * in @a engine_config */ -int mv_roi_tracker_set_coordinate(mv_roi_tracker_h handle, int x, int y, int width, int height); +int mv_roi_tracker_configure(mv_roi_tracker_h handle, + mv_engine_config_h engine_config); /** - * @brief Sets tracker type to ROI tracker object handle. + * @brief Prepares roi tracker. + * @details Use this function to prepare roi tracker based on + * the configurtion. * * @since_tizen 7.0 * - * @param[in] handle The tracker handle to set tracker type. 
- * @param[in] type The tracker type option to be set - * + * @param[in] handle The handle to the roi tracker + * @param[in] x The x coordinate to set ROI to be tracked + * @param[in] y The y coordinate to set ROI to be tracked + * @param[in] width The width to set ROI to be tracked + * @param[in] height The height to set ROI to be tracked * @return @c 0 on success, otherwise a negative error value * @retval #MEDIA_VISION_ERROR_NONE Successful * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter + */ +int mv_roi_tracker_prepare(mv_roi_tracker_h handle, int x, int y, int width, int height); + +/** + * @brief Called when roi in @a source are detected. + * @details This type callback is invoked each time when + * mv_roi_tracker_perform() is called to provide + * the results of the tracked roi. + * + * @since_tizen 7.0 + * @remarks The @a roi should not be released by app. They can be used only in the callback. + * + * @param[in] source The handle to the source of the media where + * roi tracker were performed. @a source is the same object + * for which mv_roi_tracker_perform() was called. + * It should be released by calling mv_destroy_source() + * when it's not needed anymore. + * @param[in] roi Roi of the tracked result. + * @param[in] user_data The user data passed from callback invoking code * - * @see mv_roi_tracker_create() + * @see mv_roi_tracker_perform() */ -int mv_roi_tracker_set_tracker_type(mv_roi_tracker_h handle, mv_roi_tracker_type_e type); +typedef void (*mv_roi_tracker_tracked_cb)( + mv_source_h source, + mv_rectangle_s roi, + void *user_data); + /** * @brief Tracks with a given tracker on the @a source. @@ -115,7 +158,10 @@ int mv_roi_tracker_set_tracker_type(mv_roi_tracker_h handle, mv_roi_tracker_type * * @param[in] handle The handle to the tracker object. * @param[in] source The handle to the source of the media. 
- * + * @param[in] tracked_cb The callback which will receive the tracked results. + * @param[in] user_data The user data passed from the code where + * mv_roi_tracker_perform() is invoked. + * This data will be accessible in @a tracked_cb callback. * @return @c 0 on success, otherwise a negative error value * @retval #MEDIA_VISION_ERROR_NONE Successful * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported @@ -127,31 +173,13 @@ int mv_roi_tracker_set_tracker_type(mv_roi_tracker_h handle, mv_roi_tracker_type * * @pre Create a new tracker handle by calling @ref mv_roi_tracker_create() */ -int mv_roi_tracker_perform(mv_roi_tracker_h handle, mv_source_h source); +int mv_roi_tracker_perform( + mv_roi_tracker_h handle, + mv_source_h source, + mv_roi_tracker_tracked_cb tracked_cb, + void *user_data); /** - * @brief Gets ROI result - * @details Use this function to get ROI result after calling mv_roi_tracker_perform function. - * This function returns a proper ROI coordinates and size result of the tracked region. - * - * @since_tizen 7.0 - * - * @param[in] handle The handle to the tracker object. - * @param[out] x The x coordinate of ROI result. - * @param[out] y The y coordinate of ROI result. - * @param[out] width The width of ROI result. - * @param[out] height The height of ROI result. 
- * - * @return @c 0 on success, otherwise a negative error value - * @retval #MEDIA_VISION_ERROR_NONE Successful - * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported - * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter - * - * @pre Perform tracking action by calling @ref mv_roi_tracker_perform() - */ -int mv_roi_tracker_get_result(mv_roi_tracker_h handle, - int *x, int *y, int *width, int *height); -/** * @} */ diff --git a/include/mv_roi_tracker_type.h b/include/mv_roi_tracker_type.h index 1f01b75..f49812c 100644 --- a/include/mv_roi_tracker_type.h +++ b/include/mv_roi_tracker_type.h @@ -44,10 +44,11 @@ extern "C" { * @since_tizen 7.0 */ typedef struct { - int x; /**< Left-top x coordinate of tracked region */ - int y; /**< Left-top y coordinate of tracked region */ - int width; /**< Width of tracked region */ - int height; /**< Height of tracked region */ + int x; /**< Left-top x coordinate of tracked region */ + int y; /**< Left-top y coordinate of tracked region */ + int width; /**< Width of tracked region */ + int height; /**< Height of tracked region */ + bool initialized; /**< flag that struct is initialized or not */ } mv_roi_tracker_result_s; /** @@ -58,9 +59,9 @@ typedef void *mv_roi_tracker_h; typedef enum { MV_ROI_TRACKER_TYPE_NONE = 0, /**< None */ - MV_ROI_TRACKER_TYPE_SPEED, /**< Tracker type focused on speed */ + MV_ROI_TRACKER_TYPE_ACCURACY, /**< Tracker type focused on accuracy */ MV_ROI_TRACKER_TYPE_BALANCE, /**< Tracker type focused on balance */ - MV_ROI_TRACKER_TYPE_ACCURACY /**< Tracker type focused on accuracy */ + MV_ROI_TRACKER_TYPE_SPEED /**< Tracker type focused on speed */ } mv_roi_tracker_type_e; /** * @} diff --git a/media-vision-config.json b/media-vision-config.json index 02cea2b..b12a490 100644 --- a/media-vision-config.json +++ b/media-vision-config.json @@ -262,6 +262,11 @@ "name" : "MV_FACE_RECOGNITION_DECISION_THRESHOLD", "type" : "double", "value" : -0.85 + }, + { + "name" : "MV_ROI_TRACKER_TYPE", + 
"type" : "integer", + "value" : 2 } ] } diff --git a/mv_roi_tracker/roi_tracker/include/ROITracker.h b/mv_roi_tracker/roi_tracker/include/ROITracker.h index 377b03d..844c37c 100644 --- a/mv_roi_tracker/roi_tracker/include/ROITracker.h +++ b/mv_roi_tracker/roi_tracker/include/ROITracker.h @@ -32,11 +32,17 @@ public: ROITracker(); ~ ROITracker() = default; - mv_roi_tracker_result_s Initialize(cv::Mat &frame); - mv_roi_tracker_result_s Update(cv::Mat &frame); + void perform(cv::Mat frame); + void getResult(int *x, int *y, int *width, int *height); + bool initialized(); + int setConfiguration(mv_engine_config_h engine_config); + int setRoi(int x, int y, int width, int height); +private: + void initialize(cv::Mat &frame); + void update(cv::Mat &frame); + cv::Ptr cvTracker; cv::Rect boundingBox; - bool initialized; mv_roi_tracker_type_e type; mv_roi_tracker_result_s result; }; @@ -44,4 +50,4 @@ public: } /* ROITracker */ } /* MediaVision */ -#endif /* __ROI_TRACKER_H__ */ \ No newline at end of file +#endif /* __ROI_TRACKER_H__ */ diff --git a/mv_roi_tracker/roi_tracker/include/mv_roi_tracker_open.h b/mv_roi_tracker/roi_tracker/include/mv_roi_tracker_open.h index 8821c0e..3100f56 100644 --- a/mv_roi_tracker/roi_tracker/include/mv_roi_tracker_open.h +++ b/mv_roi_tracker/roi_tracker/include/mv_roi_tracker_open.h @@ -31,7 +31,6 @@ extern "C" * @brief Create tracker object handle. * @details Use this function to create an tracker object handle. * After creation the handle has to be performed with - * @ref mv_roi_tracker_set_coordinate_open() function and * @ref mv_roi_tracker_perform_open() function to perform * an tracker object. * @@ -69,47 +68,44 @@ int mv_roi_tracker_create_open(mv_roi_tracker_h *handle); int mv_roi_tracker_destroy_open(mv_roi_tracker_h handle); /** - * @brief Set initial roi coordinates to roi tracker object handle. - * @details Use this function to set coordinates want to track to an tracker object handle. 
- * After setting coordinates, handle has to be performed with - * @ref mv_roi_tracker_perform_open() function to perform - * an tracker object. + * @brief Configure attributes to the roi tracker handle * * @since_tizen 7.0 * - * @param [in] handle The handle to the tracker object to be created - * @param [in] x The x coordiante to set initial ROI to be tracked - * @param [in] y The y coordiante to set initial ROI to be tracked - * @param [in] width The width to set initial ROI to be tracked - * @param [in] height The height to set initial ROI to be tracked + * @param [in] handle The handle to the roi tracker + * @param [in] engine_config The handle to the configuration of + * engine. + * * @return @c 0 on success, otherwise a negative error value * @retval #MEDIA_VISION_ERROR_NONE Successful * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter - * @pre Create an tracker handle by calling @ref mv_roi_tracker_create_open() - * - * @see mv_roi_tracker_create_open() + * in @a engine_config + * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data + * in @a engine_config + * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data + * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory + * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported */ -int mv_roi_tracker_set_coordinate_open(mv_roi_tracker_h handle, int x, int y, int width, int height); +int mv_roi_tracker_configure_engine_open(mv_roi_tracker_h handle, + mv_engine_config_h engine_config); /** - * @brief Sets default tracker to roi tracker object handle. - * @details Use this function to set tracker want to set to an tracker object handle. - * After setting tracker, handle has to be performed with - * @ref mv_roi_tracker_perform_open() function to perform - * an tracker object. + * @brief Prepare roi tracker. + * @details Use this function to prepare roi tracker based on + * the configured attributes. 
* * @since_tizen 7.0 * - * @param [in] handle The handle to the tracker object - * @param [in] type The tracker to set default tracker to be set + * @param [in] handle The handle to the roi tracker + * @param[in] x The x coordinate to set ROI to be tracked + * @param[in] y The y coordinate to set ROI to be tracked + * @param[in] width The width to set ROI to be tracked + * @param[in] height The height to set ROI to be tracked * @return @c 0 on success, otherwise a negative error value * @retval #MEDIA_VISION_ERROR_NONE Successful * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter - * @pre Create an tracker handle by calling @ref mv_roi_tracker_create_open() - * - * @see mv_roi_tracker_create_open() */ -int mv_roi_tracker_set_tracker_type_open(mv_roi_tracker_h handle, mv_roi_tracker_type_e type); +int mv_roi_tracker_prepare_open(mv_roi_tracker_h handle, int x, int y, int width, int height); /** * @brief Track with a given roi on the @a source. @@ -118,9 +114,14 @@ int mv_roi_tracker_set_tracker_type_open(mv_roi_tracker_h handle, mv_roi_tracker * * @since_tizen 7.0 * - * @param [in] handle The handle to the tracker object. - * @param [in] source The handle to the source of the media. - * + * @param [in] handle The handle to the tracker object. + * @param [in] source The handle to the source of the media. + * @param [in] tracked_cb The callback which will be called for + * tracked roi on media source. + * This callback will receive the tracked roi. + * @param [in] user_data The user data passed from the code where + * @ref mv_roi_tracker_perform_open() is invoked. This data will + * be accessible from @a tracked_cb callback. 
* @return @c 0 on success, otherwise a negative error value * @retval #MEDIA_VISION_ERROR_NONE Successful * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter @@ -132,29 +133,8 @@ int mv_roi_tracker_set_tracker_type_open(mv_roi_tracker_h handle, mv_roi_tracker * @pre Create a source handle by calling @ref mv_create_source() * @pre Create an tracker handle by calling @ref mv_roi_tracker_create_open() */ -int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source); +int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source, mv_roi_tracker_tracked_cb tracked_cb, void *user_data); -/** - * @brief Gets ROI result - * @details Use this function to get ROI result after calling mv_roi_tracker_perform function. - * This function returns a proper ROI coordinates and size result of the tracked region. - * - * @since_tizen 7.0 - * - * @param[in] handle The handle to the tracker object. - * @param[out] x The x coordinate to set initial ROI to be tracked - * @param[out] y The y coordinate to set initial ROI to be tracked - * @param[out] width The width to set initial ROI to be tracked - * @param[out] height The height to set initial ROI to be tracked - * - * @return @c 0 on success, otherwise a negative error value - * @retval #MEDIA_VISION_ERROR_NONE Successful - * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter - * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation - * - * @pre Perform tracking action by calling @ref mv_roi_tracker_perform_open() - */ -int mv_roi_tracker_get_result_open(mv_roi_tracker_h handle, int *x, int *y, int *width, int *height); #ifdef __cplusplus } #endif /* __cplusplus */ diff --git a/mv_roi_tracker/roi_tracker/src/ROITracker.cpp b/mv_roi_tracker/roi_tracker/src/ROITracker.cpp index 24ccd68..86c3c2b 100644 --- a/mv_roi_tracker/roi_tracker/src/ROITracker.cpp +++ b/mv_roi_tracker/roi_tracker/src/ROITracker.cpp @@ -39,14 +39,21 @@ using namespace std; namespace MediaVision { 
namespace ROITracker { + + ROITracker::ROITracker() - : initialized(false), - type(MV_ROI_TRACKER_TYPE_BALANCE), - result() + : type(MV_ROI_TRACKER_TYPE_BALANCE) { + result = (mv_roi_tracker_result_s) { + .x = -1, + .y = -1, + .width = -1, + .height = -1, + .initialized = false + }; } -mv_roi_tracker_result_s ROITracker::Initialize(cv::Mat &frame) +void ROITracker::initialize(cv::Mat &frame) { LOGI("ENTER"); @@ -57,42 +64,105 @@ mv_roi_tracker_result_s ROITracker::Initialize(cv::Mat &frame) boundingBox.width, boundingBox.height); + if (cvTracker) { + LOGE("cvTracker already exists. 'mv_roi_tracker_destroy' should be called for removing cvTracker."); + throw std::runtime_error("tracker Initialize failed."); + } + cvTracker = createTrackerByName(type); cvTracker->init(frame, boundingBox); - initialized = true; - LOGI("LEAVE"); - return mv_roi_tracker_result_s { - boundingBox.x, - boundingBox.y, - boundingBox.width, - boundingBox.height + result = (mv_roi_tracker_result_s) { + .x = boundingBox.x, + .y = boundingBox.y, + .width = boundingBox.width, + .height = boundingBox.height, + .initialized = true }; + LOGI("LEAVE"); } -mv_roi_tracker_result_s ROITracker::Update(cv::Mat &frame) +void ROITracker::update(cv::Mat &frame) { LOGI("ENTER"); //updates the tracker if (cvTracker->update(frame, boundingBox)) { + result = (mv_roi_tracker_result_s) { + .x = boundingBox.x, + .y = boundingBox.y, + .width = boundingBox.width, + .height = boundingBox.height, + .initialized = true + }; LOGD(" Updated: x: %d, y: %d, w: %d, h: %d", - boundingBox.x, - boundingBox.y, - boundingBox.width, - boundingBox.height); + result.x, + result.y, + result.width, + result.height); + } else { LOGE("update failed."); + result.initialized = false; throw std::runtime_error("tracker update failed."); } LOGI("LEAVE"); - return mv_roi_tracker_result_s { - boundingBox.x, - boundingBox.y, - boundingBox.width, - boundingBox.height - }; +} + +void ROITracker::perform(cv::Mat frame) +{ + if 
(!result.initialized) + initialize(frame); + else + update(frame); +} + +void ROITracker::getResult(int *x, int *y, int *width, int *height) +{ + if (!result.initialized) + throw std::runtime_error("tracker getResult failed."); + + *x = result.x; + *y = result.y; + *width = result.width; + *height = result.height; +} + +int ROITracker::setConfiguration(mv_engine_config_h engine_config) +{ + int tracker_type = 0; + int ret = mv_engine_config_get_int_attribute( + engine_config, MV_ROI_TRACKER_TYPE, &tracker_type); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Fail to get roi tracker type"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + type = static_cast(tracker_type); + LOGD("tracker_type: [%d] is set.", type); + + return MEDIA_VISION_ERROR_NONE; +} + +int ROITracker::setRoi(int x, int y, int width, int height) +{ + boundingBox = { x, y, width, height }; + + LOGD("ROI : x:%d, y:%d, w:%d, h:%d is set.", + boundingBox.x, + boundingBox.y, + boundingBox.width, + boundingBox.height); + + LOGD("LEAVE"); + + return MEDIA_VISION_ERROR_NONE; +} + +bool ROITracker::initialized() +{ + return result.initialized; } } /* ROITracker */ -} /* MediaVision */ \ No newline at end of file +} /* MediaVision */ diff --git a/mv_roi_tracker/roi_tracker/src/mv_roi_tracker.c b/mv_roi_tracker/roi_tracker/src/mv_roi_tracker.c index 3d6e66a..6a5b00e 100644 --- a/mv_roi_tracker/roi_tracker/src/mv_roi_tracker.c +++ b/mv_roi_tracker/roi_tracker/src/mv_roi_tracker.c @@ -50,70 +50,59 @@ int mv_roi_tracker_destroy(mv_roi_tracker_h handle) return ret; } -int mv_roi_tracker_set_coordinate(mv_roi_tracker_h handle, int x, int y, int width, int height) +int mv_roi_tracker_configure(mv_roi_tracker_h handle, + mv_engine_config_h engine_config) { MEDIA_VISION_SUPPORT_CHECK( - __mv_roi_tracking_check_system_info_feature_supported()); - + __mv_roi_tracking_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(handle); + MEDIA_VISION_INSTANCE_CHECK(engine_config); 
MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_roi_tracker_set_coordinate_open(handle, x, y, width, height); - - MEDIA_VISION_FUNCTION_LEAVE(); - - return ret; -} - -int mv_roi_tracker_set_tracker_type(mv_roi_tracker_h handle, mv_roi_tracker_type_e type) -{ - MEDIA_VISION_SUPPORT_CHECK( - __mv_roi_tracking_check_system_info_feature_supported()); - - MEDIA_VISION_INSTANCE_CHECK(handle); - - MEDIA_VISION_FUNCTION_ENTER(); + int ret = MEDIA_VISION_ERROR_NONE; - int ret = mv_roi_tracker_set_tracker_type_open(handle, type); + ret = mv_roi_tracker_configure_engine_open(handle, engine_config); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Fail to configure engine and target"); + return ret; + } MEDIA_VISION_FUNCTION_LEAVE(); - return ret; } -int mv_roi_tracker_perform(mv_roi_tracker_h handle, mv_source_h source) +int mv_roi_tracker_prepare(mv_roi_tracker_h handle, int x, int y, int width, int height) { MEDIA_VISION_SUPPORT_CHECK( - __mv_roi_tracking_check_system_info_feature_supported()); - + __mv_roi_tracking_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(handle); - MEDIA_VISION_INSTANCE_CHECK(source); + MEDIA_VISION_NULL_ARG_CHECK(x); + MEDIA_VISION_NULL_ARG_CHECK(y); + MEDIA_VISION_NULL_ARG_CHECK(width); + MEDIA_VISION_NULL_ARG_CHECK(height); MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_roi_tracker_perform_open(handle, source); + int ret = MEDIA_VISION_ERROR_NONE; - MEDIA_VISION_FUNCTION_LEAVE(); + ret = mv_roi_tracker_prepare_open(handle, x, y, width, height); + MEDIA_VISION_FUNCTION_LEAVE(); return ret; } -int mv_roi_tracker_get_result(mv_roi_tracker_h handle, - int *x, int *y, int *width, int *height) +int mv_roi_tracker_perform(mv_roi_tracker_h handle, mv_source_h source, mv_roi_tracker_tracked_cb tracked_cb, void *user_data) { MEDIA_VISION_SUPPORT_CHECK( __mv_roi_tracking_check_system_info_feature_supported()); MEDIA_VISION_INSTANCE_CHECK(handle); - MEDIA_VISION_NULL_ARG_CHECK(x); - MEDIA_VISION_NULL_ARG_CHECK(y); - 
MEDIA_VISION_NULL_ARG_CHECK(width); - MEDIA_VISION_NULL_ARG_CHECK(height); + MEDIA_VISION_INSTANCE_CHECK(source); MEDIA_VISION_FUNCTION_ENTER(); - int ret = mv_roi_tracker_get_result_open(handle, x, y, width, height); + int ret = mv_roi_tracker_perform_open(handle, source, tracked_cb, user_data); MEDIA_VISION_FUNCTION_LEAVE(); diff --git a/mv_roi_tracker/roi_tracker/src/mv_roi_tracker_open.cpp b/mv_roi_tracker/roi_tracker/src/mv_roi_tracker_open.cpp index 03cf733..7cecb8d 100644 --- a/mv_roi_tracker/roi_tracker/src/mv_roi_tracker_open.cpp +++ b/mv_roi_tracker/roi_tracker/src/mv_roi_tracker_open.cpp @@ -77,15 +77,15 @@ int mv_roi_tracker_destroy_open(mv_roi_tracker_h handle) LOGD("Destroying tracker handle [%p]", handle); delete static_cast(handle); - LOGD("Tracker handle has been destroyed"); LOGD("LEAVE"); return MEDIA_VISION_ERROR_NONE; } -int mv_roi_tracker_set_coordinate_open(mv_roi_tracker_h handle, int x, int y, int width, int height) +int mv_roi_tracker_configure_engine_open(mv_roi_tracker_h handle, + mv_engine_config_h engine_config) { - LOGD("ENTER"); + LOGI("ENTER"); if (!handle) { LOGE("Handle is NULL."); @@ -94,20 +94,19 @@ int mv_roi_tracker_set_coordinate_open(mv_roi_tracker_h handle, int x, int y, in auto pTracker = static_cast(handle); - pTracker->boundingBox = { x, y, width, height }; - LOGD("Init pos : x:%d, y:%d, w:%d, h:%d is set.", - pTracker->boundingBox.x, - pTracker->boundingBox.y, - pTracker->boundingBox.width, - pTracker->boundingBox.height); + int ret = pTracker->setConfiguration(engine_config); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Fail to set configuration"); + return ret; + } + LOGI("LEAVE"); - LOGD("LEAVE"); return MEDIA_VISION_ERROR_NONE; } -int mv_roi_tracker_set_tracker_type_open(mv_roi_tracker_h handle, mv_roi_tracker_type_e type) +int mv_roi_tracker_prepare_open(mv_roi_tracker_h handle, int x, int y, int width, int height) { - LOGD("ENTER"); + LOGI("ENTER"); if (!handle) { LOGE("Handle is NULL."); @@ -115,18 +114,18 @@ 
int mv_roi_tracker_set_tracker_type_open(mv_roi_tracker_h handle, mv_roi_tracker } auto pTracker = static_cast(handle); - - if (!pTracker->initialized) { - pTracker->type = type; - LOGD("tracker_type: [%d] is set.", pTracker->type); - } else { - LOGW("This function is valid only in case that tracker isn't initialized."); + int ret = pTracker->setRoi(x, y, width, height); + if (ret != MEDIA_VISION_ERROR_NONE) { + LOGE("Fail to set roi"); + return ret; } - LOGD("LEAVE"); + + LOGI("LEAVE"); + return MEDIA_VISION_ERROR_NONE; } -int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source) +int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source, mv_roi_tracker_tracked_cb tracked_cb, void *user_data) { LOGD("ENTER"); if (!handle) { @@ -134,7 +133,7 @@ int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source) return MEDIA_VISION_ERROR_INVALID_PARAMETER; } - ROITracker *pTracker = static_cast(handle); + auto pTracker = static_cast(handle); unsigned int channels = 0; unsigned int width = 0, height = 0; @@ -153,42 +152,17 @@ int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source) LOGD(" w: %d, h: %d, c: %d", width, height, channels); try { - cv::Mat frame = getTrackerFrame(colorspace, channels, width, height, buffer); - if (!pTracker->initialized) { - pTracker->result = pTracker->Initialize(frame); - return MEDIA_VISION_ERROR_NONE; - } - pTracker->result = pTracker->Update(frame); + pTracker->perform(getTrackerFrame(colorspace, channels, width, height, buffer)); } catch (const std::exception &e) { LOGE("Failed : %s", e.what()); return MEDIA_VISION_ERROR_INVALID_OPERATION; } - LOGD("LEAVE"); - return MEDIA_VISION_ERROR_NONE; -} -int mv_roi_tracker_get_result_open(mv_roi_tracker_h handle, - int *x, int *y, int *width, int *height) -{ - LOGD("ENTER"); - if (!handle) { - LOGE("Handle is NULL."); - return MEDIA_VISION_ERROR_INVALID_PARAMETER; - } + mv_rectangle_s roi; + pTracker->getResult(&roi.point.x, 
&roi.point.y, &roi.width, &roi.height); - auto *pTracker = static_cast(handle); - - if (!pTracker->initialized) { - LOGE("ROI Tracker dosn't performed yet."); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - *x = pTracker->result.x; - *y = pTracker->result.y; - *width = pTracker->result.width; - *height = pTracker->result.height; - - LOGD("x:%d, y:%d w: %d, h: %d", *x, *y, *width, *height); + tracked_cb(source, roi, user_data); LOGD("LEAVE"); return MEDIA_VISION_ERROR_NONE; -} \ No newline at end of file +} diff --git a/test/testsuites/tracker/test_tracker.cpp b/test/testsuites/tracker/test_tracker.cpp index 7c84364..dd24589 100644 --- a/test/testsuites/tracker/test_tracker.cpp +++ b/test/testsuites/tracker/test_tracker.cpp @@ -27,6 +27,25 @@ using namespace testing; using namespace std; using namespace MediaVision::Common; +void _tracked_cb(mv_source_h source, + mv_rectangle_s roi, + void *user_data) +{ + printf("In callback: roi.x y width height : %d %d %d %d\n", roi.point.x, roi.point.y, roi.width, roi.height); +} + +int perform_tracker_configure(mv_engine_config_h engine_cfg) +{ + int ret = mv_engine_config_set_int_attribute( + engine_cfg, MV_ROI_TRACKER_TYPE, (int)MV_ROI_TRACKER_TYPE_BALANCE); + if (ret != MEDIA_VISION_ERROR_NONE) { + printf("Fail to set roi tracker type"); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + return ret; +} + TEST(TrackerTest, TrackerClassShouldBeOk) { mv_roi_tracker_h handle; @@ -34,10 +53,17 @@ TEST(TrackerTest, TrackerClassShouldBeOk) int ret = mv_roi_tracker_create(&handle); ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE); - ret = mv_roi_tracker_set_tracker_type(handle, MV_ROI_TRACKER_TYPE_BALANCE); + mv_engine_config_h config = NULL; + ret = mv_create_engine_config(&config); + ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE); + + ret = perform_tracker_configure(config); + ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE); + + ret = mv_roi_tracker_configure(handle, config); ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE); - ret = 
mv_roi_tracker_set_coordinate(handle, 50, 50, 70, 70); + ret = mv_roi_tracker_prepare(handle, 50, 50, 50, 50); ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE); const string image_path = string("/usr/share/capi-media-vision/roi-tracker/tracker-test.jpeg"); @@ -49,17 +75,10 @@ TEST(TrackerTest, TrackerClassShouldBeOk) ret = ImageHelper::loadImageToSource(image_path.c_str(), mv_source); ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE); - ret = mv_roi_tracker_perform(handle, mv_source); - ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE); - - int x, y, width, height; - ret = mv_roi_tracker_get_result(handle, &x, &y, &width, &height); - ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE); - - ret = mv_roi_tracker_perform(handle, mv_source); + ret = mv_roi_tracker_perform(handle, mv_source, _tracked_cb, NULL); ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE); - ret = mv_roi_tracker_get_result(handle, &x, &y, &width, &height); + ret = mv_roi_tracker_perform(handle, mv_source, _tracked_cb, NULL); ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE); ret = mv_roi_tracker_destroy(handle); -- 2.7.4 From 5e865c84f6174f43c32747102def8c05adf5c8e1 Mon Sep 17 00:00:00 2001 From: Seungbae Shin Date: Wed, 13 Jul 2022 18:29:56 +0900 Subject: [PATCH 06/16] mv_roi_tracker: more refactoring [Issue Type] Code Refactorying - merge 'result' with 'boundingBox' - rename member variables to start with __ prefix - remove unnecessary initialized() method - take mv_roi_tracker_type_e instead of mv_engine_config_h for the setting type - return result by tuple instead Change-Id: Id537086cbab2e56c474cae7343c40e6cf3af6e05 --- mv_roi_tracker/roi_tracker/include/ROITracker.h | 31 +++--- mv_roi_tracker/roi_tracker/src/ROITracker.cpp | 121 +++++---------------- .../roi_tracker/src/mv_roi_tracker_open.cpp | 38 +++---- packaging/capi-media-vision.spec | 2 +- 4 files changed, 63 insertions(+), 129 deletions(-) diff --git a/mv_roi_tracker/roi_tracker/include/ROITracker.h b/mv_roi_tracker/roi_tracker/include/ROITracker.h index 844c37c..3b6d6fd 100644 --- 
a/mv_roi_tracker/roi_tracker/include/ROITracker.h +++ b/mv_roi_tracker/roi_tracker/include/ROITracker.h @@ -27,24 +27,27 @@ namespace MediaVision { namespace ROITracker { + +using TrackerResult = std::tuple; + class ROITracker { public: - ROITracker(); - ~ ROITracker() = default; - - void perform(cv::Mat frame); - void getResult(int *x, int *y, int *width, int *height); - bool initialized(); - int setConfiguration(mv_engine_config_h engine_config); - int setRoi(int x, int y, int width, int height); + ROITracker() = default; + virtual ~ROITracker() = default; + + void setType(mv_roi_tracker_type_e type) noexcept; + void setRoi(int x, int y, int width, int height) noexcept; + void perform(cv::Mat& frame); + TrackerResult result(); + private: - void initialize(cv::Mat &frame); - void update(cv::Mat &frame); + void initialize(cv::Mat& frame); + void update(cv::Mat& frame); - cv::Ptr cvTracker; - cv::Rect boundingBox; - mv_roi_tracker_type_e type; - mv_roi_tracker_result_s result; + cv::Ptr __cvTracker; + cv::Rect __boundingBox; + mv_roi_tracker_type_e __type { MV_ROI_TRACKER_TYPE_BALANCE }; + bool __initialized { false }; }; } /* ROITracker */ diff --git a/mv_roi_tracker/roi_tracker/src/ROITracker.cpp b/mv_roi_tracker/roi_tracker/src/ROITracker.cpp index 86c3c2b..c473865 100644 --- a/mv_roi_tracker/roi_tracker/src/ROITracker.cpp +++ b/mv_roi_tracker/roi_tracker/src/ROITracker.cpp @@ -35,134 +35,69 @@ #include "ROITracker.h" #include "ROITrackerUtil.h" -using namespace std; - namespace MediaVision { namespace ROITracker { - -ROITracker::ROITracker() - : type(MV_ROI_TRACKER_TYPE_BALANCE) -{ - result = (mv_roi_tracker_result_s) { - .x = -1, - .y = -1, - .width = -1, - .height = -1, - .initialized = false - }; -} - -void ROITracker::initialize(cv::Mat &frame) +void ROITracker::initialize(cv::Mat& frame) { - LOGI("ENTER"); - - //initializes the tracker - LOGD("Init pos : x:%d, y:%d, w:%d, h:%d is set.", - boundingBox.x, - boundingBox.y, - boundingBox.width, - 
boundingBox.height); - - if (cvTracker) { + if (__cvTracker) { LOGE("cvTracker already exists. 'mv_roi_tracker_destroy' should be called for removing cvTracker."); throw std::runtime_error("tracker Initialize failed."); } - cvTracker = createTrackerByName(type); - cvTracker->init(frame, boundingBox); - - result = (mv_roi_tracker_result_s) { - .x = boundingBox.x, - .y = boundingBox.y, - .width = boundingBox.width, - .height = boundingBox.height, - .initialized = true - }; - LOGI("LEAVE"); + LOGD("Init pos : x:%d, y:%d, w:%d, h:%d is set.", + __boundingBox.x, __boundingBox.y, __boundingBox.width, __boundingBox.height); + + __cvTracker = createTrackerByName(__type); + __cvTracker->init(frame, __boundingBox); + + __initialized = true; + + LOGI("Initialized done"); } -void ROITracker::update(cv::Mat &frame) +void ROITracker::update(cv::Mat& frame) { - LOGI("ENTER"); - - //updates the tracker - if (cvTracker->update(frame, boundingBox)) { - result = (mv_roi_tracker_result_s) { - .x = boundingBox.x, - .y = boundingBox.y, - .width = boundingBox.width, - .height = boundingBox.height, - .initialized = true - }; - LOGD(" Updated: x: %d, y: %d, w: %d, h: %d", - result.x, - result.y, - result.width, - result.height); - - } else { + if (!__cvTracker->update(frame, __boundingBox)) { LOGE("update failed."); - result.initialized = false; + __initialized = false; throw std::runtime_error("tracker update failed."); } - LOGI("LEAVE"); + LOGD(" Updated: x: %d, y: %d, w: %d, h: %d", + __boundingBox.x, __boundingBox.y, __boundingBox.width, __boundingBox.height); } -void ROITracker::perform(cv::Mat frame) +void ROITracker::perform(cv::Mat& frame) { - if (!result.initialized) + if (!__initialized) initialize(frame); else update(frame); } -void ROITracker::getResult(int *x, int *y, int *width, int *height) +TrackerResult ROITracker::result() { - if (!result.initialized) - throw std::runtime_error("tracker getResult failed."); + if (!__initialized) + throw std::runtime_error("not initialized 
yet!!!"); - *x = result.x; - *y = result.y; - *width = result.width; - *height = result.height; + return std::make_tuple(__boundingBox.x, __boundingBox.y, __boundingBox.width, __boundingBox.height); } -int ROITracker::setConfiguration(mv_engine_config_h engine_config) +void ROITracker::setType(mv_roi_tracker_type_e type) noexcept { - int tracker_type = 0; - int ret = mv_engine_config_get_int_attribute( - engine_config, MV_ROI_TRACKER_TYPE, &tracker_type); - if (ret != MEDIA_VISION_ERROR_NONE) { - LOGE("Fail to get roi tracker type"); - return MEDIA_VISION_ERROR_INVALID_OPERATION; - } - - type = static_cast(tracker_type); - LOGD("tracker_type: [%d] is set.", type); + __type = type; - return MEDIA_VISION_ERROR_NONE; + LOGD("tracker_type: [%d] is set.", static_cast(__type)); } -int ROITracker::setRoi(int x, int y, int width, int height) +void ROITracker::setRoi(int x, int y, int width, int height) noexcept { - boundingBox = { x, y, width, height }; + __boundingBox = { x, y, width, height }; LOGD("ROI : x:%d, y:%d, w:%d, h:%d is set.", - boundingBox.x, - boundingBox.y, - boundingBox.width, - boundingBox.height); - - LOGD("LEAVE"); - - return MEDIA_VISION_ERROR_NONE; + __boundingBox.x, __boundingBox.y, __boundingBox.width, __boundingBox.height); } -bool ROITracker::initialized() -{ - return result.initialized; -} } /* ROITracker */ } /* MediaVision */ diff --git a/mv_roi_tracker/roi_tracker/src/mv_roi_tracker_open.cpp b/mv_roi_tracker/roi_tracker/src/mv_roi_tracker_open.cpp index 7cecb8d..1f198f5 100644 --- a/mv_roi_tracker/roi_tracker/src/mv_roi_tracker_open.cpp +++ b/mv_roi_tracker/roi_tracker/src/mv_roi_tracker_open.cpp @@ -52,7 +52,6 @@ int mv_roi_tracker_create_open(mv_roi_tracker_h *handle) { LOGD("ENTER"); - //instantiates the specific Tracker ROITracker *pTracker = new (std::nothrow)ROITracker; if (!pTracker) { LOGE("Failed to create tracker"); @@ -70,6 +69,7 @@ int mv_roi_tracker_create_open(mv_roi_tracker_h *handle) int 
mv_roi_tracker_destroy_open(mv_roi_tracker_h handle) { LOGD("ENTER"); + if (!handle) { LOGE("Handle can't be destroyed because handle is NULL"); return MEDIA_VISION_ERROR_INVALID_PARAMETER; @@ -92,15 +92,15 @@ int mv_roi_tracker_configure_engine_open(mv_roi_tracker_h handle, return MEDIA_VISION_ERROR_INVALID_PARAMETER; } + int tracker_type; + if (mv_engine_config_get_int_attribute(engine_config, + MV_ROI_TRACKER_TYPE, &tracker_type) != MEDIA_VISION_ERROR_NONE) + return MEDIA_VISION_ERROR_INVALID_OPERATION; + auto pTracker = static_cast(handle); + pTracker->setType(static_cast(tracker_type)); - int ret = pTracker->setConfiguration(engine_config); - if (ret != MEDIA_VISION_ERROR_NONE) { - LOGE("Fail to set configuration"); - return ret; - } LOGI("LEAVE"); - return MEDIA_VISION_ERROR_NONE; } @@ -114,27 +114,21 @@ int mv_roi_tracker_prepare_open(mv_roi_tracker_h handle, int x, int y, int width } auto pTracker = static_cast(handle); - int ret = pTracker->setRoi(x, y, width, height); - if (ret != MEDIA_VISION_ERROR_NONE) { - LOGE("Fail to set roi"); - return ret; - } + pTracker->setRoi(x, y, width, height); LOGI("LEAVE"); - return MEDIA_VISION_ERROR_NONE; } int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source, mv_roi_tracker_tracked_cb tracked_cb, void *user_data) { LOGD("ENTER"); + if (!handle) { LOGE("Handle is NULL."); return MEDIA_VISION_ERROR_INVALID_PARAMETER; } - auto pTracker = static_cast(handle); - unsigned int channels = 0; unsigned int width = 0, height = 0; unsigned int bufferSize = 0; @@ -152,17 +146,19 @@ int mv_roi_tracker_perform_open(mv_roi_tracker_h handle, mv_source_h source, mv_ LOGD(" w: %d, h: %d, c: %d", width, height, channels); try { - pTracker->perform(getTrackerFrame(colorspace, channels, width, height, buffer)); + cv::Mat frame = getTrackerFrame(colorspace, channels, width, height, buffer); + + auto pTracker = static_cast(handle); + pTracker->perform(frame); + + mv_rectangle_s roi; + std::tie(roi.point.x, roi.point.y, 
roi.width, roi.height) = pTracker->result(); + tracked_cb(source, roi, user_data); } catch (const std::exception &e) { LOGE("Failed : %s", e.what()); return MEDIA_VISION_ERROR_INVALID_OPERATION; } - mv_rectangle_s roi; - pTracker->getResult(&roi.point.x, &roi.point.y, &roi.width, &roi.height); - - tracked_cb(source, roi, user_data); - LOGD("LEAVE"); return MEDIA_VISION_ERROR_NONE; } diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index a8807b8..e597cbf 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.23.3 +Version: 0.23.4 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-3-Clause -- 2.7.4 From f6a4877595e27aa8ae7e49a50a93320e09b836d7 Mon Sep 17 00:00:00 2001 From: Seungbae Shin Date: Mon, 18 Jul 2022 20:45:31 +0900 Subject: [PATCH 07/16] fixup! common: code refactoring to EngineConfig.cpp fix NO_CATCH defects [Issue type] svace Change-Id: Ia249796b28e7ab4f83b154e7da8d2ba68481d806 --- mv_common/src/mv_common_c.cpp | 7 ++++--- packaging/capi-media-vision.spec | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/mv_common/src/mv_common_c.cpp b/mv_common/src/mv_common_c.cpp index bc6bbf5..21071e4 100644 --- a/mv_common/src/mv_common_c.cpp +++ b/mv_common/src/mv_common_c.cpp @@ -323,9 +323,10 @@ int mv_create_engine_config_c( } LOGD("Creating media vision engine config"); - (*engine_cfg) = static_cast - (new (std::nothrow) MediaVision::Common::EngineConfig()); - if (*engine_cfg == NULL) { + try { + (*engine_cfg) = static_cast + (new MediaVision::Common::EngineConfig()); + } catch (...) 
{ LOGE("Failed to create mv_engine_config_h handle"); return MEDIA_VISION_ERROR_OUT_OF_MEMORY; } diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index e597cbf..b5a958e 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.23.4 +Version: 0.23.5 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-3-Clause -- 2.7.4 From 42f56e7cbdaf642496628f657a0006774fc03bc9 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Fri, 1 Jul 2022 15:44:32 +0900 Subject: [PATCH 08/16] mv_machine_learning: introduce Context, ITask and adapter classes [Issue type] new feature Introduced Context, ITask, and adapter class derived from ITask one. With this patch, each Task API has a context which can have one more itask objects for controlling inference or training modules. The purpose of this patch is to keep same interfaces for each module. As a reference, this patch applies this new approach to Face recognition framework. This is just a step for next code refactoring for multi models support. 
Change-Id: Iad4c4df9fc87143723decadbc003787854e60e4b Signed-off-by: Inki Dae --- CMakeLists.txt | 1 + media-vision-config.json | 20 --- mv_machine_learning/common/include/context.h | 37 +++++ mv_machine_learning/common/include/itask.h | 38 +++++ .../face_recognition/include/face_recognition.h | 41 +++-- .../include/face_recognition_adapter.h | 88 ++++++++++ .../include/mv_face_recognition_open.h | 33 ---- .../face_recognition/meta/face_recognition.json | 20 +++ .../face_recognition/src/face_recognition.cpp | 14 +- .../src/face_recognition_adapter.cpp | 141 ++++++++++++++++ .../src/mv_face_recognition_open.cpp | 184 ++++++++++----------- packaging/capi-media-vision.spec | 1 + 12 files changed, 457 insertions(+), 161 deletions(-) create mode 100644 mv_machine_learning/common/include/context.h create mode 100644 mv_machine_learning/common/include/itask.h create mode 100644 mv_machine_learning/face_recognition/include/face_recognition_adapter.h create mode 100644 mv_machine_learning/face_recognition/meta/face_recognition.json create mode 100644 mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 7050132..bbc1735 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -148,6 +148,7 @@ configure_file( @ONLY ) install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-face-recognition.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig) +install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/mv_machine_learning/face_recognition/meta/face_recognition.json DESTINATION ${CMAKE_INSTALL_DATADIR}/${fw_name}) set(PC_NAME ${fw_name}-tracker) set(PC_LDFLAGS "-l${MV_ROI_TRACKER_LIB_NAME} -l${MV_COMMON_LIB_NAME}") diff --git a/media-vision-config.json b/media-vision-config.json index b12a490..9987623 100644 --- a/media-vision-config.json +++ b/media-vision-config.json @@ -247,26 +247,6 @@ "name" : "MV_INFERENCE_MODEL_META_FILE_PATH", "type" : "string", "value" : "" - }, - { - "name" : "MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH", - "type" : 
"string", - "value" : "/usr/share/capi-media-vision/models/FR/backbone/tflite/facenet.tflite" - }, - { - "name" : "MV_FACE_RECOGNITION_DEFAULT_PATH", - "type" : "string", - "value" : "/usr/share/capi-media-vision/models/FR/training/" - }, - { - "name" : "MV_FACE_RECOGNITION_DECISION_THRESHOLD", - "type" : "double", - "value" : -0.85 - }, - { - "name" : "MV_ROI_TRACKER_TYPE", - "type" : "integer", - "value" : 2 } ] } diff --git a/mv_machine_learning/common/include/context.h b/mv_machine_learning/common/include/context.h new file mode 100644 index 0000000..518fbe4 --- /dev/null +++ b/mv_machine_learning/common/include/context.h @@ -0,0 +1,37 @@ +/** + * Copyright (c) 2022 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CONTEXT_H__ +#define __CONTEXT_H__ + +#include "itask.h" +#include + +namespace mediavision +{ +namespace common +{ +class Context { +public: + Context() { } + ~Context() { } + + std::map __itasks; +}; +} // namespace +} // namespace + +#endif \ No newline at end of file diff --git a/mv_machine_learning/common/include/itask.h b/mv_machine_learning/common/include/itask.h new file mode 100644 index 0000000..cacf827 --- /dev/null +++ b/mv_machine_learning/common/include/itask.h @@ -0,0 +1,38 @@ +/** + * Copyright (c) 2022 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __ITASK_H__ +#define __ITASK_H__ + +namespace mediavision +{ +namespace common +{ +// T : parameter type, V : return type +template +class ITask { +public: + virtual ~ITask() { }; + virtual void configure() = 0; + virtual void prepare() = 0; + virtual void setInput(T& t) = 0; + virtual void perform() = 0; + virtual V& getOutput() = 0; +}; +} // namespace +} // namespace + +#endif \ No newline at end of file diff --git a/mv_machine_learning/face_recognition/include/face_recognition.h b/mv_machine_learning/face_recognition/include/face_recognition.h index bb79825..f4d65a8 100644 --- a/mv_machine_learning/face_recognition/include/face_recognition.h +++ b/mv_machine_learning/face_recognition/include/face_recognition.h @@ -17,6 +17,7 @@ #ifndef __FACE_RECOGNITION_H__ #define __FACE_RECOGNITION_H__ +#include #include #include @@ -35,18 +36,36 @@ namespace Mv { namespace FaceRecognition { + namespace Status { - enum { - NONE = 0, - INITIALIZED, - REGISTERED, - INFERENCED, - DELETED - }; -} -} -} +enum { + NONE = 0, + INITIALIZED, + REGISTERED, + INFERENCED, + DELETED +}; +} // Status + +namespace Mode +{ +enum { + REGISTER = 0, + INFERENCE, + DELETE +}; +} // Mode + +} // FaceRecognition +} // Mv + +typedef struct { + unsigned int mode; + std::unordered_map register_src; + mv_source_h inference_src; + std::vector labels; +} mv_face_recognition_input_s; /** * @brief The face recognition result structure. 
@@ -108,6 +127,8 @@ public: int RecognizeFace(mv_source_h img_src); int DeleteLabel(std::string label_name); int GetLabel(const char **out_label); + mv_face_recognition_result_s& GetResult(); + }; #endif \ No newline at end of file diff --git a/mv_machine_learning/face_recognition/include/face_recognition_adapter.h b/mv_machine_learning/face_recognition/include/face_recognition_adapter.h new file mode 100644 index 0000000..f5a4ad0 --- /dev/null +++ b/mv_machine_learning/face_recognition/include/face_recognition_adapter.h @@ -0,0 +1,88 @@ +/** + * Copyright (c) 2022 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __FACE_RECOGNITION_ADAPTER_H__ +#define __FACE_RECOGNITION_ADAPTER_H__ + +#include + +#include "EngineConfig.h" +#include "itask.h" +#include "face_recognition.h" + +/** + * @brief Defines #MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH + * to set the backbone model file path. + * @details This model file is used to extract the feature vectors from a given face image data. + * + * @since_tizen 7.0 + * @see mv_engine_config_set_string_attribute() + * @see mv_engine_config_get_string_attribute() + */ +#define MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH "MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH" + +/** + * @brief Defines #MV_FACE_RECOGNITION_DEFAULT_PATH + * to set the path where the training relevant files are created. 
+ * @details This path is used as a default location where the trained model, label and feature vector files are created.
+ *
+ * @since_tizen 7.0
+ * @see mv_engine_config_set_string_attribute()
+ * @see mv_engine_config_get_string_attribute()
+ */
+#define MV_FACE_RECOGNITION_DEFAULT_PATH "MV_FACE_RECOGNITION_DEFAULT_PATH"
+
+/**
+ * @brief Defines #MV_FACE_RECOGNITION_DECISION_THRESHOLD
+ *        to set the decision threshold value.
+ * @details This value is used to determine whether a face recognition result with a given face image data is true or false.
+ *
+ * @since_tizen 7.0
+ * @see mv_engine_config_set_string_attribute()
+ * @see mv_engine_config_get_string_attribute()
+ */
+#define MV_FACE_RECOGNITION_DECISION_THRESHOLD "MV_FACE_RECOGNITION_DECISION_THRESHOLD"
+
+namespace mediavision
+{
+namespace machine_learning
+{
+template<typename T, typename V>
+class FaceRecognitionAdapter : public mediavision::common::ITask<T, V> {
+private:
+    std::unique_ptr<FaceRecognition> _face_recognition;
+    mv_face_recognition_input_s _source;
+    std::unique_ptr<MediaVision::Common::EngineConfig> _config;
+
+public:
+    FaceRecognitionAdapter();
+    ~FaceRecognitionAdapter();
+
+    std::unique_ptr<MediaVision::Common::EngineConfig>& getConfig()
+    {
+        return _config;
+    }
+
+    void configure();
+    void prepare();
+    void setInput(T& t);
+    void perform();
+    V& getOutput();
+};
+} // machine_learning
+} // mediavision
+
+#endif
\ No newline at end of file
diff --git a/mv_machine_learning/face_recognition/include/mv_face_recognition_open.h b/mv_machine_learning/face_recognition/include/mv_face_recognition_open.h
index 1f17b1f..5055ec5 100644
--- a/mv_machine_learning/face_recognition/include/mv_face_recognition_open.h
+++ b/mv_machine_learning/face_recognition/include/mv_face_recognition_open.h
@@ -21,39 +21,6 @@
 #include
 #include
 
-/**
- * @brief Defines #MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH
- *        to set the backbone model file path.
- * @details This model file is used to extract the feature vectors from a given face image data.
- * - * @since_tizen 7.0 - * @see mv_engine_config_set_string_attribute() - * @see mv_engine_config_get_string_attribute() - */ -#define MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH "MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH" - -/** - * @brief Defines #MV_FACE_RECOGNITION_DEFAULT_PATH - * to set the path where the training relevant files are created. - * @details This path is used as a default location where the trained model, label and feature vector files are created. - * - * @since_tizen 7.0 - * @see mv_engine_config_set_string_attribute() - * @see mv_engine_config_get_string_attribute() - */ -#define MV_FACE_RECOGNITION_DEFAULT_PATH "MV_FACE_RECOGNITION_DEFAULT_PATH" - -/** - * @brief Defines #MV_FACE_RECOGNITION_DECISION_THRESHOLD - * to set the decision threshold file+. - * @details This file is used to determine face recognition result with a given face image data is true or false.. - * - * @since_tizen 7.0 - * @see mv_engine_config_set_string_attribute() - * @see mv_engine_config_get_string_attribute() - */ -#define MV_FACE_RECOGNITION_DECISION_THRESHOLD "MV_FACE_RECOGNITION_DECISION_THRESHOLD" - #ifdef __cplusplus extern "C" { diff --git a/mv_machine_learning/face_recognition/meta/face_recognition.json b/mv_machine_learning/face_recognition/meta/face_recognition.json new file mode 100644 index 0000000..db563ac --- /dev/null +++ b/mv_machine_learning/face_recognition/meta/face_recognition.json @@ -0,0 +1,20 @@ +{ + "attributes": + [ + { + "name" : "MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH", + "type" : "string", + "value" : "/usr/share/capi-media-vision/models/FR/backbone/tflite/facenet.tflite" + }, + { + "name" : "MV_FACE_RECOGNITION_DEFAULT_PATH", + "type" : "string", + "value" : "/usr/share/capi-media-vision/models/FR/training/" + }, + { + "name" : "MV_FACE_RECOGNITION_DECISION_THRESHOLD", + "type" : "double", + "value" : -0.85 + } + ] +} diff --git a/mv_machine_learning/face_recognition/src/face_recognition.cpp 
b/mv_machine_learning/face_recognition/src/face_recognition.cpp index ecf0d26..11ad1a2 100644 --- a/mv_machine_learning/face_recognition/src/face_recognition.cpp +++ b/mv_machine_learning/face_recognition/src/face_recognition.cpp @@ -601,4 +601,16 @@ int FaceRecognition::GetLabel(const char **out_label) *out_label = _result.label.c_str(); return MEDIA_VISION_ERROR_NONE; -} \ No newline at end of file +} + + mv_face_recognition_result_s& FaceRecognition::GetResult() + { + try { + _label_manager->GetLabelString(_result.label, _result.label_idx); + } catch (const BaseException& e) { + LOGE("%s", e.what()); + throw e; + } + + return _result; + } diff --git a/mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp b/mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp new file mode 100644 index 0000000..de0abb9 --- /dev/null +++ b/mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp @@ -0,0 +1,141 @@ +/** + * Copyright (c) 2022 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "machine_learning_exception.h" +#include "face_recognition_adapter.h" + +#define FACE_RECOGNITION_META_FILE_NAME "face_recognition.json" + +using namespace std; +using namespace MediaVision::Common; +using namespace Mediavision::MachineLearning::Exception; + +namespace mediavision +{ +namespace machine_learning +{ +template +FaceRecognitionAdapter::FaceRecognitionAdapter() +{ + _face_recognition = make_unique(); +} + +template +FaceRecognitionAdapter::~FaceRecognitionAdapter() +{ + +} + +template +void FaceRecognitionAdapter::configure() +{ + _config = make_unique(string(MV_CONFIG_PATH) + + string(FACE_RECOGNITION_META_FILE_NAME)); + string backboneModelFilePath; + int ret = _config->getStringAttribute(string(MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH), + &backboneModelFilePath); + if (ret != MEDIA_VISION_ERROR_NONE) + throw InvalidParameter("Failed to get an attribute"); + + LOGD("Backbone model file path : %s", backboneModelFilePath.c_str()); + + string defaultPath; + + ret = _config->getStringAttribute(string(MV_FACE_RECOGNITION_DEFAULT_PATH), + &defaultPath); + if (ret != MEDIA_VISION_ERROR_NONE) + throw InvalidOperation("Fail to get default path."); + + LOGD("Default path : %s", defaultPath.c_str()); + + double decisionThreshold = 0.0f; + ret = _config->getDoubleAttribute(string(MV_FACE_RECOGNITION_DECISION_THRESHOLD), + &decisionThreshold); + if (ret != MEDIA_VISION_ERROR_NONE) + throw InvalidOperation("Fail to get default decision threshold value."); + + FaceRecognitionConfig config = { + MV_INFERENCE_TARGET_DEVICE_CPU, // not used and default type is used. See TrainingModel() + MV_INFERENCE_BACKEND_NNTRAINER, // not used and default type is used. 
See TrainingModel() + MV_INFERENCE_TARGET_DEVICE_CPU, + MV_INFERENCE_BACKEND_NNTRAINER, + MV_INFERENCE_TARGET_DEVICE_CPU, + MV_INFERENCE_BACKEND_TFLITE, + backboneModelFilePath, + string(defaultPath) + "model_and_weights.ini", + string(defaultPath) + "labels.dat", + string(defaultPath) + "feature_vector_file.dat", + decisionThreshold + }; + + _face_recognition->SetConfig(config); +} + +template +void FaceRecognitionAdapter::prepare() +{ + int ret = _face_recognition->Initialize(); + if (ret != MEDIA_VISION_ERROR_NONE) + throw InvalidOperation("Fail to initialize face recognition."); +} + +template +void FaceRecognitionAdapter::setInput(T& t) +{ + _source = t; +} + +template +void FaceRecognitionAdapter::perform() +{ + if (_source.mode == Mv::FaceRecognition::Mode::REGISTER) { + for (auto& s : _source.register_src) { + int ret = _face_recognition->RegisterNewFace(s.first, s.second); + if (ret != MEDIA_VISION_ERROR_NONE) + throw InvalidOperation("Fail to register new face."); + } + + return; + } + + if (_source.mode == Mv::FaceRecognition::Mode::INFERENCE) { + int ret = _face_recognition->RecognizeFace(_source.inference_src); + if (ret == MEDIA_VISION_ERROR_NO_DATA) + throw NoData("Label not found."); + + return; + } + + if (_source.mode == Mv::FaceRecognition::Mode::DELETE) { + for (auto& l : _source.labels) { + int ret = _face_recognition->DeleteLabel(l); + if (ret != MEDIA_VISION_ERROR_NONE) + throw InvalidOperation("Fail to unregister a given label."); + } + + return; + } +} + +template +V& FaceRecognitionAdapter::getOutput() +{ + return _face_recognition->GetResult(); +} + +template class FaceRecognitionAdapter; +} +} \ No newline at end of file diff --git a/mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp b/mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp index ef5b8c1..d34b699 100644 --- a/mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp +++ 
b/mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp @@ -18,13 +18,15 @@ #include #include -#include "face_recognition.h" -#include "feature_vector_manager.h" -#include "backbone_model_info.h" +#include "face_recognition_adapter.h" #include "mv_face_recognition_open.h" #include "machine_learning_exception.h" +#include "context.h" using namespace std; +using namespace mediavision::common; +using namespace mediavision::machine_learning; +using namespace Mv::FaceRecognition; using namespace Mediavision::MachineLearning::Exception; int mv_face_recognition_create_open(mv_face_recognition_h *handle) @@ -34,12 +36,31 @@ int mv_face_recognition_create_open(mv_face_recognition_h *handle) return MEDIA_VISION_ERROR_INVALID_PARAMETER; } - (*handle) = static_cast(new (std::nothrow)FaceRecognition()); - if (*handle == NULL) { - LOGE("Failed to create face recognition handle"); + Context *context = new (nothrow)Context(); + if (!context) { + LOGE("Fail to allocate a context."); return MEDIA_VISION_ERROR_OUT_OF_MEMORY; } + ITask *itask = + new (nothrow)FaceRecognitionAdapter(); + if (!itask) { + delete context; + LOGE("Fail to allocate a itask."); + return MEDIA_VISION_ERROR_OUT_OF_MEMORY; + } + + pair::iterator, bool> result; + + result = context->__itasks.insert(pair("face_recognition", itask)); + if (!result.second) { + delete context; + LOGE("Fail to register a new task. 
Same task already exists."); + return MEDIA_VISION_ERROR_INVALID_OPERATION; + } + + *handle = static_cast(context); + LOGD("face recognition handle [%p] has been created", *handle); return MEDIA_VISION_ERROR_NONE; @@ -52,8 +73,16 @@ int mv_face_recognition_destroy_open(mv_face_recognition_h handle) return MEDIA_VISION_ERROR_INVALID_PARAMETER; } - LOGD("Destroying face recognition handle [%p]", handle); - delete static_cast(handle); + Context *context = static_cast(handle); + map::iterator iter; + + for (iter = context->__itasks.begin(); iter != context->__itasks.end(); ++iter) { + auto itask = static_cast *>(iter->second); + delete itask; + } + + delete context; + LOGD("Face recognition handle has been destroyed"); return MEDIA_VISION_ERROR_NONE; @@ -68,77 +97,15 @@ int mv_face_recognition_prepare_open(mv_face_recognition_h handle) return MEDIA_VISION_ERROR_INVALID_PARAMETER; } - mv_engine_config_h cfg_handle; - - int ret = mv_create_engine_config(&cfg_handle); - if (ret != MEDIA_VISION_ERROR_NONE) { - LOGE("Fail to create engine configuration handle."); - return ret; - } - - char *backboneModelFilePath = NULL; - - ret = mv_engine_config_get_string_attribute(cfg_handle, MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH, &backboneModelFilePath); - if (ret != MEDIA_VISION_ERROR_NONE) { - LOGE("Fail to get backbone model file path"); - mv_destroy_engine_config(cfg_handle); - return ret; - } - - LOGD("Backbone model file path : %s", backboneModelFilePath); - - char *defaultPath = NULL; - - ret = mv_engine_config_get_string_attribute(cfg_handle, MV_FACE_RECOGNITION_DEFAULT_PATH, &defaultPath); - if (ret != MEDIA_VISION_ERROR_NONE) { - LOGE("Fail to get default path."); - free(backboneModelFilePath); - mv_destroy_engine_config(cfg_handle); - return ret; - } + Context *context = static_cast(handle); + auto itask = static_cast *>(context->__itasks["face_recognition"]); - LOGD("Default path : %s", defaultPath); - - double decisionThreshold = 0.0f; - - ret = 
mv_engine_config_get_double_attribute(cfg_handle, MV_FACE_RECOGNITION_DECISION_THRESHOLD, &decisionThreshold); - if (ret != MEDIA_VISION_ERROR_NONE) { - LOGE("Fail to get default decision threshold file path."); - free(backboneModelFilePath); - free(defaultPath); - mv_destroy_engine_config(cfg_handle); - return ret; - } - - FaceRecognitionConfig config = { - MV_INFERENCE_TARGET_DEVICE_CPU, // not used and default type is used. See TrainingModel() - MV_INFERENCE_BACKEND_NNTRAINER, // not used and default type is used. See TrainingModel() - MV_INFERENCE_TARGET_DEVICE_CPU, - MV_INFERENCE_BACKEND_NNTRAINER, - MV_INFERENCE_TARGET_DEVICE_CPU, - MV_INFERENCE_BACKEND_TFLITE, - backboneModelFilePath, - string(defaultPath) + "model_and_weights.ini", - string(defaultPath) + "labels.dat", - string(defaultPath) + "feature_vector_file.dat", - decisionThreshold - }; - - FaceRecognition *pFace = static_cast(handle); - pFace->SetConfig(config); - - ret = pFace->Initialize(); - if (ret != MEDIA_VISION_ERROR_NONE) - LOGE("Fail to initialize face recognition."); - - free(backboneModelFilePath); - free(defaultPath); - - mv_destroy_engine_config(cfg_handle); + itask->configure(); + itask->prepare(); LOGD("LEAVE"); - return ret; + return MEDIA_VISION_ERROR_NONE; } int mv_face_recognition_register_open(mv_face_recognition_h handle, mv_source_h source, const char *label) @@ -150,15 +117,24 @@ int mv_face_recognition_register_open(mv_face_recognition_h handle, mv_source_h return MEDIA_VISION_ERROR_INVALID_PARAMETER; } - FaceRecognition *pFace = static_cast(handle); + try { + Context *context = static_cast(handle); + auto itask = static_cast *>(context->__itasks["face_recognition"]); + + mv_face_recognition_input_s input = { Mode::REGISTER }; - int ret = pFace->RegisterNewFace(source, string(label)); - if (ret != MEDIA_VISION_ERROR_NONE) + input.register_src.clear(); + input.register_src.insert(make_pair(source, string(label))); + itask->setInput(input); + itask->perform(); + } catch (const 
BaseException& e) { LOGE("Fail to register new face."); + return e.getError(); + } LOGD("LEAVE"); - return ret; + return MEDIA_VISION_ERROR_NONE; } int mv_face_recognition_unregister_open(mv_face_recognition_h handle, const char *label) @@ -170,15 +146,24 @@ int mv_face_recognition_unregister_open(mv_face_recognition_h handle, const char return MEDIA_VISION_ERROR_INVALID_PARAMETER; } - FaceRecognition *pFace = static_cast(handle); + try { + Context *context = static_cast(handle); + auto itask = static_cast *>(context->__itasks["face_recognition"]); - int ret = pFace->DeleteLabel(string(label)); - if (ret != MEDIA_VISION_ERROR_NONE) - LOGE("Fail to register new face."); + mv_face_recognition_input_s input = { Mode::DELETE }; + + input.labels.clear(); + input.labels.push_back(string(label)); + itask->setInput(input); + itask->perform(); + } catch (const BaseException& e) { + LOGE("Fail to unregister a given label."); + return e.getError(); + } LOGD("LEAVE"); - return ret; + return MEDIA_VISION_ERROR_NONE; } int mv_face_recognition_inference_open(mv_face_recognition_h handle, mv_source_h source) @@ -190,20 +175,23 @@ int mv_face_recognition_inference_open(mv_face_recognition_h handle, mv_source_h return MEDIA_VISION_ERROR_INVALID_PARAMETER; } - FaceRecognition *pFace = static_cast(handle); + try { + Context *context = static_cast(handle); + auto itask = static_cast *>(context->__itasks["face_recognition"]); - int ret = pFace->RecognizeFace(source); - if (ret == MEDIA_VISION_ERROR_NO_DATA) { - LOGW("Label not found."); - return ret; - } + mv_face_recognition_input_s input = { Mode::INFERENCE }; - if (ret != MEDIA_VISION_ERROR_NONE) - LOGE("Fail to recognize face."); + input.inference_src = source; + itask->setInput(input); + itask->perform(); + } catch (const BaseException& e) { + LOGE("Fail to recognize a face."); + return e.getError(); + } LOGD("LEAVE"); - return ret; + return MEDIA_VISION_ERROR_NONE; } int mv_face_recognition_get_label_open(mv_face_recognition_h 
handle, const char **out_label) @@ -215,12 +203,14 @@ int mv_face_recognition_get_label_open(mv_face_recognition_h handle, const char return MEDIA_VISION_ERROR_INVALID_PARAMETER; } - FaceRecognition *pFace = static_cast(handle); + try { + Context *context = static_cast(handle); + auto itask = static_cast *>(context->__itasks["face_recognition"]); - int ret = pFace->GetLabel(out_label); - if (ret != MEDIA_VISION_ERROR_NONE) { + *out_label = itask->getOutput().label.c_str(); + } catch (const BaseException& e) { LOGE("Fail to get label."); - return ret; + return e.getError(); } LOGD("LEAVE"); diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index b5a958e..5ac71bd 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -317,6 +317,7 @@ find . -name '*.gcno' -exec cp --parents '{}' "$gcno_obj_dir" ';' %files machine_learning %manifest %{name}.manifest %license LICENSE.APLv2 +%{_datadir}/%{name}/face_recognition.json %{_libdir}/libmv_inference*.so %{_libdir}/libmv_training.so %{_libdir}/libmv_face_recognition.so -- 2.7.4 From 4d67b72aca03eddc38361b83aa256c16cf649c53 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Mon, 18 Jul 2022 18:30:01 +0900 Subject: [PATCH 09/16] mv_machine_learning: cleanup namespace [Issue type] cleanup Cleaned up the use of namespace for Face recognition framework according to Tizen coding style document. 
Change-Id: Ib5c8e751911c73a2d32e5ed453d3c04a51aef28c Signed-off-by: Inki Dae --- .../common/include/machine_learning_exception.h | 6 +++--- .../face_recognition/include/face_recognition.h | 20 ++++++++++++-------- .../include/face_recognition_adapter.h | 2 ++ .../face_recognition/src/face_recognition.cpp | 12 ++++++++++-- .../src/face_recognition_adapter.cpp | 10 ++++++---- .../src/mv_face_recognition_open.cpp | 10 +++++----- .../face_recognition/src/nntrainer_dsm.cpp | 2 +- .../face_recognition/src/nntrainer_fvm.cpp | 2 +- .../face_recognition/src/simple_shot.cpp | 2 +- .../training/src/data_augment_rotate.cpp | 2 +- .../training/src/feature_vector_manager.cpp | 2 +- mv_machine_learning/training/src/label_manager.cpp | 2 +- mv_machine_learning/training/src/training_model.cpp | 2 +- 13 files changed, 45 insertions(+), 29 deletions(-) diff --git a/mv_machine_learning/common/include/machine_learning_exception.h b/mv_machine_learning/common/include/machine_learning_exception.h index 05d33c9..1038c93 100644 --- a/mv_machine_learning/common/include/machine_learning_exception.h +++ b/mv_machine_learning/common/include/machine_learning_exception.h @@ -22,9 +22,9 @@ #include -namespace Mediavision { -namespace MachineLearning { -namespace Exception { +namespace mediavision { +namespace machine_learning { +namespace exception { class BaseException : public std::exception { private: diff --git a/mv_machine_learning/face_recognition/include/face_recognition.h b/mv_machine_learning/face_recognition/include/face_recognition.h index f4d65a8..a239471 100644 --- a/mv_machine_learning/face_recognition/include/face_recognition.h +++ b/mv_machine_learning/face_recognition/include/face_recognition.h @@ -32,12 +32,14 @@ #include "data_augment_flip.h" #include "data_augment_rotate.h" -namespace Mv +namespace mediavision { -namespace FaceRecognition +namespace machine_learning +{ +namespace face_recognition { -namespace Status +namespace status { enum { NONE = 0, @@ -46,19 +48,18 @@ enum 
{ INFERENCED, DELETED }; -} // Status +} // status -namespace Mode +namespace mode { enum { REGISTER = 0, INFERENCE, DELETE }; -} // Mode +} // mode -} // FaceRecognition -} // Mv +} // face_recognition typedef struct { unsigned int mode; @@ -131,4 +132,7 @@ public: }; +} // machine_learning +} // mediavision + #endif \ No newline at end of file diff --git a/mv_machine_learning/face_recognition/include/face_recognition_adapter.h b/mv_machine_learning/face_recognition/include/face_recognition_adapter.h index f5a4ad0..b4ab9fa 100644 --- a/mv_machine_learning/face_recognition/include/face_recognition_adapter.h +++ b/mv_machine_learning/face_recognition/include/face_recognition_adapter.h @@ -60,6 +60,7 @@ namespace mediavision { namespace machine_learning { + template class FaceRecognitionAdapter : public mediavision::common::ITask { private: @@ -82,6 +83,7 @@ public: void perform(); V& getOutput(); }; + } // machine_learning } // mediavision diff --git a/mv_machine_learning/face_recognition/src/face_recognition.cpp b/mv_machine_learning/face_recognition/src/face_recognition.cpp index 11ad1a2..13b120d 100644 --- a/mv_machine_learning/face_recognition/src/face_recognition.cpp +++ b/mv_machine_learning/face_recognition/src/face_recognition.cpp @@ -36,8 +36,13 @@ using namespace std; using namespace mediavision::inference; using namespace TrainingEngineInterface::Common; -using namespace Mv::FaceRecognition::Status; -using namespace Mediavision::MachineLearning::Exception; +using namespace mediavision::machine_learning::face_recognition::status; +using namespace mediavision::machine_learning::exception; + +namespace mediavision +{ +namespace machine_learning +{ FaceRecognition::FaceRecognition() : _status(NONE), _internal(), _backbone(), _face_net_info(), _training_model(), _label_manager() @@ -614,3 +619,6 @@ int FaceRecognition::GetLabel(const char **out_label) return _result; } + +} // machine_learning +} // mediavision \ No newline at end of file diff --git 
a/mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp b/mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp index de0abb9..da8aeec 100644 --- a/mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp +++ b/mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp @@ -21,12 +21,14 @@ using namespace std; using namespace MediaVision::Common; -using namespace Mediavision::MachineLearning::Exception; +using namespace mediavision::machine_learning::exception; +using namespace mediavision::machine_learning::face_recognition; namespace mediavision { namespace machine_learning { + template FaceRecognitionAdapter::FaceRecognitionAdapter() { @@ -101,7 +103,7 @@ void FaceRecognitionAdapter::setInput(T& t) template void FaceRecognitionAdapter::perform() { - if (_source.mode == Mv::FaceRecognition::Mode::REGISTER) { + if (_source.mode == mode::REGISTER) { for (auto& s : _source.register_src) { int ret = _face_recognition->RegisterNewFace(s.first, s.second); if (ret != MEDIA_VISION_ERROR_NONE) @@ -111,7 +113,7 @@ void FaceRecognitionAdapter::perform() return; } - if (_source.mode == Mv::FaceRecognition::Mode::INFERENCE) { + if (_source.mode == mode::INFERENCE) { int ret = _face_recognition->RecognizeFace(_source.inference_src); if (ret == MEDIA_VISION_ERROR_NO_DATA) throw NoData("Label not found."); @@ -119,7 +121,7 @@ void FaceRecognitionAdapter::perform() return; } - if (_source.mode == Mv::FaceRecognition::Mode::DELETE) { + if (_source.mode == mode::DELETE) { for (auto& l : _source.labels) { int ret = _face_recognition->DeleteLabel(l); if (ret != MEDIA_VISION_ERROR_NONE) diff --git a/mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp b/mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp index d34b699..3dbbb67 100644 --- a/mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp +++ b/mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp @@ -26,8 
+26,8 @@ using namespace std; using namespace mediavision::common; using namespace mediavision::machine_learning; -using namespace Mv::FaceRecognition; -using namespace Mediavision::MachineLearning::Exception; +using namespace mediavision::machine_learning::face_recognition; +using namespace mediavision::machine_learning::exception; int mv_face_recognition_create_open(mv_face_recognition_h *handle) { @@ -121,7 +121,7 @@ int mv_face_recognition_register_open(mv_face_recognition_h handle, mv_source_h Context *context = static_cast(handle); auto itask = static_cast *>(context->__itasks["face_recognition"]); - mv_face_recognition_input_s input = { Mode::REGISTER }; + mv_face_recognition_input_s input = { mode::REGISTER }; input.register_src.clear(); input.register_src.insert(make_pair(source, string(label))); @@ -150,7 +150,7 @@ int mv_face_recognition_unregister_open(mv_face_recognition_h handle, const char Context *context = static_cast(handle); auto itask = static_cast *>(context->__itasks["face_recognition"]); - mv_face_recognition_input_s input = { Mode::DELETE }; + mv_face_recognition_input_s input = { mode::DELETE }; input.labels.clear(); input.labels.push_back(string(label)); @@ -179,7 +179,7 @@ int mv_face_recognition_inference_open(mv_face_recognition_h handle, mv_source_h Context *context = static_cast(handle); auto itask = static_cast *>(context->__itasks["face_recognition"]); - mv_face_recognition_input_s input = { Mode::INFERENCE }; + mv_face_recognition_input_s input = { mode::INFERENCE }; input.inference_src = source; itask->setInput(input); diff --git a/mv_machine_learning/face_recognition/src/nntrainer_dsm.cpp b/mv_machine_learning/face_recognition/src/nntrainer_dsm.cpp index 1a95cdf..2eedaa4 100644 --- a/mv_machine_learning/face_recognition/src/nntrainer_dsm.cpp +++ b/mv_machine_learning/face_recognition/src/nntrainer_dsm.cpp @@ -22,7 +22,7 @@ #include "nntrainer_dsm.h" using namespace std; -using namespace Mediavision::MachineLearning::Exception; 
+using namespace mediavision::machine_learning::exception; void NNTrainerDSM::PrintHeader(FeaVecHeader& fvh) { diff --git a/mv_machine_learning/face_recognition/src/nntrainer_fvm.cpp b/mv_machine_learning/face_recognition/src/nntrainer_fvm.cpp index e9ffb6b..0f20e9a 100644 --- a/mv_machine_learning/face_recognition/src/nntrainer_fvm.cpp +++ b/mv_machine_learning/face_recognition/src/nntrainer_fvm.cpp @@ -20,7 +20,7 @@ #include "nntrainer_fvm.h" using namespace std; -using namespace Mediavision::MachineLearning::Exception; +using namespace mediavision::machine_learning::exception; NNTrainerFVM::NNTrainerFVM(const string feature_vector_file) : FeatureVectorManager(feature_vector_file) diff --git a/mv_machine_learning/face_recognition/src/simple_shot.cpp b/mv_machine_learning/face_recognition/src/simple_shot.cpp index 6dd97f2..25a51ad 100644 --- a/mv_machine_learning/face_recognition/src/simple_shot.cpp +++ b/mv_machine_learning/face_recognition/src/simple_shot.cpp @@ -30,7 +30,7 @@ using namespace std; using namespace TrainingEngineInterface::Common; -using namespace Mediavision::MachineLearning::Exception; +using namespace mediavision::machine_learning::exception; SimpleShot::SimpleShot(const mv_inference_backend_type_e backend_type, const mv_inference_target_device_e target_type, diff --git a/mv_machine_learning/training/src/data_augment_rotate.cpp b/mv_machine_learning/training/src/data_augment_rotate.cpp index 5ccb588..4b20623 100644 --- a/mv_machine_learning/training/src/data_augment_rotate.cpp +++ b/mv_machine_learning/training/src/data_augment_rotate.cpp @@ -18,7 +18,7 @@ #include "data_augment_rotate.h" using namespace std; -using namespace Mediavision::MachineLearning::Exception; +using namespace mediavision::machine_learning::exception; DataAugmentRotate::DataAugmentRotate(unsigned int degree) : _degree(degree) { diff --git a/mv_machine_learning/training/src/feature_vector_manager.cpp b/mv_machine_learning/training/src/feature_vector_manager.cpp index 
6e83e80..62d90a3 100644 --- a/mv_machine_learning/training/src/feature_vector_manager.cpp +++ b/mv_machine_learning/training/src/feature_vector_manager.cpp @@ -21,7 +21,7 @@ #include "feature_vector_manager.h" using namespace std; -using namespace Mediavision::MachineLearning::Exception; +using namespace mediavision::machine_learning::exception; FeatureVectorManager::FeatureVectorManager(const string feature_vector_file) : _feature_vector_file(feature_vector_file) diff --git a/mv_machine_learning/training/src/label_manager.cpp b/mv_machine_learning/training/src/label_manager.cpp index b72b3d6..2f65c27 100644 --- a/mv_machine_learning/training/src/label_manager.cpp +++ b/mv_machine_learning/training/src/label_manager.cpp @@ -21,7 +21,7 @@ #include "label_manager.h" using namespace std; -using namespace Mediavision::MachineLearning::Exception; +using namespace mediavision::machine_learning::exception; LabelManager::LabelManager(string label_file, double decision_threshold) : _labels_and_files(), _label_file(label_file) { diff --git a/mv_machine_learning/training/src/training_model.cpp b/mv_machine_learning/training/src/training_model.cpp index fe9183f..6737791 100644 --- a/mv_machine_learning/training/src/training_model.cpp +++ b/mv_machine_learning/training/src/training_model.cpp @@ -31,7 +31,7 @@ using namespace std; using namespace TrainingEngineInterface::Common; -using namespace Mediavision::MachineLearning::Exception; +using namespace mediavision::machine_learning::exception; TrainingModel::TrainingModel(const mv_inference_backend_type_e backend_type, const mv_inference_target_device_e target_type, -- 2.7.4 From 7436807ee614768f0e902336ade42db4a59eb452 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Wed, 20 Jul 2022 14:20:47 +0900 Subject: [PATCH 10/16] test/machine_learning: fix build error [Issue type] bug fix Fixed a build error by dropping duplicated declarations. 
Change-Id: I594dc6e508c895a531f5944a597bbc6cb7972eee Signed-off-by: Inki Dae --- .../inference/inference_test_suite.c | 29 +--------------------- 1 file changed, 1 insertion(+), 28 deletions(-) diff --git a/test/testsuites/machine_learning/inference/inference_test_suite.c b/test/testsuites/machine_learning/inference/inference_test_suite.c index f5dc3b7..135081b 100644 --- a/test/testsuites/machine_learning/inference/inference_test_suite.c +++ b/test/testsuites/machine_learning/inference/inference_test_suite.c @@ -553,33 +553,6 @@ clean_mv_inference: return err; } -int engine_config_hosted_tflite_cpu(mv_engine_config_h handle, - const char *tf_weight, - const char *meta_file) -{ - RET_IF_FAIL(mv_engine_config_set_string_attribute( - handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, tf_weight)); - RET_IF_FAIL(mv_engine_config_set_int_attribute( - handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_TFLITE)); - RET_IF_FAIL(mv_engine_config_set_int_attribute( - handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU)); - if (meta_file != NULL) - RET_IF_FAIL(mv_engine_config_set_string_attribute( - handle, MV_INFERENCE_MODEL_META_FILE_PATH, meta_file)); - return MEDIA_VISION_ERROR_NONE; -} - -int engine_config_user_hosted_tflite_cpu(mv_engine_config_h handle, - const char *tf_weight, - const char *user_file, - const char *meta_file) -{ - RET_IF_FAIL(engine_config_hosted_tflite_cpu(handle, tf_weight, meta_file)); - RET_IF_FAIL(mv_engine_config_set_string_attribute( - handle, MV_INFERENCE_MODEL_USER_FILE_PATH, user_file)); - return MEDIA_VISION_ERROR_NONE; -} - int perform_configure_set_model_config_path(mv_engine_config_h engine_cfg) { int err = MEDIA_VISION_ERROR_NONE; @@ -1827,4 +1800,4 @@ int main() if (err != MEDIA_VISION_ERROR_NONE) printf("Fail to perform task. 
ERROR[0x%x]\n", err); return err; -} \ No newline at end of file +} -- 2.7.4 From d4e89d0f3ae0d4b1b7bf181628eee83aae717e4e Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Wed, 20 Jul 2022 13:51:19 +0900 Subject: [PATCH 11/16] mv_machine_learning: cleanup mv_face_recognition_open.cpp [Issue type] code cleanup Cleaned up mv_face_recognition_open.cpp file by doing, - rename itask -> task. - use forward declaration with "using" keyword. Change-Id: I9a80f014efc83b6161cc09b561c1ca4e5183e453 Signed-off-by: Inki Dae --- mv_machine_learning/common/include/context.h | 2 +- .../src/mv_face_recognition_open.cpp | 43 +++++++++++----------- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/mv_machine_learning/common/include/context.h b/mv_machine_learning/common/include/context.h index 518fbe4..77932ec 100644 --- a/mv_machine_learning/common/include/context.h +++ b/mv_machine_learning/common/include/context.h @@ -29,7 +29,7 @@ public: Context() { } ~Context() { } - std::map __itasks; + std::map __tasks; }; } // namespace } // namespace diff --git a/mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp b/mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp index 3dbbb67..598aaae 100644 --- a/mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp +++ b/mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp @@ -28,6 +28,7 @@ using namespace mediavision::common; using namespace mediavision::machine_learning; using namespace mediavision::machine_learning::face_recognition; using namespace mediavision::machine_learning::exception; +using FaceRecognitionTask = ITask; int mv_face_recognition_create_open(mv_face_recognition_h *handle) { @@ -42,17 +43,17 @@ int mv_face_recognition_create_open(mv_face_recognition_h *handle) return MEDIA_VISION_ERROR_OUT_OF_MEMORY; } - ITask *itask = + FaceRecognitionTask *task = new (nothrow)FaceRecognitionAdapter(); - if (!itask) { + if (!task) { delete context; - LOGE("Fail to 
allocate a itask."); + LOGE("Fail to allocate a task."); return MEDIA_VISION_ERROR_OUT_OF_MEMORY; } pair::iterator, bool> result; - result = context->__itasks.insert(pair("face_recognition", itask)); + result = context->__tasks.insert(pair("face_recognition", task)); if (!result.second) { delete context; LOGE("Fail to register a new task. Same task already exists."); @@ -76,9 +77,9 @@ int mv_face_recognition_destroy_open(mv_face_recognition_h handle) Context *context = static_cast(handle); map::iterator iter; - for (iter = context->__itasks.begin(); iter != context->__itasks.end(); ++iter) { - auto itask = static_cast *>(iter->second); - delete itask; + for (iter = context->__tasks.begin(); iter != context->__tasks.end(); ++iter) { + auto task = static_cast(iter->second); + delete task; } delete context; @@ -98,10 +99,10 @@ int mv_face_recognition_prepare_open(mv_face_recognition_h handle) } Context *context = static_cast(handle); - auto itask = static_cast *>(context->__itasks["face_recognition"]); + auto task = static_cast(context->__tasks["face_recognition"]); - itask->configure(); - itask->prepare(); + task->configure(); + task->prepare(); LOGD("LEAVE"); @@ -119,14 +120,14 @@ int mv_face_recognition_register_open(mv_face_recognition_h handle, mv_source_h try { Context *context = static_cast(handle); - auto itask = static_cast *>(context->__itasks["face_recognition"]); + auto task = static_cast(context->__tasks["face_recognition"]); mv_face_recognition_input_s input = { mode::REGISTER }; input.register_src.clear(); input.register_src.insert(make_pair(source, string(label))); - itask->setInput(input); - itask->perform(); + task->setInput(input); + task->perform(); } catch (const BaseException& e) { LOGE("Fail to register new face."); return e.getError(); @@ -148,14 +149,14 @@ int mv_face_recognition_unregister_open(mv_face_recognition_h handle, const char try { Context *context = static_cast(handle); - auto itask = static_cast 
*>(context->__itasks["face_recognition"]); + auto task = static_cast(context->__tasks["face_recognition"]); mv_face_recognition_input_s input = { mode::DELETE }; input.labels.clear(); input.labels.push_back(string(label)); - itask->setInput(input); - itask->perform(); + task->setInput(input); + task->perform(); } catch (const BaseException& e) { LOGE("Fail to unregister a given label."); return e.getError(); @@ -177,13 +178,13 @@ int mv_face_recognition_inference_open(mv_face_recognition_h handle, mv_source_h try { Context *context = static_cast(handle); - auto itask = static_cast *>(context->__itasks["face_recognition"]); + auto task = static_cast(context->__tasks["face_recognition"]); mv_face_recognition_input_s input = { mode::INFERENCE }; input.inference_src = source; - itask->setInput(input); - itask->perform(); + task->setInput(input); + task->perform(); } catch (const BaseException& e) { LOGE("Fail to recognize a face."); return e.getError(); @@ -205,9 +206,9 @@ int mv_face_recognition_get_label_open(mv_face_recognition_h handle, const char try { Context *context = static_cast(handle); - auto itask = static_cast *>(context->__itasks["face_recognition"]); + auto task = static_cast(context->__tasks["face_recognition"]); - *out_label = itask->getOutput().label.c_str(); + *out_label = task->getOutput().label.c_str(); } catch (const BaseException& e) { LOGE("Fail to get label."); return e.getError(); -- 2.7.4 From 366eef94e767d6197eb7faf61ad43a8037186876 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Thu, 21 Jul 2022 14:26:40 +0900 Subject: [PATCH 12/16] mv_common: fix utc negative test issue [Issue type] bug fix Fixed negative test issue of legacy face tracking test by returning an error correctly. In case of invalid color space type, INVALID_PARAMETER error should be returned according to utc_mediavision_mv_face_track_n test case. 
Change-Id: Ia74578eb33ea1bbc88681cd3abf6fa9d44e22169 Signed-off-by: Inki Dae --- mv_common/src/CommonUtils.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mv_common/src/CommonUtils.cpp b/mv_common/src/CommonUtils.cpp index 4b21714..a90b73e 100644 --- a/mv_common/src/CommonUtils.cpp +++ b/mv_common/src/CommonUtils.cpp @@ -45,6 +45,9 @@ int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource) int conversionType; switch(colorspace) { + case MEDIA_VISION_COLORSPACE_INVALID: + LOGE("Error: mv_source has invalid colorspace."); + return MEDIA_VISION_ERROR_INVALID_PARAMETER; case MEDIA_VISION_COLORSPACE_Y800: channelsNumber = 1; conversionType = -1; /* Type of conversion from given colorspace to gray */ -- 2.7.4 From 2a2bd83e28af0fdab2b54036445d9ef098c3c1d9 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Tue, 26 Jul 2022 13:37:18 +0900 Subject: [PATCH 13/16] mv_machine_learning: add feature enabling option for face recognition [Version] : 0.23.6 [Issue type] cleanup Added feature enabling option for face recognition framework. With this patch, VD build error is fixed. 
Change-Id: If80d718c3147104d53a712023a7a0c7b9ab3fca6 Signed-off-by: Inki Dae --- CMakeLists.txt | 38 +++++++++++++------------ mv_machine_learning/CMakeLists.txt | 8 ++++-- packaging/capi-media-vision.spec | 23 +++++++++++---- test/testsuites/machine_learning/CMakeLists.txt | 6 +++- 4 files changed, 48 insertions(+), 27 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index bbc1735..7134d36 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -131,24 +131,26 @@ configure_file( ) install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-inference.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig) -set(PC_NAME ${fw_name}-training) -set(PC_LDFLAGS "-l${MV_TRAINING_LIB_NAME} -l${MV_COMMON_LIB_NAME}") -configure_file( - ${fw_name}.pc.in - ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-training.pc - @ONLY -) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-training.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig) - -set(PC_NAME ${fw_name}-face-recognition) -set(PC_LDFLAGS "-l${MV_FACE_RECOG_LIB_NAME} -l${MV_COMMON_LIB_NAME}") -configure_file( - ${fw_name}.pc.in - ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-face-recognition.pc - @ONLY -) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-face-recognition.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/mv_machine_learning/face_recognition/meta/face_recognition.json DESTINATION ${CMAKE_INSTALL_DATADIR}/${fw_name}) +if (${ENABLE_ML_FACE_RECOGNITION}) + set(PC_NAME ${fw_name}-training) + set(PC_LDFLAGS "-l${MV_TRAINING_LIB_NAME} -l${MV_COMMON_LIB_NAME}") + configure_file( + ${fw_name}.pc.in + ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-training.pc + @ONLY + ) + install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-training.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig) + + set(PC_NAME ${fw_name}-face-recognition) + set(PC_LDFLAGS "-l${MV_FACE_RECOG_LIB_NAME} -l${MV_COMMON_LIB_NAME}") + configure_file( + ${fw_name}.pc.in + ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-face-recognition.pc + @ONLY + ) + 
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-face-recognition.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig) + install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/mv_machine_learning/face_recognition/meta/face_recognition.json DESTINATION ${CMAKE_INSTALL_DATADIR}/${fw_name}) +endif() set(PC_NAME ${fw_name}-tracker) set(PC_LDFLAGS "-l${MV_ROI_TRACKER_LIB_NAME} -l${MV_COMMON_LIB_NAME}") diff --git a/mv_machine_learning/CMakeLists.txt b/mv_machine_learning/CMakeLists.txt index 3e99704..a077ce1 100644 --- a/mv_machine_learning/CMakeLists.txt +++ b/mv_machine_learning/CMakeLists.txt @@ -1,3 +1,7 @@ add_subdirectory(inference) -add_subdirectory(training) -add_subdirectory(face_recognition) + +if (${ENABLE_ML_FACE_RECOGNITION}) + message("Enabled machine learning face recognition feature.") + add_subdirectory(training) + add_subdirectory(face_recognition) +endif() \ No newline at end of file diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index 5ac71bd..ce2ac76 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.23.5 +Version: 0.23.6 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-3-Clause @@ -13,7 +13,6 @@ BuildRequires: pkgconfig(opencv) BuildRequires: pkgconfig(json-glib-1.0) BuildRequires: pkgconfig(iniparser) BuildRequires: pkgconfig(inference-engine-interface-common) -BuildRequires: pkgconfig(training-engine-interface-common) %if !0%{?ml_only:1} BuildRequires: pkgconfig(glib-2.0) BuildRequires: pkgconfig(zbar) @@ -35,7 +34,13 @@ BuildRequires: gtest-devel # Build options # ENABLE_INFERENCE_PROFILER # 0 : disable Mediavision inference engine profiler, 1 : enable Mediavision inference engine profiler. 
-%define build_options -DENABLE_INFERENCE_PROFILER=0 +%if "%{tizen_profile_name}" == "tv" +%define enable_ml_face_recognition 0 +%else +%define enable_ml_face_recognition 1 +BuildRequires: pkgconfig(training-engine-interface-common) +%endif +%define build_options -DENABLE_INFERENCE_PROFILER=0 -DENABLE_ML_FACE_RECOGNITION=%{enable_ml_face_recognition} Requires: %{name}-machine_learning %if !0%{?ml_only:1} @@ -317,16 +322,20 @@ find . -name '*.gcno' -exec cp --parents '{}' "$gcno_obj_dir" ';' %files machine_learning %manifest %{name}.manifest %license LICENSE.APLv2 -%{_datadir}/%{name}/face_recognition.json %{_libdir}/libmv_inference*.so +%if "%{enable_ml_face_recognition}" == "1" +%{_datadir}/%{name}/face_recognition.json %{_libdir}/libmv_training.so %{_libdir}/libmv_face_recognition.so +%endif %files machine_learning-devel %{_includedir}/media/mv_infer*.h %{_libdir}/pkgconfig/*inference.pc +%if "%{enable_ml_face_recognition}" == "1" %{_libdir}/pkgconfig/*training.pc %{_libdir}/pkgconfig/*face-recognition.pc +%endif %files roi_tracker %manifest %{name}.manifest @@ -345,8 +354,10 @@ find . 
-name '*.gcno' -exec cp --parents '{}' "$gcno_obj_dir" ';' %{_libdir}/libmv_*helper.so %{_libdir}/libmv_testsuite*.so %{_bindir}/mv_* -%{_bindir}/test_* -%{_bindir}/measure_* +%if "%{enable_ml_face_recognition}" == "1" +%{_bindir}/test_face_recognition +%{_bindir}/measure_face_recognition +%endif %endif %if 0%{?gcov:1} diff --git a/test/testsuites/machine_learning/CMakeLists.txt b/test/testsuites/machine_learning/CMakeLists.txt index de2c654..ebf24f4 100644 --- a/test/testsuites/machine_learning/CMakeLists.txt +++ b/test/testsuites/machine_learning/CMakeLists.txt @@ -2,4 +2,8 @@ project(machine_learning) cmake_minimum_required(VERSION 2.6...3.13) add_subdirectory(${PROJECT_SOURCE_DIR}/inference) -add_subdirectory(${PROJECT_SOURCE_DIR}/face_recognition) \ No newline at end of file + +if (${ENABLE_ML_FACE_RECOGNITION}) + message("Enabled machine learning face recognition test cases.") + add_subdirectory(${PROJECT_SOURCE_DIR}/face_recognition) +endif() -- 2.7.4 From e23276cbb37910d9565352163325fff2826e679d Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Wed, 27 Jul 2022 13:07:26 +0900 Subject: [PATCH 14/16] mv_machine_learning: fix a build error on VD server [Version] : 0.23.7 [Issue type] : bug fix Fixed a build error on VD server due to face recognition relevant library dependency. VD doesn't use the face recognition framework so this patch breaks the build dependency. 
Change-Id: Ieaf2f45afb7ebd9c80637a1c9a0dedce68ad77ec Signed-off-by: Inki Dae --- CMakeLists.txt | 10 ++++++++-- packaging/capi-media-vision.spec | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7134d36..0dbc0e0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -56,9 +56,15 @@ install( endif() set(PC_NAME ${fw_name}) -set(PC_REQUIRED "${fw_name}-barcode ${fw_name}-face ${fw_name}-image ${fw_name}-surveillance ${fw_name}-inference ${fw_name}-training ${fw_name}-face-recognition ${fw_name}-tracker") -set(PC_LDFLAGS "-l${MV_COMMON_LIB_NAME} -l${MV_BARCODE_DETECTOR_LIB_NAME} -l${MV_BARCODE_GENERATOR_LIB_NAME} \ +if (${ENABLE_ML_FACE_RECOGNITION}) + set(PC_REQUIRED "${fw_name}-barcode ${fw_name}-face ${fw_name}-image ${fw_name}-surveillance ${fw_name}-inference ${fw_name}-training ${fw_name}-face-recognition ${fw_name}-tracker") + set(PC_LDFLAGS "-l${MV_COMMON_LIB_NAME} -l${MV_BARCODE_DETECTOR_LIB_NAME} -l${MV_BARCODE_GENERATOR_LIB_NAME} \ -l${MV_IMAGE_LIB_NAME} -l${MV_FACE_LIB_NAME} -l${MV_SURVEILLANCE_LIB_NAME} -l${MV_INFERENCE_LIB_NAME} -l${MV_TRAINING_LIB_NAME} -l${MV_FACE_RECOG_LIB_NAME} -l${MV_ROI_TRACKER_LIB_NAME}") +else() + set(PC_REQUIRED "${fw_name}-barcode ${fw_name}-face ${fw_name}-image ${fw_name}-surveillance ${fw_name}-inference ${fw_name}-tracker") + set(PC_LDFLAGS "-l${MV_COMMON_LIB_NAME} -l${MV_BARCODE_DETECTOR_LIB_NAME} -l${MV_BARCODE_GENERATOR_LIB_NAME} \ +-l${MV_IMAGE_LIB_NAME} -l${MV_FACE_LIB_NAME} -l${MV_SURVEILLANCE_LIB_NAME} -l${MV_INFERENCE_LIB_NAME} -l${MV_ROI_TRACKER_LIB_NAME}") +endif() configure_file( ${fw_name}.pc.in diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index ce2ac76..e25aa65 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.23.6 +Version: 0.23.7 Release: 0 Group: Multimedia/Framework License: 
Apache-2.0 and BSD-3-Clause -- 2.7.4 From c41a938c47032334e824142452a2e685c4e9b92c Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Wed, 27 Jul 2022 17:58:17 +0900 Subject: [PATCH 15/16] mv_machine_learning: fix seg. fault [Version] : 0.23.8 [Issue type] : bug fix Fixed a bug that seg. fault happens when mv_face_recognition_destroy() is called just after mv_face_recognition_create(). _label_manager of FaceRecognition class has no instance at mv_face_recognition_create() call so check if _label_manager is nullptr or not. Change-Id: Ie7948e4f6152f1ad3237600ab6ae64bc88e0ebf9 Signed-off-by: Inki Dae --- mv_machine_learning/face_recognition/src/face_recognition.cpp | 3 ++- packaging/capi-media-vision.spec | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/mv_machine_learning/face_recognition/src/face_recognition.cpp b/mv_machine_learning/face_recognition/src/face_recognition.cpp index 13b120d..2671134 100644 --- a/mv_machine_learning/face_recognition/src/face_recognition.cpp +++ b/mv_machine_learning/face_recognition/src/face_recognition.cpp @@ -53,7 +53,8 @@ FaceRecognition::FaceRecognition() : FaceRecognition::~FaceRecognition() { - _label_manager->Clear(); + if (_label_manager) + _label_manager->Clear(); } void FaceRecognition::CheckFeatureVectorFile(unique_ptr& old_fvm, unique_ptr& new_fvm) diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index e25aa65..4df997c 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.23.7 +Version: 0.23.8 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-3-Clause -- 2.7.4 From 81b18fee299b982106111ffd82f7d2a21686afca Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Thu, 28 Jul 2022 16:49:19 +0900 Subject: [PATCH 16/16] mv_machine_learning: package face recognition header files [Version] : 0.23.9 [Issue type] : bug fix Included face recognition 
header files missed to devel package. Change-Id: I0aa05eafc79ddc24972f9ea01e30884850590078 Signed-off-by: Inki Dae --- packaging/capi-media-vision.spec | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index 4df997c..ef00f6f 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.23.8 +Version: 0.23.9 Release: 0 Group: Multimedia/Framework License: Apache-2.0 and BSD-3-Clause @@ -333,6 +333,7 @@ find . -name '*.gcno' -exec cp --parents '{}' "$gcno_obj_dir" ';' %{_includedir}/media/mv_infer*.h %{_libdir}/pkgconfig/*inference.pc %if "%{enable_ml_face_recognition}" == "1" +%{_includedir}/media/mv_face_recognition*.h %{_libdir}/pkgconfig/*training.pc %{_libdir}/pkgconfig/*face-recognition.pc %endif -- 2.7.4