Updated Image recognition and tracking functionality, and Fixed face and image save... 68/51368/2
authorTae-Young Chung <ty83.chung@samsung.com>
Mon, 9 Nov 2015 07:40:39 +0000 (16:40 +0900)
committerTae-Young Chung <ty83.chung@samsung.com>
Tue, 10 Nov 2015 06:01:44 +0000 (15:01 +0900)
Change-Id: I239f59dbda12f66a8f47c438a9a5ef49e3e2f578
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
48 files changed:
CMakeLists.txt
include/mv_image.h
media-vision-config.json
mv_face/face/src/FaceRecognitionModel.cpp
mv_face/face/src/FaceTrackingModel.cpp
mv_image/image/CMakeLists.txt
mv_image/image/include/Features/BasicExtractorFactory.h [new file with mode: 0644]
mv_image/image/include/Features/FeatureExtractor.h [new file with mode: 0644]
mv_image/image/include/Features/FeatureExtractorFactory.h [new file with mode: 0644]
mv_image/image/include/Features/FeatureMatcher.h [new file with mode: 0644]
mv_image/image/include/Features/FeaturePack.h [new file with mode: 0644]
mv_image/image/include/Features/ORBExtractorFactory.h [new file with mode: 0644]
mv_image/image/include/ImageConfig.h
mv_image/image/include/ImageMathUtil.h
mv_image/image/include/ImageTracker.h [deleted file]
mv_image/image/include/Recognition/ImageObject.h [moved from mv_image/image/include/ImageObject.h with 82% similarity]
mv_image/image/include/Recognition/ImageRecognizer.h [moved from mv_image/image/include/ImageRecognizer.h with 81% similarity]
mv_image/image/include/Tracking/AsyncTracker.h [new file with mode: 0644]
mv_image/image/include/Tracking/CascadeTracker.h [new file with mode: 0644]
mv_image/image/include/Tracking/FeatureSubstitutionTracker.h [new file with mode: 0644]
mv_image/image/include/Tracking/ImageContourStabilizator.h [moved from mv_image/image/include/ImageContourStabilizator.h with 68% similarity]
mv_image/image/include/Tracking/ImageTrackingModel.h [moved from mv_image/image/include/ImageTrackingModel.h with 60% similarity]
mv_image/image/include/Tracking/MFTracker.h [new file with mode: 0644]
mv_image/image/include/Tracking/ObjectTracker.h [new file with mode: 0644]
mv_image/image/include/Tracking/RecognitionBasedTracker.h [new file with mode: 0644]
mv_image/image/src/Features/BasicExtractorFactory.cpp [new file with mode: 0644]
mv_image/image/src/Features/FeatureExtractor.cpp [new file with mode: 0644]
mv_image/image/src/Features/FeatureExtractorFactory.cpp [new file with mode: 0644]
mv_image/image/src/Features/FeatureMatcher.cpp [new file with mode: 0644]
mv_image/image/src/Features/FeaturePack.cpp [new file with mode: 0644]
mv_image/image/src/Features/ORBExtractorFactory.cpp [new file with mode: 0644]
mv_image/image/src/ImageConfig.cpp
mv_image/image/src/ImageMathUtil.cpp
mv_image/image/src/ImageObject.cpp [deleted file]
mv_image/image/src/ImageTracker.cpp [deleted file]
mv_image/image/src/ImageTrackingModel.cpp [deleted file]
mv_image/image/src/Recognition/ImageObject.cpp [new file with mode: 0644]
mv_image/image/src/Recognition/ImageRecognizer.cpp [moved from mv_image/image/src/ImageRecognizer.cpp with 69% similarity]
mv_image/image/src/Tracking/AsyncTracker.cpp [new file with mode: 0644]
mv_image/image/src/Tracking/CascadeTracker.cpp [new file with mode: 0644]
mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp [new file with mode: 0644]
mv_image/image/src/Tracking/ImageContourStabilizator.cpp [moved from mv_image/image/src/ImageContourStabilizator.cpp with 61% similarity]
mv_image/image/src/Tracking/ImageTrackingModel.cpp [new file with mode: 0644]
mv_image/image/src/Tracking/MFTracker.cpp [new file with mode: 0644]
mv_image/image/src/Tracking/ObjectTracker.cpp [new file with mode: 0644]
mv_image/image/src/Tracking/RecognitionBasedTracker.cpp [new file with mode: 0644]
mv_image/image/src/mv_image_open.cpp
packaging/capi-media-vision.spec

index ebd2df1..8452d0a 100644 (file)
@@ -104,12 +104,12 @@ ADD_SUBDIRECTORY(mv_face)
 aux_source_directory(src SOURCES)
 ADD_LIBRARY(${fw_name} SHARED ${SOURCES})
 
-TARGET_LINK_LIBRARIES(${fw_name} ${${fw_name}_LDFLAGS}
-                                 ${MV_COMMON_LIB_NAME}
+TARGET_LINK_LIBRARIES(${fw_name} ${MV_COMMON_LIB_NAME}
                                  ${MV_BARCODE_DETECTOR_LIB_NAME}
                                  ${MV_BARCODE_GENERATOR_LIB_NAME}
                                  ${MV_IMAGE_LIB_NAME}
-                                 ${MV_FACE_LIB_NAME})
+                                 ${MV_FACE_LIB_NAME}
+                                 ${${fw_name}_LDFLAGS})
 
 SET_TARGET_PROPERTIES(${fw_name}
      PROPERTIES
index 5fa9cf6..5efb9b6 100644 (file)
@@ -150,7 +150,7 @@ extern "C" {
 /**
  * @brief Defines MV_IMAGE_TRACKING_EXPECTED_OFFSET to set the expected tracking
  *        offset attribute of the engine configuration.
- * @detials Relative offset value, for which the object offset is
+ * @details Relative offset value, for which the object offset is
  *          expected (relative to the object size in the current frame).
  *          Value is a double and the defalut is 0
  *
@@ -162,7 +162,7 @@ extern "C" {
 
 /**
  * @brief Defines MV_IMAGE_TRACKING_USE_STABLIZATION to enable the contour
- *        stabilization during tracking process.
+ *        stabilization during tracking process. Default value is true.
  *
  * @since_tizen 3.0
  * @see mv_engine_config_set_bool_attribute()
@@ -172,11 +172,14 @@ extern "C" {
 
 /**
  * @brief Defines MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT to set the
- *        tolerant shift for the tracking stabilization attribute of the engine
- *        configuration.
- * @details Relative value of maximum shift per one frame which will be ignored by
- *             stabilization (relative to the object size in the current frame).
- *            Value is a double and the defalut is 0.006
+ *        relative tolerant shift for the tracking stabilization attribute of
+ *        the engine configuration.
+ * @details It is component of tolerant shift which will be ignored by
+ *          stabilization process. (this value is relative to the object size in
+ *          the current frame). Tolerant shift will be computed like R * S + C,
+ *          where R - value set to MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT,
+ *          S - area of object location on frame, C - constant value equal 1.3.
+ *          Value is a double and the default is 0.00006
  *
  * @since_tizen 3.0
  * @see mv_engine_config_set_double_attribute()
@@ -189,7 +192,7 @@ extern "C" {
  *        speed of the tracking stabilization attribute of the engine
  *        configuration.
  * @details Start speed will be used for image stabilization. Value is a double
- *          and the defalut is 2
+ *          and the default is 0.3
  * @since_tizen 3.0
  * @see mv_engine_config_set_double_attribute()
  * @see mv_engine_config_get_double_attribute()
@@ -202,7 +205,7 @@ extern "C" {
  *        configuration.
  * @details Acceleration will be used for image stabilization (relative to
  *          the distance from current location to stabilized location).
- *          Value is double from 0 to 1 and the defalut is 0.001
+ *          Value is double from 0 to 1 and the default is 0.1
  *
  * @since_tizen 3.0
  * @see mv_engine_config_set_double_attribute()
@@ -222,7 +225,7 @@ extern "C" {
  * @since_tizen 3.0
  * @remarks Values @a source, @a engine_cfg, @a image_objects, and @a number_of_objects
  *          are the same as values of input parameters of @ref mv_image_recognize().
- * @remarks @locations are valid only inside callback.
+ * @remarks @a locations are valid only inside callback.
  * @param [in] source              The handle to the source image on which the
  *                                 recognition was carried out
  * @param [in] engine_cfg          The handle to the configuration of engine
@@ -326,7 +329,7 @@ int mv_image_recognize(
  *          but @a location will be NULL.
  * @remarks Handles @a image_tracking_model, @a source and @a engine_cfg the
  *          same as input parameters of @ref mv_image_track().
- * @remarks @location pointer is valid only inside callback
+ * @remarks @a location pointer is valid only inside callback
  * @param [in] source                 The handle to the source image on which
  *                                    the tracking was carried out
  * @param [in] image_tracking_model   The handle to the image tracking model
index 9185a1a..2f0b46c 100644 (file)
         {
             "name"  : "MV_IMAGE_TRACKING_USE_STABLIZATION",
             "type"  : "boolean",
-            "value" : false
+            "value" : true
         },
         {
             "name"  : "MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT",
             "type"  : "double",
-            "value" : 0.006
+            "value" : 0.00006
         },
         {
             "name"  : "MV_IMAGE_TRACKING_STABLIZATION_SPEED",
             "type"  : "double",
-            "value" : 2
+            "value" : 0.3
         },
         {
             "name"  : "MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION",
             "type"  : "double",
-            "value" : 0.001
+            "value" : 0.1
         },
         {
             "name"  : "MV_FACE_RECOGNITION_MODEL_TYPE",
index c18de17..1c0c55c 100644 (file)
 
 #include "FaceRecognitionModel.h"
 
-#include <app_common.h>
-
 #include "mv_private.h"
 #include "mv_common.h"
 
+#include <app_common.h>
+
 #include <map>
 
 #include <stdio.h>
 namespace MediaVision {
 namespace Face {
 namespace {
+
+unsigned int DefaultUnisizeWidth = 200;
+unsigned int DefaultUnisizeHeight = 200;
+
 int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::FaceRecognizer>& srcAlg,
                cv::Ptr<cv::FaceRecognizer>& dstAlg)
 {
@@ -116,8 +120,8 @@ FaceRecognitionModelConfig::FaceRecognitionModelConfig() :
                mNeighbors(8),
                mGridX(8),
                mGridY(8),
-               mImgWidth(150),
-               mImgHeight(150)
+               mImgWidth(DefaultUnisizeWidth),
+               mImgHeight(DefaultUnisizeHeight)
 {
        ; /* NULL */
 }
@@ -189,18 +193,20 @@ FaceRecognitionModel::~FaceRecognitionModel()
 int FaceRecognitionModel::save(const std::string& fileName)
 {
        if (!m_recognizer.empty()) {
-               /* find directory */
-               std::string prefix_path = std::string(app_get_data_path());
-               LOGD("prefix_path: %s", prefix_path.c_str());
 
                std::string filePath;
-               filePath += prefix_path;
-               filePath += fileName;
+               char *cPath = app_get_data_path();
+               if (NULL == cPath)
+                       filePath = fileName;
+               else
+                       filePath = std::string(cPath) + fileName;
+
+               std::string prefixPath = filePath.substr(0, filePath.find_last_of('/'));
+               LOGD("prefixPath: %s", prefixPath.c_str());
 
                /* check the directory is available */
-               std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
-               if (access(prefix_path_check.c_str(), F_OK)) {
-                       LOGE("Can't save recognition model. Path[%s] doesn't existed.", prefix_path_check.c_str());
+               if (access(prefixPath.c_str(), F_OK)) {
+                       LOGE("Can't save recognition model. Path[%s] doesn't exist.", prefixPath.c_str());
 
                        return MEDIA_VISION_ERROR_INVALID_PATH;
                }
@@ -214,9 +220,13 @@ int FaceRecognitionModel::save(const std::string& fileName)
                switch (m_learnAlgorithmConfig.mModelType) {
                case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES:
                        storage << "algorithm" << "Eigenfaces";
+                       storage << "resizeW" << m_learnAlgorithmConfig.mImgWidth;
+                       storage << "resizeH" << m_learnAlgorithmConfig.mImgHeight;
                        break;
                case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES:
                        storage << "algorithm" << "Fisherfaces";
+                       storage << "resizeW" << m_learnAlgorithmConfig.mImgWidth;
+                       storage << "resizeH" << m_learnAlgorithmConfig.mImgHeight;
                        break;
                case MEDIA_VISION_FACE_MODEL_TYPE_LBPH:
                        storage << "algorithm" << "LBPH";
@@ -240,16 +250,15 @@ int FaceRecognitionModel::save(const std::string& fileName)
 
 int FaceRecognitionModel::load(const std::string& fileName)
 {
-       /* find directory */
-       std::string prefix_path = std::string(app_get_data_path());
-       LOGD("prefix_path: %s", prefix_path.c_str());
-
        std::string filePath;
-       filePath += prefix_path;
-       filePath += fileName;
+       char *cPath = app_get_data_path();
+       if (NULL == cPath)
+               filePath = fileName;
+       else
+               filePath = std::string(cPath) + fileName;
 
        if (access(filePath.c_str(), F_OK)) {
-               LOGE("Can't load face recognition model. File[%s] doesn't exist.", filePath.c_str());
+               LOGE("Can't load face recognition model. File[%s] doesn't exist.", filePath.c_str());
 
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
@@ -274,6 +283,8 @@ int FaceRecognitionModel::load(const std::string& fileName)
 
        if (algName == "Eigenfaces") {
                tempRecognizer = cv::createEigenFaceRecognizer();
+               storage["resizeW"] >> tempConfig.mImgWidth;
+               storage["resizeH"] >> tempConfig.mImgHeight;
                tempRecognizer->load(storage);
                tempConfig.mModelType =
                                MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES;
@@ -282,6 +293,8 @@ int FaceRecognitionModel::load(const std::string& fileName)
                ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
        } else if (algName == "Fisherfaces") {
                tempRecognizer = cv::createFisherFaceRecognizer();
+               storage["resizeW"] >> tempConfig.mImgWidth;
+               storage["resizeH"] >> tempConfig.mImgHeight;
                tempRecognizer->load(storage);
                tempConfig.mModelType =
                                MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES;
@@ -404,7 +417,7 @@ int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config)
                                cv::resize(it->second[sampleInd],
                                resizedSample,
                                cv::Size(config.mImgWidth, config.mImgHeight),
-                               1.0, 1.0, cv::INTER_CUBIC);
+                               0.0, 0.0, cv::INTER_CUBIC);
                                samples.push_back(resizedSample);
                        }
                }
@@ -451,11 +464,32 @@ int FaceRecognitionModel::recognize(const cv::Mat& image, FaceRecognitionResults
 {
        if (!m_recognizer.empty() && m_canRecognize) {
                double absConf = 0.0;
-               m_recognizer->predict(image, results.mFaceLabel, absConf);
-               /* Normalize the absolute value of the confidence */
-               absConf = exp(7.5 - (0.05 * absConf));
-               results.mConfidence = absConf / (1 + absConf);
-               results.mIsRecognized = true;
+               if ((MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES == m_learnAlgorithmConfig.mModelType ||
+                        MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == m_learnAlgorithmConfig.mModelType) &&
+                       (image.cols != m_learnAlgorithmConfig.mImgWidth ||
+                        image.rows != m_learnAlgorithmConfig.mImgHeight)) {
+                       cv::Mat predictionImg(
+                                               m_learnAlgorithmConfig.mImgHeight,
+                                               m_learnAlgorithmConfig.mImgWidth,
+                                               CV_8UC1);
+                       cv::resize(image, predictionImg, predictionImg.size());
+                       m_recognizer->predict(predictionImg, results.mFaceLabel, absConf);
+
+                       if (-1 != results.mFaceLabel) {
+                               results.mConfidence = 1.0;
+                               results.mIsRecognized = true;
+                       } else {
+                               results.mConfidence = 0.0;
+                               results.mIsRecognized = false;
+                       }
+               } else {
+                       m_recognizer->predict(image, results.mFaceLabel, absConf);
+                       /* Normalize the absolute value of the confidence */
+                       absConf = exp(7.5 - (0.05 * absConf));
+                       results.mConfidence = absConf / (1 + absConf);
+                       results.mIsRecognized = true;
+               }
+
                results.mFaceLocation = cv::Rect(0, 0, image.cols, image.rows);
        } else {
                LOGE("Attempt to recognize faces with untrained model");
index 25fdcb8..5feeb2a 100644 (file)
 
 #include "FaceTrackingModel.h"
 
-#include <app_common.h>
-
 #include "mv_private.h"
 #include "mv_common.h"
 
+#include <app_common.h>
+
 #include <unistd.h>
 
 namespace MediaVision {
@@ -73,17 +73,19 @@ int FaceTrackingModel::save(const std::string& fileName)
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       std::string prefix_path = std::string(app_get_data_path());
-       LOGD("prefix_path: %s", prefix_path.c_str());
-
        std::string filePath;
-       filePath += prefix_path;
-filePath += fileName;
+       char *cPath = app_get_data_path();
+       if (NULL == cPath)
+               filePath = fileName;
+       else
+               filePath = std::string(cPath) + fileName;
+
+       std::string prefixPath = filePath.substr(0, filePath.find_last_of('/'));
+       LOGD("prefixPath: %s", prefixPath.c_str());
 
        /* check the directory is available */
-       std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
-       if (access(prefix_path_check.c_str(), F_OK)) {
-               LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefix_path_check.c_str());
+       if (access(prefixPath.c_str(), F_OK)) {
+               LOGE("Can't save tracking model. Path[%s] doesn't exist.", prefixPath.c_str());
 
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
@@ -108,16 +110,15 @@ filePath += fileName;
 
 int FaceTrackingModel::load(const std::string& fileName)
 {
-       /* find directory */
-       std::string prefix_path = std::string(app_get_data_path());
-       LOGD("prefix_path: %s", prefix_path.c_str());
-
        std::string filePath;
-       filePath += prefix_path;
-       filePath += fileName;
+       char *cPath = app_get_data_path();
+       if (NULL == cPath)
+               filePath = fileName;
+       else
+               filePath = std::string(cPath) + fileName;
 
        if (access(filePath.c_str(), F_OK)) {
-               LOGE("Can't load face tracking model. File[%s] doesn't exist.", filePath.c_str());
+               LOGE("Can't load face tracking model. File[%s] doesn't exist.", filePath.c_str());
 
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
index 0269712..739d5f2 100644 (file)
@@ -11,8 +11,8 @@ include_directories("${INC_DIR}")
 include_directories("${PROJECT_SOURCE_DIR}/include")
 include_directories("${PROJECT_SOURCE_DIR}/src")
 
-file(GLOB MV_IMAGE_INC_LIST "${PROJECT_SOURCE_DIR}/include/*.h")
-file(GLOB MV_IMAGE_SRC_LIST "${PROJECT_SOURCE_DIR}/src/*.cpp")
+file(GLOB_RECURSE MV_IMAGE_INC_LIST "${PROJECT_SOURCE_DIR}/include/*.h")
+file(GLOB_RECURSE MV_IMAGE_SRC_LIST "${PROJECT_SOURCE_DIR}/src/*.cpp")
 
 find_package(OpenCV REQUIRED core highgui imgproc objdetect features2d calib3d)
 if(NOT OpenCV_FOUND)
diff --git a/mv_image/image/include/Features/BasicExtractorFactory.h b/mv_image/image/include/Features/BasicExtractorFactory.h
new file mode 100644 (file)
index 0000000..bbfc824
--- /dev/null
@@ -0,0 +1,40 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMAGEFEATURES_BASICEXTRACTORFACTORY_H__
+#define __IMAGEFEATURES_BASICEXTRACTORFACTORY_H__
+
+#include "Features/FeatureExtractorFactory.h"
+
+namespace MediaVision {
+namespace Image {
+
+class BasicExtractorFactory : public FeatureExtractorFactory {
+public:
+       BasicExtractorFactory(KeypointType keypointsType, DescriptorType descType);
+
+       virtual cv::Ptr<FeatureExtractor> buildFeatureExtractor();
+
+private:
+       KeypointType m_kpType;
+
+       DescriptorType m_descType;
+};
+
+} /* Image */
+} /* MediaVision */
+
+#endif /* __IMAGEFEATURES_BASICEXTRACTORFACTORY_H__ */
diff --git a/mv_image/image/include/Features/FeatureExtractor.h b/mv_image/image/include/Features/FeatureExtractor.h
new file mode 100644 (file)
index 0000000..ae55503
--- /dev/null
@@ -0,0 +1,79 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMAGEFEATURES_FEATUREEXTRACTOR_H__
+#define __IMAGEFEATURES_FEATUREEXTRACTOR_H__
+
+#include "ImageConfig.h"
+
+#include "Features/FeaturePack.h"
+
+namespace cv {
+class FeatureDetector;
+class DescriptorExtractor;
+}
+
+namespace MediaVision {
+namespace Image {
+/**
+ * @class FeatureExtractor
+ * @brief Class contains functionality to extract features from an image
+ *
+ * @since_tizen 3.0
+ */
+class FeatureExtractor {
+public:
+       FeatureExtractor();
+
+       void setFeatureDetector(
+                       const cv::Ptr<cv::FeatureDetector> detector,
+                       KeypointType keypointType);
+
+       void setDescriptorExtractor(
+                       cv::Ptr<cv::DescriptorExtractor> extractor,
+                       DescriptorType descriptorType);
+
+       void setRecognitionRateMetric(
+                       float (*computeRecognitionRate)(
+                                       const cv::Mat&,
+                                       const std::vector<cv::KeyPoint>&));
+
+       bool extract(
+                       const cv::Mat& image,
+                       FeaturePack& result,
+                       const std::vector<cv::Point2f>& roi = std::vector<cv::Point2f>());
+
+private:
+       static const cv::Size MinSize;
+
+private:
+       KeypointType m_kpType;
+
+       cv::Ptr<cv::FeatureDetector> m_detector;
+
+       DescriptorType m_descType;
+
+       cv::Ptr<cv::DescriptorExtractor> m_extractor;
+
+       float (*m_computeRecognitionRate)(
+                       const cv::Mat&,
+                       const std::vector<cv::KeyPoint>&);
+};
+
+} /* Image */
+} /* MediaVision */
+
+#endif /* __IMAGEFEATURES_FEATUREEXTRACTOR_H__ */
diff --git a/mv_image/image/include/Features/FeatureExtractorFactory.h b/mv_image/image/include/Features/FeatureExtractorFactory.h
new file mode 100644 (file)
index 0000000..d421478
--- /dev/null
@@ -0,0 +1,37 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMAGEFEATURES_FEATUREEXTRACTORFACTORY_H__
+#define __IMAGEFEATURES_FEATUREEXTRACTORFACTORY_H__
+
+#include "Features/FeatureExtractor.h"
+
+#include <opencv2/core/core.hpp>
+
+namespace MediaVision {
+namespace Image {
+
+class FeatureExtractorFactory {
+public:
+       virtual ~FeatureExtractorFactory();
+
+       virtual cv::Ptr<FeatureExtractor> buildFeatureExtractor() = 0;
+};
+
+} /* Image */
+} /* MediaVision */
+
+#endif /* __IMAGEFEATURES_FEATUREEXTRACTORFACTORY_H__ */
diff --git a/mv_image/image/include/Features/FeatureMatcher.h b/mv_image/image/include/Features/FeatureMatcher.h
new file mode 100644 (file)
index 0000000..f3c2463
--- /dev/null
@@ -0,0 +1,73 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMAGEFEATURES_FEATUREMATCHER_H__
+#define __IMAGEFEATURES_FEATUREMATCHER_H__
+
+#include "Features/FeaturePack.h"
+
+#include <opencv2/features2d/features2d.hpp>
+
+namespace MediaVision {
+namespace Image {
+
+class FeatureMatcher {
+public:
+       enum MatchError{
+               InvalidFeaturePackFrom,
+               InvalidFeaturePackTo,
+               DisparateTypes,
+               MatchesNotFound,
+               Success
+       };
+
+public:
+       FeatureMatcher(
+                       float affectingPart = 1.f,
+                       float tolerantError = 0.f,
+                       size_t minimumMatchesNumber = 0u);
+
+       MatchError match(
+                       const FeaturePack& from,
+                       const FeaturePack& to,
+                       cv::Mat& homographyMatrix) const;
+
+       float getAffectingPart() const;
+
+       void setAffectingPart(float affectingPart);
+
+       float getTolerantError() const;
+
+       void setTolerantError(float tolerantError);
+
+       size_t getMinimumMatchesNumber() const;
+
+       void setMinimumMatchesNumber(size_t minimumMatchesNumber);
+
+private:
+       cv::BFMatcher m_matcher;
+
+       float m_affectingPart;
+
+       float m_tolerantError;
+
+       size_t m_minimumMatchesNumber;
+};
+
+} /* Image */
+} /* MediaVision */
+
+#endif /* __IMAGEFEATURES_FEATUREMATCHER_H__ */
diff --git a/mv_image/image/include/Features/FeaturePack.h b/mv_image/image/include/Features/FeaturePack.h
new file mode 100644 (file)
index 0000000..a100ba6
--- /dev/null
@@ -0,0 +1,79 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMAGEFEATURES_FEATUREPACK_H__
+#define __IMAGEFEATURES_FEATUREPACK_H__
+
+#include "ImageConfig.h"
+
+#include <vector>
+#include <opencv2/core/core.hpp>
+
+namespace cv {
+class KeyPoint;
+}
+
+namespace MediaVision {
+namespace Image {
+/**
+ * @class    FeaturePack
+ * @brief    This class contains information about features and can be used for
+ *           recognition.
+ *
+ * @since_tizen 3.0
+ */
+class FeaturePack {
+public:
+       /**
+        * @brief   @ref FeaturePack default constructor.
+        *
+        * @since_tizen 3.0
+        */
+       FeaturePack();
+
+       /**
+        * @brief   @ref FeaturePack copy constructor.
+        * @details Creates copy of @ref FeaturePack
+        *
+        * @since_tizen 3.0
+        * @param   [in] copy @ref FeaturePack which will be copied
+        */
+       FeaturePack(const FeaturePack& copy);
+
+       /**
+        * @brief   @ref FeaturePack copy assignment operator.
+        * @details Fills the information based on the @a copy
+        *
+        * @since_tizen 3.0
+        * @param   [in] copy @ref FeaturePack which will be copied
+        */
+       FeaturePack& operator= (const FeaturePack& copy);
+
+       KeypointType m_keypointsType;
+
+       std::vector<cv::KeyPoint> m_objectKeypoints;
+
+       DescriptorType m_descriptorsType;
+
+       cv::Mat m_objectDescriptors;
+
+       float m_recognitionRate;
+};
+
+} /* Image */
+} /* MediaVision */
+
+#endif /* __IMAGEFEATURES_FEATUREPACK_H__ */
diff --git a/mv_image/image/include/Features/ORBExtractorFactory.h b/mv_image/image/include/Features/ORBExtractorFactory.h
new file mode 100644 (file)
index 0000000..50f6ad6
--- /dev/null
@@ -0,0 +1,55 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMAGEFEATURES_ORBEXTRACTORFACTORY_H__
+#define __IMAGEFEATURES_ORBEXTRACTORFACTORY_H__
+
+#include "Features/FeatureExtractorFactory.h"
+
+namespace MediaVision {
+namespace Image {
+
+class ORBExtractorFactory : public FeatureExtractorFactory {
+public:
+       ORBExtractorFactory(
+                       float scaleFactor = 1.2f,
+                       size_t maximumFeaturesNumber = 800u);
+
+       virtual cv::Ptr<FeatureExtractor> buildFeatureExtractor();
+
+       float getScaleFactor() const;
+
+       void setScaleFactor(float scaleFactor);
+
+       size_t getMaximumFeaturesNumber() const;
+
+       void setMaximumFeaturesNumber(size_t maximumFeaturesNumber);
+
+private:
+       static float computeRecognitionRate(
+                       const cv::Mat&,
+                       const std::vector<cv::KeyPoint>&);
+
+       float m_scaleFactor; /**< Recognition scale factor for the ORB detector. */
+
+       size_t m_maximumFeaturesNumber; /**< Maximum number of features, which will
+                                                                               be extracted from object image. */
+};
+
+} /* Image */
+} /* MediaVision */
+
+#endif /* __IMAGEFEATURES_ORBEXTRACTORFACTORY_H__ */
index 8f1b348..6caa166 100644 (file)
  * limitations under the License.
  */
 
-#ifndef __IMAGEUTIL_H__
-#define __IMAGEUTIL_H__
+#ifndef __IMAGECONFIG_H__
+#define __IMAGECONFIG_H__
 
-#include <opencv/cv.h>
+#include <string>
 
 /**
- * @file  ImageUtil.h
- * @brief This file contains Image Module utility.
+ * @file  ImageConfig.h
+ * @brief This file contains Image Module Configuration.
  */
 
 namespace MediaVision {
 namespace Image {
 /**
+ * @brief Keypoint's type enumeration.
+ *
+ * @since_tizen 3.0
+ */
+enum KeypointType {
+       KT_INVALID = -1,  /**< Undefined keypoint's type */
+       KT_ORB,           /**< Oriented FAST keypoint's type */
+       KT_GFTT,          /**< Keypoint's type of good features to track */
+       KT_SIZE           /**< Number of keypoint's types */
+};
+
+const std::string KeypointNames[KT_SIZE] = {
+       [KT_ORB] = "ORB",
+       [KT_GFTT] = "GFTT"
+};
+
+/*
+ * @brief Descriptor's type enumeration.
+ *
+ * @since_tizen 3.0
+ */
+enum DescriptorType {
+       DT_INVALID = -1,  /**< Undefined descriptor's type */
+       DT_ORB,           /**< Rotated BRIEF descriptor's type */
+       DT_BRIEF,         /**< Descriptor's type of binary robust independent
+                                                       elementary features */
+       DT_SIZE           /**< Number of descriptor's types */
+};
+
+const std::string DescriptorNames[DT_SIZE] = {
+        [DT_ORB] = "ORB",
+        [DT_BRIEF] = "BRIEF"
+};
+
+/**
  * @brief Contains parameters for features extracting from image objects.
  *
  * @since_tizen 3.0
  */
 struct FeaturesExtractingParams {
-       FeaturesExtractingParams(
-                       double scaleFactor,
-                       int maximumFeaturesNumber);
 
        FeaturesExtractingParams();
 
-       double mScaleFactor; /**< Recognition scale factor for the ORB detector. */
+       KeypointType mKeypointType; /**< Keypoint's type. */
+
+       DescriptorType mDescriptorType; /**< Descriptor's type. */
 
-       int mMaximumFeaturesNumber; /**< Maximum number of features, which will be
-                                                                       extracted from object image. */
+       union { /**< Extracting parameters for concrete algorithms */
+               struct { /**< Extracting parameters for ORB algorithm. */
+                       double mScaleFactor; /**< Recognition scale factor for the ORB detector. */
+                       int mMaximumFeaturesNumber; /**< Maximum number of features,
+                                                                       which will be extracted from object image.*/
+               } ORB;
+       };
 };
 
 /**
+ * @class RecognitionParams
  * @brief Contains parameters for image objects recognition.
  *
  * @since_tizen 3.0
@@ -53,7 +93,7 @@ struct RecognitionParams {
        RecognitionParams(
                        int minMatchesNumber,
                        double requiredMatchesPart,
-                       double allowableMatchesPartError);
+                       double tolerantMatchesPartError);
 
        RecognitionParams();
 
@@ -65,10 +105,11 @@ struct RecognitionParams {
                                                                result in unsustainable behavior, but effect of object overlapping
                                                                will be reduced. Value can be from 0 to 1.*/
 
-       double mAllowableMatchesPartError; /**< Allowable error of matches number. */
+       double mTolerantMatchesPartError; /**< Tolerant error of matches number. */
 };
 
 /**
+ * @class StabilizationParams
  * @brief Contains parameters for contour stabilization during tracking of image
  *        objects.
  *
@@ -76,21 +117,29 @@ struct RecognitionParams {
  */
 struct StabilizationParams {
        StabilizationParams(
+                       bool isEnabled,
                        int historyAmount,
-                       double allowableShift,
+                       double tolerantShift,
+                       double tolerantShiftExtra,
                        double stabilizationSpeed,
                        double stabilizationAcceleration);
 
        StabilizationParams();
 
+       bool mIsEnabled; /**< Flag that specifies whether to use the stabilization. */
+
        int mHistoryAmount; /**< Number of previous recognition results, which
                                                        will influence the stabilization. */
 
-       double mAllowableShift; /**< Relative value of maximum shift per one frame,
+       double mTolerantShift; /**< Relative value of maximum shift per one frame,
                                                                which will be ignored by stabilization.
                                                                It is relative to the object size
                                                                in the current frame. */
 
+       double mTolerantShiftExtra; /**< Constant value which will be added to
+                                                                       maximum shift per one frame,
+                                                                       which will be ignored by stabilization. */
+
        double mStabilizationSpeed; /**< Start speed with which the object will be
                                                                        stabilized. */
 
@@ -102,6 +151,7 @@ struct StabilizationParams {
 };
 
 /**
+ * @class TrackingParams
  * @brief Contains parameters for image objects tracking.
  *
  * @since_tizen 3.0
@@ -133,4 +183,4 @@ struct TrackingParams {
 } /* Image */
 } /* MediaVision */
 
-#endif /* __IMAGEUTIL_H__ */
+#endif /* __IMAGECONFIG_H__ */
index f839ac9..41cdb0c 100644 (file)
  * limitations under the License.
  */
 
-#ifndef __MATHUTIL_H__
-#define __MATHUTIL_H__
+#ifndef __IMAGEMATHUTIL_H__
+#define __IMAGEMATHUTIL_H__
 
 #include <opencv/cv.h>
 
 /**
- * @file  MathUtil.h
+ * @file  ImageMathUtil.h
  * @brief This file contains math utility for Image Module.
  */
 
@@ -69,7 +69,41 @@ float getTriangleArea(
 float getQuadrangleArea(
                const cv::Point2f points[NumberOfQuadrangleCorners]);
 
+/**
+ * @brief   Checks whether a point lies inside the region.
+ *
+ * @since_tizen 3.0
+ * @param [in] point   Point which will be checked for membership in the region
+ * @param [in] region  Contour of region
+ * @return true if point is inside the region, otherwise return false
+ */
+bool checkAccessory(
+               const cv::Point2f& point,
+               const std::vector<cv::Point2f>& region);
+
+/**
+ * @brief   Cuts a rectangle according to the maximum size.
+ * @details From the rectangle will remain only the part which is inside the
+ *          rectangle from {0,0} to @a maxSize
+ *
+ * @since_tizen 3.0
+ * @param [in] rectange   Rectangle which will be cut
+ * @param [in] maxSize    Maximum values of needed rectangle
+ */
+void catRect(cv::Rect& rectange, const cv::Size& maxSize);
+
+/**
+ * @brief   Resizes a region.
+ *
+ * @since_tizen 3.0
+ * @param [in] roi                  Contour of region which will be resized
+ * @param [in] scalingCoefficient   Scaling coefficient
+ */
+std::vector<cv::Point2f> contourResize(
+               const std::vector<cv::Point2f>& roi,
+               float scalingCoefficient);
+
 } /* Image */
 } /* MediaVision */
 
-#endif /* __MATHUTIL_H__ */
+#endif /* __IMAGEMATHUTIL_H__ */
diff --git a/mv_image/image/include/ImageTracker.h b/mv_image/image/include/ImageTracker.h
deleted file mode 100644 (file)
index ea577f4..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __IMAGETRACKER_H__
-#define __IMAGETRACKER_H__
-
-#include "ImageConfig.h"
-
-#include <opencv/cv.h>
-
-/**
- * @file  ImageTracker.h
- * @brief This file contains functionality for image object tracking.
- */
-
-namespace MediaVision {
-namespace Image {
-class ImageRecognizer;
-class ImageTrackingModel;
-/**
- * @class    ImageTracker
- * @brief    This class contains functionality for image object tracking.
- *
- * @since_tizen 3.0
- */
-class ImageTracker {
-private:
-       struct RecognitionInfo {
-               cv::Mat mFrame;
-
-               RecognitionParams mRecognitionParams;
-
-               FeaturesExtractingParams mSceneFeaturesExtractingParams;
-
-               ImageTrackingModel *mpTarget;
-       };
-
-       static void *recognitionThreadFunc(void *recognitionInfo);
-
-public:
-       /**
-        * @brief   @ref ImageTracker constructor based on tracking algorithm
-        *          parameters.
-        *
-        * @since_tizen 3.0
-        * @param [in] trackingParams   Parameters for image objects tracking
-        */
-       ImageTracker(const TrackingParams& trackingParams);
-
-       /**
-        * @brief Tracks the @a target for the video stream consisting of frames.
-        *
-        * @since_tizen 3.0
-        * @remarks Call this function alternately for each frame
-        * @param [in]      frame    Current frame of the video stream
-        * @param [in,out]  target   @ref ImageTrackingModel, which will be tracked
-        */
-       void track(const cv::Mat& frame, ImageTrackingModel& target);
-
-private:
-       void trackDetectedObject(
-                       const cv::Mat& frame,
-                       ImageTrackingModel& target);
-
-       void trackUndetectedObject(
-                       const cv::Mat& frame,
-                       ImageTrackingModel& target);
-
-       cv::Rect computeExpectedArea(
-                       const ImageTrackingModel& target,
-                       const cv::Size& frameSize);
-
-private:
-       TrackingParams m_trackingParams;
-};
-
-} /* Image */
-} /* MediaVision */
-
-#endif /* __IMAGETRACKER_H__ */
similarity index 82%
rename from mv_image/image/include/ImageObject.h
rename to mv_image/image/include/Recognition/ImageObject.h
index 4e33e55..e8bc67a 100644 (file)
@@ -19,6 +19,8 @@
 
 #include "ImageConfig.h"
 
+#include "Features/FeaturePack.h"
+
 #include <opencv/cv.h>
 
 /**
@@ -88,31 +90,15 @@ public:
         *          new @ref ImageObject
         *
         * @since_tizen 3.0
-        * @param [in] image    The image for which instance of @ref ImageObject
-        *                      will be created
-        * @param [in] params   Features extracting parameters
-        */
-       void fill(const cv::Mat& image, const FeaturesExtractingParams& params);
-
-       /**
-        * @brief   Fills @ref ImageObject class based on image.
-        * @details Detects keypoints and extracts features from image and creates
-        *          new @ref ImageObject
-        *
-        * @since_tizen 3.0
         * @param [in] image         The image for which instance of @ref
         *                           ImageObject will be created
-        * @param [in] boundingBox   Bounding box of the object being analyzed in
-        *                           the @a image
         * @param [in] params        Features extracting parameters
-        * @return @a true on success, otherwise a @a false value
-        * @retval true  Successful
-        * @retval false Invalid ROI (bounding box)
+        * @param [in] roi           Region of interest of the object on the @a image
         */
-       bool fill(
-                       const cv::Mat& image,
-                       const cv::Rect& boundingBox,
-                       const FeaturesExtractingParams& params);
+       void fill(
+                               const cv::Mat& image,
+                               const FeaturesExtractingParams& params,
+                               const std::vector<cv::Point2f>& roi = std::vector<cv::Point2f>());
 
        /**
         * @brief Gets a value that determines how well an @ref ImageObject can be recognized.
@@ -138,6 +124,14 @@ public:
        bool isEmpty() const;
 
        /**
+        * @brief Sets a contour for the image object.
+        *
+        * @since_tizen 3.0
+        * @param [in] contour  The contour which will be used with @ref ImageObject
+        */
+       void setContour(const std::vector<cv::Point2f>& contour);
+
+       /**
         * @brief Sets a label for the image object.
         *
         * @since_tizen 3.0
@@ -173,17 +167,17 @@ public:
        int load(const char *fileName);
 
 private:
-       static const int MinWidth = 5;
-       static const int MinHeight = 5;
-
-private:
        void extractFeatures(
                        const cv::Mat& image,
-                       const FeaturesExtractingParams& params);
-
-       void computeRecognitionRate(const cv::Mat& image);
+                       const FeaturesExtractingParams& params,
+                       const std::vector<cv::Point2f>& roi);
 
 private:
+
+       FeaturesExtractingParams m_featureExtractingParams;
+
+       FeaturePack m_features;
+
        bool m_isEmpty;
 
        bool m_isLabeled;
@@ -192,12 +186,6 @@ private:
 
        std::vector<cv::Point2f> m_boundingContour;
 
-       std::vector<cv::KeyPoint> m_objectKeypoints;
-
-       cv::Mat m_objectDescriptors;
-
-       float m_recognitionRate;
-
        friend class ImageRecognizer;
 
        friend std::ostream& operator << (std::ostream& os, const ImageObject& obj);
@@ -19,7 +19,8 @@
 
 #include "ImageMathUtil.h"
 #include "ImageConfig.h"
-#include "ImageObject.h"
+
+#include "Recognition/ImageObject.h"
 
 #include <opencv/cv.h>
 
@@ -39,17 +40,7 @@ namespace Image {
 class ImageRecognizer {
 public:
        /**
-        * @brief   @ref ImageRecognizer constructor based on scene image.
-        *
-        * @since_tizen 3.0
-        * @param [in] sceneImage   The scene in which image objects will be recognized
-        * @param [in] params       Scene features extracting parameters
-        */
-       ImageRecognizer(const cv::Mat& sceneImage,
-                       const FeaturesExtractingParams& params);
-
-       /**
-        * @brief   @ref ImageRecognizer constructor based on thes scene @ref ImageObject.
+        * @brief   @ref ImageRecognizer constructor based on the scene @ref ImageObject.
         *
         * @since_tizen 3.0
         * @param [in] scene   The scene for which the objects will be recognized by
@@ -70,13 +61,17 @@ public:
         * @since_tizen 3.0
         * @param [in]  target    @ref ImageObject, which will be recognized
         * @param [in]  params    Recognition parameters
-        * @param [out] contour   The result contour of @a target object on the scene
+        * @param [out] contour   The result contour of @a target object on the
+        *                        scene
+        * @param [in] ignoreFactor Scaling factor of the area near the contour
+        *              of the object which will be ignored
         * @return true if object is found on the scene, otherwise return false
         */
        bool recognize(
                        const ImageObject& target,
                        const RecognitionParams& params,
-                       std::vector<cv::Point2f>& contour) const;
+                       std::vector<cv::Point2f>& contour,
+                       float ignoreFactor = 0.f) const;
 
 private:
        ImageRecognizer();
@@ -84,7 +79,8 @@ private:
        bool findHomophraphyMatrix(
                        const ImageObject& target,
                        const RecognitionParams& params,
-                       cv::Mat& homophraphyMatrix) const;
+                       cv::Mat& homophraphyMatrix,
+                       float ignoreFactor) const;
 
        size_t matchesSelection(
                        std::vector<cv::DMatch>& examples,
@@ -98,6 +94,7 @@ private:
                        const cv::Point2f corners[NumberOfQuadrangleCorners]);
 
 private:
+       /* TODO: Replace to cv::Ptr<ImageObject> */
        ImageObject m_scene;
 
        cv::BFMatcher m_matcher;
diff --git a/mv_image/image/include/Tracking/AsyncTracker.h b/mv_image/image/include/Tracking/AsyncTracker.h
new file mode 100644 (file)
index 0000000..890f655
--- /dev/null
@@ -0,0 +1,131 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMAGETRACKING_ASYNCTRACKER_H__
+#define __IMAGETRACKING_ASYNCTRACKER_H__
+
+#include "Tracking/ObjectTracker.h"
+
+#include <pthread.h>
+
+namespace MediaVision {
+namespace Image {
+/**
+ * @class    AsyncTracker
+ * @brief    Tracker based on another tracker, extending its
+ *           functionality for asynchronous use
+ *
+ * @since_tizen 3.0
+ */
+class AsyncTracker : public ObjectTracker {
+public:
+       /**
+        * @brief @ref AsyncTracker copy constructor.
+        *
+        * @since_tizen 3.0
+        * @param   [in] copy @ref AsyncTracker which will be copied
+        */
+       AsyncTracker(const AsyncTracker& copy);
+
+       /**
+        * @brief @ref AsyncTracker constructor based on the another tracker.
+        *
+        * @since_tizen 3.0
+        * @remarks If @a copyingPolicy is false, you must guarantee that the
+        *          frame is not changed while tracking is in progress.
+        * @param   [in] baseTracker    Tracker which will be aggregated
+        * @param   [in] copyingPolicy  Flag that determines whether the frame is
+        *                              copied inside @ref track() function
+        */
+       AsyncTracker(cv::Ptr<ObjectTracker> baseTracker, bool copyingPolicy = true);
+
+       /**
+        * @brief @ref AsyncTracker destructor
+        *
+        * @since_tizen 3.0
+        */
+       virtual ~AsyncTracker();
+
+       /**
+        * @brief Tracks the target for the video stream consisting of frames.
+        *
+        * @since_tizen 3.0
+        * @remarks Call this function alternately for each frame
+        * @param [in]   frame    Current frame of the video stream
+        * @param [out]  result   Result contour
+        * @return true if object is tracked, otherwise return false
+        */
+       virtual bool track(const cv::Mat& frame, std::vector<cv::Point>& result);
+
+       /**
+        * @brief Provides the current location of a target.
+        *
+        * @since_tizen 3.0
+        * @param [in] location  Current location of a target
+        */
+       virtual void reinforcement(const std::vector<cv::Point>& location);
+
+       /**
+        * @brief Creates a copy of itself
+        *
+        * @since_tizen 3.0
+        * @return clone
+        */
+       virtual cv::Ptr<ObjectTracker> clone() const;
+
+       bool wait();
+
+       bool isRun();
+
+       bool isUpdated(std::vector<cv::Point>& result);
+
+       bool getResult(std::vector<cv::Point>& location);
+
+private:
+       AsyncTracker& operator= (const AsyncTracker& copy);
+
+       bool baseTrack(std::vector<cv::Point>& result);
+
+       static void *asyncTrack(void *data);
+
+private:
+       cv::Ptr<ObjectTracker> m_baseTracker;
+
+       cv::Mat m_frame;
+
+       std::vector<cv::Point> m_result;
+
+       bool m_isRun;
+
+       bool m_isUpdated;
+
+       bool m_copyingPolicy;
+
+       pthread_t m_thread;
+
+       mutable pthread_mutex_t m_globalGuard;
+
+       mutable pthread_spinlock_t m_resultGuard;
+
+       mutable pthread_spinlock_t m_isRunGuard;
+
+       mutable pthread_spinlock_t m_isUpdatedGuard;
+};
+
+} /* Image */
+} /* MediaVision */
+
+#endif /* __IMAGETRACKING_ASYNCTRACKER_H__ */
diff --git a/mv_image/image/include/Tracking/CascadeTracker.h b/mv_image/image/include/Tracking/CascadeTracker.h
new file mode 100644 (file)
index 0000000..4ac0ec3
--- /dev/null
@@ -0,0 +1,132 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMAGETRACKING_CASCADETRACKER_H__
+#define __IMAGETRACKING_CASCADETRACKER_H__
+
+#include "Tracking/ObjectTracker.h"
+
+#include <opencv2/core/core.hpp>
+
+#include <set>
+
+namespace MediaVision {
+namespace Image {
+/**
+ * @class    CascadeTracker
+ * @brief    Tracker based on other trackers, combining their results.
+ *
+ * @since_tizen 3.0
+ */
+class CascadeTracker : public ObjectTracker {
+public:
+       /**
+        * @brief @ref CascadeTracker default constructor
+        *
+        * @since_tizen 3.0
+        * @param   [in] minimumArea   Minimum detected area
+        */
+       CascadeTracker(float minimumArea = 2000);
+
+       /**
+        * @brief @ref CascadeTracker copy constructor.
+        *
+        * @since_tizen 3.0
+        * @param   [in] copy @ref CascadeTracker which will be copied
+        */
+       CascadeTracker(const CascadeTracker& copy);
+
+       /**
+        * @brief @ref CascadeTracker destructor
+        *
+        * @since_tizen 3.0
+        */
+       virtual ~CascadeTracker();
+
+       /**
+        * @brief Tracks the target for the video stream consisting of frames.
+        *
+        * @since_tizen 3.0
+        * @remarks Call this function alternately for each frame
+        * @param [in]   frame    Current frame of the video stream
+        * @param [out]  result   Result contour
+        * @return true if object is tracked, otherwise return false
+        */
+       virtual bool track(const cv::Mat& frame, std::vector<cv::Point>& result);
+
+       /**
+        * @brief Provides the current location of a target.
+        *
+        * @since_tizen 3.0
+        * @param [in] location  Current location of a target
+        */
+       virtual void reinforcement(const std::vector<cv::Point>& location);
+
+       /**
+        * @brief Creates a copy of itself
+        *
+        * @since_tizen 3.0
+        * @return clone
+        */
+       virtual cv::Ptr<ObjectTracker> clone() const;
+
+       /**
+        * @brief Assignment operator.
+        *
+        * @since_tizen 3.0
+        * @param [in] copy @ref CascadeTracker which will be copied
+        * @return itself
+        */
+       virtual CascadeTracker& operator=(const CascadeTracker& copy);
+
+       bool enableTracker(cv::Ptr<ObjectTracker> tracker, float priority);
+
+       bool disableTracker(cv::Ptr<ObjectTracker> tracker);
+
+private:
+       void internalReinforcement();
+
+       bool mergeResults(std::vector<cv::Point>& result) const;
+
+private:
+       struct TrackerInfo {
+               TrackerInfo(cv::Ptr<ObjectTracker>, float);
+
+               bool operator<(const TrackerInfo&) const;
+
+               bool operator==(const TrackerInfo&) const;
+
+               bool operator!=(const TrackerInfo&) const;
+
+               cv::Ptr<ObjectTracker> mTracker;
+
+               float mPriority;
+
+               mutable std::vector<cv::Point> mResult;
+       };
+
+       /* don't use m_trackers.find() because
+       operator==() and operator<() are independent
+       TODO: Fix it with aggregator or something like that */
+       std::set<TrackerInfo> m_trackers;
+
+       float m_minimumArea;
+};
+
+} /* Image */
+} /* MediaVision */
+
+#endif /* __IMAGETRACKING_CASCADETRACKER_H__ */
diff --git a/mv_image/image/include/Tracking/FeatureSubstitutionTracker.h b/mv_image/image/include/Tracking/FeatureSubstitutionTracker.h
new file mode 100644 (file)
index 0000000..010ca89
--- /dev/null
@@ -0,0 +1,108 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMAGETRACKING_FEATURESUBSTITUTIONTRACKER_H__
+#define __IMAGETRACKING_FEATURESUBSTITUTIONTRACKER_H__
+
+#include "Tracking/ObjectTracker.h"
+
+#include "Recognition/ImageObject.h"
+
+namespace MediaVision {
+namespace Image {
+/**
+ * @class    FeatureSubstitutionTracker
+ * @brief    Tracker uses feature substitution.
+ *
+ * @since_tizen 3.0
+ */
+class FeatureSubstitutionTracker : public ObjectTracker {
+public:
+       /**
+        * @brief @ref FeatureSubstitutionTracker constructor.
+        *
+        * @since_tizen 3.0
+        * @param [in] featuresExtractingParams   Parameters of feature extracting
+        * @param [in] recognitionParams          Parameters of recognition
+        * @param [in] expectedOffset             Expected offset
+        * @param [in] sceneScalingFactor         Additional area around target
+        *                                        contour on the frame which will
+        *                                        be use for recognition
+        *                                        (recommended value is 1 - 1.5)
+        * @param [in] objectScalingFactor        Additional area near the contour
+        *                                        of object which will be ignored by
+        *                                        recognition
+        *                                        (recommended value is 0.5 - 1)
+        */
+       FeatureSubstitutionTracker(
+                       const FeaturesExtractingParams& featuresExtractingParams,
+                       const RecognitionParams& recognitionParams,
+                       float expectedOffset,
+                       float sceneScalingFactor = 1.2f,
+                       float objectScalingFactor = 0.85f);
+
+       /**
+        * @brief Tracks the target for the video stream consisting of frames.
+        *
+        * @since_tizen 3.0
+        * @remarks Call this function alternately for each frame
+        * @param [in]   frame    Current frame of the video stream
+        * @param [out]  result   Result contour
+        * @return true if object is tracked, otherwise return false
+        */
+       virtual bool track(const cv::Mat& frame, std::vector<cv::Point>& result);
+
+       /**
+        * @brief Provides the current location of a target.
+        *
+        * @since_tizen 3.0
+        * @param [in] location  Current location of a target
+        */
+       virtual void reinforcement(const std::vector<cv::Point>& location);
+
+       /**
+        * @brief Creates a copy of itself
+        *
+        * @since_tizen 3.0
+        * @return clone
+        */
+       virtual cv::Ptr<ObjectTracker> clone() const;
+
+private:
+       std::vector<cv::Point2f> computeExpectedArea();
+
+private:
+       bool m_isInit;
+
+       cv::Ptr<ImageObject> m_target;
+
+       std::vector<cv::Point> m_location;
+
+       FeaturesExtractingParams m_featureExtractingParams;
+
+       RecognitionParams m_recogParams;
+
+       float m_expectedOffset;
+
+       float m_sceneScalingFactor;
+
+       float m_objectScalingFactor;
+};
+
+} /* Image */
+} /* MediaVision */
+
+#endif /* __IMAGETRACKING_FEATURESUBSTITUTIONTRACKER_H__ */
@@ -39,6 +39,20 @@ namespace Image {
 class ImageContourStabilizator {
 public:
        /**
+        * @brief Enumeration for stabilization return value
+        *
+        * @since_tizen 3.0
+        */
+       enum StabilizationError {
+               Successfully,           /**< Contour is stabilized. */
+               TooShortMovingHistory,  /**< Too short moving history, it's normal
+                                                                                                       behavior, you can continue to call
+                                                                                                       stabilization in order to accumulate it. */
+               InvalidSettings,        /**< Invalid settings. */
+               UnsupportedContourType  /**< Unsupported contour type. */
+       };
+
+       /**
         * @brief   @ref ImageContourStabilizator default constructor.
         *
         * @since_tizen 3.0
@@ -52,11 +66,15 @@ public:
         * @remarks Call this function alternately for each contour from sequence
         * @param [in,out] contour   @ref contour, which will be stabilized
         * @param [in]     params    configuration parameters
-        * @return true if contour is stabilized, otherwise return false
+        * @return Successfully if contour is stabilized, otherwise return error
+        * @retval #Successfully           Contour is stabilized
+        * @retval #TooShortMovingHistory  Too short moving history
+        * @retval #InvalidSettings        Invalid settings
+        * @retval #UnsupportedContourType Unsupported contour type
         */
-       bool stabilize(
-                               std::vector<cv::Point2f>& contour,
-                               const StabilizationParams& params);
+       StabilizationError stabilize(
+                       std::vector<cv::Point2f>& contour,
+                       const StabilizationParams& params);
 
        /**
         * @brief Resets stabilization process.
@@ -67,10 +85,14 @@ public:
        void reset(void);
 
 private:
+       bool updateSettings(const StabilizationParams& params);
+
        std::vector<cv::Point2f> computeStabilizedQuadrangleContour(void);
 
 private:
-       static const size_t MovingHistoryAmount = 3u;
+       float m_tolerantShift;
+
+       float m_tolerantShiftExtra;
 
        std::vector<float> m_speeds;
 
@@ -80,6 +102,8 @@ private:
 
        std::vector<cv::Point2f> m_lastStabilizedContour;
 
+       size_t m_historyAmount;
+
        size_t m_currentHistoryAmount;
 
        int m_tempContourIndex;
 #ifndef __IMAGETRACKINGMODEL_H__
 #define __IMAGETRACKINGMODEL_H__
 
-#include "ImageObject.h"
+#include "Recognition/ImageObject.h"
 
-#include "ImageContourStabilizator.h"
-
-#include <opencv/cv.h>
-
-#include <pthread.h>
-
-#include <vector>
-#include <list>
+#include "Tracking/ObjectTracker.h"
+#include "Tracking/ImageContourStabilizator.h"
 
 /**
  * @file  ImageTrackingModel.h
@@ -35,7 +29,6 @@
 
 namespace MediaVision {
 namespace Image {
-class ImageContourStabilizator;
 /**
  * @class    ImageTrackingModel
  * @brief    This class contains the tracking functionality for image objects.
@@ -43,26 +36,6 @@ class ImageContourStabilizator;
  * @since_tizen 3.0
  */
 class ImageTrackingModel {
-private:
-       /**
-        * @brief   @ref ImageTrackingModel state enumeration.
-        *
-        * @since_tizen 3.0
-        */
-       enum State {
-               Invalid,     /**< Invalid tracking model can not be tracked. Set not
-                                       empty image object as target by using function
-                                       @ref setTarget() to make tracking model valid, also
-                                       you can load valid tracking model by using @ref load() */
-               Undetected,  /**< The object was not recognized on the last frame. Ready
-                                       for further recognition */
-               Appeared,    /**< The object was recognized on one of the last frames
-                                       after its absence  */
-               Tracked,    /**< The object was recognized on the last frame. Its
-                                       location can be obtained by calling method getLocation() */
-               InProcess    /**< The object is in the recognition process */
-       };
-
 public:
        /**
         * @brief   @ref ImageTrackingModel default constructor
@@ -72,15 +45,6 @@ public:
        ImageTrackingModel();
 
        /**
-        * @brief   @ref ImageTrackingModel constructor based on tracking algorithm
-        *          parameters.
-        *
-        * @since_tizen 3.0
-        * @param[in] recognitionObject  @ref ImageObject which will be tracked
-        */
-       ImageTrackingModel(const ImageObject& recognitionObject);
-
-       /**
         * @brief   @ref ImageTrackingModel copy constructor.
         * @details Creates copy of @ref ImageTrackingModel
         *
@@ -90,13 +54,6 @@ public:
        ImageTrackingModel(const ImageTrackingModel& copy);
 
        /**
-        * @brief   @ref ImageTrackingModel destructor.
-        *
-        * @since_tizen 3.0
-        */
-       ~ImageTrackingModel();
-
-       /**
         * @brief   Sets @ref ImageObject as target which will be tracked.
         *
         * @since_tizen 3.0
@@ -118,6 +75,17 @@ public:
        bool isValid() const;
 
        /**
+        * @brief Tracks the target for the video stream consisting of frames.
+        *
+        * @since_tizen 3.0
+        * @remarks Call this function alternately for each frame
+        * @param [in]   frame    Current frame of the video stream
+        * @param [out]  result   Result contour
+        * @return true if target is tracked, otherwise return false
+        */
+       bool track(const cv::Mat& frame, std::vector<cv::Point>& result);
+
+       /**
         * @brief   Refreshes tracking model.
         *
         * @since_tizen 3.0
@@ -149,52 +117,10 @@ public:
         * @since_tizen 3.0
         * @param  [in] filepath  File name from which will be loaded a model
         * @return @a 0 on success, otherwise a negative error value
-       */
-       int load(const char *filepath);
-
-       /**
-        * @brief  Checks state of the @ref ImageTrackingModel.
-        *
-        * @since_tizen 3.0
-        * @return @a true if object was detected on the last processed frame,
-        *         otherwise a @a false value
         */
-       bool isDetected() const;
-
-       /**
-        * @brief  Gets last location of the @ref ImageTrackingModel.
-        *
-        * @since_tizen 3.0
-        * @return Last detected location
-        */
-       std::vector<cv::Point2f> getLastlocation() const;
-
-private:
-       ImageObject m_recognitionObject;
-
-       ImageContourStabilizator m_stabilizator;
-
-       std::vector<cv::Point2f> m_lastLocation;
-
-       State m_state;
-
-       pthread_t m_recognitionThread;
-
-       mutable pthread_mutex_t m_globalGuard;
-
-       mutable pthread_spinlock_t m_lastLocationGuard;
-
-       mutable pthread_spinlock_t m_stateGuard;
-
-       friend std::ostream& operator << (
-                       std::ostream& os,
-                       const ImageTrackingModel::State& state);
-
-       friend std::istream& operator >> (
-                       std::istream& is,
-                       ImageTrackingModel::State& state);
+       int load(const char *filepath);
 
-       friend std::ostream& operator << (
+	friend std::ostream& operator << (
                        std::ostream& os,
                        const ImageTrackingModel& obj);
 
@@ -202,7 +128,16 @@ private:
                        std::istream& is,
                        ImageTrackingModel& obj);
 
-       friend class ImageTracker;
+private:
+       ImageObject m_target;
+
+       cv::Ptr<ObjectTracker> m_tracker;
+
+       ImageContourStabilizator m_stabilizator;
+
+       std::vector<cv::Point> m_location;
+
+       StabilizationParams m_stabilizationParams;
 };
 
 } /* Image */
diff --git a/mv_image/image/include/Tracking/MFTracker.h b/mv_image/image/include/Tracking/MFTracker.h
new file mode 100644 (file)
index 0000000..90652e4
--- /dev/null
@@ -0,0 +1,151 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMAGETRACKING_MFTRACKER_H__
+#define __IMAGETRACKING_MFTRACKER_H__
+
+#include "Tracking/ObjectTracker.h"
+
+namespace MediaVision {
+namespace Image {
+/**
+ * @class    MFTracker
+ * @brief    Median Flow tracker implementation.
+ *
+ * @since_tizen 3.0
+ */
+class MFTracker : public ObjectTracker {
+public:
+       struct Params {
+               /**
+                * @brief TrackerMedianFlow algorithm parameters constructor
+                */
+		Params();
+
+		int mPointsInGrid; /**< Square root of number of used keypoints.
+								Increase it to trade accurateness for speed.
+								Default value is sensible and recommended */
+
+		cv::Size mWindowSize; /**< Size of the search window at each pyramid level
+								for Lucas-Kanade optical flow search used for
+								tracking */
+
+		int mPyrMaxLevel; /**< Number of pyramid levels for Lucas-Kanade optical
+								flow search used for tracking */
+
+		/* TODO: add lifetime */
+		/*time_t mLifetime;*/  /**< Time of tracking without reinforcement. */
+	};
+
+       /**
+        * @brief   @ref MFTracker constructor based on tracking algorithm parameters.
+        *
+        * @since_tizen 3.0
+        * @param [in] params   Parameters for objects tracking
+        */
+       MFTracker(Params params = Params());
+
+       /**
+        * @brief Tracks the target for the video stream consisting of frames.
+        *
+        * @since_tizen 3.0
+        * @remarks Call this function alternately for each frame
+        * @param [in]   frame    Current frame of the video stream
+        * @param [out]  result   Result contour
+        * @return true if object is tracked, otherwise return false
+        */
+       virtual bool track(const cv::Mat& frame, std::vector<cv::Point>& result);
+
+       /**
+        * @brief Provides the current location of a target.
+        *
+        * @since_tizen 3.0
+        * @param [in] location  Current location of a target
+        */
+       virtual void reinforcement(const std::vector<cv::Point>& location);
+
+       /**
+        * @brief Creates a copy of itself
+        *
+        * @since_tizen 3.0
+        * @return clone
+        */
+       virtual cv::Ptr<ObjectTracker> clone() const;
+
+private:
+       bool isInited() const;
+
+       bool init(const cv::Mat& image);
+
+       bool update(const cv::Mat& image);
+
+       float getLastConfidence() const;
+
+       cv::Rect_<float> getLastBoundingBox() const;
+
+       bool medianFlowImpl(cv::Mat oldImage, cv::Mat newImage, cv::Rect_<float>& oldBox);
+
+       cv::Rect_<float> vote(
+                       const std::vector<cv::Point2f>& oldPoints,
+                       const std::vector<cv::Point2f>& newPoints,
+                       const cv::Rect_<float>& oldRect,
+                       cv::Point2f& mD);
+
+       void check_FB(
+                       std::vector<cv::Mat> newPyramid,
+                       const std::vector<cv::Point2f>& oldPoints,
+                       const std::vector<cv::Point2f>& newPoints,
+                       std::vector<bool>& status);
+
+       void check_NCC(
+                       const cv::Mat& oldImage,
+                       const cv::Mat& newImage,
+                       const std::vector<cv::Point2f>& oldPoints,
+                       const std::vector<cv::Point2f>& newPoints,
+                       std::vector<bool>& status);
+
+private:
+       bool m_isInit;                /**< Flag is used to determine the model
+                                                                               initialization */
+
+       Params m_params;              /**< Parameters used during tracking, see
+                                                                               @ref TrackerMedianFlow::Params */
+
+       cv::TermCriteria m_termcrit;  /**< Terminating criteria for OpenCV
+											Lucas-Kanade optical flow algorithm used
+                                                                               during tracking */
+
+       std::vector<cv::Point2f> m_startLocation; /**< Tracking object start
+                                                                                                       location with relative values
+                                                                                                       to the bounding box */
+
+       cv::Rect_<float> m_boundingBox;  /**< Tracking object bounding box */
+
+       float m_confidence;              /**< Confidence that object was tracked
+                                                                                       correctly at the last tracking iteration */
+
+       cv::Mat m_image;                 /**< Last image for which tracking was
+                                                                                       performed */
+
+       std::vector<cv::Mat> m_pyramid;  /**< The pyramid had been calculated for
+                                                                                       the previous frame (or when
+                                                                                       initialize the model) */
+};
+
+} /* Image */
+} /* MediaVision */
+
+#endif /* __IMAGETRACKING_MFTRACKER_H__ */
diff --git a/mv_image/image/include/Tracking/ObjectTracker.h b/mv_image/image/include/Tracking/ObjectTracker.h
new file mode 100644 (file)
index 0000000..77e884e
--- /dev/null
@@ -0,0 +1,80 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMAGETRACKING_OBJECTTRACKER_H__
+#define __IMAGETRACKING_OBJECTTRACKER_H__
+
+#include <opencv2/core/core.hpp>
+
+namespace MediaVision {
+namespace Image {
+/**
+ * @class    ObjectTracker
+ * @brief    Basic object tracker.
+ *
+ * @since_tizen 3.0
+ */
+class ObjectTracker {
+public:
+       /**
+        * @brief @ref ObjectTracker destructor
+        *
+        * @since_tizen 3.0
+        */
+       virtual ~ObjectTracker();
+
+       /**
+        * @brief Tracks the target for the video stream consisting of frames.
+        *
+        * @since_tizen 3.0
+        * @remarks Call this function alternately for each frame
+        * @param [in]   frame    Current frame of the video stream
+        * @param [out]  result   Result contour
+        * @return true if object is tracked, otherwise return false
+        */
+       virtual bool track(const cv::Mat& frame, std::vector<cv::Point>& result) = 0;
+
+       /**
+        * @brief Provides the current location of a target.
+        *
+        * @since_tizen 3.0
+        * @param [in] location  Current location of a target
+        */
+       virtual void reinforcement(const std::vector<cv::Point>& location) = 0;
+
+	/**
+        * @brief Creates a copy of itself
+        *
+        * @since_tizen 3.0
+        * @return clone
+        */
+       virtual cv::Ptr<ObjectTracker> clone() const = 0;
+
+private:
+       /**
+        * @brief Assignment operator for the base class @ref ObjectTracker.
+        *
+        * @since_tizen 3.0
+        * @param [in] copy @ref ObjectTracker which will be copied
+        * @return itself
+        */
+       ObjectTracker& operator=(const ObjectTracker& copy);
+};
+
+} /* Image */
+} /* MediaVision */
+
+#endif /* __IMAGETRACKING_OBJECTTRACKER_H__ */
diff --git a/mv_image/image/include/Tracking/RecognitionBasedTracker.h b/mv_image/image/include/Tracking/RecognitionBasedTracker.h
new file mode 100644 (file)
index 0000000..8106860
--- /dev/null
@@ -0,0 +1,93 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMAGETRACKING_RECOGNITIONBASEDTRACKER_H__
+#define __IMAGETRACKING_RECOGNITIONBASEDTRACKER_H__
+
+#include "Tracking/ObjectTracker.h"
+
+#include "Recognition/ImageObject.h"
+
+namespace MediaVision {
+namespace Image {
+/**
+ * @class    RecognitionBasedTracker
+ * @brief    Tracker uses recognition of target on the entire frame.
+ *
+ * @since_tizen 3.0
+ */
+class RecognitionBasedTracker : public ObjectTracker {
+public:
+       /**
+        * @brief @ref RecognitionBasedTracker constructor.
+        *
+        * @since_tizen 3.0
+        * @param [in] target                          Target of recognition
+        * @param [in] sceneFeaturesExtractingParams   Parameters of feature
+        *                                             extracting from frames
+        * @param [in] recognitionParams               Parameters of recognition
+        */
+       RecognitionBasedTracker(
+                       const ImageObject& target,
+                       const FeaturesExtractingParams& sceneFeaturesExtractingParams,
+                       const RecognitionParams& recognitionParams);
+
+       /**
+        * @brief @ref RecognitionBasedTracker destructor
+        *
+        * @since_tizen 3.0
+        */
+       virtual ~RecognitionBasedTracker();
+
+       /**
+        * @brief Tracks the target for the video stream consisting of frames.
+        *
+        * @since_tizen 3.0
+        * @remarks Call this function alternately for each frame
+        * @param [in]   frame    Current frame of the video stream
+        * @param [out]  result   Result contour
+        * @return true if object is tracked, otherwise return false
+        */
+       virtual bool track(const cv::Mat& frame, std::vector<cv::Point>& result);
+
+       /**
+        * @brief Provides the current location of a target.
+        *
+        * @since_tizen 3.0
+        * @param [in] location  Current location of a target
+        */
+       virtual void reinforcement(const std::vector<cv::Point>& location);
+
+	/**
+        * @brief Creates a copy of itself
+        *
+        * @since_tizen 3.0
+        * @return clone
+        */
+       virtual cv::Ptr<ObjectTracker> clone() const;
+
+private:
+       ImageObject m_target;
+
+       FeaturesExtractingParams m_sceneFeatureExtractingParams;
+
+       RecognitionParams m_recogParams;
+};
+
+} /* Image */
+} /* MediaVision */
+
+#endif /* __IMAGETRACKING_RECOGNITIONBASEDTRACKER_H__ */
diff --git a/mv_image/image/src/Features/BasicExtractorFactory.cpp b/mv_image/image/src/Features/BasicExtractorFactory.cpp
new file mode 100644 (file)
index 0000000..09285da
--- /dev/null
@@ -0,0 +1,48 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Features/BasicExtractorFactory.h"
+
+#include <opencv/cv.h>
+
+namespace MediaVision {
+namespace Image {
+BasicExtractorFactory::BasicExtractorFactory(
+               KeypointType keypointsType,
+               DescriptorType descType) :
+                               m_kpType(keypointsType),
+                               m_descType(descType)
+{
+}
+
+cv::Ptr<FeatureExtractor> BasicExtractorFactory::buildFeatureExtractor()
+{
+       cv::Ptr<FeatureExtractor> featureExtractor(new FeatureExtractor());
+
+       cv::Ptr<cv::FeatureDetector> detector =
+                       cv::FeatureDetector::create(KeypointNames[m_kpType]);
+
+       cv::Ptr<cv::DescriptorExtractor> extractor =
+                       cv::DescriptorExtractor::create(DescriptorNames[m_descType]);
+
+       featureExtractor->setFeatureDetector(detector, m_kpType);
+       featureExtractor->setDescriptorExtractor(extractor, m_descType);
+
+       return featureExtractor;
+}
+
+} /* Image */
+} /* MediaVision */
diff --git a/mv_image/image/src/Features/FeatureExtractor.cpp b/mv_image/image/src/Features/FeatureExtractor.cpp
new file mode 100644 (file)
index 0000000..be9224b
--- /dev/null
@@ -0,0 +1,140 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Features/FeatureExtractor.h"
+
+#include "ImageMathUtil.h"
+
+#include <opencv/cv.h>
+
+namespace MediaVision {
+namespace Image {
+const cv::Size FeatureExtractor::MinSize = cv::Size(5, 5);
+
+FeatureExtractor::FeatureExtractor() :
+               m_kpType(KT_INVALID),
+               m_detector(),
+               m_descType(DT_INVALID),
+               m_extractor(),
+               m_computeRecognitionRate(NULL)
+{
+}
+
+void FeatureExtractor::setFeatureDetector(
+               const cv::Ptr<cv::FeatureDetector> detector,
+               KeypointType keypointType)
+{
+       m_detector = detector;
+       m_kpType = keypointType;
+}
+
+void FeatureExtractor::setDescriptorExtractor(
+               cv::Ptr<cv::DescriptorExtractor> extractor,
+               DescriptorType descriptorType)
+{
+       m_extractor = extractor;
+       m_descType = descriptorType;
+}
+
+void FeatureExtractor::setRecognitionRateMetric(
+               float (*computeRecognitionRate)(
+                               const cv::Mat&,
+                               const std::vector<cv::KeyPoint>&))
+{
+       m_computeRecognitionRate = computeRecognitionRate;
+}
+
+bool FeatureExtractor::extract(
+               const cv::Mat& image,
+               FeaturePack& result,
+               const std::vector<cv::Point2f>& roi)
+{
+       if (m_detector.empty() || m_extractor.empty())
+               return false;
+
+       cv::Rect boundingBox;
+
+       if (roi.empty()) {
+               boundingBox.x = 0;
+               boundingBox.y = 0;
+               boundingBox.width = image.cols;
+               boundingBox.height = image.rows;
+       } else {
+               if (roi.size() < 3)
+                       return false;
+
+               boundingBox = cv::boundingRect(roi);
+               catRect(boundingBox, image.size());
+       }
+
+       if (boundingBox.width < MinSize.width || boundingBox.height < MinSize.height)
+               return false;
+
+       result.m_objectKeypoints.clear();
+
+       std::vector<cv::KeyPoint> keypoints;
+
+       m_detector->detect(
+                       image(boundingBox),
+                       keypoints);
+
+       result.m_objectKeypoints = keypoints;
+       if (!roi.empty()) {
+               const size_t numberOfKeypoints = keypoints.size();
+               result.m_objectKeypoints.resize(numberOfKeypoints);
+               for (size_t i = 0; i < numberOfKeypoints; ++i) {
+                       result.m_objectKeypoints[i].pt.x += boundingBox.x;
+                       result.m_objectKeypoints[i].pt.y += boundingBox.y;
+               }
+       }
+
+       if (!roi.empty()) {
+		/* TODO: Encode roi to reduce the boundary effect. Provide a new
+		parameter for this action because roi is a bounding contour for the object. */
+
+               for (size_t i = 0; i < result.m_objectKeypoints.size(); ++i) {
+                       if (!checkAccessory(result.m_objectKeypoints[i].pt, roi)) {
+                               result.m_objectKeypoints.erase(result.m_objectKeypoints.begin() + i);
+                               --i;
+                       }
+               }
+       }
+
+       m_extractor->compute(
+                       image,
+                       result.m_objectKeypoints,
+                       result.m_objectDescriptors);
+
+       if (NULL != m_computeRecognitionRate) {
+               result.m_recognitionRate = m_computeRecognitionRate(
+                               image(boundingBox),
+                               keypoints);
+       } else {
+               /* Default recognition rate metric */
+               if (result.m_objectKeypoints.size() < MinimumNumberOfFeatures)
+                       result.m_recognitionRate = 0.f;
+               else
+                       result.m_recognitionRate = 0.5f;
+       }
+
+       result.m_keypointsType = m_kpType;
+       result.m_descriptorsType = m_descType;
+
+       return true;
+}
+
+} /* Image */
+} /* MediaVision */
diff --git a/mv_image/image/src/Features/FeatureExtractorFactory.cpp b/mv_image/image/src/Features/FeatureExtractorFactory.cpp
new file mode 100644 (file)
index 0000000..be022d5
--- /dev/null
@@ -0,0 +1,28 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Features/FeatureExtractorFactory.h"
+
+namespace MediaVision {
+namespace Image {
+
+FeatureExtractorFactory::~FeatureExtractorFactory()
+{
+	; /* NULL */
+}
+
+} /* Image */
+} /* MediaVision */
diff --git a/mv_image/image/src/Features/FeatureMatcher.cpp b/mv_image/image/src/Features/FeatureMatcher.cpp
new file mode 100644 (file)
index 0000000..dbf72df
--- /dev/null
@@ -0,0 +1,244 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Features/FeatureMatcher.h"
+
+#include "ImageMathUtil.h"
+
+#include <opencv/cv.h>
+
+namespace MediaVision {
+namespace Image {
+
+namespace {
+float computeLinearSupportElement(
+               const std::vector<cv::DMatch>& examples,
+               int requiredNumber,
+               int leftLimit,
+               int rightLimit)
+{
+       int sizeOfExamples = rightLimit - leftLimit + 1;
+
+       if (sizeOfExamples <= 1)
+               return examples[leftLimit].distance;
+
+       float minValue = examples[leftLimit].distance;
+       float maxValue = examples[leftLimit].distance;
+
+       /* Finding the maximum and minimum values */
+       for (int i = leftLimit + 1; i <= rightLimit; ++i) {
+               if (minValue > examples[i].distance)
+                       minValue = examples[i].distance;
+               else if (maxValue < examples[i].distance)
+                       maxValue = examples[i].distance;
+       }
+
+       /* Linear approximation. f(x) = k*x + b */
+       /* f(sizeOfExamples) = maxValue; f(1) = minValue; */
+       const float b = (maxValue - minValue * sizeOfExamples) / (1 - sizeOfExamples);
+       const float k = minValue - b;
+
+       /* Calculation of the support element */
+       return k * requiredNumber + b;
+}
+
+size_t matchesSelection(
+               std::vector<cv::DMatch>& examples,
+               size_t filterAmount,
+               size_t allowableError)
+{
+       size_t sizeOfExamples = examples.size();
+
+       if ((filterAmount + allowableError) > sizeOfExamples)
+               return sizeOfExamples;
+
+       int startLeftLimit = 0;
+       int startRightLimit = sizeOfExamples - 1;
+
+       int leftLimit = startLeftLimit;
+       int rightLimit = startRightLimit;
+
+       int requiredNumber = filterAmount;
+
+       float supportElement = 0.f;
+
+       while (true) {
+               if (leftLimit >= rightLimit) {
+                       if (leftLimit < (requiredNumber - (int)allowableError))
+                               leftLimit = requiredNumber + (int)allowableError;
+
+                       break;
+               }
+
+               supportElement = computeLinearSupportElement(examples, requiredNumber,
+                               leftLimit, rightLimit);
+
+               /* Iteration similar quicksort */
+               while (true) {
+                       /* Search the leftmost element which have bigger confidence than support element */
+                       while (examples[leftLimit].distance <= supportElement &&
+                               leftLimit < startRightLimit) {
+                               ++leftLimit;
+                       }
+
+                       /* Search the rightmost element which have smaller confidence than support element */
+                       while (examples[rightLimit].distance >= supportElement &&
+                               rightLimit >= startLeftLimit) {
+                               --rightLimit;
+                       }
+
+                       if (leftLimit >= rightLimit)
+                               break;
+
+                       /* Swap */
+			std::swap(examples[leftLimit], examples[rightLimit]);
+               }
+
+               if (abs(filterAmount - leftLimit) <= (int)allowableError)
+                       break;
+
+               if ((int)filterAmount > leftLimit) {
+                       requiredNumber -= leftLimit - startLeftLimit;
+
+                       rightLimit = startRightLimit;
+                       startLeftLimit = leftLimit;
+               } else {
+                       leftLimit = startLeftLimit;
+                       startRightLimit = rightLimit;
+               }
+       }
+
+       return (size_t)leftLimit;
+}
+
+} /* anonymous namespace */
+
+FeatureMatcher::FeatureMatcher( /* matcher over FeaturePack descriptor sets */
+               float affectingPart,
+               float tolerantError,
+               size_t minimumMatchesNumber)
+{
+       setAffectingPart(affectingPart); /* setters clamp the ratio parameters to [0, 1] */
+       setTolerantError(tolerantError);
+       setMinimumMatchesNumber(minimumMatchesNumber);
+}
+
+FeatureMatcher::MatchError FeatureMatcher::match( /* estimates the perspective transform mapping 'from' onto 'to' */
+               const FeaturePack& from,
+               const FeaturePack& to,
+               cv::Mat& homophraphyMatrix) const /* out: 3x3 homography (param name spelling kept as declared) */
+{
+       if (MinimumNumberOfFeatures > from.m_objectKeypoints.size()) /* homography needs >= 4 point pairs */
+               return InvalidFeaturePackFrom;
+
+       if (MinimumNumberOfFeatures > to.m_objectKeypoints.size())
+               return InvalidFeaturePackTo;
+
+       if (from.m_descriptorsType != to.m_descriptorsType) /* descriptor distances are incomparable across types */
+               return DisparateTypes;
+
+       std::vector<cv::DMatch> matches;
+
+       m_matcher.match(from.m_objectDescriptors, to.m_objectDescriptors, matches);
+
+       size_t matchesNumber = matches.size();
+
+       if (MinimumNumberOfFeatures > matchesNumber)
+               return MatchesNotFound;
+
+       size_t requiredMatchesNumber = m_affectingPart * matchesNumber; /* float ratio truncated to size_t */
+       size_t allowableMatchesNumberError = m_tolerantError * requiredMatchesNumber;
+
+       if (matchesNumber - allowableMatchesNumberError > MinimumNumberOfFeatures && /* filter only if enough matches would remain */
+               requiredMatchesNumber + allowableMatchesNumberError < matchesNumber) {
+               if (requiredMatchesNumber - allowableMatchesNumberError <
+                       m_minimumMatchesNumber) {
+                       if (requiredMatchesNumber + allowableMatchesNumberError >
+                               m_minimumMatchesNumber) {
+                               requiredMatchesNumber = (requiredMatchesNumber + /* re-center target above m_minimumMatchesNumber */
+                                               m_minimumMatchesNumber + allowableMatchesNumberError) / 2;
+
+                               allowableMatchesNumberError = requiredMatchesNumber -
+                                               m_minimumMatchesNumber + allowableMatchesNumberError;
+                       } else {
+                               const size_t minimalAllowableMatchesNumberError = 2u;
+
+                               requiredMatchesNumber = minimalAllowableMatchesNumberError +
+                                                                               m_minimumMatchesNumber;
+
+                               allowableMatchesNumberError = minimalAllowableMatchesNumberError;
+                       }
+               }
+
+               const size_t filterAmount = matchesSelection( /* keep approximately the best-distance matches */
+                                                                                                       matches,
+                                                                                                       requiredMatchesNumber,
+                                                                                                       allowableMatchesNumberError);
+
+               if (filterAmount >= MinimumNumberOfFeatures)
+                       matches.resize(filterAmount);
+
+               matchesNumber = matches.size();
+       }
+
+       std::vector<cv::Point2f> objectPoints(matchesNumber); /* gather matched coordinates for homography estimation */
+       std::vector<cv::Point2f> scenePoints(matchesNumber);
+
+       for (size_t matchIdx = 0; matchIdx < matchesNumber; ++matchIdx) {
+               objectPoints[matchIdx] =
+                               from.m_objectKeypoints[matches[matchIdx].queryIdx].pt;
+
+               scenePoints[matchIdx] =
+                               to.m_objectKeypoints[matches[matchIdx].trainIdx].pt;
+       }
+
+       homophraphyMatrix = cv::findHomography(objectPoints, scenePoints, CV_RANSAC); /* RANSAC rejects remaining outliers */
+
+       return Success;
+}
+
+float FeatureMatcher::getAffectingPart() const
+{
+       return m_affectingPart; /* fraction of matches kept during filtering, clamped to [0, 1] */
+}
+
+void FeatureMatcher::setAffectingPart(float affectingPart)
+{
+       m_affectingPart = std::max(0.f, std::min(1.f, affectingPart)); /* clamp to [0, 1] */
+}
+
+float FeatureMatcher::getTolerantError() const
+{
+       return m_tolerantError; /* allowed relative error of the required matches number, [0, 1] */
+}
+
+void FeatureMatcher::setTolerantError(float tolerantError)
+{
+       m_tolerantError = std::max(0.f, std::min(1.f, tolerantError)); /* FIX: was assigning m_affectingPart (copy-paste), leaving m_tolerantError uninitialized */
+}
+
+size_t FeatureMatcher::getMinimumMatchesNumber() const
+{
+       return m_minimumMatchesNumber; /* lower bound on matches kept by filtering in match() */
+}
+
+void FeatureMatcher::setMinimumMatchesNumber(size_t minimumMatchesNumber)
+{
+       m_minimumMatchesNumber = minimumMatchesNumber; /* any size_t is valid; no clamping needed */
+}
+
+} /* Image */
+} /* MediaVision */
diff --git a/mv_image/image/src/Features/FeaturePack.cpp b/mv_image/image/src/Features/FeaturePack.cpp
new file mode 100644 (file)
index 0000000..61364f5
--- /dev/null
@@ -0,0 +1,58 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Features/FeaturePack.h"
+
+#include <opencv/cv.h>
+
+namespace MediaVision {
+namespace Image {
+
+FeaturePack::FeaturePack() : /* empty pack: invalid types, no keypoints/descriptors, zero confidence */
+               m_keypointsType(KT_INVALID),
+               m_objectKeypoints(),
+               m_descriptorsType(DT_INVALID),
+               m_objectDescriptors(),
+               m_recognitionRate(0.f)
+{
+       ; /* NULL */
+}
+
+FeaturePack::FeaturePack(const FeaturePack& copy) :
+               m_keypointsType(copy.m_keypointsType),
+               m_objectKeypoints(copy.m_objectKeypoints),
+               m_descriptorsType(copy.m_descriptorsType),
+               m_objectDescriptors(copy.m_objectDescriptors.clone()), /* deep copy: cv::Mat's copy-ctor would share pixel data */
+               m_recognitionRate(copy.m_recognitionRate)
+{
+       ; /* NULL */
+}
+
+FeaturePack& FeaturePack::operator= (const FeaturePack& copy)
+{
+       if (this != &copy) { /* self-assignment guard: avoids cloning a Mat into itself */
+               m_keypointsType = copy.m_keypointsType;
+               m_objectKeypoints = copy.m_objectKeypoints;
+               m_descriptorsType = copy.m_descriptorsType;
+               m_objectDescriptors = copy.m_objectDescriptors.clone(); /* deep copy; plain assignment would share data */
+               m_recognitionRate = copy.m_recognitionRate;
+       }
+
+       return *this;
+}
+
+} /* Image */
+} /* MediaVision */
diff --git a/mv_image/image/src/Features/ORBExtractorFactory.cpp b/mv_image/image/src/Features/ORBExtractorFactory.cpp
new file mode 100644 (file)
index 0000000..2ac5d81
--- /dev/null
@@ -0,0 +1,145 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Features/ORBExtractorFactory.h"
+
+#include "ImageMathUtil.h"
+
+#include <opencv/cv.h>
+
+namespace MediaVision {
+namespace Image {
+ORBExtractorFactory::ORBExtractorFactory( /* factory producing ORB-based FeatureExtractor instances */
+               float scaleFactor,
+               size_t maximumFeaturesNumber)
+{
+       setScaleFactor(scaleFactor);
+       setMaximumFeaturesNumber(maximumFeaturesNumber);
+}
+
+cv::Ptr<FeatureExtractor> ORBExtractorFactory::buildFeatureExtractor()
+{
+       cv::Ptr<FeatureExtractor> featureExtractor(new FeatureExtractor());
+
+       cv::Ptr<cv::OrbFeatureDetector> detector(
+                       new cv::ORB(
+                               m_maximumFeaturesNumber,
+                               m_scaleFactor));
+
+       cv::Ptr<cv::OrbDescriptorExtractor> extractor = detector; /* the same cv::ORB object serves as detector and extractor */
+
+       featureExtractor->setFeatureDetector(detector, KT_ORB);
+       featureExtractor->setDescriptorExtractor(extractor, DT_ORB);
+       featureExtractor->setRecognitionRateMetric(computeRecognitionRate);
+
+       return featureExtractor;
+}
+
+float ORBExtractorFactory::getScaleFactor() const
+{
+       return m_scaleFactor; /* pyramid decimation ratio passed to cv::ORB */
+}
+
+void ORBExtractorFactory::setScaleFactor(float scaleFactor)
+{
+       m_scaleFactor = scaleFactor; /* NOTE(review): unvalidated; cv::ORB expects a ratio > 1 — confirm */
+}
+
+size_t ORBExtractorFactory::getMaximumFeaturesNumber() const
+{
+       return m_maximumFeaturesNumber; /* FIX: was returning m_scaleFactor (copy-paste, silent float->size_t conversion) */
+}
+
+void ORBExtractorFactory::setMaximumFeaturesNumber(size_t maximumFeaturesNumber)
+{
+       m_maximumFeaturesNumber = maximumFeaturesNumber; /* upper bound on keypoints cv::ORB will return */
+}
+
+float ORBExtractorFactory::computeRecognitionRate( /* heuristic [0..1] confidence from keypoint count and spatial spread */
+               const cv::Mat& image,
+               const std::vector<cv::KeyPoint>& keypoints)
+{
+       const size_t numberOfKeypoints = keypoints.size();
+
+       /* it is impossible to calculate the perspective transformation parameters
+        * if number of key points less than MinimumNumberOfFeatures (4) */
+       if (numberOfKeypoints < MinimumNumberOfFeatures)
+               return 0.f;
+
+       static const size_t xCellsNumber = 10u;
+       static const size_t yCellsNumber = 10u;
+
+       cv::Mat cells[xCellsNumber][yCellsNumber]; /* NOTE(review): these submatrix views are written but never read — dead work */
+       size_t accumulationCounter[xCellsNumber][yCellsNumber];
+
+       const size_t cellWidth = image.cols / xCellsNumber; /* NOTE(review): 0 for images < 10px wide -> div-by-zero below; confirm minimum size upstream */
+       const size_t cellHeight = image.rows / yCellsNumber;
+
+       for (size_t x = 0u; x < xCellsNumber; ++x) {
+               for (size_t y = 0u; y < yCellsNumber; ++y) {
+                       cells[x][y] = image(cv::Rect(
+                                       x * cellWidth,
+                                       y * cellHeight,
+                                       cellWidth,
+                                       cellHeight));
+
+                       accumulationCounter[x][y] = 0;
+               }
+       }
+
+       for (size_t i = 0u; i < numberOfKeypoints; ++i) { /* histogram the keypoints over the 10x10 grid */
+               size_t xCellIdx = keypoints[i].pt.x / cellWidth;
+               if (xCellIdx >= xCellsNumber)
+                       xCellIdx = xCellsNumber - 1;
+
+               size_t yCellIdx = keypoints[i].pt.y / cellHeight;
+               if (yCellIdx >= yCellsNumber)
+                       yCellIdx = yCellsNumber - 1;
+
+               ++(accumulationCounter[xCellIdx][yCellIdx]);
+       }
+
+       const float exceptedNumber = numberOfKeypoints / /* mean count per cell ("excepted" reads as a typo for "expected") */
+                               (float)(xCellsNumber * yCellsNumber);
+
+       float distributedEvaluation = 0.f;
+
+       for (size_t x = 0u; x < xCellsNumber; ++x) {
+               for (size_t y = 0u; y < yCellsNumber; ++y) {
+                       distributedEvaluation += (accumulationCounter[x][y] - exceptedNumber) * /* chi-square-style deviation from uniform spread */
+                                       (accumulationCounter[x][y] - exceptedNumber) / exceptedNumber;
+               }
+       }
+
+       float maximumDistributedEvaluation = (xCellsNumber * yCellsNumber - 1) * /* worst case: all keypoints in a single cell */
+                                                                                       exceptedNumber;
+
+       maximumDistributedEvaluation += (numberOfKeypoints - exceptedNumber) *
+                       (numberOfKeypoints - exceptedNumber) / exceptedNumber;
+
+       distributedEvaluation = 1 - /* normalize: 1 = perfectly uniform, 0 = fully clustered */
+                               (distributedEvaluation / maximumDistributedEvaluation);
+
+       /* Exponentiation to find an approximate confidence value based on the
+        * number of key points on the image. */
+       const float cardinalityEvaluation = pow(-0.9, numberOfKeypoints - 3) + 1.0f; /* alternating decay toward 1 as keypoints grow */
+
+       /* Result metric */
+       return distributedEvaluation * cardinalityEvaluation;
+}
+
+} /* Image */
+} /* MediaVision */
index a058965..0f31d66 100644 (file)
 
 namespace MediaVision {
 namespace Image {
-FeaturesExtractingParams::FeaturesExtractingParams(
-                                                       double scaleFactor,
-                                                       int maximumFeaturesNumber) :
-       mScaleFactor(scaleFactor),
-       mMaximumFeaturesNumber(maximumFeaturesNumber)
-{
-       ; /* NULL */
-}
-
 FeaturesExtractingParams::FeaturesExtractingParams() :
-       mScaleFactor(1.2),
-       mMaximumFeaturesNumber(800)
+       mKeypointType(KT_INVALID),
+       mDescriptorType(DT_INVALID)
 {
        ; /* NULL */
 }
@@ -37,10 +28,10 @@ FeaturesExtractingParams::FeaturesExtractingParams() :
 RecognitionParams::RecognitionParams(
                                        int minMatchesNumber,
                                        double requiredMatchesPart,
-                                       double allowableMatchesPartError) :
+                                       double tolerantMatchesPartError) :
        mMinMatchesNumber(minMatchesNumber),
        mRequiredMatchesPart(requiredMatchesPart),
-       mAllowableMatchesPartError(allowableMatchesPartError)
+       mTolerantMatchesPartError(tolerantMatchesPartError)
 {
        ; /* NULL */
 }
@@ -48,18 +39,22 @@ RecognitionParams::RecognitionParams(
 RecognitionParams::RecognitionParams() :
         mMinMatchesNumber(0),
         mRequiredMatchesPart(1.0),
-        mAllowableMatchesPartError(0.0)
+        mTolerantMatchesPartError(0.0)
 {
        ; /* NULL */
 }
 
 StabilizationParams::StabilizationParams(
+                                       bool isEnabled,
                                        int historyAmount,
-                                       double allowableShift,
+                                       double tolerantShift,
+                                       double tolerantShiftExtra,
                                        double stabilizationSpeed,
                                        double stabilizationAcceleration) :
+       mIsEnabled(isEnabled),
        mHistoryAmount(historyAmount),
-       mAllowableShift(allowableShift),
+       mTolerantShift(tolerantShift),
+       mTolerantShiftExtra(tolerantShiftExtra),
        mStabilizationSpeed(stabilizationSpeed),
        mStabilizationAcceleration(stabilizationAcceleration)
 {
@@ -67,8 +62,10 @@ StabilizationParams::StabilizationParams(
 }
 
 StabilizationParams::StabilizationParams() :
+       mIsEnabled(false),
        mHistoryAmount(1),
-       mAllowableShift(0.0),
+       mTolerantShift(0.0),
+       mTolerantShiftExtra(0.0),
        mStabilizationSpeed(0.0),
        mStabilizationAcceleration(1.0)
 {
index 0da2dbc..f8d7890 100644 (file)
@@ -40,10 +40,15 @@ float getTriangleArea(
 
        const float semiperimeter = (distances[0] + distances[1] + distances[2]) / 2.0f;
 
-       return sqrt(semiperimeter *
+       const float res2x = semiperimeter *
                        (semiperimeter - distances[0]) *
                        (semiperimeter - distances[1]) *
-                       (semiperimeter - distances[2]));
+                       (semiperimeter - distances[2]);
+
+       if (res2x < 0.f)
+               return 0.f;
+
+       return sqrt(res2x);
 }
 
 float getQuadrangleArea(const cv::Point2f points[NumberOfQuadrangleCorners])
@@ -52,5 +57,86 @@ float getQuadrangleArea(const cv::Point2f points[NumberOfQuadrangleCorners])
                        getTriangleArea(points[0], points[3], points[2]);
 }
 
+bool checkAccessory( /* point-in-polygon test (ray casting / PNPoly) */
+               const cv::Point2f& point,
+               const std::vector<cv::Point2f>& region)
+{
+       if (region.size() < 3) /* fewer than 3 vertices is not a polygon */
+               return false;
+
+       bool insideFlag = false;
+       const size_t numberOfContourPoints = region.size();
+
+       for (size_t i = 0u, j = numberOfContourPoints - 1; i < numberOfContourPoints; j = i++) { /* j trails i around the contour */
+               if (((region[i].y > point.y) != (region[j].y > point.y)) &&
+                       ((float) point.x < (float)
+                       (region[j].x - region[i].x) * (point.y - region[i].y) /
+                       (region[j].y - region[i].y) + region[i].x)) {
+                       insideFlag = !insideFlag; /* each edge crossing toggles inside/outside parity */
+                       }
+       }
+
+       return insideFlag;
+}
+
+void catRect(cv::Rect& rectange, const cv::Size& maxSize)
+{
+       if (rectange.width < 0) {
+               rectange.x += rectange.width;
+               rectange.width *= -1;
+       }
+
+       if (rectange.height < 0) {
+               rectange.y += rectange.height;
+               rectange.height *= -1;
+       }
+
+       if (rectange.x > maxSize.width || rectange.y > maxSize.height) {
+               rectange.x = 0;
+               rectange.y = 0;
+               rectange.width = 0;
+               rectange.height = 0;
+               return;
+       }
+
+       if (rectange.x < 0) {
+               rectange.width += rectange.x;
+               rectange.x = 0;
+       }
+
+       if (rectange.y < 0) {
+               rectange.height += rectange.y;
+               rectange.y = 0;
+       }
+
+       if (rectange.x + rectange.width > maxSize.width)
+               rectange.width = maxSize.width - rectange.x;
+
+       if (rectange.y + rectange.height > maxSize.height)
+               rectange.height = maxSize.height - rectange.y;
+}
+
+std::vector<cv::Point2f> contourResize( /* scales contour points toward/away from their centroid */
+               const std::vector<cv::Point2f>& roi,
+               float scalingCoefficient)
+{
+       const size_t numberOfContourPoints = roi.size();
+       cv::Point2f centre(0, 0);
+       for (size_t i = 0; i < numberOfContourPoints; ++i) {
+               centre.x += roi[i].x;
+               centre.y += roi[i].y;
+       }
+       centre.x /= numberOfContourPoints; /* NOTE(review): NaN centre for empty roi — harmless, result is empty too */
+       centre.y /= numberOfContourPoints;
+
+       std::vector<cv::Point2f> result(numberOfContourPoints);
+       for (size_t i = 0; i < numberOfContourPoints; ++i) {
+               result[i].x = (roi[i].x - centre.x) * scalingCoefficient + centre.x; /* linear interpolation about the centroid */
+               result[i].y = (roi[i].y - centre.y) * scalingCoefficient + centre.y;
+       }
+
+       return result;
+}
+
 } /* Image */
 } /* MediaVision */
diff --git a/mv_image/image/src/ImageObject.cpp b/mv_image/image/src/ImageObject.cpp
deleted file mode 100644 (file)
index a562605..0000000
+++ /dev/null
@@ -1,446 +0,0 @@
-/**
- * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ImageObject.h"
-
-#include "ImageMathUtil.h"
-
-#include <app_common.h>
-
-#include "mv_private.h"
-#include "mv_common.h"
-
-#include <opencv/cv.h>
-#include <opencv2/features2d/features2d.hpp>
-
-#include <fstream>
-#include <unistd.h>
-
-namespace MediaVision {
-namespace Image {
-ImageObject::ImageObject() :
-       m_isEmpty(true),
-       m_isLabeled(false),
-       m_label(0),
-       m_recognitionRate(0.f)
-{
-       ; /* NULL */
-}
-
-ImageObject::ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params) :
-       m_isEmpty(true),
-       m_isLabeled(false),
-       m_label(0),
-       m_recognitionRate(0.f)
-{
-       fill(image, params);
-}
-
-ImageObject::ImageObject(const ImageObject& copy) :
-       m_isEmpty(copy.m_isEmpty),
-       m_isLabeled(copy.m_isLabeled),
-       m_label(copy.m_label),
-       m_boundingContour(copy.m_boundingContour),
-       m_objectKeypoints(copy.m_objectKeypoints),
-       m_objectDescriptors(copy.m_objectDescriptors.clone()),
-       m_recognitionRate(copy.m_recognitionRate)
-{
-       ; /* NULL */
-}
-
-ImageObject& ImageObject::operator=(const ImageObject& copy)
-{
-       if (this != &copy) {
-               m_isEmpty = copy.m_isEmpty;
-               m_isLabeled = copy.m_isLabeled;
-               m_label = copy.m_label;
-               m_boundingContour = copy.m_boundingContour;
-               m_objectKeypoints = copy.m_objectKeypoints;
-               m_objectDescriptors = copy.m_objectDescriptors.clone();
-               m_recognitionRate = copy.m_recognitionRate;
-       }
-       return *this;
-}
-
-ImageObject::~ImageObject()
-{
-       ; /* NULL */
-}
-
-void ImageObject::fill(const cv::Mat& image, const FeaturesExtractingParams& params)
-{
-       m_isEmpty = false;
-       m_boundingContour.resize(NumberOfQuadrangleCorners);
-
-       m_boundingContour[0].x = 0.f;
-       m_boundingContour[0].y = 0.f;
-
-       m_boundingContour[1].x = image.cols;
-       m_boundingContour[1].y = 0.f;
-
-       m_boundingContour[2].x = image.cols;
-       m_boundingContour[2].y = image.rows;
-
-       m_boundingContour[3].x = 0.f;
-       m_boundingContour[3].y = image.rows;
-
-       extractFeatures(image, params);
-
-       computeRecognitionRate(image);
-
-       LOGI("[%s] Image object is filled.", __FUNCTION__);
-}
-
-bool ImageObject::fill(const cv::Mat& image, const cv::Rect& boundingBox,
-               const FeaturesExtractingParams& params)
-{
-       if ((0 > boundingBox.x) || (0 >= boundingBox.width) ||
-               (0 > boundingBox.y) || (0 >= boundingBox.height) ||
-               (image.cols < (boundingBox.x + boundingBox.width)) ||
-               (image.rows < (boundingBox.y + boundingBox.height))) {
-                       LOGE("[%s] Invalid ROI.", __FUNCTION__);
-                       return false;
-       }
-
-       m_isEmpty = false;
-       m_boundingContour.resize(NumberOfQuadrangleCorners);
-
-       m_boundingContour[0].x = 0.f;
-       m_boundingContour[0].y = 0.f;
-
-       m_boundingContour[1].x = boundingBox.width;
-       m_boundingContour[1].y = 0.f;
-
-       m_boundingContour[2].x = boundingBox.width;
-       m_boundingContour[2].y = boundingBox.height;
-
-       m_boundingContour[3].x = 0.f;
-       m_boundingContour[3].y = boundingBox.height;
-
-       cv::Mat objectImage(image, boundingBox);
-
-       extractFeatures(objectImage, params);
-
-       computeRecognitionRate(image);
-
-       LOGI("[%s] Image object is filled.", __FUNCTION__);
-
-       return true;
-}
-
-void ImageObject::extractFeatures(const cv::Mat& image,
-               const FeaturesExtractingParams& params)
-{
-       cv::ORB orb(params.mMaximumFeaturesNumber, params.mScaleFactor);
-
-       if (image.cols < MinWidth || image.rows < MinHeight) {
-               LOGW("[%s] Area is too small, recognition rate is 0.", __FUNCTION__);
-               m_objectKeypoints.clear();
-               m_objectDescriptors = cv::Mat();
-       } else {
-               orb.detect(image, m_objectKeypoints);
-               orb.compute(image, m_objectKeypoints, m_objectDescriptors);
-       }
-}
-
-void ImageObject::computeRecognitionRate(const cv::Mat& image)
-{
-       const size_t numberOfKeypoints = m_objectKeypoints.size();
-
-       /* it is impossible to calculate the perspective transformation parameters
-        * if number of key points less than MinimumNumberOfFeatures (4)
-        */
-       if (numberOfKeypoints < MinimumNumberOfFeatures) {
-               m_recognitionRate = 0.f;
-               return;
-       }
-
-       static const size_t xCellsNumber = 10u;
-       static const size_t yCellsNumber = 10u;
-
-       cv::Mat cells[xCellsNumber][yCellsNumber];
-       size_t accumulationCounter[xCellsNumber][yCellsNumber];
-
-       const size_t cellWidth = image.cols / xCellsNumber;
-       const size_t cellHeight = image.rows / yCellsNumber;
-
-       for (size_t x = 0u; x < xCellsNumber; ++x) {
-               for (size_t y = 0u; y < yCellsNumber; ++y) {
-                       cells[x][y] = image(cv::Rect(
-                                                       x * cellWidth,
-                                                       y * cellHeight,
-                                                       cellWidth,
-                                                       cellHeight));
-
-                       accumulationCounter[x][y] = 0;
-               }
-       }
-
-       for (size_t i = 0u; i < numberOfKeypoints; ++i) {
-               size_t xCellIdx = m_objectKeypoints[i].pt.x / cellWidth;
-               if (xCellIdx >= xCellsNumber) {
-                       xCellIdx = xCellsNumber - 1;
-               }
-               size_t yCellIdx = m_objectKeypoints[i].pt.y / cellHeight;
-               if (yCellIdx >= yCellsNumber) {
-                       yCellIdx = yCellsNumber - 1;
-               }
-               ++(accumulationCounter[xCellIdx][yCellIdx]);
-       }
-
-               const float exceptedNumber = numberOfKeypoints /
-                       (float)(xCellsNumber * yCellsNumber);
-
-       float distributedEvaluation = 0.f;
-
-       for (size_t x = 0u; x < xCellsNumber; ++x) {
-               for (size_t y = 0u; y < yCellsNumber; ++y) {
-                       distributedEvaluation += (accumulationCounter[x][y] - exceptedNumber) *
-                               (accumulationCounter[x][y] - exceptedNumber) / exceptedNumber;
-               }
-       }
-
-       float maximumDistributedEvaluation = (xCellsNumber * yCellsNumber - 1) *
-                       exceptedNumber;
-
-       maximumDistributedEvaluation += (numberOfKeypoints - exceptedNumber) *
-                       (numberOfKeypoints - exceptedNumber) / exceptedNumber;
-
-       distributedEvaluation = 1 -
-                       (distributedEvaluation / maximumDistributedEvaluation);
-
-       /* Exponentiation to find an approximate confidence value based on the
-        * number of key points on the image.
-        */
-       const float cardinalityEvaluation = pow(-0.9, numberOfKeypoints - 3) + 1.0f;
-
-       m_recognitionRate =
-                       distributedEvaluation *
-                       cardinalityEvaluation;
-}
-
-float ImageObject::getRecognitionRate(void) const
-{
-       return m_recognitionRate;
-}
-
-bool ImageObject::isEmpty() const
-{
-       return m_isEmpty;
-}
-
-void ImageObject::setLabel(int label)
-{
-       m_isLabeled = true;
-       m_label = label;
-}
-
-bool ImageObject::getLabel(int& label) const
-{
-       if (!m_isLabeled) {
-               LOGW("[%s] Image hasn't label.", __FUNCTION__);
-               return false;
-       }
-       label = m_label;
-       return true;
-}
-
-int ImageObject::save(const char *fileName) const
-{
-       std::string prefix_path = std::string(app_get_data_path());
-       LOGD("prefix_path: %s", prefix_path.c_str());
-
-       std::string filePath;
-       filePath += prefix_path;
-       filePath += fileName;
-
-       /* check the directory is available */
-       std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
-       if (access(prefix_path_check.c_str(), F_OK)) {
-               LOGE("Can't save image object. Path[%s] doesn't existed.", prefix_path_check.c_str());
-
-               return MEDIA_VISION_ERROR_INVALID_PATH;
-       }
-
-       std::ofstream out;
-
-       out.open(filePath.c_str());
-
-       if (!out.is_open()) {
-               LOGE("[%s] Can't create/open file.", __FUNCTION__);
-               return MEDIA_VISION_ERROR_PERMISSION_DENIED;
-       }
-
-       out << (*this);
-
-       out.close();
-       LOGI("[%s] Image object is saved.", __FUNCTION__);
-
-       return MEDIA_VISION_ERROR_NONE;
-}
-
-int ImageObject::load(const char *fileName)
-{
-       /* find directory */
-       std::string prefix_path = std::string(app_get_data_path());
-       LOGD("prefix_path: %s", prefix_path.c_str());
-
-       std::string filePath;
-       filePath += prefix_path;
-       filePath += fileName;
-
-       if (access(filePath.c_str(), F_OK)) {
-               LOGE("Can't load image object model. Path[%s] doesn't existed.", filePath.c_str());
-
-               return MEDIA_VISION_ERROR_INVALID_PATH;
-       }
-
-       std::ifstream in;
-       in.open(filePath.c_str());
-
-       if (!in.is_open()) {
-               LOGE("[%s] Can't open file.", __FUNCTION__);
-               return MEDIA_VISION_ERROR_PERMISSION_DENIED;
-       }
-
-       in >> (*this);
-
-       if (!in.good()) {
-               LOGE("[%s] Unexpected end of file.", __FUNCTION__);
-               return MEDIA_VISION_ERROR_PERMISSION_DENIED;
-       }
-
-       in.close();
-       LOGI("[%s] Image object is loaded.", __FUNCTION__);
-
-       return MEDIA_VISION_ERROR_NONE;
-}
-
-std::ostream& operator << (std::ostream& os, const ImageObject& obj)
-{
-       os << std::setprecision(7);
-
-       os << obj.m_isEmpty << '\n';
-       os << obj.m_isLabeled << '\n';
-       os << obj.m_label << '\n';
-
-       os << obj.m_boundingContour.size() << '\n';
-       for (size_t pointNum = 0u; pointNum < obj.m_boundingContour.size(); ++pointNum) {
-               os << obj.m_boundingContour[pointNum].x << ' ';
-               os << obj.m_boundingContour[pointNum].y << '\n';
-       }
-
-       os << obj.m_objectKeypoints.size() << '\n';
-       for (size_t keypointNum = 0u; keypointNum < obj.m_objectKeypoints.size(); ++keypointNum) {
-               os << obj.m_objectKeypoints[keypointNum].pt.x << ' ';
-               os << obj.m_objectKeypoints[keypointNum].pt.y << ' ';
-               os << obj.m_objectKeypoints[keypointNum].size << ' ';
-               os << obj.m_objectKeypoints[keypointNum].response << ' ';
-               os << obj.m_objectKeypoints[keypointNum].angle << ' ';
-               os << obj.m_objectKeypoints[keypointNum].octave << ' ';
-               os << obj.m_objectKeypoints[keypointNum].class_id << '\n';
-       }
-
-       os << obj.m_objectDescriptors.rows << ' ';
-       os << obj.m_objectDescriptors.cols << ' ';
-       os << obj.m_objectDescriptors.type() << '\n';
-       for (int descriptorNum = 0; descriptorNum < obj.m_objectDescriptors.rows;
-               ++descriptorNum) {
-               for (int featureNum = 0; featureNum < obj.m_objectDescriptors.cols;
-                       ++featureNum, os << '\n') {
-                       os << (int)obj.m_objectDescriptors.at<uchar>(descriptorNum, featureNum) << ' ';
-               }
-       }
-
-       return os;
-}
-
-std::istream& operator >> (std::istream& is, ImageObject& obj)
-{
-       size_t numberOfContourPoints = 0u;
-       size_t numberOfKeyPoints = 0u;
-       int rows = 0, cols = 0;
-       int descriptorType = 0;
-
-       ImageObject temporal;
-
-#define MEDIA_VISION_CHECK_IFSTREAM \
-       if (!is.good()) { \
-               return is; \
-       }
-
-       is >> temporal.m_isEmpty;
-       MEDIA_VISION_CHECK_IFSTREAM
-       is >> temporal.m_isLabeled;
-       MEDIA_VISION_CHECK_IFSTREAM
-       is >> temporal.m_label;
-       MEDIA_VISION_CHECK_IFSTREAM
-
-       is >> numberOfContourPoints;
-       MEDIA_VISION_CHECK_IFSTREAM
-
-       temporal.m_boundingContour.resize(numberOfContourPoints);
-       for (size_t pointNum = 0; pointNum < temporal.m_boundingContour.size(); ++pointNum) {
-               is >> temporal.m_boundingContour[pointNum].x;
-               MEDIA_VISION_CHECK_IFSTREAM
-               is >> temporal.m_boundingContour[pointNum].y;
-               MEDIA_VISION_CHECK_IFSTREAM
-       }
-
-       is >> numberOfKeyPoints;
-       temporal.m_objectKeypoints.resize(numberOfKeyPoints);
-       for (size_t keypointNum = 0; keypointNum < temporal.m_objectKeypoints.size(); ++keypointNum) {
-               is >> temporal.m_objectKeypoints[keypointNum].pt.x;
-               MEDIA_VISION_CHECK_IFSTREAM
-               is >> temporal.m_objectKeypoints[keypointNum].pt.y;
-               MEDIA_VISION_CHECK_IFSTREAM
-               is >> temporal.m_objectKeypoints[keypointNum].size;
-               MEDIA_VISION_CHECK_IFSTREAM
-               is >> temporal.m_objectKeypoints[keypointNum].response;
-               MEDIA_VISION_CHECK_IFSTREAM
-               is >> temporal.m_objectKeypoints[keypointNum].angle;
-               MEDIA_VISION_CHECK_IFSTREAM
-               is >> temporal.m_objectKeypoints[keypointNum].octave;
-               MEDIA_VISION_CHECK_IFSTREAM
-               is >> temporal.m_objectKeypoints[keypointNum].class_id;
-               MEDIA_VISION_CHECK_IFSTREAM
-       }
-
-       is >> rows;
-       MEDIA_VISION_CHECK_IFSTREAM
-       is >> cols;
-       MEDIA_VISION_CHECK_IFSTREAM
-       is >> descriptorType;
-       MEDIA_VISION_CHECK_IFSTREAM
-       temporal.m_objectDescriptors = cv::Mat(rows, cols, descriptorType);
-       int value = 0;
-       for (int descriptorNum = 0; descriptorNum < temporal.m_objectDescriptors.rows; ++descriptorNum) {
-               for (int featureNum = 0; featureNum < temporal.m_objectDescriptors.cols; ++featureNum) {
-                       is >> value;
-                       MEDIA_VISION_CHECK_IFSTREAM
-                       temporal.m_objectDescriptors.at<uchar>(descriptorNum, featureNum) = (uchar)value;
-               }
-       }
-
-#undef MEDIA_VISION_CHECK_IFSTREAM
-
-       obj = temporal;
-
-       return is;
-}
-
-} /* Image */
-} /* MediaVision */
diff --git a/mv_image/image/src/ImageTracker.cpp b/mv_image/image/src/ImageTracker.cpp
deleted file mode 100644 (file)
index 400205c..0000000
+++ /dev/null
@@ -1,332 +0,0 @@
-/**
- * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ImageTracker.h"
-
-#include "ImageMathUtil.h"
-#include "ImageRecognizer.h"
-#include "ImageTrackingModel.h"
-#include "ImageContourStabilizator.h"
-
-#include "mv_private.h"
-
-#include <pthread.h>
-
-namespace MediaVision {
-namespace Image {
-ImageTracker::ImageTracker(const TrackingParams& trackingParams) :
-       m_trackingParams(trackingParams)
-{
-       ; /* NULL */
-}
-
-void ImageTracker::track(const cv::Mat& frame, ImageTrackingModel& target)
-{
-       ImageTrackingModel::State currentState = ImageTrackingModel::Undetected;
-
-       while (pthread_mutex_trylock(&target.m_globalGuard) != 0) {
-               pthread_spin_lock(&target.m_stateGuard);
-               currentState = target.m_state;
-               pthread_spin_unlock(&target.m_stateGuard);
-
-               if (ImageTrackingModel::InProcess == currentState) {
-                       LOGI("[%s] Calling is skipped. Object is recognizing.", __FUNCTION__);
-                       return;
-               }
-       }
-
-       pthread_spin_lock(&target.m_stateGuard);
-       currentState = target.m_state;
-       pthread_spin_unlock(&target.m_stateGuard);
-
-       if (ImageTrackingModel::Invalid == currentState) {
-               pthread_mutex_unlock(&target.m_globalGuard);
-               LOGE("[%s] Tracking model is invalid.", __FUNCTION__);
-               return;
-       }
-
-       switch (target.m_state) {
-       case ImageTrackingModel::Appeared:
-       case ImageTrackingModel::Tracked: {
-               pthread_spin_lock(&target.m_stateGuard);
-               target.m_state = ImageTrackingModel::InProcess;
-               pthread_spin_unlock(&target.m_stateGuard);
-
-               trackDetectedObject(frame, target);
-               break;
-       }
-       case ImageTrackingModel::Undetected: {
-               pthread_spin_lock(&target.m_stateGuard);
-               target.m_state = ImageTrackingModel::InProcess;
-               pthread_spin_unlock(&target.m_stateGuard);
-
-               trackUndetectedObject(frame, target);
-
-               /* Recognition thread is started. Don't use target here, just exit! */
-               return;
-       }
-       case ImageTrackingModel::InProcess:
-       default: {
-               /* Abnormal behaviour:
-                * tracking model state is InProcess but globalGuard is not locked
-                */
-               LOGE("[%s] Abnormal behaviour. Tracking model status is"
-                                       "\"InProgress\" but it is not in progress.", __FUNCTION__);
-
-               pthread_spin_lock(&target.m_stateGuard);
-               if (target.m_recognitionObject.isEmpty()) {
-                       target.m_state = ImageTrackingModel::Invalid;
-                       LOGI("[%s] Tracking model status is changed on \"Invalid\"", __FUNCTION__);
-               } else {
-                       target.m_state = ImageTrackingModel::Undetected;
-                       LOGI("[%s] Tracking model status is changed on \"Undetected\"", __FUNCTION__);
-               }
-               pthread_spin_unlock(&target.m_stateGuard);
-
-               pthread_mutex_unlock(&target.m_globalGuard);
-               break;
-       }
-       }
-}
-
-void ImageTracker::trackDetectedObject(
-               const cv::Mat& frame,
-               ImageTrackingModel& target)
-{
-       cv::Rect expectedArea = computeExpectedArea(target, frame.size());
-
-       std::vector<cv::Point2f> resultContour;
-
-       ImageRecognizer recognizer(
-                       frame(expectedArea),
-                       m_trackingParams.mFramesFeaturesExtractingParams);
-
-       const bool isRecognized = recognizer.recognize(
-                       target.m_recognitionObject,
-                        m_trackingParams.mRecognitionParams,
-               resultContour);
-
-       if (isRecognized) {
-               for (size_t pointIdx = 0; pointIdx < resultContour.size(); ++pointIdx) {
-                       resultContour[pointIdx].x += expectedArea.x;
-                       resultContour[pointIdx].y += expectedArea.y;
-               }
-
-               if (m_trackingParams.mStabilizationParams.mHistoryAmount > 0) {
-                       target.m_stabilizator.stabilize(
-                                       resultContour,
-                                       m_trackingParams.mStabilizationParams);
-               }
-
-               target.m_stabilizator.stabilize(
-                               resultContour,
-                               m_trackingParams.mStabilizationParams);
-
-               pthread_spin_lock(&target.m_lastLocationGuard);
-               target.m_lastLocation = resultContour;
-               pthread_spin_unlock(&target.m_lastLocationGuard);
-
-               pthread_spin_lock(&target.m_stateGuard);
-               target.m_state = ImageTrackingModel::Tracked;
-               pthread_spin_unlock(&target.m_stateGuard);
-
-               LOGI("[%s] Object is successfully tracked.", __FUNCTION__);
-       } else {
-               target.m_stabilizator.reset();
-
-               pthread_spin_lock(&target.m_stateGuard);
-               target.m_state = ImageTrackingModel::Undetected;
-               pthread_spin_unlock(&target.m_stateGuard);
-
-               LOGI("[%s] Object is lost.", __FUNCTION__);
-       }
-
-       pthread_mutex_unlock(&target.m_globalGuard);
-}
-
-void *ImageTracker::recognitionThreadFunc(void *recognitionInfo)
-{
-       if (NULL == recognitionInfo) {
-               return NULL;
-       }
-
-       RecognitionInfo *recogInfo = (RecognitionInfo*)recognitionInfo;
-
-       std::vector<cv::Point2f> resultContour;
-
-       ImageRecognizer recognizer(
-                       recogInfo->mFrame,
-                       recogInfo->mSceneFeaturesExtractingParams);
-
-       bool isRecognized = recognizer.recognize(
-                       recogInfo->mpTarget->m_recognitionObject,
-                       recogInfo->mRecognitionParams,
-                       resultContour);
-
-       if (isRecognized) {
-               recogInfo->mpTarget->m_stabilizator.reset();
-
-               pthread_spin_lock(&(recogInfo->mpTarget->m_lastLocationGuard));
-               recogInfo->mpTarget->m_lastLocation = resultContour;
-               pthread_spin_unlock(&(recogInfo->mpTarget->m_lastLocationGuard));
-
-               pthread_spin_lock(&(recogInfo->mpTarget->m_stateGuard));
-               recogInfo->mpTarget->m_state = ImageTrackingModel::Appeared;
-               pthread_spin_unlock(&(recogInfo->mpTarget->m_stateGuard));
-       } else {
-               pthread_spin_lock(&(recogInfo->mpTarget->m_stateGuard));
-               recogInfo->mpTarget->m_state = ImageTrackingModel::Undetected;
-               pthread_spin_unlock(&(recogInfo->mpTarget->m_stateGuard));
-       }
-
-       recogInfo->mpTarget->m_recognitionThread = 0;
-
-       pthread_mutex_unlock(&(recogInfo->mpTarget->m_globalGuard));
-
-       delete recogInfo;
-
-       return NULL;
-}
-
-void ImageTracker::trackUndetectedObject(
-               const cv::Mat& frame,
-               ImageTrackingModel& target)
-{
-       RecognitionInfo *recognitionInfo = new RecognitionInfo;
-
-       recognitionInfo->mFrame = frame.clone();
-       recognitionInfo->mpTarget = &target;
-
-       recognitionInfo->mRecognitionParams =
-                       m_trackingParams.mRecognitionParams;
-       recognitionInfo->mSceneFeaturesExtractingParams =
-                       m_trackingParams.mFramesFeaturesExtractingParams;
-
-       if (target.m_recognitionThread) {
-               /* Abnormal behaviour:
-                * Recognition thread isn't finished but guardian mutex is unlocked
-                */
-               LOGE("[%s] Abnormal behaviour. Recognition thread isn't finished but"
-                                       "guardian mutex is unlocked.", __FUNCTION__);
-
-               LOGI("[%s] Try to wait recognition thread.", __FUNCTION__);
-               pthread_join(target.m_recognitionThread, NULL);
-               target.m_recognitionThread = 0;
-               LOGI("[%s] Recognition thread is finished.", __FUNCTION__);
-       }
-
-       const int err = pthread_create(
-                       &target.m_recognitionThread,
-                       NULL,
-                       recognitionThreadFunc,
-                       recognitionInfo);
-
-       if (0 == err) {
-               LOGI("[%s] Recognition thread is started.", __FUNCTION__);
-               /* Recognition thread is started. Don't use target here, just exit! */
-               return;
-       }
-       LOGE("[%s] Recognition thread creation is failed.", __FUNCTION__);
-
-       pthread_spin_lock(&target.m_stateGuard);
-       if (target.m_recognitionObject.isEmpty()) {
-               target.m_state = ImageTrackingModel::Invalid;
-               LOGI("[%s] Tracking model status is changed on \"Invalid\"", __FUNCTION__);
-       } else {
-               target.m_state = ImageTrackingModel::Undetected;
-               LOGI("[%s] Tracking model status is changed on \"Undetected\"", __FUNCTION__);
-       }
-       pthread_spin_unlock(&target.m_stateGuard);
-
-       pthread_mutex_unlock(&target.m_globalGuard);
-}
-
-cv::Rect ImageTracker::computeExpectedArea(
-        const ImageTrackingModel& target,
-        const cv::Size& frameSize)
-{
-       if (target.m_state == ImageTrackingModel::Appeared) {
-               LOGI("[%s] Expected area for appeared object is full frame.", __FUNCTION__);
-               return cv::Rect(0, 0, frameSize.width, frameSize.height);
-       }
-
-       if (target.m_lastLocation.empty()) {
-               LOGW("[%s] Can't compute expected area for object without last"
-                               "location.", __FUNCTION__);
-               return cv::Rect(0, 0, 0, 0);
-       }
-
-       cv::Point2f ltCorner(target.m_lastLocation[0]);
-       cv::Point2f rbCorner(target.m_lastLocation[0]);
-
-       const size_t contourPointsNumber = target.m_lastLocation.size();
-
-       for (size_t pointNum = 1; pointNum < contourPointsNumber; ++pointNum) {
-               if (ltCorner.x > target.m_lastLocation[pointNum].x) {
-                       ltCorner.x = target.m_lastLocation[pointNum].x;
-               } else if (rbCorner.x < target.m_lastLocation[pointNum].x) {
-                       rbCorner.x = target.m_lastLocation[pointNum].x;
-               }
-
-               if (ltCorner.y > target.m_lastLocation[pointNum].y) {
-                       ltCorner.y = target.m_lastLocation[pointNum].y;
-               } else if (rbCorner.y < target.m_lastLocation[pointNum].y) {
-                       rbCorner.y = target.m_lastLocation[pointNum].y;
-               }
-       }
-
-       cv::Point2f center(
-                       (ltCorner.x + rbCorner.x) / 2.0f,
-                       (ltCorner.y + rbCorner.y) / 2.0f);
-
-       cv::Size2f halfSize(
-                       (center.x - ltCorner.x) * (1 + m_trackingParams.mExpectedOffset),
-                       (center.y - ltCorner.y) * (1 + m_trackingParams.mExpectedOffset));
-
-       cv::Rect expectedArea(
-                       center.x - halfSize.width, center.y - halfSize.height,
-                       halfSize.width * 2, halfSize.height * 2);
-
-       if (expectedArea.x < 0) {
-               expectedArea.width += expectedArea.x;
-               expectedArea.x = 0;
-       }
-
-       if (expectedArea.y < 0) {
-               expectedArea.height += expectedArea.y;
-               expectedArea.y = 0;
-       }
-
-       if (expectedArea.x + expectedArea.width > frameSize.width) {
-               expectedArea.width = frameSize.width - expectedArea.x;
-       }
-
-       if (expectedArea.y + expectedArea.height > frameSize.height) {
-               expectedArea.height = frameSize.height - expectedArea.y;
-       }
-
-       if (expectedArea.width <= 0 || expectedArea.height <= 0) {
-               expectedArea.x = 0;
-               expectedArea.y = 0;
-               expectedArea.width = 0;
-               expectedArea.height = 0;
-       }
-
-       return expectedArea;
-}
-
-} /* Image */
-} /* MediaVision */
diff --git a/mv_image/image/src/ImageTrackingModel.cpp b/mv_image/image/src/ImageTrackingModel.cpp
deleted file mode 100644 (file)
index 014a629..0000000
+++ /dev/null
@@ -1,340 +0,0 @@
-/**
- * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ImageTrackingModel.h"
-
-#include <app_common.h>
-
-#include "mv_private.h"
-#include "mv_common.h"
-
-#include <fstream>
-#include <unistd.h>
-
-namespace MediaVision {
-namespace Image {
-ImageTrackingModel::ImageTrackingModel() :
-       m_recognitionObject(),
-       m_lastLocation(0),
-       m_state(Invalid),
-       m_recognitionThread(0)
-{
-       pthread_mutex_init(&m_globalGuard, NULL);
-       pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED);
-       pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED);
-}
-
-ImageTrackingModel::ImageTrackingModel(const ImageObject& recognitionObject) :
-       m_recognitionObject(recognitionObject),
-       m_lastLocation(0),
-       m_state(Invalid),
-       m_recognitionThread(0)
-{
-       if (!recognitionObject.isEmpty()) {
-               m_state = Undetected;
-       }
-       pthread_mutex_init(&m_globalGuard, NULL);
-       pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED);
-       pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED);
-}
-
-ImageTrackingModel::ImageTrackingModel(const ImageTrackingModel& copy) :
-       m_recognitionThread(0)
-{
-       pthread_mutex_init(&m_globalGuard, NULL);
-       pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED);
-       pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED);
-
-       *this = copy;
-}
-
-ImageTrackingModel::~ImageTrackingModel()
-{
-       if (m_recognitionThread) {
-               pthread_join(m_recognitionThread, NULL);
-       }
-
-       pthread_mutex_destroy(&m_globalGuard);
-       pthread_spin_destroy(&m_lastLocationGuard);
-       pthread_spin_destroy(&m_stateGuard);
-}
-
-void ImageTrackingModel::setTarget(const ImageObject& target)
-{
-       pthread_mutex_lock(&m_globalGuard);
-
-       pthread_spin_lock(&m_stateGuard);
-       m_state = target.isEmpty() ? Invalid : Undetected;
-       pthread_spin_unlock(&m_stateGuard);
-
-       pthread_spin_lock(&m_lastLocationGuard);
-       m_lastLocation.clear();
-       pthread_spin_unlock(&m_lastLocationGuard);
-
-       LOGI("[%s] Target is set into tracking model.", __FUNCTION__);
-
-       m_recognitionObject = target;
-
-       pthread_mutex_unlock(&m_globalGuard);
-}
-
-void ImageTrackingModel::refresh(void)
-{
-       pthread_mutex_lock(&m_globalGuard);
-
-       pthread_spin_lock(&m_stateGuard);
-       m_state = m_recognitionObject.isEmpty() ? Invalid : Undetected;
-       pthread_spin_unlock(&m_stateGuard);
-
-       pthread_spin_lock(&m_lastLocationGuard);
-       m_lastLocation.clear();
-       pthread_spin_unlock(&m_lastLocationGuard);
-
-       LOGI("[%s] Image tracking model is refreshed.", __FUNCTION__);
-
-       pthread_mutex_unlock(&m_globalGuard);
-}
-
-bool ImageTrackingModel::isValid() const
-{
-       bool result = false;
-
-       pthread_spin_lock(&m_stateGuard);
-       result = (m_state != Invalid);
-       pthread_spin_unlock(&m_stateGuard);
-
-       return result;
-}
-
-ImageTrackingModel& ImageTrackingModel::operator=(const ImageTrackingModel& copy)
-{
-       if (this != &copy) {
-               pthread_mutex_t *higherMutex = &m_globalGuard;
-               pthread_mutex_t *lowerMutex = &copy.m_globalGuard;
-
-               if (higherMutex < lowerMutex) {
-                       std::swap(higherMutex, lowerMutex);
-               }
-
-               pthread_mutex_lock(higherMutex);
-               pthread_mutex_lock(lowerMutex);
-
-               m_recognitionObject = copy.m_recognitionObject;
-
-               pthread_spin_lock(&m_lastLocationGuard);
-               m_lastLocation = copy.m_lastLocation;
-               pthread_spin_unlock(&m_lastLocationGuard);
-
-               if (copy.m_state == InProcess) {
-                       pthread_spin_lock(&m_stateGuard);
-                       m_state = m_recognitionObject.isEmpty() ? Invalid : Undetected;
-                       pthread_spin_unlock(&m_stateGuard);
-               } else {
-                       pthread_spin_lock(&m_stateGuard);
-                       m_state = copy.m_state;
-                       pthread_spin_unlock(&m_stateGuard);
-               }
-
-               pthread_mutex_unlock(lowerMutex);
-               pthread_mutex_unlock(higherMutex);
-       }
-
-       return *this;
-}
-
-int ImageTrackingModel::save(const char *fileName) const
-{
-       std::string prefix_path = std::string(app_get_data_path());
-       LOGD("prefix_path: %s", prefix_path.c_str());
-
-       std::string filePath;
-       filePath += prefix_path;
-       filePath += fileName;
-
-       /* check the directory is available */
-       std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
-       if (access(prefix_path_check.c_str(), F_OK)) {
-               LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefix_path_check.c_str());
-
-               return MEDIA_VISION_ERROR_INVALID_PATH;
-       }
-
-       std::ofstream out;
-       out.open(filePath.c_str());
-
-       if (!out.is_open()) {
-               LOGE("[%s] Can't create/open file.", __FUNCTION__);
-               return MEDIA_VISION_ERROR_PERMISSION_DENIED;
-       }
-
-       out << (*this);
-
-       out.close();
-       LOGI("[%s] Image tracking model is saved.", __FUNCTION__);
-
-       return MEDIA_VISION_ERROR_NONE;
-}
-
-int ImageTrackingModel::load(const char *fileName)
-{
-       /* find directory */
-       std::string prefix_path = std::string(app_get_data_path());
-       LOGD("prefix_path: %s", prefix_path.c_str());
-
-       std::string filePath;
-       filePath += prefix_path;
-       filePath += fileName;
-
-       if (access(filePath.c_str(), F_OK)) {
-               LOGE("Can't load tracking model. Path[%s] doesn't existed.", filePath.c_str());
-
-               return MEDIA_VISION_ERROR_INVALID_PATH;
-       }
-
-       std::ifstream in;
-       in.open(filePath.c_str());
-
-       if (!in.is_open()) {
-               LOGE("[%s] Can't open file.", __FUNCTION__);
-               return MEDIA_VISION_ERROR_PERMISSION_DENIED;
-       }
-
-       in >> (*this);
-
-       if (!in.good()) {
-               LOGE("[%s] Unexpected end of file.", __FUNCTION__);
-               return MEDIA_VISION_ERROR_PERMISSION_DENIED;
-       }
-
-       in.close();
-       LOGI("[%s] Image tracking model is loaded.", __FUNCTION__);
-
-       return MEDIA_VISION_ERROR_NONE;
-}
-
-bool ImageTrackingModel::isDetected() const
-{
-       bool result = false;
-
-       pthread_spin_lock(&m_stateGuard);
-       result = (m_state == Tracked);
-       pthread_spin_unlock(&m_stateGuard);
-
-       return result;
-}
-
-std::vector<cv::Point2f> ImageTrackingModel::getLastlocation() const
-{
-       std::vector<cv::Point2f> result;
-
-       pthread_spin_lock(&m_lastLocationGuard);
-       result = m_lastLocation;
-       pthread_spin_unlock(&m_lastLocationGuard);
-
-       return result;
-}
-
-#define STATE_UNSEEN_IO_ID 0
-#define STATE_VISIBLE_IO_ID 1
-
-std::ostream& operator << (std::ostream& os, const ImageTrackingModel::State& state)
-{
-       if (ImageTrackingModel::Tracked == state) {
-               os << STATE_VISIBLE_IO_ID;
-       } else {
-               os << STATE_UNSEEN_IO_ID;
-       }
-
-       return os;
-}
-
-std::istream& operator >> (std::istream& is, ImageTrackingModel::State& state)
-{
-       int stateId = -1;
-
-       is >> stateId;
-
-       if (STATE_VISIBLE_IO_ID == stateId) {
-               state = ImageTrackingModel::Tracked;
-       } else {
-               state = ImageTrackingModel::Undetected;
-       }
-
-       return is;
-}
-
-#undef STATE_UNSEEN_IO_ID
-#undef STATE_VISIBLE_IO_ID
-
-std::ostream& operator << (std::ostream& os, const ImageTrackingModel& obj)
-{
-       os << std::setprecision(7);
-
-       pthread_mutex_lock(&obj.m_globalGuard);
-
-       os << obj.m_recognitionObject;
-
-       os << obj.m_lastLocation.size();
-       for (size_t pointNum = 0u; pointNum < obj.m_lastLocation.size(); ++pointNum) {
-               os << ' ' << obj.m_lastLocation[pointNum].x << ' ' << obj.m_lastLocation[pointNum].y;
-       }
-       os << '\n';
-
-       os << obj.m_state << '\n';
-
-       pthread_mutex_unlock(&obj.m_globalGuard);
-
-       return os;
-}
-
-std::istream& operator >> (std::istream& is, ImageTrackingModel& obj)
-{
-#define MEDIA_VISION_CHECK_IFSTREAM \
-       if (!is.good()) { \
-               return is; \
-       }
-
-       ImageTrackingModel temporal;
-
-       is >> obj.m_recognitionObject;
-       MEDIA_VISION_CHECK_IFSTREAM
-
-       size_t lastLocationAmount = 0u;
-       is >> lastLocationAmount;
-       MEDIA_VISION_CHECK_IFSTREAM
-
-       temporal.m_lastLocation.resize(lastLocationAmount);
-       for (size_t pointNum = 0u; pointNum < lastLocationAmount; ++pointNum) {
-               is >> temporal.m_lastLocation[pointNum].x;
-               MEDIA_VISION_CHECK_IFSTREAM
-               is >> temporal.m_lastLocation[pointNum].y;
-               MEDIA_VISION_CHECK_IFSTREAM
-       }
-
-       is >> temporal.m_state;
-       MEDIA_VISION_CHECK_IFSTREAM
-
-       if (temporal.m_recognitionObject.isEmpty()) {
-               temporal.m_state = ImageTrackingModel::Invalid;
-       }
-
-       obj = temporal;
-
-       return is;
-}
-
-} /* Image */
-} /* MediaVision */
diff --git a/mv_image/image/src/Recognition/ImageObject.cpp b/mv_image/image/src/Recognition/ImageObject.cpp
new file mode 100644 (file)
index 0000000..ac6569f
--- /dev/null
@@ -0,0 +1,376 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Recognition/ImageObject.h"
+
+#include "ImageMathUtil.h"
+
+#include "Features/FeatureExtractor.h"
+#include "Features/BasicExtractorFactory.h"
+#include "Features/ORBExtractorFactory.h"
+
+#include "mv_private.h"
+#include "mv_common.h"
+
+#include <app_common.h>
+
+#include <opencv/cv.h>
+#include <opencv2/features2d/features2d.hpp>
+
+#include <fstream>
+#include <unistd.h>
+
+namespace MediaVision {
+namespace Image {
+ImageObject::ImageObject() :
+               m_features(),
+               m_isEmpty(true),
+               m_isLabeled(false),
+               m_label(0)
+{
+       ; /* NULL */
+}
+
+ImageObject::ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params) :
+               m_featureExtractingParams(),
+               m_features(),
+               m_isEmpty(true),
+               m_isLabeled(false),
+               m_label(0)
+{
+       fill(image, params);
+}
+
+ImageObject::ImageObject(const ImageObject& copy) :
+               m_featureExtractingParams(copy.m_featureExtractingParams),
+               m_features(copy.m_features),
+               m_isEmpty(copy.m_isEmpty),
+               m_isLabeled(copy.m_isLabeled),
+               m_label(copy.m_label),
+               m_boundingContour(copy.m_boundingContour)
+{
+       ; /* NULL */
+}
+
+ImageObject& ImageObject::operator=(const ImageObject& copy)
+{
+       if (this != &copy) {
+               m_isEmpty = copy.m_isEmpty;
+               m_isLabeled = copy.m_isLabeled;
+               m_label = copy.m_label;
+               m_boundingContour = copy.m_boundingContour;
+
+               m_features = copy.m_features;
+       }
+
+       return *this;
+}
+
+ImageObject::~ImageObject()
+{
+       ; /* NULL */
+}
+
+void ImageObject::fill(
+               const cv::Mat& image,
+               const FeaturesExtractingParams& params,
+               const std::vector<cv::Point2f>& roi)
+{
+       m_isEmpty = false;
+
+       if (!roi.empty()) {
+               m_boundingContour = roi;
+       } else {
+               m_boundingContour.resize(NumberOfQuadrangleCorners);
+
+               m_boundingContour[0].x = 0.f;
+               m_boundingContour[0].y = 0.f;
+
+               m_boundingContour[1].x = image.cols;
+               m_boundingContour[1].y = 0.f;
+
+               m_boundingContour[2].x = image.cols;
+               m_boundingContour[2].y = image.rows;
+
+               m_boundingContour[3].x = 0.f;
+               m_boundingContour[3].y = image.rows;
+       }
+
+       extractFeatures(image, params, m_boundingContour);
+
+       m_featureExtractingParams = params;
+
+       LOGI("[%s] Image object is filled.", __FUNCTION__);
+}
+
+float ImageObject::getRecognitionRate(void) const
+{
+       return m_features.m_recognitionRate;
+}
+
+void ImageObject::extractFeatures(
+               const cv::Mat& image,
+               const FeaturesExtractingParams& params,
+               const std::vector<cv::Point2f>& roi)
+{
+       /* TODO: It is advisable to consider the distribution of functional */
+
+       cv::Ptr<FeatureExtractor> extractor;
+
+       if (params.mKeypointType == KT_ORB &&
+                       params.mDescriptorType == DT_ORB) {
+               ORBExtractorFactory extractorFactory;
+
+               extractorFactory.setScaleFactor((float)params.ORB.mScaleFactor);
+               extractorFactory.setMaximumFeaturesNumber(params.ORB.mMaximumFeaturesNumber);
+
+               extractor = extractorFactory.buildFeatureExtractor();
+       } else {
+               BasicExtractorFactory extractorFactory(
+                               params.mKeypointType,
+                               params.mDescriptorType);
+
+               extractor = extractorFactory.buildFeatureExtractor();
+       }
+
+       if (!extractor.empty())
+               extractor->extract(image, m_features, roi);
+}
+
+bool ImageObject::isEmpty() const
+{
+       return (m_features.m_objectKeypoints.empty() ||
+                               m_features.m_objectDescriptors.empty());
+}
+
+void ImageObject::setContour(const std::vector<cv::Point2f>& contour)
+{
+       m_boundingContour = contour;
+}
+
+void ImageObject::setLabel(int label)
+{
+       m_isLabeled = true;
+       m_label = label;
+}
+
+bool ImageObject::getLabel(int& label) const
+{
+       if (!m_isLabeled) {
+               LOGW("[%s] Image hasn't label.", __FUNCTION__);
+               return false;
+       }
+       label = m_label;
+       return true;
+}
+
+int ImageObject::save(const char *fileName) const
+{
+       std::string filePath;
+       char *cPath = app_get_data_path();
+       if (NULL == cPath)
+               filePath = fileName;
+       else
+               filePath = std::string(cPath) + std::string(fileName);
+
+       std::string prefixPath = filePath.substr(0, filePath.find_last_of('/'));
+       LOGD("prefixPath: %s", prefixPath.c_str());
+
+       /* check the directory is available */
+       if (access(prefixPath.c_str(), F_OK)) {
+               LOGE("Can't save image object. Path[%s] doesn't existed.", filePath.c_str());
+
+               return MEDIA_VISION_ERROR_INVALID_PATH;
+       }
+
+       std::ofstream out;
+
+       out.open(filePath.c_str());
+
+       if (!out.is_open()) {
+               LOGE("[%s] Can't create/open file.", __FUNCTION__);
+               return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+       }
+
+       out<<(*this);
+
+       out.close();
+       LOGI("[%s] Image object is saved.", __FUNCTION__);
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+int ImageObject::load(const char *fileName)
+{
+       std::string filePath;
+       char *cPath = app_get_data_path();
+       if (NULL == cPath)
+               filePath = fileName;
+       else
+               filePath = std::string(cPath) + std::string(fileName);
+
+       if (access(filePath.c_str(), F_OK)) {
+               LOGE("Can't load image object model. Path[%s] doesn't existed.", filePath.c_str());
+
+               return MEDIA_VISION_ERROR_INVALID_PATH;
+       }
+
+       std::ifstream in;
+       in.open(filePath.c_str());
+
+       if (!in.is_open()) {
+               LOGE("[%s] Can't open file.", __FUNCTION__);
+               return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+       }
+
+       in>>(*this);
+
+       if (!in.good()) {
+               /* TODO: Provide another error code */
+               LOGE("[%s] Unexpected end of file.", __FUNCTION__);
+               return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+       }
+
+       in.close();
+       LOGI("[%s] Image object is loaded.", __FUNCTION__);
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+std::ostream& operator << (std::ostream& os, const ImageObject& obj)
+{
+       os<<std::setprecision(7);
+
+       os<<obj.m_isEmpty<<'\n';
+       os<<obj.m_isLabeled<<'\n';
+       os<<obj.m_label<<'\n';
+
+       os<<obj.m_boundingContour.size()<<'\n';
+       for (size_t pointNum = 0u; pointNum < obj.m_boundingContour.size(); ++pointNum) {
+               os<<obj.m_boundingContour[pointNum].x<<' ';
+               os<<obj.m_boundingContour[pointNum].y<<'\n';
+       }
+
+       const size_t numberOfKeypoints = obj.m_features.m_objectKeypoints.size();
+       os<<numberOfKeypoints<<'\n';
+       for (size_t keypointNum = 0u; keypointNum < numberOfKeypoints; ++keypointNum) {
+               os<<obj.m_features.m_objectKeypoints[keypointNum].pt.x<<' ';
+               os<<obj.m_features.m_objectKeypoints[keypointNum].pt.y<<' ';
+               os<<obj.m_features.m_objectKeypoints[keypointNum].size<<' ';
+               os<<obj.m_features.m_objectKeypoints[keypointNum].response<<' ';
+               os<<obj.m_features.m_objectKeypoints[keypointNum].angle<<' ';
+               os<<obj.m_features.m_objectKeypoints[keypointNum].octave<<' ';
+               os<<obj.m_features.m_objectKeypoints[keypointNum].class_id<<'\n';
+       }
+
+       const int numberOfDescriptors = obj.m_features.m_objectDescriptors.rows;
+       const int sizeOfDescriptor = obj.m_features.m_objectDescriptors.cols;
+
+       os<<numberOfDescriptors<<' ';
+       os<<sizeOfDescriptor<<' ';
+       os<<obj.m_features.m_objectDescriptors.type()<<'\n';
+
+       for (int descriptorNum = 0; descriptorNum < numberOfDescriptors;
+                               ++descriptorNum, os<<'\n') {
+               for (int featureNum = 0; featureNum < sizeOfDescriptor;
+                                       ++featureNum) {
+                               os<<(int)obj.m_features.m_objectDescriptors.at<uchar>(
+                               descriptorNum,
+                               featureNum)<<' ';
+               }
+       }
+
+       return os;
+}
+
+std::istream& operator >> (std::istream& is, ImageObject& obj)
+{
+       size_t numberOfContourPoints = 0u;
+       size_t numberOfKeypoints = 0u;
+       int rows = 0, cols = 0;
+       int descriptorType = 0;
+
+       ImageObject temporal;
+
+#define MEDIA_VISION_CHECK_IFSTREAM \
+       if (!is.good()) { \
+               return is; \
+       }
+
+       is>>temporal.m_isEmpty;
+       MEDIA_VISION_CHECK_IFSTREAM
+       is>>temporal.m_isLabeled;
+       MEDIA_VISION_CHECK_IFSTREAM
+       is>>temporal.m_label;
+       MEDIA_VISION_CHECK_IFSTREAM
+
+       is>>numberOfContourPoints;
+       MEDIA_VISION_CHECK_IFSTREAM
+
+       temporal.m_boundingContour.resize(numberOfContourPoints);
+       for (size_t pointNum = 0; pointNum < numberOfContourPoints; ++pointNum) {
+               is>>temporal.m_boundingContour[pointNum].x;
+               MEDIA_VISION_CHECK_IFSTREAM
+               is>>temporal.m_boundingContour[pointNum].y;
+               MEDIA_VISION_CHECK_IFSTREAM
+       }
+
+       is>>numberOfKeypoints;
+       temporal.m_features.m_objectKeypoints.resize(numberOfKeypoints);
+       for (size_t keypointNum = 0; keypointNum < numberOfKeypoints; ++keypointNum) {
+               is>>temporal.m_features.m_objectKeypoints[keypointNum].pt.x;
+               MEDIA_VISION_CHECK_IFSTREAM
+               is>>temporal.m_features.m_objectKeypoints[keypointNum].pt.y;
+               MEDIA_VISION_CHECK_IFSTREAM
+               is>>temporal.m_features.m_objectKeypoints[keypointNum].size;
+               MEDIA_VISION_CHECK_IFSTREAM
+               is>>temporal.m_features.m_objectKeypoints[keypointNum].response;
+               MEDIA_VISION_CHECK_IFSTREAM
+               is>>temporal.m_features.m_objectKeypoints[keypointNum].angle;
+               MEDIA_VISION_CHECK_IFSTREAM
+               is>>temporal.m_features.m_objectKeypoints[keypointNum].octave;
+               MEDIA_VISION_CHECK_IFSTREAM
+               is>>temporal.m_features.m_objectKeypoints[keypointNum].class_id;
+               MEDIA_VISION_CHECK_IFSTREAM
+       }
+
+       is>>rows;
+       MEDIA_VISION_CHECK_IFSTREAM
+       is>>cols;
+       MEDIA_VISION_CHECK_IFSTREAM
+       is>>descriptorType;
+       MEDIA_VISION_CHECK_IFSTREAM
+       temporal.m_features.m_objectDescriptors = cv::Mat(rows, cols, descriptorType);
+       int value = 0;
+       for (int descriptorNum = 0; descriptorNum < rows; ++descriptorNum) {
+               for (int featureNum = 0; featureNum < cols; ++featureNum) {
+                       is>>value;
+                       MEDIA_VISION_CHECK_IFSTREAM
+
+                       temporal.m_features.m_objectDescriptors.at<uchar>(descriptorNum, featureNum) =
+                                       (uchar)value;
+               }
+       }
+
+#undef MEDIA_VISION_CHECK_IFSTREAM
+
+       obj = temporal;
+
+       return is;
+}
+
+} /* Image */
+} /* MediaVision */
similarity index 69%
rename from mv_image/image/src/ImageRecognizer.cpp
rename to mv_image/image/src/Recognition/ImageRecognizer.cpp
index 73dd335..ebfc386 100644 (file)
  * limitations under the License.
  */
 
-#include "ImageRecognizer.h"
-#include "ImageObject.h"
+#include "Recognition/ImageRecognizer.h"
+#include "Recognition/ImageObject.h"
 
 #include "mv_private.h"
 
 namespace MediaVision {
 namespace Image {
-ImageRecognizer::ImageRecognizer(
-       const cv::Mat& sceneImage,
-       const FeaturesExtractingParams& params) :
-       m_scene(sceneImage, params)
-{
-       ; /* NULL */
-}
-
 ImageRecognizer::ImageRecognizer(const ImageObject& scene) :
-       m_scene(scene)
+               m_scene(scene)
 {
        ; /* NULL */
 }
@@ -43,23 +35,24 @@ ImageRecognizer::~ImageRecognizer()
 bool ImageRecognizer::recognize(
                const ImageObject& target,
                const RecognitionParams& params,
-               std::vector<cv::Point2f>& contour) const
+               std::vector<cv::Point2f>& contour,
+               float ignoreFactor) const
 {
        cv::Mat homophraphyMatrix;
 
        contour.clear();
 
-       if (MinimumNumberOfFeatures > target.m_objectKeypoints.size()) {
+       if (MinimumNumberOfFeatures > target.m_features.m_objectKeypoints.size()) {
                LOGW("[%s] Image object can't be recognized (Recognition rate is too small).", __FUNCTION__);
                return false;
        }
 
-       if (MinimumNumberOfFeatures > m_scene.m_objectKeypoints.size()) {
+       if (MinimumNumberOfFeatures > m_scene.m_features.m_objectKeypoints.size()) {
                LOGW("[%s] Scene image can't be analyzed (Too few features for recognition).", __FUNCTION__);
                return false;
        }
 
-       if(!findHomophraphyMatrix(target, params, homophraphyMatrix)) {
+       if(!findHomophraphyMatrix(target, params, homophraphyMatrix, ignoreFactor)) {
                LOGE("[%s] Can't match the features.", __FUNCTION__);
                return false;
        }
@@ -81,11 +74,15 @@ bool ImageRecognizer::recognize(
 bool ImageRecognizer::findHomophraphyMatrix(
                const ImageObject& target,
                const RecognitionParams& params,
-               cv::Mat& homophraphyMatrix) const
+               cv::Mat& homophraphyMatrix,
+               float ignoreFactor) const
 {
        std::vector<cv::DMatch> matches;
 
-       m_matcher.match(target.m_objectDescriptors, m_scene.m_objectDescriptors, matches);
+       m_matcher.match(
+                       target.m_features.m_objectDescriptors,
+                       m_scene.m_features.m_objectDescriptors,
+                       matches);
 
        size_t matchesNumber = matches.size();
 
@@ -98,18 +95,18 @@ bool ImageRecognizer::findHomophraphyMatrix(
                        params.mRequiredMatchesPart * matchesNumber;
 
        size_t allowableMatchesNumberError =
-                       params.mAllowableMatchesPartError * requiredMatchesNumber;
-
-       if ((matchesNumber - allowableMatchesNumberError) >
-               (size_t)params.mMinMatchesNumber &&
-               (requiredMatchesNumber + allowableMatchesNumberError) <
-               matchesNumber) {
-               if ((requiredMatchesNumber - allowableMatchesNumberError) <
-                       (size_t)params.mMinMatchesNumber) {
-                       if ((requiredMatchesNumber + allowableMatchesNumberError) >
+                       params.mTolerantMatchesPartError * requiredMatchesNumber;
+
+       if (matchesNumber - allowableMatchesNumberError >
+                       (size_t)params.mMinMatchesNumber &&
+                       requiredMatchesNumber + allowableMatchesNumberError <
+                       matchesNumber) {
+               if (requiredMatchesNumber - allowableMatchesNumberError <
                                (size_t)params.mMinMatchesNumber) {
+                       if (requiredMatchesNumber + allowableMatchesNumberError >
+                                       (size_t)params.mMinMatchesNumber) {
                                requiredMatchesNumber = ((size_t)params.mMinMatchesNumber +
-                                               requiredMatchesNumber + allowableMatchesNumberError) / 2;
+                               requiredMatchesNumber + allowableMatchesNumberError) / 2;
 
                                allowableMatchesNumberError = requiredMatchesNumber-
                                                (size_t)params.mMinMatchesNumber +
@@ -128,13 +125,12 @@ bool ImageRecognizer::findHomophraphyMatrix(
                                                                                                        requiredMatchesNumber,
                                                                                                        allowableMatchesNumberError);
 
-               if (filterAmount >= MinimumNumberOfFeatures) {
+               if (filterAmount >= MinimumNumberOfFeatures)
                        matches.resize(filterAmount);
-               } else {
+               else
                        LOGW("[%s] Wrong filtration of feature matches.", __FUNCTION__);
-               }
 
-                matchesNumber = matches.size();
+               matchesNumber = matches.size();
        }
 
        std::vector<cv::Point2f> objectPoints(matchesNumber);
@@ -142,12 +138,29 @@ bool ImageRecognizer::findHomophraphyMatrix(
 
        for (size_t matchIdx = 0; matchIdx < matchesNumber; ++matchIdx) {
                objectPoints[matchIdx] =
-                               target.m_objectKeypoints[matches[matchIdx].queryIdx].pt;
+                               target.m_features.m_objectKeypoints[matches[matchIdx].queryIdx].pt;
 
                scenePoints[matchIdx] =
-                               m_scene.m_objectKeypoints[matches[matchIdx].trainIdx].pt;
+                               m_scene.m_features.m_objectKeypoints[matches[matchIdx].trainIdx].pt;
+       }
+
+       if (ignoreFactor > FLT_EPSILON) {
+               const std::vector<cv::Point2f> significantArea = contourResize(
+                               target.m_boundingContour,
+                               ignoreFactor);
+
+               for (size_t matchIdx = 0; matchIdx < objectPoints.size(); ++matchIdx) {
+                       if (!checkAccessory(objectPoints[matchIdx], significantArea)) {
+                               objectPoints.erase(objectPoints.begin() + matchIdx);
+                               scenePoints.erase(scenePoints.begin() + matchIdx);
+                               --matchIdx;
+                       }
+               }
        }
 
+       if (objectPoints.size() < MinimumNumberOfFeatures)
+               return false;
+
        homophraphyMatrix = cv::findHomography(objectPoints, scenePoints, CV_RANSAC);
 
        return true;
@@ -159,9 +172,8 @@ size_t ImageRecognizer::matchesSelection(
 {
        size_t sizeOfExamples = examples.size();
 
-       if ((filterAmount + allowableError) > sizeOfExamples) {
+       if ((filterAmount + allowableError) > sizeOfExamples)
                return examples.size();
-       }
 
        int startLeftLimit = 0;
        int startRightLimit = sizeOfExamples - 1;
@@ -175,44 +187,39 @@ size_t ImageRecognizer::matchesSelection(
 
        while (true) {
                if (leftLimit >= rightLimit) {
-                       if (leftLimit < (requiredNumber - (int)allowableError)) {
+                       if (leftLimit < (requiredNumber - (int)allowableError))
                                leftLimit = requiredNumber + (int)allowableError;
-                       }
 
                        break;
                }
 
                supportElement = computeLinearSupportElement(examples, requiredNumber,
-                               leftLimit, rightLimit);
+                                       leftLimit, rightLimit);
 
                /* Iteration similar quicksort */
                while (true) {
-                       /* Search the leftmost element
-                        *which have bigger confidence than support element
-                        */
+                       /* Search the leftmost element which has a bigger confidence than the support element */
                        while (examples[leftLimit].distance <= supportElement &&
-                               leftLimit < startRightLimit) {
+                                       leftLimit < startRightLimit) {
                                ++leftLimit;
                        }
 
-                       /* Search the rightmost element
-                        *which have smaller confidence than support element
-                        */
+                       /* Search the rightmost element which has a smaller confidence than the support element */
                        while (examples[rightLimit].distance >= supportElement &&
-                               rightLimit >= startLeftLimit) {
+                                       rightLimit >= startLeftLimit) {
                                --rightLimit;
                        }
 
-                       if (leftLimit >= rightLimit) {
+                       if (leftLimit >= rightLimit)
                                break;
-                       }
 
                        /* Swap */
                        std::swap(examples[leftLimit], examples[rightLimit]);
                }
-               if (abs(filterAmount - leftLimit) <= (int)allowableError) {
+
+               if (abs(filterAmount - leftLimit) <= (int)allowableError)
                        break;
-               }
+
                if ((int)filterAmount > leftLimit) {
                        requiredNumber -= leftLimit - startLeftLimit;
 
@@ -248,9 +255,8 @@ float ImageRecognizer::computeLinearSupportElement(const std::vector<cv::DMatch>
                }
        }
 
-       /* Linear approximation. f(x) = k*x + b
-        * f(sizeOfExamples) = maxValue; f(1) = minValue;
-        */
+       /* Linear approximation. f(x) = k*x + b */
+       /* f(sizeOfExamples) = maxValue; f(1) = minValue; */
        const float b = (maxValue - minValue * sizeOfExamples) / (1 - sizeOfExamples);
        const float k = minValue - b;
 
@@ -261,8 +267,10 @@ float ImageRecognizer::computeLinearSupportElement(const std::vector<cv::DMatch>
 bool ImageRecognizer::isPossibleQuadrangleCorners(
                const cv::Point2f corners[NumberOfQuadrangleCorners])
 {
-       static const float Epsilon = cv::TermCriteria::EPS;
-       static const float MinSizeOfDetectedArea = 30.f;
+       static const float Epsilon = 0.1f;
+
+       /* TODO: move the MinSizeOfDetectedArea out of the ImageRecognizer */
+       static const float MinSizeOfDetectedArea = 64.f;
 
        const float firstSemiArea = getTriangleArea(corners[0], corners[2], corners[1]) +
                        getTriangleArea(corners[0], corners[2], corners[3]);
@@ -271,9 +279,8 @@ bool ImageRecognizer::isPossibleQuadrangleCorners(
                        getTriangleArea(corners[1], corners[3], corners[0]);
 
        if (Epsilon < fabs(firstSemiArea - secondSemiArea) ||
-               MinSizeOfDetectedArea > (firstSemiArea + secondSemiArea)) {
+                       MinSizeOfDetectedArea > (firstSemiArea + secondSemiArea))
                return false;
-       }
 
        return true;
 }
diff --git a/mv_image/image/src/Tracking/AsyncTracker.cpp b/mv_image/image/src/Tracking/AsyncTracker.cpp
new file mode 100644 (file)
index 0000000..5ae18ae
--- /dev/null
@@ -0,0 +1,184 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tracking/AsyncTracker.h"
+
+namespace MediaVision {
+namespace Image {
+AsyncTracker::AsyncTracker(const AsyncTracker& copy) :
+               m_baseTracker(copy.m_baseTracker.obj->clone()),
+               m_result(copy.m_result),
+               m_isRun(false),
+               m_isUpdated(copy.m_isUpdated),
+               m_copyingPolicy(copy.m_copyingPolicy),
+               m_thread(0)
+{
+       pthread_mutex_init(&m_globalGuard, NULL);
+       pthread_spin_init(&m_resultGuard, PTHREAD_PROCESS_SHARED);
+       pthread_spin_init(&m_isRunGuard, PTHREAD_PROCESS_SHARED);
+       pthread_spin_init(&m_isUpdatedGuard, PTHREAD_PROCESS_SHARED);
+}
+
+AsyncTracker::AsyncTracker(
+               cv::Ptr<ObjectTracker> baseTracker,
+               bool copyingPolicy) :
+               m_baseTracker(baseTracker),
+               m_result(),
+               m_isRun(false),
+               m_isUpdated(false),
+               m_copyingPolicy(copyingPolicy),
+               m_thread(0)
+{
+       pthread_mutex_init(&m_globalGuard, NULL);
+       pthread_spin_init(&m_resultGuard, PTHREAD_PROCESS_SHARED);
+       pthread_spin_init(&m_isRunGuard, PTHREAD_PROCESS_SHARED);
+       pthread_spin_init(&m_isUpdatedGuard, PTHREAD_PROCESS_SHARED);
+}
+
+AsyncTracker::~AsyncTracker()
+{
+       if(isRun())
+               pthread_join(m_thread, NULL);
+
+       pthread_mutex_destroy(&m_globalGuard);
+       pthread_spin_destroy(&m_resultGuard);
+       pthread_spin_destroy(&m_isRunGuard);
+       pthread_spin_destroy(&m_isUpdatedGuard);
+}
+
+bool AsyncTracker::track(
+               const cv::Mat& frame,
+               std::vector<cv::Point>& result)
+{
+       while (pthread_mutex_trylock(&m_globalGuard) != 0) {
+               return getResult(result);
+       }
+
+       pthread_spin_lock(&m_isRunGuard);
+       m_isRun = true;
+       pthread_spin_unlock(&m_isRunGuard);
+
+       if (m_copyingPolicy)
+               m_frame = frame.clone();
+       else
+               m_frame = frame;
+
+       const int err = pthread_create(&m_thread, NULL, asyncTrack, this);
+
+       if (0 == err)
+               return getResult(result);
+
+       pthread_spin_lock(&m_isRunGuard);
+       m_isRun = false;
+       pthread_spin_unlock(&m_isRunGuard);
+
+       pthread_mutex_unlock(&m_globalGuard);
+
+       return getResult(result);
+}
+
+void AsyncTracker::reinforcement(const std::vector<cv::Point>& location)
+{
+       /* TODO: Unsafe. Need to redesign. */
+       m_baseTracker->reinforcement(location);
+
+       pthread_spin_lock(&m_resultGuard);
+       m_result = location;
+       pthread_spin_unlock(&m_resultGuard);
+}
+
+cv::Ptr<ObjectTracker> AsyncTracker::clone() const
+{
+       return cv::Ptr<ObjectTracker>(new AsyncTracker(*this));
+}
+
+bool AsyncTracker::baseTrack(std::vector<cv::Point>& result)
+{
+       return m_baseTracker->track(m_frame, result);
+}
+
+void *AsyncTracker::asyncTrack(void *data)
+{
+       AsyncTracker *tracker = reinterpret_cast<AsyncTracker*>(data);
+
+       std::vector<cv::Point> result;
+       tracker->baseTrack(result);
+
+       pthread_spin_lock(&tracker->m_resultGuard);
+       tracker->m_result = result;
+       pthread_spin_unlock(&tracker->m_resultGuard);
+
+       pthread_spin_lock(&tracker->m_isUpdatedGuard);
+       tracker->m_isUpdated = true;
+       pthread_spin_unlock(&tracker->m_isUpdatedGuard);
+
+       pthread_mutex_unlock(&tracker->m_globalGuard);
+
+       pthread_spin_lock(&tracker->m_isRunGuard);
+       tracker->m_isRun = false;
+       pthread_spin_unlock(&tracker->m_isRunGuard);
+
+       return NULL;
+}
+
+bool AsyncTracker::wait()
+{
+       if(isRun()) {
+               pthread_join(m_thread, NULL);
+               return true;
+       }
+       return false;
+}
+
+bool AsyncTracker::isRun()
+{
+       bool result = false;
+
+       pthread_spin_lock(&m_isRunGuard);
+       result = m_isRun;
+       pthread_spin_unlock(&m_isRunGuard);
+
+       return result;
+}
+
+bool AsyncTracker::isUpdated(std::vector<cv::Point>& result)
+{
+       bool isUpdated = false;
+
+       getResult(result);
+
+       pthread_spin_lock(&m_isUpdatedGuard);
+       isUpdated = m_isUpdated;
+       m_isUpdated = false;
+       pthread_spin_unlock(&m_isUpdatedGuard);
+
+       return isUpdated;
+}
+
+bool AsyncTracker::getResult(std::vector<cv::Point>& result)
+{
+       bool isTracked = false;
+
+       pthread_spin_lock(&m_resultGuard);
+       isTracked = !m_result.empty();
+       result = m_result;
+       pthread_spin_unlock(&m_resultGuard);
+
+       return isTracked;
+}
+
+} /* Image */
+} /* MediaVision */
diff --git a/mv_image/image/src/Tracking/CascadeTracker.cpp b/mv_image/image/src/Tracking/CascadeTracker.cpp
new file mode 100644 (file)
index 0000000..ed56f09
--- /dev/null
@@ -0,0 +1,195 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tracking/CascadeTracker.h"
+#include "Tracking/AsyncTracker.h"
+
+#include "ImageMathUtil.h"
+
+namespace MediaVision {
+namespace Image {
+CascadeTracker::CascadeTracker(float minimumArea) :
+               m_trackers(),
+               m_minimumArea(minimumArea)
+{
+       ; /* NULL */
+}
+
+CascadeTracker::CascadeTracker(const CascadeTracker& copy) :
+               m_trackers(),
+               m_minimumArea(copy.m_minimumArea)
+{
+       *this = copy;
+}
+
+CascadeTracker::~CascadeTracker()
+{
+       ; /* NULL */
+}
+
+bool CascadeTracker::track(const cv::Mat& frame, std::vector<cv::Point>& result)
+{
+       internalReinforcement();
+
+       std::set<TrackerInfo>::iterator it = m_trackers.begin();
+
+       for (; it != m_trackers.end(); ++it) {
+               if (!it->mTracker.obj->track(frame, it->mResult)) {
+                       it->mResult.clear();
+               }
+       }
+
+       return mergeResults(result);
+}
+
+void CascadeTracker::reinforcement(const std::vector<cv::Point>& location)
+{
+       std::set<TrackerInfo>::iterator it = m_trackers.begin();
+
+       for (; it != m_trackers.end(); ++it)
+               it->mTracker.obj->reinforcement(location);
+}
+
+cv::Ptr<ObjectTracker> CascadeTracker::clone() const
+{
+       return cv::Ptr<ObjectTracker>(new CascadeTracker(*this));
+}
+
+CascadeTracker& CascadeTracker::operator=(const CascadeTracker& copy)
+{
+       if (this != &copy) {
+               this->m_minimumArea = copy.m_minimumArea;
+               this->m_trackers.clear();
+
+               std::set<TrackerInfo>::iterator it = copy.m_trackers.begin();
+               for (; it != copy.m_trackers.end(); ++it) {
+                       TrackerInfo temp(it->mTracker.obj->clone(), it->mPriority);
+                       temp.mResult = it->mResult;
+
+                       m_trackers.insert(temp);
+               }
+       }
+
+       return *this;
+}
+
+bool CascadeTracker::enableTracker(cv::Ptr<ObjectTracker> tracker, float priority)
+{
+       TrackerInfo temp(tracker, priority);
+
+       std::set<TrackerInfo>::iterator it =
+                       std::find(m_trackers.begin(), m_trackers.end(), temp);
+
+       if (it != m_trackers.end())
+               m_trackers.erase(it);
+
+       return m_trackers.insert(temp).second;
+}
+
+bool CascadeTracker::disableTracker(cv::Ptr<ObjectTracker> tracker)
+{
+       TrackerInfo target(tracker, 0);
+
+       std::set<TrackerInfo>::iterator it =
+                       std::find(m_trackers.begin(), m_trackers.end(), target);
+
+       if (it == m_trackers.end())
+               return false;
+
+       m_trackers.erase(it);
+
+       return true;
+}
+
+void CascadeTracker::internalReinforcement()
+{
+       std::set<TrackerInfo>::iterator it1 = m_trackers.begin();
+       for (; it1 != m_trackers.end(); ++it1) {
+               bool isUpdated = true;
+
+               /* TODO: Redesign without dynamic_cast */
+               AsyncTracker *asyncView = dynamic_cast<AsyncTracker*>(it1->mTracker.obj);
+               if (NULL != asyncView)
+                       isUpdated = asyncView->isUpdated(it1->mResult);
+
+               if (!it1->mResult.empty() && isUpdated) {
+                       const size_t numberOfContourPoints = it1->mResult.size();
+                       std::vector<cv::Point2f> checkedArea(numberOfContourPoints);
+                       for (size_t i = 0; i < numberOfContourPoints; ++i) {
+                               checkedArea[i].x = it1->mResult[i].x;
+                               checkedArea[i].y = it1->mResult[i].y;
+                       }
+
+                       if (getQuadrangleArea(checkedArea.data()) < m_minimumArea) {
+                               it1->mResult = std::vector<cv::Point>(0);
+                               it1->mTracker.obj->reinforcement(it1->mResult);
+                       }
+
+                       float priority = it1->mPriority;
+                       std::set<TrackerInfo>::iterator it2 = m_trackers.begin();
+
+                       for (; it2 != m_trackers.end(); ++it2) {
+                               if (it1 != it2 &&
+                                               priority > it2->mPriority) {
+                                        it2->mTracker.obj->reinforcement(it1->mResult);
+                               }
+                       }
+               }
+       }
+}
+
+bool CascadeTracker::mergeResults(std::vector<cv::Point>& result) const
+{
+       result.clear();
+
+       std::set<TrackerInfo>::iterator it = m_trackers.begin();
+
+       float resPriotiry = 0.f;
+       for (; it != m_trackers.end(); ++it) {
+               if (result.empty() || resPriotiry > it->mPriority) {
+                       resPriotiry = it->mPriority;
+                       result = it->mResult;
+               }
+       }
+
+       return !(result.empty());
+}
+
+CascadeTracker::TrackerInfo::TrackerInfo(cv::Ptr<ObjectTracker> tracker, float priority) :
+               mTracker(tracker),
+               mPriority(priority),
+               mResult()
+{
+       ; /* NULL */
+}
+
+bool CascadeTracker::TrackerInfo::operator<(const TrackerInfo& second) const
+{
+       return (this->mPriority < second.mPriority);
+}
+
+bool CascadeTracker::TrackerInfo::operator==(const TrackerInfo& second) const
+{
+       return (this->mTracker == second.mTracker);
+}
+
+bool CascadeTracker::TrackerInfo::operator!=(const TrackerInfo& second) const
+{
+       return !(*this == second);
+}
+
+} /* Image */
+} /* MediaVision */
diff --git a/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp b/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp
new file mode 100644 (file)
index 0000000..eaf8bef
--- /dev/null
@@ -0,0 +1,132 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tracking/FeatureSubstitutionTracker.h"
+
+#include "Recognition/ImageRecognizer.h"
+
+namespace MediaVision {
+namespace Image {
+FeatureSubstitutionTracker::FeatureSubstitutionTracker(
+               const FeaturesExtractingParams& featuresExtractingParams,
+               const RecognitionParams& recognitionParams,
+               float expectedOffset,
+               float sceneScalingFactor,
+               float objectScalingFactor) :
+                               m_isInit(false),
+                               m_target(),
+                               m_location(),
+                               m_featureExtractingParams(featuresExtractingParams),
+                               m_recogParams(recognitionParams),
+                               m_expectedOffset(expectedOffset),
+                               m_sceneScalingFactor(sceneScalingFactor),
+                               m_objectScalingFactor(objectScalingFactor)
+{
+       ; /* NULL */
+}
+
+bool FeatureSubstitutionTracker::track(
+               const cv::Mat& frame,
+               std::vector<cv::Point>& result)
+{
+       std::vector<cv::Point2f> contour;
+       size_t numberOfContourPoints = m_location.size();
+       contour.resize(numberOfContourPoints);
+       for(size_t i = 0u; i < numberOfContourPoints; ++i) {
+               contour[i].x = m_location[i].x;
+               contour[i].y = m_location[i].y;
+       }
+
+       if (!m_isInit) {
+               if (m_location.empty()) {
+                       return false;
+               } else {
+                       m_target = new ImageObject;
+                       m_target->fill(
+                                       frame,
+                                       m_featureExtractingParams,
+                                       contourResize(contour, m_objectScalingFactor));
+                       m_target->setContour(contour);
+                       m_isInit = true;
+                       result = m_location;
+                       return true;
+               }
+       }
+
+       cv::Ptr<ImageObject> sceneImageObject = new ImageObject;
+
+       sceneImageObject->fill(frame, m_featureExtractingParams, computeExpectedArea());
+
+       ImageRecognizer recognizer(*sceneImageObject.obj);
+
+       const bool isTracked =
+                       recognizer.recognize(
+                                       *m_target.obj,
+                                       m_recogParams,
+                                       contour,
+                                       m_objectScalingFactor);
+
+       if (isTracked) {
+               numberOfContourPoints = contour.size();
+               m_location.resize(numberOfContourPoints);
+               for(size_t i = 0u; i < numberOfContourPoints; ++i) {
+                       m_location[i].x = (int)contour[i].x;
+                       m_location[i].y = (int)contour[i].y;
+               }
+
+               result = m_location;
+               m_target = sceneImageObject;
+               m_target->setContour(contour);
+       } else {
+               m_location.clear();
+               m_isInit = false;
+       }
+
+       return isTracked;
+}
+
+void FeatureSubstitutionTracker::reinforcement(const std::vector<cv::Point>& location)
+{
+       m_isInit = false;
+
+       if (location.size() < 3) {
+               m_location.clear();
+               return;
+       }
+
+       m_location = location;
+}
+
+cv::Ptr<ObjectTracker> FeatureSubstitutionTracker::clone() const
+{
+       return cv::Ptr<ObjectTracker>(new FeatureSubstitutionTracker(*this));
+}
+
+std::vector<cv::Point2f> FeatureSubstitutionTracker::computeExpectedArea()
+{
+       std::vector<cv::Point2f> contour;
+       const size_t numberOfContourPoints = m_location.size();
+       contour.resize(numberOfContourPoints);
+       for(size_t i = 0u; i < numberOfContourPoints; ++i) {
+               contour[i].x = m_location[i].x;
+               contour[i].y = m_location[i].y;
+       }
+
+       return contourResize(contour, m_sceneScalingFactor);
+}
+
+} /* Image */
+} /* MediaVision */
  * limitations under the License.
  */
 
-#include "ImageContourStabilizator.h"
 #include "ImageMathUtil.h"
 
+#include "Tracking/ImageContourStabilizator.h"
+
 #include "mv_private.h"
 
 namespace MediaVision {
 namespace Image {
 ImageContourStabilizator::ImageContourStabilizator() :
-       m_movingHistory(MovingHistoryAmount),
-       m_priorities(MovingHistoryAmount)
+               m_movingHistory(),
+               m_priorities()
 {
        reset();
-
-       /* increasing the stabilization rate */
-       m_speeds.push_back(0.3f);
-       m_speeds.push_back(0.4f);
-       m_speeds.push_back(0.5f);
-       m_speeds.push_back(0.6f);
-       m_speeds.push_back(0.8f);
-       m_speeds.push_back(1.f);
-
-       /* calculation of priorities for positions in the moving history */
-       for (size_t i = 0u; i < MovingHistoryAmount; ++i) {
-               /* linear dependence on the elapsed time */
-               m_priorities[i] = (i + 1) / ((MovingHistoryAmount + 1) * MovingHistoryAmount / 2.0f);
-       }
 }
 
 void ImageContourStabilizator::reset(void)
@@ -47,57 +34,58 @@ void ImageContourStabilizator::reset(void)
        m_isPrepared = false;
        m_tempContourIndex = -1;
        m_currentHistoryAmount = 0;
-
-       LOGI("Outlier is detected.");
+       m_historyAmount = 0;
+       m_movingHistory.clear();
 }
 
-bool ImageContourStabilizator::stabilize(
+ImageContourStabilizator::StabilizationError ImageContourStabilizator::stabilize(
                std::vector<cv::Point2f>& contour,
-               const StabilizationParams& /*params*/)
+               const StabilizationParams& params)
 {
+       if (!updateSettings(params)) {
+               LOGW("Not stabilized. Invalid settings.");
+
+               return InvalidSettings;
+       }
+
        /* current implementation stabilizes quadrangles only */
        if (contour.size() != NumberOfQuadrangleCorners) {
-               LOGW("Not stabilized. Empty contour.");
+               LOGW("Not stabilized. Unsupported contour type.");
 
-               return false;
+               return UnsupportedContourType;
        }
 
        m_currentCornersSpeed.resize(contour.size(), 0);
 
-       if (contour[0].x == contour[1].x && contour[0].y == contour[1].y) {
-               LOGW("Not stabilized. Invalid contour.");
-
-               return false;
-       }
-
-       if (m_lastStabilizedContour.empty()) {
+       if (m_lastStabilizedContour.empty())
                m_lastStabilizedContour = contour;
-       }
 
        std::vector<cv::Point2f> stabilizedState;
 
        /* history amount < 2 it's no sense */
-       if (MovingHistoryAmount >= 2) {
+       if (m_historyAmount >= 2) {
                /* first sample */
                if (m_tempContourIndex == -1) {
-                       m_movingHistory[1] = contour;
+                       m_movingHistory.push_back(contour);
+                       m_movingHistory.push_back(contour);
+
                        m_tempContourIndex = 1;
-                       m_currentHistoryAmount = 1;
+                       m_currentHistoryAmount = 2;
 
                        LOGI("Not stabilized. Too small moving history. (the first one)");
 
-                       return false;
+                       return TooShortMovingHistory;
                }
 
                /* too short moving history */
-               if (m_currentHistoryAmount < MovingHistoryAmount - 1) {
+               if (m_currentHistoryAmount < m_historyAmount) {
                        ++m_currentHistoryAmount;
                        ++m_tempContourIndex;
-                       m_movingHistory[m_tempContourIndex] = contour;
+                       m_movingHistory.push_back(contour);
 
-                       LOGI("Not stabilized. Too small moving history.");
+                       LOGI("Not stabilized. Too short moving history.");
 
-                       return false;
+                       return TooShortMovingHistory;
                }
 
                /* saving into moving history */
@@ -105,7 +93,7 @@ bool ImageContourStabilizator::stabilize(
                m_movingHistory.push_back(contour);
 
                if (!m_isPrepared) {
-                       m_lastStabilizedContour = m_movingHistory[MovingHistoryAmount - 2];
+                       m_lastStabilizedContour = m_movingHistory[m_historyAmount - 2];
 
                        LOGI("Not stabilized. Too small moving history. (the last one)");
 
@@ -115,14 +103,14 @@ bool ImageContourStabilizator::stabilize(
                /* stabilization */
                stabilizedState = computeStabilizedQuadrangleContour();
 
-               if (stabilizedState.empty()) {
+               if (stabilizedState.empty())
                        stabilizedState = m_lastStabilizedContour;
-               }
        } else {
                stabilizedState = m_lastStabilizedContour;
        }
 
-       const float tolerantShift = getQuadrangleArea(contour.data()) * 0.00006f + 1.3f;
+       const float tolerantShift = getQuadrangleArea(contour.data()) *
+                               m_tolerantShift + m_tolerantShiftExtra;
 
        const size_t contourSize = stabilizedState.size();
        for (size_t i = 0u; i < contourSize; ++i) {
@@ -150,6 +138,77 @@ bool ImageContourStabilizator::stabilize(
 
        LOGI("Contour successfully stabilized.");
 
+       return Successfully;
+}
+
+/**
+ * Applies @a params to the stabilizator state: tolerant-shift factors,
+ * history length (with priority weights recomputed), and the speed ramp
+ * used to accelerate stabilization over consecutive frames.
+ *
+ * @return false when @a params are invalid (history amount < 1).
+ */
+bool ImageContourStabilizator::updateSettings(const StabilizationParams& params)
+{
+       if (params.mHistoryAmount < 1)
+               return false;
+
+       m_tolerantShift = (float)params.mTolerantShift;
+       m_tolerantShiftExtra = (float)params.mTolerantShiftExtra;
+
+       if (m_historyAmount != (size_t)params.mHistoryAmount) {
+               m_historyAmount = (size_t)params.mHistoryAmount;
+
+               m_priorities.resize(m_historyAmount);
+
+               /* calculation of priorities for positions in the moving history */
+               for (size_t i = 0u; i < m_historyAmount; ++i) {
+                       /* linear dependence on the elapsed time */
+                       m_priorities[i] = ((i + 1) * 2.0f) /
+                                               ((m_historyAmount + 1) * m_historyAmount);
+               }
+       }
+
+       /* NOTE(review): the `if` above already syncs m_historyAmount to
+        * params.mHistoryAmount, so the shrink loop and grow branch below can
+        * never execute — the history trimming / reset logic looks dead.
+        * Confirm whether these were meant to run BEFORE the sync. */
+       while (m_historyAmount > (size_t)params.mHistoryAmount) {
+               m_movingHistory.pop_front();
+               --m_historyAmount;
+       }
+
+       if ((size_t)params.mHistoryAmount > m_historyAmount) {
+               /* TODO: save current moving history */
+
+               m_tempContourIndex = -1;
+               m_historyAmount = (size_t)params.mHistoryAmount;
+               m_movingHistory.clear();
+       }
+
+       /* Rebuild the speed ramp only when the requested speed/acceleration
+        * differ from the ramp currently cached in m_speeds. */
+       bool speedIsValid = false;
+       if (m_speeds.size() > 1) {
+               const static float Epsilon = 0.0001f;
+               if (fabs(m_speeds[0] - params.mStabilizationSpeed) < Epsilon &&
+                               fabs((m_speeds[1] - m_speeds[0]) -
+                               params.mStabilizationAcceleration) < Epsilon) {
+               speedIsValid = true;
+               }
+       }
+
+       if (!speedIsValid) {
+               m_speeds.clear();
+
+               /* number of steps needed to ramp from the base speed to 1.0 */
+               int speedsSize = (int)((1 - params.mStabilizationSpeed) /
+                                       params.mStabilizationAcceleration) + 1;
+
+               if (speedsSize < 1) {
+                       m_speeds.push_back(1.0f);
+               } else {
+                       static const int MaxSpeedsSize = 25;
+
+                       if (speedsSize > MaxSpeedsSize)
+                               speedsSize = MaxSpeedsSize;
+
+                       /* base speed is clamped to [0, 1] */
+                       float speed = std::max(0.f,
+                                       std::min((float)params.mStabilizationSpeed, 1.0f));
+
+                       for (int i = 0; i < speedsSize; ++i) {
+                               m_speeds.push_back(speed);
+                               speed += params.mStabilizationAcceleration;
+                       }
+               }
+       }
+
        return true;
 }
 
@@ -157,11 +216,11 @@ std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleCo
 {
        /* final contour */
        std::vector<cv::Point2f> stabilizedState(
-                       NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f));
+                               NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f));
 
        /* calculation the direction of contour corners to a new location */
        std::vector<cv::Point2f> directions(
-                       NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f));
+                               NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f));
 
        /* computing expected directions and outliers searching */
        bool expressiveTime = false;
@@ -170,25 +229,25 @@ std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleCo
        std::vector<cv::Point2f> directionsToLastPos(NumberOfQuadrangleCorners);
        for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j) {
                /* calculation the moving directions and computing average direction */
-               std::vector<cv::Point2f> trackDirections(MovingHistoryAmount - 1);
+               std::vector<cv::Point2f> trackDirections(m_historyAmount - 1);
                cv::Point2f averageDirections(0.f, 0.f);
 
-               for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) {
+               for (size_t i = 0u; i < m_historyAmount - 1; ++i) {
                        averageDirections.x += (trackDirections[i].x =
                                        m_movingHistory[i+1][j].x - m_movingHistory[i][j].x) /
-                                       (MovingHistoryAmount - 1);
+                                       (m_historyAmount - 1);
 
                        averageDirections.y += (trackDirections[i].y =
                                        m_movingHistory[i+1][j].y - m_movingHistory[i][j].y) /
-                                       (MovingHistoryAmount - 1);
+                                       (m_historyAmount - 1);
                }
 
                /* calculation a deviations and select outlier */
-               std::vector<float> directionDistances(MovingHistoryAmount - 1);
+               std::vector<float> directionDistances(m_historyAmount - 1);
                float maxDistance = 0.f, prevMaxDistance = 0.f;
                int idxWithMaxDistance = 0;
                int numExpressiveDirection = -1;
-               for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) {
+               for (size_t i = 0u; i < m_historyAmount - 1; ++i) {
                        directionDistances[i] = getDistance(
                                        trackDirections[i],
                                        averageDirections);
@@ -213,7 +272,7 @@ std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleCo
 
                /* final direction computing */
                float summPriority = 0.f;
-               for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) {
+               for (size_t i = 0u; i < m_historyAmount - 1; ++i) {
                        if ((int)i != numExpressiveDirection) {
                                directions[j].x += trackDirections[i].x * m_priorities[i];
                                directions[j].y += trackDirections[i].y * m_priorities[i];
@@ -221,22 +280,21 @@ std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleCo
                        }
                }
 
-               if (numExpressiveDirection == MovingHistoryAmount - 1) {
-                               expressiveTime = true;
-               }
+               if (numExpressiveDirection == (int)(m_historyAmount - 1))
+                       expressiveTime = true;
 
                summPriorityWithoutToLastPos[j] = summPriority;
-               priorityToLastPos[j] = m_priorities[MovingHistoryAmount - 1];
+               priorityToLastPos[j] = m_priorities[m_historyAmount - 1];
 
                directions[j].x -= directionsToLastPos[j].x =
-                                               (m_lastStabilizedContour[j].x -
-                                                       m_movingHistory[MovingHistoryAmount - 1][j].x) *
-                                                       priorityToLastPos[j];
+                                       (m_lastStabilizedContour[j].x -
+                                       m_movingHistory[m_historyAmount - 1][j].x) *
+                                       priorityToLastPos[j];
 
                directions[j].y -= directionsToLastPos[j].y =
-                                               (m_lastStabilizedContour[j].y -
-                                                       m_movingHistory[MovingHistoryAmount - 1][j].y) *
-                                                       priorityToLastPos[j];
+                                       (m_lastStabilizedContour[j].y -
+                                       m_movingHistory[m_historyAmount - 1][j].y) *
+                                       priorityToLastPos[j];
 
                summPriority += priorityToLastPos[j];
 
@@ -248,12 +306,12 @@ std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleCo
        for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j) {
                if (expressiveTime) {
                        directions[j].x *= (summPriorityWithoutToLastPos[j] +
-                                       priorityToLastPos[j]);
+                                               priorityToLastPos[j]);
                        directions[j].x -= directionsToLastPos[j].x;
                        directions[j].x /= summPriorityWithoutToLastPos[j];
 
                        directions[j].y *= (summPriorityWithoutToLastPos[j] +
-                                       priorityToLastPos[j]);
+                                               priorityToLastPos[j]);
                        directions[j].y -= directionsToLastPos[j].y;
                        directions[j].y /= summPriorityWithoutToLastPos[j];
                }
diff --git a/mv_image/image/src/Tracking/ImageTrackingModel.cpp b/mv_image/image/src/Tracking/ImageTrackingModel.cpp
new file mode 100644 (file)
index 0000000..6240ea8
--- /dev/null
@@ -0,0 +1,362 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tracking/ImageTrackingModel.h"
+
+#include "Tracking/CascadeTracker.h"
+#include "Tracking/RecognitionBasedTracker.h"
+#include "Tracking/FeatureSubstitutionTracker.h"
+#include "Tracking/AsyncTracker.h"
+#include "Tracking/MFTracker.h"
+
+#include "mv_private.h"
+#include "mv_common.h"
+
+#include <app_common.h>
+
+#include <cstdlib>
+#include <fstream>
+#include <unistd.h>
+
+namespace MediaVision {
+namespace Image {
+/* Constructs an empty tracking model: no target, no tracker chain. */
+ImageTrackingModel::ImageTrackingModel() :
+               m_target(),
+               m_tracker(),
+               m_stabilizator(),
+               m_location(),
+               m_stabilizationParams()
+{
+       ; /* NULL */
+}
+
+/* Copy constructor: the tracker chain is deep-cloned (clone()) so the two
+ * models never share mutable tracker state; an empty source keeps NULL. */
+ImageTrackingModel::ImageTrackingModel(const ImageTrackingModel& copy) :
+               m_target(copy.m_target),
+               m_tracker(copy.m_tracker.empty()? NULL: copy.m_tracker->clone()),
+               m_stabilizator(copy.m_stabilizator),
+               m_location(copy.m_location),
+               m_stabilizationParams(copy.m_stabilizationParams)
+{
+       ; /* NULL */
+}
+
+void ImageTrackingModel::setTarget(const ImageObject& target)
+{
+       /* TODO: Here are all the settings.
+        *        This can be transferred to configuration file.
+        *
+        * Parameters of recognition based tracker
+        */
+
+       FeaturesExtractingParams orbFeatureExtractingParams;
+
+       orbFeatureExtractingParams.mKeypointType = KT_ORB;
+       orbFeatureExtractingParams.mDescriptorType = DT_ORB;
+       orbFeatureExtractingParams.ORB.mMaximumFeaturesNumber = 5000;
+       orbFeatureExtractingParams.ORB.mScaleFactor = 1.15;
+
+       RecognitionParams orbRecogParams;
+
+       orbRecogParams.mMinMatchesNumber = 70;
+       orbRecogParams.mRequiredMatchesPart = 0.005;
+       orbRecogParams.mTolerantMatchesPartError = 0.1;
+
+       /* Parameters of feature substitution tracker */
+
+       FeaturesExtractingParams gfttWbriefFeatureExtractingParams;
+
+       gfttWbriefFeatureExtractingParams.mKeypointType = KT_GFTT;
+       gfttWbriefFeatureExtractingParams.mDescriptorType = DT_BRIEF;
+
+       RecognitionParams gfttWbriefRecogParams;
+
+       gfttWbriefRecogParams.mMinMatchesNumber = 30;
+       gfttWbriefRecogParams.mRequiredMatchesPart = 0.05;
+       gfttWbriefRecogParams.mTolerantMatchesPartError = 0.1;
+
+       const float expectedOffset = 1.0f;
+
+       /* Parameters of median flow tracker */
+
+       MFTracker::Params medianflowTrackingParams;
+
+       medianflowTrackingParams.mPointsInGrid = 10;
+       medianflowTrackingParams.mWindowSize = cv::Size(16, 16);
+       medianflowTrackingParams.mPyrMaxLevel = 16;
+
+       /* Parameters of cascade tracker */
+
+       const float recognitionBasedTrackerPriotity = 1.0f;
+       const float featureSubstitutionTrackerPriotity = 0.6f;
+       const float medianFlowTrackerPriotity = 0.1f;
+
+       /* Parameters of stabilization */
+
+       m_stabilizationParams.mIsEnabled = true;
+       m_stabilizationParams.mHistoryAmount = 3;
+       m_stabilizationParams.mTolerantShift = 0.00006;
+       m_stabilizationParams.mTolerantShiftExtra = 1.3;
+       m_stabilizationParams.mStabilizationSpeed = 0.3;
+       m_stabilizationParams.mStabilizationAcceleration = 0.1;
+
+       /* Parameters definition is finished */
+
+       /* Creating a basic tracker which will have other trackers */
+
+       cv::Ptr<CascadeTracker> mainTracker = new CascadeTracker;
+
+       /* Adding asynchronous recognition based tracker */
+
+       cv::Ptr<RecognitionBasedTracker> recogTracker =
+                       new RecognitionBasedTracker(
+                                       target,
+                                       orbFeatureExtractingParams,
+                                       orbRecogParams);
+
+       cv::Ptr<AsyncTracker> asyncRecogTracker =
+                       new AsyncTracker(
+                                       recogTracker,
+                                       true);
+
+       mainTracker->enableTracker(
+                       asyncRecogTracker,
+                       recognitionBasedTrackerPriotity);
+
+       /* Adding asynchronous feature substitution based tracker */
+
+       cv::Ptr<FeatureSubstitutionTracker> substitutionTracker =
+                       new FeatureSubstitutionTracker(
+                                       gfttWbriefFeatureExtractingParams,
+                                       gfttWbriefRecogParams,
+                                       expectedOffset);
+
+       cv::Ptr<AsyncTracker> asyncSubstitutionTracker =
+                       new AsyncTracker(
+                                       substitutionTracker,
+                                       true);
+
+       mainTracker->enableTracker(
+                       asyncSubstitutionTracker,
+                       featureSubstitutionTrackerPriotity);
+
+       /* Adding median flow tracker */
+
+       cv::Ptr<MFTracker> mfTracker = new MFTracker(medianflowTrackingParams);
+
+       mainTracker->enableTracker(
+                       mfTracker,
+                       medianFlowTrackerPriotity);
+
+       m_tracker = mainTracker;
+       m_target = target;
+}
+
+/* A model is valid once a non-empty target has been set. */
+bool ImageTrackingModel::isValid() const
+{
+       return !(m_target.isEmpty());
+}
+
+/**
+ * Tracks the target on @a frame: delegates to the tracker cascade, then
+ * smooths the resulting contour with the stabilizator.
+ *
+ * @return false (with empty @a result) when no tracker is configured or
+ *         the underlying tracker loses the target.
+ */
+bool ImageTrackingModel::track(const cv::Mat& frame, std::vector<cv::Point>& result)
+{
+       result.clear();
+
+       if (m_tracker.empty())
+               return false;
+
+       /* On a failed track reset stabilization history so stale contours
+        * do not bias the next successful detection. */
+       if (!(m_tracker->track(frame, m_location))) {
+               m_stabilizator.reset();
+               return false;
+       }
+
+       /* int -> float contour conversion for the stabilizator */
+       const size_t numberOfContourPoints = m_location.size();
+       std::vector<cv::Point2f> stabilizedContour(numberOfContourPoints);
+       for (size_t i = 0; i < numberOfContourPoints; ++i) {
+               stabilizedContour[i].x = (float)m_location[i].x;
+               stabilizedContour[i].y = (float)m_location[i].y;
+       }
+
+       /* NOTE(review): m_stabilizationParams.mIsEnabled is never consulted,
+        * so stabilization is applied unconditionally — confirm intended. */
+       m_stabilizator.stabilize(stabilizedContour, m_stabilizationParams);
+       for (size_t i = 0; i < numberOfContourPoints; ++i) {
+               m_location[i].x = (int)stabilizedContour[i].x;
+               m_location[i].y = (int)stabilizedContour[i].y;
+       }
+
+       result = m_location;
+
+       return true;
+}
+
+/* Forgets the last tracked location; the next track() starts from scratch. */
+void ImageTrackingModel::refresh(void)
+{
+       m_location.clear();
+}
+
+/* Copy assignment: mirrors the copy constructor — the tracker chain is
+ * deep-cloned (or released when the source has none) so no tracker state
+ * is shared between models. Self-assignment is a no-op. */
+ImageTrackingModel& ImageTrackingModel::operator=(const ImageTrackingModel& copy)
+{
+       if (this != &copy) {
+               m_target = copy.m_target;
+               if (!copy.m_tracker.empty())
+                       m_tracker = copy.m_tracker->clone();
+               else
+                       m_tracker.release();
+
+               m_stabilizator = copy.m_stabilizator;
+               m_location = copy.m_location;
+               m_stabilizationParams = copy.m_stabilizationParams;
+       }
+
+       return *this;
+}
+
+int ImageTrackingModel::save(const char *filepath) const
+{
+       std::string filePath;
+       char *cPath = app_get_data_path();
+       if (NULL == cPath)
+               filePath = std::string(filepath);
+       else
+               filePath = std::string(cPath) + std::string(filepath);
+
+       std::string prefixPath = filePath.substr(0, filePath.find_last_of('/'));
+       LOGD("prefixPath: %s", prefixPath.c_str());
+
+       /* check the directory is available */
+       if (access(prefixPath.c_str(),F_OK)) {
+               LOGE("Can't save tracking model. Path[%s] doesn't existed.", filePath.c_str());
+
+               return MEDIA_VISION_ERROR_INVALID_PATH;
+       }
+
+       std::ofstream out;
+       out.open(filePath.c_str());
+
+       if (!out.is_open()) {
+               LOGE("[%s] Can't create/open file.", __FUNCTION__);
+               return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+       }
+
+       out<<(*this);
+
+       out.close();
+       LOGI("[%s] Image tracking model is saved.", __FUNCTION__);
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+int ImageTrackingModel::load(const char *filepath)
+{
+       std::string filePath;
+       char *cPath = app_get_data_path();
+       if (NULL == cPath)
+               filePath = std::string(filepath);
+       else
+               filePath = std::string(cPath) + std::string(filepath);
+
+       if (access(filePath.c_str(),F_OK)) {
+               LOGE("Can't load tracking model. Path[%s] doesn't existed.", filepath);
+
+               return MEDIA_VISION_ERROR_INVALID_PATH;
+       }
+
+       std::ifstream in;
+       in.open(filePath.c_str());
+
+       if (!in.is_open()) {
+               LOGE("[%s] Can't open file.", __FUNCTION__);
+               return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+       }
+
+       in>>(*this);
+
+       if (!in.good()) {
+               LOGE("[%s] Unexpected end of file.", __FUNCTION__);
+               return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+       }
+
+       in.close();
+       LOGI("[%s] Image tracking model is loaded.", __FUNCTION__);
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+/* Text serialization: target object, stabilization parameters (one per
+ * line), then the contour point count followed by x/y pairs. Must stay in
+ * sync with operator>> below. */
+std::ostream& operator << (std::ostream& os, const ImageTrackingModel& obj)
+{
+       os<<std::setprecision(7);
+
+       os<<obj.m_target;
+       os<<obj.m_stabilizationParams.mIsEnabled<<'\n';
+       os<<obj.m_stabilizationParams.mHistoryAmount<<'\n';
+       os<<obj.m_stabilizationParams.mStabilizationSpeed<<'\n';
+       os<<obj.m_stabilizationParams.mStabilizationAcceleration<<'\n';
+       os<<obj.m_stabilizationParams.mTolerantShift<<'\n';
+       os<<obj.m_stabilizationParams.mTolerantShiftExtra<<'\n';
+
+       const size_t numberOfContourPoints = obj.m_location.size();
+       os<<numberOfContourPoints<<'\n';
+       for (size_t pointNum = 0u; pointNum < numberOfContourPoints; ++pointNum)
+               os<<' '<<obj.m_location[pointNum].x<<' '<<obj.m_location[pointNum].y;
+
+       os<<'\n';
+
+       return os;
+}
+
+/* Text deserialization matching operator<< above. Reads into locals first
+ * so a truncated stream (early return via the macro) leaves @a obj
+ * unchanged; only a non-empty target rebuilds the tracker cascade. */
+std::istream& operator >> (std::istream& is, ImageTrackingModel& obj)
+{
+#define MEDIA_VISION_CHECK_IFSTREAM \
+       if (!is.good()) { \
+               return is; \
+       }
+
+       ImageObject target;
+       std::vector<cv::Point> location;
+
+       is>>target;
+       MEDIA_VISION_CHECK_IFSTREAM
+
+       /* A failed extraction here sets the stream's failbit, which persists;
+        * it is caught by the next MEDIA_VISION_CHECK_IFSTREAM below. */
+       StabilizationParams params;
+       is>>params.mIsEnabled;
+       is>>params.mHistoryAmount;
+       is>>params.mStabilizationSpeed;
+       is>>params.mStabilizationAcceleration;
+       is>>params.mTolerantShift;
+       is>>params.mTolerantShiftExtra;
+
+       size_t numberOfContourPoints = 0u;
+       is>>numberOfContourPoints;
+       MEDIA_VISION_CHECK_IFSTREAM
+
+       location.resize(numberOfContourPoints);
+       for (size_t pointNum = 0u; pointNum < numberOfContourPoints; ++pointNum) {
+               is>>location[pointNum].x;
+               MEDIA_VISION_CHECK_IFSTREAM
+               is>>location[pointNum].y;
+               MEDIA_VISION_CHECK_IFSTREAM
+       }
+
+#undef MEDIA_VISION_CHECK_IFSTREAM
+
+       obj.m_stabilizationParams = params;
+       obj.m_location = location;
+       /* setTarget() installs m_tracker, so the dereference below is safe. */
+       if (!(target.isEmpty())) {
+               obj.setTarget(target);
+               obj.m_tracker->reinforcement(location);
+       }
+
+       return is;
+}
+
+} /* Image */
+} /* MediaVision */
diff --git a/mv_image/image/src/Tracking/MFTracker.cpp b/mv_image/image/src/Tracking/MFTracker.cpp
new file mode 100644 (file)
index 0000000..9dc20cc
--- /dev/null
@@ -0,0 +1,410 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tracking/MFTracker.h"
+
+#include <opencv/cv.h>
+
+namespace MediaVision {
+namespace Image {
+namespace {
+       const float FloatEps = 10e-6f;
+
+       /* Median of the first @a size elements (whole vector when size == -1);
+        * averages the two middle elements for even sizes. Works on a copy so
+        * @a values is not reordered.
+        * NOTE(review): empty input (size == 0) would index out of bounds —
+        * callers are assumed to pass non-empty data; confirm. */
+       template<typename T>
+       T getMedian(std::vector<T>& values, int size = -1) {
+               if (size == -1)
+                       size = (int)values.size();
+
+               std::vector<T> copy(values.begin(), values.begin() + size);
+               std::sort(copy.begin(),copy.end());
+               if(size%2==0)
+                       return (copy[size/2-1]+copy[size/2])/((T)2.0);
+               else
+                       return copy[(size - 1) / 2];
+
+       }
+
+       /* Euclidean distance between two 2D points. */
+       inline float l2distance(cv::Point2f p1, cv::Point2f p2) {
+               const float dx = p1.x - p2.x;
+               const float dy = p1.y - p2.y;
+               return sqrtf(dx * dx + dy * dy);
+       }
+} /* anonymous namespace */
+
+/* Default median-flow parameters: 10x10 point grid, 3x3 optical-flow
+ * window, 5 pyramid levels. */
+MFTracker::Params::Params()
+{
+       mPointsInGrid = 10;
+       mWindowSize = cv::Size(3, 3);
+       mPyrMaxLevel = 5;
+}
+
+/* Constructs an uninitialized tracker; Lucas-Kanade iterations stop after
+ * 20 steps or when the update falls below 0.3. */
+MFTracker::MFTracker(Params params) :
+               m_isInit(false),
+               m_params(params),
+               m_termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.3),
+               m_confidence(0.0f)
+{
+}
+
+/**
+ * Tracks the object on @a frame. On the first call (or after a failure)
+ * the tracker initializes from @a frame; afterwards it updates the
+ * bounding box by median flow. The output contour is reconstructed from
+ * m_startLocation, which stores points normalized to the bounding box.
+ *
+ * @return false when there is no start location or tracking failed.
+ */
+bool MFTracker::track(const cv::Mat& frame, std::vector<cv::Point>& result)
+{
+       result.clear();
+
+       if (!m_isInit) {
+               if (m_startLocation.empty())
+                       return false;
+
+               if (!init(frame))
+                       return false;
+       } else {
+               /* A failed update invalidates the tracker; reinforcement()
+                * must provide a new location before tracking resumes. */
+               if (!update(frame)) {
+                       m_isInit = false;
+                       m_startLocation.clear();
+                       return false;
+               }
+       }
+
+       /* Denormalize the stored contour into the current bounding box. */
+       const size_t numberOfContourPoints = m_startLocation.size();
+       result.resize(numberOfContourPoints);
+
+       for (size_t i = 0; i < numberOfContourPoints; ++i) {
+               result[i].x = (int)(m_boundingBox.x +
+                                       m_startLocation[i].x * m_boundingBox.width);
+               result[i].y = (int)(m_boundingBox.y +
+                                       m_startLocation[i].y * m_boundingBox.height);
+       }
+
+       return true;
+}
+
+/**
+ * Externally corrects the tracked contour: stores its bounding box and the
+ * contour points normalized to that box, and forces re-initialization on
+ * the next track() call. Fewer than three points clears the state.
+ */
+void MFTracker::reinforcement(const std::vector<cv::Point>& location)
+{
+       m_isInit = false;
+
+       if (location.size() < 3) {
+               m_startLocation.clear();
+               m_boundingBox.x = 0;
+               m_boundingBox.y = 0;
+               m_boundingBox.width = 0;
+               m_boundingBox.height = 0;
+
+               return;
+       }
+
+       const cv::Rect_<float>& boundingBox = cv::boundingRect(location);
+       m_boundingBox = boundingBox;
+
+       /* NOTE(review): collinear input points give a zero-width/height box
+        * and a division by zero below — confirm callers exclude that case. */
+       const size_t numberOfContourPoints = location.size();
+       m_startLocation.resize(numberOfContourPoints);
+       for (size_t i = 0; i < numberOfContourPoints; ++i) {
+               m_startLocation[i].x = (location[i].x - boundingBox.x) / boundingBox.width;
+               m_startLocation[i].y = (location[i].y - boundingBox.y) / boundingBox.height;
+       }
+}
+
+/* Deep-copies this tracker through the copy constructor. */
+cv::Ptr<ObjectTracker> MFTracker::clone() const
+{
+       return cv::Ptr<ObjectTracker>(new MFTracker(*this));
+}
+
+/* Initializes tracking from @a image: stores a copy as the reference frame
+ * and precomputes its optical-flow pyramid for calcOpticalFlowPyrLK. */
+bool MFTracker::init(const cv::Mat& image)
+{
+       if (image.empty())
+               return false;
+
+       image.copyTo(m_image);
+       buildOpticalFlowPyramid(
+                       m_image,
+                       m_pyramid,
+                       m_params.mWindowSize,
+                       m_params.mPyrMaxLevel);
+
+       m_isInit = true;
+       return m_isInit;
+}
+
+/**
+ * Advances tracking by one frame: runs the median-flow step between the
+ * stored reference frame and @a image, then adopts @a image as the new
+ * reference and the voted box as the new bounding box.
+ */
+bool MFTracker::update(const cv::Mat& image)
+{
+       if (!m_isInit || image.empty())
+               return false;
+
+       /* Handles the case when the preparation frame has a size different
+        * from the tracking frame size. The preparation frame and bounding
+        * box are resized to match, then tracking proceeds as usual.
+        */
+       if (m_image.rows != image.rows || m_image.cols != image.cols) {
+               const float xFactor = (float) image.cols / m_image.cols;
+               const float yFactor = (float) image.rows / m_image.rows;
+
+               resize(m_image, m_image, cv::Size(), xFactor, yFactor);
+
+               m_boundingBox.x *= xFactor;
+               m_boundingBox.y *= yFactor;
+               m_boundingBox.width *= xFactor;
+               m_boundingBox.height *= yFactor;
+       }
+
+       cv::Mat oldImage = m_image;
+
+       /* medianFlowImpl updates oldBox in place on success. */
+       cv::Rect_<float> oldBox = m_boundingBox;
+       if(!medianFlowImpl(oldImage, image, oldBox))
+               return false;
+
+       image.copyTo(m_image);
+       m_boundingBox = oldBox;
+
+       return true;
+}
+
+/* True between a successful init()/update() and the next failure/reset. */
+bool MFTracker::isInited() const
+{
+       return m_isInit;
+}
+
+/* Confidence of the last median-flow step.
+ * NOTE(review): only initialized to 0 in the ctor within this view —
+ * presumably updated in medianFlowImpl(); verify. */
+float MFTracker::getLastConfidence() const
+{
+       return m_confidence;
+}
+
+/* Bounding box produced by the last successful tracking step. */
+cv::Rect_<float> MFTracker::getLastBoundingBox() const
+{
+       return m_boundingBox;
+}
+
+bool MFTracker::medianFlowImpl(
+               cv::Mat oldImage_gray, cv::Mat newImage_gray, cv::Rect_<float>& oldBox)
+{
+       std::vector<cv::Point2f> pointsToTrackOld, pointsToTrackNew;
+
+       const float gridXStep = oldBox.width / m_params.mPointsInGrid;
+       const float gridYStep = oldBox.height / m_params.mPointsInGrid;
+       for (int i = 0; i < m_params.mPointsInGrid; i++) {
+               for (int j = 0; j < m_params.mPointsInGrid; j++) {
+                       pointsToTrackOld.push_back(
+                                       cv::Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j,
+                                                       oldBox.y + .5f*gridYStep + 1.f*gridYStep*i));
+               }
+       }
+
+       const size_t numberOfPointsToTrackOld = pointsToTrackOld.size();
+       std::vector<uchar> status(numberOfPointsToTrackOld);
+       std::vector<float> errors(numberOfPointsToTrackOld);
+
+       std::vector<cv::Mat> tempPyramid;
+       cv::buildOpticalFlowPyramid(
+                                                       newImage_gray,
+                                                       tempPyramid,
+                                                       m_params.mWindowSize,
+                                                       m_params.mPyrMaxLevel);
+
+       cv::calcOpticalFlowPyrLK(m_pyramid,
+                                                       tempPyramid,
+                                                       pointsToTrackOld,
+                                                       pointsToTrackNew,
+                                                       status,
+                                                       errors,
+                                                       m_params.mWindowSize,
+                                                       m_params.mPyrMaxLevel,
+                                                       m_termcrit);
+
+       std::vector<cv::Point2f> di;
+       for (size_t idx = 0u; idx < numberOfPointsToTrackOld; idx++) {
+               if (status[idx] == 1)
+                       di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]);
+       }
+
+       std::vector<bool> filter_status;
+       check_FB(tempPyramid,
+                       pointsToTrackOld,
+                       pointsToTrackNew,
+                       filter_status);
+
+       check_NCC(oldImage_gray,
+                       newImage_gray,
+                       pointsToTrackOld,
+                       pointsToTrackNew,
+                       filter_status);
+
+       for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++) {
+               if (!filter_status[idx]) {
+                       pointsToTrackOld.erase(pointsToTrackOld.begin() + idx);
+                       pointsToTrackNew.erase(pointsToTrackNew.begin() + idx);
+                       filter_status.erase(filter_status.begin() + idx);
+                       idx--;
+               }
+       }
+
+       if (pointsToTrackOld.empty() || di.empty())
+               return false;
+
+       cv::Point2f mDisplacement;
+       cv::Rect_<float> boxCandidate =
+                               vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement);
+
+       std::vector<float> displacements;
+       for (size_t idx = 0u; idx < di.size(); idx++) {
+               di[idx] -= mDisplacement;
+               displacements.push_back(sqrt(di[idx].ddot(di[idx])));
+       }
+
+       m_confidence =
+                               (10.f - getMedian(displacements,(int)displacements.size())) / 10.f;
+
+       if (m_confidence < 0.f) {
+               m_confidence = 0.f;
+               return false;
+       }
+
+       m_pyramid.swap(tempPyramid);
+       oldBox = boxCandidate;
+       return true;
+}
+
+cv::Rect_<float> MFTracker::vote(
+               const std::vector<cv::Point2f>& oldPoints,
+               const std::vector<cv::Point2f>& newPoints,
+               const cv::Rect_<float>& oldRect,
+               cv::Point2f& mD)
+{
+       cv::Rect_<float> newRect;
+       cv::Point2f newCenter(
+                       oldRect.x + oldRect.width / 2.f,
+                       oldRect.y + oldRect.height / 2.f);
+
+       const int n = (int)oldPoints.size();
+       std::vector<float> buf(std::max( n*(n-1) / 2, 3), 0.f);
+
+       if(oldPoints.size() == 1) {
+               newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x;
+               newRect.y = oldRect.y+newPoints[0].y-oldPoints[0].y;
+               newRect.width=oldRect.width;
+               newRect.height=oldRect.height;
+
+               return newRect;
+       }
+
+       float xshift = 0.f;
+       float yshift = 0.f;
+       for(int i = 0; i < n; i++)
+               buf[i] = newPoints[i].x - oldPoints[i].x;
+
+       xshift = getMedian(buf, n);
+       newCenter.x += xshift;
+       for(int idx = 0; idx < n; idx++)
+               buf[idx] = newPoints[idx].y - oldPoints[idx].y;
+
+       yshift = getMedian(buf, n);
+       newCenter.y += yshift;
+       mD = cv::Point2f(xshift, yshift);
+
+       if(oldPoints.size() == 1) {
+               newRect.x = newCenter.x - oldRect.width / 2.f;
+               newRect.y = newCenter.y - oldRect.height / 2.f;
+               newRect.width = oldRect.width;
+               newRect.height = oldRect.height;
+
+               return newRect;
+       }
+
+       float nd = 0.f;
+       float od = 0.f;
+       for (int i = 0, ctr = 0; i < n; i++) {
+               for(int j = 0; j < i; j++) {
+                       nd = l2distance(newPoints[i], newPoints[j]);
+                       od = l2distance(oldPoints[i], oldPoints[j]);
+                       buf[ctr] = (od == 0.f ? 0.f : nd / od);
+                       ctr++;
+               }
+       }
+
+       float scale = getMedian(buf, n*(n-1) / 2);
+       newRect.x = newCenter.x - scale * oldRect.width / 2.f;
+       newRect.y = newCenter.y-scale * oldRect.height / 2.f;
+       newRect.width = scale * oldRect.width;
+       newRect.height = scale * oldRect.height;
+
+       return newRect;
+}
+
+void MFTracker::check_FB(
+               std::vector<cv::Mat> newPyramid,
+               const std::vector<cv::Point2f>& oldPoints,
+               const std::vector<cv::Point2f>& newPoints,
+               std::vector<bool>& status)
+{
+       const size_t numberOfOldPoints = oldPoints.size();
+
+       if(status.empty())
+               status = std::vector<bool>(numberOfOldPoints, true);
+
+       std::vector<uchar> LKstatus(numberOfOldPoints);
+       std::vector<float> errors(numberOfOldPoints);
+       std::vector<float> FBerror(numberOfOldPoints);
+       std::vector<cv::Point2f> pointsToTrackReprojection;
+
+       calcOpticalFlowPyrLK(newPyramid,
+                                               m_pyramid,
+                                               newPoints,
+                                               pointsToTrackReprojection,
+                                               LKstatus,
+                                               errors,
+                                               m_params.mWindowSize,
+                                               m_params.mPyrMaxLevel,
+                                               m_termcrit);
+
+       for (size_t idx = 0u; idx < numberOfOldPoints; idx++)
+               FBerror[idx] = l2distance(oldPoints[idx], pointsToTrackReprojection[idx]);
+
+       float FBerrorMedian = getMedian(FBerror) + FloatEps;
+       for (size_t idx = 0u; idx < numberOfOldPoints; idx++)
+               status[idx] = (FBerror[idx] < FBerrorMedian);
+}
+
+void MFTracker::check_NCC(
+               const cv::Mat& oldImage,
+               const cv::Mat& newImage,
+               const std::vector<cv::Point2f>& oldPoints,
+               const std::vector<cv::Point2f>& newPoints,
+               std::vector<bool>& status)
+{
+       std::vector<float> NCC(oldPoints.size(), 0.f);
+       cv::Size patch(30, 30);
+       cv::Mat p1;
+       cv::Mat p2;
+
+       for (size_t idx = 0u; idx < oldPoints.size(); idx++) {
+               getRectSubPix(oldImage, patch, oldPoints[idx], p1);
+               getRectSubPix(newImage, patch, newPoints[idx], p2);
+
+               const int N = 900;
+               const float s1 = sum(p1)(0);
+               const float s2 = sum(p2)(0);
+               const float n1 = norm(p1);
+               const float n2 = norm(p2);
+               const float prod = p1.dot(p2);
+               const float sq1 = sqrt(n1 * n1 - s1 * s1 / N);
+               const float sq2 = sqrt(n2 * n2 - s2 * s2 / N);
+               NCC[idx] = (sq2==0 ? sq1 / std::abs(sq1)
+                                                       : (prod - s1 * s2 / N) / sq1 / sq2);
+       }
+
+       float median = getMedian(NCC) - FloatEps;
+       for(size_t idx = 0u; idx < oldPoints.size(); idx++)
+               status[idx] = status[idx] && (NCC[idx] > median);
+}
+
+} /* Image */
+} /* MediaVision */
diff --git a/mv_image/image/src/Tracking/ObjectTracker.cpp b/mv_image/image/src/Tracking/ObjectTracker.cpp
new file mode 100644 (file)
index 0000000..4e73730
--- /dev/null
@@ -0,0 +1,27 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tracking/ObjectTracker.h"
+
+namespace MediaVision {
+namespace Image {
+ObjectTracker::~ObjectTracker()
+{
+       ; /* NULL */
+}
+
+} /* Image */
+} /* MediaVision */
diff --git a/mv_image/image/src/Tracking/RecognitionBasedTracker.cpp b/mv_image/image/src/Tracking/RecognitionBasedTracker.cpp
new file mode 100644 (file)
index 0000000..218ac16
--- /dev/null
@@ -0,0 +1,77 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tracking/RecognitionBasedTracker.h"
+
+#include "Recognition/ImageRecognizer.h"
+
+namespace MediaVision {
+namespace Image {
+RecognitionBasedTracker::RecognitionBasedTracker(
+               const ImageObject& target,
+               const FeaturesExtractingParams& sceneFeaturesExtractingParams,
+               const RecognitionParams& recognitionParams) :
+                               m_target(target),
+                               m_sceneFeatureExtractingParams(sceneFeaturesExtractingParams),
+                               m_recogParams(recognitionParams)
+{
+       ; /* NULL */
+}
+
+RecognitionBasedTracker::~RecognitionBasedTracker()
+{
+       ; /* NULL */
+}
+
+bool RecognitionBasedTracker::track(
+               const cv::Mat& frame,
+               std::vector<cv::Point>& result)
+{
+       result.clear();
+
+       ImageObject scene(frame, m_sceneFeatureExtractingParams);
+
+       ImageRecognizer recognizer(scene);
+
+       std::vector<cv::Point2f> contour;
+
+       bool isRecognized = recognizer.recognize(m_target, m_recogParams, contour);
+
+       if (isRecognized) {
+               size_t numberOfContourPoints = contour.size();
+               result.resize(numberOfContourPoints);
+               for(size_t i = 0u; i < numberOfContourPoints; ++i) {
+                       result[i].x = (int)contour[i].x;
+                       result[i].y = (int)contour[i].y;
+               }
+       }
+
+       return isRecognized;
+}
+
+void RecognitionBasedTracker::reinforcement(const std::vector<cv::Point>& location)
+{
+       ; /* The tracker is based on recognition over the entire image,
+          * so reinforcement does not make sense here. */
+}
+
+cv::Ptr<ObjectTracker> RecognitionBasedTracker::clone() const
+{
+       return cv::Ptr<ObjectTracker>(new RecognitionBasedTracker(*this));
+}
+
+} /* Image */
+} /* MediaVision */
index 8c81168..df17707 100644 (file)
 #include "mv_private.h"
 #include "mv_common_c.h"
 
-#include "ImageObject.h"
-#include "ImageRecognizer.h"
-#include "ImageTrackingModel.h"
-#include "ImageTracker.h"
+#include "ImageConfig.h"
+#include "Features/FeatureExtractor.h"
+#include "Features/ORBExtractorFactory.h"
+#include "Recognition/ImageObject.h"
+#include "Recognition/ImageRecognizer.h"
+#include "Tracking/ImageTrackingModel.h"
 
 #include <opencv/cv.h>
 
 namespace {
-const MediaVision::Image::FeaturesExtractingParams
-               defaultObjectFeaturesExtractingParams(1.2, 1000);
+class DefaultConfiguration {
+public:
+       static const DefaultConfiguration& getInstance();
 
-const MediaVision::Image::FeaturesExtractingParams
-               defaultSceneFeaturesExtractingParams(1.2, 5000);
+       MediaVision::Image::FeaturesExtractingParams getObjectFeaturesExtractingParams() const;
 
-const MediaVision::Image::RecognitionParams
-               defaultRecognitionParams(15, 0.33, 0.1);
+       MediaVision::Image::FeaturesExtractingParams getSceneFeaturesExtractingParams() const;
 
-const MediaVision::Image::StabilizationParams
-               defaultStabilizationParams(3, 0.006, 2, 0.001);
+       MediaVision::Image::RecognitionParams getRecognitionParams() const;
 
-const MediaVision::Image::TrackingParams
-               defaultTrackingParams(
-                               defaultSceneFeaturesExtractingParams,
-                               defaultRecognitionParams,
-                               defaultStabilizationParams,
-                               0.0);
+       MediaVision::Image::StabilizationParams getStabilizationParams() const;
+
+       MediaVision::Image::TrackingParams getTrackingParams() const;
+
+private:
+       DefaultConfiguration();
+
+private:
+       static DefaultConfiguration instance;
+
+       MediaVision::Image::FeaturesExtractingParams m_objectFeaturesExtractingParams;
+
+       MediaVision::Image::FeaturesExtractingParams m_sceneFeaturesExtractingParams;
+
+       MediaVision::Image::RecognitionParams m_recognitionParams;
+
+       MediaVision::Image::StabilizationParams m_stabilizationParams;
+
+       MediaVision::Image::TrackingParams m_trackingParams;
+};
+
+DefaultConfiguration DefaultConfiguration::instance;
+
+DefaultConfiguration::DefaultConfiguration() :
+               m_objectFeaturesExtractingParams(),
+               m_sceneFeaturesExtractingParams(),
+               m_recognitionParams(15, 0.33, 0.1),
+               m_stabilizationParams(true, 3, 0.00006, 1.3, 2, 0.001),
+               m_trackingParams()
+{
+       m_objectFeaturesExtractingParams.mKeypointType = MediaVision::Image::KT_ORB;
+       m_objectFeaturesExtractingParams.mDescriptorType = MediaVision::Image::DT_ORB;
+       m_objectFeaturesExtractingParams.ORB.mScaleFactor = 1.2;
+       m_objectFeaturesExtractingParams.ORB.mMaximumFeaturesNumber = 1000;
+
+       m_sceneFeaturesExtractingParams.mKeypointType = MediaVision::Image::KT_ORB;
+       m_sceneFeaturesExtractingParams.mDescriptorType = MediaVision::Image::DT_ORB;
+       m_sceneFeaturesExtractingParams.ORB.mScaleFactor = 1.2;
+       m_sceneFeaturesExtractingParams.ORB.mMaximumFeaturesNumber = 5000;
+
+       m_trackingParams.mFramesFeaturesExtractingParams = m_sceneFeaturesExtractingParams;
+       m_trackingParams.mRecognitionParams = m_recognitionParams;
+       m_trackingParams.mStabilizationParams = m_stabilizationParams;
+       m_trackingParams.mExpectedOffset = 0.0;
+}
+
+const DefaultConfiguration& DefaultConfiguration::getInstance()
+{
+       return instance;
+}
+
+MediaVision::Image::FeaturesExtractingParams
+DefaultConfiguration::getObjectFeaturesExtractingParams() const
+{
+       return m_objectFeaturesExtractingParams;
+}
+
+MediaVision::Image::FeaturesExtractingParams
+DefaultConfiguration::getSceneFeaturesExtractingParams() const
+{
+       return m_sceneFeaturesExtractingParams;
+}
+
+MediaVision::Image::RecognitionParams
+DefaultConfiguration::getRecognitionParams() const
+{
+       return m_recognitionParams;
+}
+
+MediaVision::Image::StabilizationParams
+DefaultConfiguration::getStabilizationParams() const
+{
+       return m_stabilizationParams;
+}
+
+MediaVision::Image::TrackingParams
+DefaultConfiguration::getTrackingParams() const
+{
+       return m_trackingParams;
+}
 
 void extractTargetFeaturesExtractingParams(
                mv_engine_config_h engine_cfg,
@@ -58,17 +132,18 @@ void extractTargetFeaturesExtractingParams(
                working_cfg = engine_cfg;
        }
 
-       featuresExtractingParams = defaultObjectFeaturesExtractingParams;
+       featuresExtractingParams =
+                       DefaultConfiguration::getInstance().getObjectFeaturesExtractingParams();
 
        mv_engine_config_get_double_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_RECOGNITION_OBJECT_SCALE_FACTOR",
-                       &featuresExtractingParams.mScaleFactor);
+                       MV_IMAGE_RECOGNITION_OBJECT_SCALE_FACTOR,
+                       &featuresExtractingParams.ORB.mScaleFactor);
 
        mv_engine_config_get_int_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_RECOGNITION_OBJECT_MAX_KEYPOINTS_NUM",
-                       &featuresExtractingParams.mMaximumFeaturesNumber);
+                       MV_IMAGE_RECOGNITION_OBJECT_MAX_KEYPOINTS_NUM,
+                       &featuresExtractingParams.ORB.mMaximumFeaturesNumber);
 
        if (NULL == engine_cfg) {
                mv_destroy_engine_config(working_cfg);
@@ -87,17 +162,18 @@ void extractSceneFeaturesExtractingParams(
                working_cfg = engine_cfg;
        }
 
-       featuresExtractingParams = defaultSceneFeaturesExtractingParams;
+       featuresExtractingParams =
+                       DefaultConfiguration::getInstance().getSceneFeaturesExtractingParams();
 
        mv_engine_config_get_double_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_RECOGNITION_SCENE_SCALE_FACTOR",
-                       &featuresExtractingParams.mScaleFactor);
+                       MV_IMAGE_RECOGNITION_SCENE_SCALE_FACTOR,
+                       &featuresExtractingParams.ORB.mScaleFactor);
 
        mv_engine_config_get_int_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_RECOGNITION_SCENE_MAX_KEYPOINTS_NUM",
-                       &featuresExtractingParams.mMaximumFeaturesNumber);
+                       MV_IMAGE_RECOGNITION_SCENE_MAX_KEYPOINTS_NUM,
+                       &featuresExtractingParams.ORB.mMaximumFeaturesNumber);
 
        if (NULL == engine_cfg) {
                mv_destroy_engine_config(working_cfg);
@@ -116,22 +192,23 @@ void extractRecognitionParams(
                working_cfg = engine_cfg;
        }
 
-       recognitionParams = defaultRecognitionParams;
+       recognitionParams =
+                       DefaultConfiguration::getInstance().getRecognitionParams();
 
        mv_engine_config_get_int_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_RECOGNITION_MIN_MATCH_NUM",
+                       MV_IMAGE_RECOGNITION_MIN_MATCH_NUM,
                        &recognitionParams.mMinMatchesNumber);
 
        mv_engine_config_get_double_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_RECOGNITION_REQ_MATCH_PART",
+                       MV_IMAGE_RECOGNITION_REQ_MATCH_PART,
                        &recognitionParams.mRequiredMatchesPart);
 
        mv_engine_config_get_double_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_RECOGNITION_TOLERANT_MATCH_PART_ERR",
-                       &recognitionParams.mAllowableMatchesPartError);
+                       MV_IMAGE_RECOGNITION_TOLERANT_MATCH_PART_ERR,
+                       &recognitionParams.mTolerantMatchesPartError);
 
        if (NULL == engine_cfg) {
                mv_destroy_engine_config(working_cfg);
@@ -150,40 +227,32 @@ void extractStabilizationParams(
                working_cfg = engine_cfg;
        }
 
-       stabilizationParams = defaultStabilizationParams;
+       stabilizationParams =
+                       DefaultConfiguration::getInstance().getStabilizationParams();
 
-       bool useStabilization = true;
        mv_engine_config_get_bool_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_TRACKING_USE_STABLIZATION",
-                       &useStabilization);
-
-       if (!useStabilization) {
-               stabilizationParams.mHistoryAmount = 0;
-               if (NULL == engine_cfg) {
-                       mv_destroy_engine_config(working_cfg);
-               }
-               return;
-       }
+                       MV_IMAGE_TRACKING_USE_STABLIZATION,
+                       &stabilizationParams.mIsEnabled);
 
        mv_engine_config_get_int_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_TRACKING_HISTORY_AMOUNT",
+                       MV_IMAGE_TRACKING_HISTORY_AMOUNT,
                        &stabilizationParams.mHistoryAmount);
 
        mv_engine_config_get_double_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT",
-                       &stabilizationParams.mAllowableShift);
+                       MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT,
+                       &stabilizationParams.mTolerantShift);
 
        mv_engine_config_get_double_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_TRACKING_STABLIZATION_SPEED",
+                       MV_IMAGE_TRACKING_STABLIZATION_SPEED,
                        &stabilizationParams.mStabilizationSpeed);
 
        mv_engine_config_get_double_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION",
+                       MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION,
                        &stabilizationParams.mStabilizationAcceleration);
 
        if (NULL == engine_cfg) {
@@ -203,7 +272,8 @@ void extractTrackingParams(
                working_cfg = engine_cfg;
        }
 
-       trackingParams = defaultTrackingParams;
+       trackingParams =
+                       DefaultConfiguration::getInstance().getTrackingParams();
 
        extractSceneFeaturesExtractingParams(
                        working_cfg,
@@ -219,7 +289,7 @@ void extractTrackingParams(
 
        mv_engine_config_get_double_attribute_c(
                        working_cfg,
-                       "MV_IMAGE_TRACKING_EXPECTED_OFFSET",
+                       MV_IMAGE_TRACKING_EXPECTED_OFFSET,
                        &trackingParams.mExpectedOffset);
 
        if (NULL == engine_cfg) {
@@ -344,11 +414,12 @@ int mv_image_recognize_open(
        MediaVision::Image::FeaturesExtractingParams featuresExtractingParams;
        extractSceneFeaturesExtractingParams(engine_cfg, featuresExtractingParams);
 
+       MediaVision::Image::ImageObject sceneImageObject(scene, featuresExtractingParams);
+
        MediaVision::Image::RecognitionParams recognitionParams;
        extractRecognitionParams(engine_cfg, recognitionParams);
 
-       MediaVision::Image::ImageRecognizer recognizer(scene,
-                       featuresExtractingParams);
+       MediaVision::Image::ImageRecognizer recognizer(sceneImageObject);
 
        mv_quadrangle_s *resultLocations[number_of_objects];
 
@@ -415,16 +486,13 @@ int mv_image_track_open(
                        convertSourceMV2GrayCV(source, frame),
                        "Failed to convert mv_source.");
 
-       MediaVision::Image::ImageTracker tracker(trackingParams);
-
        MediaVision::Image::ImageTrackingModel *trackingModel =
                        (MediaVision::Image::ImageTrackingModel*)image_tracking_model;
 
-       tracker.track(frame, *trackingModel);
-
-       std::vector<cv::Point2f> resultContour = trackingModel->getLastlocation();
+       std::vector<cv::Point> resultContour;
+       const bool isTracked = trackingModel->track(frame, resultContour);
 
-       if (trackingModel->isDetected() &&
+       if (isTracked &&
                MediaVision::Image::NumberOfQuadrangleCorners == resultContour.size()) {
                mv_quadrangle_s result;
                for (size_t pointNum = 0u;
@@ -478,22 +546,30 @@ int mv_image_object_fill_open(
                        convertSourceMV2GrayCV(source, image),
                        "Failed to convert mv_source.");
 
+       std::vector<cv::Point2f> roi;
+       if (NULL != location) {
+               roi.resize(4);
+
+               roi[0].x = location->point.x;
+               roi[0].y = location->point.y;
+
+               roi[1].x = roi[0].x + location->width;
+               roi[1].y = roi[0].y;
+
+               roi[2].x = roi[1].x;
+               roi[2].y = roi[1].y + location->height;
+
+               roi[3].x = roi[0].x;
+               roi[3].y = roi[2].y;
+       }
+
        MediaVision::Image::FeaturesExtractingParams featuresExtractingParams;
        extractTargetFeaturesExtractingParams(engine_cfg, featuresExtractingParams);
 
-       if (NULL == location) {
-               ((MediaVision::Image::ImageObject*)image_object)->fill(image,
-                               featuresExtractingParams);
-       } else {
-               if (!((MediaVision::Image::ImageObject*)image_object)->fill(image,
-                                                               cv::Rect(location->point.x, location->point.y,
-                                                               location->width, location->height),
-                                                               featuresExtractingParams)) {
-                       /* Wrong ROI (bounding box) */
-                       LOGE("[%s] Wrong ROI.", __FUNCTION__);
-                       return MEDIA_VISION_ERROR_INVALID_DATA;
-               }
-       }
+       static_cast<MediaVision::Image::ImageObject*>(image_object)->fill(
+                       image,
+                       featuresExtractingParams,
+                       roi);
 
        return MEDIA_VISION_ERROR_NONE;
 }
@@ -668,7 +744,7 @@ int mv_image_tracking_model_clone_open(
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
 
-       *(MediaVision::Image::ImageObject*)(*dst) = *(MediaVision::Image::ImageObject*)src;
+       *(MediaVision::Image::ImageTrackingModel*)(*dst) = *(MediaVision::Image::ImageTrackingModel*)src;
 
        LOGD("Image tracking model has been successfully cloned");
        return MEDIA_VISION_ERROR_NONE;
index 9d68686..db59b53 100644 (file)
@@ -1,6 +1,6 @@
 Name:        capi-media-vision
 Summary:     Media Vision library for Tizen Native API
-Version:     0.2.4
+Version:     0.2.5
 Release:     0
 Group:       Multimedia/Framework
 License:     Apache-2.0 and BSD-2.0