Migration to OpenCV 3.4.0 87/170787/2
author Tae-Young Chung <ty83.chung@samsung.com>
Thu, 8 Feb 2018 06:18:52 +0000 (15:18 +0900)
committer Tae-Young Chung <ty83.chung@samsung.com>
Mon, 26 Feb 2018 07:27:23 +0000 (16:27 +0900)
Change-Id: Ie75974180cc868dce34b1b4dddbac6cbf3531a6f
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
48 files changed:
mv_barcode/barcode_generator/src/BarcodeGenerator.cpp
mv_face/face/CMakeLists.txt
mv_face/face/include/FaceDetector.h
mv_face/face/include/FaceExpressionRecognizer.h
mv_face/face/include/FaceEyeCondition.h
mv_face/face/include/FaceRecognitionModel.h
mv_face/face/include/FaceTracker.h [moved from mv_face/face/include/TrackerMedianFlow.h with 90% similarity]
mv_face/face/include/FaceTrackingModel.h
mv_face/face/include/FaceUtil.h
mv_face/face/src/FaceExpressionRecognizer.cpp
mv_face/face/src/FaceRecognitionModel.cpp
mv_face/face/src/FaceTracker.cpp [moved from mv_face/face/src/TrackerMedianFlow.cpp with 89% similarity]
mv_face/face/src/FaceTrackingModel.cpp
mv_face/face/src/FaceUtil.cpp
mv_image/image/CMakeLists.txt
mv_image/image/include/Features/FeatureExtractor.h
mv_image/image/include/Features/FeatureExtractorFactory.h
mv_image/image/include/Features/FeatureMatcher.h
mv_image/image/include/Features/FeaturePack.h
mv_image/image/include/ImageMathUtil.h
mv_image/image/include/Recognition/ImageRecognizer.h
mv_image/image/include/Tracking/CascadeTracker.h
mv_image/image/include/Tracking/ObjectTracker.h
mv_image/image/src/Features/BasicExtractorFactory.cpp
mv_image/image/src/Features/FeatureExtractor.cpp
mv_image/image/src/Features/FeatureMatcher.cpp
mv_image/image/src/Features/FeaturePack.cpp
mv_image/image/src/Features/ORBExtractorFactory.cpp
mv_image/image/src/Recognition/ImageObject.cpp
mv_image/image/src/Tracking/AsyncTracker.cpp
mv_image/image/src/Tracking/CascadeTracker.cpp
mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp
mv_image/image/src/Tracking/ImageTrackingModel.cpp
mv_image/image/src/Tracking/MFTracker.cpp
mv_image/image/src/mv_image_open.cpp
mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h
mv_surveillance/surveillance/include/HoGDetector.h [deleted file]
mv_surveillance/surveillance/include/MFTracker.h
mv_surveillance/surveillance/include/SurveillanceHelper.h
mv_surveillance/surveillance/src/EventManager.cpp
mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp
mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp
mv_surveillance/surveillance/src/HoGDetector.cpp [deleted file]
mv_surveillance/surveillance/src/MFTracker.cpp
mv_surveillance/surveillance/src/SurveillanceHelper.cpp
packaging/capi-media-vision.spec
test/testsuites/common/image_helper/src/ImageHelper.cpp
test/testsuites/common/image_helper/src/image_helper.cpp

index d329946..1bfe327 100644 (file)
@@ -20,9 +20,9 @@
 
 #include <zint.h>
 
-#include <opencv2/core/core.hpp>
-#include <opencv2/imgproc/imgproc.hpp>
-#include <opencv2/highgui/highgui.hpp>
+#include <opencv2/core.hpp>
+#include <opencv2/imgproc.hpp>
+#include <opencv2/highgui.hpp>
 
 #include <cstring>
 #include <vector>
index caff530..8aec2e3 100644 (file)
@@ -17,7 +17,7 @@ include_directories("${PROJECT_SOURCE_DIR}/src")
 file(GLOB MV_FACE_INCLUDE_LIST "${PROJECT_SOURCE_DIR}/include/*.h")
 file(GLOB MV_FACE_SRC_LIST "${PROJECT_SOURCE_DIR}/src/*.cpp" "${PROJECT_SOURCE_DIR}/src/*.c")
 
-find_package(OpenCV REQUIRED core objdetect contrib)
+find_package(OpenCV REQUIRED core objdetect imgproc tracking face)
 if(NOT OpenCV_FOUND)
     message(SEND_ERROR "Failed to find OpenCV")
     return()
index 48d1730..55c0132 100644 (file)
@@ -17,7 +17,7 @@
 #ifndef __MEDIA_VISION_FACE_DETECTOR_H__
 #define __MEDIA_VISION_FACE_DETECTOR_H__
 
-#include <opencv/cv.h>
+#include <opencv2/objdetect.hpp>
 #include <vector>
 #include <string>
 
index b894833..496e407 100644 (file)
 #include "mv_common_c.h"
 #include "mv_face_open.h"
 
+#include <opencv2/objdetect.hpp>
 #include <string>
 
-namespace cv {
-       class Mat;
-}
-
 /**
  * @file FaceExpressionRecognizer.h
  * @brief This file contains the FaceExpressionRecognizer class which implements
index 7c1ec36..cc7a9bc 100644 (file)
@@ -20,7 +20,7 @@
 #include <mv_common_c.h>
 #include <mv_face.h>
 
-#include <opencv/cv.h>
+#include <opencv2/imgproc.hpp>
 
 /**
  * @file FaceEyeCondition.h
index f89c846..8a8f3ae 100644 (file)
 
 #include "FaceUtil.h"
 
-#include <opencv2/core/core.hpp>
-#include <opencv2/contrib/contrib.hpp>
+#include <opencv2/core.hpp>
+#include <opencv2/face/facerec.hpp>
+#include <opencv2/imgproc.hpp>
 
 #include <cstring>
 #include <vector>
+#include <set>
 
 /**
  * @file FaceRecognitionModel.h
@@ -252,7 +254,7 @@ private:
         * Factory method for creating of the recognition algorithm based on input
         * configuration:
         */
-       static cv::Ptr<cv::FaceRecognizer> CreateRecognitionAlgorithm(
+       static cv::Ptr<cv::face::FaceRecognizer> CreateRecognitionAlgorithm(
                        const FaceRecognitionModelConfig& config =
                        FaceRecognitionModelConfig());
 
@@ -268,7 +270,7 @@ private:
        FaceRecognitionModelConfig m_learnAlgorithmConfig; /**< Configuration of the
                                                                                                                        learning method */
 
-       cv::Ptr<cv::FaceRecognizer> m_recognizer; /**< Recognizer associated with
+       cv::Ptr<cv::face::FaceRecognizer> m_recognizer; /**< Recognizer associated with
                                                                                                        the current model */
 
        std::set<int> m_learnedLabels; /**< Vector of the labels had been learned
similarity index 90%
rename from mv_face/face/include/TrackerMedianFlow.h
rename to mv_face/face/include/FaceTracker.h
index e8bed92..5266fea 100644 (file)
 #ifndef __MEDIA_VISION_TRACKER_MEDIAN_FLOW_H__
 #define __MEDIA_VISION_TRACKER_MEDIAN_FLOW_H__
 
-#include "opencv2/core/core.hpp"
+#include <opencv2/core.hpp>
+#include <opencv2/tracking.hpp>
 
 namespace cv {
 
-class TrackerMedianFlowModel;
+//class TrackerMedianFlowModel;
 
 /** @brief Median Flow tracker implementation.
 
@@ -58,7 +59,7 @@ by authors to outperform MIL). During the implementation period the code at
 <http://www.aonsquared.co.uk/node/5>, the courtesy of the author Arthur Amarra, was used for the
 reference purpose.
  */
-class TrackerMedianFlow : public virtual Algorithm {
+class FaceTracker : public TrackerMedianFlow {
 public:
        struct Params {
                /**
@@ -80,12 +81,12 @@ public:
                                                                flow search used for tracking */
        };
 
-       TrackerMedianFlow(Params paramsIn = Params());
+       FaceTracker(Params paramsIn = Params());
 
-       bool copyTo(TrackerMedianFlow& copy) const;
+       bool copyTo(FaceTracker& copy) const;
 
-       bool init(const Mat& image, const Rect_<float>& boundingBox);
-       bool update(const Mat& image, Rect_<float>& boundingBox);
+       bool initImpl(const Mat& image, const Rect2d& boundingBox);
+       bool updateImpl(const Mat& image, Rect2d& boundingBox);
 
        bool isInited() const;
 
@@ -94,11 +95,12 @@ public:
 
        void read(FileStorage& fn);
        void write(FileStorage& fs) const;
+       void read( const FileNode& fn );
 
 private:
        bool m_isInit;
 
-       bool medianFlowImpl(Mat oldImage, Mat newImage, Rect_<float>& oldBox);
+       bool medianFlowImpl(Mat oldImage, Mat newImage, Rect2f& oldBox);
 
        Rect_<float> vote(
                        const std::vector<Point2f>& oldPoints,
@@ -132,7 +134,7 @@ private:
                                                                                Lucas–Kanade optical flow algorithm used
                                                                                during tracking */
 
-       Rect_<float> m_boundingBox;  /**< Tracking object bounding box */
+       Rect2d m_boundingBox;  /**< Tracking object bounding box */
 
        float m_confidence;          /**< Confidence that face was tracked correctly
                                                                                at the last tracking iteration */
index 8c73705..95f8d6e 100644 (file)
@@ -17,7 +17,9 @@
 #ifndef __MEDIA_VISION_FACE_TRACKING_MODEL_H__
 #define __MEDIA_VISION_FACE_TRACKING_MODEL_H__
 
-#include "TrackerMedianFlow.h"
+#include <opencv2/core.hpp>
+#include "FaceTracker.h"
+
 
 /**
  * @file FaceTrackingModel.h
@@ -158,7 +160,7 @@ private:
                                                                                                        of the tracking model to
                                                                                                        perform track */
 
-       cv::Ptr<cv::TrackerMedianFlow> m_tracker; /**< Underlying OpenCV tracking
+       cv::Ptr<cv::FaceTracker> m_tracker; /**< Underlying OpenCV tracking
                                                                                                        model */
 };
 
index 65c5896..bd2cd63 100644 (file)
@@ -17,7 +17,7 @@
 #ifndef __MEDIA_VISION_FACE_UTIL_H__
 #define __MEDIA_VISION_FACE_UTIL_H__
 
-#include <opencv/cv.h>
+#include <opencv2/core.hpp>
 
 #include "mv_common_c.h"
 
index a1f7b0b..54f5309 100644 (file)
@@ -20,7 +20,6 @@
 
 #include <vector>
 
-#include <opencv/cv.h>
 
 namespace MediaVision {
 namespace Face {
index e247f58..32a6d8c 100644 (file)
@@ -56,17 +56,17 @@ bool isEmptyAlgorithmParam(const std::string& path)
        return false;
 }
 
-int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::FaceRecognizer>& srcAlg,
-               cv::Ptr<cv::FaceRecognizer>& dstAlg)
+int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::face::FaceRecognizer>& srcAlg,
+               cv::Ptr<cv::face::FaceRecognizer>& dstAlg)
 {
        char tempPath[1024] = "";
 
-       snprintf(tempPath, 1024, "/tmp/alg_copy_%p_%p", srcAlg.obj, dstAlg.obj);
+       snprintf(tempPath, 1024, "/tmp/alg_copy_%p_%p", srcAlg.get(), dstAlg.get());
 
-       srcAlg->save(tempPath);
+       srcAlg->write(tempPath);
 
        if (!isEmptyAlgorithmParam(tempPath))
-               dstAlg->load(tempPath);
+               dstAlg->read(tempPath);
 
        if (0 != remove(tempPath))
                LOGW("Error removing serialized FaceRecognizer in %s", tempPath);
@@ -123,11 +123,11 @@ int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::FaceRecognizer>& srcAlg,
 }
 
 void ParseOpenCVLabels(
-               const cv::Ptr<cv::FaceRecognizer>& recognizer,
+               const cv::Ptr<cv::face::FaceRecognizer>& recognizer,
                std::set<int>& outLabels)
 {
        if (!recognizer.empty()) {
-               cv::Mat labels = recognizer->getMat("labels");
+               cv::Mat labels = (dynamic_cast<cv::face::EigenFaceRecognizer*>(recognizer.get()))->getLabels();
 
                for (int i = 0; i < labels.rows; ++i)
                        outLabels.insert(labels.at<int>(i, 0));
@@ -174,7 +174,7 @@ bool FaceRecognitionModelConfig::operator!=(
 
 FaceRecognitionModel::FaceRecognitionModel() :
                m_canRecognize(false),
-               m_recognizer(NULL)
+               m_recognizer() // The default constructor creates a null Ptr
 {
        ; /* NULL */
 }
@@ -255,7 +255,7 @@ int FaceRecognitionModel::save(const std::string& fileName)
                }
 
                storage << "can_recognize" << m_canRecognize;
-               m_recognizer->save(storage);
+               m_recognizer->write(storage);
 
                storage.release();
        } else {
@@ -292,39 +292,39 @@ int FaceRecognitionModel::load(const std::string& fileName)
        storage["algorithm"] >> algName;
        storage["can_recognize"] >> canRecognize;
 
-       cv::Ptr<cv::FaceRecognizer> tempRecognizer;
+       cv::Ptr<cv::face::FaceRecognizer> tempRecognizer;
        FaceRecognitionModelConfig tempConfig;
        std::set<int> tempLearnedLabels;
 
        if (algName == "Eigenfaces") {
-               tempRecognizer = cv::createEigenFaceRecognizer();
+               tempRecognizer = cv::face::EigenFaceRecognizer::create();
                storage["resizeW"] >> tempConfig.mImgWidth;
                storage["resizeH"] >> tempConfig.mImgHeight;
-               tempRecognizer->load(storage);
+               tempRecognizer->read(storage.root());
                tempConfig.mModelType =
                                MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES;
                tempConfig.mNumComponents =
-                               tempRecognizer->getInt("ncomponents");
+                               (dynamic_cast<cv::face::EigenFaceRecognizer*>(tempRecognizer.get()))->getNumComponents();
                ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
        } else if (algName == "Fisherfaces") {
-               tempRecognizer = cv::createFisherFaceRecognizer();
+               tempRecognizer = cv::face::FisherFaceRecognizer::create();
                storage["resizeW"] >> tempConfig.mImgWidth;
                storage["resizeH"] >> tempConfig.mImgHeight;
-               tempRecognizer->load(storage);
+               tempRecognizer->read(storage.root());
                tempConfig.mModelType =
                                MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES;
                tempConfig.mNumComponents =
-                               tempRecognizer->getInt("ncomponents");
+                               (dynamic_cast<cv::face::FisherFaceRecognizer*>(tempRecognizer.get()))->getNumComponents();
                ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
        } else if (algName == "LBPH") {
-               tempRecognizer = cv::createLBPHFaceRecognizer();
-               tempRecognizer->load(storage);
+               tempRecognizer = cv::face::LBPHFaceRecognizer::create();
+               tempRecognizer->read(storage.root());
                tempConfig.mModelType =
                                MEDIA_VISION_FACE_MODEL_TYPE_LBPH;
-               tempConfig.mGridX = tempRecognizer->getInt("grid_x");
-               tempConfig.mGridY = tempRecognizer->getInt("grid_y");
-               tempConfig.mNeighbors = tempRecognizer->getInt("neighbors");
-               tempConfig.mRadius = tempRecognizer->getInt("radius");
+               tempConfig.mGridX = (dynamic_cast<cv::face::LBPHFaceRecognizer*>(tempRecognizer.get()))->getGridX();
+               tempConfig.mGridY = (dynamic_cast<cv::face::LBPHFaceRecognizer*>(tempRecognizer.get()))->getGridY();
+               tempConfig.mNeighbors = (dynamic_cast<cv::face::LBPHFaceRecognizer*>(tempRecognizer.get()))->getNeighbors();
+               tempConfig.mRadius = (dynamic_cast<cv::face::LBPHFaceRecognizer*>(tempRecognizer.get()))->getRadius();
                ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
        } else {
                tempConfig = FaceRecognitionModelConfig();
@@ -336,7 +336,7 @@ int FaceRecognitionModel::load(const std::string& fileName)
                return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
        }
 
-       tempConfig.mThreshold = tempRecognizer->getDouble("threshold");
+       tempConfig.mThreshold = tempRecognizer->getThreshold();
 
        LOGD("Recognition model of [%s] type has been loaded from file",
                        algName.c_str());
@@ -529,23 +529,23 @@ int FaceRecognitionModel::recognize(const cv::Mat& image, FaceRecognitionResults
        return MEDIA_VISION_ERROR_NONE;
 }
 
-cv::Ptr<cv::FaceRecognizer> FaceRecognitionModel::CreateRecognitionAlgorithm(
+cv::Ptr<cv::face::FaceRecognizer> FaceRecognitionModel::CreateRecognitionAlgorithm(
                const FaceRecognitionModelConfig& config)
 {
-       cv::Ptr<cv::FaceRecognizer> tempRecognizer;
+       cv::Ptr<cv::face::FaceRecognizer> tempRecognizer;
        switch (config.mModelType) {
        case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES:
-               tempRecognizer = cv::createEigenFaceRecognizer(
+               tempRecognizer = cv::face::EigenFaceRecognizer::create(
                                                                        config.mNumComponents,
                                                                        config.mThreshold);
                break;
        case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES:
-               tempRecognizer = cv::createFisherFaceRecognizer(
+               tempRecognizer = cv::face::FisherFaceRecognizer::create(
                                                                        config.mNumComponents,
                                                                        config.mThreshold);
                break;
        case MEDIA_VISION_FACE_MODEL_TYPE_LBPH:
-               tempRecognizer = cv::createLBPHFaceRecognizer(
+               tempRecognizer = cv::face::LBPHFaceRecognizer::create(
                                                                        config.mRadius,
                                                                        config.mNeighbors,
                                                                        config.mGridX,
@@ -553,7 +553,7 @@ cv::Ptr<cv::FaceRecognizer> FaceRecognitionModel::CreateRecognitionAlgorithm(
                                                                        config.mThreshold);
                break;
        default:
-               return NULL;
+               LOGE("Unknown FaceRecognition model");
        }
 
        return tempRecognizer;
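
For reference, a minimal sketch (not part of the change; image, label and path values are assumed placeholders) of the opencv_contrib face-module API this file now targets, showing the create()/write()/read() calls that replace createEigenFaceRecognizer()/save()/load():

#include <opencv2/core.hpp>
#include <opencv2/face/facerec.hpp>
#include <vector>

void face_recognizer_sketch(const std::vector<cv::Mat>& images,
			const std::vector<int>& labels,
			const cv::Mat& testSample)
{
	/* 2.4-style cv::createEigenFaceRecognizer() becomes a static create() in cv::face */
	cv::Ptr<cv::face::FaceRecognizer> model =
			cv::face::EigenFaceRecognizer::create();

	model->train(images, labels);

	int predictedLabel = -1;
	double confidence = 0.0;
	model->predict(testSample, predictedLabel, confidence);

	/* save()/load() are replaced by write()/read() */
	model->write("/tmp/model.xml");
	model->read("/tmp/model.xml");
}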
similarity index 89%
rename from mv_face/face/src/TrackerMedianFlow.cpp
rename to mv_face/face/src/FaceTracker.cpp
index 759b606..f5427ce 100644 (file)
  //
  //M*/
 
-#include "TrackerMedianFlow.h"
+#include "FaceTracker.h"
 
 #include "opencv2/video/tracking.hpp"
-#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/imgproc.hpp"
 
 #include <algorithm>
 #include <cmath>
@@ -52,14 +52,14 @@ namespace {
 } /* anonymous namespace */
 
 namespace cv {
-TrackerMedianFlow::Params::Params()
+FaceTracker::Params::Params()
 {
        mPointsInGrid = 10;
        mWindowSize = Size(3, 3);
        mPyrMaxLevel = 5;
 }
 
-void TrackerMedianFlow::Params::read(const cv::FileNode& fn)
+void FaceTracker::Params::read(const cv::FileNode& fn)
 {
        mPointsInGrid = fn["pointsInGrid"];
        int winSizeHeight = fn["windowSizeHeight"];
@@ -68,7 +68,7 @@ void TrackerMedianFlow::Params::read(const cv::FileNode& fn)
        mPyrMaxLevel = fn["pyrMaxLevel"];
 }
 
-void TrackerMedianFlow::Params::write(cv::FileStorage& fs) const
+void FaceTracker::Params::write(cv::FileStorage& fs) const
 {
        fs << "pointsInGrid" << mPointsInGrid;
        fs << "windowSizeHeight" << mWindowSize.height;
@@ -76,7 +76,7 @@ void TrackerMedianFlow::Params::write(cv::FileStorage& fs) const
        fs << "pyrMaxLevel" << mPyrMaxLevel;
 }
 
-TrackerMedianFlow::TrackerMedianFlow(Params paramsIn) :
+FaceTracker::FaceTracker(Params paramsIn) :
        m_termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.3),
        m_confidence(0.0)
 {
@@ -84,7 +84,7 @@ TrackerMedianFlow::TrackerMedianFlow(Params paramsIn) :
        m_isInit = false;
 }
 
-bool TrackerMedianFlow::copyTo(TrackerMedianFlow& copy) const
+bool FaceTracker::copyTo(FaceTracker& copy) const
 {
        copy.m_isInit = m_isInit;
        copy.m_params = m_params;
@@ -95,7 +95,7 @@ bool TrackerMedianFlow::copyTo(TrackerMedianFlow& copy) const
        return true;
 }
 
-bool TrackerMedianFlow::init(const Mat& image, const Rect_<float>& boundingBox)
+bool FaceTracker::initImpl(const Mat& image, const Rect2d& boundingBox)
 {
        if (image.empty())
                return false;
@@ -109,7 +109,7 @@ bool TrackerMedianFlow::init(const Mat& image, const Rect_<float>& boundingBox)
        return m_isInit;
 }
 
-bool TrackerMedianFlow::update(const Mat& image, Rect_<float>& boundingBox)
+bool FaceTracker::updateImpl(const Mat& image, Rect2d& boundingBox)
 {
        if (!m_isInit || image.empty())
                return false;
@@ -119,8 +119,8 @@ bool TrackerMedianFlow::update(const Mat& image, Rect_<float>& boundingBox)
         * frame and bounding box. Then, track as usually:
         */
        if (m_image.rows != image.rows || m_image.cols != image.cols) {
-               const float xFactor = (float) image.cols / m_image.cols;
-               const float yFactor = (float) image.rows / m_image.rows;
+               const double xFactor = (double) image.cols / m_image.cols;
+               const double yFactor = (double) image.rows / m_image.rows;
 
                resize(m_image, m_image, Size(), xFactor, yFactor);
 
@@ -132,33 +132,33 @@ bool TrackerMedianFlow::update(const Mat& image, Rect_<float>& boundingBox)
 
        Mat oldImage = m_image;
 
-       Rect_<float> oldBox = m_boundingBox;
+       Rect2f oldBox = (Rect2f)m_boundingBox;
        if(!medianFlowImpl(oldImage, image, oldBox))
                return false;
 
-       boundingBox = oldBox;
+       boundingBox = (Rect2d)oldBox;
        image.copyTo(m_image);
        m_boundingBox = boundingBox;
        return true;
 }
 
-bool TrackerMedianFlow::isInited() const
+bool FaceTracker::isInited() const
 {
        return m_isInit;
 }
 
-float TrackerMedianFlow::getLastConfidence() const
+float FaceTracker::getLastConfidence() const
 {
        return m_confidence;
 }
 
-Rect_<float> TrackerMedianFlow::getLastBoundingBox() const
+Rect_<float> FaceTracker::getLastBoundingBox() const
 {
        return m_boundingBox;
 }
 
-bool TrackerMedianFlow::medianFlowImpl(
-               Mat oldGrayImage, Mat newGrayImage, Rect_<float>& oldBox)
+bool FaceTracker::medianFlowImpl(
+               Mat oldGrayImage, Mat newGrayImage, Rect2f& oldBox)
 {
        std::vector<Point2f> pointsToTrackOld, pointsToTrackNew;
 
@@ -241,7 +241,7 @@ bool TrackerMedianFlow::medianFlowImpl(
        return true;
 }
 
-Rect_<float> TrackerMedianFlow::vote(
+Rect_<float> FaceTracker::vote(
                const std::vector<Point2f>& oldPoints,
                const std::vector<Point2f>& newPoints,
                const Rect_<float>& oldRect,
@@ -305,7 +305,7 @@ Rect_<float> TrackerMedianFlow::vote(
 }
 
 template<typename T>
-T TrackerMedianFlow::getMedian(std::vector<T>& values, int size)
+T FaceTracker::getMedian(std::vector<T>& values, int size)
 {
        if (size == -1)
                size = (int)values.size();
@@ -319,14 +319,14 @@ T TrackerMedianFlow::getMedian(std::vector<T>& values, int size)
        }
 }
 
-float TrackerMedianFlow::l2distance(Point2f p1, Point2f p2)
+float FaceTracker::l2distance(Point2f p1, Point2f p2)
 {
        float dx = p1.x - p2.x;
        float dy = p1.y - p2.y;
        return sqrt(dx * dx + dy * dy);
 }
 
-void TrackerMedianFlow::check_FB(
+void FaceTracker::check_FB(
                std::vector<Mat> newPyramid,
                const std::vector<Point2f>& oldPoints,
                const std::vector<Point2f>& newPoints,
@@ -358,7 +358,7 @@ void TrackerMedianFlow::check_FB(
                status[idx] = (FBerror[idx] < FBerrorMedian);
 }
 
-void TrackerMedianFlow::check_NCC(
+void FaceTracker::check_NCC(
                const Mat& oldImage,
                const Mat& newImage,
                const std::vector<Point2f>& oldPoints,
@@ -391,9 +391,14 @@ void TrackerMedianFlow::check_NCC(
                status[idx] = status[idx] && (NCC[idx] > median);
 }
 
-void TrackerMedianFlow::read(cv::FileStorage& fs)
+void FaceTracker::read(const cv::FileNode& fn)
 {
-       m_params.read(fs.root());
+       m_params.read(fn);
+}
+
+void FaceTracker::read(cv::FileStorage& fs)
+{
+       read(fs.root());
        float bbX = 0.f;
        float bbY = 0.f;
        float bbW = 0.f;
@@ -406,7 +411,8 @@ void TrackerMedianFlow::read(cv::FileStorage& fs)
        fs["lastImage"] >> m_image;
 }
 
-void TrackerMedianFlow::write(cv::FileStorage& fs) const
+
+void FaceTracker::write(cv::FileStorage& fs) const
 {
        m_params.write(fs);
        fs << "lastLocationX" << m_boundingBox.x;
index 98df8a4..46faa0b 100644 (file)
@@ -32,26 +32,26 @@ FaceTrackingResults::FaceTrackingResults() :
 
 FaceTrackingModel::FaceTrackingModel() :
        m_canTrack(false),
-       m_tracker(new cv::TrackerMedianFlow())
+       m_tracker(new cv::FaceTracker())
 {
        ; /* NULL */
 }
 
 FaceTrackingModel::FaceTrackingModel(const FaceTrackingModel& origin) :
        m_canTrack(origin.m_canTrack),
-       m_tracker(new cv::TrackerMedianFlow())
+       m_tracker(new cv::FaceTracker())
 {
        if (!origin.m_tracker.empty())
-               origin.m_tracker->copyTo(*(m_tracker.obj));
+               origin.m_tracker->copyTo(*(m_tracker.get()));
 }
 
 FaceTrackingModel& FaceTrackingModel::operator=(const FaceTrackingModel& copy)
 {
        if (this != &copy) {
                m_canTrack = copy.m_canTrack;
-               m_tracker = cv::Ptr<cv::TrackerMedianFlow>(new cv::TrackerMedianFlow());
+               m_tracker = cv::Ptr<cv::FaceTracker>(new cv::FaceTracker());
                if (!copy.m_tracker.empty())
-                       copy.m_tracker->copyTo(*(m_tracker.obj));
+                       copy.m_tracker->copyTo(*(m_tracker.get()));
        }
 
        return *this;
@@ -162,7 +162,7 @@ int FaceTrackingModel::prepare(
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       if (!m_tracker->init(image, boundingBox)) {
+       if (!m_tracker->initImpl(image, boundingBox)) {
                LOGE("Failed to prepare tracking model.");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
@@ -174,8 +174,10 @@ int FaceTrackingModel::prepare(
 int FaceTrackingModel::track(const cv::Mat& image, FaceTrackingResults& results)
 {
        if (!m_tracker.empty() && m_canTrack) {
-               results.mIsTracked = m_tracker->update(image, results.mFaceLocation);
+               cv::Rect2d faceLocation = (cv::Rect2d)results.mFaceLocation;
+               results.mIsTracked = m_tracker->updateImpl(image, faceLocation);
                results.mConfidence = m_tracker->getLastConfidence();
+               results.mFaceLocation = (cv::Rect2f)faceLocation;
        } else {
                LOGE("Attempt to track face with not prepared model");
                return MEDIA_VISION_ERROR_INVALID_OPERATION;
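
For comparison, a minimal sketch (frames and initial box assumed) of how the stock opencv_contrib tracking API is normally driven in 3.4; the code above instead subclasses cv::TrackerMedianFlow as cv::FaceTracker and calls initImpl()/updateImpl() directly:

#include <opencv2/core.hpp>
#include <opencv2/tracking.hpp>

bool track_sketch(const cv::Mat& firstFrame, const cv::Mat& nextFrame,
			cv::Rect2d& box)
{
	/* trackers are created through static factories and used via init()/update() */
	cv::Ptr<cv::TrackerMedianFlow> tracker = cv::TrackerMedianFlow::create();

	if (!tracker->init(firstFrame, box))
		return false;

	/* update() refines the bounding box on the new frame; note Rect2d, not Rect_<float> */
	return tracker->update(nextFrame, box);
}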
index 954f82b..c2e5ec0 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "mv_private.h"
 
-#include <opencv2/imgproc/types_c.h>
+#include <opencv2/imgproc.hpp>
 
 namespace MediaVision {
 namespace Face {
index 801c418..acdb240 100644 (file)
@@ -17,7 +17,7 @@ include_directories("${PROJECT_SOURCE_DIR}/src")
 file(GLOB_RECURSE MV_IMAGE_INC_LIST "${PROJECT_SOURCE_DIR}/include/*.h")
 file(GLOB_RECURSE MV_IMAGE_SRC_LIST "${PROJECT_SOURCE_DIR}/src/*.cpp" "${PROJECT_SOURCE_DIR}/src/*.c")
 
-find_package(OpenCV REQUIRED core imgproc objdetect features2d contrib)
+find_package(OpenCV REQUIRED core imgproc objdetect tracking features2d xfeatures2d)
 if(NOT OpenCV_FOUND)
     message(SEND_ERROR "Failed to find OpenCV")
     return()
index 4a34fae..f2f6e5e 100644 (file)
 
 #include "Features/FeaturePack.h"
 
-namespace cv {
-class FeatureDetector;
-class DescriptorExtractor;
-}
-
 namespace MediaVision {
 namespace Image {
 /**
index 837725a..1477292 100644 (file)
@@ -19,7 +19,7 @@
 
 #include "Features/FeatureExtractor.h"
 
-#include <opencv2/core/core.hpp>
+#include <opencv2/core.hpp>
 
 namespace MediaVision {
 namespace Image {
index 37f4508..971e7f9 100644 (file)
@@ -19,8 +19,6 @@
 
 #include "Features/FeaturePack.h"
 
-#include <opencv2/features2d/features2d.hpp>
-
 namespace MediaVision {
 namespace Image {
 
index c492bf3..422de0b 100644 (file)
 #ifndef __MEDIA_VISION_FEATUREPACK_H__
 #define __MEDIA_VISION_FEATUREPACK_H__
 
+#include "mv_private.h"
 #include "ImageConfig.h"
 
 #include <vector>
-#include <opencv2/core/core.hpp>
-
-namespace cv {
-class KeyPoint;
-}
+#include <opencv2/core.hpp>
+#include <opencv2/imgproc.hpp>
+#include <opencv2/features2d.hpp>
+#include <opencv2/xfeatures2d.hpp>
+#include <opencv2/calib3d.hpp>
 
 namespace MediaVision {
 namespace Image {
index f8a8ce1..bd49674 100644 (file)
@@ -17,7 +17,7 @@
 #ifndef __MEDIA_VISION_IMAGEMATHUTIL_H__
 #define __MEDIA_VISION_IMAGEMATHUTIL_H__
 
-#include <opencv/cv.h>
+#include <opencv2/core.hpp>
 
 /**
  * @file  ImageMathUtil.h
index 2a92550..5ac2f2e 100644 (file)
@@ -22,7 +22,7 @@
 
 #include "Recognition/ImageObject.h"
 
-#include <opencv/cv.h>
+#include <opencv2/core.hpp>
 
 /**
  * @file  ImageRecognizer.h
index e28e294..09a9d08 100644 (file)
@@ -19,7 +19,7 @@
 
 #include "Tracking/ObjectTracker.h"
 
-#include <opencv2/core/core.hpp>
+#include <opencv2/core.hpp>
 
 #include <set>
 
index ffc02c1..0fe88b4 100644 (file)
@@ -17,7 +17,7 @@
 #ifndef __MEDIA_VISION_OBJECTTRACKER_H__
 #define __MEDIA_VISION_OBJECTTRACKER_H__
 
-#include <opencv2/core/core.hpp>
+#include <opencv2/core.hpp>
 
 namespace MediaVision {
 namespace Image {
index 9c2d6e6..0982be1 100644 (file)
@@ -16,7 +16,7 @@
 
 #include "Features/BasicExtractorFactory.h"
 
-#include <opencv/cv.h>
+#include <opencv2/core.hpp>
 
 namespace MediaVision {
 namespace Image {
@@ -32,17 +32,35 @@ BasicExtractorFactory::BasicExtractorFactory(
 cv::Ptr<FeatureExtractor> BasicExtractorFactory::buildFeatureExtractor()
 {
        cv::Ptr<FeatureExtractor> featureExtractor(new (std::nothrow)FeatureExtractor());
-       if (featureExtractor == NULL)
-         return NULL;
 
-       cv::Ptr<cv::FeatureDetector> detector =
-                       cv::FeatureDetector::create(KeypointNames[__kpType]);
+       if (featureExtractor != NULL) {
+               cv::Ptr<cv::FeatureDetector> detector;
+               switch (__kpType) {
+               case KT_ORB:
+                       detector = cv::ORB::create();
+                       break;
+               case KT_GFTT:
+                       detector = cv::GFTTDetector::create();
+                       break;
+               default:
+                       LOGE("Unknown feature detector", __FUNCTION__);
+               }
 
-       cv::Ptr<cv::DescriptorExtractor> extractor =
-                       cv::DescriptorExtractor::create(DescriptorNames[__descType]);
+               cv::Ptr<cv::DescriptorExtractor> extractor;
+               switch (__descType) {
+               case DT_ORB:
+                       extractor = cv::ORB::create();
+                       break;
+               case DT_BRIEF:
+                       extractor = cv::xfeatures2d::BriefDescriptorExtractor::create();
+                       break;
+               default:
+			LOGE("Unknown feature extractor", __FUNCTION__);
+               }
 
-       featureExtractor->setFeatureDetector(detector, __kpType);
-       featureExtractor->setDescriptorExtractor(extractor, __descType);
+               featureExtractor->setFeatureDetector(detector, __kpType);
+               featureExtractor->setDescriptorExtractor(extractor, __descType);
+       }
 
        return featureExtractor;
 }
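
The string-based cv::FeatureDetector::create()/cv::DescriptorExtractor::create() factories replaced above no longer exist in OpenCV 3.x; each algorithm exposes its own static create() instead. A minimal sketch, assuming an already-grayscale input image:

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <vector>

void extract_sketch(const cv::Mat& gray)
{
	std::vector<cv::KeyPoint> keypoints;
	cv::Mat descriptors;

	/* ORB acts as both keypoint detector and descriptor extractor */
	cv::Ptr<cv::ORB> orb = cv::ORB::create();
	orb->detectAndCompute(gray, cv::noArray(), keypoints, descriptors);

	/* BRIEF lives in the extra xfeatures2d module and only computes descriptors */
	cv::Ptr<cv::xfeatures2d::BriefDescriptorExtractor> brief =
			cv::xfeatures2d::BriefDescriptorExtractor::create();
	brief->compute(gray, keypoints, descriptors);
}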
index 15c36be..5b04f00 100644 (file)
@@ -18,7 +18,8 @@
 
 #include "ImageMathUtil.h"
 
-#include <opencv/cv.h>
+#include <opencv2/core.hpp>
+
 
 namespace MediaVision {
 namespace Image {
index 891e85f..f7c887c 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "ImageMathUtil.h"
 
-#include <opencv/cv.h>
+#include <opencv2/core.hpp>
 
 namespace MediaVision {
 namespace Image {
index 57338ff..d2ce616 100644 (file)
@@ -16,7 +16,7 @@
 
 #include "Features/FeaturePack.h"
 
-#include <opencv/cv.h>
+#include <opencv2/core.hpp>
 
 namespace MediaVision {
 namespace Image {
index cc482ce..1dcded8 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "ImageMathUtil.h"
 
-#include <opencv/cv.h>
+#include <opencv2/core.hpp>
 
 namespace MediaVision {
 namespace Image {
@@ -33,17 +33,9 @@ ORBExtractorFactory::ORBExtractorFactory(
 cv::Ptr<FeatureExtractor> ORBExtractorFactory::buildFeatureExtractor()
 {
        cv::Ptr<FeatureExtractor> featureExtractor(new (std::nothrow)FeatureExtractor());
-       if (featureExtractor == NULL)
-               return NULL;
 
-       cv::Ptr<cv::OrbFeatureDetector> detector(
-               new (std::nothrow)cv::ORB(
-                       __maximumFeaturesNumber,
-                       __scaleFactor));
-       if (detector == NULL)
-               return NULL;
-
-       cv::Ptr<cv::OrbDescriptorExtractor> extractor = detector;
+       cv::Ptr<cv::ORB> detector = cv::ORB::create(__maximumFeaturesNumber, __scaleFactor);
+       cv::Ptr<cv::ORB> extractor = detector;
 
        featureExtractor->setFeatureDetector(detector, KT_ORB);
        featureExtractor->setDescriptorExtractor(extractor, DT_ORB);
index 057038e..60d30b2 100644 (file)
 #include "mv_common.h"
 
 #include <opencv/cv.h>
-#include <opencv2/features2d/features2d.hpp>
+#include <opencv2/features2d.hpp>
 
 #include <fstream>
 #include <unistd.h>
+#include <iomanip>
 
 namespace MediaVision {
 namespace Image {
index ea24f8a..8916784 100644 (file)
@@ -22,7 +22,7 @@ namespace MediaVision {
 namespace Image {
 
 AsyncTracker::AsyncTracker(const AsyncTracker& copy) :
-               __baseTracker(copy.__baseTracker.obj->clone()),
+               __baseTracker(copy.__baseTracker.get()->clone()),
                __result(copy.__result),
                __isRun(false),
                __isUpdated(copy.__isUpdated),
index 7110939..a60bbb2 100644 (file)
@@ -49,7 +49,7 @@ bool CascadeTracker::track(const cv::Mat& frame, std::vector<cv::Point>& result)
        std::set<TrackerInfo>::iterator it = __trackers.begin();
 
        for (; it != __trackers.end(); ++it)
-               if (!it->mTracker.obj->track(frame, it->mResult))
+               if (!it->mTracker.get()->track(frame, it->mResult))
                        it->mResult.clear();
 
        return mergeResults(result);
@@ -60,7 +60,7 @@ void CascadeTracker::reinforcement(const std::vector<cv::Point>& location)
        std::set<TrackerInfo>::iterator it = __trackers.begin();
 
        for (; it != __trackers.end(); ++it)
-               it->mTracker.obj->reinforcement(location);
+               it->mTracker.get()->reinforcement(location);
 }
 
 cv::Ptr<ObjectTracker> CascadeTracker::clone() const
@@ -76,7 +76,7 @@ CascadeTracker& CascadeTracker::operator=(const CascadeTracker& copy)
 
                std::set<TrackerInfo>::iterator it = copy.__trackers.begin();
                for (; it != copy.__trackers.end(); ++it) {
-                       TrackerInfo temp(it->mTracker.obj->clone(), it->mPriority);
+                       TrackerInfo temp(it->mTracker.get()->clone(), it->mPriority);
                        temp.mResult = it->mResult;
 
                        __trackers.insert(temp);
@@ -121,7 +121,7 @@ void CascadeTracker::internalReinforcement()
                bool isUpdated = true;
 
                /* TODO: Redesign without dynamic_cast */
-               AsyncTracker *asyncView = dynamic_cast<AsyncTracker*>(it1->mTracker.obj);
+               AsyncTracker *asyncView = dynamic_cast<AsyncTracker*>(it1->mTracker.get());
                if (NULL != asyncView)
                        isUpdated = asyncView->isUpdated(it1->mResult);
 
@@ -135,7 +135,7 @@ void CascadeTracker::internalReinforcement()
 
                        if (getQuadrangleArea(checkedArea.data()) < __minimumArea) {
                                it1->mResult = std::vector<cv::Point>(0);
-                               it1->mTracker.obj->reinforcement(it1->mResult);
+                               it1->mTracker.get()->reinforcement(it1->mResult);
                        }
 
                        float priority = it1->mPriority;
@@ -143,7 +143,7 @@ void CascadeTracker::internalReinforcement()
 
                        for (; it2 != __trackers.end(); ++it2)
                                if (it1 != it2 && priority > it2->mPriority)
-                                        it2->mTracker.obj->reinforcement(it1->mResult);
+                                        it2->mTracker.get()->reinforcement(it1->mResult);
                }
        }
 }
index 43805a8..b6e10fc 100644 (file)
@@ -78,11 +78,11 @@ bool FeatureSubstitutionTracker::track(
 
        sceneImageObject->fill(frame, __featureExtractingParams, computeExpectedArea());
 
-       ImageRecognizer recognizer(*sceneImageObject.obj);
+       ImageRecognizer recognizer(*sceneImageObject.get());
 
        const bool isTracked =
                        recognizer.recognize(
-                                       *__target.obj,
+                                       *(__target.get()),
                                        __recogParams,
                                        contour,
                                        __objectScalingFactor);
index 3d78550..e0337ad 100644 (file)
@@ -28,6 +28,7 @@
 #include <fstream>
 #include <unistd.h>
 #include <new>
+#include <iomanip>
 
 namespace MediaVision {
 namespace Image {
@@ -44,12 +45,13 @@ ImageTrackingModel::ImageTrackingModel() :
 
 ImageTrackingModel::ImageTrackingModel(const ImageTrackingModel& copy) :
                __target(copy.__target),
-               __tracker(copy.__tracker.empty()? NULL: copy.__tracker->clone()),
                __stabilizator(copy.__stabilizator),
                __location(copy.__location),
                __stabilizationParams(copy.__stabilizationParams)
 {
-       ; /* NULL */
+       if (!copy.__tracker.empty())
+               __tracker = copy.__tracker->clone();
+       /* NULL */
 }
 
 void ImageTrackingModel::setTarget(const ImageObject& target)
index fd99019..72695b2 100644 (file)
@@ -16,7 +16,8 @@
 
 #include "Tracking/MFTracker.h"
 
-#include <opencv/cv.h>
+#include "opencv2/video/tracking.hpp"
+#include "opencv2/imgproc.hpp"
 
 namespace MediaVision {
 namespace Image {
@@ -205,13 +206,13 @@ bool MFTracker::medianFlowImpl(
        std::vector<float> errors(numberOfPointsToTrackOld);
 
        std::vector<cv::Mat> tempPyramid;
-       cv::buildOpticalFlowPyramid(
+       buildOpticalFlowPyramid(
                                                        newImage_gray,
                                                        tempPyramid,
                                                        __params.mWindowSize,
                                                        __params.mPyrMaxLevel);
 
-       cv::calcOpticalFlowPyrLK(__pyramid,
+       calcOpticalFlowPyrLK(__pyramid,
                                                        tempPyramid,
                                                        pointsToTrackOld,
                                                        pointsToTrackNew,
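
Not part of the change: a minimal sketch of the pyramidal Lucas-Kanade calls used by MFTracker, with assumed 8-bit grayscale frames and an assumed point list; window size and pyramid depth mirror the tracker defaults (3x3, 5 levels):

#include <opencv2/core.hpp>
#include <opencv2/video/tracking.hpp>
#include <vector>

void lk_sketch(const cv::Mat& prevGray, const cv::Mat& nextGray,
			std::vector<cv::Point2f>& prevPts)
{
	std::vector<cv::Point2f> nextPts;
	std::vector<uchar> status;
	std::vector<float> errors;

	/* pre-building the pyramids lets them be reused across calls, as MFTracker does */
	std::vector<cv::Mat> prevPyr, nextPyr;
	cv::buildOpticalFlowPyramid(prevGray, prevPyr, cv::Size(3, 3), 5);
	cv::buildOpticalFlowPyramid(nextGray, nextPyr, cv::Size(3, 3), 5);

	/* the window size must match the one used to build the pyramids */
	cv::calcOpticalFlowPyrLK(prevPyr, nextPyr, prevPts, nextPts,
				status, errors, cv::Size(3, 3), 5);
}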
index 42a5ce3..b1f23bc 100644 (file)
@@ -26,7 +26,7 @@
 #include "Recognition/ImageRecognizer.h"
 #include "Tracking/ImageTrackingModel.h"
 
-#include <opencv/cv.h>
+#include <opencv2/core.hpp>
 
 namespace {
 
index a4ce5a0..4355107 100644 (file)
@@ -27,9 +27,9 @@
 
 #include "EventResult.h"
 #include "EventDefs.h"
-#include "HoGDetector.h"
 
-#include <opencv/cv.h>
+#include <opencv2/opencv.hpp>
+#include <opencv2/objdetect.hpp>
 
 #include <sys/time.h>
 
@@ -187,7 +187,7 @@ private:
 
        CVRectangles __disappearedRects;
 
-       modifiedcv::HOGDescriptor __hogClassifier; /**< Classifier to be used for full body
+       cv::HOGDescriptor __hogClassifier; /**< Classifier to be used for full body
                                                                                                person detection */
 
        MVRectangles __detectedLocations;
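
With the bundled modifiedcv::HOGDescriptor removed (HoGDetector.h/.cpp deleted below), detection relies on the stock cv::HOGDescriptor; a minimal sketch with an assumed grayscale frame and hypothetical parameter values:

#include <opencv2/core.hpp>
#include <opencv2/objdetect.hpp>
#include <vector>

void detect_people_sketch(const cv::Mat& grayFrame, std::vector<cv::Rect>& found)
{
	cv::HOGDescriptor hog;
	hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

	/* hit threshold 0, 8x8 window stride, 32x32 padding, 1.05 scale step, group threshold 2 */
	hog.detectMultiScale(grayFrame, found, 0, cv::Size(8, 8),
				cv::Size(32, 32), 1.05, 2);
}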
diff --git a/mv_surveillance/surveillance/include/HoGDetector.h b/mv_surveillance/surveillance/include/HoGDetector.h
deleted file mode 100644 (file)
index b4fd68f..0000000
+++ /dev/null
@@ -1,201 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef __MEDIA_VISION_HOGDETECTOR_H__
-#define __MEDIA_VISION_HOGDETECTOR_H__
-
-/**
- * @file  HOGDetector.h
- * @brief This file contains structure of HOG detector.
- */
-
-#include "opencv2/core/core.hpp"
-#include "opencv2/objdetect/objdetect.hpp"
-
-#include <vector>
-
-namespace modifiedcv {
-
-using namespace cv;
-
-struct HOGDescriptor {
-       enum { L2Hys = 0 };
-       enum { DEFAULT_NLEVELS = 64 };
-
-       /* default constructor */
-       HOGDescriptor() :
-               winSize(64, 128),
-               blockSize(16, 16),
-               blockStride(8, 8),
-               cellSize(8, 8),
-               nbins(9),
-               derivAperture(1),
-               winSigma(-1),
-               histogramNormType(HOGDescriptor::L2Hys),
-               L2HysThreshold(0.2),
-               gammaCorrection(true),
-               nlevels(HOGDescriptor::DEFAULT_NLEVELS)
-       {}
-
-       /* constructor */
-       HOGDescriptor(
-               Size _winSize,
-               Size _blockSize,
-               Size _blockStride,
-               Size _cellSize,
-               int _nbins,
-               int _derivAperture = 1,
-               double _winSigma = -1.,
-               int _histogramNormType = L2Hys,
-               double _L2HysThreshold = 0.2,
-               bool _gammaCorrection = false,
-               int _nlevels = DEFAULT_NLEVELS) :
-               winSize(_winSize),
-               blockSize(_blockSize),
-               blockStride(_blockStride),
-               cellSize(_cellSize),
-               nbins(_nbins),
-               derivAperture(_derivAperture),
-               winSigma(_winSigma),
-               histogramNormType(_histogramNormType),
-               L2HysThreshold(_L2HysThreshold),
-               gammaCorrection(_gammaCorrection),
-               nlevels(_nlevels)
-       {}
-
-       /* default destructor */
-       virtual ~HOGDescriptor() {}
-
-       size_t getDescriptorSize() const;
-
-       bool checkDetectorSize() const;
-
-       double getWinSigma() const;
-
-       virtual void setSVMDetector(InputArray _svmdetector);
-
-       virtual void compute(
-               const Mat& img,
-               CV_OUT vector<float>& descriptors,
-               Size winStride = Size(),
-               Size padding = Size(),
-               const vector<Point>& locations = vector<Point>()) const;
-
-       /* with found weights output */
-       virtual void detect(
-               const Mat& img,
-               CV_OUT vector<Point>& foundLocations,
-               CV_OUT vector<double>& weights,
-               double hitThreshold = 0.,
-               Size winStride = Size(),
-               Size padding = Size(),
-               const vector<Point>& searchLocations = vector<Point>()) const;
-
-       /* without found weights output */
-       virtual void detect(
-               const Mat& img,
-               CV_OUT vector<Point>& foundLocations,
-               double hitThreshold = 0.,
-               Size winStride = Size(),
-               Size padding = Size(),
-               const vector<Point>& searchLocations = vector<Point>()) const;
-
-       /* with result weights output */
-       virtual void detectMultiScale(
-               const Mat& img,
-               CV_OUT vector<Rect>& foundLocations,
-               CV_OUT vector<double>& foundWeights,
-               double hitThreshold = 0,
-               Size winStride = Size(),
-               Size padding = Size(),
-               double scale = 1.05,
-               double finalThreshold = 2.0,
-               bool useMeanshiftGrouping = false) const;
-
-       /* without found weights output */
-       virtual void detectMultiScale(
-               const Mat& img,
-               CV_OUT vector<Rect>& foundLocations,
-               double hitThreshold = 0.,
-               Size winStride = Size(),
-               Size padding = Size(),
-               double scale = 1.05,
-               double finalThreshold = 2.0,
-               bool useMeanshiftGrouping = false) const;
-
-       virtual void computeGradient(
-               const Mat& img,
-               CV_OUT Mat& grad,
-               CV_OUT Mat& angleOfs,
-               Size paddingTL = Size(),
-               Size paddingBR = Size()) const;
-
-       static vector<float> getDefaultPeopleDetector();
-
-       static vector<float> getDaimlerPeopleDetector();
-
-       /* read/parse Dalal's alt model file */
-       void readALTModel(std::string modelfile);
-
-       void groupRectangles(
-               vector<cv::Rect>& rectList,
-               vector<double>& weights,
-               int groupThreshold,
-               double eps) const;
-
-       Size winSize;
-       Size blockSize;
-       Size blockStride;
-       Size cellSize;
-       int nbins;
-       int derivAperture;
-       double winSigma;
-       int histogramNormType;
-       double L2HysThreshold;
-       bool gammaCorrection;
-       vector<float> svmDetector;
-       int nlevels;
-};
-
-} /* modifiedcv */
-
-#endif /* __MEDIA_VISION_HOGDETECTOR_H__ */
index 947f6d4..4a58714 100644 (file)
@@ -17,7 +17,7 @@
 #ifndef __MEDIA_VISION_MFTRACKER_H__
 #define __MEDIA_VISION_MFTRACKER_H__
 
-#include <opencv2/core/core.hpp>
+#include <opencv2/core.hpp>
 
 namespace mediavision {
 namespace surveillance {
index 1ad303a..9e0de18 100644 (file)
@@ -24,7 +24,7 @@
 
 #include <mv_common.h>
 
-#include <opencv/cv.h>
+#include <opencv2/core.hpp>
 
 namespace mediavision {
 namespace surveillance {
index 77787f3..7dc2c09 100644 (file)
@@ -22,7 +22,7 @@
 #include "EventTriggerPersonRecognition.h"
 #include "EventTriggerMovementDetection.h"
 
-#include <mv_private.h>
+#include "mv_private.h"
 
 namespace mediavision {
 namespace surveillance {
index af37537..6385f3a 100644 (file)
@@ -22,7 +22,7 @@
 
 #include <mv_private.h>
 
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/highgui.hpp"
 
 namespace mediavision {
 namespace surveillance {
index 59bc61a..f09f39d 100644 (file)
@@ -21,8 +21,7 @@
 #include "SurveillanceHelper.h"
 #include "EventTriggerMovementDetection.h"
 
-#include "opencv2/opencv.hpp"
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/highgui.hpp"
 
 #include <mv_private.h>
 
diff --git a/mv_surveillance/surveillance/src/HoGDetector.cpp b/mv_surveillance/surveillance/src/HoGDetector.cpp
deleted file mode 100644 (file)
index 4d1ea0c..0000000
+++ /dev/null
@@ -1,1006 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-#include <stdio.h>
-#include "HoGDetector.h"
-#include "opencv2/imgproc/imgproc.hpp"
-#include <iterator>
-
-#ifdef ENABLE_NEON
-#include <arm_neon.h>
-#endif
-
-#ifdef ENABLE_OMP
-#include <sched.h>
-#define NCORES 4
-static int coreids[NCORES] = {1, 2, 3, 4};
-#endif
-
-/****************************************************************************************\
-      The code below is implementation of HOG (Histogram-of-Oriented Gradients)
-      descriptor and object detection, introduced by Navneet Dalal and Bill Triggs.
-
-      The computed feature vectors are compatible with the
-      INRIA Object Detection and Localization Toolkit
-      (http://pascal.inrialpes.fr/soft/olt/)
-\****************************************************************************************/
-
-namespace modifiedcv {
-
-class ParallelLoopBodyWrapper {
-public:
-       ParallelLoopBodyWrapper(const cv::ParallelLoopBody& _body, const cv::Range& _r) {
-               body = &_body;
-               wholeRange = _r;
-               nstripes = cvRound(wholeRange.end - wholeRange.start);
-       }
-       void operator()(const cv::Range& sr) const {
-               cv::Range r;
-               r.start = (int)(wholeRange.start +
-                                               ((uint64)sr.start*(wholeRange.end - wholeRange.start) + nstripes/2)/nstripes);
-               r.end = sr.end >= nstripes ? wholeRange.end : (int)(wholeRange.start +
-                               ((uint64)sr.end*(wholeRange.end - wholeRange.start) + nstripes/2)/nstripes);
-               (*body)(r);
-       }
-       cv::Range stripeRange() const {
-               return cv::Range(0, nstripes);
-       }
-
-protected:
-       const cv::ParallelLoopBody* body;
-       cv::Range wholeRange;
-       int nstripes;
-};
-
-void parallel_for_(const cv::Range& range, const cv::ParallelLoopBody& body)
-{
-#if defined ENABLE_OMP
-       ParallelLoopBodyWrapper pbody(body, range);
-       cv::Range stripeRange = pbody.stripeRange();
-       int i = 0;
-       #pragma omp parallel for private(i) num_threads(NCORES)
-       for (i = stripeRange.start; i < stripeRange.end; ++i) {
-               cpu_set_t mask;
-               CPU_ZERO(&mask);
-               CPU_SET(coreids[i % 4], &mask);
-
-               if (sched_setaffinity (0, sizeof(mask), &mask) == -1) {
-                       printf("Could not set CPU Affinity, continuing...");
-               }
-
-               pbody(Range(i, i + 1));
-       }
-#else
-       cv::parallel_for_(range, body);
-#endif
-}
-
-size_t HOGDescriptor::getDescriptorSize() const
-{
-       return (size_t)nbins*
-               (blockSize.width/cellSize.width)*
-               (blockSize.height/cellSize.height)*
-               ((winSize.width - blockSize.width)/blockStride.width + 1)*
-               ((winSize.height - blockSize.height)/blockStride.height + 1);
-}
-
-double HOGDescriptor::getWinSigma() const
-{
-       return winSigma >= 0 ? winSigma : (blockSize.width + blockSize.height)/8.;
-}
-
-bool HOGDescriptor::checkDetectorSize() const
-{
-       size_t detectorSize = svmDetector.size(), descriptorSize = getDescriptorSize();
-       return detectorSize == 0 ||
-               detectorSize == descriptorSize ||
-               detectorSize == descriptorSize + 1;
-}
-
-void HOGDescriptor::setSVMDetector(InputArray _svmDetector)
-{
-       _svmDetector.getMat().convertTo(svmDetector, CV_32F);
-       CV_Assert(checkDetectorSize());
-}
-
-void HOGDescriptor::computeGradient(const Mat& img, Mat& grad, Mat& qangle,
-                                                                       Size paddingTL, Size paddingBR) const
-{
-       CV_Assert(img.type() == CV_8U);
-
-       Size gradsize(img.cols + paddingTL.width + paddingBR.width,
-                       img.rows + paddingTL.height + paddingBR.height);
-       grad.create(gradsize, CV_32FC2);  /* <magnitude*(1-alpha), magnitude*alpha> */
-       qangle.create(gradsize, CV_8UC2); /* [0..nbins-1] - quantized gradient orientation */
-       Size wholeSize;
-       Point roiofs;
-       img.locateROI(wholeSize, roiofs);
-
-       int i, x, y;
-       /*    int cn = img.channels(); */
-
-       Mat_<float> _lut(1, 256);
-       const float* lut = &_lut(0, 0);
-
-       if ( gammaCorrection )
-               for ( i = 0; i < 256; i++ )
-                       _lut(0, i) = std::sqrt((float)i);
-       else
-               for ( i = 0; i < 256; i++ )
-                       _lut(0, i) = (float)i;
-
-       AutoBuffer<int> mapbuf(gradsize.width + gradsize.height + 4);
-       int* xmap = (int*)mapbuf + 1;
-       int* ymap = xmap + gradsize.width + 2;
-
-       const int borderType = (int)cv::BORDER_REFLECT_101;
-
-       for ( x = -1; x < gradsize.width + 1; x++ )
-               xmap[x] = cv::borderInterpolate(x - paddingTL.width + roiofs.x,
-                               wholeSize.width, borderType) - roiofs.x;
-       for ( y = -1; y < gradsize.height + 1; y++ )
-               ymap[y] = cv::borderInterpolate(y - paddingTL.height + roiofs.y,
-                               wholeSize.height, borderType) - roiofs.y;
-
-       /* x- & y- derivatives for the whole row */
-       int width = gradsize.width;
-       AutoBuffer<float> _dbuf(width*4);
-       float* dbuf = _dbuf;
-       Mat Dx(1, width, CV_32F, dbuf);
-       Mat Dy(1, width, CV_32F, dbuf + width);
-       Mat Mag(1, width, CV_32F, dbuf + width*2);
-       Mat Angle(1, width, CV_32F, dbuf + width*3);
-
-       int _nbins = nbins;
-       float angleScale = (float)(_nbins/CV_PI);
-
-       for ( y = 0; y < gradsize.height; y++ ) {
-               const uchar* imgPtr  = img.data + img.step*ymap[y];
-               const uchar* prevPtr = img.data + img.step*ymap[y-1];
-               const uchar* nextPtr = img.data + img.step*ymap[y+1];
-               float* gradPtr = (float*)grad.ptr(y);
-               uchar* qanglePtr = (uchar*)qangle.ptr(y);
-
-               for (x = 0; x < width; x++) {
-                       int x1 = xmap[x];
-                       dbuf[x] = (float)(lut[imgPtr[xmap[x+1]]] - lut[imgPtr[xmap[x-1]]]);
-                       dbuf[width + x] = (float)(lut[nextPtr[x1]] - lut[prevPtr[x1]]);
-               }
-
-               cartToPolar(Dx, Dy, Mag, Angle, false);
-
-               for (x = 0; x < width; x++) {
-                       float mag = dbuf[x+width*2], angle = dbuf[x+width*3]*angleScale - 0.5f;
-                       int hidx = cvFloor(angle);
-                       angle -= hidx;
-                       gradPtr[x*2] = mag*(1.f - angle);
-                       gradPtr[x*2+1] = mag*angle;
-
-                       if ( hidx < 0 )
-                               hidx += _nbins;
-                       else if ( hidx >= _nbins )
-                               hidx -= _nbins;
-                       assert((unsigned)hidx < (unsigned)_nbins);
-
-                       qanglePtr[x*2] = (uchar)hidx;
-                       hidx++;
-                       hidx &= hidx < _nbins ? -1 : 0;
-                       qanglePtr[x*2+1] = (uchar)hidx;
-               }
-       }
-}
-
-
-struct HOGCache {
-       struct BlockData {
-               BlockData() : histOfs(0), imgOffset() {}
-               int histOfs;
-               Point imgOffset;
-       };
-
-       struct PixData {
-               size_t gradOfs, qangleOfs;
-               int histOfs[4];
-               float histWeights[4];
-               float gradWeight;
-       };
-
-       HOGCache();
-       HOGCache(const HOGDescriptor* descriptor,
-                       const Mat& img, Size paddingTL, Size paddingBR,
-                       bool useCache, Size cacheStride);
-       virtual ~HOGCache() {};
-       virtual void init(const HOGDescriptor* descriptor,
-                       const Mat& img, Size paddingTL, Size paddingBR,
-                       bool useCache, Size cacheStride);
-
-       Size windowsInImage(Size imageSize, Size winStride) const;
-       Rect getWindow(Size imageSize, Size winStride, int idx) const;
-
-       const float* getBlock(Point pt, float* buf);
-       virtual void normalizeBlockHistogram(float* histogram) const;
-
-       vector<PixData> pixData;
-       vector<BlockData> blockData;
-
-       bool useCache;
-       vector<int> ymaxCached;
-       Size winSize, cacheStride;
-       Size nblocks, ncells;
-       int blockHistogramSize;
-       int count1, count2, count4;
-       Point imgoffset;
-       Mat_<float> blockCache;
-       Mat_<uchar> blockCacheFlags;
-
-       Mat grad, qangle;
-       const HOGDescriptor* descriptor;
-};
-
-
-HOGCache::HOGCache()
-{
-       useCache = false;
-       blockHistogramSize = count1 = count2 = count4 = 0;
-       descriptor = 0;
-}
-
-HOGCache::HOGCache(const HOGDescriptor* _descriptor,
-               const Mat& _img, Size _paddingTL, Size _paddingBR,
-               bool _useCache, Size _cacheStride)
-{
-       init(_descriptor, _img, _paddingTL, _paddingBR, _useCache, _cacheStride);
-}
-
-void HOGCache::init(const HOGDescriptor* _descriptor,
-               const Mat& _img, Size _paddingTL, Size _paddingBR,
-               bool _useCache, Size _cacheStride)
-{
-       descriptor = _descriptor;
-       cacheStride = _cacheStride;
-       useCache = _useCache;
-
-       descriptor->computeGradient(_img, grad, qangle, _paddingTL, _paddingBR);
-       imgoffset = _paddingTL;
-
-       winSize = descriptor->winSize;
-       Size blockSize = descriptor->blockSize;
-       Size blockStride = descriptor->blockStride;
-       Size cellSize = descriptor->cellSize;
-       int i, j, nbins = descriptor->nbins;
-       int rawBlockSize = blockSize.width*blockSize.height;
-
-       nblocks = Size((winSize.width - blockSize.width)/blockStride.width + 1,
-                       (winSize.height - blockSize.height)/blockStride.height + 1);
-       ncells = Size(blockSize.width/cellSize.width, blockSize.height/cellSize.height);
-       blockHistogramSize = ncells.width*ncells.height*nbins;
-
-       if ( useCache ) {
-               Size cacheSize((grad.cols - blockSize.width)/cacheStride.width+1,
-                               (winSize.height/cacheStride.height)+1);
-               blockCache.create(cacheSize.height, cacheSize.width*blockHistogramSize);
-               blockCacheFlags.create(cacheSize);
-               size_t cacheRows = blockCache.rows;
-               ymaxCached.resize(cacheRows);
-               for (size_t ii = 0; ii < cacheRows; ii++ )
-                       ymaxCached[ii] = -1;
-       }
-
-       Mat_<float> weights(blockSize);
-       float sigma = (float)descriptor->getWinSigma();
-       float scale = 1.f/(sigma*sigma*2);
-
-       float blockHalfHeight = blockSize.height*0.5f;
-       float blockHalfWidth = blockSize.width*0.5f;
-       for (i = 0; i < blockSize.height; i++)
-               for (j = 0; j < blockSize.width; j++) {
-                       float di = i - blockHalfHeight;
-                       float dj = j - blockHalfWidth;
-                       weights(i, j) = std::exp(-(di*di + dj*dj)*scale);
-               }
-
-       blockData.resize(nblocks.width*nblocks.height);
-       pixData.resize(rawBlockSize*3);
-
-       /*
-        * Initialize 2 lookup tables, pixData & blockData.
-        * Here is why:
-        *
-        * The detection algorithm runs in 4 nested loops (at each pyramid layer):
-        *  loop over the windows within the input image
-        *    loop over the blocks within each window
-        *      loop over the cells within each block
-        *        loop over the pixels in each cell
-        *
-        * As each of the loops runs over a 2-dimensional array,
-        * we could get 8(!) nested loops in total, which is very, very slow.
-        *
-        * To speed things up, we do the following:
-        *   1. the loop over the windows is unrolled in the HOGDescriptor::{compute|detect} methods;
-        *         inside, we compute the current search window using the getWindow() method.
-        *         Yes, it involves some overhead (a function call plus a couple of divisions),
-        *         but it is tiny in practice.
-        *   2. loop over the blocks is also unrolled. Inside we use pre-computed blockData[j]
-        *         to set up gradient and histogram pointers.
-        *   3. loops over cells and pixels in each cell are merged
-        *       (since there is no overlap between cells, each pixel in the block is processed once)
-        *      and also unrolled. Inside we use pixData[k] to access the gradient values and
-        *      update the histogram.
-        */
-
-       count1 = count2 = count4 = 0;
-       for ( j = 0; j < blockSize.width; j++ )
-               for ( i = 0; i < blockSize.height; i++ ) {
-                       PixData* data = 0;
-                       float cellX = (j+0.5f)/cellSize.width - 0.5f;
-                       float cellY = (i+0.5f)/cellSize.height - 0.5f;
-                       int icellX0 = cvFloor(cellX);
-                       int icellY0 = cvFloor(cellY);
-                       int icellX1 = icellX0 + 1, icellY1 = icellY0 + 1;
-                       cellX -= icellX0;
-                       cellY -= icellY0;
-
-                       if ( (unsigned)icellX0 < (unsigned)ncells.width &&
-                                       (unsigned)icellX1 < (unsigned)ncells.width ) {
-                               if ( (unsigned)icellY0 < (unsigned)ncells.height &&
-                                               (unsigned)icellY1 < (unsigned)ncells.height ) {
-                                       data = &pixData[rawBlockSize*2 + (count4++)];
-                                       data->histOfs[0] = (icellX0*ncells.height + icellY0)*nbins;
-                                       data->histWeights[0] = (1.f - cellX)*(1.f - cellY);
-                                       data->histOfs[1] = (icellX1*ncells.height + icellY0)*nbins;
-                                       data->histWeights[1] = cellX*(1.f - cellY);
-                                       data->histOfs[2] = (icellX0*ncells.height + icellY1)*nbins;
-                                       data->histWeights[2] = (1.f - cellX)*cellY;
-                                       data->histOfs[3] = (icellX1*ncells.height + icellY1)*nbins;
-                                       data->histWeights[3] = cellX*cellY;
-                               } else {
-                                       data = &pixData[rawBlockSize + (count2++)];
-                                       if ( (unsigned)icellY0 < (unsigned)ncells.height ) {
-                                               icellY1 = icellY0;
-                                               cellY = 1.f - cellY;
-                                       }
-                                       data->histOfs[0] = (icellX0*ncells.height + icellY1)*nbins;
-                                       data->histWeights[0] = (1.f - cellX)*cellY;
-                                       data->histOfs[1] = (icellX1*ncells.height + icellY1)*nbins;
-                                       data->histWeights[1] = cellX*cellY;
-                                       data->histOfs[2] = data->histOfs[3] = 0;
-                                       data->histWeights[2] = data->histWeights[3] = 0;
-                               }
-                       } else {
-                               if ( (unsigned)icellX0 < (unsigned)ncells.width ) {
-                                       icellX1 = icellX0;
-                                       cellX = 1.f - cellX;
-                               }
-
-                               if ( (unsigned)icellY0 < (unsigned)ncells.height &&
-                                               (unsigned)icellY1 < (unsigned)ncells.height ) {
-                                       data = &pixData[rawBlockSize + (count2++)];
-                                       data->histOfs[0] = (icellX1*ncells.height + icellY0)*nbins;
-                                       data->histWeights[0] = cellX*(1.f - cellY);
-                                       data->histOfs[1] = (icellX1*ncells.height + icellY1)*nbins;
-                                       data->histWeights[1] = cellX*cellY;
-                                       data->histOfs[2] = data->histOfs[3] = 0;
-                                       data->histWeights[2] = data->histWeights[3] = 0;
-                               } else {
-                                       data = &pixData[count1++];
-                                       if ( (unsigned)icellY0 < (unsigned)ncells.height ) {
-                                               icellY1 = icellY0;
-                                               cellY = 1.f - cellY;
-                                       }
-                                       data->histOfs[0] = (icellX1*ncells.height + icellY1)*nbins;
-                                       data->histWeights[0] = cellX*cellY;
-                                       data->histOfs[1] = data->histOfs[2] = data->histOfs[3] = 0;
-                                       data->histWeights[1] = data->histWeights[2] = data->histWeights[3] = 0;
-                               }
-                       }
-                       data->gradOfs = (grad.cols*i + j)*2;
-                       data->qangleOfs = (qangle.cols*i + j)*2;
-                       data->gradWeight = weights(i, j);
-               }
-
-       assert(count1 + count2 + count4 == rawBlockSize);
-       /* defragment pixData */
-       for ( j = 0; j < count2; j++ )
-               pixData[j + count1] = pixData[j + rawBlockSize];
-       for ( j = 0; j < count4; j++ )
-               pixData[j + count1 + count2] = pixData[j + rawBlockSize*2];
-       count2 += count1;
-       count4 += count2;
-
-       /* initialize blockData */
-       for ( j = 0; j < nblocks.width; j++ )
-               for ( i = 0; i < nblocks.height; i++ ) {
-                       BlockData& data = blockData[j*nblocks.height + i];
-                       data.histOfs = (j*nblocks.height + i)*blockHistogramSize;
-                       data.imgOffset = Point(j*blockStride.width, i*blockStride.height);
-               }
-}
-
-
-const float* HOGCache::getBlock(Point pt, float* buf)
-{
-       float* blockHist = buf;
-       assert(descriptor != 0);
-
-       Size blockSize = descriptor->blockSize;
-       pt += imgoffset;
-
-       CV_Assert( (unsigned)pt.x <= (unsigned)(grad.cols - blockSize.width) &&
-                       (unsigned)pt.y <= (unsigned)(grad.rows - blockSize.height) );
-
-       if ( useCache ) {
-               CV_Assert(pt.x % cacheStride.width == 0 &&
-                               pt.y % cacheStride.height == 0);
-               Point cacheIdx(pt.x/cacheStride.width,
-                               (pt.y/cacheStride.height) % blockCache.rows);
-               if ( pt.y != ymaxCached[cacheIdx.y] ) {
-                       Mat_<uchar> cacheRow = blockCacheFlags.row(cacheIdx.y);
-                       cacheRow = (uchar)0;
-                       ymaxCached[cacheIdx.y] = pt.y;
-               }
-
-               blockHist = &blockCache[cacheIdx.y][cacheIdx.x*blockHistogramSize];
-               uchar& computedFlag = blockCacheFlags(cacheIdx.y, cacheIdx.x);
-               if ( computedFlag != 0 )
-                       return blockHist;
-               computedFlag = (uchar)1; /* set it at once, before actual computing */
-       }
-
-       int k, C1 = count1, C2 = count2, C4 = count4;
-       const float* gradPtr = (const float*)(grad.data + grad.step*pt.y) + pt.x*2;
-       const uchar* qanglePtr = qangle.data + qangle.step*pt.y + pt.x*2;
-
-       CV_Assert(blockHist != 0);
-       for ( k = 0; k < blockHistogramSize; k++ )
-               blockHist[k] = 0.f;
-
-       const PixData* _pixData = &pixData[0];
-
-       for ( k = 0; k < C1; k++ ) {
-               const PixData& pk = _pixData[k];
-               const float* a = gradPtr + pk.gradOfs;
-               float w = pk.gradWeight*pk.histWeights[0];
-               const uchar* h = qanglePtr + pk.qangleOfs;
-               int h0 = h[0], h1 = h[1];
-               float* hist = blockHist + pk.histOfs[0];
-               float t0 = hist[h0] + a[0]*w;
-               float t1 = hist[h1] + a[1]*w;
-               hist[h0] = t0;
-               hist[h1] = t1;
-       }
-
-       for ( ; k < C2; k++ ) {
-               const PixData& pk = _pixData[k];
-               const float* a = gradPtr + pk.gradOfs;
-               float w, t0, t1, a0 = a[0], a1 = a[1];
-               const uchar* h = qanglePtr + pk.qangleOfs;
-               int h0 = h[0], h1 = h[1];
-
-               float* hist = blockHist + pk.histOfs[0];
-               w = pk.gradWeight*pk.histWeights[0];
-               t0 = hist[h0] + a0*w;
-               t1 = hist[h1] + a1*w;
-               hist[h0] = t0;
-               hist[h1] = t1;
-
-               hist = blockHist + pk.histOfs[1];
-               w = pk.gradWeight*pk.histWeights[1];
-               t0 = hist[h0] + a0*w;
-               t1 = hist[h1] + a1*w;
-               hist[h0] = t0;
-               hist[h1] = t1;
-       }
-
-       for ( ; k < C4; k++ ) {
-               const PixData& pk = _pixData[k];
-               const float* a = gradPtr + pk.gradOfs;
-               float w, t0, t1, a0 = a[0], a1 = a[1];
-               const uchar* h = qanglePtr + pk.qangleOfs;
-               int h0 = h[0], h1 = h[1];
-
-               float* hist = blockHist + pk.histOfs[0];
-               w = pk.gradWeight*pk.histWeights[0];
-               t0 = hist[h0] + a0*w;
-               t1 = hist[h1] + a1*w;
-               hist[h0] = t0;
-               hist[h1] = t1;
-
-               hist = blockHist + pk.histOfs[1];
-               w = pk.gradWeight*pk.histWeights[1];
-               t0 = hist[h0] + a0*w;
-               t1 = hist[h1] + a1*w;
-               hist[h0] = t0;
-               hist[h1] = t1;
-
-               hist = blockHist + pk.histOfs[2];
-               w = pk.gradWeight*pk.histWeights[2];
-               t0 = hist[h0] + a0*w;
-               t1 = hist[h1] + a1*w;
-               hist[h0] = t0;
-               hist[h1] = t1;
-
-               hist = blockHist + pk.histOfs[3];
-               w = pk.gradWeight*pk.histWeights[3];
-               t0 = hist[h0] + a0*w;
-               t1 = hist[h1] + a1*w;
-               hist[h0] = t0;
-               hist[h1] = t1;
-       }
-
-       normalizeBlockHistogram(blockHist);
-
-       return blockHist;
-}
-
-void HOGCache::normalizeBlockHistogram(float* _hist) const
-{
-#ifdef ENABLE_NEON
-       /* NEON vector used to load the histogram from memory */
-       float32x4_t hist_v;
-       /* Accumulator for the running sum of squares */
-       float32x4_t acc = vdupq_n_f32(0.f);
-#endif
-
-       /* Pointer to the histogram in memory */
-       float *hist_ptr = &_hist[0];
-       /* Accumulates the sums computed below */
-       float sum = 0.f;
-       size_t sz = blockHistogramSize;
-
-#ifdef ENABLE_NEON
-       for (; sz != 0u; sz -= 4u) {
-               hist_v = vld1q_f32(hist_ptr);
-               acc = vmlaq_f32(acc, hist_v, hist_v);
-               hist_ptr += 4;
-       }
-
-       sum += vgetq_lane_f32(acc, 0) + vgetq_lane_f32(acc, 1) +
-               vgetq_lane_f32(acc, 2) + vgetq_lane_f32(acc, 3);
-
-       /* Reset accumulator */
-       acc = vdupq_n_f32(0.f);
-
-       sz = blockHistogramSize;
-       hist_ptr = &_hist[0];
-#else
-       for (size_t i = 0; i < sz; ++i)
-               sum += hist_ptr[i] * hist_ptr[i];
-#endif
-
-       float scale = 1.f / (std::sqrt(sum) + sz * 0.1f);
-       sum = 0.f;
-
-#ifdef ENABLE_NEON
-       float32x4_t thres_v = vdupq_n_f32((float)descriptor->L2HysThreshold);
-
-       for (; sz != 0; sz -= 4) {
-               /* Clamp each scaled histogram value to the threshold and
-                * accumulate the square of the clamped value */
-               hist_v = vminq_f32(vmulq_n_f32(vld1q_f32(hist_ptr), scale), thres_v);
-               acc = vmlaq_f32(acc, hist_v, hist_v);
-               /* Write the clamped values back to the histogram in memory */
-               vst1q_f32(hist_ptr, hist_v);
-               hist_ptr += 4;
-       }
-
-       sum += vgetq_lane_f32(acc, 0) + vgetq_lane_f32(acc, 1) +
-               vgetq_lane_f32(acc, 2) + vgetq_lane_f32(acc, 3);
-
-#else
-       float thresh = (float)descriptor->L2HysThreshold;
-       for (size_t i = 0; i < sz; ++i) {
-               hist_ptr[i] = std::min(hist_ptr[i] * scale, thresh);
-               sum += hist_ptr[i] * hist_ptr[i];
-       }
-#endif
-
-       scale = 1.f / (std::sqrt(sum) + 1e-3f);
-
-#ifdef ENABLE_NEON
-       sz = blockHistogramSize;
-       hist_ptr = &_hist[0];
-
-       /* Scale histogram (normalize): */
-       for (; sz != 0; sz -= 4) {
-               vst1q_f32(hist_ptr, vmulq_n_f32(vld1q_f32(hist_ptr), scale));
-               hist_ptr += 4;
-       }
-#else
-       for (size_t i = 0; i < sz; i++ )
-               hist_ptr[i] *= scale;
-#endif
-}
-
-
-Size HOGCache::windowsInImage(Size imageSize, Size winStride) const
-{
-       return Size((imageSize.width - winSize.width)/winStride.width + 1,
-                               (imageSize.height - winSize.height)/winStride.height + 1);
-}
-
-Rect HOGCache::getWindow(Size imageSize, Size winStride, int idx) const
-{
-       int nwindowsX = (imageSize.width - winSize.width)/winStride.width + 1;
-       int y = idx / nwindowsX;
-       int x = idx - nwindowsX*y;
-       return Rect( x*winStride.width, y*winStride.height, winSize.width, winSize.height );
-}
-
-
-void HOGDescriptor::compute(const Mat& img, vector<float>& descriptors,
-                                                       Size winStride, Size padding,
-                                                       const vector<Point>& locations) const
-{
-       if ( winStride == Size() )
-               winStride = cellSize;
-       Size cacheStride(gcd(winStride.width, blockStride.width),
-                       gcd(winStride.height, blockStride.height));
-       size_t nwindows = locations.size();
-       padding.width = (int)alignSize(std::max(padding.width, 0), cacheStride.width);
-       padding.height = (int)alignSize(std::max(padding.height, 0), cacheStride.height);
-       Size paddedImgSize(img.cols + padding.width*2, img.rows + padding.height*2);
-
-       HOGCache cache(this, img, padding, padding, nwindows == 0, cacheStride);
-
-       if ( !nwindows )
-               nwindows = cache.windowsInImage(paddedImgSize, winStride).area();
-
-       const HOGCache::BlockData* blockData = &cache.blockData[0];
-
-       int nblocks = cache.nblocks.area();
-       int blockHistogramSize = cache.blockHistogramSize;
-       size_t dsize = getDescriptorSize();
-       descriptors.resize(dsize*nwindows);
-
-       for ( size_t i = 0; i < nwindows; i++ ) {
-               float* descriptor = &descriptors[i*dsize];
-
-               Point pt0;
-               if ( !locations.empty() ) {
-                       pt0 = locations[i];
-                       if ( pt0.x < -padding.width || pt0.x > img.cols + padding.width - winSize.width ||
-                                       pt0.y < -padding.height || pt0.y > img.rows + padding.height - winSize.height )
-                               continue;
-               } else {
-                       pt0 = cache.getWindow(paddedImgSize, winStride, (int)i).tl() - Point(padding);
-                       CV_Assert(pt0.x % cacheStride.width == 0 && pt0.y % cacheStride.height == 0);
-               }
-
-               for ( int j = 0; j < nblocks; j++ ) {
-                       const HOGCache::BlockData& bj = blockData[j];
-                       Point pt = pt0 + bj.imgOffset;
-
-                       float* dst = descriptor + bj.histOfs;
-                       const float* src = cache.getBlock(pt, dst);
-                       if ( src != dst )
-                               for ( int k = 0; k < blockHistogramSize; k++ )
-                                       dst[k] = src[k];
-               }
-       }
-}
-
-
-void HOGDescriptor::detect(const Mat& img,
-               vector<Point>& hits, vector<double>& weights, double hitThreshold,
-               Size winStride, Size padding, const vector<Point>& locations) const
-{
-       hits.clear();
-       if ( svmDetector.empty() )
-               return;
-
-       if ( winStride == Size() )
-               winStride = cellSize;
-       Size cacheStride(gcd(winStride.width, blockStride.width),
-                       gcd(winStride.height, blockStride.height));
-       size_t nwindows = locations.size();
-       padding.width = (int)alignSize(std::max(padding.width, 0), cacheStride.width);
-       padding.height = (int)alignSize(std::max(padding.height, 0), cacheStride.height);
-       Size paddedImgSize(img.cols + padding.width*2, img.rows + padding.height*2);
-
-       HOGCache cache(this, img, padding, padding, nwindows == 0, cacheStride);
-
-       if ( !nwindows )
-               nwindows = cache.windowsInImage(paddedImgSize, winStride).area();
-
-       const HOGCache::BlockData* blockData = &cache.blockData[0];
-
-       int nblocks = cache.nblocks.area();
-       int blockHistogramSize = cache.blockHistogramSize;
-       size_t dsize = getDescriptorSize();
-
-       double rho = svmDetector.size() > dsize ? svmDetector[dsize] : 0;
-       vector<float> blockHist(blockHistogramSize);
-
-       for ( size_t i = 0; i < nwindows; i++ ) {
-               Point pt0;
-               if ( !locations.empty() ) {
-                       pt0 = locations[i];
-                       if ( pt0.x < -padding.width || pt0.x > img.cols + padding.width - winSize.width ||
-                                       pt0.y < -padding.height || pt0.y > img.rows + padding.height - winSize.height )
-                               continue;
-               } else {
-                       pt0 = cache.getWindow(paddedImgSize, winStride, (int)i).tl() - Point(padding);
-                       CV_Assert(pt0.x % cacheStride.width == 0 && pt0.y % cacheStride.height == 0);
-               }
-               double s = rho;
-               const float* svmVec = &svmDetector[0];
-               int j, k;
-
-               for ( j = 0; j < nblocks; j++, svmVec += blockHistogramSize ) {
-                       const HOGCache::BlockData& bj = blockData[j];
-                       Point pt = pt0 + bj.imgOffset;
-
-                       const float* vec = cache.getBlock(pt, &blockHist[0]);
-#ifdef ENABLE_NEON
-                       float32x4_t vec_v; /* NEON feature vector */
-                       float32x4_t svm_v; /* NEON SVM feature weights */
-                       float32x4_t acc = vdupq_n_f32(0.f); /* NEON partial sum */
-                       for ( k = 0; k <= blockHistogramSize - 4; k += 4 ) {
-                               vec_v = vld1q_f32(vec + k);
-                               svm_v = vld1q_f32(svmVec + k);
-                               acc = vmlaq_f32(acc, vec_v, svm_v);
-                       }
-
-                       s += vgetq_lane_f32(acc, 0) + vgetq_lane_f32(acc, 1) +
-                               vgetq_lane_f32(acc, 2) + vgetq_lane_f32(acc, 3);
-
-#else
-                       for ( k = 0; k <= blockHistogramSize - 4; k += 4 )
-                               s += vec[k]*svmVec[k] + vec[k+1]*svmVec[k+1] +
-                                       vec[k+2]*svmVec[k+2] + vec[k+3]*svmVec[k+3];
-#endif
-                       for ( ; k < blockHistogramSize; k++ )
-                               s += vec[k]*svmVec[k];
-               }
-
-               if ( s >= hitThreshold ) {
-                       hits.push_back(pt0);
-                       weights.push_back(s);
-               }
-       }
-}
-
-void HOGDescriptor::detect(const Mat& img, vector<Point>& hits, double hitThreshold,
-               Size winStride, Size padding, const vector<Point>& locations) const
-{
-       vector<double> weightsV;
-       detect(img, hits, weightsV, hitThreshold, winStride, padding, locations);
-}
-
-class HOGInvoker : public ParallelLoopBody {
-       public:
-               HOGInvoker(const HOGDescriptor* _hog, const Mat& _img,
-                               double _hitThreshold, Size _winStride, Size _padding,
-                               const double* _levelScale, std::vector<Rect> * _vec, Mutex* _mtx,
-                               std::vector<double>* _weights = 0, std::vector<double>* _scales = 0) {
-                       hog = _hog;
-                       img = _img;
-                       hitThreshold = _hitThreshold;
-                       winStride = _winStride;
-                       padding = _padding;
-                       levelScale = _levelScale;
-                       vec = _vec;
-                       weights = _weights;
-                       scales = _scales;
-                       mtx = _mtx;
-               }
-
-               void operator()(const Range& range) const {
-                       int i, i1 = range.start, i2 = range.end;
-                       double minScale = i1 > 0 ? levelScale[i1] : i2 > 1 ? levelScale[i1+1] : std::max(img.cols, img.rows);
-                       Size maxSz(cvCeil(img.cols/minScale), cvCeil(img.rows/minScale));
-                       Mat smallerImgBuf(maxSz, img.type());
-                       vector<Point> locations;
-                       vector<double> hitsWeights;
-
-                       Size wholeSize;
-                       Point offset;
-                       img.locateROI(wholeSize, offset);
-
-                       for ( i = i1; i < i2; i++ ) {
-                               double scale = levelScale[i];
-                               Size sz(cvRound(img.cols/scale), cvRound(img.rows/scale));
-                               Mat smallerImg(sz, img.type(), smallerImgBuf.data);
-                               if (sz == img.size())
-                                       smallerImg = Mat(sz, img.type(), img.data, img.step);
-                               else
-                                       resize(img, smallerImg, sz);
-                               hog->detect(smallerImg, locations, hitsWeights, hitThreshold, winStride, padding);
-
-                               Size scaledWinSize = Size(cvRound(hog->winSize.width*scale), cvRound(hog->winSize.height*scale));
-
-                               mtx->lock();
-                               for ( size_t j = 0; j < locations.size(); j++ ) {
-                                       vec->push_back(Rect(cvRound(locations[j].x*scale),
-                                                               cvRound(locations[j].y*scale),
-                                                               scaledWinSize.width, scaledWinSize.height));
-                                       if (scales) {
-                                               scales->push_back(scale);
-                                       }
-                               }
-                               mtx->unlock();
-
-                               if (weights && (!hitsWeights.empty())) {
-                                       mtx->lock();
-                                       for (size_t j = 0; j < locations.size(); j++) {
-                                               weights->push_back(hitsWeights[j]);
-                                       }
-                                       mtx->unlock();
-                               }
-                       }
-               }
-
-               const HOGDescriptor* hog;
-               Mat img;
-               double hitThreshold;
-               Size winStride;
-               Size padding;
-               const double* levelScale;
-               std::vector<Rect>* vec;
-               std::vector<double>* weights;
-               std::vector<double>* scales;
-               Mutex* mtx;
-};
-
-
-void HOGDescriptor::detectMultiScale(
-               const Mat& img, vector<Rect>& foundLocations, vector<double>& foundWeights,
-               double hitThreshold, Size winStride, Size padding,
-               double scale0, double finalThreshold, bool useMeanshiftGrouping) const
-{
-       double scale = 1.;
-       int levels = 0;
-
-       vector<double> levelScale;
-       for ( levels = 0; levels < nlevels; levels++ ) {
-               levelScale.push_back(scale);
-               if ( cvRound(img.cols/scale) < winSize.width ||
-                               cvRound(img.rows/scale) < winSize.height ||
-                               scale0 <= 1 )
-                       break;
-               scale *= scale0;
-       }
-       levels = std::max(levels, 1);
-       levelScale.resize(levels);
-
-       std::vector<Rect> allCandidates;
-       std::vector<double> tempScales;
-       std::vector<double> tempWeights;
-       std::vector<double> foundScales;
-       Mutex mtx;
-
-       modifiedcv::parallel_for_(Range(0, (int)levelScale.size()),
-                       HOGInvoker(this, img, hitThreshold, winStride, padding, &levelScale[0], &allCandidates, &mtx, &tempWeights, &tempScales));
-
-       std::copy(tempScales.begin(), tempScales.end(), back_inserter(foundScales));
-       foundLocations.clear();
-       std::copy(allCandidates.begin(), allCandidates.end(), back_inserter(foundLocations));
-       foundWeights.clear();
-       std::copy(tempWeights.begin(), tempWeights.end(), back_inserter(foundWeights));
-
-       if ( useMeanshiftGrouping ) {
-               groupRectangles_meanshift(foundLocations, foundWeights, foundScales, finalThreshold, winSize);
-       } else {
-               groupRectangles(foundLocations, foundWeights, (int)finalThreshold, 0.2);
-       }
-}
-
-void HOGDescriptor::detectMultiScale(const Mat& img, vector<Rect>& foundLocations,
-               double hitThreshold, Size winStride, Size padding,
-               double scale0, double finalThreshold, bool useMeanshiftGrouping) const
-{
-       vector<double> foundWeights;
-       detectMultiScale(img, foundLocations, foundWeights, hitThreshold, winStride,
-                       padding, scale0, finalThreshold, useMeanshiftGrouping);
-}
-
-void HOGDescriptor::groupRectangles(vector<cv::Rect>& rectList, vector<double>& weights, int groupThreshold, double eps) const
-{
-       if ( groupThreshold <= 0 || rectList.empty() ) {
-               return;
-       }
-
-       CV_Assert(rectList.size() == weights.size());
-
-       vector<int> labels;
-       int nclasses = partition(rectList, labels, SimilarRects(eps));
-
-       vector<cv::Rect_<double> > rrects(nclasses);
-       vector<int> numInClass(nclasses, 0);
-       vector<double> foundWeights(nclasses, DBL_MIN);
-       int i, j, nlabels = (int)labels.size();
-
-       for ( i = 0; i < nlabels; i++ ) {
-               int cls = labels[i];
-               rrects[cls].x += rectList[i].x;
-               rrects[cls].y += rectList[i].y;
-               rrects[cls].width += rectList[i].width;
-               rrects[cls].height += rectList[i].height;
-               foundWeights[cls] = max(foundWeights[cls], weights[i]);
-               numInClass[cls]++;
-       }
-
-       for ( i = 0; i < nclasses; i++ ) {
-               /* find the average of all ROI in the cluster */
-               cv::Rect_<double> r = rrects[i];
-               double s = 1.0/numInClass[i];
-               rrects[i] = cv::Rect_<double>(cv::saturate_cast<double>(r.x*s),
-                               cv::saturate_cast<double>(r.y*s),
-                               cv::saturate_cast<double>(r.width*s),
-                               cv::saturate_cast<double>(r.height*s));
-       }
-
-       rectList.clear();
-       weights.clear();
-
-       for ( i = 0; i < nclasses; i++ ) {
-               cv::Rect r1 = rrects[i];
-               int n1 = numInClass[i];
-               double w1 = foundWeights[i];
-               if ( n1 <= groupThreshold )
-                       continue;
-               /* filter out small rectangles inside large rectangles */
-               for ( j = 0; j < nclasses; j++ ) {
-                       int n2 = numInClass[j];
-
-                       if ( j == i || n2 <= groupThreshold )
-                               continue;
-
-                       cv::Rect r2 = rrects[j];
-
-                       int dx = cv::saturate_cast<int>(r2.width * eps);
-                       int dy = cv::saturate_cast<int>(r2.height * eps);
-
-                       if ( r1.x >= r2.x - dx &&
-                                       r1.y >= r2.y - dy &&
-                                       r1.x + r1.width <= r2.x + r2.width + dx &&
-                                       r1.y + r1.height <= r2.y + r2.height + dy &&
-                                       (n2 > std::max(3, n1) || n1 < 3) )
-                               break;
-               }
-
-               if ( j == nclasses ) {
-                       rectList.push_back(r1);
-                       weights.push_back(w1);
-               }
-       }
-}
-}
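
The block above drops the module's private copy of the HOG descriptor and people detector. Presumably the stock cv::HOGDescriptor shipped with OpenCV 3.4.0 (opencv2/objdetect.hpp) takes over that role; the sketch below only illustrates that API under this assumption — detectPeople, frameGray and the stride/padding/scale values are placeholders, not code from this change.

#include <opencv2/core.hpp>
#include <opencv2/objdetect.hpp>
#include <vector>

/* Sketch only: the removed local HOG implementation replaced by the
 * built-in detector. Names and parameter values are hypothetical. */
static std::vector<cv::Rect> detectPeople(const cv::Mat& frameGray)
{
	cv::HOGDescriptor hog; /* default 64x128 window, 16x16 blocks, 9 bins */
	hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

	std::vector<cv::Rect> found;
	std::vector<double> weights;
	/* example values: hitThreshold 0, winStride 8x8, padding 32x32, scale 1.05 */
	hog.detectMultiScale(frameGray, found, weights, 0,
			cv::Size(8, 8), cv::Size(32, 32), 1.05, 2.0, false);
	return found;
}

With the built-in detector, the scale pyramid and rectangle grouping are handled inside detectMultiScale, so no HOGCache-style bookkeeping is needed on the caller's side.
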
index b5e6da5..85b9803 100644 (file)
@@ -16,7 +16,8 @@
 
 #include "MFTracker.h"
 
-#include <opencv/cv.h>
+#include "opencv2/video/tracking.hpp"
+#include "opencv2/imgproc.hpp"
 
 namespace mediavision {
 namespace surveillance {
@@ -169,13 +170,13 @@ bool MFTracker::medianFlowImpl(
        std::vector<float> errors(numberOfPointsToTrackOld);
 
        std::vector<cv::Mat> tempPyramid;
-       cv::buildOpticalFlowPyramid(
+       buildOpticalFlowPyramid(
                newImage_gray,
                tempPyramid,
                __params.mWindowSize,
                __params.mPyrMaxLevel);
 
-       cv::calcOpticalFlowPyrLK(__pyramid,
+       calcOpticalFlowPyrLK(__pyramid,
                        tempPyramid,
                        pointsToTrackOld,
                        pointsToTrackNew,
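
For reference, here is a minimal, self-contained sketch of the buildOpticalFlowPyramid / calcOpticalFlowPyrLK pair used in the hunk above, written against the opencv2/video/tracking.hpp header it now includes. The function name, the Mat and point arguments, and the window size and pyramid depth are illustrative stand-ins for the tracker's __params, not values taken from this change.

#include <opencv2/core.hpp>
#include <opencv2/video/tracking.hpp>
#include <vector>

/* Sketch only: pyramidal Lucas-Kanade tracking between two grayscale frames.
 * prevGray/nextGray and the parameters below are placeholders. */
static void trackPoints(const cv::Mat& prevGray, const cv::Mat& nextGray,
		const std::vector<cv::Point2f>& prevPts,
		std::vector<cv::Point2f>& nextPts)
{
	const cv::Size winSize(21, 21); /* analogue of __params.mWindowSize */
	const int maxLevel = 3;         /* analogue of __params.mPyrMaxLevel */

	/* Precompute both pyramids so they can be reused, e.g. for the forward
	 * and backward passes of a median-flow style consistency check. */
	std::vector<cv::Mat> prevPyr, nextPyr;
	cv::buildOpticalFlowPyramid(prevGray, prevPyr, winSize, maxLevel);
	cv::buildOpticalFlowPyramid(nextGray, nextPyr, winSize, maxLevel);

	std::vector<uchar> status;
	std::vector<float> err;
	cv::calcOpticalFlowPyrLK(prevPyr, nextPyr, prevPts, nextPts,
			status, err, winSize, maxLevel);
}
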
index bbd92e9..d2dddcc 100644 (file)
 
 #include "SurveillanceHelper.h"
 
-#include <mv_private.h>
+#include "mv_private.h"
 
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc.hpp"
+#include "opencv2/highgui.hpp"
 
 namespace mediavision {
 namespace surveillance {
index f729b5b..6c761b8 100644 (file)
@@ -1,7 +1,7 @@
 Name:        capi-media-vision
 Summary:     Media Vision library for Tizen Native API
-Version:     0.3.27
-Release:     5
+Version:     0.4.27
+Release:     1
 Group:       Multimedia/Framework
 License:     Apache-2.0 and BSD-3-Clause
 Source0:     %{name}-%{version}.tar.gz
@@ -10,7 +10,7 @@ BuildRequires: pkgconfig(capi-media-tool)
 BuildRequires: pkgconfig(libtbm)
 BuildRequires: pkgconfig(dlog)
 BuildRequires: pkgconfig(capi-system-info)
-BuildRequires: pkgconfig(opencv)
+BuildRequires: pkgconfig(opencv) >= 3.4.0
 BuildRequires: pkgconfig(zbar)
 BuildRequires: pkgconfig(glib-2.0)
 # Change to the pkgconfig(zint) after zint package refactor
index b72c2e3..a02486c 100644 (file)
@@ -23,9 +23,9 @@
 
 #include <setjmp.h>
 
-#include <opencv2/core/core.hpp>
-#include <opencv2/highgui/highgui.hpp>
-#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/core.hpp>
+#include <opencv2/highgui.hpp>
+#include <opencv2/imgproc.hpp>
 
 /**
  * @file   ImageHelper.cpp
index 616e411..c4d9698 100644 (file)
@@ -19,7 +19,7 @@
 #include "mv_private.h"
 #include "ImageHelper.h"
 
-#include <opencv2/core/core.hpp>
+#include <opencv2/core.hpp>
 
 /**
  * @file   image_helper.cpp