/*
* GStreamer
* Copyright (C) 2013 Miguel Casas-Sanchez <miguelecasassanchez@gmail.com>
- * Except: Parts of code inside the preprocessor define CODE_FROM_OREILLY_BOOK,
- * which are downloaded from O'Reilly website
+ * Except: Parts of code inside the preprocessor define CODE_FROM_OREILLY_BOOK,
+ * which are downloaded from O'Reilly website
* [http://examples.oreilly.com/9780596516130/]
* and adapted. Its license reads:
* "Oct. 3, 2008
- * Right to use this code in any way you want without warrenty, support or
- * any guarentee of it working. "
+ * Right to use this code in any way you want without warranty, support or
+ * any guarantee of it working. "
+ *
*
- *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
*
* This element creates and updates a fg/bg model using one of several approaches.
* The one called "codebook" refers to the codebook approach following the opencv
- * O'Reilly book [1] implementation of the algorithm described in K. Kim,
- * T. H. Chalidabhongse, D. Harwood and L. Davis [2]. BackgroundSubtractorMOG [3],
- * or MOG for shorts, refers to a Gaussian Mixture-based Background/Foreground
+ * O'Reilly book [1] implementation of the algorithm described in K. Kim,
+ * T. H. Chalidabhongse, D. Harwood and L. Davis [2]. BackgroundSubtractorMOG [3],
+ * or MOG for short, refers to a Gaussian Mixture-based Background/Foreground
* Segmentation Algorithm. OpenCV MOG implements the algorithm described in [4].
- * BackgroundSubtractorMOG2 [5], refers to another Gaussian Mixture-based
- * Background/Foreground segmentation algorithm. OpenCV MOG2 implements the
+ * BackgroundSubtractorMOG2 [5], refers to another Gaussian Mixture-based
+ * Background/Foreground segmentation algorithm. OpenCV MOG2 implements the
* algorithm described in [6] and [7].
*
- * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary Bradski
+ * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary Bradski
* and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
- * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
+ * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
* Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005.
* [3] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
- * [4] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
- * mixture model for real-time tracking with shadow detection", Proc. 2nd
+ * [4] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
+ * mixture model for real-time tracking with shadow detection", Proc. 2nd
* European Workshop on Advanced Video-Based Surveillance Systems, 2001
* [5] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
- * [6] Z.Zivkovic, "Improved adaptive Gausian mixture model for background
+ * [6] Z.Zivkovic, "Improved adaptive Gaussian mixture model for background
* subtraction", International Conference Pattern Recognition, UK, August, 2004.
- * [7] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
- * per Image Pixel for the Task of Background Subtraction", Pattern Recognition
+ * [7] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
+ * per Image Pixel for the Task of Background Subtraction", Pattern Recognition
* Letters, vol. 27, no. 7, pages 773-780, 2006.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
+ *
* |[
- * gst-launch-1.0 v4l2src device=/dev/video0 ! videoconvert ! video/x-raw,width=320,height=240 ! videoconvert ! segmentation test-mode=true method=2 ! videoconvert ! ximagesink
+ * gst-launch-1.0 v4l2src device=/dev/video0 ! videoconvert ! segmentation test-mode=true method=2 ! videoconvert ! ximagesink
* ]|
- * </refsect2>
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
-#include <gst/gst.h>
-
#include "gstsegmentation.h"
-#include <opencv2/video/background_segm.hpp>
+#include <opencv2/imgproc.hpp>
GST_DEBUG_CATEGORY_STATIC (gst_segmentation_debug);
#define GST_CAT_DEFAULT gst_segmentation_debug
+using namespace cv;
+
/* Filter signals and args */
enum
{
return etype;
}
-G_DEFINE_TYPE (GstSegmentation, gst_segmentation, GST_TYPE_VIDEO_FILTER);
+G_DEFINE_TYPE (GstSegmentation, gst_segmentation, GST_TYPE_OPENCV_VIDEO_FILTER);
+
static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
-static void gst_segmentation_set_property (GObject * object, guint prop_id,
+static void
+gst_segmentation_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec);
-static void gst_segmentation_get_property (GObject * object, guint prop_id,
+static void
+gst_segmentation_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
-static GstFlowReturn gst_segmentation_transform_ip (GstVideoFilter * btrans,
- GstVideoFrame * frame);
+static GstFlowReturn gst_segmentation_transform_ip (GstOpencvVideoFilter *
+ filter, GstBuffer * buffer, Mat img);
-static gboolean gst_segmentation_stop (GstBaseTransform * basesrc);
-static gboolean gst_segmentation_set_info (GstVideoFilter * filter,
- GstCaps * incaps, GstVideoInfo * in_info,
- GstCaps * outcaps, GstVideoInfo * out_info);
-static void gst_segmentation_release_all_pointers (GstSegmentation * filter);
+static void gst_segmentation_finalize (GObject * object);
+static gboolean gst_segmentation_set_caps (GstOpencvVideoFilter * filter,
+ gint in_width, gint in_height, int in_cv_type, gint out_width,
+ gint out_height, int out_cv_type);
/* Codebook algorithm + connected components functions*/
static int update_codebook (unsigned char *p, codeBook * c,
static int clear_stale_entries (codeBook * c);
static unsigned char background_diff (unsigned char *p, codeBook * c,
int numChannels, int *minMod, int *maxMod);
-static void find_connected_components (IplImage * mask, int poly1_hull0,
- float perimScale, CvMemStorage * mem_storage, CvSeq * contours);
+static void find_connected_components (Mat mask, int poly1_hull0,
+ float perimScale);
/* MOG (Mixture-of-Gaussians functions */
-static int initialise_mog (GstSegmentation * filter);
static int run_mog_iteration (GstSegmentation * filter);
static int run_mog2_iteration (GstSegmentation * filter);
-static int finalise_mog (GstSegmentation * filter);
/* initialize the segmentation's class */
static void
{
GObjectClass *gobject_class;
GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
- GstBaseTransformClass *basesrc_class = GST_BASE_TRANSFORM_CLASS (klass);
- GstVideoFilterClass *video_class = (GstVideoFilterClass *) klass;
+ GstOpencvVideoFilterClass *cvfilter_class =
+ (GstOpencvVideoFilterClass *) klass;
gobject_class = (GObjectClass *) klass;
+ gobject_class->finalize = gst_segmentation_finalize;
gobject_class->set_property = gst_segmentation_set_property;
gobject_class->get_property = gst_segmentation_get_property;
- basesrc_class->stop = gst_segmentation_stop;
- video_class->transform_frame_ip = gst_segmentation_transform_ip;
- video_class->set_info = gst_segmentation_set_info;
+ cvfilter_class->cv_trans_ip_func = gst_segmentation_transform_ip;
+ cvfilter_class->cv_set_caps = gst_segmentation_set_caps;
g_object_class_install_property (gobject_class, PROP_METHOD,
g_param_spec_enum ("method",
"Create a Foregound/Background mask applying a particular algorithm",
"Miguel Casas-Sanchez <miguelecasassanchez@gmail.com>");
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&src_factory));
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&sink_factory));
+ gst_element_class_add_static_pad_template (element_class, &src_factory);
+ gst_element_class_add_static_pad_template (element_class, &sink_factory);
}
/* initialize the new element
* instantiate pads and add them to element
- * set pad calback functions
+ * set pad callback functions
* initialize instance structure
*/
static void
filter->test_mode = DEFAULT_TEST_MODE;
filter->framecount = 0;
filter->learning_rate = DEFAULT_LEARNING_RATE;
- gst_base_transform_set_in_place (GST_BASE_TRANSFORM (filter), TRUE);
+ gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER (filter), TRUE);
}
-
static void
gst_segmentation_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
}
}
-/* GstElement vmethod implementations */
-/* this function handles the link with other elements */
static gboolean
-gst_segmentation_set_info (GstVideoFilter * filter,
- GstCaps * incaps, GstVideoInfo * in_info,
- GstCaps * outcaps, GstVideoInfo * out_info)
+gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
+ gint in_height, int in_cv_type,
+ gint out_width, gint out_height, int out_cv_type)
{
GstSegmentation *segmentation = GST_SEGMENTATION (filter);
- CvSize size;
+ Size size;
- size = cvSize (in_info->width, in_info->height);
- segmentation->width = in_info->width;
- segmentation->height = in_info->height;
- /* If cvRGB is already allocated, it means there's a cap modification, */
- /* so release first all the images. */
- if (NULL != segmentation->cvRGBA)
- gst_segmentation_release_all_pointers (segmentation);
+ size = Size (in_width, in_height);
+ segmentation->width = in_width;
+ segmentation->height = in_height;
- segmentation->cvRGBA = cvCreateImageHeader (size, IPL_DEPTH_8U, 4);
+ segmentation->cvRGB.create (size, CV_8UC3);
+ segmentation->cvYUV.create (size, CV_8UC3);
- segmentation->cvRGB = cvCreateImage (size, IPL_DEPTH_8U, 3);
- segmentation->cvYUV = cvCreateImage (size, IPL_DEPTH_8U, 3);
+ segmentation->cvFG = Mat::zeros (size, CV_8UC1);
- segmentation->cvFG = cvCreateImage (size, IPL_DEPTH_8U, 1);
- cvZero (segmentation->cvFG);
-
- segmentation->ch1 = cvCreateImage (size, IPL_DEPTH_8U, 1);
- segmentation->ch2 = cvCreateImage (size, IPL_DEPTH_8U, 1);
- segmentation->ch3 = cvCreateImage (size, IPL_DEPTH_8U, 1);
+ segmentation->ch1.create (size, CV_8UC1);
+ segmentation->ch2.create (size, CV_8UC1);
+ segmentation->ch3.create (size, CV_8UC1);
/* Codebook method */
segmentation->TcodeBook = (codeBook *)
segmentation->learning_interval = (int) (1.0 / segmentation->learning_rate);
/* Mixture-of-Gaussians (mog) methods */
- initialise_mog (segmentation);
+ segmentation->mog = bgsegm::createBackgroundSubtractorMOG ();
+ segmentation->mog2 = createBackgroundSubtractorMOG2 ();
return TRUE;
}
/* Clean up */
-static gboolean
-gst_segmentation_stop (GstBaseTransform * basesrc)
-{
- GstSegmentation *filter = GST_SEGMENTATION (basesrc);
-
- if (filter->cvRGBA != NULL)
- gst_segmentation_release_all_pointers (filter);
-
- return TRUE;
-}
-
static void
-gst_segmentation_release_all_pointers (GstSegmentation * filter)
+gst_segmentation_finalize (GObject * object)
{
- cvReleaseImage (&filter->cvRGBA);
- cvReleaseImage (&filter->cvRGB);
- cvReleaseImage (&filter->cvYUV);
- cvReleaseImage (&filter->cvFG);
- cvReleaseImage (&filter->ch1);
- cvReleaseImage (&filter->ch2);
- cvReleaseImage (&filter->ch3);
+ GstSegmentation *filter = GST_SEGMENTATION (object);
+ filter->cvRGB.release ();
+ filter->cvYUV.release ();
+ filter->cvFG.release ();
+ filter->ch1.release ();
+ filter->ch2.release ();
+ filter->ch3.release ();
+ filter->mog.release ();
+ filter->mog2.release ();
g_free (filter->TcodeBook);
- finalise_mog (filter);
+
+ G_OBJECT_CLASS (gst_segmentation_parent_class)->finalize (object);
}
static GstFlowReturn
-gst_segmentation_transform_ip (GstVideoFilter * btrans, GstVideoFrame * frame)
+gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter,
+ GstBuffer * buffer, Mat img)
{
- GstSegmentation *filter = GST_SEGMENTATION (btrans);
+ GstSegmentation *filter = GST_SEGMENTATION (cvfilter);
int j;
- /* get image data from the input, which is RGBA */
- filter->cvRGBA->imageData = (char *) GST_VIDEO_FRAME_COMP_DATA (frame, 0);
- filter->cvRGBA->widthStep = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
filter->framecount++;
/* Image preprocessing: color space conversion etc */
- cvCvtColor (filter->cvRGBA, filter->cvRGB, CV_RGBA2RGB);
- cvCvtColor (filter->cvRGB, filter->cvYUV, CV_RGB2YCrCb);
+ cvtColor (img, filter->cvRGB, COLOR_RGBA2RGB);
+ cvtColor (filter->cvRGB, filter->cvYUV, COLOR_RGB2YCrCb);
- /* Create and update a fg/bg model using a codebook approach following the
+ /* Create and update a fg/bg model using a codebook approach following the
* opencv O'Reilly book [1] implementation of the algo described in [2].
*
- * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary
+ * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary
* Bradski and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
- * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
+ * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
* Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005. */
if (METHOD_BOOK == filter->method) {
unsigned cbBounds[3] = { 10, 5, 5 };
int minMod[3] = { 20, 20, 20 }, maxMod[3] = {
- 20, 20, 20};
+ 20, 20, 20
+ };
if (filter->framecount < 30) {
/* Learning background phase: update_codebook on every frame */
for (j = 0; j < filter->width * filter->height; j++) {
- update_codebook ((unsigned char *) filter->cvYUV->imageData + j * 3,
+ update_codebook (filter->cvYUV.data + j * 3,
(codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
}
} else {
/* this updating is responsible for FG becoming BG again */
if (filter->framecount % filter->learning_interval == 0) {
for (j = 0; j < filter->width * filter->height; j++) {
- update_codebook ((uchar *) filter->cvYUV->imageData + j * 3,
+ update_codebook (filter->cvYUV.data + j * 3,
(codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
}
}
for (j = 0; j < filter->width * filter->height; j++) {
if (background_diff
- ((uchar *) filter->cvYUV->imageData + j * 3,
+ (filter->cvYUV.data + j * 3,
(codeBook *) & (filter->TcodeBook[j]), 3, minMod, maxMod)) {
- filter->cvFG->imageData[j] = 255;
+ filter->cvFG.data[j] = (char) 255;
} else {
- filter->cvFG->imageData[j] = 0;
+ filter->cvFG.data[j] = 0;
}
}
}
/* 3rd param is the smallest area to show: (w+h)/param , in pixels */
- find_connected_components (filter->cvFG, 1, 10000,
- filter->mem_storage, filter->contours);
+ find_connected_components (filter->cvFG, 1, 10000);
}
- /* Create the foreground and background masks using BackgroundSubtractorMOG [1],
- * Gaussian Mixture-based Background/Foreground segmentation algorithm. OpenCV
+ /* Create the foreground and background masks using BackgroundSubtractorMOG [1],
+ * Gaussian Mixture-based Background/Foreground segmentation algorithm. OpenCV
* MOG implements the algorithm described in [2].
- *
+ *
* [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
- * [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
- * mixture model for real-time tracking with shadow detection", Proc. 2nd
+ * [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
+ * mixture model for real-time tracking with shadow detection", Proc. 2nd
* European Workshop on Advanced Video-Based Surveillance Systems, 2001
*/
else if (METHOD_MOG == filter->method) {
run_mog_iteration (filter);
}
/* Create the foreground and background masks using BackgroundSubtractorMOG2
- * [1], Gaussian Mixture-based Background/Foreground segmentation algorithm.
+ * [1], Gaussian Mixture-based Background/Foreground segmentation algorithm.
* OpenCV MOG2 implements the algorithm described in [2] and [3].
- *
+ *
* [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
- * [2] Z.Zivkovic, "Improved adaptive Gausian mixture model for background
+ * [2] Z.Zivkovic, "Improved adaptive Gaussian mixture model for background
* subtraction", International Conference Pattern Recognition, UK, Aug 2004.
- * [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
- * per Image Pixel for the Task of Background Subtraction", Pattern
+ * [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
+ * per Image Pixel for the Task of Background Subtraction", Pattern
* Recognition Letters, vol. 27, no. 7, pages 773-780, 2006. */
else if (METHOD_MOG2 == filter->method) {
run_mog2_iteration (filter);
}
/* if we want to test_mode, just overwrite the output */
+ std::vector < cv::Mat > channels (3);
+
if (filter->test_mode) {
- cvCvtColor (filter->cvFG, filter->cvRGB, CV_GRAY2RGB);
+ cvtColor (filter->cvFG, filter->cvRGB, COLOR_GRAY2RGB);
- cvSplit (filter->cvRGB, filter->ch1, filter->ch2, filter->ch3, NULL);
+ split (filter->cvRGB, channels);
} else
- cvSplit (filter->cvRGBA, filter->ch1, filter->ch2, filter->ch3, NULL);
+ split (img, channels);
+
+ channels.push_back (filter->cvFG);
/* copy anyhow the fg/bg to the alpha channel in the output image */
- cvMerge (filter->ch1, filter->ch2, filter->ch3, filter->cvFG, filter->cvRGBA);
+ merge (channels, img);
return GST_FLOW_OK;
#ifdef CODE_FROM_OREILLY_BOOK /* See license at the beginning of the page */
-/*
- int update_codebook(uchar *p, codeBook &c, unsigned cbBounds)
- Updates the codebook entry with a new data point
-
- p Pointer to a YUV or HSI pixel
- c Codebook for this pixel
- cbBounds Learning bounds for codebook (Rule of thumb: 10)
- numChannels Number of color channels we¡¯re learning
-
- NOTES:
- cvBounds must be of length equal to numChannels
-
- RETURN
- codebook index
+/*
+ int update_codebook(uchar *p, codeBook &c, unsigned cbBounds)
+ Updates the codebook entry with a new data point
+
+ p Pointer to a YUV or HSI pixel
+ c Codebook for this pixel
+ cbBounds Learning bounds for codebook (Rule of thumb: 10)
+ numChannels Number of color channels we're learning
+
+ NOTES:
+ cvBounds must be of length equal to numChannels
+
+ RETURN
+ codebook index
*/
int
update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
int matchChannel;
for (n = 0; n < numChannels; n++) {
- high[n] = *(p + n) + *(cbBounds + n);
+ high[n] = p[n] + cbBounds[n];
if (high[n] > 255)
high[n] = 255;
- low[n] = *(p + n) - *(cbBounds + n);
- if (low[n] < 0)
+
+ if (p[n] > cbBounds[n])
+ low[n] = p[n] - cbBounds[n];
+ else
low[n] = 0;
}
/*
- int clear_stale_entries(codeBook &c)
- During learning, after you've learned for some period of time,
- periodically call this to clear out stale codebook entries
-
- c Codebook to clean up
-
- Return
- number of entries cleared
+ int clear_stale_entries(codeBook &c)
+ During learning, after you've learned for some period of time,
+ periodically call this to clear out stale codebook entries
+
+ c Codebook to clean up
+
+ Return
+ number of entries cleared
*/
int
clear_stale_entries (codeBook * c)
/*
- uchar background_diff( uchar *p, codeBook &c,
- int minMod, int maxMod)
- Given a pixel and a codebook, determine if the pixel is
- covered by the codebook
-
- p Pixel pointer (YUV interleaved)
- c Codebook reference
- numChannels Number of channels we are testing
- maxMod Add this (possibly negative) number onto
-
- max level when determining if new pixel is foreground
- minMod Subract this (possibly negative) number from
- min level when determining if new pixel is foreground
-
- NOTES:
- minMod and maxMod must have length numChannels,
- e.g. 3 channels => minMod[3], maxMod[3]. There is one min and
- one max threshold per channel.
-
- Return
- 0 => background, 255 => foreground
+ uchar background_diff( uchar *p, codeBook &c,
+ int minMod, int maxMod)
+ Given a pixel and a codebook, determine if the pixel is
+ covered by the codebook
+
+ p Pixel pointer (YUV interleaved)
+ c Codebook reference
+ numChannels Number of channels we are testing
+ maxMod Add this (possibly negative) number onto
+
+ max level when determining if new pixel is foreground
+ minMod Subtract this (possibly negative) number from
+ min level when determining if new pixel is foreground
+
+ NOTES:
+ minMod and maxMod must have length numChannels,
+ e.g. 3 channels => minMod[3], maxMod[3]. There is one min and
+ one max threshold per channel.
+
+ Return
+ 0 => background, 255 => foreground
*/
unsigned char
background_diff (unsigned char *p, codeBook * c, int numChannels,
/* How many iterations of erosion and/or dilation there should be */
#define CVCLOSE_ITR 1
static void
-find_connected_components (IplImage * mask, int poly1_hull0, float perimScale,
- CvMemStorage * mem_storage, CvSeq * contours)
+find_connected_components (Mat mask, int poly1_hull0, float perimScale)
{
- CvContourScanner scanner;
- CvSeq *c;
- int numCont = 0;
/* Just some convenience variables */
- const CvScalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
- const CvScalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);
+ const Scalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
+ //const Scalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);
+ int idx = 0;
/* CLEAN UP RAW MASK */
- cvMorphologyEx (mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
- cvMorphologyEx (mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR);
+ morphologyEx (mask, mask, MORPH_OPEN, Mat (), Point (-1, -1), CVCLOSE_ITR);
+ morphologyEx (mask, mask, MORPH_CLOSE, Mat (), Point (-1, -1), CVCLOSE_ITR);
/* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
- if (mem_storage == NULL) {
- mem_storage = cvCreateMemStorage (0);
- } else {
- cvClearMemStorage (mem_storage);
- }
- scanner = cvStartFindContours (mask, mem_storage, sizeof (CvContour),
- CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0, 0));
-
- while ((c = cvFindNextContour (scanner)) != NULL) {
- double len = cvContourArea (c, CV_WHOLE_SEQ, 0);
- /* calculate perimeter len threshold: */
- double q = (mask->height + mask->width) / perimScale;
- /* Get rid of blob if its perimeter is too small: */
- if (len < q) {
- cvSubstituteContour (scanner, NULL);
- } else {
- /* Smooth its edges if its large enough */
- CvSeq *c_new;
+ std::vector < std::vector < Point > >contours;
+ std::vector < std::vector < Point > >to_draw;
+ std::vector < Vec4i > hierarchy;
+ findContours (mask, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE,
+ Point (0, 0));
+ if (contours.size () == 0)
+ return;
+
+ for (; idx >= 0; idx = hierarchy[idx][0]) {
+ const std::vector < Point > &c = contours[idx];
+ double len = fabs (contourArea (Mat (c)));
+ double q = (mask.size ().height + mask.size ().width) / perimScale;
+ if (len >= q) {
+ std::vector < Point > c_new;
if (poly1_hull0) {
- /* Polygonal approximation */
- c_new =
- cvApproxPoly (c, sizeof (CvContour), mem_storage, CV_POLY_APPROX_DP,
- CVCONTOUR_APPROX_LEVEL, 0);
+ approxPolyDP (c, c_new, CVCONTOUR_APPROX_LEVEL, (hierarchy[idx][2] < 0
+ && hierarchy[idx][3] < 0));
} else {
- /* Convex Hull of the segmentation */
- c_new = cvConvexHull2 (c, mem_storage, CV_CLOCKWISE, 1);
+ convexHull (c, c_new, true, true);
}
- cvSubstituteContour (scanner, c_new);
- numCont++;
+ to_draw.push_back (c_new);
}
}
- contours = cvEndFindContours (&scanner);
-
- /* PAINT THE FOUND REGIONS BACK INTO THE IMAGE */
- cvZero (mask);
- /* DRAW PROCESSED CONTOURS INTO THE MASK */
- for (c = contours; c != NULL; c = c->h_next)
- cvDrawContours (mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8, cvPoint (0,
- 0));
-}
-#endif /*ifdef CODE_FROM_OREILLY_BOOK */
-
-int
-initialise_mog (GstSegmentation * filter)
-{
- filter->img_input_as_cvMat = (void *) new cv::Mat (filter->cvYUV, false);
- filter->img_fg_as_cvMat = (void *) new cv::Mat (filter->cvFG, false);
-
- filter->mog = (void *) new cv::BackgroundSubtractorMOG ();
- filter->mog2 = (void *) new cv::BackgroundSubtractorMOG2 ();
+ mask.setTo (Scalar::all (0));
+ if (to_draw.size () > 0) {
+ drawContours (mask, to_draw, -1, CVX_WHITE, FILLED);
+ }
- return (0);
}
+#endif /*ifdef CODE_FROM_OREILLY_BOOK */
int
run_mog_iteration (GstSegmentation * filter)
{
- ((cv::Mat *) filter->img_input_as_cvMat)->data =
- (uchar *) filter->cvYUV->imageData;
- ((cv::Mat *) filter->img_fg_as_cvMat)->data =
- (uchar *) filter->cvFG->imageData;
-
/*
- BackgroundSubtractorMOG [1], Gaussian Mixture-based Background/Foreground
+ BackgroundSubtractorMOG [1], Gaussian Mixture-based Background/Foreground
Segmentation Algorithm. OpenCV MOG implements the algorithm described in [2].
[1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
- [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
- mixture model for real-time tracking with shadow detection", Proc. 2nd
+ [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
+ mixture model for real-time tracking with shadow detection", Proc. 2nd
European Workshop on Advanced Video-Based Surveillance Systems, 2001
*/
- (*((cv::BackgroundSubtractorMOG *) filter->mog)) (*((cv::Mat *) filter->
- img_input_as_cvMat), *((cv::Mat *) filter->img_fg_as_cvMat),
- filter->learning_rate);
+ filter->mog->apply (filter->cvYUV, filter->cvFG, filter->learning_rate);
return (0);
}
int
run_mog2_iteration (GstSegmentation * filter)
{
- ((cv::Mat *) filter->img_input_as_cvMat)->data =
- (uchar *) filter->cvYUV->imageData;
- ((cv::Mat *) filter->img_fg_as_cvMat)->data =
- (uchar *) filter->cvFG->imageData;
-
/*
- BackgroundSubtractorMOG2 [1], Gaussian Mixture-based Background/Foreground
- segmentation algorithm. OpenCV MOG2 implements the algorithm described in
+ BackgroundSubtractorMOG2 [1], Gaussian Mixture-based Background/Foreground
+ segmentation algorithm. OpenCV MOG2 implements the algorithm described in
[2] and [3].
[1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
- [2] Z.Zivkovic, "Improved adaptive Gausian mixture model for background
+ [2] Z.Zivkovic, "Improved adaptive Gaussian mixture model for background
subtraction", International Conference Pattern Recognition, UK, August, 2004.
- [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation per
- Image Pixel for the Task of Background Subtraction", Pattern Recognition
+ [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation per
+ Image Pixel for the Task of Background Subtraction", Pattern Recognition
Letters, vol. 27, no. 7, pages 773-780, 2006.
*/
- (*((cv::BackgroundSubtractorMOG *) filter->mog2)) (*((cv::Mat *) filter->
- img_input_as_cvMat), *((cv::Mat *) filter->img_fg_as_cvMat),
- filter->learning_rate);
-
- return (0);
-}
+ filter->mog2->apply (filter->cvYUV, filter->cvFG, filter->learning_rate);
-int
-finalise_mog (GstSegmentation * filter)
-{
- delete (cv::Mat *) filter->img_input_as_cvMat;
- delete (cv::Mat *) filter->img_fg_as_cvMat;
- delete (cv::BackgroundSubtractorMOG *) filter->mog;
- delete (cv::BackgroundSubtractorMOG2 *) filter->mog2;
return (0);
}