3 * Copyright (C) 2013 Miguel Casas-Sanchez <miguelecasassanchez@gmail.com>
4 * Except: Parts of code inside the preprocessor define CODE_FROM_OREILLY_BOOK,
5 * which are downloaded from O'Reilly website
6 * [http://examples.oreilly.com/9780596516130/]
7 * and adapted. Its license reads:
9 * Right to use this code in any way you want without warrenty, support or
10 * any guarantee of it working. "
13 * Permission is hereby granted, free of charge, to any person obtaining a
14 * copy of this software and associated documentation files (the "Software"),
15 * to deal in the Software without restriction, including without limitation
16 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 * and/or sell copies of the Software, and to permit persons to whom the
18 * Software is furnished to do so, subject to the following conditions:
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29 * DEALINGS IN THE SOFTWARE.
31 * Alternatively, the contents of this file may be used under the
32 * GNU Lesser General Public License Version 2.1 (the "LGPL"), in
33 * which case the following provisions apply instead of the ones
36 * This library is free software; you can redistribute it and/or
37 * modify it under the terms of the GNU Library General Public
38 * License as published by the Free Software Foundation; either
39 * version 2 of the License, or (at your option) any later version.
41 * This library is distributed in the hope that it will be useful,
42 * but WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * Library General Public License for more details.
46 * You should have received a copy of the GNU Library General Public
47 * License along with this library; if not, write to the
48 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
49 * Boston, MA 02110-1301, USA.
/* Compile in the codebook-method code adapted from the O'Reilly book
 * (see the license exception at the top of this file). */
#define CODE_FROM_OREILLY_BOOK
54 * SECTION:element-segmentation
56 * This element creates and updates a fg/bg model using one of several approaches.
57 * The one called "codebook" refers to the codebook approach following the opencv
58 * O'Reilly book [1] implementation of the algorithm described in K. Kim,
59 * T. H. Chalidabhongse, D. Harwood and L. Davis [2]. BackgroundSubtractorMOG [3],
 * or MOG for short, refers to a Gaussian Mixture-based Background/Foreground
61 * Segmentation Algorithm. OpenCV MOG implements the algorithm described in [4].
62 * BackgroundSubtractorMOG2 [5], refers to another Gaussian Mixture-based
63 * Background/Foreground segmentation algorithm. OpenCV MOG2 implements the
64 * algorithm described in [6] and [7].
66 * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary Bradski
67 * and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
68 * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
69 * Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005.
70 * [3] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
71 * [4] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
72 * mixture model for real-time tracking with shadow detection", Proc. 2nd
73 * European Workshop on Advanced Video-Based Surveillance Systems, 2001
74 * [5] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
75 * [6] Z.Zivkovic, "Improved adaptive Gaussian mixture model for background
76 * subtraction", International Conference Pattern Recognition, UK, August, 2004.
77 * [7] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
78 * per Image Pixel for the Task of Background Subtraction", Pattern Recognition
79 * Letters, vol. 27, no. 7, pages 773-780, 2006.
81 * ## Example launch line
84 * gst-launch-1.0 v4l2src device=/dev/video0 ! videoconvert ! segmentation test-mode=true method=2 ! videoconvert ! ximagesink
92 #include "gstsegmentation.h"
93 #include <opencv2/imgproc.hpp>
/* Debug category for this element; GST_CAT_DEFAULT routes the GST_* log
 * macros in this file to it. */
GST_DEBUG_CATEGORY_STATIC (gst_segmentation_debug);
#define GST_CAT_DEFAULT gst_segmentation_debug

/* Filter signals and args */
/* NOTE(review): the property enum (PROP_0, PROP_METHOD, ...) and the opening
 * of the method enum (METHOD_BOOK / METHOD_MOG / METHOD_MOG2 values) are not
 * visible in this chunk; only the closing typedef line survives here. */
} GstSegmentationMethod;

/* Property defaults: MOG2 segmentation, normal (non-test) output, and a
 * learning rate of 0.01 — i.e. a motionless foreground pixel fades back to
 * background over roughly 100 frames. */
#define DEFAULT_TEST_MODE FALSE
#define DEFAULT_METHOD METHOD_MOG2
#define DEFAULT_LEARNING_RATE 0.01

#define GST_TYPE_SEGMENTATION_METHOD (gst_segmentation_method_get_type ())
/* Lazily registers and returns the GType for the "method" property enum.
 * NOTE(review): the 'static GType' return-type line, the surrounding braces,
 * the one-time registration guard, the {0, NULL, NULL} array terminator and
 * the final 'return etype;' are not visible in this chunk. */
gst_segmentation_method_get_type (void)
  static GType etype = 0;
    static const GEnumValue values[] = {
      {METHOD_BOOK, "Codebook-based segmentation (Bradski2008)", "codebook"},
      {METHOD_MOG, "Mixture-of-Gaussians segmentation (Bowden2001)", "mog"},
      {METHOD_MOG2, "Mixture-of-Gaussians segmentation (Zivkovic2004)", "mog2"},
    etype = g_enum_register_static ("GstSegmentationMethod", values);
/* Define the GstSegmentation type, derived from GstOpencvVideoFilter, and
 * initialize the debug category as part of the type registration. */
G_DEFINE_TYPE_WITH_CODE (GstSegmentation, gst_segmentation,
    GST_TYPE_OPENCV_VIDEO_FILTER,
    GST_DEBUG_CATEGORY_INIT (gst_segmentation_debug, "segmentation", 0,
        "Performs Foreground/Background segmentation in video sequences");
/* Register the element under the name "segmentation" (no rank: never
 * auto-plugged). */
GST_ELEMENT_REGISTER_DEFINE (segmentation, "segmentation", GST_RANK_NONE,
    GST_TYPE_SEGMENTATION);
/* Sink and source pad templates: this element only handles RGBA video so
 * that the FG/BG mask can be carried in the alpha channel on output.
 * NOTE(review): the GST_PAD_SINK/SRC and GST_PAD_ALWAYS argument lines are
 * not visible in this chunk. */
static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));

static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
/* Forward declarations.
 * NOTE(review): the 'static void' storage-class lines preceding the two
 * property-accessor prototypes are not visible in this chunk. */

/* GObject property accessors */
gst_segmentation_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec);
gst_segmentation_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec);

/* In-place transform: processes the frame directly in the input buffer */
static GstFlowReturn gst_segmentation_transform_ip (GstOpencvVideoFilter *
    filter, GstBuffer * buffer, Mat img);

static void gst_segmentation_finalize (GObject * object);
static gboolean gst_segmentation_set_caps (GstOpencvVideoFilter * filter,
    gint in_width, gint in_height, int in_cv_type, gint out_width,
    gint out_height, int out_cv_type);

/* Codebook algorithm + connected components functions */
static int update_codebook (unsigned char *p, codeBook * c,
    unsigned *cbBounds, int numChannels);
static int clear_stale_entries (codeBook * c);
static unsigned char background_diff (unsigned char *p, codeBook * c,
    int numChannels, int *minMod, int *maxMod);
/* NOTE(review): the trailing parameter (float perimScale) of this prototype
 * is on a line not visible in this chunk. */
static void find_connected_components (Mat mask, int poly1_hull0,

/* MOG (Mixture-of-Gaussians) functions */
static int run_mog_iteration (GstSegmentation * filter);
static int run_mog2_iteration (GstSegmentation * filter);
/* initialize the segmentation's class: wire up the GObject vfuncs, the
 * OpenCV filter vfuncs, install the three properties (method, test-mode,
 * learning-rate) and register pad templates and element metadata.
 * NOTE(review): the 'static void' line and the function's braces are not
 * visible in this chunk. */
gst_segmentation_class_init (GstSegmentationClass * klass)
  GObjectClass *gobject_class;
  GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
  GstOpencvVideoFilterClass *cvfilter_class =
      (GstOpencvVideoFilterClass *) klass;

  gobject_class = (GObjectClass *) klass;

  gobject_class->finalize = gst_segmentation_finalize;
  gobject_class->set_property = gst_segmentation_set_property;
  gobject_class->get_property = gst_segmentation_get_property;

  /* The base class calls these for each frame / caps change */
  cvfilter_class->cv_trans_ip_func = gst_segmentation_transform_ip;
  cvfilter_class->cv_set_caps = gst_segmentation_set_caps;

  g_object_class_install_property (gobject_class, PROP_METHOD,
      g_param_spec_enum ("method",
          "Segmentation method to use",
          "Segmentation method to use",
          GST_TYPE_SEGMENTATION_METHOD, DEFAULT_METHOD,
          (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

  g_object_class_install_property (gobject_class, PROP_TEST_MODE,
      g_param_spec_boolean ("test-mode", "test-mode",
          "If true, the output RGB is overwritten with the calculated foreground (white color)",
          /* NOTE(review): the (GParamFlags) cast appears twice here —
           * harmless but redundant; kept as-is. */
          DEFAULT_TEST_MODE, (GParamFlags)
          (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

  g_object_class_install_property (gobject_class, PROP_LEARNING_RATE,
      g_param_spec_float ("learning-rate", "learning-rate",
          "Speed with which a motionless foreground pixel would become background (inverse of number of frames)",
          0, 1, DEFAULT_LEARNING_RATE, (GParamFlags) (G_PARAM_READWRITE)));

  /* NOTE(review): "Foregound" below is a typo in the element description
   * string ("Foreground"); user-visible, worth fixing in a behavior change. */
  gst_element_class_set_static_metadata (element_class,
      "Foreground/background video sequence segmentation",
      "Filter/Effect/Video",
      "Create a Foregound/Background mask applying a particular algorithm",
      "Miguel Casas-Sanchez <miguelecasassanchez@gmail.com>");

  gst_element_class_add_static_pad_template (element_class, &src_factory);
  gst_element_class_add_static_pad_template (element_class, &sink_factory);

  /* Expose the method enum type in the plugin API for introspection/docs */
  gst_type_mark_as_plugin_api (GST_TYPE_SEGMENTATION_METHOD, (GstPluginAPIFlags) 0);
/* initialize the new element:
 * instantiate pads and add them to element,
 * set pad callback functions,
 * initialize instance structure */
gst_segmentation_init (GstSegmentation * filter)
  filter->method = DEFAULT_METHOD;
  filter->test_mode = DEFAULT_TEST_MODE;
  filter->framecount = 0;
  filter->learning_rate = DEFAULT_LEARNING_RATE;
  /* Process frames in place — input and output caps are both RGBA */
  gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER (filter), TRUE);
/* Standard GObject property setter for method / test-mode / learning-rate.
 * NOTE(review): the switch (prop_id) skeleton — the PROP_METHOD and
 * PROP_TEST_MODE case labels, break statements and braces — is not visible
 * in this chunk; the assignments below map one-to-one onto those cases. */
gst_segmentation_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
  GstSegmentation *filter = GST_SEGMENTATION (object);

      /* PROP_METHOD: which segmentation algorithm to run */
      filter->method = g_value_get_enum (value);
      /* PROP_TEST_MODE: paint the FG mask over the RGB output */
      filter->test_mode = g_value_get_boolean (value);
    case PROP_LEARNING_RATE:
      filter->learning_rate = g_value_get_float (value);
      /* default: unknown property id */
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* Standard GObject property getter, mirror of the setter above.
 * NOTE(review): the switch (prop_id) skeleton and the PROP_METHOD /
 * PROP_TEST_MODE case labels are not visible in this chunk. */
gst_segmentation_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec)
  GstSegmentation *filter = GST_SEGMENTATION (object);

      /* PROP_METHOD */
      g_value_set_enum (value, filter->method);
      /* PROP_TEST_MODE */
      g_value_set_boolean (value, filter->test_mode);
    case PROP_LEARNING_RATE:
      g_value_set_float (value, filter->learning_rate);
      /* default: unknown property id */
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* Caps handler: (re)allocates every per-frame scratch image and the
 * per-pixel codebook array for the negotiated frame size, and creates the
 * two OpenCV background-subtractor models.
 * NOTE(review): if caps are renegotiated, TcodeBook is g_malloc'd again
 * without freeing the previous array — looks like a leak; confirm whether
 * set_caps can run more than once per instance. */
gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
    gint in_height, int in_cv_type,
    gint out_width, gint out_height, int out_cv_type)
  GstSegmentation *segmentation = GST_SEGMENTATION (filter);

  size = Size (in_width, in_height);
  segmentation->width = in_width;
  segmentation->height = in_height;

  /* Scratch images: 3-channel RGB/YCrCb frames and a 1-channel FG mask */
  segmentation->cvRGB.create (size, CV_8UC3);
  segmentation->cvYUV.create (size, CV_8UC3);

  segmentation->cvFG = Mat::zeros (size, CV_8UC1);

  segmentation->ch1.create (size, CV_8UC1);
  segmentation->ch2.create (size, CV_8UC1);
  segmentation->ch3.create (size, CV_8UC1);

  /* Codebook method: one codeBook per pixel (+1 spare slot) */
  segmentation->TcodeBook = (codeBook *)
      g_malloc (sizeof (codeBook) *
      (segmentation->width * segmentation->height + 1));
  for (int j = 0; j < segmentation->width * segmentation->height; j++) {
    segmentation->TcodeBook[j].numEntries = 0;
    segmentation->TcodeBook[j].t = 0;
  /* How often (in frames) the codebook keeps learning after start-up */
  segmentation->learning_interval = (int) (1.0 / segmentation->learning_rate);

  /* Mixture-of-Gaussians (mog) methods */
  segmentation->mog = bgsegm::createBackgroundSubtractorMOG ();
  segmentation->mog2 = createBackgroundSubtractorMOG2 ();
/* Release every per-instance OpenCV matrix and model plus the codebook
 * array, then chain up to the parent class finalize. */
gst_segmentation_finalize (GObject * object)
  GstSegmentation *filter = GST_SEGMENTATION (object);

  filter->cvRGB.release ();
  filter->cvYUV.release ();
  filter->cvFG.release ();
  filter->ch1.release ();
  filter->ch2.release ();
  filter->ch3.release ();
  /* cv::Ptr release — drops the MOG/MOG2 model references */
  filter->mog.release ();
  filter->mog2.release ();
  g_free (filter->TcodeBook);

  G_OBJECT_CLASS (gst_segmentation_parent_class)->finalize (object);
/* Per-frame in-place transform: runs one iteration of the selected
 * segmentation method on @img (RGBA), then writes the resulting FG/BG mask
 * into the frame's alpha channel (or over the whole RGB in test mode).
 * NOTE(review): several structural lines (the 'j' loop-counter declaration,
 * the maxMod initializer continuation, various braces and the else branches
 * of the codebook classification) are not visible in this chunk. */
gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter,
    GstBuffer * buffer, Mat img)
  GstSegmentation *filter = GST_SEGMENTATION (cvfilter);

  filter->framecount++;

  /* Image preprocessing: color space conversion etc */
  cvtColor (img, filter->cvRGB, COLOR_RGBA2RGB);
  cvtColor (filter->cvRGB, filter->cvYUV, COLOR_RGB2YCrCb);

  /* Create and update a fg/bg model using a codebook approach following the
   * opencv O'Reilly book [1] implementation of the algo described in [2].
   *
   * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary
   * Bradski and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
   * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
   * Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005. */
  if (METHOD_BOOK == filter->method) {
    unsigned cbBounds[3] = { 10, 5, 5 };
    int minMod[3] = { 20, 20, 20 }, maxMod[3] = {

    if (filter->framecount < 30) {
      /* Learning background phase: update_codebook on every frame */
      for (j = 0; j < filter->width * filter->height; j++) {
        update_codebook (filter->cvYUV.data + j * 3,
            (codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
      /* this updating is responsible for FG becoming BG again */
      if (filter->framecount % filter->learning_interval == 0) {
        for (j = 0; j < filter->width * filter->height; j++) {
          update_codebook (filter->cvYUV.data + j * 3,
              (codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
      /* Periodically prune codewords that have not matched for a while */
      if (filter->framecount % 60 == 0) {
        for (j = 0; j < filter->width * filter->height; j++)
          clear_stale_entries ((codeBook *) & (filter->TcodeBook[j]));
      /* Classify each pixel against its codebook: 255 = foreground, 0 = bg */
      for (j = 0; j < filter->width * filter->height; j++) {
        (filter->cvYUV.data + j * 3,
            (codeBook *) & (filter->TcodeBook[j]), 3, minMod, maxMod)) {
          filter->cvFG.data[j] = (char) 255;
          filter->cvFG.data[j] = 0;

    /* 3rd param is the smallest area to show: (w+h)/param , in pixels */
    find_connected_components (filter->cvFG, 1, 10000);
  /* Create the foreground and background masks using BackgroundSubtractorMOG [1],
   * Gaussian Mixture-based Background/Foreground segmentation algorithm. OpenCV
   * MOG implements the algorithm described in [2].
   *
   * [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
   * [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
   * mixture model for real-time tracking with shadow detection", Proc. 2nd
   * European Workshop on Advanced Video-Based Surveillance Systems, 2001 */
  else if (METHOD_MOG == filter->method) {
    run_mog_iteration (filter);
  /* Create the foreground and background masks using BackgroundSubtractorMOG2
   * [1], Gaussian Mixture-based Background/Foreground segmentation algorithm.
   * OpenCV MOG2 implements the algorithm described in [2] and [3].
   *
   * [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
   * [2] Z.Zivkovic, "Improved adaptive Gaussian mixture model for background
   * subtraction", International Conference Pattern Recognition, UK, Aug 2004.
   * [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
   * per Image Pixel for the Task of Background Subtraction", Pattern
   * Recognition Letters, vol. 27, no. 7, pages 773-780, 2006. */
  else if (METHOD_MOG2 == filter->method) {
    run_mog2_iteration (filter);

  /* In test mode, just overwrite the RGB output with the FG mask */
  std::vector < cv::Mat > channels (3);

  if (filter->test_mode) {
    cvtColor (filter->cvFG, filter->cvRGB, COLOR_GRAY2RGB);

    split (filter->cvRGB, channels);
    /* (non-test mode: keep the original RGB planes) */
    split (img, channels);

  channels.push_back (filter->cvFG);

  /* copy anyhow the fg/bg to the alpha channel in the output image */
  merge (channels, img);
#ifdef CODE_FROM_OREILLY_BOOK   /* See license at the beginning of the page */
/*
 * int update_codebook (uchar *p, codeBook &c, unsigned cbBounds)
 * Updates the codebook entry with a new data point.
 *
 * p           Pointer to a YUV or HSI pixel
 * c           Codebook for this pixel
 * cbBounds    Learning bounds for codebook (Rule of thumb: 10)
 * numChannels Number of color channels we're learning
 *
 * NOTES: cbBounds must be of length equal to numChannels.
 *
 * NOTE(review): the 'static int' line, the 'int numChannels)' parameter
 * continuation, local declarations (i, n, matchChannel, foo) and several
 * braces are not visible in this chunk of the file.
 */
update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
  unsigned int high[3], low[3];

  /* Per-channel learning window around the incoming pixel value */
  for (n = 0; n < numChannels; n++) {
    high[n] = p[n] + cbBounds[n];
    /* Clamp the lower bound at 0 (unsigned arithmetic would wrap) */
    if (p[n] > cbBounds[n])
      low[n] = p[n] - cbBounds[n];

  /* SEE IF THIS FITS AN EXISTING CODEWORD */
  for (i = 0; i < c->numEntries; i++) {
    for (n = 0; n < numChannels; n++) {
      if ((c->cb[i]->learnLow[n] <= *(p + n)) &&
          /* Found an entry for this channel */
          (*(p + n) <= c->cb[i]->learnHigh[n])) {
    if (matchChannel == numChannels) {  /* If an entry was found */
      c->cb[i]->t_last_update = c->t;
      /* adjust this codeword for the first channel */
      for (n = 0; n < numChannels; n++) {
        if (c->cb[i]->max[n] < *(p + n)) {
          c->cb[i]->max[n] = *(p + n);
        } else if (c->cb[i]->min[n] > *(p + n)) {
          c->cb[i]->min[n] = *(p + n);

  /* OVERHEAD TO TRACK POTENTIAL STALE ENTRIES */
  for (int s = 0; s < c->numEntries; s++) {
    /* Track which codebook entries are going stale: longest run of frames
     * since the entry last matched */
    int negRun = c->t - c->cb[s]->t_last_update;
    if (c->cb[s]->stale < negRun)
      c->cb[s]->stale = negRun;

  /* ENTER A NEW CODEWORD IF NEEDED */
  if (i == c->numEntries) {     /* if no existing codeword found, make one */
    /* Grow the codeword pointer array by one (copy + append) */
    (code_element **) g_malloc (sizeof (code_element *) *
        (c->numEntries + 1));
    for (int ii = 0; ii < c->numEntries; ii++) {
      foo[ii] = c->cb[ii];      /* copy all pointers */
    foo[c->numEntries] = (code_element *) g_malloc (sizeof (code_element));
    /* Initialize the new codeword from the current pixel */
    for (n = 0; n < numChannels; n++) {
      c->cb[c->numEntries]->learnHigh[n] = high[n];
      c->cb[c->numEntries]->learnLow[n] = low[n];
      c->cb[c->numEntries]->max[n] = *(p + n);
      c->cb[c->numEntries]->min[n] = *(p + n);
    c->cb[c->numEntries]->t_last_update = c->t;
    c->cb[c->numEntries]->stale = 0;

  /* SLOWLY ADJUST LEARNING BOUNDS */
  for (n = 0; n < numChannels; n++) {
    if (c->cb[i]->learnHigh[n] < high[n])
      c->cb[i]->learnHigh[n] += 1;
    if (c->cb[i]->learnLow[n] > low[n])
      c->cb[i]->learnLow[n] -= 1;
/*
 * int clear_stale_entries (codeBook &c)
 * During learning, after you've learned for some period of time,
 * periodically call this to clear out stale codebook entries.
 *
 * c  Codebook to clean up
 *
 * Return: number of entries cleared
 *
 * NOTE(review): the 'static int' line, the else branch marker, the keepCnt
 * accumulation, the keep/free loop bodies and the final return are not
 * visible in this chunk of the file.
 */
clear_stale_entries (codeBook * c)
  /* An entry is stale if it missed more than half of the frames seen */
  int staleThresh = c->t >> 1;
  int *keep = (int *) g_malloc (sizeof (int) * (c->numEntries));

  /* SEE WHICH CODEBOOK ENTRIES ARE TOO STALE */
  for (int i = 0; i < c->numEntries; i++) {
    if (c->cb[i]->stale > staleThresh)
      keep[i] = 0;              /* Mark for destruction */
      keep[i] = 1;              /* Mark to keep */

  /* KEEP ONLY THE GOOD */
  c->t = 0;                     /* Full reset on stale tracking */
  foo = (code_element **) g_malloc (sizeof (code_element *) * keepCnt);
  for (int ii = 0; ii < c->numEntries; ii++) {
      /* We have to refresh these entries for next clearStale */
      foo[k]->t_last_update = 0;

  numCleared = c->numEntries - keepCnt;
  c->numEntries = keepCnt;
/*
 * uchar background_diff (uchar *p, codeBook &c, int minMod, int maxMod)
 * Given a pixel and a codebook, determine whether the pixel is covered by
 * the codebook.
 *
 * p           Pixel pointer (YUV interleaved)
 * numChannels Number of channels we are testing
 * maxMod      Add this (possibly negative) number onto the max level when
 *             determining if the new pixel is foreground
 * minMod      Subtract this (possibly negative) number from the min level
 *             when determining if the new pixel is foreground
 *
 * NOTES: minMod and maxMod must have length numChannels, e.g. 3 channels =>
 * minMod[3], maxMod[3]. There is one min and one max threshold per channel.
 *
 * Return: 0 => background, 255 => foreground
 *
 * NOTE(review): the 'static unsigned char' line, local declarations
 * (i, matchChannel) and the two return statements are not visible in this
 * chunk of the file.
 */
background_diff (unsigned char *p, codeBook * c, int numChannels,
    int *minMod, int *maxMod)
  /* SEE IF THIS FITS AN EXISTING CODEWORD */
  for (i = 0; i < c->numEntries; i++) {
    for (int n = 0; n < numChannels; n++) {
      if ((c->cb[i]->min[n] - minMod[n] <= *(p + n)) &&
          (*(p + n) <= c->cb[i]->max[n] + maxMod[n])) {
        matchChannel++;         /* Found an entry for this channel */
    if (matchChannel == numChannels) {
      break;                    /* Found an entry that matched all channels */
  /* No codeword covered the pixel => it is foreground */
  if (i >= c->numEntries)
/*
 * void find_connected_components (IplImage *mask, int poly1_hull0,
 *     float perimScale, int *num, CvRect *bbs, CvPoint *centers)
 * This cleans up the foreground segmentation mask derived from calls to
 * the background-diff routines.
 *
 * mask        Is a grayscale (8-bit depth) "raw" mask image that
 *             will be cleaned up
 * poly1_hull0 If set, approximate connected component by (DEFAULT)
 *             polygon, or else convex hull (0)
 * perimScale  Len = image (width+height)/perimScale. If contour
 *             len < this, delete that contour (DEFAULT: 4)
 * num         Maximum number of rectangles and/or centers to
 *             return; on return, will contain number filled
 * bbs         Pointer to bounding box rectangle vector of
 *             length num. (DEFAULT SETTING: NULL)
 * centers     Pointer to contour centers vector of length num
 *
 * NOTE(review): this comment documents the book's original IplImage-based
 * signature; the implementation below takes (Mat mask, int poly1_hull0,
 * float perimScale) only. Several lines (the 'static void' line, the idx
 * initialization, the contour-offset argument to findContours, early-return
 * and closing braces) are not visible in this chunk of the file.
 */
/* Approx.threshold - the bigger it is, the simpler is the boundary */
#define CVCONTOUR_APPROX_LEVEL 1
/* How many iterations of erosion and/or dilation there should be */
#define CVCLOSE_ITR 1
find_connected_components (Mat mask, int poly1_hull0, float perimScale)
  /* Just some convenience variables */
  const Scalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
  //const Scalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);

  /* CLEAN UP RAW MASK: open removes speckle, close fills small holes */
  morphologyEx (mask, mask, MORPH_OPEN, Mat (), Point (-1, -1), CVCLOSE_ITR);
  morphologyEx (mask, mask, MORPH_CLOSE, Mat (), Point (-1, -1), CVCLOSE_ITR);

  /* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
  std::vector < std::vector < Point > >contours;
  std::vector < std::vector < Point > >to_draw;
  std::vector < Vec4i > hierarchy;
  findContours (mask, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE,
  if (contours.size () == 0)

  /* Walk the top-level contours via hierarchy[idx][0] (next sibling) */
  for (; idx >= 0; idx = hierarchy[idx][0]) {
    const std::vector < Point > &c = contours[idx];
    double len = fabs (contourArea (Mat (c)));
    /* Minimum perimeter a contour must exceed to be kept */
    double q = (mask.size ().height + mask.size ().width) / perimScale;
    std::vector < Point > c_new;
    /* Polygon approximation: close the curve only for outermost contours
     * (no child and no parent in the hierarchy) */
    approxPolyDP (c, c_new, CVCONTOUR_APPROX_LEVEL, (hierarchy[idx][2] < 0
            && hierarchy[idx][3] < 0));
    /* Alternative: convex hull (poly1_hull0 == 0 path) */
    convexHull (c, c_new, true, true);
    to_draw.push_back (c_new);

  /* Repaint the mask with only the retained, simplified regions */
  mask.setTo (Scalar::all (0));
  if (to_draw.size () > 0) {
    drawContours (mask, to_draw, -1, CVX_WHITE, FILLED);

#endif /* ifdef CODE_FROM_OREILLY_BOOK */
/* Run one frame through the MOG background subtractor; the FG mask lands in
 * filter->cvFG. NOTE(review): the 'static int' line, braces and the return
 * statement are not visible in this chunk. */
run_mog_iteration (GstSegmentation * filter)
  /*
     BackgroundSubtractorMOG [1], Gaussian Mixture-based Background/Foreground
     Segmentation Algorithm. OpenCV MOG implements the algorithm described in [2].

     [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
     [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
     mixture model for real-time tracking with shadow detection", Proc. 2nd
     European Workshop on Advanced Video-Based Surveillance Systems, 2001
   */
  filter->mog->apply (filter->cvYUV, filter->cvFG, filter->learning_rate);
/* Run one frame through the MOG2 background subtractor; the FG mask lands in
 * filter->cvFG. NOTE(review): the 'static int' line, braces and the return
 * statement are not visible in this chunk. */
run_mog2_iteration (GstSegmentation * filter)
  /*
     BackgroundSubtractorMOG2 [1], Gaussian Mixture-based Background/Foreground
     segmentation algorithm. OpenCV MOG2 implements the algorithm described in
     [2] and [3].

     [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
     [2] Z.Zivkovic, "Improved adaptive Gaussian mixture model for background
     subtraction", International Conference Pattern Recognition, UK, August, 2004.
     [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation per
     Image Pixel for the Task of Background Subtraction", Pattern Recognition
     Letters, vol. 27, no. 7, pages 773-780, 2006.
   */
  filter->mog2->apply (filter->cvYUV, filter->cvFG, filter->learning_rate);