3 * Copyright (C) 2013 Miguel Casas-Sanchez <miguelecasassanchez@gmail.com>
4 * Except: Parts of code inside the preprocessor define CODE_FROM_OREILLY_BOOK,
5 * which are downloaded from O'Reilly website
6 * [http://examples.oreilly.com/9780596516130/]
7 * and adapted. Its license reads:
9 * Right to use this code in any way you want without warrenty, support or
10 * any guarentee of it working. "
13 * Permission is hereby granted, free of charge, to any person obtaining a
14 * copy of this software and associated documentation files (the "Software"),
15 * to deal in the Software without restriction, including without limitation
16 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 * and/or sell copies of the Software, and to permit persons to whom the
18 * Software is furnished to do so, subject to the following conditions:
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29 * DEALINGS IN THE SOFTWARE.
31 * Alternatively, the contents of this file may be used under the
32 * GNU Lesser General Public License Version 2.1 (the "LGPL"), in
33 * which case the following provisions apply instead of the ones
36 * This library is free software; you can redistribute it and/or
37 * modify it under the terms of the GNU Library General Public
38 * License as published by the Free Software Foundation; either
39 * version 2 of the License, or (at your option) any later version.
41 * This library is distributed in the hope that it will be useful,
42 * but WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * Library General Public License for more details.
46 * You should have received a copy of the GNU Library General Public
47 * License along with this library; if not, write to the
48 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
49 * Boston, MA 02110-1301, USA.
51 #define CODE_FROM_OREILLY_BOOK
54 * SECTION:element-segmentation
56 * This element creates and updates a fg/bg model using one of several approaches.
57 * The one called "codebook" refers to the codebook approach following the opencv
58 * O'Reilly book [1] implementation of the algorithm described in K. Kim,
59 * T. H. Chalidabhongse, D. Harwood and L. Davis [2]. BackgroundSubtractorMOG [3],
60 * or MOG for short, refers to a Gaussian Mixture-based Background/Foreground
61 * Segmentation Algorithm. OpenCV MOG implements the algorithm described in [4].
62 * BackgroundSubtractorMOG2 [5], refers to another Gaussian Mixture-based
63 * Background/Foreground segmentation algorithm. OpenCV MOG2 implements the
64 * algorithm described in [6] and [7].
66 * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary Bradski
67 * and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
68 * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
69 * Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005.
70 * [3] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
71 * [4] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
72 * mixture model for real-time tracking with shadow detection", Proc. 2nd
73 * European Workshop on Advanced Video-Based Surveillance Systems, 2001
74 * [5] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
75 * [6] Z.Zivkovic, "Improved adaptive Gaussian mixture model for background
76 * subtraction", International Conference Pattern Recognition, UK, August, 2004.
77 * [7] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
78 * per Image Pixel for the Task of Background Subtraction", Pattern Recognition
79 * Letters, vol. 27, no. 7, pages 773-780, 2006.
81 * ## Example launch line
84 * gst-launch-1.0 v4l2src device=/dev/video0 ! videoconvert ! segmentation test-mode=true method=2 ! videoconvert ! ximagesink
92 #include "gstsegmentation.h"
93 #include <opencv2/imgproc.hpp>
95 GST_DEBUG_CATEGORY_STATIC (gst_segmentation_debug);
96 #define GST_CAT_DEFAULT gst_segmentation_debug
100 /* Filter signals and args */
119 } GstSegmentationMethod;
121 #define DEFAULT_TEST_MODE FALSE
122 #define DEFAULT_METHOD METHOD_MOG2
123 #define DEFAULT_LEARNING_RATE 0.01
125 #define GST_TYPE_SEGMENTATION_METHOD (gst_segmentation_method_get_type ())
127 gst_segmentation_method_get_type (void)
129 static GType etype = 0;
131 static const GEnumValue values[] = {
132 {METHOD_BOOK, "Codebook-based segmentation (Bradski2008)", "codebook"},
133 {METHOD_MOG, "Mixture-of-Gaussians segmentation (Bowden2001)", "mog"},
134 {METHOD_MOG2, "Mixture-of-Gaussians segmentation (Zivkovic2004)", "mog2"},
137 etype = g_enum_register_static ("GstSegmentationMethod", values);
/* Register GstSegmentation as a subclass of GstOpencvVideoFilter. */
142 G_DEFINE_TYPE (GstSegmentation, gst_segmentation, GST_TYPE_OPENCV_VIDEO_FILTER);
/* Sink and source pad templates: both sides negotiate RGBA only — the alpha
 * channel of the output carries the computed fg/bg mask (see transform_ip).
 * NOTE(review): the pad direction/presence arguments are elided from this
 * view of the file — confirm against the full source. */
144 static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
147 GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
149 static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
152 GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
156 gst_segmentation_set_property (GObject * object, guint prop_id,
157 const GValue * value, GParamSpec * pspec);
159 gst_segmentation_get_property (GObject * object, guint prop_id,
160 GValue * value, GParamSpec * pspec);
162 static GstFlowReturn gst_segmentation_transform_ip (GstOpencvVideoFilter *
163 filter, GstBuffer * buffer, Mat img);
165 static void gst_segmentation_finalize (GObject * object);
166 static gboolean gst_segmentation_set_caps (GstOpencvVideoFilter * filter,
167 gint in_width, gint in_height, int in_cv_type, gint out_width,
168 gint out_height, int out_cv_type);
170 /* Codebook algorithm + connected components functions*/
171 static int update_codebook (unsigned char *p, codeBook * c,
172 unsigned *cbBounds, int numChannels);
173 static int clear_stale_entries (codeBook * c);
174 static unsigned char background_diff (unsigned char *p, codeBook * c,
175 int numChannels, int *minMod, int *maxMod);
176 static void find_connected_components (Mat mask, int poly1_hull0,
179 /* MOG (Mixture-of-Gaussians functions */
180 static int run_mog_iteration (GstSegmentation * filter);
181 static int run_mog2_iteration (GstSegmentation * filter);
183 /* initialize the segmentation's class */
185 gst_segmentation_class_init (GstSegmentationClass * klass)
187 GObjectClass *gobject_class;
188 GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
189 GstOpencvVideoFilterClass *cvfilter_class =
190 (GstOpencvVideoFilterClass *) klass;
192 gobject_class = (GObjectClass *) klass;
194 gobject_class->finalize = gst_segmentation_finalize;
195 gobject_class->set_property = gst_segmentation_set_property;
196 gobject_class->get_property = gst_segmentation_get_property;
199 cvfilter_class->cv_trans_ip_func = gst_segmentation_transform_ip;
200 cvfilter_class->cv_set_caps = gst_segmentation_set_caps;
202 g_object_class_install_property (gobject_class, PROP_METHOD,
203 g_param_spec_enum ("method",
204 "Segmentation method to use",
205 "Segmentation method to use",
206 GST_TYPE_SEGMENTATION_METHOD, DEFAULT_METHOD,
207 (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
209 g_object_class_install_property (gobject_class, PROP_TEST_MODE,
210 g_param_spec_boolean ("test-mode", "test-mode",
211 "If true, the output RGB is overwritten with the calculated foreground (white color)",
212 DEFAULT_TEST_MODE, (GParamFlags)
213 (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
215 g_object_class_install_property (gobject_class, PROP_LEARNING_RATE,
216 g_param_spec_float ("learning-rate", "learning-rate",
217 "Speed with which a motionless foreground pixel would become background (inverse of number of frames)",
218 0, 1, DEFAULT_LEARNING_RATE, (GParamFlags) (G_PARAM_READWRITE)));
220 gst_element_class_set_static_metadata (element_class,
221 "Foreground/background video sequence segmentation",
222 "Filter/Effect/Video",
223 "Create a Foregound/Background mask applying a particular algorithm",
224 "Miguel Casas-Sanchez <miguelecasassanchez@gmail.com>");
226 gst_element_class_add_static_pad_template (element_class, &src_factory);
227 gst_element_class_add_static_pad_template (element_class, &sink_factory);
231 /* initialize the new element
232 * instantiate pads and add them to element
233 * set pad calback functions
234 * initialize instance structure
237 gst_segmentation_init (GstSegmentation * filter)
239 filter->method = DEFAULT_METHOD;
240 filter->test_mode = DEFAULT_TEST_MODE;
241 filter->framecount = 0;
242 filter->learning_rate = DEFAULT_LEARNING_RATE;
243 gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER (filter), TRUE);
247 gst_segmentation_set_property (GObject * object, guint prop_id,
248 const GValue * value, GParamSpec * pspec)
250 GstSegmentation *filter = GST_SEGMENTATION (object);
254 filter->method = g_value_get_enum (value);
257 filter->test_mode = g_value_get_boolean (value);
259 case PROP_LEARNING_RATE:
260 filter->learning_rate = g_value_get_float (value);
263 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
269 gst_segmentation_get_property (GObject * object, guint prop_id,
270 GValue * value, GParamSpec * pspec)
272 GstSegmentation *filter = GST_SEGMENTATION (object);
276 g_value_set_enum (value, filter->method);
279 g_value_set_boolean (value, filter->test_mode);
281 case PROP_LEARNING_RATE:
282 g_value_set_float (value, filter->learning_rate);
285 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
291 gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
292 gint in_height, int in_cv_type,
293 gint out_width, gint out_height, int out_cv_type)
295 GstSegmentation *segmentation = GST_SEGMENTATION (filter);
298 size = Size (in_width, in_height);
299 segmentation->width = in_width;
300 segmentation->height = in_height;
302 segmentation->cvRGB.create (size, CV_8UC3);
303 segmentation->cvYUV.create (size, CV_8UC3);
305 segmentation->cvFG = Mat::zeros (size, CV_8UC1);
307 segmentation->ch1.create (size, CV_8UC1);
308 segmentation->ch2.create (size, CV_8UC1);
309 segmentation->ch3.create (size, CV_8UC1);
311 /* Codebook method */
312 segmentation->TcodeBook = (codeBook *)
313 g_malloc (sizeof (codeBook) *
314 (segmentation->width * segmentation->height + 1));
315 for (int j = 0; j < segmentation->width * segmentation->height; j++) {
316 segmentation->TcodeBook[j].numEntries = 0;
317 segmentation->TcodeBook[j].t = 0;
319 segmentation->learning_interval = (int) (1.0 / segmentation->learning_rate);
321 /* Mixture-of-Gaussians (mog) methods */
322 segmentation->mog = bgsegm::createBackgroundSubtractorMOG ();
323 segmentation->mog2 = createBackgroundSubtractorMOG2 ();
330 gst_segmentation_finalize (GObject * object)
332 GstSegmentation *filter = GST_SEGMENTATION (object);
334 filter->cvRGB.release ();
335 filter->cvYUV.release ();
336 filter->cvFG.release ();
337 filter->ch1.release ();
338 filter->ch2.release ();
339 filter->ch3.release ();
340 filter->mog.release ();
341 filter->mog2.release ();
342 g_free (filter->TcodeBook);
344 G_OBJECT_CLASS (gst_segmentation_parent_class)->finalize (object);
348 gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter,
349 GstBuffer * buffer, Mat img)
351 GstSegmentation *filter = GST_SEGMENTATION (cvfilter);
354 filter->framecount++;
356 /* Image preprocessing: color space conversion etc */
357 cvtColor (img, filter->cvRGB, COLOR_RGBA2RGB);
358 cvtColor (filter->cvRGB, filter->cvYUV, COLOR_RGB2YCrCb);
360 /* Create and update a fg/bg model using a codebook approach following the
361 * opencv O'Reilly book [1] implementation of the algo described in [2].
363 * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary
364 * Bradski and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
365 * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
366 * Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005. */
367 if (METHOD_BOOK == filter->method) {
368 unsigned cbBounds[3] = { 10, 5, 5 };
369 int minMod[3] = { 20, 20, 20 }, maxMod[3] = {
373 if (filter->framecount < 30) {
374 /* Learning background phase: update_codebook on every frame */
375 for (j = 0; j < filter->width * filter->height; j++) {
376 update_codebook (filter->cvYUV.data + j * 3,
377 (codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
380 /* this updating is responsible for FG becoming BG again */
381 if (filter->framecount % filter->learning_interval == 0) {
382 for (j = 0; j < filter->width * filter->height; j++) {
383 update_codebook (filter->cvYUV.data + j * 3,
384 (codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
387 if (filter->framecount % 60 == 0) {
388 for (j = 0; j < filter->width * filter->height; j++)
389 clear_stale_entries ((codeBook *) & (filter->TcodeBook[j]));
392 for (j = 0; j < filter->width * filter->height; j++) {
394 (filter->cvYUV.data + j * 3,
395 (codeBook *) & (filter->TcodeBook[j]), 3, minMod, maxMod)) {
396 filter->cvFG.data[j] = (char) 255;
398 filter->cvFG.data[j] = 0;
403 /* 3rd param is the smallest area to show: (w+h)/param , in pixels */
404 find_connected_components (filter->cvFG, 1, 10000);
407 /* Create the foreground and background masks using BackgroundSubtractorMOG [1],
408 * Gaussian Mixture-based Background/Foreground segmentation algorithm. OpenCV
409 * MOG implements the algorithm described in [2].
411 * [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
412 * [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
413 * mixture model for real-time tracking with shadow detection", Proc. 2nd
414 * European Workshop on Advanced Video-Based Surveillance Systems, 2001
416 else if (METHOD_MOG == filter->method) {
417 run_mog_iteration (filter);
419 /* Create the foreground and background masks using BackgroundSubtractorMOG2
420 * [1], Gaussian Mixture-based Background/Foreground segmentation algorithm.
421 * OpenCV MOG2 implements the algorithm described in [2] and [3].
423 * [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
424 * [2] Z.Zivkovic, "Improved adaptive Gausian mixture model for background
425 * subtraction", International Conference Pattern Recognition, UK, Aug 2004.
426 * [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
427 * per Image Pixel for the Task of Background Subtraction", Pattern
428 * Recognition Letters, vol. 27, no. 7, pages 773-780, 2006. */
429 else if (METHOD_MOG2 == filter->method) {
430 run_mog2_iteration (filter);
433 /* if we want to test_mode, just overwrite the output */
434 std::vector < cv::Mat > channels (3);
436 if (filter->test_mode) {
437 cvtColor (filter->cvFG, filter->cvRGB, COLOR_GRAY2RGB);
439 split (filter->cvRGB, channels);
441 split (img, channels);
443 channels.push_back (filter->cvFG);
445 /* copy anyhow the fg/bg to the alpha channel in the output image */
446 merge (channels, img);
452 /* entry point to initialize the plug-in
453 * initialize the plug-in itself
454 * register the element factories and other features
457 gst_segmentation_plugin_init (GstPlugin * plugin)
459 GST_DEBUG_CATEGORY_INIT (gst_segmentation_debug, "segmentation",
460 0, "Performs Foreground/Background segmentation in video sequences");
462 return gst_element_register (plugin, "segmentation", GST_RANK_NONE,
463 GST_TYPE_SEGMENTATION);
468 #ifdef CODE_FROM_OREILLY_BOOK /* See license at the beginning of the page */
470 int update_codebook(uchar *p, codeBook &c, unsigned cbBounds)
471 Updates the codebook entry with a new data point
473 p Pointer to a YUV or HSI pixel
474 c Codebook for this pixel
475 cbBounds Learning bounds for codebook (Rule of thumb: 10)
476 numChannels Number of color channels we're learning
479 cvBounds must be of length equal to numChannels
/* update_codebook (O'Reilly book code): feed one pixel *p (numChannels
 * interleaved channel bytes, here YUV) into its per-pixel codebook c —
 * either widen an existing matching codeword or append a new one, and
 * track staleness of all codewords.
 * NOTE(review): this region of the file is elided — source lines are
 * missing between the numbered rows below (e.g. the 255/0 clamping of
 * high[]/low[], local declarations, closing braces and the return), so
 * read it as an outline, not compilable code. */
485 update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
489 unsigned int high[3], low[3];
/* Per-channel learning bounds around the new pixel value (clamping rows
 * are elided from this view). */
493 for (n = 0; n < numChannels; n++) {
494 high[n] = p[n] + cbBounds[n];
498 if (p[n] > cbBounds[n])
499 low[n] = p[n] - cbBounds[n];
504 /* SEE IF THIS FITS AN EXISTING CODEWORD */
505 for (i = 0; i < c->numEntries; i++) {
507 for (n = 0; n < numChannels; n++) {
508 if ((c->cb[i]->learnLow[n] <= *(p + n)) &&
509 /* Found an entry for this channel */
510 (*(p + n) <= c->cb[i]->learnHigh[n])) {
514 if (matchChannel == numChannels) { /* If an entry was found */
515 c->cb[i]->t_last_update = c->t;
516 /* adjust this codeword for the first channel */
/* Widen the matched codeword's observed min/max to cover this pixel. */
517 for (n = 0; n < numChannels; n++) {
518 if (c->cb[i]->max[n] < *(p + n)) {
519 c->cb[i]->max[n] = *(p + n);
520 } else if (c->cb[i]->min[n] > *(p + n)) {
521 c->cb[i]->min[n] = *(p + n);
527 /* OVERHEAD TO TRACK POTENTIAL STALE ENTRIES */
528 for (int s = 0; s < c->numEntries; s++) {
529 /* Track which codebook entries are going stale: */
530 int negRun = c->t - c->cb[s]->t_last_update;
531 if (c->cb[s]->stale < negRun)
532 c->cb[s]->stale = negRun;
534 /* ENTER A NEW CODEWORD IF NEEDED */
535 if (i == c->numEntries) { /* if no existing codeword found, make one */
/* Grow the codeword pointer array by one: copy old pointers into the new
 * array, then allocate the fresh code_element at the end. */
537 (code_element **) g_malloc (sizeof (code_element *) *
538 (c->numEntries + 1));
539 for (int ii = 0; ii < c->numEntries; ii++) {
540 foo[ii] = c->cb[ii]; /* copy all pointers */
542 foo[c->numEntries] = (code_element *) g_malloc (sizeof (code_element));
/* Initialize the fresh codeword from the current pixel and its bounds. */
546 for (n = 0; n < numChannels; n++) {
547 c->cb[c->numEntries]->learnHigh[n] = high[n];
548 c->cb[c->numEntries]->learnLow[n] = low[n];
549 c->cb[c->numEntries]->max[n] = *(p + n);
550 c->cb[c->numEntries]->min[n] = *(p + n);
552 c->cb[c->numEntries]->t_last_update = c->t;
553 c->cb[c->numEntries]->stale = 0;
556 /* SLOWLY ADJUST LEARNING BOUNDS */
/* Drift the learn bounds of codeword i one step per call toward the new
 * high/low so the model adapts gradually. */
557 for (n = 0; n < numChannels; n++) {
558 if (c->cb[i]->learnHigh[n] < high[n])
559 c->cb[i]->learnHigh[n] += 1;
560 if (c->cb[i]->learnLow[n] > low[n])
561 c->cb[i]->learnLow[n] -= 1;
571 int clear_stale_entries(codeBook &c)
572 During learning, after you've learned for some period of time,
573 periodically call this to clear out stale codebook entries
575 c Codebook to clean up
578 number of entries cleared
/* clear_stale_entries (O'Reilly book code): drop every codeword whose stale
 * counter exceeds half the learning time c->t, compacting the remaining
 * codewords into a fresh array and resetting stale tracking.
 * NOTE(review): elided fragment — lines are missing between the numbered
 * rows (e.g. keepCnt accumulation, the compaction-loop body, freeing of
 * discarded entries, and the return of the cleared count). */
581 clear_stale_entries (codeBook * c)
583 int staleThresh = c->t >> 1;
584 int *keep = (int *) g_malloc (sizeof (int) * (c->numEntries));
589 /* SEE WHICH CODEBOOK ENTRIES ARE TOO STALE */
590 for (int i = 0; i < c->numEntries; i++) {
591 if (c->cb[i]->stale > staleThresh)
592 keep[i] = 0; /* Mark for destruction */
594 keep[i] = 1; /* Mark to keep */
598 /* KEEP ONLY THE GOOD */
599 c->t = 0; /* Full reset on stale tracking */
/* Compact surviving codewords into foo (sized keepCnt). */
600 foo = (code_element **) g_malloc (sizeof (code_element *) * keepCnt);
602 for (int ii = 0; ii < c->numEntries; ii++) {
605 /* We have to refresh these entries for next clearStale */
606 foo[k]->t_last_update = 0;
/* Book-keeping: entries not kept count as cleared. */
614 numCleared = c->numEntries - keepCnt;
615 c->numEntries = keepCnt;
622 uchar background_diff( uchar *p, codeBook &c,
623 int minMod, int maxMod)
624 Given a pixel and a codebook, determine if the pixel is
625 covered by the codebook
627 p Pixel pointer (YUV interleaved)
629 numChannels Number of channels we are testing
630 maxMod Add this (possibly negative) number onto
632 max level when determining if new pixel is foreground
633 minMod Subract this (possibly negative) number from
634 min level when determining if new pixel is foreground
637 minMod and maxMod must have length numChannels,
638 e.g. 3 channels => minMod[3], maxMod[3]. There is one min and
639 one max threshold per channel.
642 0 => background, 255 => foreground
/* background_diff (O'Reilly book code): classify pixel *p against codebook c.
 * Per the header comment above: returns 0 for background (some codeword
 * covers every channel within [min-minMod, max+maxMod]) and 255 for
 * foreground (no codeword matched all channels).
 * NOTE(review): elided fragment — the matchChannel reset, closing braces
 * and the actual return statements are missing from this view. */
645 background_diff (unsigned char *p, codeBook * c, int numChannels,
646 int *minMod, int *maxMod)
649 /* SEE IF THIS FITS AN EXISTING CODEWORD */
651 for (i = 0; i < c->numEntries; i++) {
653 for (int n = 0; n < numChannels; n++) {
654 if ((c->cb[i]->min[n] - minMod[n] <= *(p + n)) &&
655 (*(p + n) <= c->cb[i]->max[n] + maxMod[n])) {
656 matchChannel++; /* Found an entry for this channel */
661 if (matchChannel == numChannels) {
662 break; /* Found an entry that matched all channels */
/* i past the end here means no codeword matched => foreground. */
665 if (i >= c->numEntries)
674 void find_connected_components(IplImage *mask, int poly1_hull0,
675 float perimScale, int *num,
676 CvRect *bbs, CvPoint *centers)
677 This cleans up the foreground segmentation mask derived from calls
680 mask Is a grayscale (8-bit depth) "raw" mask image that
684 poly1_hull0 If set, approximate connected component by
685 (DEFAULT) polygon, or else convex hull (0)
686 perimScale Len = image (width+height)/perimScale. If contour
687 len < this, delete that contour (DEFAULT: 4)
688 num Maximum number of rectangles and/or centers to
689 return; on return, will contain number filled
691 bbs Pointer to bounding box rectangle vector of
692 length num. (DEFAULT SETTING: NULL)
693 centers Pointer to contour centers vector of length
697 /* Approx.threshold - the bigger it is, the simpler is the boundary */
698 #define CVCONTOUR_APPROX_LEVEL 1
699 /* How many iterations of erosion and/or dilation there should be */
700 #define CVCLOSE_ITR 1
702 find_connected_components (Mat mask, int poly1_hull0, float perimScale)
704 /* Just some convenience variables */
705 const Scalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
706 //const Scalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);
709 /* CLEAN UP RAW MASK */
710 morphologyEx (mask, mask, MORPH_OPEN, Mat (), Point (-1, -1), CVCLOSE_ITR);
711 morphologyEx (mask, mask, MORPH_CLOSE, Mat (), Point (-1, -1), CVCLOSE_ITR);
712 /* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
714 std::vector < std::vector < Point > >contours;
715 std::vector < std::vector < Point > >to_draw;
716 std::vector < Vec4i > hierarchy;
717 findContours (mask, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE,
719 if (contours.size () == 0)
722 for (; idx >= 0; idx = hierarchy[idx][0]) {
723 const std::vector < Point > &c = contours[idx];
724 double len = fabs (contourArea (Mat (c)));
725 double q = (mask.size ().height + mask.size ().width) / perimScale;
727 std::vector < Point > c_new;
729 approxPolyDP (c, c_new, CVCONTOUR_APPROX_LEVEL, (hierarchy[idx][2] < 0
730 && hierarchy[idx][3] < 0));
732 convexHull (c, c_new, true, true);
734 to_draw.push_back (c_new);
738 mask.setTo (Scalar::all (0));
739 if (to_draw.size () > 0) {
740 drawContours (mask, to_draw, -1, CVX_WHITE, FILLED);
744 #endif /*ifdef CODE_FROM_OREILLY_BOOK */
747 run_mog_iteration (GstSegmentation * filter)
750 BackgroundSubtractorMOG [1], Gaussian Mixture-based Background/Foreground
751 Segmentation Algorithm. OpenCV MOG implements the algorithm described in [2].
753 [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
754 [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
755 mixture model for real-time tracking with shadow detection", Proc. 2nd
756 European Workshop on Advanced Video-Based Surveillance Systems, 2001
759 filter->mog->apply (filter->cvYUV, filter->cvFG, filter->learning_rate);
765 run_mog2_iteration (GstSegmentation * filter)
768 BackgroundSubtractorMOG2 [1], Gaussian Mixture-based Background/Foreground
769 segmentation algorithm. OpenCV MOG2 implements the algorithm described in
772 [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
773 [2] Z.Zivkovic, "Improved adaptive Gausian mixture model for background
774 subtraction", International Conference Pattern Recognition, UK, August, 2004.
775 [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation per
776 Image Pixel for the Task of Background Subtraction", Pattern Recognition
777 Letters, vol. 27, no. 7, pages 773-780, 2006.
780 filter->mog2->apply (filter->cvYUV, filter->cvFG, filter->learning_rate);