3 * Copyright (C) 2013 Miguel Casas-Sanchez <miguelecasassanchez@gmail.com>
4 * Except: Parts of code inside the preprocessor define CODE_FROM_OREILLY_BOOK,
5 * which are downloaded from O'Reilly website
6 * [http://examples.oreilly.com/9780596516130/]
7 * and adapted. Its license reads:
9 * Right to use this code in any way you want without warrenty, support or
10 * any guarentee of it working. "
13 * Permission is hereby granted, free of charge, to any person obtaining a
14 * copy of this software and associated documentation files (the "Software"),
15 * to deal in the Software without restriction, including without limitation
16 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 * and/or sell copies of the Software, and to permit persons to whom the
18 * Software is furnished to do so, subject to the following conditions:
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29 * DEALINGS IN THE SOFTWARE.
31 * Alternatively, the contents of this file may be used under the
32 * GNU Lesser General Public License Version 2.1 (the "LGPL"), in
33 * which case the following provisions apply instead of the ones
36 * This library is free software; you can redistribute it and/or
37 * modify it under the terms of the GNU Library General Public
38 * License as published by the Free Software Foundation; either
39 * version 2 of the License, or (at your option) any later version.
41 * This library is distributed in the hope that it will be useful,
42 * but WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * Library General Public License for more details.
46 * You should have received a copy of the GNU Library General Public
47 * License along with this library; if not, write to the
48 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
49 * Boston, MA 02110-1301, USA.
51 #define CODE_FROM_OREILLY_BOOK
54 * SECTION:element-segmentation
56 * This element creates and updates a fg/bg model using one of several approaches.
57 * The one called "codebook" refers to the codebook approach following the opencv
58 * O'Reilly book [1] implementation of the algorithm described in K. Kim,
59 * T. H. Chalidabhongse, D. Harwood and L. Davis [2]. BackgroundSubtractorMOG [3],
60 * or MOG for short, refers to a Gaussian Mixture-based Background/Foreground
61 * Segmentation Algorithm. OpenCV MOG implements the algorithm described in [4].
62 * BackgroundSubtractorMOG2 [5], refers to another Gaussian Mixture-based
63 * Background/Foreground segmentation algorithm. OpenCV MOG2 implements the
64 * algorithm described in [6] and [7].
66 * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary Bradski
67 * and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
68 * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
69 * Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005.
70 * [3] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
71 * [4] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
72 * mixture model for real-time tracking with shadow detection", Proc. 2nd
73 * European Workshop on Advanced Video-Based Surveillance Systems, 2001
74 * [5] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
75 * [6] Z.Zivkovic, "Improved adaptive Gaussian mixture model for background
76 * subtraction", International Conference Pattern Recognition, UK, August, 2004.
77 * [7] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
78 * per Image Pixel for the Task of Background Subtraction", Pattern Recognition
79 * Letters, vol. 27, no. 7, pages 773-780, 2006.
82 * <title>Example launch line</title>
84 * gst-launch-1.0 v4l2src device=/dev/video0 ! videoconvert ! segmentation test-mode=true method=2 ! videoconvert ! ximagesink
93 #include "gstsegmentation.h"
94 #include <opencv2/imgproc.hpp>
95 #if (CV_MAJOR_VERSION >= 4)
96 #include <opencv2/imgproc/imgproc_c.h>
/* Debug category used via GST_CAT_DEFAULT by all logging in this file. */
99 GST_DEBUG_CATEGORY_STATIC (gst_segmentation_debug);
100 #define GST_CAT_DEFAULT gst_segmentation_debug
106 /* Filter signals and args */
125 } GstSegmentationMethod;
/* Property defaults: mask not painted over the output (test-mode off),
 * MOG2 segmentation, and a learning rate whose inverse (100 frames)
 * becomes the codebook re-learning interval in set_caps. */
127 #define DEFAULT_TEST_MODE FALSE
128 #define DEFAULT_METHOD METHOD_MOG2
129 #define DEFAULT_LEARNING_RATE 0.01
/* GType accessor macro for the "method" enum property. */
131 #define GST_TYPE_SEGMENTATION_METHOD (gst_segmentation_method_get_type ())
/* Registers (presumably once, behind a static guard — registration code not
 * fully visible here) and returns the GEnum type backing the "method"
 * property. The nicks ("codebook", "mog", "mog2") are the values accepted
 * on the gst-launch command line. */
134 gst_segmentation_method_get_type (void)
143 {METHOD_BOOK, "Codebook-based segmentation (Bradski2008)", "codebook"},
144 {METHOD_MOG, "Mixture-of-Gaussians segmentation (Bowden2001)", "mog"},
145 {METHOD_MOG2, "Mixture-of-Gaussians segmentation (Zivkovic2004)", "mog2"},
148 etype = g_enum_register_static ("GstSegmentationMethod", values);
153 G_DEFINE_TYPE (GstSegmentation, gst_segmentation, GST_TYPE_OPENCV_VIDEO_FILTER);
/* Both pads accept only RGBA: the output image's alpha channel is
 * overwritten with the computed foreground mask (see transform_ip's
 * final cvMerge). */
157 sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
160 GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
164 src_factory = GST_STATIC_PAD_TEMPLATE ("src",
167 GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
/* GObject / GstBaseTransform / GstOpencvVideoFilter vfunc prototypes. */
171 gst_segmentation_set_property (GObject * object, guint prop_id,
172 const GValue * value, GParamSpec * pspec);
174 gst_segmentation_get_property (GObject * object, guint prop_id,
175 GValue * value, GParamSpec * pspec);
179 gst_segmentation_transform_ip (GstOpencvVideoFilter * filter,
180 GstBuffer * buffer, IplImage * img);
184 gst_segmentation_stop (GstBaseTransform * basesrc);
187 gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
188 gint in_height, gint in_depth, gint in_channels,
189 gint out_width, gint out_height, gint out_depth, gint out_channels);
191 gst_segmentation_release_all_pointers (GstSegmentation * filter);
193 /* Codebook algorithm + connected components functions*/
195 update_codebook (unsigned char *p, codeBook * c,
196 unsigned *cbBounds, int numChannels);
198 clear_stale_entries (codeBook * c);
200 background_diff (unsigned char *p, codeBook * c,
201 int numChannels, int *minMod, int *maxMod);
203 find_connected_components (IplImage * mask, int poly1_hull0,
204 float perimScale, CvMemStorage * mem_storage, CvSeq * contours);
206 /* MOG (Mixture-of-Gaussians functions */
208 initialise_mog (GstSegmentation * filter);
210 run_mog_iteration (GstSegmentation * filter);
212 run_mog2_iteration (GstSegmentation * filter);
214 finalise_mog (GstSegmentation * filter);
216 /* initialize the segmentation's class */
/* Class initializer: wires the GObject/GstBaseTransform/OpenCV-filter
 * vfuncs and installs the "method", "test-mode" and "learning-rate"
 * properties. */
218 gst_segmentation_class_init (GstSegmentationClass * klass)
223 element_class = GST_ELEMENT_CLASS (klass);
224 GstBaseTransformClass *
225 basesrc_class = GST_BASE_TRANSFORM_CLASS (klass);
226 GstOpencvVideoFilterClass *
227 cvfilter_class = (GstOpencvVideoFilterClass *) klass;
229 gobject_class = (GObjectClass *) klass;
231 gobject_class->set_property = gst_segmentation_set_property;
232 gobject_class->get_property = gst_segmentation_get_property;
/* stop() releases every per-caps OpenCV buffer. */
234 basesrc_class->stop = gst_segmentation_stop;
/* In-place transform + caps notification from the OpenCV base class. */
236 cvfilter_class->cv_trans_ip_func = gst_segmentation_transform_ip;
237 cvfilter_class->cv_set_caps = gst_segmentation_set_caps;
239 g_object_class_install_property (gobject_class, PROP_METHOD,
240 g_param_spec_enum ("method",
241 "Segmentation method to use",
242 "Segmentation method to use",
243 GST_TYPE_SEGMENTATION_METHOD, DEFAULT_METHOD,
244 (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
246 g_object_class_install_property (gobject_class, PROP_TEST_MODE,
247 g_param_spec_boolean ("test-mode", "test-mode",
248 "If true, the output RGB is overwritten with the calculated foreground (white color)",
/* NOTE(review): the (GParamFlags) cast below is applied twice; harmless
 * but redundant. */
249 DEFAULT_TEST_MODE, (GParamFlags)
250 (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
252 g_object_class_install_property (gobject_class, PROP_LEARNING_RATE,
253 g_param_spec_float ("learning-rate", "learning-rate",
254 "Speed with which a motionless foreground pixel would become background (inverse of number of frames)",
/* NOTE(review): unlike the other two properties this one omits
 * G_PARAM_STATIC_STRINGS — confirm whether that is intentional. */
255 0, 1, DEFAULT_LEARNING_RATE, (GParamFlags) (G_PARAM_READWRITE)));
257 gst_element_class_set_static_metadata (element_class,
258 "Foreground/background video sequence segmentation",
259 "Filter/Effect/Video",
/* NOTE(review): "Foregound" below is a typo in a user-visible runtime
 * string ("Foreground"); left byte-identical here, flagged for a fix. */
260 "Create a Foregound/Background mask applying a particular algorithm",
261 "Miguel Casas-Sanchez <miguelecasassanchez@gmail.com>");
263 gst_element_class_add_static_pad_template (element_class, &src_factory);
264 gst_element_class_add_static_pad_template (element_class, &sink_factory);
268 /* initialize the new element
269 * instantiate pads and add them to element
270 * set pad callback functions
271 * initialize instance structure
274 gst_segmentation_init (GstSegmentation * filter)
276 filter->method = DEFAULT_METHOD;
277 filter->test_mode = DEFAULT_TEST_MODE;
278 filter->framecount = 0;
279 filter->learning_rate = DEFAULT_LEARNING_RATE;
/* Process buffers in place: the fg/bg mask is merged straight into the
 * input image's alpha channel (see gst_segmentation_transform_ip). */
280 gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER (filter), TRUE);
/* Standard GObject property setter for method / test-mode / learning-rate. */
284 gst_segmentation_set_property (GObject * object, guint prop_id,
285 const GValue * value, GParamSpec * pspec)
288 filter = GST_SEGMENTATION (object);
292 filter->method = g_value_get_enum (value);
295 filter->test_mode = g_value_get_boolean (value);
297 case PROP_LEARNING_RATE:
/* NOTE(review): learning_interval (derived from this rate in set_caps)
 * is not recomputed here, so changing the rate mid-stream affects the
 * codebook path only after a caps renegotiation. */
298 filter->learning_rate = g_value_get_float (value);
301 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* Standard GObject property getter: reads the current method, test-mode
 * and learning-rate values back out of the instance. */
307 gst_segmentation_get_property (GObject * object, guint prop_id,
308 GValue * value, GParamSpec * pspec)
311 filter = GST_SEGMENTATION (object);
315 g_value_set_enum (value, filter->method);
318 g_value_set_boolean (value, filter->test_mode);
320 case PROP_LEARNING_RATE:
321 g_value_set_float (value, filter->learning_rate);
324 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* Caps handler: (re)allocates every per-frame working image and the
 * per-pixel codebook array for the negotiated frame size, then creates
 * the MOG/MOG2 state. */
331 gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
332 gint in_height, gint in_depth, gint in_channels,
333 gint out_width, gint out_height, gint out_depth, gint out_channels)
336 segmentation = GST_SEGMENTATION (filter);
339 size = cvSize (in_width, in_height);
340 segmentation->width = in_width;
341 segmentation->height = in_height;
/* Renegotiation: drop the buffers sized for the previous caps first. */
343 if (NULL != segmentation->cvRGB)
344 gst_segmentation_release_all_pointers (segmentation);
346 segmentation->cvRGB = cvCreateImage (size, IPL_DEPTH_8U, 3);
347 segmentation->cvYUV = cvCreateImage (size, IPL_DEPTH_8U, 3);
/* Single-channel foreground mask, cleared to all-background. */
349 segmentation->cvFG = cvCreateImage (size, IPL_DEPTH_8U, 1);
350 cvZero (segmentation->cvFG);
/* Scratch channels used to split/merge RGBA in transform_ip. */
352 segmentation->ch1 = cvCreateImage (size, IPL_DEPTH_8U, 1);
353 segmentation->ch2 = cvCreateImage (size, IPL_DEPTH_8U, 1);
354 segmentation->ch3 = cvCreateImage (size, IPL_DEPTH_8U, 1);
356 /* Codebook method */
/* One codeBook per pixel; the loops below only touch width*height of
 * them — the extra +1 slot is presumably a guard element, confirm
 * before changing. */
357 segmentation->TcodeBook = (codeBook *)
358 g_malloc (sizeof (codeBook) *
359 (segmentation->width * segmentation->height + 1));
360 for (int j = 0; j < segmentation->width * segmentation->height; j++) {
361 segmentation->TcodeBook[j].numEntries = 0;
362 segmentation->TcodeBook[j].t = 0;
/* NOTE(review): the property range permits learning-rate == 0, which
 * makes this division degenerate — confirm a guard exists elsewhere. */
364 segmentation->learning_interval = (int) (1.0 / segmentation->learning_rate);
366 /* Mixture-of-Gaussians (mog) methods */
367 initialise_mog (segmentation);
/* GstBaseTransform::stop — free all per-caps resources. Safe to call
 * when nothing was allocated: guarded by cvRGB being non-NULL. */
375 gst_segmentation_stop (GstBaseTransform * basesrc)
378 filter = GST_SEGMENTATION (basesrc);
380 if (filter->cvRGB != NULL)
381 gst_segmentation_release_all_pointers (filter);
/* Releases every OpenCV image, the contour storage, the per-pixel
 * codebook array and the MOG/MOG2 state created in set_caps /
 * initialise_mog. Counterpart of the allocations in set_caps. */
387 gst_segmentation_release_all_pointers (GstSegmentation * filter)
389 cvReleaseImage (&filter->cvRGB);
390 cvReleaseImage (&filter->cvYUV);
391 cvReleaseImage (&filter->cvFG);
392 cvReleaseImage (&filter->ch1);
393 cvReleaseImage (&filter->ch2);
394 cvReleaseImage (&filter->ch3);
396 cvReleaseMemStorage (&filter->mem_storage);
398 g_free (filter->TcodeBook);
399 finalise_mog (filter);
/* Per-buffer in-place transform: converts the RGBA frame to RGB and
 * YCrCb, runs the selected segmentation method to fill cvFG (0 =
 * background, 255 = foreground), and writes the mask into the output
 * frame's alpha channel (or over the RGB too, in test-mode). */
404 gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter,
405 GstBuffer * buffer, IplImage * img)
408 filter = GST_SEGMENTATION (cvfilter);
412 filter->framecount++;
414 /* Image preprocessing: color space conversion etc */
415 cvCvtColor (img, filter->cvRGB, CV_RGBA2RGB);
416 cvCvtColor (filter->cvRGB, filter->cvYUV, CV_RGB2YCrCb);
418 /* Create and update a fg/bg model using a codebook approach following the
419 * opencv O'Reilly book [1] implementation of the algo described in [2].
421 * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary
422 * Bradski and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
423 * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
424 * Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005. */
425 if (METHOD_BOOK == filter->method) {
/* Hard-coded codebook learning bounds and min/max foreground
 * thresholds, per the O'Reilly reference implementation. */
427 cbBounds[3] = { 10, 5, 5 };
429 minMod[3] = { 20, 20, 20 }, maxMod[3] = {
/* NOTE(review): the j*3 / [j] pixel indexing below assumes the IplImage
 * rows are tightly packed (widthStep == width * channels) — confirm for
 * odd widths. */
433 if (filter->framecount < 30) {
434 /* Learning background phase: update_codebook on every frame */
435 for (j = 0; j < filter->width * filter->height; j++) {
436 update_codebook ((unsigned char *) filter->cvYUV->imageData + j * 3,
437 (codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
440 /* this updating is responsible for FG becoming BG again */
441 if (filter->framecount % filter->learning_interval == 0) {
442 for (j = 0; j < filter->width * filter->height; j++) {
443 update_codebook ((uchar *) filter->cvYUV->imageData + j * 3,
444 (codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
/* Periodically prune codewords unused for a long time. */
447 if (filter->framecount % 60 == 0) {
448 for (j = 0; j < filter->width * filter->height; j++)
449 clear_stale_entries ((codeBook *) & (filter->TcodeBook[j]));
/* Classification: paint each pixel fg (255) or bg (0) into cvFG. */
452 for (j = 0; j < filter->width * filter->height; j++) {
454 ((uchar *) filter->cvYUV->imageData + j * 3,
455 (codeBook *) & (filter->TcodeBook[j]), 3, minMod, maxMod)) {
456 filter->cvFG->imageData[j] = (char) 255;
458 filter->cvFG->imageData[j] = 0;
463 /* 3rd param is the smallest area to show: (w+h)/param , in pixels */
464 find_connected_components (filter->cvFG, 1, 10000,
465 filter->mem_storage, filter->contours);
468 /* Create the foreground and background masks using BackgroundSubtractorMOG [1],
469 * Gaussian Mixture-based Background/Foreground segmentation algorithm. OpenCV
470 * MOG implements the algorithm described in [2].
472 * [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
473 * [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
474 * mixture model for real-time tracking with shadow detection", Proc. 2nd
475 * European Workshop on Advanced Video-Based Surveillance Systems, 2001
477 else if (METHOD_MOG == filter->method) {
478 run_mog_iteration (filter);
480 /* Create the foreground and background masks using BackgroundSubtractorMOG2
481 * [1], Gaussian Mixture-based Background/Foreground segmentation algorithm.
482 * OpenCV MOG2 implements the algorithm described in [2] and [3].
484 * [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
485 * [2] Z.Zivkovic, "Improved adaptive Gaussian mixture model for background
486 * subtraction", International Conference Pattern Recognition, UK, Aug 2004.
487 * [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
488 * per Image Pixel for the Task of Background Subtraction", Pattern
489 * Recognition Letters, vol. 27, no. 7, pages 773-780, 2006. */
490 else if (METHOD_MOG2 == filter->method) {
491 run_mog2_iteration (filter);
494 /* if we want to test_mode, just overwrite the output */
495 if (filter->test_mode) {
/* Replace the visible RGB with the (grayscale) mask itself. */
496 cvCvtColor (filter->cvFG, filter->cvRGB, CV_GRAY2RGB);
498 cvSplit (filter->cvRGB, filter->ch1, filter->ch2, filter->ch3, NULL);
500 cvSplit (img, filter->ch1, filter->ch2, filter->ch3, NULL);
502 /* copy anyhow the fg/bg to the alpha channel in the output image */
503 cvMerge (filter->ch1, filter->ch2, filter->ch3, filter->cvFG, img);
509 /* entry point to initialize the plug-in
510 * initialize the plug-in itself
511 * register the element factories and other features
514 gst_segmentation_plugin_init (GstPlugin * plugin)
/* Create the debug category before the element can log anything. */
516 GST_DEBUG_CATEGORY_INIT (gst_segmentation_debug, "segmentation",
517 0, "Performs Foreground/Background segmentation in video sequences");
519 return gst_element_register (plugin, "segmentation", GST_RANK_NONE,
520 GST_TYPE_SEGMENTATION);
525 #ifdef CODE_FROM_OREILLY_BOOK /* See license at the beginning of the page */
527 int update_codebook(uchar *p, codeBook &c, unsigned cbBounds)
528 Updates the codebook entry with a new data point
530 p Pointer to a YUV or HSI pixel
531 c Codebook for this pixel
532 cbBounds Learning bounds for codebook (Rule of thumb: 10)
533 numChannels Number of color channels we're learning
536 cvBounds must be of length equal to numChannels
542 update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
/* Per-channel learning window [low, high] around the incoming pixel. */
555 for (n = 0; n < numChannels; n++) {
556 high[n] = p[n] + cbBounds[n];
560 if (p[n] > cbBounds[n])
561 low[n] = p[n] - cbBounds[n];
566 /* SEE IF THIS FITS AN EXISTING CODEWORD */
567 for (i = 0; i < c->numEntries; i++) {
569 for (n = 0; n < numChannels; n++) {
570 if ((c->cb[i]->learnLow[n] <= *(p + n)) &&
571 /* Found an entry for this channel */
572 (*(p + n) <= c->cb[i]->learnHigh[n])) {
576 if (matchChannel == numChannels) { /* If an entry was found */
577 c->cb[i]->t_last_update = c->t;
578 /* adjust this codeword for the first channel */
579 for (n = 0; n < numChannels; n++) {
580 if (c->cb[i]->max[n] < *(p + n)) {
581 c->cb[i]->max[n] = *(p + n);
582 } else if (c->cb[i]->min[n] > *(p + n)) {
583 c->cb[i]->min[n] = *(p + n);
589 /* OVERHEAD TO TRACK POTENTIAL STALE ENTRIES */
590 for (int s = 0; s < c->numEntries; s++) {
591 /* Track which codebook entries are going stale: */
593 negRun = c->t - c->cb[s]->t_last_update;
594 if (c->cb[s]->stale < negRun)
595 c->cb[s]->stale = negRun;
597 /* ENTER A NEW CODEWORD IF NEEDED */
598 if (i == c->numEntries) { /* if no existing codeword found, make one */
/* Grow the codeword pointer array by one via allocate-and-copy.
 * NOTE(review): confirm the previous c->cb array is g_free'd before
 * being replaced (that code is not visible here). */
601 (code_element **) g_malloc (sizeof (code_element *) *
602 (c->numEntries + 1));
603 for (int ii = 0; ii < c->numEntries; ii++) {
604 foo[ii] = c->cb[ii]; /* copy all pointers */
606 foo[c->numEntries] = (code_element *) g_malloc (sizeof (code_element));
/* Initialise the new codeword from the current pixel and its bounds. */
610 for (n = 0; n < numChannels; n++) {
611 c->cb[c->numEntries]->learnHigh[n] = high[n];
612 c->cb[c->numEntries]->learnLow[n] = low[n];
613 c->cb[c->numEntries]->max[n] = *(p + n);
614 c->cb[c->numEntries]->min[n] = *(p + n);
616 c->cb[c->numEntries]->t_last_update = c->t;
617 c->cb[c->numEntries]->stale = 0;
620 /* SLOWLY ADJUST LEARNING BOUNDS */
621 for (n = 0; n < numChannels; n++) {
622 if (c->cb[i]->learnHigh[n] < high[n])
623 c->cb[i]->learnHigh[n] += 1;
624 if (c->cb[i]->learnLow[n] > low[n])
625 c->cb[i]->learnLow[n] -= 1;
635 int clear_stale_entries(codeBook &c)
636 During learning, after you've learned for some period of time,
637 periodically call this to clear out stale codebook entries
639 c Codebook to clean up
642 number of entries cleared
645 clear_stale_entries (codeBook * c)
/* A codeword is stale when unused for more than half the frames seen
 * since the last reset. */
648 staleThresh = c->t >> 1;
/* NOTE(review): confirm this keep[] scratch array is g_free'd at the
 * end of the function (that code is not visible here). */
650 keep = (int *) g_malloc (sizeof (int) * (c->numEntries));
659 /* SEE WHICH CODEBOOK ENTRIES ARE TOO STALE */
660 for (int i = 0; i < c->numEntries; i++) {
661 if (c->cb[i]->stale > staleThresh)
662 keep[i] = 0; /* Mark for destruction */
664 keep[i] = 1; /* Mark to keep */
668 /* KEEP ONLY THE GOOD */
669 c->t = 0; /* Full reset on stale tracking */
/* Compact the surviving codewords into a freshly allocated array. */
670 foo = (code_element **) g_malloc (sizeof (code_element *) * keepCnt);
672 for (int ii = 0; ii < c->numEntries; ii++) {
675 /* We have to refresh these entries for next clearStale */
676 foo[k]->t_last_update = 0;
684 numCleared = c->numEntries - keepCnt;
685 c->numEntries = keepCnt;
692 uchar background_diff( uchar *p, codeBook &c,
693 int minMod, int maxMod)
694 Given a pixel and a codebook, determine if the pixel is
695 covered by the codebook
697 p Pixel pointer (YUV interleaved)
699 numChannels Number of channels we are testing
700 maxMod Add this (possibly negative) number onto
702 max level when determining if new pixel is foreground
703 minMod Subtract this (possibly negative) number from
704 min level when determining if new pixel is foreground
707 minMod and maxMod must have length numChannels,
708 e.g. 3 channels => minMod[3], maxMod[3]. There is one min and
709 one max threshold per channel.
712 0 => background, 255 => foreground
715 background_diff (unsigned char *p, codeBook * c, int numChannels,
716 int *minMod, int *maxMod)
720 /* SEE IF THIS FITS AN EXISTING CODEWORD */
/* A pixel is background iff some codeword covers it on ALL channels
 * within the widened [min - minMod, max + maxMod] window. */
723 for (i = 0; i < c->numEntries; i++) {
725 for (int n = 0; n < numChannels; n++) {
726 if ((c->cb[i]->min[n] - minMod[n] <= *(p + n)) &&
727 (*(p + n) <= c->cb[i]->max[n] + maxMod[n])) {
728 matchChannel++; /* Found an entry for this channel */
733 if (matchChannel == numChannels) {
734 break; /* Found an entry that matched all channels */
/* No codeword matched: the pixel is foreground. */
737 if (i >= c->numEntries)
746 void find_connected_components(IplImage *mask, int poly1_hull0,
747 float perimScale, int *num,
748 CvRect *bbs, CvPoint *centers)
749 This cleans up the foreground segmentation mask derived from calls
752 mask Is a grayscale (8-bit depth) "raw" mask image that
756 poly1_hull0 If set, approximate connected component by
757 (DEFAULT) polygon, or else convex hull (0)
758 perimScale Len = image (width+height)/perimScale. If contour
759 len < this, delete that contour (DEFAULT: 4)
760 num Maximum number of rectangles and/or centers to
761 return; on return, will contain number filled
763 bbs Pointer to bounding box rectangle vector of
764 length num. (DEFAULT SETTING: NULL)
765 centers Pointer to contour centers vector of length
769 /* Approx.threshold - the bigger it is, the simpler is the boundary */
770 #define CVCONTOUR_APPROX_LEVEL 1
771 /* How many iterations of erosion and/or dilation there should be */
772 #define CVCLOSE_ITR 1
/* NOTE(review): mem_storage and contours are plain pointer parameters;
 * the assignments to them below (cvCreateMemStorage, cvEndFindContours)
 * are not visible to the caller — confirm whether the caller's
 * filter->mem_storage / filter->contours were meant to be updated. */
774 find_connected_components (IplImage * mask, int poly1_hull0, float perimScale,
775 CvMemStorage * mem_storage, CvSeq * contours)
777 CvContourScanner scanner;
782 /* Just some convenience variables */
785 CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
788 CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);
790 /* CLEAN UP RAW MASK */
/* Morphological open then close: remove speckle noise, fill small holes. */
791 cvMorphologyEx (mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
792 cvMorphologyEx (mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR);
793 /* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
794 if (mem_storage == NULL) {
795 cvCreateMemStorage (0);
797 cvClearMemStorage (mem_storage);
800 scanner = cvStartFindContours (mask, mem_storage, sizeof (CvContour),
801 CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0, 0));
803 while ((c = cvFindNextContour (scanner)) != NULL) {
/* NOTE(review): len is the contour AREA (cvContourArea) but the
 * comments and q below describe a perimeter threshold — confirm
 * whether cvContourPerimeter was intended, as in the O'Reilly code. */
805 len = cvContourArea (c, CV_WHOLE_SEQ, 0);
806 /* calculate perimeter len threshold: */
808 q = (mask->height + mask->width) / perimScale;
809 /* Get rid of blob if its perimeter is too small: */
811 cvSubstituteContour (scanner, NULL);
813 /* Smooth its edges if its large enough */
817 /* Polygonal approximation */
819 cvApproxPoly (c, sizeof (CvContour), mem_storage, CV_POLY_APPROX_DP,
820 CVCONTOUR_APPROX_LEVEL, 0);
822 /* Convex Hull of the segmentation */
823 c_new = cvConvexHull2 (c, mem_storage, CV_CLOCKWISE, 1);
825 cvSubstituteContour (scanner, c_new);
829 contours = cvEndFindContours (&scanner);
831 /* PAINT THE FOUND REGIONS BACK INTO THE IMAGE */
833 /* DRAW PROCESSED CONTOURS INTO THE MASK */
834 for (c = contours; c != NULL; c = c->h_next)
835 cvDrawContours (mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8, cvPoint (0,
838 #endif /*ifdef CODE_FROM_OREILLY_BOOK */
/* Creates the MOG machinery for the current caps: cv::Mat headers that
 * wrap the existing cvYUV / cvFG buffers WITHOUT copying (cvarrToMat
 * copyData=false), plus both background-subtractor instances. Both MOG
 * and MOG2 are created regardless of which "method" is selected. */
842 initialise_mog (GstSegmentation * filter)
844 filter->img_input_as_cvMat = (void *) new
845 Mat (cvarrToMat (filter->cvYUV, false));
846 filter->img_fg_as_cvMat = (void *) new Mat (cvarrToMat (filter->cvFG, false));
848 filter->mog = bgsegm::createBackgroundSubtractorMOG ();
849 filter->mog2 = createBackgroundSubtractorMOG2 ();
/* One MOG step: repoint the no-copy Mat wrappers at the current frame's
 * pixel buffers, then apply the subtractor, which writes the foreground
 * mask into cvFG via the wrapper. */
855 run_mog_iteration (GstSegmentation * filter)
857 ((cv::Mat *) filter->img_input_as_cvMat)->data =
858 (uchar *) filter->cvYUV->imageData;
859 ((cv::Mat *) filter->img_fg_as_cvMat)->data =
860 (uchar *) filter->cvFG->imageData;
863 BackgroundSubtractorMOG [1], Gaussian Mixture-based Background/Foreground
864 Segmentation Algorithm. OpenCV MOG implements the algorithm described in [2].
866 [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
867 [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
868 mixture model for real-time tracking with shadow detection", Proc. 2nd
869 European Workshop on Advanced Video-Based Surveillance Systems, 2001
872 filter->mog->apply (*((Mat *) filter->img_input_as_cvMat),
873 *((Mat *) filter->img_fg_as_cvMat), filter->learning_rate);
/* One MOG2 step; same no-copy Mat rebinding pattern as the MOG path. */
879 run_mog2_iteration (GstSegmentation * filter)
881 ((Mat *) filter->img_input_as_cvMat)->data =
882 (uchar *) filter->cvYUV->imageData;
883 ((Mat *) filter->img_fg_as_cvMat)->data = (uchar *) filter->cvFG->imageData;
886 BackgroundSubtractorMOG2 [1], Gaussian Mixture-based Background/Foreground
887 segmentation algorithm. OpenCV MOG2 implements the algorithm described in
890 [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
891 [2] Z.Zivkovic, "Improved adaptive Gaussian mixture model for background
892 subtraction", International Conference Pattern Recognition, UK, August, 2004.
893 [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation per
894 Image Pixel for the Task of Background Subtraction", Pattern Recognition
895 Letters, vol. 27, no. 7, pages 773-780, 2006.
898 filter->mog2->apply (*((Mat *) filter->img_input_as_cvMat),
899 *((Mat *) filter->img_fg_as_cvMat), filter->learning_rate);
/* Counterpart of initialise_mog: deletes the Mat wrapper headers (the
 * underlying pixel buffers are owned by the IplImages, released
 * separately) and drops the refcounted subtractor instances. */
905 finalise_mog (GstSegmentation * filter)
907 delete (Mat *) filter->img_input_as_cvMat;
908 delete (Mat *) filter->img_fg_as_cvMat;
910 filter->mog.release ();
911 filter->mog2.release ();