3 * Copyright (C) 2013 Miguel Casas-Sanchez <miguelecasassanchez@gmail.com>
4 * Except: Parts of code inside the preprocessor define CODE_FROM_OREILLY_BOOK,
5 * which are downloaded from O'Reilly website
6 * [http://examples.oreilly.com/9780596516130/]
7 * and adapted. Its license reads:
9 * Right to use this code in any way you want without warrenty, support or
10 * any guarentee of it working. "
13 * Permission is hereby granted, free of charge, to any person obtaining a
14 * copy of this software and associated documentation files (the "Software"),
15 * to deal in the Software without restriction, including without limitation
16 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 * and/or sell copies of the Software, and to permit persons to whom the
18 * Software is furnished to do so, subject to the following conditions:
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29 * DEALINGS IN THE SOFTWARE.
31 * Alternatively, the contents of this file may be used under the
32 * GNU Lesser General Public License Version 2.1 (the "LGPL"), in
33 * which case the following provisions apply instead of the ones
36 * This library is free software; you can redistribute it and/or
37 * modify it under the terms of the GNU Library General Public
38 * License as published by the Free Software Foundation; either
39 * version 2 of the License, or (at your option) any later version.
41 * This library is distributed in the hope that it will be useful,
42 * but WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * Library General Public License for more details.
46 * You should have received a copy of the GNU Library General Public
47 * License along with this library; if not, write to the
48 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
49 * Boston, MA 02110-1301, USA.
51 #define CODE_FROM_OREILLY_BOOK
54 * SECTION:element-segmentation
56 * This element creates and updates a fg/bg model using one of several approaches.
57 * The one called "codebook" refers to the codebook approach following the opencv
58 * O'Reilly book [1] implementation of the algorithm described in K. Kim,
59 * T. H. Chalidabhongse, D. Harwood and L. Davis [2]. BackgroundSubtractorMOG [3],
60 * or MOG for short, refers to a Gaussian Mixture-based Background/Foreground
61 * Segmentation Algorithm. OpenCV MOG implements the algorithm described in [4].
62 * BackgroundSubtractorMOG2 [5], refers to another Gaussian Mixture-based
63 * Background/Foreground segmentation algorithm. OpenCV MOG2 implements the
64 * algorithm described in [6] and [7].
66 * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary Bradski
67 * and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
68 * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
69 * Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005.
70 * [3] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
71 * [4] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
72 * mixture model for real-time tracking with shadow detection", Proc. 2nd
73 * European Workshop on Advanced Video-Based Surveillance Systems, 2001
74 * [5] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
75 * [6] Z.Zivkovic, "Improved adaptive Gaussian mixture model for background
76 * subtraction", International Conference Pattern Recognition, UK, August, 2004.
77 * [7] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
78 * per Image Pixel for the Task of Background Subtraction", Pattern Recognition
79 * Letters, vol. 27, no. 7, pages 773-780, 2006.
82 * <title>Example launch line</title>
84 * gst-launch-1.0 v4l2src device=/dev/video0 ! videoconvert ! segmentation test-mode=true method=2 ! videoconvert ! ximagesink
93 #include "gstsegmentation.h"
94 #include <opencv2/imgproc/imgproc_c.h>
96 GST_DEBUG_CATEGORY_STATIC (gst_segmentation_debug);
97 #define GST_CAT_DEFAULT gst_segmentation_debug
100 using namespace cv::bgsegm;
102 /* Filter signals and args */
121 } GstSegmentationMethod;
123 #define DEFAULT_TEST_MODE FALSE
124 #define DEFAULT_METHOD METHOD_MOG2
125 #define DEFAULT_LEARNING_RATE 0.01
127 #define GST_TYPE_SEGMENTATION_METHOD (gst_segmentation_method_get_type ())
129 gst_segmentation_method_get_type (void)
131 static GType etype = 0;
133 static const GEnumValue values[] = {
134 {METHOD_BOOK, "Codebook-based segmentation (Bradski2008)", "codebook"},
135 {METHOD_MOG, "Mixture-of-Gaussians segmentation (Bowden2001)", "mog"},
136 {METHOD_MOG2, "Mixture-of-Gaussians segmentation (Zivkovic2004)", "mog2"},
139 etype = g_enum_register_static ("GstSegmentationMethod", values);
144 G_DEFINE_TYPE (GstSegmentation, gst_segmentation, GST_TYPE_OPENCV_VIDEO_FILTER);
146 static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
149 GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
151 static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
154 GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
157 static void gst_segmentation_set_property (GObject * object, guint prop_id,
158 const GValue * value, GParamSpec * pspec);
159 static void gst_segmentation_get_property (GObject * object, guint prop_id,
160 GValue * value, GParamSpec * pspec);
162 static GstFlowReturn gst_segmentation_transform_ip (GstOpencvVideoFilter * filter,
163 GstBuffer * buffer, IplImage * img);
165 static gboolean gst_segmentation_stop (GstBaseTransform * basesrc);
166 static gboolean gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
167 gint in_height, gint in_depth, gint in_channels,
168 gint out_width, gint out_height, gint out_depth, gint out_channels);
169 static void gst_segmentation_release_all_pointers (GstSegmentation * filter);
171 /* Codebook algorithm + connected components functions*/
172 static int update_codebook (unsigned char *p, codeBook * c,
173 unsigned *cbBounds, int numChannels);
174 static int clear_stale_entries (codeBook * c);
175 static unsigned char background_diff (unsigned char *p, codeBook * c,
176 int numChannels, int *minMod, int *maxMod);
177 static void find_connected_components (IplImage * mask, int poly1_hull0,
178 float perimScale, CvMemStorage * mem_storage, CvSeq * contours);
180 /* MOG (Mixture-of-Gaussians functions */
181 static int initialise_mog (GstSegmentation * filter);
182 static int run_mog_iteration (GstSegmentation * filter);
183 static int run_mog2_iteration (GstSegmentation * filter);
184 static int finalise_mog (GstSegmentation * filter);
186 /* initialize the segmentation's class */
188 gst_segmentation_class_init (GstSegmentationClass * klass)
190 GObjectClass *gobject_class;
191 GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
192 GstBaseTransformClass *basesrc_class = GST_BASE_TRANSFORM_CLASS (klass);
193 GstOpencvVideoFilterClass *cvfilter_class =
194 (GstOpencvVideoFilterClass *) klass;
196 gobject_class = (GObjectClass *) klass;
198 gobject_class->set_property = gst_segmentation_set_property;
199 gobject_class->get_property = gst_segmentation_get_property;
201 basesrc_class->stop = gst_segmentation_stop;
203 cvfilter_class->cv_trans_ip_func = gst_segmentation_transform_ip;
204 cvfilter_class->cv_set_caps = gst_segmentation_set_caps;
206 g_object_class_install_property (gobject_class, PROP_METHOD,
207 g_param_spec_enum ("method",
208 "Segmentation method to use",
209 "Segmentation method to use",
210 GST_TYPE_SEGMENTATION_METHOD, DEFAULT_METHOD,
211 (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
213 g_object_class_install_property (gobject_class, PROP_TEST_MODE,
214 g_param_spec_boolean ("test-mode", "test-mode",
215 "If true, the output RGB is overwritten with the calculated foreground (white color)",
216 DEFAULT_TEST_MODE, (GParamFlags)
217 (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
219 g_object_class_install_property (gobject_class, PROP_LEARNING_RATE,
220 g_param_spec_float ("learning-rate", "learning-rate",
221 "Speed with which a motionless foreground pixel would become background (inverse of number of frames)",
222 0, 1, DEFAULT_LEARNING_RATE, (GParamFlags) (G_PARAM_READWRITE)));
224 gst_element_class_set_static_metadata (element_class,
225 "Foreground/background video sequence segmentation",
226 "Filter/Effect/Video",
227 "Create a Foregound/Background mask applying a particular algorithm",
228 "Miguel Casas-Sanchez <miguelecasassanchez@gmail.com>");
230 gst_element_class_add_static_pad_template (element_class, &src_factory);
231 gst_element_class_add_static_pad_template (element_class, &sink_factory);
235 /* initialize the new element
236 * instantiate pads and add them to element
237 * set pad calback functions
238 * initialize instance structure
241 gst_segmentation_init (GstSegmentation * filter)
243 filter->method = DEFAULT_METHOD;
244 filter->test_mode = DEFAULT_TEST_MODE;
245 filter->framecount = 0;
246 filter->learning_rate = DEFAULT_LEARNING_RATE;
247 gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER (filter), TRUE);
251 gst_segmentation_set_property (GObject * object, guint prop_id,
252 const GValue * value, GParamSpec * pspec)
254 GstSegmentation *filter = GST_SEGMENTATION (object);
258 filter->method = g_value_get_enum (value);
261 filter->test_mode = g_value_get_boolean (value);
263 case PROP_LEARNING_RATE:
264 filter->learning_rate = g_value_get_float (value);
267 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
273 gst_segmentation_get_property (GObject * object, guint prop_id,
274 GValue * value, GParamSpec * pspec)
276 GstSegmentation *filter = GST_SEGMENTATION (object);
280 g_value_set_enum (value, filter->method);
283 g_value_set_boolean (value, filter->test_mode);
285 case PROP_LEARNING_RATE:
286 g_value_set_float (value, filter->learning_rate);
289 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
295 gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
296 gint in_height, gint in_depth, gint in_channels,
297 gint out_width, gint out_height, gint out_depth, gint out_channels)
299 GstSegmentation *segmentation = GST_SEGMENTATION (filter);
302 size = cvSize (in_width, in_height);
303 segmentation->width = in_width;
304 segmentation->height = in_height;
306 if (NULL != segmentation->cvRGB)
307 gst_segmentation_release_all_pointers (segmentation);
309 segmentation->cvRGB = cvCreateImage (size, IPL_DEPTH_8U, 3);
310 segmentation->cvYUV = cvCreateImage (size, IPL_DEPTH_8U, 3);
312 segmentation->cvFG = cvCreateImage (size, IPL_DEPTH_8U, 1);
313 cvZero (segmentation->cvFG);
315 segmentation->ch1 = cvCreateImage (size, IPL_DEPTH_8U, 1);
316 segmentation->ch2 = cvCreateImage (size, IPL_DEPTH_8U, 1);
317 segmentation->ch3 = cvCreateImage (size, IPL_DEPTH_8U, 1);
319 /* Codebook method */
320 segmentation->TcodeBook = (codeBook *)
321 g_malloc (sizeof (codeBook) *
322 (segmentation->width * segmentation->height + 1));
323 for (int j = 0; j < segmentation->width * segmentation->height; j++) {
324 segmentation->TcodeBook[j].numEntries = 0;
325 segmentation->TcodeBook[j].t = 0;
327 segmentation->learning_interval = (int) (1.0 / segmentation->learning_rate);
329 /* Mixture-of-Gaussians (mog) methods */
330 initialise_mog (segmentation);
337 gst_segmentation_stop (GstBaseTransform * basesrc)
339 GstSegmentation *filter = GST_SEGMENTATION (basesrc);
341 if (filter->cvRGB != NULL)
342 gst_segmentation_release_all_pointers (filter);
348 gst_segmentation_release_all_pointers (GstSegmentation * filter)
350 cvReleaseImage (&filter->cvRGB);
351 cvReleaseImage (&filter->cvYUV);
352 cvReleaseImage (&filter->cvFG);
353 cvReleaseImage (&filter->ch1);
354 cvReleaseImage (&filter->ch2);
355 cvReleaseImage (&filter->ch3);
357 cvReleaseMemStorage (&filter->mem_storage);
359 g_free (filter->TcodeBook);
360 finalise_mog (filter);
364 gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter, GstBuffer * buffer,
367 GstSegmentation *filter = GST_SEGMENTATION (cvfilter);
370 filter->framecount++;
372 /* Image preprocessing: color space conversion etc */
373 cvCvtColor (img, filter->cvRGB, CV_RGBA2RGB);
374 cvCvtColor (filter->cvRGB, filter->cvYUV, CV_RGB2YCrCb);
376 /* Create and update a fg/bg model using a codebook approach following the
377 * opencv O'Reilly book [1] implementation of the algo described in [2].
379 * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary
380 * Bradski and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
381 * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
382 * Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005. */
383 if (METHOD_BOOK == filter->method) {
384 unsigned cbBounds[3] = { 10, 5, 5 };
385 int minMod[3] = { 20, 20, 20 }, maxMod[3] = {
388 if (filter->framecount < 30) {
389 /* Learning background phase: update_codebook on every frame */
390 for (j = 0; j < filter->width * filter->height; j++) {
391 update_codebook ((unsigned char *) filter->cvYUV->imageData + j * 3,
392 (codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
395 /* this updating is responsible for FG becoming BG again */
396 if (filter->framecount % filter->learning_interval == 0) {
397 for (j = 0; j < filter->width * filter->height; j++) {
398 update_codebook ((uchar *) filter->cvYUV->imageData + j * 3,
399 (codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
402 if (filter->framecount % 60 == 0) {
403 for (j = 0; j < filter->width * filter->height; j++)
404 clear_stale_entries ((codeBook *) & (filter->TcodeBook[j]));
407 for (j = 0; j < filter->width * filter->height; j++) {
409 ((uchar *) filter->cvYUV->imageData + j * 3,
410 (codeBook *) & (filter->TcodeBook[j]), 3, minMod, maxMod)) {
411 filter->cvFG->imageData[j] = (char) 255;
413 filter->cvFG->imageData[j] = 0;
418 /* 3rd param is the smallest area to show: (w+h)/param , in pixels */
419 find_connected_components (filter->cvFG, 1, 10000,
420 filter->mem_storage, filter->contours);
423 /* Create the foreground and background masks using BackgroundSubtractorMOG [1],
424 * Gaussian Mixture-based Background/Foreground segmentation algorithm. OpenCV
425 * MOG implements the algorithm described in [2].
427 * [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
428 * [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
429 * mixture model for real-time tracking with shadow detection", Proc. 2nd
430 * European Workshop on Advanced Video-Based Surveillance Systems, 2001
432 else if (METHOD_MOG == filter->method) {
433 run_mog_iteration (filter);
435 /* Create the foreground and background masks using BackgroundSubtractorMOG2
436 * [1], Gaussian Mixture-based Background/Foreground segmentation algorithm.
437 * OpenCV MOG2 implements the algorithm described in [2] and [3].
439 * [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
440 * [2] Z.Zivkovic, "Improved adaptive Gausian mixture model for background
441 * subtraction", International Conference Pattern Recognition, UK, Aug 2004.
442 * [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
443 * per Image Pixel for the Task of Background Subtraction", Pattern
444 * Recognition Letters, vol. 27, no. 7, pages 773-780, 2006. */
445 else if (METHOD_MOG2 == filter->method) {
446 run_mog2_iteration (filter);
449 /* if we want to test_mode, just overwrite the output */
450 if (filter->test_mode) {
451 cvCvtColor (filter->cvFG, filter->cvRGB, CV_GRAY2RGB);
453 cvSplit (filter->cvRGB, filter->ch1, filter->ch2, filter->ch3, NULL);
455 cvSplit (img, filter->ch1, filter->ch2, filter->ch3, NULL);
457 /* copy anyhow the fg/bg to the alpha channel in the output image */
458 cvMerge (filter->ch1, filter->ch2, filter->ch3, filter->cvFG, img);
464 /* entry point to initialize the plug-in
465 * initialize the plug-in itself
466 * register the element factories and other features
469 gst_segmentation_plugin_init (GstPlugin * plugin)
471 GST_DEBUG_CATEGORY_INIT (gst_segmentation_debug, "segmentation",
472 0, "Performs Foreground/Background segmentation in video sequences");
474 return gst_element_register (plugin, "segmentation", GST_RANK_NONE,
475 GST_TYPE_SEGMENTATION);
480 #ifdef CODE_FROM_OREILLY_BOOK /* See license at the beginning of the page */
482 int update_codebook(uchar *p, codeBook &c, unsigned cbBounds)
483 Updates the codebook entry with a new data point
485 p Pointer to a YUV or HSI pixel
486 c Codebook for this pixel
487 cbBounds Learning bounds for codebook (Rule of thumb: 10)
488 numChannels Number of color channels we're learning
491 cvBounds must be of length equal to numChannels
497 update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
501 unsigned int high[3], low[3];
505 for (n = 0; n < numChannels; n++) {
506 high[n] = p[n] + cbBounds[n];
510 if (p[n] > cbBounds[n])
511 low[n] = p[n] - cbBounds[n];
516 /* SEE IF THIS FITS AN EXISTING CODEWORD */
517 for (i = 0; i < c->numEntries; i++) {
519 for (n = 0; n < numChannels; n++) {
520 if ((c->cb[i]->learnLow[n] <= *(p + n)) &&
521 /* Found an entry for this channel */
522 (*(p + n) <= c->cb[i]->learnHigh[n])) {
526 if (matchChannel == numChannels) { /* If an entry was found */
527 c->cb[i]->t_last_update = c->t;
528 /* adjust this codeword for the first channel */
529 for (n = 0; n < numChannels; n++) {
530 if (c->cb[i]->max[n] < *(p + n)) {
531 c->cb[i]->max[n] = *(p + n);
532 } else if (c->cb[i]->min[n] > *(p + n)) {
533 c->cb[i]->min[n] = *(p + n);
539 /* OVERHEAD TO TRACK POTENTIAL STALE ENTRIES */
540 for (int s = 0; s < c->numEntries; s++) {
541 /* Track which codebook entries are going stale: */
542 int negRun = c->t - c->cb[s]->t_last_update;
543 if (c->cb[s]->stale < negRun)
544 c->cb[s]->stale = negRun;
546 /* ENTER A NEW CODEWORD IF NEEDED */
547 if (i == c->numEntries) { /* if no existing codeword found, make one */
549 (code_element **) g_malloc (sizeof (code_element *) *
550 (c->numEntries + 1));
551 for (int ii = 0; ii < c->numEntries; ii++) {
552 foo[ii] = c->cb[ii]; /* copy all pointers */
554 foo[c->numEntries] = (code_element *) g_malloc (sizeof (code_element));
558 for (n = 0; n < numChannels; n++) {
559 c->cb[c->numEntries]->learnHigh[n] = high[n];
560 c->cb[c->numEntries]->learnLow[n] = low[n];
561 c->cb[c->numEntries]->max[n] = *(p + n);
562 c->cb[c->numEntries]->min[n] = *(p + n);
564 c->cb[c->numEntries]->t_last_update = c->t;
565 c->cb[c->numEntries]->stale = 0;
568 /* SLOWLY ADJUST LEARNING BOUNDS */
569 for (n = 0; n < numChannels; n++) {
570 if (c->cb[i]->learnHigh[n] < high[n])
571 c->cb[i]->learnHigh[n] += 1;
572 if (c->cb[i]->learnLow[n] > low[n])
573 c->cb[i]->learnLow[n] -= 1;
583 int clear_stale_entries(codeBook &c)
584 During learning, after you've learned for some period of time,
585 periodically call this to clear out stale codebook entries
587 c Codebook to clean up
590 number of entries cleared
593 clear_stale_entries (codeBook * c)
595 int staleThresh = c->t >> 1;
596 int *keep = (int *) g_malloc (sizeof (int) * (c->numEntries));
601 /* SEE WHICH CODEBOOK ENTRIES ARE TOO STALE */
602 for (int i = 0; i < c->numEntries; i++) {
603 if (c->cb[i]->stale > staleThresh)
604 keep[i] = 0; /* Mark for destruction */
606 keep[i] = 1; /* Mark to keep */
610 /* KEEP ONLY THE GOOD */
611 c->t = 0; /* Full reset on stale tracking */
612 foo = (code_element **) g_malloc (sizeof (code_element *) * keepCnt);
614 for (int ii = 0; ii < c->numEntries; ii++) {
617 /* We have to refresh these entries for next clearStale */
618 foo[k]->t_last_update = 0;
626 numCleared = c->numEntries - keepCnt;
627 c->numEntries = keepCnt;
634 uchar background_diff( uchar *p, codeBook &c,
635 int minMod, int maxMod)
636 Given a pixel and a codebook, determine if the pixel is
637 covered by the codebook
639 p Pixel pointer (YUV interleaved)
641 numChannels Number of channels we are testing
642 maxMod Add this (possibly negative) number onto
644 max level when determining if new pixel is foreground
645 minMod Subract this (possibly negative) number from
646 min level when determining if new pixel is foreground
649 minMod and maxMod must have length numChannels,
650 e.g. 3 channels => minMod[3], maxMod[3]. There is one min and
651 one max threshold per channel.
654 0 => background, 255 => foreground
657 background_diff (unsigned char *p, codeBook * c, int numChannels,
658 int *minMod, int *maxMod)
661 /* SEE IF THIS FITS AN EXISTING CODEWORD */
663 for (i = 0; i < c->numEntries; i++) {
665 for (int n = 0; n < numChannels; n++) {
666 if ((c->cb[i]->min[n] - minMod[n] <= *(p + n)) &&
667 (*(p + n) <= c->cb[i]->max[n] + maxMod[n])) {
668 matchChannel++; /* Found an entry for this channel */
673 if (matchChannel == numChannels) {
674 break; /* Found an entry that matched all channels */
677 if (i >= c->numEntries)
686 void find_connected_components(IplImage *mask, int poly1_hull0,
687 float perimScale, int *num,
688 CvRect *bbs, CvPoint *centers)
689 This cleans up the foreground segmentation mask derived from calls
692 mask Is a grayscale (8-bit depth) "raw" mask image that
696 poly1_hull0 If set, approximate connected component by
697 (DEFAULT) polygon, or else convex hull (0)
698 perimScale Len = image (width+height)/perimScale. If contour
699 len < this, delete that contour (DEFAULT: 4)
700 num Maximum number of rectangles and/or centers to
701 return; on return, will contain number filled
703 bbs Pointer to bounding box rectangle vector of
704 length num. (DEFAULT SETTING: NULL)
705 centers Pointer to contour centers vector of length
709 /* Approx.threshold - the bigger it is, the simpler is the boundary */
710 #define CVCONTOUR_APPROX_LEVEL 1
711 /* How many iterations of erosion and/or dilation there should be */
712 #define CVCLOSE_ITR 1
714 find_connected_components (IplImage * mask, int poly1_hull0, float perimScale,
715 CvMemStorage * mem_storage, CvSeq * contours)
717 CvContourScanner scanner;
720 /* Just some convenience variables */
721 const CvScalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
722 const CvScalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);
724 /* CLEAN UP RAW MASK */
725 cvMorphologyEx (mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
726 cvMorphologyEx (mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR);
727 /* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
728 if (mem_storage == NULL) {
729 mem_storage = cvCreateMemStorage (0);
731 cvClearMemStorage (mem_storage);
734 scanner = cvStartFindContours (mask, mem_storage, sizeof (CvContour),
735 CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0, 0));
737 while ((c = cvFindNextContour (scanner)) != NULL) {
738 double len = cvContourArea (c, CV_WHOLE_SEQ, 0);
739 /* calculate perimeter len threshold: */
740 double q = (mask->height + mask->width) / perimScale;
741 /* Get rid of blob if its perimeter is too small: */
743 cvSubstituteContour (scanner, NULL);
745 /* Smooth its edges if its large enough */
748 /* Polygonal approximation */
750 cvApproxPoly (c, sizeof (CvContour), mem_storage, CV_POLY_APPROX_DP,
751 CVCONTOUR_APPROX_LEVEL, 0);
753 /* Convex Hull of the segmentation */
754 c_new = cvConvexHull2 (c, mem_storage, CV_CLOCKWISE, 1);
756 cvSubstituteContour (scanner, c_new);
760 contours = cvEndFindContours (&scanner);
762 /* PAINT THE FOUND REGIONS BACK INTO THE IMAGE */
764 /* DRAW PROCESSED CONTOURS INTO THE MASK */
765 for (c = contours; c != NULL; c = c->h_next)
766 cvDrawContours (mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8, cvPoint (0,
769 #endif /*ifdef CODE_FROM_OREILLY_BOOK */
773 initialise_mog (GstSegmentation * filter)
775 filter->img_input_as_cvMat = (void *) new Mat (cvarrToMat (filter->cvYUV, false));
776 filter->img_fg_as_cvMat = (void *) new Mat (cvarrToMat(filter->cvFG, false));
778 filter->mog = bgsegm::createBackgroundSubtractorMOG ();
779 filter->mog2 = createBackgroundSubtractorMOG2 ();
785 run_mog_iteration (GstSegmentation * filter)
787 ((cv::Mat *) filter->img_input_as_cvMat)->data =
788 (uchar *) filter->cvYUV->imageData;
789 ((cv::Mat *) filter->img_fg_as_cvMat)->data =
790 (uchar *) filter->cvFG->imageData;
793 BackgroundSubtractorMOG [1], Gaussian Mixture-based Background/Foreground
794 Segmentation Algorithm. OpenCV MOG implements the algorithm described in [2].
796 [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
797 [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
798 mixture model for real-time tracking with shadow detection", Proc. 2nd
799 European Workshop on Advanced Video-Based Surveillance Systems, 2001
802 filter->mog->apply (*((Mat *) filter->
803 img_input_as_cvMat), *((Mat *) filter->img_fg_as_cvMat),
804 filter->learning_rate);
810 run_mog2_iteration (GstSegmentation * filter)
812 ((Mat *) filter->img_input_as_cvMat)->data =
813 (uchar *) filter->cvYUV->imageData;
814 ((Mat *) filter->img_fg_as_cvMat)->data =
815 (uchar *) filter->cvFG->imageData;
818 BackgroundSubtractorMOG2 [1], Gaussian Mixture-based Background/Foreground
819 segmentation algorithm. OpenCV MOG2 implements the algorithm described in
822 [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
823 [2] Z.Zivkovic, "Improved adaptive Gausian mixture model for background
824 subtraction", International Conference Pattern Recognition, UK, August, 2004.
825 [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation per
826 Image Pixel for the Task of Background Subtraction", Pattern Recognition
827 Letters, vol. 27, no. 7, pages 773-780, 2006.
830 filter->mog2->apply (*((Mat *) filter->
831 img_input_as_cvMat), *((Mat *) filter->img_fg_as_cvMat),
832 filter->learning_rate);
838 finalise_mog (GstSegmentation * filter)
840 delete (Mat *) filter->img_input_as_cvMat;
841 delete (Mat *) filter->img_fg_as_cvMat;
843 filter->mog.release ();
844 filter->mog2.release ();