3 * Copyright (C) 2013 Miguel Casas-Sanchez <miguelecasassanchez@gmail.com>
4 * Except: Parts of code inside the preprocessor define CODE_FROM_OREILLY_BOOK,
5 * which are downloaded from O'Reilly website
6 * [http://examples.oreilly.com/9780596516130/]
7 * and adapted. Its license reads:
9 * Right to use this code in any way you want without warrenty, support or
10 * any guarentee of it working. "
13 * Permission is hereby granted, free of charge, to any person obtaining a
14 * copy of this software and associated documentation files (the "Software"),
15 * to deal in the Software without restriction, including without limitation
16 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 * and/or sell copies of the Software, and to permit persons to whom the
18 * Software is furnished to do so, subject to the following conditions:
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29 * DEALINGS IN THE SOFTWARE.
31 * Alternatively, the contents of this file may be used under the
32 * GNU Lesser General Public License Version 2.1 (the "LGPL"), in
33 * which case the following provisions apply instead of the ones
36 * This library is free software; you can redistribute it and/or
37 * modify it under the terms of the GNU Library General Public
38 * License as published by the Free Software Foundation; either
39 * version 2 of the License, or (at your option) any later version.
41 * This library is distributed in the hope that it will be useful,
42 * but WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * Library General Public License for more details.
46 * You should have received a copy of the GNU Library General Public
47 * License along with this library; if not, write to the
48 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
49 * Boston, MA 02110-1301, USA.
51 #define CODE_FROM_OREILLY_BOOK
54 * SECTION:element-segmentation
56 * This element creates and updates a fg/bg model using one of several approaches.
57 * The one called "codebook" refers to the codebook approach following the opencv
58 * O'Reilly book [1] implementation of the algorithm described in K. Kim,
59 * T. H. Chalidabhongse, D. Harwood and L. Davis [2]. BackgroundSubtractorMOG [3],
60 * or MOG for short, refers to a Gaussian Mixture-based Background/Foreground
61 * Segmentation Algorithm. OpenCV MOG implements the algorithm described in [4].
62 * BackgroundSubtractorMOG2 [5], refers to another Gaussian Mixture-based
63 * Background/Foreground segmentation algorithm. OpenCV MOG2 implements the
64 * algorithm described in [6] and [7].
66 * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary Bradski
67 * and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
68 * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
69 * Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005.
70 * [3] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
71 * [4] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
72 * mixture model for real-time tracking with shadow detection", Proc. 2nd
73 * European Workshop on Advanced Video-Based Surveillance Systems, 2001
74 * [5] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
75 * [6] Z.Zivkovic, "Improved adaptive Gausian mixture model for background
76 * subtraction", International Conference Pattern Recognition, UK, August, 2004.
77 * [7] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
78 * per Image Pixel for the Task of Background Subtraction", Pattern Recognition
79 * Letters, vol. 27, no. 7, pages 773-780, 2006.
82 * <title>Example launch line</title>
84 * gst-launch-1.0 v4l2src device=/dev/video0 ! videoconvert ! segmentation test-mode=true method=2 ! videoconvert ! ximagesink
93 #include "gstsegmentation.h"
94 #include <opencv2/imgproc/imgproc_c.h>
96 GST_DEBUG_CATEGORY_STATIC (gst_segmentation_debug);
97 #define GST_CAT_DEFAULT gst_segmentation_debug
100 #if (CV_MAJOR_VERSION >= 3)
101 using namespace cv::bgsegm;
103 /* Filter signals and args */
122 } GstSegmentationMethod;
124 #define DEFAULT_TEST_MODE FALSE
125 #define DEFAULT_METHOD METHOD_MOG2
126 #define DEFAULT_LEARNING_RATE 0.01
128 #define GST_TYPE_SEGMENTATION_METHOD (gst_segmentation_method_get_type ())
/* GType getter for the "method" property enum: registers the
 * GstSegmentationMethod enum type once (cached in a static) and
 * returns it. Nick names ("codebook", "mog", "mog2") are what the
 * user passes on the gst-launch command line. */
130 gst_segmentation_method_get_type (void)
132 static GType etype = 0;
134 static const GEnumValue values[] = {
135 {METHOD_BOOK, "Codebook-based segmentation (Bradski2008)", "codebook"},
136 {METHOD_MOG, "Mixture-of-Gaussians segmentation (Bowden2001)", "mog"},
137 {METHOD_MOG2, "Mixture-of-Gaussians segmentation (Zivkovic2004)", "mog2"},
140 etype = g_enum_register_static ("GstSegmentationMethod", values);
145 G_DEFINE_TYPE (GstSegmentation, gst_segmentation, GST_TYPE_OPENCV_VIDEO_FILTER);
147 static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
150 GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
152 static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
155 GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
158 static void gst_segmentation_set_property (GObject * object, guint prop_id,
159 const GValue * value, GParamSpec * pspec);
160 static void gst_segmentation_get_property (GObject * object, guint prop_id,
161 GValue * value, GParamSpec * pspec);
163 static GstFlowReturn gst_segmentation_transform_ip (GstOpencvVideoFilter * filter,
164 GstBuffer * buffer, IplImage * img);
166 static gboolean gst_segmentation_stop (GstBaseTransform * basesrc);
167 static gboolean gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
168 gint in_height, gint in_depth, gint in_channels,
169 gint out_width, gint out_height, gint out_depth, gint out_channels);
170 static void gst_segmentation_release_all_pointers (GstSegmentation * filter);
172 /* Codebook algorithm + connected components functions*/
173 static int update_codebook (unsigned char *p, codeBook * c,
174 unsigned *cbBounds, int numChannels);
175 static int clear_stale_entries (codeBook * c);
176 static unsigned char background_diff (unsigned char *p, codeBook * c,
177 int numChannels, int *minMod, int *maxMod);
178 static void find_connected_components (IplImage * mask, int poly1_hull0,
179 float perimScale, CvMemStorage * mem_storage, CvSeq * contours);
181 /* MOG (Mixture-of-Gaussians functions */
182 static int initialise_mog (GstSegmentation * filter);
183 static int run_mog_iteration (GstSegmentation * filter);
184 static int run_mog2_iteration (GstSegmentation * filter);
185 static int finalise_mog (GstSegmentation * filter);
187 /* initialize the segmentation's class */
/* Class init: wires up GObject property accessors, the stop vfunc of
 * GstBaseTransform, the in-place transform and caps vfuncs of
 * GstOpencvVideoFilter, installs the three properties (method,
 * test-mode, learning-rate), and registers element metadata and the
 * static RGBA pad templates. */
189 gst_segmentation_class_init (GstSegmentationClass * klass)
191 GObjectClass *gobject_class;
192 GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
193 GstBaseTransformClass *basesrc_class = GST_BASE_TRANSFORM_CLASS (klass);
194 GstOpencvVideoFilterClass *cvfilter_class =
195 (GstOpencvVideoFilterClass *) klass;
197 gobject_class = (GObjectClass *) klass;
199 gobject_class->set_property = gst_segmentation_set_property;
200 gobject_class->get_property = gst_segmentation_get_property;
202 basesrc_class->stop = gst_segmentation_stop;
204 cvfilter_class->cv_trans_ip_func = gst_segmentation_transform_ip;
205 cvfilter_class->cv_set_caps = gst_segmentation_set_caps;
207 g_object_class_install_property (gobject_class, PROP_METHOD,
208 g_param_spec_enum ("method",
209 "Segmentation method to use",
210 "Segmentation method to use",
211 GST_TYPE_SEGMENTATION_METHOD, DEFAULT_METHOD,
212 (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
214 g_object_class_install_property (gobject_class, PROP_TEST_MODE,
215 g_param_spec_boolean ("test-mode", "test-mode",
216 "If true, the output RGB is overwritten with the calculated foreground (white color)",
/* NOTE(review): the (GParamFlags) cast on the next two lines is doubled;
 * harmless, but one of the two casts should be removed. */
217 DEFAULT_TEST_MODE, (GParamFlags)
218 (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
220 g_object_class_install_property (gobject_class, PROP_LEARNING_RATE,
221 g_param_spec_float ("learning-rate", "learning-rate",
222 "Speed with which a motionless foreground pixel would become background (inverse of number of frames)",
223 0, 1, DEFAULT_LEARNING_RATE, (GParamFlags) (G_PARAM_READWRITE)));
225 gst_element_class_set_static_metadata (element_class,
226 "Foreground/background video sequence segmentation",
227 "Filter/Effect/Video",
/* NOTE(review): "Foregound" is misspelled in this user-visible description
 * string; fixing it changes element metadata, so flagged rather than edited. */
228 "Create a Foregound/Background mask applying a particular algorithm",
229 "Miguel Casas-Sanchez <miguelecasassanchez@gmail.com>");
231 gst_element_class_add_static_pad_template (element_class, &src_factory);
232 gst_element_class_add_static_pad_template (element_class, &sink_factory);
236 /* initialize the new element
237 * instantiate pads and add them to element
238 * set pad calback functions
239 * initialize instance structure
/* Instance init: set property defaults, reset the frame counter, and
 * request in-place operation from the base OpenCV video filter. */
242 gst_segmentation_init (GstSegmentation * filter)
244 filter->method = DEFAULT_METHOD;
245 filter->test_mode = DEFAULT_TEST_MODE;
246 filter->framecount = 0;
247 filter->learning_rate = DEFAULT_LEARNING_RATE;
248 gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER (filter), TRUE);
/* GObject property setter for method / test-mode / learning-rate.
 * Note: learning-rate is read by set_caps to derive learning_interval,
 * so changes take full effect on the next caps negotiation. */
252 gst_segmentation_set_property (GObject * object, guint prop_id,
253 const GValue * value, GParamSpec * pspec)
255 GstSegmentation *filter = GST_SEGMENTATION (object);
259 filter->method = g_value_get_enum (value);
262 filter->test_mode = g_value_get_boolean (value);
264 case PROP_LEARNING_RATE:
265 filter->learning_rate = g_value_get_float (value);
268 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* GObject property getter; mirrors the setter above. */
274 gst_segmentation_get_property (GObject * object, guint prop_id,
275 GValue * value, GParamSpec * pspec)
277 GstSegmentation *filter = GST_SEGMENTATION (object);
281 g_value_set_enum (value, filter->method);
284 g_value_set_boolean (value, filter->test_mode);
286 case PROP_LEARNING_RATE:
287 g_value_set_float (value, filter->learning_rate);
290 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* Caps vfunc: (re)allocates all per-frame scratch images and the
 * per-pixel codebook array for the negotiated input size. Any existing
 * allocations are released first, so renegotiation does not leak. */
296 gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
297 gint in_height, gint in_depth, gint in_channels,
298 gint out_width, gint out_height, gint out_depth, gint out_channels)
300 GstSegmentation *segmentation = GST_SEGMENTATION (filter);
303 size = cvSize (in_width, in_height);
304 segmentation->width = in_width;
305 segmentation->height = in_height;
/* cvRGB non-NULL means a previous negotiation allocated everything;
 * free it all before reallocating at the new size. */
307 if (NULL != segmentation->cvRGB)
308 gst_segmentation_release_all_pointers (segmentation);
310 segmentation->cvRGB = cvCreateImage (size, IPL_DEPTH_8U, 3);
311 segmentation->cvYUV = cvCreateImage (size, IPL_DEPTH_8U, 3);
/* Single-channel foreground mask, zeroed so the first frames start
 * with an all-background mask. */
313 segmentation->cvFG = cvCreateImage (size, IPL_DEPTH_8U, 1);
314 cvZero (segmentation->cvFG);
/* Per-channel scratch planes used by cvSplit/cvMerge in transform_ip. */
316 segmentation->ch1 = cvCreateImage (size, IPL_DEPTH_8U, 1);
317 segmentation->ch2 = cvCreateImage (size, IPL_DEPTH_8U, 1);
318 segmentation->ch3 = cvCreateImage (size, IPL_DEPTH_8U, 1);
320 /* Codebook method */
/* One codeBook per pixel (+1 slack element); each starts empty. */
321 segmentation->TcodeBook = (codeBook *)
322 g_malloc (sizeof (codeBook) *
323 (segmentation->width * segmentation->height + 1));
324 for (int j = 0; j < segmentation->width * segmentation->height; j++) {
325 segmentation->TcodeBook[j].numEntries = 0;
326 segmentation->TcodeBook[j].t = 0;
/* learning_interval = frames between background-model refreshes;
 * inverse of the learning-rate property. */
328 segmentation->learning_interval = (int) (1.0 / segmentation->learning_rate);
330 /* Mixture-of-Gaussians (mog) methods */
331 initialise_mog (segmentation);
/* GstBaseTransform stop vfunc: release everything set_caps allocated.
 * cvRGB doubles as the "was anything allocated?" flag, matching the
 * check in set_caps. */
338 gst_segmentation_stop (GstBaseTransform * basesrc)
340 GstSegmentation *filter = GST_SEGMENTATION (basesrc);
342 if (filter->cvRGB != NULL)
343 gst_segmentation_release_all_pointers (filter);
/* Free all images, the contour memory storage, the codebook array and
 * the MOG/MOG2 state. cvReleaseImage/cvReleaseMemStorage NULL-out the
 * pointers they are given, so the cvRGB!=NULL guards elsewhere work. */
349 gst_segmentation_release_all_pointers (GstSegmentation * filter)
351 cvReleaseImage (&filter->cvRGB);
352 cvReleaseImage (&filter->cvYUV);
353 cvReleaseImage (&filter->cvFG);
354 cvReleaseImage (&filter->ch1);
355 cvReleaseImage (&filter->ch2);
356 cvReleaseImage (&filter->ch3);
358 cvReleaseMemStorage (&filter->mem_storage);
360 g_free (filter->TcodeBook);
361 finalise_mog (filter);
/* In-place transform: converts the RGBA input to YCrCb, runs the
 * selected segmentation method (codebook / MOG / MOG2) to update
 * cvFG, then writes the result back: either the whole RGB is replaced
 * by the mask (test-mode) or the mask is merged into the alpha channel
 * of the output frame. */
365 gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter, GstBuffer * buffer,
368 GstSegmentation *filter = GST_SEGMENTATION (cvfilter);
371 filter->framecount++;
373 /* Image preprocessing: color space conversion etc */
374 cvCvtColor (img, filter->cvRGB, CV_RGBA2RGB);
375 cvCvtColor (filter->cvRGB, filter->cvYUV, CV_RGB2YCrCb);
377 /* Create and update a fg/bg model using a codebook approach following the
378 * opencv O'Reilly book [1] implementation of the algo described in [2].
380 * [1] Learning OpenCV: Computer Vision with the OpenCV Library by Gary
381 * Bradski and Adrian Kaehler, Published by O'Reilly Media, October 3, 2008
382 * [2] "Real-time Foreground-Background Segmentation using Codebook Model",
383 * Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005. */
384 if (METHOD_BOOK == filter->method) {
/* Hard-coded codebook tuning constants from the O'Reilly book:
 * learning bounds per YCrCb channel and fg/bg decision margins. */
385 unsigned cbBounds[3] = { 10, 5, 5 };
386 int minMod[3] = { 20, 20, 20 }, maxMod[3] = {
/* First 30 frames are treated as pure background learning. */
389 if (filter->framecount < 30) {
390 /* Learning background phase: update_codebook on every frame */
391 for (j = 0; j < filter->width * filter->height; j++) {
392 update_codebook ((unsigned char *) filter->cvYUV->imageData + j * 3,
393 (codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
396 /* this updating is responsible for FG becoming BG again */
397 if (filter->framecount % filter->learning_interval == 0) {
398 for (j = 0; j < filter->width * filter->height; j++) {
399 update_codebook ((uchar *) filter->cvYUV->imageData + j * 3,
400 (codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
/* Every 60 frames, prune codewords that have gone stale. */
403 if (filter->framecount % 60 == 0) {
404 for (j = 0; j < filter->width * filter->height; j++)
405 clear_stale_entries ((codeBook *) & (filter->TcodeBook[j]));
/* Per-pixel classification: background_diff returns 255 for
 * foreground, 0 for background; result goes into the cvFG mask. */
408 for (j = 0; j < filter->width * filter->height; j++) {
410 ((uchar *) filter->cvYUV->imageData + j * 3,
411 (codeBook *) & (filter->TcodeBook[j]), 3, minMod, maxMod)) {
412 filter->cvFG->imageData[j] = (char) 255;
414 filter->cvFG->imageData[j] = 0;
419 /* 3rd param is the smallest area to show: (w+h)/param , in pixels */
420 find_connected_components (filter->cvFG, 1, 10000,
421 filter->mem_storage, filter->contours);
424 /* Create the foreground and background masks using BackgroundSubtractorMOG [1],
425 * Gaussian Mixture-based Background/Foreground segmentation algorithm. OpenCV
426 * MOG implements the algorithm described in [2].
428 * [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
429 * [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
430 * mixture model for real-time tracking with shadow detection", Proc. 2nd
431 * European Workshop on Advanced Video-Based Surveillance Systems, 2001
433 else if (METHOD_MOG == filter->method) {
434 run_mog_iteration (filter);
436 /* Create the foreground and background masks using BackgroundSubtractorMOG2
437 * [1], Gaussian Mixture-based Background/Foreground segmentation algorithm.
438 * OpenCV MOG2 implements the algorithm described in [2] and [3].
440 * [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
441 * [2] Z.Zivkovic, "Improved adaptive Gausian mixture model for background
442 * subtraction", International Conference Pattern Recognition, UK, Aug 2004.
443 * [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation
444 * per Image Pixel for the Task of Background Subtraction", Pattern
445 * Recognition Letters, vol. 27, no. 7, pages 773-780, 2006. */
446 else if (METHOD_MOG2 == filter->method) {
447 run_mog2_iteration (filter);
450 /* if we want to test_mode, just overwrite the output */
451 if (filter->test_mode) {
452 cvCvtColor (filter->cvFG, filter->cvRGB, CV_GRAY2RGB);
454 cvSplit (filter->cvRGB, filter->ch1, filter->ch2, filter->ch3, NULL);
456 cvSplit (img, filter->ch1, filter->ch2, filter->ch3, NULL);
458 /* copy anyhow the fg/bg to the alpha channel in the output image */
459 cvMerge (filter->ch1, filter->ch2, filter->ch3, filter->cvFG, img);
465 /* entry point to initialize the plug-in
466 * initialize the plug-in itself
467 * register the element factories and other features
/* Plugin entry point: set up the debug category and register the
 * "segmentation" element with the plugin. */
470 gst_segmentation_plugin_init (GstPlugin * plugin)
472 GST_DEBUG_CATEGORY_INIT (gst_segmentation_debug, "segmentation",
473 0, "Performs Foreground/Background segmentation in video sequences");
475 return gst_element_register (plugin, "segmentation", GST_RANK_NONE,
476 GST_TYPE_SEGMENTATION);
481 #ifdef CODE_FROM_OREILLY_BOOK /* See license at the beginning of the page */
483 int update_codebook(uchar *p, codeBook &c, unsigned cbBounds)
484 Updates the codebook entry with a new data point
486 p Pointer to a YUV or HSI pixel
487 c Codebook for this pixel
488 cbBounds Learning bounds for codebook (Rule of thumb: 10)
489 numChannels Number of color channels we're learning
492 cvBounds must be of length equal to numChannels
/* O'Reilly-book codebook update for a single pixel: match the pixel
 * against existing codewords, grow the matched codeword's min/max,
 * track staleness for every codeword, append a new codeword if no
 * match was found, and finally relax the learning bounds of the
 * codeword at index i (the matched one, or the newly created one). */
498 update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
/* high/low: per-channel learning window around the pixel value.
 * NOTE(review): fixed size 3 — assumes numChannels <= 3 (callers in
 * this file always pass 3). */
502 unsigned int high[3], low[3];
506 for (n = 0; n < numChannels; n++) {
507 high[n] = p[n] + cbBounds[n];
/* Clamp low[] so the unsigned subtraction cannot wrap below zero. */
511 if (p[n] > cbBounds[n])
512 low[n] = p[n] - cbBounds[n];
517 /* SEE IF THIS FITS AN EXISTING CODEWORD */
518 for (i = 0; i < c->numEntries; i++) {
520 for (n = 0; n < numChannels; n++) {
521 if ((c->cb[i]->learnLow[n] <= *(p + n)) &&
522 /* Found an entry for this channel */
523 (*(p + n) <= c->cb[i]->learnHigh[n])) {
527 if (matchChannel == numChannels) { /* If an entry was found */
528 c->cb[i]->t_last_update = c->t;
529 /* adjust this codeword for the first channel */
530 for (n = 0; n < numChannels; n++) {
531 if (c->cb[i]->max[n] < *(p + n)) {
532 c->cb[i]->max[n] = *(p + n);
533 } else if (c->cb[i]->min[n] > *(p + n)) {
534 c->cb[i]->min[n] = *(p + n);
540 /* OVERHEAD TO TRACK POTENTIAL STALE ENTRIES */
541 for (int s = 0; s < c->numEntries; s++) {
542 /* Track which codebook entries are going stale: */
543 int negRun = c->t - c->cb[s]->t_last_update;
544 if (c->cb[s]->stale < negRun)
545 c->cb[s]->stale = negRun;
547 /* ENTER A NEW CODEWORD IF NEEDED */
548 if (i == c->numEntries) { /* if no existing codeword found, make one */
/* Grow the pointer array by one: allocate, copy old pointers, then
 * allocate the new code_element itself. */
550 (code_element **) g_malloc (sizeof (code_element *) *
551 (c->numEntries + 1));
552 for (int ii = 0; ii < c->numEntries; ii++) {
553 foo[ii] = c->cb[ii]; /* copy all pointers */
555 foo[c->numEntries] = (code_element *) g_malloc (sizeof (code_element));
/* Initialize the new codeword from the current pixel and window. */
559 for (n = 0; n < numChannels; n++) {
560 c->cb[c->numEntries]->learnHigh[n] = high[n];
561 c->cb[c->numEntries]->learnLow[n] = low[n];
562 c->cb[c->numEntries]->max[n] = *(p + n);
563 c->cb[c->numEntries]->min[n] = *(p + n);
565 c->cb[c->numEntries]->t_last_update = c->t;
566 c->cb[c->numEntries]->stale = 0;
569 /* SLOWLY ADJUST LEARNING BOUNDS */
/* Widen the learning window by at most 1 per call toward high/low,
 * so bounds adapt gradually rather than jumping. */
570 for (n = 0; n < numChannels; n++) {
571 if (c->cb[i]->learnHigh[n] < high[n])
572 c->cb[i]->learnHigh[n] += 1;
573 if (c->cb[i]->learnLow[n] > low[n])
574 c->cb[i]->learnLow[n] -= 1;
584 int clear_stale_entries(codeBook &c)
585 During learning, after you've learned for some period of time,
586 periodically call this to clear out stale codebook entries
588 c Codebook to clean up
591 number of entries cleared
/* Drop codewords whose stale count exceeds half the codebook's
 * observation time c->t, compacting the surviving pointers into a
 * freshly allocated array and resetting their staleness bookkeeping.
 * Returns the number of entries cleared. */
594 clear_stale_entries (codeBook * c)
/* staleThresh = c->t / 2: an entry unused for more than half the
 * observed time is considered stale. */
596 int staleThresh = c->t >> 1;
597 int *keep = (int *) g_malloc (sizeof (int) * (c->numEntries));
602 /* SEE WHICH CODEBOOK ENTRIES ARE TOO STALE */
603 for (int i = 0; i < c->numEntries; i++) {
604 if (c->cb[i]->stale > staleThresh)
605 keep[i] = 0; /* Mark for destruction */
607 keep[i] = 1; /* Mark to keep */
611 /* KEEP ONLY THE GOOD */
612 c->t = 0; /* Full reset on stale tracking */
613 foo = (code_element **) g_malloc (sizeof (code_element *) * keepCnt);
615 for (int ii = 0; ii < c->numEntries; ii++) {
618 /* We have to refresh these entries for next clearStale */
619 foo[k]->t_last_update = 0;
627 numCleared = c->numEntries - keepCnt;
628 c->numEntries = keepCnt;
635 uchar background_diff( uchar *p, codeBook &c,
636 int minMod, int maxMod)
637 Given a pixel and a codebook, determine if the pixel is
638 covered by the codebook
640 p Pixel pointer (YUV interleaved)
642 numChannels Number of channels we are testing
643 maxMod Add this (possibly negative) number onto
645 max level when determining if new pixel is foreground
646 minMod Subract this (possibly negative) number from
647 min level when determining if new pixel is foreground
650 minMod and maxMod must have length numChannels,
651 e.g. 3 channels => minMod[3], maxMod[3]. There is one min and
652 one max threshold per channel.
655 0 => background, 255 => foreground
/* Classify one pixel against its codebook: the pixel is background if
 * some codeword covers it on every channel within [min-minMod,
 * max+maxMod]. Per the header comment above, returns 0 for background
 * and 255 for foreground (no codeword matched all channels). */
658 background_diff (unsigned char *p, codeBook * c, int numChannels,
659 int *minMod, int *maxMod)
662 /* SEE IF THIS FITS AN EXISTING CODEWORD */
664 for (i = 0; i < c->numEntries; i++) {
666 for (int n = 0; n < numChannels; n++) {
667 if ((c->cb[i]->min[n] - minMod[n] <= *(p + n)) &&
668 (*(p + n) <= c->cb[i]->max[n] + maxMod[n])) {
669 matchChannel++; /* Found an entry for this channel */
674 if (matchChannel == numChannels) {
675 break; /* Found an entry that matched all channels */
/* Loop ran to completion without a full match => foreground. */
678 if (i >= c->numEntries)
687 void find_connected_components(IplImage *mask, int poly1_hull0,
688 float perimScale, int *num,
689 CvRect *bbs, CvPoint *centers)
690 This cleans up the foreground segmentation mask derived from calls
693 mask Is a grayscale (8-bit depth) "raw" mask image that
697 poly1_hull0 If set, approximate connected component by
698 (DEFAULT) polygon, or else convex hull (0)
699 perimScale Len = image (width+height)/perimScale. If contour
700 len < this, delete that contour (DEFAULT: 4)
701 num Maximum number of rectangles and/or centers to
702 return; on return, will contain number filled
704 bbs Pointer to bounding box rectangle vector of
705 length num. (DEFAULT SETTING: NULL)
706 centers Pointer to contour centers vector of length
710 /* Approx.threshold - the bigger it is, the simpler is the boundary */
711 #define CVCONTOUR_APPROX_LEVEL 1
712 /* How many iterations of erosion and/or dilation there should be */
713 #define CVCLOSE_ITR 1
/* Clean up the raw foreground mask: morphological open/close, then
 * find contours, delete those below the perimeter threshold, smooth
 * the survivors (polygon approximation or convex hull), and repaint
 * them filled into the mask. */
715 find_connected_components (IplImage * mask, int poly1_hull0, float perimScale,
/* NOTE(review): mem_storage and contours are passed by value, so the
 * allocation at "731" below and the cvEndFindContours result are never
 * visible to the caller — storage appears to be recreated/leaked per
 * call when the caller's pointer is NULL. Verify against callers. */
716 CvMemStorage * mem_storage, CvSeq * contours)
718 CvContourScanner scanner;
721 /* Just some convenience variables */
722 const CvScalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
723 const CvScalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);
725 /* CLEAN UP RAW MASK */
726 cvMorphologyEx (mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
727 cvMorphologyEx (mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR);
728 /* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
729 if (mem_storage == NULL) {
730 cvCreateMemStorage (0);
732 cvClearMemStorage (mem_storage);
735 scanner = cvStartFindContours (mask, mem_storage, sizeof (CvContour),
736 CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0, 0));
738 while ((c = cvFindNextContour (scanner)) != NULL) {
/* NOTE(review): cvContourArea returns the AREA, but "len" is compared
 * against a perimeter-style threshold q below — likely meant
 * cvArcLength/cvContourPerimeter. Confirm intended behavior. */
739 double len = cvContourArea (c, CV_WHOLE_SEQ, 0);
740 /* calculate perimeter len threshold: */
741 double q = (mask->height + mask->width) / perimScale;
742 /* Get rid of blob if its perimeter is too small: */
744 cvSubstituteContour (scanner, NULL);
746 /* Smooth its edges if its large enough */
749 /* Polygonal approximation */
751 cvApproxPoly (c, sizeof (CvContour), mem_storage, CV_POLY_APPROX_DP,
752 CVCONTOUR_APPROX_LEVEL, 0);
754 /* Convex Hull of the segmentation */
755 c_new = cvConvexHull2 (c, mem_storage, CV_CLOCKWISE, 1);
757 cvSubstituteContour (scanner, c_new);
761 contours = cvEndFindContours (&scanner);
763 /* PAINT THE FOUND REGIONS BACK INTO THE IMAGE */
765 /* DRAW PROCESSED CONTOURS INTO THE MASK */
766 for (c = contours; c != NULL; c = c->h_next)
767 cvDrawContours (mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8, cvPoint (0,
770 #endif /*ifdef CODE_FROM_OREILLY_BOOK */
/* Allocate the MOG/MOG2 state: wrap the existing IplImages in cv::Mat
 * headers (copyData=false, so they alias the IplImage buffers) and
 * create both background subtractors. Only one of mog/mog2 is used per
 * frame, depending on the "method" property. */
774 initialise_mog (GstSegmentation * filter)
776 filter->img_input_as_cvMat = (void *) new Mat (cvarrToMat (filter->cvYUV, false));
777 filter->img_fg_as_cvMat = (void *) new Mat (cvarrToMat(filter->cvFG, false));
/* OpenCV >= 3 uses factory functions returning smart pointers; the
 * OpenCV 2 path stores raw heap objects through void*. */
779 #if (CV_MAJOR_VERSION >= 3)
780 filter->mog = bgsegm::createBackgroundSubtractorMOG ();
781 filter->mog2 = createBackgroundSubtractorMOG2 ();
783 filter->mog = (void *) new BackgroundSubtractorMOG ();
784 filter->mog2 = (void *) new BackgroundSubtractorMOG2 ();
/* One MOG iteration: repoint the cached Mat headers at the current
 * cvYUV/cvFG buffers, then apply the subtractor, writing the
 * foreground mask into cvFG with the configured learning rate. */
791 run_mog_iteration (GstSegmentation * filter)
793 ((cv::Mat *) filter->img_input_as_cvMat)->data =
794 (uchar *) filter->cvYUV->imageData;
795 ((cv::Mat *) filter->img_fg_as_cvMat)->data =
796 (uchar *) filter->cvFG->imageData;
799 BackgroundSubtractorMOG [1], Gaussian Mixture-based Background/Foreground
800 Segmentation Algorithm. OpenCV MOG implements the algorithm described in [2].
802 [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
803 [2] P. KadewTraKuPong and R. Bowden, "An improved adaptive background
804 mixture model for real-time tracking with shadow detection", Proc. 2nd
805 European Workshop on Advanced Video-Based Surveillance Systems, 2001
808 #if (CV_MAJOR_VERSION >= 3)
809 filter->mog->apply (*((Mat *) filter->
810 img_input_as_cvMat), *((Mat *) filter->img_fg_as_cvMat),
811 filter->learning_rate);
/* OpenCV 2 path: the subtractor's operator() does the same job. */
813 (*((BackgroundSubtractorMOG *) filter->mog)) (*((Mat *) filter->
814 img_input_as_cvMat), *((Mat *) filter->img_fg_as_cvMat),
815 filter->learning_rate);
/* One MOG2 iteration: same pattern as run_mog_iteration but with the
 * MOG2 subtractor. */
822 run_mog2_iteration (GstSegmentation * filter)
824 ((Mat *) filter->img_input_as_cvMat)->data =
825 (uchar *) filter->cvYUV->imageData;
826 ((Mat *) filter->img_fg_as_cvMat)->data =
827 (uchar *) filter->cvFG->imageData;
830 BackgroundSubtractorMOG2 [1], Gaussian Mixture-based Background/Foreground
831 segmentation algorithm. OpenCV MOG2 implements the algorithm described in
834 [1] http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
835 [2] Z.Zivkovic, "Improved adaptive Gausian mixture model for background
836 subtraction", International Conference Pattern Recognition, UK, August, 2004.
837 [3] Z.Zivkovic, F. van der Heijden, "Efficient Adaptive Density Estimation per
838 Image Pixel for the Task of Background Subtraction", Pattern Recognition
839 Letters, vol. 27, no. 7, pages 773-780, 2006.
842 #if (CV_MAJOR_VERSION >= 3)
843 filter->mog2->apply (*((Mat *) filter->
844 img_input_as_cvMat), *((Mat *) filter->img_fg_as_cvMat),
845 filter->learning_rate);
/* NOTE(review): filter->mog2 was created as BackgroundSubtractorMOG2
 * (see initialise_mog) but is cast to BackgroundSubtractorMOG here.
 * This only works via the shared virtual operator() of the common
 * base; casting to BackgroundSubtractorMOG2 would be correct. */
847 (*((BackgroundSubtractorMOG *) filter->mog2)) (*((Mat *) filter->
848 img_input_as_cvMat), *((Mat *) filter->img_fg_as_cvMat),
849 filter->learning_rate);
/* Tear down what initialise_mog created: delete the Mat headers, then
 * release the smart pointers (OpenCV >= 3) or delete the raw heap
 * subtractors (OpenCV 2). */
856 finalise_mog (GstSegmentation * filter)
858 delete (Mat *) filter->img_input_as_cvMat;
859 delete (Mat *) filter->img_fg_as_cvMat;
860 #if (CV_MAJOR_VERSION >= 3)
861 filter->mog.release ();
862 filter->mog2.release ();
864 delete (BackgroundSubtractorMOG *) filter->mog;
865 delete (BackgroundSubtractorMOG2 *) filter->mog2;