bf4b4514adabb40bd67d2513125d06874cc1a037
[platform/upstream/opencv.git] / modules / legacy / src / bgfg_acmmm2003.cpp
1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 //  By downloading, copying, installing or using the software you agree to this license.
6 //  If you do not agree to this license, do not download, install,
7 //  copy or use the software.
8 //
9 //
10 //                        Intel License Agreement
11 //
12 // Copyright (C) 2000, Intel Corporation, all rights reserved.
13 // Third party copyrights are property of their respective owners.
14 //
15 // Redistribution and use in source and binary forms, with or without modification,
16 // are permitted provided that the following conditions are met:
17 //
18 //   * Redistribution's of source code must retain the above copyright notice,
19 //     this list of conditions and the following disclaimer.
20 //
21 //   * Redistribution's in binary form must reproduce the above copyright notice,
22 //     this list of conditions and the following disclaimer in the documentation
23 //     and/or other materials provided with the distribution.
24 //
25 //   * The name of Intel Corporation may not be used to endorse or promote products
26 //     derived from this software without specific prior written permission.
27 //
28 // This software is provided by the copyright holders and contributors "as is" and
29 // any express or implied warranties, including, but not limited to, the implied
30 // warranties of merchantability and fitness for a particular purpose are disclaimed.
31 // In no event shall the Intel Corporation or contributors be liable for any direct,
32 // indirect, incidental, special, exemplary, or consequential damages
33 // (including, but not limited to, procurement of substitute goods or services;
34 // loss of use, data, or profits; or business interruption) however caused
35 // and on any theory of liability, whether in contract, strict liability,
36 // or tort (including negligence or otherwise) arising in any way out of
37 // the use of this software, even if advised of the possibility of such damage.
38 //
39 //M*/
40
41
42 // This file implements the foreground/background pixel
43 // discrimination algorithm described in
44 //
45 //     Foreground Object Detection from Videos Containing Complex Background
46 //     Li, Huan, Gu, Tian 2003 9p
47 //     http://muq.org/~cynbe/bib/foreground-object-detection-from-videos-containing-complex-background.pdf
48
49
50 #include "precomp.hpp"
51
52 #include <math.h>
53 #include <stdio.h>
54 #include <stdlib.h>
55 //#include <algorithm>
56
// Return a pointer to the first occurrence of the largest element in the
// half-open range [start, end).
// Fix: the original advanced 'start' before the loop, so an empty range
// (start == end) skipped the termination test and walked past 'end' (UB).
// For an empty range we now return 'start' (== end); callers must not
// dereference the result in that case.
static double* _cv_max_element( double* start, double* end )
{
    double* best = start;

    for( double* p = start; p != end; ++p )
    {
        // strict '<' keeps the FIRST maximal element, as before
        if( *best < *p )
            best = p;
    }

    return best;
}
68
69 static void CV_CDECL icvReleaseFGDStatModel( CvFGDStatModel** model );
70 static int CV_CDECL icvUpdateFGDStatModel( IplImage* curr_frame,
71                                            CvFGDStatModel*  model,
72                                            double );
73
74 // Function cvCreateFGDStatModel initializes foreground detection process
75 // parameters:
76 //      first_frame - frame from video sequence
77 //      parameters  - (optional) if NULL default parameters of the algorithm will be used
78 //      p_model     - pointer to CvFGDStatModel structure
CV_IMPL CvBGStatModel*
cvCreateFGDStatModel( IplImage* first_frame, CvFGDStatModelParams* parameters )
{
    CvFGDStatModel* p_model = 0;

    CV_FUNCNAME( "cvCreateFGDStatModel" );

    __BEGIN__;

    int i, j, k, pixel_count, buf_size;
    CvFGDStatModelParams params;

    // The model requires a valid 3-channel (BGR) first frame.
    if( !CV_IS_IMAGE(first_frame) )
        CV_ERROR( CV_StsBadArg, "Invalid or NULL first_frame parameter" );

    if (first_frame->nChannels != 3)
        CV_ERROR( CV_StsBadArg, "first_frame must have 3 color channels" );

    // Initialize parameters:
    // With a NULL 'parameters' argument, fall back to the compile-time
    // defaults from the Li et al. 2003 paper; otherwise copy the caller's
    // struct by value so later changes to it do not affect the model.
    if( parameters == NULL )
    {
        params.Lc      = CV_BGFG_FGD_LC;
        params.N1c     = CV_BGFG_FGD_N1C;
        params.N2c     = CV_BGFG_FGD_N2C;

        params.Lcc     = CV_BGFG_FGD_LCC;
        params.N1cc    = CV_BGFG_FGD_N1CC;
        params.N2cc    = CV_BGFG_FGD_N2CC;

        params.delta   = CV_BGFG_FGD_DELTA;

        params.alpha1  = CV_BGFG_FGD_ALPHA_1;
        params.alpha2  = CV_BGFG_FGD_ALPHA_2;
        params.alpha3  = CV_BGFG_FGD_ALPHA_3;

        params.T       = CV_BGFG_FGD_T;
        params.minArea = CV_BGFG_FGD_MINAREA;

        params.is_obj_without_holes = 1;
        params.perform_morphing     = 1;
    }
    else
    {
        params = *parameters;
    }

    // Allocate and zero the model; wire up the virtual release/update
    // entry points used through the generic CvBGStatModel interface.
    CV_CALL( p_model = (CvFGDStatModel*)cvAlloc( sizeof(*p_model) ));
    memset( p_model, 0, sizeof(*p_model) );
    p_model->type = CV_BG_MODEL_FGD;
    p_model->release = (CvReleaseBGStatModel)icvReleaseFGDStatModel;
    p_model->update = (CvUpdateBGStatModel)icvUpdateFGDStatModel;;
    p_model->params = params;

    // Initialize storage pools:
    pixel_count = first_frame->width * first_frame->height;

    buf_size = pixel_count*sizeof(p_model->pixel_stat[0]);
    CV_CALL( p_model->pixel_stat = (CvBGPixelStat*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat, 0, buf_size );

    // Both per-pixel tables are carved out of single contiguous pools
    // anchored at pixel_stat[0]; icvReleaseFGDStatModel frees only these
    // two pool pointers, so the per-pixel entries must never be freed
    // individually.
    buf_size = pixel_count*params.N2c*sizeof(p_model->pixel_stat[0].ctable[0]);
    CV_CALL( p_model->pixel_stat[0].ctable = (CvBGPixelCStatTable*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat[0].ctable, 0, buf_size );

    buf_size = pixel_count*params.N2cc*sizeof(p_model->pixel_stat[0].cctable[0]);
    CV_CALL( p_model->pixel_stat[0].cctable = (CvBGPixelCCStatTable*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat[0].cctable, 0, buf_size );

    // Point each pixel's table slices into the two pools (row-major order).
    for(     i = 0, k = 0; i < first_frame->height; i++ ) {
        for( j = 0;        j < first_frame->width;  j++, k++ )
        {
            p_model->pixel_stat[k].ctable = p_model->pixel_stat[0].ctable + k*params.N2c;
            p_model->pixel_stat[k].cctable = p_model->pixel_stat[0].cctable + k*params.N2cc;
        }
    }

    // Init temporary images:
    // Ftd = frame-to-frame change mask, Fbd = background-to-frame change
    // mask, foreground = final classification mask (all single-channel 8U).
    CV_CALL( p_model->Ftd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
    CV_CALL( p_model->Fbd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
    CV_CALL( p_model->foreground = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));

    CV_CALL( p_model->background = cvCloneImage(first_frame));
    CV_CALL( p_model->prev_frame = cvCloneImage(first_frame));
    CV_CALL( p_model->storage = cvCreateMemStorage());

    __END__;

    // On any CV_ERROR/CV_CALL failure above, tear down whatever was
    // allocated.  release() handles partially-built models because the
    // struct was zero-initialized.
    if( cvGetErrStatus() < 0 )
    {
        CvBGStatModel* base_ptr = (CvBGStatModel*)p_model;

        if( p_model && p_model->release )
            p_model->release( &base_ptr );
        else
            cvFree( &p_model );
        p_model = 0;
    }

    return (CvBGStatModel*)p_model;
}
179
180
181 static void CV_CDECL
182 icvReleaseFGDStatModel( CvFGDStatModel** _model )
183 {
184     CV_FUNCNAME( "icvReleaseFGDStatModel" );
185
186     __BEGIN__;
187
188     if( !_model )
189         CV_ERROR( CV_StsNullPtr, "" );
190
191     if( *_model )
192     {
193         CvFGDStatModel* model = *_model;
194         if( model->pixel_stat )
195         {
196             cvFree( &model->pixel_stat[0].ctable );
197             cvFree( &model->pixel_stat[0].cctable );
198             cvFree( &model->pixel_stat );
199         }
200
201         cvReleaseImage( &model->Ftd );
202         cvReleaseImage( &model->Fbd );
203         cvReleaseImage( &model->foreground );
204         cvReleaseImage( &model->background );
205         cvReleaseImage( &model->prev_frame );
206         cvReleaseMemStorage(&model->storage);
207
208         cvFree( _model );
209     }
210
211     __END__;
212 }
213
214 //  Function cvChangeDetection performs change detection for Foreground detection algorithm
215 // parameters:
216 //      prev_frame -
217 //      curr_frame -
218 //      change_mask -
219 CV_IMPL int
220 cvChangeDetection( IplImage*  prev_frame,
221                    IplImage*  curr_frame,
222                    IplImage*  change_mask )
223 {
224     int i, j, b, x, y, thres;
225     const int PIXELRANGE=256;
226
227     if( !prev_frame
228     ||  !curr_frame
229     ||  !change_mask
230     ||   prev_frame->nChannels  != 3
231     ||   curr_frame->nChannels  != 3
232     ||   change_mask->nChannels != 1
233     ||   prev_frame->depth  != IPL_DEPTH_8U
234     ||   curr_frame->depth  != IPL_DEPTH_8U
235     ||   change_mask->depth != IPL_DEPTH_8U
236     ||   prev_frame->width  != curr_frame->width
237     ||   prev_frame->height != curr_frame->height
238     ||   prev_frame->width  != change_mask->width
239     ||   prev_frame->height != change_mask->height
240     ){
241         return 0;
242     }
243
244     cvZero ( change_mask );
245
246     // All operations per colour
247     for (b=0 ; b<prev_frame->nChannels ; b++) {
248
249         // Create histogram:
250
251         long HISTOGRAM[PIXELRANGE];
252         for (i=0 ; i<PIXELRANGE; i++) HISTOGRAM[i]=0;
253
254         for (y=0 ; y<curr_frame->height ; y++)
255         {
256             uchar* rowStart1 = (uchar*)curr_frame->imageData + y * curr_frame->widthStep + b;
257             uchar* rowStart2 = (uchar*)prev_frame->imageData + y * prev_frame->widthStep + b;
258             for (x=0 ; x<curr_frame->width ; x++, rowStart1+=curr_frame->nChannels, rowStart2+=prev_frame->nChannels) {
259                 int diff = abs( int(*rowStart1) - int(*rowStart2) );
260                 HISTOGRAM[diff]++;
261             }
262         }
263
264         double relativeVariance[PIXELRANGE];
265         for (i=0 ; i<PIXELRANGE; i++) relativeVariance[i]=0;
266
267         for (thres=PIXELRANGE-2; thres>=0 ; thres--)
268         {
269             //            fprintf(stderr, "Iter %d\n", thres);
270             double sum=0;
271             double sqsum=0;
272             int count=0;
273             //            fprintf(stderr, "Iter %d entering loop\n", thres);
274             for (j=thres ; j<PIXELRANGE ; j++) {
275                 sum   += double(j)*double(HISTOGRAM[j]);
276                 sqsum += double(j*j)*double(HISTOGRAM[j]);
277                 count += HISTOGRAM[j];
278             }
279             count = count == 0 ? 1 : count;
280             //            fprintf(stderr, "Iter %d finishing loop\n", thres);
281             double my = sum / count;
282             double sigma = sqrt( sqsum/count - my*my);
283             //            fprintf(stderr, "Iter %d sum=%g sqsum=%g count=%d sigma = %g\n", thres, sum, sqsum, count, sigma);
284             //            fprintf(stderr, "Writing to %x\n", &(relativeVariance[thres]));
285             relativeVariance[thres] = sigma;
286             //            fprintf(stderr, "Iter %d finished\n", thres);
287         }
288
289         // Find maximum:
290         uchar bestThres = 0;
291
292         double* pBestThres = _cv_max_element(relativeVariance, relativeVariance+PIXELRANGE);
293         bestThres = (uchar)(*pBestThres); if (bestThres <10) bestThres=10;
294
295         for (y=0 ; y<prev_frame->height ; y++)
296         {
297             uchar* rowStart1 = (uchar*)(curr_frame->imageData) + y * curr_frame->widthStep + b;
298             uchar* rowStart2 = (uchar*)(prev_frame->imageData) + y * prev_frame->widthStep + b;
299             uchar* rowStart3 = (uchar*)(change_mask->imageData) + y * change_mask->widthStep;
300             for (x = 0; x < curr_frame->width; x++, rowStart1+=curr_frame->nChannels,
301                 rowStart2+=prev_frame->nChannels, rowStart3+=change_mask->nChannels) {
302                 // OR between different color channels
303                 int diff = abs( int(*rowStart1) - int(*rowStart2) );
304                 if ( diff > bestThres)
305                     *rowStart3 |=255;
306             }
307         }
308     }
309
310     return 1;
311 }
312
313
314 #define MIN_PV 1E-10
315
316
317 #define V_C(k,l) ctable[k].v[l]
318 #define PV_C(k) ctable[k].Pv
319 #define PVB_C(k) ctable[k].Pvb
320 #define V_CC(k,l) cctable[k].v[l]
321 #define PV_CC(k) cctable[k].Pv
322 #define PVB_CC(k) cctable[k].Pvb
323
324
325 // Function cvUpdateFGDStatModel updates statistical model and returns number of foreground regions
326 // parameters:
327 //      curr_frame  - current frame from video sequence
328 //      p_model     - pointer to CvFGDStatModel structure
329 static int CV_CDECL
330 icvUpdateFGDStatModel( IplImage* curr_frame, CvFGDStatModel*  model, double )
331 {
332     int            mask_step = model->Ftd->widthStep;
333     CvSeq         *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
334     IplImage*      prev_frame = model->prev_frame;
335     int            region_count = 0;
336     int            FG_pixels_count = 0;
337     int            deltaC  = cvRound(model->params.delta * 256 / model->params.Lc);
338     int            deltaCC = cvRound(model->params.delta * 256 / model->params.Lcc);
339     int            i, j, k, l;
340
341     //clear storages
342     cvClearMemStorage(model->storage);
343     cvZero(model->foreground);
344
345     // From foreground pixel candidates using image differencing
346     // with adaptive thresholding.  The algorithm is from:
347     //
348     //    Thresholding for Change Detection
349     //    Paul L. Rosin 1998 6p
350     //    http://www.cis.temple.edu/~latecki/Courses/CIS750-03/Papers/thresh-iccv.pdf
351     //
352     cvChangeDetection( prev_frame, curr_frame, model->Ftd );
353     cvChangeDetection( model->background, curr_frame, model->Fbd );
354
355     for( i = 0; i < model->Ftd->height; i++ )
356     {
357         for( j = 0; j < model->Ftd->width; j++ )
358         {
359             if( ((uchar*)model->Fbd->imageData)[i*mask_step+j] || ((uchar*)model->Ftd->imageData)[i*mask_step+j] )
360             {
361             float Pb  = 0;
362                 float Pv  = 0;
363                 float Pvb = 0;
364
365                 CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
366
367                 CvBGPixelCStatTable*   ctable = stat->ctable;
368                 CvBGPixelCCStatTable* cctable = stat->cctable;
369
370                 uchar* curr_data = (uchar*)(curr_frame->imageData) + i*curr_frame->widthStep + j*3;
371                 uchar* prev_data = (uchar*)(prev_frame->imageData) + i*prev_frame->widthStep + j*3;
372
373                 int val = 0;
374
375                 // Is it a motion pixel?
376                 if( ((uchar*)model->Ftd->imageData)[i*mask_step+j] )
377                 {
378             if( !stat->is_trained_dyn_model ) {
379
380                         val = 1;
381
382             } else {
383
384                         // Compare with stored CCt vectors:
385                         for( k = 0;  PV_CC(k) > model->params.alpha2 && k < model->params.N1cc;  k++ )
386                         {
387                             if ( abs( V_CC(k,0) - prev_data[0]) <=  deltaCC &&
388                                  abs( V_CC(k,1) - prev_data[1]) <=  deltaCC &&
389                                  abs( V_CC(k,2) - prev_data[2]) <=  deltaCC &&
390                                  abs( V_CC(k,3) - curr_data[0]) <=  deltaCC &&
391                                  abs( V_CC(k,4) - curr_data[1]) <=  deltaCC &&
392                                  abs( V_CC(k,5) - curr_data[2]) <=  deltaCC)
393                             {
394                                 Pv += PV_CC(k);
395                                 Pvb += PVB_CC(k);
396                             }
397                         }
398                         Pb = stat->Pbcc;
399                         if( 2 * Pvb * Pb <= Pv ) val = 1;
400                     }
401                 }
402                 else if( stat->is_trained_st_model )
403                 {
404                     // Compare with stored Ct vectors:
405                     for( k = 0;  PV_C(k) > model->params.alpha2 && k < model->params.N1c;  k++ )
406                     {
407                         if ( abs( V_C(k,0) - curr_data[0]) <=  deltaC &&
408                              abs( V_C(k,1) - curr_data[1]) <=  deltaC &&
409                              abs( V_C(k,2) - curr_data[2]) <=  deltaC )
410                         {
411                             Pv += PV_C(k);
412                             Pvb += PVB_C(k);
413                         }
414                     }
415                     Pb = stat->Pbc;
416                     if( 2 * Pvb * Pb <= Pv ) val = 1;
417                 }
418
419                 // Update foreground:
420                 ((uchar*)model->foreground->imageData)[i*mask_step+j] = (uchar)(val*255);
421                 FG_pixels_count += val;
422
423             }           // end if( change detection...
424         }               // for j...
425     }                   // for i...
426     //end BG/FG classification
427
428     // Foreground segmentation.
429     // Smooth foreground map:
430     if( model->params.perform_morphing ){
431         cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_OPEN,  model->params.perform_morphing );
432         cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_CLOSE, model->params.perform_morphing );
433     }
434
435
436     if( model->params.minArea > 0 || model->params.is_obj_without_holes ){
437
438         // Discard under-size foreground regions:
439     //
440         cvFindContours( model->foreground, model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
441         for( seq = first_seq; seq; seq = seq->h_next )
442         {
443             CvContour* cnt = (CvContour*)seq;
444             if( cnt->rect.width * cnt->rect.height < model->params.minArea ||
445                 (model->params.is_obj_without_holes && CV_IS_SEQ_HOLE(seq)) )
446             {
447                 // Delete under-size contour:
448                 prev_seq = seq->h_prev;
449                 if( prev_seq )
450                 {
451                     prev_seq->h_next = seq->h_next;
452                     if( seq->h_next ) seq->h_next->h_prev = prev_seq;
453                 }
454                 else
455                 {
456                     first_seq = seq->h_next;
457                     if( seq->h_next ) seq->h_next->h_prev = NULL;
458                 }
459             }
460             else
461             {
462                 region_count++;
463             }
464         }
465         model->foreground_regions = first_seq;
466         cvZero(model->foreground);
467         cvDrawContours(model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);
468
469     } else {
470
471         model->foreground_regions = NULL;
472     }
473
474     // Check ALL BG update condition:
475     if( ((float)FG_pixels_count/(model->Ftd->width*model->Ftd->height)) > CV_BGFG_FGD_BG_UPDATE_TRESH )
476     {
477          for( i = 0; i < model->Ftd->height; i++ )
478              for( j = 0; j < model->Ftd->width; j++ )
479              {
480                  CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
481                  stat->is_trained_st_model = stat->is_trained_dyn_model = 1;
482              }
483     }
484
485
486     // Update background model:
487     for( i = 0; i < model->Ftd->height; i++ )
488     {
489         for( j = 0; j < model->Ftd->width; j++ )
490         {
491             CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
492             CvBGPixelCStatTable* ctable = stat->ctable;
493             CvBGPixelCCStatTable* cctable = stat->cctable;
494
495             uchar *curr_data = (uchar*)(curr_frame->imageData)+i*curr_frame->widthStep+j*3;
496             uchar *prev_data = (uchar*)(prev_frame->imageData)+i*prev_frame->widthStep+j*3;
497
498             if( ((uchar*)model->Ftd->imageData)[i*mask_step+j] || !stat->is_trained_dyn_model )
499             {
500                 float alpha = stat->is_trained_dyn_model ? model->params.alpha2 : model->params.alpha3;
501                 float diff = 0;
502                 int dist, min_dist = 2147483647, indx = -1;
503
504                 //update Pb
505                 stat->Pbcc *= (1.f-alpha);
506                 if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
507                 {
508                     stat->Pbcc += alpha;
509                 }
510
511                 // Find best Vi match:
512                 for(k = 0; PV_CC(k) && k < model->params.N2cc; k++ )
513                 {
514                     // Exponential decay of memory
515                     PV_CC(k)  *= (1-alpha);
516                     PVB_CC(k) *= (1-alpha);
517                     if( PV_CC(k) < MIN_PV )
518                     {
519                         PV_CC(k) = 0;
520                         PVB_CC(k) = 0;
521                         continue;
522                     }
523
524                     dist = 0;
525                     for( l = 0; l < 3; l++ )
526                     {
527                         int val = abs( V_CC(k,l) - prev_data[l] );
528                         if( val > deltaCC ) break;
529                         dist += val;
530                         val = abs( V_CC(k,l+3) - curr_data[l] );
531                         if( val > deltaCC) break;
532                         dist += val;
533                     }
534                     if( l == 3 && dist < min_dist )
535                     {
536                         min_dist = dist;
537                         indx = k;
538                     }
539                 }
540
541
542                 if( indx < 0 )
543                 {   // Replace N2th elem in the table by new feature:
544                     indx = model->params.N2cc - 1;
545                     PV_CC(indx) = alpha;
546                     PVB_CC(indx) = alpha;
547                     //udate Vt
548                     for( l = 0; l < 3; l++ )
549                     {
550                         V_CC(indx,l) = prev_data[l];
551                         V_CC(indx,l+3) = curr_data[l];
552                     }
553                 }
554                 else
555                 {   // Update:
556                     PV_CC(indx) += alpha;
557                     if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
558                     {
559                         PVB_CC(indx) += alpha;
560                     }
561                 }
562
563                 //re-sort CCt table by Pv
564                 for( k = 0; k < indx; k++ )
565                 {
566                     if( PV_CC(k) <= PV_CC(indx) )
567                     {
568                         //shift elements
569                         CvBGPixelCCStatTable tmp1, tmp2 = cctable[indx];
570                         for( l = k; l <= indx; l++ )
571                         {
572                             tmp1 = cctable[l];
573                             cctable[l] = tmp2;
574                             tmp2 = tmp1;
575                         }
576                         break;
577                     }
578                 }
579
580
581                 float sum1=0, sum2=0;
582                 //check "once-off" changes
583                 for(k = 0; PV_CC(k) && k < model->params.N1cc; k++ )
584                 {
585                     sum1 += PV_CC(k);
586                     sum2 += PVB_CC(k);
587                 }
588                 if( sum1 > model->params.T ) stat->is_trained_dyn_model = 1;
589
590                 diff = sum1 - stat->Pbcc * sum2;
591                 // Update stat table:
592                 if( diff >  model->params.T )
593                 {
594                     //printf("once off change at motion mode\n");
595                     //new BG features are discovered
596                     for( k = 0; PV_CC(k) && k < model->params.N1cc; k++ )
597                     {
598                         PVB_CC(k) =
599                             (PV_CC(k)-stat->Pbcc*PVB_CC(k))/(1-stat->Pbcc);
600                     }
601                     assert(stat->Pbcc<=1 && stat->Pbcc>=0);
602                 }
603             }
604
605             // Handle "stationary" pixel:
606             if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] )
607             {
608                 float alpha = stat->is_trained_st_model ? model->params.alpha2 : model->params.alpha3;
609                 float diff = 0;
610                 int dist, min_dist = 2147483647, indx = -1;
611
612                 //update Pb
613                 stat->Pbc *= (1.f-alpha);
614                 if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
615                 {
616                     stat->Pbc += alpha;
617                 }
618
619                 //find best Vi match
620                 for( k = 0; k < model->params.N2c; k++ )
621                 {
622                     // Exponential decay of memory
623                     PV_C(k) *= (1-alpha);
624                     PVB_C(k) *= (1-alpha);
625                     if( PV_C(k) < MIN_PV )
626                     {
627                         PV_C(k) = 0;
628                         PVB_C(k) = 0;
629                         continue;
630                     }
631
632                     dist = 0;
633                     for( l = 0; l < 3; l++ )
634                     {
635                         int val = abs( V_C(k,l) - curr_data[l] );
636                         if( val > deltaC ) break;
637                         dist += val;
638                     }
639                     if( l == 3 && dist < min_dist )
640                     {
641                         min_dist = dist;
642                         indx = k;
643                     }
644                 }
645
646                 if( indx < 0 )
647                 {//N2th elem in the table is replaced by a new features
648                     indx = model->params.N2c - 1;
649                     PV_C(indx) = alpha;
650                     PVB_C(indx) = alpha;
651                     //udate Vt
652                     for( l = 0; l < 3; l++ )
653                     {
654                         V_C(indx,l) = curr_data[l];
655                     }
656                 } else
657                 {//update
658                     PV_C(indx) += alpha;
659                     if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
660                     {
661                         PVB_C(indx) += alpha;
662                     }
663                 }
664
665                 //re-sort Ct table by Pv
666                 for( k = 0; k < indx; k++ )
667                 {
668                     if( PV_C(k) <= PV_C(indx) )
669                     {
670                         //shift elements
671                         CvBGPixelCStatTable tmp1, tmp2 = ctable[indx];
672                         for( l = k; l <= indx; l++ )
673                         {
674                             tmp1 = ctable[l];
675                             ctable[l] = tmp2;
676                             tmp2 = tmp1;
677                         }
678                         break;
679                     }
680                 }
681
682                 // Check "once-off" changes:
683                 float sum1=0, sum2=0;
684                 for( k = 0; PV_C(k) && k < model->params.N1c; k++ )
685                 {
686                     sum1 += PV_C(k);
687                     sum2 += PVB_C(k);
688                 }
689                 diff = sum1 - stat->Pbc * sum2;
690                 if( sum1 > model->params.T ) stat->is_trained_st_model = 1;
691
692                 // Update stat table:
693                 if( diff >  model->params.T )
694                 {
695                     //printf("once off change at stat mode\n");
696                     //new BG features are discovered
697                     for( k = 0; PV_C(k) && k < model->params.N1c; k++ )
698                     {
699                         PVB_C(k) = (PV_C(k)-stat->Pbc*PVB_C(k))/(1-stat->Pbc);
700                     }
701                     stat->Pbc = 1 - stat->Pbc;
702                 }
703             }           // if !(change detection) at pixel (i,j)
704
705             // Update the reference BG image:
706             if( !((uchar*)model->foreground->imageData)[i*mask_step+j])
707             {
708                 uchar* ptr = ((uchar*)model->background->imageData) + i*model->background->widthStep+j*3;
709
710                 if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] &&
711                     !((uchar*)model->Fbd->imageData)[i*mask_step+j] )
712                 {
713                     // Apply IIR filter:
714                     for( l = 0; l < 3; l++ )
715                     {
716                         int a = cvRound(ptr[l]*(1 - model->params.alpha1) + model->params.alpha1*curr_data[l]);
717                         ptr[l] = (uchar)a;
718                         //((uchar*)model->background->imageData)[i*model->background->widthStep+j*3+l]*=(1 - model->params.alpha1);
719                         //((uchar*)model->background->imageData)[i*model->background->widthStep+j*3+l] += model->params.alpha1*curr_data[l];
720                     }
721                 }
722                 else
723                 {
724                     // Background change detected:
725                     for( l = 0; l < 3; l++ )
726                     {
727                         //((uchar*)model->background->imageData)[i*model->background->widthStep+j*3+l] = curr_data[l];
728                         ptr[l] = curr_data[l];
729                     }
730                 }
731             }
732         }               // j
733     }                   // i
734
735     // Keep previous frame:
736     cvCopy( curr_frame, model->prev_frame );
737
738     return region_count;
739 }
740
741 /* End of file. */