1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "precomp.hpp"
45 #if !defined HAVE_CUDA || defined(CUDA_DISABLER)
// ---- CPU-only build stubs -------------------------------------------------
// Compiled when the library is built without CUDA (or with CUDA disabled):
// every entry point reports missing GPU support via throw_nogpu(), and
// collectGarbage() is a harmless no-op since no GPU buffers exist.
47 cv::gpu::OpticalFlowDual_TVL1_GPU::OpticalFlowDual_TVL1_GPU() { throw_nogpu(); }
48 void cv::gpu::OpticalFlowDual_TVL1_GPU::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_nogpu(); }
49 void cv::gpu::OpticalFlowDual_TVL1_GPU::collectGarbage() {}
50 void cv::gpu::OpticalFlowDual_TVL1_GPU::procOneScale(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_nogpu(); }
56 using namespace cv::gpu;
// Default-construct the dual TV-L1 optical-flow solver.
// Only the useInitialFlow default is visible in this chunk; the remaining
// algorithm parameters referenced later (tau, lambda, theta, nscales, warps,
// epsilon, iterations) are presumably initialized on the elided lines of this
// constructor -- NOTE(review): confirm against the full file.
58 cv::gpu::OpticalFlowDual_TVL1_GPU::OpticalFlowDual_TVL1_GPU()
// By default no caller-supplied flow estimate is used; operator() starts
// from zero flow at the coarsest pyramid level.
67     useInitialFlow = false;
// Compute dense optical flow from I0 to I1 using the dual TV-L1 method,
// coarse-to-fine over an image pyramid.
// @param I0    first (reference) frame; CV_8UC1 or CV_32FC1 (asserted below)
// @param I1    second frame; same size and type as I0 (asserted below)
// @param flowx output horizontal flow component, CV_32FC1, size of I0
// @param flowy output vertical flow component, CV_32FC1, size of I0
// If useInitialFlow is set, flowx/flowy must already hold a CV_32FC1 initial
// estimate matching I0's size (asserted below).
70 void cv::gpu::OpticalFlowDual_TVL1_GPU::operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy)
72     CV_Assert( I0.type() == CV_8UC1 || I0.type() == CV_32FC1 );
73     CV_Assert( I0.size() == I1.size() );
74     CV_Assert( I0.type() == I1.type() );
75     CV_Assert( !useInitialFlow || (flowx.size() == I0.size() && flowx.type() == CV_32FC1 && flowy.size() == flowx.size() && flowy.type() == flowx.type()) );
76     CV_Assert( nscales > 0 );
78     // allocate memory for the pyramid structure
// Bring both input types to a 32-bit float image on a common [0,255] scale:
// 8-bit input is converted 1:1, while float input (assumed normalized to
// [0,1] -- NOTE(review): confirm) is multiplied by 255.
84     I0.convertTo(I0s[0], CV_32F, I0.depth() == CV_8U ? 1.0 : 255.0);
85     I1.convertTo(I1s[0], CV_32F, I1.depth() == CV_8U ? 1.0 : 255.0);
89     flowx.create(I0.size(), CV_32FC1);
90     flowy.create(I0.size(), CV_32FC1);
// Pre-size all scratch buffers at the full (finest-scale) resolution; each
// coarser scale later takes a Rect sub-view of these in procOneScale.
96     I1x_buf.create(I0.size(), CV_32FC1);
97     I1y_buf.create(I0.size(), CV_32FC1);
99     I1w_buf.create(I0.size(), CV_32FC1);
100     I1wx_buf.create(I0.size(), CV_32FC1);
101     I1wy_buf.create(I0.size(), CV_32FC1);
103     grad_buf.create(I0.size(), CV_32FC1);
104     rho_c_buf.create(I0.size(), CV_32FC1);
106     p11_buf.create(I0.size(), CV_32FC1);
107     p12_buf.create(I0.size(), CV_32FC1);
108     p21_buf.create(I0.size(), CV_32FC1);
109     p22_buf.create(I0.size(), CV_32FC1);
111     diff_buf.create(I0.size(), CV_32FC1);
// Build the image pyramid, scale 0 being the finest.
114     for (int s = 1; s < nscales; ++s)
116         gpu::pyrDown(I0s[s - 1], I0s[s]);
117         gpu::pyrDown(I1s[s - 1], I1s[s]);
// Guard against levels that become too small; the handling body is elided in
// this chunk -- presumably it truncates the number of scales. TODO confirm.
119         if (I0s[s].cols < 16 || I0s[s].rows < 16)
// Propagate the (initial) flow down the pyramid: pyrDown halves the spatial
// resolution, so the flow vectors must also be halved to stay consistent.
127         gpu::pyrDown(u1s[s - 1], u1s[s]);
128         gpu::pyrDown(u2s[s - 1], u2s[s]);
130         gpu::multiply(u1s[s], Scalar::all(0.5), u1s[s]);
131         gpu::multiply(u2s[s], Scalar::all(0.5), u2s[s]);
135     // pyramidal structure for computing the optical flow
// Coarse-to-fine: solve at the coarsest scale first, then upsample the
// result to seed the next finer scale.
136     for (int s = nscales - 1; s >= 0; --s)
138         // compute the optical flow at the current scale
139         procOneScale(I0s[s], I1s[s], u1s[s], u2s[s]);
141         // if this was the last scale, finish now
145         // otherwise, upsample the optical flow
147         // zoom the optical flow for the next finer scale
148         gpu::resize(u1s[s], u1s[s - 1], I0s[s - 1].size());
149         gpu::resize(u2s[s], u2s[s - 1], I0s[s - 1].size());
151         // scale the optical flow with the appropriate zoom factor
// Resolution doubles going one level finer, so flow magnitudes double too.
152         gpu::multiply(u1s[s - 1], Scalar::all(2), u1s[s - 1]);
153         gpu::multiply(u2s[s - 1], Scalar::all(2), u2s[s - 1]);
// Forward declarations of the per-scale TV-L1 device routines used by
// procOneScale (their definitions are not visible in this chunk; presumably
// they live in the corresponding CUDA source file -- TODO confirm).
// centeredGradient: spatial derivatives (dx, dy) of src.
159 void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy);
// warpBackward: warps I1 (and its gradients) by the current flow (u1, u2),
// producing the warped image/gradients plus the grad and rho terms consumed
// by estimateU.
160 void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho);
// estimateU: one primal update of the flow (u1, u2) given the dual variables
// p11..p22; writes a per-pixel update-magnitude map into `error` that the
// caller sums to test convergence.
161 void estimateU(PtrStepSzf I1wx, PtrStepSzf I1wy,
162 PtrStepSzf grad, PtrStepSzf rho_c,
163 PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22,
164 PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf error,
165 float l_t, float theta);
// estimateDualVariables: one dual update of p11..p22 from the current flow,
// with step size taut.
166 void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, float taut);
// Solve the TV-L1 energy at a single pyramid scale.
// @param I0  reference frame at this scale (CV_32FC1, see caller)
// @param I1  second frame at this scale, same size/type as I0 (asserted)
// @param u1  in/out horizontal flow at this scale (created+zeroed if empty)
// @param u2  in/out vertical flow at this scale (created+zeroed if empty)
// Alternates `warps` backward-warping passes with inner primal-dual
// iterations (estimateU / estimateDualVariables) until either the summed
// per-pixel update falls below scaledEpsilon or `iterations` is exhausted.
169 void cv::gpu::OpticalFlowDual_TVL1_GPU::procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2)
171     using namespace tvl1flow;
// Convergence threshold: epsilon is per-pixel, so scale by the pixel count
// because the loop below compares against a sum over the whole image.
173     const double scaledEpsilon = epsilon * epsilon * I0.size().area();
175     CV_DbgAssert( I1.size() == I0.size() );
176     CV_DbgAssert( I1.type() == I0.type() );
177     CV_DbgAssert( u1.empty() || u1.size() == I0.size() );
178     CV_DbgAssert( u2.size() == u1.size() );
// Start from zero flow when no estimate was propagated down to this scale.
182         u1.create(I0.size(), CV_32FC1);
183         u1.setTo(Scalar::all(0));
185         u2.create(I0.size(), CV_32FC1);
186         u2.setTo(Scalar::all(0));
// All scratch data are Rect sub-views of the full-resolution buffers the
// caller allocated, sized to this scale; no per-scale allocation happens.
189     GpuMat I1x = I1x_buf(Rect(0, 0, I0.cols, I0.rows));
190     GpuMat I1y = I1y_buf(Rect(0, 0, I0.cols, I0.rows));
// Spatial gradient of I1, computed once per scale (warping interpolates it).
191     centeredGradient(I1, I1x, I1y);
193     GpuMat I1w = I1w_buf(Rect(0, 0, I0.cols, I0.rows));
194     GpuMat I1wx = I1wx_buf(Rect(0, 0, I0.cols, I0.rows));
195     GpuMat I1wy = I1wy_buf(Rect(0, 0, I0.cols, I0.rows));
197     GpuMat grad = grad_buf(Rect(0, 0, I0.cols, I0.rows));
198     GpuMat rho_c = rho_c_buf(Rect(0, 0, I0.cols, I0.rows));
200     GpuMat p11 = p11_buf(Rect(0, 0, I0.cols, I0.rows));
201     GpuMat p12 = p12_buf(Rect(0, 0, I0.cols, I0.rows));
202     GpuMat p21 = p21_buf(Rect(0, 0, I0.cols, I0.rows));
203     GpuMat p22 = p22_buf(Rect(0, 0, I0.cols, I0.rows));
// Dual variables start at zero every scale.
204     p11.setTo(Scalar::all(0));
205     p12.setTo(Scalar::all(0));
206     p21.setTo(Scalar::all(0));
207     p22.setTo(Scalar::all(0));
209     GpuMat diff = diff_buf(Rect(0, 0, I0.cols, I0.rows));
// Pre-fold the solver parameters into the two step sizes the kernels take.
211     const float l_t = static_cast<float>(lambda * theta);
212     const float taut = static_cast<float>(tau / theta);
// Outer loop: re-linearize the data term by warping I1 toward I0 with the
// current flow, then run the inner primal-dual iterations.
214     for (int warpings = 0; warpings < warps; ++warpings)
216         warpBackward(I0, I1, I1x, I1y, u1, u2, I1w, I1wx, I1wy, grad, rho_c);
// Seed error above any reachable threshold so at least one iteration runs.
218         double error = numeric_limits<double>::max();
219         for (int n = 0; error > scaledEpsilon && n < iterations; ++n)
// Primal step: update (u1, u2); per-pixel update magnitudes land in `diff`.
221             estimateU(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, u1, u2, diff, l_t, static_cast<float>(theta));
// Total update over the image; drives the convergence test above.
223             error = gpu::sum(diff, norm_buf)[0];
// Dual step: update p11..p22 from the new flow.
225             estimateDualVariables(u1, u2, p11, p12, p21, p22, taut);
// Release the solver's internal GPU buffers (pyramids and the *_buf scratch
// mats created by operator()). The body is elided in this chunk -- presumably
// it calls release() on each member buffer; confirm against the full file.
230 void cv::gpu::OpticalFlowDual_TVL1_GPU::collectGarbage()
256 #endif // !defined HAVE_CUDA || defined(CUDA_DISABLER)