From ce2fd7fec90769b3907aa5e73dbe71e5be130e54 Mon Sep 17 00:00:00 2001
From: Vladislav Vinogradov
Date: Wed, 13 Feb 2013 15:50:05 +0400
Subject: [PATCH] added dual tvl1 optical flow gpu implementation

---
 modules/gpu/include/opencv2/gpu/gpu.hpp |  89 +++++++++
 modules/gpu/perf/perf_video.cpp         |  50 +++++
 modules/gpu/src/cuda/tvl1flow.cu        | 332 ++++++++++++++++++++++++++++++++
 modules/gpu/src/tvl1flow.cpp            | 256 ++++++++++++++++++++++++
 modules/gpu/test/test_optflow.cpp       |  44 +++++
 5 files changed, 771 insertions(+)
 create mode 100644 modules/gpu/src/cuda/tvl1flow.cu
 create mode 100644 modules/gpu/src/tvl1flow.cpp

diff --git a/modules/gpu/include/opencv2/gpu/gpu.hpp b/modules/gpu/include/opencv2/gpu/gpu.hpp
index 60cff99..a574d71 100644
--- a/modules/gpu/include/opencv2/gpu/gpu.hpp
+++ b/modules/gpu/include/opencv2/gpu/gpu.hpp
@@ -1982,6 +1982,95 @@ private:
 };
 
+// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method
+//
+// see reference:
+//   [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
+//   [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
+class CV_EXPORTS OpticalFlowDual_TVL1_GPU
+{
+public:
+    OpticalFlowDual_TVL1_GPU();
+
+    void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy);
+
+    void collectGarbage();
+
+    /**
+     * Time step of the numerical scheme.
+     */
+    double tau;
+
+    /**
+     * Weight parameter for the data term, attachment parameter.
+     * This is the most relevant parameter, which determines the smoothness of the output.
+     * The smaller this parameter is, the smoother the solutions we obtain.
+     * It depends on the range of motions of the images, so its value should be adapted to each image sequence.
+     */
+    double lambda;
+
+    /**
+     * Weight parameter for (u - v)^2, tightness parameter.
+     * It serves as a link between the attachment and the regularization terms.
+     * In theory, it should have a small value in order to maintain both parts in correspondence.
+     * The method is stable for a large range of values of this parameter.
+     */
+    double theta;
+
+    /**
+     * Number of scales used to create the pyramid of images.
+     */
+    int nscales;
+
+    /**
+     * Number of warpings per scale.
+     * Represents the number of times that I1(x+u0) and grad( I1(x+u0) ) are computed per scale.
+     * This is a parameter that assures the stability of the method.
+     * It also affects the running time, so it is a compromise between speed and accuracy.
+     */
+    int warps;
+
+    /**
+     * Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time.
+     * A small value will yield more accurate solutions at the expense of a slower convergence.
+     */
+    double epsilon;
+
+    /**
+     * Stopping criterion iterations number used in the numerical scheme.
+     */
+    int iterations;
+
+    bool useInitialFlow;
+
+private:
+    void procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2);
+
+    std::vector<GpuMat> I0s;
+    std::vector<GpuMat> I1s;
+    std::vector<GpuMat> u1s;
+    std::vector<GpuMat> u2s;
+
+    GpuMat I1x_buf;
+    GpuMat I1y_buf;
+
+    GpuMat I1w_buf;
+    GpuMat I1wx_buf;
+    GpuMat I1wy_buf;
+
+    GpuMat grad_buf;
+    GpuMat rho_c_buf;
+
+    GpuMat p11_buf;
+    GpuMat p12_buf;
+    GpuMat p21_buf;
+    GpuMat p22_buf;
+
+    GpuMat diff_buf;
+    GpuMat norm_buf;
+};
+
+
 //! Interpolate frames (images) using provided optical flow (displacement field).
 //! frame0 - frame 0 (32-bit floating point images, single channel)
 //! frame1 - frame 1 (the same type and size)
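Note for reviewers: the header above only declares the interface. A minimal usage sketch of the new class might look like the following; it is not part of the patch, and the file names, the main() scaffolding, and the imread/download calls are illustrative. The default parameters mentioned in the comments are the ones set in the constructor in tvl1flow.cpp further down.

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/gpu/gpu.hpp>

    int main()
    {
        // Load two consecutive grayscale frames (paths are placeholders).
        cv::Mat frame0 = cv::imread("frame0.png", cv::IMREAD_GRAYSCALE);
        cv::Mat frame1 = cv::imread("frame1.png", cv::IMREAD_GRAYSCALE);

        // Upload the frames to the GPU.
        cv::gpu::GpuMat d_frame0(frame0), d_frame1(frame1);
        cv::gpu::GpuMat d_flowx, d_flowy;

        // Run the dual TV-L1 solver with the default parameters
        // (tau = 0.25, lambda = 0.15, theta = 0.3, 5 scales, 5 warps).
        cv::gpu::OpticalFlowDual_TVL1_GPU tvl1;
        tvl1(d_frame0, d_frame1, d_flowx, d_flowy);

        // Download the two flow components for further processing.
        cv::Mat flowx, flowy;
        d_flowx.download(flowx);
        d_flowy.download(flowy);

        // Release the internal buffers when the object is no longer needed.
        tvl1.collectGarbage();
        return 0;
    }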
diff --git a/modules/gpu/perf/perf_video.cpp b/modules/gpu/perf/perf_video.cpp
index b18cb17..b228580 100644
--- a/modules/gpu/perf/perf_video.cpp
+++ b/modules/gpu/perf/perf_video.cpp
@@ -395,6 +395,56 @@ PERF_TEST_P(ImagePair, Video_FarnebackOpticalFlow,
 }
 
 //////////////////////////////////////////////////////
+// OpticalFlowDual_TVL1
+
+PERF_TEST_P(ImagePair, Video_OpticalFlowDual_TVL1,
+            Values<ImagePair>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
+{
+    declare.time(20);
+
+    cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
+    ASSERT_FALSE(frame0.empty());
+
+    cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
+    ASSERT_FALSE(frame1.empty());
+
+    if (PERF_RUN_GPU())
+    {
+        cv::gpu::GpuMat d_frame0(frame0);
+        cv::gpu::GpuMat d_frame1(frame1);
+        cv::gpu::GpuMat d_flowx;
+        cv::gpu::GpuMat d_flowy;
+
+        cv::gpu::OpticalFlowDual_TVL1_GPU d_alg;
+
+        d_alg(d_frame0, d_frame1, d_flowx, d_flowy);
+
+        TEST_CYCLE()
+        {
+            d_alg(d_frame0, d_frame1, d_flowx, d_flowy);
+        }
+
+        GPU_SANITY_CHECK(d_flowx);
+        GPU_SANITY_CHECK(d_flowy);
+    }
+    else
+    {
+        cv::Mat flow;
+
+        cv::OpticalFlowDual_TVL1 alg;
+
+        alg(frame0, frame1, flow);
+
+        TEST_CYCLE()
+        {
+            alg(frame0, frame1, flow);
+        }
+
+        CPU_SANITY_CHECK(flow);
+    }
+}
+
+//////////////////////////////////////////////////////
 // FGDStatModel
 
 DEF_PARAM_TEST_1(Video, string);
 
diff --git a/modules/gpu/src/cuda/tvl1flow.cu b/modules/gpu/src/cuda/tvl1flow.cu
new file mode 100644
index 0000000..27694ad
--- /dev/null
+++ b/modules/gpu/src/cuda/tvl1flow.cu
@@ -0,0 +1,332 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                           License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#if !defined CUDA_DISABLER
+
+#include "opencv2/gpu/device/common.hpp"
+#include "opencv2/gpu/device/border_interpolate.hpp"
+#include "opencv2/gpu/device/limits.hpp"
+
+using namespace cv::gpu;
+using namespace cv::gpu::device;
+
+////////////////////////////////////////////////////////////
+// centeredGradient
+
+namespace tvl1flow
+{
+    __global__ void centeredGradientKernel(const PtrStepSzf src, PtrStepf dx, PtrStepf dy)
+    {
+        const int x = blockIdx.x * blockDim.x + threadIdx.x;
+        const int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (x >= src.cols || y >= src.rows)
+            return;
+
+        dx(y, x) = 0.5f * (src(y, ::min(x + 1, src.cols - 1)) - src(y, ::max(x - 1, 0)));
+        dy(y, x) = 0.5f * (src(::min(y + 1, src.rows - 1), x) - src(::max(y - 1, 0), x));
+    }
+
+    void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy)
+    {
+        const dim3 block(32, 8);
+        const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
+
+        centeredGradientKernel<<<grid, block>>>(src, dx, dy);
+        cudaSafeCall( cudaGetLastError() );
+
+        cudaSafeCall( cudaDeviceSynchronize() );
+    }
+}
+
+////////////////////////////////////////////////////////////
+// warpBackward
+
+namespace tvl1flow
+{
+    static __device__ __forceinline__ float bicubicCoeff(float x_)
+    {
+        float x = fabsf(x_);
+        if (x <= 1.0f)
+        {
+            return x * x * (1.5f * x - 2.5f) + 1.0f;
+        }
+        else if (x < 2.0f)
+        {
+            return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
+        }
+        else
+        {
+            return 0.0f;
+        }
+    }
+
+    texture<float, cudaTextureType2D, cudaReadModeElementType> tex_I1 (false, cudaFilterModePoint, cudaAddressModeClamp);
+    texture<float, cudaTextureType2D, cudaReadModeElementType> tex_I1x(false, cudaFilterModePoint, cudaAddressModeClamp);
+    texture<float, cudaTextureType2D, cudaReadModeElementType> tex_I1y(false, cudaFilterModePoint, cudaAddressModeClamp);
+
+    __global__ void warpBackwardKernel(const PtrStepSzf I0, const PtrStepf u1, const PtrStepf u2, PtrStepf I1w, PtrStepf I1wx, PtrStepf I1wy, PtrStepf grad, PtrStepf rho)
+    {
+        const int x = blockIdx.x * blockDim.x + threadIdx.x;
+        const int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (x >= I0.cols || y >= I0.rows)
+            return;
+
+        const float u1Val = u1(y, x);
+        const float u2Val = u2(y, x);
+
+        const float wx = x + u1Val;
+        const float wy = y + u2Val;
+
+        const int xmin = ::ceilf(wx - 2.0f);
+        const int xmax = ::floorf(wx + 2.0f);
+
+        const int ymin = ::ceilf(wy - 2.0f);
+        const int ymax = ::floorf(wy + 2.0f);
+
+        float sum  = 0.0f;
+        float sumx = 0.0f;
+        float sumy = 0.0f;
+        float wsum = 0.0f;
+
+        for (int cy = ymin; cy <= ymax; ++cy)
+        {
+            for (int cx = xmin; cx <= xmax; ++cx)
+            {
+                const float w = bicubicCoeff(wx - cx) * bicubicCoeff(wy - cy);
+
+                sum  += w * tex2D(tex_I1 , cx, cy);
+                sumx += w * tex2D(tex_I1x, cx, cy);
+                sumy += w * tex2D(tex_I1y, cx, cy);
+
+                wsum += w;
+            }
+        }
+
+        const float coeff = 1.0f / wsum;
+
+        const float I1wVal  = sum  * coeff;
+        const float I1wxVal = sumx * coeff;
+        const float I1wyVal = sumy * coeff;
+
+        I1w (y, x) = I1wVal;
+        I1wx(y, x) = I1wxVal;
+        I1wy(y, x) = I1wyVal;
+
+        const float Ix2 = I1wxVal * I1wxVal;
+        const float Iy2 = I1wyVal * I1wyVal;
+
+        // store the |Grad(I1)|^2
+        grad(y, x) = Ix2 + Iy2;
+
+        // compute the constant part of the rho function
+        const float I0Val = I0(y, x);
+        rho(y, x) = I1wVal - I1wxVal * u1Val - I1wyVal * u2Val - I0Val;
+    }
+
+    void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho)
+    {
+        const dim3 block(32, 8);
+        const dim3 grid(divUp(I0.cols, block.x), divUp(I0.rows, block.y));
+
+        bindTexture(&tex_I1 , I1);
+        bindTexture(&tex_I1x, I1x);
+        bindTexture(&tex_I1y, I1y);
+
+        warpBackwardKernel<<<grid, block>>>(I0, u1, u2, I1w, I1wx, I1wy, grad, rho);
+        cudaSafeCall( cudaGetLastError() );
+
+        cudaSafeCall( cudaDeviceSynchronize() );
+    }
+}
+
+////////////////////////////////////////////////////////////
+// estimateU
+
+namespace tvl1flow
+{
+    __device__ float divergence(const PtrStepf& v1, const PtrStepf& v2, int y, int x)
+    {
+        if (x > 0 && y > 0)
+        {
+            const float v1x = v1(y, x) - v1(y, x - 1);
+            const float v2y = v2(y, x) - v2(y - 1, x);
+            return v1x + v2y;
+        }
+        else
+        {
+            if (y > 0)
+                return v1(y, 0) + v2(y, 0) - v2(y - 1, 0);
+            else
+            {
+                if (x > 0)
+                    return v1(0, x) - v1(0, x - 1) + v2(0, x);
+                else
+                    return v1(0, 0) + v2(0, 0);
+            }
+        }
+    }
+
+    __global__ void estimateUKernel(const PtrStepSzf I1wx, const PtrStepf I1wy,
+                                    const PtrStepf grad, const PtrStepf rho_c,
+                                    const PtrStepf p11, const PtrStepf p12, const PtrStepf p21, const PtrStepf p22,
+                                    PtrStepf u1, PtrStepf u2, PtrStepf error,
+                                    const float l_t, const float theta)
+    {
+        const int x = blockIdx.x * blockDim.x + threadIdx.x;
+        const int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (x >= I1wx.cols || y >= I1wx.rows)
+            return;
+
+        const float I1wxVal = I1wx(y, x);
+        const float I1wyVal = I1wy(y, x);
+        const float gradVal = grad(y, x);
+        const float u1OldVal = u1(y, x);
+        const float u2OldVal = u2(y, x);
+
+        const float rho = rho_c(y, x) + (I1wxVal * u1OldVal + I1wyVal * u2OldVal);
+
+        // estimate the values of the variable (v1, v2) (thresholding operator TH)
+
+        float d1 = 0.0f;
+        float d2 = 0.0f;
+
+        if (rho < -l_t * gradVal)
+        {
+            d1 = l_t * I1wxVal;
+            d2 = l_t * I1wyVal;
+        }
+        else if (rho > l_t * gradVal)
+        {
+            d1 = -l_t * I1wxVal;
+            d2 = -l_t * I1wyVal;
+        }
+        else if (gradVal > numeric_limits<float>::epsilon())
+        {
+            const float fi = -rho / gradVal;
+            d1 = fi * I1wxVal;
+            d2 = fi * I1wyVal;
+        }
+
+        const float v1 = u1OldVal + d1;
+        const float v2 = u2OldVal + d2;
+
+        // compute the divergence of the dual variable (p1, p2)
+
+        const float div_p1 = divergence(p11, p12, y, x);
+        const float div_p2 = divergence(p21, p22, y, x);
+
+        // estimate the values of the optical flow (u1, u2)
+
+        const float u1NewVal = v1 + theta * div_p1;
+        const float u2NewVal = v2 + theta * div_p2;
+
+        u1(y, x) = u1NewVal;
+        u2(y, x) = u2NewVal;
+
+        const float n1 = (u1OldVal - u1NewVal) * (u1OldVal - u1NewVal);
+        const float n2 = (u2OldVal - u2NewVal) * (u2OldVal - u2NewVal);
+        error(y, x) = n1 + n2;
+    }
+
+    void estimateU(PtrStepSzf I1wx, PtrStepSzf I1wy,
+                   PtrStepSzf grad, PtrStepSzf rho_c,
+                   PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22,
+                   PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf error,
+                   float l_t, float theta)
+    {
+        const dim3 block(32, 8);
+        const dim3 grid(divUp(I1wx.cols, block.x), divUp(I1wx.rows, block.y));
+
+        estimateUKernel<<<grid, block>>>(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, u1, u2, error, l_t, theta);
+        cudaSafeCall( cudaGetLastError() );
+
+        cudaSafeCall( cudaDeviceSynchronize() );
+    }
+}
+
+////////////////////////////////////////////////////////////
+// estimateDualVariables
+
+namespace tvl1flow
+{
+    __global__ void estimateDualVariablesKernel(const PtrStepSzf u1, const PtrStepf u2, PtrStepf p11, PtrStepf p12, PtrStepf p21, PtrStepf p22, const float taut)
+    {
+        const int x = blockIdx.x * blockDim.x + threadIdx.x;
+        const int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (x >= u1.cols || y >= u1.rows)
+            return;
+
+        const float u1x = u1(y, ::min(x + 1, u1.cols - 1)) - u1(y, x);
+        const float u1y = u1(::min(y + 1, u1.rows - 1), x) - u1(y, x);
+
+        const float u2x = u2(y, ::min(x + 1, u1.cols - 1)) - u2(y, x);
+        const float u2y = u2(::min(y + 1, u1.rows - 1), x) - u2(y, x);
+
+        const float g1 = ::hypotf(u1x, u1y);
+        const float g2 = ::hypotf(u2x, u2y);
+
+        const float ng1 = 1.0f + taut * g1;
+        const float ng2 = 1.0f + taut * g2;
+
+        p11(y, x) = (p11(y, x) + taut * u1x) / ng1;
+        p12(y, x) = (p12(y, x) + taut * u1y) / ng1;
+        p21(y, x) = (p21(y, x) + taut * u2x) / ng2;
+        p22(y, x) = (p22(y, x) + taut * u2y) / ng2;
+    }
+
+    void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, float taut)
+    {
+        const dim3 block(32, 8);
+        const dim3 grid(divUp(u1.cols, block.x), divUp(u1.rows, block.y));
+
+        estimateDualVariablesKernel<<<grid, block>>>(u1, u2, p11, p12, p21, p22, taut);
+        cudaSafeCall( cudaGetLastError() );
+
+        cudaSafeCall( cudaDeviceSynchronize() );
+    }
+}
+
+#endif // !defined CUDA_DISABLER
diff --git a/modules/gpu/src/tvl1flow.cpp b/modules/gpu/src/tvl1flow.cpp
new file mode 100644
index 0000000..a598a9e
--- /dev/null
+++ b/modules/gpu/src/tvl1flow.cpp
@@ -0,0 +1,256 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                           License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
+
+#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
+
+cv::gpu::OpticalFlowDual_TVL1_GPU::OpticalFlowDual_TVL1_GPU() { throw_nogpu(); }
+void cv::gpu::OpticalFlowDual_TVL1_GPU::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_nogpu(); }
+void cv::gpu::OpticalFlowDual_TVL1_GPU::collectGarbage() {}
+void cv::gpu::OpticalFlowDual_TVL1_GPU::procOneScale(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_nogpu(); }
+
+#else
+
+using namespace std;
+using namespace cv;
+using namespace cv::gpu;
+
+cv::gpu::OpticalFlowDual_TVL1_GPU::OpticalFlowDual_TVL1_GPU()
+{
+    tau            = 0.25;
+    lambda         = 0.15;
+    theta          = 0.3;
+    nscales        = 5;
+    warps          = 5;
+    epsilon        = 0.01;
+    iterations     = 300;
+    useInitialFlow = false;
+}
+
+void cv::gpu::OpticalFlowDual_TVL1_GPU::operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy)
+{
+    CV_Assert( I0.type() == CV_8UC1 || I0.type() == CV_32FC1 );
+    CV_Assert( I0.size() == I1.size() );
+    CV_Assert( I0.type() == I1.type() );
+    CV_Assert( !useInitialFlow || (flowx.size() == I0.size() && flowx.type() == CV_32FC1 && flowy.size() == flowx.size() && flowy.type() == flowx.type()) );
+    CV_Assert( nscales > 0 );
+
+    // allocate memory for the pyramid structure
+    I0s.resize(nscales);
+    I1s.resize(nscales);
+    u1s.resize(nscales);
+    u2s.resize(nscales);
+
+    I0.convertTo(I0s[0], CV_32F, I0.depth() == CV_8U ? 1.0 : 255.0);
+    I1.convertTo(I1s[0], CV_32F, I1.depth() == CV_8U ? 1.0 : 255.0);
+
+    if (!useInitialFlow)
+    {
+        flowx.create(I0.size(), CV_32FC1);
+        flowy.create(I0.size(), CV_32FC1);
+    }
+
+    u1s[0] = flowx;
+    u2s[0] = flowy;
+
+    I1x_buf.create(I0.size(), CV_32FC1);
+    I1y_buf.create(I0.size(), CV_32FC1);
+
+    I1w_buf.create(I0.size(), CV_32FC1);
+    I1wx_buf.create(I0.size(), CV_32FC1);
+    I1wy_buf.create(I0.size(), CV_32FC1);
+
+    grad_buf.create(I0.size(), CV_32FC1);
+    rho_c_buf.create(I0.size(), CV_32FC1);
+
+    p11_buf.create(I0.size(), CV_32FC1);
+    p12_buf.create(I0.size(), CV_32FC1);
+    p21_buf.create(I0.size(), CV_32FC1);
+    p22_buf.create(I0.size(), CV_32FC1);
+
+    diff_buf.create(I0.size(), CV_32FC1);
+
+    // create the scales
+    for (int s = 1; s < nscales; ++s)
+    {
+        gpu::pyrDown(I0s[s - 1], I0s[s]);
+        gpu::pyrDown(I1s[s - 1], I1s[s]);
+
+        if (I0s[s].cols < 16 || I0s[s].rows < 16)
+        {
+            nscales = s;
+            break;
+        }
+
+        if (useInitialFlow)
+        {
+            gpu::pyrDown(u1s[s - 1], u1s[s]);
+            gpu::pyrDown(u2s[s - 1], u2s[s]);
+
+            gpu::multiply(u1s[s], Scalar::all(0.5), u1s[s]);
+            gpu::multiply(u2s[s], Scalar::all(0.5), u2s[s]);
+        }
+    }
+
+    // pyramidal structure for computing the optical flow
+    for (int s = nscales - 1; s >= 0; --s)
+    {
+        // compute the optical flow at the current scale
+        procOneScale(I0s[s], I1s[s], u1s[s], u2s[s]);
+
+        // if this was the last scale, finish now
+        if (s == 0)
+            break;
+
+        // otherwise, upsample the optical flow
+
+        // zoom the optical flow for the next finer scale
+        gpu::resize(u1s[s], u1s[s - 1], I0s[s - 1].size());
+        gpu::resize(u2s[s], u2s[s - 1], I0s[s - 1].size());
+
+        // scale the optical flow with the appropriate zoom factor
+        gpu::multiply(u1s[s - 1], Scalar::all(2), u1s[s - 1]);
+        gpu::multiply(u2s[s - 1], Scalar::all(2), u2s[s - 1]);
+    }
+}
+
+namespace tvl1flow
+{
+    void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy);
+    void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho);
+    void estimateU(PtrStepSzf I1wx, PtrStepSzf I1wy,
+                   PtrStepSzf grad, PtrStepSzf rho_c,
+                   PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22,
+                   PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf error,
+                   float l_t, float theta);
+    void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, float taut);
+}
+
+void cv::gpu::OpticalFlowDual_TVL1_GPU::procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2)
+{
+    using namespace tvl1flow;
+
+    const double scaledEpsilon = epsilon * epsilon * I0.size().area();
+
+    CV_DbgAssert( I1.size() == I0.size() );
+    CV_DbgAssert( I1.type() == I0.type() );
+    CV_DbgAssert( u1.empty() || u1.size() == I0.size() );
+    CV_DbgAssert( u2.size() == u1.size() );
+
+    if (u1.empty())
+    {
+        u1.create(I0.size(), CV_32FC1);
+        u1.setTo(Scalar::all(0));
+
+        u2.create(I0.size(), CV_32FC1);
+        u2.setTo(Scalar::all(0));
+    }
+
+    GpuMat I1x = I1x_buf(Rect(0, 0, I0.cols, I0.rows));
+    GpuMat I1y = I1y_buf(Rect(0, 0, I0.cols, I0.rows));
+    centeredGradient(I1, I1x, I1y);
+
+    GpuMat I1w  = I1w_buf(Rect(0, 0, I0.cols, I0.rows));
+    GpuMat I1wx = I1wx_buf(Rect(0, 0, I0.cols, I0.rows));
+    GpuMat I1wy = I1wy_buf(Rect(0, 0, I0.cols, I0.rows));
+
+    GpuMat grad  = grad_buf(Rect(0, 0, I0.cols, I0.rows));
+    GpuMat rho_c = rho_c_buf(Rect(0, 0, I0.cols, I0.rows));
+
+    GpuMat p11 = p11_buf(Rect(0, 0, I0.cols, I0.rows));
+    GpuMat p12 = p12_buf(Rect(0, 0, I0.cols, I0.rows));
+    GpuMat p21 = p21_buf(Rect(0, 0, I0.cols, I0.rows));
+    GpuMat p22 = p22_buf(Rect(0, 0, I0.cols, I0.rows));
+    p11.setTo(Scalar::all(0));
+    p12.setTo(Scalar::all(0));
+    p21.setTo(Scalar::all(0));
+    p22.setTo(Scalar::all(0));
+
+    GpuMat diff = diff_buf(Rect(0, 0, I0.cols, I0.rows));
+
+    const float l_t  = static_cast<float>(lambda * theta);
+    const float taut = static_cast<float>(tau / theta);
+
+    for (int warpings = 0; warpings < warps; ++warpings)
+    {
+        warpBackward(I0, I1, I1x, I1y, u1, u2, I1w, I1wx, I1wy, grad, rho_c);
+
+        double error = numeric_limits<double>::max();
+        for (int n = 0; error > scaledEpsilon && n < iterations; ++n)
+        {
+            estimateU(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, u1, u2, diff, l_t, static_cast<float>(theta));
+
+            error = gpu::sum(diff, norm_buf)[0];
+
+            estimateDualVariables(u1, u2, p11, p12, p21, p22, taut);
+        }
+    }
+}
+
+void cv::gpu::OpticalFlowDual_TVL1_GPU::collectGarbage()
+{
+    I0s.clear();
+    I1s.clear();
+    u1s.clear();
+    u2s.clear();
+
+    I1x_buf.release();
+    I1y_buf.release();
+
+    I1w_buf.release();
+    I1wx_buf.release();
+    I1wy_buf.release();
+
+    grad_buf.release();
+    rho_c_buf.release();
+
+    p11_buf.release();
+    p12_buf.release();
+    p21_buf.release();
+    p22_buf.release();
+
+    diff_buf.release();
+    norm_buf.release();
+}
+
+#endif // !defined HAVE_CUDA || defined(CUDA_DISABLER)
diff --git a/modules/gpu/test/test_optflow.cpp b/modules/gpu/test/test_optflow.cpp
index 6bc471e..46b71b5 100644
--- a/modules/gpu/test/test_optflow.cpp
+++ b/modules/gpu/test/test_optflow.cpp
@@ -401,4 +401,48 @@ INSTANTIATE_TEST_CASE_P(GPU_Video, FarnebackOpticalFlow, testing::Combine(
     testing::Values(FarnebackOptFlowFlags(0), FarnebackOptFlowFlags(cv::OPTFLOW_FARNEBACK_GAUSSIAN)),
     testing::Values(UseInitFlow(false), UseInitFlow(true))));
 
+//////////////////////////////////////////////////////
+// OpticalFlowDual_TVL1
+
+PARAM_TEST_CASE(OpticalFlowDual_TVL1, cv::gpu::DeviceInfo, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        useRoi = GET_PARAM(1);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+GPU_TEST_P(OpticalFlowDual_TVL1, Accuracy)
+{
+    cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE);
+    ASSERT_FALSE(frame0.empty());
+
+    cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE);
+    ASSERT_FALSE(frame1.empty());
+
+    cv::gpu::OpticalFlowDual_TVL1_GPU d_alg;
+    cv::gpu::GpuMat d_flowx = createMat(frame0.size(), CV_32FC1, useRoi);
+    cv::gpu::GpuMat d_flowy = createMat(frame0.size(), CV_32FC1, useRoi);
+    d_alg(loadMat(frame0, useRoi), loadMat(frame1, useRoi), d_flowx, d_flowy);
+
+    cv::OpticalFlowDual_TVL1 alg;
+    cv::Mat flow;
+    alg(frame0, frame1, flow);
+    cv::Mat gold[2];
+    cv::split(flow, gold);
+
+    EXPECT_MAT_SIMILAR(gold[0], d_flowx, 3e-3);
+    EXPECT_MAT_SIMILAR(gold[1], d_flowy, 3e-3);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Video, OpticalFlowDual_TVL1, testing::Combine(
+    ALL_DEVICES,
+    WHOLE_SUBMAT));
+
 #endif // HAVE_CUDA
-- 
2.7.4
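Reading aid (an editorial gloss of the kernels above, not text from the patch): the three kernels in tvl1flow.cu appear to follow the standard duality-based per-pixel updates of [1]/[2], with l_t = lambda * theta and taut = tau / theta, rho(u) the linearized residual I1w + <grad I1w, u - u0> - I0 built by warpBackwardKernel, and grad = |grad I1w|^2. In LaTeX notation, each inner iteration roughly performs:

    % thresholding step on the residual (estimateUKernel)
    v = u + \begin{cases}
          \lambda\theta\,\nabla I_1^w                      & \rho(u) < -\lambda\theta\,|\nabla I_1^w|^2, \\
         -\lambda\theta\,\nabla I_1^w                      & \rho(u) >  \lambda\theta\,|\nabla I_1^w|^2, \\
         -\rho(u)\,\nabla I_1^w \,/\, |\nabla I_1^w|^2     & \text{otherwise,}
    \end{cases}
    % primal flow update (estimateUKernel)
    u \leftarrow v + \theta\,\operatorname{div} p,
    % dual variable update with forward differences (estimateDualVariablesKernel)
    p \leftarrow \frac{p + (\tau/\theta)\,\nabla u}{1 + (\tau/\theta)\,|\nabla u|}.

procOneScale repeats this inner loop until the mean squared change of (u1, u2), accumulated in the diff buffer and summed with gpu::sum, drops below epsilon^2 times the image area, or the iteration limit is reached; the outer loops re-warp I1 `warps` times per scale and propagate the flow coarse-to-fine across the pyramid.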