Use in-place npp function for inplace arguments
author: Namgoo Lee <namgoo.lee@cognex.com>
Tue, 21 Jul 2020 01:27:43 +0000 (10:27 +0900)
committer: Namgoo Lee <namgoo.lee@cognex.com>
Tue, 21 Jul 2020 01:27:43 +0000 (10:27 +0900)
modules/cudaarithm/src/core.cpp
modules/cudaarithm/test/test_core.cpp

index 6d97e15dbbdd3c94e6d5e4e0e09032770bb8f193..ac01afc7f086272b5caaa512aa39ebede9fd1d63 100644 (file)
@@ -102,6 +102,34 @@ namespace
                 cudaSafeCall( cudaDeviceSynchronize() );
         }
     };
+
+    // Trait that maps an OpenCV depth enum to the signature of the matching
+    // NPP *in-place* mirror primitive (nppiMirror_<type>_C<n>IR). Unlike the
+    // out-of-place variant above, the in-place function takes a single
+    // src/dst buffer pointer and step instead of separate src and dst.
+    template <int DEPTH> struct NppMirrorIFunc
+    {
+        typedef typename NppTypeTraits<DEPTH>::npp_t npp_t;
+
+        typedef NppStatus (*func_t)(npp_t* pSrcDst, int nSrcDstStep, NppiSize oROI, NppiAxis flip);
+    };
+
+    // Wrapper that adapts an in-place NPP mirror function to the common
+    // call(srcDst, flipCode, stream) shape used by the dispatch tables in
+    // cv::cuda::flip. Mirrors the existing NppMirror wrapper, minus the
+    // separate destination argument.
+    template <int DEPTH, typename NppMirrorIFunc<DEPTH>::func_t func> struct NppMirrorI
+    {
+        typedef typename NppMirrorIFunc<DEPTH>::npp_t npp_t;
+
+        static void call(GpuMat& srcDst, int flipCode, cudaStream_t stream)
+        {
+            // Route the NPP call onto the caller-supplied CUDA stream.
+            NppStreamHandler h(stream);
+
+            NppiSize sz;
+            sz.width  = srcDst.cols;
+            sz.height = srcDst.rows;
+
+            // flipCode semantics follow cv::flip: 0 -> flip around x-axis
+            // (NPP horizontal axis), >0 -> around y-axis, <0 -> both axes.
+            nppSafeCall( func(srcDst.ptr<npp_t>(), static_cast<int>(srcDst.step),
+                sz,
+                (flipCode == 0 ? NPP_HORIZONTAL_AXIS : (flipCode > 0 ? NPP_VERTICAL_AXIS : NPP_BOTH_AXIS))) );
+
+            // Legacy-default-stream callers expect synchronous behavior.
+            if (stream == 0)
+                cudaSafeCall( cudaDeviceSynchronize() );
+        }
+    };
 }
 
 void cv::cuda::flip(InputArray _src, OutputArray _dst, int flipCode, Stream& stream)
@@ -117,6 +145,17 @@ void cv::cuda::flip(InputArray _src, OutputArray _dst, int flipCode, Stream& str
         {NppMirror<CV_32F, nppiMirror_32f_C1R>::call, 0, NppMirror<CV_32F, nppiMirror_32f_C3R>::call, NppMirror<CV_32F, nppiMirror_32f_C4R>::call}
     };
 
+    typedef void (*ifunc_t)(GpuMat& srcDst, int flipCode, cudaStream_t stream);
+    static const ifunc_t ifuncs[6][4] =
+    {
+        {NppMirrorI<CV_8U, nppiMirror_8u_C1IR>::call, 0, NppMirrorI<CV_8U, nppiMirror_8u_C3IR>::call, NppMirrorI<CV_8U, nppiMirror_8u_C4IR>::call},
+        {0,0,0,0},
+        {NppMirrorI<CV_16U, nppiMirror_16u_C1IR>::call, 0, NppMirrorI<CV_16U, nppiMirror_16u_C3IR>::call, NppMirrorI<CV_16U, nppiMirror_16u_C4IR>::call},
+        {0,0,0,0},
+        {NppMirrorI<CV_32S, nppiMirror_32s_C1IR>::call, 0, NppMirrorI<CV_32S, nppiMirror_32s_C3IR>::call, NppMirrorI<CV_32S, nppiMirror_32s_C4IR>::call},
+        {NppMirrorI<CV_32F, nppiMirror_32f_C1IR>::call, 0, NppMirrorI<CV_32F, nppiMirror_32f_C3IR>::call, NppMirrorI<CV_32F, nppiMirror_32f_C4IR>::call}
+    };
+
     GpuMat src = getInputMat(_src, stream);
 
     CV_Assert(src.depth() == CV_8U || src.depth() == CV_16U || src.depth() == CV_32S || src.depth() == CV_32F);
@@ -125,7 +164,10 @@ void cv::cuda::flip(InputArray _src, OutputArray _dst, int flipCode, Stream& str
     _dst.create(src.size(), src.type());
     GpuMat dst = getOutputMat(_dst, src.size(), src.type(), stream);
 
-    funcs[src.depth()][src.channels() - 1](src, dst, flipCode, StreamAccessor::getStream(stream));
+    if (src.refcount != dst.refcount)
+        funcs[src.depth()][src.channels() - 1](src, dst, flipCode, StreamAccessor::getStream(stream));
+    else // in-place
+        ifuncs[src.depth()][src.channels() - 1](src, flipCode, StreamAccessor::getStream(stream));
 
     syncOutput(dst, _dst, stream);
 }
index 7e5762aa3fc4b523f627f00a78fb33f89d5c1697..bc8f3737e53c9c8bb420e32e85bb64d7dcf85e81 100644 (file)
@@ -279,6 +279,19 @@ CUDA_TEST_P(Flip, Accuracy)
     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
 }
 
+// In-place variant of the Flip accuracy test: passes the same GpuMat as both
+// source and destination, exercising the nppiMirror_*_C*IR path added in
+// core.cpp, and compares against the CPU cv::flip reference.
+CUDA_TEST_P(Flip, AccuracyInplace)
+{
+    cv::Mat src = randomMat(size, type);
+
+    // Upload once, then flip directly in device memory (src == dst).
+    cv::cuda::GpuMat srcDst = loadMat(src, useRoi);
+    cv::cuda::flip(srcDst, srcDst, flip_code);
+
+    // CPU golden reference.
+    cv::Mat dst_gold;
+    cv::flip(src, dst_gold, flip_code);
+
+    EXPECT_MAT_NEAR(dst_gold, srcDst, 0.0);
+}
+
 INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Flip, testing::Combine(
     ALL_DEVICES,
     DIFFERENT_SIZES,