\r
#else /* !defined (HAVE_CUDA) */\r
\r
-#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)\r
-\r
-#if (defined(_WIN32) || defined(_WIN64)) && (NPP_VERSION >= 32)\r
-# define NPP_HAVE_COMPLEX_TYPE\r
-#endif\r
-\r
////////////////////////////////////////////////////////////////////////\r
// add subtract multiply divide\r
\r
{\r
CV_DbgAssert(src1.size() == src2.size() && src1.type() == src2.type());\r
\r
-#if NPP_VERSION >= 32\r
CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32SC1 || src1.type() == CV_32FC1);\r
-#else\r
- CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32FC1);\r
-#endif\r
\r
dst.create( src1.size(), src1.type() );\r
\r
src2.ptr<Npp8u>(), src2.step,\r
dst.ptr<Npp8u>(), dst.step, sz, 0) );\r
break;\r
-#if NPP_VERSION >= 32\r
case CV_32SC1:\r
nppSafeCall( npp_func_32sc1(src1.ptr<Npp32s>(), src1.step,\r
src2.ptr<Npp32s>(), src2.step,\r
dst.ptr<Npp32s>(), dst.step, sz) );\r
break;\r
-#endif\r
case CV_32FC1:\r
nppSafeCall( npp_func_32fc1(src1.ptr<Npp32f>(), src1.step,\r
src2.ptr<Npp32f>(), src2.step,\r
typedef NppStatus (*func_ptr)(const Npp32f *pSrc, int nSrcStep, Npp32f nValue, Npp32f *pDst,\r
int nDstStep, NppiSize oSizeROI);\r
};\r
-#ifdef NPP_HAVE_COMPLEX_TYPE\r
template<> struct NppArithmScalarFunc<2>\r
{\r
typedef NppStatus (*func_ptr)(const Npp32fc *pSrc, int nSrcStep, Npp32fc nValue, Npp32fc *pDst,\r
int nDstStep, NppiSize oSizeROI);\r
};\r
-#endif\r
\r
template<int SCN, typename NppArithmScalarFunc<SCN>::func_ptr func> struct NppArithmScalar;\r
template<typename NppArithmScalarFunc<1>::func_ptr func> struct NppArithmScalar<1, func>\r
nppSafeCall( func(src.ptr<Npp32f>(), src.step, (Npp32f)sc[0], dst.ptr<Npp32f>(), dst.step, sz) );\r
}\r
};\r
-#ifdef NPP_HAVE_COMPLEX_TYPE\r
template<typename NppArithmScalarFunc<2>::func_ptr func> struct NppArithmScalar<2, func>\r
{\r
static void calc(const GpuMat& src, const Scalar& sc, GpuMat& dst)\r
nppSafeCall( func(src.ptr<Npp32fc>(), src.step, nValue, dst.ptr<Npp32fc>(), dst.step, sz) );\r
}\r
};\r
-#endif\r
}\r
\r
void cv::gpu::add(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)\r
{\r
-#if NPP_VERSION >= 32\r
nppArithmCaller(src1, src2, dst, nppiAdd_8u_C1RSfs, nppiAdd_8u_C4RSfs, nppiAdd_32s_C1R, nppiAdd_32f_C1R);\r
-#else\r
- nppArithmCaller(src1, src2, dst, nppiAdd_8u_C1RSfs, nppiAdd_8u_C4RSfs, 0, nppiAdd_32f_C1R);\r
-#endif\r
}\r
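
// A minimal usage sketch for the element-wise GpuMat functions above
// (add/subtract/multiply/divide). The host matrices a and b, the function
// name and the include path are assumptions for illustration, not part of
// this patch; both inputs must share size and type, and the type must be
// CV_8UC1, CV_8UC4, CV_32SC1 or CV_32FC1.
#include <opencv2/gpu/gpu.hpp>   // assumed public header of this module

void add_example()
{
    cv::Mat a = cv::Mat::ones(480, 640, CV_32FC1);
    cv::Mat b(480, 640, CV_32FC1, cv::Scalar(2));

    cv::gpu::GpuMat d_a, d_b, d_sum;
    d_a.upload(a);
    d_b.upload(b);

    cv::gpu::add(d_a, d_b, d_sum);   // dispatches to nppiAdd_32f_C1R

    cv::Mat sum;
    d_sum.download(sum);             // every element is 3.0f
}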
\r
void cv::gpu::subtract(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)\r
{\r
-#if NPP_VERSION >= 32\r
nppArithmCaller(src2, src1, dst, nppiSub_8u_C1RSfs, nppiSub_8u_C4RSfs, nppiSub_32s_C1R, nppiSub_32f_C1R);\r
-#else\r
- nppArithmCaller(src2, src1, dst, nppiSub_8u_C1RSfs, nppiSub_8u_C4RSfs, 0, nppiSub_32f_C1R);\r
-#endif\r
}\r
\r
void cv::gpu::multiply(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)\r
{\r
-#if NPP_VERSION >= 32\r
nppArithmCaller(src1, src2, dst, nppiMul_8u_C1RSfs, nppiMul_8u_C4RSfs, nppiMul_32s_C1R, nppiMul_32f_C1R);\r
-#else\r
- nppArithmCaller(src1, src2, dst, nppiMul_8u_C1RSfs, nppiMul_8u_C4RSfs, 0, nppiMul_32f_C1R);\r
-#endif\r
}\r
\r
void cv::gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)\r
{\r
-#if NPP_VERSION >= 32\r
nppArithmCaller(src2, src1, dst, nppiDiv_8u_C1RSfs, nppiDiv_8u_C4RSfs, nppiDiv_32s_C1R, nppiDiv_32f_C1R);\r
-#else\r
- nppArithmCaller(src2, src1, dst, nppiDiv_8u_C1RSfs, nppiDiv_8u_C4RSfs, 0, nppiDiv_32f_C1R);\r
-#endif\r
}\r
\r
void cv::gpu::add(const GpuMat& src, const Scalar& sc, GpuMat& dst)\r
{\r
-#ifdef NPP_HAVE_COMPLEX_TYPE\r
typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);\r
static const caller_t callers[] = {0, NppArithmScalar<1, nppiAddC_32f_C1R>::calc, NppArithmScalar<2, nppiAddC_32fc_C1R>::calc};\r
\r
CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);\r
\r
callers[src.channels()](src, sc, dst);\r
-#else\r
-# if NPP_VERSION >= 32\r
- CV_Assert(src.type() == CV_32FC1);\r
- NppArithmScalar<1, nppiAddC_32f_C1R>::calc(src, sc, dst);\r
-# else\r
- CV_Assert(!"This function doesn't supported");\r
-# endif\r
-#endif\r
}\r
\r
void cv::gpu::subtract(const GpuMat& src, const Scalar& sc, GpuMat& dst)\r
{\r
-#ifdef NPP_HAVE_COMPLEX_TYPE\r
typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);\r
static const caller_t callers[] = {0, NppArithmScalar<1, nppiSubC_32f_C1R>::calc, NppArithmScalar<2, nppiSubC_32fc_C1R>::calc};\r
\r
CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);\r
\r
callers[src.channels()](src, sc, dst);\r
-#else\r
-# if NPP_VERSION >= 32\r
- CV_Assert(src.type() == CV_32FC1);\r
- NppArithmScalar<1, nppiSubC_32f_C1R>::calc(src, sc, dst);\r
-# else\r
- CV_Assert(!"This function doesn't supported");\r
-# endif\r
-#endif\r
}\r
\r
void cv::gpu::multiply(const GpuMat& src, const Scalar& sc, GpuMat& dst)\r
{\r
-#ifdef NPP_HAVE_COMPLEX_TYPE\r
typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);\r
static const caller_t callers[] = {0, NppArithmScalar<1, nppiMulC_32f_C1R>::calc, NppArithmScalar<2, nppiMulC_32fc_C1R>::calc};\r
\r
CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);\r
\r
callers[src.channels()](src, sc, dst);\r
-#else\r
-# if NPP_VERSION >= 32\r
- CV_Assert(src.type() == CV_32FC1);\r
- NppArithmScalar<1, nppiMulC_32f_C1R>::calc(src, sc, dst);\r
-# else\r
- CV_Assert(!"This function doesn't supported");\r
-# endif\r
-#endif\r
}\r
\r
void cv::gpu::divide(const GpuMat& src, const Scalar& sc, GpuMat& dst)\r
{\r
-#ifdef NPP_HAVE_COMPLEX_TYPE\r
typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);\r
static const caller_t callers[] = {0, NppArithmScalar<1, nppiDivC_32f_C1R>::calc, NppArithmScalar<2, nppiDivC_32fc_C1R>::calc};\r
\r
CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);\r
\r
callers[src.channels()](src, sc, dst);\r
-#else\r
-# if NPP_VERSION >= 32\r
- CV_Assert(src.type() == CV_32FC1);\r
- NppArithmScalar<1, nppiDivC_32f_C1R>::calc(src, sc, dst);\r
-# else\r
- CV_Assert(!"This function doesn't supported");\r
-# endif\r
-#endif\r
}\r
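
// Sketch of the GpuMat-with-Scalar overloads above; only CV_32FC1 and
// CV_32FC2 inputs are accepted, and the two-channel case maps onto the
// complex (Npp32fc) AddC/SubC/MulC/DivC kernels. Matrix names and fill
// values are hypothetical, the fill constructor is assumed to behave like
// cv::Mat's, and includes are as in the add() sketch above.
void scalar_arithm_example()
{
    cv::gpu::GpuMat d_real(480, 640, CV_32FC1, cv::Scalar(10));
    cv::gpu::GpuMat d_cplx(480, 640, CV_32FC2, cv::Scalar(1, -1));
    cv::gpu::GpuMat d_dst;

    cv::gpu::multiply(d_real, cv::Scalar(0.5), d_dst);   // nppiMulC_32f_C1R
    cv::gpu::add(d_cplx, cv::Scalar(2, 3), d_dst);       // nppiAddC_32fc_C1R
    cv::gpu::divide(d_real, cv::Scalar(4), d_dst);       // nppiDivC_32f_C1R
}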
\r
////////////////////////////////////////////////////////////////////////\r
{\r
CV_DbgAssert(src1.size() == src2.size() && src1.type() == src2.type());\r
\r
-#if NPP_VERSION >= 32\r
CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32SC1 || src1.type() == CV_32FC1);\r
-#else\r
- CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32FC1);\r
-#endif\r
\r
dst.create( src1.size(), src1.type() );\r
\r
src2.ptr<Npp8u>(), src2.step,\r
dst.ptr<Npp8u>(), dst.step, sz) );\r
break;\r
-#if NPP_VERSION >= 32\r
case CV_32SC1:\r
nppSafeCall( nppiAbsDiff_32s_C1R(src1.ptr<Npp32s>(), src1.step,\r
src2.ptr<Npp32s>(), src2.step,\r
dst.ptr<Npp32s>(), dst.step, sz) );\r
break;\r
-#endif\r
case CV_32FC1:\r
nppSafeCall( nppiAbsDiff_32f_C1R(src1.ptr<Npp32f>(), src1.step,\r
src2.ptr<Npp32f>(), src2.step,\r
\r
void cv::gpu::absdiff(const GpuMat& src, const Scalar& s, GpuMat& dst)\r
{\r
-#if NPP_VERSION >= 32\r
CV_Assert(src.type() == CV_32FC1);\r
\r
dst.create( src.size(), src.type() );\r
sz.height = src.rows;\r
\r
nppSafeCall( nppiAbsDiffC_32f_C1R(src.ptr<Npp32f>(), src.step, dst.ptr<Npp32f>(), dst.step, sz, (Npp32f)s[0]) );\r
-#else\r
- CV_Assert(!"This function doesn't supported");\r
-#endif\r
}\r
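
// Sketch for absdiff with a scalar: only CV_32FC1 input is supported and
// the result is |src - s| per element (nppiAbsDiffC_32f_C1R). Names are
// hypothetical; includes as above.
void absdiff_scalar_example()
{
    cv::gpu::GpuMat d_src(240, 320, CV_32FC1, cv::Scalar(-3));
    cv::gpu::GpuMat d_dst;

    cv::gpu::absdiff(d_src, cv::Scalar(1), d_dst);   // every element becomes 4.0f
}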
\r
////////////////////////////////////////////////////////////////////////\r
\r
Scalar cv::gpu::sum(const GpuMat& src)\r
{\r
+ CV_Assert(!"disabled until fix crash");\r
+\r
CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC4);\r
\r
NppiSize sz;\r
sz.height = src.rows;\r
\r
Scalar res;\r
-#if NPP_VERSION >= 32\r
- CV_Assert(!"disabled until fix crash");\r
\r
int bufsz;\r
\r
\r
nppSafeCall( nppiSum_8u_C4R(src.ptr<Npp8u>(), src.step, sz, buf.ptr<Npp32s>(), res.val) );\r
}\r
-#else\r
- if (src.type() == CV_8UC1)\r
- nppSafeCall( nppiSum_8u_C1R(src.ptr<Npp8u>(), src.step, sz, res.val) );\r
- else\r
- nppSafeCall( nppiSum_8u_C4R(src.ptr<Npp8u>(), src.step, sz, res.val) );\r
-#endif\r
\r
return res;\r
}\r
\r
Npp8u* cuMem;\r
\r
-#if NPP_VERSION >= 32\r
cuMem = nppsMalloc_8u(8);\r
-#else\r
- cudaSafeCall( cudaMalloc((void**)&cuMem, 8 * sizeof(Npp8u)) );\r
-#endif\r
\r
nppSafeCall( nppiMinMax_8u_C4R(src.ptr<Npp8u>(), src.step, sz, cuMem, cuMem + 4) );\r
\r
if (maxVal)\r
cudaMemcpy(maxVal, cuMem + 4, 4 * sizeof(Npp8u), cudaMemcpyDeviceToHost);\r
\r
-#if NPP_VERSION >= 32\r
nppsFree(cuMem);\r
-#else\r
- cudaSafeCall( cudaFree(cuMem) );\r
-#endif\r
}\r
}\r
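
// Re-statement of the device-buffer pattern used by minMax above, as a
// standalone sketch: NPP wants the min/max results in device memory, so a
// small buffer is taken from nppsMalloc_8u, filled by nppiMinMax_8u_C4R and
// copied back with cudaMemcpy. The CV_8UC4 GpuMat img and the function name
// are assumptions for illustration.
void minmax_buffer_example(const cv::gpu::GpuMat& img)
{
    NppiSize sz;
    sz.width  = img.cols;
    sz.height = img.rows;

    Npp8u* cuMem = nppsMalloc_8u(8);   // 4 channel minima + 4 maxima

    nppSafeCall( nppiMinMax_8u_C4R(img.ptr<Npp8u>(), img.step, sz, cuMem, cuMem + 4) );

    Npp8u minVals[4], maxVals[4];
    cudaSafeCall( cudaMemcpy(minVals, cuMem,     4 * sizeof(Npp8u), cudaMemcpyDeviceToHost) );
    cudaSafeCall( cudaMemcpy(maxVals, cuMem + 4, 4 * sizeof(Npp8u), cudaMemcpyDeviceToHost) );

    nppsFree(cuMem);
}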
\r
\r
void cv::gpu::exp(const GpuMat& src, GpuMat& dst)\r
{\r
-#if NPP_VERSION >= 32\r
CV_Assert(src.type() == CV_32FC1);\r
\r
dst.create(src.size(), src.type());\r
sz.height = src.rows;\r
\r
nppSafeCall( nppiExp_32f_C1R(src.ptr<Npp32f>(), src.step, dst.ptr<Npp32f>(), dst.step, sz) );\r
-#else\r
- CV_Assert(!"This function doesn't supported");\r
-#endif\r
}\r
\r
////////////////////////////////////////////////////////////////////////\r
\r
void cv::gpu::log(const GpuMat& src, GpuMat& dst)\r
{\r
-#if NPP_VERSION >= 32\r
CV_Assert(src.type() == CV_32FC1);\r
\r
dst.create(src.size(), src.type());\r
sz.height = src.rows;\r
\r
nppSafeCall( nppiLn_32f_C1R(src.ptr<Npp32f>(), src.step, dst.ptr<Npp32f>(), dst.step, sz) );\r
-#else\r
- CV_Assert(!"This function doesn't supported");\r
-#endif\r
}\r
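
// Sketch for exp/log: both take a single-channel CV_32FC1 image and create
// dst with the same size and type. d_x, d_e, d_l are hypothetical names;
// includes as in the add() sketch.
void exp_log_example()
{
    cv::gpu::GpuMat d_x(1, 1024, CV_32FC1, cv::Scalar(2));
    cv::gpu::GpuMat d_e, d_l;

    cv::gpu::exp(d_x, d_e);   // nppiExp_32f_C1R: e^2 per element
    cv::gpu::log(d_e, d_l);   // nppiLn_32f_C1R: recovers ~2 per element
}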
\r
////////////////////////////////////////////////////////////////////////\r
// NPP magnitude\r
\r
-#ifdef NPP_HAVE_COMPLEX_TYPE\r
namespace\r
{\r
typedef NppStatus (*nppMagnitude_t)(const Npp32fc* pSrc, int nSrcStep, Npp32f* pDst, int nDstStep, NppiSize oSizeROI);\r
nppSafeCall( func(src.ptr<Npp32fc>(), src.step, dst.ptr<Npp32f>(), dst.step, sz) );\r
}\r
}\r
-#endif\r
\r
void cv::gpu::magnitude(const GpuMat& src, GpuMat& dst)\r
{\r
-#ifdef NPP_HAVE_COMPLEX_TYPE\r
::npp_magnitude(src, dst, nppiMagnitude_32fc32f_C1R);\r
-#else\r
- CV_Assert(!"This function doesn't supported");\r
-#endif\r
}\r
\r
void cv::gpu::magnitudeSqr(const GpuMat& src, GpuMat& dst)\r
{\r
-#ifdef NPP_HAVE_COMPLEX_TYPE\r
::npp_magnitude(src, dst, nppiMagnitudeSqr_32fc32f_C1R);\r
-#else\r
- CV_Assert(!"This function doesn't supported");\r
-#endif\r
}\r
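
// Sketch for magnitude/magnitudeSqr: the input is an interleaved CV_32FC2
// matrix (read as Npp32fc re/im pairs), the output is CV_32FC1.
// d_cplx, d_mag, d_mag2 are hypothetical names; includes as above.
void magnitude_example()
{
    cv::gpu::GpuMat d_cplx(480, 640, CV_32FC2, cv::Scalar(3, 4));
    cv::gpu::GpuMat d_mag, d_mag2;

    cv::gpu::magnitude(d_cplx, d_mag);       // sqrt(3^2 + 4^2) = 5 per element
    cv::gpu::magnitudeSqr(d_cplx, d_mag2);   // 3^2 + 4^2 = 25 per element
}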
\r
////////////////////////////////////////////////////////////////////////\r
//////////////////////////////////////////////////////////////////////////////\r
// Per-element bit-wise logical matrix operations\r
\r
-namespace cv { namespace gpu { namespace mathfunc \r
+namespace cv { namespace gpu { namespace mathfunc\r
{\r
void bitwise_not_caller(int rows, int cols, const PtrStep src, int elemSize, PtrStep dst, cudaStream_t stream);\r
void bitwise_not_caller(int rows, int cols, const PtrStep src, int elemSize, PtrStep dst, const PtrStep mask, cudaStream_t stream);\r
void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst, int elem_size, Mask mask, cudaStream_t stream);\r
}}}\r
\r
-namespace \r
+namespace\r
{\r
void bitwise_not_caller(const GpuMat& src, GpuMat& dst, cudaStream_t stream)\r
{\r
\r
void cv::gpu::bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask)\r
{\r
- if (mask.empty()) \r
+ if (mask.empty())\r
::bitwise_not_caller(src, dst, 0);\r
else\r
::bitwise_not_caller(src, dst, mask, 0);\r
\r
#else /* !defined (HAVE_CUDA) */\r
\r
-#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)\r
-\r
namespace cv { namespace gpu { namespace imgproc\r
{\r
void remap_gpu_1c(const DevMem2D& src, const DevMem2Df& xmap, const DevMem2Df& ymap, DevMem2D dst);\r
\r
void cv::gpu::Canny(const GpuMat& image, GpuMat& edges, double threshold1, double threshold2, int apertureSize)\r
{\r
-#if NPP_VERSION >= 32\r
CV_Assert(!"disabled until fix crash");\r
CV_Assert(image.type() == CV_8UC1);\r
\r
\r
nppSafeCall( nppiCanny_32f8u_C1R(srcDx.ptr<Npp32f>(), srcDx.step, srcDy.ptr<Npp32f>(), srcDy.step,\r
edges.ptr<Npp8u>(), edges.step, sz, (Npp32f)threshold1, (Npp32f)threshold2, buf.ptr<Npp8u>()) );\r
-#else\r
- CV_Assert(!"This function doesn't supported");\r
-#endif\r
}\r
\r
////////////////////////////////////////////////////////////////////////\r
\r
void cv::gpu::evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel)\r
{\r
-#if NPP_VERSION >= 32\r
Mat host_levels(1, nLevels, CV_32SC1);\r
nppSafeCall( nppiEvenLevelsHost_32s(host_levels.ptr<Npp32s>(), nLevels, lowerLevel, upperLevel) );\r
levels.upload(host_levels);\r
-#else\r
- CV_Assert(!"This function doesn't supported");\r
-#endif\r
}\r
\r
void cv::gpu::histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel)\r
{\r
-#if NPP_VERSION >= 32\r
CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 );\r
\r
typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, int levels, int lowerLevel, int upperLevel);\r
};\r
\r
hist_callers[src.depth()](src, hist, histSize, lowerLevel, upperLevel);\r
-#else\r
- CV_Assert(!"This function doesn't supported");\r
-#endif\r
}\r
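
// Sketch for the single-channel histEven above: a CV_8UC1 image is binned
// into histSize even bins over [lowerLevel, upperLevel). The host image
// gray is an assumption; the bin layout follows this module's NPP wrapper.
void hist_even_example(const cv::Mat& gray /* CV_8UC1 */)
{
    cv::gpu::GpuMat d_gray, d_hist;
    d_gray.upload(gray);

    cv::gpu::histEven(d_gray, d_hist, 256, 0, 256);

    cv::Mat hist;
    d_hist.download(hist);   // single row of 32-bit bin counts
}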
\r
void cv::gpu::histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4])\r
{\r
-#if NPP_VERSION >= 32\r
CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4 || src.type() == CV_16SC4 );\r
\r
typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], int levels[4], int lowerLevel[4], int upperLevel[4]);\r
};\r
\r
hist_callers[src.depth()](src, hist, histSize, lowerLevel, upperLevel);\r
-#else\r
- CV_Assert(!"This function doesn't supported");\r
-#endif\r
}\r
\r
void cv::gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels)\r
{\r
-#if NPP_VERSION >= 32\r
CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 || src.type() == CV_32FC1);\r
\r
typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, const GpuMat& levels);\r
};\r
\r
hist_callers[src.depth()](src, hist, levels);\r
-#else\r
- CV_Assert(!"This function doesn't supported");\r
-#endif\r
}\r
\r
void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4])\r
{\r
-#if NPP_VERSION >= 32\r
CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4 || src.type() == CV_16SC4 || src.type() == CV_32FC4);\r
\r
typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4]);\r
};\r
\r
hist_callers[src.depth()](src, hist, levels);\r
-#else\r
- CV_Assert(!"This function doesn't supported");\r
-#endif\r
}\r
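
// Sketch combining evenLevels and histRange: evenLevels uploads an evenly
// spaced CV_32SC1 level array, which histRange then uses as bin boundaries
// (nLevels boundaries give nLevels - 1 bins, per NPP's nppiHistogramRange).
// The host image gray is an assumption; includes as above.
void hist_range_example(const cv::Mat& gray /* CV_8UC1 */)
{
    cv::gpu::GpuMat d_gray, d_levels, d_hist;
    d_gray.upload(gray);

    cv::gpu::evenLevels(d_levels, 17, 0, 256);    // 17 boundaries -> 16 bins
    cv::gpu::histRange(d_gray, d_hist, d_levels);
}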
\r
#endif /* !defined (HAVE_CUDA) */\r
\r
#else /* !defined (HAVE_CUDA) */\r
\r
-#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)\r
-\r
namespace cv\r
{\r
namespace gpu\r
{NppCvt<CV_8U, CV_16U, nppiConvert_8u16u_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,NppCvt<CV_8U, CV_16U, nppiConvert_8u16u_C4R>::cvt},\r
{NppCvt<CV_8U, CV_16S, nppiConvert_8u16s_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,NppCvt<CV_8U, CV_16S, nppiConvert_8u16s_C4R>::cvt},\r
{convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},\r
-#if NPP_VERSION >= 32\r
{NppCvt<CV_8U, CV_32F, nppiConvert_8u32f_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},\r
-#else\r
- {convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},\r
-#endif\r
{convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},\r
{0,0,0,0}\r
},\r
{0,0,0,0}\r
},\r
{\r
-#if NPP_VERSION >= 32\r
{NppCvt<CV_32F, CV_8U, nppiConvert_32f8u_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},\r
-#else\r
- {convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},\r
-#endif\r
{convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},\r
{NppCvt<CV_32F, CV_16U, nppiConvert_32f16u_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},\r
{NppCvt<CV_32F, CV_16S, nppiConvert_32f16s_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},\r
{\r
{NppSet<CV_8U, 1, nppiSet_8u_C1R>::set,kernelSet,kernelSet,NppSet<CV_8U, 4, nppiSet_8u_C4R>::set},\r
{kernelSet,kernelSet,kernelSet,kernelSet},\r
-#if NPP_VERSION >= 32\r
{NppSet<CV_16U, 1, nppiSet_16u_C1R>::set,kernelSet,kernelSet,NppSet<CV_16U, 4, nppiSet_16u_C4R>::set},\r
-#else\r
- {kernelSet,kernelSet,kernelSet,kernelSet},\r
-#endif\r
-#if NPP_VERSION >= 32\r
{NppSet<CV_16S, 1, nppiSet_16s_C1R>::set,kernelSet,kernelSet,NppSet<CV_16S, 4, nppiSet_16s_C4R>::set},\r
-#else\r
- {kernelSet,kernelSet,kernelSet,kernelSet},\r
-#endif\r
-#if NPP_VERSION >= 32\r
{NppSet<CV_32S, 1, nppiSet_32s_C1R>::set,kernelSet,kernelSet,NppSet<CV_32S, 4, nppiSet_32s_C4R>::set},\r
-#else\r
- {NppSet<CV_32S, 1, nppiSet_32s_C1R>::set,kernelSet,kernelSet,kernelSet},\r
-#endif\r
-#if NPP_VERSION >= 32\r
{NppSet<CV_32F, 1, nppiSet_32f_C1R>::set,kernelSet,kernelSet,NppSet<CV_32F, 4, nppiSet_32f_C4R>::set},\r
-#else\r
- {NppSet<CV_32F, 1, nppiSet_32f_C1R>::set,kernelSet,kernelSet,kernelSet},\r
-#endif\r
{kernelSet,kernelSet,kernelSet,kernelSet},\r
{0,0,0,0}\r
};\r
}\r
else\r
{\r
-#if NPP_VERSION >= 32\r
typedef void (*set_caller_t)(GpuMat& src, const Scalar& s, const GpuMat& mask);\r
static const set_caller_t set_callers[8][4] =\r
{\r
{0,0,0,0}\r
};\r
set_callers[depth()][channels()-1](*this, s, mask);\r
-#else\r
- kernelSetMask(*this, s, mask);\r
-#endif\r
}\r
\r
return *this;\r