:param stream: Stream for the asynchronous version.
.. seealso:: :ocv:func:`polarToCart`
+
+
+
+gpu::normalize
+--------------
+Normalizes the norm or value range of an array.
+
+.. ocv:function:: void gpu::normalize(const GpuMat& src, GpuMat& dst, double alpha = 1, double beta = 0, int norm_type = NORM_L2, int dtype = -1, const GpuMat& mask = GpuMat())
+
+.. ocv:function:: void gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf)
+
+ :param src: input array.
+
+ :param dst: output array of the same size as ``src`` .
+
+ :param alpha: norm value to normalize to or the lower range boundary in case of the range normalization.
+
+ :param beta: upper range boundary in case of the range normalization; it is not used for the norm normalization.
+
+    :param norm_type: normalization type (see the details below).
+
+ :param dtype: when negative, the output array has the same type as ``src``; otherwise, it has the same number of channels as ``src`` and the depth ``=CV_MAT_DEPTH(dtype)``.
+
+ :param mask: optional operation mask.
+
+ :param norm_buf: Optional buffer to avoid extra memory allocations. It is resized automatically.
+
+ :param cvt_buf: Optional buffer to avoid extra memory allocations. It is resized automatically.
+
+.. seealso:: :ocv:func:`normalize`
//! supports only floating-point source
CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees = false, Stream& stream = Stream::Null());
+//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values
+CV_EXPORTS void normalize(const GpuMat& src, GpuMat& dst, double alpha = 1, double beta = 0,
+                          int norm_type = NORM_L2, int dtype = -1, const GpuMat& mask = GpuMat());
+//! buffered version: norm_buf and cvt_buf are caller-provided scratch buffers that avoid repeated internal allocations across calls
+CV_EXPORTS void normalize(const GpuMat& src, GpuMat& dst, double a, double b,
+                          int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf);
+
//////////////////////////// Per-element operations ////////////////////////////////////
}
}
+//////////////////////////////////////////////////////////////////////
+// Normalize
+
+DEF_PARAM_TEST(Sz_Depth_NormType, cv::Size, MatDepth, NormType);
+
+// Benchmarks gpu::normalize against cv::normalize over typical matrix
+// sizes, several depths, and every norm type the GPU version supports.
+PERF_TEST_P(Sz_Depth_NormType, Core_Normalize, Combine(
+    GPU_TYPICAL_MAT_SIZES,
+    Values(CV_8U, CV_16U, CV_32F, CV_64F),
+    Values(NormType(cv::NORM_INF),
+           NormType(cv::NORM_L1),
+           NormType(cv::NORM_L2),
+           NormType(cv::NORM_MINMAX))
+    ))
+{
+    cv::Size size = GET_PARAM(0);
+    int type = GET_PARAM(1);
+    int norm_type = GET_PARAM(2);
+
+    // Fixed norm target / range boundaries for all runs.
+    double alpha = 1;
+    double beta = 0;
+
+    cv::Mat src(size, type);
+    fillRandom(src);
+
+    if (PERF_RUN_GPU())
+    {
+        cv::gpu::GpuMat d_src(src);
+        cv::gpu::GpuMat d_dst;
+        // Scratch buffers reused across TEST_CYCLE iterations so the
+        // measurement reflects the compute, not repeated allocation.
+        cv::gpu::GpuMat d_norm_buf, d_cvt_buf;
+
+        TEST_CYCLE() cv::gpu::normalize(d_src, d_dst, alpha, beta, norm_type, type, cv::gpu::GpuMat(), d_norm_buf, d_cvt_buf);
+
+        GPU_SANITY_CHECK(d_dst, 1);
+    }
+    else
+    {
+        cv::Mat dst;
+
+        // CPU baseline for the same parameters.
+        TEST_CYCLE() cv::normalize(src, dst, alpha, beta, norm_type, type);
+
+        CPU_SANITY_CHECK(dst, 1);
+    }
+}
+
} // namespace
CV_ENUM(Interpolation, cv::INTER_NEAREST, cv::INTER_LINEAR, cv::INTER_CUBIC, cv::INTER_AREA)
#define ALL_INTERPOLATIONS testing::ValuesIn(Interpolation::all())
-CV_ENUM(NormType, cv::NORM_INF, cv::NORM_L1, cv::NORM_L2, cv::NORM_HAMMING)
+CV_ENUM(NormType, cv::NORM_INF, cv::NORM_L1, cv::NORM_L2, cv::NORM_HAMMING, cv::NORM_MINMAX)
const int Gray = 1, TwoChannel = 2, BGR = 3, BGRA = 4;
CV_ENUM(MatCn, Gray, TwoChannel, BGR, BGRA)
void cv::gpu::phase(const GpuMat&, const GpuMat&, GpuMat&, bool, Stream&) { throw_nogpu(); }
void cv::gpu::cartToPolar(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, Stream&) { throw_nogpu(); }
void cv::gpu::polarToCart(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, Stream&) { throw_nogpu(); }
+// Stubs used when OpenCV is built without CUDA: any call reports missing GPU support.
+void cv::gpu::normalize(const GpuMat&, GpuMat&, double, double, int, int, const GpuMat&) { throw_nogpu(); }
+void cv::gpu::normalize(const GpuMat&, GpuMat&, double, double, int, int, const GpuMat&, GpuMat&, GpuMat&) { throw_nogpu(); }
#else /* !defined (HAVE_CUDA) */
polarToCart_caller(magnitude, angle, x, y, angleInDegrees, StreamAccessor::getStream(stream));
}
+////////////////////////////////////////////////////////////////////////
+// normalize
+
+// Convenience overload: delegates to the buffered version, allocating
+// the scratch buffers (norm_buf / cvt_buf) locally for this one call.
+void cv::gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask)
+{
+    GpuMat norm_buf, cvt_buf;
+    normalize(src, dst, a, b, norm_type, dtype, mask, norm_buf, cvt_buf);
+}
+
+// Normalizes src into dst as dst = src * scale + shift, where scale/shift are
+// derived from the requested norm type:
+//   NORM_MINMAX            - map [src min, src max] onto [min(a,b), max(a,b)]
+//   NORM_INF/NORM_L1/NORM_L2 - scale so the chosen norm of dst equals a
+// norm_buf / cvt_buf are caller-provided scratch buffers; when a mask is given,
+// only masked pixels of dst are written.
+void cv::gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf)
+{
+    double scale = 1, shift = 0;
+
+    switch (norm_type)
+    {
+    case NORM_MINMAX:
+    {
+        double smin = 0, smax = 0;
+        minMax(src, &smin, &smax, mask, norm_buf);
+        const double dmin = std::min(a, b), dmax = std::max(a, b);
+        const double src_range = smax - smin;
+        // Degenerate (constant) source: use zero scale instead of dividing by ~0.
+        scale = (dmax - dmin) * (src_range > numeric_limits<double>::epsilon() ? 1.0 / src_range : 0.0);
+        shift = dmin - smin * scale;
+        break;
+    }
+    case NORM_INF:
+    case NORM_L1:
+    case NORM_L2:
+    {
+        const double src_norm = norm(src, norm_type, mask, norm_buf);
+        // Zero-norm source: produce an all-zero result rather than dividing by ~0.
+        scale = src_norm > numeric_limits<double>::epsilon() ? a / src_norm : 0.0;
+        shift = 0;
+        break;
+    }
+    default:
+        CV_Error(CV_StsBadArg, "Unknown/unsupported norm type");
+    }
+
+    if (mask.empty())
+    {
+        src.convertTo(dst, dtype, scale, shift);
+    }
+    else
+    {
+        // convertTo has no mask parameter: convert into a scratch buffer,
+        // then copy only the masked pixels into dst.
+        src.convertTo(cvt_buf, dtype, scale, shift);
+        cvt_buf.copyTo(dst, mask);
+    }
+}
+
#endif /* !defined (HAVE_CUDA) */
ALL_REDUCE_CODES,
WHOLE_SUBMAT));
+//////////////////////////////////////////////////////////////////////////////
+// Normalize
+
+// Fixture for gpu::normalize accuracy tests: parameterized over device,
+// matrix size, depth, norm type and ROI usage.
+PARAM_TEST_CASE(Normalize, cv::gpu::DeviceInfo, cv::Size, MatDepth, NormCode, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int type;
+    int norm_type;
+    bool useRoi;
+
+    // Norm target / range boundaries passed to both GPU and CPU normalize.
+    double alpha;
+    double beta;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        type = GET_PARAM(2);
+        norm_type = GET_PARAM(3);
+        useRoi = GET_PARAM(4);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+
+        alpha = 1;
+        beta = 0;
+    }
+
+};
+
+// Compares gpu::normalize (no mask) against the CPU cv::normalize gold result.
+GPU_TEST_P(Normalize, WithOutMask)
+{
+    cv::Mat src = randomMat(size, type);
+
+    cv::gpu::GpuMat dst = createMat(size, type, useRoi);
+    cv::gpu::normalize(loadMat(src, useRoi), dst, alpha, beta, norm_type, type);
+
+    cv::Mat dst_gold;
+    cv::normalize(src, dst_gold, alpha, beta, norm_type, type);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, 1e-6);
+}
+
+// Masked variant: both outputs are zero-initialized first because pixels
+// outside the mask are left untouched by normalize, so they must match too.
+GPU_TEST_P(Normalize, WithMask)
+{
+    cv::Mat src = randomMat(size, type);
+    cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);
+
+    cv::gpu::GpuMat dst = createMat(size, type, useRoi);
+    dst.setTo(cv::Scalar::all(0));
+    cv::gpu::normalize(loadMat(src, useRoi), dst, alpha, beta, norm_type, type, loadMat(mask, useRoi));
+
+    cv::Mat dst_gold(size, type);
+    dst_gold.setTo(cv::Scalar::all(0));
+    cv::normalize(src, dst_gold, alpha, beta, norm_type, type, mask);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, 1e-6);
+}
+
+// Run on all devices/sizes/depths for every norm type gpu::normalize supports.
+INSTANTIATE_TEST_CASE_P(GPU_Core, Normalize, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    ALL_DEPTH,
+    testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF), NormCode(cv::NORM_MINMAX)),
+    WHOLE_SUBMAT));
+
#endif // HAVE_CUDA