         fistp t;
     }
     return t;
-#elif defined _MSC_VER && defined _M_ARM && defined HAVE_TEGRA_OPTIMIZATION
-    TEGRA_ROUND(value);
+#elif ((defined _MSC_VER && defined _M_ARM) || defined CV_ICC || defined __GNUC__) && defined HAVE_TEGRA_OPTIMIZATION
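+    /* TEGRA_ROUND_DBL is expected to round the double and return the result
+       directly; it is only available on Tegra-optimized builds */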
+    TEGRA_ROUND_DBL(value);
 #elif defined CV_ICC || defined __GNUC__
-# ifdef HAVE_TEGRA_OPTIMIZATION
-    TEGRA_ROUND(value);
-# elif CV_VFP
+# if CV_VFP
     ARM_ROUND_DBL(value)
-# else
+# else
     return (int)lrint(value);
-# endif
+# endif
 #else
     double intpart, fractpart;
     fractpart = modf(value, &intpart);
 /** @overload */
 CV_INLINE int cvRound(float value)
 {
-#if CV_VFP && !defined HAVE_TEGRA_OPTIMIZATION
+#if defined ANDROID && (defined CV_ICC || defined __GNUC__) && defined HAVE_TEGRA_OPTIMIZATION
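+    /* float variant of the Tegra rounding macro; skips the double-promotion
+       fallback below */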
+    TEGRA_ROUND_FLT(value);
+#elif CV_VFP && !defined HAVE_TEGRA_OPTIMIZATION
     ARM_ROUND_FLT(value)
 #else
     return cvRound((double)value);
--- /dev/null
+#include "perf_precomp.hpp"
+
+using namespace std;
+using namespace cv;
+using namespace perf;
+using std::tr1::make_tuple;
+using std::tr1::get;
+
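+// Reference loop: apply cvRound element-wise, writing the rounded values into a CV_32S matrix.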
+template <typename T>
+static void CvRoundMat(const cv::Mat & src, cv::Mat & dst)
+{
+    for (int y = 0; y < dst.rows; ++y)
+    {
+        const T * sptr = src.ptr<T>(y);
+        int * dptr = dst.ptr<int>(y);
+
+        for (int x = 0; x < dst.cols; ++x)
+            dptr[x] = cvRound(sptr[x]);
+    }
+}
+
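+// Measure cvRound throughput over typical matrix sizes for both CV_32F and CV_64F inputs.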
+PERF_TEST_P(Size_MatType, CvRound_Float,
+            testing::Combine(testing::Values(TYPICAL_MAT_SIZES),
+                             testing::Values(CV_32FC1, CV_64FC1)))
+{
+    Size size = get<0>(GetParam());
+    int type = get<1>(GetParam()), depth = CV_MAT_DEPTH(type);
+
+    cv::Mat src(size, type), dst(size, CV_32SC1);
+
+    // WARMUP_RNG fills src with random data before timing starts
+    declare.in(src, WARMUP_RNG).out(dst);
+
+    if (depth == CV_32F)
+    {
+        TEST_CYCLE()
+            CvRoundMat<float>(src, dst);
+    }
+    else if (depth == CV_64F)
+    {
+        TEST_CYCLE()
+            CvRoundMat<double>(src, dst);
+    }
+
+    // timing-only benchmark: no reference output is stored or compared
+    SANITY_CHECK_NOTHING();
+}