}
-template <class ElemType>
+template <class T>
int calcDiffElemCountImpl(const vector<Mat>& mv, const Mat& m)
{
int diffElemCount = 0;
{
for(int x = 0; x < m.cols; x++)
{
- const ElemType* mElem = &m.at<ElemType>(y,x*mChannels);
+ const T* mElem = &m.at<T>(y, x*mChannels);
size_t loc = 0;
for(size_t i = 0; i < mv.size(); i++)
{
const size_t mvChannel = mv[i].channels();
- const ElemType* mvElem = &mv[i].at<ElemType>(y,x*(int)mvChannel);
+ const T* mvElem = &mv[i].at<T>(y, x*(int)mvChannel);
for(size_t li = 0; li < mvChannel; li++)
if(mElem[loc + li] != mvElem[li])
diffElemCount++;
ASSERT_EQ( cvtest::norm(a, b, NORM_INF), 0.);
}
-#ifdef CV_CXX11
-
TEST(Core_Matx, from_initializer_list)
{
Mat_<double> a = (Mat_<double>(2,2) << 10, 11, 12, 13);
EXPECT_EQ(25u, m2.total());
}
-#endif // CXX11
-
TEST(Core_InputArray, empty)
{
vector<vector<Point> > data;
}
}
-#ifdef CV_CXX_STD_ARRAY
TEST(Core_Mat_array, outputArray_create_getMat)
{
cv::Mat_<uchar> src_base(5, 1);
EXPECT_EQ(0, cvtest::norm(src[i], dst[i], NORM_INF));
}
}
-#endif
TEST(Mat, regression_8680)
{
ASSERT_EQ(mat.channels(), 2);
}
-#ifdef CV_CXX11
-
TEST(Mat_, range_based_for)
{
Mat_<uchar> img = Mat_<uchar>::zeros(3, 3);
ASSERT_FLOAT_EQ(66.0f, *(mat.ptr<float>(idx)));
}
-#endif
-
BIGDATA_TEST(Mat, push_back_regression_4158) // memory usage: ~10.6 Gb
{
}
}
+
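+// A MatExpr passed through InputArray should be evaluated once and cached:
+// fetching the expression result again must reuse that buffer instead of
+// re-running gemm().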
+TEST(Core_InputArray, fetch_MatExpr)
+{
+ Mat a(Size(10, 5), CV_32FC1, 5);
+ Mat b(Size(10, 5), CV_32FC1, 2);
+ MatExpr expr = a * b.t(); // gemm expression
+ Mat dst;
+ cv::add(expr, Scalar(1), dst); // invoke gemm() here
+ void* expr_data = expr.a.data;
+ Mat result = expr; // should not call gemm() here again
+ EXPECT_EQ(expr_data, result.data); // expr data is reused
+ EXPECT_EQ(dst.size(), result.size());
+}
+
TEST(Core_Vectors, issue_13078)
{
float floats_[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
<< "Mat: CV_8UC3 != " << typeToString(ab_mat.type());
}
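+// MatExpr::size() must report the size of the evaluated result for each
+// transpose combination of a gemm expression (issue 16689).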
+TEST(Core_MatExpr, issue_16689)
+{
+ Mat a(Size(10, 5), CV_32FC1, 5);
+ Mat b(Size(10, 5), CV_32FC1, 2);
+ Mat bt(Size(5, 10), CV_32FC1, 3);
+ {
+ MatExpr r = a * bt; // gemm
+ EXPECT_EQ(Mat(r).size(), r.size()) << "[10x5] x [5x10] => [5x5]";
+ }
+ {
+ MatExpr r = a * b.t(); // gemm
+ EXPECT_EQ(Mat(r).size(), r.size()) << "[10x5] x [10x5].t() => [5x5]";
+ }
+ {
+ MatExpr r = a.t() * b; // gemm
+ EXPECT_EQ(Mat(r).size(), r.size()) << "[10x5].t() x [10x5] => [10x10]";
+ }
+ {
+ MatExpr r = a.t() * bt.t(); // gemm
+ EXPECT_EQ(Mat(r).size(), r.size()) << "[10x5].t() x [5x10].t() => [10x10]";
+ }
+}
+
#ifdef HAVE_EIGEN
TEST(Core_Eigen, eigen2cv_check_Mat_type)
{
}
#endif // HAVE_EIGEN
+#ifdef OPENCV_EIGEN_TENSOR_SUPPORT
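+// cv2eigen should copy an interleaved CV_32FC3 Mat into a rank-3 tensor:
+// a row-major tensor matches the Mat's memory layout element for element,
+// while a col-major tensor is checked per (row, col, channel) index.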
+TEST(Core_Eigen, cv2eigen_check_tensor_conversion)
+{
+ Mat A(2, 3, CV_32FC3);
+ float value = 0;
+ for(int row=0; row<A.rows; row++)
+ for(int col=0; col<A.cols; col++)
+ for(int ch=0; ch<A.channels(); ch++)
+ A.at<Vec3f>(row,col)[ch] = value++;
+
+ Eigen::Tensor<float, 3, Eigen::RowMajor> row_tensor;
+ cv2eigen(A, row_tensor);
+
+ float* mat_ptr = (float*)A.data;
+ float* tensor_ptr = row_tensor.data();
+ for (int i = 0; i < row_tensor.size(); i++)
+ ASSERT_FLOAT_EQ(mat_ptr[i], tensor_ptr[i]);
+
+ Eigen::Tensor<float, 3, Eigen::ColMajor> col_tensor;
+ cv2eigen(A, col_tensor);
+ value = 0;
+ for(int row=0; row<A.rows; row++)
+ for(int col=0; col<A.cols; col++)
+ for(int ch=0; ch<A.channels(); ch++)
+ ASSERT_FLOAT_EQ(value++, col_tensor(row,col,ch));
+}
+#endif // OPENCV_EIGEN_TENSOR_SUPPORT
+
+#ifdef OPENCV_EIGEN_TENSOR_SUPPORT
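+// eigen2cv should perform the reverse conversion, turning rank-3 tensors of
+// either storage order back into an interleaved CV_32FC3 Mat.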
+TEST(Core_Eigen, eigen2cv_check_tensor_conversion)
+{
+ Eigen::Tensor<float, 3, Eigen::RowMajor> row_tensor(2,3,3);
+ Eigen::Tensor<float, 3, Eigen::ColMajor> col_tensor(2,3,3);
+ float value = 0;
+ for(int row=0; row<row_tensor.dimension(0); row++)
+ for(int col=0; col<row_tensor.dimension(1); col++)
+ for(int ch=0; ch<row_tensor.dimension(2); ch++)
+ {
+ row_tensor(row,col,ch) = value;
+ col_tensor(row,col,ch) = value;
+ value++;
+ }
+
+ Mat A;
+ eigen2cv(row_tensor, A);
+
+ float* tensor_ptr = row_tensor.data();
+ float* mat_ptr = (float*)A.data;
+ for (int i = 0; i < row_tensor.size(); i++)
+ ASSERT_FLOAT_EQ(tensor_ptr[i], mat_ptr[i]);
+
+ Mat B;
+ eigen2cv(col_tensor, B);
+
+ value = 0;
+ for(int row=0; row<B.rows; row++)
+ for(int col=0; col<B.cols; col++)
+ for(int ch=0; ch<B.channels(); ch++)
+ ASSERT_FLOAT_EQ(value++, B.at<Vec3f>(row,col)[ch]);
+}
+#endif // OPENCV_EIGEN_TENSOR_SUPPORT
+
+#ifdef OPENCV_EIGEN_TENSOR_SUPPORT
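+// cv2eigen_tensormap should wrap the Mat data without copying, so both the
+// values and the element addresses must match.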
+TEST(Core_Eigen, cv2eigen_tensormap_check_tensormap_access)
+{
+ float arr[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+ Mat a_mat(2, 2, CV_32FC3, arr);
+ Eigen::TensorMap<Eigen::Tensor<float, 3, Eigen::RowMajor>> a_tensor = cv2eigen_tensormap<float>(a_mat);
+
+ for(int i=0; i<a_mat.rows; i++) {
+ for (int j=0; j<a_mat.cols; j++) {
+ for (int ch=0; ch<a_mat.channels(); ch++) {
+ ASSERT_FLOAT_EQ(a_mat.at<Vec3f>(i,j)[ch], a_tensor(i,j,ch));
+ ASSERT_EQ(&a_mat.at<Vec3f>(i,j)[ch], &a_tensor(i,j,ch));
+ }
+ }
+ }
+}
+#endif // OPENCV_EIGEN_TENSOR_SUPPORT
+
TEST(Mat, regression_12943) // memory usage: ~4.5 Gb
{
applyTestTag(CV_TEST_TAG_MEMORY_6GB);
cv::flip(src, dst, 0);
}
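+// begin()/end() on an empty Mat must not throw and must compare equal
+// (issue 16855).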
+TEST(Mat, empty_iterator_16855)
+{
+ cv::Mat m;
+ EXPECT_NO_THROW(m.begin<uchar>());
+ EXPECT_NO_THROW(m.end<uchar>());
+ EXPECT_TRUE(m.begin<uchar>() == m.end<uchar>());
+}
+
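+// Regression for issue 18473: constructing a multi-dimensional Mat over
+// external data must accept a steps array with ndims-1 entries.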
+TEST(Mat, regression_18473)
+{
+ std::vector<int> sizes(3);
+ sizes[0] = 20;
+ sizes[1] = 50;
+ sizes[2] = 100;
+#if 1 // with the fix
+ std::vector<size_t> steps(2);
+ steps[0] = 50*100*2;
+ steps[1] = 100*2;
+#else // without the fix
+ std::vector<size_t> steps(3);
+ steps[0] = 50*100*2;
+ steps[1] = 100*2;
+ steps[2] = 2;
+#endif
+ std::vector<short> data(20*50*100, 0); // 1Mb
+ data[data.size() - 1] = 5;
+
+ // steps: "Array of ndims-1 steps"; the last step is implied by the element size
+ Mat m(sizes, CV_16SC1, (void*)data.data(), (const size_t*)steps.data());
+
+ ASSERT_FALSE(m.empty());
+ EXPECT_EQ((int)5, (int)m.at<short>(19, 49, 99));
+}
+
}} // namespace