for( y = 0; y < gradSize.height; y++ )
{
- const uchar* currPtr = img.data + img.step*ymap[y];
- const uchar* prevPtr = img.data + img.step*ymap[y-1];
- const uchar* nextPtr = img.data + img.step*ymap[y+1];
- float* gradPtr = (float*)grad.ptr(y);
- uchar* qanglePtr = (uchar*)qangle.ptr(y);
+ const uchar* currPtr = img.ptr(ymap[y]);
+ const uchar* prevPtr = img.ptr(ymap[y-1]);
+ const uchar* nextPtr = img.ptr(ymap[y+1]);
+ float* gradPtr = grad.ptr<float>(y);
+ uchar* qanglePtr = qangle.ptr(y);
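// [editorial note, not part of the patch] Three substitutions recur throughout this diff:
//   (const T*)m.data      ->  m.ptr<T>()    typed pointer to the first element
//   m.data + m.step*y     ->  m.ptr(y)      uchar* pointer to row y
//   if( !m.data )         ->  if( m.empty() )
// Mat::ptr() additionally range-checks the row index in debug builds (CV_DbgAssert),
// which the raw pointer arithmetic did not.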
for( x = 0; x < width; x++ )
{
int magStep = (int)( grad.step / sizeof(float) );
for( binIdx = 0; binIdx < nbins; binIdx++ )
{
- histBuf = (float*)histogram[binIdx].data;
- magBuf = (const float*)grad.data;
- binsBuf = (const uchar*)qangle.data;
+ histBuf = histogram[binIdx].ptr<float>();
+ magBuf = grad.ptr<float>();
+ binsBuf = qangle.ptr();
memset( histBuf, 0, histSize.width * sizeof(histBuf[0]) );
histBuf += histStep + 1;
size_t p0, p1, p2, p3;
CV_SUM_OFFSETS( p0, p1, p2, p3, normrect, sum.step1() )
double area = normrect.width * normrect.height;
- const int *sp = (const int*)sum.data;
+ const int *sp = sum.ptr<int>();
int valSum = sp[p0] - sp[p1] - sp[p2] + sp[p3];
- const double *sqp = (const double *)sqSum.data;
+ const double *sqp = sqSum.ptr<double>();
double valSqSum = sqp[p0] - sqp[p1] - sqp[p2] + sqp[p3];
return (float) sqrt( (double) (area * valSqSum - (double)valSum * valSum) );
}
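// [editorial note] The returned expression is area*sum(x^2) - (sum(x))^2, i.e. area^2 times
// the pixel variance over normrect; its square root is presumably used as the per-window
// normalization factor.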
return false;
Mat mat( winSize.height, winSize.width, CV_8UC1,
- (void*)(img.data + point.y * img.step + point.x * img.elemSize()), img.step );
+ (void*)(img.ptr(point.y) + point.x * img.elemSize()), img.step );
mat.copyTo(_img);
if( (int)( point.x + (1.0F + stepFactor ) * winSize.width ) < img.cols )
Mat image = _image.getMat(); CvMat c_image = image;
int nelems = corners.checkVector(2, CV_32F, true);
CV_Assert(nelems >= 0);
- cvDrawChessboardCorners( &c_image, patternSize, (CvPoint2D32f*)corners.data,
+ cvDrawChessboardCorners( &c_image, patternSize, corners.ptr<CvPoint2D32f>(),
nelems, patternWasFound );
}
int ni1 = imgpt1.checkVector(2, CV_32F);
CV_Assert( ni > 0 && ni == ni1 );
npoints.at<int>(i) = ni;
- memcpy( objPtData + j, objpt.data, ni*sizeof(objPtData[0]) );
- memcpy( imgPtData1 + j, imgpt1.data, ni*sizeof(imgPtData1[0]) );
+ memcpy( objPtData + j, objpt.ptr(), ni*sizeof(objPtData[0]) );
+ memcpy( imgPtData1 + j, imgpt1.ptr(), ni*sizeof(imgPtData1[0]) );
if( imgPtData2 )
{
Mat imgpt2 = imagePoints2.getMat(i);
int ni2 = imgpt2.checkVector(2, CV_32F);
CV_Assert( ni == ni2 );
- memcpy( imgPtData2 + j, imgpt2.data, ni*sizeof(imgPtData2[0]) );
+ memcpy( imgPtData2 + j, imgpt2.ptr(), ni*sizeof(imgPtData2[0]) );
}
}
}
{
_rvecs.create(3, 1, CV_64F, i, true);
Mat rv = _rvecs.getMat(i);
- memcpy(rv.data, rvecM.ptr<double>(i), 3*sizeof(double));
+ memcpy(rv.ptr(), rvecM.ptr<double>(i), 3*sizeof(double));
}
if( tvecs_needed )
{
_tvecs.create(3, 1, CV_64F, i, true);
Mat tv = _tvecs.getMat(i);
- memcpy(tv.data, tvecM.ptr<double>(i), 3*sizeof(double));
+ memcpy(tv.ptr(), tvecM.ptr<double>(i), 3*sizeof(double));
}
}
cameraMatrix.copyTo(_cameraMatrix);
if( _eulerAngles.needed() )
{
_eulerAngles.create(3, 1, CV_64F, -1, true);
- p_eulerAngles = (CvPoint3D64f*)_eulerAngles.getMat().data;
+ p_eulerAngles = _eulerAngles.getMat().ptr<CvPoint3D64f>();
}
cvDecomposeProjectionMatrix(&c_projMatrix, &c_cameraMatrix, &c_rotMatrix,
Mat EE = Mat(Vt.t()).colRange(5, 9) * 1.0;
Mat A(10, 20, CV_64F);
EE = EE.t();
- getCoeffMat((double*)EE.data, (double*)A.data);
+ getCoeffMat(EE.ptr<double>(), A.ptr<double>());
EE = EE.t();
A = A.colRange(0, 10).inv() * A.colRange(10, 20);
cv::Mat Evec = EE.col(0) * xs.back() + EE.col(1) * ys.back() + EE.col(2) * zs.back() + EE.col(3);
Evec /= norm(Evec);
- memcpy(e + count * 9, Evec.data, 9 * sizeof(double));
+ memcpy(e + count * 9, Evec.ptr(), 9 * sizeof(double));
count++;
}
if( depth == CV_32S || depth == CV_32F )
{
- const Point* ptsi = (const Point*)points.data;
- const Point2f* ptsf = (const Point2f*)points.data;
+ const Point* ptsi = points.ptr<Point>();
+ const Point2f* ptsf = points.ptr<Point2f>();
Point3f* dstf = lines.ptr<Point3f>();
for( int i = 0; i < npoints; i++ )
{
}
else
{
- const Point2d* ptsd = (const Point2d*)points.data;
+ const Point2d* ptsd = points.ptr<Point2d>();
Point3d* dstd = lines.ptr<Point3d>();
for( int i = 0; i < npoints; i++ )
{
{
if( cn == 3 )
{
- const Point3i* sptr = (const Point3i*)src.data;
- Point2f* dptr = (Point2f*)dst.data;
+ const Point3i* sptr = src.ptr<Point3i>();
+ Point2f* dptr = dst.ptr<Point2f>();
for( i = 0; i < npoints; i++ )
{
float scale = sptr[i].z != 0 ? 1.f/sptr[i].z : 1.f;
}
else
{
- const Vec4i* sptr = (const Vec4i*)src.data;
- Point3f* dptr = (Point3f*)dst.data;
+ const Vec4i* sptr = src.ptr<Vec4i>();
+ Point3f* dptr = dst.ptr<Point3f>();
for( i = 0; i < npoints; i++ )
{
float scale = sptr[i][3] != 0 ? 1.f/sptr[i][3] : 1.f;
{
if( cn == 3 )
{
- const Point3f* sptr = (const Point3f*)src.data;
- Point2f* dptr = (Point2f*)dst.data;
+ const Point3f* sptr = src.ptr<Point3f>();
+ Point2f* dptr = dst.ptr<Point2f>();
for( i = 0; i < npoints; i++ )
{
float scale = sptr[i].z != 0.f ? 1.f/sptr[i].z : 1.f;
}
else
{
- const Vec4f* sptr = (const Vec4f*)src.data;
- Point3f* dptr = (Point3f*)dst.data;
+ const Vec4f* sptr = src.ptr<Vec4f>();
+ Point3f* dptr = dst.ptr<Point3f>();
for( i = 0; i < npoints; i++ )
{
float scale = sptr[i][3] != 0.f ? 1.f/sptr[i][3] : 1.f;
{
if( cn == 3 )
{
- const Point3d* sptr = (const Point3d*)src.data;
- Point2d* dptr = (Point2d*)dst.data;
+ const Point3d* sptr = src.ptr<Point3d>();
+ Point2d* dptr = dst.ptr<Point2d>();
for( i = 0; i < npoints; i++ )
{
double scale = sptr[i].z != 0. ? 1./sptr[i].z : 1.;
}
else
{
- const Vec4d* sptr = (const Vec4d*)src.data;
- Point3d* dptr = (Point3d*)dst.data;
+ const Vec4d* sptr = src.ptr<Vec4d>();
+ Point3d* dptr = dst.ptr<Point3d>();
for( i = 0; i < npoints; i++ )
{
double scale = sptr[i][3] != 0.f ? 1./sptr[i][3] : 1.;
{
if( cn == 2 )
{
- const Point2i* sptr = (const Point2i*)src.data;
- Point3i* dptr = (Point3i*)dst.data;
+ const Point2i* sptr = src.ptr<Point2i>();
+ Point3i* dptr = dst.ptr<Point3i>();
for( i = 0; i < npoints; i++ )
dptr[i] = Point3i(sptr[i].x, sptr[i].y, 1);
}
else
{
- const Point3i* sptr = (const Point3i*)src.data;
- Vec4i* dptr = (Vec4i*)dst.data;
+ const Point3i* sptr = src.ptr<Point3i>();
+ Vec4i* dptr = dst.ptr<Vec4i>();
for( i = 0; i < npoints; i++ )
dptr[i] = Vec4i(sptr[i].x, sptr[i].y, sptr[i].z, 1);
}
{
if( cn == 2 )
{
- const Point2f* sptr = (const Point2f*)src.data;
- Point3f* dptr = (Point3f*)dst.data;
+ const Point2f* sptr = src.ptr<Point2f>();
+ Point3f* dptr = dst.ptr<Point3f>();
for( i = 0; i < npoints; i++ )
dptr[i] = Point3f(sptr[i].x, sptr[i].y, 1.f);
}
else
{
- const Point3f* sptr = (const Point3f*)src.data;
- Vec4f* dptr = (Vec4f*)dst.data;
+ const Point3f* sptr = src.ptr<Point3f>();
+ Vec4f* dptr = dst.ptr<Vec4f>();
for( i = 0; i < npoints; i++ )
dptr[i] = Vec4f(sptr[i].x, sptr[i].y, sptr[i].z, 1.f);
}
{
if( cn == 2 )
{
- const Point2d* sptr = (const Point2d*)src.data;
- Point3d* dptr = (Point3d*)dst.data;
+ const Point2d* sptr = src.ptr<Point2d>();
+ Point3d* dptr = dst.ptr<Point3d>();
for( i = 0; i < npoints; i++ )
dptr[i] = Point3d(sptr[i].x, sptr[i].y, 1.);
}
else
{
- const Point3d* sptr = (const Point3d*)src.data;
- Vec4d* dptr = (Vec4d*)dst.data;
+ const Point3d* sptr = src.ptr<Point3d>();
+ Vec4d* dptr = dst.ptr<Vec4d>();
for( i = 0; i < npoints; i++ )
dptr[i] = Vec4d(sptr[i].x, sptr[i].y, sptr[i].z, 1.);
}
int d1 = m1.channels() > 1 ? m1.channels() : m1.cols;
int d2 = m2.channels() > 1 ? m2.channels() : m2.cols;
int count = m1.checkVector(d1), count2 = m2.checkVector(d2);
- const int *m1ptr = (const int*)m1.data, *m2ptr = (const int*)m2.data;
+ const int *m1ptr = m1.ptr<int>(), *m2ptr = m2.ptr<int>();
ms1.create(modelPoints, 1, CV_MAKETYPE(m1.depth(), d1));
ms2.create(modelPoints, 1, CV_MAKETYPE(m2.depth(), d2));
- int *ms1ptr = (int*)ms1.data, *ms2ptr = (int*)ms2.data;
+ int *ms1ptr = ms1.ptr<int>(), *ms2ptr = ms2.ptr<int>();
CV_Assert( count >= modelPoints && count == count2 );
CV_Assert( (esz1 % sizeof(int)) == 0 && (esz2 % sizeof(int)) == 0 );
else
errf = err;
CV_Assert( errf.isContinuous() && errf.type() == CV_32F && (int)errf.total() == count );
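// [editorial note] Sorting the CV_32F errors through int pointers is valid here:
// non-negative IEEE-754 floats keep their numerical order when their bit patterns
// are compared as signed integers, and these errors are non-negative.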
- std::sort((int*)errf.data, (int*)errf.data + count);
+ std::sort(errf.ptr<int>(), errf.ptr<int>() + count);
double median = count % 2 != 0 ?
errf.at<float>(count/2) : (errf.at<float>(count/2-1) + errf.at<float>(count/2))*0.5;
int scale_g = winsize*winsize/8, scale_s = (1024 + scale_g)/(scale_g*2);
const int OFS = 256*5, TABSZ = OFS*2 + 256;
uchar tab[TABSZ];
- const uchar* sptr = src.data;
+ const uchar* sptr = src.ptr();
int srcstep = (int)src.step;
Size size = src.size();
ushort *sad, *hsad0, *hsad, *hsad_sub;
int *htext;
uchar *cbuf0, *cbuf;
- const uchar* lptr0 = left.data + lofs;
- const uchar* rptr0 = right.data + rofs;
+ const uchar* lptr0 = left.ptr() + lofs;
+ const uchar* rptr0 = right.ptr() + rofs;
const uchar *lptr, *lptr_sub, *rptr;
- short* dptr = (short*)disp.data;
+ short* dptr = disp.ptr<short>();
int sstep = (int)left.step;
int dstep = (int)(disp.step/sizeof(dptr[0]));
int cstep = (height + dy0 + dy1)*ndisp;
for( x = 0; x < width1; x++, dptr++ )
{
- short* costptr = cost.data ? (short*)cost.data + lofs + x : &costbuf;
+ short* costptr = cost.data ? cost.ptr<short>() + lofs + x : &costbuf;
int x0 = x - wsz2 - 1, x1 = x + wsz2;
const uchar* cbuf_sub = cbuf0 + ((x0 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
cbuf = cbuf0 + ((x1 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
int *sad, *hsad0, *hsad, *hsad_sub, *htext;
uchar *cbuf0, *cbuf;
- const uchar* lptr0 = left.data + lofs;
- const uchar* rptr0 = right.data + rofs;
+ const uchar* lptr0 = left.ptr() + lofs;
+ const uchar* rptr0 = right.ptr() + rofs;
const uchar *lptr, *lptr_sub, *rptr;
- short* dptr = (short*)disp.data;
+ short* dptr = disp.ptr<short>();
int sstep = (int)left.step;
int dstep = (int)(disp.step/sizeof(dptr[0]));
int cstep = (height+dy0+dy1)*ndisp;
for( x = 0; x < width1; x++, dptr++ )
{
- int* costptr = cost.data ? (int*)cost.data + lofs + x : &costbuf;
+ int* costptr = cost.data ? cost.ptr<int>() + lofs + x : &costbuf;
int x0 = x - wsz2 - 1, x1 = x + wsz2;
const uchar* cbuf_sub = cbuf0 + ((x0 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
cbuf = cbuf0 + ((x1 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
int cols = left->cols, rows = left->rows;
int _row0 = std::min(cvRound(range.start * rows / nstripes), rows);
int _row1 = std::min(cvRound(range.end * rows / nstripes), rows);
- uchar *ptr = slidingSumBuf->data + range.start * stripeBufSize;
+ uchar *ptr = slidingSumBuf->ptr() + range.start * stripeBufSize;
int FILTERED = (state->minDisparity - 1)*16;
Rect roi = validDisparityRect & Rect(0, _row0, cols, _row1 - _row0);
if( slidingSumBuf.cols < bufSize )
slidingSumBuf.create( 1, bufSize, CV_8U );
- uchar *_buf = slidingSumBuf.data;
+ uchar *_buf = slidingSumBuf.ptr();
parallel_for_(Range(0, 2), PrefilterInvoker(left0, right0, left, right, _buf, _buf + bufSize1, &params), 1);
width*16*img1.channels()*sizeof(PixType) + // temp buffer for computing per-pixel cost
width*(sizeof(CostType) + sizeof(DispType)) + 1024; // disp2cost + disp2
- if( !buffer.data || !buffer.isContinuous() ||
+ if( buffer.empty() || !buffer.isContinuous() ||
buffer.cols*buffer.rows*buffer.elemSize() < totalBufSize )
buffer.create(1, (int)totalBufSize, CV_8U);
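// [editorial note] buffer.empty() replaces the old !buffer.data test; both are true only
// when the Mat owns no data, so the reallocation condition is unchanged.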
// summary cost over different (nDirs) directions
- CostType* Cbuf = (CostType*)alignPtr(buffer.data, ALIGN);
+ CostType* Cbuf = (CostType*)alignPtr(buffer.ptr(), ALIGN);
CostType* Sbuf = Cbuf + CSBufSize;
CostType* hsumBuf = Sbuf + CSBufSize;
CostType* pixDiff = hsumBuf + costBufSize*hsumBufNRows;
int width = img.cols, height = img.rows, npixels = width*height;
size_t bufSize = npixels*(int)(sizeof(Point2s) + sizeof(int) + sizeof(uchar));
- if( !_buf.isContinuous() || !_buf.data || _buf.cols*_buf.rows*_buf.elemSize() < bufSize )
+ if( !_buf.isContinuous() || _buf.empty() || _buf.cols*_buf.rows*_buf.elemSize() < bufSize )
_buf.create(1, (int)bufSize, CV_8U);
- uchar* buf = _buf.data;
+ uchar* buf = _buf.ptr();
int i, j, dstep = (int)(img.step/sizeof(T));
int* labels = (int*)buf;
buf += npixels*sizeof(labels[0]);
if ((int)status >= 0)
{
if (type == CV_8UC1)
- status = ippiMarkSpeckles_8u_C1IR((Ipp8u *)img.data, (int)img.step, roisize,
+ status = ippiMarkSpeckles_8u_C1IR(img.ptr<Ipp8u>(), (int)img.step, roisize,
(Ipp8u)newVal, maxSpeckleSize, (Ipp8u)maxDiff, ippiNormL1, buffer);
else
- status = ippiMarkSpeckles_16s_C1IR((Ipp16s *)img.data, (int)img.step, roisize,
+ status = ippiMarkSpeckles_16s_C1IR(img.ptr<Ipp16s>(), (int)img.step, roisize,
(Ipp16s)newVal, maxSpeckleSize, (Ipp16s)maxDiff, ippiNormL1, buffer);
}
flags );
assert( cameraMatrix.type() == CV_64FC1 );
- memcpy( _cameraMatrix, cameraMatrix.data, 9*sizeof(double) );
+ memcpy( _cameraMatrix, cameraMatrix.ptr(), 9*sizeof(double) );
assert( cameraMatrix.type() == CV_64FC1 );
- memcpy( _distortionCoeffs, distCoeffs.data, 4*sizeof(double) );
+ memcpy( _distortionCoeffs, distCoeffs.ptr(), 4*sizeof(double) );
vector<Mat>::iterator rvecsIt = rvecs.begin();
vector<Mat>::iterator tvecsIt = tvecs.begin();
{
Mat r9( 3, 3, CV_64FC1 );
Rodrigues( *rvecsIt, r9 );
- memcpy( rm, r9.data, 9*sizeof(double) );
- memcpy( tm, tvecsIt->data, 3*sizeof(double) );
+ memcpy( rm, r9.ptr(), 9*sizeof(double) );
+ memcpy( tm, tvecsIt->ptr(), 3*sizeof(double) );
}
}
{
Mat left = imread(imglist[i*2]);
Mat right = imread(imglist[i*2+1]);
- if(!left.data || !right.data)
+ if(left.empty() || right.empty())
{
ts->printf( cvtest::TS::LOG, "Can not load images %s and %s, testcase %d\n",
imglist[i*2].c_str(), imglist[i*2+1].c_str(), testcase );
for( int i = 0, ni = 0, j = 0; i < nimages; i++, j += ni )
{
ni = (int)objectPoints[i].size();
- ((int*)npoints.data)[i] = ni;
+ npoints.ptr<int>()[i] = ni;
std::copy(objectPoints[i].begin(), objectPoints[i].end(), objPtData + j);
std::copy(imagePoints1[i].begin(), imagePoints1[i].end(), imgPtData + j);
std::copy(imagePoints2[i].begin(), imagePoints2[i].end(), imgPtData2 + j);
cv::gemm( T, invA2, 1, Mat(), 0, F0 );
F0 *= 1./f0[8];
- uchar* status = test_mat[TEMP][1].data;
+ uchar* status = test_mat[TEMP][1].ptr();
double err_level = method <= CV_FM_8POINT ? 1 : get_success_error_level( test_case_idx, OUTPUT, 1 );
- uchar* mtfm1 = test_mat[REF_OUTPUT][1].data;
- uchar* mtfm2 = test_mat[OUTPUT][1].data;
- double* f_prop1 = (double*)test_mat[REF_OUTPUT][0].data;
- double* f_prop2 = (double*)test_mat[OUTPUT][0].data;
+ uchar* mtfm1 = test_mat[REF_OUTPUT][1].ptr();
+ uchar* mtfm2 = test_mat[OUTPUT][1].ptr();
+ double* f_prop1 = test_mat[REF_OUTPUT][0].ptr<double>();
+ double* f_prop2 = test_mat[OUTPUT][0].ptr<double>();
int i, pt_count = test_mat[INPUT][2].cols;
Mat p1( 1, pt_count, CV_64FC2 );
cv::gemm( T1, T2, 1, Mat(), 0, F0 );
F0 *= 1./f0[8];
- uchar* status = test_mat[TEMP][1].data;
+ uchar* status = test_mat[TEMP][1].ptr();
double err_level = get_success_error_level( test_case_idx, OUTPUT, 1 );
- uchar* mtfm1 = test_mat[REF_OUTPUT][1].data;
- uchar* mtfm2 = test_mat[OUTPUT][1].data;
- double* e_prop1 = (double*)test_mat[REF_OUTPUT][0].data;
- double* e_prop2 = (double*)test_mat[OUTPUT][0].data;
+ uchar* mtfm1 = test_mat[REF_OUTPUT][1].ptr();
+ uchar* mtfm2 = test_mat[OUTPUT][1].ptr();
+ double* e_prop1 = test_mat[REF_OUTPUT][0].ptr<double>();
+ double* e_prop2 = test_mat[OUTPUT][0].ptr<double>();
Mat E_prop2 = Mat(3, 1, CV_64F, e_prop2);
int i, pt_count = test_mat[INPUT][2].cols;
- double* pose_prop1 = (double*)test_mat[REF_OUTPUT][2].data;
- double* pose_prop2 = (double*)test_mat[OUTPUT][2].data;
+ double* pose_prop1 = test_mat[REF_OUTPUT][2].ptr<double>();
+ double* pose_prop2 = test_mat[OUTPUT][2].ptr<double>();
double terr1 = cvtest::norm(Rt0.col(3) / norm(Rt0.col(3)) + test_mat[TEMP][3], NORM_L2);
double terr2 = cvtest::norm(Rt0.col(3) / norm(Rt0.col(3)) - test_mat[TEMP][3], NORM_L2);
Mat rvec;
Mat_<double> res = Q * Mat_<double>(4, 1, from);
res /= res(3, 0);
- out3d_t pixel_exp = *(Vec3d*)res.data;
+ out3d_t pixel_exp = *res.ptr<Vec3d>();
out3d_t pixel_out = _3dImg(y, x);
const int largeZValue = 10000; /* see documentation */
{
if( m && m->isContinuous() )
{
- sliceStart = m->data;
+ sliceStart = m->ptr();
sliceEnd = sliceStart + m->total()*elemSize;
}
seek((const int*)0);
CV_Assert(m && m->dims <= 2);
if( m->isContinuous() )
{
- sliceStart = m->data;
+ sliceStart = m->ptr();
sliceEnd = sliceStart + m->total()*elemSize;
}
int idx[] = {_row, _col};
CV_Assert(m && m->dims <= 2);
if( m->isContinuous() )
{
- sliceStart = m->data;
+ sliceStart = m->ptr();
sliceEnd = sliceStart + m->total()*elemSize;
}
int idx[] = {_pt.y, _pt.x};
{
int scn = (int)sc.total(), cn = CV_MAT_CN(buftype);
size_t esz = CV_ELEM_SIZE(buftype);
- getConvertFunc(sc.depth(), buftype)(sc.data, 1, 0, 1, scbuf, 1, Size(std::min(cn, scn), 1), 0);
+ getConvertFunc(sc.depth(), buftype)(sc.ptr(), 1, 0, 1, scbuf, 1, Size(std::min(cn, scn), 1), 0);
// unroll the scalar
if( scn < cn )
{
if( len == (size_t)(int)len )
{
sz.width = (int)len;
- func(src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, 0);
+ func(src1.ptr(), src1.step, src2.ptr(), src2.step, dst.ptr(), dst.step, sz, 0);
return;
}
}
Mat src1 = psrc1->getMat(), src2 = psrc2->getMat(), dst = _dst.getMat();
Size sz = getContinuousSize(src1, src2, dst, src1.channels());
- tab[depth1](src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, usrdata);
+ tab[depth1](src1.ptr(), src1.step, src2.ptr(), src2.step, dst.ptr(), dst.step, sz, usrdata);
return;
}
else
{
double fval = 0;
- getConvertFunc(depth2, CV_64F)(src2.data, 1, 0, 1, (uchar *)&fval, 1, Size(1, 1), 0);
+ getConvertFunc(depth2, CV_64F)(src2.ptr(), 1, 0, 1, (uchar *)&fval, 1, Size(1, 1), 0);
if( fval < getMinVal(depth1) )
return dst.setTo(Scalar::all(op == CMP_GT || op == CMP_GE || op == CMP_NE ? 255 : 0)), true;
_dst.create(src1.size(), CV_8UC(cn));
Mat dst = _dst.getMat();
Size sz = getContinuousSize(src1, src2, dst, src1.channels());
- getCmpFunc(src1.depth())(src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, &op);
+ getCmpFunc(src1.depth())(src1.ptr(), src1.step, src2.ptr(), src2.step, dst.ptr(), dst.step, sz, &op);
return;
}
else
{
double fval=0;
- getConvertFunc(depth2, CV_64F)(src2.data, 1, 0, 1, (uchar*)&fval, 1, Size(1,1), 0);
+ getConvertFunc(depth2, CV_64F)(src2.ptr(), 1, 0, 1, (uchar*)&fval, 1, Size(1,1), 0);
if( fval < getMinVal(depth1) )
{
dst = Scalar::all(op == CMP_GT || op == CMP_GE || op == CMP_NE ? 255 : 0);
int* iubuf = ilbuf + cn;
BinaryFunc sccvtfunc = getConvertFunc(ldepth, CV_32S);
- sccvtfunc(lscalar.data, 1, 0, 1, (uchar*)ilbuf, 1, Size(cn, 1), 0);
- sccvtfunc(uscalar.data, 1, 0, 1, (uchar*)iubuf, 1, Size(cn, 1), 0);
+ sccvtfunc(lscalar.ptr(), 1, 0, 1, (uchar*)ilbuf, 1, Size(cn, 1), 0);
+ sccvtfunc(uscalar.ptr(), 1, 0, 1, (uchar*)iubuf, 1, Size(cn, 1), 0);
int minval = cvRound(getMinVal(sdepth)), maxval = cvRound(getMaxVal(sdepth));
for( int k = 0; k < cn; k++ )
int* iubuf = ilbuf + cn;
BinaryFunc sccvtfunc = getConvertFunc(scdepth, CV_32S);
- sccvtfunc(lb.data, 1, 0, 1, (uchar*)ilbuf, 1, Size(cn, 1), 0);
- sccvtfunc(ub.data, 1, 0, 1, (uchar*)iubuf, 1, Size(cn, 1), 0);
+ sccvtfunc(lb.ptr(), 1, 0, 1, (uchar*)ilbuf, 1, Size(cn, 1), 0);
+ sccvtfunc(ub.ptr(), 1, 0, 1, (uchar*)iubuf, 1, Size(cn, 1), 0);
int minval = cvRound(getMinVal(depth)), maxval = cvRound(getMaxVal(depth));
for( int k = 0; k < cn; k++ )
if( src.dims <= 2 )
{
Size sz = getContinuousSize(src, dst, cn);
- func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
+ func( src.ptr(), src.step, 0, 0, dst.ptr(), dst.step, sz, scale );
}
else
{
CV_DbgAssert(lutcn == 3 || lutcn == 4);
if (lutcn == 3)
{
- IppStatus status = ippiCopy_8u_C3P3R(lut.data, (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
+ IppStatus status = ippiCopy_8u_C3P3R(lut.ptr(), (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
if (status < 0)
{
setIppErrorStatus();
}
else if (lutcn == 4)
{
- IppStatus status = ippiCopy_8u_C4P4R(lut.data, (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
+ IppStatus status = ippiCopy_8u_C4P4R(lut.ptr(), (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
if (status < 0)
{
setIppErrorStatus();
if (lutcn == 3)
{
if (ippiLUTPalette_8u_C3R(
- src.data, (int)src.step[0], dst.data, (int)dst.step[0],
+ src.ptr(), (int)src.step[0], dst.ptr(), (int)dst.step[0],
ippiSize(dst.size()), lutTable, 8) >= 0)
return;
}
else if (lutcn == 4)
{
if (ippiLUTPalette_8u_C4R(
- src.data, (int)src.step[0], dst.data, (int)dst.step[0],
+ src.ptr(), (int)src.step[0], dst.ptr(), (int)dst.step[0],
ippiSize(dst.size()), lutTable, 8) >= 0)
return;
}
int len = (int)it.size;
for( size_t i = 0; i < it.nplanes; i++, ++it )
- func(ptrs[0], lut_.data, ptrs[1], len, cn, lutcn);
+ func(ptrs[0], lut_.ptr(), ptrs[1], len, cn, lutcn);
}
private:
LUTParallelBody(const LUTParallelBody&);
int len = (int)it.size;
for( size_t i = 0; i < it.nplanes; i++, ++it )
- func(ptrs[0], lut.data, ptrs[1], len, cn, lutcn);
+ func(ptrs[0], lut.ptr(), ptrs[1], len, cn, lutcn);
}
namespace cv {
if (ippFunc != 0)
{
- if (ippFunc(src.data, (int)src.step, dst.data, (int)dst.step, ippiSize(src.cols, src.rows), axis) >= 0)
+ if (ippFunc(src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, ippiSize(src.cols, src.rows), axis) >= 0)
return;
setIppErrorStatus();
}
else if (ippFuncI != 0)
{
- if (ippFuncI(dst.data, (int)dst.step, roisize, axis) >= 0)
+ if (ippFuncI(dst.ptr(), (int)dst.step, roisize, axis) >= 0)
return;
setIppErrorStatus();
}
#endif
if( flip_mode <= 0 )
- flipVert( src.data, src.step, dst.data, dst.step, src.size(), esz );
+ flipVert( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
else
- flipHoriz( src.data, src.step, dst.data, dst.step, src.size(), esz );
+ flipHoriz( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
if( flip_mode < 0 )
- flipHoriz( dst.data, dst.step, dst.data, dst.step, dst.size(), esz );
+ flipHoriz( dst.ptr(), dst.step, dst.ptr(), dst.step, dst.size(), esz );
}
/*#ifdef HAVE_OPENCL
for( y = 0; y < ssize.height; y++ )
{
for( x = 0; x < dsize.width; x += ssize.width )
- memcpy( dst.data + y*dst.step + x, src.data + y*src.step, ssize.width );
+ memcpy( dst.ptr(y) + x, src.ptr(y), ssize.width );
}
for( ; y < dsize.height; y++ )
- memcpy( dst.data + y*dst.step, dst.data + (y - ssize.height)*dst.step, dsize.width );
+ memcpy( dst.ptr(y), dst.ptr(y - ssize.height), dsize.width );
}
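// [editorial sketch, not from the patch] A minimal, hypothetical illustration of the
// row-pointer idiom used in the hunk above ('example' is an assumed 8-bit image):
//   cv::Mat example(4, 8, CV_8UC1);
//   for( int y = 0; y < example.rows; y++ )
//   {
//       uchar* row = example.ptr(y);      // same address as example.data + example.step*y
//       memset( row, y, example.cols );   // fill row y with the value y
//   }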
Mat repeat(const Mat& src, int ny, int nx)
#endif
if( borderType != BORDER_CONSTANT )
- copyMakeBorder_8u( src.data, src.step, src.size(),
- dst.data, dst.step, dst.size(),
+ copyMakeBorder_8u( src.ptr(), src.step, src.size(),
+ dst.ptr(), dst.step, dst.size(),
top, left, (int)src.elemSize(), borderType );
else
{
cn1 = 1;
}
scalarToRawData(value, buf, CV_MAKETYPE(src.depth(), cn1), cn);
- copyMakeConstBorder_8u( src.data, src.step, src.size(),
- dst.data, dst.step, dst.size(),
+ copyMakeConstBorder_8u( src.ptr(), src.step, src.size(),
+ dst.ptr(), dst.step, dst.size(),
top, left, (int)src.elemSize(), (uchar*)(double*)buf );
}
}
{
int nlabels = _labels.checkVector(1, CV_32S, true);
CV_Assert(nlabels == n);
- _labels_data = (const int*)_labels.data;
+ _labels_data = _labels.ptr<int>();
}
Mat sumstack(MAX_TREE_DEPTH*2, ptdims*2, CV_64F);
int k = idx[i];
CV_Assert( (unsigned)k < (unsigned)points.rows );
const float* src = points.ptr<float>(k);
- if( pts.data )
+ if( !pts.empty() )
std::copy(src, src + ptdims, pts.ptr<float>(i));
if( dstlabels )
dstlabels[i] = srclabels ? srclabels[k] : k;
count = dx + dy + 1;
}
- this->ptr0 = img.data;
+ this->ptr0 = img.ptr();
this->step = (int)img.step;
this->elemSize = bt_pix0;
}
int cb = ((uchar*)color)[0], cg = ((uchar*)color)[1], cr = ((uchar*)color)[2];
int _cb, _cg, _cr;
int nch = img.channels();
- uchar* ptr = img.data;
+ uchar* ptr = img.ptr();
size_t step = img.step;
Size size = img.size();
int cg = ((uchar*)color)[1];
int cr = ((uchar*)color)[2];
int pix_size = (int)img.elemSize();
- uchar *ptr = img.data, *tptr;
+ uchar *ptr = img.ptr(), *tptr;
size_t step = img.step;
Size size = img.size(), sizeScaled(size.width*XY_ONE, size.height*XY_ONE);
int i, y, imin = 0, left = 0, right = 1, x1, x2;
int edges = npts;
int xmin, xmax, ymin, ymax;
- uchar* ptr = img.data;
+ uchar* ptr = img.ptr();
Size size = img.size();
int pix_size = (int)img.elemSize();
Point p0;
if( !clipline )
{
// convert x's from fixed-point to image coordinates
- uchar *timg = img.data + y * img.step;
+ uchar *timg = img.ptr(y);
int x1 = keep_prelast->x;
int x2 = prelast->x;
Size size = img.size();
size_t step = img.step;
int pix_size = (int)img.elemSize();
- uchar* ptr = img.data;
+ uchar* ptr = img.ptr();
int err = 0, dx = radius, dy = 0, plus = 1, minus = (radius << 1) - 1;
int inside = center.x >= radius && center.x < size.width - radius &&
center.y >= radius && center.y < size.height - radius;
{
Mat img = _img.getMat(), points = _points.getMat();
CV_Assert(points.checkVector(2, CV_32S) >= 0);
- fillConvexPoly(img, (const Point*)points.data, points.rows*points.cols*points.channels()/2, color, lineType, shift);
+ fillConvexPoly(img, points.ptr<Point>(), points.rows*points.cols*points.channels()/2, color, lineType, shift);
}
{
Mat p = pts.getMat(i);
CV_Assert(p.checkVector(2, CV_32S) >= 0);
- ptsptr[i] = (Point*)p.data;
+ ptsptr[i] = p.ptr<Point>();
npts[i] = p.rows*p.cols*p.channels()/2;
}
fillPoly(img, (const Point**)ptsptr, npts, (int)ncontours, color, lineType, shift, offset);
if( p.total() == 0 )
continue;
CV_Assert(p.checkVector(2, CV_32S) >= 0);
- ptsptr[i] = (Point*)p.data;
+ ptsptr[i] = p.ptr<Point>();
npts[i] = p.rows*p.cols*p.channels()/2;
}
polylines(img, (const Point**)ptsptr, npts, (int)ncontours, isClosed, color, thickness, lineType, shift);
{
Mat ci = contours.getMat(i);
cvMakeSeqHeaderForArray(CV_SEQ_POLYGON, sizeof(CvSeq), sizeof(Point),
- !ci.empty() ? (void*)ci.data : 0, (int)ci.total(),
+ !ci.empty() ? (void*)ci.ptr() : 0, (int)ci.total(),
&seq[i], &block[i] );
int h_next = hierarchy[i][0], h_prev = hierarchy[i][1],
int npoints = ci.checkVector(2, CV_32S);
CV_Assert( npoints > 0 );
cvMakeSeqHeaderForArray( CV_SEQ_POLYGON, sizeof(CvSeq), sizeof(Point),
- ci.data, npoints, &seq[i], &block[i] );
+ ci.ptr(), npoints, &seq[i], &block[i] );
}
if( hierarchy.empty() || maxLevel == 0 )
}
for( int i = range.start; i < range.end; ++i)
- if(!ippidft((Ipp32fc*)(src.data+i*src.step), (int)src.step,(Ipp32fc*)(dst.data+i*dst.step), (int)dst.step, pDFTSpec, (Ipp8u*)pBuffer))
+ if(!ippidft(src.ptr<Ipp32fc>(i), (int)src.step, dst.ptr<Ipp32fc>(i), (int)dst.step, pDFTSpec, (Ipp8u*)pBuffer))
{
*ok = false;
}
}
if (!inv)
- status = ippiDFTFwd_CToC_32fc_C1R( (Ipp32fc*)src.data, (int)src.step, (Ipp32fc*)dst.data, (int)dst.step, pDFTSpec, pBuffer );
+ status = ippiDFTFwd_CToC_32fc_C1R( src.ptr<Ipp32fc>(), (int)src.step, dst.ptr<Ipp32fc>(), (int)dst.step, pDFTSpec, pBuffer );
else
- status = ippiDFTInv_CToC_32fc_C1R( (Ipp32fc*)src.data, (int)src.step, (Ipp32fc*)dst.data, (int)dst.step, pDFTSpec, pBuffer );
+ status = ippiDFTInv_CToC_32fc_C1R( src.ptr<Ipp32fc>(), (int)src.step, dst.ptr<Ipp32fc>(), (int)dst.step, pDFTSpec, pBuffer );
if ( sizeBuffer > 0 )
ippFree( pBuffer );
{
int a = 0, b = count;
uchar *buf0, *buf1, *dbuf0, *dbuf1;
- const uchar* sptr0 = src.data;
- uchar* dptr0 = dst.data;
+ const uchar* sptr0 = src.ptr();
+ uchar* dptr0 = dst.ptr();
buf0 = ptr;
ptr += len*complex_elem_size;
buf1 = ptr;
if( depth == CV_32F )
{
- const float* dataA = (const float*)srcA.data;
- const float* dataB = (const float*)srcB.data;
- float* dataC = (float*)dst.data;
+ const float* dataA = srcA.ptr<float>();
+ const float* dataB = srcB.ptr<float>();
+ float* dataC = dst.ptr<float>();
size_t stepA = srcA.step/sizeof(dataA[0]);
size_t stepB = srcB.step/sizeof(dataB[0]);
}
else
{
- const double* dataA = (const double*)srcA.data;
- const double* dataB = (const double*)srcB.data;
- double* dataC = (double*)dst.data;
+ const double* dataA = srcA.ptr<double>();
+ const double* dataB = srcB.ptr<double>();
+ double* dataC = dst.ptr<double>();
size_t stepA = srcA.step/sizeof(dataA[0]);
size_t stepB = srcB.step/sizeof(dataB[0]);
pBuffer = (uchar*)buf;
for( int i = range.start; i < range.end; ++i)
- if(!(*ippidct)((float*)(src->data+i*src->step), (int)src->step,(float*)(dst->data+i*dst->step), (int)dst->step, pDCTSpec, (Ipp8u*)pBuffer))
+ if(!(*ippidct)(src->ptr<float>(i), (int)src->step, dst->ptr<float>(i), (int)dst->step, pDCTSpec, (Ipp8u*)pBuffer))
*ok = false;
}
else
buf.allocate( bufSize );
pBuffer = (uchar*)buf;
- status = ippFunc((float*)src.data, (int)src.step, (float*)dst.data, (int)dst.step, pDCTSpec, (Ipp8u*)pBuffer);
+ status = ippFunc(src.ptr<float>(), (int)src.step, dst.ptr<float>(), (int)dst.step, pDCTSpec, (Ipp8u*)pBuffer);
}
if (pDCTSpec)
for( ; stage <= end_stage; stage++ )
{
- uchar *sptr = src.data, *dptr = dst.data;
+ const uchar* sptr = src.ptr();
+ uchar* dptr = dst.ptr();
size_t sstep0, sstep1, dstep0, dstep1;
if( stage == 0 )
double result = 0;
int type = mat.type(), rows = mat.rows;
size_t step = mat.step;
- const uchar* m = mat.data;
+ const uchar* m = mat.ptr();
CV_Assert( !mat.empty() );
CV_Assert( mat.rows == mat.cols && (type == CV_32F || type == CV_64F));
Mat a(rows, rows, CV_32F, (uchar*)buffer);
mat.copyTo(a);
- result = LU((float*)a.data, a.step, rows, 0, 0, 0);
+ result = LU(a.ptr<float>(), a.step, rows, 0, 0, 0);
if( result )
{
for( int i = 0; i < rows; i++ )
- result *= ((const float*)(a.data + a.step*i))[i];
+ result *= a.at<float>(i,i);
result = 1./result;
}
}
Mat a(rows, rows, CV_64F, (uchar*)buffer);
mat.copyTo(a);
- result = LU((double*)a.data, a.step, rows, 0, 0, 0);
+ result = LU(a.ptr<double>(), a.step, rows, 0, 0, 0);
if( result )
{
for( int i = 0; i < rows; i++ )
- result *= ((const double*)(a.data + a.step*i))[i];
+ result *= a.at<double>(i,i);
result = 1./result;
}
}
AutoBuffer<uchar> _buf((m*nm + nm + nm*n)*esz + sizeof(double));
uchar* buf = alignPtr((uchar*)_buf, (int)esz);
Mat u(m, nm, type, buf);
- Mat w(nm, 1, type, u.data + m*nm*esz);
- Mat vt(nm, n, type, w.data + nm*esz);
+ Mat w(nm, 1, type, u.ptr() + m*nm*esz);
+ Mat vt(nm, n, type, w.ptr() + nm*esz);
SVD::compute(src, w, u, vt);
SVD::backSubst(w, u, vt, Mat(), _dst);
AutoBuffer<uchar> _buf((n*n*2 + n)*esz + sizeof(double));
uchar* buf = alignPtr((uchar*)_buf, (int)esz);
Mat u(n, n, type, buf);
- Mat w(n, 1, type, u.data + n*n*esz);
- Mat vt(n, n, type, w.data + n*esz);
+ Mat w(n, 1, type, u.ptr() + n*n*esz);
+ Mat vt(n, n, type, w.ptr() + n*esz);
eigen(src, w, vt);
transpose(vt, u);
if( n <= 3 )
{
- const uchar* srcdata = src.data;
- uchar* dstdata = dst.data;
+ const uchar* srcdata = src.ptr();
+ uchar* dstdata = dst.ptr();
size_t srcstep = src.step;
size_t dststep = dst.step;
setIdentity(dst);
if( method == DECOMP_LU && type == CV_32F )
- result = LU((float*)src1.data, src1.step, n, (float*)dst.data, dst.step, n) != 0;
+ result = LU(src1.ptr<float>(), src1.step, n, dst.ptr<float>(), dst.step, n) != 0;
else if( method == DECOMP_LU && type == CV_64F )
- result = LU((double*)src1.data, src1.step, n, (double*)dst.data, dst.step, n) != 0;
+ result = LU(src1.ptr<double>(), src1.step, n, dst.ptr<double>(), dst.step, n) != 0;
else if( method == DECOMP_CHOLESKY && type == CV_32F )
- result = Cholesky((float*)src1.data, src1.step, n, (float*)dst.data, dst.step, n);
+ result = Cholesky(src1.ptr<float>(), src1.step, n, dst.ptr<float>(), dst.step, n);
else
- result = Cholesky((double*)src1.data, src1.step, n, (double*)dst.data, dst.step, n);
+ result = Cholesky(src1.ptr<double>(), src1.step, n, dst.ptr<double>(), dst.step, n);
if( !result )
dst = Scalar(0);
#define bf(y) ((float*)(bdata + y*src2step))[0]
#define bd(y) ((double*)(bdata + y*src2step))[0]
- const uchar* srcdata = src.data;
- const uchar* bdata = _src2.data;
- uchar* dstdata = dst.data;
+ const uchar* srcdata = src.ptr();
+ const uchar* bdata = _src2.ptr();
+ uchar* dstdata = dst.ptr();
size_t srcstep = src.step;
size_t src2step = _src2.step;
size_t dststep = dst.step;
eigen(src, evals, evects);
if( evects0.data != evects.data )
{
- const uchar* p = evects0.data;
+ const uchar* p = evects0.ptr();
evects.convertTo(evects0, evects0.type());
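// [editorial note] The assertion below verifies that convertTo() wrote into the caller's
// existing buffer (the data pointer is unchanged), i.e. the conversion happened in place
// rather than reallocating evects0.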
- CV_Assert( p == evects0.data );
+ CV_Assert( p == evects0.ptr() );
}
}
else
eigen(src, evals);
if( evals0.data != evals.data )
{
- const uchar* p = evals0.data;
+ const uchar* p = evals0.ptr();
if( evals0.size() == evals.size() )
evals.convertTo(evals0, evals0.type());
else if( evals0.type() == evals.type() )
cv::transpose(evals, evals0);
else
cv::Mat(evals.t()).convertTo(evals0, evals0.type());
- CV_Assert( p == evals0.data );
+ CV_Assert( p == evals0.ptr() );
}
}
cv::SVD svd;
if( w.size() == cv::Size(nm, 1) )
- svd.w = cv::Mat(nm, 1, type, w.data );
+ svd.w = cv::Mat(nm, 1, type, w.ptr() );
else if( w.isContinuous() )
svd.w = w;
((m != n && (svd.u.size() == cv::Size(mn, mn) ||
svd.vt.size() == cv::Size(mn, mn))) ? cv::SVD::FULL_UV : 0));
- if( u.data )
+ if( !u.empty() )
{
if( flags & CV_SVD_U_T )
cv::transpose( svd.u, u );
}
}
- if( v.data )
+ if( !v.empty() )
{
if( !(flags & CV_SVD_V_T) )
cv::transpose( svd.vt, v );
depth == CV_64F ? (ippsPolarToCart)ippsPolarToCart_64f : 0;
CV_Assert(ippFunc != 0);
- IppStatus status = ippFunc(Mag.data, Angle.data, X.data, Y.data, static_cast<int>(cn * X.total()));
+ IppStatus status = ippFunc(Mag.ptr(), Angle.ptr(), X.ptr(), Y.ptr(), static_cast<int>(cn * X.total()));
if (status >= 0)
return;
setIppErrorStatus();
}
size.width *= cn;
- IppStatus status = ippiSqr_32f_C1R((const Ipp32f *)src.data, srcstep, (Ipp32f *)dst.data, dststep, ippiSize(size.width, size.height));
+ IppStatus status = ippiSqr_32f_C1R(src.ptr<Ipp32f>(), srcstep, dst.ptr<Ipp32f>(), dststep, ippiSize(size.width, size.height));
if (status >= 0)
return;
if (src.isContinuous() && dst.isContinuous())
{
IppStatus status = depth == CV_32F ?
- ippsPowx_32f_A21((const Ipp32f *)src.data, (Ipp32f)power, (Ipp32f*)dst.data, (Ipp32s)(src.total() * cn)) :
- ippsPowx_64f_A50((const Ipp64f *)src.data, power, (Ipp64f*)dst.data, (Ipp32s)(src.total() * cn));
+ ippsPowx_32f_A21(src.ptr<Ipp32f>(), (Ipp32f)power, dst.ptr<Ipp32f>(), (Ipp32s)(src.total() * cn)) :
+ ippsPowx_64f_A50(src.ptr<Ipp64f>(), power, dst.ptr<Ipp64f>(), (Ipp32s)(src.total() * cn));
if (status >= 0)
return;
{
Cv32suf a, b;
int ia, ib;
- const int* isrc = (const int*)src.data;
+ const int* isrc = src.ptr<int>();
size_t step = src.step/sizeof(isrc[0]);
a.f = (float)std::max(minVal, (double)-FLT_MAX);
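// [editorial note] The Cv32suf/Cv64suf unions reinterpret the float bounds as integers so
// that the subsequent per-element range test can be done with integer comparisons on the
// (sign-adjusted) bit patterns.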
{
Cv64suf a, b;
int64 ia, ib;
- const int64* isrc = (const int64*)src.data;
+ const int64* isrc = src.ptr<int64>();
size_t step = src.step/sizeof(isrc[0]);
a.f = minVal;
break;
}
- if( C.data )
+ if( !C.empty() )
{
CV_Assert( C.type() == type &&
(((flags&GEMM_3_T) == 0 && C.rows == d_size.height && C.cols == d_size.width) ||
{
if( type == CV_32F )
{
- float* d = (float*)D.data;
- const float *a = (const float*)A.data,
- *b = (const float*)B.data,
+ float* d = D.ptr<float>();
+ const float *a = A.ptr<float>(),
+ *b = B.ptr<float>(),
*c = (const float*)C.data;
size_t d_step = D.step/sizeof(d[0]),
a_step = A.step/sizeof(a[0]),
if( type == CV_64F )
{
- double* d = (double*)D.data;
- const double *a = (const double*)A.data,
- *b = (const double*)B.data,
+ double* d = D.ptr<double>();
+ const double *a = A.ptr<double>(),
+ *b = B.ptr<double>(),
*c = (const double*)C.data;
size_t d_step = D.step/sizeof(d[0]),
a_step = A.step/sizeof(a[0]),
(d_size.width <= block_lin_size &&
d_size.height <= block_lin_size && len <= block_lin_size) )
{
- singleMulFunc( A.data, A.step, B.data, b_step, Cdata, Cstep,
- matD->data, matD->step, a_size, d_size, alpha, beta, flags );
+ singleMulFunc( A.ptr(), A.step, B.ptr(), b_step, Cdata, Cstep,
+ matD->ptr(), matD->step, a_size, d_size, alpha, beta, flags );
}
else
{
else
b_step0 = elem_size, b_step1 = b_step;
- if( !C.data )
+ if( C.empty() )
{
c_step0 = c_step1 = 0;
flags &= ~GEMM_3_T;
for( j = 0; j < d_size.width; j += dj )
{
- uchar* _d = matD->data + i*matD->step + j*elem_size;
+ uchar* _d = matD->ptr() + i*matD->step + j*elem_size;
const uchar* _c = Cdata + i*c_step0 + j*c_step1;
size_t _d_step = matD->step;
dj = dn0;
for( k = 0; k < len; k += dk )
{
- const uchar* _a = A.data + i*a_step0 + k*a_step1;
+ const uchar* _a = A.ptr() + i*a_step0 + k*a_step1;
size_t _a_step = A.step;
- const uchar* _b = B.data + k*b_step0 + j*b_step1;
+ const uchar* _b = B.ptr() + k*b_step0 + j*b_step1;
size_t _b_step = b_step;
Size a_bl_size;
if( dk0 < len )
storeFunc( _c, Cstep, _d, _d_step,
- matD->data + i*matD->step + j*elem_size,
+ matD->ptr(i) + j*elem_size,
matD->step, Size(dj,di), alpha, beta, flags );
}
}
_mbuf.allocate(dcn*(scn+1));
mbuf = (double*)_mbuf;
Mat tmp(dcn, scn+1, mtype, mbuf);
- memset(tmp.data, 0, tmp.total()*tmp.elemSize());
+ memset(tmp.ptr(), 0, tmp.total()*tmp.elemSize());
if( m.cols == scn+1 )
m.convertTo(tmp, mtype);
else
m = tmp;
}
else
- mbuf = (double*)m.data;
+ mbuf = m.ptr<double>();
if( scn == dcn )
{
m = tmp;
}
else
- mbuf = (double*)m.data;
+ mbuf = m.ptr<double>();
TransformFunc func = depth == CV_32F ?
(TransformFunc)perspectiveTransform_32f :
if (src1.isContinuous() && src2.isContinuous() && dst.isContinuous())
{
size_t len = src1.total()*cn;
- func(src1.data, src2.data, dst.data, (int)len, palpha);
+ func(src1.ptr(), src2.ptr(), dst.ptr(), (int)len, palpha);
return;
}
{
CV_Assert( data[i].size() == size && data[i].type() == type );
if( data[i].isContinuous() )
- memcpy( _data.ptr(i), data[i].data, sz*esz );
+ memcpy( _data.ptr(i), data[i].ptr(), sz*esz );
else
{
Mat dataRow(size.height, size.width, type, _data.ptr(i));
if( depth == CV_32F )
{
- const float* src1 = (const float*)v1.data;
- const float* src2 = (const float*)v2.data;
+ const float* src1 = v1.ptr<float>();
+ const float* src2 = v2.ptr<float>();
size_t step1 = v1.step/sizeof(src1[0]);
size_t step2 = v2.step/sizeof(src2[0]);
double* diff = buf;
- const float* mat = (const float*)icovar.data;
+ const float* mat = icovar.ptr<float>();
size_t matstep = icovar.step/sizeof(mat[0]);
for( ; sz.height--; src1 += step1, src2 += step2, diff += sz.width )
}
else if( depth == CV_64F )
{
- const double* src1 = (const double*)v1.data;
- const double* src2 = (const double*)v2.data;
+ const double* src1 = v1.ptr<double>();
+ const double* src2 = v2.ptr<double>();
size_t step1 = v1.step/sizeof(src1[0]);
size_t step2 = v2.step/sizeof(src2[0]);
double* diff = buf;
- const double* mat = (const double*)icovar.data;
+ const double* mat = icovar.ptr<double>();
size_t matstep = icovar.step/sizeof(mat[0]);
for( ; sz.height--; src1 += step1, src2 += step2, diff += sz.width )
MulTransposedR( const Mat& srcmat, Mat& dstmat, const Mat& deltamat, double scale )
{
int i, j, k;
- const sT* src = (const sT*)srcmat.data;
- dT* dst = (dT*)dstmat.data;
- const dT* delta = (const dT*)deltamat.data;
+ const sT* src = srcmat.ptr<sT>();
+ dT* dst = dstmat.ptr<dT>();
+ const dT* delta = deltamat.ptr<dT>();
size_t srcstep = srcmat.step/sizeof(src[0]);
size_t dststep = dstmat.step/sizeof(dst[0]);
size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0;
MulTransposedL( const Mat& srcmat, Mat& dstmat, const Mat& deltamat, double scale )
{
int i, j, k;
- const sT* src = (const sT*)srcmat.data;
- dT* dst = (dT*)dstmat.data;
- const dT* delta = (const dT*)deltamat.data;
+ const sT* src = srcmat.ptr<sT>();
+ dT* dst = dstmat.ptr<dT>();
+ const dT* delta = deltamat.ptr<dT>();
size_t srcstep = srcmat.step/sizeof(src[0]);
size_t dststep = dstmat.step/sizeof(dst[0]);
size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0;
dtype = std::max(std::max(CV_MAT_DEPTH(dtype >= 0 ? dtype : stype), delta.depth()), CV_32F);
CV_Assert( src.channels() == 1 );
- if( delta.data )
+ if( !delta.empty() )
{
CV_Assert( delta.channels() == 1 &&
(delta.rows == src.rows || delta.rows == 1) &&
{
Mat src2;
const Mat* tsrc = &src;
- if( delta.data )
+ if( !delta.empty() )
{
if( delta.size() == src.size() )
subtract( src, delta, src2 );
Mat covar( count, count, ctype );
- if( _mean.data )
+ if( !_mean.empty() )
{
CV_Assert( _mean.size() == mean_sz );
_mean.convertTo(mean, ctype);
Mat covar( count, count, ctype );
- if( _mean.data )
+ if( !_mean.empty() )
{
CV_Assert( _mean.size() == mean_sz );
_mean.convertTo(mean, ctype);
void PCA::project(InputArray _data, OutputArray result) const
{
Mat data = _data.getMat();
- CV_Assert( mean.data && eigenvectors.data &&
+ CV_Assert( !mean.empty() && !eigenvectors.empty() &&
((mean.rows == 1 && mean.cols == data.cols) || (mean.cols == 1 && mean.rows == data.rows)));
Mat tmp_data, tmp_mean = repeat(mean, data.rows/mean.rows, data.cols/mean.cols);
int ctype = mean.type();
void PCA::backProject(InputArray _data, OutputArray result) const
{
Mat data = _data.getMat();
- CV_Assert( mean.data && eigenvectors.data &&
+ CV_Assert( !mean.empty() && !eigenvectors.empty() &&
((mean.rows == 1 && eigenvectors.rows == data.cols) ||
(mean.cols == 1 && eigenvectors.rows == data.rows)));
pca.eigenvectors = evects;
pca(data, (flags & CV_PCA_USE_AVG) ? mean : cv::Mat(),
- flags, evals.data ? evals.rows + evals.cols - 1 : 0);
+ flags, !evals.empty() ? evals.rows + evals.cols - 1 : 0);
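// [editorial note] evals.rows + evals.cols - 1 equals the number of eigenvalues whether
// evals was passed as a row vector or as a column vector.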
if( pca.mean.size() == mean.size() )
pca.mean.convertTo( mean, mean.type() );
m.datalimit = m.datastart + m.size[0]*m.step[0];
if( m.size[0] > 0 )
{
- m.dataend = m.data + m.size[d-1]*m.step[d-1];
+ m.dataend = m.ptr() + m.size[d-1]*m.step[d-1];
for( int i = 0; i < d-1; i++ )
m.dataend += (m.size[i] - 1)*m.step[i];
}
}
Mat buf(total, 1, type);
- cvCvtSeqToArray(seq, buf.data, CV_WHOLE_SEQ);
+ cvCvtSeqToArray(seq, buf.ptr(), CV_WHOLE_SEQ);
return buf;
}
CV_Error(CV_StsBadArg, "Unknown array type");
{
CV_Assert( i < 0 );
const Mat * const m = ((const Mat*)obj);
- return (size_t)(m->data - m->datastart);
+ return (size_t)(m->ptr() - m->datastart);
}
if( k == UMAT )
return 1;
CV_Assert( i < (int)vv.size() );
- return (size_t)(vv[i].data - vv[i].datastart);
+ return (size_t)(vv[i].ptr() - vv[i].datastart);
}
if( k == STD_VECTOR_UMAT )
{
Mat value = arr.getMat();
CV_Assert( checkScalar(value, type(), arr.kind(), _InputArray::GPU_MAT) );
- ((cuda::GpuMat*)obj)->setTo(Scalar(Vec<double, 4>((double *)value.data)), mask);
+ ((cuda::GpuMat*)obj)->setTo(Scalar(Vec<double, 4>(value.ptr<double>())), mask);
}
else
CV_Error(Error::StsNotImplemented, "");
if( type == CV_32FC1 )
{
- float* data = (float*)m.data;
+ float* data = m.ptr<float>();
float val = (float)s[0];
size_t step = m.step/sizeof(data[0]);
}
else if( type == CV_64FC1 )
{
- double* data = (double*)m.data;
+ double* data = m.ptr<double>();
double val = s[0];
size_t step = m.step/sizeof(data[0]);
if( type == CV_32FC1 )
{
- const float* ptr = (const float*)m.data;
+ const float* ptr = m.ptr<float>();
size_t step = m.step/sizeof(ptr[0]) + 1;
double _s = 0;
for( i = 0; i < nm; i++ )
if( type == CV_64FC1 )
{
- const double* ptr = (const double*)m.data;
+ const double* ptr = m.ptr<double>();
size_t step = m.step/sizeof(ptr[0]) + 1;
double _s = 0;
for( i = 0; i < nm; i++ )
IppiSize roiSize = { src.cols, src.rows };
if (ippFunc != 0)
{
- if (ippFunc(src.data, (int)src.step, dst.data, (int)dst.step, roiSize) >= 0)
+ if (ippFunc(src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, roiSize) >= 0)
return;
setIppErrorStatus();
}
else if (ippFuncI != 0)
{
- if (ippFuncI(dst.data, (int)dst.step, roiSize) >= 0)
+ if (ippFuncI(dst.ptr(), (int)dst.step, roiSize) >= 0)
return;
setIppErrorStatus();
}
TransposeInplaceFunc func = transposeInplaceTab[esz];
CV_Assert( func != 0 );
CV_Assert( dst.cols == dst.rows );
- func( dst.data, dst.step, dst.rows );
+ func( dst.ptr(), dst.step, dst.rows );
}
else
{
TransposeFunc func = transposeTab[esz];
CV_Assert( func != 0 );
- func( src.data, src.step, dst.data, dst.step, src.size() );
+ func( src.ptr(), src.step, dst.ptr(), dst.step, src.size() );
}
}
int rows = m.rows;
int j0 = 0, j1 = rows;
- uchar* data = m.data;
+ uchar* data = m.ptr();
for( int i = 0; i < rows; i++ )
{
if( !LtoR ) j1 = i; else j0 = i+1;
size.width *= srcmat.channels();
AutoBuffer<WT> buffer(size.width);
WT* buf = buffer;
- ST* dst = (ST*)dstmat.data;
- const T* src = (const T*)srcmat.data;
+ ST* dst = dstmat.ptr<ST>();
+ const T* src = srcmat.ptr<T>();
size_t srcstep = srcmat.step/sizeof(src[0]);
int i;
Op op;
for( int y = 0; y < size.height; y++ )
{
- const T* src = (const T*)(srcmat.data + srcmat.step*y);
- ST* dst = (ST*)(dstmat.data + dstmat.step*y);
+ const T* src = srcmat.ptr<T>(y);
+ ST* dst = dstmat.ptr<ST>(y);
if( size.width == cn )
for( k = 0; k < cn; k++ )
dst[k] = src[k];
if (ippFunc)
{
for (int y = 0; y < size.height; ++y)
- if (ippFunc(srcmat.data + sstep * y, sstep, roisize, dstmat.ptr<Ipp64f>(y)) < 0)
+ if (ippFunc(srcmat.ptr(y), sstep, roisize, dstmat.ptr<Ipp64f>(y)) < 0)
{
setIppErrorStatus();
cv::Mat dstroi = dstmat.rowRange(y, y + 1);
else if (ippFuncHint)
{
for (int y = 0; y < size.height; ++y)
- if (ippFuncHint(srcmat.data + sstep * y, sstep, roisize, dstmat.ptr<Ipp64f>(y), ippAlgHintAccurate) < 0)
+ if (ippFuncHint(srcmat.ptr(y), sstep, roisize, dstmat.ptr<Ipp64f>(y), ippAlgHintAccurate) < 0)
{
setIppErrorStatus();
cv::Mat dstroi = dstmat.rowRange(y, y + 1);
T* ptr = bptr;
if( sortRows )
{
- T* dptr = (T*)(dst.data + dst.step*i);
+ T* dptr = dst.ptr<T>(i);
if( !inplace )
{
- const T* sptr = (const T*)(src.data + src.step*i);
+ const T* sptr = src.ptr<T>(i);
memcpy(dptr, sptr, sizeof(T) * len);
}
ptr = dptr;
else
{
for( j = 0; j < len; j++ )
- ptr[j] = ((const T*)(src.data + src.step*j))[i];
+ ptr[j] = src.ptr<T>(j)[i];
}
#ifdef USE_IPP_SORT
if( !sortRows )
for( j = 0; j < len; j++ )
- ((T*)(dst.data + dst.step*j))[i] = ptr[j];
+ dst.ptr<T>(j)[i] = ptr[j];
}
}
if( sortRows )
{
ptr = (T*)(src.data + src.step*i);
- iptr = (int*)(dst.data + dst.step*i);
+ iptr = dst.ptr<int>(i);
}
else
{
for( j = 0; j < len; j++ )
- ptr[j] = ((const T*)(src.data + src.step*j))[i];
+ ptr[j] = src.ptr<T>(j)[i];
}
for( j = 0; j < len; j++ )
iptr[j] = j;
if( !sortRows )
for( j = 0; j < len; j++ )
- ((int*)(dst.data + dst.step*j))[i] = iptr[j];
+ dst.ptr<int>(j)[i] = iptr[j];
}
}
CV_Assert( data0.dims <= 2 && type == CV_32F && K > 0 );
CV_Assert( N >= K );
- Mat data(N, dims, CV_32F, data0.data, isrow ? dims * sizeof(float) : static_cast<size_t>(data0.step));
+ Mat data(N, dims, CV_32F, data0.ptr(), isrow ? dims * sizeof(float) : static_cast<size_t>(data0.step));
_bestLabels.create(N, 1, CV_32S, -1, true);
return Point();
CV_DbgAssert(m->dims <= 2);
- ptrdiff_t ofs = ptr - m->data;
+ ptrdiff_t ofs = ptr - m->ptr();
int y = (int)(ofs/m->step[0]);
return Point((int)((ofs - y*m->step[0])/elemSize), y);
}
void MatConstIterator::pos(int* _idx) const
{
CV_Assert(m != 0 && _idx);
- ptrdiff_t ofs = ptr - m->data;
+ ptrdiff_t ofs = ptr - m->ptr();
for( int i = 0; i < m->dims; i++ )
{
size_t s = m->step[i], v = ofs/s;
return 0;
if( m->isContinuous() )
return (ptr - sliceStart)/elemSize;
- ptrdiff_t ofs = ptr - m->data;
+ ptrdiff_t ofs = ptr - m->ptr();
int i, d = m->dims;
if( d == 2 )
{
ptrdiff_t ofs0, y;
if( relative )
{
- ofs0 = ptr - m->data;
+ ofs0 = ptr - m->ptr();
y = ofs0/m->step[0];
ofs += y*m->cols + (ofs0 - y*m->step[0])/elemSize;
}
y = ofs/m->cols;
int y1 = std::min(std::max((int)y, 0), m->rows-1);
- sliceStart = m->data + y1*m->step[0];
+ sliceStart = m->ptr(y1);
sliceEnd = sliceStart + m->cols*elemSize;
ptr = y < 0 ? sliceStart : y >= m->rows ? sliceEnd :
sliceStart + (ofs - y*m->cols)*elemSize;
ptrdiff_t t = ofs/szi;
int v = (int)(ofs - t*szi);
ofs = t;
- ptr = m->data + v*elemSize;
- sliceStart = m->data;
+ ptr = m->ptr() + v*elemSize;
+ sliceStart = m->ptr();
for( int i = d-2; i >= 0; i-- )
{
if( ofs > 0 )
ptr = sliceEnd;
else
- ptr = sliceStart + (ptr - m->data);
+ ptr = sliceStart + (ptr - m->ptr());
}
void MatConstIterator::seek(const int* _idx, bool relative)
int i, idx[CV_MAX_DIM] = {0}, d = m.dims, lastSize = m.size[d - 1];
size_t esz = m.elemSize();
- uchar* dptr = m.data;
+ const uchar* dptr = m.ptr();
for(;;)
{
KernelArg KernelArg::Constant(const Mat& m)
{
CV_Assert(m.isContinuous());
- return KernelArg(CONSTANT, 0, 0, 0, m.data, m.total()*m.elemSize());
+ return KernelArg(CONSTANT, 0, 0, 0, m.ptr(), m.total()*m.elemSize());
}
/////////////////////////////////////////// Kernel /////////////////////////////////////////////
static std::string kerToStr(const Mat & k)
{
int width = k.cols - 1, depth = k.depth();
- const T * const data = reinterpret_cast<const T *>(k.data);
+ const T * const data = k.ptr<T>();
std::ostringstream stream;
stream.precision(10);
{
_parambuf.allocate(cn*8 + n1 + n2);
double* parambuf = _parambuf;
- double* p1 = (double*)_param1.data;
- double* p2 = (double*)_param2.data;
+ double* p1 = _param1.ptr<double>();
+ double* p2 = _param2.ptr<double>();
if( !_param1.isContinuous() || _param1.type() != CV_64F || n1 != cn )
{
int esz = (int)CV_ELEM_SIZE(ptype);
if( _param1.isContinuous() && _param1.type() == ptype )
- mean = _param1.data;
+ mean = _param1.ptr();
else
{
Mat tmp(_param1.size(), ptype, parambuf);
mean[j] = mean[j - n1*esz];
if( _param2.isContinuous() && _param2.type() == ptype )
- stddev = _param2.data;
+ stddev = _param2.ptr();
else
{
Mat tmp(_param2.size(), ptype, parambuf + cn);
int sz = _arr.rows*_arr.cols, iters = cvRound(iterFactor*sz);
if( _arr.isContinuous() )
{
- T* arr = (T*)_arr.data;
+ T* arr = _arr.ptr<T>();
for( int i = 0; i < iters; i++ )
{
int j = (unsigned)rng % sz, k = (unsigned)rng % sz;
}
else
{
- uchar* data = _arr.data;
+ uchar* data = _arr.ptr();
size_t step = _arr.step;
int cols = _arr.cols;
for( int i = 0; i < iters; i++ )
if( ippFuncHint || ippFuncNoHint )
{
Ipp64f res[4];
- IppStatus ret = ippFuncHint ? ippFuncHint(src.data, (int)src.step[0], sz, res, ippAlgHintAccurate) :
- ippFuncNoHint(src.data, (int)src.step[0], sz, res);
+ IppStatus ret = ippFuncHint ? ippFuncHint(src.ptr(), (int)src.step[0], sz, res, ippAlgHintAccurate) :
+ ippFuncNoHint(src.ptr(), (int)src.step[0], sz, res);
if( ret >= 0 )
{
Scalar sc;
if( ippFuncC1 )
{
Ipp64f res;
- if( ippFuncC1(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, &res) >= 0 )
+ if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, &res) >= 0 )
return Scalar(res);
setIppErrorStatus();
}
if( ippFuncC3 )
{
Ipp64f res1, res2, res3;
- if( ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 1, &res1) >= 0 &&
- ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 2, &res2) >= 0 &&
- ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 3, &res3) >= 0 )
+ if( ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 1, &res1) >= 0 &&
+ ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 2, &res2) >= 0 &&
+ ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 3, &res3) >= 0 )
{
return Scalar(res1, res2, res3);
}
if( ippFuncHint || ippFuncNoHint )
{
Ipp64f res[4];
- IppStatus ret = ippFuncHint ? ippFuncHint(src.data, (int)src.step[0], sz, res, ippAlgHintAccurate) :
- ippFuncNoHint(src.data, (int)src.step[0], sz, res);
+ IppStatus ret = ippFuncHint ? ippFuncHint(src.ptr(), (int)src.step[0], sz, res, ippAlgHintAccurate) :
+ ippFuncNoHint(src.ptr(), (int)src.step[0], sz, res);
if( ret >= 0 )
{
Scalar sc;
part_sum funcs[3] = { ocl_part_sum<int>, ocl_part_sum<float>, ocl_part_sum<double> };
Mat dbm = db.getMat(ACCESS_READ);
- mean = funcs[ddepth - CV_32S](Mat(1, groups, dtype, dbm.data));
- stddev = funcs[sqddepth - CV_32S](Mat(1, groups, sqdtype, dbm.data + groups * CV_ELEM_SIZE(dtype)));
+ mean = funcs[ddepth - CV_32S](Mat(1, groups, dtype, dbm.ptr()));
+ stddev = funcs[sqddepth - CV_32S](Mat(1, groups, sqdtype, dbm.ptr() + groups * CV_ELEM_SIZE(dtype)));
if (haveMask)
- nz = saturate_cast<int>(funcs[0](Mat(1, groups, CV_32SC1, dbm.data +
+ nz = saturate_cast<int>(funcs[0](Mat(1, groups, CV_32SC1, dbm.ptr() +
groups * (CV_ELEM_SIZE(dtype) +
CV_ELEM_SIZE(sqdtype))))[0]);
}
_mean.create(cn, 1, CV_64F, -1, true);
mean = _mean.getMat();
dcn_mean = (int)mean.total();
- pmean = (Ipp64f *)mean.data;
+ pmean = mean.ptr<Ipp64f>();
}
int dcn_stddev = -1;
if( _sdv.needed() )
_sdv.create(cn, 1, CV_64F, -1, true);
stddev = _sdv.getMat();
dcn_stddev = (int)stddev.total();
- pstddev = (Ipp64f *)stddev.data;
+ pstddev = stddev.ptr<Ipp64f>();
}
for( int c = cn; c < dcn_mean; c++ )
pmean[c] = 0;
0;
if( ippFuncC1 )
{
- if( ippFuncC1(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, pmean, pstddev) >= 0 )
+ if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, pmean, pstddev) >= 0 )
return;
setIppErrorStatus();
}
0;
if( ippFuncC3 )
{
- if( ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 1, &pmean[0], &pstddev[0]) >= 0 &&
- ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 2, &pmean[1], &pstddev[1]) >= 0 &&
- ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 3, &pmean[2], &pstddev[2]) >= 0 )
+ if( ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 1, &pmean[0], &pstddev[0]) >= 0 &&
+ ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 2, &pmean[1], &pstddev[1]) >= 0 &&
+ ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 3, &pmean[2], &pstddev[2]) >= 0 )
return;
setIppErrorStatus();
}
0;
if( ippFuncC1 )
{
- if( ippFuncC1(src.data, (int)src.step[0], sz, pmean, pstddev) >= 0 )
+ if( ippFuncC1(src.ptr(), (int)src.step[0], sz, pmean, pstddev) >= 0 )
return;
setIppErrorStatus();
}
0;
if( ippFuncC3 )
{
- if( ippFuncC3(src.data, (int)src.step[0], sz, 1, &pmean[0], &pstddev[0]) >= 0 &&
- ippFuncC3(src.data, (int)src.step[0], sz, 2, &pmean[1], &pstddev[1]) >= 0 &&
- ippFuncC3(src.data, (int)src.step[0], sz, 3, &pmean[2], &pstddev[2]) >= 0 )
+ if( ippFuncC3(src.ptr(), (int)src.step[0], sz, 1, &pmean[0], &pstddev[0]) >= 0 &&
+ ippFuncC3(src.ptr(), (int)src.step[0], sz, 2, &pmean[1], &pstddev[1]) >= 0 &&
+ ippFuncC3(src.ptr(), (int)src.step[0], sz, 3, &pmean[2], &pstddev[2]) >= 0 )
return;
setIppErrorStatus();
}
const uint * minlocptr = NULL, * maxlocptr = NULL;
if (minVal || minLoc)
{
- minptr = (const T *)db.data;
+ minptr = db.ptr<T>();
index += sizeof(T) * groupnum;
}
if (maxVal || maxLoc)
{
- maxptr = (const T *)(db.data + index);
+ maxptr = (const T *)(db.ptr() + index);
index += sizeof(T) * groupnum;
}
if (minLoc)
{
- minlocptr = (uint *)(db.data + index);
+ minlocptr = (const uint *)(db.ptr() + index);
index += sizeof(uint) * groupnum;
}
if (maxLoc)
{
- maxlocptr = (uint *)(db.data + index);
+ maxlocptr = (const uint *)(db.ptr() + index);
index += sizeof(uint) * groupnum;
}
if (maxVal2)
- maxptr2 = (const T *)(db.data + index);
+ maxptr2 = (const T *)(db.ptr() + index);
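// [editorial note] db is one packed reduction buffer: the per-group minima, maxima, their
// locations and, optionally, the second maxima occupy consecutive segments, with 'index'
// advanced past each segment as the pointers are set up above.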
for (int i = 0; i < groupnum; i++)
{
{
Ipp32f min, max;
IppiPoint minp, maxp;
- if( ippFuncC1(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, &min, &max, &minp, &maxp) >= 0 )
+ if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, &min, &max, &minp, &maxp) >= 0 )
{
if( minVal )
*minVal = (double)min;
if( maxVal )
*maxVal = (double)max;
- if( !minp.x && !minp.y && !maxp.x && !maxp.y && !mask.data[0] )
+ if( !minp.x && !minp.y && !maxp.x && !maxp.y && !mask.ptr()[0] )
minp.x = maxp.x = -1;
if( minIdx )
{
{
Ipp32f min, max;
IppiPoint minp, maxp;
- if( ippFuncC1(src.data, (int)src.step[0], sz, &min, &max, &minp, &maxp) >= 0 )
+ if( ippFuncC1(src.ptr(), (int)src.step[0], sz, &min, &max, &minp, &maxp) >= 0 )
{
if( minVal )
*minVal = (double)min;
if( ippFuncC1 )
{
Ipp64f norm;
- if( ippFuncC1(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, &norm) >= 0 )
+ if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, &norm) >= 0 )
return normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm;
setIppErrorStatus();
if( ippFuncHint || ippFuncNoHint )
{
Ipp64f norm_array[4];
- IppStatus ret = ippFuncHint ? ippFuncHint(src.data, (int)src.step[0], sz, norm_array, ippAlgHintAccurate) :
- ippFuncNoHint(src.data, (int)src.step[0], sz, norm_array);
+ IppStatus ret = ippFuncHint ? ippFuncHint(src.ptr(), (int)src.step[0], sz, norm_array, ippAlgHintAccurate) :
+ ippFuncNoHint(src.ptr(), (int)src.step[0], sz, norm_array);
if( ret >= 0 )
{
Ipp64f norm = (normType == NORM_L2 || normType == NORM_L2SQR) ? norm_array[0] * norm_array[0] : norm_array[0];
if( ippFuncC1 )
{
Ipp64f norm;
- if( ippFuncC1(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], mask.data, (int)mask.step[0], sz, &norm) >= 0 )
+ if( ippFuncC1(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], mask.ptr(), (int)mask.step[0], sz, &norm) >= 0 )
return normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm;
setIppErrorStatus();
}
if (ippFuncNoHint)
{
Ipp64f norm;
- if( ippFuncNoHint(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], sz, &norm) >= 0 )
+ if( ippFuncNoHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, &norm) >= 0 )
return (double)norm;
setIppErrorStatus();
}
if (ippFuncHint)
{
Ipp64f norm;
- if( ippFuncHint(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], sz, &norm, ippAlgHintAccurate) >= 0 )
+ if( ippFuncHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, &norm, ippAlgHintAccurate) >= 0 )
return (double)norm;
setIppErrorStatus();
}
if( ippFuncC1 )
{
Ipp64f norm;
- if( ippFuncC1(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], mask.data, (int)mask.step[0], sz, &norm) >= 0 )
+ if( ippFuncC1(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], mask.ptr(), (int)mask.step[0], sz, &norm) >= 0 )
return normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm;
setIppErrorStatus();
}
if( ippFuncHint || ippFuncNoHint )
{
Ipp64f norm_array[4];
- IppStatus ret = ippFuncHint ? ippFuncHint(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], sz, norm_array, ippAlgHintAccurate) :
- ippFuncNoHint(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], sz, norm_array);
+ IppStatus ret = ippFuncHint ? ippFuncHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, norm_array, ippAlgHintAccurate) :
+ ippFuncNoHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, norm_array);
if( ret >= 0 )
{
Ipp64f norm = (normType == NORM_L2 || normType == NORM_L2SQR) ? norm_array[0] * norm_array[0] : norm_array[0];
_idx.create(n, 1, CV_32SC2);
Mat idx = _idx.getMat();
CV_Assert(idx.isContinuous());
- Point* idx_ptr = (Point*)idx.data;
+ Point* idx_ptr = idx.ptr<Point>();
for( int i = 0; i < src.rows; i++ )
{
}
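
For the `_idx` output above, `ptr<Point>()` only yields a flat array because the matrix was just created and is asserted continuous. A small sketch of the same pattern (illustrative, not from the patch):

#include <opencv2/core.hpp>
#include <vector>

// Hypothetical example: copy a vector of locations into an n x 1 CV_32SC2 Mat.
static cv::Mat packPoints(const std::vector<cv::Point>& pts)
{
    cv::Mat idx((int)pts.size(), 1, CV_32SC2);
    CV_Assert(idx.isContinuous());              // freshly allocated, so this holds
    cv::Point* idx_ptr = idx.ptr<cv::Point>();  // flat view is valid only because of the assert
    for (size_t i = 0; i < pts.size(); i++)
        idx_ptr[i] = pts[i];
    return idx;
}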
Mat dst = _dst.getMat();
- u->currAllocator->download(u, dst.data, dims, sz, srcofs, step.p, dst.step.p);
+ u->currAllocator->download(u, dst.ptr(), dims, sz, srcofs, step.p, dst.step.p);
}
void UMat::copyTo(OutputArray _dst, InputArray _mask) const
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr = planes[0].data;
- const uchar* aptr = planes[1].data;
- const uchar* bptr = planes[2].data;
- uchar* dptr = planes[3].data;
+ const uchar* sptr = planes[0].ptr();
+ const uchar* aptr = planes[1].ptr();
+ const uchar* bptr = planes[2].ptr();
+ uchar* dptr = planes[3].ptr();
switch( depth )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr = planes[0].data;
- uchar* dptr = planes[1].data;
+ const uchar* sptr = planes[0].ptr();
+ uchar* dptr = planes[1].ptr();
switch( depth )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr = planes[0].data;
- uchar* dptr = planes[1].data;
+ const uchar* sptr = planes[0].ptr();
+ uchar* dptr = planes[1].ptr();
if( depth == CV_32F )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr = planes[0].data;
- uchar* dptr = planes[1].data;
+ const uchar* sptr = planes[0].ptr();
+ uchar* dptr = planes[1].ptr();
if( depth == CV_32F )
{
{
if( depth == CV_32F )
{
- const float* xptr = (const float*)planes[0].data;
- const float* yptr = (const float*)planes[1].data;
- float* mptr = (float*)planes[2].data;
- float* aptr = (float*)planes[3].data;
+ const float* xptr = planes[0].ptr<float>();
+ const float* yptr = planes[1].ptr<float>();
+ float* mptr = planes[2].ptr<float>();
+ float* aptr = planes[3].ptr<float>();
for( j = 0; j < total; j++ )
{
}
else
{
- const double* xptr = (const double*)planes[0].data;
- const double* yptr = (const double*)planes[1].data;
- double* mptr = (double*)planes[2].data;
- double* aptr = (double*)planes[3].data;
+ const double* xptr = planes[0].ptr<double>();
+ const double* yptr = planes[1].ptr<double>();
+ double* mptr = planes[2].ptr<double>();
+ double* aptr = planes[3].ptr<double>();
for( j = 0; j < total; j++ )
{
double scale = (flags & DFT_SCALE) ? 1./n : 1.;
size_t esz = _src.elemSize();
size_t srcstep = esz, dststep = esz;
- const uchar* src0 = _src.data;
- uchar* dst0 = _dst.data;
+ const uchar* src0 = _src.ptr();
+ uchar* dst0 = _dst.ptr();
CV_Assert( _src.cols + _src.rows - 1 == n );
CvSeq* seq = cvCreateSeq(test_mat.type(), (int)sizeof(CvSeq),
(int)test_mat.elemSize(), storage);
- cvSeqPushMulti(seq, test_mat.data, test_mat.cols*test_mat.rows);
+ cvSeqPushMulti(seq, test_mat.ptr(), test_mat.cols*test_mat.rows);
CvGraph* graph = cvCreateGraph( CV_ORIENTED_GRAPH,
sizeof(CvGraph), sizeof(CvGraphVtx),
evec = svd.vt;
eval = svd.w;*/
- Mat subEval( maxComponents, 1, eval.type(), eval.data ),
- subEvec( maxComponents, evec.cols, evec.type(), evec.data );
+ Mat subEval( maxComponents, 1, eval.type(), eval.ptr() ),
+ subEvec( maxComponents, evec.cols, evec.type(), evec.ptr() );
#ifdef CHECK_C
Mat prjTestPoints, backPrjTestPoints, cPoints = rPoints.t(), cTestPoints = rTestPoints.t();
{
Mat& mat = test_mat[INPUT][0];
int count = MIN( mat.rows, mat.cols );
- Mat diag(count, 1, mat.type(), mat.data, mat.step + mat.elemSize());
+ Mat diag(count, 1, mat.type(), mat.ptr(), mat.step + mat.elemSize());
Scalar r = cvtest::mean(diag);
r *= (double)count;
case MAT_1_N_CDIM:
data.create(1, N, CV_32FC(dims));
for( i = 0; i < N; i++ )
- memcpy(data.data + i * dims * sizeof(float), data0.ptr(rng.uniform(0, N0)), dims * sizeof(float));
+ memcpy(data.ptr() + i * dims * sizeof(float), data0.ptr(rng.uniform(0, N0)), dims * sizeof(float));
break;
case MAT_N_DIM_C1_NONCONT:
int dist_type, double& refval, double& realval)
{
Mat hist0(hist.size(), CV_32F);
- const int* H = (const int*)hist.data;
- float* H0 = ((float*)hist0.data);
+ const int* H = hist.ptr<int>();
+ float* H0 = hist0.ptr<float>();
int i, hsz = hist.cols;
double sum = 0;
for( c = 0; c < cn; c++ )
{
- const uchar* data = arr[0].data;
+ const uchar* data = arr[0].ptr();
int* H = hist[c].ptr<int>();
int HSZ = hist[c].cols;
double minVal = dist_type == CV_RAND_UNI ? A[c] : A[c] - B[c]*4;
int SDIM = cvtest::randInt(rng) % (MAX_SDIM-1) + 2;
int N0 = (SZ*cn/SDIM), n = 0;
double r2 = 0;
- const uchar* data = arr[0].data;
+ const uchar* data = arr[0].ptr();
double scale[4], delta[4];
for( c = 0; c < cn; c++ )
{
Mat imgDescriptor = _imgDescriptor.getMat();
- float *dptr = (float*)imgDescriptor.data;
+ float *dptr = imgDescriptor.ptr<float>();
for( size_t i = 0; i < matches.size(); i++ )
{
int queryIdx = matches[i].queryIdx;
if (dx + dy > 2)
{
// now the calculation:
- const uchar* ptr = image.data + x_left + imagecols * y_top;
+ const uchar* ptr = image.ptr() + x_left + imagecols * y_top;
// first the corners:
ret_val = A * int(*ptr);
ptr += dx + 1;
ret_val += D * int(*ptr);
// next the edges:
- int* ptr_integral = (int*) integral.data + x_left + integralcols * y_top + 1;
+ const int* ptr_integral = integral.ptr<int>() + x_left + integralcols * y_top + 1;
// find a simple path through the different surface corners
const int tmp1 = (*ptr_integral);
ptr_integral += dx;
}
// now the calculation:
- const uchar* ptr = image.data + x_left + imagecols * y_top;
+ const uchar* ptr = image.ptr() + x_left + imagecols * y_top;
// first row:
ret_val = A * int(*ptr);
ptr++;
int t2;
// the feature orientation
- const uchar* ptr = descriptors.data;
+ const uchar* ptr = descriptors.ptr();
for (size_t k = 0; k < ksize; k++)
{
cv::KeyPoint& kp = keypoints[k];
{
const cv::Mat& scores = pyramid_[layer].scores();
const int scorescols = scores.cols;
- const uchar* data = scores.data + y_layer * scorescols + x_layer;
+ const uchar* data = scores.ptr() + y_layer * scorescols + x_layer;
// decision tree:
const uchar center = (*data);
data--;
{
// in this case, we have to analyze the situation more carefully:
// the values are gaussian blurred and then we really decide
- data = scores.data + y_layer * scorescols + x_layer;
+ data = scores.ptr() + y_layer * scorescols + x_layer;
int smoothedcenter = 4 * center + 2 * (s_10 + s10 + s0_1 + s01) + s_1_1 + s1_1 + s_11 + s11;
for (unsigned int i = 0; i < deltasize; i += 2)
{
- data = scores.data + (y_layer - 1 + delta[i + 1]) * scorescols + x_layer + delta[i] - 1;
+ data = scores.ptr() + (y_layer - 1 + delta[i + 1]) * scorescols + x_layer + delta[i] - 1;
int othercenter = *data;
data++;
othercenter += 2 * (*data);
const int r_y = (int)((yf - y) * 1024);
const int r_x_1 = (1024 - r_x);
const int r_y_1 = (1024 - r_y);
- const uchar* ptr = image.data + x + y * imagecols;
+ const uchar* ptr = image.ptr() + x + y * imagecols;
// just interpolate:
ret_val = (r_x_1 * r_y_1 * int(*ptr));
ptr++;
const int r_y1_i = (int)(r_y1 * scaling);
// now the calculation:
- const uchar* ptr = image.data + x_left + imagecols * y_top;
+ const uchar* ptr = image.ptr() + x_left + imagecols * y_top;
// first row:
ret_val = A * int(*ptr);
ptr++;
fwrite( (void*)&type, sizeof(int), 1, f );
int dataSize = (int)(mat.step * mat.rows * mat.channels());
fwrite( (void*)&dataSize, sizeof(int), 1, f );
- fwrite( (void*)mat.data, 1, dataSize, f );
+ fwrite( (void*)mat.ptr(), 1, dataSize, f );
fclose(f);
}
}
CV_Assert(query.isContinuous() && indices.isContinuous() && dists.isContinuous());
::cvflann::Matrix<ElementType> _query((ElementType*)query.data, query.rows, query.cols);
- ::cvflann::Matrix<int> _indices((int*)indices.data, indices.rows, indices.cols);
- ::cvflann::Matrix<DistanceType> _dists((DistanceType*)dists.data, dists.rows, dists.cols);
+ ::cvflann::Matrix<int> _indices(indices.ptr<int>(), indices.rows, indices.cols);
+ ::cvflann::Matrix<DistanceType> _dists(dists.ptr<DistanceType>(), dists.rows, dists.cols);
((IndexType*)index)->knnSearch(_query, _indices, _dists, knn,
(const ::cvflann::SearchParams&)get_params(params));
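
The cvflann::Matrix wrappers above are only valid because the continuity assert guarantees one solid block; the typed `ptr<T>()` call replaces the old reinterpreting casts. A hedged sketch of the same wrapping (the `float` element type and the helper name are assumptions for illustration):

#include <opencv2/core.hpp>
#include <opencv2/flann.hpp>

// Illustrative only: expose a continuous CV_32F Mat to cvflann without copying.
static ::cvflann::Matrix<float> asFlannMatrix(cv::Mat& m)
{
    CV_Assert(m.type() == CV_32F && m.isContinuous());
    return ::cvflann::Matrix<float>(m.ptr<float>(), m.rows, m.cols);
}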
CV_Assert(query.isContinuous() && indices.isContinuous() && dists.isContinuous());
::cvflann::Matrix<ElementType> _query((ElementType*)query.data, query.rows, query.cols);
- ::cvflann::Matrix<int> _indices((int*)indices.data, indices.rows, indices.cols);
- ::cvflann::Matrix<DistanceType> _dists((DistanceType*)dists.data, dists.rows, dists.cols);
+ ::cvflann::Matrix<int> _indices(indices.ptr<int>(), indices.rows, indices.cols);
+ ::cvflann::Matrix<DistanceType> _dists(dists.ptr<DistanceType>(), dists.rows, dists.cols);
return ((IndexType*)index)->radiusSearch(_query, _indices, _dists,
saturate_cast<float>(radius),
bool BmpDecoder::readData( Mat& img )
{
- uchar* data = img.data;
+ uchar* data = img.ptr();
int step = (int)img.step;
bool color = img.channels() > 1;
uchar gray_palette[256];
width *= channels;
for( int y = height - 1; y >= 0; y-- )
{
- strm.putBytes( img.data + img.step*y, width );
+ strm.putBytes( img.ptr(y), width );
if( fileStep > width )
strm.putBytes( zeropad, fileStep - width );
}
m_native_depth = CV_MAT_DEPTH(type()) == img.depth();
bool color = img.channels() > 1;
- uchar* data = img.data;
+ uchar* data = img.ptr();
int step = img.step;
bool justcopy = m_native_depth;
bool chromatorgb = false;
bool issigned = depth == CV_8S || depth == CV_16S || depth == CV_32S;
bool isfloat = depth == CV_32F || depth == CV_64F;
depth = CV_ELEM_SIZE1(depth)*8;
- uchar* data = img.data;
- int step = img.step;
+ const int step = img.step;
Header header( width, height );
Imf::PixelType type;
int size;
if( type == FLOAT && depth == 32 )
{
- buffer = (char *)const_cast<uchar *>(data);
+ buffer = (char *)const_cast<uchar *>(img.ptr());
bufferstep = step;
size = 4;
}
if( depth <= 8 )
{
+ const uchar* sd = img.ptr(line);
for(int i = 0; i < width * channels; i++)
- buf[i] = data[i] + offset;
+ buf[i] = sd[i] + offset;
}
else if( depth <= 16 )
{
- unsigned short *sd = (unsigned short *)data;
+ const unsigned short *sd = img.ptr<unsigned short>(line);
for(int i = 0; i < width * channels; i++)
buf[i] = sd[i] + offset;
}
else
{
- int *sd = (int *)data; // FIXME 64-bit problems
+ const int *sd = img.ptr<int>(line); // FIXME 64-bit problems
for(int i = 0; i < width * channels; i++)
buf[i] = (unsigned) sd[i] + offset;
}
if( depth <= 8 )
{
+ const uchar* sd = img.ptr(line);
for(int i = 0; i < width * channels; i++)
- buf[i] = data[i];
+ buf[i] = sd[i];
}
else if( depth <= 16 )
{
- unsigned short *sd = (unsigned short *)data;
+ const unsigned short *sd = img.ptr<unsigned short>(line);
for(int i = 0; i < width * channels; i++)
buf[i] = sd[i];
}
result = false;
break;
}
- data += step;
}
delete[] buffer;
}
if( !m_buf.empty() )
{
jpeg_buffer_src(&state->cinfo, &state->source);
- state->source.pub.next_input_byte = m_buf.data;
+ state->source.pub.next_input_byte = m_buf.ptr();
state->source.pub.bytes_in_buffer = m_buf.cols*m_buf.rows*m_buf.elemSize();
}
else
buffer = (*cinfo->mem->alloc_sarray)((j_common_ptr)cinfo,
JPOOL_IMAGE, m_width*4, 1 );
- uchar* data = img.data;
+ uchar* data = img.ptr();
for( ; m_height--; data += step )
{
jpeg_read_scanlines( cinfo, buffer, 1 );
png_error(png_ptr, "PNG input buffer is incomplete");
return;
}
- memcpy( dst, &decoder->m_buf.data[decoder->m_buf_pos], size );
+ memcpy( dst, decoder->m_buf.ptr() + decoder->m_buf_pos, size );
decoder->m_buf_pos += size;
}
AutoBuffer<uchar*> _buffer(m_height);
uchar** buffer = _buffer;
int color = img.channels() > 1;
- uchar* data = img.data;
+ uchar* data = img.ptr();
int step = (int)img.step;
if( m_png_ptr && m_info_ptr && m_end_info && m_width && m_height )
bool PxMDecoder::readData( Mat& img )
{
int color = img.channels() > 1;
- uchar* data = img.data;
+ uchar* data = img.ptr();
int step = (int)img.step;
PaletteEntry palette[256];
bool result = false;
for( y = 0; y < height; y++ )
{
- uchar* data = img.data + img.step*y;
+ const uchar* const data = img.ptr(y);
if( isBinary )
{
if( _channels == 3 )
{
if( depth == 8 )
- icvCvt_BGR2RGB_8u_C3R( (uchar*)data, 0,
+ icvCvt_BGR2RGB_8u_C3R( (const uchar*)data, 0,
(uchar*)buffer, 0, cvSize(width,1) );
else
- icvCvt_BGR2RGB_16u_C3R( (ushort*)data, 0,
+ icvCvt_BGR2RGB_16u_C3R( (const ushort*)data, 0,
(ushort*)buffer, 0, cvSize(width,1) );
}
buffer[x + 1] = v;
}
}
- strm.putBytes( (channels > 1 || depth > 8) ? buffer : (char*)data, fileStep );
+ strm.putBytes( (channels > 1 || depth > 8) ? buffer : (const char*)data, fileStep );
}
else
{
{
for( x = 0; x < width*channels; x += channels )
{
- sprintf( ptr, "% 6d", ((ushort *)data)[x + 2] );
+ sprintf( ptr, "% 6d", ((const ushort *)data)[x + 2] );
ptr += 6;
- sprintf( ptr, "% 6d", ((ushort *)data)[x + 1] );
+ sprintf( ptr, "% 6d", ((const ushort *)data)[x + 1] );
ptr += 6;
- sprintf( ptr, "% 6d", ((ushort *)data)[x] );
+ sprintf( ptr, "% 6d", ((const ushort *)data)[x] );
ptr += 6;
*ptr++ = ' ';
*ptr++ = ' ';
{
for( x = 0; x < width; x++ )
{
- sprintf( ptr, "% 6d", ((ushort *)data)[x] );
+ sprintf( ptr, "% 6d", ((const ushort *)data)[x] );
ptr += 6;
}
}
bool SunRasterDecoder::readData( Mat& img )
{
int color = img.channels() > 1;
- uchar* data = img.data;
+ uchar* data = img.ptr();
int step = (int)img.step;
uchar gray_palette[256];
bool result = false;
strm.putDWord( 0 );
for( y = 0; y < height; y++ )
- strm.putBytes( img.data + img.step*y, fileStep );
+ strm.putBytes( img.ptr(y), fileStep );
strm.close();
result = true;
}
bool result = false;
bool color = img.channels() > 1;
- uchar* data = img.data;
+ uchar* data = img.ptr();
if( img.depth() != CV_8U && img.depth() != CV_16U && img.depth() != CV_32F && img.depth() != CV_64F )
return false;
{
case 1:
{
- memcpy(buffer, img.data + img.step * y, scanlineSize);
+ memcpy(buffer, img.ptr(y), scanlineSize);
break;
}
case 3:
{
if (depth == CV_8U)
- icvCvt_BGR2RGB_8u_C3R( img.data + img.step*y, 0, buffer, 0, cvSize(width,1) );
+ icvCvt_BGR2RGB_8u_C3R( img.ptr(y), 0, buffer, 0, cvSize(width,1) );
else
- icvCvt_BGR2RGB_16u_C3R( (const ushort*)(img.data + img.step*y), 0, (ushort*)buffer, 0, cvSize(width,1) );
+ icvCvt_BGR2RGB_16u_C3R( img.ptr<ushort>(y), 0, (ushort*)buffer, 0, cvSize(width,1) );
break;
}
case 4:
{
if (depth == CV_8U)
- icvCvt_BGRA2RGBA_8u_C4R( img.data + img.step*y, 0, buffer, 0, cvSize(width,1) );
+ icvCvt_BGRA2RGBA_8u_C4R( img.ptr(y), 0, buffer, 0, cvSize(width,1) );
else
- icvCvt_BGRA2RGBA_16u_C4R( (const ushort*)(img.data + img.step*y), 0, (ushort*)buffer, 0, cvSize(width,1) );
+ icvCvt_BGRA2RGBA_16u_C4R( img.ptr<ushort>(y), 0, (ushort*)buffer, 0, cvSize(width,1) );
break;
}
if( channels == 3 )
{
if (depth == CV_8U)
- icvCvt_BGR2RGB_8u_C3R( img.data + img.step*y, 0, buffer, 0, cvSize(width,1) );
+ icvCvt_BGR2RGB_8u_C3R( img.ptr(y), 0, buffer, 0, cvSize(width,1) );
else
- icvCvt_BGR2RGB_16u_C3R( (const ushort*)(img.data + img.step*y), 0, (ushort*)buffer, 0, cvSize(width,1) );
+ icvCvt_BGR2RGB_16u_C3R( img.ptr<ushort>(y), 0, (ushort*)buffer, 0, cvSize(width,1) );
}
else
{
if( channels == 4 )
{
if (depth == CV_8U)
- icvCvt_BGRA2RGBA_8u_C4R( img.data + img.step*y, 0, buffer, 0, cvSize(width,1) );
+ icvCvt_BGRA2RGBA_8u_C4R( img.ptr(y), 0, buffer, 0, cvSize(width,1) );
else
- icvCvt_BGRA2RGBA_16u_C4R( (const ushort*)(img.data + img.step*y), 0, (ushort*)buffer, 0, cvSize(width,1) );
+ icvCvt_BGRA2RGBA_16u_C4R( img.ptr<ushort>(y), 0, (ushort*)buffer, 0, cvSize(width,1) );
}
}
- strm.putBytes( channels > 1 ? buffer : img.data + img.step*y, fileStep );
+ strm.putBytes( channels > 1 ? buffer : img.ptr(y), fileStep );
}
stripCounts[i] = (short)(strm.getPos() - stripOffsets[i]);
data.create(1, wfile_size, CV_8U);
- size_t data_size = fread(data.data, 1, wfile_size, wfile);
+ size_t data_size = fread(data.ptr(), 1, wfile_size, wfile);
if(wfile)
{
}
WebPBitstreamFeatures features;
- if(VP8_STATUS_OK == WebPGetFeatures(data.data, WEBP_HEADER_SIZE, &features))
+ if(VP8_STATUS_OK == WebPGetFeatures(data.ptr(), WEBP_HEADER_SIZE, &features))
{
m_width = features.width;
m_height = features.height;
img.create(m_height, m_width, m_type);
}
- uchar* out_data = img.data;
+ uchar* out_data = img.ptr();
size_t out_data_size = img.cols * img.rows * img.elemSize();
uchar *res_ptr = 0;
if (channels == 3)
{
- res_ptr = WebPDecodeBGRInto(data.data, data.total(), out_data,
+ res_ptr = WebPDecodeBGRInto(data.ptr(), data.total(), out_data,
(int)out_data_size, (int)img.step);
}
else if (channels == 4)
{
- res_ptr = WebPDecodeBGRAInto(data.data, data.total(), out_data,
+ res_ptr = WebPDecodeBGRAInto(data.ptr(), data.total(), out_data,
(int)out_data_size, (int)img.step);
}
{
if(channels == 3)
{
- size = WebPEncodeLosslessBGR(image->data, width, height, (int)image->step, &out);
+ size = WebPEncodeLosslessBGR(image->ptr(), width, height, (int)image->step, &out);
}
else if(channels == 4)
{
- size = WebPEncodeLosslessBGRA(image->data, width, height, (int)image->step, &out);
+ size = WebPEncodeLosslessBGRA(image->ptr(), width, height, (int)image->step, &out);
}
}
else
{
if(channels == 3)
{
- size = WebPEncodeBGR(image->data, width, height, (int)image->step, quality, &out);
+ size = WebPEncodeBGR(image->ptr(), width, height, (int)image->step, quality, &out);
}
else if(channels == 4)
{
- size = WebPEncodeBGRA(image->data, width, height, (int)image->step, quality, &out);
+ size = WebPEncodeBGRA(image->ptr(), width, height, (int)image->step, quality, &out);
}
}
static void*
imdecode_( const Mat& buf, int flags, int hdrtype, Mat* mat=0 )
{
- CV_Assert(buf.data && buf.isContinuous());
+ CV_Assert(!buf.empty() && buf.isContinuous());
IplImage* image = 0;
CvMat *matrix = 0;
Mat temp, *data = &temp;
if( !f )
return 0;
size_t bufSize = buf.cols*buf.rows*buf.elemSize();
- fwrite( &buf.data[0], 1, bufSize, f );
+ fwrite( buf.ptr(), 1, bufSize, f );
fclose(f);
decoder->setSource(filename);
}
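
The `imdecode_` assert above also switches the emptiness test from poking at `buf.data` to `buf.empty()`, the same change several later hunks make for masks and histograms. A minimal sketch of that check together with raw byte access (the helper and file handling are made up):

#include <opencv2/core.hpp>
#include <cstdio>

// Hypothetical helper: dump the raw bytes of an encoded buffer to disk.
static bool dumpBuffer(const cv::Mat& buf, const char* path)
{
    if (buf.empty() || !buf.isContinuous())     // preferred over testing buf.data directly
        return false;
    std::FILE* f = std::fopen(path, "wb");
    if (!f)
        return false;
    size_t n = buf.total() * buf.elemSize();
    size_t written = std::fwrite(buf.ptr(), 1, n, f);
    std::fclose(f);
    return written == n;
}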
ASSERT_TRUE(img.channels() == 4);
- unsigned char* img_data = (unsigned char*)img.data;
+ unsigned char* img_data = img.ptr();
// Verification first pixel is red in BGRA
ASSERT_TRUE(img_data[0] == 0x00);
ASSERT_TRUE(img.channels() == 3);
- img_data = (unsigned char*)img.data;
+ img_data = img.ptr();
// Verification first pixel is red in BGR
ASSERT_TRUE(img_data[0] == 0x00);
ASSERT_TRUE(img.channels() == 3);
- img_data = (unsigned char*)img.data;
+ img_data = img.ptr();
// Verification first pixel is red in BGR
ASSERT_TRUE(img_data[0] == 0x00);
ASSERT_TRUE(img.channels() == 3);
- img_data = (unsigned char*)img.data;
+ img_data = img.ptr();
// Verification first pixel is red in BGR
ASSERT_TRUE(img_data[0] == 0x00);
size.width *= scn;
if (mask.empty())
- status = ippFunc(src.data, srcstep, (Ipp32f *)dst.data, dststep, ippiSize(size.width, size.height));
+ status = ippFunc(src.ptr(), srcstep, dst.ptr<Ipp32f>(), dststep, ippiSize(size.width, size.height));
else
- status = ippFuncMask(src.data, srcstep, (const Ipp8u *)mask.data, maskstep,
- (Ipp32f *)dst.data, dststep, ippiSize(size.width, size.height));
+ status = ippFuncMask(src.ptr(), srcstep, mask.ptr<Ipp8u>(), maskstep,
+ dst.ptr<Ipp32f>(), dststep, ippiSize(size.width, size.height));
if (status >= 0)
return;
size.width *= scn;
if (mask.empty())
- status = ippFunc(src.data, srcstep, (Ipp32f *)dst.data, dststep, ippiSize(size.width, size.height));
+ status = ippFunc(src.ptr(), srcstep, dst.ptr<Ipp32f>(), dststep, ippiSize(size.width, size.height));
else
- status = ippFuncMask(src.data, srcstep, (const Ipp8u *)mask.data, maskstep,
- (Ipp32f *)dst.data, dststep, ippiSize(size.width, size.height));
+ status = ippFuncMask(src.ptr(), srcstep, mask.ptr<Ipp8u>(), maskstep,
+ dst.ptr<Ipp32f>(), dststep, ippiSize(size.width, size.height));
if (status >= 0)
return;
size.width *= scn;
if (mask.empty())
- status = ippFunc(src1.data, src1step, src2.data, src2step, (Ipp32f *)dst.data,
+ status = ippFunc(src1.ptr(), src1step, src2.ptr(), src2step, dst.ptr<Ipp32f>(),
dststep, ippiSize(size.width, size.height));
else
- status = ippFuncMask(src1.data, src1step, src2.data, src2step, (const Ipp8u *)mask.data, maskstep,
- (Ipp32f *)dst.data, dststep, ippiSize(size.width, size.height));
+ status = ippFuncMask(src1.ptr(), src1step, src2.ptr(), src2step, mask.ptr<Ipp8u>(), maskstep,
+ dst.ptr<Ipp32f>(), dststep, ippiSize(size.width, size.height));
if (status >= 0)
return;
size.width *= scn;
if (mask.empty())
- status = ippFunc(src.data, srcstep, (Ipp32f *)dst.data, dststep, ippiSize(size.width, size.height), (Ipp32f)alpha);
+ status = ippFunc(src.ptr(), srcstep, dst.ptr<Ipp32f>(), dststep, ippiSize(size.width, size.height), (Ipp32f)alpha);
else
- status = ippFuncMask(src.data, srcstep, (const Ipp8u *)mask.data, maskstep,
- (Ipp32f *)dst.data, dststep, ippiSize(size.width, size.height), (Ipp32f)alpha);
+ status = ippFuncMask(src.ptr(), srcstep, mask.ptr<Ipp8u>(), maskstep,
+ dst.ptr<Ipp32f>(), dststep, ippiSize(size.width, size.height), (Ipp32f)alpha);
if (status >= 0)
return;
uchar* buffer = alignPtr((uchar*)buf, 32);
Mat _dx(_src.rows, _src.cols, CV_16S);
- if( ippiFilterSobelNegVertBorder_8u16s_C1R(_src.data, (int)_src.step,
+ if( ippiFilterSobelNegVertBorder_8u16s_C1R(_src.ptr(), (int)_src.step,
_dx.ptr<short>(), (int)_dx.step, roi,
ippMskSize3x3, ippBorderRepl, 0, buffer) < 0 )
return false;
Mat _dy(_src.rows, _src.cols, CV_16S);
- if( ippiFilterSobelHorizBorder_8u16s_C1R(_src.data, (int)_src.step,
+ if( ippiFilterSobelHorizBorder_8u16s_C1R(_src.ptr(), (int)_src.step,
_dy.ptr<short>(), (int)_dy.step, roi,
ippMskSize3x3, ippBorderRepl, 0, buffer) < 0 )
return false;
if( ippiCanny_16s8u_C1R(_dx.ptr<short>(), (int)_dx.step,
_dy.ptr<short>(), (int)_dy.step,
- _dst.data, (int)_dst.step, roi, low, high, buffer) < 0 )
+ _dst.ptr(), (int)_dst.step, roi, low, high, buffer) < 0 )
return false;
return true;
}
const void* srcarray[3] = { src, src, src };
Mat temp(rows, cols, CV_MAKETYPE(depth, 3));
- if(func1(srcarray, srcStep, temp.data, (int)temp.step[0], ippiSize(cols, rows)) < 0)
+ if(func1(srcarray, srcStep, temp.ptr(), (int)temp.step[0], ippiSize(cols, rows)) < 0)
return false;
int order[4] = {0, 1, 2, 3};
- return func2(temp.data, (int)temp.step[0], dst, dstStep, ippiSize(cols, rows), order) >= 0;
+ return func2(temp.ptr(), (int)temp.step[0], dst, dstStep, ippiSize(cols, rows), order) >= 0;
}
private:
ippiGeneralFunc func1;
Mat temp;
temp.create(rows, cols, CV_MAKETYPE(depth, 3));
- if(func1(src, srcStep, temp.data, (int)temp.step[0], ippiSize(cols, rows), order) < 0)
+ if(func1(src, srcStep, temp.ptr(), (int)temp.step[0], ippiSize(cols, rows), order) < 0)
return false;
- return func2(temp.data, (int)temp.step[0], dst, dstStep, ippiSize(cols, rows)) >= 0;
+ return func2(temp.ptr(), (int)temp.step[0], dst, dstStep, ippiSize(cols, rows)) >= 0;
}
private:
ippiReorderFunc func1;
Mat temp;
temp.create(rows, cols, CV_MAKETYPE(depth, 3));
- if(func1(src, srcStep, temp.data, (int)temp.step[0], ippiSize(cols, rows)) < 0)
+ if(func1(src, srcStep, temp.ptr(), (int)temp.step[0], ippiSize(cols, rows)) < 0)
return false;
- return func2(temp.data, (int)temp.step[0], dst, dstStep, ippiSize(cols, rows), order) >= 0;
+ return func2(temp.ptr(), (int)temp.step[0], dst, dstStep, ippiSize(cols, rows), order) >= 0;
}
private:
ippiGeneralFunc func1;
LabelT lunique = 1;
//scanning phase
for(int r_i = 0; r_i < rows; ++r_i){
- LabelT *Lrow = (LabelT *)(L.data + L.step.p[0] * r_i);
- LabelT *Lrow_prev = (LabelT *)(((char *)Lrow) - L.step.p[0]);
- const PixelT *Irow = (PixelT *)(I.data + I.step.p[0] * r_i);
- const PixelT *Irow_prev = (const PixelT *)(((char *)Irow) - I.step.p[0]);
+ LabelT * const Lrow = L.ptr<LabelT>(r_i);
+ LabelT * const Lrow_prev = (LabelT *)(((char *)Lrow) - L.step.p[0]);
+ const PixelT * const Irow = I.ptr<PixelT>(r_i);
+ const PixelT * const Irow_prev = (const PixelT *)(((char *)Irow) - I.step.p[0]);
LabelT *Lrows[2] = {
Lrow,
Lrow_prev
sop.init(nLabels);
for(int r_i = 0; r_i < rows; ++r_i){
- LabelT *Lrow_start = (LabelT *)(L.data + L.step.p[0] * r_i);
+ LabelT *Lrow_start = L.ptr<LabelT>(r_i);
LabelT *Lrow_end = Lrow_start + cols;
LabelT *Lrow = Lrow_start;
for(int c_i = 0; Lrow != Lrow_end; ++Lrow, ++c_i){
_contours.create((int)c->total, 1, CV_32SC2, i, true);
Mat ci = _contours.getMat(i);
CV_Assert( ci.isContinuous() );
- cvCvtSeqToArray(c, ci.data);
+ cvCvtSeqToArray(c, ci.ptr());
}
if( _hierarchy.needed() )
AutoBuffer<int> _stack(total + 2), _hullbuf(total);
Point** pointer = _pointer;
Point2f** pointerf = (Point2f**)pointer;
- Point* data0 = (Point*)points.data;
+ Point* data0 = points.ptr<Point>();
int* stack = _stack;
int* hullbuf = _hullbuf;
Mat hull = _hull.getMat();
size_t step = !hull.isContinuous() ? hull.step[0] : sizeof(Point);
for( i = 0; i < nout; i++ )
- *(Point*)(hull.data + i*step) = data0[hullbuf[i]];
+ *(Point*)(hull.ptr() + i*step) = data0[hullbuf[i]];
}
}
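
In the hull-writing loop above the output may be a column view of a larger matrix, so the code keeps byte arithmetic but bases it on `ptr()` and picks the step accordingly. A sketch of the same trick for a CV_32SC2 column (illustrative names, not library code):

#include <opencv2/core.hpp>

// Illustrative: fill an n x 1 CV_32SC2 matrix that may be a non-continuous column view.
static void fillColumn(cv::Mat& hull, const cv::Point* src, int n)
{
    CV_Assert(hull.type() == CV_32SC2 && hull.rows == n && hull.cols == 1);
    size_t step = hull.isContinuous() ? sizeof(cv::Point) : hull.step[0];
    for (int i = 0; i < n; i++)
        *(cv::Point*)(hull.ptr() + i * step) = src[i];
}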
int hpoints = hull.checkVector(1, CV_32S);
CV_Assert( hpoints > 2 );
- const Point* ptr = (const Point*)points.data;
+ const Point* ptr = points.ptr<Point>();
const int* hptr = hull.ptr<int>();
std::vector<Vec4i> defects;
return false;
return depth == CV_32S ?
- isContourConvex_((const Point*)contour.data, total ) :
- isContourConvex_((const Point2f*)contour.data, total );
+ isContourConvex_(contour.ptr<Point>(), total ) :
+ isContourConvex_(contour.ptr<Point2f>(), total );
}
}
}
}
else
- cvSeqPushMulti(hullseq, h0.data, (int)h0.total());
+ cvSeqPushMulti(hullseq, h0.ptr(), (int)h0.total());
if( mat )
{
for( i = 0; i < size.height; i++ )
{
- const float* cov = (const float*)(_cov.data + _cov.step*i);
- float* dst = (float*)(_dst.data + _dst.step*i);
+ const float* cov = _cov.ptr<float>(i);
+ float* dst = _dst.ptr<float>(i);
j = 0;
#if CV_SSE
if( simd )
for( i = 0; i < size.height; i++ )
{
- const float* cov = (const float*)(_cov.data + _cov.step*i);
- float* dst = (float*)(_dst.data + _dst.step*i);
+ const float* cov = _cov.ptr<float>(i);
+ float* dst = _dst.ptr<float>(i);
j = 0;
#if CV_SSE
for( int i = 0; i < size.height; i++ )
{
- const float* cov = (const float*)(_cov.data + _cov.step*i);
- float* dst = (float*)(_dst.data + _dst.step*i);
+ const float* cov = _cov.ptr<float>(i);
+ float* dst = _dst.ptr<float>(i);
eigen2x2(cov, dst, size.width);
}
for( i = 0; i < size.height; i++ )
{
- float* cov_data = (float*)(cov.data + i*cov.step);
- const float* dxdata = (const float*)(Dx.data + i*Dx.step);
- const float* dydata = (const float*)(Dy.data + i*Dy.step);
+ float* cov_data = cov.ptr<float>(i);
+ const float* dxdata = Dx.ptr<float>(i);
+ const float* dydata = Dy.ptr<float>(i);
for( j = 0; j < size.width; j++ )
{
if (ok >= 0)
{
AutoBuffer<uchar> buffer(bufferSize);
- ok = minEigenValFunc(src.data, (int) src.step, (Ipp32f*) dst.data, (int) dst.step, srcRoi, kerType, kerSize, blockSize, buffer);
+ ok = minEigenValFunc(src.ptr(), (int) src.step, dst.ptr<Ipp32f>(), (int) dst.step, srcRoi, kerType, kerSize, blockSize, buffer);
CV_SUPPRESS_DEPRECATED_START
- if (ok >= 0) ok = ippiMulC_32f_C1IR(norm_coef, (Ipp32f*) dst.data, (int) dst.step, srcRoi);
+ if (ok >= 0) ok = ippiMulC_32f_C1IR(norm_coef, dst.ptr<Ipp32f>(), (int) dst.step, srcRoi);
CV_SUPPRESS_DEPRECATED_END
if (ok >= 0)
return;
int i, j;
for( i = 0; i < size.height; i++ )
{
- float* dstdata = (float*)(dst.data + i*dst.step);
- const float* dxdata = (const float*)(Dx.data + i*Dx.step);
- const float* dydata = (const float*)(Dy.data + i*Dy.step);
- const float* d2xdata = (const float*)(D2x.data + i*D2x.step);
- const float* d2ydata = (const float*)(D2y.data + i*D2y.step);
- const float* dxydata = (const float*)(Dxy.data + i*Dxy.step);
+ float* dstdata = dst.ptr<float>(i);
+ const float* dxdata = Dx.ptr<float>(i);
+ const float* dydata = Dy.ptr<float>(i);
+ const float* d2xdata = D2x.ptr<float>(i);
+ const float* d2ydata = D2y.ptr<float>(i);
+ const float* dxydata = Dxy.ptr<float>(i);
j = 0;
cv::Mat src = _image.getMat(), cornersmat = _corners.getMat();
int count = cornersmat.checkVector(2, CV_32F);
CV_Assert( count >= 0 );
- Point2f* corners = (Point2f*)cornersmat.data;
+ Point2f* corners = cornersmat.ptr<Point2f>();
if( count == 0 )
return;
const int G2Y = 9617;
const int SHIFT = 14;
- const T* bayer0 = (const T*)srcmat.data;
+ const T* bayer0 = srcmat.ptr<T>();
int bayer_step = (int)(srcmat.step/sizeof(T));
T* dst0 = (T*)dstmat.data;
int dst_step = (int)(dstmat.step/sizeof(T));
}
size = dstmat.size();
- T* dst0 = (T*)dstmat.data;
+ T* dst0 = dstmat.ptr<T>();
int dst_step = (int)(dstmat.step/sizeof(T));
if( size.height > 2 )
for( int i = 0; i < size.width; i++ )
int dcn2 = dcn << 1;
int bayer_step = (int)(srcmat.step/sizeof(T));
- const T* bayer0 = reinterpret_cast<const T*>(srcmat.data) + bayer_step * range.start;
+ const T* bayer0 = srcmat.ptr<T>() + bayer_step * range.start;
int dst_step = (int)(dstmat.step/sizeof(T));
T* dst0 = reinterpret_cast<T*>(dstmat.data) + (range.start + 1) * dst_step + dcn + 1;
// filling the first and the last rows
size = dstmat.size();
- T* dst0 = (T*)dstmat.data;
+ T* dst0 = dstmat.ptr<T>();
if( size.height > 2 )
for( int i = 0; i < size.width*dcn; i++ )
{
static void Bayer2RGB_VNG_8u( const Mat& srcmat, Mat& dstmat, int code )
{
- const uchar* bayer = srcmat.data;
+ const uchar* bayer = srcmat.ptr();
int bstep = (int)srcmat.step;
- uchar* dst = dstmat.data;
+ uchar* dst = dstmat.ptr();
int dststep = (int)dstmat.step;
Size size = srcmat.size();
int sstep = int(src.step / src.elemSize1()), dstep = int(dst.step / dst.elemSize1());
SIMDInterpolator vecOp;
- const T* S = reinterpret_cast<const T*>(src.data + (range.start + 1) * src.step) + 1;
+ const T* S = src.ptr<T>(range.start + 1) + 1;
T* D = reinterpret_cast<T*>(dst.data + (range.start + 1) * dst.step) + dcn;
if (range.start % 2)
size = dst.size();
size.width *= dst.channels();
size_t dstep = dst.step / dst.elemSize1();
- T* firstRow = reinterpret_cast<T*>(dst.data);
- T* lastRow = reinterpret_cast<T*>(dst.data) + (size.height-1) * dstep;
+ T* firstRow = dst.ptr<T>();
+ T* lastRow = dst.ptr<T>() + (size.height-1) * dstep;
if (size.height > 2)
{
pBuffer = ippsMalloc_8u(bufferSize);
if (NULL == pBuffer)
IPP_RETURN_ERROR
- sts = ippiFilterScharrHorizMaskBorder_8u16s_C1R(src.data, (int)src.step, (Ipp16s *)dst.data, (int)dst.step, roiSize, ippMskSize3x3, ippiBorderType, 0, pBuffer);
+ sts = ippiFilterScharrHorizMaskBorder_8u16s_C1R(src.ptr(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, roiSize, ippMskSize3x3, ippiBorderType, 0, pBuffer);
}
else
{
pBuffer = ippsMalloc_8u(bufferSize);
if (NULL == pBuffer)
IPP_RETURN_ERROR
- sts = ippiFilterScharrVertMaskBorder_8u16s_C1R(src.data, (int)src.step, (Ipp16s *)dst.data, (int)dst.step, roiSize, ippMskSize3x3, ippiBorderType, 0, pBuffer);
+ sts = ippiFilterScharrVertMaskBorder_8u16s_C1R(src.ptr(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, roiSize, ippMskSize3x3, ippiBorderType, 0, pBuffer);
}
ippsFree(pBuffer);
}
pBuffer = ippsMalloc_8u(bufferSize);
if (NULL == pBuffer)
IPP_RETURN_ERROR
- sts = ippiFilterScharrHorizMaskBorder_16s_C1R((Ipp16s *)src.data, (int)src.step, (Ipp16s *)dst.data, (int)dst.step, roiSize, ippMskSize3x3, ippiBorderType, 0, pBuffer);
+ sts = ippiFilterScharrHorizMaskBorder_16s_C1R(src.ptr<Ipp16s>(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, roiSize, ippMskSize3x3, ippiBorderType, 0, pBuffer);
}
else
{
pBuffer = ippsMalloc_8u(bufferSize);
if (NULL == pBuffer)
IPP_RETURN_ERROR
- sts = ippiFilterScharrVertMaskBorder_16s_C1R((Ipp16s *)src.data, (int)src.step, (Ipp16s *)dst.data, (int)dst.step, roiSize, ippMskSize3x3, ippiBorderType, 0, pBuffer);
+ sts = ippiFilterScharrVertMaskBorder_16s_C1R(src.ptr<Ipp16s>(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, roiSize, ippMskSize3x3, ippiBorderType, 0, pBuffer);
}
ippsFree(pBuffer);
}
pBuffer = ippsMalloc_8u(bufferSize);
if (NULL == pBuffer)
IPP_RETURN_ERROR
- sts = ippiFilterScharrHorizMaskBorder_32f_C1R((Ipp32f *)src.data, (int)src.step, (Ipp32f *)dst.data, (int)dst.step, roiSize, ippMskSize3x3, ippiBorderType, 0, pBuffer);
+ sts = ippiFilterScharrHorizMaskBorder_32f_C1R(src.ptr<Ipp32f>(), (int)src.step, dst.ptr<Ipp32f>(), (int)dst.step, roiSize, ippMskSize3x3, ippiBorderType, 0, pBuffer);
}
else
{
pBuffer = ippsMalloc_8u(bufferSize);
if (NULL == pBuffer)
IPP_RETURN_ERROR
- sts = ippiFilterScharrVertMaskBorder_32f_C1R((Ipp32f *)src.data, (int)src.step, (Ipp32f *)dst.data, (int)dst.step, roiSize, ippMskSize3x3, ippiBorderType, 0, pBuffer);
+ sts = ippiFilterScharrVertMaskBorder_32f_C1R(src.ptr<Ipp32f>(), (int)src.step, dst.ptr<Ipp32f>(), (int)dst.step, roiSize, ippMskSize3x3, ippiBorderType, 0, pBuffer);
}
ippsFree(pBuffer);
if (sts < 0)
IPP_RETURN_ERROR;
if (FLT_EPSILON < fabs(scale - 1.0))
- sts = ippiMulC_32f_C1R((Ipp32f *)dst.data, (int)dst.step, (Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, roiSize);
+ sts = ippiMulC_32f_C1R(dst.ptr<Ipp32f>(), (int)dst.step, (Ipp32f)scale, dst.ptr<Ipp32f>(), (int)dst.step, roiSize);
}
return (0 <= sts);
}
if (0 > ippiFilterScharrVertGetBufferSize_8u16s_C1R(roi,&bufSize))
return false;
buffer.allocate(bufSize);
- return (0 <= ippiFilterScharrVertBorder_8u16s_C1R((const Ipp8u*)src.data, (int)src.step,
- (Ipp16s*)dst.data, (int)dst.step, roi, ippBorderRepl, 0, (Ipp8u*)(char*)buffer));
+ return (0 <= ippiFilterScharrVertBorder_8u16s_C1R(src.ptr<Ipp8u>(), (int)src.step,
+ dst.ptr<Ipp16s>(), (int)dst.step, roi, ippBorderRepl, 0, (Ipp8u*)(char*)buffer));
}
if ((dx == 0) && (dy == 1))
{
if (0 > ippiFilterScharrHorizGetBufferSize_8u16s_C1R(roi,&bufSize))
return false;
buffer.allocate(bufSize);
- return (0 <= ippiFilterScharrHorizBorder_8u16s_C1R((const Ipp8u*)src.data, (int)src.step,
- (Ipp16s*)dst.data, (int)dst.step, roi, ippBorderRepl, 0, (Ipp8u*)(char*)buffer));
+ return (0 <= ippiFilterScharrHorizBorder_8u16s_C1R(src.ptr<Ipp8u>(), (int)src.step,
+ dst.ptr<Ipp16s>(), (int)dst.step, roi, ippBorderRepl, 0, (Ipp8u*)(char*)buffer));
}
return false;
}
return false;
buffer.allocate(bufSize);
- if (0 > ippiFilterScharrVertBorder_32f_C1R((const Ipp32f*)src.data, (int)src.step,
- (Ipp32f*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows),
+ if (0 > ippiFilterScharrVertBorder_32f_C1R(src.ptr<Ipp32f>(), (int)src.step,
+ dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(src.cols, src.rows),
ippBorderRepl, 0, (Ipp8u*)(char*)buffer))
{
return false;
if (scale != 1)
/* IPP is fast, so MulC produce very little perf degradation.*/
- //ippiMulC_32f_C1IR((Ipp32f)scale, (Ipp32f*)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
- ippiMulC_32f_C1R((Ipp32f*)dst.data, (int)dst.step, (Ipp32f)scale, (Ipp32f*)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
+ //ippiMulC_32f_C1IR((Ipp32f)scale, dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
+ ippiMulC_32f_C1R(dst.ptr<Ipp32f>(), (int)dst.step, (Ipp32f)scale, dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
return true;
}
if ((dx == 0) && (dy == 1))
return false;
buffer.allocate(bufSize);
- if (0 > ippiFilterScharrHorizBorder_32f_C1R((const Ipp32f*)src.data, (int)src.step,
- (Ipp32f*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows),
+ if (0 > ippiFilterScharrHorizBorder_32f_C1R(src.ptr<Ipp32f>(), (int)src.step,
+ dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(src.cols, src.rows),
ippBorderRepl, 0, (Ipp8u*)(char*)buffer))
return false;
if (scale != 1)
- ippiMulC_32f_C1R((Ipp32f *)dst.data, (int)dst.step, (Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
+ ippiMulC_32f_C1R(dst.ptr<Ipp32f>(), (int)dst.step, (Ipp32f)scale, dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
return true;
}
}
IPP_RETURN_ERROR
buffer.allocate(bufSize);
- if (0 > ippiFilterSobelNegVertBorder_8u16s_C1R((const Ipp8u*)src.data, (int)src.step,
- (Ipp16s*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
+ if (0 > ippiFilterSobelNegVertBorder_8u16s_C1R(src.ptr<Ipp8u>(), (int)src.step,
+ dst.ptr<Ipp16s>(), (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
ippBorderRepl, 0, (Ipp8u*)(char*)buffer))
IPP_RETURN_ERROR
return true;
IPP_RETURN_ERROR
buffer.allocate(bufSize);
- if (0 > ippiFilterSobelHorizBorder_8u16s_C1R((const Ipp8u*)src.data, (int)src.step,
- (Ipp16s*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
+ if (0 > ippiFilterSobelHorizBorder_8u16s_C1R(src.ptr<Ipp8u>(), (int)src.step,
+ dst.ptr<Ipp16s>(), (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
ippBorderRepl, 0, (Ipp8u*)(char*)buffer))
IPP_RETURN_ERROR
return true;
IPP_RETURN_ERROR
buffer.allocate(bufSize);
- if (0 > ippiFilterSobelVertSecondBorder_8u16s_C1R((const Ipp8u*)src.data, (int)src.step,
- (Ipp16s*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
+ if (0 > ippiFilterSobelVertSecondBorder_8u16s_C1R(src.ptr<Ipp8u>(), (int)src.step,
+ dst.ptr<Ipp16s>(), (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
ippBorderRepl, 0, (Ipp8u*)(char*)buffer))
IPP_RETURN_ERROR
return true;
IPP_RETURN_ERROR
buffer.allocate(bufSize);
- if (0 > ippiFilterSobelHorizSecondBorder_8u16s_C1R((const Ipp8u*)src.data, (int)src.step,
- (Ipp16s*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
+ if (0 > ippiFilterSobelHorizSecondBorder_8u16s_C1R(src.ptr<Ipp8u>(), (int)src.step,
+ dst.ptr<Ipp16s>(), (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
ippBorderRepl, 0, (Ipp8u*)(char*)buffer))
IPP_RETURN_ERROR
return true;
IPP_RETURN_ERROR
buffer.allocate(bufSize);
- if (0 > ippiFilterSobelNegVertBorder_32f_C1R((const Ipp32f*)src.data, (int)src.step,
- (Ipp32f*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
+ if (0 > ippiFilterSobelNegVertBorder_32f_C1R(src.ptr<Ipp32f>(), (int)src.step,
+ dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
ippBorderRepl, 0, (Ipp8u*)(char*)buffer))
IPP_RETURN_ERROR
if(scale != 1)
- ippiMulC_32f_C1R((Ipp32f *)dst.data, (int)dst.step, (Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
+ ippiMulC_32f_C1R(dst.ptr<Ipp32f>(), (int)dst.step, (Ipp32f)scale, dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
return true;
}
if (0 > ippiFilterSobelHorizGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize))
IPP_RETURN_ERROR
buffer.allocate(bufSize);
- if (0 > ippiFilterSobelHorizBorder_32f_C1R((const Ipp32f*)src.data, (int)src.step,
- (Ipp32f*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
+ if (0 > ippiFilterSobelHorizBorder_32f_C1R(src.ptr<Ipp32f>(), (int)src.step,
+ dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
ippBorderRepl, 0, (Ipp8u*)(char*)buffer))
IPP_RETURN_ERROR
if(scale != 1)
- ippiMulC_32f_C1R((Ipp32f *)dst.data, (int)dst.step, (Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
+ ippiMulC_32f_C1R(dst.ptr<Ipp32f>(), (int)dst.step, (Ipp32f)scale, dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
return true;
}
#endif
IPP_RETURN_ERROR
buffer.allocate(bufSize);
- if (0 > ippiFilterSobelVertSecondBorder_32f_C1R((const Ipp32f*)src.data, (int)src.step,
- (Ipp32f*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
+ if (0 > ippiFilterSobelVertSecondBorder_32f_C1R(src.ptr<Ipp32f>(), (int)src.step,
+ dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
ippBorderRepl, 0, (Ipp8u*)(char*)buffer))
IPP_RETURN_ERROR
if(scale != 1)
- ippiMulC_32f_C1R((Ipp32f *)dst.data, (int)dst.step, (Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
+ ippiMulC_32f_C1R(dst.ptr<Ipp32f>(), (int)dst.step, (Ipp32f)scale, dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
return true;
}
IPP_RETURN_ERROR
buffer.allocate(bufSize);
- if (0 > ippiFilterSobelHorizSecondBorder_32f_C1R((const Ipp32f*)src.data, (int)src.step,
- (Ipp32f*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
+ if (0 > ippiFilterSobelHorizSecondBorder_32f_C1R(src.ptr<Ipp32f>(), (int)src.step,
+ dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
ippBorderRepl, 0, (Ipp8u*)(char*)buffer))
IPP_RETURN_ERROR
if(scale != 1)
- ippiMulC_32f_C1R((Ipp32f *)dst.data, (int)dst.step, (Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
+ ippiMulC_32f_C1R(dst.ptr<Ipp32f>(), (int)dst.step, (Ipp32f)scale, dst.ptr<Ipp32f>(), (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));
return true;
}
#endif
if (borderTypeIpp >= 0 && ippiFilterLaplacianGetBufferSize_##ippfavor##_C1R(roisize, masksize, &bufsize) >= 0) \
{ \
Ipp8u * buffer = ippsMalloc_8u(bufsize); \
- status = ippiFilterLaplacianBorder_##ippfavor##_C1R((const ippsrctype *)src.data, (int)src.step, (ippdsttype *)dst.data, \
+ status = ippiFilterLaplacianBorder_##ippfavor##_C1R(src.ptr<ippsrctype>(), (int)src.step, dst.ptr<ippdsttype>(), \
(int)dst.step, roisize, masksize, borderTypeIpp, 0, buffer); \
ippsFree(buffer); \
} \
IPP_FILTER_LAPLACIAN(Ipp8u, Ipp16s, 8u16s);
if (needScale && status >= 0)
- status = ippiMulC_16s_C1IRSfs((Ipp16s)iscale, (Ipp16s *)dst.data, (int)dst.step, roisize, 0);
+ status = ippiMulC_16s_C1IRSfs((Ipp16s)iscale, dst.ptr<Ipp16s>(), (int)dst.step, roisize, 0);
if (needDelta && status >= 0)
- status = ippiAddC_16s_C1IRSfs((Ipp16s)idelta, (Ipp16s *)dst.data, (int)dst.step, roisize, 0);
+ status = ippiAddC_16s_C1IRSfs((Ipp16s)idelta, dst.ptr<Ipp16s>(), (int)dst.step, roisize, 0);
}
else if (sdepth == CV_32F && ddepth == CV_32F)
{
IPP_FILTER_LAPLACIAN(Ipp32f, Ipp32f, 32f);
if (needScale && status >= 0)
- status = ippiMulC_32f_C1IR((Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, roisize);
+ status = ippiMulC_32f_C1IR((Ipp32f)scale, dst.ptr<Ipp32f>(), (int)dst.step, roisize);
if (needDelta && status >= 0)
- status = ippiAddC_32f_C1IR((Ipp32f)delta, (Ipp32f *)dst.data, (int)dst.step, roisize);
+ status = ippiAddC_32f_C1IR((Ipp32f)delta, dst.ptr<Ipp32f>(), (int)dst.step, roisize);
}
CV_SUPPRESS_DEPRECATED_END
Mat src = _src.getMat(), dst = _dst.getMat();
int y = fx->start(src), dsty = 0, dy = 0;
fy->start(src);
- const uchar* sptr = src.data + y*src.step;
+ const uchar* sptr = src.ptr(y);
int dy0 = std::min(std::max((int)(STRIPE_SIZE/(CV_ELEM_SIZE(stype)*src.cols)), 1), src.rows);
Mat d2x( dy0 + kd.rows - 1, src.cols, wtype );
for( ; dsty < src.rows; sptr += dy0*src.step, dsty += dy )
{
- fx->proceed( sptr, (int)src.step, dy0, d2x.data, (int)d2x.step );
- dy = fy->proceed( sptr, (int)src.step, dy0, d2y.data, (int)d2y.step );
+ fx->proceed( sptr, (int)src.step, dy0, d2x.ptr(), (int)d2x.step );
+ dy = fy->proceed( sptr, (int)src.step, dy0, d2y.ptr(), (int)d2y.step );
if( dy > 0 )
{
Mat dstripe = dst.rowRange(dsty, dsty + dy);
const int DIAG_DIST = CV_FLT_TO_FIX( metrics[1], DIST_SHIFT );
const float scale = 1.f/(1 << DIST_SHIFT);
- const uchar* src = _src.data;
+ const uchar* src = _src.ptr();
int* temp = _temp.ptr<int>();
float* dist = _dist.ptr<float>();
int srcstep = (int)(_src.step/sizeof(src[0]));
const int LONG_DIST = CV_FLT_TO_FIX( metrics[2], DIST_SHIFT );
const float scale = 1.f/(1 << DIST_SHIFT);
- const uchar* src = _src.data;
+ const uchar* src = _src.ptr();
int* temp = _temp.ptr<int>();
float* dist = _dist.ptr<float>();
int srcstep = (int)(_src.step/sizeof(src[0]));
const int LONG_DIST = CV_FLT_TO_FIX( metrics[2], DIST_SHIFT );
const float scale = 1.f/(1 << DIST_SHIFT);
- const uchar* src = _src.data;
+ const uchar* src = _src.ptr();
int* temp = _temp.ptr<int>();
float* dist = _dist.ptr<float>();
int* labels = _labels.ptr<int>();
uchar lut[256];
int x, y;
- const uchar *sbase = src.data;
- uchar *dbase = dst.data;
+ const uchar *sbase = src.ptr();
+ uchar *dbase = dst.ptr();
int srcstep = (int)src.step;
int dststep = (int)dst.step;
for( i = 0; i < total; i++ )
{
- int ofs = (int)((const uchar*)tmpCorners[i] - eig.data);
+ int ofs = (int)((const uchar*)tmpCorners[i] - eig.ptr());
int y = (int)(ofs / eig.step);
int x = (int)((ofs - y*eig.step)/sizeof(float));
{
for( i = 0; i < total; i++ )
{
- int ofs = (int)((const uchar*)tmpCorners[i] - eig.data);
+ int ofs = (int)((const uchar*)tmpCorners[i] - eig.ptr());
int y = (int)(ofs / eig.step);
int x = (int)((ofs - y*eig.step)/sizeof(float));
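
The two loops above turn a stored element pointer back into image coordinates by measuring its byte offset from `eig.ptr()`; only the base of the subtraction changes. The same computation in isolation (hypothetical helper name):

#include <opencv2/core.hpp>

// Illustrative: map an element pointer inside a CV_32FC1 matrix back to (x, y).
static cv::Point locate32f(const cv::Mat& eig, const float* elem)
{
    int ofs = (int)((const uchar*)elem - eig.ptr());       // byte offset from the first row
    int y = (int)(ofs / eig.step);
    int x = (int)((ofs - y * eig.step) / sizeof(float));
    return cv::Point(x, y);
}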
dstOfs.y + srcRoi.height <= dst.rows );
int y = start(src, srcRoi, isolated);
- proceed( src.data + y*src.step
+ proceed( src.ptr(y)
+ srcRoi.x*src.elemSize(),
(int)src.step, endY - startY,
- dst.data + dstOfs.y*dst.step +
+ dst.ptr(dstOfs.y) +
dstOfs.x*dst.elemSize(), (int)dst.step );
}
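
The `apply` call above composes the address of the ROI origin from a row pointer plus a column offset in bytes; only the row part changes to `ptr()`. A small sketch of that addressing (the function and ROI check are illustrative):

#include <opencv2/core.hpp>

// Illustrative: address of pixel (roi.x, roi.y) without creating a sub-header.
static const uchar* roiOrigin(const cv::Mat& src, cv::Rect roi)
{
    CV_Assert(cv::Rect(0, 0, src.cols, src.rows).contains(roi.tl()));
    return src.ptr(roi.y) + roi.x * src.elemSize();
}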
Mat kernel;
_kernel.convertTo(kernel, CV_64F);
- const double* coeffs = (double*)kernel.data;
+ const double* coeffs = kernel.ptr<double>();
double sum = 0;
int type = KERNEL_SMOOTH + KERNEL_INTEGER;
if( (_kernel.rows == 1 || _kernel.cols == 1) &&
int k, ksize = kernel.rows + kernel.cols - 1;
for( k = 0; k < ksize; k++ )
{
- int v = ((const int*)kernel.data)[k];
+ int v = kernel.ptr<int>()[k];
if( v < SHRT_MIN || v > SHRT_MAX )
{
smallValues = false;
int i = 0, k, _ksize = kernel.rows + kernel.cols - 1;
int* dst = (int*)_dst;
- const int* _kx = (const int*)kernel.data;
+ const int* _kx = kernel.ptr<int>();
width *= cn;
if( smallValues )
int k, ksize = kernel.rows + kernel.cols - 1;
for( k = 0; k < ksize; k++ )
{
- int v = ((const int*)kernel.data)[k];
+ int v = kernel.ptr<int>()[k];
if( v < SHRT_MIN || v > SHRT_MAX )
{
smallValues = false;
int i = 0, j, k, _ksize = kernel.rows + kernel.cols - 1;
int* dst = (int*)_dst;
bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
- const int* kx = (const int*)kernel.data + _ksize/2;
+ const int* kx = kernel.ptr<int>() + _ksize/2;
if( !smallValues )
return 0;
return 0;
int ksize2 = (kernel.rows + kernel.cols - 1)/2;
- const float* ky = (const float*)kernel.data + ksize2;
+ const float* ky = kernel.ptr<float>() + ksize2;
int i = 0, k;
bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
const int** src = (const int**)_src;
return 0;
int ksize2 = (kernel.rows + kernel.cols - 1)/2;
- const float* ky = (const float*)kernel.data + ksize2;
+ const float* ky = kernel.ptr<float>() + ksize2;
int i = 0;
bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
const int** src = (const int**)_src;
int i = 0, k, _ksize = kernel.rows + kernel.cols - 1;
float* dst = (float*)_dst;
- const float* _kx = (const float*)kernel.data;
+ const float* _kx = kernel.ptr<float>();
width *= cn;
for( ; i <= width - 8; i += 8 )
return 0;
int ksize2 = (kernel.rows + kernel.cols - 1)/2;
- const float* ky = (const float*)kernel.data + ksize2;
+ const float* ky = kernel.ptr<float>() + ksize2;
int i = 0, k;
bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
const float** src = (const float**)_src;
int _ksize = kernel.rows + kernel.cols - 1;
const float* src0 = (const float*)_src;
float* dst = (float*)_dst;
- const float* _kx = (const float*)kernel.data;
+ const float* _kx = kernel.ptr<float>();
if( !haveSSE )
return 0;
float* dst = (float*)_dst;
const float* src = (const float*)_src + (_ksize/2)*cn;
bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
- const float* kx = (const float*)kernel.data + _ksize/2;
+ const float* kx = kernel.ptr<float>() + _ksize/2;
width *= cn;
if( symmetrical )
return 0;
int ksize2 = (kernel.rows + kernel.cols - 1)/2;
- const float* ky = (const float*)kernel.data + ksize2;
+ const float* ky = kernel.ptr<float>() + ksize2;
int i = 0, k;
bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
const float** src = (const float**)_src;
return 0;
int ksize2 = (kernel.rows + kernel.cols - 1)/2;
- const float* ky = (const float*)kernel.data + ksize2;
+ const float* ky = kernel.ptr<float>() + ksize2;
int i = 0;
bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
const float** src = (const float**)_src;
void operator()(const uchar* src, uchar* dst, int width, int cn)
{
int _ksize = ksize;
- const DT* kx = (const DT*)kernel.data;
+ const DT* kx = kernel.ptr<DT>();
const ST* S;
DT* D = (DT*)dst;
int i, k;
void operator()(const uchar* src, uchar* dst, int width, int cn)
{
int ksize2 = this->ksize/2, ksize2n = ksize2*cn;
- const DT* kx = (const DT*)this->kernel.data + ksize2;
+ const DT* kx = this->kernel.ptr<DT>() + ksize2;
bool symmetrical = (this->symmetryType & KERNEL_SYMMETRICAL) != 0;
DT* D = (DT*)dst;
int i = this->vecOp(src, dst, width, cn), j, k;
void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
{
- const ST* ky = (const ST*)kernel.data;
+ const ST* ky = kernel.ptr<ST>();
ST _delta = delta;
int _ksize = ksize;
int i, k;
void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
{
int ksize2 = this->ksize/2;
- const ST* ky = (const ST*)this->kernel.data + ksize2;
+ const ST* ky = this->kernel.ptr<ST>() + ksize2;
int i, k;
bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
ST _delta = this->delta;
void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
{
int ksize2 = this->ksize/2;
- const ST* ky = (const ST*)this->kernel.data + ksize2;
+ const ST* ky = this->kernel.ptr<ST>() + ksize2;
int i;
bool symmetrical = (this->symmetryType & KERNEL_SYMMETRICAL) != 0;
bool is_1_2_1 = ky[0] == 1 && ky[1] == 2;
for( i = k = 0; i < kernel.rows; i++ )
{
- const uchar* krow = kernel.data + kernel.step*i;
+ const uchar* krow = kernel.ptr(i);
for( j = 0; j < kernel.cols; j++ )
{
if( ktype == CV_8U )
_Tp newVal, ConnectedComp* region, int flags,
std::vector<FFillSegment>* buffer )
{
- _Tp* img = (_Tp*)(image.data + image.step * seed.y);
+ _Tp* img = image.ptr<_Tp>(seed.y);
Size roi = image.size();
int i, L, R;
int area = 0;
for( k = 0; k < 3; k++ )
{
dir = data[k][0];
- img = (_Tp*)(image.data + (YC + dir) * image.step);
+ img = image.ptr<_Tp>(YC + dir);
int left = data[k][1];
int right = data[k][2];
std::vector<FFillSegment>* buffer )
{
int step = (int)image.step, maskStep = (int)msk.step;
- uchar* pImage = image.data;
+ uchar* pImage = image.ptr();
_Tp* img = (_Tp*)(pImage + step*seed.y);
- uchar* pMask = msk.data + maskStep + sizeof(_MTp);
+ uchar* pMask = msk.ptr() + maskStep + sizeof(_MTp);
_MTp* mask = (_MTp*)(pMask + maskStep*seed.y);
int i, L, R;
int area = 0;
if( is_simple )
{
size_t elem_size = img.elemSize();
- const uchar* seed_ptr = img.data + img.step*seedPoint.y + elem_size*seedPoint.x;
+ const uchar* seed_ptr = img.ptr(seedPoint.y) + elem_size*seedPoint.x;
size_t k = 0;
for(; k < elem_size; k++)
CV_Assert( mask.type() == CV_8U );
}
- memset( mask.data, 1, mask.cols );
- memset( mask.data + mask.step*(mask.rows-1), 1, mask.cols );
+ memset( mask.ptr(), 1, mask.cols );
+ memset( mask.ptr(mask.rows-1), 1, mask.cols );
for( i = 1; i <= size.height; i++ )
{
if( total == 0 )
return measureDist ? -DBL_MAX : -1;
- const Point* cnt = (const Point*)contour.data;
+ const Point* cnt = contour.ptr<Point>();
const Point2f* cntf = (const Point2f*)cnt;
if( !is_float && !measureDist && ip.x == pt.x && ip.y == pt.y )
deltas[i*2+1] = (int)(images[j].step/esz1 - imsize.width*deltas[i*2]);
}
- if( mask.data )
+ if( !mask.empty() )
{
CV_Assert( mask.size() == imsize && mask.channels() == 1 );
isContinuous = isContinuous && mask.isContinuous();
{
T** ptrs = (T**)&_ptrs[0];
const int* deltas = &_deltas[0];
- uchar* H = hist.data;
+ uchar* H = hist.ptr();
int i, x;
const uchar* mask = _ptrs[dims];
int mstep = _deltas[dims*2 + 1];
{
uchar** ptrs = &_ptrs[0];
const int* deltas = &_deltas[0];
- uchar* H = hist.data;
+ uchar* H = hist.ptr();
int x;
const uchar* mask = _ptrs[dims];
int mstep = _deltas[dims*2 + 1];
Mat phist(hist->size(), hist->type(), Scalar::all(0));
IppStatus status = ippiHistogramEven_8u_C1R(
- src->data + src->step * range.start, (int)src->step, ippiSize(src->cols, range.end - range.start),
- (Ipp32s *)phist.data, (Ipp32s *)*levels, histSize, low, high);
+ src->ptr(range.start), (int)src->step, ippiSize(src->cols, range.end - range.start),
+ phist.ptr<Ipp32s>(), (Ipp32s *)*levels, histSize, low, high);
if (status < 0)
{
CV_Assert(dims > 0 && histSize);
- uchar* histdata = _hist.getMat().data;
+ const uchar* const histdata = _hist.getMat().ptr();
_hist.create(dims, histSize, CV_32F);
Mat hist = _hist.getMat(), ihist = hist;
ihist.flags = (ihist.flags & ~CV_MAT_TYPE_MASK)|CV_32S;
std::vector<double> uniranges;
Size imsize;
- CV_Assert( !mask.data || mask.type() == CV_8UC1 );
+ CV_Assert( mask.empty() || mask.type() == CV_8UC1 );
histPrepareImages( images, nimages, channels, mask, dims, hist.size, ranges,
uniform, ptrs, deltas, imsize, uniranges );
const double* _uniranges = uniform ? &uniranges[0] : 0;
std::vector<double> uniranges;
Size imsize;
- CV_Assert( !mask.data || mask.type() == CV_8UC1 );
+ CV_Assert( mask.empty() || mask.type() == CV_8UC1 );
histPrepareImages( images, nimages, channels, mask, dims, hist.hdr->size, ranges,
uniform, ptrs, deltas, imsize, uniranges );
const double* _uniranges = uniform ? &uniranges[0] : 0;
{
T** ptrs = (T**)&_ptrs[0];
const int* deltas = &_deltas[0];
- uchar* H = hist.data;
+ const uchar* H = hist.ptr();
int i, x;
BT* bproj = (BT*)_ptrs[dims];
int bpstep = _deltas[dims*2 + 1];
for( x = 0; x < imsize.width; x++, p0 += d0 )
{
int idx = cvFloor(*p0*a + b);
- bproj[x] = (unsigned)idx < (unsigned)sz ? saturate_cast<BT>(((float*)H)[idx]*scale) : 0;
+ bproj[x] = (unsigned)idx < (unsigned)sz ? saturate_cast<BT>(((const float*)H)[idx]*scale) : 0;
}
}
}
int idx1 = cvFloor(*p1*a1 + b1);
bproj[x] = (unsigned)idx0 < (unsigned)sz0 &&
(unsigned)idx1 < (unsigned)sz1 ?
- saturate_cast<BT>(((float*)(H + hstep0*idx0))[idx1]*scale) : 0;
+ saturate_cast<BT>(((const float*)(H + hstep0*idx0))[idx1]*scale) : 0;
}
}
}
bproj[x] = (unsigned)idx0 < (unsigned)sz0 &&
(unsigned)idx1 < (unsigned)sz1 &&
(unsigned)idx2 < (unsigned)sz2 ?
- saturate_cast<BT>(((float*)(H + hstep0*idx0 + hstep1*idx1))[idx2]*scale) : 0;
+ saturate_cast<BT>(((const float*)(H + hstep0*idx0 + hstep1*idx1))[idx2]*scale) : 0;
}
}
}
{
for( x = 0; x < imsize.width; x++ )
{
- uchar* Hptr = H;
+ const uchar* Hptr = H;
for( i = 0; i < dims; i++ )
{
int idx = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]);
}
if( i == dims )
- bproj[x] = saturate_cast<BT>(*(float*)Hptr*scale);
+ bproj[x] = saturate_cast<BT>(*(const float*)Hptr*scale);
else
{
bproj[x] = 0;
{
for( x = 0; x < imsize.width; x++ )
{
- uchar* Hptr = H;
+ const uchar* Hptr = H;
for( i = 0; i < dims; i++ )
{
float v = (float)*ptrs[i];
}
if( i == dims )
- bproj[x] = saturate_cast<BT>(*(float*)Hptr*scale);
+ bproj[x] = saturate_cast<BT>(*(const float*)Hptr*scale);
else
{
bproj[x] = 0;
{
uchar** ptrs = &_ptrs[0];
const int* deltas = &_deltas[0];
- uchar* H = hist.data;
+ const uchar* H = hist.ptr();
int i, x;
uchar* bproj = _ptrs[dims];
int bpstep = _deltas[dims*2 + 1];
for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
{
size_t idx = tab[*p0] + tab[*p1 + 256];
- bproj[x] = idx < OUT_OF_RANGE ? saturate_cast<uchar>(*(float*)(H + idx)*scale) : 0;
+ bproj[x] = idx < OUT_OF_RANGE ? saturate_cast<uchar>(*(const float*)(H + idx)*scale) : 0;
}
}
}
for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
{
size_t idx = tab[*p0] + tab[*p1 + 256] + tab[*p2 + 512];
- bproj[x] = idx < OUT_OF_RANGE ? saturate_cast<uchar>(*(float*)(H + idx)*scale) : 0;
+ bproj[x] = idx < OUT_OF_RANGE ? saturate_cast<uchar>(*(const float*)(H + idx)*scale) : 0;
}
}
}
{
for( x = 0; x < imsize.width; x++ )
{
- uchar* Hptr = H;
+ const uchar* Hptr = H;
for( i = 0; i < dims; i++ )
{
size_t idx = tab[*ptrs[i] + i*256];
}
if( i == dims )
- bproj[x] = saturate_cast<uchar>(*(float*)Hptr*scale);
+ bproj[x] = saturate_cast<uchar>(*(const float*)Hptr*scale);
else
{
bproj[x] = 0;
Size imsize;
int dims = hist.dims == 2 && hist.size[1] == 1 ? 1 : hist.dims;
- CV_Assert( dims > 0 && hist.data );
+ CV_Assert( dims > 0 && !hist.empty() );
_backProject.create( images[0].size(), images[0].depth() );
Mat backProject = _backProject.getMat();
histPrepareImages( images, nimages, channels, backProject, dims, hist.size, ranges,
int hsz[CV_CN_MAX+1];
memcpy(hsz, &H0.size[0], H0.dims*sizeof(hsz[0]));
hsz[H0.dims] = hcn;
- H = Mat(H0.dims+1, hsz, H0.depth(), H0.data);
+ H = Mat(H0.dims+1, hsz, H0.depth(), H0.ptr());
}
else
H = H0;
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
- const float* h1 = (const float*)it.planes[0].data;
- const float* h2 = (const float*)it.planes[1].data;
+ const float* h1 = it.planes[0].ptr<float>();
+ const float* h2 = it.planes[1].ptr<float>();
len = it.planes[0].rows*it.planes[0].cols*H1.channels();
if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT))
CV_Assert( img.type() == CV_8UC1 );
- const uchar* image = img.data;
+ const uchar* image = img.ptr();
int step = (int)img.step;
int width = img.cols;
int height = img.rows;
threshold = MIN( threshold, 255 );
- const uchar* image_src = img.data;
+ const uchar* image_src = img.ptr();
int step = (int)img.step;
int w = img.cols;
int h = img.rows;
trigtab[n*2+1] = (float)(sin((double)n*theta) * irho);
}
const float* ttab = &trigtab[0];
- uchar* mdata0 = mask.data;
+ uchar* mdata0 = mask.ptr();
std::vector<Point> nzloc;
// stage 1. collect non-zero image points
Point point = nzloc[idx];
Point line_end[2];
float a, b;
- int* adata = (int*)accum.data;
+ int* adata = accum.ptr<int>();
int i = point.y, j = point.x, k, x0, y0, dx0, dy0, xflag;
int good_line;
const int shift = 16;
{
if( good_line )
{
- adata = (int*)accum.data;
+ adata = accum.ptr<int>();
for( int n = 0; n < numangle; n++, adata += numrho )
{
int r = cvRound( j1 * ttab[n*2] + i1 * ttab[n*2+1] );
}
else
{
- cvSeqPushMulti(lines, lx.data, nlines);
+ cvSeqPushMulti(lines, lx.ptr(), nlines);
}
}
{
_arr.create(1, seq->total, seq->flags, -1, true);
Mat arr = _arr.getMat();
- cvCvtSeqToArray(seq, arr.data);
+ cvCvtSeqToArray(seq, arr.ptr());
}
else
_arr.release();
{
uchar* D = dst.data + dst.step*y;
int sy = std::min(cvFloor(y*ify), ssize.height-1);
- const uchar* S = src.data + src.step*sy;
+ const uchar* S = src.ptr(sy);
switch( pix_size )
{
}
if( k1 == ksize )
k0 = std::min(k0, k); // remember the first row that needs to be computed
- srows[k] = (T*)(src.data + src.step*sy);
+ srows[k] = src.ptr<T>(sy);
prev_sy[k] = sy;
}
continue;
}
- dx = vop((const T*)(src.data + src.step * sy0), D, w);
+ dx = vop(src.ptr<T>(sy0), D, w);
for( ; dx < w; dx++ )
{
- const T* S = (const T*)(src.data + src.step * sy0) + xofs[dx];
+ const T* S = src.ptr<T>(sy0) + xofs[dx];
WT sum = 0;
k = 0;
#if CV_ENABLE_UNROLLED
{
if( sy0 + sy >= ssize.height )
break;
- const T* S = (const T*)(src.data + src.step*(sy0 + sy)) + sx0;
+ const T* S = src.ptr<T>(sy0 + sy) + sx0;
for( int sx = 0; sx < scale_x*cn; sx += cn )
{
if( sx0 + sx >= ssize.width )
int sy = ytab[j].si;
{
- const T* S = (const T*)(src->data + src->step*sy);
+ const T* S = src->ptr<T>(sy);
for( dx = 0; dx < dsize.width; dx++ )
buf[dx] = (WT)0;
if( dy != prev_dy )
{
- T* D = (T*)(dst->data + dst->step*prev_dy);
+ T* D = dst->ptr<T>(prev_dy);
for( dx = 0; dx < dsize.width; dx++ )
{
}
{
- T* D = (T*)(dst->data + dst->step*prev_dy);
+ T* D = dst->ptr<T>(prev_dy);
for( dx = 0; dx < dsize.width; dx++ )
D[dx] = saturate_cast<T>(sum[dx]);
}
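// --- illustrative sketch (assumed standalone snippet, not part of the patch above) ---
// Mat::ptr<T>(row) performs the same 'data + step*row' arithmetic as the old lines,
// but keeps the row index and the element type visible at the call site.
#include <opencv2/core.hpp>
static void rowAccessSketch(const cv::Mat& src)   // src is assumed to be CV_32FC1
{
    for( int y = 0; y < src.rows; y++ )
    {
        const float* rowOld = (const float*)(src.data + src.step*y); // old idiom
        const float* rowNew = src.ptr<float>(y);                     // new idiom, same row
        CV_Assert(rowOld == rowNew);
    }
}
// --- end of sketch ---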
CHECK_IPP_STATUS(getBufferSizeFunc(pSpec, dstSize, cn, &bufsize));
CHECK_IPP_STATUS(getSrcOffsetFunc(pSpec, dstOffset, &srcOffset));
- const Ipp8u* pSrc = (const Ipp8u*)src.data + (int)src.step[0] * srcOffset.y + srcOffset.x * cn * itemSize;
- Ipp8u* pDst = (Ipp8u*)dst.data + (int)dst.step[0] * dstOffset.y + dstOffset.x * cn * itemSize;
+ const Ipp8u* pSrc = src.ptr<Ipp8u>(srcOffset.y) + srcOffset.x * cn * itemSize;
+ Ipp8u* pDst = dst.ptr<Ipp8u>(dstOffset.y) + dstOffset.x * cn * itemSize;
AutoBuffer<uchar> buf(bufsize + 64);
uchar* bufptr = alignPtr((uchar*)buf, 32);
{
Size ssize = _src.size(), dsize = _dst.size();
int cn = _src.channels();
- const T* S0 = (const T*)_src.data;
+ const T* S0 = _src.ptr<T>();
size_t sstep = _src.step/sizeof(S0[0]);
Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
saturate_cast<T>(_borderValue[1]),
for( dy = 0; dy < dsize.height; dy++ )
{
- T* D = (T*)(_dst.data + _dst.step*dy);
- const short* XY = (const short*)(_xy.data + _xy.step*dy);
+ T* D = _dst.ptr<T>(dy);
+ const short* XY = _xy.ptr<short>(dy);
if( cn == 1 )
{
sstep > 0x8000 )
return 0;
- const uchar *S0 = _src.data, *S1 = _src.data + _src.step;
+ const uchar *S0 = _src.ptr(), *S1 = _src.ptr(1);
const short* wtab = cn == 1 ? (const short*)_wtab : &BilinearTab_iC4[0][0][0];
uchar* D = (uchar*)_dst;
__m128i delta = _mm_set1_epi32(INTER_REMAP_COEF_SCALE/2);
Size ssize = _src.size(), dsize = _dst.size();
int cn = _src.channels();
const AT* wtab = (const AT*)_wtab;
- const T* S0 = (const T*)_src.data;
+ const T* S0 = _src.ptr<T>();
size_t sstep = _src.step/sizeof(S0[0]);
Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
saturate_cast<T>(_borderValue[1]),
for( dy = 0; dy < dsize.height; dy++ )
{
- T* D = (T*)(_dst.data + _dst.step*dy);
- const short* XY = (const short*)(_xy.data + _xy.step*dy);
- const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy);
+ T* D = _dst.ptr<T>(dy);
+ const short* XY = _xy.ptr<short>(dy);
+ const ushort* FXY = _fxy.ptr<ushort>(dy);
int X0 = 0;
bool prevInlier = false;
Size ssize = _src.size(), dsize = _dst.size();
int cn = _src.channels();
const AT* wtab = (const AT*)_wtab;
- const T* S0 = (const T*)_src.data;
+ const T* S0 = _src.ptr<T>();
size_t sstep = _src.step/sizeof(S0[0]);
Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
saturate_cast<T>(_borderValue[1]),
for( dy = 0; dy < dsize.height; dy++ )
{
- T* D = (T*)(_dst.data + _dst.step*dy);
- const short* XY = (const short*)(_xy.data + _xy.step*dy);
- const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy);
+ T* D = _dst.ptr<T>(dy);
+ const short* XY = _xy.ptr<short>(dy);
+ const ushort* FXY = _fxy.ptr<ushort>(dy);
for( dx = 0; dx < dsize.width; dx++, D += cn )
{
Size ssize = _src.size(), dsize = _dst.size();
int cn = _src.channels();
const AT* wtab = (const AT*)_wtab;
- const T* S0 = (const T*)_src.data;
+ const T* S0 = _src.ptr<T>();
size_t sstep = _src.step/sizeof(S0[0]);
Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
saturate_cast<T>(_borderValue[1]),
for( dy = 0; dy < dsize.height; dy++ )
{
- T* D = (T*)(_dst.data + _dst.step*dy);
- const short* XY = (const short*)(_xy.data + _xy.step*dy);
- const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy);
+ T* D = _dst.ptr<T>(dy);
+ const short* XY = _xy.ptr<short>(dy);
+ const ushort* FXY = _fxy.ptr<ushort>(dy);
for( dx = 0; dx < dsize.width; dx++, D += cn )
{
if( nnfunc )
{
- if( m1->type() == CV_16SC2 && !m2->data ) // the data is already in the right format
+ if( m1->type() == CV_16SC2 && m2->empty() ) // the data is already in the right format
bufxy = (*m1)(Rect(x, y, bcols, brows));
else if( map_depth != CV_32F )
{
for( y1 = 0; y1 < brows; y1++ )
{
- short* XY = (short*)(bufxy.data + bufxy.step*y1);
- const short* sXY = (const short*)(m1->data + m1->step*(y+y1)) + x*2;
- const ushort* sA = (const ushort*)(m2->data + m2->step*(y+y1)) + x;
+ short* XY = bufxy.ptr<short>(y1);
+ const short* sXY = m1->ptr<short>(y+y1) + x*2;
+ const ushort* sA = m2->ptr<ushort>(y+y1) + x;
for( x1 = 0; x1 < bcols; x1++ )
{
{
for( y1 = 0; y1 < brows; y1++ )
{
- short* XY = (short*)(bufxy.data + bufxy.step*y1);
- const float* sX = (const float*)(m1->data + m1->step*(y+y1)) + x;
- const float* sY = (const float*)(m2->data + m2->step*(y+y1)) + x;
+ short* XY = bufxy.ptr<short>(y1);
+ const float* sX = m1->ptr<float>(y+y1) + x;
+ const float* sY = m2->ptr<float>(y+y1) + x;
x1 = 0;
#if CV_SSE2
Mat bufa(_bufa, Rect(0, 0, bcols, brows));
for( y1 = 0; y1 < brows; y1++ )
{
- short* XY = (short*)(bufxy.data + bufxy.step*y1);
- ushort* A = (ushort*)(bufa.data + bufa.step*y1);
+ short* XY = bufxy.ptr<short>(y1);
+ ushort* A = bufa.ptr<ushort>(y1);
if( m1->type() == CV_16SC2 && (m2->type() == CV_16UC1 || m2->type() == CV_16SC1) )
{
bufxy = (*m1)(Rect(x, y, bcols, brows));
- const ushort* sA = (const ushort*)(m2->data + m2->step*(y+y1)) + x;
+ const ushort* sA = m2->ptr<ushort>(y+y1) + x;
for( x1 = 0; x1 < bcols; x1++ )
A[x1] = (ushort)(sA[x1] & (INTER_TAB_SIZE2-1));
}
else if( planar_input )
{
- const float* sX = (const float*)(m1->data + m1->step*(y+y1)) + x;
- const float* sY = (const float*)(m2->data + m2->step*(y+y1)) + x;
+ const float* sX = m1->ptr<float>(y+y1) + x;
+ const float* sY = m2->ptr<float>(y+y1) + x;
x1 = 0;
#if CV_SSE2
}
else
{
- const float* sXY = (const float*)(m1->data + m1->step*(y+y1)) + x*2;
+ const float* sXY = m1->ptr<float>(y+y1) + x*2;
for( x1 = 0; x1 < bcols; x1++ )
{
Mat scalar(1, 1, sctype, borderValue);
ocl::KernelArg srcarg = ocl::KernelArg::ReadOnly(src), dstarg = ocl::KernelArg::WriteOnly(dst),
map1arg = ocl::KernelArg::ReadOnlyNoSize(map1),
- scalararg = ocl::KernelArg::Constant((void*)scalar.data, scalar.elemSize());
+ scalararg = ocl::KernelArg::Constant((void*)scalar.ptr(), scalar.elemSize());
if (map2.empty())
k.args(srcarg, dstarg, map1arg, scalararg);
int type = dst.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
if (borderType == BORDER_CONSTANT &&
- !IPPSet(borderValue, dstRoi.data, (int)dstRoi.step, dstRoiSize, cn, depth))
+ !IPPSet(borderValue, dstRoi.ptr(), (int)dstRoi.step, dstRoiSize, cn, depth))
{
*ok = false;
return;
}
- if (ippFunc(src.data, ippiSize(src.size()), (int)src.step, srcRoiRect,
- (const Ipp32f *)map1.data, (int)map1.step, (const Ipp32f *)map2.data, (int)map2.step,
- dstRoi.data, (int)dstRoi.step, dstRoiSize, ippInterpolation) < 0)
+ if (ippFunc(src.ptr(), ippiSize(src.size()), (int)src.step, srcRoiRect,
+ map1.ptr<Ipp32f>(), (int)map1.step, map2.ptr<Ipp32f>(), (int)map2.step,
+ dstRoi.ptr(), (int)dstRoi.step, dstRoiSize, ippInterpolation) < 0)
*ok = false;
}
const Mat *m1 = &map1, *m2 = &map2;
- if( (map1.type() == CV_16SC2 && (map2.type() == CV_16UC1 || map2.type() == CV_16SC1 || !map2.data)) ||
- (map2.type() == CV_16SC2 && (map1.type() == CV_16UC1 || map1.type() == CV_16SC1 || !map1.data)) )
+ if( (map1.type() == CV_16SC2 && (map2.type() == CV_16UC1 || map2.type() == CV_16SC1 || map2.empty())) ||
+ (map2.type() == CV_16SC2 && (map1.type() == CV_16UC1 || map1.type() == CV_16SC1 || map1.empty())) )
{
if( map1.type() != CV_16SC2 )
std::swap(m1, m2);
}
else
{
- CV_Assert( ((map1.type() == CV_32FC2 || map1.type() == CV_16SC2) && !map2.data) ||
+ CV_Assert( ((map1.type() == CV_32FC2 || map1.type() == CV_16SC2) && map2.empty()) ||
(map1.type() == CV_32FC1 && map2.type() == CV_32FC1) );
planar_input = map1.channels() == 1;
}
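// --- illustrative sketch (assumed helper name, not part of the patch above) ---
// Mat::empty() is true when the header holds no data (data == 0 or total() == 0), so it
// covers the same case as the old '!mat.data' test while stating the intent directly.
#include <opencv2/core.hpp>
static bool remapMapsAreValid(const cv::Mat& map1, const cv::Mat& map2)
{
    // either a packed CV_16SC2 map with no second plane, or two CV_32FC1 planes
    return (map1.type() == CV_16SC2 && map2.empty()) ||
           (map1.type() == CV_32FC1 && map2.type() == CV_32FC1);
}
// --- end of sketch ---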
CV_Assert( (m1type == CV_16SC2 && (nninterpolate || m2type == CV_16UC1 || m2type == CV_16SC1)) ||
(m2type == CV_16SC2 && (nninterpolate || m1type == CV_16UC1 || m1type == CV_16SC1)) ||
(m1type == CV_32FC1 && m2type == CV_32FC1) ||
- (m1type == CV_32FC2 && !m2->data) );
+ (m1type == CV_32FC2 && m2->empty()) );
if( m2type == CV_16SC2 )
{
(m1type == CV_32FC2 && dstm1type == CV_16SC2))) )
{
m1->convertTo( dstmap1, dstmap1.type() );
- if( dstmap2.data && dstmap2.type() == m2->type() )
+ if( !dstmap2.empty() && dstmap2.type() == m2->type() )
m2->copyTo( dstmap2 );
return;
}
return;
}
- if( m1->isContinuous() && (!m2->data || m2->isContinuous()) &&
- dstmap1.isContinuous() && (!dstmap2.data || dstmap2.isContinuous()) )
+ if( m1->isContinuous() && (m2->empty() || m2->isContinuous()) &&
+ dstmap1.isContinuous() && (dstmap2.empty() || dstmap2.isContinuous()) )
{
size.width *= size.height;
size.height = 1;
int x, y;
for( y = 0; y < size.height; y++ )
{
- const float* src1f = (const float*)(m1->data + m1->step*y);
- const float* src2f = (const float*)(m2->data + m2->step*y);
+ const float* src1f = m1->ptr<float>(y);
+ const float* src2f = m2->ptr<float>(y);
const short* src1 = (const short*)src1f;
const ushort* src2 = (const ushort*)src2f;
- float* dst1f = (float*)(dstmap1.data + dstmap1.step*y);
- float* dst2f = (float*)(dstmap2.data + dstmap2.step*y);
+ float* dst1f = dstmap1.ptr<float>(y);
+ float* dst2f = dstmap2.ptr<float>(y);
short* dst1 = (short*)dst1f;
ushort* dst2 = (ushort*)dst2f;
if( borderType == BORDER_CONSTANT )
{
IppiSize setSize = { dst.cols, range.end - range.start };
- void *dataPointer = dst.data + dst.step[0] * range.start;
+ void *dataPointer = dst.ptr(range.start);
if( !IPPSet( borderValue, dataPointer, (int)dst.step[0], setSize, cnn, src.depth() ) )
{
*ok = false;
}
// Aug 2013: problem in IPP 7.1, 8.0: sometimes the function returns ippStsCoeffErr
- IppStatus status = func( src.data, srcsize, (int)src.step[0], srcroi, dst.data,
+ IppStatus status = func( src.ptr(), srcsize, (int)src.step[0], srcroi, dst.ptr(),
(int)dst.step[0], dstroi, coeffs, mode );
if( status < 0)
*ok = false;
if( borderType == BORDER_CONSTANT )
{
IppiSize setSize = {dst.cols, range.end - range.start};
- void *dataPointer = dst.data + dst.step[0] * range.start;
+ void *dataPointer = dst.ptr(range.start);
if( !IPPSet( borderValue, dataPointer, (int)dst.step[0], setSize, cnn, src.depth() ) )
{
*ok = false;
}
}
- IppStatus status = func(src.data, srcsize, (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, coeffs, mode);
+ IppStatus status = func(src.ptr(), srcsize, (int)src.step[0], srcroi, dst.ptr(), (int)dst.step[0], dstroi, coeffs, mode);
if (status != ippStsNoErr)
*ok = false;
}
double beta = sin(angle)*scale;
Mat M(2, 3, CV_64F);
- double* m = (double*)M.data;
+ double* m = M.ptr<double>();
m[0] = alpha;
m[1] = beta;
*/
cv::Mat cv::getPerspectiveTransform( const Point2f src[], const Point2f dst[] )
{
- Mat M(3, 3, CV_64F), X(8, 1, CV_64F, M.data);
+ Mat M(3, 3, CV_64F), X(8, 1, CV_64F, M.ptr());
double a[8][8], b[8];
Mat A(8, 8, CV_64F, a), B(8, 1, CV_64F, b);
}
solve( A, B, X, DECOMP_SVD );
- ((double*)M.data)[8] = 1.;
+ M.ptr<double>()[8] = 1.;
return M;
}
cv::Mat cv::getAffineTransform( const Point2f src[], const Point2f dst[] )
{
- Mat M(2, 3, CV_64F), X(6, 1, CV_64F, M.data);
+ Mat M(2, 3, CV_64F), X(6, 1, CV_64F, M.ptr());
double a[6*6], b[6];
Mat A(6, 6, CV_64F, a), B(6, 1, CV_64F, b);
if( matM.type() == CV_32F )
{
- const float* M = (const float*)matM.data;
- float* iM = (float*)_iM.data;
+ const float* M = matM.ptr<float>();
+ float* iM = _iM.ptr<float>();
int step = (int)(matM.step/sizeof(M[0])), istep = (int)(_iM.step/sizeof(iM[0]));
double D = M[0]*M[step+1] - M[1]*M[step];
}
else if( matM.type() == CV_64F )
{
- const double* M = (const double*)matM.data;
- double* iM = (double*)_iM.data;
+ const double* M = matM.ptr<double>();
+ double* iM = _iM.ptr<double>();
int step = (int)(matM.step/sizeof(M[0])), istep = (int)(_iM.step/sizeof(iM[0]));
double D = M[0]*M[step+1] - M[1]*M[step];
{
dstmap2 = cv::cvarrToMat(dstarr2);
if( dstmap2.type() == CV_16SC1 )
- dstmap2 = cv::Mat(dstmap2.size(), CV_16UC1, dstmap2.data, dstmap2.step);
+ dstmap2 = cv::Mat(dstmap2.size(), CV_16UC1, dstmap2.ptr(), dstmap2.step);
}
cv::convertMaps( map1, map2, dstmap1, dstmap2, dstmap1.type(), false );
for(size_t i = 0, list_size = list.size(); i < list_size; ++i)
{
unsigned int adx = list[i].p.x + list[i].p.y * img_width;
- if((used.data[adx] == NOTUSED) && (angles_data[adx] != NOTDEF))
+ if((used.ptr()[adx] == NOTUSED) && (angles_data[adx] != NOTDEF))
{
int reg_size;
double reg_angle;
reg[0].x = s.x;
reg[0].y = s.y;
int addr = s.x + s.y * img_width;
- reg[0].used = used.data + addr;
+ reg[0].used = used.ptr() + addr;
reg_angle = angles_data[addr];
reg[0].angle = reg_angle;
reg[0].modgrad = modgrad_data[addr];
int c_addr = xx_min + yy * img_width;
for(int xx = xx_min; xx <= xx_max; ++xx, ++c_addr)
{
- if((used.data[c_addr] != USED) &&
+ if((used.ptr()[c_addr] != USED) &&
(isAligned(c_addr, reg_angle, prec)))
{
// Add point
- used.data[c_addr] = USED;
+ used.ptr()[c_addr] = USED;
RegionPoint& region_point = reg[reg_size];
region_point.x = xx;
region_point.y = yy;
- region_point.used = &(used.data[c_addr]);
+ region_point.used = &(used.ptr()[c_addr]);
region_point.modgrad = modgrad_data[c_addr];
const double& angle = angles_data[c_addr];
region_point.angle = angle;
for (unsigned int i = 0; i < I1.total(); ++i)
{
- uchar i1 = I1.data[i];
- uchar i2 = I2.data[i];
+ uchar i1 = I1.ptr()[i];
+ uchar i2 = I2.ptr()[i];
if (i1 || i2)
{
unsigned int base_idx = i * 3;
- if (i1) img.data[base_idx] = 255;
- else img.data[base_idx] = 0;
- img.data[base_idx + 1] = 0;
- if (i2) img.data[base_idx + 2] = 255;
- else img.data[base_idx + 2] = 0;
+ if (i1) img.ptr()[base_idx] = 255;
+ else img.ptr()[base_idx] = 0;
+ img.ptr()[base_idx + 1] = 0;
+ if (i2) img.ptr()[base_idx + 2] = 255;
+ else img.ptr()[base_idx + 2] = 0;
}
}
}
Moments m;
int lpt = contour.checkVector(2);
int is_float = contour.depth() == CV_32F;
- const Point* ptsi = (const Point*)contour.data;
- const Point2f* ptsf = (const Point2f*)contour.data;
+ const Point* ptsi = contour.ptr<Point>();
+ const Point2f* ptsf = contour.ptr<Point2f>();
CV_Assert( contour.depth() == CV_32S || contour.depth() == CV_32F );
for( y = 0; y < size.height; y++ )
{
- const T* ptr = (const T*)(img.data + y*img.step);
+ const T* ptr = img.ptr<T>(y);
WT x0 = 0, x1 = 0, x2 = 0;
MT x3 = 0;
x = vop(ptr, size.width, x0, x1, x2, x3);
_hu.create(7, 1, CV_64F);
Mat hu = _hu.getMat();
CV_Assert( hu.isContinuous() );
- HuMoments(m, (double*)hu.data);
+ HuMoments(m, hu.ptr<double>());
}
for( i = 0; i < ksize.height; i++ )
{
- uchar* ptr = elem.data + i*elem.step;
+ uchar* ptr = elem.ptr(i);
int j1 = 0, j2 = 0;
if( shape == MORPH_RECT || (shape == MORPH_CROSS && i == anchor.y) )
return false;\
IppiMorphState *pSpec = (IppiMorphState*)ippMalloc(specSize);\
Ipp8u *pBuffer = (Ipp8u*)ippMalloc(bufferSize);\
- if (0 > ippiMorphologyBorderInit_##flavor(roiSize.width, kernel.data, kernelSize, pSpec, pBuffer))\
+ if (0 > ippiMorphologyBorderInit_##flavor(roiSize.width, kernel.ptr(), kernelSize, pSpec, pBuffer))\
{\
ippFree(pBuffer);\
ippFree(pSpec);\
}\
bool ok = false;\
if (op == MORPH_ERODE)\
- ok = (0 <= ippiErodeBorder_##flavor((Ipp##data_type *)_src->data, (int)_src->step[0], (Ipp##data_type *)dst.data, (int)dst.step[0],\
+ ok = (0 <= ippiErodeBorder_##flavor(_src->ptr<Ipp##data_type>(), (int)_src->step[0], dst.ptr<Ipp##data_type>(), (int)dst.step[0],\
roiSize, ippBorderRepl, 0, pSpec, pBuffer));\
else\
- ok = (0 <= ippiDilateBorder_##flavor((Ipp##data_type *)_src->data, (int)_src->step[0], (Ipp##data_type *)dst.data, (int)dst.step[0],\
+ ok = (0 <= ippiDilateBorder_##flavor(_src->ptr<Ipp##data_type>(), (int)_src->step[0], dst.ptr<Ipp##data_type>(), (int)dst.step[0],\
roiSize, ippBorderRepl, 0, pSpec, pBuffer));\
ippFree(pBuffer);\
ippFree(pSpec);\
{\
int specSize = 0;\
int bufferSize = 0;\
- if (0 > ippiMorphologyGetSize_##flavor( roiSize.width, kernel.data, kernelSize, &specSize))\
+ if (0 > ippiMorphologyGetSize_##flavor( roiSize.width, kernel.ptr(), kernelSize, &specSize))\
return false;\
bool ok = false;\
IppiMorphState* pState = (IppiMorphState*)ippMalloc(specSize);\
- if (ippiMorphologyInit_##flavor(roiSize.width, kernel.data, kernelSize, point, pState) >= 0)\
+ if (ippiMorphologyInit_##flavor(roiSize.width, kernel.ptr(), kernelSize, point, pState) >= 0)\
{\
if (op == MORPH_ERODE)\
- ok = ippiErodeBorderReplicate_##flavor((Ipp##data_type *)_src->data, (int)_src->step[0],\
- (Ipp##data_type *)dst.data, (int)dst.step[0],\
+ ok = ippiErodeBorderReplicate_##flavor(_src->ptr<Ipp##data_type>(), (int)_src->step[0],\
+ dst.ptr<Ipp##data_type>(), (int)dst.step[0],\
roiSize, ippBorderRepl, pState ) >= 0;\
else\
- ok = ippiDilateBorderReplicate_##flavor((Ipp##data_type *)_src->data, (int)_src->step[0],\
- (Ipp##data_type *)dst.data, (int)dst.step[0],\
+ ok = ippiDilateBorderReplicate_##flavor(_src->ptr<Ipp##data_type>(), (int)_src->step[0],\
+ dst.ptr<Ipp##data_type>(), (int)dst.step[0],\
roiSize, ippBorderRepl, pState ) >= 0;\
}\
ippFree(pState);\
AutoBuffer<uchar> buf(bufSize + 64);\
uchar* buffer = alignPtr((uchar*)buf, 32);\
if (op == MORPH_ERODE)\
- return (0 <= ippiFilterMinBorderReplicate_##flavor((Ipp##data_type *)_src->data, (int)_src->step[0], (Ipp##data_type *)dst.data, (int)dst.step[0], roiSize, kernelSize, point, buffer));\
- return (0 <= ippiFilterMaxBorderReplicate_##flavor((Ipp##data_type *)_src->data, (int)_src->step[0], (Ipp##data_type *)dst.data, (int)dst.step[0], roiSize, kernelSize, point, buffer));\
+ return (0 <= ippiFilterMinBorderReplicate_##flavor(_src->ptr<Ipp##data_type>(), (int)_src->step[0], dst.ptr<Ipp##data_type>(), (int)dst.step[0], roiSize, kernelSize, point, buffer));\
+ return (0 <= ippiFilterMaxBorderReplicate_##flavor(_src->ptr<Ipp##data_type>(), (int)_src->step[0], dst.ptr<Ipp##data_type>(), (int)dst.step[0], roiSize, kernelSize, point, buffer));\
}\
break;
}
}
- Size ksize = kernel.data ? kernel.size() : Size(3,3);
+ Size ksize = !kernel.empty() ? kernel.size() : Size(3,3);
_dst.create( src.size(), src.type() );
Mat dst = _dst.getMat();
}
bool rectKernel = false;
- if( !kernel.data )
+ if( kernel.empty() )
{
ksize = Size(1+iterations*2,1+iterations*2);
anchor = Point(iterations, iterations);
int type = _src.type(), depth = CV_MAT_DEPTH(type),
cn = CV_MAT_CN(type), esz = CV_ELEM_SIZE(type);
Mat kernel = _kernel.getMat();
- Size ksize = kernel.data ? kernel.size() : Size(3, 3), ssize = _src.size();
+ Size ksize = !kernel.empty() ? kernel.size() : Size(3, 3), ssize = _src.size();
bool doubleSupport = dev.doubleFPConfig() > 0;
if ((depth == CV_64F && !doubleSupport) || borderType != BORDER_CONSTANT)
bool haveExtraMat = !_extraMat.empty();
CV_Assert(actual_op <= 3 || haveExtraMat);
- if (!kernel.data)
+ if (kernel.empty())
{
kernel = getStructuringElement(MORPH_RECT, Size(1+iterations*2,1+iterations*2));
anchor = Point(iterations, iterations);
int borderType, const Scalar& borderValue )
{
Mat kernel = _kernel.getMat();
- Size ksize = kernel.data ? kernel.size() : Size(3,3);
+ Size ksize = !kernel.empty() ? kernel.size() : Size(3,3);
anchor = normalizeAnchor(anchor, ksize);
CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2 && _src.channels() <= 4 &&
return;
}
- if (!kernel.data)
+ if (kernel.empty())
{
kernel = getStructuringElement(MORPH_RECT, Size(1+iterations*2,1+iterations*2));
anchor = Point(iterations, iterations);
{
cv::Mat elem = cv::getStructuringElement(shape, ksize, anchor);
for( i = 0; i < size; i++ )
- element->values[i] = elem.data[i];
+ element->values[i] = elem.ptr()[i];
}
return element;
int i, size = src->nRows*src->nCols;
for( i = 0; i < size; i++ )
- dst.data[i] = (uchar)(src->values[i] != 0);
+ dst.ptr()[i] = (uchar)(src->values[i] != 0);
}
if( depth == CV_32F )
{
- const float* dataSrc = (const float*)src.data;
- float* dataDst = (float*)dst.data;
+ const float* dataSrc = src.ptr<float>();
+ float* dataDst = dst.ptr<float>();
size_t stepSrc = src.step/sizeof(dataSrc[0]);
size_t stepDst = dst.step/sizeof(dataDst[0]);
}
else
{
- const double* dataSrc = (const double*)src.data;
- double* dataDst = (double*)dst.data;
+ const double* dataSrc = src.ptr<double>();
+ double* dataDst = dst.ptr<double>();
size_t stepSrc = src.step/sizeof(dataSrc[0]);
size_t stepDst = dst.step/sizeof(dataDst[0]);
if( depth == CV_32F )
{
- const float* dataA = (const float*)srcA.data;
- const float* dataB = (const float*)srcB.data;
- float* dataC = (float*)dst.data;
+ const float* dataA = srcA.ptr<float>();
+ const float* dataB = srcB.ptr<float>();
+ float* dataC = dst.ptr<float>();
float eps = FLT_EPSILON; // prevent div0 problems
size_t stepA = srcA.step/sizeof(dataA[0]);
}
else
{
- const double* dataA = (const double*)srcA.data;
- const double* dataB = (const double*)srcB.data;
- double* dataC = (double*)dst.data;
+ const double* dataA = srcA.ptr<double>();
+ const double* dataB = srcB.ptr<double>();
+ double* dataC = dst.ptr<double>();
double eps = DBL_EPSILON; // prevent div0 problems
size_t stepA = srcA.step/sizeof(dataA[0]);
if(type == CV_32FC1)
{
- const float* dataIn = (const float*)src.data;
+ const float* dataIn = src.ptr<float>();
dataIn += minr*src.cols;
for(int y = minr; y <= maxr; y++)
{
}
else
{
- const double* dataIn = (const double*)src.data;
+ const double* dataIn = src.ptr<double>();
dataIn += minr*src.cols;
for(int y = minr; y <= maxr; y++)
{
for( int y = 0; y < dsize.height; y++ )
{
- T* dst = (T*)(_dst.data + _dst.step*y);
+ T* dst = _dst.ptr<T>(y);
WT *row0, *row1, *row2, *row3, *row4;
// fill the ring buffer (horizontal convolution and decimation)
{
WT* row = buf + ((sy - sy0) % PD_SZ)*bufstep;
int _sy = borderInterpolate(sy, ssize.height, borderType);
- const T* src = (const T*)(_src.data + _src.step*_sy);
+ const T* src = _src.ptr<T>(_sy);
int limit = cn;
const int* tab = tabL;
for( int y = 0; y < ssize.height; y++ )
{
- T* dst0 = (T*)(_dst.data + _dst.step*y*2);
- T* dst1 = (T*)(_dst.data + _dst.step*(y*2+1));
+ T* dst0 = _dst.ptr<T>(y*2);
+ T* dst1 = _dst.ptr<T>(y*2+1);
WT *row0, *row1, *row2;
if( y*2+1 >= dsize.height )
{
WT* row = buf + ((sy - sy0) % PU_SZ)*bufstep;
int _sy = borderInterpolate(sy*2, dsize.height, BORDER_REFLECT_101)/2;
- const T* src = (const T*)(_src.data + _src.step*_sy);
+ const T* src = _src.ptr<T>(_sy);
if( ssize.width == cn )
{
}
int n = hull.checkVector(2);
- const Point2f* hpoints = (const Point2f*)hull.data;
+ const Point2f* hpoints = hull.ptr<Point2f>();
if( n > 2 )
{
{
_pts.create(4, 2, CV_32F);
Mat pts = _pts.getMat();
- box.points((Point2f*)pts.data);
+ box.points(pts.ptr<Point2f>());
}
if( ippfunc)
{
- if (ippfunc(image.data, (int)image.step, src_size, patch.data,
+ if (ippfunc(image.ptr(), (int)image.step, src_size, patch.ptr(),
(int)patch.step, win_size, icenter, &minpt, &maxpt) >= 0 )
return;
setIppErrorStatus();
if( depth == CV_8U && ddepth == CV_8U )
getRectSubPix_Cn_<uchar, uchar, int, scale_fixpt, cast_8u>
- (image.data, image.step, image.size(), patch.data, patch.step, patch.size(), center, cn);
+ (image.ptr(), image.step, image.size(), patch.ptr(), patch.step, patch.size(), center, cn);
else if( depth == CV_8U && ddepth == CV_32F )
getRectSubPix_8u32f
- (image.data, image.step, image.size(), (float*)patch.data, patch.step, patch.size(), center, cn);
+ (image.ptr(), image.step, image.size(), patch.ptr<float>(), patch.step, patch.size(), center, cn);
else if( depth == CV_32F && ddepth == CV_32F )
getRectSubPix_Cn_<float, float, float, nop<float>, nop<float> >
- ((const float*)image.data, image.step, image.size(), (float*)patch.data, patch.step, patch.size(), center, cn);
+ (image.ptr<float>(), image.step, image.size(), patch.ptr<float>(), patch.step, patch.size(), center, cn);
else
CV_Error( CV_StsUnsupportedFormat, "Unsupported combination of input and output formats");
}
CV_IMPL void
cvGetQuadrangleSubPix( const void* srcarr, void* dstarr, const CvMat* mat )
{
- cv::Mat src = cv::cvarrToMat(srcarr), m = cv::cvarrToMat(mat);
- const cv::Mat dst = cv::cvarrToMat(dstarr);
+ const cv::Mat src = cv::cvarrToMat(srcarr), m = cv::cvarrToMat(mat);
+ cv::Mat dst = cv::cvarrToMat(dstarr);
CV_Assert( src.channels() == dst.channels() );
matrix[5] -= matrix[3]*dx + matrix[4]*dy;
if( src.depth() == CV_8U && dst.depth() == CV_32F )
- cv::getQuadrangleSubPix_8u32f_CnR( src.data, src.step, src.size(),
- (float*)dst.data, dst.step, dst.size(),
+ cv::getQuadrangleSubPix_8u32f_CnR( src.ptr(), src.step, src.size(),
+ dst.ptr<float>(), dst.step, dst.size(),
matrix, src.channels());
else
{
CV_Assert( src.type() == CV_8UC3 && dst.type() == CV_32SC1 );
CV_Assert( src.size() == dst.size() );
- const uchar* img = src.data;
+ const uchar* img = src.ptr();
int istep = int(src.step/sizeof(img[0]));
int* mask = dst.ptr<int>();
int mstep = int(dst.step / sizeof(mask[0]));
return;
active_queue = i;
- img = src.data;
+ img = src.ptr();
mask = dst.ptr<int>();
// recursively fill the basins
{
cv::Mat src = src_pyramid[level];
cv::Size size = src.size();
- uchar* sptr = src.data;
+ const uchar* sptr = src.ptr();
int sstep = (int)src.step;
uchar* mask = 0;
int mstep = 0;
if( level < max_level )
{
cv::Size size1 = dst_pyramid[level+1].size();
- cv::Mat m( size.height, size.width, CV_8UC1, mask0.data );
+ cv::Mat m( size.height, size.width, CV_8UC1, mask0.ptr() );
dstep = (int)dst_pyramid[level+1].step;
- dptr = dst_pyramid[level+1].data + dstep + cn;
+ dptr = dst_pyramid[level+1].ptr() + dstep + cn;
mstep = (int)m.step;
- mask = m.data + mstep;
+ mask = m.ptr() + mstep;
//cvResize( dst_pyramid[level+1], dst_pyramid[level], CV_INTER_CUBIC );
cv::pyrUp( dst_pyramid[level+1], dst_pyramid[level], dst_pyramid[level].size() );
m.setTo(cv::Scalar::all(0));
}
cv::dilate( m, m, cv::Mat() );
- mask = m.data;
+ mask = m.ptr();
}
- dptr = dst_pyramid[level].data;
+ dptr = dst_pyramid[level].ptr();
dstep = (int)dst_pyramid[level].step;
for( i = 0; i < size.height; i++, sptr += sstep - size.width*3,
// iterate meanshift procedure
for( iter = 0; iter < termcrit.maxCount; iter++ )
{
- uchar* ptr;
+ const uchar* ptr;
int x, y, count = 0;
int minx, miny, maxx, maxy;
int s0 = 0, s1 = 0, s2 = 0, sx = 0, sy = 0;
return;
bool is_float = depth == CV_32F;
- const Point* ptsi = (const Point*)points.data;
- const Point2f* ptsf = (const Point2f*)points.data;
+ const Point* ptsi = points.ptr<Point>();
+ const Point2f* ptsf = points.ptr<Point2f>();
Point2f pt = is_float ? ptsf[0] : Point2f((float)ptsi[0].x,(float)ptsi[0].y);
Point2f pts[4] = {pt, pt, pt, pt};
bool is_float = depth == CV_32F;
int last = is_closed ? count-1 : 0;
- const Point* pti = (const Point*)curve.data;
- const Point2f* ptf = (const Point2f*)curve.data;
+ const Point* pti = curve.ptr<Point>();
+ const Point2f* ptf = curve.ptr<Point2f>();
Point2f prev = is_float ? ptf[last] : Point2f((float)pti[last].x,(float)pti[last].y);
double a00 = 0;
bool is_float = depth == CV_32F;
- const Point* ptsi = (const Point*)contour.data;
- const Point2f* ptsf = (const Point2f*)contour.data;
+ const Point* ptsi = contour.ptr<Point>();
+ const Point2f* ptsf = contour.ptr<Point2f>();
Point2f prev = is_float ? ptsf[npoints-1] : Point2f((float)ptsi[npoints-1].x, (float)ptsi[npoints-1].y);
for( int i = 0; i < npoints; i++ )
double gfp[5], rp[5], t;
const double min_eps = 1e-8;
bool is_float = depth == CV_32F;
- const Point* ptsi = (const Point*)points.data;
- const Point2f* ptsf = (const Point2f*)points.data;
+ const Point* ptsi = points.ptr<Point>();
+ const Point2f* ptsf = points.ptr<Point2f>();
AutoBuffer<double> _Ad(n*5), _bd(n);
double *Ad = _Ad, *bd = _bd;
if( npoints == 0 )
return Rect();
- const Point* pts = (const Point*)points.data;
+ const Point* pts = points.ptr<Point>();
Point pt = pts[0];
#if CV_SSE4_2
Ipp8u * buffer = ippsMalloc_8u(bufSize); \
ippType borderValue[4] = { 0, 0, 0, 0 }; \
ippBorderType = ippBorderType == BORDER_CONSTANT ? ippBorderConst : ippBorderRepl; \
- IppStatus status = ippiFilterBoxBorder_##flavor((const ippType *)src.data, (int)src.step, (ippType *)dst.data, \
+ IppStatus status = ippiFilterBoxBorder_##flavor(src.ptr<ippType>(), (int)src.step, dst.ptr<ippType>(), \
(int)dst.step, roiSize, maskSize, \
(IppiBorderType)ippBorderType, borderValue, buffer); \
ippsFree(buffer); \
CV_Assert( ktype == CV_32F || ktype == CV_64F );
Mat kernel(n, 1, ktype);
- float* cf = (float*)kernel.data;
- double* cd = (double*)kernel.data;
+ float* cf = kernel.ptr<float>();
+ double* cd = kernel.ptr<double>();
double sigmaX = sigma > 0 ? sigma : ((n-1)*0.5 - 1)*0.3 + 0.8;
double scale2X = -0.5/(sigmaX*sigmaX);
typedef Ipp##ippfavor ippType; \
ippType borderValues[] = { 0, 0, 0 }; \
IppStatus status = ippcn == 1 ? \
- ippiFilterGaussianBorder_##ippfavor##_C1R((const ippType *)src.data, (int)src.step, \
- (ippType *)dst.data, (int)dst.step, roiSize, borderValues[0], pSpec, pBuffer) : \
- ippiFilterGaussianBorder_##ippfavor##_C3R((const ippType *)src.data, (int)src.step, \
- (ippType *)dst.data, (int)dst.step, roiSize, borderValues, pSpec, pBuffer); \
+ ippiFilterGaussianBorder_##ippfavor##_C1R(src.ptr<ippType>(), (int)src.step, \
+ dst.ptr<ippType>(), (int)dst.step, roiSize, borderValues[0], pSpec, pBuffer) : \
+ ippiFilterGaussianBorder_##ippfavor##_C3R(src.ptr<ippType>(), (int)src.step, \
+ dst.ptr<ippType>(), (int)dst.step, roiSize, borderValues, pSpec, pBuffer); \
ippFree(pBuffer); \
ippFree(pSpec); \
if (status >= 0) \
for( int x = 0; x < _dst.cols; x += STRIPE_SIZE )
{
int i, j, k, c, n = std::min(_dst.cols - x, STRIPE_SIZE) + r*2;
- const uchar* src = _src.data + x*cn;
- uchar* dst = _dst.data + (x - r)*cn;
+ const uchar* src = _src.ptr() + x*cn;
+ uchar* dst = _dst.ptr() + (x - r)*cn;
memset( h_coarse, 0, 16*n*cn*sizeof(h_coarse[0]) );
memset( h_fine, 0, 16*16*n*cn*sizeof(h_fine[0]) );
int x, y;
int n2 = m*m/2;
Size size = _dst.size();
- const uchar* src = _src.data;
- uchar* dst = _dst.data;
+ const uchar* src = _src.ptr();
+ uchar* dst = _dst.ptr();
int src_step = (int)_src.step, dst_step = (int)_dst.step;
int cn = _src.channels();
const uchar* src_max = src + size.height*src_step;
typedef typename Op::arg_type WT;
typedef typename VecOp::arg_type VT;
- const T* src = (const T*)_src.data;
- T* dst = (T*)_dst.data;
+ const T* src = _src.ptr<T>();
+ T* dst = _dst.ptr<T>();
int sstep = (int)(_src.step/sizeof(T));
int dstep = (int)(_dst.step/sizeof(T));
Size size = _dst.size();
ippDataType, CV_MAT_CN(type), &bufSize) >= 0) \
{ \
Ipp8u * buffer = ippsMalloc_8u(bufSize); \
- IppStatus status = ippiFilterMedianBorder_##flavor((const ippType *)src0.data, (int)src0.step, \
- (ippType *)dst.data, (int)dst.step, dstRoiSize, maskSize, \
+ IppStatus status = ippiFilterMedianBorder_##flavor(src0.ptr<ippType>(), (int)src0.step, \
+ dst.ptr<ippType>(), (int)dst.step, dstRoiSize, maskSize, \
ippBorderRepl, (ippType)0, buffer); \
ippsFree(buffer); \
if (status >= 0) \
else
CV_Error( CV_StsUnsupportedFormat, "" );
- func( src.data, src.step, sum.data, sum.step, sqsum.data, sqsum.step,
- tilted.data, tilted.step, src.size(), cn );
+ func( src.ptr(), src.step, sum.ptr(), sum.step, sqsum.ptr(), sqsum.step,
+ tilted.ptr(), tilted.step, src.size(), cn );
}
void cv::integral( InputArray src, OutputArray sum, int sdepth )
pBuffer = ippsMalloc_8u( bufSize );
- status = ippFunc(src.data, (int)src.step, srcRoiSize, tpl.data, (int)tpl.step, tplRoiSize, (Ipp32f*)dst.data, (int)dst.step, funCfg, pBuffer);
+ status = ippFunc(src.ptr(), (int)src.step, srcRoiSize, tpl.ptr(), (int)tpl.step, tplRoiSize, dst.ptr<Ipp32f>(), (int)dst.step, funCfg, pBuffer);
ippsFree( pBuffer );
return status >= 0;
pBuffer = ippsMalloc_8u( bufSize );
- status = ippFunc(src.data, (int)src.step, srcRoiSize, tpl.data, (int)tpl.step, tplRoiSize, (Ipp32f*)dst.data, (int)dst.step, funCfg, pBuffer);
+ status = ippFunc(src.ptr(), (int)src.step, srcRoiSize, tpl.ptr(), (int)tpl.step, tplRoiSize, dst.ptr<Ipp32f>(), (int)dst.step, funCfg, pBuffer);
ippsFree( pBuffer );
return status >= 0;
for( i = 0; i < result.rows; i++ )
{
- float* rrow = (float*)(result.data + i*result.step);
+ float* rrow = result.ptr<float>(i);
int idx = i * sumstep;
int idx2 = i * sqstep;
{
case THRESH_TRUNC:
#ifndef HAVE_IPP_ICV_ONLY
- if (_src.data == _dst.data && ippiThreshold_GT_8u_C1IR(_src.data, (int)src_step, sz, thresh) >= 0)
+ if (_src.data == _dst.data && ippiThreshold_GT_8u_C1IR(_src.ptr(), (int)src_step, sz, thresh) >= 0)
return;
#endif
- if (ippiThreshold_GT_8u_C1R(_src.data, (int)src_step, _dst.data, (int)dst_step, sz, thresh) >= 0)
+ if (ippiThreshold_GT_8u_C1R(_src.ptr(), (int)src_step, _dst.ptr(), (int)dst_step, sz, thresh) >= 0)
return;
setIppErrorStatus();
break;
case THRESH_TOZERO:
#ifndef HAVE_IPP_ICV_ONLY
- if (_src.data == _dst.data && ippiThreshold_LTVal_8u_C1IR(_src.data, (int)src_step, sz, thresh+1, 0) >= 0)
+ if (_src.data == _dst.data && ippiThreshold_LTVal_8u_C1IR(_src.ptr(), (int)src_step, sz, thresh+1, 0) >= 0)
return;
#endif
- if (ippiThreshold_LTVal_8u_C1R(_src.data, (int)src_step, _dst.data, (int)dst_step, sz, thresh+1, 0) >= 0)
+ if (ippiThreshold_LTVal_8u_C1R(_src.ptr(), (int)src_step, _dst.ptr(), (int)dst_step, sz, thresh+1, 0) >= 0)
return;
setIppErrorStatus();
break;
case THRESH_TOZERO_INV:
#ifndef HAVE_IPP_ICV_ONLY
- if (_src.data == _dst.data && ippiThreshold_GTVal_8u_C1IR(_src.data, (int)src_step, sz, thresh, 0) >= 0)
+ if (_src.data == _dst.data && ippiThreshold_GTVal_8u_C1IR(_src.ptr(), (int)src_step, sz, thresh, 0) >= 0)
return;
#endif
- if (ippiThreshold_GTVal_8u_C1R(_src.data, (int)src_step, _dst.data, (int)dst_step, sz, thresh, 0) >= 0)
+ if (ippiThreshold_GTVal_8u_C1R(_src.ptr(), (int)src_step, _dst.ptr(), (int)dst_step, sz, thresh, 0) >= 0)
return;
setIppErrorStatus();
break;
for( i = 0; i < roi.height; i++ )
{
- const uchar* src = (const uchar*)(_src.data + src_step*i);
- uchar* dst = (uchar*)(_dst.data + dst_step*i);
+ const uchar* src = _src.ptr() + src_step*i;
+ uchar* dst = _dst.ptr() + dst_step*i;
switch( type )
{
{
for( i = 0; i < roi.height; i++ )
{
- const uchar* src = (const uchar*)(_src.data + src_step*i);
- uchar* dst = (uchar*)(_dst.data + dst_step*i);
+ const uchar* src = _src.ptr() + src_step*i;
+ uchar* dst = _dst.ptr() + dst_step*i;
j = j_scalar;
#if CV_ENABLE_UNROLLED
for( ; j <= roi.width - 4; j += 4 )
int i, j;
Size roi = _src.size();
roi.width *= _src.channels();
- const short* src = (const short*)_src.data;
- short* dst = (short*)_dst.data;
+ const short* src = _src.ptr<short>();
+ short* dst = _dst.ptr<short>();
size_t src_step = _src.step/sizeof(src[0]);
size_t dst_step = _dst.step/sizeof(dst[0]);
int i, j;
Size roi = _src.size();
roi.width *= _src.channels();
- const float* src = (const float*)_src.data;
- float* dst = (float*)_dst.data;
+ const float* src = _src.ptr<float>();
+ float* dst = _dst.ptr<float>();
size_t src_step = _src.step/sizeof(src[0]);
size_t dst_step = _dst.step/sizeof(dst[0]);
IppiSize srcSize = { size.width, size.height };
Ipp8u thresh;
CV_SUPPRESS_DEPRECATED_START
- IppStatus ok = ippiComputeThreshold_Otsu_8u_C1R(_src.data, step, srcSize, &thresh);
+ IppStatus ok = ippiComputeThreshold_Otsu_8u_C1R(_src.ptr(), step, srcSize, &thresh);
CV_SUPPRESS_DEPRECATED_END
if (ok >= 0)
return thresh;
int i, j, h[N] = {0};
for( i = 0; i < size.height; i++ )
{
- const uchar* src = _src.data + step*i;
+ const uchar* src = _src.ptr() + step*i;
j = 0;
#if CV_ENABLE_UNROLLED
for( ; j <= size.width - 4; j += 4 )
for( i = 0; i < size.height; i++ )
{
- const uchar* sdata = src.data + src.step*i;
- const uchar* mdata = mean.data + mean.step*i;
- uchar* ddata = dst.data + dst.step*i;
+ const uchar* sdata = src.ptr(i);
+ const uchar* mdata = mean.ptr(i);
+ uchar* ddata = dst.ptr(i);
for( j = 0; j < size.width; j++ )
ddata[j] = tab[sdata[j] - mdata[j] + 255];
cameraMatrix.convertTo(newCameraMatrix, CV_64F);
if( centerPrincipalPoint )
{
- ((double*)newCameraMatrix.data)[2] = (imgsize.width-1)*0.5;
- ((double*)newCameraMatrix.data)[5] = (imgsize.height-1)*0.5;
+ newCameraMatrix.ptr<double>()[2] = (imgsize.width-1)*0.5;
+ newCameraMatrix.ptr<double>()[5] = (imgsize.height-1)*0.5;
}
return newCameraMatrix;
}
Mat_<double> R = Mat_<double>::eye(3, 3);
Mat_<double> A = Mat_<double>(cameraMatrix), Ar;
- if( newCameraMatrix.data )
+ if( !newCameraMatrix.empty() )
Ar = Mat_<double>(newCameraMatrix);
else
Ar = getDefaultNewCameraMatrix( A, size, true );
- if( matR.data )
+ if( !matR.empty() )
R = Mat_<double>(matR);
- if( distCoeffs.data )
+ if( !distCoeffs.empty() )
distCoeffs = Mat_<double>(distCoeffs);
else
{
if( distCoeffs.rows != 1 && !distCoeffs.isContinuous() )
distCoeffs = distCoeffs.t();
- double k1 = ((double*)distCoeffs.data)[0];
- double k2 = ((double*)distCoeffs.data)[1];
- double p1 = ((double*)distCoeffs.data)[2];
- double p2 = ((double*)distCoeffs.data)[3];
- double k3 = distCoeffs.cols + distCoeffs.rows - 1 >= 5 ? ((double*)distCoeffs.data)[4] : 0.;
- double k4 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? ((double*)distCoeffs.data)[5] : 0.;
- double k5 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? ((double*)distCoeffs.data)[6] : 0.;
- double k6 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? ((double*)distCoeffs.data)[7] : 0.;
- double s1 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? ((double*)distCoeffs.data)[8] : 0.;
- double s2 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? ((double*)distCoeffs.data)[9] : 0.;
- double s3 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? ((double*)distCoeffs.data)[10] : 0.;
- double s4 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? ((double*)distCoeffs.data)[11] : 0.;
+ const double* const distPtr = distCoeffs.ptr<double>();
+ double k1 = distPtr[0];
+ double k2 = distPtr[1];
+ double p1 = distPtr[2];
+ double p2 = distPtr[3];
+ double k3 = distCoeffs.cols + distCoeffs.rows - 1 >= 5 ? distPtr[4] : 0.;
+ double k4 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[5] : 0.;
+ double k5 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[6] : 0.;
+ double k6 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[7] : 0.;
+ double s1 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[8] : 0.;
+ double s2 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[9] : 0.;
+ double s3 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[10] : 0.;
+ double s4 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[11] : 0.;
for( int i = 0; i < size.height; i++ )
{
- float* m1f = (float*)(map1.data + map1.step*i);
- float* m2f = (float*)(map2.data + map2.step*i);
+ float* m1f = map1.ptr<float>(i);
+ float* m2f = map2.ptr<float>(i);
short* m1 = (short*)m1f;
ushort* m2 = (ushort*)m2f;
double _x = i*ir[1] + ir[2], _y = i*ir[4] + ir[5], _w = i*ir[7] + ir[8];
Mat_<double> A, Ar, I = Mat_<double>::eye(3,3);
cameraMatrix.convertTo(A, CV_64F);
- if( distCoeffs.data )
+ if( !distCoeffs.empty() )
distCoeffs = Mat_<double>(distCoeffs);
else
{
distCoeffs = 0.;
}
- if( newCameraMatrix.data )
+ if( !newCameraMatrix.empty() )
newCameraMatrix.convertTo(Ar, CV_64F);
else
A.copyTo(Ar);
CvMat _csrc = src, _cdst = dst, _ccameraMatrix = cameraMatrix;
CvMat matR, matP, _cdistCoeffs, *pR=0, *pP=0, *pD=0;
- if( R.data )
+ if( !R.empty() )
pR = &(matR = R);
- if( P.data )
+ if( !P.empty() )
pP = &(matP = P);
- if( distCoeffs.data )
+ if( !distCoeffs.empty() )
pD = &(_cdistCoeffs = distCoeffs);
cvUndistortPoints(&_csrc, &_cdst, &_ccameraMatrix, pD, pR, pP);
}
for( i = 0; i < size.height; i++ )
{
- const float* sptr = (const float*)(temp.data + (i+radius)*temp.step) + radius*cn;
- float* dptr = (float*)(dst.data + i*dst.step);
+ const float* sptr = temp.ptr<float>(i+radius) + radius*cn;
+ float* dptr = dst.ptr<float>(i);
if( cn == 1 )
{
Mat gold = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_gold.png", IMREAD_UNCHANGED);
Mat result;
- CV_Assert(given.data != NULL && gold.data != NULL);
+ CV_Assert( !given.empty() && !gold.empty() );
cvtColor(given, result, CV_BayerBG2GRAY);
Mat gold = imread(goldfname, IMREAD_UNCHANGED);
Mat result;
- CV_Assert(given.data != NULL);
+ CV_Assert( !given.empty() );
cvtColor(given, result, CV_BayerBG2BGR_VNG, 3);
std::string full_path = parent_path + image_name;
src = imread(full_path, IMREAD_UNCHANGED);
- if (src.data == NULL)
+ if ( src.empty() )
{
ts->set_failed_test_info(cvtest::TS::FAIL_MISSING_TEST_DATA);
ts->printf(cvtest::TS::SUMMARY, "No input image\n");
// reading a reference image
full_path = parent_path + pattern[i] + image_name;
reference = imread(full_path, IMREAD_UNCHANGED);
- if (reference.data == NULL)
+ if ( reference.empty() )
{
imwrite(full_path, dst);
continue;
}
++size.width;
- uchar* firstRow = dst.data, *lastRow = dst.data + size.height * dst.step;
+ uchar* firstRow = dst.ptr(), *lastRow = dst.ptr(size.height);
size.width *= dcn;
for (int x = 0; x < size.width; ++x)
{
for (int y = 0; y < size.height && next; ++y)
{
- const T* A = reinterpret_cast<const T*>(actual.data + actual.step * y);
- const T* R = reinterpret_cast<const T*>(reference.data + reference.step * y);
+ const T* A = actual.ptr<T>(y);
+ const T* R = reference.ptr<T>(y);
for (int x = 0; x < size.width && next; ++x)
if (std::abs(A[x] - R[x]) > 1)
std::string full_path = parent_path + image_name;
src = imread(full_path, IMREAD_UNCHANGED);
- if (src.data == NULL)
+ if (src.empty())
{
ts->set_failed_test_info(cvtest::TS::FAIL_MISSING_TEST_DATA);
ts->printf(cvtest::TS::SUMMARY, "No input image\n");
if( shape == CV_SHAPE_CUSTOM )
{
eldata.resize(aperture_size.width*aperture_size.height);
- const uchar* src = test_mat[INPUT][1].data;
+ const uchar* src = test_mat[INPUT][1].ptr();
int srcstep = (int)test_mat[INPUT][1].step;
int i, j, nonzero = 0;
mask = test_array[INPUT_OUTPUT][1] ? cv::cvarrToMat(test_array[INPUT_OUTPUT][1]) : cv::Mat();
cv::Rect rect;
int area;
- if( !mask.data )
+ if( mask.empty() )
area = cv::floodFill( img, seed_pt, new_val, &rect, l_diff, u_diff, flags );
else
area = cv::floodFill( img, mask, seed_pt, new_val, &rect, l_diff, u_diff, flags );
int x, y, k;
int drows = dst.rows, dcols = dst.cols;
int srows = src.rows, scols = src.cols;
- const uchar* sptr0 = src.data;
+ const uchar* sptr0 = src.ptr();
int depth = src.depth(), cn = src.channels();
int elem_size = (int)src.elemSize();
int step = (int)(src.step / CV_ELEM_SIZE(depth));
Mat _extended_src_row(1, _src.cols + ksize * 2, _src.type());
const uchar* srow = _src.ptr(dy);
- memcpy(_extended_src_row.data + elemsize * ksize, srow, _src.step);
+ memcpy(_extended_src_row.ptr() + elemsize * ksize, srow, _src.step);
for (int k = 0; k < ksize; ++k)
{
- memcpy(_extended_src_row.data + k * elemsize, srow, elemsize);
- memcpy(_extended_src_row.data + (ksize + k) * elemsize + _src.step, srow + _src.step - elemsize, elemsize);
+ memcpy(_extended_src_row.ptr() + k * elemsize, srow, elemsize);
+ memcpy(_extended_src_row.ptr() + (ksize + k) * elemsize + _src.step, srow + _src.step - elemsize, elemsize);
}
for (int dx = 0; dx < dsize.width; ++dx)
if (interpolation == INTER_NEAREST)
mapy = Mat();
- CV_Assert(((interpolation == INTER_NEAREST && !mapy.data) || mapy.type() == CV_16UC1 ||
+ CV_Assert(((interpolation == INTER_NEAREST && mapy.empty()) || mapy.type() == CV_16UC1 ||
mapy.type() == CV_16SC1) && mapx.type() == CV_16SC2);
}
void CV_Remap_Test::remap_nearest(const Mat& _src, Mat& _dst)
{
CV_Assert(_src.depth() == CV_32F && _dst.type() == _src.type());
- CV_Assert(mapx.type() == CV_16SC2 && !mapy.data);
+ CV_Assert(mapx.type() == CV_16SC2 && mapy.empty());
Size ssize = _src.size(), dsize = _dst.size();
CV_Assert(ssize.area() > 0 && dsize.area() > 0);
}
}
- CV_Assert(mapx.type() == CV_16SC2 && ((inter == INTER_NEAREST && !mapy.data) || mapy.type() == CV_16SC1));
+ CV_Assert(mapx.type() == CV_16SC2 && ((inter == INTER_NEAREST && mapy.empty()) || mapy.type() == CV_16SC1));
cv::remap(_src, _dst, mapx, mapy, inter, borderType, borderValue);
}
}
}
- CV_Assert(mapx.type() == CV_16SC2 && ((inter == INTER_NEAREST && !mapy.data) || mapy.type() == CV_16SC1));
+ CV_Assert(mapx.type() == CV_16SC2 && ((inter == INTER_NEAREST && mapy.empty()) || mapy.type() == CV_16SC1));
cv::remap(_src, _dst, mapx, mapy, inter, borderType, borderValue);
}
void CV_HuMomentsTest::run_func()
{
- cvGetHuMoments( (CvMoments*)test_mat[INPUT][0].data,
- (CvHuMoments*)test_mat[OUTPUT][0].data );
+ cvGetHuMoments( test_mat[INPUT][0].ptr<CvMoments>(),
+ test_mat[OUTPUT][0].ptr<CvHuMoments>() );
}
void CV_HuMomentsTest::prepare_to_validation( int /*test_case_idx*/ )
{
- CvMoments* m = (CvMoments*)test_mat[INPUT][0].data;
- CvHuMoments* hu = (CvHuMoments*)test_mat[REF_OUTPUT][0].data;
+ CvMoments* m = test_mat[INPUT][0].ptr<CvMoments>();
+ CvHuMoments* hu = test_mat[REF_OUTPUT][0].ptr<CvHuMoments>();
double inv_m00 = m->inv_sqrt_m00*m->inv_sqrt_m00;
double s2 = inv_m00*inv_m00; /* 1./(m00 ^ (2/2 + 1)) */
//the expected image was saved to png with 1 added to each value
//so now we subtract 1 to get the real color
- if(exp.data)
+ if(!exp.empty())
colors.push_back(exp.ptr(p->y)[p->x] - 1);
}
fs.release();
continue; // bad result, do nothing here and get the error later;
// repaint in saved color to compare with expected;
- if(exp.data)
+ if(!exp.empty())
pixel = colors[pixel];
}
}
size_t esz = weights[0].elemSize();
fs << "input_scale" << "[";
- fs.writeRaw("d", weights[0].data, weights[0].total()*esz);
+ fs.writeRaw("d", weights[0].ptr(), weights[0].total()*esz);
fs << "]" << "output_scale" << "[";
- fs.writeRaw("d", weights[l_count].data, weights[l_count].total()*esz);
+ fs.writeRaw("d", weights[l_count].ptr(), weights[l_count].total()*esz);
fs << "]" << "inv_output_scale" << "[";
- fs.writeRaw("d", weights[l_count+1].data, weights[l_count+1].total()*esz);
+ fs.writeRaw("d", weights[l_count+1].ptr(), weights[l_count+1].total()*esz);
fs << "]" << "weights" << "[";
for( i = 1; i < l_count; i++ )
{
fs << "[";
- fs.writeRaw("d", weights[i].data, weights[i].total()*esz);
+ fs.writeRaw("d", weights[i].ptr(), weights[i].total()*esz);
fs << "]";
}
fs << "]";
size_t esz = weights[0].elemSize();
FileNode w = fn["input_scale"];
- w.readRaw("d", weights[0].data, weights[0].total()*esz);
+ w.readRaw("d", weights[0].ptr(), weights[0].total()*esz);
w = fn["output_scale"];
- w.readRaw("d", weights[l_count].data, weights[l_count].total()*esz);
+ w.readRaw("d", weights[l_count].ptr(), weights[l_count].total()*esz);
w = fn["inv_output_scale"];
- w.readRaw("d", weights[l_count+1].data, weights[l_count+1].total()*esz);
+ w.readRaw("d", weights[l_count+1].ptr(), weights[l_count+1].total()*esz);
FileNodeIterator w_it = fn["weights"].begin();
for( i = 1; i < l_count; i++, ++w_it )
- (*w_it).readRaw("d", weights[i].data, weights[i].total()*esz);
+ (*w_it).readRaw("d", weights[i].ptr(), weights[i].total()*esz);
trained = true;
}
else
{
Mat mask(1, nsamples, CV_8U);
- uchar* mptr = mask.data;
+ uchar* mptr = mask.ptr();
for( i = 0; i < nsamples; i++ )
mptr[i] = (uchar)(i < count);
trainSampleIdx.create(1, count, CV_32S);
err += fabs(val - val0) > FLT_EPSILON;
else
err += (val - val0)*(val - val0);
- if( resp.data )
+ if( !resp.empty() )
resp.at<float>(i) = val;
/*if( i < 100 )
{
vidx = &_vidx;
cls_labels = &_cls_labels;
results = &_results;
- results_prob = _results_prob.data ? &_results_prob : 0;
+ results_prob = !_results_prob.empty() ? &_results_prob : 0;
rawOutput = _rawOutput;
}
for (i = 0; i < nscales; i++)
{
const ScaleData& s = scaleData->at(i);
- Mat dst(s.szi.height - 1, s.szi.width - 1, CV_8U, rbuf.data);
+ Mat dst(s.szi.height - 1, s.szi.width - 1, CV_8U, rbuf.ptr());
resize(image, dst, dst.size(), 1. / s.scale, 1. / s.scale, INTER_LINEAR);
computeChannels((int)i, dst);
}
for (int j = 0; j < blocks_per_img.width; ++j)
{
const float *src = &svmDetector[0] + (j * blocks_per_img.height + i) * block_hist_size;
- float *dst = (float*)detector_reordered.data + (i * blocks_per_img.width + j) * block_hist_size;
+ float *dst = detector_reordered.ptr<float>() + (i * blocks_per_img.width + j) * block_hist_size;
for (size_t k = 0; k < block_hist_size; ++k)
dst[k] = src[k];
}
float angleScale = (float)(nbins/CV_PI);
for( y = 0; y < gradsize.height; y++ )
{
- const uchar* imgPtr = img.data + img.step*ymap[y];
- const uchar* prevPtr = img.data + img.step*ymap[y-1];
- const uchar* nextPtr = img.data + img.step*ymap[y+1];
+ const uchar* imgPtr = img.ptr(ymap[y]);
+ const uchar* prevPtr = img.ptr(ymap[y-1]);
+ const uchar* nextPtr = img.ptr(ymap[y+1]);
- float* gradPtr = (float*)grad.ptr(y);
- uchar* qanglePtr = (uchar*)qangle.ptr(y);
+ float* gradPtr = grad.ptr<float>(y);
+ uchar* qanglePtr = qangle.ptr(y);
if( cn == 1 )
{
}
int k, C1 = count1, C2 = count2, C4 = count4;
- const float* gradPtr = (const float*)(grad.data + grad.step*pt.y) + pt.x*2;
- const uchar* qanglePtr = qangle.data + qangle.step*pt.y + pt.x*2;
+ const float* gradPtr = grad.ptr<float>(pt.y) + pt.x*2;
+ const uchar* qanglePtr = qangle.ptr(pt.y) + pt.x*2;
// CV_Assert( blockHist != 0 );
memset(blockHist, 0, sizeof(float) * blockHistogramSize);
{
double scale = levelScale[i];
Size sz(cvRound(img.cols/scale), cvRound(img.rows/scale));
- Mat smallerImg(sz, img.type(), smallerImgBuf.data);
+ Mat smallerImg(sz, img.type(), smallerImgBuf.ptr());
if( sz == img.size() )
smallerImg = Mat(sz, img.type(), img.data, img.step);
else
double scale = (*locations)[i].scale;
Size sz(cvRound(img.cols / scale), cvRound(img.rows / scale));
- Mat smallerImg(sz, img.type(), smallerImgBuf.data);
+ Mat smallerImg(sz, img.type(), smallerImgBuf.ptr());
if( sz == img.size() )
smallerImg = Mat(sz, img.type(), img.data, img.step);
}
int k, C1 = count1, C2 = count2, C4 = count4;
- const float* gradPtr = (const float*)(grad.data + grad.step*pt.y) + pt.x*2;
- const uchar* qanglePtr = qangle.data + qangle.step*pt.y + pt.x*2;
+ const float* gradPtr = grad.ptr<float>(pt.y) + pt.x*2;
+ const uchar* qanglePtr = qangle.ptr(pt.y) + pt.x*2;
CV_Assert( blockHist != 0 );
for( k = 0; k < blockHistogramSize; k++ )
float angleScale = (float)(_nbins/CV_PI);
for( y = 0; y < gradsize.height; y++ )
{
- const uchar* imgPtr = img.data + img.step*ymap[y];
- const uchar* prevPtr = img.data + img.step*ymap[y-1];
- const uchar* nextPtr = img.data + img.step*ymap[y+1];
+ const uchar* imgPtr = img.ptr(ymap[y]);
+ const uchar* prevPtr = img.ptr(ymap[y-1]);
+ const uchar* nextPtr = img.ptr(ymap[y+1]);
float* gradPtr = (float*)grad.ptr(y);
uchar* qanglePtr = (uchar*)qangle.ptr(y);
Mat_<double> proxy_x;
if(x_mat.rows>1){
buf_x.create(1,ndim);
- Mat_<double> proxy(ndim,1,(double*)buf_x.data);
+ Mat_<double> proxy(ndim,1,buf_x.ptr<double>());
x_mat.copyTo(proxy);
proxy_x=buf_x;
}else{
proxy_x=x_mat;
}
- _Function->getGradient((double*)proxy_x.data,(double*)d.data);
+ _Function->getGradient(proxy_x.ptr<double>(),d.ptr<double>());
d*=-1.0;
d.copyTo(r);
for(int count=0;count<_termcrit.maxCount;count++){
minimizeOnTheLine(_Function,proxy_x,d,minimizeOnTheLine_buf1,minimizeOnTheLine_buf2);
r.copyTo(r_old);
- _Function->getGradient((double*)proxy_x.data,(double*)r.data);
+ _Function->getGradient(proxy_x.ptr<double>(),r.ptr<double>());
r*=-1.0;
double r_norm_sq=norm(r);
if(_termcrit.type==(TermCriteria::MAX_ITER+TermCriteria::EPS) && r_norm_sq<_termcrit.epsilon){
if(x_mat.rows>1){
- Mat(ndim, 1, CV_64F, (double*)proxy_x.data).copyTo(x);
+ Mat(ndim, 1, CV_64F, proxy_x.ptr<double>()).copyTo(x);
}
- return _Function->calc((double*)proxy_x.data);
+ return _Function->calc(proxy_x.ptr<double>());
}
ConjGradSolverImpl::ConjGradSolverImpl(){
{
ptry(j)=coord_sum(j)*fac1-p(ihi,j)*fac2;
}
- ytry=f->calc((double*)ptry.data);
+ ytry=f->calc(ptry.ptr<double>());
if (ytry < y(ihi))
{
y(ihi)=ytry;
{
p(i,j) = coord_sum(j) = 0.5*(p(i,j)+p(ilo,j));
}
- y(i)=f->calc((double*)coord_sum.data);
+ y(i)=f->calc(coord_sum.ptr<double>());
}
}
nfunk += ndim;
if(x_mat.rows>1){
buf_x.create(1,_step.cols);
- Mat_<double> proxy(_step.cols,1,(double*)buf_x.data);
+ Mat_<double> proxy(_step.cols,1,buf_x.ptr<double>());
x_mat.copyTo(proxy);
proxy_x=buf_x;
}else{
dprintf(("%d iterations done\n",count));
if(x_mat.rows>1){
- Mat(x_mat.rows, 1, CV_64F, (double*)proxy_x.data).copyTo(x);
+ Mat(x_mat.rows, 1, CV_64F, proxy_x.ptr<double>()).copyTo(x);
}
return res;
}
{
CV_Assert(H.type() == CV_64F && H.size() == Size(3, 3));
- const double* h = reinterpret_cast<const double*>(H.data);
+ const double* h = H.ptr<double>();
double d1, d2; // Denominators
double v1, v2; // Focal squares value candidates
if(!b.empty())
buf[1].create(1, maxsize, CV_64FC(cn));
buf[2].create(1, maxsize, CV_64FC(cn));
- scalarToRawData(gamma, buf[2].data, CV_64FC(cn), (int)(maxsize*cn));
+ scalarToRawData(gamma, buf[2].ptr(), CV_64FC(cn), (int)(maxsize*cn));
for( i = 0; i < nplanes; i++, ++it)
{
apart0.convertTo(apart, apart.type(), alpha);
size_t k, n = (j2 - j)*cn;
- double* aptr = (double*)apart.data;
- const double* gptr = (const double*)buf[2].data;
+ double* aptr = apart.ptr<double>();
+ const double* gptr = buf[2].ptr<double>();
if( b.empty() )
{
Mat bpart0 = planes[1].colRange((int)j, (int)j2);
Mat bpart = buf[1].colRange(0, (int)(j2 - j));
bpart0.convertTo(bpart, bpart.type(), beta);
- const double* bptr = (const double*)bpart.data;
+ const double* bptr = bpart.ptr<double>();
for( k = 0; k < n; k++ )
aptr[k] += bptr[k] + gptr[k];
for( i = 0; i < nplanes; i++, ++it)
{
- const uchar* sptr = planes[0].data;
- uchar* dptr = planes[1].data;
+ const uchar* sptr = planes[0].ptr();
+ uchar* dptr = planes[1].ptr();
switch( src.depth() )
{
size_t planeSize = planes[0].total()*src.elemSize();
for( i = 0; i < nplanes; i++, ++it )
- memcpy(planes[1].data, planes[0].data, planeSize);
+ memcpy(planes[1].ptr(), planes[0].ptr(), planeSize);
return;
}
for( i = 0; i < nplanes; i++, ++it)
{
- const uchar* sptr = planes[0].data;
- uchar* dptr = planes[1].data;
- const uchar* mptr = planes[2].data;
+ const uchar* sptr = planes[0].ptr();
+ uchar* dptr = planes[1].ptr();
+ const uchar* mptr = planes[2].ptr();
for( j = 0; j < total; j++, sptr += elemSize, dptr += elemSize )
{
for( i = 0; i < nplanes; i++, ++it )
{
- uchar* dptr = plane.data;
+ uchar* dptr = plane.ptr();
if( uniform )
memset( dptr, gptr[0], planeSize );
else if( i == 0 )
dptr[k] = gptr[k];
}
else
- memcpy(dptr, dst.data, planeSize);
+ memcpy(dptr, dst.ptr(), planeSize);
}
return;
}
for( i = 0; i < nplanes; i++, ++it)
{
- uchar* dptr = planes[0].data;
- const uchar* mptr = planes[1].data;
+ uchar* dptr = planes[0].ptr();
+ const uchar* mptr = planes[1].ptr();
for( j = 0; j < total; j++, dptr += elemSize )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr = planes[0].data;
- uchar* dptr = planes[1].data + coi*size0;
+ const uchar* sptr = planes[0].ptr();
+ uchar* dptr = planes[1].ptr() + coi*size0;
for( j = 0; j < total; j++, sptr += size0, dptr += size1 )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr = planes[0].data + coi*size1;
- uchar* dptr = planes[1].data;
+ const uchar* sptr = planes[0].ptr() + coi*size1;
+ uchar* dptr = planes[1].ptr();
for( j = 0; j < total; j++, sptr += size0, dptr += size1 )
{
for( i = 0; i < nplanes; i++, ++it, startidx += total )
{
- const uchar* sptr = planes[0].data;
- const uchar* mptr = planes[1].data;
+ const uchar* sptr = planes[0].ptr();
+ const uchar* mptr = planes[1].ptr();
switch( depth )
{
int cellSize = normType == NORM_HAMMING ? 1 : 2;
for( i = 0; i < nplanes; i++, ++it )
- result += normHamming(planes[0].data, total, cellSize);
+ result += normHamming(planes[0].ptr(), total, cellSize);
return result;
}
int normType0 = normType;
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr = planes[0].data;
- const uchar* mptr = planes[1].data;
+ const uchar* sptr = planes[0].ptr();
+ const uchar* mptr = planes[1].ptr();
switch( depth )
{
int cellSize = normType == NORM_HAMMING ? 1 : 2;
for( i = 0; i < nplanes; i++, ++it )
- result += normHamming(planes[0].data, total, cellSize);
+ result += normHamming(planes[0].ptr(), total, cellSize);
return result;
}
int normType0 = normType;
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr1 = planes[0].data;
- const uchar* sptr2 = planes[1].data;
- const uchar* mptr = planes[2].data;
+ const uchar* sptr1 = planes[0].ptr();
+ const uchar* sptr2 = planes[1].ptr();
+ const uchar* mptr = planes[2].ptr();
switch( depth )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr1 = planes[0].data;
- const uchar* sptr2 = planes[1].data;
+ const uchar* sptr1 = planes[0].ptr();
+ const uchar* sptr2 = planes[1].ptr();
switch( depth )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr1 = planes[0].data;
- const uchar* sptr2 = planes[1].data;
- uchar* dptr = planes[2].data;
+ const uchar* sptr1 = planes[0].ptr();
+ const uchar* sptr2 = planes[1].ptr();
+ uchar* dptr = planes[2].ptr();
logicOp_(sptr1, sptr2, dptr, total, op);
}
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr = planes[0].data;
- uchar* dptr = planes[1].data;
+ const uchar* sptr = planes[0].ptr();
+ uchar* dptr = planes[1].ptr();
logicOpS_(sptr, (uchar*)&buf[0], dptr, total, op);
}
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr1 = planes[0].data;
- const uchar* sptr2 = planes[1].data;
- uchar* dptr = planes[2].data;
+ const uchar* sptr1 = planes[0].ptr();
+ const uchar* sptr2 = planes[1].ptr();
+ uchar* dptr = planes[2].ptr();
switch( depth )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr = planes[0].data;
- uchar* dptr = planes[1].data;
+ const uchar* sptr = planes[0].ptr();
+ uchar* dptr = planes[1].ptr();
switch( depth )
{
for( i = 0; i < nplanes; i++, ++it, startidx += total )
{
- const uchar* sptr1 = planes[0].data;
- const uchar* sptr2 = planes[1].data;
+ const uchar* sptr1 = planes[0].ptr();
+ const uchar* sptr2 = planes[1].ptr();
double realmaxdiff = 0;
switch( depth )
for( i = 0; i < nplanes; i++, ++it, startidx += total )
{
- const uchar* aptr = plane.data;
+ const uchar* aptr = plane.ptr();
switch( depth )
{
for( i = 0; i < nplanes; i++, ++it, startidx += total )
{
- const uchar* sptr1 = planes[0].data;
- const uchar* sptr2 = planes[1].data;
+ const uchar* sptr1 = planes[0].ptr();
+ const uchar* sptr2 = planes[1].ptr();
switch( depth )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr = planes[0].data;
- uchar* dptr = planes[1].data;
+ const uchar* sptr = planes[0].ptr();
+ uchar* dptr = planes[1].ptr();
switch( depth )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr1 = planes[0].data;
- const uchar* sptr2 = planes[1].data;
- uchar* dptr = planes[2].data;
+ const uchar* sptr1 = planes[0].ptr();
+ const uchar* sptr2 = planes[1].ptr();
+ uchar* dptr = planes[2].ptr();
switch( depth )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr1 = planes[0].data;
- uchar* dptr = planes[1].data;
+ const uchar* sptr1 = planes[0].ptr();
+ uchar* dptr = planes[1].ptr();
switch( depth )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr1 = planes[0].data;
- const uchar* sptr2 = planes[1].data;
- uchar* dptr = planes[2].data;
+ const uchar* sptr1 = planes[0].ptr();
+ const uchar* sptr2 = planes[1].ptr();
+ uchar* dptr = planes[2].ptr();
switch( depth )
{
for( i = 0; i < nplanes; i++, ++it )
{
- const uchar* sptr = planes[0].data;
- const uchar* mptr = planes[1].data;
+ const uchar* sptr = planes[0].ptr();
+ const uchar* mptr = planes[1].ptr();
switch( depth )
{
parallel_for_(Range(0, image.rows),
MOG2Invoker(image, fgmask,
- (GMM*)bgmodel.data,
- (float*)(bgmodel.data + sizeof(GMM)*nmixtures*image.rows*image.cols),
- bgmodelUsedModes.data, nmixtures, (float)learningRate,
+ bgmodel.ptr<GMM>(),
+ (float*)(bgmodel.ptr() + sizeof(GMM)*nmixtures*image.rows*image.cols),
+ bgmodelUsedModes.ptr(), nmixtures, (float)learningRate,
(float)varThreshold,
backgroundRatio, varThresholdGen,
fVarInit, fVarMin, fVarMax, float(-learningRate*fCT), fTau,
CV_Assert(nchannels == 1 || nchannels == 3);
Mat meanBackground(frameSize, CV_MAKETYPE(CV_8U, nchannels), Scalar::all(0));
int firstGaussianIdx = 0;
- const GMM* gmm = (GMM*)bgmodel.data;
+ const GMM* gmm = bgmodel.ptr<GMM>();
const float* mean = reinterpret_cast<const float*>(gmm + frameSize.width*frameSize.height*nmixtures);
std::vector<float> meanVal(nchannels, 0.f);
for(int row=0; row<meanBackground.rows; row++)
// update the state: x'(k) = A*x(k)
statePre = transitionMatrix*statePost;
- if( control.data )
+ if( !control.empty() )
// x'(k) = x'(k) + B*u(k)
statePre += controlMatrix*control;
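// Illustration of the check rewritten above: Mat::empty() is the documented
// "no data" test. Unlike inspecting the raw data pointer, it is also true for
// zero-sized headers (e.g. an empty ROI of an allocated matrix), which is why
// the predicate form below is preferred. hasControlInput is a made-up name.
#include <opencv2/core.hpp>

static bool hasControlInput(const cv::Mat& control)
{
    return !control.empty();   // rather than: control.data != 0
}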
int x, y;
for( y = 0; y < winSize.height; y++ )
{
- const uchar* src = (const uchar*)I.data + (y + iprevPt.y)*stepI + iprevPt.x*cn;
- const deriv_type* dsrc = (const deriv_type*)derivI.data + (y + iprevPt.y)*dstep + iprevPt.x*cn2;
+ const uchar* src = I.ptr() + (y + iprevPt.y)*stepI + iprevPt.x*cn;
+ const deriv_type* dsrc = derivI.ptr<deriv_type>() + (y + iprevPt.y)*dstep + iprevPt.x*cn2;
- deriv_type* Iptr = (deriv_type*)(IWinBuf.data + y*IWinBuf.step);
- deriv_type* dIptr = (deriv_type*)(derivIWinBuf.data + y*derivIWinBuf.step);
+ deriv_type* Iptr = IWinBuf.ptr<deriv_type>(y);
+ deriv_type* dIptr = derivIWinBuf.ptr<deriv_type>(y);
x = 0;
for( y = 0; y < winSize.height; y++ )
{
- const uchar* Jptr = (const uchar*)J.data + (y + inextPt.y)*stepJ + inextPt.x*cn;
- const deriv_type* Iptr = (const deriv_type*)(IWinBuf.data + y*IWinBuf.step);
- const deriv_type* dIptr = (const deriv_type*)(derivIWinBuf.data + y*derivIWinBuf.step);
+ const uchar* Jptr = J.ptr() + (y + inextPt.y)*stepJ + inextPt.x*cn;
+ const deriv_type* Iptr = IWinBuf.ptr<deriv_type>(y);
+ const deriv_type* dIptr = derivIWinBuf.ptr<deriv_type>(y);
x = 0;
for( y = 0; y < winSize.height; y++ )
{
- const uchar* Jptr = (const uchar*)J.data + (y + inextPoint.y)*stepJ + inextPoint.x*cn;
- const deriv_type* Iptr = (const deriv_type*)(IWinBuf.data + y*IWinBuf.step);
+ const uchar* Jptr = J.ptr() + (y + inextPoint.y)*stepJ + inextPoint.x*cn;
+ const deriv_type* Iptr = IWinBuf.ptr<deriv_type>(y);
for( x = 0; x < winSize.width*cn; x++ )
{
Mat nextPtsMat = _nextPts.getMat();
CV_Assert( nextPtsMat.checkVector(2, CV_32F, true) == npoints );
- const Point2f* prevPts = (const Point2f*)prevPtsMat.data;
- Point2f* nextPts = (Point2f*)nextPtsMat.data;
+ const Point2f* prevPts = prevPtsMat.ptr<Point2f>();
+ Point2f* nextPts = nextPtsMat.ptr<Point2f>();
_status.create((int)npoints, 1, CV_8U, -1, true);
Mat statusMat = _status.getMat(), errMat;
CV_Assert( statusMat.isContinuous() );
- uchar* status = statusMat.data;
+ uchar* status = statusMat.ptr();
float* err = 0;
for( i = 0; i < npoints; i++ )
_err.create((int)npoints, 1, CV_32F, -1, true);
errMat = _err.getMat();
CV_Assert( errMat.isContinuous() );
- err = (float*)errMat.data;
+ err = errMat.ptr<float>();
}
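// Sketch of the flat-array view used above (illustration only): a continuous
// Nx1 CV_32FC2 matrix can be read as an array of Point2f through
// ptr<Point2f>(); checkVector() returns the number of points or -1 if the
// layout does not match. countFinitePoints is a made-up helper.
#include <opencv2/core.hpp>
#include <cmath>

static int countFinitePoints(const cv::Mat& pts)
{
    int n = pts.checkVector(2, CV_32F, true);   // true = require continuous storage
    CV_Assert(n >= 0);
    const cv::Point2f* p = pts.ptr<cv::Point2f>();
    int finite = 0;
    for (int i = 0; i < n; i++)
        if (std::isfinite(p[i].x) && std::isfinite(p[i].y))
            finite++;
    return finite;
}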
std::vector<Mat> prevPyr, nextPyr;
{
Size imgSize = prevPyr[level * lvlStep1].size();
Mat _derivI( imgSize.height + winSize.height*2,
- imgSize.width + winSize.width*2, derivIBuf.type(), derivIBuf.data );
+ imgSize.width + winSize.width*2, derivIBuf.type(), derivIBuf.ptr() );
derivI = _derivI(Rect(winSize.width, winSize.height, imgSize.width, imgSize.height));
calcSharrDeriv(prevPyr[level * lvlStep1], derivI);
copyMakeBorder(derivI, _derivI, winSize.height, winSize.height, winSize.width, winSize.width, BORDER_CONSTANT|BORDER_ISOLATED);
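// Sketch of the buffer-reuse pattern above (illustration only): a Mat header
// can be constructed over memory owned by another Mat, here a padded scratch
// buffer, and the interior is then addressed through a Rect ROI. The sizes and
// the "scratch" buffer are made up for the example.
#include <opencv2/core.hpp>

static void demoHeaderOverBuffer()
{
    const int rows = 100, cols = 200, border = 5;
    cv::Mat scratch(rows + 2*border, cols + 2*border, CV_16SC2);                 // owns the memory
    cv::Mat padded(scratch.rows, scratch.cols, scratch.type(), scratch.ptr());   // header only, no copy
    cv::Mat interior = padded(cv::Rect(border, border, cols, rows));             // view into the same bytes
    interior.setTo(cv::Scalar::all(0));
}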
for( y = 0; y < height; y++ )
{
float g0 = g[0], g1, g2;
- float *srow0 = (float*)(src.data + src.step*y), *srow1 = 0;
- float *drow = (float*)(dst.data + dst.step*y);
+ const float *srow0 = src.ptr<float>(y), *srow1 = 0;
+ float *drow = dst.ptr<float>(y);
// vertical part of convolution
for( x = 0; x < width; x++ )
for( k = 1; k <= n; k++ )
{
g0 = g[k]; g1 = xg[k]; g2 = xxg[k];
- srow0 = (float*)(src.data + src.step*std::max(y-k,0));
- srow1 = (float*)(src.data + src.step*std::min(y+k,height-1));
+ srow0 = src.ptr<float>(std::max(y-k,0));
+ srow1 = src.ptr<float>(std::min(y+k,height-1));
for( x = 0; x < width; x++ )
{
static const float border[BORDER] = {0.14f, 0.14f, 0.4472f, 0.4472f, 0.4472f};
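// Sketch of the clamped row access above (illustration only): ptr<float>(r)
// returns the start of row r, and clamping the index with std::max/std::min
// replicates the border rows. verticalBoxBlur3 is a made-up helper that
// assumes a CV_32FC1 input.
#include <opencv2/core.hpp>
#include <algorithm>

static void verticalBoxBlur3(const cv::Mat& src, cv::Mat& dst)
{
    CV_Assert(src.type() == CV_32FC1);
    dst.create(src.size(), src.type());
    for (int y = 0; y < src.rows; y++)
    {
        const float* prev = src.ptr<float>(std::max(y - 1, 0));
        const float* curr = src.ptr<float>(y);
        const float* next = src.ptr<float>(std::min(y + 1, src.rows - 1));
        float* out = dst.ptr<float>(y);
        for (int x = 0; x < src.cols; x++)
            out[x] = (prev[x] + curr[x] + next[x]) / 3.f;
    }
}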
int x, y, width = _flow.cols, height = _flow.rows;
- const float* R1 = (float*)_R1.data;
+ const float* R1 = _R1.ptr<float>();
size_t step1 = _R1.step/sizeof(R1[0]);
matM.create(height, width, CV_32FC(5));
for( y = _y0; y < _y1; y++ )
{
- const float* flow = (float*)(_flow.data + y*_flow.step);
- const float* R0 = (float*)(_R0.data + y*_R0.step);
- float* M = (float*)(matM.data + y*matM.step);
+ const float* flow = _flow.ptr<float>(y);
+ const float* R0 = _R0.ptr<float>(y);
+ float* M = matM.ptr<float>(y);
for( x = 0; x < width; x++ )
{
double* vsum = _vsum + (m+1)*5;
// init vsum
- const float* srow0 = (const float*)matM.data;
+ const float* srow0 = matM.ptr<float>();
for( x = 0; x < width*5; x++ )
vsum[x] = srow0[x]*(m+2);
for( y = 1; y < m; y++ )
{
- srow0 = (float*)(matM.data + matM.step*std::min(y,height-1));
+ srow0 = matM.ptr<float>(std::min(y,height-1));
for( x = 0; x < width*5; x++ )
vsum[x] += srow0[x];
}
for( y = 0; y < height; y++ )
{
double g11, g12, g22, h1, h2;
- float* flow = (float*)(_flow.data + _flow.step*y);
+ float* flow = _flow.ptr<float>(y);
- srow0 = (const float*)(matM.data + matM.step*std::max(y-m-1,0));
- const float* srow1 = (const float*)(matM.data + matM.step*std::min(y+m,height-1));
+ srow0 = matM.ptr<float>(std::max(y-m-1,0));
+ const float* srow1 = matM.ptr<float>(std::min(y+m,height-1));
// vertical blur
for( x = 0; x < width*5; x++ )
for( y = 0; y < height; y++ )
{
double g11, g12, g22, h1, h2;
- float* flow = (float*)(_flow.data + _flow.step*y);
+ float* flow = _flow.ptr<float>(y);
// vertical blur
for( i = 0; i <= m; i++ )
{
- srow[m-i] = (const float*)(matM.data + matM.step*std::max(y-i,0));
- srow[m+i] = (const float*)(matM.data + matM.step*std::min(y+i,height-1));
+ srow[m-i] = matM.ptr<float>(std::max(y-i,0));
+ srow[m+i] = matM.ptr<float>(std::min(y+i,height-1));
}
x = 0;
else
flow = flow0;
- if( !prevFlow.data )
+ if( prevFlow.empty() )
{
if( flags & OPTFLOW_USE_INITIAL_FLOW )
{
printf("%s\n", imageList[i*3+k].c_str());
view = imread(imageList[i*3+k], 1);
- if(view.data)
+ if(!view.empty())
{
vector<Point2f> ptvec;
imageSize = view.size();
int k2 = k == 0 ? 1 : k == 1 ? 0 : 2;
view = imread(imageList[i*3+k], 1);
- if(!view.data)
+ if(view.empty())
continue;
Mat rview = canvas.colRange(k2*imageSize.width, (k2+1)*imageSize.width);
else if( i < (int)imageList.size() )
view = imread(imageList[i], 1);
- if(!view.data)
+ if(view.empty())
{
if( imagePoints.size() > 0 )
runAndSave(outputFilename, imagePoints, imageSize,
for( i = 0; i < (int)imageList.size(); i++ )
{
view = imread(imageList[i], 1);
- if(!view.data)
+ if(view.empty())
continue;
//undistort( view, rview, cameraMatrix, distCoeffs, cameraMatrix );
remap(view, rview, map1, map2, INTER_LINEAR);
img2 = imread(dest);
- if(!img0.data)
+ if(img0.empty())
{
cout << "Source Image does not exist" << endl;
exit(0);
}
- if(!img2.data)
+ if(img2.empty())
{
cout << "Destination Image does not exist" << endl;
exit(0);
img0 = imread(src);
- if(!img0.data)
+ if(img0.empty())
{
cout << "Source Image does not exist" << endl;
exit(0);
img0 = imread(src);
- if(!img0.data)
+ if(img0.empty())
{
cout << "Source Image does not exist" << endl;
exit(0);
img0 = imread(src);
- if(!img0.data)
+ if(img0.empty())
{
cout << "Source Image does not exist" << endl;
exit(0);
}
#endif
- if( !img.data ) // check if the image has been loaded properly
+ if( img.empty() ) // check if the image has been loaded properly
return -1;
Mat img_yuv;
int main( int argc, char** argv )
{
char* filename = argc == 2 ? argv[1] : (char*)"baboon.jpg";
- if( (src = imread(filename,1)).data == 0 )
+ if( (src = imread(filename,1)).empty() )
return -1;
help();
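// Sketch of the load-and-check idiom used throughout the samples above
// (illustration only; the path is just a placeholder): imread returns an empty
// Mat on failure, so Mat::empty() is the load check.
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <cstdlib>
#include <iostream>
#include <string>

static cv::Mat loadOrExit(const std::string& path)
{
    cv::Mat img = cv::imread(path, cv::IMREAD_COLOR);
    if (img.empty())
    {
        std::cerr << "Could not load image: " << path << std::endl;
        std::exit(EXIT_FAILURE);
    }
    return img;
}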
Mat I = imread(argv[1]);
- if(!I.data)
+ if(I.empty())
{
cout << "Image not found" << endl;
exit(0);
Mat tmp_frame, bgmask, out_frame;
cap >> tmp_frame;
- if(!tmp_frame.data)
+ if(tmp_frame.empty())
{
printf("can not read data from the video source\n");
return -1;
for(;;)
{
cap >> tmp_frame;
- if( !tmp_frame.data )
+ if( tmp_frame.empty() )
break;
bgsubtractor->apply(tmp_frame, bgmask, update_bg_model ? -1 : 0);
refineSegments(tmp_frame, bgmask, out_frame);
projectPoints(Mat(objpt), rvec, tvec, cameraMatrix, Mat(), imgpt);
- if( shownFrame.data )
+ if( !shownFrame.empty() )
{
if( nobjpt == 1 )
circle(shownFrame, imgpt[0], 3, Scalar(0,255,0), -1, LINE_AA);
}
else
capture >> frame0;
- if( !frame0.data )
+ if( frame0.empty() )
break;
- if( !frame.data )
+ if( frame.empty() )
{
if( frame0.size() != calibratedImageSize )
{
hog_detector.clear();
hog_detector.resize(sv.cols + 1);
- memcpy(&hog_detector[0], sv.data, sv.cols*sizeof(hog_detector[0]));
+ memcpy(&hog_detector[0], sv.ptr(), sv.cols*sizeof(hog_detector[0]));
hog_detector[sv.cols] = (float)-rho;
}
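// Sketch of the copy above (illustration only): a continuous 1xN CV_32F row,
// such as an SVM support vector, can be copied into a std::vector from its
// typed pointer; rowToVector is a made-up helper.
#include <opencv2/core.hpp>
#include <vector>

static std::vector<float> rowToVector(const cv::Mat& row)
{
    CV_Assert(row.type() == CV_32F && row.rows == 1 && row.isContinuous());
    const float* p = row.ptr<float>();
    return std::vector<float>(p, p + row.cols);
}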
break;
}
Mat img = imread( (prefix+line).c_str() ); // load the image
- if( !img.data ) // invalid image, just skip it.
+ if( img.empty() ) // invalid image, just skip it.
continue;
#ifdef _DEBUG
imshow( "image", img );
while( !end_of_process )
{
video >> img;
- if( !img.data )
+ if( img.empty() )
break;
draw = img.clone();
src1 = imread("../images/LinuxLogo.jpg");
src2 = imread("../images/WindowsLogo.jpg");
- if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
- if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
+ if( src1.empty() ) { printf("Error loading src1 \n"); return -1; }
+ if( src2.empty() ) { printf("Error loading src2 \n"); return -1; }
/// Initialize values
alpha_slider = 0;
/// Load image
src = imread( argv[1], 1 );
- if( !src.data )
+ if( src.empty() )
{ cout<<"Usage: ./Histogram_Demo <path_to_image>"<<endl;
return -1;
}
/// Load image
src = imread( argv[1], 1 );
- if( !src.data )
+ if( src.empty() )
{ return -1; }
/// Separate the image in 3 places ( B, G and R )
src1 = imread("../images/LinuxLogo.jpg");
src2 = imread("../images/WindowsLogo.jpg");
- if( !src1.data ) { std::cout<< "Error loading src1"<<std::endl; return -1; }
- if( !src2.data ) { std::cout<< "Error loading src2"<<std::endl; return -1; }
+ if( src1.empty() ) { std::cout<< "Error loading src1"<<std::endl; return -1; }
+ if( src2.empty() ) { std::cout<< "Error loading src2"<<std::endl; return -1; }
/// Create Windows
namedWindow("Linear Blend", 1);
/// Load an image
src = imread( argv[1] );
- if( !src.data )
+ if( src.empty() )
{ return -1; }
/// Create windows
/// Load an image
src = imread( argv[1] );
- if( !src.data )
+ if( src.empty() )
{ return -1; }
/// Create window
/// Test image - Make sure it s divisible by 2^{n}
src = imread( "../images/chicky_512.png" );
- if( !src.data )
+ if( src.empty() )
{ printf(" No data! -- Exiting the program \n");
return -1; }
/// Load an image
src = imread( argv[1] );
- if( !src.data )
+ if( src.empty() )
{ return -1; }
/// Create a matrix of the same type and size as src (for dst)
// Read the image
src = imread( argv[1], 1 );
- if( !src.data )
+ if( src.empty() )
{
std::cerr<<"Invalid input image\n";
std::cout<<usage;
/// Load an image
src = imread( argv[1] );
- if( !src.data )
+ if( src.empty() )
{ return -1; }
/// Remove noise by blurring with a Gaussian filter
/// Load an image
src = imread( argv[1] );
- if( !src.data )
+ if( src.empty() )
{ return -1; }
GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
/// Load an image
src = imread( argv[1] );
- if( !src.data )
+ if( src.empty() )
{
printf(" No data entered, please enter the path to an image file \n");
return -1;
/// Load an image
src = imread( argv[1] );
- if( !src.data )
+ if( src.empty() )
{ return -1; }
/// Create window
Mat imgDisparity16S = Mat( imgLeft.rows, imgLeft.cols, CV_16S );
Mat imgDisparity8U = Mat( imgLeft.rows, imgLeft.cols, CV_8UC1 );
- if( !imgLeft.data || !imgRight.data )
+ if( imgLeft.empty() || imgRight.empty() )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- 2. Call the constructor for StereoBM
else
I = imread(argv[1], IMREAD_COLOR);
- if (!I.data)
+ if (I.empty())
{
cout << "The image" << argv[1] << " could not be loaded." << endl;
return -1;
<< times << " runs): " << t << " milliseconds."<< endl;
Mat lookUpTable(1, 256, CV_8U);
- uchar* p = lookUpTable.data;
+ uchar* p = lookUpTable.ptr();
for( int i = 0; i < 256; ++i)
p[i] = table[i];
Mat image;
image = imread(argv[1], IMREAD_COLOR); // Read the file
- if(! image.data ) // Check for invalid input
+ if( image.empty() ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
Mat image;
image = imread(argv[1], IMREAD_COLOR); // Read the file
- if(! image.data ) // Check for invalid input
+ if( image.empty() ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
Mat I = imread(argv[1]);
- if(!I.data)
+ if(I.empty())
{
cout << "Image not found" << endl;
exit(0);
img2 = imread(dest);
- if(!img0.data)
+ if(img0.empty())
{
cout << "Source Image does not exist" << endl;
exit(0);
}
- if(!img2.data)
+ if(img2.empty())
{
cout << "Destination Image does not exist" << endl;
exit(0);
img0 = imread(src);
- if(!img0.data)
+ if(img0.empty())
{
cout << "Source Image does not exist" << endl;
exit(0);
img0 = imread(src);
- if(!img0.data)
+ if(img0.empty())
{
cout << "Source Image does not exist" << endl;
exit(0);
img0 = imread(src);
- if(!img0.data)
+ if(img0.empty())
{
cout << "Source Image does not exist" << endl;
exit(0);
void processImages(char* fistFrameFilename) {
//read the first file of the sequence
frame = imread(fistFrameFilename);
- if(!frame.data){
+ if(frame.empty()){
//error in opening the first image
cerr << "Unable to open first image frame: " << fistFrameFilename << endl;
exit(EXIT_FAILURE);
string nextFrameFilename = prefix + nextFrameNumberString + suffix;
//read the next frame
frame = imread(nextFrameFilename);
- if(!frame.data){
+ if(frame.empty()){
//error in opening the next image in the sequence
cerr << "Unable to open image frame: " << nextFrameFilename << endl;
exit(EXIT_FAILURE);
desc.CPUAccessFlags = cv::ocl::useOpenCL() ? 0 : D3D10_CPU_ACCESS_READ;
D3D10_SUBRESOURCE_DATA srInitData;
- srInitData.pSysMem = inputMat.data;
+ srInitData.pSysMem = inputMat.ptr();
srInitData.SysMemPitch = (UINT)inputMat.step[0];
if (FAILED(dev->CreateTexture2D(&desc, &srInitData, &pInputTexture)))
desc.CPUAccessFlags = cv::ocl::useOpenCL() ? 0 : D3D11_CPU_ACCESS_READ;
D3D11_SUBRESOURCE_DATA srInitData;
- srInitData.pSysMem = inputMat.data;
+ srInitData.pSysMem = inputMat.ptr();
srInitData.SysMemPitch = (UINT)inputMat.step[0];
if (FAILED(dev->CreateTexture2D(&desc, &srInitData, &pInputTexture)))