namespace cuda
{
class CV_EXPORTS GpuMat;
- class CV_EXPORTS CudaMem;
+ class CV_EXPORTS HostMem;
class CV_EXPORTS Stream;
class CV_EXPORTS Event;
}
//! @addtogroup cuda_struct
//! @{
-//////////////////////////////// GpuMat ///////////////////////////////
+//===================================================================================
+// GpuMat
+//===================================================================================
/** @brief Base storage class for GPU memory with reference counting.
CV_EXPORTS void setBufferPoolUsage(bool on);
CV_EXPORTS void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCount);
-//////////////////////////////// CudaMem ////////////////////////////////
+//===================================================================================
+// HostMem
+//===================================================================================
/** @brief Class with reference counting wrapping special memory type allocation functions from CUDA.
@note Allocation size of such memory types is usually limited. For more details, see *CUDA 2.2
Pinned Memory APIs* document or *CUDA C Programming Guide*.
*/
-class CV_EXPORTS CudaMem
+class CV_EXPORTS HostMem
{
public:
enum AllocType { PAGE_LOCKED = 1, SHARED = 2, WRITE_COMBINED = 4 };
- explicit CudaMem(AllocType alloc_type = PAGE_LOCKED);
+ explicit HostMem(AllocType alloc_type = PAGE_LOCKED);
- CudaMem(const CudaMem& m);
+ HostMem(const HostMem& m);
- CudaMem(int rows, int cols, int type, AllocType alloc_type = PAGE_LOCKED);
- CudaMem(Size size, int type, AllocType alloc_type = PAGE_LOCKED);
+ HostMem(int rows, int cols, int type, AllocType alloc_type = PAGE_LOCKED);
+ HostMem(Size size, int type, AllocType alloc_type = PAGE_LOCKED);
//! creates from host memory, copying the data
- explicit CudaMem(InputArray arr, AllocType alloc_type = PAGE_LOCKED);
+ explicit HostMem(InputArray arr, AllocType alloc_type = PAGE_LOCKED);
- ~CudaMem();
+ ~HostMem();
- CudaMem& operator =(const CudaMem& m);
+ HostMem& operator =(const HostMem& m);
//! swaps with other smart pointer
- void swap(CudaMem& b);
+ void swap(HostMem& b);
//! returns deep copy of the matrix, i.e. the data is copied
- CudaMem clone() const;
+ HostMem clone() const;
//! allocates new matrix data unless the matrix already has specified size and type.
void create(int rows, int cols, int type);
void create(Size size, int type);
- //! creates alternative CudaMem header for the same data, with different
+ //! creates alternative HostMem header for the same data, with different
//! number of channels and/or different number of rows
- CudaMem reshape(int cn, int rows = 0) const;
+ HostMem reshape(int cn, int rows = 0) const;
//! decrements the reference counter and releases the memory if needed.
void release();
- //! returns matrix header with disabled reference counting for CudaMem data.
+ //! returns matrix header with disabled reference counting for HostMem data.
Mat createMatHeader() const;
/** @brief Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting
*/
CV_EXPORTS void unregisterPageLocked(Mat& m);
-///////////////////////////////// Stream //////////////////////////////////
+//===================================================================================
+// Stream
+//===================================================================================
/** @brief This class encapsulates a queue of asynchronous calls.
//! @} cuda_struct
-//////////////////////////////// Initialization & Info ////////////////////////
+//===================================================================================
+// Initialization & Info
+//===================================================================================
//! @addtogroup cuda_init
//! @{
namespace cv { namespace cuda {
-//////////////////////////////// GpuMat ///////////////////////////////
+//===================================================================================
+// GpuMat
+//===================================================================================
inline
GpuMat::GpuMat(Allocator* allocator_)
a.swap(b);
}
-//////////////////////////////// CudaMem ////////////////////////////////
+//===================================================================================
+// HostMem
+//===================================================================================
+// Default constructor: creates an empty HostMem header carrying the requested
+// host allocation type (PAGE_LOCKED by default). All fields are zeroed; no
+// memory is allocated until create() is called.
inline
-CudaMem::CudaMem(AllocType alloc_type_)
+HostMem::HostMem(AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
}
inline
-CudaMem::CudaMem(const CudaMem& m)
+HostMem::HostMem(const HostMem& m)
: flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type)
{
if( refcount )
}
inline
-CudaMem::CudaMem(int rows_, int cols_, int type_, AllocType alloc_type_)
+HostMem::HostMem(int rows_, int cols_, int type_, AllocType alloc_type_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
if (rows_ > 0 && cols_ > 0)
}
inline
-CudaMem::CudaMem(Size size_, int type_, AllocType alloc_type_)
+HostMem::HostMem(Size size_, int type_, AllocType alloc_type_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
if (size_.height > 0 && size_.width > 0)
}
inline
-CudaMem::CudaMem(InputArray arr, AllocType alloc_type_)
+HostMem::HostMem(InputArray arr, AllocType alloc_type_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
arr.getMat().copyTo(*this);
}
+// Destructor: forwards to release(), which decrements the reference counter
+// and frees the buffer once no other header shares it.
inline
-CudaMem::~CudaMem()
+HostMem::~HostMem()
{
    release();
}
inline
-CudaMem& CudaMem::operator =(const CudaMem& m)
+HostMem& HostMem::operator =(const HostMem& m)
{
if (this != &m)
{
- CudaMem temp(m);
+ HostMem temp(m);
swap(temp);
}
}
inline
-void CudaMem::swap(CudaMem& b)
+void HostMem::swap(HostMem& b)
{
std::swap(flags, b.flags);
std::swap(rows, b.rows);
}
+// Deep copy: allocates a fresh buffer of the same size/type/alloc_type and
+// copies the data through a temporary Mat header over this buffer.
inline
-CudaMem CudaMem::clone() const
+HostMem HostMem::clone() const
{
-    CudaMem m(size(), type(), alloc_type);
+    HostMem m(size(), type(), alloc_type);
    createMatHeader().copyTo(m);
    return m;
}
+// Size-based overload: forwards to create(rows, cols, type).
inline
-void CudaMem::create(Size size_, int type_)
+void HostMem::create(Size size_, int type_)
{
    create(size_.height, size_.width, type_);
}
+// Returns a cv::Mat header aliasing the same host buffer (same data pointer
+// and step). Per the declaration's docs, reference counting is disabled for
+// this header — the HostMem retains ownership.
inline
-Mat CudaMem::createMatHeader() const
+Mat HostMem::createMatHeader() const
{
    return Mat(size(), type(), data, step);
}
+// True when the matrix rows are stored without gaps (CONTINUOUS_FLAG set).
inline
-bool CudaMem::isContinuous() const
+bool HostMem::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}
+// Size in bytes of one matrix element (all channels), decoded from flags.
inline
-size_t CudaMem::elemSize() const
+size_t HostMem::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}
+// Size in bytes of a single-channel element, decoded from flags.
inline
-size_t CudaMem::elemSize1() const
+size_t HostMem::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}
+// OpenCV type id (depth + channel count) stored in the flags field.
inline
-int CudaMem::type() const
+int HostMem::type() const
{
    return CV_MAT_TYPE(flags);
}
+// Element depth component of the type (e.g. CV_8U), decoded from flags.
inline
-int CudaMem::depth() const
+int HostMem::depth() const
{
    return CV_MAT_DEPTH(flags);
}
+// Number of channels per element, decoded from flags.
inline
-int CudaMem::channels() const
+int HostMem::channels() const
{
    return CV_MAT_CN(flags);
}
+// Row stride expressed in single-channel elements rather than bytes.
inline
-size_t CudaMem::step1() const
+size_t HostMem::step1() const
{
    return step / elemSize1();
}
+// Matrix dimensions as a Size (width = cols, height = rows).
inline
-Size CudaMem::size() const
+Size HostMem::size() const
{
    return Size(cols, rows);
}
+// True when no data buffer is attached to this header.
inline
-bool CudaMem::empty() const
+bool HostMem::empty() const
{
    return data == 0;
}
+// Free-function swap forwarding to the member swap, so swap(a, b) works on
+// HostMem values.
static inline
-void swap(CudaMem& a, CudaMem& b)
+void swap(HostMem& a, HostMem& b)
{
    a.swap(b);
}
-//////////////////////////////// Stream ///////////////////////////////
+//===================================================================================
+// Stream
+//===================================================================================
inline
Stream::Stream(const Ptr<Impl>& impl)
{
}
-//////////////////////////////// Initialization & Info ////////////////////////
+//===================================================================================
+// Initialization & Info
+//===================================================================================
inline
bool TargetArchs::has(int major, int minor)
}} // namespace cv { namespace cuda {
-//////////////////////////////// Mat ////////////////////////////////
+//===================================================================================
+// Mat
+//===================================================================================
namespace cv {
STD_VECTOR_MAT = 5 << KIND_SHIFT,
EXPR = 6 << KIND_SHIFT,
OPENGL_BUFFER = 7 << KIND_SHIFT,
- CUDA_MEM = 8 << KIND_SHIFT,
- GPU_MAT = 9 << KIND_SHIFT,
+ CUDA_HOST_MEM = 8 << KIND_SHIFT,
+ CUDA_GPU_MAT = 9 << KIND_SHIFT,
UMAT =10 << KIND_SHIFT,
STD_VECTOR_UMAT =11 << KIND_SHIFT
};
_InputArray(const double& val);
_InputArray(const cuda::GpuMat& d_mat);
_InputArray(const ogl::Buffer& buf);
- _InputArray(const cuda::CudaMem& cuda_mem);
+ _InputArray(const cuda::HostMem& cuda_mem);
template<typename _Tp> _InputArray(const cudev::GpuMat_<_Tp>& m);
_InputArray(const UMat& um);
_InputArray(const std::vector<UMat>& umv);
_OutputArray(std::vector<Mat>& vec);
_OutputArray(cuda::GpuMat& d_mat);
_OutputArray(ogl::Buffer& buf);
- _OutputArray(cuda::CudaMem& cuda_mem);
+ _OutputArray(cuda::HostMem& cuda_mem);
template<typename _Tp> _OutputArray(cudev::GpuMat_<_Tp>& m);
template<typename _Tp> _OutputArray(std::vector<_Tp>& vec);
template<typename _Tp> _OutputArray(std::vector<std::vector<_Tp> >& vec);
_OutputArray(const std::vector<Mat>& vec);
_OutputArray(const cuda::GpuMat& d_mat);
_OutputArray(const ogl::Buffer& buf);
- _OutputArray(const cuda::CudaMem& cuda_mem);
+ _OutputArray(const cuda::HostMem& cuda_mem);
template<typename _Tp> _OutputArray(const cudev::GpuMat_<_Tp>& m);
template<typename _Tp> _OutputArray(const std::vector<_Tp>& vec);
template<typename _Tp> _OutputArray(const std::vector<std::vector<_Tp> >& vec);
virtual UMat& getUMatRef(int i=-1) const;
virtual cuda::GpuMat& getGpuMatRef() const;
virtual ogl::Buffer& getOGlBufferRef() const;
- virtual cuda::CudaMem& getCudaMemRef() const;
+ virtual cuda::HostMem& getHostMemRef() const;
virtual void create(Size sz, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
_InputOutputArray(std::vector<Mat>& vec);
_InputOutputArray(cuda::GpuMat& d_mat);
_InputOutputArray(ogl::Buffer& buf);
- _InputOutputArray(cuda::CudaMem& cuda_mem);
+ _InputOutputArray(cuda::HostMem& cuda_mem);
template<typename _Tp> _InputOutputArray(cudev::GpuMat_<_Tp>& m);
template<typename _Tp> _InputOutputArray(std::vector<_Tp>& vec);
template<typename _Tp> _InputOutputArray(std::vector<std::vector<_Tp> >& vec);
_InputOutputArray(const std::vector<Mat>& vec);
_InputOutputArray(const cuda::GpuMat& d_mat);
_InputOutputArray(const ogl::Buffer& buf);
- _InputOutputArray(const cuda::CudaMem& cuda_mem);
+ _InputOutputArray(const cuda::HostMem& cuda_mem);
template<typename _Tp> _InputOutputArray(const cudev::GpuMat_<_Tp>& m);
template<typename _Tp> _InputOutputArray(const std::vector<_Tp>& vec);
template<typename _Tp> _InputOutputArray(const std::vector<std::vector<_Tp> >& vec);
{ init(FIXED_TYPE + FIXED_SIZE + EXPR + ACCESS_READ, &expr); }
+// _InputArray wrappers for CUDA types: each stores the object pointer plus a
+// kind/access flag via init(). The kind constants follow the rename
+// (GPU_MAT -> CUDA_GPU_MAT, CUDA_MEM -> CUDA_HOST_MEM).
inline _InputArray::_InputArray(const cuda::GpuMat& d_mat)
-{ init(GPU_MAT + ACCESS_READ, &d_mat); }
+{ init(CUDA_GPU_MAT + ACCESS_READ, &d_mat); }
inline _InputArray::_InputArray(const ogl::Buffer& buf)
{ init(OPENGL_BUFFER + ACCESS_READ, &buf); }
-inline _InputArray::_InputArray(const cuda::CudaMem& cuda_mem)
-{ init(CUDA_MEM + ACCESS_READ, &cuda_mem); }
+inline _InputArray::_InputArray(const cuda::HostMem& cuda_mem)
+{ init(CUDA_HOST_MEM + ACCESS_READ, &cuda_mem); }
inline _InputArray::~_InputArray() {}
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_WRITE, vec, Size(n, 1)); }
inline _OutputArray::_OutputArray(cuda::GpuMat& d_mat)
-{ init(GPU_MAT + ACCESS_WRITE, &d_mat); }
+{ init(CUDA_GPU_MAT + ACCESS_WRITE, &d_mat); }
inline _OutputArray::_OutputArray(ogl::Buffer& buf)
{ init(OPENGL_BUFFER + ACCESS_WRITE, &buf); }
-inline _OutputArray::_OutputArray(cuda::CudaMem& cuda_mem)
-{ init(CUDA_MEM + ACCESS_WRITE, &cuda_mem); }
+inline _OutputArray::_OutputArray(cuda::HostMem& cuda_mem)
+{ init(CUDA_HOST_MEM + ACCESS_WRITE, &cuda_mem); }
inline _OutputArray::_OutputArray(const Mat& m)
{ init(FIXED_TYPE + FIXED_SIZE + MAT + ACCESS_WRITE, &m); }
{ init(FIXED_SIZE + STD_VECTOR_UMAT + ACCESS_WRITE, &vec); }
inline _OutputArray::_OutputArray(const cuda::GpuMat& d_mat)
-{ init(FIXED_TYPE + FIXED_SIZE + GPU_MAT + ACCESS_WRITE, &d_mat); }
+{ init(FIXED_TYPE + FIXED_SIZE + CUDA_GPU_MAT + ACCESS_WRITE, &d_mat); }
inline _OutputArray::_OutputArray(const ogl::Buffer& buf)
{ init(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER + ACCESS_WRITE, &buf); }
-inline _OutputArray::_OutputArray(const cuda::CudaMem& cuda_mem)
-{ init(FIXED_TYPE + FIXED_SIZE + CUDA_MEM + ACCESS_WRITE, &cuda_mem); }
+inline _OutputArray::_OutputArray(const cuda::HostMem& cuda_mem)
+{ init(FIXED_TYPE + FIXED_SIZE + CUDA_HOST_MEM + ACCESS_WRITE, &cuda_mem); }
///////////////////////////////////////////////////////////////////////////////////////////
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_RW, vec, Size(n, 1)); }
inline _InputOutputArray::_InputOutputArray(cuda::GpuMat& d_mat)
-{ init(GPU_MAT + ACCESS_RW, &d_mat); }
+{ init(CUDA_GPU_MAT + ACCESS_RW, &d_mat); }
inline _InputOutputArray::_InputOutputArray(ogl::Buffer& buf)
{ init(OPENGL_BUFFER + ACCESS_RW, &buf); }
-inline _InputOutputArray::_InputOutputArray(cuda::CudaMem& cuda_mem)
-{ init(CUDA_MEM + ACCESS_RW, &cuda_mem); }
+inline _InputOutputArray::_InputOutputArray(cuda::HostMem& cuda_mem)
+{ init(CUDA_HOST_MEM + ACCESS_RW, &cuda_mem); }
inline _InputOutputArray::_InputOutputArray(const Mat& m)
{ init(FIXED_TYPE + FIXED_SIZE + MAT + ACCESS_RW, &m); }
{ init(FIXED_SIZE + STD_VECTOR_UMAT + ACCESS_RW, &vec); }
inline _InputOutputArray::_InputOutputArray(const cuda::GpuMat& d_mat)
-{ init(FIXED_TYPE + FIXED_SIZE + GPU_MAT + ACCESS_RW, &d_mat); }
+{ init(FIXED_TYPE + FIXED_SIZE + CUDA_GPU_MAT + ACCESS_RW, &d_mat); }
inline _InputOutputArray::_InputOutputArray(const ogl::Buffer& buf)
{ init(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER + ACCESS_RW, &buf); }
-inline _InputOutputArray::_InputOutputArray(const cuda::CudaMem& cuda_mem)
-{ init(FIXED_TYPE + FIXED_SIZE + CUDA_MEM + ACCESS_RW, &cuda_mem); }
+inline _InputOutputArray::_InputOutputArray(const cuda::HostMem& cuda_mem)
+{ init(FIXED_TYPE + FIXED_SIZE + CUDA_HOST_MEM + ACCESS_RW, &cuda_mem); }
//////////////////////////////////////////// Mat //////////////////////////////////////////
::createContinuousImpl(rows, cols, type, arr.getMatRef());
break;
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
::createContinuousImpl(rows, cols, type, arr.getGpuMatRef());
break;
- case _InputArray::CUDA_MEM:
- ::createContinuousImpl(rows, cols, type, arr.getCudaMemRef());
+ case _InputArray::CUDA_HOST_MEM:
+ ::createContinuousImpl(rows, cols, type, arr.getHostMemRef());
break;
default:
::ensureSizeIsEnoughImpl(rows, cols, type, arr.getMatRef());
break;
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
::ensureSizeIsEnoughImpl(rows, cols, type, arr.getGpuMatRef());
break;
- case _InputArray::CUDA_MEM:
- ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getCudaMemRef());
+ case _InputArray::CUDA_HOST_MEM:
+ ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getHostMemRef());
break;
default:
}
#endif
-void cv::cuda::CudaMem::create(int rows_, int cols_, int type_)
+void cv::cuda::HostMem::create(int rows_, int cols_, int type_)
{
#ifndef HAVE_CUDA
(void) rows_;
#endif
}
-CudaMem cv::cuda::CudaMem::reshape(int new_cn, int new_rows) const
+HostMem cv::cuda::HostMem::reshape(int new_cn, int new_rows) const
{
- CudaMem hdr = *this;
+ HostMem hdr = *this;
int cn = channels();
if (new_cn == 0)
return hdr;
}
-void cv::cuda::CudaMem::release()
+void cv::cuda::HostMem::release()
{
#ifdef HAVE_CUDA
if (refcount && CV_XADD(refcount, -1) == 1)
#endif
}
-GpuMat cv::cuda::CudaMem::createGpuMatHeader() const
+GpuMat cv::cuda::HostMem::createGpuMatHeader() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
return Mat();
}
- if( k == GPU_MAT )
+ if( k == CUDA_GPU_MAT )
{
CV_Assert( i < 0 );
CV_Error(cv::Error::StsNotImplemented, "You should explicitly call download method for cuda::GpuMat object");
return Mat();
}
- if( k == CUDA_MEM )
+ if( k == CUDA_HOST_MEM )
{
CV_Assert( i < 0 );
- const cuda::CudaMem* cuda_mem = (const cuda::CudaMem*)obj;
+ const cuda::HostMem* cuda_mem = (const cuda::HostMem*)obj;
return cuda_mem->createMatHeader();
}
{
int k = kind();
- if (k == GPU_MAT)
+ if (k == CUDA_GPU_MAT)
{
const cuda::GpuMat* d_mat = (const cuda::GpuMat*)obj;
return *d_mat;
}
- if (k == CUDA_MEM)
+ if (k == CUDA_HOST_MEM)
{
- const cuda::CudaMem* cuda_mem = (const cuda::CudaMem*)obj;
+ const cuda::HostMem* cuda_mem = (const cuda::HostMem*)obj;
return cuda_mem->createGpuMatHeader();
}
if (k == NONE)
return cuda::GpuMat();
- CV_Error(cv::Error::StsNotImplemented, "getGpuMat is available only for cuda::GpuMat and cuda::CudaMem");
+ CV_Error(cv::Error::StsNotImplemented, "getGpuMat is available only for cuda::GpuMat and cuda::HostMem");
return cuda::GpuMat();
}
return buf->size();
}
- if( k == GPU_MAT )
+ if( k == CUDA_GPU_MAT )
{
CV_Assert( i < 0 );
const cuda::GpuMat* d_mat = (const cuda::GpuMat*)obj;
return d_mat->size();
}
- CV_Assert( k == CUDA_MEM );
- //if( k == CUDA_MEM )
+ CV_Assert( k == CUDA_HOST_MEM );
+ //if( k == CUDA_HOST_MEM )
{
CV_Assert( i < 0 );
- const cuda::CudaMem* cuda_mem = (const cuda::CudaMem*)obj;
+ const cuda::HostMem* cuda_mem = (const cuda::HostMem*)obj;
return cuda_mem->size();
}
}
return 2;
}
- if( k == GPU_MAT )
+ if( k == CUDA_GPU_MAT )
{
CV_Assert( i < 0 );
return 2;
}
- CV_Assert( k == CUDA_MEM );
- //if( k == CUDA_MEM )
+ CV_Assert( k == CUDA_HOST_MEM );
+ //if( k == CUDA_HOST_MEM )
{
CV_Assert( i < 0 );
return 2;
if( k == OPENGL_BUFFER )
return ((const ogl::Buffer*)obj)->type();
- if( k == GPU_MAT )
+ if( k == CUDA_GPU_MAT )
return ((const cuda::GpuMat*)obj)->type();
- CV_Assert( k == CUDA_MEM );
- //if( k == CUDA_MEM )
- return ((const cuda::CudaMem*)obj)->type();
+ CV_Assert( k == CUDA_HOST_MEM );
+ //if( k == CUDA_HOST_MEM )
+ return ((const cuda::HostMem*)obj)->type();
}
int _InputArray::depth(int i) const
if( k == OPENGL_BUFFER )
return ((const ogl::Buffer*)obj)->empty();
- if( k == GPU_MAT )
+ if( k == CUDA_GPU_MAT )
return ((const cuda::GpuMat*)obj)->empty();
- CV_Assert( k == CUDA_MEM );
- //if( k == CUDA_MEM )
- return ((const cuda::CudaMem*)obj)->empty();
+ CV_Assert( k == CUDA_HOST_MEM );
+ //if( k == CUDA_HOST_MEM )
+ return ((const cuda::HostMem*)obj)->empty();
}
bool _InputArray::isContinuous(int i) const
return vv[i].offset;
}
- if( k == GPU_MAT )
+ if( k == CUDA_GPU_MAT )
{
CV_Assert( i < 0 );
const cuda::GpuMat * const m = ((const cuda::GpuMat*)obj);
return vv[i].step;
}
- if( k == GPU_MAT )
+ if( k == CUDA_GPU_MAT )
{
CV_Assert( i < 0 );
return ((const cuda::GpuMat*)obj)->step;
((UMat*)obj)->create(_sz, mtype);
return;
}
- if( k == GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
+ if( k == CUDA_GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((cuda::GpuMat*)obj)->size() == _sz);
CV_Assert(!fixedType() || ((cuda::GpuMat*)obj)->type() == mtype);
((ogl::Buffer*)obj)->create(_sz, mtype);
return;
}
- if( k == CUDA_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
+ if( k == CUDA_HOST_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
- CV_Assert(!fixedSize() || ((cuda::CudaMem*)obj)->size() == _sz);
- CV_Assert(!fixedType() || ((cuda::CudaMem*)obj)->type() == mtype);
- ((cuda::CudaMem*)obj)->create(_sz, mtype);
+ CV_Assert(!fixedSize() || ((cuda::HostMem*)obj)->size() == _sz);
+ CV_Assert(!fixedType() || ((cuda::HostMem*)obj)->type() == mtype);
+ ((cuda::HostMem*)obj)->create(_sz, mtype);
return;
}
int sizes[] = {_sz.height, _sz.width};
((UMat*)obj)->create(_rows, _cols, mtype);
return;
}
- if( k == GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
+ if( k == CUDA_GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((cuda::GpuMat*)obj)->size() == Size(_cols, _rows));
CV_Assert(!fixedType() || ((cuda::GpuMat*)obj)->type() == mtype);
((ogl::Buffer*)obj)->create(_rows, _cols, mtype);
return;
}
- if( k == CUDA_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
+ if( k == CUDA_HOST_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
- CV_Assert(!fixedSize() || ((cuda::CudaMem*)obj)->size() == Size(_cols, _rows));
- CV_Assert(!fixedType() || ((cuda::CudaMem*)obj)->type() == mtype);
- ((cuda::CudaMem*)obj)->create(_rows, _cols, mtype);
+ CV_Assert(!fixedSize() || ((cuda::HostMem*)obj)->size() == Size(_cols, _rows));
+ CV_Assert(!fixedType() || ((cuda::HostMem*)obj)->type() == mtype);
+ ((cuda::HostMem*)obj)->create(_rows, _cols, mtype);
return;
}
int sizes[] = {_rows, _cols};
return;
}
- if( k == GPU_MAT )
+ if( k == CUDA_GPU_MAT )
{
((cuda::GpuMat*)obj)->release();
return;
}
- if( k == CUDA_MEM )
+ if( k == CUDA_HOST_MEM )
{
- ((cuda::CudaMem*)obj)->release();
+ ((cuda::HostMem*)obj)->release();
return;
}
+// Returns a mutable reference to the wrapped GpuMat; asserts the wrapped
+// kind is CUDA_GPU_MAT (renamed from GPU_MAT) before casting the stored
+// object pointer.
cuda::GpuMat& _OutputArray::getGpuMatRef() const
{
    int k = kind();
-    CV_Assert( k == GPU_MAT );
+    CV_Assert( k == CUDA_GPU_MAT );
    return *(cuda::GpuMat*)obj;
}
return *(ogl::Buffer*)obj;
}
+// Renamed accessor (was getCudaMemRef): returns a mutable reference to the
+// wrapped HostMem; asserts the wrapped kind is CUDA_HOST_MEM before casting
+// the stored object pointer.
-cuda::CudaMem& _OutputArray::getCudaMemRef() const
+cuda::HostMem& _OutputArray::getHostMemRef() const
{
    int k = kind();
-    CV_Assert( k == CUDA_MEM );
-    return *(cuda::CudaMem*)obj;
+    CV_Assert( k == CUDA_HOST_MEM );
+    return *(cuda::HostMem*)obj;
}
void _OutputArray::setTo(const _InputArray& arr, const _InputArray & mask) const
}
else if( k == UMAT )
((UMat*)obj)->setTo(arr, mask);
- else if( k == GPU_MAT )
+ else if( k == CUDA_GPU_MAT )
{
Mat value = arr.getMat();
- CV_Assert( checkScalar(value, type(), arr.kind(), _InputArray::GPU_MAT) );
+ CV_Assert( checkScalar(value, type(), arr.kind(), _InputArray::CUDA_GPU_MAT) );
((cuda::GpuMat*)obj)->setTo(Scalar(Vec<double, 4>(value.ptr<double>())), mask);
}
else
switch (kind)
{
case _InputArray::OPENGL_BUFFER:
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
copyFrom(arr, target, autoRelease);
break;
break;
}
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
{
#ifndef HAVE_CUDA
throw_no_cuda();
break;
}
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
{
#ifndef HAVE_CUDA
throw_no_cuda();
break;
}
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
{
#ifndef HAVE_CUDA
throw_no_cuda();
break;
}
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
{
#ifndef HAVE_CUDA
throw_no_cuda();
break;
}
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
{
#ifndef HAVE_CUDA
throw_no_cuda();
struct Async : testing::TestWithParam<cv::cuda::DeviceInfo>
{
- cv::cuda::CudaMem src;
+ cv::cuda::HostMem src;
cv::cuda::GpuMat d_src;
- cv::cuda::CudaMem dst;
+ cv::cuda::HostMem dst;
cv::cuda::GpuMat d_dst;
virtual void SetUp()
cv::cuda::DeviceInfo devInfo = GetParam();
cv::cuda::setDevice(devInfo.deviceID());
- src = cv::cuda::CudaMem(cv::cuda::CudaMem::PAGE_LOCKED);
+ src = cv::cuda::HostMem(cv::cuda::HostMem::PAGE_LOCKED);
cv::Mat m = randomMat(cv::Size(128, 128), CV_8UC1);
m.copyTo(src);
Async* test = reinterpret_cast<Async*>(userData);
- cv::cuda::CudaMem src = test->src;
- cv::cuda::CudaMem dst = test->dst;
+ cv::cuda::HostMem src = test->src;
+ cv::cuda::HostMem dst = test->dst;
cv::Mat dst_gold = cv::Mat::zeros(src.size(), src.type());
Async* test = reinterpret_cast<Async*>(userData);
- cv::cuda::CudaMem src = test->src;
- cv::cuda::CudaMem dst = test->dst;
+ cv::cuda::HostMem src = test->src;
+ cv::cuda::HostMem dst = test->dst;
cv::Mat dst_gold;
src.createMatHeader().convertTo(dst_gold, CV_32S);
LookUpTableImpl::LookUpTableImpl(InputArray _lut)
{
- if (_lut.kind() == _InputArray::GPU_MAT)
+ if (_lut.kind() == _InputArray::CUDA_GPU_MAT)
{
d_lut = _lut.getGpuMat();
}
_levels.create(1, nLevels, CV_32SC1);
Mat host_levels;
- if (kind == _InputArray::GPU_MAT)
+ if (kind == _InputArray::CUDA_GPU_MAT)
host_levels.create(1, nLevels, CV_32SC1);
else
host_levels = _levels.getMat();
nppSafeCall( nppiEvenLevelsHost_32s(host_levels.ptr<Npp32s>(), nLevels, lowerLevel, upperLevel) );
- if (kind == _InputArray::GPU_MAT)
+ if (kind == _InputArray::CUDA_GPU_MAT)
_levels.getGpuMatRef().upload(host_levels);
}
template<typename _Tp>
__host__ _InputArray::_InputArray(const cudev::GpuMat_<_Tp>& m)
- : flags(FIXED_TYPE + GPU_MAT + DataType<_Tp>::type), obj((void*)&m)
+ : flags(FIXED_TYPE + CUDA_GPU_MAT + DataType<_Tp>::type), obj((void*)&m)
{}
template<typename _Tp>
cv::ogl::Texture2D& tex = ownWndTexs[winname];
- if (_img.kind() == _InputArray::GPU_MAT)
+ if (_img.kind() == _InputArray::CUDA_GPU_MAT)
{
cv::ogl::Buffer& buf = ownWndBufs[winname];
buf.copyFrom(_img);
++outPos_;
const GpuMat& curOutput = at(outPos_, outputs_);
- if (_output.kind() == _InputArray::GPU_MAT)
+ if (_output.kind() == _InputArray::CUDA_GPU_MAT)
curOutput.convertTo(_output.getGpuMatRef(), CV_8U);
else
{
{
if (_frame.kind() == _InputArray::MAT)
vc_ >> _frame.getMatRef();
- else if(_frame.kind() == _InputArray::GPU_MAT)
+ else if(_frame.kind() == _InputArray::CUDA_GPU_MAT)
{
vc_ >> frame_;
arrCopy(frame_, _frame);
void VideoFrameSource_CUDA::nextFrame(OutputArray _frame)
{
- if (_frame.kind() == _InputArray::GPU_MAT)
+ if (_frame.kind() == _InputArray::CUDA_GPU_MAT)
{
bool res = reader_->nextFrame(_frame.getGpuMatRef());
if (!res)
{
switch (arr.kind())
{
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
arr.getGpuMat().download(buf);
return buf;
{
switch (arr.kind())
{
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
arr.getGpuMat().download(buf);
return buf;
{
switch (arr.kind())
{
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
return arr.getGpuMat();
case _InputArray::OPENGL_BUFFER:
switch (src.kind())
{
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
#ifdef HAVE_OPENCV_CUDAIMGPROC
cuda::cvtColor(src.getGpuMat(), dst.getGpuMatRef(), code, cn);
#else
switch (src.kind())
{
- case _InputArray::GPU_MAT:
+ case _InputArray::CUDA_GPU_MAT:
src.getGpuMat().convertTo(dst.getGpuMatRef(), depth, scale);
break;
GpuMat input0 = convertToType(frame0, work_type_, buf_[2], buf_[3]);
GpuMat input1 = convertToType(frame1, work_type_, buf_[4], buf_[5]);
- if (_flow2.needed() && _flow1.kind() == _InputArray::GPU_MAT && _flow2.kind() == _InputArray::GPU_MAT)
+ if (_flow2.needed() && _flow1.kind() == _InputArray::CUDA_GPU_MAT && _flow2.kind() == _InputArray::CUDA_GPU_MAT)
{
impl(input0, input1, _flow1.getGpuMatRef(), _flow2.getGpuMatRef());
return;
Mat getMat(InputArray arr)
{
- if (arr.kind() == _InputArray::GPU_MAT)
+ if (arr.kind() == _InputArray::CUDA_GPU_MAT)
{
Mat m;
arr.getGpuMat().download(m);
StereoMultiGpuStream();
~StereoMultiGpuStream();
- void compute(const CudaMem& leftFrame, const CudaMem& rightFrame, CudaMem& disparity);
+ void compute(const HostMem& leftFrame, const HostMem& rightFrame, HostMem& disparity);
private:
GpuMat d_leftFrames[2];
streams[1].release();
}
-void StereoMultiGpuStream::compute(const CudaMem& leftFrame, const CudaMem& rightFrame, CudaMem& disparity)
+void StereoMultiGpuStream::compute(const HostMem& leftFrame, const HostMem& rightFrame, HostMem& disparity)
{
disparity.create(leftFrame.size(), CV_8UC1);
cout << endl;
Mat leftFrame, rightFrame;
- CudaMem leftGrayFrame, rightGrayFrame;
+ HostMem leftGrayFrame, rightGrayFrame;
StereoSingleGpu gpu0Alg(0);
StereoSingleGpu gpu1Alg(1);
Mat disparityGpu0;
Mat disparityGpu1;
Mat disparityMultiThread;
- CudaMem disparityMultiStream;
+ HostMem disparityMultiStream;
Mat disparityGpu0Show;
Mat disparityGpu1Show;