-\r
-set(name "gpu") \r
+set(name "gpu")\r
set(DEPS "opencv_core")\r
- \r
+\r
\r
set(the_target "opencv_${name}")\r
\r
"${CMAKE_CURRENT_BINARY_DIR}")\r
\r
foreach(d ${DEPS})\r
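+    # Map each "opencv_<module>" dependency to its sibling source directory and add its include path.\r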
- if(${d} MATCHES "opencv_") \r
+ if(${d} MATCHES "opencv_")\r
string(REPLACE "opencv_" "${CMAKE_CURRENT_SOURCE_DIR}/../" d_dir ${d})\r
- include_directories("${d_dir}/include") \r
+ include_directories("${d_dir}/include")\r
endif()\r
-endforeach() \r
+endforeach()\r
\r
file(GLOB lib_srcs "src/*.cpp")\r
file(GLOB lib_int_hdrs "src/*.h*")\r
file(GLOB lib_cuda "cuda/*.cu")\r
-file(GLOB lib_cuda_hdrs "cuda/*.h*") \r
+file(GLOB lib_cuda_hdrs "cuda/*.h*")\r
source_group("Src" FILES ${lib_srcs} ${lib_int_hdrs})\r
source_group("Cuda" FILES ${lib_cuda} ${lib_cuda_hdrs})\r
\r
-file(GLOB lib_hdrs "include/opencv2/${name}/*.h*") \r
+file(GLOB lib_hdrs "include/opencv2/${name}/*.h*")\r
source_group("Include" FILES ${lib_hdrs})\r
\r
if (HAVE_CUDA)\r
if (UNIX OR APPLE)\r
set (CUDA_NVCC_FLAGS "-Xcompiler;-fPIC")\r
endif()\r
- \r
+\r
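+    # FindCUDA's CUDA_COMPILE runs nvcc on the .cu sources and collects the generated object files in cuda_objs.\r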
CUDA_COMPILE(cuda_objs ${lib_cuda})\r
#CUDA_BUILD_CLEAN_TARGET()\r
endif()\r
- \r
+\r
\r
add_library(${the_target} ${lib_srcs} ${lib_hdrs} ${lib_int_hdrs} ${lib_cuda} ${lib_cuda_hdrs} ${cuda_objs})\r
\r
if(${CMAKE_GENERATOR} MATCHES "Visual*" OR ${CMAKE_GENERATOR} MATCHES "Xcode*")\r
if(${CMAKE_GENERATOR} MATCHES "Visual*")\r
set(${the_target}_pch "src/precomp.cpp")\r
- endif() \r
+ endif()\r
add_native_precompiled_header(${the_target} ${pch_header})\r
elseif(CMAKE_COMPILER_IS_GNUCXX AND ${CMAKE_GENERATOR} MATCHES ".*Makefiles")\r
add_precompiled_header(${the_target} ${pch_header})\r
namespace cv\r
{\r
namespace gpu\r
- { \r
+ {\r
typedef unsigned char uchar;\r
typedef unsigned short ushort;\r
- typedef unsigned int uint; \r
+ typedef unsigned int uint;\r
\r
extern "C" void error( const char *error_string, const char *file, const int line, const char *func = "");\r
\r
namespace impl\r
- { \r
+ {\r
static inline int divUp(int a, int b) { return (a % b == 0) ? a/b : a/b + 1; }\r
\r
extern "C" void stereoBM_GPU(const DevMem2D& left, const DevMem2D& right, DevMem2D& disp, int maxdisp, DevMem2D_<uint>& minSSD_buf);\r
+\r
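+            // Device-side fill helpers: write the per-channel scalar into every element of mat,\r
+            // either unconditionally or only where the mask is non-zero.\r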
+ extern "C" void set_to_without_mask (const DevMem2D& mat, const double * scalar, int depth, int channels);\r
+ extern "C" void set_to_with_mask (const DevMem2D& mat, const double * scalar, const DevMem2D& mask, int depth, int channels);\r
}\r
}\r
}\r
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "cuda_shared.hpp"
+#include "cuda_runtime.h"
+
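+// Per-channel fill value, staged in __constant__ memory so every thread can read it; written from the host via cudaMemcpyToSymbol.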
+__constant__ float scalar_d[4];
+
+namespace mat_operators
+{
+ template <typename T, int channels>
+ __global__ void kernel_set_to_without_mask(T * mat)
+ {
+        // One thread per element of the pitched buffer; the launcher sizes the grid accordingly.
+        // Assumes the row pitch is a multiple of channels * sizeof(T), so i % channels stays aligned.
+        int i = blockIdx.x * blockDim.x + threadIdx.x;
+        mat[i] = static_cast<T>(scalar_d[i % channels]);
+ }
+}
+
+
+extern "C" void cv::gpu::impl::set_to_with_mask(const DevMem2D& mat, const double * scalar, const DevMem2D& mask, int depth, int channels)
+{
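+    // TODO: masked fill is not implemented yet. A possible approach (sketch only): launch one thread
+    // per element, as in kernel_set_to_without_mask, and skip elements whose mask byte is zero.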
+
+}
+
+extern "C" void cv::gpu::impl::set_to_without_mask(const DevMem2D& mat, const double * scalar, int depth, int channels)
+{
+    // __constant__ memory cannot be written directly from host code; copy the (converted) scalar to the symbol.
+    float data[4] = { (float)scalar[0], (float)scalar[1], (float)scalar[2], (float)scalar[3] };
+    cudaMemcpyToSymbol(scalar_d, data, sizeof(data));
+
+    // One thread per element. For the depths dispatched below (1, 2 and 4) the depth code
+    // coincides with the element size in bytes, and step already accounts for row padding.
+    int numBlocks = mat.rows * mat.step / (256 * depth);
+
+ dim3 threadsPerBlock(256);
+
+ if (channels == 1)
+ {
+ if (depth == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 1><<<numBlocks,threadsPerBlock>>>(mat.ptr);
+ if (depth == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 1><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr);
+ if (depth == 4) ::mat_operators::kernel_set_to_without_mask<unsigned int, 1><<<numBlocks,threadsPerBlock>>>((unsigned int *)mat.ptr);
+ }
+ if (channels == 2)
+ {
+ if (depth == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 2><<<numBlocks,threadsPerBlock>>>(mat.ptr);
+ if (depth == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 2><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr);
+ if (depth == 4) ::mat_operators::kernel_set_to_without_mask<unsigned int, 2><<<numBlocks,threadsPerBlock>>>((unsigned int *)mat.ptr);
+ }
+ if (channels == 3)
+ {
+ if (depth == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 3><<<numBlocks,threadsPerBlock>>>(mat.ptr);
+ if (depth == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 3><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr);
+ if (depth == 4) ::mat_operators::kernel_set_to_without_mask<unsigned int, 3><<<numBlocks,threadsPerBlock>>>((unsigned int *)mat.ptr);
+ }
+}
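+
+// Usage sketch (for illustration; mirrors the GpuMat::operator=(Scalar) call added elsewhere in this change):
+//     cv::gpu::impl::set_to_without_mask(gpu_mat, scalar.val, gpu_mat.depth(), gpu_mat.channels());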
//M*/\r
\r
#include "precomp.hpp"\r
-#include "opencv2/gpu/stream_access.hpp"\r
+//#include "opencv2/gpu/stream_access.hpp"\r
\r
using namespace cv;\r
using namespace cv::gpu;\r
\r
\r
-cv::gpu::CudaStream::CudaStream() : impl( (Impl*)fastMalloc(sizeof(Impl)) )\r
+cv::gpu::CudaStream::CudaStream() : impl(0) //: impl( (Impl*)fastMalloc(sizeof(Impl)) )\r
{\r
//cudaSafeCall( cudaStreamCreate( &impl->stream) );\r
}\r
-cv::gpu::CudaStream::~CudaStream() \r
-{ \r
+cv::gpu::CudaStream::~CudaStream()\r
+{\r
if (impl)\r
{\r
cudaSafeCall( cudaStreamDestroy( *(cudaStream_t*)impl ) );\r
cv::fastFree( impl );\r
- } \r
+ }\r
}\r
\r
bool cv::gpu::CudaStream::queryIfComplete()\r
//if (err == cudaErrorNotReady)\r
// return false;\r
\r
- ////cudaErrorInvalidResourceHandle \r
- //cudaSafeCall( err ); \r
+ ////cudaErrorInvalidResourceHandle\r
+ //cudaSafeCall( err );\r
return true;\r
}\r
void cv::gpu::CudaStream::waitForCompletion()\r
\r
void cv::gpu::CudaStream::enqueueDownload(const GpuMat& src, Mat& dst)\r
{\r
-// cudaMemcpy2DAsync(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyDeviceToHost, \r
+// cudaMemcpy2DAsync(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyDeviceToHost,\r
}\r
void cv::gpu::CudaStream::enqueueUpload(const Mat& src, GpuMat& dst)\r
{\r
\r
//struct cudaStream_t& cv::gpu::CudaStream::getStream() { return stream; }\r
\r
- \r
+\r
cudaSafeCall( cudaMemcpy2D(m.data, m.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice) );\r
cudaSafeCall( cudaThreadSynchronize() );\r
}\r
- \r
+\r
void GpuMat::copyTo( GpuMat& /*m*/, const GpuMat&/* mask */) const\r
-{ \r
+{\r
CV_Assert(!"Not implemented");\r
}\r
- \r
+\r
void GpuMat::convertTo( GpuMat& /*m*/, int /*rtype*/, double /*alpha*/, double /*beta*/ ) const\r
{\r
CV_Assert(!"Not implemented");\r
}\r
\r
-GpuMat& GpuMat::operator = (const Scalar& /*s*/)\r
+GpuMat& GpuMat::operator = (const Scalar& s)\r
{\r
- CV_Assert(!"Not implemented"); \r
+    cv::gpu::impl::set_to_without_mask(*this, s.val, this->depth(), this->channels());\r
return *this;\r
}\r
\r
-GpuMat& GpuMat::setTo(const Scalar& /*s*/, const GpuMat& /*mask*/)\r
+GpuMat& GpuMat::setTo(const Scalar& s, const GpuMat& mask)\r
{\r
- CV_Assert(!"Not implemented"); \r
+    CV_DbgAssert(!this->empty());\r
+\r
+ if (mask.empty())\r
+ {\r
+ cv::gpu::impl::set_to_without_mask(*this, s.val, this->depth(), this->channels());\r
+ }\r
+ else\r
+ {\r
+ cv::gpu::impl::set_to_with_mask(*this, s.val, mask, this->depth(), this->channels());\r
+ }\r
+\r
return *this;\r
}\r
\r
rows = _rows;\r
cols = _cols;\r
\r
- size_t esz = elemSize(); \r
- \r
+ size_t esz = elemSize();\r
+\r
void *dev_ptr;\r
cudaSafeCall( cudaMallocPitch(&dev_ptr, &step, esz * cols, rows) );\r
\r
\r
int64 _nettosize = (int64)step*rows;\r
size_t nettosize = (size_t)_nettosize;\r
- \r
+\r
datastart = data = (uchar*)dev_ptr;\r
- dataend = data + nettosize; \r
- \r
+ dataend = data + nettosize;\r
+\r
refcount = (int*)fastMalloc(sizeof(*refcount));\r
*refcount = 1;\r
}\r
if( refcount && CV_XADD(refcount, -1) == 1 )\r
{\r
fastFree(refcount);\r
- cudaSafeCall( cudaFree(datastart) ); \r
+ cudaSafeCall( cudaFree(datastart) );\r
}\r
data = datastart = dataend = 0;\r
step = rows = cols = 0;\r