//#undef __CV_GPU_DEPR_BEFORE__
//#undef __CV_GPU_DEPR_AFTER__
+ namespace device
+ {
+ using cv::gpu::PtrSz;
+ using cv::gpu::PtrStep;
+ using cv::gpu::PtrStepSz;
+
+ using cv::gpu::PtrStepSzb;
+ using cv::gpu::PtrStepSzf;
+ using cv::gpu::PtrStepSzi;
+
+ using cv::gpu::PtrStepb;
+ using cv::gpu::PtrStepf;
+ using cv::gpu::PtrStepi;
+ }
}
}
namespace
{
- template <class T> void getCudaAttribute(T *attribute, CUdevice_attribute device_attribute, int device)
- {
- *attribute = T();
- //CUresult error = CUDA_SUCCESS;// = cuDeviceGetAttribute( attribute, device_attribute, device ); why link erros under ubuntu??
- CUresult error = cuDeviceGetAttribute( attribute, device_attribute, device );
- if( CUDA_SUCCESS == error )
- return;
-
- printf("Driver API error = %04d\n", error);
- cv::gpu::error("driver API error", __FILE__, __LINE__);
- }
-
int convertSMVer2Cores(int major, int minor)
{
    // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM)
    typedef struct {
        int SM; // 0xMm (hexadecimal notation), M = SM major version, m = SM minor version
        int Cores;
    } SMtoCores;
- SMtoCores gpuArchCoresPerSM[] = { { 0x10, 8 }, { 0x11, 8 }, { 0x12, 8 }, { 0x13, 8 }, { 0x20, 32 }, { 0x21, 48 }, {0x30, 192}, { -1, -1 } };
+ SMtoCores gpuArchCoresPerSM[] = { { 0x10, 8 }, { 0x11, 8 }, { 0x12, 8 }, { 0x13, 8 }, { 0x20, 32 }, { 0x21, 48 }, {0x30, 192}, {0x35, 192}, { -1, -1 } };
int index = 0;
while (gpuArchCoresPerSM[index].SM != -1)
{
    // e.g. SM 2.1 -> (2 << 4) + 1 == 0x21 -> 48 cores
    if (gpuArchCoresPerSM[index].SM == ((major << 4) + minor))
        return gpuArchCoresPerSM[index].Cores;
    index++;
}
- printf("MapSMtoCores undefined SMversion %d.%d!\n", major, minor);
+
return -1;
}
}
printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100);
printf(" CUDA Capability Major/Minor version number: %d.%d\n", prop.major, prop.minor);
printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)prop.totalGlobalMem/1048576.0f, (unsigned long long) prop.totalGlobalMem);
- printf(" (%2d) Multiprocessors x (%2d) CUDA Cores/MP: %d CUDA Cores\n",
- prop.multiProcessorCount, convertSMVer2Cores(prop.major, prop.minor),
- convertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount);
- printf(" GPU Clock Speed: %.2f GHz\n", prop.clockRate * 1e-6f);
- // This is not available in the CUDA Runtime API, so we make the necessary calls the driver API to support this for output
- int memoryClock, memBusWidth, L2CacheSize;
- getCudaAttribute<int>( &memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev );
- getCudaAttribute<int>( &memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev );
- getCudaAttribute<int>( &L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev );
+ int cores = convertSMVer2Cores(prop.major, prop.minor); // -1 when the SM version is unknown
+ if (cores > 0)
+ printf(" (%2d) Multiprocessors x (%2d) CUDA Cores/MP: %d CUDA Cores\n", prop.multiProcessorCount, cores, cores * prop.multiProcessorCount);
- printf(" Memory Clock rate: %.2f Mhz\n", memoryClock * 1e-3f);
- printf(" Memory Bus Width: %d-bit\n", memBusWidth);
- if (L2CacheSize)
- printf(" L2 Cache Size: %d bytes\n", L2CacheSize);
+ printf(" GPU Clock Speed: %.2f GHz\n", prop.clockRate * 1e-6f);
printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n",
prop.maxTexture1D, prop.maxTexture2D[0], prop.maxTexture2D[1],
const char *arch_str = prop.major < 2 ? " (not Fermi)" : "";
printf("Device %d: \"%s\" %.0fMb", dev, prop.name, (float)prop.totalGlobalMem/1048576.0f);
- printf(", sm_%d%d%s, %d cores", prop.major, prop.minor, arch_str, convertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount);
+ printf(", sm_%d%d%s", prop.major, prop.minor, arch_str);
+
+ int cores = convertSMVer2Cores(prop.major, prop.minor);
+ if (cores > 0)
+ printf(", %d cores", cores * prop.multiProcessorCount);
+
printf(", Driver/Runtime ver.%d.%d/%d.%d\n", driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100);
}
fflush(stdout);
set(cuda_link_libs ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
if(NOT APPLE)
- unset(CUDA_nvcuvid_LIBRARY CACHE)
find_cuda_helper_libs(nvcuvid)
set(cuda_link_libs ${cuda_link_libs} ${CUDA_nvcuvid_LIBRARY})
endif()
if(WIN32)
- unset(CUDA_nvcuvenc_LIBRARY CACHE)
find_cuda_helper_libs(nvcuvenc)
set(cuda_link_libs ${cuda_link_libs} ${CUDA_nvcuvenc_LIBRARY})
endif()
* **CV_REDUCE_MIN** The output is the minimum (column/row-wise) of all rows/columns of the matrix.
:param dtype: When it is negative, the destination vector will have the same type as the source matrix. Otherwise, its type will be ``CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), mtx.channels())`` .
-
+
The function ``reduce`` reduces the matrix to a vector by treating the matrix rows/columns as a set of 1D vectors and performing the specified operation on the vectors until a single row/column is obtained. For example, the function can be used to compute horizontal and vertical projections of a raster image. In the case of ``CV_REDUCE_SUM`` and ``CV_REDUCE_AVG``, the output may have a larger element bit depth to preserve accuracy. Multi-channel arrays are also supported in these two reduction modes.
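+
+For example, a minimal sketch (the matrix contents and the widened ``CV_32S`` output depth are chosen purely for illustration) that sums each column of an 8-bit matrix into a single row: ::
+
+    cv::Mat img(100, 200, CV_8UC1, cv::Scalar(1)); // toy 8-bit matrix of ones
+    cv::Mat colSum;
+    // dim = 0 collapses the rows into one 1 x 200 row; CV_32S keeps the
+    // per-column sums from overflowing the 8-bit source depth.
+    cv::reduce(img, colSum, 0, CV_REDUCE_SUM, CV_32S);
+    // every element of colSum is now 100, the sum of one column of ones
+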
.. seealso:: :ocv:func:`reduce`
cv::gpu::error(cudaGetErrorString(err), file, line, func);
}
-#ifdef __CUDACC__
-
namespace cv { namespace gpu
{
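+ // Integer division rounded up, e.g. divUp(10, 3) == 4; used throughout the
+ // module to compute CUDA grid dimensions from image and block sizes.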
__host__ __device__ __forceinline__ int divUp(int total, int grain)
{
    return (total + grain - 1) / grain;
}
namespace device
{
+ using cv::gpu::divUp;
+
+#ifdef __CUDACC__
typedef unsigned char uchar;
typedef unsigned short ushort;
typedef signed char schar;
- typedef unsigned int uint;
+ #ifdef _WIN32
+ typedef unsigned int uint;
+ #endif
template<class T> inline void bindTexture(const textureReference* tex, const PtrStepSz<T>& img)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
cudaSafeCall( cudaBindTexture2D(0, tex, img.ptr(), &desc, img.cols, img.rows, img.step) );
}
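+ // Usage sketch (an illustrative assumption, not part of this patch): with a
+ // file-scope texture reference such as
+ //     texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex;
+ // a host-side launcher binds a device image via bindTexture(&tex, img),
+ // and kernels then sample it with tex2D(tex, x, y).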
+#endif // __CUDACC__
}
}}
-#endif // __CUDACC__
+
#endif // __OPENCV_GPU_COMMON_HPP__
#define OPENCV_GPU_EMULATION_HPP_
#include "warp_reduce.hpp"
-#include <stdio.h>
namespace cv { namespace gpu { namespace device
{
template <> struct name<type> : binary_function<type, type, type> \
{ \
__device__ __forceinline__ type operator()(type lhs, type rhs) const {return op(lhs, rhs);} \
- __device__ __forceinline__ name(const name& other):binary_function<type, type, type>(){}\
- __device__ __forceinline__ name():binary_function<type, type, type>(){}\
+ __device__ __forceinline__ name() {}\
+ __device__ __forceinline__ name(const name&) {}\
};
template <typename T> struct maximum : binary_function<T, T, T>
{
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType lhs, typename TypeTraits<T>::ParameterType rhs) const
{
- return lhs < rhs ? rhs : lhs;
+ return max(lhs, rhs);
}
- __device__ __forceinline__ maximum(const maximum& other):binary_function<T, T, T>(){}
- __device__ __forceinline__ maximum():binary_function<T, T, T>(){}
+ __device__ __forceinline__ maximum() {}
+ __device__ __forceinline__ maximum(const maximum&) {}
};
OPENCV_GPU_IMPLEMENT_MINMAX(maximum, uchar, ::max)
{
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType lhs, typename TypeTraits<T>::ParameterType rhs) const
{
- return lhs < rhs ? lhs : rhs;
+ return min(lhs, rhs);
}
- __device__ __forceinline__ minimum(const minimum& other):binary_function<T, T, T>(){}
- __device__ __forceinline__ minimum():binary_function<T, T, T>(){}
+ __device__ __forceinline__ minimum() {}
+ __device__ __forceinline__ minimum(const minimum&) {}
};
OPENCV_GPU_IMPLEMENT_MINMAX(minimum, uchar, ::min)
// Math functions
///bound=========================================
+
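+ // Generic absolute-value functor. The specializations below dispatch to the
+ // proper device function for each type (::abs on values promoted to int for
+ // the small integral types, ::fabsf / ::fabs for float / double) and return
+ // unsigned types unchanged.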
+ template <typename T> struct abs_func : unary_function<T, T>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType x) const
+ {
+ return abs(x);
+ }
+
+ __device__ __forceinline__ abs_func() {}
+ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<unsigned char> : unary_function<unsigned char, unsigned char>
+ {
+ __device__ __forceinline__ unsigned char operator ()(unsigned char x) const
+ {
+ return x;
+ }
+
+ __device__ __forceinline__ abs_func() {}
+ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<signed char> : unary_function<signed char, signed char>
+ {
+ __device__ __forceinline__ signed char operator ()(signed char x) const
+ {
+ return ::abs((int)x);
+ }
+
+ __device__ __forceinline__ abs_func() {}
+ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<char> : unary_function<char, char>
+ {
+ __device__ __forceinline__ char operator ()(char x) const
+ {
+ return ::abs((int)x);
+ }
+
+ __device__ __forceinline__ abs_func() {}
+ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<unsigned short> : unary_function<unsigned short, unsigned short>
+ {
+ __device__ __forceinline__ unsigned short operator ()(unsigned short x) const
+ {
+ return x;
+ }
+
+ __device__ __forceinline__ abs_func() {}
+ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<short> : unary_function<short, short>
+ {
+ __device__ __forceinline__ short operator ()(short x) const
+ {
+ return ::abs((int)x);
+ }
+
+ __device__ __forceinline__ abs_func() {}
+ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<unsigned int> : unary_function<unsigned int, unsigned int>
+ {
+ __device__ __forceinline__ unsigned int operator ()(unsigned int x) const
+ {
+ return x;
+ }
+
+ __device__ __forceinline__ abs_func() {}
+ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<int> : unary_function<int, int>
+ {
+ __device__ __forceinline__ int operator ()(int x) const
+ {
+ return ::abs(x);
+ }
+
+ __device__ __forceinline__ abs_func() {}
+ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<float> : unary_function<float, float>
+ {
+ __device__ __forceinline__ float operator ()(float x) const
+ {
+ return ::fabsf(x);
+ }
+
+ __device__ __forceinline__ abs_func() {}
+ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<double> : unary_function<double, double>
+ {
+ __device__ __forceinline__ double operator ()(double x) const
+ {
+ return ::fabs(x);
+ }
+
+ __device__ __forceinline__ abs_func() {}
+ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+
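+ // Generates a pair of unary functors per math function: the generic template
+ // promotes its argument to float and calls the single-precision variant
+ // (func ## f), while the double specialization calls the double-precision one.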
#define OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(name, func) \
template <typename T> struct name ## _func : unary_function<T, float> \
{ \
__device__ __forceinline__ float operator ()(typename TypeTraits<T>::ParameterType v) const \
{ \
return func ## f(v); \
} \
+ __device__ __forceinline__ name ## _func() {} \
+ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
}; \
template <> struct name ## _func<double> : unary_function<double, double> \
{ \
__device__ __forceinline__ double operator ()(double v) const \
{ \
return func(v); \
} \
+ __device__ __forceinline__ name ## _func() {} \
+ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
};
#define OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(name, func) \
} \
};
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(fabs, ::fabs)
OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sqrt, ::sqrt)
OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp, ::exp)
OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp2, ::exp2)
OPENCV_GPU_IMPLEMENT_VEC_UNOP (type, operator ! , logical_not) \
OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, max, maximum) \
OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, min, minimum) \
- OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, fabs, fabs_func) \
+ OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, abs, abs_func) \
OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, sqrt, sqrt_func) \
OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, exp, exp_func) \
OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, exp2, exp2_func) \
#undef OPENCV_GPU_IMPLEMENT_VEC_INT_OP
}}} // namespace cv { namespace gpu { namespace device
-#endif // __OPENCV_GPU_VECMATH_HPP__
\ No newline at end of file
+#endif // __OPENCV_GPU_VECMATH_HPP__
const float* distance_ptr = distance.ptr<float>();
for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx, ++trainIdx_ptr, ++distance_ptr)
{
- int trainIdx = *trainIdx_ptr;
+ int _trainIdx = *trainIdx_ptr;
- if (trainIdx == -1)
+ if (_trainIdx == -1)
continue;
- float distance = *distance_ptr;
+ float _distance = *distance_ptr;
- DMatch m(queryIdx, trainIdx, 0, distance);
+ DMatch m(queryIdx, _trainIdx, 0, _distance);
matches.push_back(m);
}
const float* distance_ptr = distance.ptr<float>();
for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx, ++trainIdx_ptr, ++imgIdx_ptr, ++distance_ptr)
{
- int trainIdx = *trainIdx_ptr;
+ int _trainIdx = *trainIdx_ptr;
- if (trainIdx == -1)
+ if (_trainIdx == -1)
continue;
- int imgIdx = *imgIdx_ptr;
+ int _imgIdx = *imgIdx_ptr;
- float distance = *distance_ptr;
+ float _distance = *distance_ptr;
- DMatch m(queryIdx, trainIdx, imgIdx, distance);
+ DMatch m(queryIdx, _trainIdx, _imgIdx, _distance);
matches.push_back(m);
}
for (int i = 0; i < k; ++i, ++trainIdx_ptr, ++distance_ptr)
{
- int trainIdx = *trainIdx_ptr;
+ int _trainIdx = *trainIdx_ptr;
- if (trainIdx != -1)
+ if (_trainIdx != -1)
{
- float distance = *distance_ptr;
+ float _distance = *distance_ptr;
- DMatch m(queryIdx, trainIdx, 0, distance);
+ DMatch m(queryIdx, _trainIdx, 0, _distance);
curMatches.push_back(m);
}
for (int i = 0; i < 2; ++i, ++trainIdx_ptr, ++imgIdx_ptr, ++distance_ptr)
{
- int trainIdx = *trainIdx_ptr;
+ int _trainIdx = *trainIdx_ptr;
- if (trainIdx != -1)
+ if (_trainIdx != -1)
{
- int imgIdx = *imgIdx_ptr;
+ int _imgIdx = *imgIdx_ptr;
- float distance = *distance_ptr;
+ float _distance = *distance_ptr;
- DMatch m(queryIdx, trainIdx, imgIdx, distance);
+ DMatch m(queryIdx, _trainIdx, _imgIdx, _distance);
curMatches.push_back(m);
}
const int* trainIdx_ptr = trainIdx.ptr<int>(queryIdx);
const float* distance_ptr = distance.ptr<float>(queryIdx);
- const int nMatches = std::min(nMatches_ptr[queryIdx], trainIdx.cols);
+ const int nMatched = std::min(nMatches_ptr[queryIdx], trainIdx.cols);
- if (nMatches == 0)
+ if (nMatched == 0)
{
if (!compactResult)
matches.push_back(vector<DMatch>());
continue;
}
- matches.push_back(vector<DMatch>(nMatches));
+ matches.push_back(vector<DMatch>(nMatched));
vector<DMatch>& curMatches = matches.back();
- for (int i = 0; i < nMatches; ++i, ++trainIdx_ptr, ++distance_ptr)
+ for (int i = 0; i < nMatched; ++i, ++trainIdx_ptr, ++distance_ptr)
{
- int trainIdx = *trainIdx_ptr;
+ int _trainIdx = *trainIdx_ptr;
- float distance = *distance_ptr;
+ float _distance = *distance_ptr;
- DMatch m(queryIdx, trainIdx, 0, distance);
+ DMatch m(queryIdx, _trainIdx, 0, _distance);
curMatches[i] = m;
}
const int* imgIdx_ptr = imgIdx.ptr<int>(queryIdx);
const float* distance_ptr = distance.ptr<float>(queryIdx);
- const int nMatches = std::min(nMatches_ptr[queryIdx], trainIdx.cols);
+ const int nMatched = std::min(nMatches_ptr[queryIdx], trainIdx.cols);
- if (nMatches == 0)
+ if (nMatched == 0)
{
if (!compactResult)
matches.push_back(vector<DMatch>());
matches.push_back(vector<DMatch>());
vector<DMatch>& curMatches = matches.back();
- curMatches.reserve(nMatches);
+ curMatches.reserve(nMatched);
- for (int i = 0; i < nMatches; ++i, ++trainIdx_ptr, ++imgIdx_ptr, ++distance_ptr)
+ for (int i = 0; i < nMatched; ++i, ++trainIdx_ptr, ++imgIdx_ptr, ++distance_ptr)
{
int _trainIdx = *trainIdx_ptr;
int _imgIdx = *imgIdx_ptr;
}
// copy data structures on gpu
- stage_mat.upload(cv::Mat(1, stages.size() * sizeof(Stage), CV_8UC1, (uchar*)&(stages[0]) ));
+ stage_mat.upload(cv::Mat(1, (int) (stages.size() * sizeof(Stage)), CV_8UC1, (uchar*)&(stages[0]) ));
trees_mat.upload(cv::Mat(cl_trees).reshape(1,1));
nodes_mat.upload(cv::Mat(cl_nodes).reshape(1,1));
leaves_mat.upload(cv::Mat(cl_leaves).reshape(1,1));
void labelComponents(const PtrStepSzb& edges, PtrStepSzi comps, int flags, cudaStream_t stream)
{
+ (void) flags; // silence the unused-parameter warning
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(divUp(edges.cols, TILE_COLS), divUp(edges.rows, TILE_ROWS));
}
} } }
-#endif /* CUDA_DISABLER */
\ No newline at end of file
+#endif /* CUDA_DISABLER */
#if !defined CUDA_DISABLER
+#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "opencv2/gpu/device/common.hpp"
}}}
-#endif /* CUDA_DISABLER */
\ No newline at end of file
+#endif /* CUDA_DISABLER */
#if !defined CUDA_DISABLER
+#include <thrust/device_ptr.h>
#include <thrust/sort.h>
+
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/emulation.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
}}}
-#endif /* CUDA_DISABLER */
\ No newline at end of file
+#endif /* CUDA_DISABLER */
int grid = divUp(workAmount, block);
cudaFuncSetCacheConfig(lbp_cascade, cudaFuncCachePreferL1);
Cascade cascade((Stage*)mstages.ptr(), nstages, (ClNode*)mnodes.ptr(), mleaves.ptr(), msubsets.ptr(), (uchar4*)mfeatures.ptr(), subsetSize);
- lbp_cascade<<<grid, block>>>(cascade, frameW, frameH, windowW, windowH, initialScale, factor, workAmount, integral.ptr(), integral.step / sizeof(int), objects, classified);
+ lbp_cascade<<<grid, block>>>(cascade, frameW, frameH, windowW, windowH, initialScale, factor, workAmount, integral.ptr(), (int)(integral.step / sizeof(int)), objects, classified);
}
}
}}}
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float scale)
{
float angle = ::atan2f(y_data, x_data);
- angle += (angle < 0) * 2.0 * CV_PI;
+ angle += (angle < 0) * 2.0f * CV_PI_F; // branchless wrap of atan2f output into [0, 2*pi)
dst[y * dst_step + x] = scale * angle;
}
};
grid.x = divUp(x.cols, threads.x);
grid.y = divUp(x.rows, threads.y);
- const float scale = angleInDegrees ? (float)(180.0f / CV_PI) : 1.f;
+ const float scale = angleInDegrees ? (180.0f / CV_PI_F) : 1.f;
cartToPolar<Mag, Angle><<<grid, threads, 0, stream>>>(
x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(),
grid.x = divUp(mag.cols, threads.x);
grid.y = divUp(mag.rows, threads.y);
- const float scale = angleInDegrees ? (float)(CV_PI / 180.0f) : 1.0f;
+ const float scale = angleInDegrees ? (CV_PI_F / 180.0f) : 1.0f;
polarToCart<Mag><<<grid, threads, 0, stream>>>(mag.data, mag.step/mag.elemSize(),
angle.data, angle.step/angle.elemSize(), scale, x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(), mag.cols, mag.rows);
} // namespace mathfunc
}}} // namespace cv { namespace gpu { namespace device
-#endif /* CUDA_DISABLER */
\ No newline at end of file
+#endif /* CUDA_DISABLER */
r = ::fmin(r, 2.5f);
- v[1].x = arrow_x + r * ::cosf(theta - CV_PI / 2.0f);
- v[1].y = arrow_y + r * ::sinf(theta - CV_PI / 2.0f);
+ v[1].x = arrow_x + r * ::cosf(theta - CV_PI_F / 2.0f);
+ v[1].y = arrow_y + r * ::sinf(theta - CV_PI_F / 2.0f);
- v[4].x = arrow_x + r * ::cosf(theta + CV_PI / 2.0f);
- v[4].y = arrow_y + r * ::sinf(theta + CV_PI / 2.0f);
+ v[4].x = arrow_x + r * ::cosf(theta + CV_PI_F / 2.0f);
+ v[4].y = arrow_y + r * ::sinf(theta + CV_PI_F / 2.0f);
int indx = (y * u_avg.cols + x) * NUM_VERTS_PER_ARROW * 3;
- color_data[indx] = (theta - CV_PI) / CV_PI * 180.0f;
+ color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[0].x * xscale;
vertex_data[indx++] = v[0].y * yscale;
vertex_data[indx++] = v[0].z;
- color_data[indx] = (theta - CV_PI) / CV_PI * 180.0f;
+ color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[1].x * xscale;
vertex_data[indx++] = v[1].y * yscale;
vertex_data[indx++] = v[1].z;
- color_data[indx] = (theta - CV_PI) / CV_PI * 180.0f;
+ color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[2].x * xscale;
vertex_data[indx++] = v[2].y * yscale;
vertex_data[indx++] = v[2].z;
- color_data[indx] = (theta - CV_PI) / CV_PI * 180.0f;
+ color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[3].x * xscale;
vertex_data[indx++] = v[3].y * yscale;
vertex_data[indx++] = v[3].z;
- color_data[indx] = (theta - CV_PI) / CV_PI * 180.0f;
+ color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[4].x * xscale;
vertex_data[indx++] = v[4].y * yscale;
vertex_data[indx++] = v[4].z;
- color_data[indx] = (theta - CV_PI) / CV_PI * 180.0f;
+ color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[5].x * xscale;
vertex_data[indx++] = v[5].y * yscale;
vertex_data[indx++] = v[5].z;
}
}}}
-#endif /* CUDA_DISABLER */
\ No newline at end of file
+#endif /* CUDA_DISABLER */
#if !defined CUDA_DISABLER
-#include <stdio.h>
#include "internal_shared.hpp"
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#define BORDER_SIZE 5
#define MAX_KSIZE_HALF 100
-using namespace std;
-
namespace cv { namespace gpu { namespace device { namespace optflow_farneback
{
__constant__ float c_g[8];
}
__device__ __forceinline__ float4 abs_(const float4& a)
{
- return fabs(a);
+ return abs(a); // per-channel abs via the vector overload generated from abs_func
}
template <int cn, int PATCH_X, int PATCH_Y, bool calcErr>
}
}}}
-#endif /* CUDA_DISABLER */
\ No newline at end of file
+#endif /* CUDA_DISABLER */
}}} // namespace cv { namespace gpu { namespace device
-#endif /* CUDA_DISABLER */
\ No newline at end of file
+#endif /* CUDA_DISABLER */
grid.x = divUp(cols, threads.x << 1);
grid.y = divUp(rows, threads.y);
- int elem_step = u.step/sizeof(T);
+ int elem_step = (int)(u.step / sizeof(T));
for(int t = 0; t < iters; ++t)
{
kp_dir *= 180.0f / CV_PI_F;
kp_dir = 360.0f - kp_dir;
- if (abs(kp_dir - 360.f) < FLT_EPSILON)
+ if (::fabsf(kp_dir - 360.f) < FLT_EPSILON)
kp_dir = 0.f;
featureDir[blockIdx.x] = kp_dir;
}}} // namespace cv { namespace gpu { namespace device
-#endif /* CUDA_DISABLER */
\ No newline at end of file
+#endif /* CUDA_DISABLER */
namespace device
{
- using pcl::gpu::TextureBinder;
+ using cv::gpu::TextureBinder;
}
}
CV_Assert(img.type() == CV_8UC1);
CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.size() == img.size()));
- if (!TargetArchs::builtWith(GLOBAL_ATOMICS) || !DeviceInfo().supports(GLOBAL_ATOMICS))
- CV_Error(CV_StsNotImplemented, "The device doesn't support global atomics");
-
int maxKeypoints = static_cast<int>(keypointsRatio * img.size().area());
ensureSizeIsEnough(1, maxKeypoints, CV_16SC2, kpLoc_);
{
using namespace cv::gpu::device::fast;
- if (!TargetArchs::builtWith(GLOBAL_ATOMICS) || !DeviceInfo().supports(GLOBAL_ATOMICS))
- CV_Error(CV_StsNotImplemented, "The device doesn't support global atomics");
-
if (count_ == 0)
return 0;
CV_Assert(qualityLevel > 0 && minDistance >= 0 && maxCorners >= 0);
CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()));
- if (!TargetArchs::builtWith(GLOBAL_ATOMICS) || !DeviceInfo().supports(GLOBAL_ATOMICS))
- CV_Error(CV_StsNotImplemented, "The device doesn't support global atomics");
-
ensureSizeIsEnough(image.size(), CV_32F, eig_);
if (useHarrisDetector)
#include <vector>
#include "NCV.hpp"
-using namespace std;
-
//==============================================================================
//
//==============================================================================
-static void stdDebugOutput(const string &msg)
+static void stdDebugOutput(const std::string &msg)
{
- cout << msg;
+ std::cout << msg;
}
static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;
-void ncvDebugOutput(const string &msg)
+void ncvDebugOutput(const std::string &msg)
{
debugOutputHandler(msg);
}
do \
{ \
cudaError_t res = cudacall; \
- ncvAssertPrintReturn(cudaSuccess==res, "cudaError_t=" << res, errCode); \
+ ncvAssertPrintReturn(cudaSuccess==res, "cudaError_t=" << (int)res, errCode); \
} while (0)
do \
{ \
cudaError_t res = cudaGetLastError(); \
- ncvAssertPrintReturn(cudaSuccess==res, "cudaError_t=" << res, errCode); \
+ ncvAssertPrintReturn(cudaSuccess==res, "cudaError_t=" << (int)res, errCode); \
} while (0)
const string wndTitle = "NVIDIA Computer Vision :: Haar Classifiers Cascade";
-void matPrint(Mat &img, int lineOffsY, Scalar fontColor, const string &ss)
+static void matPrint(Mat &img, int lineOffsY, Scalar fontColor, const string &ss)
{
int fontFace = FONT_HERSHEY_DUPLEX;
double fontScale = 0.8;
}
-void displayState(Mat &canvas, bool bHelp, bool bGpu, bool bLargestFace, bool bFilter, double fps)
+static void displayState(Mat &canvas, bool bHelp, bool bGpu, bool bLargestFace, bool bFilter, double fps)
{
Scalar fontColorRed = CV_RGB(255,0,0);
Scalar fontColorNV = CV_RGB(118,185,0);
}
-NCVStatus process(Mat *srcdst,
+static NCVStatus process(Mat *srcdst,
Ncv32u width, Ncv32u height,
NcvBool bFilterRects, NcvBool bLargestFace,
HaarClassifierCascadeDescriptor &haar,
//==============================================================================
namedWindow(wndTitle, 1);
- Mat gray, frameDisp;
+ Mat frameDisp;
do
{
// Each GPU is associated with its own context
CUcontext contexts[2];
-int main(int argc, char **argv)
+int main()
{
- if (argc > 1)
- {
- cout << "CUDA driver API sample\n";
- return -1;
- }
-
int num_devices = getCudaEnabledDeviceCount();
if (num_devices < 2)
{
// CPU result
Mat result;
-void printHelp()
+static void printHelp()
{
std::cout << "Usage: driver_api_stereo_multi_gpu --left <left_image> --right <right_image>\n";
}
class RgbToR
{
public:
- float operator ()(unsigned char b, unsigned char g, unsigned char r)
+ float operator ()(unsigned char /*b*/, unsigned char /*g*/, unsigned char r)
{
return static_cast<float>(r)/255.0f;
}
class RgbToG
{
public:
- float operator ()(unsigned char b, unsigned char g, unsigned char r)
+ float operator ()(unsigned char /*b*/, unsigned char g, unsigned char /*r*/)
{
return static_cast<float>(g)/255.0f;
}
class RgbToB
{
public:
- float operator ()(unsigned char b, unsigned char g, unsigned char r)
+ float operator ()(unsigned char b, unsigned char /*g*/, unsigned char /*r*/)
{
return static_cast<float>(b)/255.0f;
}
return NCV_SUCCESS;
}
-NCVStatus LoadImages (const char *frame0Name,
+static NCVStatus LoadImages (const char *frame0Name,
const char *frame1Name,
int &width,
int &height,
return c + (d - c) * (x - a) / (b - a);
}
-NCVStatus ShowFlow (NCVMatrixAlloc<Ncv32f> &u, NCVMatrixAlloc<Ncv32f> &v, const char *name)
+static NCVStatus ShowFlow (NCVMatrixAlloc<Ncv32f> &u, NCVMatrixAlloc<Ncv32f> &v, const char *name)
{
IplImage *flowField;
return NCV_SUCCESS;
}
-IplImage *CreateImage (NCVMatrixAlloc<Ncv32f> &h_r, NCVMatrixAlloc<Ncv32f> &h_g, NCVMatrixAlloc<Ncv32f> &h_b)
+static IplImage *CreateImage (NCVMatrixAlloc<Ncv32f> &h_r, NCVMatrixAlloc<Ncv32f> &h_g, NCVMatrixAlloc<Ncv32f> &h_b)
{
CvSize imageSize = cvSize (h_r.width (), h_r.height ());
IplImage *image = cvCreateImage (imageSize, IPL_DEPTH_8U, 4);
return image;
}
-void PrintHelp ()
+static void PrintHelp ()
{
std::cout << "Usage help:\n";
std::cout << std::setiosflags(std::ios::left);
std::cout << "\t" << std::setw(15) << PARAM_HELP << " - display this help message\n";
}
-int ProcessCommandLine(int argc, char **argv,
+static int ProcessCommandLine(int argc, char **argv,
Ncv32f &timeStep,
char *&frame0Name,
char *&frame1Name,
// CPU result
Mat result;
-void printHelp()
+static void printHelp()
{
std::cout << "Usage: stereo_multi_gpu --left <image> --right <image>\n";
}