}\r
}\r
\r
+// Integer type that may legally alias Npp32f storage (GCC may_alias attribute),
+// used for bit-pattern reinterpretation without strict-aliasing UB.
+// Fixed version check: the attribute is wanted on all GCC newer than 3.4; the
+// old test (__GNUC__ > 2 && __GNUC_MINOR__ > 4) wrongly excluded GCC 4.0-4.4,
+// 5.0-5.4, etc. (any major > 2 with minor <= 4).
+#if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 4))
+typedef Npp32s __attribute__((__may_alias__)) Npp32s_a;
+#else
+typedef Npp32s Npp32s_a;
+#endif
+\r
void cv::gpu::copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, int borderType, const Scalar& value, Stream& s)\r
{\r
CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);\r
case CV_32FC1:\r
{\r
Npp32f val = saturate_cast<Npp32f>(value[0]);\r
- Npp32s nVal = *(reinterpret_cast<Npp32s*>(&val));\r
+ Npp32s nVal = *(reinterpret_cast<Npp32s_a*>(&val));\r
nppSafeCall( nppiCopyConstBorder_32s_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), srcsz,\r
dst.ptr<Npp32s>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );\r
break;\r
// Guaranteed size cross-platform classifier structures\r
//\r
//==============================================================================\r
-\r
+// Float type that may legally alias integer storage (GCC may_alias attribute).
+// Fixed version check: intended for all GCC newer than 3.4; the old test
+// (__GNUC__ > 2 && __GNUC_MINOR__ > 4) wrongly excluded GCC 4.0-4.4, 5.0-5.4,
+// etc. (any major > 2 with minor <= 4), re-enabling strict-aliasing UB there.
+#if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 4))
+typedef Ncv32f __attribute__((__may_alias__)) Ncv32f_a;
+#else
+typedef Ncv32f Ncv32f_a;
+#endif
\r
struct HaarFeature64\r
{\r
\r
// Stores the weight's raw 32-bit pattern into _ui2.y. The cast goes through
// the may_alias typedef (Ncv32f_a) so writing float data over integer storage
// does not violate strict-aliasing rules under optimizing GCC.
__host__ NCVStatus setWeight(Ncv32f weight)
{
-        ((Ncv32f*)&(this->_ui2.y))[0] = weight;
+        ((Ncv32f_a*)&(this->_ui2.y))[0] = weight;
    return NCV_SUCCESS;
}
\r
\r
// Reads the weight back by reinterpreting the bits of _ui2.y as Ncv32f;
// the may_alias typedef keeps the type pun legal under strict aliasing.
__device__ __host__ Ncv32f getWeight(void)
{
-        return *(Ncv32f*)(&this->_ui2.y);
+        return *(Ncv32f_a*)(&this->_ui2.y);
}
};\r
\r
}\r
};\r
\r
-\r
struct HaarClassifierNodeDescriptor32\r
{\r
uint1 _ui1;\r
\r
// Encodes a leaf value: stores leafValue's bit pattern into the _ui1 word
// via the may_alias typedef (avoids strict-aliasing UB). Always succeeds.
__host__ NCVStatus create(Ncv32f leafValue)
{
-        *(Ncv32f *)&this->_ui1 = leafValue;
+        *(Ncv32f_a *)&this->_ui1 = leafValue;
    return NCV_SUCCESS;
}
\r
\r
// Host-side accessor: reinterprets the stored _ui1.x bits as the leaf value,
// using the may_alias typedef to keep the pun well-defined.
__host__ Ncv32f getLeafValueHost(void)
{
-        return *(Ncv32f *)&this->_ui1.x;
+        return *(Ncv32f_a *)&this->_ui1.x;
}
\r
#ifdef __CUDACC__\r
}\r
};\r
\r
+// Unsigned type that may legally alias Ncv32f storage (GCC may_alias attribute).
+// Fixed version check: intended for all GCC newer than 3.4; the old test
+// (__GNUC__ > 2 && __GNUC_MINOR__ > 4) wrongly excluded GCC 4.0-4.4, 5.0-5.4,
+// etc. (any major > 2 with minor <= 4), re-enabling strict-aliasing UB there.
+#if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 4))
+typedef Ncv32u __attribute__((__may_alias__)) Ncv32u_a;
+#else
+typedef Ncv32u Ncv32u_a;
+#endif
\r
struct HaarClassifierNode128\r
{\r
\r
// Packs the float threshold's bit pattern into the _ui4.y word; the
// may_alias typedef makes the float->uint reinterpretation well-defined.
__host__ NCVStatus setThreshold(Ncv32f t)
{
-        this->_ui4.y = *(Ncv32u *)&t;
+        this->_ui4.y = *(Ncv32u_a *)&t;
    return NCV_SUCCESS;
}
\r
// Stores the left-child descriptor's raw 32-bit representation into _ui4.z
// (descriptor is a single-word struct; the may_alias cast avoids aliasing UB).
__host__ NCVStatus setLeftNodeDesc(HaarClassifierNodeDescriptor32 nl)
{
-        this->_ui4.z = *(Ncv32u *)&nl;
+        this->_ui4.z = *(Ncv32u_a *)&nl;
    return NCV_SUCCESS;
}
\r
// Stores the right-child descriptor's raw 32-bit representation into _ui4.w,
// mirroring setLeftNodeDesc.
__host__ NCVStatus setRightNodeDesc(HaarClassifierNodeDescriptor32 nr)
{
-        this->_ui4.w = *(Ncv32u *)&nr;
+        this->_ui4.w = *(Ncv32u_a *)&nr;
    return NCV_SUCCESS;
}
\r
\r
// Reinterprets the packed _ui4.y bits back as the float threshold
// (inverse of setThreshold), via the may_alias typedef.
__host__ __device__ Ncv32f getThreshold(void)
{
-        return *(Ncv32f*)&this->_ui4.y;
+        return *(Ncv32f_a*)&this->_ui4.y;
}
\r
__host__ __device__ HaarClassifierNodeDescriptor32 getLeftNodeDesc(void)\r
\r
// Packs the stage threshold's float bit pattern into the _ui2.x word;
// may_alias typedef keeps the reinterpretation well-defined.
__host__ NCVStatus setStageThreshold(Ncv32f t)
{
-        this->_ui2.x = *(Ncv32u *)&t;
+        this->_ui2.x = *(Ncv32u_a *)&t;
    return NCV_SUCCESS;
}
\r
\r
// Reinterprets the packed _ui2.x bits back as the stage threshold
// (inverse of setStageThreshold).
__host__ __device__ Ncv32f getStageThreshold(void)
{
-        return *(Ncv32f*)&this->_ui2.x;
+        return *(Ncv32f_a*)&this->_ui2.x;
}
\r
__host__ __device__ Ncv32u getStartClassifierRootNodeOffset(void)\r
(d_hierSums.ptr() + partSumOffsets[i],\r
partSumNums[i], NULL,\r
d_hierSums.ptr() + partSumOffsets[i+1],\r
- NULL);\r
+ 0);\r
}\r
else\r
{\r
(d_hierSums.ptr() + partSumOffsets[i],\r
partSumNums[i], NULL,\r
NULL,\r
- NULL);\r
+ 0);\r
}\r
\r
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);\r
}\r
\r
\r
+// Unsigned type that may legally alias Ncv32f storage (GCC may_alias attribute),
+// used below to extract float bit patterns for the _32u compaction routines.
+// Fixed version check: intended for all GCC newer than 3.4; the old test
+// (__GNUC__ > 2 && __GNUC_MINOR__ > 4) wrongly excluded GCC 4.0-4.4, 5.0-5.4,
+// etc. (any major > 2 with minor <= 4).
+#if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 4))
+typedef Ncv32u __attribute__((__may_alias__)) Ncv32u_a;
+#else
+typedef Ncv32u Ncv32u_a;
+#endif
+
+\r
// Device-side stream compaction for 32-bit floats: removes elements equal to
// elemRemove. Delegates to the 32u variant by treating the float buffers as raw
// 32-bit words; elemRemove's bit pattern is read through the may_alias typedef
// to avoid strict-aliasing UB. NOTE(review): matching is by bit pattern, not
// float equality, so NaN payloads / signed zero behave bitwise — presumably
// intended; confirm against the 32u implementation.
NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen,
                            Ncv32f *d_dst, Ncv32u *p_dstLen,
                            Ncv32f elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, cudaDeviceProp &devProp)
{
    return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
-                             *(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp);
+                             *(Ncv32u_a *)&elemRemove, pBuffer, bufSize, devProp);
}
\r
-\r
NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen,\r
Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove)\r
{\r
// Host-side compaction for signed 32-bit ints: forwards to the unsigned
// variant; elemRemove is reinterpreted bitwise via the may_alias typedef.
NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen,
                                 Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove)
{
-    return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u *)&elemRemove);
+    return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove);
}
\r
\r
// Host-side compaction for 32-bit floats: forwards to the unsigned variant;
// elemRemove's bit pattern is extracted via the may_alias typedef (bitwise
// match, same caveat as the device-side 32f wrapper).
NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen,
                                 Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove)
{
-    return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u *)&elemRemove);
+    return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove);
}
\r
-\r
//==============================================================================\r
//\r
// Filter.cu\r
\r
int devId;\r
ncvAssertCUDAReturn(cudaGetDevice(&devId), false);\r
- cudaDeviceProp devProp;\r
- ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), false);\r
+ cudaDeviceProp _devProp;\r
+ ncvAssertCUDAReturn(cudaGetDeviceProperties(&_devProp, devId), false);\r
\r
ncvStat = ncvApplyHaarClassifierCascade_device(\r
d_integralImage, d_rectStdDev, d_pixelMask,\r
haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false,\r
searchRoiU, 1, 1.0f,\r
*this->allocatorGPU.get(), *this->allocatorCPU.get(),\r
- devProp, 0);\r
+ _devProp, 0);\r
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);\r
\r
NCVMatrixAlloc<Ncv32u> h_pixelMask_d(*this->allocatorCPU.get(), this->width, this->height);\r
-#pragma warning (disable : 4408 4201 4100)\r
+#if defined _MSC_VER && _MSC_VER >= 1200\r
+# pragma warning (disable : 4408 4201 4100)\r
+#endif\r
\r
#include <cstdio>\r
\r