1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include <cuda_invoker.hpp>
46 #include "opencv2/core/cuda/common.hpp"
48 namespace cv { namespace softcascade { namespace cudev {
50 typedef unsigned char uchar;
// Average a FACTOR x FACTOR window of the source channel image into a single
// output byte. `ptr` must already point at the window's top-left element and
// `pitch` is the source row stride; `y`/`x` are unused in the visible body.
// NOTE(review): the template<int FACTOR> header and the zero-initialization
// of the accumulator `out` are not visible in this view of the file — confirm.
53 __device__ __forceinline__ uchar shrink(const uchar* ptr, const int pitch, const int y, const int x)
57 for(int dy = 0; dy < FACTOR; ++dy)
59 for(int dx = 0; dx < FACTOR; ++dx)
61 out += ptr[dy * pitch + dx];
// Truncating integer mean over the FACTOR*FACTOR window.
64 return static_cast<uchar>(out / (FACTOR * FACTOR));
// Kernel: each thread writes one pixel of the shrunk image by averaging the
// corresponding FACTOR x FACTOR block of the input channel image.
//   hogluv/inPitch   — source channels and row stride
//   shrank/outPitch  — destination and row stride
// No bounds check is visible here: the launch grid must tile the output
// exactly (the host wrapper divides cols/32 and rows/8 with no remainder
// handling).
68 __global__ void shrink(const uchar* __restrict__ hogluv, const size_t inPitch,
69 uchar* __restrict__ shrank, const size_t outPitch )
71 const int y = blockIdx.y * blockDim.y + threadIdx.y;
72 const int x = blockIdx.x * blockDim.x + threadIdx.x;
// Top-left corner of this thread's FACTOR x FACTOR source window.
74 const uchar* ptr = hogluv + (FACTOR * y) * inPitch + (FACTOR * x);
76 shrank[ y * outPitch + x] = shrink<FACTOR>(ptr, inPitch, y, x);
// Host wrapper: launches shrink<4> over the whole shrunk image (4x shrinkage).
// NOTE(review): `block` is used in the launch but its declaration is not
// visible in this view — presumably dim3(32, 8) to match the grid division.
// Fully synchronizes the device (and checks errors) after the launch, so this
// call is blocking.
79 void shrink(const cv::cuda::PtrStepSzb& channels, cv::cuda::PtrStepSzb shrunk)
82 dim3 grid(shrunk.cols / 32, shrunk.rows / 8);
83 shrink<4><<<grid, block>>>((uchar*)channels.ptr(), channels.step, (uchar*)shrunk.ptr(), shrunk.step);
84 cudaSafeCall(cudaDeviceSynchronize());
// Convert one BGR pixel (float components, expected in [0, 1]) to quantized
// 8-bit L, u, v written through the __l/__u/__v output references.
// Pipeline: RGB -> XYZ via the matrix below, then CIE L*u*v* using the
// white-point chromaticity constants _ur/_vr.
87 __device__ __forceinline__ void luv(const float& b, const float& g, const float& r, uchar& __l, uchar& __u, uchar& __v)
90 float x = 0.412453f * r + 0.357580f * g + 0.180423f * b;
91 float y = 0.212671f * r + 0.715160f * g + 0.072169f * b;
92 float z = 0.019334f * r + 0.119193f * g + 0.950227f * b;
// White-point u', v' chromaticity coordinates.
95 const float _ur = 0.19783303699678276f;
96 const float _vr = 0.46833047435252234f;
// FLT_EPSILON guard avoids a division by zero for pure black pixels.
98 const float divisor = fmax((x + 15.f * y + 3.f * z), FLT_EPSILON);
99 const float _u = __fdividef(4.f * x, divisor);
100 const float _v = __fdividef(9.f * y, divisor);
// `hack` snaps luminance to a 1/2047 grid before cbrtf — presumably to match
// a fixed-point reference implementation; TODO confirm against CPU path.
102 float hack = static_cast<float>(__float2int_rn(y * 2047)) / 2047;
103 const float L = fmax(0.f, ((116.f * cbrtf(hack)) - 16.f));
104 const float U = 13.f * L * (_u - _ur);
105 const float V = 13.f * L * (_v - _vr);
// Rescale each component's nominal range onto [0, 255] for uchar storage.
107 // L in [0, 100], u in [-134, 220], v in [-140, 122]
108 __l = static_cast<uchar>( L * (255.f / 100.f));
109 __u = static_cast<uchar>((U + 134.f) * (255.f / (220.f + 134.f )));
110 __v = static_cast<uchar>((V + 140.f) * (255.f / (122.f + 140.f )));
// Kernel: converts one pixel to Luv and scatters L, u, v into three
// vertically stacked planes of the output image (same pitch for each plane).
// NOTE(review): the plane height 480 is hard-coded below, so this kernel only
// works for 480-row frames — confirm against callers. The declarations of the
// l/u/v locals are not visible in this view of the file.
113 __global__ void bgr2Luv_d(const uchar* rgb, const size_t rgbPitch, uchar* luvg, const size_t luvgPitch)
115 const int y = blockIdx.y * blockDim.y + threadIdx.y;
116 const int x = blockIdx.x * blockDim.x + threadIdx.x;
// Pixel read as uchar3, so .x/.y/.z follow in-memory channel order.
118 uchar3 color = ((uchar3*)(rgb + rgbPitch * y))[x];
120 luv(color.x / 255.f, color.y / 255.f, color.z / 255.f, l, u, v);
// Plane 0 = L, plane 1 = u, plane 2 = v, each 480 rows apart.
122 luvg[luvgPitch * y + x] = l;
123 luvg[luvgPitch * (y + 480) + x] = u;
124 luvg[luvgPitch * (y + 2 * 480) + x] = v;
// Host wrapper for bgr2Luv_d: one thread per input pixel.
// NOTE(review): `block` is used in the launch but its declaration is not
// visible in this view — presumably dim3(32, 8) to match the grid division.
// Blocking: synchronizes the device after the launch.
127 void bgr2Luv(const cv::cuda::PtrStepSzb& bgr, cv::cuda::PtrStepSzb luv)
130 dim3 grid(bgr.cols / 32, bgr.rows / 8);
132 bgr2Luv_d<<<grid, block>>>((const uchar*)bgr.ptr(0), bgr.step, (uchar*)luv.ptr(0), luv.step);
134 cudaSafeCall(cudaDeviceSynchronize());
// Generic orientation binning: quantizes the gradient angle into one of 6
// bins of width pi/6; the half-quantum offset centers each bin on its
// canonical direction, and % 6 folds the top edge back into bin 0.
// NOTE(review): the call site passes (dy, dx), so the parameter named `dx`
// here actually receives dy — atan2(dx, dy) thus evaluates the conventional
// atan2(dy, dx) of the caller. Confirm before renaming anything.
137 template<bool isDefaultNum>
138 __device__ __forceinline__ int fast_angle_bin(const float& dx, const float& dy)
140 const float angle_quantum = CV_PI_F / 6.f;
141 float angle = atan2(dx, dy) + (angle_quantum / 2.f);
// Map negative angles into [0, pi): orientation is unsigned.
143 if (angle < 0) angle += CV_PI_F;
145 const float angle_scaling = 1.f / angle_quantum;
146 return static_cast<int>(angle * angle_scaling) % 6;
// Specialization for the default bin count: avoids atan2 entirely by testing
// the gradient against each bin's unit direction vector and keeping the bin
// with the largest |dot product|. The constants 0.8660254... and 0.5 are
// cos(pi/6) and sin(pi/6) (and their rotations for the later bins).
// NOTE(review): the template<> marker, the bin-index bookkeeping updated in
// each `if`, the loop header around the bin_vector_i section, and the final
// return are not visible in this view of the file — this body is incomplete.
150 __device__ __forceinline__ int fast_angle_bin<true>(const float& dy, const float& dx)
// Bin 0 candidate: the horizontal axis, score = |dx|.
154 float max_dot = fabs(dx);
157 const float dot_product = fabs(dx * 0.8660254037844386f + dy * 0.5f);
159 if(dot_product > max_dot)
161 max_dot = dot_product;
166 const float dot_product = fabs(dy * 0.8660254037844386f + dx * 0.5f);
168 if(dot_product > max_dot)
170 max_dot = dot_product;
// Generic bin direction computed on the fly (loop header elided in this view).
177 bin_vector_i.x = ::cos(i * (CV_PI_F / 6.f));
178 bin_vector_i.y = ::sin(i * (CV_PI_F / 6.f));
180 const float dot_product = fabs(dx * bin_vector_i.x + dy * bin_vector_i.y);
181 if(dot_product > max_dot)
183 max_dot = dot_product;
188 const float dot_product = fabs(dx * (-0.4999999999999998f) + dy * 0.8660254037844387f);
189 if(dot_product > max_dot)
191 max_dot = dot_product;
196 const float dot_product = fabs(dx * (-0.8660254037844387f) + dy * 0.49999999999999994f);
197 if(dot_product > max_dot)
199 max_dot = dot_product;
// 2D texture over the 8-bit grayscale source image; read by gray2hog below.
206 texture<uchar, cudaTextureType2D, cudaReadModeElementType> tgray;
// Kernel: central-difference gradients sampled from the tgray texture; writes
// the gradient magnitude into the 7th plane (row offset 480 * 6) and into the
// plane selected by the orientation bin. Plane height 480 is hard-coded (same
// assumption as bgr2Luv_d). Texture addressing handles the x±1 / y±1 borders.
208 template<bool isDefaultNum>
209 __global__ void gray2hog(cv::cuda::PtrStepSzb mag)
211 const int x = blockIdx.x * blockDim.x + threadIdx.x;
212 const int y = blockIdx.y * blockDim.y + threadIdx.y;
214 const float dx = tex2D(tgray, x + 1, y + 0) - tex2D(tgray, x - 1, y - 0);
215 const float dy = tex2D(tgray, x + 0, y + 1) - tex2D(tgray, x - 0, y - 1);
// 1/sqrt(2) normalization keeps the magnitude inside the uchar range.
217 const float magnitude = sqrtf((dx * dx) + (dy * dy)) * (1.0f / sqrtf(2));
218 const uchar cmag = static_cast<uchar>(magnitude);
// Plane 6 holds the raw magnitude; planes 0..5 are per-orientation channels.
220 mag( 480 * 6 + y, x) = cmag;
221 mag( 480 * fast_angle_bin<isDefaultNum>(dy, dx) + y, x) = cmag;
// Host wrapper: binds the gray image to tgray, then launches the kernel with
// the specialization chosen by `bins`.
// NOTE(review): the `block` declaration and the if/else that selects between
// the <true> and <false> instantiations (presumably `bins == 6`) are not
// visible in this view — confirm. Blocking: synchronizes after the launch.
224 void gray2hog(const cv::cuda::PtrStepSzb& gray, cv::cuda::PtrStepSzb mag, const int bins)
227 dim3 grid(gray.cols / 32, gray.rows / 8);
229 cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar>();
230 cudaSafeCall( cudaBindTexture2D(0, tgray, gray.data, desc, gray.cols, gray.rows, gray.step) );
233 gray2hog<true><<<grid, block>>>(mag);
235 gray2hog<false><<<grid, block>>>(mag);
237 cudaSafeCall(cudaDeviceSynchronize());
240 // ToDo: use textures or uncached load instruction.
// Kernel: scatters precomputed magnitudes into per-orientation histogram
// planes. Reads the bin index from the float angle image and copies the
// magnitude byte into plane `bin` (each plane is `fh` rows tall).
// angPitch is in float elements (caller divides step by sizeof(float));
// hogPitch is in bytes, which equals elements for the uchar buffers.
241 __global__ void magToHist(const uchar* __restrict__ mag,
242 const float* __restrict__ angle, const size_t angPitch,
243 uchar* __restrict__ hog, const size_t hogPitch, const int fh)
245 const int y = blockIdx.y * blockDim.y + threadIdx.y;
246 const int x = blockIdx.x * blockDim.x + threadIdx.x;
248 const int bin = (int)(angle[y * angPitch + x]);
249 const uchar val = mag[y * hogPitch + x];
250 hog[((fh * bin) + y) * hogPitch + x] = val;
// Host wrapper for magToHist. The magnitude plane lives after the `bins`
// orientation planes (row fh * bins of hogluv); the histogram planes start at
// row 0. Launches on the caller's stream but then fully synchronizes, so the
// call is effectively blocking despite taking a stream.
// NOTE(review): the `block` declaration is not visible in this view —
// presumably dim3(32, 8) to match the grid division.
253 void fillBins(cv::cuda::PtrStepSzb hogluv, const cv::cuda::PtrStepSzf& nangle,
254 const int fw, const int fh, const int bins, cudaStream_t stream )
256 const uchar* mag = (const uchar*)hogluv.ptr(fh * bins);
257 uchar* hog = (uchar*)hogluv.ptr();
258 const float* angle = (const float*)nangle.ptr();
261 dim3 grid(fw / 32, fh / 8);
// angPitch passed in float elements, hogPitch in bytes (see magToHist).
263 magToHist<<<grid, block, 0, stream>>>(mag, angle, nangle.step / sizeof(float), hog, hogluv.step, fh);
266 cudaSafeCall( cudaGetLastError() );
267 cudaSafeCall( cudaDeviceSynchronize() );
// Intersection area (in pixels) of two detection rectangles; 0 when the
// rectangles are disjoint in either axis.
271 __device__ __forceinline__ float overlapArea(const Detection &a, const Detection &b)
273 int w = ::min(a.x + a.w, b.x + b.w) - ::max(a.x, b.x);
274 int h = ::min(a.y + a.h, b.y + b.h) - ::max(a.y, b.y);
276 return (w < 0 || h < 0)? 0.f : (float)(w * h);
// 1-row 2D texture over the candidate detections; each uint4 texel is one
// packed Detection record (reinterpreted below).
279 texture<uint4, cudaTextureType2D, cudaReadModeElementType> tdetections;
// NMS kernel: pairwise overlap test over the first *n detections, run by a
// single block whose threads stride by 192. For each overlapping pair, the
// LOWER-confidence detection is marked in overlaps[] (1 = suppressed).
// NOTE(review): the threshold comparison on `ovl` that guards the suppression
// lines is not visible in this view of the file — confirm the cut-off value.
// `suppessed` [sic] is the index of the detection chosen for suppression.
281 __global__ void overlap(const uint* n, uchar* overlaps)
283 const int idx = threadIdx.x;
284 const int total = *n;
// i + 1: detection 0 of the texture row is skipped (see collect's +1 offset).
286 for (int i = idx + 1; i < total; i += 192)
288 const uint4 _a = tex2D(tdetections, i, 0);
289 const Detection& a = *((Detection*)(&_a));
290 bool excluded = false;
292 for (int j = i + 1; j < total; ++j)
294 const uint4 _b = tex2D(tdetections, j, 0);
295 const Detection& b = *((Detection*)(&_b));
// Overlap ratio: intersection area over the smaller of the two boxes.
296 float ovl = overlapArea(a, b) / ::min(a.w * a.h, b.w * b.h);
300 int suppessed = (a.confidence > b.confidence)? j : i;
301 overlaps[suppessed] = 1;
302 excluded = excluded || (suppessed == i);
// Legacy (pre-Volta) warp vote: once every lane's own detection `i` is
// suppressed, the whole warp can stop scanning. `__all` has no _sync suffix
// — this code targets the old implicit-warp-synchrony model.
305 #if defined __CUDA_ARCH__ && (__CUDA_ARCH__ >= 120)
306 if (__all(excluded)) break;
// Compaction kernel: copies surviving (non-suppressed) detections from the
// tdetections texture into `suppressed`, assigning output slots with
// atomicInc. atomicInc wraps the counter when it reaches 50, capping output.
// NOTE(review): the `if (!overlaps[i])` style guard expected before the
// atomic is not visible in this view — confirm. The +1 texel offset skips
// slot 0 of the detections row (slot 0 is the count, see the host layout).
312 __global__ void collect(const uint* n, uchar* overlaps, uint* ctr, uint4* suppressed)
314 const int idx = threadIdx.x;
315 const int total = *n;
317 for (int i = idx; i < total; i += 192)
321 int oidx = atomicInc(ctr, 50);
322 suppressed[oidx] = tex2D(tdetections, i + 1, 0);
// Host driver for non-maximum suppression: binds the raw detections buffer to
// the tdetections texture (one uint4 per Detection), marks overlapping
// detections with `overlap`, then compacts survivors with `collect` into
// `suppressed` (whose first uint4 slot holds the output counter).
// NOTE(review): the `grid`/`block` declarations and the `offset` variable
// passed to cudaBindTexture2D are not visible in this view. The kernels are
// launched on the default stream despite the `stream` parameter — confirm
// whether that is intentional. Blocking: full device sync at the end.
327 void suppress(const cv::cuda::PtrStepSzb& objects, cv::cuda::PtrStepSzb overlaps, cv::cuda::PtrStepSzi ndetections,
328 cv::cuda::PtrStepSzb suppressed, cudaStream_t stream)
333 cudaChannelFormatDesc desc = cudaCreateChannelDesc<uint4>();
335 cudaSafeCall( cudaBindTexture2D(&offset, tdetections, objects.data, desc, objects.cols / sizeof(uint4), objects.rows, objects.step));
337 overlap<<<grid, block>>>((uint*)ndetections.ptr(0), (uchar*)overlaps.ptr(0));
// Output layout: ctr in slot 0 of `suppressed`, detections from slot 1 on.
338 collect<<<grid, block>>>((uint*)ndetections.ptr(0), (uchar*)overlaps.ptr(0), (uint*)suppressed.ptr(0), ((uint4*)suppressed.ptr(0)) + 1);
342 cudaSafeCall( cudaGetLastError());
343 cudaSafeCall( cudaDeviceSynchronize());
// Inclusive prefix sum of `impact` across the Policy::WARP lanes of a warp.
// NOTE(review): the enclosing struct header (presumably `struct PrefixSum`,
// used as PrefixSum<Policy>::apply elsewhere in this file), the accumulation
// statement under the `if (threadIdx.x >= i)` guard, the #else/#endif of the
// architecture switch, and the initial store of `impact` into ptr[idx] are
// not visible in this view of the file.
347 template<typename Policy>
350 __device_inline__ static void apply(float& impact)
// SM 3.0+: register-only scan via legacy (mask-less) __shfl_up — this code
// predates the *_sync intrinsics required on Volta and newer.
352 #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
354 // scan on shuffle functions
355 for (int i = 1; i < Policy::WARP; i *= 2)
357 const float n = __shfl_up(impact, i, Policy::WARP);
359 if (threadIdx.x >= i)
// Pre-SM30 fallback: Hillis–Steele scan in volatile shared memory. No
// __syncthreads between steps — correct only under the old implicit
// warp-synchronous model (each row scanned within a single warp).
363 __shared__ volatile float ptr[Policy::STA_X * Policy::STA_Y];
365 const int idx = threadIdx.y * Policy::STA_X + threadIdx.x;
369 if ( threadIdx.x >= 1) ptr [idx ] = (ptr [idx - 1] + ptr [idx]);
370 if ( threadIdx.x >= 2) ptr [idx ] = (ptr [idx - 2] + ptr [idx]);
371 if ( threadIdx.x >= 4) ptr [idx ] = (ptr [idx - 4] + ptr [idx]);
372 if ( threadIdx.x >= 8) ptr [idx ] = (ptr [idx - 8] + ptr [idx]);
373 if ( threadIdx.x >= 16) ptr [idx ] = (ptr [idx - 16] + ptr [idx]);
// 2D texture over the integer integral channel image; sampled by get() below.
380 texture<int, cudaTextureType2D, cudaReadModeElementType> thogluv;
// Rescale a weak-learner node's feature rectangle to the current pyramid
// level and compensate its threshold for quantization error: the threshold is
// scaled by the ratio of the actual (rounded) rectangle area to the ideally
// scaled area. Here the rect is corner-encoded: (x, y) top-left and (z, w)
// bottom-right, so area = (z - x) * (w - y).
// node.threshold packs the channel index in the top 4 bits and the raw
// threshold in the low 28 bits; scaling[channel > 6] picks the per-group
// factor (presumably HOG planes vs LUV planes — confirm against Level).
// NOTE(review): the template header for this generic overload is not visible
// in this view (a rescale<true> specialization follows below).
383 __device__ __forceinline__ float rescale(const Level& level, Node& node)
385 uchar4& scaledRect = node.rect;
386 float relScale = level.relScale;
387 float farea = (scaledRect.z - scaledRect.x) * (scaledRect.w - scaledRect.y);
// Round each corner to the nearest integer at the new scale (mutates node).
390 scaledRect.x = __float2int_rn(relScale * scaledRect.x);
391 scaledRect.y = __float2int_rn(relScale * scaledRect.y);
392 scaledRect.z = __float2int_rn(relScale * scaledRect.z);
393 scaledRect.w = __float2int_rn(relScale * scaledRect.w);
395 float sarea = (scaledRect.z - scaledRect.x) * (scaledRect.w - scaledRect.y);
397 const float expected_new_area = farea * relScale * relScale;
// Guard: a rect that rounds to zero area gets a neutral correction of 1.
398 float approx = (sarea == 0)? 1: __fdividef(sarea, expected_new_area);
400 float rootThreshold = (node.threshold & 0x0FFFFFFFU) * approx * level.scaling[(node.threshold >> 28) > 6];
402 return rootThreshold;
// rescale specialization for the upscale path. Differences from the generic
// version: the rect appears to be size-encoded here (farea = z * w, i.e. z/w
// hold width/height rather than a bottom-right corner — confirm), and there
// is no zero-area guard on `approx`.
// NOTE(review): the `template<>` marker for this specialization is not
// visible in this view of the file.
406 __device__ __forceinline__ float rescale<true>(const Level& level, Node& node)
408 uchar4& scaledRect = node.rect;
409 float relScale = level.relScale;
410 float farea = scaledRect.z * scaledRect.w;
// Round every component to the nearest integer at the new scale (mutates node).
413 scaledRect.x = __float2int_rn(relScale * scaledRect.x);
414 scaledRect.y = __float2int_rn(relScale * scaledRect.y);
415 scaledRect.z = __float2int_rn(relScale * scaledRect.z);
416 scaledRect.w = __float2int_rn(relScale * scaledRect.w);
418 float sarea = scaledRect.z * scaledRect.w;
420 const float expected_new_area = farea * relScale * relScale;
421 float approx = __fdividef(sarea, expected_new_area);
// Same threshold packing as the generic overload: low 28 bits = threshold,
// top 4 bits = channel; scaling[channel > 6] picks the per-group factor.
423 float rootThreshold = (node.threshold & 0x0FFFFFFFU) * approx * level.scaling[(node.threshold >> 28) > 6];
425 return rootThreshold;
// Rectangle sum over the integral image texture thogluv using the four-corner
// trick. `area` is corner-encoded: (x, y) top-left, (z, w) bottom-right
// offsets relative to the (x, y) arguments.
// NOTE(review): the template header for this generic overload is not visible
// in this view (a get<true> specialization follows below).
429 __device__ __forceinline__ int get(int x, int y, uchar4 area)
431 int a = tex2D(thogluv, x + area.x, y + area.y);
432 int b = tex2D(thogluv, x + area.z, y + area.y);
433 int c = tex2D(thogluv, x + area.z, y + area.w);
434 int d = tex2D(thogluv, x + area.x, y + area.w);
// Integral-image identity: sum = a - b + c - d over the four corners.
436 return (a - b + c - d);
// get specialization for the upscale path: area.x/area.y are not applied, so
// the rect origin is presumably already folded into the x/y arguments and
// z/w act as width/height offsets (matching rescale<true>'s size encoding —
// confirm).
// NOTE(review): the `template<>` marker and any lines between the signature
// and the first fetch are not visible in this view of the file.
440 __device__ __forceinline__ int get<true>(int x, int y, uchar4 area)
445 int a = tex2D(thogluv, x, y);
446 int b = tex2D(thogluv, x + area.z, y);
447 int c = tex2D(thogluv, x + area.z, y + area.w);
448 int d = tex2D(thogluv, x, y + area.w);
// Integral-image identity: sum = a - b + c - d over the four corners.
450 return (a - b + c - d);
// 2D texture over the region-of-interest mask, sampled as float2 (one texel
// covers Policy::STA_Y mask bytes — see the roiCache fill in detect()).
453 texture<float2, cudaTextureType2D, cudaReadModeElementType> troi;
// Core soft-cascade evaluation. Geometry: one image column per blockIdx.x,
// Policy::STA_Y rows per block (threadIdx.y), one pyramid level per
// blockIdx.z; the Policy::WARP lanes (threadIdx.x) evaluate a stage's weak
// learners in parallel. isUp selects the upscale variants of rescale/get.
// NOTE(review): the template<bool isUp> method header line and the #endif
// lines of the architecture switches are not visible in this view.
455 template<typename Policy>
457 __device_inline__ void CascadeInvoker<Policy>::detect(Detection* objects, const uint ndetections, uint* ctr, const int downscales) const
459 const int y = blockIdx.y * blockDim.y + threadIdx.y;
460 const int x = blockIdx.x;
463 __shared__ Level level;
// One ROI byte per row of this block, loaded in a single float2-sized store.
466 __shared__ volatile char roiCache[Policy::STA_Y];
468 if (!threadIdx.y && !threadIdx.x)
469 ((float2*)roiCache)[threadIdx.x] = tex2D(troi, blockIdx.y, x);
// No __syncthreads is visible between the store above and this read —
// presumably relies on single-warp blocks / implicit warp synchrony; confirm.
// Rows masked out of the ROI are skipped entirely.
473 if (!roiCache[threadIdx.y]) return;
476 level = levels[downscales + blockIdx.z];
478 if(x >= level.workRect.x || y >= level.workRect.y) return;
// Stage range [st, stEnd) for this level's octave.
480 int st = level.octave * level.step;
481 const int stEnd = st + level.step;
// Row distance between stacked channel planes in the integral image.
483 const int hogluvStep = gridDim.y * Policy::STA_Y;
484 float confidence = 0.f;
485 for(; st < stEnd; st += Policy::WARP)
// Each lane evaluates one 3-node weak tree: root, then one of two children.
487 const int nId = (st + threadIdx.x) * 3;
489 Node node = nodes[nId];
491 float threshold = rescale<isUp>(level, node);
// Channel select: top 4 bits of node.threshold index the stacked plane.
492 int sum = get<isUp>(x, y + (node.threshold >> 28) * hogluvStep, node.rect);
494 int next = 1 + (int)(sum >= threshold);
496 node = nodes[nId + next];
497 threshold = rescale<isUp>(level, node);
498 sum = get<isUp>(x, y + (node.threshold >> 28) * hogluvStep, node.rect);
// Leaf index within the 4 leaves of this tree.
500 const int lShift = (next - 1) * 2 + (int)(sum >= threshold);
501 float impact = leaves[(st + threadIdx.x) * 4 + lShift];
// Inclusive warp scan so each lane sees the running confidence.
503 PrefixSum<Policy>::apply(impact);
// Early rejection: if any lane drops below its stage threshold, jump past
// stEnd (+2048) so the loop terminates and no detection is emitted.
// Legacy mask-less __any/__shfl — pre-Volta warp model.
505 #if __CUDA_ARCH__ >= 120
506 if(__any((confidence + impact <= stages[(st + threadIdx.x)]))) st += 2048;
// Broadcast lane 31's scanned total to the whole warp.
508 #if __CUDA_ARCH__ >= 300
509 impact = __shfl(impact, 31);
512 confidence += impact;
// Lane 0 emits a detection only if the full cascade was passed (st == stEnd,
// i.e. the early-out never fired) with non-negative confidence. atomicInc
// wraps at ndetections, bounding the output buffer.
515 if(!threadIdx.x && st == stEnd && ((confidence - FLT_EPSILON) >= 0))
517 int idx = atomicInc(ctr, ndetections);
// Map the shrunk-grid position back to input-image pixels.
518 objects[idx] = Detection(__float2int_rn(x * Policy::SHRINKAGE),
519 __float2int_rn(y * Policy::SHRINKAGE), level.objSize.x, level.objSize.y, confidence);
// Thin kernel wrapper: CascadeInvoker is passed by value (copied to the
// device at launch) and forwards to its detect<isUp> device method.
523 template<typename Policy, bool isUp>
524 __global__ void soft_cascade(const CascadeInvoker<Policy> invoker, Detection* objects, const uint n, uint* ctr, const int downs)
526 invoker.template detect<isUp>(objects, n, ctr, downs);
// Host entry point: binds the integral-channel and ROI textures, then runs
// the cascade in two passes — downscaled levels (isUp = false, blockIdx.z
// spans the first `downscales` levels) followed by the remaining upscaled
// levels (isUp = true). Output layout in `objects` row 0: slot 0 holds the
// detection counter (uint), Detection records start immediately after it.
// NOTE(review): the declarations of fw/fh (work-rect size) and the guard
// around the second launch are not visible in this view; `scales` is
// presumably a member of CascadeInvoker. min(38, scales) caps the level
// count. Blocking: full device sync at the end despite the stream parameter.
529 template<typename Policy>
530 void CascadeInvoker<Policy>::operator()(const cv::cuda::PtrStepSzb& roi, const cv::cuda::PtrStepSzi& hogluv,
531 cv::cuda::PtrStepSz<uchar4> objects, const int downscales, const cudaStream_t& stream) const
536 dim3 grid(fw, fh / Policy::STA_Y, downscales);
538 uint* ctr = (uint*)(objects.ptr(0));
539 Detection* det = ((Detection*)objects.ptr(0)) + 1;
540 uint max_det = objects.cols / sizeof(Detection);
542 cudaChannelFormatDesc desc = cudaCreateChannelDesc<int>();
543 cudaSafeCall( cudaBindTexture2D(0, thogluv, hogluv.data, desc, hogluv.cols, hogluv.rows, hogluv.step));
// ROI texture is float2-packed: width shrinks by STA_Y (see roiCache fill).
545 cudaChannelFormatDesc desc_roi = cudaCreateChannelDesc<typename Policy::roi_type>();
546 cudaSafeCall( cudaBindTexture2D(0, troi, roi.data, desc_roi, roi.cols / Policy::STA_Y, roi.rows, roi.step));
// Copy *this by value so the kernel receives a device-safe snapshot.
548 const CascadeInvoker<Policy> inv = *this;
550 soft_cascade<Policy, false><<<grid, Policy::block(), 0, stream>>>(inv, det, max_det, ctr, 0);
551 cudaSafeCall( cudaGetLastError());
// Second pass: the remaining (upscaled) levels, offset by `downscales`.
553 grid = dim3(fw, fh / Policy::STA_Y, min(38, scales) - downscales);
554 soft_cascade<Policy, true><<<grid, Policy::block(), 0, stream>>>(inv, det, max_det, ctr, downscales);
558 cudaSafeCall( cudaGetLastError());
559 cudaSafeCall( cudaDeviceSynchronize());
// Explicit instantiation for the GK107 (Kepler) policy so the definition
// above is emitted into this translation unit for external callers.
563 template void CascadeInvoker<GK107PolicyX4>::operator()(const cv::cuda::PtrStepSzb& roi, const cv::cuda::PtrStepSzi& hogluv,
564 cv::cuda::PtrStepSz<uchar4> objects, const int downscales, const cudaStream_t& stream) const;