1 /*M///////////////////////////////////////////////////////////////////////////////////////
\r
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
\r
5 // By downloading, copying, installing or using the software you agree to this license.
\r
6 // If you do not agree to this license, do not download, install,
\r
7 // copy or use the software.
\r
10 // License Agreement
\r
11 // For Open Source Computer Vision Library
\r
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
\r
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
\r
15 // Third party copyrights are property of their respective owners.
\r
17 // Redistribution and use in source and binary forms, with or without modification,
\r
18 // are permitted provided that the following conditions are met:
\r
20 // * Redistribution's of source code must retain the above copyright notice,
\r
21 // this list of conditions and the following disclaimer.
\r
23 // * Redistribution's in binary form must reproduce the above copyright notice,
\r
24 // this list of conditions and the following disclaimer in the documentation
\r
25 // and/or other materials provided with the distribution.
\r
27 // * The name of the copyright holders may not be used to endorse or promote products
\r
28 // derived from this software without specific prior written permission.
\r
30 // This software is provided by the copyright holders and contributors "as is" and
\r
31 // any express or implied warranties, including, but not limited to, the implied
\r
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
\r
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
\r
34 // indirect, incidental, special, exemplary, or consequential damages
\r
35 // (including, but not limited to, procurement of substitute goods or services;
\r
36 // loss of use, data, or profits; or business interruption) however caused
\r
37 // and on any theory of liability, whether in contract, strict liability,
\r
38 // or tort (including negligence or otherwise) arising in any way out of
\r
39 // the use of this software, even if advised of the possibility of such damage.
\r
#ifndef __OPENCV_GPU_SATURATE_CAST_HPP__
#define __OPENCV_GPU_SATURATE_CAST_HPP__

#include <climits>

#include "internal_shared.hpp"
namespace cv { namespace gpu { namespace device
{
    // saturate_cast<D>(v) converts v to destination type D, clamping the
    // value to D's representable range instead of letting the usual C++
    // narrowing conversion wrap around. Floating-point sources are first
    // rounded to the nearest integer via the *_rn device intrinsics
    // (round-to-nearest-even), then clamped.
    //
    // The primary templates below are plain casts: they are used when the
    // destination type can represent every source value (or when no clamping
    // specialization is provided), so no range check is required.
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(uchar v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(schar v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(ushort v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(short v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(uint v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(int v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(float v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(double v) { return _Tp(v); }

    // ---- to uchar: clamp to [0, UCHAR_MAX] ----

    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(schar v)
    {
        return (uchar) ::max((int)v, 0);
    }
    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(ushort v)
    {
        return (uchar) ::min((uint)v, (uint)UCHAR_MAX);
    }
    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(int v)
    {
        // One unsigned compare checks both bounds: (uint)v <= UCHAR_MAX is
        // true exactly for v in [0, UCHAR_MAX]; otherwise clamp to the
        // nearer bound.
        return (uchar)((uint)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0);
    }
    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(uint v)
    {
        return (uchar) ::min(v, (uint)UCHAR_MAX);
    }
    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(short v)
    {
        // BUGFIX: route through the signed-int overload. The previous code
        // converted through (uint)v, which wrapped negative shorts to huge
        // unsigned values and clamped them to UCHAR_MAX instead of 0.
        return saturate_cast<uchar>((int)v);
    }
    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(float v)
    {
        int iv = __float2int_rn(v); // round to nearest even, then clamp
        return saturate_cast<uchar>(iv);
    }
    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(double v)
    {
    #if __CUDA_ARCH__ >= 130
        int iv = __double2int_rn(v);
        return saturate_cast<uchar>(iv);
    #else
        // Devices below SM 1.3 have no double-precision support: demote to float.
        return saturate_cast<uchar>((float)v);
    #endif
    }

    // ---- to schar: clamp to [SCHAR_MIN, SCHAR_MAX] ----

    template<> __device__ __forceinline__ schar saturate_cast<schar>(uchar v)
    {
        return (schar) ::min((int)v, SCHAR_MAX);
    }
    template<> __device__ __forceinline__ schar saturate_cast<schar>(ushort v)
    {
        return (schar) ::min((uint)v, (uint)SCHAR_MAX);
    }
    template<> __device__ __forceinline__ schar saturate_cast<schar>(int v)
    {
        // Shift the valid range [SCHAR_MIN, SCHAR_MAX] onto [0, UCHAR_MAX]
        // so a single unsigned compare tests both bounds at once.
        return (schar)((uint)(v-SCHAR_MIN) <= (uint)UCHAR_MAX ? v : v > 0 ? SCHAR_MAX : SCHAR_MIN);
    }
    template<> __device__ __forceinline__ schar saturate_cast<schar>(short v)
    {
        return saturate_cast<schar>((int)v);
    }
    template<> __device__ __forceinline__ schar saturate_cast<schar>(uint v)
    {
        return (schar) ::min(v, (uint)SCHAR_MAX);
    }
    template<> __device__ __forceinline__ schar saturate_cast<schar>(float v)
    {
        int iv = __float2int_rn(v);
        return saturate_cast<schar>(iv);
    }
    template<> __device__ __forceinline__ schar saturate_cast<schar>(double v)
    {
    #if __CUDA_ARCH__ >= 130
        int iv = __double2int_rn(v);
        return saturate_cast<schar>(iv);
    #else
        return saturate_cast<schar>((float)v);
    #endif
    }

    // ---- to ushort: clamp to [0, USHRT_MAX] ----

    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(schar v)
    {
        return (ushort) ::max((int)v, 0);
    }
    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(short v)
    {
        return (ushort) ::max((int)v, 0);
    }
    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(int v)
    {
        return (ushort)((uint)v <= (uint)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0);
    }
    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(uint v)
    {
        return (ushort) ::min(v, (uint)USHRT_MAX);
    }
    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(float v)
    {
        int iv = __float2int_rn(v);
        return saturate_cast<ushort>(iv);
    }
    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(double v)
    {
    #if __CUDA_ARCH__ >= 130
        int iv = __double2int_rn(v);
        return saturate_cast<ushort>(iv);
    #else
        return saturate_cast<ushort>((float)v);
    #endif
    }

    // ---- to short: clamp to [SHRT_MIN, SHRT_MAX] ----

    template<> __device__ __forceinline__ short saturate_cast<short>(ushort v)
    {
        return (short) ::min((int)v, SHRT_MAX);
    }
    template<> __device__ __forceinline__ short saturate_cast<short>(int v)
    {
        // Same single-compare clamp trick as the schar/int specialization.
        return (short)((uint)(v - SHRT_MIN) <= (uint)USHRT_MAX ? v : v > 0 ? SHRT_MAX : SHRT_MIN);
    }
    template<> __device__ __forceinline__ short saturate_cast<short>(uint v)
    {
        return (short) ::min(v, (uint)SHRT_MAX);
    }
    template<> __device__ __forceinline__ short saturate_cast<short>(float v)
    {
        int iv = __float2int_rn(v);
        return saturate_cast<short>(iv);
    }
    template<> __device__ __forceinline__ short saturate_cast<short>(double v)
    {
    #if __CUDA_ARCH__ >= 130
        int iv = __double2int_rn(v);
        return saturate_cast<short>(iv);
    #else
        return saturate_cast<short>((float)v);
    #endif
    }

    // ---- to int / uint: rounding via the device conversion intrinsics ----

    template<> __device__ __forceinline__ int saturate_cast<int>(float v)
    {
        return __float2int_rn(v);
    }
    template<> __device__ __forceinline__ int saturate_cast<int>(double v)
    {
    #if __CUDA_ARCH__ >= 130
        return __double2int_rn(v);
    #else
        return saturate_cast<int>((float)v);
    #endif
    }
    template<> __device__ __forceinline__ uint saturate_cast<uint>(float v)
    {
        return __float2uint_rn(v);
    }
    template<> __device__ __forceinline__ uint saturate_cast<uint>(double v)
    {
    #if __CUDA_ARCH__ >= 130
        return __double2uint_rn(v);
    #else
        return saturate_cast<uint>((float)v);
    #endif
    }
}}} // namespace cv { namespace gpu { namespace device

#endif /* __OPENCV_GPU_SATURATE_CAST_HPP__ */