/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_HAL_INTRIN_NEON_HPP
#define OPENCV_HAL_INTRIN_NEON_HPP

#include <algorithm>
#include "opencv2/core/utility.hpp"

namespace cv
{

//! @cond IGNORED

CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN

#define CV_SIMD128 1
#if defined(__aarch64__)
#define CV_SIMD128_64F 1
#else
#define CV_SIMD128_64F 0
#endif

#if CV_SIMD128_64F
#define OPENCV_HAL_IMPL_NEON_REINTERPRET(_Tpv, suffix) \
template <typename T> static inline \
_Tpv vreinterpretq_##suffix##_f64(T a) { return (_Tpv) a; } \
template <typename T> static inline \
float64x2_t vreinterpretq_f64_##suffix(T a) { return (float64x2_t) a; }
OPENCV_HAL_IMPL_NEON_REINTERPRET(uint8x16_t, u8)
OPENCV_HAL_IMPL_NEON_REINTERPRET(int8x16_t, s8)
OPENCV_HAL_IMPL_NEON_REINTERPRET(uint16x8_t, u16)
OPENCV_HAL_IMPL_NEON_REINTERPRET(int16x8_t, s16)
OPENCV_HAL_IMPL_NEON_REINTERPRET(uint32x4_t, u32)
OPENCV_HAL_IMPL_NEON_REINTERPRET(int32x4_t, s32)
OPENCV_HAL_IMPL_NEON_REINTERPRET(uint64x2_t, u64)
OPENCV_HAL_IMPL_NEON_REINTERPRET(int64x2_t, s64)
OPENCV_HAL_IMPL_NEON_REINTERPRET(float32x4_t, f32)
#endif

struct v_uint8x16
{
    typedef uchar lane_type;
    enum { nlanes = 16 };

    v_uint8x16() {}
    explicit v_uint8x16(uint8x16_t v) : val(v) {}
    v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7,
               uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15)
    {
        uchar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15};
        val = vld1q_u8(v);
    }
    uchar get0() const
    {
        return vgetq_lane_u8(val, 0);
    }

    uint8x16_t val;
};

struct v_int8x16
{
    typedef schar lane_type;
    enum { nlanes = 16 };

    v_int8x16() {}
    explicit v_int8x16(int8x16_t v) : val(v) {}
    v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7,
              schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15)
    {
        schar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15};
        val = vld1q_s8(v);
    }
    schar get0() const
    {
        return vgetq_lane_s8(val, 0);
    }

    int8x16_t val;
};

struct v_uint16x8
{
    typedef ushort lane_type;
    enum { nlanes = 8 };

    v_uint16x8() {}
    explicit v_uint16x8(uint16x8_t v) : val(v) {}
    v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7)
    {
        ushort v[] = {v0, v1, v2, v3, v4, v5, v6, v7};
        val = vld1q_u16(v);
    }
    ushort get0() const
    {
        return vgetq_lane_u16(val, 0);
    }

    uint16x8_t val;
};

struct v_int16x8
{
    typedef short lane_type;
    enum { nlanes = 8 };

    v_int16x8() {}
    explicit v_int16x8(int16x8_t v) : val(v) {}
    v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7)
    {
        short v[] = {v0, v1, v2, v3, v4, v5, v6, v7};
        val = vld1q_s16(v);
    }
    short get0() const
    {
        return vgetq_lane_s16(val, 0);
    }

    int16x8_t val;
};

struct v_uint32x4
{
    typedef unsigned lane_type;
    enum { nlanes = 4 };

    v_uint32x4() {}
    explicit v_uint32x4(uint32x4_t v) : val(v) {}
    v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3)
    {
        unsigned v[] = {v0, v1, v2, v3};
        val = vld1q_u32(v);
    }
    unsigned get0() const
    {
        return vgetq_lane_u32(val, 0);
    }

    uint32x4_t val;
};

struct v_int32x4
{
    typedef int lane_type;
    enum { nlanes = 4 };

    v_int32x4() {}
    explicit v_int32x4(int32x4_t v) : val(v) {}
    v_int32x4(int v0, int v1, int v2, int v3)
    {
        int v[] = {v0, v1, v2, v3};
        val = vld1q_s32(v);
    }
    int get0() const
    {
        return vgetq_lane_s32(val, 0);
    }
    int32x4_t val;
};

struct v_float32x4
{
    typedef float lane_type;
    enum { nlanes = 4 };

    v_float32x4() {}
    explicit v_float32x4(float32x4_t v) : val(v) {}
    v_float32x4(float v0, float v1, float v2, float v3)
    {
        float v[] = {v0, v1, v2, v3};
        val = vld1q_f32(v);
    }
    float get0() const
    {
        return vgetq_lane_f32(val, 0);
    }
    float32x4_t val;
};

struct v_uint64x2
{
    typedef uint64 lane_type;
    enum { nlanes = 2 };

    v_uint64x2() {}
    explicit v_uint64x2(uint64x2_t v) : val(v) {}
    v_uint64x2(uint64 v0, uint64 v1)
    {
        uint64 v[] = {v0, v1};
        val = vld1q_u64(v);
    }
    uint64 get0() const
    {
        return vgetq_lane_u64(val, 0);
    }
    uint64x2_t val;
};

struct v_int64x2
{
    typedef int64 lane_type;
    enum { nlanes = 2 };

    v_int64x2() {}
    explicit v_int64x2(int64x2_t v) : val(v) {}
    v_int64x2(int64 v0, int64 v1)
    {
        int64 v[] = {v0, v1};
        val = vld1q_s64(v);
    }
    int64 get0() const
    {
        return vgetq_lane_s64(val, 0);
    }
    int64x2_t val;
};

#if CV_SIMD128_64F
struct v_float64x2
{
    typedef double lane_type;
    enum { nlanes = 2 };

    v_float64x2() {}
    explicit v_float64x2(float64x2_t v) : val(v) {}
    v_float64x2(double v0, double v1)
    {
        double v[] = {v0, v1};
        val = vld1q_f64(v);
    }
    double get0() const
    {
        return vgetq_lane_f64(val, 0);
    }
    float64x2_t val;
};
#endif

#if CV_FP16
// Workaround for old compilers
template <typename T> static inline int16x4_t vreinterpret_s16_f16(T a)
{ return (int16x4_t)a; }
template <typename T> static inline float16x4_t vreinterpret_f16_s16(T a)
{ return (float16x4_t)a; }
template <typename T> static inline float16x4_t vld1_f16(const T* ptr)
{ return vreinterpret_f16_s16(vld1_s16((const short*)ptr)); }
template <typename T> static inline void vst1_f16(T* ptr, float16x4_t a)
{ vst1_s16((short*)ptr, vreinterpret_s16_f16(a)); }

struct v_float16x4
{
    typedef short lane_type;
    enum { nlanes = 4 };

    v_float16x4() {}
    explicit v_float16x4(float16x4_t v) : val(v) {}
    v_float16x4(short v0, short v1, short v2, short v3)
    {
        short v[] = {v0, v1, v2, v3};
        val = vld1_f16(v);
    }
    short get0() const
    {
        return vget_lane_s16(vreinterpret_s16_f16(val), 0);
    }
    float16x4_t val;
};
#endif

#define OPENCV_HAL_IMPL_NEON_INIT(_Tpv, _Tp, suffix) \
inline v_##_Tpv v_setzero_##suffix() { return v_##_Tpv(vdupq_n_##suffix((_Tp)0)); } \
inline v_##_Tpv v_setall_##suffix(_Tp v) { return v_##_Tpv(vdupq_n_##suffix(v)); } \
inline _Tpv##_t vreinterpretq_##suffix##_##suffix(_Tpv##_t v) { return v; } \
inline v_uint8x16 v_reinterpret_as_u8(const v_##_Tpv& v) { return v_uint8x16(vreinterpretq_u8_##suffix(v.val)); } \
inline v_int8x16 v_reinterpret_as_s8(const v_##_Tpv& v) { return v_int8x16(vreinterpretq_s8_##suffix(v.val)); } \
inline v_uint16x8 v_reinterpret_as_u16(const v_##_Tpv& v) { return v_uint16x8(vreinterpretq_u16_##suffix(v.val)); } \
inline v_int16x8 v_reinterpret_as_s16(const v_##_Tpv& v) { return v_int16x8(vreinterpretq_s16_##suffix(v.val)); } \
inline v_uint32x4 v_reinterpret_as_u32(const v_##_Tpv& v) { return v_uint32x4(vreinterpretq_u32_##suffix(v.val)); } \
inline v_int32x4 v_reinterpret_as_s32(const v_##_Tpv& v) { return v_int32x4(vreinterpretq_s32_##suffix(v.val)); } \
inline v_uint64x2 v_reinterpret_as_u64(const v_##_Tpv& v) { return v_uint64x2(vreinterpretq_u64_##suffix(v.val)); } \
inline v_int64x2 v_reinterpret_as_s64(const v_##_Tpv& v) { return v_int64x2(vreinterpretq_s64_##suffix(v.val)); } \
inline v_float32x4 v_reinterpret_as_f32(const v_##_Tpv& v) { return v_float32x4(vreinterpretq_f32_##suffix(v.val)); }

OPENCV_HAL_IMPL_NEON_INIT(uint8x16, uchar, u8)
OPENCV_HAL_IMPL_NEON_INIT(int8x16, schar, s8)
OPENCV_HAL_IMPL_NEON_INIT(uint16x8, ushort, u16)
OPENCV_HAL_IMPL_NEON_INIT(int16x8, short, s16)
OPENCV_HAL_IMPL_NEON_INIT(uint32x4, unsigned, u32)
OPENCV_HAL_IMPL_NEON_INIT(int32x4, int, s32)
OPENCV_HAL_IMPL_NEON_INIT(uint64x2, uint64, u64)
OPENCV_HAL_IMPL_NEON_INIT(int64x2, int64, s64)
OPENCV_HAL_IMPL_NEON_INIT(float32x4, float, f32)
#if CV_SIMD128_64F
#define OPENCV_HAL_IMPL_NEON_INIT_64(_Tpv, suffix) \
inline v_float64x2 v_reinterpret_as_f64(const v_##_Tpv& v) { return v_float64x2(vreinterpretq_f64_##suffix(v.val)); }
OPENCV_HAL_IMPL_NEON_INIT(float64x2, double, f64)
OPENCV_HAL_IMPL_NEON_INIT_64(uint8x16, u8)
OPENCV_HAL_IMPL_NEON_INIT_64(int8x16, s8)
OPENCV_HAL_IMPL_NEON_INIT_64(uint16x8, u16)
OPENCV_HAL_IMPL_NEON_INIT_64(int16x8, s16)
OPENCV_HAL_IMPL_NEON_INIT_64(uint32x4, u32)
OPENCV_HAL_IMPL_NEON_INIT_64(int32x4, s32)
OPENCV_HAL_IMPL_NEON_INIT_64(uint64x2, u64)
OPENCV_HAL_IMPL_NEON_INIT_64(int64x2, s64)
OPENCV_HAL_IMPL_NEON_INIT_64(float32x4, f32)
OPENCV_HAL_IMPL_NEON_INIT_64(float64x2, f64)
#endif
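
// Usage sketch (illustrative only, not part of the upstream API surface): the
// types above are thin value-semantics wrappers over raw NEON registers, and
// the init/reinterpret helpers map directly onto vdupq_n_* / vreinterpretq_*:
//   v_float32x4 a(1.f, 2.f, 3.f, 4.f);          // lane-wise constructor
//   v_float32x4 b = v_setall_f32(0.5f);         // broadcast one scalar
//   v_int32x4 bits = v_reinterpret_as_s32(b);   // bit pattern, no conversion
//   float f0 = a.get0();                        // first lane: 1.f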

#define OPENCV_HAL_IMPL_NEON_PACK(_Tpvec, _Tp, hreg, suffix, _Tpwvec, wsuffix, pack, op) \
inline _Tpvec v_##pack(const _Tpwvec& a, const _Tpwvec& b) \
{ \
    hreg a1 = vqmov##op##_##wsuffix(a.val), b1 = vqmov##op##_##wsuffix(b.val); \
    return _Tpvec(vcombine_##suffix(a1, b1)); \
} \
inline void v_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
{ \
    hreg a1 = vqmov##op##_##wsuffix(a.val); \
    vst1_##suffix(ptr, a1); \
} \
template<int n> inline \
_Tpvec v_rshr_##pack(const _Tpwvec& a, const _Tpwvec& b) \
{ \
    hreg a1 = vqrshr##op##_n_##wsuffix(a.val, n); \
    hreg b1 = vqrshr##op##_n_##wsuffix(b.val, n); \
    return _Tpvec(vcombine_##suffix(a1, b1)); \
} \
template<int n> inline \
void v_rshr_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
{ \
    hreg a1 = vqrshr##op##_n_##wsuffix(a.val, n); \
    vst1_##suffix(ptr, a1); \
}

OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_uint16x8, u16, pack, n)
OPENCV_HAL_IMPL_NEON_PACK(v_int8x16, schar, int8x8_t, s8, v_int16x8, s16, pack, n)
OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_uint32x4, u32, pack, n)
OPENCV_HAL_IMPL_NEON_PACK(v_int16x8, short, int16x4_t, s16, v_int32x4, s32, pack, n)
OPENCV_HAL_IMPL_NEON_PACK(v_uint32x4, unsigned, uint32x2_t, u32, v_uint64x2, u64, pack, n)
OPENCV_HAL_IMPL_NEON_PACK(v_int32x4, int, int32x2_t, s32, v_int64x2, s64, pack, n)

OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_int16x8, s16, pack_u, un)
OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_int32x4, s32, pack_u, un)
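
// Usage sketch (illustrative only): v_pack narrows two wide vectors into one
// narrow vector with saturation (vqmovn), v_pack_u narrows signed to unsigned
// (vqmovun), and the v_rshr_ variants apply a rounding right shift by n first:
//   v_int16x8 w0 = v_setall_s16(300), w1 = v_setall_s16(-5);
//   v_uint8x16 p = v_pack_u(w0, w1);      // saturates: 300 -> 255, -5 -> 0
//   v_int8x16 q = v_rshr_pack<4>(w0, w1); // (300 + 8) >> 4 = 19, (-5 + 8) >> 4 = 0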

inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
                            const v_float32x4& m1, const v_float32x4& m2,
                            const v_float32x4& m3)
{
    float32x2_t vl = vget_low_f32(v.val), vh = vget_high_f32(v.val);
    float32x4_t res = vmulq_lane_f32(m0.val, vl, 0);
    res = vmlaq_lane_f32(res, m1.val, vl, 1);
    res = vmlaq_lane_f32(res, m2.val, vh, 0);
    res = vmlaq_lane_f32(res, m3.val, vh, 1);
    return v_float32x4(res);
}
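
// Semantics sketch (illustrative only): with m0..m3 as the columns of a 4x4
// matrix M, v_matmul returns M*v, i.e. m0*v0 + m1*v1 + m2*v2 + m3*v3; with the
// identity columns the input comes back unchanged:
//   v_float32x4 r = v_matmul(v, v_float32x4(1, 0, 0, 0), v_float32x4(0, 1, 0, 0),
//                            v_float32x4(0, 0, 1, 0), v_float32x4(0, 0, 0, 1));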

#define OPENCV_HAL_IMPL_NEON_BIN_OP(bin_op, _Tpvec, intrin) \
inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
{ \
    return _Tpvec(intrin(a.val, b.val)); \
} \
inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \
{ \
    a.val = intrin(a.val, b.val); \
    return a; \
}

OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint8x16, vqaddq_u8)
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint8x16, vqsubq_u8)
OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int8x16, vqaddq_s8)
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int8x16, vqsubq_s8)
OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint16x8, vqaddq_u16)
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint16x8, vqsubq_u16)
OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_uint16x8, vmulq_u16)
OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int16x8, vqaddq_s16)
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int16x8, vqsubq_s16)
OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_int16x8, vmulq_s16)
OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int32x4, vaddq_s32)
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int32x4, vsubq_s32)
OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_int32x4, vmulq_s32)
OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint32x4, vaddq_u32)
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint32x4, vsubq_u32)
OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_uint32x4, vmulq_u32)
OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_float32x4, vaddq_f32)
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_float32x4, vsubq_f32)
OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_float32x4, vmulq_f32)
OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int64x2, vaddq_s64)
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int64x2, vsubq_s64)
OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint64x2, vaddq_u64)
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint64x2, vsubq_u64)
#if CV_SIMD128_64F
OPENCV_HAL_IMPL_NEON_BIN_OP(/, v_float32x4, vdivq_f32)
OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_float64x2, vaddq_f64)
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_float64x2, vsubq_f64)
OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_float64x2, vmulq_f64)
OPENCV_HAL_IMPL_NEON_BIN_OP(/, v_float64x2, vdivq_f64)
#else
inline v_float32x4 operator / (const v_float32x4& a, const v_float32x4& b)
{
    float32x4_t reciprocal = vrecpeq_f32(b.val);
    reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal);
    reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal);
    return v_float32x4(vmulq_f32(a.val, reciprocal));
}
inline v_float32x4& operator /= (v_float32x4& a, const v_float32x4& b)
{
    float32x4_t reciprocal = vrecpeq_f32(b.val);
    reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal);
    reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal);
    a.val = vmulq_f32(a.val, reciprocal);
    return a;
}
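
// Note on this 32-bit path: ARMv7 NEON has no float division instruction, so
// 1/b is approximated. vrecpeq_f32 gives a rough estimate e0, and each
// vrecpsq_f32 step performs one Newton-Raphson refinement
//   e_{n+1} = e_n * (2 - b * e_n),
// roughly doubling the accurate bits; two steps get close to single precision,
// but the result is an approximation rather than an IEEE-exact quotient.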
#endif

inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b,
                         v_int32x4& c, v_int32x4& d)
{
    c.val = vmull_s16(vget_low_s16(a.val), vget_low_s16(b.val));
    d.val = vmull_s16(vget_high_s16(a.val), vget_high_s16(b.val));
}

inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b,
                         v_uint32x4& c, v_uint32x4& d)
{
    c.val = vmull_u16(vget_low_u16(a.val), vget_low_u16(b.val));
    d.val = vmull_u16(vget_high_u16(a.val), vget_high_u16(b.val));
}

inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b,
                         v_uint64x2& c, v_uint64x2& d)
{
    c.val = vmull_u32(vget_low_u32(a.val), vget_low_u32(b.val));
    d.val = vmull_u32(vget_high_u32(a.val), vget_high_u32(b.val));
}

inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b)
{
    int32x4_t c = vmull_s16(vget_low_s16(a.val), vget_low_s16(b.val));
    int32x4_t d = vmull_s16(vget_high_s16(a.val), vget_high_s16(b.val));
    int32x4x2_t cd = vuzpq_s32(c, d);
    return v_int32x4(vaddq_s32(cd.val[0], cd.val[1]));
}
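
// Semantics sketch (illustrative only): v_dotprod multiplies corresponding
// 16-bit lanes at 32-bit width and sums adjacent products, so for
// a = {a0..a7}, b = {b0..b7} it returns
//   { a0*b0 + a1*b1, a2*b2 + a3*b3, a4*b4 + a5*b5, a6*b6 + a7*b7 }
// with no risk of overflow in the 16x16 -> 32-bit multiplies themselves.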

#define OPENCV_HAL_IMPL_NEON_LOGIC_OP(_Tpvec, suffix) \
    OPENCV_HAL_IMPL_NEON_BIN_OP(&, _Tpvec, vandq_##suffix) \
    OPENCV_HAL_IMPL_NEON_BIN_OP(|, _Tpvec, vorrq_##suffix) \
    OPENCV_HAL_IMPL_NEON_BIN_OP(^, _Tpvec, veorq_##suffix) \
    inline _Tpvec operator ~ (const _Tpvec& a) \
    { \
        return _Tpvec(vreinterpretq_##suffix##_u8(vmvnq_u8(vreinterpretq_u8_##suffix(a.val)))); \
    }

OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint8x16, u8)
OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int8x16, s8)
OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint16x8, u16)
OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int16x8, s16)
OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint32x4, u32)
OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int32x4, s32)
OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint64x2, u64)
OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int64x2, s64)

#define OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(bin_op, intrin) \
inline v_float32x4 operator bin_op (const v_float32x4& a, const v_float32x4& b) \
{ \
    return v_float32x4(vreinterpretq_f32_s32(intrin(vreinterpretq_s32_f32(a.val), vreinterpretq_s32_f32(b.val)))); \
} \
inline v_float32x4& operator bin_op##= (v_float32x4& a, const v_float32x4& b) \
{ \
    a.val = vreinterpretq_f32_s32(intrin(vreinterpretq_s32_f32(a.val), vreinterpretq_s32_f32(b.val))); \
    return a; \
}

OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(&, vandq_s32)
OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(|, vorrq_s32)
OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(^, veorq_s32)

inline v_float32x4 operator ~ (const v_float32x4& a)
{
    return v_float32x4(vreinterpretq_f32_s32(vmvnq_s32(vreinterpretq_s32_f32(a.val))));
}

#if CV_SIMD128_64F
inline v_float32x4 v_sqrt(const v_float32x4& x)
{
    return v_float32x4(vsqrtq_f32(x.val));
}

inline v_float32x4 v_invsqrt(const v_float32x4& x)
{
    v_float32x4 one = v_setall_f32(1.0f);
    return one / v_sqrt(x);
}
#else
inline v_float32x4 v_sqrt(const v_float32x4& x)
{
    float32x4_t x1 = vmaxq_f32(x.val, vdupq_n_f32(FLT_MIN));
    float32x4_t e = vrsqrteq_f32(x1);
    e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x1, e), e), e);
    e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x1, e), e), e);
    return v_float32x4(vmulq_f32(x.val, e));
}

inline v_float32x4 v_invsqrt(const v_float32x4& x)
{
    float32x4_t e = vrsqrteq_f32(x.val);
    e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x.val, e), e), e);
    e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x.val, e), e), e);
    return v_float32x4(e);
}
#endif
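
// Note on the 32-bit path: vrsqrteq_f32 estimates 1/sqrt(x) and each
// vrsqrtsq_f32 step performs one Newton-Raphson refinement,
//   e_{n+1} = e_n * (3 - x * e_n^2) / 2,
// written here as e = vrsqrts(x*e, e) * e. v_sqrt then uses
// sqrt(x) = x * (1/sqrt(x)); clamping the input to FLT_MIN keeps a zero input
// from producing 0 * Inf = NaN.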

#define OPENCV_HAL_IMPL_NEON_ABS(_Tpuvec, _Tpsvec, usuffix, ssuffix) \
inline _Tpuvec v_abs(const _Tpsvec& a) { return v_reinterpret_as_##usuffix(_Tpsvec(vabsq_##ssuffix(a.val))); }

OPENCV_HAL_IMPL_NEON_ABS(v_uint8x16, v_int8x16, u8, s8)
OPENCV_HAL_IMPL_NEON_ABS(v_uint16x8, v_int16x8, u16, s16)
OPENCV_HAL_IMPL_NEON_ABS(v_uint32x4, v_int32x4, u32, s32)

inline v_float32x4 v_abs(v_float32x4 x)
{ return v_float32x4(vabsq_f32(x.val)); }

#if CV_SIMD128_64F
#define OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(bin_op, intrin) \
inline v_float64x2 operator bin_op (const v_float64x2& a, const v_float64x2& b) \
{ \
    return v_float64x2(vreinterpretq_f64_s64(intrin(vreinterpretq_s64_f64(a.val), vreinterpretq_s64_f64(b.val)))); \
} \
inline v_float64x2& operator bin_op##= (v_float64x2& a, const v_float64x2& b) \
{ \
    a.val = vreinterpretq_f64_s64(intrin(vreinterpretq_s64_f64(a.val), vreinterpretq_s64_f64(b.val))); \
    return a; \
}

OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(&, vandq_s64)
OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(|, vorrq_s64)
OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(^, veorq_s64)

inline v_float64x2 operator ~ (const v_float64x2& a)
{
    return v_float64x2(vreinterpretq_f64_s32(vmvnq_s32(vreinterpretq_s32_f64(a.val))));
}

inline v_float64x2 v_sqrt(const v_float64x2& x)
{
    return v_float64x2(vsqrtq_f64(x.val));
}

inline v_float64x2 v_invsqrt(const v_float64x2& x)
{
    v_float64x2 one = v_setall_f64(1.0);
    return one / v_sqrt(x);
}

inline v_float64x2 v_abs(v_float64x2 x)
{ return v_float64x2(vabsq_f64(x.val)); }
#endif

// TODO: exp, log, sin, cos

#define OPENCV_HAL_IMPL_NEON_BIN_FUNC(_Tpvec, func, intrin) \
inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \
{ \
    return _Tpvec(intrin(a.val, b.val)); \
}

OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_min, vminq_u8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_max, vmaxq_u8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_min, vminq_s8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_max, vmaxq_s8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_min, vminq_u16)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_max, vmaxq_u16)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_min, vminq_s16)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_max, vmaxq_s16)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_min, vminq_u32)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_max, vmaxq_u32)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int32x4, v_min, vminq_s32)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int32x4, v_max, vmaxq_s32)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_min, vminq_f32)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_max, vmaxq_f32)
#if CV_SIMD128_64F
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_min, vminq_f64)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_max, vmaxq_f64)
#endif

#if CV_SIMD128_64F
inline int64x2_t vmvnq_s64(int64x2_t a)
{
    int64x2_t vx = vreinterpretq_s64_u32(vdupq_n_u32(0xFFFFFFFF));
    return veorq_s64(a, vx);
}
inline uint64x2_t vmvnq_u64(uint64x2_t a)
{
    uint64x2_t vx = vreinterpretq_u64_u32(vdupq_n_u32(0xFFFFFFFF));
    return veorq_u64(a, vx);
}
#endif
#define OPENCV_HAL_IMPL_NEON_INT_CMP_OP(_Tpvec, cast, suffix, not_suffix) \
inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(cast(vceqq_##suffix(a.val, b.val))); } \
inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(cast(vmvnq_##not_suffix(vceqq_##suffix(a.val, b.val)))); } \
inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(cast(vcltq_##suffix(a.val, b.val))); } \
inline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(cast(vcgtq_##suffix(a.val, b.val))); } \
inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(cast(vcleq_##suffix(a.val, b.val))); } \
inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(cast(vcgeq_##suffix(a.val, b.val))); }

OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint8x16, OPENCV_HAL_NOP, u8, u8)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int8x16, vreinterpretq_s8_u8, s8, u8)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint16x8, OPENCV_HAL_NOP, u16, u16)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int16x8, vreinterpretq_s16_u16, s16, u16)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint32x4, OPENCV_HAL_NOP, u32, u32)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int32x4, vreinterpretq_s32_u32, s32, u32)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_float32x4, vreinterpretq_f32_u32, f32, u32)
#if CV_SIMD128_64F
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint64x2, OPENCV_HAL_NOP, u64, u64)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int64x2, vreinterpretq_s64_u64, s64, u64)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_float64x2, vreinterpretq_f64_u64, f64, u64)
#endif

OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_add_wrap, vaddq_u8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_add_wrap, vaddq_s8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_add_wrap, vaddq_u16)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_add_wrap, vaddq_s16)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_sub_wrap, vsubq_u8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_sub_wrap, vsubq_s8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_sub_wrap, vsubq_u16)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_sub_wrap, vsubq_s16)

// TODO: absdiff for signed integers
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_absdiff, vabdq_u8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_absdiff, vabdq_u16)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_absdiff, vabdq_u32)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_absdiff, vabdq_f32)
#if CV_SIMD128_64F
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_absdiff, vabdq_f64)
#endif

#define OPENCV_HAL_IMPL_NEON_BIN_FUNC2(_Tpvec, _Tpvec2, cast, func, intrin) \
inline _Tpvec2 func(const _Tpvec& a, const _Tpvec& b) \
{ \
    return _Tpvec2(cast(intrin(a.val, b.val))); \
}

OPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int8x16, v_uint8x16, vreinterpretq_u8_s8, v_absdiff, vabdq_s8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int16x8, v_uint16x8, vreinterpretq_u16_s16, v_absdiff, vabdq_s16)
OPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int32x4, v_uint32x4, vreinterpretq_u32_s32, v_absdiff, vabdq_s32)

inline v_float32x4 v_magnitude(const v_float32x4& a, const v_float32x4& b)
{
    v_float32x4 x(vmlaq_f32(vmulq_f32(a.val, a.val), b.val, b.val));
    return v_sqrt(x);
}

inline v_float32x4 v_sqr_magnitude(const v_float32x4& a, const v_float32x4& b)
{
    return v_float32x4(vmlaq_f32(vmulq_f32(a.val, a.val), b.val, b.val));
}

inline v_float32x4 v_muladd(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c)
{
    return v_float32x4(vmlaq_f32(c.val, a.val, b.val));
}

#if CV_SIMD128_64F
inline v_float64x2 v_magnitude(const v_float64x2& a, const v_float64x2& b)
{
    v_float64x2 x(vaddq_f64(vmulq_f64(a.val, a.val), vmulq_f64(b.val, b.val)));
    return v_sqrt(x);
}

inline v_float64x2 v_sqr_magnitude(const v_float64x2& a, const v_float64x2& b)
{
    return v_float64x2(vaddq_f64(vmulq_f64(a.val, a.val), vmulq_f64(b.val, b.val)));
}

inline v_float64x2 v_muladd(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c)
{
    return v_float64x2(vaddq_f64(c.val, vmulq_f64(a.val, b.val)));
}
#endif
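
// Usage sketch (illustrative only): v_muladd(a, b, c) computes a*b + c per
// lane (vmlaq_f32 on the 32-bit path), and v_magnitude(a, b) computes
// sqrt(a*a + b*b), e.g. for gradient magnitudes:
//   v_float32x4 mag = v_magnitude(gx, gy);        // gx, gy: hypothetical inputs
//   v_float32x4 sq  = v_muladd(gx, gx, gy * gy);  // same as v_sqr_magnitude(gx, gy)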

// trade efficiency for convenience
#define OPENCV_HAL_IMPL_NEON_SHIFT_OP(_Tpvec, suffix, _Tps, ssuffix) \
inline _Tpvec operator << (const _Tpvec& a, int n) \
{ return _Tpvec(vshlq_##suffix(a.val, vdupq_n_##ssuffix((_Tps)n))); } \
inline _Tpvec operator >> (const _Tpvec& a, int n) \
{ return _Tpvec(vshlq_##suffix(a.val, vdupq_n_##ssuffix((_Tps)-n))); } \
template<int n> inline _Tpvec v_shl(const _Tpvec& a) \
{ return _Tpvec(vshlq_n_##suffix(a.val, n)); } \
template<int n> inline _Tpvec v_shr(const _Tpvec& a) \
{ return _Tpvec(vshrq_n_##suffix(a.val, n)); } \
template<int n> inline _Tpvec v_rshr(const _Tpvec& a) \
{ return _Tpvec(vrshrq_n_##suffix(a.val, n)); }

OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint8x16, u8, schar, s8)
OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int8x16, s8, schar, s8)
OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint16x8, u16, short, s16)
OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int16x8, s16, short, s16)
OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint32x4, u32, int, s32)
OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int32x4, s32, int, s32)
OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint64x2, u64, int64, s64)
OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int64x2, s64, int64, s64)

#define OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(_Tpvec, _Tp, suffix) \
inline _Tpvec v_load(const _Tp* ptr) \
{ return _Tpvec(vld1q_##suffix(ptr)); } \
inline _Tpvec v_load_aligned(const _Tp* ptr) \
{ return _Tpvec(vld1q_##suffix(ptr)); } \
inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \
{ return _Tpvec(vcombine_##suffix(vld1_##suffix(ptr0), vld1_##suffix(ptr1))); } \
inline void v_store(_Tp* ptr, const _Tpvec& a) \
{ vst1q_##suffix(ptr, a.val); } \
inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \
{ vst1q_##suffix(ptr, a.val); } \
inline void v_store_low(_Tp* ptr, const _Tpvec& a) \
{ vst1_##suffix(ptr, vget_low_##suffix(a.val)); } \
inline void v_store_high(_Tp* ptr, const _Tpvec& a) \
{ vst1_##suffix(ptr, vget_high_##suffix(a.val)); }

OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint8x16, uchar, u8)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int8x16, schar, s8)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint16x8, ushort, u16)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int16x8, short, s16)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint32x4, unsigned, u32)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int32x4, int, s32)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint64x2, uint64, u64)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int64x2, int64, s64)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float32x4, float, f32)
#if CV_SIMD128_64F
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float64x2, double, f64)
#endif

#if CV_FP16
// Workaround for old compilers
inline v_float16x4 v_load_f16(const short* ptr)
{ return v_float16x4(vld1_f16(ptr)); }
inline void v_store_f16(short* ptr, v_float16x4& a)
{ vst1_f16(ptr, a.val); }
#endif
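
// Usage sketch (illustrative only): loads and stores are plain vld1q/vst1q;
// on NEON v_load_aligned maps to the same instruction as v_load, so alignment
// is a hint rather than a requirement here:
//   float src[4] = { 1.f, 2.f, 3.f, 4.f }, dst[4];
//   v_float32x4 r = v_load(src);
//   v_store(dst, r);       // dst now equals src
//   v_store_low(dst, r);   // writes lanes 0..1 only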

#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \
inline scalartype v_reduce_##func(const _Tpvec& a) \
{ \
    _Tpnvec##_t a0 = vp##vectorfunc##_##suffix(vget_low_##suffix(a.val), vget_high_##suffix(a.val)); \
    a0 = vp##vectorfunc##_##suffix(a0, a0); \
    return (scalartype)vget_lane_##suffix(vp##vectorfunc##_##suffix(a0, a0),0); \
}

OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_uint16x8, uint16x4, unsigned short, sum, add, u16)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_uint16x8, uint16x4, unsigned short, max, max, u16)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_uint16x8, uint16x4, unsigned short, min, min, u16)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_int16x8, int16x4, short, sum, add, s16)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_int16x8, int16x4, short, max, max, s16)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_int16x8, int16x4, short, min, min, s16)

#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \
inline scalartype v_reduce_##func(const _Tpvec& a) \
{ \
    _Tpnvec##_t a0 = vp##vectorfunc##_##suffix(vget_low_##suffix(a.val), vget_high_##suffix(a.val)); \
    return (scalartype)vget_lane_##suffix(vp##vectorfunc##_##suffix(a0, vget_high_##suffix(a.val)),0); \
}

OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, uint32x2, unsigned, sum, add, u32)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, uint32x2, unsigned, max, max, u32)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, uint32x2, unsigned, min, min, u32)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int32x2, int, sum, add, s32)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int32x2, int, max, max, s32)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int32x2, int, min, min, s32)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float32x2, float, sum, add, f32)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float32x2, float, max, max, f32)
OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float32x2, float, min, min, f32)

inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b,
                                 const v_float32x4& c, const v_float32x4& d)
{
    float32x4x2_t ab = vtrnq_f32(a.val, b.val);
    float32x4x2_t cd = vtrnq_f32(c.val, d.val);

    float32x4_t u0 = vaddq_f32(ab.val[0], ab.val[1]); // a0+a1 b0+b1 a2+a3 b2+b3
    float32x4_t u1 = vaddq_f32(cd.val[0], cd.val[1]); // c0+c1 d0+d1 c2+c3 d2+d3

    float32x4_t v0 = vcombine_f32(vget_low_f32(u0), vget_low_f32(u1));
    float32x4_t v1 = vcombine_f32(vget_high_f32(u0), vget_high_f32(u1));

    return v_float32x4(vaddq_f32(v0, v1));
}
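
// Usage sketch (illustrative only): v_reduce_sum collapses one vector to a
// scalar, while v_reduce_sum4 sums four vectors at once and packs the four
// totals into one register:
//   v_float32x4 s = v_reduce_sum4(a, b, c, d);  // a..d: hypothetical inputs
//   // s == { sum of a's lanes, sum of b's, sum of c's, sum of d's }
// which is handy when accumulating four dot products side by side.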

#define OPENCV_HAL_IMPL_NEON_POPCOUNT(_Tpvec, cast) \
inline v_uint32x4 v_popcount(const _Tpvec& a) \
{ \
    uint8x16_t t = vcntq_u8(cast(a.val)); \
    uint16x8_t t0 = vpaddlq_u8(t);  /* 16 -> 8 */ \
    uint32x4_t t1 = vpaddlq_u16(t0); /* 8 -> 4 */ \
    return v_uint32x4(t1); \
}

OPENCV_HAL_IMPL_NEON_POPCOUNT(v_uint8x16, OPENCV_HAL_NOP)
OPENCV_HAL_IMPL_NEON_POPCOUNT(v_uint16x8, vreinterpretq_u8_u16)
OPENCV_HAL_IMPL_NEON_POPCOUNT(v_uint32x4, vreinterpretq_u8_u32)
OPENCV_HAL_IMPL_NEON_POPCOUNT(v_int8x16, vreinterpretq_u8_s8)
OPENCV_HAL_IMPL_NEON_POPCOUNT(v_int16x8, vreinterpretq_u8_s16)
OPENCV_HAL_IMPL_NEON_POPCOUNT(v_int32x4, vreinterpretq_u8_s32)

inline int v_signmask(const v_uint8x16& a)
{
    int8x8_t m0 = vcreate_s8(CV_BIG_UINT(0x0706050403020100));
    uint8x16_t v0 = vshlq_u8(vshrq_n_u8(a.val, 7), vcombine_s8(m0, m0));
    uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(v0)));
    return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 8);
}
inline int v_signmask(const v_int8x16& a)
{ return v_signmask(v_reinterpret_as_u8(a)); }

inline int v_signmask(const v_uint16x8& a)
{
    int16x4_t m0 = vcreate_s16(CV_BIG_UINT(0x0003000200010000));
    uint16x8_t v0 = vshlq_u16(vshrq_n_u16(a.val, 15), vcombine_s16(m0, m0));
    uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(v0));
    return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 4);
}
inline int v_signmask(const v_int16x8& a)
{ return v_signmask(v_reinterpret_as_u16(a)); }

inline int v_signmask(const v_uint32x4& a)
{
    int32x2_t m0 = vcreate_s32(CV_BIG_UINT(0x0000000100000000));
    uint32x4_t v0 = vshlq_u32(vshrq_n_u32(a.val, 31), vcombine_s32(m0, m0));
    uint64x2_t v1 = vpaddlq_u32(v0);
    return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 2);
}
inline int v_signmask(const v_int32x4& a)
{ return v_signmask(v_reinterpret_as_u32(a)); }
inline int v_signmask(const v_float32x4& a)
{ return v_signmask(v_reinterpret_as_u32(a)); }
#if CV_SIMD128_64F
inline int v_signmask(const v_uint64x2& a)
{
    int64x1_t m0 = vdup_n_s64(0);
    uint64x2_t v0 = vshlq_u64(vshrq_n_u64(a.val, 63), vcombine_s64(m0, m0));
    return (int)vgetq_lane_u64(v0, 0) + ((int)vgetq_lane_u64(v0, 1) << 1);
}
inline int v_signmask(const v_float64x2& a)
{ return v_signmask(v_reinterpret_as_u64(a)); }
#endif
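
// Usage sketch (illustrative only): v_signmask packs each lane's top bit into
// an integer, lane 0 in bit 0; combined with comparisons it gives a quick
// "which lanes matched" bitmask:
//   v_uint8x16 m = (v_load(p) == v_setall_u8(0));  // p: hypothetical uchar*
//   int mask = v_signmask(m);     // bit i is set iff p[i] == 0
//   if (mask) { /* at least one of the 16 bytes is zero */ }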

#define OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(_Tpvec, suffix, shift) \
inline bool v_check_all(const v_##_Tpvec& a) \
{ \
    _Tpvec##_t v0 = vshrq_n_##suffix(vmvnq_##suffix(a.val), shift); \
    uint64x2_t v1 = vreinterpretq_u64_##suffix(v0); \
    return (vgetq_lane_u64(v1, 0) | vgetq_lane_u64(v1, 1)) == 0; \
} \
inline bool v_check_any(const v_##_Tpvec& a) \
{ \
    _Tpvec##_t v0 = vshrq_n_##suffix(a.val, shift); \
    uint64x2_t v1 = vreinterpretq_u64_##suffix(v0); \
    return (vgetq_lane_u64(v1, 0) | vgetq_lane_u64(v1, 1)) != 0; \
}

OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint8x16, u8, 7)
OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint16x8, u16, 15)
OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint32x4, u32, 31)
#if CV_SIMD128_64F
OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint64x2, u64, 63)
#endif

inline bool v_check_all(const v_int8x16& a)
{ return v_check_all(v_reinterpret_as_u8(a)); }
inline bool v_check_all(const v_int16x8& a)
{ return v_check_all(v_reinterpret_as_u16(a)); }
inline bool v_check_all(const v_int32x4& a)
{ return v_check_all(v_reinterpret_as_u32(a)); }
inline bool v_check_all(const v_float32x4& a)
{ return v_check_all(v_reinterpret_as_u32(a)); }

inline bool v_check_any(const v_int8x16& a)
{ return v_check_any(v_reinterpret_as_u8(a)); }
inline bool v_check_any(const v_int16x8& a)
{ return v_check_any(v_reinterpret_as_u16(a)); }
inline bool v_check_any(const v_int32x4& a)
{ return v_check_any(v_reinterpret_as_u32(a)); }
inline bool v_check_any(const v_float32x4& a)
{ return v_check_any(v_reinterpret_as_u32(a)); }

#if CV_SIMD128_64F
inline bool v_check_all(const v_int64x2& a)
{ return v_check_all(v_reinterpret_as_u64(a)); }
inline bool v_check_all(const v_float64x2& a)
{ return v_check_all(v_reinterpret_as_u64(a)); }
inline bool v_check_any(const v_int64x2& a)
{ return v_check_any(v_reinterpret_as_u64(a)); }
inline bool v_check_any(const v_float64x2& a)
{ return v_check_any(v_reinterpret_as_u64(a)); }
#endif

#define OPENCV_HAL_IMPL_NEON_SELECT(_Tpvec, suffix, usuffix) \
inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \
{ \
    return _Tpvec(vbslq_##suffix(vreinterpretq_##usuffix##_##suffix(mask.val), a.val, b.val)); \
}

OPENCV_HAL_IMPL_NEON_SELECT(v_uint8x16, u8, u8)
OPENCV_HAL_IMPL_NEON_SELECT(v_int8x16, s8, u8)
OPENCV_HAL_IMPL_NEON_SELECT(v_uint16x8, u16, u16)
OPENCV_HAL_IMPL_NEON_SELECT(v_int16x8, s16, u16)
OPENCV_HAL_IMPL_NEON_SELECT(v_uint32x4, u32, u32)
OPENCV_HAL_IMPL_NEON_SELECT(v_int32x4, s32, u32)
OPENCV_HAL_IMPL_NEON_SELECT(v_float32x4, f32, u32)
#if CV_SIMD128_64F
OPENCV_HAL_IMPL_NEON_SELECT(v_float64x2, f64, u64)
#endif

#define OPENCV_HAL_IMPL_NEON_EXPAND(_Tpvec, _Tpwvec, _Tp, suffix) \
inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \
{ \
    b0.val = vmovl_##suffix(vget_low_##suffix(a.val)); \
    b1.val = vmovl_##suffix(vget_high_##suffix(a.val)); \
} \
inline _Tpwvec v_load_expand(const _Tp* ptr) \
{ \
    return _Tpwvec(vmovl_##suffix(vld1_##suffix(ptr))); \
}

OPENCV_HAL_IMPL_NEON_EXPAND(v_uint8x16, v_uint16x8, uchar, u8)
OPENCV_HAL_IMPL_NEON_EXPAND(v_int8x16, v_int16x8, schar, s8)
OPENCV_HAL_IMPL_NEON_EXPAND(v_uint16x8, v_uint32x4, ushort, u16)
OPENCV_HAL_IMPL_NEON_EXPAND(v_int16x8, v_int32x4, short, s16)
OPENCV_HAL_IMPL_NEON_EXPAND(v_uint32x4, v_uint64x2, uint, u32)
OPENCV_HAL_IMPL_NEON_EXPAND(v_int32x4, v_int64x2, int, s32)

inline v_uint32x4 v_load_expand_q(const uchar* ptr)
{
    uint8x8_t v0 = vcreate_u8(*(unsigned*)ptr);
    uint16x4_t v1 = vget_low_u16(vmovl_u8(v0));
    return v_uint32x4(vmovl_u16(v1));
}

inline v_int32x4 v_load_expand_q(const schar* ptr)
{
    int8x8_t v0 = vcreate_s8(*(unsigned*)ptr);
    int16x4_t v1 = vget_low_s16(vmovl_s8(v0));
    return v_int32x4(vmovl_s16(v1));
}
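
// Usage sketch (illustrative only): widening loads for narrow image data.
//   uchar px[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
//   v_uint16x8 w = v_load_expand(px);    // 8 uchars -> 8 ushorts
//   v_uint32x4 q = v_load_expand_q(px);  // first 4 uchars -> 4 x 32-bit lanes
// Note v_load_expand_q reads exactly 4 bytes (through a 32-bit load) and then
// widens twice, 8 -> 16 -> 32 bits.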

#if defined(__aarch64__)
#define OPENCV_HAL_IMPL_NEON_UNPACKS(_Tpvec, suffix) \
inline void v_zip(const v_##_Tpvec& a0, const v_##_Tpvec& a1, v_##_Tpvec& b0, v_##_Tpvec& b1) \
{ \
    b0.val = vzip1q_##suffix(a0.val, a1.val); \
    b1.val = vzip2q_##suffix(a0.val, a1.val); \
} \
inline v_##_Tpvec v_combine_low(const v_##_Tpvec& a, const v_##_Tpvec& b) \
{ \
    return v_##_Tpvec(vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val))); \
} \
inline v_##_Tpvec v_combine_high(const v_##_Tpvec& a, const v_##_Tpvec& b) \
{ \
    return v_##_Tpvec(vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val))); \
} \
inline void v_recombine(const v_##_Tpvec& a, const v_##_Tpvec& b, v_##_Tpvec& c, v_##_Tpvec& d) \
{ \
    c.val = vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val)); \
    d.val = vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val)); \
}
#else
#define OPENCV_HAL_IMPL_NEON_UNPACKS(_Tpvec, suffix) \
inline void v_zip(const v_##_Tpvec& a0, const v_##_Tpvec& a1, v_##_Tpvec& b0, v_##_Tpvec& b1) \
{ \
    _Tpvec##x2_t p = vzipq_##suffix(a0.val, a1.val); \
    b0.val = p.val[0]; \
    b1.val = p.val[1]; \
} \
inline v_##_Tpvec v_combine_low(const v_##_Tpvec& a, const v_##_Tpvec& b) \
{ \
    return v_##_Tpvec(vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val))); \
} \
inline v_##_Tpvec v_combine_high(const v_##_Tpvec& a, const v_##_Tpvec& b) \
{ \
    return v_##_Tpvec(vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val))); \
} \
inline void v_recombine(const v_##_Tpvec& a, const v_##_Tpvec& b, v_##_Tpvec& c, v_##_Tpvec& d) \
{ \
    c.val = vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val)); \
    d.val = vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val)); \
}
#endif

OPENCV_HAL_IMPL_NEON_UNPACKS(uint8x16, u8)
OPENCV_HAL_IMPL_NEON_UNPACKS(int8x16, s8)
OPENCV_HAL_IMPL_NEON_UNPACKS(uint16x8, u16)
OPENCV_HAL_IMPL_NEON_UNPACKS(int16x8, s16)
OPENCV_HAL_IMPL_NEON_UNPACKS(uint32x4, u32)
OPENCV_HAL_IMPL_NEON_UNPACKS(int32x4, s32)
OPENCV_HAL_IMPL_NEON_UNPACKS(float32x4, f32)
#if CV_SIMD128_64F
OPENCV_HAL_IMPL_NEON_UNPACKS(float64x2, f64)
#endif

#define OPENCV_HAL_IMPL_NEON_EXTRACT(_Tpvec, suffix) \
template <int s> \
inline v_##_Tpvec v_extract(const v_##_Tpvec& a, const v_##_Tpvec& b) \
{ \
    return v_##_Tpvec(vextq_##suffix(a.val, b.val, s)); \
}

OPENCV_HAL_IMPL_NEON_EXTRACT(uint8x16, u8)
OPENCV_HAL_IMPL_NEON_EXTRACT(int8x16, s8)
OPENCV_HAL_IMPL_NEON_EXTRACT(uint16x8, u16)
OPENCV_HAL_IMPL_NEON_EXTRACT(int16x8, s16)
OPENCV_HAL_IMPL_NEON_EXTRACT(uint32x4, u32)
OPENCV_HAL_IMPL_NEON_EXTRACT(int32x4, s32)
OPENCV_HAL_IMPL_NEON_EXTRACT(uint64x2, u64)
OPENCV_HAL_IMPL_NEON_EXTRACT(int64x2, s64)
OPENCV_HAL_IMPL_NEON_EXTRACT(float32x4, f32)
#if CV_SIMD128_64F
OPENCV_HAL_IMPL_NEON_EXTRACT(float64x2, f64)
#endif

inline v_int32x4 v_round(const v_float32x4& a)
{
    static const int32x4_t v_sign = vdupq_n_s32((int)0x80000000), // sign-bit mask; (1 << 31) would overflow a signed int
        v_05 = vreinterpretq_s32_f32(vdupq_n_f32(0.5f));

    int32x4_t v_addition = vorrq_s32(v_05, vandq_s32(v_sign, vreinterpretq_s32_f32(a.val)));
    return v_int32x4(vcvtq_s32_f32(vaddq_f32(a.val, vreinterpretq_f32_s32(v_addition))));
}

inline v_int32x4 v_floor(const v_float32x4& a)
{
    int32x4_t a1 = vcvtq_s32_f32(a.val);
    uint32x4_t mask = vcgtq_f32(vcvtq_f32_s32(a1), a.val);
    return v_int32x4(vaddq_s32(a1, vreinterpretq_s32_u32(mask)));
}

inline v_int32x4 v_ceil(const v_float32x4& a)
{
    int32x4_t a1 = vcvtq_s32_f32(a.val);
    uint32x4_t mask = vcgtq_f32(a.val, vcvtq_f32_s32(a1));
    return v_int32x4(vsubq_s32(a1, vreinterpretq_s32_u32(mask)));
}

inline v_int32x4 v_trunc(const v_float32x4& a)
{ return v_int32x4(vcvtq_s32_f32(a.val)); }

#if CV_SIMD128_64F
inline v_int32x4 v_round(const v_float64x2& a)
{
    static const int32x2_t zero = vdup_n_s32(0);
    return v_int32x4(vcombine_s32(vmovn_s64(vcvtaq_s64_f64(a.val)), zero));
}

inline v_int32x4 v_floor(const v_float64x2& a)
{
    static const int32x2_t zero = vdup_n_s32(0);
    int64x2_t a1 = vcvtq_s64_f64(a.val);
    uint64x2_t mask = vcgtq_f64(vcvtq_f64_s64(a1), a.val);
    a1 = vaddq_s64(a1, vreinterpretq_s64_u64(mask));
    return v_int32x4(vcombine_s32(vmovn_s64(a1), zero));
}

inline v_int32x4 v_ceil(const v_float64x2& a)
{
    static const int32x2_t zero = vdup_n_s32(0);
    int64x2_t a1 = vcvtq_s64_f64(a.val);
    uint64x2_t mask = vcgtq_f64(a.val, vcvtq_f64_s64(a1));
    a1 = vsubq_s64(a1, vreinterpretq_s64_u64(mask));
    return v_int32x4(vcombine_s32(vmovn_s64(a1), zero));
}

inline v_int32x4 v_trunc(const v_float64x2& a)
{
    static const int32x2_t zero = vdup_n_s32(0);
    return v_int32x4(vcombine_s32(vmovn_s64(vcvtq_s64_f64(a.val)), zero)); // vcvtq truncates toward zero; vcvtaq would round
}
#endif
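
// Semantics sketch (illustrative only), e.g. for a = { 1.5f, -1.5f, 2.7f, -2.7f }:
//   v_round(a) -> {  2, -2,  3, -3 }   // nearest, halfway cases away from zero
//   v_floor(a) -> {  1, -2,  2, -3 }
//   v_ceil(a)  -> {  2, -1,  3, -2 }
//   v_trunc(a) -> {  1, -1,  2, -2 }   // toward zero
// The float64x2 overloads convert the two lanes and zero the upper half of the
// int32x4 result.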

#define OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(_Tpvec, suffix) \
inline void v_transpose4x4(const v_##_Tpvec& a0, const v_##_Tpvec& a1, \
                           const v_##_Tpvec& a2, const v_##_Tpvec& a3, \
                           v_##_Tpvec& b0, v_##_Tpvec& b1, \
                           v_##_Tpvec& b2, v_##_Tpvec& b3) \
{ \
    /* m00 m01 m02 m03 */ \
    /* m10 m11 m12 m13 */ \
    /* m20 m21 m22 m23 */ \
    /* m30 m31 m32 m33 */ \
    _Tpvec##x2_t t0 = vtrnq_##suffix(a0.val, a1.val); \
    _Tpvec##x2_t t1 = vtrnq_##suffix(a2.val, a3.val); \
    /* m00 m10 m02 m12 */ \
    /* m01 m11 m03 m13 */ \
    /* m20 m30 m22 m32 */ \
    /* m21 m31 m23 m33 */ \
    b0.val = vcombine_##suffix(vget_low_##suffix(t0.val[0]), vget_low_##suffix(t1.val[0])); \
    b1.val = vcombine_##suffix(vget_low_##suffix(t0.val[1]), vget_low_##suffix(t1.val[1])); \
    b2.val = vcombine_##suffix(vget_high_##suffix(t0.val[0]), vget_high_##suffix(t1.val[0])); \
    b3.val = vcombine_##suffix(vget_high_##suffix(t0.val[1]), vget_high_##suffix(t1.val[1])); \
}

OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(uint32x4, u32)
OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(int32x4, s32)
OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(float32x4, f32)
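
// Usage sketch (illustrative only): an in-register 4x4 transpose, e.g. turning
// four rows into four columns without a round trip through memory:
//   v_float32x4 c0, c1, c2, c3;
//   v_transpose4x4(r0, r1, r2, r3, c0, c1, c2, c3);  // r0..r3: hypothetical rows
//   // c0 == { r0[0], r1[0], r2[0], r3[0] }, and so on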

#define OPENCV_HAL_IMPL_NEON_INTERLEAVED(_Tpvec, _Tp, suffix) \
inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b) \
{ \
    _Tpvec##x2_t v = vld2q_##suffix(ptr); \
    a.val = v.val[0]; \
    b.val = v.val[1]; \
} \
inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b, v_##_Tpvec& c) \
{ \
    _Tpvec##x3_t v = vld3q_##suffix(ptr); \
    a.val = v.val[0]; \
    b.val = v.val[1]; \
    c.val = v.val[2]; \
} \
inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b, \
                                v_##_Tpvec& c, v_##_Tpvec& d) \
{ \
    _Tpvec##x4_t v = vld4q_##suffix(ptr); \
    a.val = v.val[0]; \
    b.val = v.val[1]; \
    c.val = v.val[2]; \
    d.val = v.val[3]; \
} \
inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b) \
{ \
    _Tpvec##x2_t v; \
    v.val[0] = a.val; \
    v.val[1] = b.val; \
    vst2q_##suffix(ptr, v); \
} \
inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, const v_##_Tpvec& c) \
{ \
    _Tpvec##x3_t v; \
    v.val[0] = a.val; \
    v.val[1] = b.val; \
    v.val[2] = c.val; \
    vst3q_##suffix(ptr, v); \
} \
inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, \
                               const v_##_Tpvec& c, const v_##_Tpvec& d) \
{ \
    _Tpvec##x4_t v; \
    v.val[0] = a.val; \
    v.val[1] = b.val; \
    v.val[2] = c.val; \
    v.val[3] = d.val; \
    vst4q_##suffix(ptr, v); \
}

OPENCV_HAL_IMPL_NEON_INTERLEAVED(uint8x16, uchar, u8)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(int8x16, schar, s8)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(uint16x8, ushort, u16)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(int16x8, short, s16)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(uint32x4, unsigned, u32)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(int32x4, int, s32)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(float32x4, float, f32)
#if CV_SIMD128_64F
OPENCV_HAL_IMPL_NEON_INTERLEAVED(float64x2, double, f64)
#endif
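
// Usage sketch (illustrative only): splitting packed BGR pixels into planes
// and back, which maps to single vld3q/vst3q instructions:
//   v_uint8x16 b, g, r;
//   v_load_deinterleave(bgr, b, g, r);   // bgr: hypothetical packed input
//   v_store_interleave(out, b, g, r);    // out: hypothetical output buffer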

inline v_float32x4 v_cvt_f32(const v_int32x4& a)
{
    return v_float32x4(vcvtq_f32_s32(a.val));
}

#if CV_SIMD128_64F
inline v_float32x4 v_cvt_f32(const v_float64x2& a)
{
    float32x2_t zero = vdup_n_f32(0.0f);
    return v_float32x4(vcombine_f32(vcvt_f32_f64(a.val), zero));
}

inline v_float64x2 v_cvt_f64(const v_int32x4& a)
{
    return v_float64x2(vcvt_f64_f32(vcvt_f32_s32(vget_low_s32(a.val))));
}

inline v_float64x2 v_cvt_f64_high(const v_int32x4& a)
{
    return v_float64x2(vcvt_f64_f32(vcvt_f32_s32(vget_high_s32(a.val))));
}

inline v_float64x2 v_cvt_f64(const v_float32x4& a)
{
    return v_float64x2(vcvt_f64_f32(vget_low_f32(a.val)));
}

inline v_float64x2 v_cvt_f64_high(const v_float32x4& a)
{
    return v_float64x2(vcvt_f64_f32(vget_high_f32(a.val)));
}
#endif

#if CV_FP16
inline v_float32x4 v_cvt_f32(const v_float16x4& a)
{
    return v_float32x4(vcvt_f32_f16(a.val));
}

inline v_float16x4 v_cvt_f16(const v_float32x4& a)
{
    return v_float16x4(vcvt_f16_f32(a.val));
}
#endif

//! @name Check SIMD support
//! @{
//! @brief Check CPU support for SIMD operations
static inline bool hasSIMD128()
{
    return (CV_CPU_HAS_SUPPORT_NEON) ? true : false;
}

//! @}

CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END

//! @endcond

}

#endif