1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
16 // Copyright (C) 2015, Itseez Inc., all rights reserved.
17 // Third party copyrights are property of their respective owners.
19 // Redistribution and use in source and binary forms, with or without modification,
20 // are permitted provided that the following conditions are met:
22 // * Redistribution's of source code must retain the above copyright notice,
23 // this list of conditions and the following disclaimer.
25 // * Redistribution's in binary form must reproduce the above copyright notice,
26 // this list of conditions and the following disclaimer in the documentation
27 // and/or other materials provided with the distribution.
29 // * The name of the copyright holders may not be used to endorse or promote products
30 // derived from this software without specific prior written permission.
32 // This software is provided by the copyright holders and contributors "as is" and
33 // any express or implied warranties, including, but not limited to, the implied
34 // warranties of merchantability and fitness for a particular purpose are disclaimed.
35 // In no event shall the Intel Corporation or contributors be liable for any direct,
36 // indirect, incidental, special, exemplary, or consequential damages
37 // (including, but not limited to, procurement of substitute goods or services;
38 // loss of use, data, or profits; or business interruption) however caused
39 // and on any theory of liability, whether in contract, strict liability,
40 // or tort (including negligence or otherwise) arising in any way out of
41 // the use of this software, even if advised of the possibility of such damage.
45 #ifndef OPENCV_HAL_VSX_HPP
46 #define OPENCV_HAL_VSX_HPP
49 #include "opencv2/core/utility.hpp"
52 #define CV_SIMD128_64F 1
55 * todo: supporting half precision for power9
56 * convert instructions xvcvhpsp, xvcvsphp
64 CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
66 ///////// Types ////////////
// NOTE(review): extraction artifact — the `struct v_XxxNxM { ... }` header lines, the
// `enum { nlanes = N };` members, the `val` data members, the `get0()` signatures and all
// braces are missing from this chunk; only constructor/initializer lines survive.
// Each wrapper pairs a raw VSX register typedef (vec_uchar16, vec_float4, ...) with a
// scalar lane_type; `get0()` bodies below return lane 0 via vec_extract.
// v_uint8x16 fragment: 16 unsigned 8-bit lanes.
70 typedef uchar lane_type;
74 explicit v_uint8x16(const vec_uchar16& v) : val(v)
76 v_uint8x16() : val(vec_uchar16_z)
78 v_uint8x16(vec_bchar16 v) : val(vec_uchar16_c(v))
80 v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7,
81 uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15)
82 : val(vec_uchar16_set(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15))
85 { return vec_extract(val, 0); }
// v_int8x16 fragment: 16 signed 8-bit lanes.
90 typedef schar lane_type;
94 explicit v_int8x16(const vec_char16& v) : val(v)
96 v_int8x16() : val(vec_char16_z)
98 v_int8x16(vec_bchar16 v) : val(vec_char16_c(v))
100 v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7,
101 schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15)
102 : val(vec_char16_set(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15))
105 { return vec_extract(val, 0); }
// v_uint16x8 fragment: 8 unsigned 16-bit lanes.
110 typedef ushort lane_type;
114 explicit v_uint16x8(const vec_ushort8& v) : val(v)
116 v_uint16x8() : val(vec_ushort8_z)
118 v_uint16x8(vec_bshort8 v) : val(vec_ushort8_c(v))
120 v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7)
121 : val(vec_ushort8_set(v0, v1, v2, v3, v4, v5, v6, v7))
124 { return vec_extract(val, 0); }
// v_int16x8 fragment: 8 signed 16-bit lanes.
129 typedef short lane_type;
133 explicit v_int16x8(const vec_short8& v) : val(v)
135 v_int16x8() : val(vec_short8_z)
137 v_int16x8(vec_bshort8 v) : val(vec_short8_c(v))
139 v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7)
140 : val(vec_short8_set(v0, v1, v2, v3, v4, v5, v6, v7))
143 { return vec_extract(val, 0); }
// v_uint32x4 fragment: 4 unsigned 32-bit lanes.
148 typedef unsigned lane_type;
152 explicit v_uint32x4(const vec_uint4& v) : val(v)
154 v_uint32x4() : val(vec_uint4_z)
156 v_uint32x4(vec_bint4 v) : val(vec_uint4_c(v))
158 v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3) : val(vec_uint4_set(v0, v1, v2, v3))
161 { return vec_extract(val, 0); }
// v_int32x4 fragment: 4 signed 32-bit lanes.
166 typedef int lane_type;
170 explicit v_int32x4(const vec_int4& v) : val(v)
172 v_int32x4() : val(vec_int4_z)
174 v_int32x4(vec_bint4 v) : val(vec_int4_c(v))
176 v_int32x4(int v0, int v1, int v2, int v3) : val(vec_int4_set(v0, v1, v2, v3))
179 { return vec_extract(val, 0); }
// v_float32x4 fragment: 4 single-precision lanes.
184 typedef float lane_type;
188 explicit v_float32x4(const vec_float4& v) : val(v)
190 v_float32x4() : val(vec_float4_z)
192 v_float32x4(vec_bint4 v) : val(vec_float4_c(v))
194 v_float32x4(float v0, float v1, float v2, float v3) : val(vec_float4_set(v0, v1, v2, v3))
197 { return vec_extract(val, 0); }
// v_uint64x2 fragment: 2 unsigned 64-bit lanes.
202 typedef uint64 lane_type;
206 explicit v_uint64x2(const vec_udword2& v) : val(v)
208 v_uint64x2() : val(vec_udword2_z)
210 v_uint64x2(vec_bdword2 v) : val(vec_udword2_c(v))
212 v_uint64x2(uint64 v0, uint64 v1) : val(vec_udword2_set(v0, v1))
215 { return vec_extract(val, 0); }
// v_int64x2 fragment: 2 signed 64-bit lanes.
220 typedef int64 lane_type;
224 explicit v_int64x2(const vec_dword2& v) : val(v)
226 v_int64x2() : val(vec_dword2_z)
228 v_int64x2(vec_bdword2 v) : val(vec_dword2_c(v))
230 v_int64x2(int64 v0, int64 v1) : val(vec_dword2_set(v0, v1))
233 { return vec_extract(val, 0); }
// v_float64x2 fragment: 2 double-precision lanes (CV_SIMD128_64F == 1 above).
238 typedef double lane_type;
242 explicit v_float64x2(const vec_double2& v) : val(v)
244 v_float64x2() : val(vec_double2_z)
246 v_float64x2(vec_bdword2 v) : val(vec_double2_c(v))
248 v_float64x2(double v0, double v1) : val(vec_double2_set(v0, v1))
251 { return vec_extract(val, 0); }
254 //////////////// Load and store operations ///////////////
257 * clang-5 aborted during parse "vec_xxx_c" only if it's
258 * inside a function template which is defined by preprocessor macro.
260 * if vec_xxx_c defined as C++ cast, clang-5 will pass it
// Generates three init helpers per vector type:
//   v_setzero_<suffix>()      — default-constructed (zero) vector
//   v_setall_<suffix>(v)      — broadcast scalar v into all lanes via vec_splats
//   v_reinterpret_as_<suffix> — bit-level cast from any other wrapper (C-style cast
//                               on the raw register; see the clang-5 note above).
262 #define OPENCV_HAL_IMPL_VSX_INITVEC(_Tpvec, _Tp, suffix, cast) \
263 inline _Tpvec v_setzero_##suffix() { return _Tpvec(); } \
264 inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(vec_splats((_Tp)v));} \
265 template<typename _Tpvec0> inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0 &a) \
266 { return _Tpvec((cast)a.val); }
// Instantiate for all ten 128-bit lane configurations.
268 OPENCV_HAL_IMPL_VSX_INITVEC(v_uint8x16, uchar, u8, vec_uchar16)
269 OPENCV_HAL_IMPL_VSX_INITVEC(v_int8x16, schar, s8, vec_char16)
270 OPENCV_HAL_IMPL_VSX_INITVEC(v_uint16x8, ushort, u16, vec_ushort8)
271 OPENCV_HAL_IMPL_VSX_INITVEC(v_int16x8, short, s16, vec_short8)
272 OPENCV_HAL_IMPL_VSX_INITVEC(v_uint32x4, uint, u32, vec_uint4)
273 OPENCV_HAL_IMPL_VSX_INITVEC(v_int32x4, int, s32, vec_int4)
274 OPENCV_HAL_IMPL_VSX_INITVEC(v_uint64x2, uint64, u64, vec_udword2)
275 OPENCV_HAL_IMPL_VSX_INITVEC(v_int64x2, int64, s64, vec_dword2)
276 OPENCV_HAL_IMPL_VSX_INITVEC(v_float32x4, float, f32, vec_float4)
277 OPENCV_HAL_IMPL_VSX_INITVEC(v_float64x2, double, f64, vec_double2)
// Load/store family per type. Note that v_load and v_load_aligned (likewise
// v_store and v_store_aligned) expand to the identical ld_func/st_func call —
// presumably the vsx_ld/vsx_st wrappers tolerate unaligned pointers, so no
// separate aligned path is needed; TODO confirm against the vsx_utils header.
// v_load_halves stitches the low 8 bytes of two pointers via vec_mergesqh;
// v_store_low/v_store_high spill one 8-byte half via vec_st_l8/vec_st_h8.
279 #define OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(_Tpvec, _Tp, ld_func, st_func) \
280 inline _Tpvec v_load(const _Tp* ptr) \
281 { return _Tpvec(ld_func(0, ptr)); } \
282 inline _Tpvec v_load_aligned(const _Tp* ptr) \
283 { return _Tpvec(ld_func(0, ptr)); } \
284 inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \
285 { return _Tpvec(vec_mergesqh(vec_ld_l8(ptr0), vec_ld_l8(ptr1))); } \
286 inline void v_store(_Tp* ptr, const _Tpvec& a) \
287 { st_func(a.val, 0, ptr); } \
288 inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \
289 { st_func(a.val, 0, ptr); } \
290 inline void v_store_low(_Tp* ptr, const _Tpvec& a) \
291 { vec_st_l8(a.val, ptr); } \
292 inline void v_store_high(_Tp* ptr, const _Tpvec& a) \
293 { vec_st_h8(a.val, ptr); }
// 64-bit element types use the vsx_ld2/vsx_st2 variants; all others vsx_ld/vsx_st.
295 OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_uint8x16, uchar, vsx_ld, vsx_st)
296 OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_int8x16, schar, vsx_ld, vsx_st)
297 OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_uint16x8, ushort, vsx_ld, vsx_st)
298 OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_int16x8, short, vsx_ld, vsx_st)
299 OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_uint32x4, uint, vsx_ld, vsx_st)
300 OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_int32x4, int, vsx_ld, vsx_st)
301 OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_float32x4, float, vsx_ld, vsx_st)
302 OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_float64x2, double, vsx_ld, vsx_st)
303 OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_uint64x2, uint64, vsx_ld2, vsx_st2)
304 OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_int64x2, int64, vsx_ld2, vsx_st2)
306 //////////////// Value reordering ///////////////
// De-interleaving loads (split packed ABAB.../ABCABC.../ABCDABCD... memory into
// 2/3/4 planar vectors) and the matching interleaving stores. All the heavy
// lifting is delegated to the vec_ld_deinterleave / vec_st_interleave overloads
// defined elsewhere (vsx_utils); this macro only provides the typed wrappers.
309 #define OPENCV_HAL_IMPL_VSX_INTERLEAVE(_Tp, _Tpvec) \
310 inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, _Tpvec& b) \
311 { vec_ld_deinterleave(ptr, a.val, b.val);} \
312 inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, \
313 _Tpvec& b, _Tpvec& c) \
314 { vec_ld_deinterleave(ptr, a.val, b.val, c.val); } \
315 inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, _Tpvec& b, \
316 _Tpvec& c, _Tpvec& d) \
317 { vec_ld_deinterleave(ptr, a.val, b.val, c.val, d.val); } \
318 inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, const _Tpvec& b) \
319 { vec_st_interleave(a.val, b.val, ptr); } \
320 inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, \
321 const _Tpvec& b, const _Tpvec& c) \
322 { vec_st_interleave(a.val, b.val, c.val, ptr); } \
323 inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, const _Tpvec& b, \
324 const _Tpvec& c, const _Tpvec& d) \
325 { vec_st_interleave(a.val, b.val, c.val, d.val, ptr); }
// No 64-bit element instantiations here — only 8/16/32-bit ints and float/double.
327 OPENCV_HAL_IMPL_VSX_INTERLEAVE(uchar, v_uint8x16)
328 OPENCV_HAL_IMPL_VSX_INTERLEAVE(schar, v_int8x16)
329 OPENCV_HAL_IMPL_VSX_INTERLEAVE(ushort, v_uint16x8)
330 OPENCV_HAL_IMPL_VSX_INTERLEAVE(short, v_int16x8)
331 OPENCV_HAL_IMPL_VSX_INTERLEAVE(uint, v_uint32x4)
332 OPENCV_HAL_IMPL_VSX_INTERLEAVE(int, v_int32x4)
333 OPENCV_HAL_IMPL_VSX_INTERLEAVE(float, v_float32x4)
334 OPENCV_HAL_IMPL_VSX_INTERLEAVE(double, v_float64x2)
// Widening: v_expand splits one vector into two vectors of double-width lanes.
// b0 receives fh(a) and b1 receives fl(a) — fh/fl are vec_unpackh(u)/vec_unpackl(u),
// i.e. the high-half and low-half unpack intrinsics; signed types sign-extend,
// unsigned types zero-extend (the *u variants). v_load_expand loads 16 bytes and
// keeps only the fh() half widened. (Extraction note: the function-body braces of
// v_expand are missing from this chunk.)
337 #define OPENCV_HAL_IMPL_VSX_EXPAND(_Tpvec, _Tpwvec, _Tp, fl, fh) \
338 inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \
340 b0.val = fh(a.val); \
341 b1.val = fl(a.val); \
343 inline _Tpwvec v_load_expand(const _Tp* ptr) \
344 { return _Tpwvec(fh(vsx_ld(0, ptr))); }
346 OPENCV_HAL_IMPL_VSX_EXPAND(v_uint8x16, v_uint16x8, uchar, vec_unpacklu, vec_unpackhu)
347 OPENCV_HAL_IMPL_VSX_EXPAND(v_int8x16, v_int16x8, schar, vec_unpackl, vec_unpackh)
348 OPENCV_HAL_IMPL_VSX_EXPAND(v_uint16x8, v_uint32x4, ushort, vec_unpacklu, vec_unpackhu)
349 OPENCV_HAL_IMPL_VSX_EXPAND(v_int16x8, v_int32x4, short, vec_unpackl, vec_unpackh)
350 OPENCV_HAL_IMPL_VSX_EXPAND(v_uint32x4, v_uint64x2, uint, vec_unpacklu, vec_unpackhu)
351 OPENCV_HAL_IMPL_VSX_EXPAND(v_int32x4, v_int64x2, int, vec_unpackl, vec_unpackh)
// Quad expansion: widen the first four 8-bit elements straight to 32-bit lanes.
353 inline v_uint32x4 v_load_expand_q(const uchar* ptr)
354 { return v_uint32x4(vec_ld_buw(ptr)); }
356 inline v_int32x4 v_load_expand_q(const schar* ptr)
357 { return v_int32x4(vec_ld_bsw(ptr)); }
// Narrowing pack family. Per instantiation it emits:
//   v_pack(a,b)              — saturating pack of two wide vectors (pkfnc is
//                              vec_packs or vec_packsu for the pack_u variants)
//   v_pack_store(ptr,a)      — pack a with itself, store the low 8 bytes
//   v_rshr_pack(a,b)         — round-then-pack: add delta = 1<<(n-1), shift right
//                              by n (sfnc = vec_sr logical / vec_sra arithmetic),
//                              then pack; `n` comes from a `template<int n>` line
//                              that is missing from this extraction — TODO confirm
//                              against the upstream header.
//   v_rshr_pack_store(ptr,a) — same rounding shift on `a` only; the second pkfnc
//                              operand (`delta`) is a don't-care since only the
//                              low 8 bytes are stored by vec_st_l8.
// addfnc is vec_adds (saturating) for 16-bit intermediates, vec_add otherwise.
// (Extraction note: function-body braces and the template lines are missing.)
360 #define OPENCV_HAL_IMPL_VSX_PACK(_Tpvec, _Tp, _Tpwvec, _Tpvn, _Tpdel, sfnc, pkfnc, addfnc, pack) \
361 inline _Tpvec v_##pack(const _Tpwvec& a, const _Tpwvec& b) \
363 return _Tpvec(pkfnc(a.val, b.val)); \
365 inline void v_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
367 vec_st_l8(pkfnc(a.val, a.val), ptr); \
370 inline _Tpvec v_rshr_##pack(const _Tpwvec& a, const _Tpwvec& b) \
372 const __vector _Tpvn vn = vec_splats((_Tpvn)n); \
373 const __vector _Tpdel delta = vec_splats((_Tpdel)((_Tpdel)1 << (n-1))); \
374 return _Tpvec(pkfnc(sfnc(addfnc(a.val, delta), vn), sfnc(addfnc(b.val, delta), vn))); \
377 inline void v_rshr_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
379 const __vector _Tpvn vn = vec_splats((_Tpvn)n); \
380 const __vector _Tpdel delta = vec_splats((_Tpdel)((_Tpdel)1 << (n-1))); \
381 vec_st_l8(pkfnc(sfnc(addfnc(a.val, delta), vn), delta), ptr); \
384 OPENCV_HAL_IMPL_VSX_PACK(v_uint8x16, uchar, v_uint16x8, unsigned short, unsigned short,
385 vec_sr, vec_packs, vec_adds, pack)
386 OPENCV_HAL_IMPL_VSX_PACK(v_int8x16, schar, v_int16x8, unsigned short, short,
387 vec_sra, vec_packs, vec_adds, pack)
389 OPENCV_HAL_IMPL_VSX_PACK(v_uint16x8, ushort, v_uint32x4, unsigned int, unsigned int,
390 vec_sr, vec_packs, vec_add, pack)
391 OPENCV_HAL_IMPL_VSX_PACK(v_int16x8, short, v_int32x4, unsigned int, int,
392 vec_sra, vec_packs, vec_add, pack)
394 OPENCV_HAL_IMPL_VSX_PACK(v_uint32x4, uint, v_uint64x2, unsigned long long, unsigned long long,
395 vec_sr, vec_packs, vec_add, pack)
396 OPENCV_HAL_IMPL_VSX_PACK(v_int32x4, int, v_int64x2, unsigned long long, long long,
397 vec_sra, vec_packs, vec_add, pack)
399 OPENCV_HAL_IMPL_VSX_PACK(v_uint8x16, uchar, v_int16x8, unsigned short, short,
400 vec_sra, vec_packsu, vec_adds, pack_u)
401 OPENCV_HAL_IMPL_VSX_PACK(v_uint16x8, ushort, v_int32x4, unsigned int, int,
402 vec_sra, vec_packsu, vec_add, pack_u)
403 OPENCV_HAL_IMPL_VSX_PACK(v_uint32x4, uint, v_int64x2, unsigned long long, long long,
404 vec_sra, vec_packsu, vec_add, pack_u)
// Lane-reordering helpers (braces missing from this extraction):
// v_zip interleaves a0/a1 element-wise: b0 = merged high halves (vec_mergeh),
// b1 = merged low halves (vec_mergel).
407 template <typename _Tpvec>
408 inline void v_zip(const _Tpvec& a0, const _Tpvec& a1, _Tpvec& b0, _Tpvec& b1)
410 b0.val = vec_mergeh(a0.val, a1.val);
411 b1.val = vec_mergel(a0.val, a1.val);
// v_combine_high/low concatenate 8-byte halves of a and b via the quadword
// merge helpers vec_mergesql / vec_mergesqh.
414 template <typename _Tpvec>
415 inline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b)
416 { return _Tpvec(vec_mergesql(a.val, b.val)); }
418 template <typename _Tpvec>
419 inline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b)
420 { return _Tpvec(vec_mergesqh(a.val, b.val)); }
// v_recombine produces both combinations in one call: c = low-combine, d = high-combine.
422 template <typename _Tpvec>
423 inline void v_recombine(const _Tpvec& a, const _Tpvec& b, _Tpvec& c, _Tpvec& d)
425 c.val = vec_mergesqh(a.val, b.val);
426 d.val = vec_mergesql(a.val, b.val);
// v_extract<s>: concatenate a:b and take 16 bytes starting at element s.
// sf converts the element offset into a byte shift for vec_sld; the `& 15`
// masking is noted below as an xlc workaround. NOTE(review): an `if (...)`
// guard line between the sf computation and the first `return` appears to be
// missing from this extraction (the bare `return _Tpvec(a.val);` presumably
// handled the s == 0 case) — confirm against the upstream header.
430 template<int s, typename _Tpvec>
431 inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b)
433 const int w = sizeof(typename _Tpvec::lane_type);
434 const int n = _Tpvec::nlanes;
435 const unsigned int sf = ((w * n) - (s * w));
437 return _Tpvec(a.val);
440 // bitwise it just to make xlc happy
441 return _Tpvec(vec_sld(b.val, a.val, sf & 15));
// 64-bit specialization: only s = 0/1/2 are meaningful for two-lane vectors, so
// dispatch explicitly (the `template<int s>` and `switch` lines are missing here).
444 #define OPENCV_HAL_IMPL_VSX_EXTRACT_2(_Tpvec) \
446 inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b) \
449 case 0: return _Tpvec(a.val); \
450 case 2: return _Tpvec(b.val); \
451 case 1: return _Tpvec(vec_sldw(b.val, a.val, 2)); \
452 default: return _Tpvec(); \
455 OPENCV_HAL_IMPL_VSX_EXTRACT_2(v_uint64x2)
456 OPENCV_HAL_IMPL_VSX_EXTRACT_2(v_int64x2)
459 ////////// Arithmetic, bitwise and comparison operations /////////
461 /* Element-wise binary and unary operations */
463 #define OPENCV_HAL_IMPL_VSX_BIN_OP(bin_op, _Tpvec, intrin) \
464 inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
465 { return _Tpvec(intrin(a.val, b.val)); } \
466 inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \
467 { a.val = intrin(a.val, b.val); return a; }
// Arithmetic operators. 8/16-bit integer +/- use the SATURATING vec_adds/vec_subs
// (universal-intrinsics contract); 32/64-bit and float use modular/IEEE
// vec_add/vec_sub. Division exists only for the float types.
469 OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint8x16, vec_adds)
470 OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint8x16, vec_subs)
471 OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int8x16, vec_adds)
472 OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int8x16, vec_subs)
473 OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint16x8, vec_adds)
474 OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint16x8, vec_subs)
475 OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_uint16x8, vec_mul)
476 OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int16x8, vec_adds)
477 OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int16x8, vec_subs)
478 OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_int16x8, vec_mul)
479 OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint32x4, vec_add)
480 OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint32x4, vec_sub)
481 OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_uint32x4, vec_mul)
482 OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int32x4, vec_add)
483 OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int32x4, vec_sub)
484 OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_int32x4, vec_mul)
485 OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_float32x4, vec_add)
486 OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_float32x4, vec_sub)
487 OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_float32x4, vec_mul)
488 OPENCV_HAL_IMPL_VSX_BIN_OP(/, v_float32x4, vec_div)
489 OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_float64x2, vec_add)
490 OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_float64x2, vec_sub)
491 OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_float64x2, vec_mul)
492 OPENCV_HAL_IMPL_VSX_BIN_OP(/, v_float64x2, vec_div)
493 OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint64x2, vec_add)
494 OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint64x2, vec_sub)
495 OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int64x2, vec_add)
496 OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int64x2, vec_sub)
// Widening multiply: c gets the product of the unpacked high halves, d of the
// low halves (braces missing from this extraction).
498 inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b, v_int32x4& c, v_int32x4& d)
500 c.val = vec_mul(vec_unpackh(a.val), vec_unpackh(b.val));
501 d.val = vec_mul(vec_unpackl(a.val), vec_unpackl(b.val));
503 inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b, v_uint32x4& c, v_uint32x4& d)
505 c.val = vec_mul(vec_unpackhu(a.val), vec_unpackhu(b.val));
506 d.val = vec_mul(vec_unpacklu(a.val), vec_unpacklu(b.val));
508 inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b, v_uint64x2& c, v_uint64x2& d)
510 c.val = vec_mul(vec_unpackhu(a.val), vec_unpackhu(b.val));
511 d.val = vec_mul(vec_unpacklu(a.val), vec_unpacklu(b.val));
514 /** Non-saturating arithmetics **/
// Generic two-operand wrapper; reused further below for v_min/v_max/v_absdiff.
515 #define OPENCV_HAL_IMPL_VSX_BIN_FUNC(func, intrin) \
516 template<typename _Tpvec> \
517 inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \
518 { return _Tpvec(intrin(a.val, b.val)); }
// Wrapping (modular) add/sub, in contrast to the saturating operators above.
520 OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_add_wrap, vec_add)
521 OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_sub_wrap, vec_sub)
523 /** Bitwise shifts **/
// Runtime (operator<< / operator>>) and compile-time (v_shl/v_shr) shifts.
// splfunc broadcasts the shift count into an UNSIGNED vector of matching lane
// width; signed types reuse the same unsigned splat type, so signed >> here is
// vec_sr — a LOGICAL shift, not arithmetic. NOTE(review): verify that is the
// intended contract for the signed instantiations.
524 #define OPENCV_HAL_IMPL_VSX_SHIFT_OP(_Tpuvec, splfunc) \
525 inline _Tpuvec operator << (const _Tpuvec& a, int imm) \
526 { return _Tpuvec(vec_sl(a.val, splfunc(imm))); } \
527 inline _Tpuvec operator >> (const _Tpuvec& a, int imm) \
528 { return _Tpuvec(vec_sr(a.val, splfunc(imm))); } \
529 template<int imm> inline _Tpuvec v_shl(const _Tpuvec& a) \
530 { return _Tpuvec(vec_sl(a.val, splfunc(imm))); } \
531 template<int imm> inline _Tpuvec v_shr(const _Tpuvec& a) \
532 { return _Tpuvec(vec_sr(a.val, splfunc(imm))); }
534 OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint8x16, vec_uchar16_sp)
535 OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int8x16, vec_uchar16_sp)
536 OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint16x8, vec_ushort8_sp)
537 OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int16x8, vec_ushort8_sp)
538 OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint32x4, vec_uint4_sp)
539 OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int32x4, vec_uint4_sp)
540 OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint64x2, vec_udword2_sp)
541 OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int64x2, vec_udword2_sp)
543 /** Bitwise logic **/
// &, |, ^ (plus their compound-assignment forms via OPENCV_HAL_IMPL_VSX_BIN_OP)
// and unary ~, for every type including the float vectors (bit-level ops).
544 #define OPENCV_HAL_IMPL_VSX_LOGIC_OP(_Tpvec) \
545 OPENCV_HAL_IMPL_VSX_BIN_OP(&, _Tpvec, vec_and) \
546 OPENCV_HAL_IMPL_VSX_BIN_OP(|, _Tpvec, vec_or) \
547 OPENCV_HAL_IMPL_VSX_BIN_OP(^, _Tpvec, vec_xor) \
548 inline _Tpvec operator ~ (const _Tpvec& a) \
549 { return _Tpvec(vec_not(a.val)); }
551 OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint8x16)
552 OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int8x16)
553 OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint16x8)
554 OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int16x8)
555 OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint32x4)
556 OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int32x4)
557 OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint64x2)
558 OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int64x2)
559 OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_float32x4)
560 OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_float64x2)
562 /** Bitwise select **/
// v_select(mask, a, b): per-bit a-where-mask-set, b-elsewhere; `cast` reinterprets
// the mask register as the boolean vector type vec_sel expects.
563 #define OPENCV_HAL_IMPL_VSX_SELECT(_Tpvec, cast) \
564 inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \
565 { return _Tpvec(vec_sel(b.val, a.val, cast(mask.val))); }
567 OPENCV_HAL_IMPL_VSX_SELECT(v_uint8x16, vec_bchar16_c)
568 OPENCV_HAL_IMPL_VSX_SELECT(v_int8x16, vec_bchar16_c)
569 OPENCV_HAL_IMPL_VSX_SELECT(v_uint16x8, vec_bshort8_c)
570 OPENCV_HAL_IMPL_VSX_SELECT(v_int16x8, vec_bshort8_c)
571 OPENCV_HAL_IMPL_VSX_SELECT(v_uint32x4, vec_bint4_c)
572 OPENCV_HAL_IMPL_VSX_SELECT(v_int32x4, vec_bint4_c)
573 OPENCV_HAL_IMPL_VSX_SELECT(v_float32x4, vec_bint4_c)
574 OPENCV_HAL_IMPL_VSX_SELECT(v_float64x2, vec_bdword2_c)
// Comparison operators; each returns an all-ones/all-zeros lane mask produced by
// the corresponding vec_cmp* intrinsic, reinterpreted into the operand type.
577 #define OPENCV_HAL_IMPL_VSX_INT_CMP_OP(_Tpvec) \
578 inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \
579 { return _Tpvec(vec_cmpeq(a.val, b.val)); } \
580 inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \
581 { return _Tpvec(vec_cmpne(a.val, b.val)); } \
582 inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \
583 { return _Tpvec(vec_cmplt(a.val, b.val)); } \
584 inline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \
585 { return _Tpvec(vec_cmpgt(a.val, b.val)); } \
586 inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \
587 { return _Tpvec(vec_cmple(a.val, b.val)); } \
588 inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \
589 { return _Tpvec(vec_cmpge(a.val, b.val)); }
591 OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint8x16)
592 OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int8x16)
593 OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint16x8)
594 OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int16x8)
595 OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint32x4)
596 OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int32x4)
597 OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_float32x4)
598 OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_float64x2)
599 OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint64x2)
600 OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int64x2)
// Element-wise min/max via the generic BIN_FUNC template above.
603 OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_min, vec_min)
604 OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_max, vec_max)
606 ////////// Reduce and mask /////////
// Horizontal sums for 16-bit lanes. The int16 path widens with vec_sum4s and
// folds with vec_sums; the uint16 path first pairs lanes via a saturating add
// of the vector with its 8-byte rotation, widens the high half to int32, then
// vec_sums. Results are saturate_cast back to the lane type.
// (Function-body braces missing from this extraction.)
609 inline short v_reduce_sum(const v_int16x8& a)
611 const vec_int4 zero = vec_int4_z;
612 return saturate_cast<short>(vec_extract(vec_sums(vec_sum4s(a.val, zero), zero), 3));
614 inline ushort v_reduce_sum(const v_uint16x8& a)
616 const vec_int4 v4 = vec_int4_c(vec_unpackhu(vec_adds(a.val, vec_sld(a.val, a.val, 8))));
617 return saturate_cast<ushort>(vec_extract(vec_sums(v4, vec_int4_z), 3));
// 4-lane reductions (sum/min/max) by log2 folding: combine with the 8-byte
// rotation, then the 4-byte rotation, and extract lane 0.
620 #define OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(_Tpvec, _Tpvec2, scalartype, suffix, func) \
621 inline scalartype v_reduce_##suffix(const _Tpvec& a) \
623 const _Tpvec2 rs = func(a.val, vec_sld(a.val, a.val, 8)); \
624 return vec_extract(func(rs, vec_sld(rs, rs, 4)), 0); \
626 OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_uint32x4, vec_uint4, uint, sum, vec_add)
627 OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_uint32x4, vec_uint4, uint, max, vec_max)
628 OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_uint32x4, vec_uint4, uint, min, vec_min)
629 OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_int32x4, vec_int4, int, sum, vec_add)
630 OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_int32x4, vec_int4, int, max, vec_max)
631 OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_int32x4, vec_int4, int, min, vec_min)
632 OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_float32x4, vec_float4, float, sum, vec_add)
633 OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_float32x4, vec_float4, float, max, vec_max)
634 OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_float32x4, vec_float4, float, min, vec_min)
// 8-lane reductions (min/max only) — one extra 2-byte folding step.
636 #define OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(_Tpvec, _Tpvec2, scalartype, suffix, func) \
637 inline scalartype v_reduce_##suffix(const _Tpvec& a) \
639 _Tpvec2 rs = func(a.val, vec_sld(a.val, a.val, 8)); \
640 rs = func(rs, vec_sld(rs, rs, 4)); \
641 return vec_extract(func(rs, vec_sld(rs, rs, 2)), 0); \
643 OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_uint16x8, vec_ushort8, ushort, max, vec_max)
644 OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_uint16x8, vec_ushort8, ushort, min, vec_min)
645 OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_int16x8, vec_short8, short, max, vec_max)
646 OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_int16x8, vec_short8, short, min, vec_min)
// 4x4 transposed sum: returns {sum(a), sum(b), sum(c), sum(d)} by pairwise
// merge-add folding (braces missing from this extraction).
648 inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b,
649 const v_float32x4& c, const v_float32x4& d)
651 vec_float4 ac = vec_add(vec_mergel(a.val, c.val), vec_mergeh(a.val, c.val));
652 ac = vec_add(ac, vec_sld(ac, ac, 8));
654 vec_float4 bd = vec_add(vec_mergel(b.val, d.val), vec_mergeh(b.val, d.val));
655 bd = vec_add(bd, vec_sld(bd, bd, 8));
656 return v_float32x4(vec_mergeh(ac, bd));
// Population count, always returned as v_uint32x4: per-lane popcount via
// vec_popcntu, then pairwise widening adds (unpack low + unpack high) until the
// counts are accumulated into four 32-bit lanes.
660 #define OPENCV_HAL_IMPL_VSX_POPCOUNT_8(_Tpvec) \
661 inline v_uint32x4 v_popcount(const _Tpvec& a) \
663 vec_uchar16 v16 = vec_popcntu(a.val); \
664 vec_ushort8 v8 = vec_add(vec_unpacklu(v16), vec_unpackhu(v16)); \
665 return v_uint32x4(vec_add(vec_unpacklu(v8), vec_unpackhu(v8))); \
667 OPENCV_HAL_IMPL_VSX_POPCOUNT_8(v_int8x16)
668 OPENCV_HAL_IMPL_VSX_POPCOUNT_8(v_uint8x16)
// 16-bit lanes need only one widening step.
670 #define OPENCV_HAL_IMPL_VSX_POPCOUNT_16(_Tpvec) \
671 inline v_uint32x4 v_popcount(const _Tpvec& a) \
673 vec_ushort8 v8 = vec_popcntu(a.val); \
674 return v_uint32x4(vec_add(vec_unpacklu(v8), vec_unpackhu(v8))); \
676 OPENCV_HAL_IMPL_VSX_POPCOUNT_16(v_int16x8)
677 OPENCV_HAL_IMPL_VSX_POPCOUNT_16(v_uint16x8)
// 32-bit lanes map straight through vec_popcntu.
679 #define OPENCV_HAL_IMPL_VSX_POPCOUNT_32(_Tpvec) \
680 inline v_uint32x4 v_popcount(const _Tpvec& a) \
681 { return v_uint32x4(vec_popcntu(a.val)); }
683 OPENCV_HAL_IMPL_VSX_POPCOUNT_32(v_int32x4)
684 OPENCV_HAL_IMPL_VSX_POPCOUNT_32(v_uint32x4)
// v_signmask: collect each lane's sign bit into the low bits of an int
// (bit i = sign of lane i). Strategy for each width: shift the sign bit down
// to bit 0, shift each lane left by its lane index, then horizontally sum with
// vec_sum4s/vec_sums so the per-lane bits OR-accumulate into one integer.
// Signed/unsigned/float variants forward to a reinterpreted same-width version.
// (Function-body braces missing from this extraction.)
687 inline int v_signmask(const v_uint8x16& a)
689 vec_uchar16 sv = vec_sr(a.val, vec_uchar16_sp(7));
690 static const vec_uchar16 slm = {0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7};
691 sv = vec_sl(sv, slm);
692 vec_uint4 sv4 = vec_sum4s(sv, vec_uint4_z);
693 static const vec_uint4 slm4 = {0, 0, 8, 8};
694 sv4 = vec_sl(sv4, slm4);
695 return vec_extract(vec_sums((vec_int4) sv4, vec_int4_z), 3);
697 inline int v_signmask(const v_int8x16& a)
698 { return v_signmask(v_reinterpret_as_u8(a)); }
700 inline int v_signmask(const v_int16x8& a)
702 static const vec_ushort8 slm = {0, 1, 2, 3, 4, 5, 6, 7}
703 vec_short8 sv = vec_sr(a.val, vec_ushort8_sp(15));
704 sv = vec_sl(sv, slm);
705 vec_int4 svi = vec_int4_z;
706 svi = vec_sums(vec_sum4s(sv, svi), svi);
707 return vec_extract(svi, 3);
709 inline int v_signmask(const v_uint16x8& a)
710 { return v_signmask(v_reinterpret_as_s16(a)); }
712 inline int v_signmask(const v_int32x4& a)
714 static const vec_uint4 slm = {0, 1, 2, 3};
715 vec_int4 sv = vec_sr(a.val, vec_uint4_sp(31));
716 sv = vec_sl(sv, slm);
717 sv = vec_sums(sv, vec_int4_z);
718 return vec_extract(sv, 3);
720 inline int v_signmask(const v_uint32x4& a)
721 { return v_signmask(v_reinterpret_as_s32(a)); }
722 inline int v_signmask(const v_float32x4& a)
723 { return v_signmask(v_reinterpret_as_s32(a)); }
// Two-lane case: extract both sign bits directly, no horizontal sum needed.
725 inline int v_signmask(const v_int64x2& a)
727 const vec_dword2 sv = vec_sr(a.val, vec_udword2_sp(63));
728 return (int)vec_extract(sv, 0) | (int)vec_extract(sv, 1) << 1;
730 inline int v_signmask(const v_uint64x2& a)
731 { return v_signmask(v_reinterpret_as_s64(a)); }
732 inline int v_signmask(const v_float64x2& a)
733 { return v_signmask(v_reinterpret_as_s64(a)); }
// v_check_all / v_check_any: true when all / any lanes have the sign bit set,
// implemented as a signed less-than-zero test (vec_all_lt / vec_any_lt against
// a default-constructed, i.e. zero, vector). Unsigned types are reinterpreted
// as signed first so "< 0" means "high bit set".
736 template<typename _Tpvec>
737 inline bool v_check_all(const _Tpvec& a)
738 { return vec_all_lt(a.val, _Tpvec().val);}
739 inline bool v_check_all(const v_uint8x16 &a)
740 { return v_check_all(v_reinterpret_as_s8(a)); }
741 inline bool v_check_all(const v_uint16x8 &a)
742 { return v_check_all(v_reinterpret_as_s16(a)); }
743 inline bool v_check_all(const v_uint32x4 &a)
744 { return v_check_all(v_reinterpret_as_s32(a)); }
746 template<typename _Tpvec>
747 inline bool v_check_any(const _Tpvec& a)
748 { return vec_any_lt(a.val, _Tpvec().val);}
749 inline bool v_check_any(const v_uint8x16 &a)
750 { return v_check_any(v_reinterpret_as_s8(a)); }
751 inline bool v_check_any(const v_uint16x8 &a)
752 { return v_check_any(v_reinterpret_as_s16(a)); }
753 inline bool v_check_any(const v_uint32x4 &a)
754 { return v_check_any(v_reinterpret_as_s32(a)); }
756 ////////// Other math /////////
758 /** Some frequent operations **/
// Square root and reciprocal square root, straight VSX intrinsics.
759 inline v_float32x4 v_sqrt(const v_float32x4& x)
760 { return v_float32x4(vec_sqrt(x.val)); }
761 inline v_float64x2 v_sqrt(const v_float64x2& x)
762 { return v_float64x2(vec_sqrt(x.val)); }
764 inline v_float32x4 v_invsqrt(const v_float32x4& x)
765 { return v_float32x4(vec_rsqrt(x.val)); }
766 inline v_float64x2 v_invsqrt(const v_float64x2& x)
767 { return v_float64x2(vec_rsqrt(x.val)); }
// Fused-multiply-add derived ops: magnitude = sqrt(a*a + b*b),
// sqr_magnitude = a*a + b*b, muladd = a*b + c (all via vec_madd).
769 #define OPENCV_HAL_IMPL_VSX_MULADD(_Tpvec) \
770 inline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b) \
771 { return _Tpvec(vec_sqrt(vec_madd(a.val, a.val, vec_mul(b.val, b.val)))); } \
772 inline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b) \
773 { return _Tpvec(vec_madd(a.val, a.val, vec_mul(b.val, b.val))); } \
774 inline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \
775 { return _Tpvec(vec_madd(a.val, b.val, c.val)); }
777 OPENCV_HAL_IMPL_VSX_MULADD(v_float32x4)
778 OPENCV_HAL_IMPL_VSX_MULADD(v_float64x2)
780 // TODO: exp, log, sin, cos
782 /** Absolute values **/
// Integer |x| returns the UNSIGNED counterpart type (vec_abs result is
// reinterpreted via the vec_*_c casts); float |x| keeps its own type.
783 inline v_uint8x16 v_abs(const v_int8x16& x)
784 { return v_uint8x16(vec_uchar16_c(vec_abs(x.val))); }
786 inline v_uint16x8 v_abs(const v_int16x8& x)
787 { return v_uint16x8(vec_ushort8_c(vec_abs(x.val))); }
789 inline v_uint32x4 v_abs(const v_int32x4& x)
790 { return v_uint32x4(vec_uint4_c(vec_abs(x.val))); }
792 inline v_float32x4 v_abs(const v_float32x4& x)
793 { return v_float32x4(vec_abs(x.val)); }
795 inline v_float64x2 v_abs(const v_float64x2& x)
796 { return v_float64x2(vec_abs(x.val)); }
// |a - b|: same-type version via the generic template, plus signed->unsigned
// result overloads through the two-type BIN_FUNC2 wrapper.
798 OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_absdiff, vec_absd)
800 #define OPENCV_HAL_IMPL_VSX_BIN_FUNC2(_Tpvec, _Tpvec2, cast, func, intrin) \
801 inline _Tpvec2 func(const _Tpvec& a, const _Tpvec& b) \
802 { return _Tpvec2(cast(intrin(a.val, b.val))); }
804 OPENCV_HAL_IMPL_VSX_BIN_FUNC2(v_int8x16, v_uint8x16, vec_uchar16_c, v_absdiff, vec_absd)
805 OPENCV_HAL_IMPL_VSX_BIN_FUNC2(v_int16x8, v_uint16x8, vec_ushort8_c, v_absdiff, vec_absd)
806 OPENCV_HAL_IMPL_VSX_BIN_FUNC2(v_int32x4, v_uint32x4, vec_uint4_c, v_absdiff, vec_absd)
807 OPENCV_HAL_IMPL_VSX_BIN_FUNC2(v_int64x2, v_uint64x2, vec_udword2_c, v_absdiff, vec_absd)
809 ////////// Conversions /////////
// Rounding conversions float->int32. float32 paths round/floor/ceil/truncate
// then vec_cts. float64 paths convert two doubles to two int32 words, then use
// vec_perm with the shared `perm` pattern (bytes 16-19 and 24-27 = elements 0
// and 2 of the second operand) to pack them into the LOW half of the result;
// the high two lanes come from vec_int4_z, i.e. zero.
// (Function-body braces missing from this extraction.)
812 inline v_int32x4 v_round(const v_float32x4& a)
813 { return v_int32x4(vec_cts(vec_round(a.val), 0)); }
815 inline v_int32x4 v_round(const v_float64x2& a)
817 static const vec_uchar16 perm = {16, 17, 18, 19, 24, 25, 26, 27, 0, 0, 0, 0, 0, 0, 0, 0};
818 return v_int32x4(vec_perm(vec_int4_z, vec_ctsw(vec_round(a.val)), perm));
821 inline v_int32x4 v_floor(const v_float32x4& a)
822 { return v_int32x4(vec_cts(vec_floor(a.val), 0)); }
824 inline v_int32x4 v_floor(const v_float64x2& a)
826 static const vec_uchar16 perm = {16, 17, 18, 19, 24, 25, 26, 27, 0, 0, 0, 0, 0, 0, 0, 0};
827 return v_int32x4(vec_perm(vec_int4_z, vec_ctsw(vec_floor(a.val)), perm));
830 inline v_int32x4 v_ceil(const v_float32x4& a)
831 { return v_int32x4(vec_cts(vec_ceil(a.val), 0)); }
833 inline v_int32x4 v_ceil(const v_float64x2& a)
835 static const vec_uchar16 perm = {16, 17, 18, 19, 24, 25, 26, 27, 0, 0, 0, 0, 0, 0, 0, 0};
836 return v_int32x4(vec_perm(vec_int4_z, vec_ctsw(vec_ceil(a.val)), perm));
839 inline v_int32x4 v_trunc(const v_float32x4& a)
840 { return v_int32x4(vec_cts(a.val, 0)); }
842 inline v_int32x4 v_trunc(const v_float64x2& a)
844 static const vec_uchar16 perm = {16, 17, 18, 19, 24, 25, 26, 27, 0, 0, 0, 0, 0, 0, 0, 0};
845 return v_int32x4(vec_perm(vec_int4_z, vec_ctsw(a.val), perm));
// Width conversions. int32->float32 is direct; float64->float32 packs the two
// results into the low half (same perm trick). The f64 converters duplicate
// the source lanes with vec_mergeh/vec_mergel before vec_ctd/vec_cvf so the
// low (v_cvt_f64) or high (v_cvt_f64_high) pair is selected.
849 inline v_float32x4 v_cvt_f32(const v_int32x4& a)
850 { return v_float32x4(vec_ctf(a.val, 0)); }
852 inline v_float32x4 v_cvt_f32(const v_float64x2& a)
854 static const vec_uchar16 perm = {16, 17, 18, 19, 24, 25, 26, 27, 0, 0, 0, 0, 0, 0, 0, 0};
855 return v_float32x4(vec_perm(vec_float4_z, vec_cvf(a.val), perm));
857 inline v_float64x2 v_cvt_f64(const v_int32x4& a)
859 return v_float64x2(vec_ctd(vec_mergeh(a.val, a.val), 0));
861 inline v_float64x2 v_cvt_f64_high(const v_int32x4& a)
863 return v_float64x2(vec_ctd(vec_mergel(a.val, a.val), 0));
865 inline v_float64x2 v_cvt_f64(const v_float32x4& a)
867 return v_float64x2(vec_cvf(vec_mergeh(a.val, a.val)));
869 inline v_float64x2 v_cvt_f64_high(const v_float32x4& a)
871 return v_float64x2(vec_cvf(vec_mergel(a.val, a.val)));
875 /** its up there with load and store operations **/
877 ////////// Matrix operations /////////
// Dot product of 16-bit pairs into 32-bit lanes: vec_msum multiplies adjacent
// pairs and accumulates into a zero accumulator.
879 inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b)
880 { return v_int32x4(vec_msum(a.val, b.val, vec_int4_z)); }
// 4x4 matrix * vector: broadcast each component of v, then chain vec_madd:
// result = v0*m0 + v1*m1 + v2*m2 + v3*m3 (braces missing from this extraction).
882 inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
883 const v_float32x4& m1, const v_float32x4& m2,
884 const v_float32x4& m3)
886 const vec_float4 v0 = vec_splat(v.val, 0);
887 const vec_float4 v1 = vec_splat(v.val, 1);
888 const vec_float4 v2 = vec_splat(v.val, 2);
889 const vec_float4 v3 = vec_splat(v.val, 3);
890 return v_float32x4(vec_madd(v0, m0.val, vec_madd(v1, m1.val, vec_madd(v2, m2.val, vec_mul(v3, m3.val)))));
// Classic 4x4 transpose: two rounds of mergeh/mergel shuffles. Outputs may not
// alias inputs safely here — a02/a13 are reused before b2/b3 read the originals;
// NOTE(review): safe as long as callers pass distinct b0..b3 (the usual contract).
893 #define OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(_Tpvec, _Tpvec2) \
894 inline void v_transpose4x4(const _Tpvec& a0, const _Tpvec& a1, \
895 const _Tpvec& a2, const _Tpvec& a3, \
896 _Tpvec& b0, _Tpvec& b1, _Tpvec& b2, _Tpvec& b3) \
898 _Tpvec2 a02 = vec_mergeh(a0.val, a2.val); \
899 _Tpvec2 a13 = vec_mergeh(a1.val, a3.val); \
900 b0.val = vec_mergeh(a02, a13); \
901 b1.val = vec_mergel(a02, a13); \
902 a02 = vec_mergel(a0.val, a2.val); \
903 a13 = vec_mergel(a1.val, a3.val); \
904 b2.val = vec_mergeh(a02, a13); \
905 b3.val = vec_mergel(a02, a13); \
907 OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_uint32x4, vec_uint4)
908 OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_int32x4, vec_int4)
909 OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_float32x4, vec_float4)
911 //! @name Check SIMD support
913 //! @brief Check CPU capability of SIMD operation
// Runtime check that the executing CPU supports VSX; the ternary just
// normalizes the macro's value to a bool (braces missing from this extraction).
914 static inline bool hasSIMD128()
916 return (CV_CPU_HAS_SUPPORT_VSX) ? true : false;
921 CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
927 #endif // OPENCV_HAL_VSX_HPP