/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_HAL_VSX_HPP
#define OPENCV_HAL_VSX_HPP

#include <algorithm>
#include "opencv2/core/utility.hpp"

#define CV_SIMD128 1
#define CV_SIMD128_64F 1

/**
 * todo: support half precision for POWER9,
 * conversion instructions xvcvhpsp, xvcvsphp
**/

namespace cv
{

//! @cond IGNORED

CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
///////// Types ////////////

struct v_uint8x16
{
    typedef uchar lane_type;
    enum { nlanes = 16 };
    vec_uchar16 val;

    explicit v_uint8x16(const vec_uchar16& v) : val(v) {}
    v_uint8x16() : val(vec_uchar16_z) {}
    v_uint8x16(vec_bchar16 v) : val(vec_uchar16_c(v)) {}
    v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7,
               uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15)
        : val(vec_uchar16_set(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15)) {}
    uchar get0() const
    { return vec_extract(val, 0); }
};

struct v_int8x16
{
    typedef schar lane_type;
    enum { nlanes = 16 };
    vec_char16 val;

    explicit v_int8x16(const vec_char16& v) : val(v) {}
    v_int8x16() : val(vec_char16_z) {}
    v_int8x16(vec_bchar16 v) : val(vec_char16_c(v)) {}
    v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7,
              schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15)
        : val(vec_char16_set(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15)) {}
    schar get0() const
    { return vec_extract(val, 0); }
};

struct v_uint16x8
{
    typedef ushort lane_type;
    enum { nlanes = 8 };
    vec_ushort8 val;

    explicit v_uint16x8(const vec_ushort8& v) : val(v) {}
    v_uint16x8() : val(vec_ushort8_z) {}
    v_uint16x8(vec_bshort8 v) : val(vec_ushort8_c(v)) {}
    v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7)
        : val(vec_ushort8_set(v0, v1, v2, v3, v4, v5, v6, v7)) {}
    ushort get0() const
    { return vec_extract(val, 0); }
};

struct v_int16x8
{
    typedef short lane_type;
    enum { nlanes = 8 };
    vec_short8 val;

    explicit v_int16x8(const vec_short8& v) : val(v) {}
    v_int16x8() : val(vec_short8_z) {}
    v_int16x8(vec_bshort8 v) : val(vec_short8_c(v)) {}
    v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7)
        : val(vec_short8_set(v0, v1, v2, v3, v4, v5, v6, v7)) {}
    short get0() const
    { return vec_extract(val, 0); }
};

struct v_uint32x4
{
    typedef unsigned lane_type;
    enum { nlanes = 4 };
    vec_uint4 val;

    explicit v_uint32x4(const vec_uint4& v) : val(v) {}
    v_uint32x4() : val(vec_uint4_z) {}
    v_uint32x4(vec_bint4 v) : val(vec_uint4_c(v)) {}
    v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3) : val(vec_uint4_set(v0, v1, v2, v3)) {}
    unsigned get0() const
    { return vec_extract(val, 0); }
};

struct v_int32x4
{
    typedef int lane_type;
    enum { nlanes = 4 };
    vec_int4 val;

    explicit v_int32x4(const vec_int4& v) : val(v) {}
    v_int32x4() : val(vec_int4_z) {}
    v_int32x4(vec_bint4 v) : val(vec_int4_c(v)) {}
    v_int32x4(int v0, int v1, int v2, int v3) : val(vec_int4_set(v0, v1, v2, v3)) {}
    int get0() const
    { return vec_extract(val, 0); }
};

struct v_float32x4
{
    typedef float lane_type;
    enum { nlanes = 4 };
    vec_float4 val;

    explicit v_float32x4(const vec_float4& v) : val(v) {}
    v_float32x4() : val(vec_float4_z) {}
    v_float32x4(vec_bint4 v) : val(vec_float4_c(v)) {}
    v_float32x4(float v0, float v1, float v2, float v3) : val(vec_float4_set(v0, v1, v2, v3)) {}
    float get0() const
    { return vec_extract(val, 0); }
};

struct v_uint64x2
{
    typedef uint64 lane_type;
    enum { nlanes = 2 };
    vec_udword2 val;

    explicit v_uint64x2(const vec_udword2& v) : val(v) {}
    v_uint64x2() : val(vec_udword2_z) {}
    v_uint64x2(vec_bdword2 v) : val(vec_udword2_c(v)) {}
    v_uint64x2(uint64 v0, uint64 v1) : val(vec_udword2_set(v0, v1)) {}
    uint64 get0() const
    { return vec_extract(val, 0); }
};

struct v_int64x2
{
    typedef int64 lane_type;
    enum { nlanes = 2 };
    vec_dword2 val;

    explicit v_int64x2(const vec_dword2& v) : val(v) {}
    v_int64x2() : val(vec_dword2_z) {}
    v_int64x2(vec_bdword2 v) : val(vec_dword2_c(v)) {}
    v_int64x2(int64 v0, int64 v1) : val(vec_dword2_set(v0, v1)) {}
    int64 get0() const
    { return vec_extract(val, 0); }
};

struct v_float64x2
{
    typedef double lane_type;
    enum { nlanes = 2 };
    vec_double2 val;

    explicit v_float64x2(const vec_double2& v) : val(v) {}
    v_float64x2() : val(vec_double2_z) {}
    v_float64x2(vec_bdword2 v) : val(vec_double2_c(v)) {}
    v_float64x2(double v0, double v1) : val(vec_double2_set(v0, v1)) {}
    double get0() const
    { return vec_extract(val, 0); }
};
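// Illustrative sketch (not part of the HAL API; the example_* helpers here and
// below are hypothetical, added only to show intended usage on a VSX build):
// build a vector lane-by-lane and read back lane 0.
static inline float example_first_lane()
{
    v_float32x4 v(1.f, 2.f, 3.f, 4.f); // lane-wise constructor
    return v.get0();                   // extracts lane 0 -> 1.f
}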
//////////////// Load and store operations ///////////////

/*
 * clang-5 aborts while parsing "vec_xxx_c" if it appears inside
 * a function template that is defined through a preprocessor macro;
 * if vec_xxx_c is defined as a C++ cast instead, clang-5 accepts it.
*/
#define OPENCV_HAL_IMPL_VSX_INITVEC(_Tpvec, _Tp, suffix, cast)                       \
inline _Tpvec v_setzero_##suffix() { return _Tpvec(); }                              \
inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(vec_splats((_Tp)v)); }        \
template<typename _Tpvec0> inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0 &a) \
{ return _Tpvec((cast)a.val); }
OPENCV_HAL_IMPL_VSX_INITVEC(v_uint8x16, uchar, u8, vec_uchar16)
OPENCV_HAL_IMPL_VSX_INITVEC(v_int8x16, schar, s8, vec_char16)
OPENCV_HAL_IMPL_VSX_INITVEC(v_uint16x8, ushort, u16, vec_ushort8)
OPENCV_HAL_IMPL_VSX_INITVEC(v_int16x8, short, s16, vec_short8)
OPENCV_HAL_IMPL_VSX_INITVEC(v_uint32x4, uint, u32, vec_uint4)
OPENCV_HAL_IMPL_VSX_INITVEC(v_int32x4, int, s32, vec_int4)
OPENCV_HAL_IMPL_VSX_INITVEC(v_uint64x2, uint64, u64, vec_udword2)
OPENCV_HAL_IMPL_VSX_INITVEC(v_int64x2, int64, s64, vec_dword2)
OPENCV_HAL_IMPL_VSX_INITVEC(v_float32x4, float, f32, vec_float4)
OPENCV_HAL_IMPL_VSX_INITVEC(v_float64x2, double, f64, vec_double2)
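// Illustrative sketch (hypothetical helper): broadcast a scalar, then
// reinterpret the same 128 bits as another lane type (no conversion happens).
static inline v_uint32x4 example_splat_bits()
{
    v_float32x4 ones = v_setall_f32(1.0f); // {1.f, 1.f, 1.f, 1.f}
    return v_reinterpret_as_u32(ones);     // IEEE-754 bits: 0x3F800000 per lane
}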
#define OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(_Tpvec, _Tp, ld_func, st_func)  \
inline _Tpvec v_load(const _Tp* ptr)                                         \
{ return _Tpvec(ld_func(0, ptr)); }                                          \
inline _Tpvec v_load_aligned(const _Tp* ptr)                                 \
{ return _Tpvec(ld_func(0, ptr)); }                                          \
inline _Tpvec v_load_low(const _Tp* ptr)                                     \
{ return _Tpvec(vec_ld_l8(ptr)); }                                           \
inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1)                \
{ return _Tpvec(vec_mergesqh(vec_ld_l8(ptr0), vec_ld_l8(ptr1))); }           \
inline void v_store(_Tp* ptr, const _Tpvec& a)                               \
{ st_func(a.val, 0, ptr); }                                                  \
inline void v_store_aligned(_Tp* ptr, const _Tpvec& a)                       \
{ st_func(a.val, 0, ptr); }                                                  \
inline void v_store_low(_Tp* ptr, const _Tpvec& a)                           \
{ vec_st_l8(a.val, ptr); }                                                   \
inline void v_store_high(_Tp* ptr, const _Tpvec& a)                          \
{ vec_st_h8(a.val, ptr); }
OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_uint8x16, uchar, vsx_ld, vsx_st)
OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_int8x16, schar, vsx_ld, vsx_st)
OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_uint16x8, ushort, vsx_ld, vsx_st)
OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_int16x8, short, vsx_ld, vsx_st)
OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_uint32x4, uint, vsx_ld, vsx_st)
OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_int32x4, int, vsx_ld, vsx_st)
OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_float32x4, float, vsx_ld, vsx_st)
OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_float64x2, double, vsx_ld, vsx_st)
OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_uint64x2, uint64, vsx_ld2, vsx_st2)
OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_int64x2, int64, vsx_ld2, vsx_st2)
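// Illustrative sketch (hypothetical helper): round-trip eight ushorts through
// a vector register; v_load/v_store accept unaligned pointers.
static inline void example_copy8(const ushort* src, ushort* dst)
{
    v_uint16x8 v = v_load(src); // loads 8 lanes from src
    v_store(dst, v);            // stores 8 lanes to dst
}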
//////////////// Value reordering ///////////////

#define OPENCV_HAL_IMPL_VSX_INTERLEAVE(_Tp, _Tpvec)                        \
inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, _Tpvec& b)      \
{ vec_ld_deinterleave(ptr, a.val, b.val); }                                \
inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a,                 \
                                _Tpvec& b, _Tpvec& c)                      \
{ vec_ld_deinterleave(ptr, a.val, b.val, c.val); }                         \
inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, _Tpvec& b,      \
                                _Tpvec& c, _Tpvec& d)                      \
{ vec_ld_deinterleave(ptr, a.val, b.val, c.val, d.val); }                  \
inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, const _Tpvec& b) \
{ vec_st_interleave(a.val, b.val, ptr); }                                  \
inline void v_store_interleave(_Tp* ptr, const _Tpvec& a,                  \
                               const _Tpvec& b, const _Tpvec& c)           \
{ vec_st_interleave(a.val, b.val, c.val, ptr); }                           \
inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, const _Tpvec& b, \
                               const _Tpvec& c, const _Tpvec& d)           \
{ vec_st_interleave(a.val, b.val, c.val, d.val, ptr); }
OPENCV_HAL_IMPL_VSX_INTERLEAVE(uchar, v_uint8x16)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(schar, v_int8x16)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(ushort, v_uint16x8)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(short, v_int16x8)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(uint, v_uint32x4)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(int, v_int32x4)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(float, v_float32x4)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(double, v_float64x2)
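// Illustrative sketch (hypothetical helper, assuming an 8-bit 3-channel
// buffer such as BGR): split 48 interleaved bytes into three 16-lane planes.
static inline void example_split_bgr(const uchar* bgr,
                                     v_uint8x16& b, v_uint8x16& g, v_uint8x16& r)
{
    v_load_deinterleave(bgr, b, g, r); // reads b0,g0,r0,b1,g1,r1,...
}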
#define OPENCV_HAL_IMPL_VSX_EXPAND(_Tpvec, _Tpwvec, _Tp, fl, fh) \
inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1)  \
{                                                                \
    b0.val = fh(a.val);                                          \
    b1.val = fl(a.val);                                          \
}                                                                \
inline _Tpwvec v_load_expand(const _Tp* ptr)                     \
{ return _Tpwvec(fh(vsx_ld(0, ptr))); }

OPENCV_HAL_IMPL_VSX_EXPAND(v_uint8x16, v_uint16x8, uchar, vec_unpacklu, vec_unpackhu)
OPENCV_HAL_IMPL_VSX_EXPAND(v_int8x16, v_int16x8, schar, vec_unpackl, vec_unpackh)
OPENCV_HAL_IMPL_VSX_EXPAND(v_uint16x8, v_uint32x4, ushort, vec_unpacklu, vec_unpackhu)
OPENCV_HAL_IMPL_VSX_EXPAND(v_int16x8, v_int32x4, short, vec_unpackl, vec_unpackh)
OPENCV_HAL_IMPL_VSX_EXPAND(v_uint32x4, v_uint64x2, uint, vec_unpacklu, vec_unpackhu)
OPENCV_HAL_IMPL_VSX_EXPAND(v_int32x4, v_int64x2, int, vec_unpackl, vec_unpackh)

inline v_uint32x4 v_load_expand_q(const uchar* ptr)
{ return v_uint32x4(vec_ld_buw(ptr)); }

inline v_int32x4 v_load_expand_q(const schar* ptr)
{ return v_int32x4(vec_ld_bsw(ptr)); }
#define OPENCV_HAL_IMPL_VSX_PACK(_Tpvec, _Tp, _Tpwvec, _Tpvn, _Tpdel, sfnc, pkfnc, addfnc, pack) \
inline _Tpvec v_##pack(const _Tpwvec& a, const _Tpwvec& b)                                \
{                                                                                         \
    return _Tpvec(pkfnc(a.val, b.val));                                                   \
}                                                                                         \
inline void v_##pack##_store(_Tp* ptr, const _Tpwvec& a)                                  \
{                                                                                         \
    vec_st_l8(pkfnc(a.val, a.val), ptr);                                                  \
}                                                                                         \
template<int n>                                                                           \
inline _Tpvec v_rshr_##pack(const _Tpwvec& a, const _Tpwvec& b)                           \
{                                                                                         \
    const __vector _Tpvn vn = vec_splats((_Tpvn)n);                                       \
    const __vector _Tpdel delta = vec_splats((_Tpdel)((_Tpdel)1 << (n-1)));               \
    return _Tpvec(pkfnc(sfnc(addfnc(a.val, delta), vn), sfnc(addfnc(b.val, delta), vn))); \
}                                                                                         \
template<int n>                                                                           \
inline void v_rshr_##pack##_store(_Tp* ptr, const _Tpwvec& a)                             \
{                                                                                         \
    const __vector _Tpvn vn = vec_splats((_Tpvn)n);                                       \
    const __vector _Tpdel delta = vec_splats((_Tpdel)((_Tpdel)1 << (n-1)));               \
    vec_st_l8(pkfnc(sfnc(addfnc(a.val, delta), vn), delta), ptr);                         \
}

OPENCV_HAL_IMPL_VSX_PACK(v_uint8x16, uchar, v_uint16x8, unsigned short, unsigned short,
                         vec_sr, vec_packs, vec_adds, pack)
OPENCV_HAL_IMPL_VSX_PACK(v_int8x16, schar, v_int16x8, unsigned short, short,
                         vec_sra, vec_packs, vec_adds, pack)

OPENCV_HAL_IMPL_VSX_PACK(v_uint16x8, ushort, v_uint32x4, unsigned int, unsigned int,
                         vec_sr, vec_packs, vec_add, pack)
OPENCV_HAL_IMPL_VSX_PACK(v_int16x8, short, v_int32x4, unsigned int, int,
                         vec_sra, vec_packs, vec_add, pack)

OPENCV_HAL_IMPL_VSX_PACK(v_uint32x4, uint, v_uint64x2, unsigned long long, unsigned long long,
                         vec_sr, vec_pack, vec_add, pack)
OPENCV_HAL_IMPL_VSX_PACK(v_int32x4, int, v_int64x2, unsigned long long, long long,
                         vec_sra, vec_pack, vec_add, pack)

OPENCV_HAL_IMPL_VSX_PACK(v_uint8x16, uchar, v_int16x8, unsigned short, short,
                         vec_sra, vec_packsu, vec_adds, pack_u)
OPENCV_HAL_IMPL_VSX_PACK(v_uint16x8, ushort, v_int32x4, unsigned int, int,
                         vec_sra, vec_packsu, vec_add, pack_u)
// The following variant is not implemented on other platforms:
//OPENCV_HAL_IMPL_VSX_PACK(v_uint32x4, uint, v_int64x2, unsigned long long, long long,
//                         vec_sra, vec_packsu, vec_add, pack_u)
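// Illustrative sketch (hypothetical helper): narrow two int16 vectors to
// unsigned bytes with a rounding right shift by 2, i.e. (x + 2) >> 2 per lane,
// followed by a saturating pack.
static inline v_uint8x16 example_narrow(const v_int16x8& a, const v_int16x8& b)
{
    return v_rshr_pack_u<2>(a, b); // adds 1 << (n-1), shifts, then saturates to uchar
}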
template <typename _Tpvec>
inline void v_zip(const _Tpvec& a0, const _Tpvec& a1, _Tpvec& b0, _Tpvec& b1)
{
    b0.val = vec_mergeh(a0.val, a1.val);
    b1.val = vec_mergel(a0.val, a1.val);
}

template <typename _Tpvec>
inline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b)
{ return _Tpvec(vec_mergesql(a.val, b.val)); }

template <typename _Tpvec>
inline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b)
{ return _Tpvec(vec_mergesqh(a.val, b.val)); }

template <typename _Tpvec>
inline void v_recombine(const _Tpvec& a, const _Tpvec& b, _Tpvec& c, _Tpvec& d)
{
    c.val = vec_mergesqh(a.val, b.val);
    d.val = vec_mergesql(a.val, b.val);
}
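// Illustrative sketch (hypothetical helper): interleave two float vectors
// lane-by-lane.
static inline void example_zip(const v_float32x4& a, const v_float32x4& b,
                               v_float32x4& lo, v_float32x4& hi)
{
    v_zip(a, b, lo, hi); // lo = {a0,b0,a1,b1}, hi = {a2,b2,a3,b3}
}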
template<int s, typename _Tpvec>
inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b)
{
    const int w = sizeof(typename _Tpvec::lane_type);
    const int n = _Tpvec::nlanes;
    const unsigned int sf = ((w * n) - (s * w));
    if (s == 0)
        return _Tpvec(a.val);
    else if (sf > 15)
        return _Tpvec();
    // the bitwise AND is just to make xlc happy
    return _Tpvec(vec_sld(b.val, a.val, sf & 15));
}

#define OPENCV_HAL_IMPL_VSX_EXTRACT_2(_Tpvec)             \
template<int s>                                           \
inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b) \
{                                                         \
    switch(s) {                                           \
    case 0: return _Tpvec(a.val);                         \
    case 2: return _Tpvec(b.val);                         \
    case 1: return _Tpvec(vec_sldw(b.val, a.val, 2));     \
    default: return _Tpvec();                             \
    }                                                     \
}
OPENCV_HAL_IMPL_VSX_EXTRACT_2(v_uint64x2)
OPENCV_HAL_IMPL_VSX_EXTRACT_2(v_int64x2)
////////// Arithmetic, bitwise and comparison operations /////////

/* Element-wise binary and unary operations */
#define OPENCV_HAL_IMPL_VSX_BIN_OP(bin_op, _Tpvec, intrin)       \
inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(intrin(a.val, b.val)); }                         \
inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b)   \
{ a.val = intrin(a.val, b.val); return a; }
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint8x16, vec_adds)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint8x16, vec_subs)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int8x16, vec_adds)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int8x16, vec_subs)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint16x8, vec_adds)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint16x8, vec_subs)
OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_uint16x8, vec_mul)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int16x8, vec_adds)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int16x8, vec_subs)
OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_int16x8, vec_mul)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint32x4, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint32x4, vec_sub)
OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_uint32x4, vec_mul)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int32x4, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int32x4, vec_sub)
OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_int32x4, vec_mul)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_float32x4, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_float32x4, vec_sub)
OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_float32x4, vec_mul)
OPENCV_HAL_IMPL_VSX_BIN_OP(/, v_float32x4, vec_div)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_float64x2, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_float64x2, vec_sub)
OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_float64x2, vec_mul)
OPENCV_HAL_IMPL_VSX_BIN_OP(/, v_float64x2, vec_div)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint64x2, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint64x2, vec_sub)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int64x2, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int64x2, vec_sub)
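// Illustrative sketch (hypothetical helper): 8/16-bit +/- map to the
// saturating vec_adds/vec_subs, so lanes clamp at the type limits instead of
// wrapping.
static inline v_uint8x16 example_saturate()
{
    v_uint8x16 a = v_setall_u8(250), b = v_setall_u8(10);
    return a + b; // every lane is 255 (saturated), not 4
}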
inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b, v_int32x4& c, v_int32x4& d)
{
    c.val = vec_mul(vec_unpackh(a.val), vec_unpackh(b.val));
    d.val = vec_mul(vec_unpackl(a.val), vec_unpackl(b.val));
}
inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b, v_uint32x4& c, v_uint32x4& d)
{
    c.val = vec_mul(vec_unpackhu(a.val), vec_unpackhu(b.val));
    d.val = vec_mul(vec_unpacklu(a.val), vec_unpacklu(b.val));
}
inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b, v_uint64x2& c, v_uint64x2& d)
{
    c.val = vec_mul(vec_unpackhu(a.val), vec_unpackhu(b.val));
    d.val = vec_mul(vec_unpacklu(a.val), vec_unpacklu(b.val));
}
/** Non-saturating arithmetic **/
#define OPENCV_HAL_IMPL_VSX_BIN_FUNC(func, intrin)   \
template<typename _Tpvec>                            \
inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(intrin(a.val, b.val)); }

OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_add_wrap, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_sub_wrap, vec_sub)
/** Bitwise shifts **/
#define OPENCV_HAL_IMPL_VSX_SHIFT_OP(_Tpvec, shr, splfunc) \
inline _Tpvec operator << (const _Tpvec& a, int imm)       \
{ return _Tpvec(vec_sl(a.val, splfunc(imm))); }            \
inline _Tpvec operator >> (const _Tpvec& a, int imm)       \
{ return _Tpvec(shr(a.val, splfunc(imm))); }               \
template<int imm> inline _Tpvec v_shl(const _Tpvec& a)     \
{ return _Tpvec(vec_sl(a.val, splfunc(imm))); }            \
template<int imm> inline _Tpvec v_shr(const _Tpvec& a)     \
{ return _Tpvec(shr(a.val, splfunc(imm))); }

OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint8x16, vec_sr, vec_uchar16_sp)
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint16x8, vec_sr, vec_ushort8_sp)
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint32x4, vec_sr, vec_uint4_sp)
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint64x2, vec_sr, vec_udword2_sp)
// algebraic (sign-extending) right shift for the signed types
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int8x16, vec_sra, vec_uchar16_sp)
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int16x8, vec_sra, vec_ushort8_sp)
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int32x4, vec_sra, vec_uint4_sp)
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int64x2, vec_sra, vec_udword2_sp)
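// Illustrative sketch (hypothetical helper): compile-time shifts; signed types
// use the algebraic right shift, which preserves the sign bit.
static inline v_int32x4 example_shift(const v_int32x4& a)
{
    v_int32x4 doubled = v_shl<1>(a); // a << 1
    return v_shr<2>(doubled);        // arithmetic shift right by 2
}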
/** Bitwise logic **/
#define OPENCV_HAL_IMPL_VSX_LOGIC_OP(_Tpvec)   \
OPENCV_HAL_IMPL_VSX_BIN_OP(&, _Tpvec, vec_and) \
OPENCV_HAL_IMPL_VSX_BIN_OP(|, _Tpvec, vec_or)  \
OPENCV_HAL_IMPL_VSX_BIN_OP(^, _Tpvec, vec_xor) \
inline _Tpvec operator ~ (const _Tpvec& a)     \
{ return _Tpvec(vec_not(a.val)); }

OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint8x16)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int8x16)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint16x8)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int16x8)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint32x4)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int32x4)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint64x2)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int64x2)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_float32x4)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_float64x2)
/** Bitwise select **/
#define OPENCV_HAL_IMPL_VSX_SELECT(_Tpvec, cast)                             \
inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_sel(b.val, a.val, cast(mask.val))); }

OPENCV_HAL_IMPL_VSX_SELECT(v_uint8x16, vec_bchar16_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_int8x16, vec_bchar16_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_uint16x8, vec_bshort8_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_int16x8, vec_bshort8_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_uint32x4, vec_bint4_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_int32x4, vec_bint4_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_float32x4, vec_bint4_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_float64x2, vec_bdword2_c)
#define OPENCV_HAL_IMPL_VSX_INT_CMP_OP(_Tpvec)               \
inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_cmpeq(a.val, b.val)); }                  \
inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_cmpne(a.val, b.val)); }                  \
inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b)  \
{ return _Tpvec(vec_cmplt(a.val, b.val)); }                  \
inline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b)  \
{ return _Tpvec(vec_cmpgt(a.val, b.val)); }                  \
inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_cmple(a.val, b.val)); }                  \
inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_cmpge(a.val, b.val)); }

OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint8x16)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int8x16)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint16x8)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int16x8)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint32x4)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int32x4)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_float32x4)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_float64x2)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint64x2)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int64x2)

OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_min, vec_min)
OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_max, vec_max)
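// Illustrative sketch (hypothetical helper): a branch-free ReLU. The
// comparison yields an all-ones/all-zeros mask per lane, which v_select uses
// to pick a source; v_max with zero gives the same result more directly.
static inline v_float32x4 example_relu(const v_float32x4& x)
{
    v_float32x4 zero = v_setall_f32(0.0f);
    return v_select(x > zero, x, zero); // equivalently: v_max(x, zero)
}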
/** Rotation **/
#define OPENCV_IMPL_VSX_ROTATE(_Tpvec, suffix, shf, cast)                    \
template<int imm>                                                            \
inline _Tpvec v_rotate_##suffix(const _Tpvec& a)                             \
{                                                                            \
    const int wd = imm * sizeof(typename _Tpvec::lane_type);                 \
    if (wd > 15)                                                             \
        return _Tpvec();                                                     \
    return _Tpvec((cast)shf(vec_uchar16_c(a.val), vec_uchar16_sp(wd << 3))); \
}

#define OPENCV_IMPL_VSX_ROTATE_LR(_Tpvec, cast)     \
OPENCV_IMPL_VSX_ROTATE(_Tpvec, left, vec_slo, cast) \
OPENCV_IMPL_VSX_ROTATE(_Tpvec, right, vec_sro, cast)

OPENCV_IMPL_VSX_ROTATE_LR(v_uint8x16, vec_uchar16)
OPENCV_IMPL_VSX_ROTATE_LR(v_int8x16, vec_char16)
OPENCV_IMPL_VSX_ROTATE_LR(v_uint16x8, vec_ushort8)
OPENCV_IMPL_VSX_ROTATE_LR(v_int16x8, vec_short8)
OPENCV_IMPL_VSX_ROTATE_LR(v_uint32x4, vec_uint4)
OPENCV_IMPL_VSX_ROTATE_LR(v_int32x4, vec_int4)
OPENCV_IMPL_VSX_ROTATE_LR(v_uint64x2, vec_udword2)
OPENCV_IMPL_VSX_ROTATE_LR(v_int64x2, vec_dword2)

template<int imm, typename _Tpvec>
inline _Tpvec v_rotate_right(const _Tpvec& a, const _Tpvec& b)
{
    enum { CV_SHIFT = 16 - imm * (sizeof(typename _Tpvec::lane_type)) };
    if (CV_SHIFT == 16)
        return a;
#ifdef __IBMCPP__
    return _Tpvec(vec_sld(b.val, a.val, CV_SHIFT & 15));
#else
    return _Tpvec(vec_sld(b.val, a.val, CV_SHIFT));
#endif
}

template<int imm, typename _Tpvec>
inline _Tpvec v_rotate_left(const _Tpvec& a, const _Tpvec& b)
{
    enum { CV_SHIFT = imm * (sizeof(typename _Tpvec::lane_type)) };
    if (CV_SHIFT == 16)
        return b;
    return _Tpvec(vec_sld(a.val, b.val, CV_SHIFT));
}

#define OPENCV_IMPL_VSX_ROTATE_64(_Tpvec, suffix, rg1, rg2)       \
template<int imm>                                                 \
inline _Tpvec v_rotate_##suffix(const _Tpvec& a, const _Tpvec& b) \
{                                                                 \
    if (imm == 1)                                                 \
        return _Tpvec(vec_permi(rg1.val, rg2.val, 2));            \
    return imm ? b : a;                                           \
}

OPENCV_IMPL_VSX_ROTATE_64(v_int64x2, right, a, b)
OPENCV_IMPL_VSX_ROTATE_64(v_uint64x2, right, a, b)

OPENCV_IMPL_VSX_ROTATE_64(v_int64x2, left, b, a)
OPENCV_IMPL_VSX_ROTATE_64(v_uint64x2, left, b, a)
////////// Reduce and mask /////////

/** Reduce **/
inline short v_reduce_sum(const v_int16x8& a)
{
    const vec_int4 zero = vec_int4_z;
    return saturate_cast<short>(vec_extract(vec_sums(vec_sum4s(a.val, zero), zero), 3));
}
inline ushort v_reduce_sum(const v_uint16x8& a)
{
    const vec_int4 v4 = vec_int4_c(vec_unpackhu(vec_adds(a.val, vec_sld(a.val, a.val, 8))));
    return saturate_cast<ushort>(vec_extract(vec_sums(v4, vec_int4_z), 3));
}
#define OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(_Tpvec, _Tpvec2, scalartype, suffix, func) \
inline scalartype v_reduce_##suffix(const _Tpvec& a)                               \
{                                                                                  \
    const _Tpvec2 rs = func(a.val, vec_sld(a.val, a.val, 8));                      \
    return vec_extract(func(rs, vec_sld(rs, rs, 4)), 0);                           \
}
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_uint32x4, vec_uint4, uint, sum, vec_add)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_uint32x4, vec_uint4, uint, max, vec_max)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_uint32x4, vec_uint4, uint, min, vec_min)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_int32x4, vec_int4, int, sum, vec_add)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_int32x4, vec_int4, int, max, vec_max)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_int32x4, vec_int4, int, min, vec_min)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_float32x4, vec_float4, float, sum, vec_add)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_float32x4, vec_float4, float, max, vec_max)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_float32x4, vec_float4, float, min, vec_min)

#define OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(_Tpvec, _Tpvec2, scalartype, suffix, func) \
inline scalartype v_reduce_##suffix(const _Tpvec& a)                               \
{                                                                                  \
    _Tpvec2 rs = func(a.val, vec_sld(a.val, a.val, 8));                            \
    rs = func(rs, vec_sld(rs, rs, 4));                                             \
    return vec_extract(func(rs, vec_sld(rs, rs, 2)), 0);                           \
}
OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_uint16x8, vec_ushort8, ushort, max, vec_max)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_uint16x8, vec_ushort8, ushort, min, vec_min)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_int16x8, vec_short8, short, max, vec_max)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_int16x8, vec_short8, short, min, vec_min)
inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b,
                                 const v_float32x4& c, const v_float32x4& d)
{
    vec_float4 ac = vec_add(vec_mergel(a.val, c.val), vec_mergeh(a.val, c.val));
    ac = vec_add(ac, vec_sld(ac, ac, 8));

    vec_float4 bd = vec_add(vec_mergel(b.val, d.val), vec_mergeh(b.val, d.val));
    bd = vec_add(bd, vec_sld(bd, bd, 8));
    return v_float32x4(vec_mergeh(ac, bd));
}
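// Illustrative sketch (hypothetical helper): horizontal sum of 4 float lanes.
static inline float example_hsum(const v_float32x4& a)
{
    return v_reduce_sum(a); // a0 + a1 + a2 + a3
}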
template<typename _Tpvec>
inline v_uint32x4 v_popcount(const _Tpvec& a)
{ return v_uint32x4(vec_popcntu(vec_uint4_c(a.val))); }
inline int v_signmask(const v_uint8x16& a)
{
    vec_uchar16 sv = vec_sr(a.val, vec_uchar16_sp(7));
    static const vec_uchar16 slm = {0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7};
    sv = vec_sl(sv, slm);
    vec_uint4 sv4 = vec_sum4s(sv, vec_uint4_z);
    static const vec_uint4 slm4 = {0, 0, 8, 8};
    sv4 = vec_sl(sv4, slm4);
    return vec_extract(vec_sums((vec_int4) sv4, vec_int4_z), 3);
}
inline int v_signmask(const v_int8x16& a)
{ return v_signmask(v_reinterpret_as_u8(a)); }

inline int v_signmask(const v_int16x8& a)
{
    static const vec_ushort8 slm = {0, 1, 2, 3, 4, 5, 6, 7};
    vec_short8 sv = vec_sr(a.val, vec_ushort8_sp(15));
    sv = vec_sl(sv, slm);
    vec_int4 svi = vec_int4_z;
    svi = vec_sums(vec_sum4s(sv, svi), svi);
    return vec_extract(svi, 3);
}
inline int v_signmask(const v_uint16x8& a)
{ return v_signmask(v_reinterpret_as_s16(a)); }

inline int v_signmask(const v_int32x4& a)
{
    static const vec_uint4 slm = {0, 1, 2, 3};
    vec_int4 sv = vec_sr(a.val, vec_uint4_sp(31));
    sv = vec_sl(sv, slm);
    sv = vec_sums(sv, vec_int4_z);
    return vec_extract(sv, 3);
}
inline int v_signmask(const v_uint32x4& a)
{ return v_signmask(v_reinterpret_as_s32(a)); }
inline int v_signmask(const v_float32x4& a)
{ return v_signmask(v_reinterpret_as_s32(a)); }

inline int v_signmask(const v_int64x2& a)
{
    VSX_UNUSED(const vec_dword2) sv = vec_sr(a.val, vec_udword2_sp(63));
    return (int)vec_extract(sv, 0) | (int)vec_extract(sv, 1) << 1;
}
inline int v_signmask(const v_uint64x2& a)
{ return v_signmask(v_reinterpret_as_s64(a)); }
inline int v_signmask(const v_float64x2& a)
{ return v_signmask(v_reinterpret_as_s64(a)); }
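// Illustrative sketch (hypothetical helper): pack each lane's sign bit into an
// int, with lane 0 in bit 0.
static inline int example_mask(const v_float32x4& a)
{
    return v_signmask(a); // e.g. {-1.f, 2.f, -3.f, 4.f} -> 0b0101 = 5
}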
template<typename _Tpvec>
inline bool v_check_all(const _Tpvec& a)
{ return vec_all_lt(a.val, _Tpvec().val); }
inline bool v_check_all(const v_uint8x16& a)
{ return v_check_all(v_reinterpret_as_s8(a)); }
inline bool v_check_all(const v_uint16x8& a)
{ return v_check_all(v_reinterpret_as_s16(a)); }
inline bool v_check_all(const v_uint32x4& a)
{ return v_check_all(v_reinterpret_as_s32(a)); }

template<typename _Tpvec>
inline bool v_check_any(const _Tpvec& a)
{ return vec_any_lt(a.val, _Tpvec().val); }
inline bool v_check_any(const v_uint8x16& a)
{ return v_check_any(v_reinterpret_as_s8(a)); }
inline bool v_check_any(const v_uint16x8& a)
{ return v_check_any(v_reinterpret_as_s16(a)); }
inline bool v_check_any(const v_uint32x4& a)
{ return v_check_any(v_reinterpret_as_s32(a)); }
////////// Other math /////////

/** Some frequent operations **/
inline v_float32x4 v_sqrt(const v_float32x4& x)
{ return v_float32x4(vec_sqrt(x.val)); }
inline v_float64x2 v_sqrt(const v_float64x2& x)
{ return v_float64x2(vec_sqrt(x.val)); }

inline v_float32x4 v_invsqrt(const v_float32x4& x)
{ return v_float32x4(vec_rsqrt(x.val)); }
inline v_float64x2 v_invsqrt(const v_float64x2& x)
{ return v_float64x2(vec_rsqrt(x.val)); }
#define OPENCV_HAL_IMPL_VSX_MULADD(_Tpvec)                                  \
inline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b)                 \
{ return _Tpvec(vec_sqrt(vec_madd(a.val, a.val, vec_mul(b.val, b.val)))); } \
inline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b)             \
{ return _Tpvec(vec_madd(a.val, a.val, vec_mul(b.val, b.val))); }           \
inline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c)   \
{ return _Tpvec(vec_madd(a.val, b.val, c.val)); }

OPENCV_HAL_IMPL_VSX_MULADD(v_float32x4)
OPENCV_HAL_IMPL_VSX_MULADD(v_float64x2)

// TODO: exp, log, sin, cos
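// Illustrative sketch (hypothetical helper): fused multiply-add via vec_madd.
static inline v_float32x4 example_fma(const v_float32x4& a, const v_float32x4& b,
                                      const v_float32x4& c)
{
    return v_muladd(a, b, c); // a*b + c per lane
}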
/** Absolute values **/
inline v_uint8x16 v_abs(const v_int8x16& x)
{ return v_uint8x16(vec_uchar16_c(vec_abs(x.val))); }

inline v_uint16x8 v_abs(const v_int16x8& x)
{ return v_uint16x8(vec_ushort8_c(vec_abs(x.val))); }

inline v_uint32x4 v_abs(const v_int32x4& x)
{ return v_uint32x4(vec_uint4_c(vec_abs(x.val))); }

inline v_float32x4 v_abs(const v_float32x4& x)
{ return v_float32x4(vec_abs(x.val)); }

inline v_float64x2 v_abs(const v_float64x2& x)
{ return v_float64x2(vec_abs(x.val)); }

OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_absdiff, vec_absd)

#define OPENCV_HAL_IMPL_VSX_BIN_FUNC2(_Tpvec, _Tpvec2, cast, func, intrin) \
inline _Tpvec2 func(const _Tpvec& a, const _Tpvec& b)                      \
{ return _Tpvec2(cast(intrin(a.val, b.val))); }

OPENCV_HAL_IMPL_VSX_BIN_FUNC2(v_int8x16, v_uint8x16, vec_uchar16_c, v_absdiff, vec_absd)
OPENCV_HAL_IMPL_VSX_BIN_FUNC2(v_int16x8, v_uint16x8, vec_ushort8_c, v_absdiff, vec_absd)
OPENCV_HAL_IMPL_VSX_BIN_FUNC2(v_int32x4, v_uint32x4, vec_uint4_c, v_absdiff, vec_absd)
OPENCV_HAL_IMPL_VSX_BIN_FUNC2(v_int64x2, v_uint64x2, vec_udword2_c, v_absdiff, vec_absd)
////////// Conversions /////////

/** Rounding **/
inline v_int32x4 v_round(const v_float32x4& a)
{ return v_int32x4(vec_cts(vec_round(a.val))); }

inline v_int32x4 v_round(const v_float64x2& a)
{ return v_int32x4(vec_mergesqo(vec_ctso(vec_round(a.val)), vec_int4_z)); }

inline v_int32x4 v_floor(const v_float32x4& a)
{ return v_int32x4(vec_cts(vec_floor(a.val))); }

inline v_int32x4 v_floor(const v_float64x2& a)
{ return v_int32x4(vec_mergesqo(vec_ctso(vec_floor(a.val)), vec_int4_z)); }

inline v_int32x4 v_ceil(const v_float32x4& a)
{ return v_int32x4(vec_cts(vec_ceil(a.val))); }

inline v_int32x4 v_ceil(const v_float64x2& a)
{ return v_int32x4(vec_mergesqo(vec_ctso(vec_ceil(a.val)), vec_int4_z)); }

inline v_int32x4 v_trunc(const v_float32x4& a)
{ return v_int32x4(vec_cts(a.val)); }

inline v_int32x4 v_trunc(const v_float64x2& a)
{ return v_int32x4(vec_mergesqo(vec_ctso(a.val), vec_int4_z)); }
/** To float **/
inline v_float32x4 v_cvt_f32(const v_int32x4& a)
{ return v_float32x4(vec_ctf(a.val)); }

inline v_float32x4 v_cvt_f32(const v_float64x2& a)
{ return v_float32x4(vec_mergesqo(vec_cvfo(a.val), vec_float4_z)); }

inline v_float64x2 v_cvt_f64(const v_int32x4& a)
{ return v_float64x2(vec_ctdo(vec_mergeh(a.val, a.val))); }

inline v_float64x2 v_cvt_f64_high(const v_int32x4& a)
{ return v_float64x2(vec_ctdo(vec_mergel(a.val, a.val))); }

inline v_float64x2 v_cvt_f64(const v_float32x4& a)
{ return v_float64x2(vec_cvfo(vec_mergeh(a.val, a.val))); }

inline v_float64x2 v_cvt_f64_high(const v_float32x4& a)
{ return v_float64x2(vec_cvfo(vec_mergel(a.val, a.val))); }
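// Illustrative sketch (hypothetical helper): round floats to the nearest int
// and widen the low two lanes to double.
static inline void example_convert(const v_float32x4& a, v_int32x4& i, v_float64x2& d)
{
    i = v_round(a);   // nearest integer per lane
    d = v_cvt_f64(a); // lanes 0 and 1 as double
}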
/** Reinterpret **/
/** It's defined above, together with the load and store operations **/
////////// Matrix operations /////////

inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b)
{ return v_int32x4(vec_msum(a.val, b.val, vec_int4_z)); }

inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
                            const v_float32x4& m1, const v_float32x4& m2,
                            const v_float32x4& m3)
{
    const vec_float4 v0 = vec_splat(v.val, 0);
    const vec_float4 v1 = vec_splat(v.val, 1);
    const vec_float4 v2 = vec_splat(v.val, 2);
    VSX_UNUSED(const vec_float4) v3 = vec_splat(v.val, 3);
    return v_float32x4(vec_madd(v0, m0.val, vec_madd(v1, m1.val, vec_madd(v2, m2.val, vec_mul(v3, m3.val)))));
}

inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0,
                               const v_float32x4& m1, const v_float32x4& m2,
                               const v_float32x4& a)
{
    const vec_float4 v0 = vec_splat(v.val, 0);
    const vec_float4 v1 = vec_splat(v.val, 1);
    const vec_float4 v2 = vec_splat(v.val, 2);
    return v_float32x4(vec_madd(v0, m0.val, vec_madd(v1, m1.val, vec_madd(v2, m2.val, a.val))));
}
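// Illustrative sketch (hypothetical helper): 4x4 matrix-vector product, with
// the matrix given as four row vectors m0..m3.
static inline v_float32x4 example_transform(const v_float32x4& v,
                                            const v_float32x4& m0, const v_float32x4& m1,
                                            const v_float32x4& m2, const v_float32x4& m3)
{
    return v_matmul(v, m0, m1, m2, m3); // v0*m0 + v1*m1 + v2*m2 + v3*m3
}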
#define OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(_Tpvec, _Tpvec2)                  \
inline void v_transpose4x4(const _Tpvec& a0, const _Tpvec& a1,             \
                           const _Tpvec& a2, const _Tpvec& a3,             \
                           _Tpvec& b0, _Tpvec& b1, _Tpvec& b2, _Tpvec& b3) \
{                                                                          \
    _Tpvec2 a02 = vec_mergeh(a0.val, a2.val);                              \
    _Tpvec2 a13 = vec_mergeh(a1.val, a3.val);                              \
    b0.val = vec_mergeh(a02, a13);                                         \
    b1.val = vec_mergel(a02, a13);                                         \
    a02 = vec_mergel(a0.val, a2.val);                                      \
    a13 = vec_mergel(a1.val, a3.val);                                      \
    b2.val = vec_mergeh(a02, a13);                                         \
    b3.val = vec_mergel(a02, a13);                                         \
}
OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_uint32x4, vec_uint4)
OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_int32x4, vec_int4)
OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_float32x4, vec_float4)
//! @name Check SIMD support
//! @{
//! @brief Check CPU capability of SIMD operation
static inline bool hasSIMD128()
{
    return (CV_CPU_HAS_SUPPORT_VSX) ? true : false;
}

//! @}

CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END

//! @endcond

}

#endif // OPENCV_HAL_VSX_HPP