// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#ifndef OPENCV_HAL_VSX_HPP
#define OPENCV_HAL_VSX_HPP

#include "opencv2/core/utility.hpp"

#define CV_SIMD128_64F 1

/**
 * TODO: support half precision for POWER9
 * via the convert instructions xvcvhpsp and xvcvsphp
*/

CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN

///////// Types ////////////

struct v_uint8x16
{
    typedef uchar lane_type;
    vec_uchar16 val;

    explicit v_uint8x16(const vec_uchar16& v) : val(v) {}
    v_uint8x16() : val(vec_uchar16_z) {}
    v_uint8x16(vec_bchar16 v) : val(vec_uchar16_c(v)) {}
    v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7,
               uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15)
        : val(vec_uchar16_set(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15)) {}
    uchar get0() const { return vec_extract(val, 0); }
};

struct v_int8x16
{
    typedef schar lane_type;
    vec_char16 val;

    explicit v_int8x16(const vec_char16& v) : val(v) {}
    v_int8x16() : val(vec_char16_z) {}
    v_int8x16(vec_bchar16 v) : val(vec_char16_c(v)) {}
    v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7,
              schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15)
        : val(vec_char16_set(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15)) {}
    schar get0() const { return vec_extract(val, 0); }
};

struct v_uint16x8
{
    typedef ushort lane_type;
    vec_ushort8 val;

    explicit v_uint16x8(const vec_ushort8& v) : val(v) {}
    v_uint16x8() : val(vec_ushort8_z) {}
    v_uint16x8(vec_bshort8 v) : val(vec_ushort8_c(v)) {}
    v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7)
        : val(vec_ushort8_set(v0, v1, v2, v3, v4, v5, v6, v7)) {}
    ushort get0() const { return vec_extract(val, 0); }
};

struct v_int16x8
{
    typedef short lane_type;
    vec_short8 val;

    explicit v_int16x8(const vec_short8& v) : val(v) {}
    v_int16x8() : val(vec_short8_z) {}
    v_int16x8(vec_bshort8 v) : val(vec_short8_c(v)) {}
    v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7)
        : val(vec_short8_set(v0, v1, v2, v3, v4, v5, v6, v7)) {}
    short get0() const { return vec_extract(val, 0); }
};

struct v_uint32x4
{
    typedef unsigned lane_type;
    vec_uint4 val;

    explicit v_uint32x4(const vec_uint4& v) : val(v) {}
    v_uint32x4() : val(vec_uint4_z) {}
    v_uint32x4(vec_bint4 v) : val(vec_uint4_c(v)) {}
    v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3) : val(vec_uint4_set(v0, v1, v2, v3)) {}
    unsigned get0() const { return vec_extract(val, 0); }
};

struct v_int32x4
{
    typedef int lane_type;
    vec_int4 val;

    explicit v_int32x4(const vec_int4& v) : val(v) {}
    v_int32x4() : val(vec_int4_z) {}
    v_int32x4(vec_bint4 v) : val(vec_int4_c(v)) {}
    v_int32x4(int v0, int v1, int v2, int v3) : val(vec_int4_set(v0, v1, v2, v3)) {}
    int get0() const { return vec_extract(val, 0); }
};

struct v_float32x4
{
    typedef float lane_type;
    vec_float4 val;

    explicit v_float32x4(const vec_float4& v) : val(v) {}
    v_float32x4() : val(vec_float4_z) {}
    v_float32x4(vec_bint4 v) : val(vec_float4_c(v)) {}
    v_float32x4(float v0, float v1, float v2, float v3) : val(vec_float4_set(v0, v1, v2, v3)) {}
    float get0() const { return vec_extract(val, 0); }
};

struct v_uint64x2
{
    typedef uint64 lane_type;
    vec_udword2 val;

    explicit v_uint64x2(const vec_udword2& v) : val(v) {}
    v_uint64x2() : val(vec_udword2_z) {}
    v_uint64x2(vec_bdword2 v) : val(vec_udword2_c(v)) {}
    v_uint64x2(uint64 v0, uint64 v1) : val(vec_udword2_set(v0, v1)) {}
    uint64 get0() const { return vec_extract(val, 0); }
};

struct v_int64x2
{
    typedef int64 lane_type;
    vec_dword2 val;

    explicit v_int64x2(const vec_dword2& v) : val(v) {}
    v_int64x2() : val(vec_dword2_z) {}
    v_int64x2(vec_bdword2 v) : val(vec_dword2_c(v)) {}
    v_int64x2(int64 v0, int64 v1) : val(vec_dword2_set(v0, v1)) {}
    int64 get0() const { return vec_extract(val, 0); }
};

struct v_float64x2
{
    typedef double lane_type;
    vec_double2 val;

    explicit v_float64x2(const vec_double2& v) : val(v) {}
    v_float64x2() : val(vec_double2_z) {}
    v_float64x2(vec_bdword2 v) : val(vec_double2_c(v)) {}
    v_float64x2(double v0, double v1) : val(vec_double2_set(v0, v1)) {}
    double get0() const { return vec_extract(val, 0); }
};

//////////////// Load and store operations ///////////////

/*
 * clang-5 aborts during parsing of "vec_xxx_c" only when it appears
 * inside a function template that is defined through a preprocessor macro;
 * if vec_xxx_c is defined as a C++-style cast instead, clang-5 accepts it.
*/
#define OPENCV_HAL_IMPL_VSX_INITVEC(_Tpvec, _Tp, suffix, cast) \
inline _Tpvec v_setzero_##suffix() { return _Tpvec(); } \
inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(vec_splats((_Tp)v));} \
template<typename _Tpvec0> inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0 &a) \
{ return _Tpvec((cast)a.val); }

OPENCV_HAL_IMPL_VSX_INITVEC(v_uint8x16, uchar, u8, vec_uchar16)
OPENCV_HAL_IMPL_VSX_INITVEC(v_int8x16, schar, s8, vec_char16)
OPENCV_HAL_IMPL_VSX_INITVEC(v_uint16x8, ushort, u16, vec_ushort8)
OPENCV_HAL_IMPL_VSX_INITVEC(v_int16x8, short, s16, vec_short8)
OPENCV_HAL_IMPL_VSX_INITVEC(v_uint32x4, uint, u32, vec_uint4)
OPENCV_HAL_IMPL_VSX_INITVEC(v_int32x4, int, s32, vec_int4)
OPENCV_HAL_IMPL_VSX_INITVEC(v_uint64x2, uint64, u64, vec_udword2)
OPENCV_HAL_IMPL_VSX_INITVEC(v_int64x2, int64, s64, vec_dword2)
OPENCV_HAL_IMPL_VSX_INITVEC(v_float32x4, float, f32, vec_float4)
OPENCV_HAL_IMPL_VSX_INITVEC(v_float64x2, double, f64, vec_double2)
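
// Usage sketch (illustrative only, not part of this header): the macro above
// generates v_setzero_*, v_setall_* and v_reinterpret_as_* for each type.
//
//   v_float32x4 ones = v_setall_f32(1.0f);         // {1.f, 1.f, 1.f, 1.f}
//   v_uint32x4  bits = v_reinterpret_as_u32(ones); // {0x3f800000, ...} (same bits)
//   v_uint8x16  zero = v_setzero_u8();             // all 16 lanes are 0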

#define OPENCV_HAL_IMPL_VSX_LOADSTORE_C(_Tpvec, _Tp, ld, ld_a, st, st_a) \
inline _Tpvec v_load(const _Tp* ptr) \
{ return _Tpvec(ld(0, ptr)); } \
inline _Tpvec v_load_aligned(VSX_UNUSED(const _Tp* ptr)) \
{ return _Tpvec(ld_a(0, ptr)); } \
inline _Tpvec v_load_low(const _Tp* ptr) \
{ return _Tpvec(vec_ld_l8(ptr)); } \
inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \
{ return _Tpvec(vec_mergesqh(vec_ld_l8(ptr0), vec_ld_l8(ptr1))); } \
inline void v_store(_Tp* ptr, const _Tpvec& a) \
{ st(a.val, 0, ptr); } \
inline void v_store_aligned(VSX_UNUSED(_Tp* ptr), const _Tpvec& a) \
{ st_a(a.val, 0, ptr); } \
inline void v_store_low(_Tp* ptr, const _Tpvec& a) \
{ vec_st_l8(a.val, ptr); } \
inline void v_store_high(_Tp* ptr, const _Tpvec& a) \
{ vec_st_h8(a.val, ptr); }

#define OPENCV_HAL_IMPL_VSX_LOADSTORE(_Tpvec, _Tp) \
OPENCV_HAL_IMPL_VSX_LOADSTORE_C(_Tpvec, _Tp, vsx_ld, vec_ld, vsx_st, vec_st)

OPENCV_HAL_IMPL_VSX_LOADSTORE(v_uint8x16, uchar)
OPENCV_HAL_IMPL_VSX_LOADSTORE(v_int8x16, schar)
OPENCV_HAL_IMPL_VSX_LOADSTORE(v_uint16x8, ushort)
OPENCV_HAL_IMPL_VSX_LOADSTORE(v_int16x8, short)
OPENCV_HAL_IMPL_VSX_LOADSTORE(v_uint32x4, uint)
OPENCV_HAL_IMPL_VSX_LOADSTORE(v_int32x4, int)
OPENCV_HAL_IMPL_VSX_LOADSTORE(v_float32x4, float)

OPENCV_HAL_IMPL_VSX_LOADSTORE_C(v_float64x2, double, vsx_ld, vsx_ld, vsx_st, vsx_st)
OPENCV_HAL_IMPL_VSX_LOADSTORE_C(v_uint64x2, uint64, vsx_ld2, vsx_ld2, vsx_st2, vsx_st2)
OPENCV_HAL_IMPL_VSX_LOADSTORE_C(v_int64x2, int64, vsx_ld2, vsx_ld2, vsx_st2, vsx_st2)
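
// Usage sketch (illustrative): round-tripping a small float buffer through a
// vector register; v_load has no alignment requirement, v_load_aligned does.
//
//   float buf[4] = {1.f, 2.f, 3.f, 4.f};
//   v_float32x4 v = v_load(buf);   // unaligned 128-bit load
//   v = v + v;
//   v_store(buf, v);               // buf is now {2, 4, 6, 8}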

//////////////// Value reordering ///////////////

#define OPENCV_HAL_IMPL_VSX_INTERLEAVE(_Tp, _Tpvec) \
inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, _Tpvec& b) \
{ vec_ld_deinterleave(ptr, a.val, b.val);} \
inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, \
                                _Tpvec& b, _Tpvec& c) \
{ vec_ld_deinterleave(ptr, a.val, b.val, c.val); } \
inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, _Tpvec& b, \
                                _Tpvec& c, _Tpvec& d) \
{ vec_ld_deinterleave(ptr, a.val, b.val, c.val, d.val); } \
inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, const _Tpvec& b) \
{ vec_st_interleave(a.val, b.val, ptr); } \
inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, \
                               const _Tpvec& b, const _Tpvec& c) \
{ vec_st_interleave(a.val, b.val, c.val, ptr); } \
inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, const _Tpvec& b, \
                               const _Tpvec& c, const _Tpvec& d) \
{ vec_st_interleave(a.val, b.val, c.val, d.val, ptr); }

OPENCV_HAL_IMPL_VSX_INTERLEAVE(uchar, v_uint8x16)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(schar, v_int8x16)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(ushort, v_uint16x8)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(short, v_int16x8)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(uint, v_uint32x4)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(int, v_int32x4)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(float, v_float32x4)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(double, v_float64x2)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(int64, v_int64x2)
OPENCV_HAL_IMPL_VSX_INTERLEAVE(uint64, v_uint64x2)
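
// Usage sketch (illustrative): splitting packed 3-channel uchar data (e.g. BGR)
// into planes and packing it back; `src` is a hypothetical uchar* pointing at
// least 48 interleaved bytes (16 pixels).
//
//   v_uint8x16 b, g, r;
//   v_load_deinterleave(src, b, g, r); // b = {B0..B15}, g = {G0..G15}, r = {R0..R15}
//   v_store_interleave(src, b, g, r);  // back to B0 G0 R0 B1 G1 R1 ...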

#define OPENCV_HAL_IMPL_VSX_EXPAND(_Tpvec, _Tpwvec, _Tp, fl, fh) \
inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \
{ \
    b0.val = fh(a.val); \
    b1.val = fl(a.val); \
} \
inline _Tpwvec v_load_expand(const _Tp* ptr) \
{ return _Tpwvec(fh(vec_ld_l8(ptr))); }

OPENCV_HAL_IMPL_VSX_EXPAND(v_uint8x16, v_uint16x8, uchar, vec_unpacklu, vec_unpackhu)
OPENCV_HAL_IMPL_VSX_EXPAND(v_int8x16, v_int16x8, schar, vec_unpackl, vec_unpackh)
OPENCV_HAL_IMPL_VSX_EXPAND(v_uint16x8, v_uint32x4, ushort, vec_unpacklu, vec_unpackhu)
OPENCV_HAL_IMPL_VSX_EXPAND(v_int16x8, v_int32x4, short, vec_unpackl, vec_unpackh)
OPENCV_HAL_IMPL_VSX_EXPAND(v_uint32x4, v_uint64x2, uint, vec_unpacklu, vec_unpackhu)
OPENCV_HAL_IMPL_VSX_EXPAND(v_int32x4, v_int64x2, int, vec_unpackl, vec_unpackh)

inline v_uint32x4 v_load_expand_q(const uchar* ptr)
{ return v_uint32x4(vec_uint4_set(ptr[0], ptr[1], ptr[2], ptr[3])); }

inline v_int32x4 v_load_expand_q(const schar* ptr)
{ return v_int32x4(vec_int4_set(ptr[0], ptr[1], ptr[2], ptr[3])); }
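
// Usage sketch (illustrative): widening 16 uchar lanes into two ushort vectors
// so later sums cannot overflow 8 bits.
//
//   v_uint8x16 a = v_setall_u8(200);
//   v_uint16x8 lo, hi;
//   v_expand(a, lo, hi);   // lo = first 8 lanes widened, hi = last 8 lanes widened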

#define OPENCV_HAL_IMPL_VSX_PACK(_Tpvec, _Tp, _Tpwvec, _Tpvn, _Tpdel, sfnc, pkfnc, addfnc, pack) \
inline _Tpvec v_##pack(const _Tpwvec& a, const _Tpwvec& b) \
{ \
    return _Tpvec(pkfnc(a.val, b.val)); \
} \
inline void v_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
{ \
    vec_st_l8(pkfnc(a.val, a.val), ptr); \
} \
template<int n> \
inline _Tpvec v_rshr_##pack(const _Tpwvec& a, const _Tpwvec& b) \
{ \
    const __vector _Tpvn vn = vec_splats((_Tpvn)n); \
    const __vector _Tpdel delta = vec_splats((_Tpdel)((_Tpdel)1 << (n-1))); \
    return _Tpvec(pkfnc(sfnc(addfnc(a.val, delta), vn), sfnc(addfnc(b.val, delta), vn))); \
} \
template<int n> \
inline void v_rshr_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
{ \
    const __vector _Tpvn vn = vec_splats((_Tpvn)n); \
    const __vector _Tpdel delta = vec_splats((_Tpdel)((_Tpdel)1 << (n-1))); \
    vec_st_l8(pkfnc(sfnc(addfnc(a.val, delta), vn), delta), ptr); \
}

OPENCV_HAL_IMPL_VSX_PACK(v_uint8x16, uchar, v_uint16x8, unsigned short, unsigned short,
                         vec_sr, vec_packs, vec_adds, pack)
OPENCV_HAL_IMPL_VSX_PACK(v_int8x16, schar, v_int16x8, unsigned short, short,
                         vec_sra, vec_packs, vec_adds, pack)

OPENCV_HAL_IMPL_VSX_PACK(v_uint16x8, ushort, v_uint32x4, unsigned int, unsigned int,
                         vec_sr, vec_packs, vec_add, pack)
OPENCV_HAL_IMPL_VSX_PACK(v_int16x8, short, v_int32x4, unsigned int, int,
                         vec_sra, vec_packs, vec_add, pack)

OPENCV_HAL_IMPL_VSX_PACK(v_uint32x4, uint, v_uint64x2, unsigned long long, unsigned long long,
                         vec_sr, vec_pack, vec_add, pack)
OPENCV_HAL_IMPL_VSX_PACK(v_int32x4, int, v_int64x2, unsigned long long, long long,
                         vec_sra, vec_pack, vec_add, pack)

OPENCV_HAL_IMPL_VSX_PACK(v_uint8x16, uchar, v_int16x8, unsigned short, short,
                         vec_sra, vec_packsu, vec_adds, pack_u)
OPENCV_HAL_IMPL_VSX_PACK(v_uint16x8, ushort, v_int32x4, unsigned int, int,
                         vec_sra, vec_packsu, vec_add, pack_u)
// The following variant is not implemented on other platforms:
//OPENCV_HAL_IMPL_VSX_PACK(v_uint32x4, uint, v_int64x2, unsigned long long, long long,
//                         vec_sra, vec_packsu, vec_add, pack_u)
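
// Worked example (illustrative): v_rshr_pack<n> adds delta = 1 << (n-1) before
// the right shift, so the narrowing conversion rounds instead of truncating.
// With n = 4 and a lane value of 40: (40 + 8) >> 4 = 3, while plain 40 >> 4 = 2.
//
//   v_uint16x8 w = v_setall_u16(40);
//   v_uint8x16 p = v_rshr_pack<4>(w, w);   // every lane becomes 3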

template <typename _Tpvec>
inline void v_zip(const _Tpvec& a0, const _Tpvec& a1, _Tpvec& b0, _Tpvec& b1)
{
    b0.val = vec_mergeh(a0.val, a1.val);
    b1.val = vec_mergel(a0.val, a1.val);
}

template <typename _Tpvec>
inline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b)
{ return _Tpvec(vec_mergesql(a.val, b.val)); }

template <typename _Tpvec>
inline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b)
{ return _Tpvec(vec_mergesqh(a.val, b.val)); }

template <typename _Tpvec>
inline void v_recombine(const _Tpvec& a, const _Tpvec& b, _Tpvec& c, _Tpvec& d)
{
    c.val = vec_mergesqh(a.val, b.val);
    d.val = vec_mergesql(a.val, b.val);
}
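
// Usage sketch (illustrative): interleaving two vectors lane by lane.
//
//   v_uint32x4 a(0, 1, 2, 3), b(10, 11, 12, 13);
//   v_uint32x4 lo, hi;
//   v_zip(a, b, lo, hi);   // lo = {0, 10, 1, 11}, hi = {2, 12, 3, 13}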

////////// Arithmetic, bitwise and comparison operations /////////

/* Element-wise binary and unary operations */
#define OPENCV_HAL_IMPL_VSX_BIN_OP(bin_op, _Tpvec, intrin) \
inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(intrin(a.val, b.val)); } \
inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \
{ a.val = intrin(a.val, b.val); return a; }

OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint8x16, vec_adds)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint8x16, vec_subs)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int8x16, vec_adds)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int8x16, vec_subs)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint16x8, vec_adds)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint16x8, vec_subs)
OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_uint16x8, vec_mul)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int16x8, vec_adds)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int16x8, vec_subs)
OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_int16x8, vec_mul)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint32x4, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint32x4, vec_sub)
OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_uint32x4, vec_mul)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int32x4, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int32x4, vec_sub)
OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_int32x4, vec_mul)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_float32x4, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_float32x4, vec_sub)
OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_float32x4, vec_mul)
OPENCV_HAL_IMPL_VSX_BIN_OP(/, v_float32x4, vec_div)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_float64x2, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_float64x2, vec_sub)
OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_float64x2, vec_mul)
OPENCV_HAL_IMPL_VSX_BIN_OP(/, v_float64x2, vec_div)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint64x2, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint64x2, vec_sub)
OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int64x2, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int64x2, vec_sub)
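
// Note (illustrative): the 8/16-bit +/- operators map onto the saturating
// vec_adds/vec_subs, so lanes clamp instead of wrapping.
//
//   v_uint8x16 x = v_setall_u8(250) + v_setall_u8(10);   // every lane is 255, not 4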

inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b, v_int32x4& c, v_int32x4& d)
{
    c.val = vec_mul(vec_unpackh(a.val), vec_unpackh(b.val));
    d.val = vec_mul(vec_unpackl(a.val), vec_unpackl(b.val));
}
inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b, v_uint32x4& c, v_uint32x4& d)
{
    c.val = vec_mul(vec_unpackhu(a.val), vec_unpackhu(b.val));
    d.val = vec_mul(vec_unpacklu(a.val), vec_unpacklu(b.val));
}
inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b, v_uint64x2& c, v_uint64x2& d)
{
    c.val = vec_mul(vec_unpackhu(a.val), vec_unpackhu(b.val));
    d.val = vec_mul(vec_unpacklu(a.val), vec_unpacklu(b.val));
}

/** Non-saturating arithmetic **/
#define OPENCV_HAL_IMPL_VSX_BIN_FUNC(func, intrin) \
template<typename _Tpvec> \
inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(intrin(a.val, b.val)); }

OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_add_wrap, vec_add)
OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_sub_wrap, vec_sub)

/** Bitwise shifts **/
#define OPENCV_HAL_IMPL_VSX_SHIFT_OP(_Tpvec, shr, splfunc) \
inline _Tpvec operator << (const _Tpvec& a, int imm) \
{ return _Tpvec(vec_sl(a.val, splfunc(imm))); } \
inline _Tpvec operator >> (const _Tpvec& a, int imm) \
{ return _Tpvec(shr(a.val, splfunc(imm))); } \
template<int imm> inline _Tpvec v_shl(const _Tpvec& a) \
{ return _Tpvec(vec_sl(a.val, splfunc(imm))); } \
template<int imm> inline _Tpvec v_shr(const _Tpvec& a) \
{ return _Tpvec(shr(a.val, splfunc(imm))); }

OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint8x16, vec_sr, vec_uchar16_sp)
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint16x8, vec_sr, vec_ushort8_sp)
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint32x4, vec_sr, vec_uint4_sp)
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint64x2, vec_sr, vec_udword2_sp)
// algebraic (sign-extending) right shift
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int8x16, vec_sra, vec_uchar16_sp)
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int16x8, vec_sra, vec_ushort8_sp)
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int32x4, vec_sra, vec_uint4_sp)
OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int64x2, vec_sra, vec_udword2_sp)
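
// Usage sketch (illustrative): signed types use vec_sra, so the sign bit is
// replicated on right shifts.
//
//   v_int32x4  s = v_setall_s32(-8);
//   v_int32x4  t = v_shr<2>(s);            // every lane is -2 (arithmetic shift)
//   v_uint32x4 u = v_setall_u32(8) << 1;   // every lane is 16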

/** Bitwise logic **/
#define OPENCV_HAL_IMPL_VSX_LOGIC_OP(_Tpvec) \
OPENCV_HAL_IMPL_VSX_BIN_OP(&, _Tpvec, vec_and) \
OPENCV_HAL_IMPL_VSX_BIN_OP(|, _Tpvec, vec_or) \
OPENCV_HAL_IMPL_VSX_BIN_OP(^, _Tpvec, vec_xor) \
inline _Tpvec operator ~ (const _Tpvec& a) \
{ return _Tpvec(vec_not(a.val)); }

OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint8x16)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int8x16)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint16x8)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int16x8)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint32x4)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int32x4)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint64x2)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int64x2)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_float32x4)
OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_float64x2)

/** Bitwise select **/
#define OPENCV_HAL_IMPL_VSX_SELECT(_Tpvec, cast) \
inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_sel(b.val, a.val, cast(mask.val))); }

OPENCV_HAL_IMPL_VSX_SELECT(v_uint8x16, vec_bchar16_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_int8x16, vec_bchar16_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_uint16x8, vec_bshort8_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_int16x8, vec_bshort8_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_uint32x4, vec_bint4_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_int32x4, vec_bint4_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_float32x4, vec_bint4_c)
OPENCV_HAL_IMPL_VSX_SELECT(v_float64x2, vec_bdword2_c)
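
// Usage sketch (illustrative): a branchless per-lane clamp; the comparison
// operators below yield an all-ones/all-zeros mask per lane, and
// v_select(mask, a, b) picks a where the mask is set.
//
//   v_float32x4 v(1.f, 5.f, 2.f, 9.f), thr = v_setall_f32(4.f);
//   v_float32x4 clipped = v_select(v > thr, thr, v);   // {1, 4, 2, 4}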

#define OPENCV_HAL_IMPL_VSX_INT_CMP_OP(_Tpvec) \
inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_cmpeq(a.val, b.val)); } \
inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_cmpne(a.val, b.val)); } \
inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_cmplt(a.val, b.val)); } \
inline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_cmpgt(a.val, b.val)); } \
inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_cmple(a.val, b.val)); } \
inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_cmpge(a.val, b.val)); }

OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint8x16)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int8x16)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint16x8)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int16x8)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint32x4)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int32x4)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_float32x4)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_float64x2)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint64x2)
OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int64x2)

OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_min, vec_min)
OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_max, vec_max)

#define OPENCV_IMPL_VSX_ROTATE(_Tpvec, suffix, shf, cast) \
template<int imm> \
inline _Tpvec v_rotate_##suffix(const _Tpvec& a) \
{ \
    const int wd = imm * sizeof(typename _Tpvec::lane_type); \
    if (wd > 15) \
        return _Tpvec(); \
    return _Tpvec((cast)shf(vec_uchar16_c(a.val), vec_uchar16_sp(wd << 3))); \
}

#define OPENCV_IMPL_VSX_ROTATE_LR(_Tpvec, cast) \
OPENCV_IMPL_VSX_ROTATE(_Tpvec, left, vec_slo, cast) \
OPENCV_IMPL_VSX_ROTATE(_Tpvec, right, vec_sro, cast)

OPENCV_IMPL_VSX_ROTATE_LR(v_uint8x16, vec_uchar16)
OPENCV_IMPL_VSX_ROTATE_LR(v_int8x16, vec_char16)
OPENCV_IMPL_VSX_ROTATE_LR(v_uint16x8, vec_ushort8)
OPENCV_IMPL_VSX_ROTATE_LR(v_int16x8, vec_short8)
OPENCV_IMPL_VSX_ROTATE_LR(v_uint32x4, vec_uint4)
OPENCV_IMPL_VSX_ROTATE_LR(v_int32x4, vec_int4)
OPENCV_IMPL_VSX_ROTATE_LR(v_float32x4, vec_float4)
OPENCV_IMPL_VSX_ROTATE_LR(v_uint64x2, vec_udword2)
OPENCV_IMPL_VSX_ROTATE_LR(v_int64x2, vec_dword2)
OPENCV_IMPL_VSX_ROTATE_LR(v_float64x2, vec_double2)

template<int imm, typename _Tpvec>
inline _Tpvec v_rotate_right(const _Tpvec& a, const _Tpvec& b)
{
    enum { CV_SHIFT = 16 - imm * (sizeof(typename _Tpvec::lane_type)) };
    if (CV_SHIFT == 16)
        return a;
#ifdef __IBMCPP__
    return _Tpvec(vec_sld(b.val, a.val, CV_SHIFT & 15));
#else
    return _Tpvec(vec_sld(b.val, a.val, CV_SHIFT));
#endif
}

template<int imm, typename _Tpvec>
inline _Tpvec v_rotate_left(const _Tpvec& a, const _Tpvec& b)
{
    enum { CV_SHIFT = imm * (sizeof(typename _Tpvec::lane_type)) };
    if (CV_SHIFT == 16)
        return b;
    return _Tpvec(vec_sld(a.val, b.val, CV_SHIFT));
}

#define OPENCV_IMPL_VSX_ROTATE_64_2RG(_Tpvec, suffix, rg1, rg2) \
template<int imm> \
inline _Tpvec v_rotate_##suffix(const _Tpvec& a, const _Tpvec& b) \
{ \
    if (imm == 1) \
        return _Tpvec(vec_permi(rg1.val, rg2.val, 2)); \
    return imm ? b : a; \
}

#define OPENCV_IMPL_VSX_ROTATE_64_2RG_LR(_Tpvec) \
OPENCV_IMPL_VSX_ROTATE_64_2RG(_Tpvec, left, b, a) \
OPENCV_IMPL_VSX_ROTATE_64_2RG(_Tpvec, right, a, b)

OPENCV_IMPL_VSX_ROTATE_64_2RG_LR(v_float64x2)
OPENCV_IMPL_VSX_ROTATE_64_2RG_LR(v_uint64x2)
OPENCV_IMPL_VSX_ROTATE_64_2RG_LR(v_int64x2)

template<int s, typename _Tpvec>
inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b)
{ return v_rotate_right<s>(a, b); }
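
// Usage sketch (illustrative): v_extract<s> concatenates a:b and returns the
// 128 bits starting at lane s of a, matching the other universal-intrinsic
// backends.
//
//   v_int32x4 a(0, 1, 2, 3), b(4, 5, 6, 7);
//   v_int32x4 r = v_extract<1>(a, b);   // r = {1, 2, 3, 4}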

////////// Reduce and mask /////////

inline short v_reduce_sum(const v_int16x8& a)
{
    const vec_int4 zero = vec_int4_z;
    return saturate_cast<short>(vec_extract(vec_sums(vec_sum4s(a.val, zero), zero), 3));
}
inline ushort v_reduce_sum(const v_uint16x8& a)
{
    const vec_int4 v4 = vec_int4_c(vec_unpackhu(vec_adds(a.val, vec_sld(a.val, a.val, 8))));
    return saturate_cast<ushort>(vec_extract(vec_sums(v4, vec_int4_z), 3));
}

#define OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(_Tpvec, _Tpvec2, scalartype, suffix, func) \
inline scalartype v_reduce_##suffix(const _Tpvec& a) \
{ \
    const _Tpvec2 rs = func(a.val, vec_sld(a.val, a.val, 8)); \
    return vec_extract(func(rs, vec_sld(rs, rs, 4)), 0); \
}
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_uint32x4, vec_uint4, uint, sum, vec_add)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_uint32x4, vec_uint4, uint, max, vec_max)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_uint32x4, vec_uint4, uint, min, vec_min)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_int32x4, vec_int4, int, sum, vec_add)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_int32x4, vec_int4, int, max, vec_max)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_int32x4, vec_int4, int, min, vec_min)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_float32x4, vec_float4, float, sum, vec_add)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_float32x4, vec_float4, float, max, vec_max)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_float32x4, vec_float4, float, min, vec_min)

#define OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(_Tpvec, _Tpvec2, scalartype, suffix, func) \
inline scalartype v_reduce_##suffix(const _Tpvec& a) \
{ \
    _Tpvec2 rs = func(a.val, vec_sld(a.val, a.val, 8)); \
    rs = func(rs, vec_sld(rs, rs, 4)); \
    return vec_extract(func(rs, vec_sld(rs, rs, 2)), 0); \
}
OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_uint16x8, vec_ushort8, ushort, max, vec_max)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_uint16x8, vec_ushort8, ushort, min, vec_min)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_int16x8, vec_short8, short, max, vec_max)
OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_int16x8, vec_short8, short, min, vec_min)

inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b,
                                 const v_float32x4& c, const v_float32x4& d)
{
    vec_float4 ac = vec_add(vec_mergel(a.val, c.val), vec_mergeh(a.val, c.val));
    ac = vec_add(ac, vec_sld(ac, ac, 8));

    vec_float4 bd = vec_add(vec_mergel(b.val, d.val), vec_mergeh(b.val, d.val));
    bd = vec_add(bd, vec_sld(bd, bd, 8));
    return v_float32x4(vec_mergeh(ac, bd));
}
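
// Usage sketch (illustrative): horizontal reduction of a single vector.
//
//   v_int32x4 v(1, 2, 3, 4);
//   int s = v_reduce_sum(v);   // 10
//   int m = v_reduce_max(v);   // 4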

template<typename _Tpvec>
inline v_uint32x4 v_popcount(const _Tpvec& a)
{ return v_uint32x4(vec_popcntu(vec_uint4_c(a.val))); }

inline int v_signmask(const v_uint8x16& a)
{
    vec_uchar16 sv = vec_sr(a.val, vec_uchar16_sp(7));
    static const vec_uchar16 slm = {0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7};
    sv = vec_sl(sv, slm);
    vec_uint4 sv4 = vec_sum4s(sv, vec_uint4_z);
    static const vec_uint4 slm4 = {0, 0, 8, 8};
    sv4 = vec_sl(sv4, slm4);
    return vec_extract(vec_sums((vec_int4) sv4, vec_int4_z), 3);
}
inline int v_signmask(const v_int8x16& a)
{ return v_signmask(v_reinterpret_as_u8(a)); }

inline int v_signmask(const v_int16x8& a)
{
    static const vec_ushort8 slm = {0, 1, 2, 3, 4, 5, 6, 7};
    vec_short8 sv = vec_sr(a.val, vec_ushort8_sp(15));
    sv = vec_sl(sv, slm);
    vec_int4 svi = vec_int4_z;
    svi = vec_sums(vec_sum4s(sv, svi), svi);
    return vec_extract(svi, 3);
}
inline int v_signmask(const v_uint16x8& a)
{ return v_signmask(v_reinterpret_as_s16(a)); }

inline int v_signmask(const v_int32x4& a)
{
    static const vec_uint4 slm = {0, 1, 2, 3};
    vec_int4 sv = vec_sr(a.val, vec_uint4_sp(31));
    sv = vec_sl(sv, slm);
    sv = vec_sums(sv, vec_int4_z);
    return vec_extract(sv, 3);
}
inline int v_signmask(const v_uint32x4& a)
{ return v_signmask(v_reinterpret_as_s32(a)); }
inline int v_signmask(const v_float32x4& a)
{ return v_signmask(v_reinterpret_as_s32(a)); }

inline int v_signmask(const v_int64x2& a)
{
    VSX_UNUSED(const vec_dword2) sv = vec_sr(a.val, vec_udword2_sp(63));
    return (int)vec_extract(sv, 0) | (int)vec_extract(sv, 1) << 1;
}
inline int v_signmask(const v_uint64x2& a)
{ return v_signmask(v_reinterpret_as_s64(a)); }
inline int v_signmask(const v_float64x2& a)
{ return v_signmask(v_reinterpret_as_s64(a)); }
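
// Usage sketch (illustrative): gathering the per-lane sign bits into one
// integer, e.g. to inspect which lanes passed a comparison.
//
//   v_int32x4 v(-1, 2, -3, 4);
//   int mask = v_signmask(v);   // 0b0101 = 5 (lanes 0 and 2 are negative)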

template<typename _Tpvec>
inline bool v_check_all(const _Tpvec& a)
{ return vec_all_lt(a.val, _Tpvec().val); }
inline bool v_check_all(const v_uint8x16& a)
{ return v_check_all(v_reinterpret_as_s8(a)); }
inline bool v_check_all(const v_uint16x8& a)
{ return v_check_all(v_reinterpret_as_s16(a)); }
inline bool v_check_all(const v_uint32x4& a)
{ return v_check_all(v_reinterpret_as_s32(a)); }
inline bool v_check_all(const v_float32x4& a)
{ return v_check_all(v_reinterpret_as_s32(a)); }
inline bool v_check_all(const v_float64x2& a)
{ return v_check_all(v_reinterpret_as_s64(a)); }

template<typename _Tpvec>
inline bool v_check_any(const _Tpvec& a)
{ return vec_any_lt(a.val, _Tpvec().val); }
inline bool v_check_any(const v_uint8x16& a)
{ return v_check_any(v_reinterpret_as_s8(a)); }
inline bool v_check_any(const v_uint16x8& a)
{ return v_check_any(v_reinterpret_as_s16(a)); }
inline bool v_check_any(const v_uint32x4& a)
{ return v_check_any(v_reinterpret_as_s32(a)); }
inline bool v_check_any(const v_float32x4& a)
{ return v_check_any(v_reinterpret_as_s32(a)); }
inline bool v_check_any(const v_float64x2& a)
{ return v_check_any(v_reinterpret_as_s64(a)); }

////////// Other math /////////

/** Some frequent operations **/
inline v_float32x4 v_sqrt(const v_float32x4& x)
{ return v_float32x4(vec_sqrt(x.val)); }
inline v_float64x2 v_sqrt(const v_float64x2& x)
{ return v_float64x2(vec_sqrt(x.val)); }

inline v_float32x4 v_invsqrt(const v_float32x4& x)
{ return v_float32x4(vec_rsqrt(x.val)); }
inline v_float64x2 v_invsqrt(const v_float64x2& x)
{ return v_float64x2(vec_rsqrt(x.val)); }

#define OPENCV_HAL_IMPL_VSX_MULADD(_Tpvec) \
inline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_sqrt(vec_madd(a.val, a.val, vec_mul(b.val, b.val)))); } \
inline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(vec_madd(a.val, a.val, vec_mul(b.val, b.val))); } \
inline _Tpvec v_fma(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \
{ return _Tpvec(vec_madd(a.val, b.val, c.val)); } \
inline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \
{ return _Tpvec(vec_madd(a.val, b.val, c.val)); }

OPENCV_HAL_IMPL_VSX_MULADD(v_float32x4)
OPENCV_HAL_IMPL_VSX_MULADD(v_float64x2)

inline v_int32x4 v_muladd(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c)
{ return a * b + c; }
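
// Usage sketch (illustrative): the fused multiply-add maps onto one vec_madd.
//
//   v_float32x4 a = v_setall_f32(2.f), b = v_setall_f32(3.f), c = v_setall_f32(1.f);
//   v_float32x4 r = v_fma(a, b, c);   // every lane is 7.f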

// TODO: exp, log, sin, cos

/** Absolute values **/
inline v_uint8x16 v_abs(const v_int8x16& x)
{ return v_uint8x16(vec_uchar16_c(vec_abs(x.val))); }

inline v_uint16x8 v_abs(const v_int16x8& x)
{ return v_uint16x8(vec_ushort8_c(vec_abs(x.val))); }

inline v_uint32x4 v_abs(const v_int32x4& x)
{ return v_uint32x4(vec_uint4_c(vec_abs(x.val))); }

inline v_float32x4 v_abs(const v_float32x4& x)
{ return v_float32x4(vec_abs(x.val)); }

inline v_float64x2 v_abs(const v_float64x2& x)
{ return v_float64x2(vec_abs(x.val)); }

OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_absdiff, vec_absd)

#define OPENCV_HAL_IMPL_VSX_BIN_FUNC2(_Tpvec, _Tpvec2, cast, func, intrin) \
inline _Tpvec2 func(const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec2(cast(intrin(a.val, b.val))); }

OPENCV_HAL_IMPL_VSX_BIN_FUNC2(v_int8x16, v_uint8x16, vec_uchar16_c, v_absdiff, vec_absd)
OPENCV_HAL_IMPL_VSX_BIN_FUNC2(v_int16x8, v_uint16x8, vec_ushort8_c, v_absdiff, vec_absd)
OPENCV_HAL_IMPL_VSX_BIN_FUNC2(v_int32x4, v_uint32x4, vec_uint4_c, v_absdiff, vec_absd)
OPENCV_HAL_IMPL_VSX_BIN_FUNC2(v_int64x2, v_uint64x2, vec_udword2_c, v_absdiff, vec_absd)

////////// Conversions /////////

inline v_int32x4 v_round(const v_float32x4& a)
{ return v_int32x4(vec_cts(vec_round(a.val))); }

inline v_int32x4 v_round(const v_float64x2& a)
{ return v_int32x4(vec_mergesqo(vec_ctso(vec_round(a.val)), vec_int4_z)); }

inline v_int32x4 v_floor(const v_float32x4& a)
{ return v_int32x4(vec_cts(vec_floor(a.val))); }

inline v_int32x4 v_floor(const v_float64x2& a)
{ return v_int32x4(vec_mergesqo(vec_ctso(vec_floor(a.val)), vec_int4_z)); }

inline v_int32x4 v_ceil(const v_float32x4& a)
{ return v_int32x4(vec_cts(vec_ceil(a.val))); }

inline v_int32x4 v_ceil(const v_float64x2& a)
{ return v_int32x4(vec_mergesqo(vec_ctso(vec_ceil(a.val)), vec_int4_z)); }

inline v_int32x4 v_trunc(const v_float32x4& a)
{ return v_int32x4(vec_cts(a.val)); }

inline v_int32x4 v_trunc(const v_float64x2& a)
{ return v_int32x4(vec_mergesqo(vec_ctso(a.val), vec_int4_z)); }
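
// Usage sketch (illustrative): the float64x2 variants above fill the upper two
// result lanes with zero, since two doubles yield only two ints.
//
//   v_float32x4 f(1.2f, -1.2f, 2.7f, -2.7f);
//   v_int32x4 r = v_round(f);   // {1, -1, 3, -3}
//   v_int32x4 t = v_trunc(f);   // {1, -1, 2, -2}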

inline v_float32x4 v_cvt_f32(const v_int32x4& a)
{ return v_float32x4(vec_ctf(a.val)); }

inline v_float32x4 v_cvt_f32(const v_float64x2& a)
{ return v_float32x4(vec_mergesqo(vec_cvfo(a.val), vec_float4_z)); }

inline v_float32x4 v_cvt_f32(const v_float64x2& a, const v_float64x2& b)
{ return v_float32x4(vec_mergesqo(vec_cvfo(a.val), vec_cvfo(b.val))); }

inline v_float64x2 v_cvt_f64(const v_int32x4& a)
{ return v_float64x2(vec_ctdo(vec_mergeh(a.val, a.val))); }

inline v_float64x2 v_cvt_f64_high(const v_int32x4& a)
{ return v_float64x2(vec_ctdo(vec_mergel(a.val, a.val))); }

inline v_float64x2 v_cvt_f64(const v_float32x4& a)
{ return v_float64x2(vec_cvfo(vec_mergeh(a.val, a.val))); }

inline v_float64x2 v_cvt_f64_high(const v_float32x4& a)
{ return v_float64x2(vec_cvfo(vec_mergel(a.val, a.val))); }

////////////// Lookup table access ////////////////////

inline v_int32x4 v_lut(const int* tab, const v_int32x4& idxvec)
{
    int CV_DECL_ALIGNED(32) idx[4];
    v_store_aligned(idx, idxvec);
    return v_int32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]);
}

inline v_float32x4 v_lut(const float* tab, const v_int32x4& idxvec)
{
    int CV_DECL_ALIGNED(32) idx[4];
    v_store_aligned(idx, idxvec);
    return v_float32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]);
}

inline v_float64x2 v_lut(const double* tab, const v_int32x4& idxvec)
{
    int CV_DECL_ALIGNED(32) idx[4];
    v_store_aligned(idx, idxvec);
    return v_float64x2(tab[idx[0]], tab[idx[1]]);
}

inline void v_lut_deinterleave(const float* tab, const v_int32x4& idxvec, v_float32x4& x, v_float32x4& y)
{
    int CV_DECL_ALIGNED(32) idx[4];
    v_store_aligned(idx, idxvec);
    x = v_float32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]);
    y = v_float32x4(tab[idx[0]+1], tab[idx[1]+1], tab[idx[2]+1], tab[idx[3]+1]);
}

inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_float64x2& x, v_float64x2& y)
{
    int CV_DECL_ALIGNED(32) idx[4];
    v_store_aligned(idx, idxvec);
    x = v_float64x2(tab[idx[0]], tab[idx[1]]);
    y = v_float64x2(tab[idx[0]+1], tab[idx[1]+1]);
}

inline void v_cleanup() {}

/** Reinterpret **/
/** it's up there with the load and store operations **/

////////// Matrix operations /////////

inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b)
{ return v_int32x4(vec_msum(a.val, b.val, vec_int4_z)); }

inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c)
{ return v_int32x4(vec_msum(a.val, b.val, c.val)); }
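
// Usage sketch (illustrative): pairs of adjacent 16-bit products are summed
// into 32-bit lanes by a single vec_msum.
//
//   v_int16x8 a = v_setall_s16(2), b = v_setall_s16(3);
//   v_int32x4 d = v_dotprod(a, b);   // every lane is 2*3 + 2*3 = 12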

inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
                            const v_float32x4& m1, const v_float32x4& m2,
                            const v_float32x4& m3)
{
    const vec_float4 v0 = vec_splat(v.val, 0);
    const vec_float4 v1 = vec_splat(v.val, 1);
    const vec_float4 v2 = vec_splat(v.val, 2);
    VSX_UNUSED(const vec_float4) v3 = vec_splat(v.val, 3);
    return v_float32x4(vec_madd(v0, m0.val, vec_madd(v1, m1.val, vec_madd(v2, m2.val, vec_mul(v3, m3.val)))));
}

inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0,
                               const v_float32x4& m1, const v_float32x4& m2,
                               const v_float32x4& a)
{
    const vec_float4 v0 = vec_splat(v.val, 0);
    const vec_float4 v1 = vec_splat(v.val, 1);
    const vec_float4 v2 = vec_splat(v.val, 2);
    return v_float32x4(vec_madd(v0, m0.val, vec_madd(v1, m1.val, vec_madd(v2, m2.val, a.val))));
}

#define OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(_Tpvec, _Tpvec2) \
inline void v_transpose4x4(const _Tpvec& a0, const _Tpvec& a1, \
                           const _Tpvec& a2, const _Tpvec& a3, \
                           _Tpvec& b0, _Tpvec& b1, _Tpvec& b2, _Tpvec& b3) \
{ \
    _Tpvec2 a02 = vec_mergeh(a0.val, a2.val); \
    _Tpvec2 a13 = vec_mergeh(a1.val, a3.val); \
    b0.val = vec_mergeh(a02, a13); \
    b1.val = vec_mergel(a02, a13); \
    a02 = vec_mergel(a0.val, a2.val); \
    a13 = vec_mergel(a1.val, a3.val); \
    b2.val = vec_mergeh(a02, a13); \
    b3.val = vec_mergel(a02, a13); \
}
OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_uint32x4, vec_uint4)
OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_int32x4, vec_int4)
OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_float32x4, vec_float4)
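
// Usage sketch (illustrative): 4x4 transpose built from merge high/low pairs.
//
//   v_int32x4 r0(0, 1, 2, 3), r1(4, 5, 6, 7), r2(8, 9, 10, 11), r3(12, 13, 14, 15);
//   v_int32x4 c0, c1, c2, c3;
//   v_transpose4x4(r0, r1, r2, r3, c0, c1, c2, c3);   // c0 = {0, 4, 8, 12}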

//! @name Check SIMD support
//! @{
//! @brief Check CPU capability of SIMD operation
static inline bool hasSIMD128()
{
    return (CV_CPU_HAS_SUPPORT_VSX) ? true : false;
}
//! @}

CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END

#endif // OPENCV_HAL_VSX_HPP