using namespace CV__SIMD_NAMESPACE;
#endif
+ namespace CV__SIMD_NAMESPACE {
+ //! @addtogroup core_hal_intrin
+ //! @{
+ //! @name Wide init with value
+ //! @{
+ //! @brief Create a vector of maximum available capacity, with all elements set to a specific value
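+ //! A minimal usage sketch (assuming the fixed-size API, where `nlanes` is a compile-time constant):
+ //! @code
+ //! float buf[v_float32::nlanes];
+ //! v_float32 ones = vx_setall_f32(1.f); // every lane holds 1.f
+ //! v_store(buf, ones);                  // spill the register back to memory
+ //! @endcode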
+ inline v_uint8 vx_setall_u8(uchar v) { return VXPREFIX(_setall_u8)(v); }
+ inline v_int8 vx_setall_s8(schar v) { return VXPREFIX(_setall_s8)(v); }
+ inline v_uint16 vx_setall_u16(ushort v) { return VXPREFIX(_setall_u16)(v); }
+ inline v_int16 vx_setall_s16(short v) { return VXPREFIX(_setall_s16)(v); }
+ inline v_int32 vx_setall_s32(int v) { return VXPREFIX(_setall_s32)(v); }
+ inline v_uint32 vx_setall_u32(unsigned v) { return VXPREFIX(_setall_u32)(v); }
+ inline v_float32 vx_setall_f32(float v) { return VXPREFIX(_setall_f32)(v); }
+ inline v_int64 vx_setall_s64(int64 v) { return VXPREFIX(_setall_s64)(v); }
+ inline v_uint64 vx_setall_u64(uint64 v) { return VXPREFIX(_setall_u64)(v); }
+ #if CV_SIMD_64F
+ inline v_float64 vx_setall_f64(double v) { return VXPREFIX(_setall_f64)(v); }
+ #endif
+ //! @}
+
+ //! @name Wide init with zero
+ //! @{
+ //! @brief Create a vector of maximum available capacity, with all elements set to zero
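+ //! A zeroed register is the usual accumulator seed, e.g. (sketch):
+ //! @code
+ //! v_float32 sum = vx_setzero_f32();    // all lanes start at 0.f
+ //! @endcode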
+ inline v_uint8 vx_setzero_u8() { return VXPREFIX(_setzero_u8)(); }
+ inline v_int8 vx_setzero_s8() { return VXPREFIX(_setzero_s8)(); }
+ inline v_uint16 vx_setzero_u16() { return VXPREFIX(_setzero_u16)(); }
+ inline v_int16 vx_setzero_s16() { return VXPREFIX(_setzero_s16)(); }
+ inline v_int32 vx_setzero_s32() { return VXPREFIX(_setzero_s32)(); }
+ inline v_uint32 vx_setzero_u32() { return VXPREFIX(_setzero_u32)(); }
+ inline v_float32 vx_setzero_f32() { return VXPREFIX(_setzero_f32)(); }
+ inline v_int64 vx_setzero_s64() { return VXPREFIX(_setzero_s64)(); }
+ inline v_uint64 vx_setzero_u64() { return VXPREFIX(_setzero_u64)(); }
+ #if CV_SIMD_64F
+ inline v_float64 vx_setzero_f64() { return VXPREFIX(_setzero_f64)(); }
+ #endif
+ //! @}
+
+ //! @name Wide load from memory
+ //! @{
+ //! @brief Load a register of maximum available capacity from memory
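+ //! A typical reduction loop built on these loads (sketch; `src` and `n` are hypothetical,
+ //! and `n` is assumed to be a multiple of the lane count):
+ //! @code
+ //! v_float32 sum = vx_setzero_f32();
+ //! for (int i = 0; i < n; i += v_float32::nlanes)
+ //!     sum += vx_load(src + i);         // unaligned load of one full register
+ //! @endcode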
+ inline v_uint8 vx_load(const uchar * ptr) { return VXPREFIX(_load)(ptr); }
+ inline v_int8 vx_load(const schar * ptr) { return VXPREFIX(_load)(ptr); }
+ inline v_uint16 vx_load(const ushort * ptr) { return VXPREFIX(_load)(ptr); }
+ inline v_int16 vx_load(const short * ptr) { return VXPREFIX(_load)(ptr); }
+ inline v_int32 vx_load(const int * ptr) { return VXPREFIX(_load)(ptr); }
+ inline v_uint32 vx_load(const unsigned * ptr) { return VXPREFIX(_load)(ptr); }
+ inline v_float32 vx_load(const float * ptr) { return VXPREFIX(_load)(ptr); }
+ inline v_int64 vx_load(const int64 * ptr) { return VXPREFIX(_load)(ptr); }
+ inline v_uint64 vx_load(const uint64 * ptr) { return VXPREFIX(_load)(ptr); }
+ #if CV_SIMD_64F
+ inline v_float64 vx_load(const double * ptr) { return VXPREFIX(_load)(ptr); }
+ #endif
+ //! @}
+
+ //! @name Wide load from memory (aligned)
+ //! @{
+ //! @brief Load a register of maximum available capacity from aligned memory
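+ //! The aligned variants require `ptr` to satisfy the register's natural alignment, e.g. (sketch):
+ //! @code
+ //! CV_DECL_ALIGNED(CV_SIMD_WIDTH) float buf[v_float32::nlanes]; // hypothetical aligned buffer
+ //! v_float32 a = vx_load_aligned(buf);  // undefined behaviour if buf were misaligned
+ //! @endcode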
+ inline v_uint8 vx_load_aligned(const uchar * ptr) { return VXPREFIX(_load_aligned)(ptr); }
+ inline v_int8 vx_load_aligned(const schar * ptr) { return VXPREFIX(_load_aligned)(ptr); }
+ inline v_uint16 vx_load_aligned(const ushort * ptr) { return VXPREFIX(_load_aligned)(ptr); }
+ inline v_int16 vx_load_aligned(const short * ptr) { return VXPREFIX(_load_aligned)(ptr); }
+ inline v_int32 vx_load_aligned(const int * ptr) { return VXPREFIX(_load_aligned)(ptr); }
+ inline v_uint32 vx_load_aligned(const unsigned * ptr) { return VXPREFIX(_load_aligned)(ptr); }
+ inline v_float32 vx_load_aligned(const float * ptr) { return VXPREFIX(_load_aligned)(ptr); }
+ inline v_int64 vx_load_aligned(const int64 * ptr) { return VXPREFIX(_load_aligned)(ptr); }
+ inline v_uint64 vx_load_aligned(const uint64 * ptr) { return VXPREFIX(_load_aligned)(ptr); }
+ #if CV_SIMD_64F
+ inline v_float64 vx_load_aligned(const double * ptr) { return VXPREFIX(_load_aligned)(ptr); }
+ #endif
+ //! @}
+
+ //! @name Wide load lower half from memory
+ //! @{
+ //! @brief Load the lower half of a maximum available capacity register from memory
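+ //! Only half a register's worth of data is read; the upper half is left undefined (sketch,
+ //! `src` hypothetical):
+ //! @code
+ //! v_float32 lo = vx_load_low(src);     // reads v_float32::nlanes/2 floats from src
+ //! @endcode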
+ inline v_uint8 vx_load_low(const uchar * ptr) { return VXPREFIX(_load_low)(ptr); }
+ inline v_int8 vx_load_low(const schar * ptr) { return VXPREFIX(_load_low)(ptr); }
+ inline v_uint16 vx_load_low(const ushort * ptr) { return VXPREFIX(_load_low)(ptr); }
+ inline v_int16 vx_load_low(const short * ptr) { return VXPREFIX(_load_low)(ptr); }
+ inline v_int32 vx_load_low(const int * ptr) { return VXPREFIX(_load_low)(ptr); }
+ inline v_uint32 vx_load_low(const unsigned * ptr) { return VXPREFIX(_load_low)(ptr); }
+ inline v_float32 vx_load_low(const float * ptr) { return VXPREFIX(_load_low)(ptr); }
+ inline v_int64 vx_load_low(const int64 * ptr) { return VXPREFIX(_load_low)(ptr); }
+ inline v_uint64 vx_load_low(const uint64 * ptr) { return VXPREFIX(_load_low)(ptr); }
+ #if CV_SIMD_64F
+ inline v_float64 vx_load_low(const double * ptr) { return VXPREFIX(_load_low)(ptr); }
+ #endif
+ //! @}
+
+ //! @name Wide load halves from memory
+ //! @{
+ //! @brief Load a register of maximum available capacity from two half-size memory blocks
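+ //! For example, gathering two independent half-width rows into one register (sketch; `rowA`
+ //! and `rowB` are hypothetical float pointers):
+ //! @code
+ //! v_float32 v = vx_load_halves(rowA, rowB); // low half from rowA, high half from rowB
+ //! @endcode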
+ inline v_uint8 vx_load_halves(const uchar * ptr0, const uchar * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }
+ inline v_int8 vx_load_halves(const schar * ptr0, const schar * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }
+ inline v_uint16 vx_load_halves(const ushort * ptr0, const ushort * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }
+ inline v_int16 vx_load_halves(const short * ptr0, const short * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }
+ inline v_int32 vx_load_halves(const int * ptr0, const int * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }
+ inline v_uint32 vx_load_halves(const unsigned * ptr0, const unsigned * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }
+ inline v_float32 vx_load_halves(const float * ptr0, const float * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }
+ inline v_int64 vx_load_halves(const int64 * ptr0, const int64 * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }
+ inline v_uint64 vx_load_halves(const uint64 * ptr0, const uint64 * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }
+ #if CV_SIMD_64F
+ inline v_float64 vx_load_halves(const double * ptr0, const double * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); }
+ #endif
+ //! @}
+
+ //! @name Wide LUT of elements
+ //! @{
+ //! @brief Load array elements into a register of maximum available capacity, selected by the provided indices
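+ //! A gather sketch (`table` and `idx` are hypothetical; `idx` must hold one entry per lane):
+ //! @code
+ //! v_float32 g = vx_lut(table, idx);    // lanes: table[idx[0]], table[idx[1]], ...
+ //! @endcode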
+ inline v_uint8 vx_lut(const uchar * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }
+ inline v_int8 vx_lut(const schar * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }
+ inline v_uint16 vx_lut(const ushort * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }
+ inline v_int16 vx_lut(const short* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }
+ inline v_int32 vx_lut(const int* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }
+ inline v_uint32 vx_lut(const unsigned* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }
+ inline v_float32 vx_lut(const float* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }
+ inline v_int64 vx_lut(const int64 * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }
+ inline v_uint64 vx_lut(const uint64 * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }
+ #if CV_SIMD_64F
+ inline v_float64 vx_lut(const double* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); }
+ #endif
+ //! @}
+
+ //! @name Wide LUT of element pairs
+ //! @{
+ //! @brief Load array element pairs into a register of maximum available capacity, selected by the provided indices
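+ //! Each index selects two consecutive elements, so `idx` needs only nlanes/2 entries (sketch,
+ //! same hypothetical names as above):
+ //! @code
+ //! v_float32 g = vx_lut_pairs(table, idx); // lanes: table[idx[0]], table[idx[0]+1], table[idx[1]], ...
+ //! @endcode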
+ inline v_uint8 vx_lut_pairs(const uchar * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }
+ inline v_int8 vx_lut_pairs(const schar * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }
+ inline v_uint16 vx_lut_pairs(const ushort * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }
+ inline v_int16 vx_lut_pairs(const short* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }
+ inline v_int32 vx_lut_pairs(const int* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }
+ inline v_uint32 vx_lut_pairs(const unsigned* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }
+ inline v_float32 vx_lut_pairs(const float* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }
+ inline v_int64 vx_lut_pairs(const int64 * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }
+ inline v_uint64 vx_lut_pairs(const uint64 * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }
+ #if CV_SIMD_64F
+ inline v_float64 vx_lut_pairs(const double* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); }
+ #endif
+ //! @}
+
+ //! @name Wide LUT of element quads
+ //! @{
+ //! @brief Load array element quads into a register of maximum available capacity, selected by the provided indices
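+ //! Each index selects four consecutive elements, so `idx` needs only nlanes/4 entries (sketch):
+ //! @code
+ //! v_float32 g = vx_lut_quads(table, idx); // lanes: table[idx[0]] .. table[idx[0]+3], ...
+ //! @endcode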
+ inline v_uint8 vx_lut_quads(const uchar* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }
+ inline v_int8 vx_lut_quads(const schar* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }
+ inline v_uint16 vx_lut_quads(const ushort* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }
+ inline v_int16 vx_lut_quads(const short* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }
+ inline v_int32 vx_lut_quads(const int* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }
+ inline v_uint32 vx_lut_quads(const unsigned* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }
+ inline v_float32 vx_lut_quads(const float* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); }
+ //! @}
+
+ //! @name Wide load with double expansion
+ //! @{
+ //! @brief Load a register of maximum available capacity from memory, expanding each element to double width
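+ //! For example, widening 8-bit data to 16 bits on load (sketch; `src8` is a hypothetical
+ //! const uchar pointer):
+ //! @code
+ //! v_uint16 w = vx_load_expand(src8);   // reads half a register of uchar, yields a full register of ushort
+ //! @endcode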
+ inline v_uint16 vx_load_expand(const uchar * ptr) { return VXPREFIX(_load_expand)(ptr); }
+ inline v_int16 vx_load_expand(const schar * ptr) { return VXPREFIX(_load_expand)(ptr); }
+ inline v_uint32 vx_load_expand(const ushort * ptr) { return VXPREFIX(_load_expand)(ptr); }
+ inline v_int32 vx_load_expand(const short* ptr) { return VXPREFIX(_load_expand)(ptr); }
+ inline v_int64 vx_load_expand(const int* ptr) { return VXPREFIX(_load_expand)(ptr); }
+ inline v_uint64 vx_load_expand(const unsigned* ptr) { return VXPREFIX(_load_expand)(ptr); }
+ inline v_float32 vx_load_expand(const float16_t * ptr) { return VXPREFIX(_load_expand)(ptr); }
+ //! @}
+
+ //! @name Wide load with quad expansion
+ //! @{
+ //! @brief Load a register of maximum available capacity from memory, expanding each element to quadruple width
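+ //! For example, widening 8-bit data straight to 32 bits (sketch, `src8` as above):
+ //! @code
+ //! v_uint32 q = vx_load_expand_q(src8); // reads a quarter register of uchar, yields a full register of unsigned
+ //! @endcode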
+ inline v_uint32 vx_load_expand_q(const uchar * ptr) { return VXPREFIX(_load_expand_q)(ptr); }
+ inline v_int32 vx_load_expand_q(const schar * ptr) { return VXPREFIX(_load_expand_q)(ptr); }
+ //! @}
+
+ /** @brief SIMD processing state cleanup call */
+ inline void vx_cleanup() { VXPREFIX(_cleanup)(); }
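+ //! For illustration, a typical call site; on AVX builds this helps avoid SSE/AVX transition
+ //! penalties, while on most other backends it is a no-op:
+ //! @code
+ //! // ... wide-register processing loop ...
+ //! vx_cleanup();                        // reset SIMD state before returning to scalar code
+ //! @endcode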
+
+ //! @cond IGNORED
+
+ // backward compatibility: vx_store forwards to the generic v_store overload
+ template<typename _Tp, typename _Tvec> static inline
+ void vx_store(_Tp* dst, const _Tvec& v) { return v_store(dst, v); }
+ // backward compatibility: vx_store_aligned forwards to the generic v_store_aligned overload
+ template<typename _Tp, typename _Tvec> static inline
+ void vx_store_aligned(_Tp* dst, const _Tvec& v) { return v_store_aligned(dst, v); }
+
+ //! @endcond
+
+ //! @}
+ #undef VXPREFIX
+ } // namespace
+
+ //! @cond IGNORED
#ifndef CV_SIMD_64F
#define CV_SIMD_64F 0
#endif