void acc_##suffix(const type* src, acctype* dst, \
const uchar* mask, int len, int cn) \
{ \
- CV_CPU_CALL_NEON(acc_simd_, (src, dst, mask, len, cn)); \
- CV_CPU_CALL_SSE2(acc_simd_, (src, dst, mask, len, cn)); \
- CV_CPU_CALL_BASELINE(acc_general_, (src, dst, mask, len, cn)); \
+ CV_CPU_DISPATCH(acc_simd_, (src, dst, mask, len, cn), CV_CPU_DISPATCH_MODES_ALL); \
} \
void accSqr_##suffix(const type* src, acctype* dst, \
const uchar* mask, int len, int cn) \
{ \
- CV_CPU_CALL_NEON(accSqr_simd_, (src, dst, mask, len, cn)); \
- CV_CPU_CALL_SSE2(accSqr_simd_, (src, dst, mask, len, cn)); \
- CV_CPU_CALL_BASELINE(accSqr_general_, (src, dst, mask, len, cn)); \
+ CV_CPU_DISPATCH(accSqr_simd_, (src, dst, mask, len, cn), CV_CPU_DISPATCH_MODES_ALL); \
} \
void accProd_##suffix(const type* src1, const type* src2, \
acctype* dst, const uchar* mask, int len, int cn) \
{ \
- CV_CPU_CALL_NEON(accProd_simd_, (src1, src2, dst, mask, len, cn)); \
- CV_CPU_CALL_SSE2(accProd_simd_, (src1, src2, dst, mask, len, cn)); \
- CV_CPU_CALL_BASELINE(accProd_general_, (src1, src2, dst, mask, len, cn)); \
+ CV_CPU_DISPATCH(accProd_simd_, (src1, src2, dst, mask, len, cn), CV_CPU_DISPATCH_MODES_ALL); \
} \
void accW_##suffix(const type* src, acctype* dst, \
const uchar* mask, int len, int cn, double alpha) \
{ \
- CV_CPU_CALL_NEON(accW_simd_, (src, dst, mask, len, cn, alpha)); \
- CV_CPU_CALL_SSE2(accW_simd_, (src, dst, mask, len, cn, alpha)); \
- CV_CPU_CALL_BASELINE(accW_general_, (src, dst, mask, len, cn, alpha)); \
+ CV_CPU_DISPATCH(accW_simd_, (src, dst, mask, len, cn, alpha), CV_CPU_DISPATCH_MODES_ALL); \
}
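+// Note: CV_CPU_DISPATCH(fn, args, CV_CPU_DISPATCH_MODES_ALL) expands to the same
+// runtime CPU checks as the removed CV_CPU_CALL_NEON/SSE2/BASELINE chain, invoking
+// the best compiled variant of fn for the host CPU.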
#define DEF_ACC_FLT_FUNCS(suffix, type, acctype) \
void acc_##suffix(const type* src, acctype* dst, \
const uchar* mask, int len, int cn) \
{ \
- CV_CPU_CALL_AVX(acc_avx_##suffix, (src, dst, mask, len, cn)); \
- CV_CPU_CALL_NEON(acc_simd_, (src, dst, mask, len, cn)); \
- CV_CPU_CALL_SSE2(acc_simd_, (src, dst, mask, len, cn)); \
- CV_CPU_CALL_BASELINE(acc_general_, (src, dst, mask, len, cn)); \
+ CV_CPU_DISPATCH(acc_simd_, (src, dst, mask, len, cn), CV_CPU_DISPATCH_MODES_ALL); \
} \
void accSqr_##suffix(const type* src, acctype* dst, \
const uchar* mask, int len, int cn) \
{ \
- CV_CPU_CALL_AVX(accSqr_avx_##suffix, (src, dst, mask, len, cn)); \
- CV_CPU_CALL_NEON(accSqr_simd_, (src, dst, mask, len, cn)); \
- CV_CPU_CALL_SSE2(accSqr_simd_, (src, dst, mask, len, cn)); \
- CV_CPU_CALL_BASELINE(accSqr_general_, (src, dst, mask, len, cn)); \
+ CV_CPU_DISPATCH(accSqr_simd_, (src, dst, mask, len, cn), CV_CPU_DISPATCH_MODES_ALL); \
} \
void accProd_##suffix(const type* src1, const type* src2, \
acctype* dst, const uchar* mask, int len, int cn) \
{ \
- CV_CPU_CALL_AVX(accProd_avx_##suffix, (src1, src2, dst, mask, len, cn)); \
- CV_CPU_CALL_NEON(accProd_simd_, (src1, src2, dst, mask, len, cn)); \
- CV_CPU_CALL_SSE2(accProd_simd_, (src1, src2, dst, mask, len, cn)); \
- CV_CPU_CALL_BASELINE(accProd_general_, (src1, src2, dst, mask, len, cn)); \
+ CV_CPU_DISPATCH(accProd_simd_, (src1, src2, dst, mask, len, cn), CV_CPU_DISPATCH_MODES_ALL); \
} \
void accW_##suffix(const type* src, acctype* dst, \
const uchar* mask, int len, int cn, double alpha) \
{ \
- CV_CPU_CALL_AVX(accW_avx_##suffix, (src, dst, mask, len, cn, alpha)); \
- CV_CPU_CALL_NEON(accW_simd_, (src, dst, mask, len, cn, alpha)); \
- CV_CPU_CALL_SSE2(accW_simd_, (src, dst, mask, len, cn, alpha)); \
- CV_CPU_CALL_BASELINE(accW_general_, (src, dst, mask, len, cn, alpha)); \
+ CV_CPU_DISPATCH(accW_simd_, (src, dst, mask, len, cn, alpha), CV_CPU_DISPATCH_MODES_ALL); \
}
#define DECLARATE_ACC_FUNCS(suffix, type, acctype) \
void acc_##suffix(const type* src, acctype* dst, const uchar* mask, int len, int cn); \
void accW_simd_(const float* src, double* dst, const uchar* mask, int len, int cn, double alpha);
void accW_simd_(const double* src, double* dst, const uchar* mask, int len, int cn, double alpha);
-// accumulate series optimized by AVX
-void acc_avx_32f(const float* src, float* dst, const uchar* mask, int len, int cn);
-void acc_avx_32f64f(const float* src, double* dst, const uchar* mask, int len, int cn);
-void acc_avx_64f(const double* src, double* dst, const uchar* mask, int len, int cn);
-void accSqr_avx_32f(const float* src, float* dst, const uchar* mask, int len, int cn);
-void accSqr_avx_32f64f(const float* src, double* dst, const uchar* mask, int len, int cn);
-void accSqr_avx_64f(const double* src, double* dst, const uchar* mask, int len, int cn);
-void accProd_avx_32f(const float* src1, const float* src2, float* dst, const uchar* mask, int len, int cn);
-void accProd_avx_32f64f(const float* src1, const float* src2, double* dst, const uchar* mask, int len, int cn);
-void accProd_avx_64f(const double* src1, const double* src2, double* dst, const uchar* mask, int len, int cn);
-void accW_avx_32f(const float* src, float* dst, const uchar* mask, int len, int cn, double alpha);
-void accW_avx_32f64f(const float* src, double* dst, const uchar* mask, int len, int cn, double alpha);
-void accW_avx_64f(const double* src, double* dst, const uchar* mask, int len, int cn, double alpha);
-
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
-
+// TODO: remove the AVX branches once universal intrinsics support them
template <typename T, typename AT>
void acc_general_(const T* src, AT* dst, const uchar* mask, int len, int cn, int start = 0 )
{
}
}
}
-
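+    // zeroing the upper halves of the YMM registers when leaving AVX code avoids the
+    // AVX->SSE transition penalty; vx_cleanup() is the universal-intrinsics counterpart
+    // for whichever backend CV_SIMD selected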
+#if CV_AVX && !CV_AVX2
+ _mm256_zeroupper();
+#elif CV_SIMD
+ vx_cleanup();
+#endif
}
template<typename T, typename AT> void
}
}
}
+#if CV_AVX && !CV_AVX2
+ _mm256_zeroupper();
+#elif CV_SIMD
+ vx_cleanup();
+#endif
}
template<typename T, typename AT> void
}
}
}
+#if CV_AVX && !CV_AVX2
+ _mm256_zeroupper();
+#elif CV_SIMD
+ vx_cleanup();
+#endif
}
template<typename T, typename AT> void
}
}
}
+#if CV_AVX && !CV_AVX2
+ _mm256_zeroupper();
+#elif CV_SIMD
+ vx_cleanup();
+#endif
}
-
-#if CV_SIMD128
-
void acc_simd_(const uchar* src, float* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 16;
+#if CV_SIMD
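+    // lane counts come from the widest enabled vector types (v_uint8::nlanes source
+    // bytes per iteration, stored as v_float32::nlanes floats per v_store), so the
+    // same loop serves 128-bit SSE2/NEON, 256-bit AVX2 and wider backends unchanged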
+ const int cVectorWidth = v_uint8::nlanes;
+ const int step = v_float32::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint8x16 v_src = v_load(src + x);
- v_uint16x8 v_src0, v_src1;
+ v_uint8 v_src = vx_load(src + x);
+ v_uint16 v_src0, v_src1;
v_expand(v_src, v_src0, v_src1);
- v_uint32x4 v_src00, v_src01, v_src10, v_src11;
+ v_uint32 v_src00, v_src01, v_src10, v_src11;
v_expand(v_src0, v_src00, v_src01);
v_expand(v_src1, v_src10, v_src11);
- v_store(dst + x, v_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src00)));
- v_store(dst + x + 4, v_load(dst + x + 4) + v_cvt_f32(v_reinterpret_as_s32(v_src01)));
- v_store(dst + x + 8, v_load(dst + x + 8) + v_cvt_f32(v_reinterpret_as_s32(v_src10)));
- v_store(dst + x + 12, v_load(dst + x + 12) + v_cvt_f32(v_reinterpret_as_s32(v_src11)));
+ v_store(dst + x, vx_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src00)));
+ v_store(dst + x + step, vx_load(dst + x + step) + v_cvt_f32(v_reinterpret_as_s32(v_src01)));
+ v_store(dst + x + step * 2, vx_load(dst + x + step * 2) + v_cvt_f32(v_reinterpret_as_s32(v_src10)));
+ v_store(dst + x + step * 3, vx_load(dst + x + step * 3) + v_cvt_f32(v_reinterpret_as_s32(v_src11)));
}
}
else
{
- v_uint8x16 v_0 = v_setall_u8(0);
+ v_uint8 v_0 = vx_setall_u8(0);
if (cn == 1)
{
for ( ; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint8x16 v_mask = v_load(mask + x);
+ v_uint8 v_mask = vx_load(mask + x);
v_mask = ~(v_0 == v_mask);
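+                // (v_0 == v_mask) yields all-ones lanes where the mask byte is zero;
+                // the negation flips that to all-ones where the mask is set, so
+                // "v_src & v_mask" clears the pixels excluded by the mask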
- v_uint8x16 v_src = v_load(src + x);
+ v_uint8 v_src = vx_load(src + x);
v_src = v_src & v_mask;
- v_uint16x8 v_src0, v_src1;
+ v_uint16 v_src0, v_src1;
v_expand(v_src, v_src0, v_src1);
- v_uint32x4 v_src00, v_src01, v_src10, v_src11;
+ v_uint32 v_src00, v_src01, v_src10, v_src11;
v_expand(v_src0, v_src00, v_src01);
v_expand(v_src1, v_src10, v_src11);
- v_store(dst + x, v_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src00)));
- v_store(dst + x + 4, v_load(dst + x + 4) + v_cvt_f32(v_reinterpret_as_s32(v_src01)));
- v_store(dst + x + 8, v_load(dst + x + 8) + v_cvt_f32(v_reinterpret_as_s32(v_src10)));
- v_store(dst + x + 12, v_load(dst + x + 12) + v_cvt_f32(v_reinterpret_as_s32(v_src11)));
+ v_store(dst + x, vx_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src00)));
+ v_store(dst + x + step, vx_load(dst + x + step) + v_cvt_f32(v_reinterpret_as_s32(v_src01)));
+ v_store(dst + x + step * 2, vx_load(dst + x + step * 2) + v_cvt_f32(v_reinterpret_as_s32(v_src10)));
+ v_store(dst + x + step * 3, vx_load(dst + x + step * 3) + v_cvt_f32(v_reinterpret_as_s32(v_src11)));
}
}
else if (cn == 3)
{
for ( ; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint8x16 v_mask = v_load(mask + x);
+ v_uint8 v_mask = vx_load(mask + x);
v_mask = ~(v_0 == v_mask);
- v_uint8x16 v_src0, v_src1, v_src2;
+ v_uint8 v_src0, v_src1, v_src2;
v_load_deinterleave(src + (x * cn), v_src0, v_src1, v_src2);
v_src0 = v_src0 & v_mask;
v_src1 = v_src1 & v_mask;
v_src2 = v_src2 & v_mask;
- v_uint16x8 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
+ v_uint16 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
v_expand(v_src0, v_src00, v_src01);
v_expand(v_src1, v_src10, v_src11);
v_expand(v_src2, v_src20, v_src21);
- v_uint32x4 v_src000, v_src001, v_src010, v_src011;
- v_uint32x4 v_src100, v_src101, v_src110, v_src111;
- v_uint32x4 v_src200, v_src201, v_src210, v_src211;
+ v_uint32 v_src000, v_src001, v_src010, v_src011;
+ v_uint32 v_src100, v_src101, v_src110, v_src111;
+ v_uint32 v_src200, v_src201, v_src210, v_src211;
v_expand(v_src00, v_src000, v_src001);
v_expand(v_src01, v_src010, v_src011);
v_expand(v_src10, v_src100, v_src101);
v_expand(v_src11, v_src110, v_src111);
v_expand(v_src20, v_src200, v_src201);
v_expand(v_src21, v_src210, v_src211);
- v_float32x4 v_dst000, v_dst001, v_dst010, v_dst011;
- v_float32x4 v_dst100, v_dst101, v_dst110, v_dst111;
- v_float32x4 v_dst200, v_dst201, v_dst210, v_dst211;
+ v_float32 v_dst000, v_dst001, v_dst010, v_dst011;
+ v_float32 v_dst100, v_dst101, v_dst110, v_dst111;
+ v_float32 v_dst200, v_dst201, v_dst210, v_dst211;
v_load_deinterleave(dst + (x * cn), v_dst000, v_dst100, v_dst200);
- v_load_deinterleave(dst + ((x + 4) * cn), v_dst001, v_dst101, v_dst201);
- v_load_deinterleave(dst + ((x + 8) * cn), v_dst010, v_dst110, v_dst210);
- v_load_deinterleave(dst + ((x + 12) * cn), v_dst011, v_dst111, v_dst211);
-
- v_store_interleave(dst + (x * cn), v_dst000 + v_cvt_f32(v_reinterpret_as_s32(v_src000)), v_dst100 + v_cvt_f32(v_reinterpret_as_s32(v_src100)), v_dst200 + v_cvt_f32(v_reinterpret_as_s32(v_src200)));
- v_store_interleave(dst + ((x + 4) * cn), v_dst001 + v_cvt_f32(v_reinterpret_as_s32(v_src001)), v_dst101 + v_cvt_f32(v_reinterpret_as_s32(v_src101)), v_dst201 + v_cvt_f32(v_reinterpret_as_s32(v_src201)));
- v_store_interleave(dst + ((x + 8) * cn), v_dst010 + v_cvt_f32(v_reinterpret_as_s32(v_src010)), v_dst110 + v_cvt_f32(v_reinterpret_as_s32(v_src110)), v_dst210 + v_cvt_f32(v_reinterpret_as_s32(v_src210)));
- v_store_interleave(dst + ((x + 12) * cn), v_dst011 + v_cvt_f32(v_reinterpret_as_s32(v_src011)), v_dst111 + v_cvt_f32(v_reinterpret_as_s32(v_src111)), v_dst211 + v_cvt_f32(v_reinterpret_as_s32(v_src211)));
+ v_load_deinterleave(dst + ((x + step) * cn), v_dst001, v_dst101, v_dst201);
+ v_load_deinterleave(dst + ((x + step * 2) * cn), v_dst010, v_dst110, v_dst210);
+ v_load_deinterleave(dst + ((x + step * 3) * cn), v_dst011, v_dst111, v_dst211);
+
+ v_dst000 += v_cvt_f32(v_reinterpret_as_s32(v_src000));
+ v_dst100 += v_cvt_f32(v_reinterpret_as_s32(v_src100));
+ v_dst200 += v_cvt_f32(v_reinterpret_as_s32(v_src200));
+ v_dst001 += v_cvt_f32(v_reinterpret_as_s32(v_src001));
+ v_dst101 += v_cvt_f32(v_reinterpret_as_s32(v_src101));
+ v_dst201 += v_cvt_f32(v_reinterpret_as_s32(v_src201));
+ v_dst010 += v_cvt_f32(v_reinterpret_as_s32(v_src010));
+ v_dst110 += v_cvt_f32(v_reinterpret_as_s32(v_src110));
+ v_dst210 += v_cvt_f32(v_reinterpret_as_s32(v_src210));
+ v_dst011 += v_cvt_f32(v_reinterpret_as_s32(v_src011));
+ v_dst111 += v_cvt_f32(v_reinterpret_as_s32(v_src111));
+ v_dst211 += v_cvt_f32(v_reinterpret_as_s32(v_src211));
+
+ v_store_interleave(dst + (x * cn), v_dst000, v_dst100, v_dst200);
+ v_store_interleave(dst + ((x + step) * cn), v_dst001, v_dst101, v_dst201);
+ v_store_interleave(dst + ((x + step * 2) * cn), v_dst010, v_dst110, v_dst210);
+ v_store_interleave(dst + ((x + step * 3) * cn), v_dst011, v_dst111, v_dst211);
}
}
}
-
+#endif // CV_SIMD
acc_general_(src, dst, mask, len, cn, x);
}
void acc_simd_(const ushort* src, float* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 8;
+#if CV_SIMD
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float32::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_src = v_load(src + x);
- v_uint32x4 v_src0, v_src1;
+ v_uint16 v_src = vx_load(src + x);
+ v_uint32 v_src0, v_src1;
v_expand(v_src, v_src0, v_src1);
- v_store(dst + x, v_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src0)));
- v_store(dst + x + 4, v_load(dst + x + 4) + v_cvt_f32(v_reinterpret_as_s32(v_src1)));
+ v_store(dst + x, vx_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src0)));
+ v_store(dst + x + step, vx_load(dst + x + step) + v_cvt_f32(v_reinterpret_as_s32(v_src1)));
}
}
else
{
if (cn == 1)
{
- v_uint16x8 v_0 = v_setall_u16(0);
+ v_uint16 v_0 = vx_setall_u16(0);
for ( ; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint16x8 v_src = v_load(src + x);
+ v_uint16 v_src = vx_load(src + x);
v_src = v_src & v_mask;
- v_uint32x4 v_src0, v_src1;
+ v_uint32 v_src0, v_src1;
v_expand(v_src, v_src0, v_src1);
- v_store(dst + x, v_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src0)));
- v_store(dst + x + 4, v_load(dst + x + 4) + v_cvt_f32(v_reinterpret_as_s32(v_src1)));
+ v_store(dst + x, vx_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src0)));
+ v_store(dst + x + step, vx_load(dst + x + step) + v_cvt_f32(v_reinterpret_as_s32(v_src1)));
}
}
else if (cn == 3)
{
- v_uint16x8 v_0 = v_setall_u16(0);
+ v_uint16 v_0 = vx_setall_u16(0);
for ( ; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint16x8 v_src0, v_src1, v_src2;
+ v_uint16 v_src0, v_src1, v_src2;
v_load_deinterleave(src + x * cn, v_src0, v_src1, v_src2);
v_src0 = v_src0 & v_mask;
v_src1 = v_src1 & v_mask;
v_src2 = v_src2 & v_mask;
- v_uint32x4 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
+ v_uint32 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
v_expand(v_src0, v_src00, v_src01);
v_expand(v_src1, v_src10, v_src11);
v_expand(v_src2, v_src20, v_src21);
- v_float32x4 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
+ v_float32 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 4) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
- v_store_interleave(dst + x * cn, v_dst00 + v_cvt_f32(v_reinterpret_as_s32(v_src00)), v_dst10 + v_cvt_f32(v_reinterpret_as_s32(v_src10)), v_dst20 + v_cvt_f32(v_reinterpret_as_s32(v_src20)));
- v_store_interleave(dst + (x + 4) * cn, v_dst01 + v_cvt_f32(v_reinterpret_as_s32(v_src01)), v_dst11 + v_cvt_f32(v_reinterpret_as_s32(v_src11)), v_dst21 + v_cvt_f32(v_reinterpret_as_s32(v_src21)));
+ v_dst00 += v_cvt_f32(v_reinterpret_as_s32(v_src00));
+ v_dst01 += v_cvt_f32(v_reinterpret_as_s32(v_src01));
+ v_dst10 += v_cvt_f32(v_reinterpret_as_s32(v_src10));
+ v_dst11 += v_cvt_f32(v_reinterpret_as_s32(v_src11));
+ v_dst20 += v_cvt_f32(v_reinterpret_as_s32(v_src20));
+ v_dst21 += v_cvt_f32(v_reinterpret_as_s32(v_src21));
+
+ v_store_interleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
+ v_store_interleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
}
}
}
-
+#endif // CV_SIMD
acc_general_(src, dst, mask, len, cn, x);
}
-
+// TODO: remove the AVX branch once universal intrinsics support it
void acc_simd_(const float* src, float* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 8;
+#if CV_SIMD
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float32::nlanes;
if (!mask)
{
int size = len * cn;
+ #if CV_AVX && !CV_AVX2
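+ // assumed rationale for keeping raw AVX here: universal intrinsics need AVX2 for
+ // 256-bit integer vectors, so an AVX1-only build falls back to 128-bit lanes;
+ // this branch preserves 256-bit float throughput on such builds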
+ for (; x <= size - 8 ; x += 8)
+ {
+ __m256 v_src = _mm256_loadu_ps(src + x);
+ __m256 v_dst = _mm256_loadu_ps(dst + x);
+ v_dst = _mm256_add_ps(v_src, v_dst);
+ _mm256_storeu_ps(dst + x, v_dst);
+ }
+ #else
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_store(dst + x, v_load(dst + x) + v_load(src + x));
- v_store(dst + x + 4, v_load(dst + x + 4) + v_load(src + x + 4));
+ v_store(dst + x, vx_load(dst + x) + vx_load(src + x));
+ v_store(dst + x + step, vx_load(dst + x + step) + vx_load(src + x + step));
}
+ #endif // CV_AVX && !CV_AVX2
}
else
{
- v_float32x4 v_0 = v_setzero_f32();
+ v_float32 v_0 = vx_setzero_f32();
if (cn == 1)
{
for ( ; x <= len - cVectorWidth ; x += cVectorWidth)
{
- v_uint16x8 v_masku16 = v_load_expand(mask + x);
- v_uint32x4 v_masku320, v_masku321;
+ v_uint16 v_masku16 = vx_load_expand(mask + x);
+ v_uint32 v_masku320, v_masku321;
v_expand(v_masku16, v_masku320, v_masku321);
- v_float32x4 v_mask0 = v_reinterpret_as_f32(~(v_masku320 == v_reinterpret_as_u32(v_0)));
- v_float32x4 v_mask1 = v_reinterpret_as_f32(~(v_masku321 == v_reinterpret_as_u32(v_0)));
+ v_float32 v_mask0 = v_reinterpret_as_f32(~(v_masku320 == v_reinterpret_as_u32(v_0)));
+ v_float32 v_mask1 = v_reinterpret_as_f32(~(v_masku321 == v_reinterpret_as_u32(v_0)));
- v_store(dst + x, v_load(dst + x) + (v_load(src + x) & v_mask0));
- v_store(dst + x + 4, v_load(dst + x + 4) + (v_load(src + x + 4) & v_mask1));
+ v_store(dst + x, vx_load(dst + x) + (vx_load(src + x) & v_mask0));
+ v_store(dst + x + step, vx_load(dst + x + step) + (vx_load(src + x + step) & v_mask1));
}
}
else if (cn == 3)
{
for ( ; x <= len - cVectorWidth ; x += cVectorWidth)
{
- v_uint16x8 v_masku16 = v_load_expand(mask + x);
- v_uint32x4 v_masku320, v_masku321;
+ v_uint16 v_masku16 = vx_load_expand(mask + x);
+ v_uint32 v_masku320, v_masku321;
v_expand(v_masku16, v_masku320, v_masku321);
- v_float32x4 v_mask0 = v_reinterpret_as_f32(~(v_masku320 == v_reinterpret_as_u32(v_0)));
- v_float32x4 v_mask1 = v_reinterpret_as_f32(~(v_masku321 == v_reinterpret_as_u32(v_0)));
+ v_float32 v_mask0 = v_reinterpret_as_f32(~(v_masku320 == v_reinterpret_as_u32(v_0)));
+ v_float32 v_mask1 = v_reinterpret_as_f32(~(v_masku321 == v_reinterpret_as_u32(v_0)));
- v_float32x4 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
+ v_float32 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
v_load_deinterleave(src + x * cn, v_src00, v_src10, v_src20);
- v_load_deinterleave(src + (x + 4) * cn, v_src01, v_src11, v_src21);
+ v_load_deinterleave(src + (x + step) * cn, v_src01, v_src11, v_src21);
v_src00 = v_src00 & v_mask0;
v_src01 = v_src01 & v_mask1;
v_src10 = v_src10 & v_mask0;
v_src11 = v_src11 & v_mask1;
v_src20 = v_src20 & v_mask0;
v_src21 = v_src21 & v_mask1;
- v_float32x4 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
+ v_float32 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 4) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 4) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
+ v_store_interleave(dst + (x + step) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
}
}
}
-
+#endif // CV_SIMD
acc_general_(src, dst, mask, len, cn, x);
}
-#if CV_SIMD128_64F
void acc_simd_(const uchar* src, double* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 16;
+#if CV_SIMD_64F
+ const int cVectorWidth = v_uint8::nlanes;
+ const int step = v_float64::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint8x16 v_src = v_load(src + x);
- v_uint16x8 v_int0, v_int1;
+ v_uint8 v_src = vx_load(src + x);
+ v_uint16 v_int0, v_int1;
v_expand(v_src, v_int0, v_int1);
- v_uint32x4 v_int00, v_int01, v_int10, v_int11;
+ v_uint32 v_int00, v_int01, v_int10, v_int11;
v_expand(v_int0, v_int00, v_int01);
v_expand(v_int1, v_int10, v_int11);
- v_float64x2 v_src0 = v_cvt_f64(v_reinterpret_as_s32(v_int00));
- v_float64x2 v_src1 = v_cvt_f64_high(v_reinterpret_as_s32(v_int00));
- v_float64x2 v_src2 = v_cvt_f64(v_reinterpret_as_s32(v_int01));
- v_float64x2 v_src3 = v_cvt_f64_high(v_reinterpret_as_s32(v_int01));
- v_float64x2 v_src4 = v_cvt_f64(v_reinterpret_as_s32(v_int10));
- v_float64x2 v_src5 = v_cvt_f64_high(v_reinterpret_as_s32(v_int10));
- v_float64x2 v_src6 = v_cvt_f64(v_reinterpret_as_s32(v_int11));
- v_float64x2 v_src7 = v_cvt_f64_high(v_reinterpret_as_s32(v_int11));
-
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
- v_float64x2 v_dst4 = v_load(dst + x + 8);
- v_float64x2 v_dst5 = v_load(dst + x + 10);
- v_float64x2 v_dst6 = v_load(dst + x + 12);
- v_float64x2 v_dst7 = v_load(dst + x + 14);
+ v_float64 v_src0 = v_cvt_f64(v_reinterpret_as_s32(v_int00));
+ v_float64 v_src1 = v_cvt_f64_high(v_reinterpret_as_s32(v_int00));
+ v_float64 v_src2 = v_cvt_f64(v_reinterpret_as_s32(v_int01));
+ v_float64 v_src3 = v_cvt_f64_high(v_reinterpret_as_s32(v_int01));
+ v_float64 v_src4 = v_cvt_f64(v_reinterpret_as_s32(v_int10));
+ v_float64 v_src5 = v_cvt_f64_high(v_reinterpret_as_s32(v_int10));
+ v_float64 v_src6 = v_cvt_f64(v_reinterpret_as_s32(v_int11));
+ v_float64 v_src7 = v_cvt_f64_high(v_reinterpret_as_s32(v_int11));
+
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
+ v_float64 v_dst4 = vx_load(dst + x + step * 4);
+ v_float64 v_dst5 = vx_load(dst + x + step * 5);
+ v_float64 v_dst6 = vx_load(dst + x + step * 6);
+ v_float64 v_dst7 = vx_load(dst + x + step * 7);
v_dst0 = v_dst0 + v_src0;
v_dst1 = v_dst1 + v_src1;
v_dst2 = v_dst2 + v_src2;
v_dst3 = v_dst3 + v_src3;
v_dst4 = v_dst4 + v_src4;
v_dst5 = v_dst5 + v_src5;
v_dst6 = v_dst6 + v_src6;
v_dst7 = v_dst7 + v_src7;
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
- v_store(dst + x + 8, v_dst4);
- v_store(dst + x + 10, v_dst5);
- v_store(dst + x + 12, v_dst6);
- v_store(dst + x + 14, v_dst7);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
+ v_store(dst + x + step * 4, v_dst4);
+ v_store(dst + x + step * 5, v_dst5);
+ v_store(dst + x + step * 6, v_dst6);
+ v_store(dst + x + step * 7, v_dst7);
}
}
else
{
- v_uint8x16 v_0 = v_setall_u8(0);
+ v_uint8 v_0 = vx_setall_u8(0);
if (cn == 1)
{
for ( ; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint8x16 v_mask = v_load(mask + x);
+ v_uint8 v_mask = vx_load(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint8x16 v_src = v_load(src + x);
+ v_uint8 v_src = vx_load(src + x);
v_src = v_src & v_mask;
- v_uint16x8 v_int0, v_int1;
+ v_uint16 v_int0, v_int1;
v_expand(v_src, v_int0, v_int1);
- v_uint32x4 v_int00, v_int01, v_int10, v_int11;
+ v_uint32 v_int00, v_int01, v_int10, v_int11;
v_expand(v_int0, v_int00, v_int01);
v_expand(v_int1, v_int10, v_int11);
- v_float64x2 v_src0 = v_cvt_f64(v_reinterpret_as_s32(v_int00));
- v_float64x2 v_src1 = v_cvt_f64_high(v_reinterpret_as_s32(v_int00));
- v_float64x2 v_src2 = v_cvt_f64(v_reinterpret_as_s32(v_int01));
- v_float64x2 v_src3 = v_cvt_f64_high(v_reinterpret_as_s32(v_int01));
- v_float64x2 v_src4 = v_cvt_f64(v_reinterpret_as_s32(v_int10));
- v_float64x2 v_src5 = v_cvt_f64_high(v_reinterpret_as_s32(v_int10));
- v_float64x2 v_src6 = v_cvt_f64(v_reinterpret_as_s32(v_int11));
- v_float64x2 v_src7 = v_cvt_f64_high(v_reinterpret_as_s32(v_int11));
-
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
- v_float64x2 v_dst4 = v_load(dst + x + 8);
- v_float64x2 v_dst5 = v_load(dst + x + 10);
- v_float64x2 v_dst6 = v_load(dst + x + 12);
- v_float64x2 v_dst7 = v_load(dst + x + 14);
+ v_float64 v_src0 = v_cvt_f64(v_reinterpret_as_s32(v_int00));
+ v_float64 v_src1 = v_cvt_f64_high(v_reinterpret_as_s32(v_int00));
+ v_float64 v_src2 = v_cvt_f64(v_reinterpret_as_s32(v_int01));
+ v_float64 v_src3 = v_cvt_f64_high(v_reinterpret_as_s32(v_int01));
+ v_float64 v_src4 = v_cvt_f64(v_reinterpret_as_s32(v_int10));
+ v_float64 v_src5 = v_cvt_f64_high(v_reinterpret_as_s32(v_int10));
+ v_float64 v_src6 = v_cvt_f64(v_reinterpret_as_s32(v_int11));
+ v_float64 v_src7 = v_cvt_f64_high(v_reinterpret_as_s32(v_int11));
+
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
+ v_float64 v_dst4 = vx_load(dst + x + step * 4);
+ v_float64 v_dst5 = vx_load(dst + x + step * 5);
+ v_float64 v_dst6 = vx_load(dst + x + step * 6);
+ v_float64 v_dst7 = vx_load(dst + x + step * 7);
v_dst0 = v_dst0 + v_src0;
v_dst1 = v_dst1 + v_src1;
v_dst2 = v_dst2 + v_src2;
v_dst3 = v_dst3 + v_src3;
v_dst4 = v_dst4 + v_src4;
v_dst5 = v_dst5 + v_src5;
v_dst6 = v_dst6 + v_src6;
v_dst7 = v_dst7 + v_src7;
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
- v_store(dst + x + 8, v_dst4);
- v_store(dst + x + 10, v_dst5);
- v_store(dst + x + 12, v_dst6);
- v_store(dst + x + 14, v_dst7);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
+ v_store(dst + x + step * 4, v_dst4);
+ v_store(dst + x + step * 5, v_dst5);
+ v_store(dst + x + step * 6, v_dst6);
+ v_store(dst + x + step * 7, v_dst7);
}
}
else if (cn == 3)
{
for ( ; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint8x16 v_mask = v_load(mask + x);
+ v_uint8 v_mask = vx_load(mask + x);
v_mask = ~(v_0 == v_mask);
- v_uint8x16 v_src0, v_src1, v_src2;
+ v_uint8 v_src0, v_src1, v_src2;
v_load_deinterleave(src + (x * cn), v_src0, v_src1, v_src2);
v_src0 = v_src0 & v_mask;
v_src1 = v_src1 & v_mask;
v_src2 = v_src2 & v_mask;
- v_uint16x8 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
+ v_uint16 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
v_expand(v_src0, v_src00, v_src01);
v_expand(v_src1, v_src10, v_src11);
v_expand(v_src2, v_src20, v_src21);
- v_uint32x4 v_src000, v_src001, v_src010, v_src011;
- v_uint32x4 v_src100, v_src101, v_src110, v_src111;
- v_uint32x4 v_src200, v_src201, v_src210, v_src211;
+ v_uint32 v_src000, v_src001, v_src010, v_src011;
+ v_uint32 v_src100, v_src101, v_src110, v_src111;
+ v_uint32 v_src200, v_src201, v_src210, v_src211;
v_expand(v_src00, v_src000, v_src001);
v_expand(v_src01, v_src010, v_src011);
v_expand(v_src10, v_src100, v_src101);
v_expand(v_src11, v_src110, v_src111);
v_expand(v_src20, v_src200, v_src201);
v_expand(v_src21, v_src210, v_src211);
- v_float64x2 v_src0000, v_src0001, v_src0010, v_src0011, v_src0100, v_src0101, v_src0110, v_src0111;
- v_float64x2 v_src1000, v_src1001, v_src1010, v_src1011, v_src1100, v_src1101, v_src1110, v_src1111;
- v_float64x2 v_src2000, v_src2001, v_src2010, v_src2011, v_src2100, v_src2101, v_src2110, v_src2111;
+ v_float64 v_src0000, v_src0001, v_src0010, v_src0011, v_src0100, v_src0101, v_src0110, v_src0111;
+ v_float64 v_src1000, v_src1001, v_src1010, v_src1011, v_src1100, v_src1101, v_src1110, v_src1111;
+ v_float64 v_src2000, v_src2001, v_src2010, v_src2011, v_src2100, v_src2101, v_src2110, v_src2111;
v_src0000 = v_cvt_f64(v_cvt_f32(v_reinterpret_as_s32(v_src000)));
v_src0001 = v_cvt_f64_high(v_cvt_f32(v_reinterpret_as_s32(v_src000)));
v_src0010 = v_cvt_f64(v_cvt_f32(v_reinterpret_as_s32(v_src001)));
v_src0011 = v_cvt_f64_high(v_cvt_f32(v_reinterpret_as_s32(v_src001)));
v_src0100 = v_cvt_f64(v_cvt_f32(v_reinterpret_as_s32(v_src010)));
v_src0101 = v_cvt_f64_high(v_cvt_f32(v_reinterpret_as_s32(v_src010)));
v_src0110 = v_cvt_f64(v_cvt_f32(v_reinterpret_as_s32(v_src011)));
v_src0111 = v_cvt_f64_high(v_cvt_f32(v_reinterpret_as_s32(v_src011)));
v_src1000 = v_cvt_f64(v_cvt_f32(v_reinterpret_as_s32(v_src100)));
v_src1001 = v_cvt_f64_high(v_cvt_f32(v_reinterpret_as_s32(v_src100)));
v_src1010 = v_cvt_f64(v_cvt_f32(v_reinterpret_as_s32(v_src101)));
v_src1011 = v_cvt_f64_high(v_cvt_f32(v_reinterpret_as_s32(v_src101)));
v_src1100 = v_cvt_f64(v_cvt_f32(v_reinterpret_as_s32(v_src110)));
v_src1101 = v_cvt_f64_high(v_cvt_f32(v_reinterpret_as_s32(v_src110)));
v_src1110 = v_cvt_f64(v_cvt_f32(v_reinterpret_as_s32(v_src111)));
v_src1111 = v_cvt_f64_high(v_cvt_f32(v_reinterpret_as_s32(v_src111)));
v_src2000 = v_cvt_f64(v_cvt_f32(v_reinterpret_as_s32(v_src200)));
v_src2001 = v_cvt_f64_high(v_cvt_f32(v_reinterpret_as_s32(v_src200)));
v_src2010 = v_cvt_f64(v_cvt_f32(v_reinterpret_as_s32(v_src201)));
v_src2011 = v_cvt_f64_high(v_cvt_f32(v_reinterpret_as_s32(v_src201)));
v_src2100 = v_cvt_f64(v_cvt_f32(v_reinterpret_as_s32(v_src210)));
v_src2101 = v_cvt_f64_high(v_cvt_f32(v_reinterpret_as_s32(v_src210)));
v_src2110 = v_cvt_f64(v_cvt_f32(v_reinterpret_as_s32(v_src211)));
v_src2111 = v_cvt_f64_high(v_cvt_f32(v_reinterpret_as_s32(v_src211)));
- v_float64x2 v_dst0000, v_dst0001, v_dst0010, v_dst0011, v_dst0100, v_dst0101, v_dst0110, v_dst0111;
- v_float64x2 v_dst1000, v_dst1001, v_dst1010, v_dst1011, v_dst1100, v_dst1101, v_dst1110, v_dst1111;
- v_float64x2 v_dst2000, v_dst2001, v_dst2010, v_dst2011, v_dst2100, v_dst2101, v_dst2110, v_dst2111;
+ v_float64 v_dst0000, v_dst0001, v_dst0010, v_dst0011, v_dst0100, v_dst0101, v_dst0110, v_dst0111;
+ v_float64 v_dst1000, v_dst1001, v_dst1010, v_dst1011, v_dst1100, v_dst1101, v_dst1110, v_dst1111;
+ v_float64 v_dst2000, v_dst2001, v_dst2010, v_dst2011, v_dst2100, v_dst2101, v_dst2110, v_dst2111;
v_load_deinterleave(dst + (x * cn), v_dst0000, v_dst1000, v_dst2000);
- v_load_deinterleave(dst + ((x + 2) * cn), v_dst0001, v_dst1001, v_dst2001);
- v_load_deinterleave(dst + ((x + 4) * cn), v_dst0010, v_dst1010, v_dst2010);
- v_load_deinterleave(dst + ((x + 6) * cn), v_dst0011, v_dst1011, v_dst2011);
- v_load_deinterleave(dst + ((x + 8) * cn), v_dst0100, v_dst1100, v_dst2100);
- v_load_deinterleave(dst + ((x + 10) * cn), v_dst0101, v_dst1101, v_dst2101);
- v_load_deinterleave(dst + ((x + 12) * cn), v_dst0110, v_dst1110, v_dst2110);
- v_load_deinterleave(dst + ((x + 14) * cn), v_dst0111, v_dst1111, v_dst2111);
+ v_load_deinterleave(dst + ((x + step) * cn), v_dst0001, v_dst1001, v_dst2001);
+ v_load_deinterleave(dst + ((x + step * 2) * cn), v_dst0010, v_dst1010, v_dst2010);
+ v_load_deinterleave(dst + ((x + step * 3) * cn), v_dst0011, v_dst1011, v_dst2011);
+ v_load_deinterleave(dst + ((x + step * 4) * cn), v_dst0100, v_dst1100, v_dst2100);
+ v_load_deinterleave(dst + ((x + step * 5) * cn), v_dst0101, v_dst1101, v_dst2101);
+ v_load_deinterleave(dst + ((x + step * 6) * cn), v_dst0110, v_dst1110, v_dst2110);
+ v_load_deinterleave(dst + ((x + step * 7) * cn), v_dst0111, v_dst1111, v_dst2111);
v_store_interleave(dst + (x * cn), v_dst0000 + v_src0000, v_dst1000 + v_src1000, v_dst2000 + v_src2000);
- v_store_interleave(dst + ((x + 2) * cn), v_dst0001 + v_src0001, v_dst1001 + v_src1001, v_dst2001 + v_src2001);
- v_store_interleave(dst + ((x + 4) * cn), v_dst0010 + v_src0010, v_dst1010 + v_src1010, v_dst2010 + v_src2010);
- v_store_interleave(dst + ((x + 6) * cn), v_dst0011 + v_src0011, v_dst1011 + v_src1011, v_dst2011 + v_src2011);
- v_store_interleave(dst + ((x + 8) * cn), v_dst0100 + v_src0100, v_dst1100 + v_src1100, v_dst2100 + v_src2100);
- v_store_interleave(dst + ((x + 10) * cn), v_dst0101 + v_src0101, v_dst1101 + v_src1101, v_dst2101 + v_src2101);
- v_store_interleave(dst + ((x + 12) * cn), v_dst0110 + v_src0110, v_dst1110 + v_src1110, v_dst2110 + v_src2110);
- v_store_interleave(dst + ((x + 14) * cn), v_dst0111 + v_src0111, v_dst1111 + v_src1111, v_dst2111 + v_src2111);
+ v_store_interleave(dst + ((x + step) * cn), v_dst0001 + v_src0001, v_dst1001 + v_src1001, v_dst2001 + v_src2001);
+ v_store_interleave(dst + ((x + step * 2) * cn), v_dst0010 + v_src0010, v_dst1010 + v_src1010, v_dst2010 + v_src2010);
+ v_store_interleave(dst + ((x + step * 3) * cn), v_dst0011 + v_src0011, v_dst1011 + v_src1011, v_dst2011 + v_src2011);
+ v_store_interleave(dst + ((x + step * 4) * cn), v_dst0100 + v_src0100, v_dst1100 + v_src1100, v_dst2100 + v_src2100);
+ v_store_interleave(dst + ((x + step * 5) * cn), v_dst0101 + v_src0101, v_dst1101 + v_src1101, v_dst2101 + v_src2101);
+ v_store_interleave(dst + ((x + step * 6) * cn), v_dst0110 + v_src0110, v_dst1110 + v_src1110, v_dst2110 + v_src2110);
+ v_store_interleave(dst + ((x + step * 7) * cn), v_dst0111 + v_src0111, v_dst1111 + v_src1111, v_dst2111 + v_src2111);
}
}
}
-
+#endif // CV_SIMD_64F
acc_general_(src, dst, mask, len, cn, x);
}
void acc_simd_(const ushort* src, double* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 8;
+#if CV_SIMD_64F
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float64::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_src = v_load(src + x);
- v_uint32x4 v_int0, v_int1;
+ v_uint16 v_src = vx_load(src + x);
+ v_uint32 v_int0, v_int1;
v_expand(v_src, v_int0, v_int1);
- v_float64x2 v_src0 = v_cvt_f64(v_reinterpret_as_s32(v_int0));
- v_float64x2 v_src1 = v_cvt_f64_high(v_reinterpret_as_s32(v_int0));
- v_float64x2 v_src2 = v_cvt_f64(v_reinterpret_as_s32(v_int1));
- v_float64x2 v_src3 = v_cvt_f64_high(v_reinterpret_as_s32(v_int1));
+ v_float64 v_src0 = v_cvt_f64(v_reinterpret_as_s32(v_int0));
+ v_float64 v_src1 = v_cvt_f64_high(v_reinterpret_as_s32(v_int0));
+ v_float64 v_src2 = v_cvt_f64(v_reinterpret_as_s32(v_int1));
+ v_float64 v_src3 = v_cvt_f64_high(v_reinterpret_as_s32(v_int1));
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
v_dst0 = v_dst0 + v_src0;
v_dst1 = v_dst1 + v_src1;
v_dst2 = v_dst2 + v_src2;
v_dst3 = v_dst3 + v_src3;
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
}
}
else
{
- v_uint16x8 v_0 = v_setzero_u16();
+ v_uint16 v_0 = vx_setzero_u16();
if (cn == 1)
{
for ( ; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint16x8 v_src = v_load(src + x);
+ v_uint16 v_src = vx_load(src + x);
v_src = v_src & v_mask;
- v_uint32x4 v_int0, v_int1;
+ v_uint32 v_int0, v_int1;
v_expand(v_src, v_int0, v_int1);
- v_float64x2 v_src0 = v_cvt_f64(v_reinterpret_as_s32(v_int0));
- v_float64x2 v_src1 = v_cvt_f64_high(v_reinterpret_as_s32(v_int0));
- v_float64x2 v_src2 = v_cvt_f64(v_reinterpret_as_s32(v_int1));
- v_float64x2 v_src3 = v_cvt_f64_high(v_reinterpret_as_s32(v_int1));
+ v_float64 v_src0 = v_cvt_f64(v_reinterpret_as_s32(v_int0));
+ v_float64 v_src1 = v_cvt_f64_high(v_reinterpret_as_s32(v_int0));
+ v_float64 v_src2 = v_cvt_f64(v_reinterpret_as_s32(v_int1));
+ v_float64 v_src3 = v_cvt_f64_high(v_reinterpret_as_s32(v_int1));
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
v_dst0 = v_dst0 + v_src0;
v_dst1 = v_dst1 + v_src1;
v_dst2 = v_dst2 + v_src2;
v_dst3 = v_dst3 + v_src3;
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
}
}
if (cn == 3)
{
for ( ; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint16x8 v_src0, v_src1, v_src2;
+ v_uint16 v_src0, v_src1, v_src2;
v_load_deinterleave(src + x * cn, v_src0, v_src1, v_src2);
v_src0 = v_src0 & v_mask;
v_src1 = v_src1 & v_mask;
v_src2 = v_src2 & v_mask;
- v_uint32x4 v_int00, v_int01, v_int10, v_int11, v_int20, v_int21;
+ v_uint32 v_int00, v_int01, v_int10, v_int11, v_int20, v_int21;
v_expand(v_src0, v_int00, v_int01);
v_expand(v_src1, v_int10, v_int11);
v_expand(v_src2, v_int20, v_int21);
- v_float64x2 v_src00 = v_cvt_f64(v_reinterpret_as_s32(v_int00));
- v_float64x2 v_src01 = v_cvt_f64_high(v_reinterpret_as_s32(v_int00));
- v_float64x2 v_src02 = v_cvt_f64(v_reinterpret_as_s32(v_int01));
- v_float64x2 v_src03 = v_cvt_f64_high(v_reinterpret_as_s32(v_int01));
- v_float64x2 v_src10 = v_cvt_f64(v_reinterpret_as_s32(v_int10));
- v_float64x2 v_src11 = v_cvt_f64_high(v_reinterpret_as_s32(v_int10));
- v_float64x2 v_src12 = v_cvt_f64(v_reinterpret_as_s32(v_int11));
- v_float64x2 v_src13 = v_cvt_f64_high(v_reinterpret_as_s32(v_int11));
- v_float64x2 v_src20 = v_cvt_f64(v_reinterpret_as_s32(v_int20));
- v_float64x2 v_src21 = v_cvt_f64_high(v_reinterpret_as_s32(v_int20));
- v_float64x2 v_src22 = v_cvt_f64(v_reinterpret_as_s32(v_int21));
- v_float64x2 v_src23 = v_cvt_f64_high(v_reinterpret_as_s32(v_int21));
-
- v_float64x2 v_dst00, v_dst01, v_dst02, v_dst03, v_dst10, v_dst11, v_dst12, v_dst13, v_dst20, v_dst21, v_dst22, v_dst23;
+ v_float64 v_src00 = v_cvt_f64(v_reinterpret_as_s32(v_int00));
+ v_float64 v_src01 = v_cvt_f64_high(v_reinterpret_as_s32(v_int00));
+ v_float64 v_src02 = v_cvt_f64(v_reinterpret_as_s32(v_int01));
+ v_float64 v_src03 = v_cvt_f64_high(v_reinterpret_as_s32(v_int01));
+ v_float64 v_src10 = v_cvt_f64(v_reinterpret_as_s32(v_int10));
+ v_float64 v_src11 = v_cvt_f64_high(v_reinterpret_as_s32(v_int10));
+ v_float64 v_src12 = v_cvt_f64(v_reinterpret_as_s32(v_int11));
+ v_float64 v_src13 = v_cvt_f64_high(v_reinterpret_as_s32(v_int11));
+ v_float64 v_src20 = v_cvt_f64(v_reinterpret_as_s32(v_int20));
+ v_float64 v_src21 = v_cvt_f64_high(v_reinterpret_as_s32(v_int20));
+ v_float64 v_src22 = v_cvt_f64(v_reinterpret_as_s32(v_int21));
+ v_float64 v_src23 = v_cvt_f64_high(v_reinterpret_as_s32(v_int21));
+
+ v_float64 v_dst00, v_dst01, v_dst02, v_dst03, v_dst10, v_dst11, v_dst12, v_dst13, v_dst20, v_dst21, v_dst22, v_dst23;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 2) * cn, v_dst01, v_dst11, v_dst21);
- v_load_deinterleave(dst + (x + 4) * cn, v_dst02, v_dst12, v_dst22);
- v_load_deinterleave(dst + (x + 6) * cn, v_dst03, v_dst13, v_dst23);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step * 2) * cn, v_dst02, v_dst12, v_dst22);
+ v_load_deinterleave(dst + (x + step * 3) * cn, v_dst03, v_dst13, v_dst23);
v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 2) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
- v_store_interleave(dst + (x + 4) * cn, v_dst02 + v_src02, v_dst12 + v_src12, v_dst22 + v_src22);
- v_store_interleave(dst + (x + 6) * cn, v_dst03 + v_src03, v_dst13 + v_src13, v_dst23 + v_src23);
+ v_store_interleave(dst + (x + step) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
+ v_store_interleave(dst + (x + step * 2) * cn, v_dst02 + v_src02, v_dst12 + v_src12, v_dst22 + v_src22);
+ v_store_interleave(dst + (x + step * 3) * cn, v_dst03 + v_src03, v_dst13 + v_src13, v_dst23 + v_src23);
}
}
}
-
+#endif // CV_SIMD_64F
acc_general_(src, dst, mask, len, cn, x);
}
void acc_simd_(const float* src, double* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 4;
+#if CV_SIMD_64F
+ const int cVectorWidth = v_float32::nlanes;
+ const int step = v_float64::nlanes;
if (!mask)
{
int size = len * cn;
+ #if CV_AVX && !CV_AVX2
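+ // widens 8 floats to 8 doubles: _mm256_cvtps_pd converts each 128-bit half of the
+ // source YMM register into a full 256-bit double vector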
+ for (; x <= size - 8 ; x += 8)
+ {
+ __m256 v_src = _mm256_loadu_ps(src + x);
+ __m256d v_src0 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_src, 0));
+ __m256d v_src1 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_src, 1));
+ __m256d v_dst0 = _mm256_loadu_pd(dst + x);
+ __m256d v_dst1 = _mm256_loadu_pd(dst + x + 4);
+ v_dst0 = _mm256_add_pd(v_src0, v_dst0);
+ v_dst1 = _mm256_add_pd(v_src1, v_dst1);
+ _mm256_storeu_pd(dst + x, v_dst0);
+ _mm256_storeu_pd(dst + x + 4, v_dst1);
+ }
+ #else
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_float32x4 v_src = v_load(src + x);
- v_float64x2 v_src0 = v_cvt_f64(v_src);
- v_float64x2 v_src1 = v_cvt_f64_high(v_src);
+ v_float32 v_src = vx_load(src + x);
+ v_float64 v_src0 = v_cvt_f64(v_src);
+ v_float64 v_src1 = v_cvt_f64_high(v_src);
- v_store(dst + x, v_load(dst + x) + v_src0);
- v_store(dst + x + 2, v_load(dst + x + 2) + v_src1);
+ v_store(dst + x, vx_load(dst + x) + v_src0);
+ v_store(dst + x + step, vx_load(dst + x + step) + v_src1);
}
+ #endif // CV_AVX && !CV_AVX2
}
else
{
- v_uint64x2 v_0 = v_setzero_u64();
+ v_uint64 v_0 = vx_setzero_u64();
if (cn == 1)
{
for ( ; x <= len - cVectorWidth ; x += cVectorWidth)
{
- v_uint32x4 v_masku32 = v_load_expand_q(mask + x);
- v_uint64x2 v_masku640, v_masku641;
+ v_uint32 v_masku32 = vx_load_expand_q(mask + x);
+ v_uint64 v_masku640, v_masku641;
v_expand(v_masku32, v_masku640, v_masku641);
- v_float64x2 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
- v_float64x2 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
+ v_float64 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
+ v_float64 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
- v_float32x4 v_src = v_load(src + x);
- v_float64x2 v_src0 = v_cvt_f64(v_src) & v_mask0;
- v_float64x2 v_src1 = v_cvt_f64_high(v_src) & v_mask1;
+ v_float32 v_src = vx_load(src + x);
+ v_float64 v_src0 = v_cvt_f64(v_src) & v_mask0;
+ v_float64 v_src1 = v_cvt_f64_high(v_src) & v_mask1;
- v_store(dst + x, v_load(dst + x) + v_src0);
- v_store(dst + x + 2, v_load(dst + x + 2) + v_src1);
+ v_store(dst + x, vx_load(dst + x) + v_src0);
+ v_store(dst + x + step, vx_load(dst + x + step) + v_src1);
}
}
else if (cn == 3)
{
for ( ; x <= len - cVectorWidth ; x += cVectorWidth)
{
- v_uint32x4 v_masku32 = v_load_expand_q(mask + x);
- v_uint64x2 v_masku640, v_masku641;
+ v_uint32 v_masku32 = vx_load_expand_q(mask + x);
+ v_uint64 v_masku640, v_masku641;
v_expand(v_masku32, v_masku640, v_masku641);
- v_float64x2 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
- v_float64x2 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
+ v_float64 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
+ v_float64 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
- v_float32x4 v_src0, v_src1, v_src2;
+ v_float32 v_src0, v_src1, v_src2;
v_load_deinterleave(src + x * cn, v_src0, v_src1, v_src2);
- v_float64x2 v_src00 = v_cvt_f64(v_src0) & v_mask0;
- v_float64x2 v_src01 = v_cvt_f64_high(v_src0) & v_mask1;
- v_float64x2 v_src10 = v_cvt_f64(v_src1) & v_mask0;
- v_float64x2 v_src11 = v_cvt_f64_high(v_src1) & v_mask1;
- v_float64x2 v_src20 = v_cvt_f64(v_src2) & v_mask0;
- v_float64x2 v_src21 = v_cvt_f64_high(v_src2) & v_mask1;
-
- v_float64x2 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
+ v_float64 v_src00 = v_cvt_f64(v_src0) & v_mask0;
+ v_float64 v_src01 = v_cvt_f64_high(v_src0) & v_mask1;
+ v_float64 v_src10 = v_cvt_f64(v_src1) & v_mask0;
+ v_float64 v_src11 = v_cvt_f64_high(v_src1) & v_mask1;
+ v_float64 v_src20 = v_cvt_f64(v_src2) & v_mask0;
+ v_float64 v_src21 = v_cvt_f64_high(v_src2) & v_mask1;
+
+ v_float64 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 2) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 2) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
+ v_store_interleave(dst + (x + step) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
}
}
}
-
+#endif // CV_SIMD_64F
acc_general_(src, dst, mask, len, cn, x);
}
void acc_simd_(const double* src, double* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 4;
+#if CV_SIMD_64F
+ const int cVectorWidth = v_float64::nlanes * 2;
+ const int step = v_float64::nlanes;
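+ // each iteration consumes two v_float64 vectors, hence cVectorWidth = 2 * step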
if (!mask)
{
int size = len * cn;
+ #if CV_AVX && !CV_AVX2
+ for ( ; x <= size - 4 ; x += 4)
+ {
+ __m256d v_src = _mm256_loadu_pd(src + x);
+ __m256d v_dst = _mm256_loadu_pd(dst + x);
+ v_dst = _mm256_add_pd(v_dst, v_src);
+ _mm256_storeu_pd(dst + x, v_dst);
+ }
+ #else
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_float64x2 v_src0 = v_load(src + x);
- v_float64x2 v_src1 = v_load(src + x + 2);
+ v_float64 v_src0 = vx_load(src + x);
+ v_float64 v_src1 = vx_load(src + x + step);
- v_store(dst + x, v_load(dst + x) + v_src0);
- v_store(dst + x + 2, v_load(dst + x + 2) + v_src1);
+ v_store(dst + x, vx_load(dst + x) + v_src0);
+ v_store(dst + x + step, vx_load(dst + x + step) + v_src1);
}
+ #endif // CV_AVX && !CV_AVX2
}
else
{
- v_uint64x2 v_0 = v_setzero_u64();
+ v_uint64 v_0 = vx_setzero_u64();
if (cn == 1)
{
for ( ; x <= len - cVectorWidth ; x += cVectorWidth)
{
- v_uint32x4 v_masku32 = v_load_expand_q(mask + x);
- v_uint64x2 v_masku640, v_masku641;
+ v_uint32 v_masku32 = vx_load_expand_q(mask + x);
+ v_uint64 v_masku640, v_masku641;
v_expand(v_masku32, v_masku640, v_masku641);
- v_float64x2 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
- v_float64x2 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
+ v_float64 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
+ v_float64 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
- v_float64x2 v_src0 = v_load(src + x);
- v_float64x2 v_src1 = v_load(src + x + 2);
+ v_float64 v_src0 = vx_load(src + x);
+ v_float64 v_src1 = vx_load(src + x + step);
- v_store(dst + x, v_load(dst + x) + (v_src0 & v_mask0));
- v_store(dst + x + 2, v_load(dst + x + 2) + (v_src1 & v_mask1));
+ v_store(dst + x, vx_load(dst + x) + (v_src0 & v_mask0));
+ v_store(dst + x + step, vx_load(dst + x + step) + (v_src1 & v_mask1));
}
}
else if (cn == 3)
{
for ( ; x <= len - cVectorWidth ; x += cVectorWidth)
{
- v_uint32x4 v_masku32 = v_load_expand_q(mask + x);
- v_uint64x2 v_masku640, v_masku641;
+ v_uint32 v_masku32 = vx_load_expand_q(mask + x);
+ v_uint64 v_masku640, v_masku641;
v_expand(v_masku32, v_masku640, v_masku641);
- v_float64x2 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
- v_float64x2 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
+ v_float64 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
+ v_float64 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
- v_float64x2 v_src00, v_src10, v_src20, v_src01, v_src11, v_src21;
+ v_float64 v_src00, v_src10, v_src20, v_src01, v_src11, v_src21;
v_load_deinterleave(src + x * cn, v_src00, v_src10, v_src20);
- v_load_deinterleave(src + (x + 2) * cn, v_src01, v_src11, v_src21);
+ v_load_deinterleave(src + (x + step) * cn, v_src01, v_src11, v_src21);
v_src00 = v_src00 & v_mask0;
v_src01 = v_src01 & v_mask1;
v_src10 = v_src10 & v_mask0;
v_src11 = v_src11 & v_mask1;
v_src20 = v_src20 & v_mask0;
v_src21 = v_src21 & v_mask1;
- v_float64x2 v_dst00, v_dst10, v_dst20, v_dst01, v_dst11, v_dst21;
+ v_float64 v_dst00, v_dst10, v_dst20, v_dst01, v_dst11, v_dst21;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 2) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 2) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
+ v_store_interleave(dst + (x + step) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
}
}
}
-
+#endif // CV_SIMD_64F
acc_general_(src, dst, mask, len, cn, x);
}
-#else
-void acc_simd_(const uchar* src, double* dst, const uchar* mask, int len, int cn)
-{
- acc_general_(src, dst, mask, len, cn, 0);
-}
-
-void acc_simd_(const ushort* src, double* dst, const uchar* mask, int len, int cn)
-{
- acc_general_(src, dst, mask, len, cn, 0);
-}
-
-void acc_simd_(const float* src, double* dst, const uchar* mask, int len, int cn)
-{
- acc_general_(src, dst, mask, len, cn, 0);
-}
-
-void acc_simd_(const double* src, double* dst, const uchar* mask, int len, int cn)
-{
- acc_general_(src, dst, mask, len, cn, 0);
-}
-#endif
// square accumulate optimized by universal intrinsics
void accSqr_simd_(const uchar* src, float* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 16;
+#if CV_SIMD
+ const int cVectorWidth = v_uint8::nlanes;
+ const int step = v_float32::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint8x16 v_src = v_load(src + x);
- v_uint16x8 v_src0, v_src1;
+ v_uint8 v_src = vx_load(src + x);
+ v_uint16 v_src0, v_src1;
v_expand(v_src, v_src0, v_src1);
v_src0 = v_mul_wrap(v_src0, v_src0);
v_src1 = v_mul_wrap(v_src1, v_src1);
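+            // squaring after widening to 16 bits cannot overflow (255*255 = 65025
+            // fits in a ushort), so the wrapping multiply is exact here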
- v_uint32x4 v_src00, v_src01, v_src10, v_src11;
+ v_uint32 v_src00, v_src01, v_src10, v_src11;
v_expand(v_src0, v_src00, v_src01);
v_expand(v_src1, v_src10, v_src11);
- v_store(dst + x, v_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src00)));
- v_store(dst + x + 4, v_load(dst + x + 4) + v_cvt_f32(v_reinterpret_as_s32(v_src01)));
- v_store(dst + x + 8, v_load(dst + x + 8) + v_cvt_f32(v_reinterpret_as_s32(v_src10)));
- v_store(dst + x + 12, v_load(dst + x + 12) + v_cvt_f32(v_reinterpret_as_s32(v_src11)));
+ v_store(dst + x, vx_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src00)));
+ v_store(dst + x + step, vx_load(dst + x + step) + v_cvt_f32(v_reinterpret_as_s32(v_src01)));
+ v_store(dst + x + step * 2, vx_load(dst + x + step * 2) + v_cvt_f32(v_reinterpret_as_s32(v_src10)));
+ v_store(dst + x + step * 3, vx_load(dst + x + step * 3) + v_cvt_f32(v_reinterpret_as_s32(v_src11)));
}
}
else
{
- v_uint8x16 v_0 = v_setall_u8(0);
+ v_uint8 v_0 = vx_setall_u8(0);
if (cn == 1)
{
for ( ; x <= len - cVectorWidth ; x += cVectorWidth)
{
- v_uint8x16 v_mask = v_load(mask + x);
+ v_uint8 v_mask = vx_load(mask + x);
v_mask = ~(v_0 == v_mask);
- v_uint8x16 v_src = v_load(src + x);
+ v_uint8 v_src = vx_load(src + x);
v_src = v_src & v_mask;
- v_uint16x8 v_src0, v_src1;
+ v_uint16 v_src0, v_src1;
v_expand(v_src, v_src0, v_src1);
v_src0 = v_mul_wrap(v_src0, v_src0);
v_src1 = v_mul_wrap(v_src1, v_src1);
- v_uint32x4 v_src00, v_src01, v_src10, v_src11;
+ v_uint32 v_src00, v_src01, v_src10, v_src11;
v_expand(v_src0, v_src00, v_src01);
v_expand(v_src1, v_src10, v_src11);
- v_store(dst + x, v_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src00)));
- v_store(dst + x + 4, v_load(dst + x + 4) + v_cvt_f32(v_reinterpret_as_s32(v_src01)));
- v_store(dst + x + 8, v_load(dst + x + 8) + v_cvt_f32(v_reinterpret_as_s32(v_src10)));
- v_store(dst + x + 12, v_load(dst + x + 12) + v_cvt_f32(v_reinterpret_as_s32(v_src11)));
+ v_store(dst + x, vx_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src00)));
+ v_store(dst + x + step, vx_load(dst + x + step) + v_cvt_f32(v_reinterpret_as_s32(v_src01)));
+ v_store(dst + x + step * 2, vx_load(dst + x + step * 2) + v_cvt_f32(v_reinterpret_as_s32(v_src10)));
+ v_store(dst + x + step * 3, vx_load(dst + x + step * 3) + v_cvt_f32(v_reinterpret_as_s32(v_src11)));
}
}
else if (cn == 3)
{
for ( ; x <= len - cVectorWidth ; x += cVectorWidth)
{
- v_uint8x16 v_mask = v_load(mask + x);
+ v_uint8 v_mask = vx_load(mask + x);
v_mask = ~(v_0 == v_mask);
- v_uint8x16 v_src0, v_src1, v_src2;
+ v_uint8 v_src0, v_src1, v_src2;
v_load_deinterleave(src + x * cn, v_src0, v_src1, v_src2);
v_src0 = v_src0 & v_mask;
v_src1 = v_src1 & v_mask;
v_src2 = v_src2 & v_mask;
- v_uint16x8 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
+ v_uint16 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
v_expand(v_src0, v_src00, v_src01);
v_expand(v_src1, v_src10, v_src11);
v_expand(v_src2, v_src20, v_src21);
v_src00 = v_mul_wrap(v_src00, v_src00);
v_src01 = v_mul_wrap(v_src01, v_src01);
v_src10 = v_mul_wrap(v_src10, v_src10);
v_src11 = v_mul_wrap(v_src11, v_src11);
v_src20 = v_mul_wrap(v_src20, v_src20);
v_src21 = v_mul_wrap(v_src21, v_src21);
- v_uint32x4 v_src000, v_src001, v_src010, v_src011;
- v_uint32x4 v_src100, v_src101, v_src110, v_src111;
- v_uint32x4 v_src200, v_src201, v_src210, v_src211;
+ v_uint32 v_src000, v_src001, v_src010, v_src011;
+ v_uint32 v_src100, v_src101, v_src110, v_src111;
+ v_uint32 v_src200, v_src201, v_src210, v_src211;
v_expand(v_src00, v_src000, v_src001);
v_expand(v_src01, v_src010, v_src011);
v_expand(v_src10, v_src100, v_src101);
v_expand(v_src11, v_src110, v_src111);
v_expand(v_src20, v_src200, v_src201);
v_expand(v_src21, v_src210, v_src211);
- v_float32x4 v_dst000, v_dst001, v_dst010, v_dst011;
- v_float32x4 v_dst100, v_dst101, v_dst110, v_dst111;
- v_float32x4 v_dst200, v_dst201, v_dst210, v_dst211;
+ v_float32 v_dst000, v_dst001, v_dst010, v_dst011;
+ v_float32 v_dst100, v_dst101, v_dst110, v_dst111;
+ v_float32 v_dst200, v_dst201, v_dst210, v_dst211;
v_load_deinterleave(dst + x * cn, v_dst000, v_dst100, v_dst200);
- v_load_deinterleave(dst + (x + 4) * cn, v_dst001, v_dst101, v_dst201);
- v_load_deinterleave(dst + (x + 8) * cn, v_dst010, v_dst110, v_dst210);
- v_load_deinterleave(dst + (x + 12) * cn, v_dst011, v_dst111, v_dst211);
-
- v_store_interleave(dst + x * cn, v_dst000 + v_cvt_f32(v_reinterpret_as_s32(v_src000)), v_dst100 + v_cvt_f32(v_reinterpret_as_s32(v_src100)), v_dst200 + v_cvt_f32(v_reinterpret_as_s32(v_src200)));
- v_store_interleave(dst + (x + 4) * cn, v_dst001 + v_cvt_f32(v_reinterpret_as_s32(v_src001)), v_dst101 + v_cvt_f32(v_reinterpret_as_s32(v_src101)), v_dst201 + v_cvt_f32(v_reinterpret_as_s32(v_src201)));
- v_store_interleave(dst + (x + 8) * cn, v_dst010 + v_cvt_f32(v_reinterpret_as_s32(v_src010)), v_dst110 + v_cvt_f32(v_reinterpret_as_s32(v_src110)), v_dst210 + v_cvt_f32(v_reinterpret_as_s32(v_src210)));
- v_store_interleave(dst + (x + 12) * cn, v_dst011 + v_cvt_f32(v_reinterpret_as_s32(v_src011)), v_dst111 + v_cvt_f32(v_reinterpret_as_s32(v_src111)), v_dst211 + v_cvt_f32(v_reinterpret_as_s32(v_src211)));
+ v_load_deinterleave(dst + (x + step) * cn, v_dst001, v_dst101, v_dst201);
+ v_load_deinterleave(dst + (x + step * 2) * cn, v_dst010, v_dst110, v_dst210);
+ v_load_deinterleave(dst + (x + step * 3) * cn, v_dst011, v_dst111, v_dst211);
+
+ v_dst000 += v_cvt_f32(v_reinterpret_as_s32(v_src000));
+ v_dst001 += v_cvt_f32(v_reinterpret_as_s32(v_src001));
+ v_dst010 += v_cvt_f32(v_reinterpret_as_s32(v_src010));
+ v_dst011 += v_cvt_f32(v_reinterpret_as_s32(v_src011));
+
+ v_dst100 += v_cvt_f32(v_reinterpret_as_s32(v_src100));
+ v_dst101 += v_cvt_f32(v_reinterpret_as_s32(v_src101));
+ v_dst110 += v_cvt_f32(v_reinterpret_as_s32(v_src110));
+ v_dst111 += v_cvt_f32(v_reinterpret_as_s32(v_src111));
+
+ v_dst200 += v_cvt_f32(v_reinterpret_as_s32(v_src200));
+ v_dst201 += v_cvt_f32(v_reinterpret_as_s32(v_src201));
+ v_dst210 += v_cvt_f32(v_reinterpret_as_s32(v_src210));
+ v_dst211 += v_cvt_f32(v_reinterpret_as_s32(v_src211));
+
+ v_store_interleave(dst + x * cn, v_dst000, v_dst100, v_dst200);
+ v_store_interleave(dst + (x + step) * cn, v_dst001, v_dst101, v_dst201);
+ v_store_interleave(dst + (x + step * 2) * cn, v_dst010, v_dst110, v_dst210);
+ v_store_interleave(dst + (x + step * 3) * cn, v_dst011, v_dst111, v_dst211);
}
}
}
-
+#endif // CV_SIMD
accSqr_general_(src, dst, mask, len, cn, x);
}
void accSqr_simd_(const ushort* src, float* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 8;
+#if CV_SIMD
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float32::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_src = v_load(src + x);
- v_uint32x4 v_src0, v_src1;
+ v_uint16 v_src = vx_load(src + x);
+ v_uint32 v_src0, v_src1;
v_expand(v_src, v_src0, v_src1);
- v_float32x4 v_float0, v_float1;
+ v_float32 v_float0, v_float1;
v_float0 = v_cvt_f32(v_reinterpret_as_s32(v_src0));
v_float1 = v_cvt_f32(v_reinterpret_as_s32(v_src1));
- v_float0 = v_float0 * v_float0;
- v_float1 = v_float1 * v_float1;
- v_store(dst + x, v_load(dst + x) + v_float0);
- v_store(dst + x + 4, v_load(dst + x + 4) + v_float1);
+ v_store(dst + x, v_fma(v_float0, v_float0, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_float1, v_float1, vx_load(dst + x + step)));
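+            // v_fma(a, a, acc) computes a*a + acc, mapping to a fused multiply-add
+            // where the target provides one and to an ordinary multiply+add elsewhere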
}
}
else
{
- v_uint32x4 v_0 = v_setzero_u32();
+ v_uint32 v_0 = vx_setzero_u32();
if (cn == 1)
{
for ( ; x <= len - cVectorWidth ; x += cVectorWidth)
{
- v_uint16x8 v_mask16 = v_load_expand(mask + x);
- v_uint32x4 v_mask0, v_mask1;
+ v_uint16 v_mask16 = vx_load_expand(mask + x);
+ v_uint32 v_mask0, v_mask1;
v_expand(v_mask16, v_mask0, v_mask1);
v_mask0 = ~(v_mask0 == v_0);
v_mask1 = ~(v_mask1 == v_0);
- v_uint16x8 v_src = v_load(src + x);
- v_uint32x4 v_src0, v_src1;
+ v_uint16 v_src = vx_load(src + x);
+ v_uint32 v_src0, v_src1;
v_expand(v_src, v_src0, v_src1);
v_src0 = v_src0 & v_mask0;
v_src1 = v_src1 & v_mask1;
- v_float32x4 v_float0, v_float1;
+ v_float32 v_float0, v_float1;
v_float0 = v_cvt_f32(v_reinterpret_as_s32(v_src0));
v_float1 = v_cvt_f32(v_reinterpret_as_s32(v_src1));
- v_float0 = v_float0 * v_float0;
- v_float1 = v_float1 * v_float1;
- v_store(dst + x, v_load(dst + x) + v_float0);
- v_store(dst + x + 4, v_load(dst + x + 4) + v_float1);
+ v_store(dst + x, v_fma(v_float0, v_float0, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_float1, v_float1, vx_load(dst + x + step)));
}
}
else if (cn == 3)
{
for ( ; x <= len - cVectorWidth ; x += cVectorWidth)
{
- v_uint16x8 v_mask16 = v_load_expand(mask + x);
- v_uint32x4 v_mask0, v_mask1;
+ v_uint16 v_mask16 = vx_load_expand(mask + x);
+ v_uint32 v_mask0, v_mask1;
v_expand(v_mask16, v_mask0, v_mask1);
v_mask0 = ~(v_mask0 == v_0);
v_mask1 = ~(v_mask1 == v_0);
- v_uint16x8 v_src0, v_src1, v_src2;
+ v_uint16 v_src0, v_src1, v_src2;
v_load_deinterleave(src + x * cn, v_src0, v_src1, v_src2);
- v_uint32x4 v_int00, v_int01, v_int10, v_int11, v_int20, v_int21;
+ v_uint32 v_int00, v_int01, v_int10, v_int11, v_int20, v_int21;
v_expand(v_src0, v_int00, v_int01);
v_expand(v_src1, v_int10, v_int11);
v_expand(v_src2, v_int20, v_int21);
v_int00 = v_int00 & v_mask0;
v_int01 = v_int01 & v_mask1;
v_int10 = v_int10 & v_mask0;
v_int11 = v_int11 & v_mask1;
v_int20 = v_int20 & v_mask0;
v_int21 = v_int21 & v_mask1;
- v_float32x4 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
+ v_float32 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
v_src00 = v_cvt_f32(v_reinterpret_as_s32(v_int00));
v_src01 = v_cvt_f32(v_reinterpret_as_s32(v_int01));
v_src10 = v_cvt_f32(v_reinterpret_as_s32(v_int10));
v_src11 = v_cvt_f32(v_reinterpret_as_s32(v_int11));
v_src20 = v_cvt_f32(v_reinterpret_as_s32(v_int20));
v_src21 = v_cvt_f32(v_reinterpret_as_s32(v_int21));
- v_src00 = v_src00 * v_src00;
- v_src01 = v_src01 * v_src01;
- v_src10 = v_src10 * v_src10;
- v_src11 = v_src11 * v_src11;
- v_src20 = v_src20 * v_src20;
- v_src21 = v_src21 * v_src21;
-
- v_float32x4 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
+
+ v_float32 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 4) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
- v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 4) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
+ v_dst00 = v_fma(v_src00, v_src00, v_dst00);
+ v_dst01 = v_fma(v_src01, v_src01, v_dst01);
+ v_dst10 = v_fma(v_src10, v_src10, v_dst10);
+ v_dst11 = v_fma(v_src11, v_src11, v_dst11);
+ v_dst20 = v_fma(v_src20, v_src20, v_dst20);
+ v_dst21 = v_fma(v_src21, v_src21, v_dst21);
+
+ v_store_interleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
+ v_store_interleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
}
}
}
-
+#endif // CV_SIMD
accSqr_general_(src, dst, mask, len, cn, x);
}
void accSqr_simd_(const float* src, float* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 8;
+#if CV_SIMD
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float32::nlanes;
if (!mask)
{
int size = len * cn;
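+ // with AVX but not AVX2 the universal intrinsics stay 128-bit, so keep a hand-written 256-bit float path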
+ #if CV_AVX && !CV_AVX2
+ for ( ; x <= size - 8 ; x += 8)
+ {
+ __m256 v_src = _mm256_loadu_ps(src + x);
+ __m256 v_dst = _mm256_loadu_ps(dst + x);
+ v_src = _mm256_mul_ps(v_src, v_src);
+ v_dst = _mm256_add_ps(v_src, v_dst);
+ _mm256_storeu_ps(dst + x, v_dst);
+ }
+ #else
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_float32x4 v_src0 = v_load(src + x);
- v_float32x4 v_src1 = v_load(src + x + 4);
- v_src0 = v_src0 * v_src0;
- v_src1 = v_src1 * v_src1;
+ v_float32 v_src0 = vx_load(src + x);
+ v_float32 v_src1 = vx_load(src + x + step);
- v_store(dst + x, v_load(dst + x) + v_src0);
- v_store(dst + x + 4, v_load(dst + x + 4) + v_src1);
+ v_store(dst + x, v_fma(v_src0, v_src0, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_src1, v_src1, vx_load(dst + x + step)));
}
+ #endif // CV_AVX && !CV_AVX2
}
else
{
- v_uint32x4 v_0 = v_setzero_u32();
+ v_uint32 v_0 = vx_setzero_u32();
if (cn == 1)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask16 = v_load_expand(mask + x);
- v_uint32x4 v_mask_0, v_mask_1;
+ v_uint16 v_mask16 = vx_load_expand(mask + x);
+ v_uint32 v_mask_0, v_mask_1;
v_expand(v_mask16, v_mask_0, v_mask_1);
- v_float32x4 v_mask0 = v_reinterpret_as_f32(~(v_mask_0 == v_0));
- v_float32x4 v_mask1 = v_reinterpret_as_f32(~(v_mask_1 == v_0));
- v_float32x4 v_src0 = v_load(src + x);
- v_float32x4 v_src1 = v_load(src + x + 4);
+ v_float32 v_mask0 = v_reinterpret_as_f32(~(v_mask_0 == v_0));
+ v_float32 v_mask1 = v_reinterpret_as_f32(~(v_mask_1 == v_0));
+ v_float32 v_src0 = vx_load(src + x);
+ v_float32 v_src1 = vx_load(src + x + step);
v_src0 = v_src0 & v_mask0;
v_src1 = v_src1 & v_mask1;
- v_src0 = v_src0 * v_src0;
- v_src1 = v_src1 * v_src1;
- v_store(dst + x, v_load(dst + x) + v_src0);
- v_store(dst + x + 4, v_load(dst + x + 4) + v_src1);
+ v_store(dst + x, v_fma(v_src0, v_src0, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_src1, v_src1, vx_load(dst + x + step)));
}
}
else if (cn == 3)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask16 = v_load_expand(mask + x);
- v_uint32x4 v_mask_0, v_mask_1;
+ v_uint16 v_mask16 = vx_load_expand(mask + x);
+ v_uint32 v_mask_0, v_mask_1;
v_expand(v_mask16, v_mask_0, v_mask_1);
- v_float32x4 v_mask0 = v_reinterpret_as_f32(~(v_mask_0 == v_0));
- v_float32x4 v_mask1 = v_reinterpret_as_f32(~(v_mask_1 == v_0));
+ v_float32 v_mask0 = v_reinterpret_as_f32(~(v_mask_0 == v_0));
+ v_float32 v_mask1 = v_reinterpret_as_f32(~(v_mask_1 == v_0));
- v_float32x4 v_src00, v_src10, v_src20, v_src01, v_src11, v_src21;
+ v_float32 v_src00, v_src10, v_src20, v_src01, v_src11, v_src21;
v_load_deinterleave(src + x * cn, v_src00, v_src10, v_src20);
- v_load_deinterleave(src + (x + 4) * cn, v_src01, v_src11, v_src21);
+ v_load_deinterleave(src + (x + step) * cn, v_src01, v_src11, v_src21);
v_src00 = v_src00 & v_mask0;
v_src01 = v_src01 & v_mask1;
v_src10 = v_src10 & v_mask0;
v_src11 = v_src11 & v_mask1;
v_src20 = v_src20 & v_mask0;
v_src21 = v_src21 & v_mask1;
- v_src00 = v_src00 * v_src00;
- v_src01 = v_src01 * v_src01;
- v_src10 = v_src10 * v_src10;
- v_src11 = v_src11 * v_src11;
- v_src20 = v_src20 * v_src20;
- v_src21 = v_src21 * v_src21;
-
- v_float32x4 v_dst00, v_dst10, v_dst20, v_dst01, v_dst11, v_dst21;
+
+ v_float32 v_dst00, v_dst10, v_dst20, v_dst01, v_dst11, v_dst21;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 4) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
- v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 4) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
+ v_dst00 = v_fma(v_src00, v_src00, v_dst00);
+ v_dst01 = v_fma(v_src01, v_src01, v_dst01);
+ v_dst10 = v_fma(v_src10, v_src10, v_dst10);
+ v_dst11 = v_fma(v_src11, v_src11, v_dst11);
+ v_dst20 = v_fma(v_src20, v_src20, v_dst20);
+ v_dst21 = v_fma(v_src21, v_src21, v_dst21);
+
+ v_store_interleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
+ v_store_interleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
}
}
}
-
+#endif // CV_SIMD
accSqr_general_(src, dst, mask, len, cn, x);
}
-#if CV_SIMD128_64F
+
void accSqr_simd_(const uchar* src, double* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 8;
+#if CV_SIMD_64F
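+ // vx_load_expand widens u8 to u16, so each pass covers v_uint16::nlanes pixels spread over four v_float64 vectors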
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float64::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_int = v_load_expand(src + x);
+ v_uint16 v_int = vx_load_expand(src + x);
- v_uint32x4 v_int0, v_int1;
+ v_uint32 v_int0, v_int1;
v_expand(v_int, v_int0, v_int1);
- v_float64x2 v_src0 = v_cvt_f64(v_reinterpret_as_s32(v_int0));
- v_float64x2 v_src1 = v_cvt_f64_high(v_reinterpret_as_s32(v_int0));
- v_float64x2 v_src2 = v_cvt_f64(v_reinterpret_as_s32(v_int1));
- v_float64x2 v_src3 = v_cvt_f64_high(v_reinterpret_as_s32(v_int1));
- v_src0 = v_src0 * v_src0;
- v_src1 = v_src1 * v_src1;
- v_src2 = v_src2 * v_src2;
- v_src3 = v_src3 * v_src3;
-
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
-
- v_dst0 += v_src0;
- v_dst1 += v_src1;
- v_dst2 += v_src2;
- v_dst3 += v_src3;
+ v_float64 v_src0 = v_cvt_f64(v_reinterpret_as_s32(v_int0));
+ v_float64 v_src1 = v_cvt_f64_high(v_reinterpret_as_s32(v_int0));
+ v_float64 v_src2 = v_cvt_f64(v_reinterpret_as_s32(v_int1));
+ v_float64 v_src3 = v_cvt_f64_high(v_reinterpret_as_s32(v_int1));
+
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
+
+ v_dst0 = v_fma(v_src0, v_src0, v_dst0);
+ v_dst1 = v_fma(v_src1, v_src1, v_dst1);
+ v_dst2 = v_fma(v_src2, v_src2, v_dst2);
+ v_dst3 = v_fma(v_src3, v_src3, v_dst3);
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
}
}
else
{
- v_uint16x8 v_0 = v_setzero_u16();
+ v_uint16 v_0 = vx_setzero_u16();
if (cn == 1)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint16x8 v_src = v_load_expand(src + x);
- v_uint16x8 v_int = v_src & v_mask;
+ v_uint16 v_src = vx_load_expand(src + x);
+ v_uint16 v_int = v_src & v_mask;
- v_uint32x4 v_int0, v_int1;
+ v_uint32 v_int0, v_int1;
v_expand(v_int, v_int0, v_int1);
- v_float64x2 v_src0 = v_cvt_f64(v_reinterpret_as_s32(v_int0));
- v_float64x2 v_src1 = v_cvt_f64_high(v_reinterpret_as_s32(v_int0));
- v_float64x2 v_src2 = v_cvt_f64(v_reinterpret_as_s32(v_int1));
- v_float64x2 v_src3 = v_cvt_f64_high(v_reinterpret_as_s32(v_int1));
- v_src0 = v_src0 * v_src0;
- v_src1 = v_src1 * v_src1;
- v_src2 = v_src2 * v_src2;
- v_src3 = v_src3 * v_src3;
-
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
-
- v_dst0 += v_src0;
- v_dst1 += v_src1;
- v_dst2 += v_src2;
- v_dst3 += v_src3;
+ v_float64 v_src0 = v_cvt_f64(v_reinterpret_as_s32(v_int0));
+ v_float64 v_src1 = v_cvt_f64_high(v_reinterpret_as_s32(v_int0));
+ v_float64 v_src2 = v_cvt_f64(v_reinterpret_as_s32(v_int1));
+ v_float64 v_src3 = v_cvt_f64_high(v_reinterpret_as_s32(v_int1));
+
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
+
+ v_dst0 = v_fma(v_src0, v_src0, v_dst0);
+ v_dst1 = v_fma(v_src1, v_src1, v_dst1);
+ v_dst2 = v_fma(v_src2, v_src2, v_dst2);
+ v_dst3 = v_fma(v_src3, v_src3, v_dst3);
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
}
}
else if (cn == 3)
{
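+ // the u8 deinterleave below reads v_uint8::nlanes (= cVectorWidth * 2) pixels per channel, hence the doubled tail margin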
- for (; x <= len - /*cVectorWidth*/16; x += cVectorWidth)
+ for (; x <= len - cVectorWidth * 2; x += cVectorWidth)
{
- v_uint8x16 v_src0, v_src1, v_src2;
+ v_uint8 v_src0, v_src1, v_src2;
v_load_deinterleave(src + x * cn, v_src0, v_src1, v_src2);
- v_uint16x8 v_int0, v_int1, v_int2, dummy;
- v_expand(v_src0, v_int0, dummy);
- v_expand(v_src1, v_int1, dummy);
- v_expand(v_src2, v_int2, dummy);
- v_uint16x8 v_mask = v_load_expand(mask + x);
+
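+ // only the low halves are consumed here; v_expand_low avoids the dummy high-half vectors the old v_expand needed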
+ v_uint16 v_int0 = v_expand_low(v_src0);
+ v_uint16 v_int1 = v_expand_low(v_src1);
+ v_uint16 v_int2 = v_expand_low(v_src2);
+
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_mask == v_0);
v_int0 = v_int0 & v_mask;
v_int1 = v_int1 & v_mask;
v_int2 = v_int2 & v_mask;
- v_uint32x4 v_int00, v_int01, v_int10, v_int11, v_int20, v_int21;
+ v_uint32 v_int00, v_int01, v_int10, v_int11, v_int20, v_int21;
v_expand(v_int0, v_int00, v_int01);
v_expand(v_int1, v_int10, v_int11);
v_expand(v_int2, v_int20, v_int21);
- v_float64x2 v_src00 = v_cvt_f64(v_reinterpret_as_s32(v_int00));
- v_float64x2 v_src01 = v_cvt_f64_high(v_reinterpret_as_s32(v_int00));
- v_float64x2 v_src02 = v_cvt_f64(v_reinterpret_as_s32(v_int01));
- v_float64x2 v_src03 = v_cvt_f64_high(v_reinterpret_as_s32(v_int01));
- v_float64x2 v_src10 = v_cvt_f64(v_reinterpret_as_s32(v_int10));
- v_float64x2 v_src11 = v_cvt_f64_high(v_reinterpret_as_s32(v_int10));
- v_float64x2 v_src12 = v_cvt_f64(v_reinterpret_as_s32(v_int11));
- v_float64x2 v_src13 = v_cvt_f64_high(v_reinterpret_as_s32(v_int11));
- v_float64x2 v_src20 = v_cvt_f64(v_reinterpret_as_s32(v_int20));
- v_float64x2 v_src21 = v_cvt_f64_high(v_reinterpret_as_s32(v_int20));
- v_float64x2 v_src22 = v_cvt_f64(v_reinterpret_as_s32(v_int21));
- v_float64x2 v_src23 = v_cvt_f64_high(v_reinterpret_as_s32(v_int21));
- v_src00 = v_src00 * v_src00;
- v_src01 = v_src01 * v_src01;
- v_src02 = v_src02 * v_src02;
- v_src03 = v_src03 * v_src03;
- v_src10 = v_src10 * v_src10;
- v_src11 = v_src11 * v_src11;
- v_src12 = v_src12 * v_src12;
- v_src13 = v_src13 * v_src13;
- v_src20 = v_src20 * v_src20;
- v_src21 = v_src21 * v_src21;
- v_src22 = v_src22 * v_src22;
- v_src23 = v_src23 * v_src23;
-
- v_float64x2 v_dst00, v_dst01, v_dst02, v_dst03, v_dst10, v_dst11, v_dst12, v_dst13, v_dst20, v_dst21, v_dst22, v_dst23;
+ v_float64 v_src00 = v_cvt_f64(v_reinterpret_as_s32(v_int00));
+ v_float64 v_src01 = v_cvt_f64_high(v_reinterpret_as_s32(v_int00));
+ v_float64 v_src02 = v_cvt_f64(v_reinterpret_as_s32(v_int01));
+ v_float64 v_src03 = v_cvt_f64_high(v_reinterpret_as_s32(v_int01));
+ v_float64 v_src10 = v_cvt_f64(v_reinterpret_as_s32(v_int10));
+ v_float64 v_src11 = v_cvt_f64_high(v_reinterpret_as_s32(v_int10));
+ v_float64 v_src12 = v_cvt_f64(v_reinterpret_as_s32(v_int11));
+ v_float64 v_src13 = v_cvt_f64_high(v_reinterpret_as_s32(v_int11));
+ v_float64 v_src20 = v_cvt_f64(v_reinterpret_as_s32(v_int20));
+ v_float64 v_src21 = v_cvt_f64_high(v_reinterpret_as_s32(v_int20));
+ v_float64 v_src22 = v_cvt_f64(v_reinterpret_as_s32(v_int21));
+ v_float64 v_src23 = v_cvt_f64_high(v_reinterpret_as_s32(v_int21));
+
+ v_float64 v_dst00, v_dst01, v_dst02, v_dst03, v_dst10, v_dst11, v_dst12, v_dst13, v_dst20, v_dst21, v_dst22, v_dst23;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 2) * cn, v_dst01, v_dst11, v_dst21);
- v_load_deinterleave(dst + (x + 4) * cn, v_dst02, v_dst12, v_dst22);
- v_load_deinterleave(dst + (x + 6) * cn, v_dst03, v_dst13, v_dst23);
-
- v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 2) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
- v_store_interleave(dst + (x + 4) * cn, v_dst02 + v_src02, v_dst12 + v_src12, v_dst22 + v_src22);
- v_store_interleave(dst + (x + 6) * cn, v_dst03 + v_src03, v_dst13 + v_src13, v_dst23 + v_src23);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step * 2) * cn, v_dst02, v_dst12, v_dst22);
+ v_load_deinterleave(dst + (x + step * 3) * cn, v_dst03, v_dst13, v_dst23);
+
+ v_dst00 = v_fma(v_src00, v_src00, v_dst00);
+ v_dst01 = v_fma(v_src01, v_src01, v_dst01);
+ v_dst02 = v_fma(v_src02, v_src02, v_dst02);
+ v_dst03 = v_fma(v_src03, v_src03, v_dst03);
+ v_dst10 = v_fma(v_src10, v_src10, v_dst10);
+ v_dst11 = v_fma(v_src11, v_src11, v_dst11);
+ v_dst12 = v_fma(v_src12, v_src12, v_dst12);
+ v_dst13 = v_fma(v_src13, v_src13, v_dst13);
+ v_dst20 = v_fma(v_src20, v_src20, v_dst20);
+ v_dst21 = v_fma(v_src21, v_src21, v_dst21);
+ v_dst22 = v_fma(v_src22, v_src22, v_dst22);
+ v_dst23 = v_fma(v_src23, v_src23, v_dst23);
+
+ v_store_interleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
+ v_store_interleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
+ v_store_interleave(dst + (x + step * 2) * cn, v_dst02, v_dst12, v_dst22);
+ v_store_interleave(dst + (x + step * 3) * cn, v_dst03, v_dst13, v_dst23);
}
}
}
-
+#endif // CV_SIMD_64F
accSqr_general_(src, dst, mask, len, cn, x);
}
void accSqr_simd_(const ushort* src, double* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 8;
+#if CV_SIMD_64F
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float64::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_src = v_load(src + x);
- v_uint32x4 v_int_0, v_int_1;
+ v_uint16 v_src = vx_load(src + x);
+ v_uint32 v_int_0, v_int_1;
v_expand(v_src, v_int_0, v_int_1);
- v_int32x4 v_int0 = v_reinterpret_as_s32(v_int_0);
- v_int32x4 v_int1 = v_reinterpret_as_s32(v_int_1);
+ v_int32 v_int0 = v_reinterpret_as_s32(v_int_0);
+ v_int32 v_int1 = v_reinterpret_as_s32(v_int_1);
- v_float64x2 v_src0 = v_cvt_f64(v_int0);
- v_float64x2 v_src1 = v_cvt_f64_high(v_int0);
- v_float64x2 v_src2 = v_cvt_f64(v_int1);
- v_float64x2 v_src3 = v_cvt_f64_high(v_int1);
- v_src0 = v_src0 * v_src0;
- v_src1 = v_src1 * v_src1;
- v_src2 = v_src2 * v_src2;
- v_src3 = v_src3 * v_src3;
+ v_float64 v_src0 = v_cvt_f64(v_int0);
+ v_float64 v_src1 = v_cvt_f64_high(v_int0);
+ v_float64 v_src2 = v_cvt_f64(v_int1);
+ v_float64 v_src3 = v_cvt_f64_high(v_int1);
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
- v_dst0 += v_src0;
- v_dst1 += v_src1;
- v_dst2 += v_src2;
- v_dst3 += v_src3;
+ v_dst0 = v_fma(v_src0, v_src0, v_dst0);
+ v_dst1 = v_fma(v_src1, v_src1, v_dst1);
+ v_dst2 = v_fma(v_src2, v_src2, v_dst2);
+ v_dst3 = v_fma(v_src3, v_src3, v_dst3);
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
}
}
else
{
- v_uint16x8 v_0 = v_setzero_u16();
+ v_uint16 v_0 = vx_setzero_u16();
if (cn == 1)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint16x8 v_src = v_load(src + x);
+ v_uint16 v_src = vx_load(src + x);
v_src = v_src & v_mask;
- v_uint32x4 v_int_0, v_int_1;
+ v_uint32 v_int_0, v_int_1;
v_expand(v_src, v_int_0, v_int_1);
- v_int32x4 v_int0 = v_reinterpret_as_s32(v_int_0);
- v_int32x4 v_int1 = v_reinterpret_as_s32(v_int_1);
+ v_int32 v_int0 = v_reinterpret_as_s32(v_int_0);
+ v_int32 v_int1 = v_reinterpret_as_s32(v_int_1);
- v_float64x2 v_src0 = v_cvt_f64(v_int0);
- v_float64x2 v_src1 = v_cvt_f64_high(v_int0);
- v_float64x2 v_src2 = v_cvt_f64(v_int1);
- v_float64x2 v_src3 = v_cvt_f64_high(v_int1);
- v_src0 = v_src0 * v_src0;
- v_src1 = v_src1 * v_src1;
- v_src2 = v_src2 * v_src2;
- v_src3 = v_src3 * v_src3;
+ v_float64 v_src0 = v_cvt_f64(v_int0);
+ v_float64 v_src1 = v_cvt_f64_high(v_int0);
+ v_float64 v_src2 = v_cvt_f64(v_int1);
+ v_float64 v_src3 = v_cvt_f64_high(v_int1);
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
- v_dst0 += v_src0;
- v_dst1 += v_src1;
- v_dst2 += v_src2;
- v_dst3 += v_src3;
+ v_dst0 = v_fma(v_src0, v_src0, v_dst0);
+ v_dst1 = v_fma(v_src1, v_src1, v_dst1);
+ v_dst2 = v_fma(v_src2, v_src2, v_dst2);
+ v_dst3 = v_fma(v_src3, v_src3, v_dst3);
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
}
}
else if (cn == 3)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint16x8 v_src0, v_src1, v_src2;
+ v_uint16 v_src0, v_src1, v_src2;
v_load_deinterleave(src + x * cn, v_src0, v_src1, v_src2);
v_src0 = v_src0 & v_mask;
v_src1 = v_src1 & v_mask;
v_src2 = v_src2 & v_mask;
- v_uint32x4 v_int00, v_int01, v_int10, v_int11, v_int20, v_int21;
+ v_uint32 v_int00, v_int01, v_int10, v_int11, v_int20, v_int21;
v_expand(v_src0, v_int00, v_int01);
v_expand(v_src1, v_int10, v_int11);
v_expand(v_src2, v_int20, v_int21);
- v_float64x2 v_src00 = v_cvt_f64(v_reinterpret_as_s32(v_int00));
- v_float64x2 v_src01 = v_cvt_f64_high(v_reinterpret_as_s32(v_int00));
- v_float64x2 v_src02 = v_cvt_f64(v_reinterpret_as_s32(v_int01));
- v_float64x2 v_src03 = v_cvt_f64_high(v_reinterpret_as_s32(v_int01));
- v_float64x2 v_src10 = v_cvt_f64(v_reinterpret_as_s32(v_int10));
- v_float64x2 v_src11 = v_cvt_f64_high(v_reinterpret_as_s32(v_int10));
- v_float64x2 v_src12 = v_cvt_f64(v_reinterpret_as_s32(v_int11));
- v_float64x2 v_src13 = v_cvt_f64_high(v_reinterpret_as_s32(v_int11));
- v_float64x2 v_src20 = v_cvt_f64(v_reinterpret_as_s32(v_int20));
- v_float64x2 v_src21 = v_cvt_f64_high(v_reinterpret_as_s32(v_int20));
- v_float64x2 v_src22 = v_cvt_f64(v_reinterpret_as_s32(v_int21));
- v_float64x2 v_src23 = v_cvt_f64_high(v_reinterpret_as_s32(v_int21));
- v_src00 = v_src00 * v_src00;
- v_src01 = v_src01 * v_src01;
- v_src02 = v_src02 * v_src02;
- v_src03 = v_src03 * v_src03;
- v_src10 = v_src10 * v_src10;
- v_src11 = v_src11 * v_src11;
- v_src12 = v_src12 * v_src12;
- v_src13 = v_src13 * v_src13;
- v_src20 = v_src20 * v_src20;
- v_src21 = v_src21 * v_src21;
- v_src22 = v_src22 * v_src22;
- v_src23 = v_src23 * v_src23;
-
- v_float64x2 v_dst00, v_dst01, v_dst02, v_dst03;
- v_float64x2 v_dst10, v_dst11, v_dst12, v_dst13;
- v_float64x2 v_dst20, v_dst21, v_dst22, v_dst23;
+ v_float64 v_src00 = v_cvt_f64(v_reinterpret_as_s32(v_int00));
+ v_float64 v_src01 = v_cvt_f64_high(v_reinterpret_as_s32(v_int00));
+ v_float64 v_src02 = v_cvt_f64(v_reinterpret_as_s32(v_int01));
+ v_float64 v_src03 = v_cvt_f64_high(v_reinterpret_as_s32(v_int01));
+ v_float64 v_src10 = v_cvt_f64(v_reinterpret_as_s32(v_int10));
+ v_float64 v_src11 = v_cvt_f64_high(v_reinterpret_as_s32(v_int10));
+ v_float64 v_src12 = v_cvt_f64(v_reinterpret_as_s32(v_int11));
+ v_float64 v_src13 = v_cvt_f64_high(v_reinterpret_as_s32(v_int11));
+ v_float64 v_src20 = v_cvt_f64(v_reinterpret_as_s32(v_int20));
+ v_float64 v_src21 = v_cvt_f64_high(v_reinterpret_as_s32(v_int20));
+ v_float64 v_src22 = v_cvt_f64(v_reinterpret_as_s32(v_int21));
+ v_float64 v_src23 = v_cvt_f64_high(v_reinterpret_as_s32(v_int21));
+
+ v_float64 v_dst00, v_dst01, v_dst02, v_dst03;
+ v_float64 v_dst10, v_dst11, v_dst12, v_dst13;
+ v_float64 v_dst20, v_dst21, v_dst22, v_dst23;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 2)* cn, v_dst01, v_dst11, v_dst21);
- v_load_deinterleave(dst + (x + 4)* cn, v_dst02, v_dst12, v_dst22);
- v_load_deinterleave(dst + (x + 6)* cn, v_dst03, v_dst13, v_dst23);
-
- v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 2) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
- v_store_interleave(dst + (x + 4) * cn, v_dst02 + v_src02, v_dst12 + v_src12, v_dst22 + v_src22);
- v_store_interleave(dst + (x + 6) * cn, v_dst03 + v_src03, v_dst13 + v_src13, v_dst23 + v_src23);
+ v_load_deinterleave(dst + (x + step)* cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step * 2)* cn, v_dst02, v_dst12, v_dst22);
+ v_load_deinterleave(dst + (x + step * 3)* cn, v_dst03, v_dst13, v_dst23);
+
+ v_dst00 = v_fma(v_src00, v_src00, v_dst00);
+ v_dst01 = v_fma(v_src01, v_src01, v_dst01);
+ v_dst02 = v_fma(v_src02, v_src02, v_dst02);
+ v_dst03 = v_fma(v_src03, v_src03, v_dst03);
+ v_dst10 = v_fma(v_src10, v_src10, v_dst10);
+ v_dst11 = v_fma(v_src11, v_src11, v_dst11);
+ v_dst12 = v_fma(v_src12, v_src12, v_dst12);
+ v_dst13 = v_fma(v_src13, v_src13, v_dst13);
+ v_dst20 = v_fma(v_src20, v_src20, v_dst20);
+ v_dst21 = v_fma(v_src21, v_src21, v_dst21);
+ v_dst22 = v_fma(v_src22, v_src22, v_dst22);
+ v_dst23 = v_fma(v_src23, v_src23, v_dst23);
+
+ v_store_interleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
+ v_store_interleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
+ v_store_interleave(dst + (x + step * 2) * cn, v_dst02, v_dst12, v_dst22);
+ v_store_interleave(dst + (x + step * 3) * cn, v_dst03, v_dst13, v_dst23);
}
}
}
-
+#endif // CV_SIMD_64F
accSqr_general_(src, dst, mask, len, cn, x);
}
void accSqr_simd_(const float* src, double* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 4;
+#if CV_SIMD_64F
+ const int cVectorWidth = v_float32::nlanes;
+ const int step = v_float64::nlanes;
if (!mask)
{
int size = len * cn;
+ #if CV_AVX && !CV_AVX2
+ for (; x <= size - 8 ; x += 8)
+ {
+ __m256 v_src = _mm256_loadu_ps(src + x);
+ __m256d v_src0 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_src,0));
+ __m256d v_src1 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_src,1));
+ __m256d v_dst0 = _mm256_loadu_pd(dst + x);
+ __m256d v_dst1 = _mm256_loadu_pd(dst + x + 4);
+ v_src0 = _mm256_mul_pd(v_src0, v_src0);
+ v_src1 = _mm256_mul_pd(v_src1, v_src1);
+ v_dst0 = _mm256_add_pd(v_src0, v_dst0);
+ v_dst1 = _mm256_add_pd(v_src1, v_dst1);
+ _mm256_storeu_pd(dst + x, v_dst0);
+ _mm256_storeu_pd(dst + x + 4, v_dst1);
+ }
+ #else
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_float32x4 v_src = v_load(src + x);
- v_float64x2 v_src0 = v_cvt_f64(v_src);
- v_float64x2 v_src1 = v_cvt_f64_high(v_src);
- v_src0 = v_src0 * v_src0;
- v_src1 = v_src1 * v_src1;
+ v_float32 v_src = vx_load(src + x);
+ v_float64 v_src0 = v_cvt_f64(v_src);
+ v_float64 v_src1 = v_cvt_f64_high(v_src);
- v_store(dst + x, v_load(dst + x) + v_src0);
- v_store(dst + x + 2, v_load(dst + x + 2) + v_src1);
+ v_store(dst + x, v_fma(v_src0, v_src0, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_src1, v_src1, vx_load(dst + x + step)));
}
+ #endif // CV_AVX && !CV_AVX2
}
else
{
- v_uint32x4 v_0 = v_setzero_u32();
+ v_uint32 v_0 = vx_setzero_u32();
if (cn == 1)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint32x4 v_mask = v_load_expand_q(mask + x);;
+ v_uint32 v_mask = vx_load_expand_q(mask + x);
v_mask = ~(v_mask == v_0);
- v_float32x4 v_src = v_load(src + x);
+ v_float32 v_src = vx_load(src + x);
v_src = v_src & v_reinterpret_as_f32(v_mask);
- v_float64x2 v_src0 = v_cvt_f64(v_src);
- v_float64x2 v_src1 = v_cvt_f64_high(v_src);
- v_src0 = v_src0 * v_src0;
- v_src1 = v_src1 * v_src1;
+ v_float64 v_src0 = v_cvt_f64(v_src);
+ v_float64 v_src1 = v_cvt_f64_high(v_src);
- v_store(dst + x, v_load(dst + x) + v_src0);
- v_store(dst + x + 2, v_load(dst + x + 2) + v_src1);
+ v_store(dst + x, v_fma(v_src0, v_src0, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_src1, v_src1, vx_load(dst + x + step)));
}
}
else if (cn == 3)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint32x4 v_mask = v_load_expand_q(mask + x);
+ v_uint32 v_mask = vx_load_expand_q(mask + x);
v_mask = ~(v_mask == v_0);
- v_float32x4 v_src0, v_src1, v_src2;
+ v_float32 v_src0, v_src1, v_src2;
v_load_deinterleave(src + x * cn, v_src0, v_src1, v_src2);
v_src0 = v_src0 & v_reinterpret_as_f32(v_mask);
v_src1 = v_src1 & v_reinterpret_as_f32(v_mask);
v_src2 = v_src2 & v_reinterpret_as_f32(v_mask);
- v_float64x2 v_src00 = v_cvt_f64(v_src0);
- v_float64x2 v_src01 = v_cvt_f64_high(v_src0);
- v_float64x2 v_src10 = v_cvt_f64(v_src1);
- v_float64x2 v_src11 = v_cvt_f64_high(v_src1);
- v_float64x2 v_src20 = v_cvt_f64(v_src2);
- v_float64x2 v_src21 = v_cvt_f64_high(v_src2);
- v_src00 = v_src00 * v_src00;
- v_src01 = v_src01 * v_src01;
- v_src10 = v_src10 * v_src10;
- v_src11 = v_src11 * v_src11;
- v_src20 = v_src20 * v_src20;
- v_src21 = v_src21 * v_src21;
-
- v_float64x2 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
+ v_float64 v_src00 = v_cvt_f64(v_src0);
+ v_float64 v_src01 = v_cvt_f64_high(v_src0);
+ v_float64 v_src10 = v_cvt_f64(v_src1);
+ v_float64 v_src11 = v_cvt_f64_high(v_src1);
+ v_float64 v_src20 = v_cvt_f64(v_src2);
+ v_float64 v_src21 = v_cvt_f64_high(v_src2);
+
+ v_float64 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 2) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
- v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 2) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
+ v_dst00 = v_fma(v_src00, v_src00, v_dst00);
+ v_dst01 = v_fma(v_src01, v_src01, v_dst01);
+ v_dst10 = v_fma(v_src10, v_src10, v_dst10);
+ v_dst11 = v_fma(v_src11, v_src11, v_dst11);
+ v_dst20 = v_fma(v_src20, v_src20, v_dst20);
+ v_dst21 = v_fma(v_src21, v_src21, v_dst21);
+
+ v_store_interleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
+ v_store_interleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
}
}
}
-
+#endif // CV_SIMD_64F
accSqr_general_(src, dst, mask, len, cn, x);
}
void accSqr_simd_(const double* src, double* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 4;
+#if CV_SIMD_64F
+ const int cVectorWidth = v_float64::nlanes * 2;
+ const int step = v_float64::nlanes;
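+ // no widening (double in, double out): two v_float64 vectors per iteration, so cVectorWidth = 2 * step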
if (!mask)
{
int size = len * cn;
+ #if CV_AVX && !CV_AVX2
+ for (; x <= size - 4 ; x += 4)
+ {
+ __m256d v_src = _mm256_loadu_pd(src + x);
+ __m256d v_dst = _mm256_loadu_pd(dst + x);
+ v_src = _mm256_mul_pd(v_src, v_src);
+ v_dst = _mm256_add_pd(v_dst, v_src);
+ _mm256_storeu_pd(dst + x, v_dst);
+ }
+ #else
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_float64x2 v_src0 = v_load(src + x);
- v_float64x2 v_src1 = v_load(src + x + 2);
- v_src0 = v_src0 * v_src0;
- v_src1 = v_src1 * v_src1;
-
- v_store(dst + x, v_load(dst + x) + v_src0);
- v_store(dst + x + 2, v_load(dst + x + 2) + v_src1);
+ v_float64 v_src0 = vx_load(src + x);
+ v_float64 v_src1 = vx_load(src + x + step);
+ v_store(dst + x, v_fma(v_src0, v_src0, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_src1, v_src1, vx_load(dst + x + step)));
}
+ #endif // CV_AVX && !CV_AVX2
}
else
{
- v_uint64x2 v_0 = v_setzero_u64();
+ v_uint64 v_0 = vx_setzero_u64();
if (cn == 1)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint32x4 v_mask32 = v_load_expand_q(mask + x);
- v_uint64x2 v_masku640, v_masku641;
+ v_uint32 v_mask32 = vx_load_expand_q(mask + x);
+ v_uint64 v_masku640, v_masku641;
v_expand(v_mask32, v_masku640, v_masku641);
- v_float64x2 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
- v_float64x2 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
- v_float64x2 v_src0 = v_load(src + x);
- v_float64x2 v_src1 = v_load(src + x + 2);
+ v_float64 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
+ v_float64 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
+ v_float64 v_src0 = vx_load(src + x);
+ v_float64 v_src1 = vx_load(src + x + step);
v_src0 = v_src0 & v_mask0;
v_src1 = v_src1 & v_mask1;
- v_src0 = v_src0 * v_src0;
- v_src1 = v_src1 * v_src1;
-
- v_store(dst + x, v_load(dst + x) + v_src0);
- v_store(dst + x + 2, v_load(dst + x + 2) + v_src1);
+ v_store(dst + x, v_fma(v_src0, v_src0, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_src1, v_src1, vx_load(dst + x + step)));
}
}
else if (cn == 3)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint32x4 v_mask32 = v_load_expand_q(mask + x);
- v_uint64x2 v_masku640, v_masku641;
+ v_uint32 v_mask32 = vx_load_expand_q(mask + x);
+ v_uint64 v_masku640, v_masku641;
v_expand(v_mask32, v_masku640, v_masku641);
- v_float64x2 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
- v_float64x2 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
+ v_float64 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
+ v_float64 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
- v_float64x2 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
+ v_float64 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
v_load_deinterleave(src + x * cn, v_src00, v_src10, v_src20);
- v_load_deinterleave(src + (x + 2) * cn, v_src01, v_src11, v_src21);
+ v_load_deinterleave(src + (x + step) * cn, v_src01, v_src11, v_src21);
v_src00 = v_src00 & v_mask0;
v_src01 = v_src01 & v_mask1;
v_src10 = v_src10 & v_mask0;
v_src11 = v_src11 & v_mask1;
v_src20 = v_src20 & v_mask0;
v_src21 = v_src21 & v_mask1;
- v_src00 = v_src00 * v_src00;
- v_src01 = v_src01 * v_src01;
- v_src10 = v_src10 * v_src10;
- v_src11 = v_src11 * v_src11;
- v_src20 = v_src20 * v_src20;
- v_src21 = v_src21 * v_src21;
-
- v_float64x2 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
+
+ v_float64 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 2) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
- v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 2) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
+ v_dst00 = v_fma(v_src00, v_src00, v_dst00);
+ v_dst01 = v_fma(v_src01, v_src01, v_dst01);
+ v_dst10 = v_fma(v_src10, v_src10, v_dst10);
+ v_dst11 = v_fma(v_src11, v_src11, v_dst11);
+ v_dst20 = v_fma(v_src20, v_src20, v_dst20);
+ v_dst21 = v_fma(v_src21, v_src21, v_dst21);
+
+ v_store_interleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
+ v_store_interleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
}
}
}
-
+#endif // CV_SIMD_64F
accSqr_general_(src, dst, mask, len, cn, x);
}
-#else
-void accSqr_simd_(const uchar* src, double* dst, const uchar* mask, int len, int cn)
-{
- accSqr_general_(src, dst, mask, len, cn, 0);
-}
-
-void accSqr_simd_(const ushort* src, double* dst, const uchar* mask, int len, int cn)
-{
- accSqr_general_(src, dst, mask, len, cn, 0);
-}
-
-void accSqr_simd_(const float* src, double* dst, const uchar* mask, int len, int cn)
-{
- accSqr_general_(src, dst, mask, len, cn, 0);
-}
-
-void accSqr_simd_(const double* src, double* dst, const uchar* mask, int len, int cn)
-{
- accSqr_general_(src, dst, mask, len, cn, 0);
-}
-#endif
// product accumulate optimized with universal intrinsics
void accProd_simd_(const uchar* src1, const uchar* src2, float* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 16;
+#if CV_SIMD
+ const int cVectorWidth = v_uint8::nlanes;
+ const int step = v_uint32::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint8x16 v_1src = v_load(src1 + x);
- v_uint8x16 v_2src = v_load(src2 + x);
+ v_uint8 v_1src = vx_load(src1 + x);
+ v_uint8 v_2src = vx_load(src2 + x);
- v_uint16x8 v_1src0, v_1src1, v_2src0, v_2src1;
- v_expand(v_1src, v_1src0, v_1src1);
- v_expand(v_2src, v_2src0, v_2src1);
+ v_uint16 v_src0, v_src1;
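+ // v_mul_expand multiplies the u8 lanes and widens the products to u16 in one call, replacing expand + v_mul_wrap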
+ v_mul_expand(v_1src, v_2src, v_src0, v_src1);
- v_uint16x8 v_src0, v_src1;
- v_src0 = v_mul_wrap(v_1src0, v_2src0);
- v_src1 = v_mul_wrap(v_1src1, v_2src1);
-
- v_uint32x4 v_src00, v_src01, v_src10, v_src11;
+ v_uint32 v_src00, v_src01, v_src10, v_src11;
v_expand(v_src0, v_src00, v_src01);
v_expand(v_src1, v_src10, v_src11);
- v_store(dst + x, v_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src00)));
- v_store(dst + x + 4, v_load(dst + x + 4) + v_cvt_f32(v_reinterpret_as_s32(v_src01)));
- v_store(dst + x + 8, v_load(dst + x + 8) + v_cvt_f32(v_reinterpret_as_s32(v_src10)));
- v_store(dst + x + 12, v_load(dst + x + 12) + v_cvt_f32(v_reinterpret_as_s32(v_src11)));
+ v_store(dst + x, vx_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src00)));
+ v_store(dst + x + step, vx_load(dst + x + step) + v_cvt_f32(v_reinterpret_as_s32(v_src01)));
+ v_store(dst + x + step * 2, vx_load(dst + x + step * 2) + v_cvt_f32(v_reinterpret_as_s32(v_src10)));
+ v_store(dst + x + step * 3, vx_load(dst + x + step * 3) + v_cvt_f32(v_reinterpret_as_s32(v_src11)));
}
}
else
{
- v_uint8x16 v_0 = v_setzero_u8();
+ v_uint8 v_0 = vx_setzero_u8();
if (cn == 1)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint8x16 v_mask = v_load(mask + x);
+ v_uint8 v_mask = vx_load(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint8x16 v_1src = v_load(src1 + x);
- v_uint8x16 v_2src = v_load(src2 + x);
+ v_uint8 v_1src = vx_load(src1 + x);
+ v_uint8 v_2src = vx_load(src2 + x);
v_1src = v_1src & v_mask;
v_2src = v_2src & v_mask;
- v_uint16x8 v_1src0, v_1src1, v_2src0, v_2src1;
- v_expand(v_1src, v_1src0, v_1src1);
- v_expand(v_2src, v_2src0, v_2src1);
-
- v_uint16x8 v_src0, v_src1;
- v_src0 = v_mul_wrap(v_1src0, v_2src0);
- v_src1 = v_mul_wrap(v_1src1, v_2src1);
+ v_uint16 v_src0, v_src1;
+ v_mul_expand(v_1src, v_2src, v_src0, v_src1);
- v_uint32x4 v_src00, v_src01, v_src10, v_src11;
+ v_uint32 v_src00, v_src01, v_src10, v_src11;
v_expand(v_src0, v_src00, v_src01);
v_expand(v_src1, v_src10, v_src11);
- v_store(dst + x, v_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src00)));
- v_store(dst + x + 4, v_load(dst + x + 4) + v_cvt_f32(v_reinterpret_as_s32(v_src01)));
- v_store(dst + x + 8, v_load(dst + x + 8) + v_cvt_f32(v_reinterpret_as_s32(v_src10)));
- v_store(dst + x + 12, v_load(dst + x + 12) + v_cvt_f32(v_reinterpret_as_s32(v_src11)));
+ v_store(dst + x, vx_load(dst + x) + v_cvt_f32(v_reinterpret_as_s32(v_src00)));
+ v_store(dst + x + step, vx_load(dst + x + step) + v_cvt_f32(v_reinterpret_as_s32(v_src01)));
+ v_store(dst + x + step * 2, vx_load(dst + x + step * 2) + v_cvt_f32(v_reinterpret_as_s32(v_src10)));
+ v_store(dst + x + step * 3, vx_load(dst + x + step * 3) + v_cvt_f32(v_reinterpret_as_s32(v_src11)));
}
}
else if (cn == 3)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint8x16 v_mask = v_load(mask + x);
+ v_uint8 v_mask = vx_load(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint8x16 v_1src0, v_1src1, v_1src2, v_2src0, v_2src1, v_2src2;
+ v_uint8 v_1src0, v_1src1, v_1src2, v_2src0, v_2src1, v_2src2;
v_load_deinterleave(src1 + x * cn, v_1src0, v_1src1, v_1src2);
v_load_deinterleave(src2 + x * cn, v_2src0, v_2src1, v_2src2);
v_1src0 = v_1src0 & v_mask;
v_2src1 = v_2src1 & v_mask;
v_2src2 = v_2src2 & v_mask;
- v_uint16x8 v_1src00, v_1src01, v_1src10, v_1src11, v_1src20, v_1src21, v_2src00, v_2src01, v_2src10, v_2src11, v_2src20, v_2src21;
- v_expand(v_1src0, v_1src00, v_1src01);
- v_expand(v_1src1, v_1src10, v_1src11);
- v_expand(v_1src2, v_1src20, v_1src21);
- v_expand(v_2src0, v_2src00, v_2src01);
- v_expand(v_2src1, v_2src10, v_2src11);
- v_expand(v_2src2, v_2src20, v_2src21);
-
- v_uint16x8 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
- v_src00 = v_mul_wrap(v_1src00, v_2src00);
- v_src01 = v_mul_wrap(v_1src01, v_2src01);
- v_src10 = v_mul_wrap(v_1src10, v_2src10);
- v_src11 = v_mul_wrap(v_1src11, v_2src11);
- v_src20 = v_mul_wrap(v_1src20, v_2src20);
- v_src21 = v_mul_wrap(v_1src21, v_2src21);
+ v_uint16 v_src00, v_src01, v_src10, v_src11, v_src20, v_src21;
+ v_mul_expand(v_1src0, v_2src0, v_src00, v_src01);
+ v_mul_expand(v_1src1, v_2src1, v_src10, v_src11);
+ v_mul_expand(v_1src2, v_2src2, v_src20, v_src21);
- v_uint32x4 v_src000, v_src001, v_src002, v_src003, v_src100, v_src101, v_src102, v_src103, v_src200, v_src201, v_src202, v_src203;
+ v_uint32 v_src000, v_src001, v_src002, v_src003, v_src100, v_src101, v_src102, v_src103, v_src200, v_src201, v_src202, v_src203;
v_expand(v_src00, v_src000, v_src001);
v_expand(v_src01, v_src002, v_src003);
v_expand(v_src10, v_src100, v_src101);
v_expand(v_src11, v_src102, v_src103);
v_expand(v_src20, v_src200, v_src201);
v_expand(v_src21, v_src202, v_src203);
- v_float32x4 v_dst000, v_dst001, v_dst002, v_dst003, v_dst100, v_dst101, v_dst102, v_dst103, v_dst200, v_dst201, v_dst202, v_dst203;
+ v_float32 v_dst000, v_dst001, v_dst002, v_dst003, v_dst100, v_dst101, v_dst102, v_dst103, v_dst200, v_dst201, v_dst202, v_dst203;
v_load_deinterleave(dst + x * cn, v_dst000, v_dst100, v_dst200);
- v_load_deinterleave(dst + (x + 4) * cn, v_dst001, v_dst101, v_dst201);
- v_load_deinterleave(dst + (x + 8) * cn, v_dst002, v_dst102, v_dst202);
- v_load_deinterleave(dst + (x + 12) * cn, v_dst003, v_dst103, v_dst203);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst001, v_dst101, v_dst201);
+ v_load_deinterleave(dst + (x + step * 2) * cn, v_dst002, v_dst102, v_dst202);
+ v_load_deinterleave(dst + (x + step * 3) * cn, v_dst003, v_dst103, v_dst203);
v_dst000 = v_dst000 + v_cvt_f32(v_reinterpret_as_s32(v_src000));
v_dst001 = v_dst001 + v_cvt_f32(v_reinterpret_as_s32(v_src001));
v_dst002 = v_dst002 + v_cvt_f32(v_reinterpret_as_s32(v_src002));
v_dst003 = v_dst003 + v_cvt_f32(v_reinterpret_as_s32(v_src003));
v_dst100 = v_dst100 + v_cvt_f32(v_reinterpret_as_s32(v_src100));
v_dst101 = v_dst101 + v_cvt_f32(v_reinterpret_as_s32(v_src101));
v_dst102 = v_dst102 + v_cvt_f32(v_reinterpret_as_s32(v_src102));
v_dst103 = v_dst103 + v_cvt_f32(v_reinterpret_as_s32(v_src103));
v_dst200 = v_dst200 + v_cvt_f32(v_reinterpret_as_s32(v_src200));
v_dst201 = v_dst201 + v_cvt_f32(v_reinterpret_as_s32(v_src201));
v_dst202 = v_dst202 + v_cvt_f32(v_reinterpret_as_s32(v_src202));
v_dst203 = v_dst203 + v_cvt_f32(v_reinterpret_as_s32(v_src203));
v_store_interleave(dst + x * cn, v_dst000, v_dst100, v_dst200);
- v_store_interleave(dst + (x + 4) * cn, v_dst001, v_dst101, v_dst201);
- v_store_interleave(dst + (x + 8) * cn, v_dst002, v_dst102, v_dst202);
- v_store_interleave(dst + (x + 12) * cn, v_dst003, v_dst103, v_dst203);
+ v_store_interleave(dst + (x + step) * cn, v_dst001, v_dst101, v_dst201);
+ v_store_interleave(dst + (x + step * 2) * cn, v_dst002, v_dst102, v_dst202);
+ v_store_interleave(dst + (x + step * 3) * cn, v_dst003, v_dst103, v_dst203);
}
}
}
-
+#endif // CV_SIMD
accProd_general_(src1, src2, dst, mask, len, cn, x);
}
void accProd_simd_(const ushort* src1, const ushort* src2, float* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 8;
+#if CV_SIMD
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float32::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_1src = v_load(src1 + x);
- v_uint16x8 v_2src = v_load(src2 + x);
+ v_uint16 v_1src = vx_load(src1 + x);
+ v_uint16 v_2src = vx_load(src2 + x);
- v_uint32x4 v_1src0, v_1src1, v_2src0, v_2src1;
+ v_uint32 v_1src0, v_1src1, v_2src0, v_2src1;
v_expand(v_1src, v_1src0, v_1src1);
v_expand(v_2src, v_2src0, v_2src1);
- v_float32x4 v_1float0 = v_cvt_f32(v_reinterpret_as_s32(v_1src0));
- v_float32x4 v_1float1 = v_cvt_f32(v_reinterpret_as_s32(v_1src1));
- v_float32x4 v_2float0 = v_cvt_f32(v_reinterpret_as_s32(v_2src0));
- v_float32x4 v_2float1 = v_cvt_f32(v_reinterpret_as_s32(v_2src1));
-
- v_float32x4 v_src0 = v_1float0 * v_2float0;
- v_float32x4 v_src1 = v_1float1 * v_2float1;
+ v_float32 v_1float0 = v_cvt_f32(v_reinterpret_as_s32(v_1src0));
+ v_float32 v_1float1 = v_cvt_f32(v_reinterpret_as_s32(v_1src1));
+ v_float32 v_2float0 = v_cvt_f32(v_reinterpret_as_s32(v_2src0));
+ v_float32 v_2float1 = v_cvt_f32(v_reinterpret_as_s32(v_2src1));
- v_store(dst + x, v_load(dst + x) + v_src0);
- v_store(dst + x + 4, v_load(dst + x + 4) + v_src1);
+ v_store(dst + x, v_fma(v_1float0, v_2float0, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_1float1, v_2float1, vx_load(dst + x + step)));
}
}
else
{
- v_uint16x8 v_0 = v_setzero_u16();
+ v_uint16 v_0 = vx_setzero_u16();
if (cn == 1)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_0 == v_mask);
- v_uint16x8 v_1src = v_load(src1 + x) & v_mask;
- v_uint16x8 v_2src = v_load(src2 + x) & v_mask;
+ v_uint16 v_1src = vx_load(src1 + x) & v_mask;
+ v_uint16 v_2src = vx_load(src2 + x) & v_mask;
- v_uint32x4 v_1src0, v_1src1, v_2src0, v_2src1;
+ v_uint32 v_1src0, v_1src1, v_2src0, v_2src1;
v_expand(v_1src, v_1src0, v_1src1);
v_expand(v_2src, v_2src0, v_2src1);
- v_float32x4 v_1float0 = v_cvt_f32(v_reinterpret_as_s32(v_1src0));
- v_float32x4 v_1float1 = v_cvt_f32(v_reinterpret_as_s32(v_1src1));
- v_float32x4 v_2float0 = v_cvt_f32(v_reinterpret_as_s32(v_2src0));
- v_float32x4 v_2float1 = v_cvt_f32(v_reinterpret_as_s32(v_2src1));
+ v_float32 v_1float0 = v_cvt_f32(v_reinterpret_as_s32(v_1src0));
+ v_float32 v_1float1 = v_cvt_f32(v_reinterpret_as_s32(v_1src1));
+ v_float32 v_2float0 = v_cvt_f32(v_reinterpret_as_s32(v_2src0));
+ v_float32 v_2float1 = v_cvt_f32(v_reinterpret_as_s32(v_2src1));
- v_float32x4 v_src0 = v_1float0 * v_2float0;
- v_float32x4 v_src1 = v_1float1 * v_2float1;
-
- v_store(dst + x, v_load(dst + x) + v_src0);
- v_store(dst + x + 4, v_load(dst + x + 4) + v_src1);
+ v_store(dst + x, v_fma(v_1float0, v_2float0, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_1float1, v_2float1, vx_load(dst + x + step)));
}
}
else if (cn == 3)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_0 == v_mask);
- v_uint16x8 v_1src0, v_1src1, v_1src2, v_2src0, v_2src1, v_2src2;
+ v_uint16 v_1src0, v_1src1, v_1src2, v_2src0, v_2src1, v_2src2;
v_load_deinterleave(src1 + x * cn, v_1src0, v_1src1, v_1src2);
v_load_deinterleave(src2 + x * cn, v_2src0, v_2src1, v_2src2);
v_1src0 = v_1src0 & v_mask;
v_2src1 = v_2src1 & v_mask;
v_2src2 = v_2src2 & v_mask;
- v_uint32x4 v_1src00, v_1src01, v_1src10, v_1src11, v_1src20, v_1src21, v_2src00, v_2src01, v_2src10, v_2src11, v_2src20, v_2src21;
+ v_uint32 v_1src00, v_1src01, v_1src10, v_1src11, v_1src20, v_1src21, v_2src00, v_2src01, v_2src10, v_2src11, v_2src20, v_2src21;
v_expand(v_1src0, v_1src00, v_1src01);
v_expand(v_1src1, v_1src10, v_1src11);
v_expand(v_1src2, v_1src20, v_1src21);
v_expand(v_2src0, v_2src00, v_2src01);
v_expand(v_2src1, v_2src10, v_2src11);
v_expand(v_2src2, v_2src20, v_2src21);
- v_float32x4 v_1float00 = v_cvt_f32(v_reinterpret_as_s32(v_1src00));
- v_float32x4 v_1float01 = v_cvt_f32(v_reinterpret_as_s32(v_1src01));
- v_float32x4 v_1float10 = v_cvt_f32(v_reinterpret_as_s32(v_1src10));
- v_float32x4 v_1float11 = v_cvt_f32(v_reinterpret_as_s32(v_1src11));
- v_float32x4 v_1float20 = v_cvt_f32(v_reinterpret_as_s32(v_1src20));
- v_float32x4 v_1float21 = v_cvt_f32(v_reinterpret_as_s32(v_1src21));
- v_float32x4 v_2float00 = v_cvt_f32(v_reinterpret_as_s32(v_2src00));
- v_float32x4 v_2float01 = v_cvt_f32(v_reinterpret_as_s32(v_2src01));
- v_float32x4 v_2float10 = v_cvt_f32(v_reinterpret_as_s32(v_2src10));
- v_float32x4 v_2float11 = v_cvt_f32(v_reinterpret_as_s32(v_2src11));
- v_float32x4 v_2float20 = v_cvt_f32(v_reinterpret_as_s32(v_2src20));
- v_float32x4 v_2float21 = v_cvt_f32(v_reinterpret_as_s32(v_2src21));
-
- v_float32x4 v_src00 = v_1float00 * v_2float00;
- v_float32x4 v_src01 = v_1float01 * v_2float01;
- v_float32x4 v_src10 = v_1float10 * v_2float10;
- v_float32x4 v_src11 = v_1float11 * v_2float11;
- v_float32x4 v_src20 = v_1float20 * v_2float20;
- v_float32x4 v_src21 = v_1float21 * v_2float21;
-
- v_float32x4 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
+ v_float32 v_1float00 = v_cvt_f32(v_reinterpret_as_s32(v_1src00));
+ v_float32 v_1float01 = v_cvt_f32(v_reinterpret_as_s32(v_1src01));
+ v_float32 v_1float10 = v_cvt_f32(v_reinterpret_as_s32(v_1src10));
+ v_float32 v_1float11 = v_cvt_f32(v_reinterpret_as_s32(v_1src11));
+ v_float32 v_1float20 = v_cvt_f32(v_reinterpret_as_s32(v_1src20));
+ v_float32 v_1float21 = v_cvt_f32(v_reinterpret_as_s32(v_1src21));
+ v_float32 v_2float00 = v_cvt_f32(v_reinterpret_as_s32(v_2src00));
+ v_float32 v_2float01 = v_cvt_f32(v_reinterpret_as_s32(v_2src01));
+ v_float32 v_2float10 = v_cvt_f32(v_reinterpret_as_s32(v_2src10));
+ v_float32 v_2float11 = v_cvt_f32(v_reinterpret_as_s32(v_2src11));
+ v_float32 v_2float20 = v_cvt_f32(v_reinterpret_as_s32(v_2src20));
+ v_float32 v_2float21 = v_cvt_f32(v_reinterpret_as_s32(v_2src21));
+
+ v_float32 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 4) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
- v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 4) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
+ v_dst00 = v_fma(v_1float00, v_2float00, v_dst00);
+ v_dst01 = v_fma(v_1float01, v_2float01, v_dst01);
+ v_dst10 = v_fma(v_1float10, v_2float10, v_dst10);
+ v_dst11 = v_fma(v_1float11, v_2float11, v_dst11);
+ v_dst20 = v_fma(v_1float20, v_2float20, v_dst20);
+ v_dst21 = v_fma(v_1float21, v_2float21, v_dst21);
+
+ v_store_interleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
+ v_store_interleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
}
}
}
-
+#endif // CV_SIMD
accProd_general_(src1, src2, dst, mask, len, cn, x);
}
void accProd_simd_(const float* src1, const float* src2, float* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 8;
+#if CV_SIMD
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float32::nlanes;
if (!mask)
{
int size = len * cn;
+ #if CV_AVX && !CV_AVX2
+ for (; x <= size - 8 ; x += 8)
+ {
+ __m256 v_src0 = _mm256_loadu_ps(src1 + x);
+ __m256 v_src1 = _mm256_loadu_ps(src2 + x);
+ __m256 v_dst = _mm256_loadu_ps(dst + x);
+ __m256 v_src = _mm256_mul_ps(v_src0, v_src1);
+ v_dst = _mm256_add_ps(v_src, v_dst);
+ _mm256_storeu_ps(dst + x, v_dst);
+ }
+ #else
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_store(dst + x, v_load(dst + x) + v_load(src1 + x) * v_load(src2 + x));
- v_store(dst + x + 4, v_load(dst + x + 4) + v_load(src1 + x + 4) * v_load(src2 + x + 4));
+ v_store(dst + x, v_fma(vx_load(src1 + x), vx_load(src2 + x), vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(vx_load(src1 + x + step), vx_load(src2 + x + step), vx_load(dst + x + step)));
}
+ #endif // CV_AVX && !CV_AVX2
}
else
{
- v_uint32x4 v_0 = v_setzero_u32();
+ v_uint32 v_0 = vx_setzero_u32();
if (cn == 1)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint32x4 v_mask32_0 = v_load_expand_q(mask + x);
- v_uint32x4 v_mask32_1 = v_load_expand_q(mask + x + 4);
- v_float32x4 v_mask0 = v_reinterpret_as_f32(~(v_mask32_0 == v_0));
- v_float32x4 v_mask1 = v_reinterpret_as_f32(~(v_mask32_1 == v_0));
+ v_uint32 v_mask32_0 = vx_load_expand_q(mask + x);
+ v_uint32 v_mask32_1 = vx_load_expand_q(mask + x + step);
+ v_float32 v_mask0 = v_reinterpret_as_f32(~(v_mask32_0 == v_0));
+ v_float32 v_mask1 = v_reinterpret_as_f32(~(v_mask32_1 == v_0));
- v_store(dst + x, v_load(dst + x) + ((v_load(src1 + x) * v_load(src2 + x)) & v_mask0));
- v_store(dst + x + 4, v_load(dst + x + 4) + ((v_load(src1 + x + 4) * v_load(src2 + x + 4)) & v_mask1));
+ v_store(dst + x, vx_load(dst + x) + ((vx_load(src1 + x) * vx_load(src2 + x)) & v_mask0));
+ v_store(dst + x + step, vx_load(dst + x + step) + ((vx_load(src1 + x + step) * vx_load(src2 + x + step)) & v_mask1));
}
}
else if (cn == 3)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint32x4 v_mask32_0 = v_load_expand_q(mask + x);
- v_uint32x4 v_mask32_1 = v_load_expand_q(mask + x + 4);
- v_float32x4 v_mask0 = v_reinterpret_as_f32(~(v_mask32_0 == v_0));
- v_float32x4 v_mask1 = v_reinterpret_as_f32(~(v_mask32_1 == v_0));
+ v_uint32 v_mask32_0 = vx_load_expand_q(mask + x);
+ v_uint32 v_mask32_1 = vx_load_expand_q(mask + x + step);
+ v_float32 v_mask0 = v_reinterpret_as_f32(~(v_mask32_0 == v_0));
+ v_float32 v_mask1 = v_reinterpret_as_f32(~(v_mask32_1 == v_0));
- v_float32x4 v_1src00, v_1src01, v_1src10, v_1src11, v_1src20, v_1src21;
- v_float32x4 v_2src00, v_2src01, v_2src10, v_2src11, v_2src20, v_2src21;
+ v_float32 v_1src00, v_1src01, v_1src10, v_1src11, v_1src20, v_1src21;
+ v_float32 v_2src00, v_2src01, v_2src10, v_2src11, v_2src20, v_2src21;
v_load_deinterleave(src1 + x * cn, v_1src00, v_1src10, v_1src20);
v_load_deinterleave(src2 + x * cn, v_2src00, v_2src10, v_2src20);
- v_load_deinterleave(src1 + (x + 4) * cn, v_1src01, v_1src11, v_1src21);
- v_load_deinterleave(src2 + (x + 4) * cn, v_2src01, v_2src11, v_2src21);
+ v_load_deinterleave(src1 + (x + step) * cn, v_1src01, v_1src11, v_1src21);
+ v_load_deinterleave(src2 + (x + step) * cn, v_2src01, v_2src11, v_2src21);
- v_float32x4 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
+ v_float32 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 4) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
v_store_interleave(dst + x * cn, v_dst00 + ((v_1src00 * v_2src00) & v_mask0), v_dst10 + ((v_1src10 * v_2src10) & v_mask0), v_dst20 + ((v_1src20 * v_2src20) & v_mask0));
- v_store_interleave(dst + (x + 4) * cn, v_dst01 + ((v_1src01 * v_2src01) & v_mask1), v_dst11 + ((v_1src11 * v_2src11) & v_mask1), v_dst21 + ((v_1src21 * v_2src21) & v_mask1));
+ v_store_interleave(dst + (x + step) * cn, v_dst01 + ((v_1src01 * v_2src01) & v_mask1), v_dst11 + ((v_1src11 * v_2src11) & v_mask1), v_dst21 + ((v_1src21 * v_2src21) & v_mask1));
}
}
}
-
+#endif // CV_SIMD
accProd_general_(src1, src2, dst, mask, len, cn, x);
}
-#if CV_SIMD128_64F
+
void accProd_simd_(const uchar* src1, const uchar* src2, double* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 8;
+#if CV_SIMD_64F
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float64::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_1int = v_load_expand(src1 + x);
- v_uint16x8 v_2int = v_load_expand(src2 + x);
+ v_uint16 v_1int = vx_load_expand(src1 + x);
+ v_uint16 v_2int = vx_load_expand(src2 + x);
- v_uint32x4 v_1int_0, v_1int_1, v_2int_0, v_2int_1;
+ v_uint32 v_1int_0, v_1int_1, v_2int_0, v_2int_1;
v_expand(v_1int, v_1int_0, v_1int_1);
v_expand(v_2int, v_2int_0, v_2int_1);
- v_int32x4 v_1int0 = v_reinterpret_as_s32(v_1int_0);
- v_int32x4 v_1int1 = v_reinterpret_as_s32(v_1int_1);
- v_int32x4 v_2int0 = v_reinterpret_as_s32(v_2int_0);
- v_int32x4 v_2int1 = v_reinterpret_as_s32(v_2int_1);
+ v_int32 v_1int0 = v_reinterpret_as_s32(v_1int_0);
+ v_int32 v_1int1 = v_reinterpret_as_s32(v_1int_1);
+ v_int32 v_2int0 = v_reinterpret_as_s32(v_2int_0);
+ v_int32 v_2int1 = v_reinterpret_as_s32(v_2int_1);
- v_float64x2 v_src0 = v_cvt_f64(v_1int0) * v_cvt_f64(v_2int0);
- v_float64x2 v_src1 = v_cvt_f64_high(v_1int0) * v_cvt_f64_high(v_2int0);
- v_float64x2 v_src2 = v_cvt_f64(v_1int1) * v_cvt_f64(v_2int1);
- v_float64x2 v_src3 = v_cvt_f64_high(v_1int1) * v_cvt_f64_high(v_2int1);
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
-
- v_dst0 += v_src0;
- v_dst1 += v_src1;
- v_dst2 += v_src2;
- v_dst3 += v_src3;
+ v_dst0 = v_fma(v_cvt_f64(v_1int0), v_cvt_f64(v_2int0), v_dst0);
+ v_dst1 = v_fma(v_cvt_f64_high(v_1int0), v_cvt_f64_high(v_2int0), v_dst1);
+ v_dst2 = v_fma(v_cvt_f64(v_1int1), v_cvt_f64(v_2int1), v_dst2);
+ v_dst3 = v_fma(v_cvt_f64_high(v_1int1), v_cvt_f64_high(v_2int1), v_dst3);
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
}
}
else
{
- v_uint16x8 v_0 = v_setzero_u16();
+ v_uint16 v_0 = vx_setzero_u16();
if (cn == 1)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint16x8 v_1int = v_load_expand(src1 + x) & v_mask;
- v_uint16x8 v_2int = v_load_expand(src2 + x) & v_mask;
+ v_uint16 v_1int = vx_load_expand(src1 + x) & v_mask;
+ v_uint16 v_2int = vx_load_expand(src2 + x) & v_mask;
- v_uint32x4 v_1int_0, v_1int_1, v_2int_0, v_2int_1;
+ v_uint32 v_1int_0, v_1int_1, v_2int_0, v_2int_1;
v_expand(v_1int, v_1int_0, v_1int_1);
v_expand(v_2int, v_2int_0, v_2int_1);
- v_int32x4 v_1int0 = v_reinterpret_as_s32(v_1int_0);
- v_int32x4 v_1int1 = v_reinterpret_as_s32(v_1int_1);
- v_int32x4 v_2int0 = v_reinterpret_as_s32(v_2int_0);
- v_int32x4 v_2int1 = v_reinterpret_as_s32(v_2int_1);
-
- v_float64x2 v_src0 = v_cvt_f64(v_1int0) * v_cvt_f64(v_2int0);
- v_float64x2 v_src1 = v_cvt_f64_high(v_1int0) * v_cvt_f64_high(v_2int0);
- v_float64x2 v_src2 = v_cvt_f64(v_1int1) * v_cvt_f64(v_2int1);
- v_float64x2 v_src3 = v_cvt_f64_high(v_1int1) * v_cvt_f64_high(v_2int1);
+ v_int32 v_1int0 = v_reinterpret_as_s32(v_1int_0);
+ v_int32 v_1int1 = v_reinterpret_as_s32(v_1int_1);
+ v_int32 v_2int0 = v_reinterpret_as_s32(v_2int_0);
+ v_int32 v_2int1 = v_reinterpret_as_s32(v_2int_1);
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
- v_dst0 += v_src0;
- v_dst1 += v_src1;
- v_dst2 += v_src2;
- v_dst3 += v_src3;
+ v_dst0 = v_fma(v_cvt_f64(v_1int0), v_cvt_f64(v_2int0), v_dst0);
+ v_dst1 = v_fma(v_cvt_f64_high(v_1int0), v_cvt_f64_high(v_2int0), v_dst1);
+ v_dst2 = v_fma(v_cvt_f64(v_1int1), v_cvt_f64(v_2int1), v_dst2);
+ v_dst3 = v_fma(v_cvt_f64_high(v_1int1), v_cvt_f64_high(v_2int1), v_dst3);
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
}
}
else if (cn == 3)
{
- for (; x <= len - /*cVectorWidth*/16; x += cVectorWidth)
+ for (; x <= len - cVectorWidth * 2; x += cVectorWidth)
{
- v_uint8x16 v_1src0, v_1src1, v_1src2, v_2src0, v_2src1, v_2src2;
+ v_uint8 v_1src0, v_1src1, v_1src2, v_2src0, v_2src1, v_2src2;
v_load_deinterleave(src1 + x * cn, v_1src0, v_1src1, v_1src2);
v_load_deinterleave(src2 + x * cn, v_2src0, v_2src1, v_2src2);
- v_uint16x8 v_1int0, v_1int1, v_1int2, v_2int0, v_2int1, v_2int2, dummy;
- v_expand(v_1src0, v_1int0, dummy);
- v_expand(v_1src1, v_1int1, dummy);
- v_expand(v_1src2, v_1int2, dummy);
- v_expand(v_2src0, v_2int0, dummy);
- v_expand(v_2src1, v_2int1, dummy);
- v_expand(v_2src2, v_2int2, dummy);
+ v_uint16 v_1int0 = v_expand_low(v_1src0);
+ v_uint16 v_1int1 = v_expand_low(v_1src1);
+ v_uint16 v_1int2 = v_expand_low(v_1src2);
+ v_uint16 v_2int0 = v_expand_low(v_2src0);
+ v_uint16 v_2int1 = v_expand_low(v_2src1);
+ v_uint16 v_2int2 = v_expand_low(v_2src2);
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_mask == v_0);
v_1int0 = v_1int0 & v_mask;
v_1int1 = v_1int1 & v_mask;
v_2int1 = v_2int1 & v_mask;
v_2int2 = v_2int2 & v_mask;
- v_uint32x4 v_1int00, v_1int01, v_1int10, v_1int11, v_1int20, v_1int21;
- v_uint32x4 v_2int00, v_2int01, v_2int10, v_2int11, v_2int20, v_2int21;
+ v_uint32 v_1int00, v_1int01, v_1int10, v_1int11, v_1int20, v_1int21;
+ v_uint32 v_2int00, v_2int01, v_2int10, v_2int11, v_2int20, v_2int21;
v_expand(v_1int0, v_1int00, v_1int01);
v_expand(v_1int1, v_1int10, v_1int11);
v_expand(v_1int2, v_1int20, v_1int21);
v_expand(v_2int0, v_2int00, v_2int01);
v_expand(v_2int1, v_2int10, v_2int11);
v_expand(v_2int2, v_2int20, v_2int21);
- v_float64x2 v_src00 = v_cvt_f64(v_reinterpret_as_s32(v_1int00)) * v_cvt_f64(v_reinterpret_as_s32(v_2int00));
- v_float64x2 v_src01 = v_cvt_f64_high(v_reinterpret_as_s32(v_1int00)) * v_cvt_f64_high(v_reinterpret_as_s32(v_2int00));
- v_float64x2 v_src02 = v_cvt_f64(v_reinterpret_as_s32(v_1int01)) * v_cvt_f64(v_reinterpret_as_s32(v_2int01));
- v_float64x2 v_src03 = v_cvt_f64_high(v_reinterpret_as_s32(v_1int01)) * v_cvt_f64_high(v_reinterpret_as_s32(v_2int01));
- v_float64x2 v_src10 = v_cvt_f64(v_reinterpret_as_s32(v_1int10)) * v_cvt_f64(v_reinterpret_as_s32(v_2int10));
- v_float64x2 v_src11 = v_cvt_f64_high(v_reinterpret_as_s32(v_1int10)) * v_cvt_f64_high(v_reinterpret_as_s32(v_2int10));
- v_float64x2 v_src12 = v_cvt_f64(v_reinterpret_as_s32(v_1int11)) * v_cvt_f64(v_reinterpret_as_s32(v_2int11));
- v_float64x2 v_src13 = v_cvt_f64_high(v_reinterpret_as_s32(v_1int11)) * v_cvt_f64_high(v_reinterpret_as_s32(v_2int11));
- v_float64x2 v_src20 = v_cvt_f64(v_reinterpret_as_s32(v_1int20)) * v_cvt_f64(v_reinterpret_as_s32(v_2int20));
- v_float64x2 v_src21 = v_cvt_f64_high(v_reinterpret_as_s32(v_1int20)) * v_cvt_f64_high(v_reinterpret_as_s32(v_2int20));
- v_float64x2 v_src22 = v_cvt_f64(v_reinterpret_as_s32(v_1int21)) * v_cvt_f64(v_reinterpret_as_s32(v_2int21));
- v_float64x2 v_src23 = v_cvt_f64_high(v_reinterpret_as_s32(v_1int21)) * v_cvt_f64_high(v_reinterpret_as_s32(v_2int21));
-
- v_float64x2 v_dst00, v_dst01, v_dst02, v_dst03, v_dst10, v_dst11, v_dst12, v_dst13, v_dst20, v_dst21, v_dst22, v_dst23;
+ v_float64 v_dst00, v_dst01, v_dst02, v_dst03, v_dst10, v_dst11, v_dst12, v_dst13, v_dst20, v_dst21, v_dst22, v_dst23;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 2) * cn, v_dst01, v_dst11, v_dst21);
- v_load_deinterleave(dst + (x + 4) * cn, v_dst02, v_dst12, v_dst22);
- v_load_deinterleave(dst + (x + 6) * cn, v_dst03, v_dst13, v_dst23);
-
- v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 2) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
- v_store_interleave(dst + (x + 4) * cn, v_dst02 + v_src02, v_dst12 + v_src12, v_dst22 + v_src22);
- v_store_interleave(dst + (x + 6) * cn, v_dst03 + v_src03, v_dst13 + v_src13, v_dst23 + v_src23);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step * 2) * cn, v_dst02, v_dst12, v_dst22);
+ v_load_deinterleave(dst + (x + step * 3) * cn, v_dst03, v_dst13, v_dst23);
+
+ v_dst00 = v_fma(v_cvt_f64(v_reinterpret_as_s32(v_1int00)), v_cvt_f64(v_reinterpret_as_s32(v_2int00)), v_dst00);
+ v_dst01 = v_fma(v_cvt_f64_high(v_reinterpret_as_s32(v_1int00)), v_cvt_f64_high(v_reinterpret_as_s32(v_2int00)), v_dst01);
+ v_dst02 = v_fma(v_cvt_f64(v_reinterpret_as_s32(v_1int01)), v_cvt_f64(v_reinterpret_as_s32(v_2int01)), v_dst02);
+ v_dst03 = v_fma(v_cvt_f64_high(v_reinterpret_as_s32(v_1int01)), v_cvt_f64_high(v_reinterpret_as_s32(v_2int01)), v_dst03);
+ v_dst10 = v_fma(v_cvt_f64(v_reinterpret_as_s32(v_1int10)), v_cvt_f64(v_reinterpret_as_s32(v_2int10)), v_dst10);
+ v_dst11 = v_fma(v_cvt_f64_high(v_reinterpret_as_s32(v_1int10)), v_cvt_f64_high(v_reinterpret_as_s32(v_2int10)), v_dst11);
+ v_dst12 = v_fma(v_cvt_f64(v_reinterpret_as_s32(v_1int11)), v_cvt_f64(v_reinterpret_as_s32(v_2int11)), v_dst12);
+ v_dst13 = v_fma(v_cvt_f64_high(v_reinterpret_as_s32(v_1int11)), v_cvt_f64_high(v_reinterpret_as_s32(v_2int11)), v_dst13);
+ v_dst20 = v_fma(v_cvt_f64(v_reinterpret_as_s32(v_1int20)), v_cvt_f64(v_reinterpret_as_s32(v_2int20)), v_dst20);
+ v_dst21 = v_fma(v_cvt_f64_high(v_reinterpret_as_s32(v_1int20)), v_cvt_f64_high(v_reinterpret_as_s32(v_2int20)), v_dst21);
+ v_dst22 = v_fma(v_cvt_f64(v_reinterpret_as_s32(v_1int21)), v_cvt_f64(v_reinterpret_as_s32(v_2int21)), v_dst22);
+ v_dst23 = v_fma(v_cvt_f64_high(v_reinterpret_as_s32(v_1int21)), v_cvt_f64_high(v_reinterpret_as_s32(v_2int21)), v_dst23);
+
+ v_store_interleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
+ v_store_interleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
+ v_store_interleave(dst + (x + step * 2) * cn, v_dst02, v_dst12, v_dst22);
+ v_store_interleave(dst + (x + step * 3) * cn, v_dst03, v_dst13, v_dst23);
}
}
}
-
+#endif // CV_SIMD_64F
accProd_general_(src1, src2, dst, mask, len, cn, x);
}
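+// note: each accProd_simd_ overload below vectorizes the same recurrence that
+// accProd_general_ applies to the leftover tail, roughly (unmasked case):
+//
+//   for (int i = x; i < len * cn; i++)
+//       dst[i] += (double)src1[i] * (double)src2[i];
+//
+// the SIMD bodies widen the narrow lanes to v_float64 and, where the mask
+// handling permits, fold the multiply-add into a single v_fma()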
void accProd_simd_(const ushort* src1, const ushort* src2, double* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 8;
+#if CV_SIMD_64F
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float64::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_1src = v_load(src1 + x);
- v_uint16x8 v_2src = v_load(src2 + x);
+ v_uint16 v_1src = vx_load(src1 + x);
+ v_uint16 v_2src = vx_load(src2 + x);
- v_uint32x4 v_1int_0, v_1int_1, v_2int_0, v_2int_1;
+ v_uint32 v_1int_0, v_1int_1, v_2int_0, v_2int_1;
v_expand(v_1src, v_1int_0, v_1int_1);
v_expand(v_2src, v_2int_0, v_2int_1);
- v_int32x4 v_1int0 = v_reinterpret_as_s32(v_1int_0);
- v_int32x4 v_1int1 = v_reinterpret_as_s32(v_1int_1);
- v_int32x4 v_2int0 = v_reinterpret_as_s32(v_2int_0);
- v_int32x4 v_2int1 = v_reinterpret_as_s32(v_2int_1);
+ v_int32 v_1int0 = v_reinterpret_as_s32(v_1int_0);
+ v_int32 v_1int1 = v_reinterpret_as_s32(v_1int_1);
+ v_int32 v_2int0 = v_reinterpret_as_s32(v_2int_0);
+ v_int32 v_2int1 = v_reinterpret_as_s32(v_2int_1);
- v_float64x2 v_src0 = v_cvt_f64(v_1int0) * v_cvt_f64(v_2int0);
- v_float64x2 v_src1 = v_cvt_f64_high(v_1int0) * v_cvt_f64_high(v_2int0);
- v_float64x2 v_src2 = v_cvt_f64(v_1int1) * v_cvt_f64(v_2int1);
- v_float64x2 v_src3 = v_cvt_f64_high(v_1int1) * v_cvt_f64_high(v_2int1);
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
+ v_dst0 = v_fma(v_cvt_f64(v_1int0), v_cvt_f64(v_2int0), v_dst0);
+ v_dst1 = v_fma(v_cvt_f64_high(v_1int0), v_cvt_f64_high(v_2int0), v_dst1);
+ v_dst2 = v_fma(v_cvt_f64(v_1int1), v_cvt_f64(v_2int1), v_dst2);
+ v_dst3 = v_fma(v_cvt_f64_high(v_1int1), v_cvt_f64_high(v_2int1), v_dst3);
- v_dst0 = v_dst0 + v_src0;
- v_dst1 = v_dst1 + v_src1;
- v_dst2 = v_dst2 + v_src2;
- v_dst3 = v_dst3 + v_src3;
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
}
}
else
{
- v_uint16x8 v_0 = v_setzero_u16();
+ v_uint16 v_0 = vx_setzero_u16();
if (cn == 1)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint16x8 v_1src = v_load(src1 + x);
- v_uint16x8 v_2src = v_load(src2 + x);
+ v_uint16 v_1src = vx_load(src1 + x);
+ v_uint16 v_2src = vx_load(src2 + x);
v_1src = v_1src & v_mask;
v_2src = v_2src & v_mask;
- v_uint32x4 v_1int_0, v_1int_1, v_2int_0, v_2int_1;
+ v_uint32 v_1int_0, v_1int_1, v_2int_0, v_2int_1;
v_expand(v_1src, v_1int_0, v_1int_1);
v_expand(v_2src, v_2int_0, v_2int_1);
- v_int32x4 v_1int0 = v_reinterpret_as_s32(v_1int_0);
- v_int32x4 v_1int1 = v_reinterpret_as_s32(v_1int_1);
- v_int32x4 v_2int0 = v_reinterpret_as_s32(v_2int_0);
- v_int32x4 v_2int1 = v_reinterpret_as_s32(v_2int_1);
+ v_int32 v_1int0 = v_reinterpret_as_s32(v_1int_0);
+ v_int32 v_1int1 = v_reinterpret_as_s32(v_1int_1);
+ v_int32 v_2int0 = v_reinterpret_as_s32(v_2int_0);
+ v_int32 v_2int1 = v_reinterpret_as_s32(v_2int_1);
- v_float64x2 v_src0 = v_cvt_f64(v_1int0) * v_cvt_f64(v_2int0);
- v_float64x2 v_src1 = v_cvt_f64_high(v_1int0) * v_cvt_f64_high(v_2int0);
- v_float64x2 v_src2 = v_cvt_f64(v_1int1) * v_cvt_f64(v_2int1);
- v_float64x2 v_src3 = v_cvt_f64_high(v_1int1) * v_cvt_f64_high(v_2int1);
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
+ v_dst0 = v_fma(v_cvt_f64(v_1int0), v_cvt_f64(v_2int0), v_dst0);
+ v_dst1 = v_fma(v_cvt_f64_high(v_1int0), v_cvt_f64_high(v_2int0), v_dst1);
+ v_dst2 = v_fma(v_cvt_f64(v_1int1), v_cvt_f64(v_2int1), v_dst2);
+ v_dst3 = v_fma(v_cvt_f64_high(v_1int1), v_cvt_f64_high(v_2int1), v_dst3);
- v_dst0 = v_dst0 + v_src0;
- v_dst1 = v_dst1 + v_src1;
- v_dst2 = v_dst2 + v_src2;
- v_dst3 = v_dst3 + v_src3;
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
}
}
else if (cn == 3)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_mask = v_load_expand(mask + x);
+ v_uint16 v_mask = vx_load_expand(mask + x);
v_mask = ~(v_mask == v_0);
- v_uint16x8 v_1src0, v_1src1, v_1src2, v_2src0, v_2src1, v_2src2;
+ v_uint16 v_1src0, v_1src1, v_1src2, v_2src0, v_2src1, v_2src2;
v_load_deinterleave(src1 + x * cn, v_1src0, v_1src1, v_1src2);
v_load_deinterleave(src2 + x * cn, v_2src0, v_2src1, v_2src2);
v_1src0 = v_1src0 & v_mask;
v_2src1 = v_2src1 & v_mask;
v_2src2 = v_2src2 & v_mask;
- v_uint32x4 v_1int_00, v_1int_01, v_2int_00, v_2int_01;
- v_uint32x4 v_1int_10, v_1int_11, v_2int_10, v_2int_11;
- v_uint32x4 v_1int_20, v_1int_21, v_2int_20, v_2int_21;
+ v_uint32 v_1int_00, v_1int_01, v_2int_00, v_2int_01;
+ v_uint32 v_1int_10, v_1int_11, v_2int_10, v_2int_11;
+ v_uint32 v_1int_20, v_1int_21, v_2int_20, v_2int_21;
v_expand(v_1src0, v_1int_00, v_1int_01);
v_expand(v_1src1, v_1int_10, v_1int_11);
v_expand(v_1src2, v_1int_20, v_1int_21);
v_expand(v_2src0, v_2int_00, v_2int_01);
v_expand(v_2src1, v_2int_10, v_2int_11);
v_expand(v_2src2, v_2int_20, v_2int_21);
- v_int32x4 v_1int00 = v_reinterpret_as_s32(v_1int_00);
- v_int32x4 v_1int01 = v_reinterpret_as_s32(v_1int_01);
- v_int32x4 v_1int10 = v_reinterpret_as_s32(v_1int_10);
- v_int32x4 v_1int11 = v_reinterpret_as_s32(v_1int_11);
- v_int32x4 v_1int20 = v_reinterpret_as_s32(v_1int_20);
- v_int32x4 v_1int21 = v_reinterpret_as_s32(v_1int_21);
- v_int32x4 v_2int00 = v_reinterpret_as_s32(v_2int_00);
- v_int32x4 v_2int01 = v_reinterpret_as_s32(v_2int_01);
- v_int32x4 v_2int10 = v_reinterpret_as_s32(v_2int_10);
- v_int32x4 v_2int11 = v_reinterpret_as_s32(v_2int_11);
- v_int32x4 v_2int20 = v_reinterpret_as_s32(v_2int_20);
- v_int32x4 v_2int21 = v_reinterpret_as_s32(v_2int_21);
-
- v_float64x2 v_src00 = v_cvt_f64(v_1int00) * v_cvt_f64(v_2int00);
- v_float64x2 v_src01 = v_cvt_f64_high(v_1int00) * v_cvt_f64_high(v_2int00);
- v_float64x2 v_src02 = v_cvt_f64(v_1int01) * v_cvt_f64(v_2int01);
- v_float64x2 v_src03 = v_cvt_f64_high(v_1int01) * v_cvt_f64_high(v_2int01);
- v_float64x2 v_src10 = v_cvt_f64(v_1int10) * v_cvt_f64(v_2int10);
- v_float64x2 v_src11 = v_cvt_f64_high(v_1int10) * v_cvt_f64_high(v_2int10);
- v_float64x2 v_src12 = v_cvt_f64(v_1int11) * v_cvt_f64(v_2int11);
- v_float64x2 v_src13 = v_cvt_f64_high(v_1int11) * v_cvt_f64_high(v_2int11);
- v_float64x2 v_src20 = v_cvt_f64(v_1int20) * v_cvt_f64(v_2int20);
- v_float64x2 v_src21 = v_cvt_f64_high(v_1int20) * v_cvt_f64_high(v_2int20);
- v_float64x2 v_src22 = v_cvt_f64(v_1int21) * v_cvt_f64(v_2int21);
- v_float64x2 v_src23 = v_cvt_f64_high(v_1int21) * v_cvt_f64_high(v_2int21);
-
- v_float64x2 v_dst00, v_dst01, v_dst02, v_dst03;
- v_float64x2 v_dst10, v_dst11, v_dst12, v_dst13;
- v_float64x2 v_dst20, v_dst21, v_dst22, v_dst23;
+ v_int32 v_1int00 = v_reinterpret_as_s32(v_1int_00);
+ v_int32 v_1int01 = v_reinterpret_as_s32(v_1int_01);
+ v_int32 v_1int10 = v_reinterpret_as_s32(v_1int_10);
+ v_int32 v_1int11 = v_reinterpret_as_s32(v_1int_11);
+ v_int32 v_1int20 = v_reinterpret_as_s32(v_1int_20);
+ v_int32 v_1int21 = v_reinterpret_as_s32(v_1int_21);
+ v_int32 v_2int00 = v_reinterpret_as_s32(v_2int_00);
+ v_int32 v_2int01 = v_reinterpret_as_s32(v_2int_01);
+ v_int32 v_2int10 = v_reinterpret_as_s32(v_2int_10);
+ v_int32 v_2int11 = v_reinterpret_as_s32(v_2int_11);
+ v_int32 v_2int20 = v_reinterpret_as_s32(v_2int_20);
+ v_int32 v_2int21 = v_reinterpret_as_s32(v_2int_21);
+
+ v_float64 v_dst00, v_dst01, v_dst02, v_dst03;
+ v_float64 v_dst10, v_dst11, v_dst12, v_dst13;
+ v_float64 v_dst20, v_dst21, v_dst22, v_dst23;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 2) * cn, v_dst01, v_dst11, v_dst21);
- v_load_deinterleave(dst + (x + 4) * cn, v_dst02, v_dst12, v_dst22);
- v_load_deinterleave(dst + (x + 6) * cn, v_dst03, v_dst13, v_dst23);
-
- v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 2) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
- v_store_interleave(dst + (x + 4) * cn, v_dst02 + v_src02, v_dst12 + v_src12, v_dst22 + v_src22);
- v_store_interleave(dst + (x + 6) * cn, v_dst03 + v_src03, v_dst13 + v_src13, v_dst23 + v_src23);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step * 2) * cn, v_dst02, v_dst12, v_dst22);
+ v_load_deinterleave(dst + (x + step * 3) * cn, v_dst03, v_dst13, v_dst23);
+
+ v_dst00 = v_fma(v_cvt_f64(v_1int00), v_cvt_f64(v_2int00), v_dst00);
+ v_dst01 = v_fma(v_cvt_f64_high(v_1int00), v_cvt_f64_high(v_2int00), v_dst01);
+ v_dst02 = v_fma(v_cvt_f64(v_1int01), v_cvt_f64(v_2int01), v_dst02);
+ v_dst03 = v_fma(v_cvt_f64_high(v_1int01), v_cvt_f64_high(v_2int01), v_dst03);
+ v_dst10 = v_fma(v_cvt_f64(v_1int10), v_cvt_f64(v_2int10), v_dst10);
+ v_dst11 = v_fma(v_cvt_f64_high(v_1int10), v_cvt_f64_high(v_2int10), v_dst11);
+ v_dst12 = v_fma(v_cvt_f64(v_1int11), v_cvt_f64(v_2int11), v_dst12);
+ v_dst13 = v_fma(v_cvt_f64_high(v_1int11), v_cvt_f64_high(v_2int11), v_dst13);
+ v_dst20 = v_fma(v_cvt_f64(v_1int20), v_cvt_f64(v_2int20), v_dst20);
+ v_dst21 = v_fma(v_cvt_f64_high(v_1int20), v_cvt_f64_high(v_2int20), v_dst21);
+ v_dst22 = v_fma(v_cvt_f64(v_1int21), v_cvt_f64(v_2int21), v_dst22);
+ v_dst23 = v_fma(v_cvt_f64_high(v_1int21), v_cvt_f64_high(v_2int21), v_dst23);
+
+ v_store_interleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
+ v_store_interleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
+ v_store_interleave(dst + (x + step * 2) * cn, v_dst02, v_dst12, v_dst22);
+ v_store_interleave(dst + (x + step * 3) * cn, v_dst03, v_dst13, v_dst23);
}
}
}
-
+#endif // CV_SIMD_64F
accProd_general_(src1, src2, dst, mask, len, cn, x);
}
void accProd_simd_(const float* src1, const float* src2, double* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 4;
+#if CV_SIMD_64F
+ const int cVectorWidth = v_float32::nlanes;
+ const int step = v_float64::nlanes;
if (!mask)
{
int size = len * cn;
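+ // note: the universal intrinsics only use 256-bit registers with AVX2,
+ // so AVX-only builds keep a hand-written 256-bit path for the unmasked
+ // case; AVX2 builds take the vx_/v_fma branch below instead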
+ #if CV_AVX && !CV_AVX2
+ for ( ; x <= size - 8 ; x += 8)
+ {
+ __m256 v_1src = _mm256_loadu_ps(src1 + x);
+ __m256 v_2src = _mm256_loadu_ps(src2 + x);
+ __m256d v_src00 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_1src,0));
+ __m256d v_src01 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_1src,1));
+ __m256d v_src10 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_2src,0));
+ __m256d v_src11 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_2src,1));
+ __m256d v_dst0 = _mm256_loadu_pd(dst + x);
+ __m256d v_dst1 = _mm256_loadu_pd(dst + x + 4);
+ __m256d v_src0 = _mm256_mul_pd(v_src00, v_src10);
+ __m256d v_src1 = _mm256_mul_pd(v_src01, v_src11);
+ v_dst0 = _mm256_add_pd(v_src0, v_dst0);
+ v_dst1 = _mm256_add_pd(v_src1, v_dst1);
+ _mm256_storeu_pd(dst + x, v_dst0);
+ _mm256_storeu_pd(dst + x + 4, v_dst1);
+ }
+ #else
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_float32x4 v_1src = v_load(src1 + x);
- v_float32x4 v_2src = v_load(src2 + x);
+ v_float32 v_1src = vx_load(src1 + x);
+ v_float32 v_2src = vx_load(src2 + x);
- v_float64x2 v_1src0 = v_cvt_f64(v_1src);
- v_float64x2 v_1src1 = v_cvt_f64_high(v_1src);
- v_float64x2 v_2src0 = v_cvt_f64(v_2src);
- v_float64x2 v_2src1 = v_cvt_f64_high(v_2src);
+ v_float64 v_1src0 = v_cvt_f64(v_1src);
+ v_float64 v_1src1 = v_cvt_f64_high(v_1src);
+ v_float64 v_2src0 = v_cvt_f64(v_2src);
+ v_float64 v_2src1 = v_cvt_f64_high(v_2src);
- v_store(dst + x, v_load(dst + x) + (v_1src0 * v_2src0));
- v_store(dst + x + 2, v_load(dst + x + 2) + (v_1src1 * v_2src1));
+ v_store(dst + x, v_fma(v_1src0, v_2src0, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_1src1, v_2src1, vx_load(dst + x + step)));
}
+ #endif // CV_AVX && !CV_AVX2
}
else
{
- v_uint32x4 v_0 = v_setzero_u32();
+ v_uint32 v_0 = vx_setzero_u32();
if (cn == 1)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint32x4 v_mask = v_load_expand_q(mask + x);
+ v_uint32 v_mask = vx_load_expand_q(mask + x);
v_mask = ~(v_mask == v_0);
- v_float32x4 v_1src = v_load(src1 + x);
- v_float32x4 v_2src = v_load(src2 + x);
+ v_float32 v_1src = vx_load(src1 + x);
+ v_float32 v_2src = vx_load(src2 + x);
v_1src = v_1src & v_reinterpret_as_f32(v_mask);
v_2src = v_2src & v_reinterpret_as_f32(v_mask);
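+ // a bitwise AND with the reinterpreted all-ones/all-zeros mask zeroes
+ // the rejected float lanes without converting the mask itself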
- v_float64x2 v_1src0 = v_cvt_f64(v_1src);
- v_float64x2 v_1src1 = v_cvt_f64_high(v_1src);
- v_float64x2 v_2src0 = v_cvt_f64(v_2src);
- v_float64x2 v_2src1 = v_cvt_f64_high(v_2src);
+ v_float64 v_1src0 = v_cvt_f64(v_1src);
+ v_float64 v_1src1 = v_cvt_f64_high(v_1src);
+ v_float64 v_2src0 = v_cvt_f64(v_2src);
+ v_float64 v_2src1 = v_cvt_f64_high(v_2src);
- v_store(dst + x, v_load(dst + x) + (v_1src0 * v_2src0));
- v_store(dst + x + 2, v_load(dst + x + 2) + (v_1src1 * v_2src1));
+ v_store(dst + x, v_fma(v_1src0, v_2src0, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_1src1, v_2src1, vx_load(dst + x + step)));
}
}
else if (cn == 3)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint32x4 v_mask = v_load_expand_q(mask + x);
+ v_uint32 v_mask = vx_load_expand_q(mask + x);
v_mask = ~(v_mask == v_0);
- v_float32x4 v_1src0, v_1src1, v_1src2, v_2src0, v_2src1, v_2src2;
+ v_float32 v_1src0, v_1src1, v_1src2, v_2src0, v_2src1, v_2src2;
v_load_deinterleave(src1 + x * cn, v_1src0, v_1src1, v_1src2);
v_load_deinterleave(src2 + x * cn, v_2src0, v_2src1, v_2src2);
v_1src0 = v_1src0 & v_reinterpret_as_f32(v_mask);
v_2src1 = v_2src1 & v_reinterpret_as_f32(v_mask);
v_2src2 = v_2src2 & v_reinterpret_as_f32(v_mask);
- v_float64x2 v_src00 = v_cvt_f64(v_1src0) * v_cvt_f64(v_2src0);
- v_float64x2 v_src01 = v_cvt_f64_high(v_1src0) * v_cvt_f64_high(v_2src0);
- v_float64x2 v_src10 = v_cvt_f64(v_1src1) * v_cvt_f64(v_2src1);
- v_float64x2 v_src11 = v_cvt_f64_high(v_1src1) * v_cvt_f64_high(v_2src1);
- v_float64x2 v_src20 = v_cvt_f64(v_1src2) * v_cvt_f64(v_2src2);
- v_float64x2 v_src21 = v_cvt_f64_high(v_1src2) * v_cvt_f64_high(v_2src2);
-
- v_float64x2 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
+ v_float64 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 2) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
- v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 2) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
+ v_dst00 = v_fma(v_cvt_f64(v_1src0), v_cvt_f64(v_2src0), v_dst00);
+ v_dst01 = v_fma(v_cvt_f64_high(v_1src0), v_cvt_f64_high(v_2src0), v_dst01);
+ v_dst10 = v_fma(v_cvt_f64(v_1src1), v_cvt_f64(v_2src1), v_dst10);
+ v_dst11 = v_fma(v_cvt_f64_high(v_1src1), v_cvt_f64_high(v_2src1), v_dst11);
+ v_dst20 = v_fma(v_cvt_f64(v_1src2), v_cvt_f64(v_2src2), v_dst20);
+ v_dst21 = v_fma(v_cvt_f64_high(v_1src2), v_cvt_f64_high(v_2src2), v_dst21);
+
+ v_store_interleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
+ v_store_interleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
}
}
}
-
+#endif // CV_SIMD_64F
accProd_general_(src1, src2, dst, mask, len, cn, x);
}
void accProd_simd_(const double* src1, const double* src2, double* dst, const uchar* mask, int len, int cn)
{
int x = 0;
- const int cVectorWidth = 4;
+#if CV_SIMD_64F
+ const int cVectorWidth = v_float64::nlanes * 2;
+ const int step = v_float64::nlanes;
if (!mask)
{
int size = len * cn;
+ #if CV_AVX && !CV_AVX2
+ for ( ; x <= size - 4 ; x += 4)
+ {
+ __m256d v_src0 = _mm256_loadu_pd(src1 + x);
+ __m256d v_src1 = _mm256_loadu_pd(src2 + x);
+ __m256d v_dst = _mm256_loadu_pd(dst + x);
+ v_src0 = _mm256_mul_pd(v_src0, v_src1);
+ v_dst = _mm256_add_pd(v_dst, v_src0);
+ _mm256_storeu_pd(dst + x, v_dst);
+ }
+ #else
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_float64x2 v_src00 = v_load(src1 + x);
- v_float64x2 v_src01 = v_load(src1 + x + 2);
- v_float64x2 v_src10 = v_load(src2 + x);
- v_float64x2 v_src11 = v_load(src2 + x + 2);
+ v_float64 v_src00 = vx_load(src1 + x);
+ v_float64 v_src01 = vx_load(src1 + x + step);
+ v_float64 v_src10 = vx_load(src2 + x);
+ v_float64 v_src11 = vx_load(src2 + x + step);
- v_store(dst + x, v_load(dst + x) + (v_src00 * v_src10));
- v_store(dst + x + 2, v_load(dst + x + 2) + (v_src01 * v_src11));
+ v_store(dst + x, v_fma(v_src00, v_src10, vx_load(dst + x)));
+ v_store(dst + x + step, v_fma(v_src01, v_src11, vx_load(dst + x + step)));
}
+ #endif // CV_AVX && !CV_AVX2
}
else
{
- v_uint64x2 v_0 = v_setzero_u64();
+ // TODO: try v_fma() for the masked path as well
+ v_uint64 v_0 = vx_setzero_u64();
if (cn == 1)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint32x4 v_mask32 = v_load_expand_q(mask + x);
- v_uint64x2 v_masku640, v_masku641;
+ v_uint32 v_mask32 = vx_load_expand_q(mask + x);
+ v_uint64 v_masku640, v_masku641;
v_expand(v_mask32, v_masku640, v_masku641);
- v_float64x2 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
- v_float64x2 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
+ v_float64 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
+ v_float64 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
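+ // the mask is widened in two steps (u8 -> u32 via vx_load_expand_q,
+ // then u32 -> u64) so it can be compared and reinterpreted at the
+ // f64 lane width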
- v_float64x2 v_src00 = v_load(src1 + x);
- v_float64x2 v_src01 = v_load(src1 + x + 2);
- v_float64x2 v_src10 = v_load(src2 + x);
- v_float64x2 v_src11 = v_load(src2 + x + 2);
+ v_float64 v_src00 = vx_load(src1 + x);
+ v_float64 v_src01 = vx_load(src1 + x + step);
+ v_float64 v_src10 = vx_load(src2 + x);
+ v_float64 v_src11 = vx_load(src2 + x + step);
- v_store(dst + x, v_load(dst + x) + ((v_src00 * v_src10) & v_mask0));
- v_store(dst + x + 2, v_load(dst + x + 2) + ((v_src01 * v_src11) & v_mask1));
+ v_store(dst + x, vx_load(dst + x) + ((v_src00 * v_src10) & v_mask0));
+ v_store(dst + x + step, vx_load(dst + x + step) + ((v_src01 * v_src11) & v_mask1));
}
}
else if (cn == 3)
{
for (; x <= len - cVectorWidth; x += cVectorWidth)
{
- v_uint32x4 v_mask32 = v_load_expand_q(mask + x);
- v_uint64x2 v_masku640, v_masku641;
+ v_uint32 v_mask32 = vx_load_expand_q(mask + x);
+ v_uint64 v_masku640, v_masku641;
v_expand(v_mask32, v_masku640, v_masku641);
- v_float64x2 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
- v_float64x2 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
+ v_float64 v_mask0 = v_reinterpret_as_f64(~(v_masku640 == v_0));
+ v_float64 v_mask1 = v_reinterpret_as_f64(~(v_masku641 == v_0));
- v_float64x2 v_1src00, v_1src01, v_1src10, v_1src11, v_1src20, v_1src21;
- v_float64x2 v_2src00, v_2src01, v_2src10, v_2src11, v_2src20, v_2src21;
+ v_float64 v_1src00, v_1src01, v_1src10, v_1src11, v_1src20, v_1src21;
+ v_float64 v_2src00, v_2src01, v_2src10, v_2src11, v_2src20, v_2src21;
v_load_deinterleave(src1 + x * cn, v_1src00, v_1src10, v_1src20);
- v_load_deinterleave(src1 + (x + 2) * cn, v_1src01, v_1src11, v_1src21);
+ v_load_deinterleave(src1 + (x + step) * cn, v_1src01, v_1src11, v_1src21);
v_load_deinterleave(src2 + x * cn, v_2src00, v_2src10, v_2src20);
- v_load_deinterleave(src2 + (x + 2) * cn, v_2src01, v_2src11, v_2src21);
- v_float64x2 v_src00 = (v_1src00 & v_mask0) * v_2src00;
- v_float64x2 v_src01 = (v_1src01 & v_mask1) * v_2src01;
- v_float64x2 v_src10 = (v_1src10 & v_mask0) * v_2src10;
- v_float64x2 v_src11 = (v_1src11 & v_mask1) * v_2src11;
- v_float64x2 v_src20 = (v_1src20 & v_mask0) * v_2src20;
- v_float64x2 v_src21 = (v_1src21 & v_mask1) * v_2src21;
-
- v_float64x2 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
+ v_load_deinterleave(src2 + (x + step) * cn, v_2src01, v_2src11, v_2src21);
+ v_float64 v_src00 = (v_1src00 & v_mask0) * v_2src00;
+ v_float64 v_src01 = (v_1src01 & v_mask1) * v_2src01;
+ v_float64 v_src10 = (v_1src10 & v_mask0) * v_2src10;
+ v_float64 v_src11 = (v_1src11 & v_mask1) * v_2src11;
+ v_float64 v_src20 = (v_1src20 & v_mask0) * v_2src20;
+ v_float64 v_src21 = (v_1src21 & v_mask1) * v_2src21;
+
+ v_float64 v_dst00, v_dst01, v_dst10, v_dst11, v_dst20, v_dst21;
v_load_deinterleave(dst + x * cn, v_dst00, v_dst10, v_dst20);
- v_load_deinterleave(dst + (x + 2) * cn, v_dst01, v_dst11, v_dst21);
+ v_load_deinterleave(dst + (x + step) * cn, v_dst01, v_dst11, v_dst21);
v_store_interleave(dst + x * cn, v_dst00 + v_src00, v_dst10 + v_src10, v_dst20 + v_src20);
- v_store_interleave(dst + (x + 2) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
+ v_store_interleave(dst + (x + step) * cn, v_dst01 + v_src01, v_dst11 + v_src11, v_dst21 + v_src21);
}
}
}
-
+#endif // CV_SIMD_64F
accProd_general_(src1, src2, dst, mask, len, cn, x);
}
-#else
-void accProd_simd_(const uchar* src1, const uchar* src2, double* dst, const uchar* mask, int len, int cn)
-{
- accProd_general_(src1, src2, dst, mask, len, cn, 0);
-}
-
-void accProd_simd_(const ushort* src1, const ushort* src2, double* dst, const uchar* mask, int len, int cn)
-{
- accProd_general_(src1, src2, dst, mask, len, cn, 0);
-}
-
-void accProd_simd_(const float* src1, const float* src2, double* dst, const uchar* mask, int len, int cn)
-{
- accProd_general_(src1, src2, dst, mask, len, cn, 0);
-}
-
-void accProd_simd_(const double* src1, const double* src2, double* dst, const uchar* mask, int len, int cn)
-{
- accProd_general_(src1, src2, dst, mask, len, cn, 0);
-}
-#endif
// running weight accumulate optimized with universal intrinsics
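+// note: every accW_simd_ overload evaluates the running weighted average
+//
+//   dst[i] = (1 - alpha) * dst[i] + alpha * src[i]
+//
+// with v_beta = 1 - alpha preloaded, each register pair costs one
+// v_fma(v_dst, v_beta, v_src * v_alpha) plus a single multiply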
void accW_simd_(const uchar* src, float* dst, const uchar* mask, int len, int cn, double alpha)
{
int x = 0;
- const v_float32x4 v_alpha = v_setall_f32((float)alpha);
- const v_float32x4 v_beta = v_setall_f32((float)(1.0f - alpha));
- const int cVectorWidth = 16;
+#if CV_SIMD
+ const v_float32 v_alpha = vx_setall_f32((float)alpha);
+ const v_float32 v_beta = vx_setall_f32((float)(1.0f - alpha));
+ const int cVectorWidth = v_uint8::nlanes;
+ const int step = v_float32::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint8x16 v_src = v_load(src + x);
+ v_uint8 v_src = vx_load(src + x);
- v_uint16x8 v_src0, v_src1;
+ v_uint16 v_src0, v_src1;
v_expand(v_src, v_src0, v_src1);
- v_uint32x4 v_src00, v_src01, v_src10, v_src11;
+ v_uint32 v_src00, v_src01, v_src10, v_src11;
v_expand(v_src0, v_src00, v_src01);
v_expand(v_src1, v_src10, v_src11);
- v_float32x4 v_dst00 = v_load(dst + x);
- v_float32x4 v_dst01 = v_load(dst + x + 4);
- v_float32x4 v_dst10 = v_load(dst + x + 8);
- v_float32x4 v_dst11 = v_load(dst + x + 12);
+ v_float32 v_dst00 = vx_load(dst + x);
+ v_float32 v_dst01 = vx_load(dst + x + step);
+ v_float32 v_dst10 = vx_load(dst + x + step * 2);
+ v_float32 v_dst11 = vx_load(dst + x + step * 3);
- v_dst00 = (v_dst00 * v_beta) + (v_cvt_f32(v_reinterpret_as_s32(v_src00)) * v_alpha);
- v_dst01 = (v_dst01 * v_beta) + (v_cvt_f32(v_reinterpret_as_s32(v_src01)) * v_alpha);
- v_dst10 = (v_dst10 * v_beta) + (v_cvt_f32(v_reinterpret_as_s32(v_src10)) * v_alpha);
- v_dst11 = (v_dst11 * v_beta) + (v_cvt_f32(v_reinterpret_as_s32(v_src11)) * v_alpha);
+ v_dst00 = v_fma(v_dst00, v_beta, v_cvt_f32(v_reinterpret_as_s32(v_src00)) * v_alpha);
+ v_dst01 = v_fma(v_dst01, v_beta, v_cvt_f32(v_reinterpret_as_s32(v_src01)) * v_alpha);
+ v_dst10 = v_fma(v_dst10, v_beta, v_cvt_f32(v_reinterpret_as_s32(v_src10)) * v_alpha);
+ v_dst11 = v_fma(v_dst11, v_beta, v_cvt_f32(v_reinterpret_as_s32(v_src11)) * v_alpha);
v_store(dst + x, v_dst00);
- v_store(dst + x + 4, v_dst01);
- v_store(dst + x + 8, v_dst10);
- v_store(dst + x + 12, v_dst11);
+ v_store(dst + x + step, v_dst01);
+ v_store(dst + x + step * 2, v_dst10);
+ v_store(dst + x + step * 3, v_dst11);
}
}
-
+#endif // CV_SIMD
accW_general_(src, dst, mask, len, cn, alpha, x);
}
void accW_simd_(const ushort* src, float* dst, const uchar* mask, int len, int cn, double alpha)
{
int x = 0;
- const v_float32x4 v_alpha = v_setall_f32((float)alpha);
- const v_float32x4 v_beta = v_setall_f32((float)(1.0f - alpha));
- const int cVectorWidth = 8;
+#if CV_SIMD
+ const v_float32 v_alpha = vx_setall_f32((float)alpha);
+ const v_float32 v_beta = vx_setall_f32((float)(1.0f - alpha));
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float32::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_src = v_load(src + x);
- v_uint32x4 v_int0, v_int1;
+ v_uint16 v_src = vx_load(src + x);
+ v_uint32 v_int0, v_int1;
v_expand(v_src, v_int0, v_int1);
- v_float32x4 v_src0 = v_cvt_f32(v_reinterpret_as_s32(v_int0));
- v_float32x4 v_src1 = v_cvt_f32(v_reinterpret_as_s32(v_int1));
- v_src0 = v_src0 * v_alpha;
- v_src1 = v_src1 * v_alpha;
+ v_float32 v_dst0 = vx_load(dst + x);
+ v_float32 v_dst1 = vx_load(dst + x + step);
+ v_dst0 = v_fma(v_dst0, v_beta, v_cvt_f32(v_reinterpret_as_s32(v_int0)) * v_alpha);
+ v_dst1 = v_fma(v_dst1, v_beta, v_cvt_f32(v_reinterpret_as_s32(v_int1)) * v_alpha);
- v_float32x4 v_dst0 = v_load(dst + x) * v_beta;
- v_float32x4 v_dst1 = v_load(dst + x + 4) * v_beta;
-
- v_store(dst + x, v_dst0 + v_src0);
- v_store(dst + x + 4, v_dst1 + v_src1);
+ v_store(dst + x, v_dst0);
+ v_store(dst + x + step, v_dst1);
}
}
-
+#endif // CV_SIMD
accW_general_(src, dst, mask, len, cn, alpha, x);
}
void accW_simd_(const float* src, float* dst, const uchar* mask, int len, int cn, double alpha)
{
int x = 0;
- const v_float32x4 v_alpha = v_setall_f32((float)alpha);
- const v_float32x4 v_beta = v_setall_f32((float)(1.0f - alpha));
- const int cVectorWidth = 8;
+#if CV_AVX && !CV_AVX2
+ const __m256 v_alpha = _mm256_set1_ps((float)alpha);
+ const __m256 v_beta = _mm256_set1_ps((float)(1.0f - alpha));
+ const int cVectorWidth = 16;
if (!mask)
{
int size = len * cn;
- for (; x <= size - cVectorWidth; x += cVectorWidth)
+ for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
{
- v_store(dst + x, ((v_load(dst + x) * v_beta) + (v_load(src + x) * v_alpha)));
- v_store(dst + x + 4, ((v_load(dst + x + 4) * v_beta) + (v_load(src + x + 4) * v_alpha)));
- }
+ _mm256_storeu_ps(dst + x, _mm256_add_ps(_mm256_mul_ps(_mm256_loadu_ps(dst + x), v_beta), _mm256_mul_ps(_mm256_loadu_ps(src + x), v_alpha)));
+ _mm256_storeu_ps(dst + x + 8, _mm256_add_ps(_mm256_mul_ps(_mm256_loadu_ps(dst + x + 8), v_beta), _mm256_mul_ps(_mm256_loadu_ps(src + x + 8), v_alpha)));
+ }
}
-
- accW_general_(src, dst, mask, len, cn, alpha, x);
-}
-#if CV_SIMD128_64F
-void accW_simd_(const uchar* src, double* dst, const uchar* mask, int len, int cn, double alpha)
-{
- int x = 0;
- const v_float64x2 v_alpha = v_setall_f64(alpha);
- const v_float64x2 v_beta = v_setall_f64(1.0f - alpha);
- const int cVectorWidth = 8;
+#elif CV_SIMD
+ const v_float32 v_alpha = vx_setall_f32((float)alpha);
+ const v_float32 v_beta = vx_setall_f32((float)(1.0f - alpha));
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float32::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_src16 = v_load_expand(src + x);
+ v_float32 v_dst0 = vx_load(dst + x);
+ v_float32 v_dst1 = vx_load(dst + x + step);
- v_uint32x4 v_int_0, v_int_1;
- v_expand(v_src16, v_int_0, v_int_1);
-
- v_int32x4 v_int0 = v_reinterpret_as_s32(v_int_0);
- v_int32x4 v_int1 = v_reinterpret_as_s32(v_int_1);
-
- v_float64x2 v_src0 = v_cvt_f64(v_int0);
- v_float64x2 v_src1 = v_cvt_f64_high(v_int0);
- v_float64x2 v_src2 = v_cvt_f64(v_int1);
- v_float64x2 v_src3 = v_cvt_f64_high(v_int1);
-
- v_float64x2 v_dst0 = v_load(dst + x);
- v_float64x2 v_dst1 = v_load(dst + x + 2);
- v_float64x2 v_dst2 = v_load(dst + x + 4);
- v_float64x2 v_dst3 = v_load(dst + x + 6);
-
- v_dst0 = (v_dst0 * v_beta) + (v_src0 * v_alpha);
- v_dst1 = (v_dst1 * v_beta) + (v_src1 * v_alpha);
- v_dst2 = (v_dst2 * v_beta) + (v_src2 * v_alpha);
- v_dst3 = (v_dst3 * v_beta) + (v_src3 * v_alpha);
+ v_dst0 = v_fma(v_dst0, v_beta, vx_load(src + x) * v_alpha);
+ v_dst1 = v_fma(v_dst1, v_beta, vx_load(src + x + step) * v_alpha);
v_store(dst + x, v_dst0);
- v_store(dst + x + 2, v_dst1);
- v_store(dst + x + 4, v_dst2);
- v_store(dst + x + 6, v_dst3);
+ v_store(dst + x + step, v_dst1);
}
}
-
+#endif // CV_SIMD
accW_general_(src, dst, mask, len, cn, alpha, x);
}
-void accW_simd_(const ushort* src, double* dst, const uchar* mask, int len, int cn, double alpha)
+void accW_simd_(const uchar* src, double* dst, const uchar* mask, int len, int cn, double alpha)
{
int x = 0;
- const v_float64x2 v_alpha = v_setall_f64(alpha);
- const v_float64x2 v_beta = v_setall_f64(1.0f - alpha);
- const int cVectorWidth = 8;
+#if CV_SIMD_64F
+ const v_float64 v_alpha = vx_setall_f64(alpha);
+ const v_float64 v_beta = vx_setall_f64(1.0f - alpha);
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float64::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_uint16x8 v_src = v_load(src + x);
- v_uint32x4 v_int_0, v_int_1;
- v_expand(v_src, v_int_0, v_int_1);
+ v_uint16 v_src16 = vx_load_expand(src + x);
- v_int32x4 v_int0 = v_reinterpret_as_s32(v_int_0);
- v_int32x4 v_int1 = v_reinterpret_as_s32(v_int_1);
+ v_uint32 v_int_0, v_int_1;
+ v_expand(v_src16, v_int_0, v_int_1);
- v_float64x2 v_src00 = v_cvt_f64(v_int0);
- v_float64x2 v_src01 = v_cvt_f64_high(v_int0);
- v_float64x2 v_src10 = v_cvt_f64(v_int1);
- v_float64x2 v_src11 = v_cvt_f64_high(v_int1);
+ v_int32 v_int0 = v_reinterpret_as_s32(v_int_0);
+ v_int32 v_int1 = v_reinterpret_as_s32(v_int_1);
- v_float64x2 v_dst00 = v_load(dst + x);
- v_float64x2 v_dst01 = v_load(dst + x + 2);
- v_float64x2 v_dst10 = v_load(dst + x + 4);
- v_float64x2 v_dst11 = v_load(dst + x + 6);
+ v_float64 v_src0 = v_cvt_f64(v_int0);
+ v_float64 v_src1 = v_cvt_f64_high(v_int0);
+ v_float64 v_src2 = v_cvt_f64(v_int1);
+ v_float64 v_src3 = v_cvt_f64_high(v_int1);
- v_dst00 = (v_dst00 * v_beta) + (v_src00 * v_alpha);
- v_dst01 = (v_dst01 * v_beta) + (v_src01 * v_alpha);
- v_dst10 = (v_dst10 * v_beta) + (v_src10 * v_alpha);
- v_dst11 = (v_dst11 * v_beta) + (v_src11 * v_alpha);
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+ v_float64 v_dst2 = vx_load(dst + x + step * 2);
+ v_float64 v_dst3 = vx_load(dst + x + step * 3);
- v_store(dst + x, v_dst00);
- v_store(dst + x + 2, v_dst01);
- v_store(dst + x + 4, v_dst10);
- v_store(dst + x + 6, v_dst11);
+ v_dst0 = v_fma(v_dst0, v_beta, v_src0 * v_alpha);
+ v_dst1 = v_fma(v_dst1, v_beta, v_src1 * v_alpha);
+ v_dst2 = v_fma(v_dst2, v_beta, v_src2 * v_alpha);
+ v_dst3 = v_fma(v_dst3, v_beta, v_src3 * v_alpha);
+
+ v_store(dst + x, v_dst0);
+ v_store(dst + x + step, v_dst1);
+ v_store(dst + x + step * 2, v_dst2);
+ v_store(dst + x + step * 3, v_dst3);
}
}
-
+#endif // CV_SIMD_64F
accW_general_(src, dst, mask, len, cn, alpha, x);
}
-void accW_simd_(const float* src, double* dst, const uchar* mask, int len, int cn, double alpha)
+void accW_simd_(const ushort* src, double* dst, const uchar* mask, int len, int cn, double alpha)
{
int x = 0;
- const v_float64x2 v_alpha = v_setall_f64(alpha);
- const v_float64x2 v_beta = v_setall_f64(1.0f - alpha);
- const int cVectorWidth = 8;
+#if CV_SIMD_64F
+ const v_float64 v_alpha = vx_setall_f64(alpha);
+ const v_float64 v_beta = vx_setall_f64(1.0f - alpha);
+ const int cVectorWidth = v_uint16::nlanes;
+ const int step = v_float64::nlanes;
if (!mask)
{
int size = len * cn;
for (; x <= size - cVectorWidth; x += cVectorWidth)
{
- v_float32x4 v_src0 = v_load(src + x);
- v_float32x4 v_src1 = v_load(src + x + 4);
- v_float64x2 v_src00 = v_cvt_f64(v_src0);
- v_float64x2 v_src01 = v_cvt_f64_high(v_src0);
- v_float64x2 v_src10 = v_cvt_f64(v_src1);
- v_float64x2 v_src11 = v_cvt_f64_high(v_src1);
+ v_uint16 v_src = vx_load(src + x);
+ v_uint32 v_int_0, v_int_1;
+ v_expand(v_src, v_int_0, v_int_1);
- v_store(dst + x, ((v_load(dst + x) * v_beta) + (v_src00 * v_alpha)));
- v_store(dst + x + 2, ((v_load(dst + x + 2) * v_beta) + (v_src01 * v_alpha)));
- v_store(dst + x + 4, ((v_load(dst + x + 4) * v_beta) + (v_src10 * v_alpha)));
- v_store(dst + x + 6, ((v_load(dst + x + 6) * v_beta) + (v_src11 * v_alpha)));
- }
- }
+ v_int32 v_int0 = v_reinterpret_as_s32(v_int_0);
+ v_int32 v_int1 = v_reinterpret_as_s32(v_int_1);
- accW_general_(src, dst, mask, len, cn, alpha, x);
-}
+ v_float64 v_src00 = v_cvt_f64(v_int0);
+ v_float64 v_src01 = v_cvt_f64_high(v_int0);
+ v_float64 v_src10 = v_cvt_f64(v_int1);
+ v_float64 v_src11 = v_cvt_f64_high(v_int1);
-void accW_simd_(const double* src, double* dst, const uchar* mask, int len, int cn, double alpha)
-{
- int x = 0;
- const v_float64x2 v_alpha = v_setall_f64(alpha);
- const v_float64x2 v_beta = v_setall_f64(1.0f - alpha);
- const int cVectorWidth = 4;
+ v_float64 v_dst00 = vx_load(dst + x);
+ v_float64 v_dst01 = vx_load(dst + x + step);
+ v_float64 v_dst10 = vx_load(dst + x + step * 2);
+ v_float64 v_dst11 = vx_load(dst + x + step * 3);
- if (!mask)
- {
- int size = len * cn;
- for (; x <= size - cVectorWidth; x += cVectorWidth)
- {
- v_float64x2 v_src0 = v_load(src + x);
- v_float64x2 v_src1 = v_load(src + x + 2);
+ v_dst00 = v_fma(v_dst00, v_beta, v_src00 * v_alpha);
+ v_dst01 = v_fma(v_dst01, v_beta, v_src01 * v_alpha);
+ v_dst10 = v_fma(v_dst10, v_beta, v_src10 * v_alpha);
+ v_dst11 = v_fma(v_dst11, v_beta, v_src11 * v_alpha);
- v_store(dst + x, ((v_load(dst + x) * v_beta) + (v_src0 * v_alpha)));
- v_store(dst + x + 2, ((v_load(dst + x + 2) * v_beta) + (v_src1 * v_alpha)));
+ v_store(dst + x, v_dst00);
+ v_store(dst + x + step, v_dst01);
+ v_store(dst + x + step * 2, v_dst10);
+ v_store(dst + x + step * 3, v_dst11);
}
}
-
+#endif // CV_SIMD_64F
accW_general_(src, dst, mask, len, cn, alpha, x);
}
-#else
-void accW_simd_(const uchar* src, double* dst, const uchar* mask, int len, int cn, double alpha)
-{
- accW_general_(src, dst, mask, len, cn, alpha, 0);
-}
-
-void accW_simd_(const ushort* src, double* dst, const uchar* mask, int len, int cn, double alpha)
-{
- accW_general_(src, dst, mask, len, cn, alpha, 0);
-}
void accW_simd_(const float* src, double* dst, const uchar* mask, int len, int cn, double alpha)
{
- accW_general_(src, dst, mask, len, cn, alpha, 0);
-}
-
-void accW_simd_(const double* src, double* dst, const uchar* mask, int len, int cn, double alpha)
-{
- accW_general_(src, dst, mask, len, cn, alpha, 0);
-}
-#endif // CV_SIMD128_64F
-#endif // CV_SIMD128
-#if CV_AVX
-// accumulate optimized by AVX
-void acc_avx_32f(const float* src, float* dst, const uchar* mask, int len, int cn)
-{
- int x = 0;
- const int cVectorWidth = 8;
-
- if (!mask)
- {
- int size = len * cn;
- for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
- {
- __m256 v_src = _mm256_loadu_ps(src + x);
- __m256 v_dst = _mm256_loadu_ps(dst + x);
- v_dst = _mm256_add_ps(v_src, v_dst);
- _mm256_storeu_ps(dst + x, v_dst);
- }
- acc_general_(src, dst, mask, len, cn, x);
- }
- else
- {
- acc_simd_(src, dst, mask, len, cn);
- }
-}
-
-void acc_avx_32f64f(const float* src, double* dst, const uchar* mask, int len, int cn)
-{
- int x = 0;
- const int cVectorWidth = 8;
-
- if (!mask)
- {
- int size = len * cn;
- for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
- {
- __m256 v_src = _mm256_loadu_ps(src + x);
- __m256d v_src0 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_src, 0));
- __m256d v_src1 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_src, 1));
- __m256d v_dst0 = _mm256_loadu_pd(dst + x);
- __m256d v_dst1 = _mm256_loadu_pd(dst + x + 4);
- v_dst0 = _mm256_add_pd(v_src0, v_dst0);
- v_dst1 = _mm256_add_pd(v_src1, v_dst1);
- _mm256_storeu_pd(dst + x, v_dst0);
- _mm256_storeu_pd(dst + x + 4, v_dst1);
- }
- acc_general_(src, dst, mask, len, cn, x);
- }
- else
- {
- acc_simd_(src, dst, mask, len, cn);
- }
-}
-
-void acc_avx_64f(const double* src, double* dst, const uchar* mask, int len, int cn)
-{
- int x = 0;
- const int cVectorWidth = 4;
-
- if (!mask)
- {
- int size = len * cn;
- for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
- {
- __m256d v_src = _mm256_loadu_pd(src + x);
- __m256d v_dst = _mm256_loadu_pd(dst + x);
- v_dst = _mm256_add_pd(v_dst, v_src);
- _mm256_storeu_pd(dst + x, v_dst);
- }
- acc_general_(src, dst, mask, len, cn, x);
- }
- else
- {
- acc_simd_(src, dst, mask, len, cn);
- }
-}
-
-// square accumulate optimized by avx
-void accSqr_avx_32f(const float* src, float* dst, const uchar* mask, int len, int cn)
-{
- int x = 0;
- const int cVectorWidth = 8;
-
- if (!mask)
- {
- int size = len * cn;
- for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
- {
- __m256 v_src = _mm256_loadu_ps(src + x);
- __m256 v_dst = _mm256_loadu_ps(dst + x);
- v_src = _mm256_mul_ps(v_src, v_src);
- v_dst = _mm256_add_ps(v_src, v_dst);
- _mm256_storeu_ps(dst + x, v_dst);
- }
- accSqr_general_(src, dst, mask, len, cn, x);
- }
- else
- {
- accSqr_simd_(src, dst, mask, len, cn);
- }
-}
-
-void accSqr_avx_32f64f(const float* src, double* dst, const uchar* mask, int len, int cn)
-{
- int x = 0;
- const int cVectorWidth = 8;
-
- if (!mask)
- {
- int size = len * cn;
- for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
- {
- __m256 v_src = _mm256_loadu_ps(src + x);
- __m256d v_src0 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_src,0));
- __m256d v_src1 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_src,1));
- __m256d v_dst0 = _mm256_loadu_pd(dst + x);
- __m256d v_dst1 = _mm256_loadu_pd(dst + x + 4);
- v_src0 = _mm256_mul_pd(v_src0, v_src0);
- v_src1 = _mm256_mul_pd(v_src1, v_src1);
- v_dst0 = _mm256_add_pd(v_src0, v_dst0);
- v_dst1 = _mm256_add_pd(v_src1, v_dst1);
- _mm256_storeu_pd(dst + x, v_dst0);
- _mm256_storeu_pd(dst + x + 4, v_dst1);
- }
- accSqr_general_(src, dst, mask, len, cn, x);
- }
- else
- {
- accSqr_simd_(src, dst, mask, len, cn);
- }
-}
-
-void accSqr_avx_64f(const double* src, double* dst, const uchar* mask, int len, int cn)
-{
- int x = 0;
- const int cVectorWidth = 4;
-
- if (!mask)
- {
- int size = len * cn;
- for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
- {
- __m256d v_src = _mm256_loadu_pd(src + x);
- __m256d v_dst = _mm256_loadu_pd(dst + x);
- v_src = _mm256_mul_pd(v_src, v_src);
- v_dst = _mm256_add_pd(v_dst, v_src);
- _mm256_storeu_pd(dst + x, v_dst);
- }
- accSqr_general_(src, dst, mask, len, cn, x);
- }
- else
- {
- accSqr_simd_(src, dst, mask, len, cn);
- }
-}
-
-// product accumulate optimized by avx
-void accProd_avx_32f(const float* src1, const float* src2, float* dst, const uchar* mask, int len, int cn)
-{
- int x = 0;
- const int cVectorWidth = 8;
-
- if (!mask)
- {
- int size = len * cn;
- for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
- {
- __m256 v_src0 = _mm256_loadu_ps(src1 + x);
- __m256 v_src1 = _mm256_loadu_ps(src2 + x);
- __m256 v_dst = _mm256_loadu_ps(dst + x);
- __m256 v_src = _mm256_mul_ps(v_src0, v_src1);
- v_dst = _mm256_add_ps(v_src, v_dst);
- _mm256_storeu_ps(dst + x, v_dst);
- }
- accProd_general_(src1, src2, dst, mask, len, cn, x);
- }
- else
- {
- accProd_simd_(src1, src2, dst, mask, len, cn);
- }
-}
-
-void accProd_avx_32f64f(const float* src1, const float* src2, double* dst, const uchar* mask, int len, int cn)
-{
- int x = 0;
- const int cVectorWidth = 8;
-
- if (!mask)
- {
- int size = len * cn;
- for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
- {
- __m256 v_1src = _mm256_loadu_ps(src1 + x);
- __m256 v_2src = _mm256_loadu_ps(src2 + x);
- __m256d v_src00 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_1src,0));
- __m256d v_src01 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_1src,1));
- __m256d v_src10 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_2src,0));
- __m256d v_src11 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_2src,1));
- __m256d v_dst0 = _mm256_loadu_pd(dst + x);
- __m256d v_dst1 = _mm256_loadu_pd(dst + x + 4);
- __m256d v_src0 = _mm256_mul_pd(v_src00, v_src10);
- __m256d v_src1 = _mm256_mul_pd(v_src01, v_src11);
- v_dst0 = _mm256_add_pd(v_src0, v_dst0);
- v_dst1 = _mm256_add_pd(v_src1, v_dst1);
- _mm256_storeu_pd(dst + x, v_dst0);
- _mm256_storeu_pd(dst + x + 4, v_dst1);
- }
- accProd_general_(src1, src2, dst, mask, len, cn, x);
- }
- else
- {
- accProd_simd_(src1, src2, dst, mask, len, cn);
- }
-}
-
-void accProd_avx_64f(const double* src1, const double* src2, double* dst, const uchar* mask, int len, int cn)
-{
- int x = 0;
- const int cVectorWidth = 4;
-
- if (!mask)
- {
- int size = len * cn;
- for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
- {
- __m256d v_src0 = _mm256_loadu_pd(src1 + x);
- __m256d v_src1 = _mm256_loadu_pd(src2 + x);
- __m256d v_dst = _mm256_loadu_pd(dst + x);
- v_src0 = _mm256_mul_pd(v_src0, v_src1);
- v_dst = _mm256_add_pd(v_dst, v_src0);
- _mm256_storeu_pd(dst + x, v_dst);
- }
- accProd_general_(src1, src2, dst, mask, len, cn, x);
- }
- else
- {
- accProd_simd_(src1, src2, dst, mask, len, cn);
- }
-}
-
-// running weight accumulate optimized by avx
-void accW_avx_32f(const float* src, float* dst, const uchar* mask, int len, int cn, double alpha)
-{
- int x = 0;
- const __m256 v_alpha = _mm256_set1_ps((float)alpha);
- const __m256 v_beta = _mm256_set1_ps((float)(1.0f - alpha));
- const int cVectorWidth = 16;
-
- if (!mask)
- {
- int size = len * cn;
- for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
- {
- _mm256_storeu_ps(dst + x, _mm256_add_ps(_mm256_mul_ps(_mm256_loadu_ps(dst + x), v_beta), _mm256_mul_ps(_mm256_loadu_ps(src + x), v_alpha)));
- _mm256_storeu_ps(dst + x + 8, _mm256_add_ps(_mm256_mul_ps(_mm256_loadu_ps(dst + x + 8), v_beta), _mm256_mul_ps(_mm256_loadu_ps(src + x + 8), v_alpha)));
- }
- accW_general_(src, dst, mask, len, cn, alpha, x);
- }
- else
- {
- accW_simd_(src, dst, mask, len, cn, alpha);
- }
-
-}
-
-void accW_avx_32f64f(const float* src, double* dst, const uchar* mask, int len, int cn, double alpha)
-{
int x = 0;
+#if CV_AVX && !CV_AVX2
const __m256d v_alpha = _mm256_set1_pd(alpha);
const __m256d v_beta = _mm256_set1_pd(1.0f - alpha);
const int cVectorWidth = 16;
if (!mask)
{
int size = len * cn;
for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
{
__m256 v_src0 = _mm256_loadu_ps(src + x);
__m256 v_src1 = _mm256_loadu_ps(src + x + 8);
__m256d v_src00 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_src0, 0));
__m256d v_src01 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_src0, 1));
__m256d v_src10 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_src1, 0));
__m256d v_src11 = _mm256_cvtps_pd(_mm256_extractf128_ps(v_src1, 1));
_mm256_storeu_pd(dst + x, _mm256_add_pd(_mm256_mul_pd(_mm256_loadu_pd(dst + x), v_beta), _mm256_mul_pd(v_src00, v_alpha)));
_mm256_storeu_pd(dst + x + 4, _mm256_add_pd(_mm256_mul_pd(_mm256_loadu_pd(dst + x + 4), v_beta), _mm256_mul_pd(v_src01, v_alpha)));
_mm256_storeu_pd(dst + x + 8, _mm256_add_pd(_mm256_mul_pd(_mm256_loadu_pd(dst + x + 8), v_beta), _mm256_mul_pd(v_src10, v_alpha)));
_mm256_storeu_pd(dst + x + 12, _mm256_add_pd(_mm256_mul_pd(_mm256_loadu_pd(dst + x + 12), v_beta), _mm256_mul_pd(v_src11, v_alpha)));
}
- accW_general_(src, dst, mask, len, cn, alpha, x);
}
- else
+#elif CV_SIMD_64F
+ const v_float64 v_alpha = vx_setall_f64(alpha);
+ const v_float64 v_beta = vx_setall_f64(1.0f - alpha);
+ const int cVectorWidth = v_float32::nlanes * 2;
+ const int step = v_float64::nlanes;
+
+ if (!mask)
{
- accW_simd_(src, dst, mask, len, cn, alpha);
+ int size = len * cn;
+ for (; x <= size - cVectorWidth; x += cVectorWidth)
+ {
+ v_float32 v_src0 = vx_load(src + x);
+ v_float32 v_src1 = vx_load(src + x + v_float32::nlanes);
+ v_float64 v_src00 = v_cvt_f64(v_src0);
+ v_float64 v_src01 = v_cvt_f64_high(v_src0);
+ v_float64 v_src10 = v_cvt_f64(v_src1);
+ v_float64 v_src11 = v_cvt_f64_high(v_src1);
+
+ v_float64 v_dst00 = vx_load(dst + x);
+ v_float64 v_dst01 = vx_load(dst + x + step);
+ v_float64 v_dst10 = vx_load(dst + x + step * 2);
+ v_float64 v_dst11 = vx_load(dst + x + step * 3);
+
+ v_dst00 = v_fma(v_dst00, v_beta, v_src00 * v_alpha);
+ v_dst01 = v_fma(v_dst01, v_beta, v_src01 * v_alpha);
+ v_dst10 = v_fma(v_dst10, v_beta, v_src10 * v_alpha);
+ v_dst11 = v_fma(v_dst11, v_beta, v_src11 * v_alpha);
+
+ v_store(dst + x, v_dst00);
+ v_store(dst + x + step, v_dst01);
+ v_store(dst + x + step * 2, v_dst10);
+ v_store(dst + x + step * 3, v_dst11);
+ }
}
+#endif // CV_SIMD_64F
+ accW_general_(src, dst, mask, len, cn, alpha, x);
}
-void accW_avx_64f(const double* src, double* dst, const uchar* mask, int len, int cn, double alpha)
+void accW_simd_(const double* src, double* dst, const uchar* mask, int len, int cn, double alpha)
{
int x = 0;
+#if CV_AVX && !CV_AVX2
const __m256d v_alpha = _mm256_set1_pd(alpha);
const __m256d v_beta = _mm256_set1_pd(1.0f - alpha);
const int cVectorWidth = 8;
if (!mask)
{
int size = len * cn;
for ( ; x <= size - cVectorWidth ; x += cVectorWidth)
{
__m256d v_src0 = _mm256_loadu_pd(src + x);
__m256d v_src1 = _mm256_loadu_pd(src + x + 4);
_mm256_storeu_pd(dst + x, _mm256_add_pd(_mm256_mul_pd(_mm256_loadu_pd(dst + x), v_beta), _mm256_mul_pd(v_src0, v_alpha)));
_mm256_storeu_pd(dst + x + 4, _mm256_add_pd(_mm256_mul_pd(_mm256_loadu_pd(dst + x + 4), v_beta), _mm256_mul_pd(v_src1, v_alpha)));
}
- accW_general_(src, dst, mask, len, cn, alpha, x);
}
- else
+#elif CV_SIMD_64F
+ const v_float64 v_alpha = vx_setall_f64(alpha);
+ const v_float64 v_beta = vx_setall_f64(1.0f - alpha);
+ const int cVectorWidth = v_float64::nlanes * 2;
+ const int step = v_float64::nlanes;
+
+ if (!mask)
{
- accW_simd_(src, dst, mask, len, cn, alpha);
+ int size = len * cn;
+ for (; x <= size - cVectorWidth; x += cVectorWidth)
+ {
+ v_float64 v_src0 = vx_load(src + x);
+ v_float64 v_src1 = vx_load(src + x + step);
+
+ v_float64 v_dst0 = vx_load(dst + x);
+ v_float64 v_dst1 = vx_load(dst + x + step);
+
+ v_dst0 = v_fma(v_dst0, v_beta, v_src0 * v_alpha);
+ v_dst1 = v_fma(v_dst1, v_beta, v_src1 * v_alpha);
+
+ v_store(dst + x, v_dst0);
+ v_store(dst + x + step, v_dst1);
+ }
}
+#endif // CV_SIMD_64F
+ accW_general_(src, dst, mask, len, cn, alpha, x);
}
-#endif
+
#endif // CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
CV_CPU_OPTIMIZATION_NAMESPACE_END