From e46332a183e2f28d36b3200a93b49728d1443d56 Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Sun, 28 Sep 2014 03:48:28 -0700
Subject: [PATCH] cv::Mat::convertTo with scale and shift

---
 modules/core/src/convert.cpp | 722 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 721 insertions(+), 1 deletion(-)

diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp
index 0aecb69..4855371 100644
--- a/modules/core/src/convert.cpp
+++ b/modules/core/src/convert.cpp
@@ -1480,6 +1480,724 @@ cvtScaleAbs_( const T* src, size_t sstep,
     }
 }
 
+template <typename T, typename DT, typename WT>
+struct cvtScale_SIMD
+{
+    int operator () (const T *, DT *, int, WT, WT) const
+    {
+        return 0;
+    }
+};
+
+#if CV_NEON
+
+// from uchar
+
+template <>
+struct cvtScale_SIMD<uchar, uchar, float>
+{
+    int operator () (const uchar * src, uchar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
+
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
+                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            vst1_u8(dst + x, vqmovn_u16(v_dst));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<uchar, schar, float>
+{
+    int operator () (const uchar * src, schar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
+
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
+                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            vst1_s8(dst + x, vqmovn_s16(v_dst));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<uchar, ushort, float>
+{
+    int operator () (const uchar * src, ushort * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
+
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
+                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            vst1q_u16(dst + x, v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<uchar, short, float>
+{
+    int operator () (const uchar * src, short * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
+
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
+                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            vst1q_s16(dst + x, v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<uchar, int, float>
+{
+    int operator () (const uchar * src, int * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
+
+            vst1q_s32(dst + x, vcvtq_s32_f32(v_dst1));
+            vst1q_s32(dst + x + 4, vcvtq_s32_f32(v_dst2));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<uchar, float, float>
+{
+    int operator () (const uchar * src, float * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
+            vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift));
+            vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift));
+        }
+
+        return x;
+    }
+};
+
+// from schar
+
+template <>
+struct cvtScale_SIMD<schar, uchar, float>
+{
+    int operator () (const schar * src, uchar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
+
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
+                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            vst1_u8(dst + x, vqmovn_u16(v_dst));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<schar, schar, float>
+{
+    int operator () (const schar * src, schar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
+
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
+                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            vst1_s8(dst + x, vqmovn_s16(v_dst));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<schar, ushort, float>
+{
+    int operator () (const schar * src, ushort * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
+
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
+                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            vst1q_u16(dst + x, v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<schar, short, float>
+{
+    int operator () (const schar * src, short * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
+
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
+                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            vst1q_s16(dst + x, v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<schar, int, float>
+{
+    int operator () (const schar * src, int * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
+
+            vst1q_s32(dst + x, vcvtq_s32_f32(v_dst1));
+            vst1q_s32(dst + x + 4, vcvtq_s32_f32(v_dst2));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<schar, float, float>
+{
+    int operator () (const schar * src, float * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
+            vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift));
+            vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift));
+        }
+
+        return x;
+    }
+};
+
+// from ushort
+
+template <>
+struct cvtScale_SIMD<ushort, uchar, float>
+{
+    int operator () (const ushort * src, uchar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vld1q_u16(src + x);
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
+
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
+                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            vst1_u8(dst + x, vqmovn_u16(v_dst));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<ushort, schar, float>
+{
+    int operator () (const ushort * src, schar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vld1q_u16(src + x);
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
+
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
+                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            vst1_s8(dst + x, vqmovn_s16(v_dst));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<ushort, ushort, float>
+{
+    int operator () (const ushort * src, ushort * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vld1q_u16(src + x);
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
+
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
+                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            vst1q_u16(dst + x, v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<ushort, short, float>
+{
+    int operator () (const ushort * src, short * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vld1q_u16(src + x);
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
+
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
+                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            vst1q_s16(dst + x, v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<ushort, int, float>
+{
+    int operator () (const ushort * src, int * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vld1q_u16(src + x);
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
+
+            vst1q_s32(dst + x, vcvtq_s32_f32(v_dst1));
+            vst1q_s32(dst + x + 4, vcvtq_s32_f32(v_dst2));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<ushort, float, float>
+{
+    int operator () (const ushort * src, float * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vld1q_u16(src + x);
+            vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift));
+            vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift));
+        }
+
+        return x;
+    }
+};
+
+// from short
+
+template <>
+struct cvtScale_SIMD<short, uchar, float>
+{
+    int operator () (const short * src, uchar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            int16x8_t v_src = vld1q_s16(src + x);
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
+
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
+                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            vst1_u8(dst + x, vqmovn_u16(v_dst));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<short, schar, float>
+{
+    int operator () (const short * src, schar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            int16x8_t v_src = vld1q_s16(src + x);
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
+
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
+                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            vst1_s8(dst + x, vqmovn_s16(v_dst));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<short, ushort, float>
+{
+    int operator () (const short * src, ushort * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            int16x8_t v_src = vld1q_s16(src + x);
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
+
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
+                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            vst1q_u16(dst + x, v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<short, float, float>
+{
+    int operator () (const short * src, float * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            int16x8_t v_src = vld1q_s16(src + x);
+            vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift));
+            vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift));
+        }
+
+        return x;
+    }
+};
+
+// from int
+
+template <>
+struct cvtScale_SIMD<int, uchar, float>
+{
+    int operator () (const int * src, uchar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
+
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
+                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            vst1_u8(dst + x, vqmovn_u16(v_dst));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<int, schar, float>
+{
+    int operator () (const int * src, schar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
+
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
+                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            vst1_s8(dst + x, vqmovn_s16(v_dst));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<int, ushort, float>
+{
+    int operator () (const int * src, ushort * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
+
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
+                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            vst1q_u16(dst + x, v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<int, short, float>
+{
+    int operator () (const int * src, short * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
+
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
+                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            vst1q_s16(dst + x, v_dst);
+        }
+
+        return x;
+    }
+};
+
+// from float
+
+template <>
+struct cvtScale_SIMD<float, uchar, float>
+{
+    int operator () (const float * src, uchar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
+
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
+                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            vst1_u8(dst + x, vqmovn_u16(v_dst));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<float, schar, float>
+{
+    int operator () (const float * src, schar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
+
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
+                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            vst1_s8(dst + x, vqmovn_s16(v_dst));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<float, ushort, float>
+{
+    int operator () (const float * src, ushort * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
+
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
+                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            vst1q_u16(dst + x, v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<float, short, float>
+{
+    int operator () (const float * src, short * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
+            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
+
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
+                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            vst1q_s16(dst + x, v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<float, int, float>
+{
+    int operator () (const float * src, int * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 4; x += 4)
+            vst1q_s32(dst + x, vcvtq_s32_f32(vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift)));
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<float, float, float>
+{
+    int operator () (const float * src, float * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
+
+        for ( ; x <= width - 4; x += 4)
+            vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift));
+
+        return x;
+    }
+};
+
+#endif
+
 template<typename T, typename DT, typename WT> static void
 cvtScale_( const T* src, size_t sstep,
            DT* dst, size_t dstep, Size size,
@@ -1488,9 +2206,11 @@ cvtScale_( const T* src, size_t sstep,
     sstep /= sizeof(src[0]);
     dstep /= sizeof(dst[0]);
 
+    cvtScale_SIMD<T, DT, WT> vop;
+
     for( ; size.height--; src += sstep, dst += dstep )
     {
-        int x = 0;
+        int x = vop(src, dst, size.width, scale, shift);
 
         #if CV_ENABLE_UNROLLED
         for( ; x <= size.width - 4; x += 4 )
-- 
2.7.4
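
Note (commentary, not part of the patch): every specialization above implements the same per-lane contract — widen the integer lanes, do the multiply-add in float32, then narrow back with saturation via the vqmovn_* intrinsics. A scalar sketch of that contract for reference; cvtScaleScalar is a hypothetical name, and there is one deliberate caveat: cv::saturate_cast rounds to nearest, while the NEON vcvtq_s32_f32/vcvtq_u32_f32 converts truncate toward zero, so the two paths can differ for fractional intermediate values.

    #include <opencv2/core.hpp>

    // Scalar reference for the NEON kernels above (illustrative only).
    // saturate_cast clamps to the destination range just like the vqmovn
    // saturating narrows, but rounds where the vector converts truncate.
    template <typename T, typename DT, typename WT>
    static void cvtScaleScalar(const T* src, DT* dst, int width, WT scale, WT shift)
    {
        for (int x = 0; x < width; x++)
            dst[x] = cv::saturate_cast<DT>(src[x] * scale + shift);
    }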
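
At the API level this fast path serves cv::Mat::convertTo with non-trivial alpha/beta coefficients. A minimal usage sketch — the matrix size, types, and coefficients below are illustrative, not taken from the patch:

    #include <opencv2/core.hpp>

    int main()
    {
        cv::Mat src(480, 640, CV_8UC1, cv::Scalar(127));
        cv::Mat dst;
        // Element-wise dst = saturate_cast<short>(src * 2 - 10); on ARM the
        // row loop now dispatches to cvtScale_SIMD<uchar, short, float> above.
        src.convertTo(dst, CV_16S, 2.0, -10.0);
        return 0;
    }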