From 8d367d4b4d45fd75390abeda57a7e93ee53c127d Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Tue, 7 Oct 2014 15:25:02 +0000
Subject: [PATCH] cv::cvtColor (HSV2RGB CV_8U)

---
 modules/imgproc/src/color.cpp | 70 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 67 insertions(+), 3 deletions(-)

diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp
index 3d5cded..6b2f9ff 100644
--- a/modules/imgproc/src/color.cpp
+++ b/modules/imgproc/src/color.cpp
@@ -3047,7 +3047,13 @@ struct HSV2RGB_b
 
     HSV2RGB_b(int _dstcn, int _blueIdx, int _hrange)
     : dstcn(_dstcn), cvt(3, _blueIdx, (float)_hrange)
-    {}
+    {
+        #if CV_NEON
+        v_scale_inv = vdupq_n_f32(1.f/255.f);
+        v_scale = vdupq_n_f32(255.f);
+        v_alpha = vdup_n_u8(ColorChannel<uchar>::max());
+        #endif
+    }
 
     void operator()(const uchar* src, uchar* dst, int n) const
     {
@@ -3058,8 +3064,30 @@ struct HSV2RGB_b
         for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 )
         {
             int dn = std::min(n - i, (int)BLOCK_SIZE);
+            j = 0;
 
-            for( j = 0; j < dn*3; j += 3 )
+            #if CV_NEON
+            for ( ; j <= (dn - 8) * 3; j += 24)
+            {
+                uint8x8x3_t v_src = vld3_u8(src + j);
+                uint16x8_t v_t0 = vmovl_u8(v_src.val[0]),
+                           v_t1 = vmovl_u8(v_src.val[1]),
+                           v_t2 = vmovl_u8(v_src.val[2]);
+
+                float32x4x3_t v_dst;
+                v_dst.val[0] = vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_t0)));
+                v_dst.val[1] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_t1))), v_scale_inv);
+                v_dst.val[2] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_t2))), v_scale_inv);
+                vst3q_f32(buf + j, v_dst);
+
+                v_dst.val[0] = vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t0)));
+                v_dst.val[1] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t1))), v_scale_inv);
+                v_dst.val[2] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_scale_inv);
+                vst3q_f32(buf + j + 12, v_dst);
+            }
+            #endif
+
+            for( ; j < dn*3; j += 3 )
             {
                 buf[j] = src[j];
                 buf[j+1] = src[j+1]*(1.f/255.f);
@@ -3067,7 +3095,39 @@ struct HSV2RGB_b
             }
             cvt(buf, buf, dn);
 
-            for( j = 0; j < dn*3; j += 3, dst += dcn )
+            j = 0;
+            #if CV_NEON
+            for ( ; j <= (dn - 8) * 3; j += 24, dst += dcn * 8)
+            {
+                float32x4x3_t v_src0 = vld3q_f32(buf + j), v_src1 = vld3q_f32(buf + j + 12);
+                uint8x8_t v_dst0 = vqmovn_u16(vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src0.val[0], v_scale))),
+                                                           vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src1.val[0], v_scale)))));
+                uint8x8_t v_dst1 = vqmovn_u16(vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src0.val[1], v_scale))),
+                                                           vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src1.val[1], v_scale)))));
+                uint8x8_t v_dst2 = vqmovn_u16(vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src0.val[2], v_scale))),
+                                                           vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src1.val[2], v_scale)))));
+
+                if (dcn == 4)
+                {
+                    uint8x8x4_t v_dst;
+                    v_dst.val[0] = v_dst0;
+                    v_dst.val[1] = v_dst1;
+                    v_dst.val[2] = v_dst2;
+                    v_dst.val[3] = v_alpha;
+                    vst4_u8(dst, v_dst);
+                }
+                else
+                {
+                    uint8x8x3_t v_dst;
+                    v_dst.val[0] = v_dst0;
+                    v_dst.val[1] = v_dst1;
+                    v_dst.val[2] = v_dst2;
+                    vst3_u8(dst, v_dst);
+                }
+            }
+            #endif
+
+            for( ; j < dn*3; j += 3, dst += dcn )
             {
                 dst[0] = saturate_cast<uchar>(buf[j]*255.f);
                 dst[1] = saturate_cast<uchar>(buf[j+1]*255.f);
@@ -3080,6 +3140,10 @@ struct HSV2RGB_b
 
     int dstcn;
     HSV2RGB_f cvt;
+    #if CV_NEON
+    float32x4_t v_scale, v_scale_inv;
+    uint8x8_t v_alpha;
+    #endif
 };
 
 
-- 
2.7.4
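
Reviewer note (not part of the patch): below is a minimal sketch of how the CV_8U HSV-to-RGB path touched above is reached through the public API. Only cv::cvtColor with COLOR_BGR2HSV / COLOR_HSV2BGR on a CV_8UC3 matrix comes from OpenCV itself; the image size, the random test data, the round-trip check, and the 3.x-style header names are assumptions made for illustration, and the NEON branch only runs on a build where CV_NEON is enabled.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <cstdio>

int main()
{
    // Arbitrary 8-bit BGR test image (contents are illustrative, not from the patch).
    cv::Mat bgr(256, 256, CV_8UC3);
    cv::randu(bgr, cv::Scalar::all(0), cv::Scalar::all(256));

    cv::Mat hsv, back;
    cv::cvtColor(bgr, hsv, cv::COLOR_BGR2HSV);   // 8-bit forward conversion, H in [0,180)
    cv::cvtColor(hsv, back, cv::COLOR_HSV2BGR);  // dispatches to HSV2RGB_b for CV_8U input

    // Any differences come from the 8-bit quantization of H, S and V on the way through.
    double maxDiff = cv::norm(bgr, back, cv::NORM_INF);
    std::printf("max per-channel round-trip difference: %.0f\n", maxDiff);
    return 0;
}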