NEON optimization of the remaining cv::Mat::convertTo conversion modes
author    Ilya Lavrenov <ilya.lavrenov@itseez.com>
          Fri, 10 Oct 2014 11:05:19 +0000 (11:05 +0000)
committer Ilya Lavrenov <ilya.lavrenov@itseez.com>
          Fri, 10 Oct 2014 14:10:50 +0000 (14:10 +0000)
modules/core/src/convert.cpp
modules/imgproc/src/corner.cpp
modules/imgproc/src/pyramids.cpp

diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp
index 67adfde..9d758e7 100644
--- a/modules/core/src/convert.cpp
+++ b/modules/core/src/convert.cpp
@@ -2448,6 +2448,25 @@ struct Cvt_SIMD<schar, short>
 };
 
 template <>
+struct Cvt_SIMD<schar, ushort>
+{
+    int operator() (const schar * src, ushort * dst, int width) const
+    {
+        int x = 0;
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
+            vst1q_u16(dst + x, vcombine_u16(vqmovun_s32(vmovl_s16(vget_low_s16(v_src))),
+                                            vqmovun_s32(vmovl_s16(vget_high_s16(v_src)))));
+        }
+
+        return x;
+    }
+};
+
+
+template <>
 struct Cvt_SIMD<schar, int>
 {
     int operator() (const schar * src, int * dst, int width) const
@@ -2503,6 +2522,49 @@ struct Cvt_SIMD<ushort, uchar>
 };
 
 template <>
+struct Cvt_SIMD<ushort, schar>
+{
+    int operator() (const ushort * src, schar * dst, int width) const
+    {
+        int x = 0;
+
+        for ( ; x <= width - 16; x += 16)
+        {
+            uint16x8_t v_src1 = vld1q_u16(src + x), v_src2 = vld1q_u16(src + x + 8);
+            int32x4_t v_dst10 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src1)));
+            int32x4_t v_dst11 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src1)));
+            int32x4_t v_dst20 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src2)));
+            int32x4_t v_dst21 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src2)));
+
+            vst1q_s8(dst + x, vcombine_s8(vqmovn_s16(vcombine_s16(vqmovn_s32(v_dst10), vqmovn_s32(v_dst11))),
+                                          vqmovn_s16(vcombine_s16(vqmovn_s32(v_dst20), vqmovn_s32(v_dst21)))));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct Cvt_SIMD<ushort, short>
+{
+    int operator() (const ushort * src, short * dst, int width) const
+    {
+        int x = 0;
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            uint16x8_t v_src = vld1q_u16(src + x);
+            int32x4_t v_dst0 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src)));
+            int32x4_t v_dst1 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src)));
+
+            vst1q_s16(dst + x, vcombine_s16(vqmovn_s32(v_dst0), vqmovn_s32(v_dst1)));
+        }
+
+        return x;
+    }
+};
+
+template <>
 struct Cvt_SIMD<ushort, int>
 {
     int operator() (const ushort * src, int * dst, int width) const
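For context, these Cvt_SIMD specializations plug into convert.cpp's generic row-conversion template: the functor converts as many full vector blocks as fit into the row, returns the index where it stopped, and a plain scalar loop finishes the tail. A simplified sketch of that caller pattern follows (cvtRow_ is an illustrative name; the real template also handles row strides and optional loop unrolling):

    // Simplified sketch of how convert.cpp consumes a Cvt_SIMD functor;
    // strides and unrolling omitted, cvtRow_ is an illustrative name.
    template <typename T, typename DT> static void
    cvtRow_(const T* src, DT* dst, int width)
    {
        Cvt_SIMD<T, DT> vop;                        // NEON specialization above, or a no-op fallback
        int x = vop(src, dst, width);               // vectorized prefix; returns elements processed
        for ( ; x < width; x++ )                    // scalar tail
            dst[x] = cv::saturate_cast<DT>(src[x]); // same saturating semantics as the intrinsics
    }

The saturating intrinsics keep the two paths consistent: in the schar -> ushort case, for example, vqmovun_s32 clamps negative inputs to 0, matching saturate_cast<ushort>((schar)-1) == 0.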
diff --git a/modules/imgproc/src/corner.cpp b/modules/imgproc/src/corner.cpp
index 4d55a4f..096997a 100644
--- a/modules/imgproc/src/corner.cpp
+++ b/modules/imgproc/src/corner.cpp
@@ -618,7 +618,9 @@ void cv::preCornerDetect( InputArray _src, OutputArray _dst, int ksize, int borderType )
     if( src.depth() == CV_8U )
         factor *= 255;
     factor = 1./(factor * factor * factor);
+#if CV_NEON || CV_SSE2
     float factor_f = (float)factor;
+#endif
 
 #if CV_SSE2
     volatile bool haveSSE2 = cv::checkHardwareSupport(CV_CPU_SSE2);
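The corner.cpp change is a warning fix rather than an optimization: factor_f is read only inside the SSE2/NEON code paths, so on builds with neither extension the float copy would be assigned but never used. A standalone illustration of the guard idiom (hypothetical scaleRow function, not OpenCV code):

    #if CV_SSE2
    #include <emmintrin.h>
    #endif

    // Illustration only: a helper consumed solely by an optional SIMD path
    // is declared under the same preprocessor guard, so -Wunused-variable
    // stays quiet on plain scalar builds.
    static void scaleRow(const float* src, float* dst, int n, double factor)
    {
        int i = 0;
    #if CV_SSE2
        float factor_f = (float)factor;              // used only by the intrinsics
        __m128 v_factor = _mm_set1_ps(factor_f);
        for ( ; i <= n - 4; i += 4 )
            _mm_storeu_ps(dst + i, _mm_mul_ps(_mm_loadu_ps(src + i), v_factor));
    #endif
        for ( ; i < n; i++ )                         // scalar path / tail
            dst[i] = (float)(src[i] * factor);
    }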
diff --git a/modules/imgproc/src/pyramids.cpp b/modules/imgproc/src/pyramids.cpp
index ad10f29..8a8515e 100644
--- a/modules/imgproc/src/pyramids.cpp
+++ b/modules/imgproc/src/pyramids.cpp
@@ -181,6 +181,8 @@ struct PyrDownVec_32f
 typedef NoVec<int, ushort> PyrDownVec_32s16u;
 typedef NoVec<int, short> PyrDownVec_32s16s;
 
+typedef NoVec<float, float> PyrUpVec_32f;
+
 #elif CV_NEON
 
 struct PyrDownVec_32s8u
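The pyramids.cpp hunk extends the dispatch idiom visible just above it: when no SIMD backend is compiled in, each vector-op slot is filled with the no-op NoVec functor so the generic pyramid code still builds and the scalar loop does all the work. Roughly, the pattern looks like this (a sketch; the exact NoVec signature in pyramids.cpp may differ):

    // Sketch of the NoVec fallback idiom used throughout pyramids.cpp.
    template <typename T1, typename T2> struct NoVec
    {
        // Reports zero elements processed, so the scalar loop handles the row.
        int operator()(T1**, T2*, int, int) const { return 0; }
    };

    typedef NoVec<float, float> PyrUpVec_32f;  // the fallback this patch adds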