cv::magnitude; cv::corner**
author    Ilya Lavrenov <ilya.lavrenov@itseez.com>
          Sun, 12 Oct 2014 10:43:06 +0000 (03:43 -0700)
committer Ilya Lavrenov <ilya.lavrenov@itseez.com>
          Sat, 1 Nov 2014 10:19:51 +0000 (13:19 +0300)
modules/core/include/opencv2/core/base.hpp
modules/core/src/mathfuncs.cpp
modules/imgproc/src/corner.cpp

diff --git a/modules/core/include/opencv2/core/base.hpp b/modules/core/include/opencv2/core/base.hpp
index 207f926..a7a68bd 100644
@@ -621,6 +621,22 @@ inline float32x2_t cv_vrecp_f32(float32x2_t val)
     return reciprocal;
 }
 
+inline float32x4_t cv_vsqrtq_f32(float32x4_t val)
+{
+    float32x4_t e = vrsqrteq_f32(val);
+    e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(e, e), val), e);
+    e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(e, e), val), e);
+    return cv_vrecpq_f32(e);
+}
+
+inline float32x2_t cv_vsqrt_f32(float32x2_t val)
+{
+    float32x2_t e = vrsqrte_f32(val);
+    e = vmul_f32(vrsqrts_f32(vmul_f32(e, e), val), e);
+    e = vmul_f32(vrsqrts_f32(vmul_f32(e, e), val), e);
+    return cv_vrecp_f32(e);
+}
+
 #endif
 
 } // cv
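
The two helpers added above derive a full-precision square root from NEON's reciprocal-square-root estimate: vrsqrte*_f32 gives a rough e ~ 1/sqrt(v), vrsqrts*_f32(a, b) returns (3 - a*b)/2, so each e = e * vrsqrts(e*e, v) is one Newton-Raphson refinement step, and inverting the result (via the existing cv_vrecp*_f32 helpers) yields sqrt(v). A scalar model of that refinement, as an illustrative sketch only (model_vrsqrts and model_vsqrt are hypothetical names, not part of the patch):

    // Scalar model of cv_vsqrtq_f32 / cv_vsqrt_f32 (illustration only).
    static inline float model_vrsqrts(float a, float b)
    {
        return (3.0f - a * b) * 0.5f;        // what vrsqrts*_f32(a, b) computes
    }

    static inline float model_vsqrt(float v, float rough_estimate)
    {
        float e = rough_estimate;            // stands in for vrsqrte*_f32(v)
        e = e * model_vrsqrts(e * e, v);     // first Newton-Raphson step
        e = e * model_vrsqrts(e * e, v);     // second Newton-Raphson step
        return 1.0f / e;                     // invert 1/sqrt(v) to get sqrt(v)
    }
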
diff --git a/modules/core/src/mathfuncs.cpp b/modules/core/src/mathfuncs.cpp
index 3fcb6bd..50212e3 100644
@@ -293,17 +293,15 @@ static void Magnitude_32f(const float* x, const float* y, float* mag, int len)
         }
     }
 #elif CV_NEON
-    float CV_DECL_ALIGNED(16) m[4];
-
     for( ; i <= len - 4; i += 4 )
     {
         float32x4_t v_x = vld1q_f32(x + i), v_y = vld1q_f32(y + i);
-        vst1q_f32(m, vaddq_f32(vmulq_f32(v_x, v_x), vmulq_f32(v_y, v_y)));
-
-        mag[i] = std::sqrt(m[0]);
-        mag[i+1] = std::sqrt(m[1]);
-        mag[i+2] = std::sqrt(m[2]);
-        mag[i+3] = std::sqrt(m[3]);
+        vst1q_f32(mag + i, cv_vsqrtq_f32(vmlaq_f32(vmulq_f32(v_x, v_x), v_y, v_y)));
+    }
+    for( ; i <= len - 2; i += 2 )
+    {
+        float32x2_t v_x = vld1_f32(x + i), v_y = vld1_f32(y + i);
+        vst1_f32(mag + i, cv_vsqrt_f32(vmla_f32(vmul_f32(v_x, v_x), v_y, v_y)));
     }
 #endif
 
diff --git a/modules/imgproc/src/corner.cpp b/modules/imgproc/src/corner.cpp
index e2e1987..85f2063 100644
@@ -69,7 +69,7 @@ static void calcMinEigenVal( const Mat& _cov, Mat& _dst )
         if( simd )
         {
             __m128 half = _mm_set1_ps(0.5f);
-            for( ; j <= size.width - 5; j += 4 )
+            for( ; j <= size.width - 4; j += 4 )
             {
                 __m128 t0 = _mm_loadu_ps(cov + j*3); // a0 b0 c0 x
                 __m128 t1 = _mm_loadu_ps(cov + j*3 + 3); // a1 b1 c1 x
@@ -90,6 +90,19 @@ static void calcMinEigenVal( const Mat& _cov, Mat& _dst )
                 _mm_storeu_ps(dst + j, a);
             }
         }
+    #elif CV_NEON
+        float32x4_t v_half = vdupq_n_f32(0.5f);
+        for( ; j <= size.width - 4; j += 4 )
+        {
+            float32x4x3_t v_src = vld3q_f32(cov + j * 3);
+            float32x4_t v_a = vmulq_f32(v_src.val[0], v_half);
+            float32x4_t v_b = v_src.val[1];
+            float32x4_t v_c = vmulq_f32(v_src.val[2], v_half);
+
+            float32x4_t v_t = vsubq_f32(v_a, v_c);
+            v_t = vmlaq_f32(vmulq_f32(v_t, v_t), v_b, v_b);
+            vst1q_f32(dst + j, vsubq_f32(vaddq_f32(v_a, v_c), cv_vsqrtq_f32(v_t)));
+        }
     #endif
         for( ; j < size.width; j++ )
         {
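
calcMinEigenVal reads the per-pixel structure tensor as an (a, b, c) triple for the matrix [[a, b], [b, c]], whose minimum eigenvalue is (a + c)/2 - sqrt(((a - c)/2)^2 + b^2). The new NEON block deinterleaves four triples with vld3q_f32, pre-halves a and c, folds in the b*b term with vmlaq_f32, and uses cv_vsqrtq_f32 for the root. The SSE loop-bound change (size.width - 5 to size.width - 4) fixes an off-by-one that left the last complete group of four columns to the scalar loop. A per-column scalar sketch mirroring the vector arithmetic (illustration, not a quote of the scalar loop):

    // Minimum eigenvalue of [[a, b], [b, c]] with a and c pre-halved:
    float a = cov[j*3]     * 0.5f;   // halved, as in v_a
    float b = cov[j*3 + 1];
    float c = cov[j*3 + 2] * 0.5f;   // halved, as in v_c
    float t = a - c;
    dst[j] = (a + c) - std::sqrt(t*t + b*b);
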
@@ -290,8 +303,24 @@ cornerEigenValsVecs( const Mat& src, Mat& eigenv, int block_size,
         float* cov_data = cov.ptr<float>(i);
         const float* dxdata = Dx.ptr<float>(i);
         const float* dydata = Dy.ptr<float>(i);
+        j = 0;
 
-        for( j = 0; j < size.width; j++ )
+        #if CV_NEON
+        for( ; j <= size.width - 4; j += 4 )
+        {
+            float32x4_t v_dx = vld1q_f32(dxdata + j);
+            float32x4_t v_dy = vld1q_f32(dydata + j);
+
+            float32x4x3_t v_dst;
+            v_dst.val[0] = vmulq_f32(v_dx, v_dx);
+            v_dst.val[1] = vmulq_f32(v_dx, v_dy);
+            v_dst.val[2] = vmulq_f32(v_dy, v_dy);
+
+            vst3q_f32(cov_data + j * 3, v_dst);
+        }
+        #endif
+
+        for( ; j < size.width; j++ )
         {
             float dx = dxdata[j];
             float dy = dydata[j];
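
cornerEigenValsVecs fills that covariance buffer: for four columns at a time the NEON loop forms dx*dx, dx*dy and dy*dy and stores them interleaved with vst3q_f32, producing the same consecutive (a, b, c) triples that calcMinEigenVal reads back with vld3q_f32, while the scalar loop beginning in the context above handles the leftover columns. A scalar model of the triple layout (sketch; fill_cov_triple is a hypothetical helper name, not part of the patch):

    // One column of the interleaved covariance buffer:
    static inline void fill_cov_triple(float* cov_data, int j, float dx, float dy)
    {
        cov_data[j*3 + 0] = dx * dx;   // a
        cov_data[j*3 + 1] = dx * dy;   // b
        cov_data[j*3 + 2] = dy * dy;   // c
    }
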