Added vp9_sad8x8_neon()
author Scott LaVarnway <slavarnway@google.com>
Fri, 1 Aug 2014 13:36:18 +0000 (06:36 -0700)
committer Scott LaVarnway <slavarnway@google.com>
Fri, 1 Aug 2014 13:36:18 +0000 (06:36 -0700)
Change-Id: I3be8911121ef9a5f39f6c1a2e28f9e00972e0624

test/sad_test.cc
vp9/common/vp9_rtcd_defs.pl
vp9/encoder/arm/neon/vp9_sad_neon.c

diff --git a/test/sad_test.cc b/test/sad_test.cc
index dbd2cf5..f07a989 100644
@@ -475,10 +475,12 @@ INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::Values(
 const SadMxNVp9Func sad_64x64_neon_vp9 = vp9_sad64x64_neon;
 const SadMxNVp9Func sad_32x32_neon_vp9 = vp9_sad32x32_neon;
 const SadMxNVp9Func sad_16x16_neon_vp9 = vp9_sad16x16_neon;
+const SadMxNVp9Func sad_8x8_neon_vp9 = vp9_sad8x8_neon;
 const SadMxNVp9Param neon_vp9_tests[] = {
   make_tuple(64, 64, sad_64x64_neon_vp9),
   make_tuple(32, 32, sad_32x32_neon_vp9),
   make_tuple(16, 16, sad_16x16_neon_vp9),
+  make_tuple(8, 8, sad_8x8_neon_vp9),
 };
 INSTANTIATE_TEST_CASE_P(NEON, SADVP9Test, ::testing::ValuesIn(neon_vp9_tests));
 #endif  // CONFIG_VP9_ENCODER
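
SADVP9Test checks each registered SIMD function against the plain C
implementation on random blocks, so the new tuple above is what puts
vp9_sad8x8_neon under test. A minimal sketch of the scalar reference it is
compared against (the function name here is illustrative, not the
harness's):

#include <stdint.h>
#include <stdlib.h>

static unsigned int sad8x8_reference(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride) {
  unsigned int sad = 0;
  int r, c;
  for (r = 0; r < 8; ++r) {
    for (c = 0; c < 8; ++c)
      sad += abs(src[c] - ref[c]);  /* absolute difference per pixel */
    src += src_stride;
    ref += ref_stride;
  }
  return sad;
}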
diff --git a/vp9/common/vp9_rtcd_defs.pl b/vp9/common/vp9_rtcd_defs.pl
index 3b1ca16..41f867b 100644
@@ -554,7 +554,7 @@ add_proto qw/unsigned int vp9_sad8x16/, "const uint8_t *src_ptr, int source_stri
 specialize qw/vp9_sad8x16 mmx/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride";
-specialize qw/vp9_sad8x8 mmx/, "$sse2_x86inc";
+specialize qw/vp9_sad8x8 mmx neon/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride";
 specialize qw/vp9_sad8x4/, "$sse2_x86inc";
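
Adding "neon" to the specialize line makes the RTCD generator emit a runtime
dispatch for vp9_sad8x8. Roughly, the generated vp9_rtcd.h ends up with
declarations along these lines (a sketch from memory, not the literal
generator output):

unsigned int vp9_sad8x8_c(const uint8_t *src_ptr, int source_stride,
                          const uint8_t *ref_ptr, int ref_stride);
unsigned int vp9_sad8x8_neon(const uint8_t *src_ptr, int source_stride,
                             const uint8_t *ref_ptr, int ref_stride);
/* Function pointer set to vp9_sad8x8_neon by setup_rtcd_internal()
 * when NEON is available, vp9_sad8x8_c otherwise. */
RTCD_EXTERN unsigned int (*vp9_sad8x8)(const uint8_t *src_ptr,
                                       int source_stride,
                                       const uint8_t *ref_ptr,
                                       int ref_stride);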
diff --git a/vp9/encoder/arm/neon/vp9_sad_neon.c b/vp9/encoder/arm/neon/vp9_sad_neon.c
index fe40b54..c4cd856 100644
@@ -26,9 +26,8 @@ static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo,
                                 vreinterpret_u32_u64(vget_high_u64(b)));
   return vget_lane_u32(c, 0);
 }
-static INLINE unsigned int horizontal_add_16x8(const uint16x8_t vec_lo,
-                                               const uint16x8_t vec_hi) {
-  const uint32x4_t a = vpaddlq_u16(vaddq_u16(vec_lo, vec_hi));
+static INLINE unsigned int horizontal_add_16x8(const uint16x8_t vec_16x8) {
+  const uint32x4_t a = vpaddlq_u16(vec_16x8);
   const uint64x2_t b = vpaddlq_u32(a);
   const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
                                 vreinterpret_u32_u64(vget_high_u64(b)));
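
The helper now reduces a single uint16x8_t; callers that used to pass two
accumulators combine them with vaddq_u16() first, as the updated call sites
below show. A scalar sketch of the reduction it performs (illustrative
only):

#include <stdint.h>

static unsigned int horizontal_add_16x8_scalar(const uint16_t lanes[8]) {
  unsigned int sum = 0;
  int i;
  for (i = 0; i < 8; ++i)
    sum += lanes[i];  /* same result as the pairwise NEON adds above */
  return sum;
}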
@@ -93,7 +92,7 @@ unsigned int vp9_sad32x32_neon(const uint8_t *src, int src_stride,
     vec_accum_hi = vabal_u8(vec_accum_hi, vget_high_u8(vec_src_16),
                             vget_high_u8(vec_ref_16));
   }
-  return horizontal_add_16x8(vec_accum_lo, vec_accum_hi);
+  return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi));
 }
 
 unsigned int vp9_sad16x16_neon(const uint8_t *src, int src_stride,
@@ -112,5 +111,20 @@ unsigned int vp9_sad16x16_neon(const uint8_t *src, int src_stride,
     vec_accum_hi = vabal_u8(vec_accum_hi, vget_high_u8(vec_src),
                             vget_high_u8(vec_ref));
   }
-  return horizontal_add_16x8(vec_accum_lo, vec_accum_hi);
+  return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi));
+}
+
+unsigned int vp9_sad8x8_neon(const uint8_t *src, int src_stride,
+                             const uint8_t *ref, int ref_stride) {
+  int i;
+  uint16x8_t vec_accum = vdupq_n_u16(0);
+
+  for (i = 0; i < 8; ++i) {
+    const uint8x8_t vec_src = vld1_u8(src);
+    const uint8x8_t vec_ref = vld1_u8(ref);
+    src += src_stride;
+    ref += ref_stride;
+    vec_accum = vabal_u8(vec_accum, vec_src, vec_ref);
+  }
+  return horizontal_add_16x8(vec_accum);
 }
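
The new kernel loads one 8-byte row of src and ref per iteration and lets
vabal_u8() accumulate widened absolute differences: each 16-bit lane sums at
most 8 values of 255 (2040 total), so the uint16x8_t accumulator cannot
overflow before the final reduction. A usage sketch, assuming a direct call
(in the encoder the call normally goes through the vp9_sad8x8 RTCD pointer):

#include <stdint.h>

unsigned int vp9_sad8x8_neon(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride);

/* SAD of two contiguous 8x8 blocks (stride 8); the wrapper name is
 * illustrative only. */
static unsigned int sad_of_8x8_blocks(const uint8_t src[64],
                                      const uint8_t ref[64]) {
  return vp9_sad8x8_neon(src, 8, ref, 8);
}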