Optimize vp9_block_error_fp_neon
authorSalome Thirot <salome.thirot@arm.com>
Wed, 1 Mar 2023 10:06:01 +0000 (10:06 +0000)
committerSalome Thirot <salome.thirot@arm.com>
Tue, 7 Mar 2023 11:29:31 +0000 (11:29 +0000)
Currently vp9_block_error_fp_neon is only used when
CONFIG_VP9_HIGHBITDEPTH is set to false. This patch optimizes the
implementation and switches it to take tran_low_t instead of int16_t,
so that the function can also be used in builds where
CONFIG_VP9_HIGHBITDEPTH is enabled.

Change-Id: Ibab7ec5f74b7652fa2ae5edf328f9ec587088fd3
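
The functional change is that tran_low_t is int32_t in high-bitdepth builds,
so the kernel now loads coefficients through load_tran_low_to_s16q rather
than vld1q_s16. A minimal sketch of what that helper does (illustration
only; the real helper lives in vpx_dsp/arm/mem_neon.h):

static INLINE int16x8_t load_tran_low_to_s16q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  // tran_low_t is int32_t: narrow two int32x4_t vectors into one int16x8_t.
  const int32x4_t v0 = vld1q_s32(buf);
  const int32x4_t v1 = vld1q_s32(buf + 4);
  return vcombine_s16(vmovn_s32(v0), vmovn_s32(v1));
#else
  // tran_low_t is int16_t: a plain 8-lane load.
  return vld1q_s16(buf);
#endif
}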

test/avg_test.cc
vp9/common/vp9_rtcd_defs.pl
vp9/encoder/arm/neon/vp9_error_neon.c
vp9/vp9cx.mk
vpx_dsp/arm/sum_neon.h

index 196522c..bcf8d0d 100644 (file)
@@ -694,16 +694,12 @@ INSTANTIATE_TEST_SUITE_P(NEON, SatdLowbdTest,
                                            make_tuple(256, &vpx_satd_neon),
                                            make_tuple(1024, &vpx_satd_neon)));
 
-// TODO(jianj): Remove the highbitdepth flag once the SIMD functions are
-// in place.
-#if !CONFIG_VP9_HIGHBITDEPTH
 INSTANTIATE_TEST_SUITE_P(
     NEON, BlockErrorTestFP,
     ::testing::Values(make_tuple(16, &vp9_block_error_fp_neon),
                       make_tuple(64, &vp9_block_error_fp_neon),
                       make_tuple(256, &vp9_block_error_fp_neon),
                       make_tuple(1024, &vp9_block_error_fp_neon)));
-#endif  // !CONFIG_VP9_HIGHBITDEPTH
 #endif  // HAVE_NEON
 
 #if HAVE_MSA
index 20a482c..c939411 100644 (file)
@@ -127,6 +127,7 @@ if (vpx_config("CONFIG_VP9_TEMPORAL_DENOISING") eq "yes") {
 add_proto qw/int64_t vp9_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
 
 add_proto qw/int64_t vp9_block_error_fp/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, int block_size";
+specialize qw/vp9_block_error_fp neon avx2 sse2/;
 
 add_proto qw/void vp9_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *round_ptr, const int16_t *quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
 specialize qw/vp9_quantize_fp neon sse2 ssse3 avx2 vsx/;
@@ -137,14 +138,10 @@ specialize qw/vp9_quantize_fp_32x32 neon ssse3 avx2 vsx/;
 if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
   specialize qw/vp9_block_error avx2 sse2/;
 
-  specialize qw/vp9_block_error_fp avx2 sse2/;
-
   add_proto qw/int64_t vp9_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
   specialize qw/vp9_highbd_block_error sse2/;
 } else {
   specialize qw/vp9_block_error avx2 msa sse2/;
-
-  specialize qw/vp9_block_error_fp neon avx2 sse2/;
 }
 
 # fdct functions
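
For context, these perl entries feed the auto-generated vp9_rtcd.h. Roughly
(a sketch of the usual rtcd output shape, not the generated header itself),
each specialized ISA gets a prototype plus a dispatch define, which is why
the specialize line can now sit outside the CONFIG_VP9_HIGHBITDEPTH branch:

int64_t vp9_block_error_fp_c(const tran_low_t *coeff,
                             const tran_low_t *dqcoeff, int block_size);
int64_t vp9_block_error_fp_neon(const tran_low_t *coeff,
                                const tran_low_t *dqcoeff, int block_size);
// On an ARM target built with NEON the generated header simply maps:
#define vp9_block_error_fp vp9_block_error_fp_neon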
index 1c75031..eb1e2e0 100644 (file)
 #include <assert.h>
 
 #include "./vp9_rtcd.h"
+#include "vpx_dsp/arm/mem_neon.h"
+#include "vpx_dsp/arm/sum_neon.h"
 
-int64_t vp9_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
-                                int block_size) {
-  int64x2_t error = vdupq_n_s64(0);
+int64_t vp9_block_error_fp_neon(const tran_low_t *coeff,
+                                const tran_low_t *dqcoeff, int block_size) {
+  uint64x2_t err_u64[2] = { vdupq_n_u64(0), vdupq_n_u64(0) };
 
-  assert(block_size >= 8);
-  assert((block_size % 8) == 0);
+  assert(block_size >= 16);
+  assert((block_size % 16) == 0);
 
   do {
-    const int16x8_t c = vld1q_s16(coeff);
-    const int16x8_t d = vld1q_s16(dqcoeff);
-    const int16x8_t diff = vsubq_s16(c, d);
-    const int16x4_t diff_lo = vget_low_s16(diff);
-    const int16x4_t diff_hi = vget_high_s16(diff);
-    // diff is 15-bits, the squares 30, so we can store 2 in 31-bits before
-    // accumulating them in 64-bits.
-    const int32x4_t err0 = vmull_s16(diff_lo, diff_lo);
-    const int32x4_t err1 = vmlal_s16(err0, diff_hi, diff_hi);
-    const int64x2_t err2 = vaddl_s32(vget_low_s32(err1), vget_high_s32(err1));
-    error = vaddq_s64(error, err2);
-    coeff += 8;
-    dqcoeff += 8;
-    block_size -= 8;
+    uint32x4_t err0, err1;
+
+    const int16x8_t c0 = load_tran_low_to_s16q(coeff);
+    const int16x8_t c1 = load_tran_low_to_s16q(coeff + 8);
+    const int16x8_t d0 = load_tran_low_to_s16q(dqcoeff);
+    const int16x8_t d1 = load_tran_low_to_s16q(dqcoeff + 8);
+
+    const uint16x8_t diff0 = vreinterpretq_u16_s16(vabdq_s16(c0, d0));
+    const uint16x8_t diff1 = vreinterpretq_u16_s16(vabdq_s16(c1, d1));
+
+    // diff is 15-bits, the squares 30, so in theory we can store 4 in 32-bits
+    // before accumulating them in 64-bits. However splitting into 2 mull, mlal
+    // pairs is beneficial since it allows us to use both Neon
+    // multiply-accumulate pipes - on CPUs that have them - rather than having
+    // a single chain of 4 instructions executing serially.
+    err0 = vmull_u16(vget_low_u16(diff0), vget_low_u16(diff0));
+    err0 = vmlal_u16(err0, vget_high_u16(diff0), vget_high_u16(diff0));
+    err_u64[0] = vpadalq_u32(err_u64[0], err0);
+
+    err1 = vmull_u16(vget_low_u16(diff1), vget_low_u16(diff1));
+    err1 = vmlal_u16(err1, vget_high_u16(diff1), vget_high_u16(diff1));
+    err_u64[1] = vpadalq_u32(err_u64[1], err1);
+
+    coeff += 16;
+    dqcoeff += 16;
+    block_size -= 16;
   } while (block_size != 0);
 
-  return vgetq_lane_s64(error, 0) + vgetq_lane_s64(error, 1);
+  return horizontal_add_uint64x2(vaddq_u64(err_u64[0], err_u64[1]));
 }
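
The kernel computes a plain sum of squared differences over the block and
must match the C reference exactly. A minimal sketch of that reference
(hedged; the upstream vp9_block_error_fp_c lives in the vp9 encoder sources):

int64_t vp9_block_error_fp_c(const tran_low_t *coeff,
                             const tran_low_t *dqcoeff, int block_size) {
  int i;
  int64_t error = 0;
  for (i = 0; i < block_size; ++i) {
    const int diff = coeff[i] - dqcoeff[i];
    error += diff * diff;  // |diff| < 2^15, so the square fits in 32 bits.
  }
  return error;
}

Using vabdq_s16 plus unsigned widening multiplies in the NEON version keeps
every intermediate value non-negative, so two 30-bit squares can be summed
in a 32-bit lane before vpadalq_u32 widens the partial sums to 64 bits.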
index ae8fb85..cccaea7 100644 (file)
@@ -136,9 +136,7 @@ endif
 
 VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_error_avx2.c
 
-ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
 VP9_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp9_error_neon.c
-endif
 VP9_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp9_frame_scale_neon.c
 VP9_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp9_quantize_neon.c
 
index 47748a8..6f513ca 100644 (file)
@@ -127,4 +127,12 @@ static INLINE uint64_t horizontal_add_int64x2(const int64x2_t a) {
 #endif
 }
 
+static INLINE uint64_t horizontal_add_uint64x2(const uint64x2_t a) {
+#if defined(__aarch64__)
+  return vaddvq_u64(a);
+#else
+  return vgetq_lane_u64(a, 0) + vgetq_lane_u64(a, 1);
+#endif
+}
+
 #endif  // VPX_VPX_DSP_ARM_SUM_NEON_H_
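
The new helper mirrors horizontal_add_int64x2 directly above it. A trivial
usage sketch (hypothetical values, for illustration only):

const uint64x2_t v = vcombine_u64(vdup_n_u64(3), vdup_n_u64(4));
const uint64_t sum = horizontal_add_uint64x2(v);  // sum == 7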