Optimize Neon high bitdepth subpel variance functions
author Salome Thirot <salome.thirot@arm.com>
Tue, 7 Feb 2023 14:08:33 +0000 (14:08 +0000)
committer Salome Thirot <salome.thirot@arm.com>
Mon, 13 Feb 2023 20:19:30 +0000 (20:19 +0000)
Use the same general code style as in the standard bitdepth Neon
implementation. Additionally, do not unnecessarily widen to 32-bit data
types when doing bilinear filtering, allowing us to process twice as
many elements per instruction.
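
For illustration, the inner blend step before and after this change
(abridged from the diff below):

    // Before: widen to 32 bits, 4 elements per multiply-accumulate.
    uint32x4_t sum1_u32 = vmull_u16(filter1_u16, vget_low_u16(src1_u16));
    sum1_u32 = vmlal_u16(sum1_u32, filter2_u16, vget_low_u16(src2_u16));
    out1_u16 = vshrn_n_u32(vaddq_u32(sum1_u32, round_u32), FILTER_BITS);

    // After: stay in 16 bits, 8 elements per multiply-accumulate.
    uint16x8_t blend = vmulq_u16(s0, f0);
    blend = vmlaq_u16(blend, s1, f1);
    blend = vrshrq_n_u16(blend, 3);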

Change-Id: I1e178991d2aa71f5f77a376e145d19257481e90f

vpx_dsp/arm/highbd_subpel_variance_neon.c [new file with mode: 0644]
vpx_dsp/arm/highbd_variance_neon.c
vpx_dsp/arm/mem_neon.h
vpx_dsp/vpx_dsp.mk

diff --git a/vpx_dsp/arm/highbd_subpel_variance_neon.c b/vpx_dsp/arm/highbd_subpel_variance_neon.c
new file mode 100644 (file)
index 0000000..81943ee
--- /dev/null
@@ -0,0 +1,307 @@
+/*
+ *  Copyright (c) 2023 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_config.h"
+
+#include "vpx/vpx_integer.h"
+#include "vpx_dsp/arm/mem_neon.h"
+
+// The bilinear filters look like this:
+//
+// {{ 128,  0 }, { 112, 16 }, { 96, 32 }, { 80,  48 },
+//  {  64, 64 }, {  48, 80 }, { 32, 96 }, { 16, 112 }}
+//
+// We can factor out the greatest common divisor (16), such that the sum of
+// each pair of weights is 8 instead of 128. The benefits of this are
+// two-fold:
+//
+// 1) We can infer the filter values from the filter_offset parameter in the
+// bilinear filter functions below; we don't have to actually load the values
+// from memory:
+// f0 = 8 - filter_offset
+// f1 = filter_offset
+//
+// 2) Scaling the pixel values by 8 instead of 128 enables us to operate on
+// 16-bit data types at all times, rather than widening out to 32-bit and
+// requiring double the number of data processing instructions. (12-bit * 8 =
+// 15-bit.)
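+//
+// For example, filter_offset == 3 selects the original pair { 80, 48 }, which
+// becomes { 5, 3 } = { 8 - 3, 3 } after dividing by 16; the final rounding
+// shift is then 3 bits (8 == 1 << 3) rather than FILTER_BITS (7).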
+
+// Process a block exactly 4 wide and a multiple of 2 high.
+static void highbd_var_filter_block2d_bil_w4(const uint16_t *src_ptr,
+                                             uint16_t *dst_ptr, int src_stride,
+                                             int pixel_step, int dst_height,
+                                             int filter_offset) {
+  const uint16x8_t f0 = vdupq_n_u16(8 - filter_offset);
+  const uint16x8_t f1 = vdupq_n_u16(filter_offset);
+
+  int i = dst_height;
+  do {
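+    // Load and filter two rows (2 x 4 pixels) per loop iteration.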
+    uint16x8_t s0 = load_unaligned_u16q(src_ptr, src_stride);
+    uint16x8_t s1 = load_unaligned_u16q(src_ptr + pixel_step, src_stride);
+
+    uint16x8_t blend = vmulq_u16(s0, f0);
+    blend = vmlaq_u16(blend, s1, f1);
+    blend = vrshrq_n_u16(blend, 3);
+
+    vst1q_u16(dst_ptr, blend);
+
+    src_ptr += 2 * src_stride;
+    dst_ptr += 8;
+    i -= 2;
+  } while (i != 0);
+}
+
+// Process a block of any height whose width is a multiple of 8.
+static void highbd_var_filter_block2d_bil_large(const uint16_t *src_ptr,
+                                                uint16_t *dst_ptr,
+                                                int src_stride, int pixel_step,
+                                                int dst_width, int dst_height,
+                                                int filter_offset) {
+  const uint16x8_t f0 = vdupq_n_u16(8 - filter_offset);
+  const uint16x8_t f1 = vdupq_n_u16(filter_offset);
+
+  int i = dst_height;
+  do {
+    int j = 0;
+    do {
+      uint16x8_t s0 = vld1q_u16(src_ptr + j);
+      uint16x8_t s1 = vld1q_u16(src_ptr + j + pixel_step);
+
+      uint16x8_t blend = vmulq_u16(s0, f0);
+      blend = vmlaq_u16(blend, s1, f1);
+      blend = vrshrq_n_u16(blend, 3);
+
+      vst1q_u16(dst_ptr + j, blend);
+
+      j += 8;
+    } while (j < dst_width);
+
+    src_ptr += src_stride;
+    dst_ptr += dst_width;
+  } while (--i != 0);
+}
+
+static void highbd_var_filter_block2d_bil_w8(const uint16_t *src_ptr,
+                                             uint16_t *dst_ptr, int src_stride,
+                                             int pixel_step, int dst_height,
+                                             int filter_offset) {
+  highbd_var_filter_block2d_bil_large(src_ptr, dst_ptr, src_stride, pixel_step,
+                                      8, dst_height, filter_offset);
+}
+
+static void highbd_var_filter_block2d_bil_w16(const uint16_t *src_ptr,
+                                              uint16_t *dst_ptr, int src_stride,
+                                              int pixel_step, int dst_height,
+                                              int filter_offset) {
+  highbd_var_filter_block2d_bil_large(src_ptr, dst_ptr, src_stride, pixel_step,
+                                      16, dst_height, filter_offset);
+}
+
+static void highbd_var_filter_block2d_bil_w32(const uint16_t *src_ptr,
+                                              uint16_t *dst_ptr, int src_stride,
+                                              int pixel_step, int dst_height,
+                                              int filter_offset) {
+  highbd_var_filter_block2d_bil_large(src_ptr, dst_ptr, src_stride, pixel_step,
+                                      32, dst_height, filter_offset);
+}
+
+static void highbd_var_filter_block2d_bil_w64(const uint16_t *src_ptr,
+                                              uint16_t *dst_ptr, int src_stride,
+                                              int pixel_step, int dst_height,
+                                              int filter_offset) {
+  highbd_var_filter_block2d_bil_large(src_ptr, dst_ptr, src_stride, pixel_step,
+                                      64, dst_height, filter_offset);
+}
+
+#define HBD_SUBPEL_VARIANCE_WXH_NEON(w, h, padding)                          \
+  unsigned int vpx_highbd_8_sub_pixel_variance##w##x##h##_neon(              \
+      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
+      const uint8_t *ref, int ref_stride, uint32_t *sse) {                   \
+    uint16_t tmp0[w * (h + padding)];                                        \
+    uint16_t tmp1[w * h];                                                    \
+    uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src);                            \
+                                                                             \
+    highbd_var_filter_block2d_bil_w##w(src_ptr, tmp0, src_stride, 1,         \
+                                       (h + padding), xoffset);              \
+    highbd_var_filter_block2d_bil_w##w(tmp0, tmp1, w, w, h, yoffset);        \
+                                                                             \
+    return vpx_highbd_8_variance##w##x##h(CONVERT_TO_BYTEPTR(tmp1), w, ref,  \
+                                          ref_stride, sse);                  \
+  }                                                                          \
+                                                                             \
+  unsigned int vpx_highbd_10_sub_pixel_variance##w##x##h##_neon(             \
+      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
+      const uint8_t *ref, int ref_stride, uint32_t *sse) {                   \
+    uint16_t tmp0[w * (h + padding)];                                        \
+    uint16_t tmp1[w * h];                                                    \
+    uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src);                            \
+                                                                             \
+    highbd_var_filter_block2d_bil_w##w(src_ptr, tmp0, src_stride, 1,         \
+                                       (h + padding), xoffset);              \
+    highbd_var_filter_block2d_bil_w##w(tmp0, tmp1, w, w, h, yoffset);        \
+                                                                             \
+    return vpx_highbd_10_variance##w##x##h(CONVERT_TO_BYTEPTR(tmp1), w, ref, \
+                                           ref_stride, sse);                 \
+  }                                                                          \
+  unsigned int vpx_highbd_12_sub_pixel_variance##w##x##h##_neon(             \
+      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
+      const uint8_t *ref, int ref_stride, uint32_t *sse) {                   \
+    uint16_t tmp0[w * (h + padding)];                                        \
+    uint16_t tmp1[w * h];                                                    \
+    uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src);                            \
+                                                                             \
+    highbd_var_filter_block2d_bil_w##w(src_ptr, tmp0, src_stride, 1,         \
+                                       (h + padding), xoffset);              \
+    highbd_var_filter_block2d_bil_w##w(tmp0, tmp1, w, w, h, yoffset);        \
+                                                                             \
+    return vpx_highbd_12_variance##w##x##h(CONVERT_TO_BYTEPTR(tmp1), w, ref, \
+                                           ref_stride, sse);                 \
+  }
+
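+// For example, HBD_SUBPEL_VARIANCE_WXH_NEON(8, 8, 1) expands in outline to:
+//
+//   highbd_var_filter_block2d_bil_w8(src_ptr, tmp0, src_stride, 1, 9, xoffset);
+//   highbd_var_filter_block2d_bil_w8(tmp0, tmp1, 8, 8, 8, yoffset);
+//   return vpx_highbd_8_variance8x8(CONVERT_TO_BYTEPTR(tmp1), 8, ref,
+//                                   ref_stride, sse);
+//
+// pixel_step is 1 in the first (horizontal) pass and w in the second
+// (vertical) pass, so the same filter kernels serve both directions.
+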
+// 4x<h> blocks are processed two rows at a time, so they need an extra row of
+// padding: the first pass writes h + 2 rows (an even count) rather than the
+// h + 1 rows the second pass actually reads.
+HBD_SUBPEL_VARIANCE_WXH_NEON(4, 4, 2)
+HBD_SUBPEL_VARIANCE_WXH_NEON(4, 8, 2)
+
+HBD_SUBPEL_VARIANCE_WXH_NEON(8, 4, 1)
+HBD_SUBPEL_VARIANCE_WXH_NEON(8, 8, 1)
+HBD_SUBPEL_VARIANCE_WXH_NEON(8, 16, 1)
+
+HBD_SUBPEL_VARIANCE_WXH_NEON(16, 8, 1)
+HBD_SUBPEL_VARIANCE_WXH_NEON(16, 16, 1)
+HBD_SUBPEL_VARIANCE_WXH_NEON(16, 32, 1)
+
+HBD_SUBPEL_VARIANCE_WXH_NEON(32, 16, 1)
+HBD_SUBPEL_VARIANCE_WXH_NEON(32, 32, 1)
+HBD_SUBPEL_VARIANCE_WXH_NEON(32, 64, 1)
+
+HBD_SUBPEL_VARIANCE_WXH_NEON(64, 32, 1)
+HBD_SUBPEL_VARIANCE_WXH_NEON(64, 64, 1)
+
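+// comp_pred = (pred + ref + 1) >> 1 for each pixel, i.e. a rounded average of
+// the two input blocks.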
+void vpx_highbd_comp_avg_pred_neon(uint16_t *comp_pred, const uint16_t *pred,
+                                   int width, int height, const uint16_t *ref,
+                                   int ref_stride) {
+  int i, j;
+  uint32x4_t one_u32 = vdupq_n_u32(1);
+  if (width >= 8) {
+    for (i = 0; i < height; ++i) {
+      for (j = 0; j < width; j += 8) {
+        const uint16x8_t pred_u16 = vld1q_u16(&pred[j]);
+        const uint16x8_t ref_u16 = vld1q_u16(&ref[j]);
+        const uint32x4_t sum1_u32 =
+            vaddl_u16(vget_low_u16(pred_u16), vget_low_u16(ref_u16));
+        const uint32x4_t sum2_u32 =
+            vaddl_u16(vget_high_u16(pred_u16), vget_high_u16(ref_u16));
+        const uint16x4_t sum1_u16 =
+            vshrn_n_u32(vaddq_u32(sum1_u32, one_u32), 1);
+        const uint16x4_t sum2_u16 =
+            vshrn_n_u32(vaddq_u32(sum2_u32, one_u32), 1);
+        const uint16x8_t vcomp_pred = vcombine_u16(sum1_u16, sum2_u16);
+        vst1q_u16(&comp_pred[j], vcomp_pred);
+      }
+      comp_pred += width;
+      pred += width;
+      ref += ref_stride;
+    }
+  } else {
+    assert(width >= 4);
+    for (i = 0; i < height; ++i) {
+      for (j = 0; j < width; j += 4) {
+        const uint16x4_t pred_u16 = vld1_u16(&pred[j]);
+        const uint16x4_t ref_u16 = vld1_u16(&ref[j]);
+        const uint32x4_t sum_u32 = vaddl_u16(pred_u16, ref_u16);
+        const uint16x4_t vcomp_pred =
+            vshrn_n_u32(vaddq_u32(sum_u32, one_u32), 1);
+        vst1_u16(&comp_pred[j], vcomp_pred);
+      }
+      comp_pred += width;
+      pred += width;
+      ref += ref_stride;
+    }
+  }
+}
+
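+// Like HBD_SUBPEL_VARIANCE_WXH_NEON above, but the bilinear result in tmp1 is
+// averaged with second_pred (into tmp0) before the variance is computed.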
+#define HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(w, h, padding)                        \
+  uint32_t vpx_highbd_8_sub_pixel_avg_variance##w##x##h##_neon(                \
+      const uint8_t *src, int src_stride, int xoffset, int yoffset,            \
+      const uint8_t *ref, int ref_stride, uint32_t *sse,                       \
+      const uint8_t *second_pred) {                                            \
+    uint16_t tmp0[w * (h + padding)];                                          \
+    uint16_t tmp1[w * h];                                                      \
+    uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src);                              \
+                                                                               \
+    highbd_var_filter_block2d_bil_w##w(src_ptr, tmp0, src_stride, 1,           \
+                                       (h + padding), xoffset);                \
+    highbd_var_filter_block2d_bil_w##w(tmp0, tmp1, w, w, h, yoffset);          \
+                                                                               \
+    vpx_highbd_comp_avg_pred_neon(tmp0, CONVERT_TO_SHORTPTR(second_pred), w,   \
+                                  h, tmp1, w);                                 \
+                                                                               \
+    return vpx_highbd_8_variance##w##x##h##_neon(CONVERT_TO_BYTEPTR(tmp0), w,  \
+                                                 ref, ref_stride, sse);        \
+  }                                                                            \
+                                                                               \
+  uint32_t vpx_highbd_10_sub_pixel_avg_variance##w##x##h##_neon(               \
+      const uint8_t *src, int src_stride, int xoffset, int yoffset,            \
+      const uint8_t *ref, int ref_stride, uint32_t *sse,                       \
+      const uint8_t *second_pred) {                                            \
+    uint16_t tmp0[w * (h + padding)];                                          \
+    uint16_t tmp1[w * h];                                                      \
+    uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src);                              \
+                                                                               \
+    highbd_var_filter_block2d_bil_w##w(src_ptr, tmp0, src_stride, 1,           \
+                                       (h + padding), xoffset);                \
+    highbd_var_filter_block2d_bil_w##w(tmp0, tmp1, w, w, h, yoffset);          \
+                                                                               \
+    vpx_highbd_comp_avg_pred_neon(tmp0, CONVERT_TO_SHORTPTR(second_pred), w,   \
+                                  h, tmp1, w);                                 \
+                                                                               \
+    return vpx_highbd_10_variance##w##x##h##_neon(CONVERT_TO_BYTEPTR(tmp0), w, \
+                                                  ref, ref_stride, sse);       \
+  }                                                                            \
+                                                                               \
+  uint32_t vpx_highbd_12_sub_pixel_avg_variance##w##x##h##_neon(               \
+      const uint8_t *src, int src_stride, int xoffset, int yoffset,            \
+      const uint8_t *ref, int ref_stride, uint32_t *sse,                       \
+      const uint8_t *second_pred) {                                            \
+    uint16_t tmp0[w * (h + padding)];                                          \
+    uint16_t tmp1[w * h];                                                      \
+    uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src);                              \
+                                                                               \
+    highbd_var_filter_block2d_bil_w##w(src_ptr, tmp0, src_stride, 1,           \
+                                       (h + padding), xoffset);                \
+    highbd_var_filter_block2d_bil_w##w(tmp0, tmp1, w, w, h, yoffset);          \
+                                                                               \
+    vpx_highbd_comp_avg_pred_neon(tmp0, CONVERT_TO_SHORTPTR(second_pred), w,   \
+                                  h, tmp1, w);                                 \
+                                                                               \
+    return vpx_highbd_12_variance##w##x##h##_neon(CONVERT_TO_BYTEPTR(tmp0), w, \
+                                                  ref, ref_stride, sse);       \
+  }
+
+// 4x<h> blocks are processed two rows at a time, so they need an extra row of
+// padding: the first pass writes h + 2 rows (an even count) rather than the
+// h + 1 rows the second pass actually reads.
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(4, 4, 2)
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(4, 8, 2)
+
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(8, 4, 1)
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(8, 8, 1)
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(8, 16, 1)
+
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(16, 8, 1)
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(16, 16, 1)
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(16, 32, 1)
+
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(32, 16, 1)
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(32, 32, 1)
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(32, 64, 1)
+
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(64, 32, 1)
+HBD_SUBPEL_AVG_VARIANCE_WXH_NEON(64, 64, 1)
diff --git a/vpx_dsp/arm/highbd_variance_neon.c b/vpx_dsp/arm/highbd_variance_neon.c
index 96a35af..985cc35 100644 (file)
 #include "vpx_dsp/arm/sum_neon.h"
 #include "vpx_ports/mem.h"
 
-static const uint8_t bilinear_filters[8][2] = {
-  { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 },
-  { 64, 64 }, { 48, 80 },  { 32, 96 }, { 16, 112 },
-};
-
 static INLINE void highbd_variance16(const uint16_t *src_ptr, int src_stride,
                                      const uint16_t *ref_ptr, int ref_stride,
                                      int w, int h, uint64_t *sse,
@@ -136,7 +131,7 @@ static INLINE void highbd_12_variance(const uint8_t *src8_ptr, int src_stride,
   *sum = (int)ROUND_POWER_OF_TWO(sum_long, 4);
 }
 
-#define HIGHBD_VAR(W, H)                                                    \
+#define HBD_VARIANCE_WXH_NEON(W, H)                                         \
   uint32_t vpx_highbd_8_variance##W##x##H##_neon(                           \
       const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,       \
       int ref_stride, uint32_t *sse) {                                      \
@@ -218,274 +213,19 @@ static INLINE void highbd_12_variance(const uint8_t *src8_ptr, int src_stride,
     return *sse;                                                            \
   }
 
-static INLINE void highbd_var_filter_block2d_bil_first_pass(
-    const uint8_t *src_ptr8, uint16_t *output_ptr,
-    unsigned int src_pixels_per_line, int pixel_step,
-    unsigned int output_height, unsigned int output_width,
-    const uint8_t *filter) {
-  uint32_t i, j;
-  uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src_ptr8);
-
-  uint32x4_t round_u32 = vshlq_n_u32(vdupq_n_u32(1), FILTER_BITS - 1);
-  uint16x4_t filter1_u16 = vdup_n_u16(filter[0]);
-  uint16x4_t filter2_u16 = vdup_n_u16(filter[1]);
-
-  if (output_width >= 8) {
-    for (i = 0; i < output_height; ++i) {
-      for (j = 0; j < output_width; j += 8) {
-        const uint16x8_t src1_u16 = vld1q_u16(&src_ptr[j]);
-        const uint16x8_t src2_u16 = vld1q_u16(&src_ptr[j + pixel_step]);
-        uint32x4_t sum1_u32 = vmull_u16(filter1_u16, vget_low_u16(src1_u16));
-        uint32x4_t sum2_u32 = vmull_u16(filter1_u16, vget_high_u16(src1_u16));
-        uint16x4_t out1_u16;
-        uint16x4_t out2_u16;
-        sum1_u32 = vmlal_u16(sum1_u32, filter2_u16, vget_low_u16(src2_u16));
-        sum2_u32 = vmlal_u16(sum2_u32, filter2_u16, vget_high_u16(src2_u16));
-        out1_u16 = vshrn_n_u32(vaddq_u32(sum1_u32, round_u32), FILTER_BITS);
-        out2_u16 = vshrn_n_u32(vaddq_u32(sum2_u32, round_u32), FILTER_BITS);
-        vst1q_u16(&output_ptr[j], vcombine_u16(out1_u16, out2_u16));
-      }
-      // Next row...
-      src_ptr += src_pixels_per_line;
-      output_ptr += output_width;
-    }
-  } else {
-    assert(output_width >= 4);
-    for (i = 0; i < output_height; ++i) {
-      for (j = 0; j < output_width; j += 4) {
-        const uint16x4_t src1_u16 = vld1_u16(&src_ptr[j]);
-        const uint16x4_t src2_u16 = vld1_u16(&src_ptr[j + pixel_step]);
-        uint32x4_t sum_u32 = vmull_u16(filter1_u16, src1_u16);
-        uint16x4_t out_u16;
-        sum_u32 = vmlal_u16(sum_u32, filter2_u16, src2_u16);
-        out_u16 = vshrn_n_u32(vaddq_u32(sum_u32, round_u32), FILTER_BITS);
-        vst1_u16(&output_ptr[j], out_u16);
-      }
-      // Next row...
-      src_ptr += src_pixels_per_line;
-      output_ptr += output_width;
-    }
-  }
-}
-
-static INLINE void highbd_var_filter_block2d_bil_second_pass(
-    const uint16_t *src_ptr, uint16_t *output_ptr,
-    unsigned int src_pixels_per_line, unsigned int pixel_step,
-    unsigned int output_height, unsigned int output_width,
-    const uint8_t *filter) {
-  uint32_t i, j;
-
-  uint32x4_t round_u32 = vshlq_n_u32(vdupq_n_u32(1), FILTER_BITS - 1);
-  uint16x4_t filter1_u16 = vdup_n_u16(filter[0]);
-  uint16x4_t filter2_u16 = vdup_n_u16(filter[1]);
-
-  if (output_width >= 8) {
-    for (i = 0; i < output_height; ++i) {
-      for (j = 0; j < output_width; j += 8) {
-        const uint16x8_t src1_u16 = vld1q_u16(&src_ptr[j]);
-        const uint16x8_t src2_u16 = vld1q_u16(&src_ptr[j + pixel_step]);
-        uint32x4_t sum1_u32 = vmull_u16(filter1_u16, vget_low_u16(src1_u16));
-        uint32x4_t sum2_u32 = vmull_u16(filter1_u16, vget_high_u16(src1_u16));
-        uint16x4_t out1_u16;
-        uint16x4_t out2_u16;
-        sum1_u32 = vmlal_u16(sum1_u32, filter2_u16, vget_low_u16(src2_u16));
-        sum2_u32 = vmlal_u16(sum2_u32, filter2_u16, vget_high_u16(src2_u16));
-        out1_u16 = vshrn_n_u32(vaddq_u32(sum1_u32, round_u32), FILTER_BITS);
-        out2_u16 = vshrn_n_u32(vaddq_u32(sum2_u32, round_u32), FILTER_BITS);
-        vst1q_u16(&output_ptr[j], vcombine_u16(out1_u16, out2_u16));
-      }
-      // Next row...
-      src_ptr += src_pixels_per_line;
-      output_ptr += output_width;
-    }
-  } else {
-    assert(output_width >= 4);
-    for (i = 0; i < output_height; ++i) {
-      for (j = 0; j < output_width; j += 4) {
-        const uint16x4_t src1_u16 = vld1_u16(&src_ptr[j]);
-        const uint16x4_t src2_u16 = vld1_u16(&src_ptr[j + pixel_step]);
-        uint32x4_t sum_u32 = vmull_u16(filter1_u16, src1_u16);
-        uint16x4_t out_u16;
-        sum_u32 = vmlal_u16(sum_u32, filter2_u16, src2_u16);
-        out_u16 = vshrn_n_u32(vaddq_u32(sum_u32, round_u32), FILTER_BITS);
-        vst1_u16(&output_ptr[j], out_u16);
-      }
-      // Next row...
-      src_ptr += src_pixels_per_line;
-      output_ptr += output_width;
-    }
-  }
-}
-
-#define HIGHBD_SUBPIX_VAR(W, H)                                                \
-  uint32_t vpx_highbd_8_sub_pixel_variance##W##x##H##_neon(                    \
-      const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,      \
-      const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) {                 \
-    uint16_t fdata3[(H + 1) * W];                                              \
-    uint16_t temp2[H * W];                                                     \
-                                                                               \
-    highbd_var_filter_block2d_bil_first_pass(                                  \
-        src_ptr, fdata3, src_stride, 1, H + 1, W, bilinear_filters[x_offset]); \
-    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,       \
-                                              bilinear_filters[y_offset]);     \
-                                                                               \
-    return vpx_highbd_8_variance##W##x##H##_neon(CONVERT_TO_BYTEPTR(temp2), W, \
-                                                 ref_ptr, ref_stride, sse);    \
-  }                                                                            \
-                                                                               \
-  uint32_t vpx_highbd_10_sub_pixel_variance##W##x##H##_neon(                   \
-      const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,      \
-      const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) {                 \
-    uint16_t fdata3[(H + 1) * W];                                              \
-    uint16_t temp2[H * W];                                                     \
-                                                                               \
-    highbd_var_filter_block2d_bil_first_pass(                                  \
-        src_ptr, fdata3, src_stride, 1, H + 1, W, bilinear_filters[x_offset]); \
-    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,       \
-                                              bilinear_filters[y_offset]);     \
-                                                                               \
-    return vpx_highbd_10_variance##W##x##H##_neon(                             \
-        CONVERT_TO_BYTEPTR(temp2), W, ref_ptr, ref_stride, sse);               \
-  }                                                                            \
-                                                                               \
-  uint32_t vpx_highbd_12_sub_pixel_variance##W##x##H##_neon(                   \
-      const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,      \
-      const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) {                 \
-    uint16_t fdata3[(H + 1) * W];                                              \
-    uint16_t temp2[H * W];                                                     \
-                                                                               \
-    highbd_var_filter_block2d_bil_first_pass(                                  \
-        src_ptr, fdata3, src_stride, 1, H + 1, W, bilinear_filters[x_offset]); \
-    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,       \
-                                              bilinear_filters[y_offset]);     \
-                                                                               \
-    return vpx_highbd_12_variance##W##x##H##_neon(                             \
-        CONVERT_TO_BYTEPTR(temp2), W, ref_ptr, ref_stride, sse);               \
-  }
-
-#define HIGHBD_SUBPIX_AVG_VAR(W, H)                                            \
-  uint32_t vpx_highbd_8_sub_pixel_avg_variance##W##x##H##_neon(                \
-      const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,      \
-      const uint8_t *ref_ptr, int ref_stride, uint32_t *sse,                   \
-      const uint8_t *second_pred) {                                            \
-    uint16_t fdata3[(H + 1) * W];                                              \
-    uint16_t temp2[H * W];                                                     \
-    DECLARE_ALIGNED(16, uint16_t, temp3[H * W]);                               \
-                                                                               \
-    highbd_var_filter_block2d_bil_first_pass(                                  \
-        src_ptr, fdata3, src_stride, 1, H + 1, W, bilinear_filters[x_offset]); \
-    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,       \
-                                              bilinear_filters[y_offset]);     \
-                                                                               \
-    vpx_highbd_comp_avg_pred_neon(temp3, CONVERT_TO_SHORTPTR(second_pred), W,  \
-                                  H, temp2, W);                                \
-                                                                               \
-    return vpx_highbd_8_variance##W##x##H##_neon(CONVERT_TO_BYTEPTR(temp3), W, \
-                                                 ref_ptr, ref_stride, sse);    \
-  }                                                                            \
-                                                                               \
-  uint32_t vpx_highbd_10_sub_pixel_avg_variance##W##x##H##_neon(               \
-      const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,      \
-      const uint8_t *ref_ptr, int ref_stride, uint32_t *sse,                   \
-      const uint8_t *second_pred) {                                            \
-    uint16_t fdata3[(H + 1) * W];                                              \
-    uint16_t temp2[H * W];                                                     \
-    DECLARE_ALIGNED(16, uint16_t, temp3[H * W]);                               \
-                                                                               \
-    highbd_var_filter_block2d_bil_first_pass(                                  \
-        src_ptr, fdata3, src_stride, 1, H + 1, W, bilinear_filters[x_offset]); \
-    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,       \
-                                              bilinear_filters[y_offset]);     \
-                                                                               \
-    vpx_highbd_comp_avg_pred_neon(temp3, CONVERT_TO_SHORTPTR(second_pred), W,  \
-                                  H, temp2, W);                                \
-                                                                               \
-    return vpx_highbd_10_variance##W##x##H##_neon(                             \
-        CONVERT_TO_BYTEPTR(temp3), W, ref_ptr, ref_stride, sse);               \
-  }                                                                            \
-                                                                               \
-  uint32_t vpx_highbd_12_sub_pixel_avg_variance##W##x##H##_neon(               \
-      const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,      \
-      const uint8_t *ref_ptr, int ref_stride, uint32_t *sse,                   \
-      const uint8_t *second_pred) {                                            \
-    uint16_t fdata3[(H + 1) * W];                                              \
-    uint16_t temp2[H * W];                                                     \
-    DECLARE_ALIGNED(16, uint16_t, temp3[H * W]);                               \
-                                                                               \
-    highbd_var_filter_block2d_bil_first_pass(                                  \
-        src_ptr, fdata3, src_stride, 1, H + 1, W, bilinear_filters[x_offset]); \
-    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,       \
-                                              bilinear_filters[y_offset]);     \
-                                                                               \
-    vpx_highbd_comp_avg_pred_neon(temp3, CONVERT_TO_SHORTPTR(second_pred), W,  \
-                                  H, temp2, W);                                \
-                                                                               \
-    return vpx_highbd_12_variance##W##x##H##_neon(                             \
-        CONVERT_TO_BYTEPTR(temp3), W, ref_ptr, ref_stride, sse);               \
-  }
-
-void vpx_highbd_comp_avg_pred_neon(uint16_t *comp_pred, const uint16_t *pred,
-                                   int width, int height, const uint16_t *ref,
-                                   int ref_stride) {
-  int i, j;
-  uint32x4_t one_u32 = vdupq_n_u32(1);
-  if (width >= 8) {
-    for (i = 0; i < height; ++i) {
-      for (j = 0; j < width; j += 8) {
-        const uint16x8_t pred_u16 = vld1q_u16(&pred[j]);
-        const uint16x8_t ref_u16 = vld1q_u16(&ref[j]);
-        const uint32x4_t sum1_u32 =
-            vaddl_u16(vget_low_u16(pred_u16), vget_low_u16(ref_u16));
-        const uint32x4_t sum2_u32 =
-            vaddl_u16(vget_high_u16(pred_u16), vget_high_u16(ref_u16));
-        const uint16x4_t sum1_u16 =
-            vshrn_n_u32(vaddq_u32(sum1_u32, one_u32), 1);
-        const uint16x4_t sum2_u16 =
-            vshrn_n_u32(vaddq_u32(sum2_u32, one_u32), 1);
-        const uint16x8_t vcomp_pred = vcombine_u16(sum1_u16, sum2_u16);
-        vst1q_u16(&comp_pred[j], vcomp_pred);
-      }
-      comp_pred += width;
-      pred += width;
-      ref += ref_stride;
-    }
-  } else {
-    assert(width >= 4);
-    for (i = 0; i < height; ++i) {
-      for (j = 0; j < width; j += 4) {
-        const uint16x4_t pred_u16 = vld1_u16(&pred[j]);
-        const uint16x4_t ref_u16 = vld1_u16(&ref[j]);
-        const uint32x4_t sum_u32 = vaddl_u16(pred_u16, ref_u16);
-        const uint16x4_t vcomp_pred =
-            vshrn_n_u32(vaddq_u32(sum_u32, one_u32), 1);
-        vst1_u16(&comp_pred[j], vcomp_pred);
-      }
-      comp_pred += width;
-      pred += width;
-      ref += ref_stride;
-    }
-  }
-}
-
-/* All three forms of the variance are available in the same sizes. */
-#define HIGHBD_VARIANCES(W, H) \
-  HIGHBD_VAR(W, H)             \
-  HIGHBD_SUBPIX_VAR(W, H)      \
-  HIGHBD_SUBPIX_AVG_VAR(W, H)
-
-HIGHBD_VARIANCES(64, 64)
-HIGHBD_VARIANCES(64, 32)
-HIGHBD_VARIANCES(32, 64)
-HIGHBD_VARIANCES(32, 32)
-HIGHBD_VARIANCES(32, 16)
-HIGHBD_VARIANCES(16, 32)
-HIGHBD_VARIANCES(16, 16)
-HIGHBD_VARIANCES(16, 8)
-HIGHBD_VARIANCES(8, 16)
-HIGHBD_VARIANCES(8, 8)
-HIGHBD_VARIANCES(8, 4)
-HIGHBD_VARIANCES(4, 8)
-HIGHBD_VARIANCES(4, 4)
+HBD_VARIANCE_WXH_NEON(64, 64)
+HBD_VARIANCE_WXH_NEON(64, 32)
+HBD_VARIANCE_WXH_NEON(32, 64)
+HBD_VARIANCE_WXH_NEON(32, 32)
+HBD_VARIANCE_WXH_NEON(32, 16)
+HBD_VARIANCE_WXH_NEON(16, 32)
+HBD_VARIANCE_WXH_NEON(16, 16)
+HBD_VARIANCE_WXH_NEON(16, 8)
+HBD_VARIANCE_WXH_NEON(8, 16)
+HBD_VARIANCE_WXH_NEON(8, 8)
+HBD_VARIANCE_WXH_NEON(8, 4)
+HBD_VARIANCE_WXH_NEON(4, 8)
+HBD_VARIANCE_WXH_NEON(4, 4)
 
 HIGHBD_GET_VAR(8)
 HIGHBD_GET_VAR(16)
diff --git a/vpx_dsp/arm/mem_neon.h b/vpx_dsp/arm/mem_neon.h
index 19cfc7c..866be74 100644 (file)
@@ -126,6 +126,20 @@ static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf,
   return vreinterpret_u8_u32(a_u32);
 }
 
+// Load 2 sets of 4 uint16_t when alignment is not guaranteed.
+static INLINE uint16x8_t load_unaligned_u16q(const uint16_t *buf,
+                                             ptrdiff_t stride) {
+  uint64_t a;
+  uint64x2_t a_u64;
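+  // When the stride is 4 the two rows are contiguous, so they can be loaded
+  // with a single 128-bit load.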
+  if (stride == 4) return vld1q_u16(buf);
+  memcpy(&a, buf, 8);
+  buf += stride;
+  a_u64 = vdupq_n_u64(a);
+  memcpy(&a, buf, 8);
+  a_u64 = vsetq_lane_u64(a, a_u64, 1);
+  return vreinterpretq_u16_u64(a_u64);
+}
+
 // Store 2 sets of 4 bytes when alignment is not guaranteed.
 static INLINE void store_unaligned_u8(uint8_t *buf, ptrdiff_t stride,
                                       const uint8x8_t a) {
diff --git a/vpx_dsp/vpx_dsp.mk b/vpx_dsp/vpx_dsp.mk
index 3b04e97..f10b7cc 100644 (file)
@@ -434,6 +434,7 @@ DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_variance_sse2.c
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_variance_impl_sse2.asm
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_subpel_variance_impl_sse2.asm
 DSP_SRCS-$(HAVE_NEON)   += arm/highbd_variance_neon.c
+DSP_SRCS-$(HAVE_NEON)   += arm/highbd_subpel_variance_neon.c
 endif  # CONFIG_VP9_HIGHBITDEPTH
 endif  # CONFIG_ENCODERS || CONFIG_POSTPROC || CONFIG_VP9_POSTPROC