VarianceParams(2, 3, &vpx_variance4x8_neon),
VarianceParams(2, 2, &vpx_variance4x4_neon)));
+#if HAVE_NEON_DOTPROD
+INSTANTIATE_TEST_SUITE_P(
+ NEON_DOTPROD, VpxVarianceTest,
+ ::testing::Values(VarianceParams(6, 6, &vpx_variance64x64_neon_dotprod),
+ VarianceParams(6, 5, &vpx_variance64x32_neon_dotprod),
+ VarianceParams(5, 6, &vpx_variance32x64_neon_dotprod),
+ VarianceParams(5, 5, &vpx_variance32x32_neon_dotprod),
+ VarianceParams(5, 4, &vpx_variance32x16_neon_dotprod),
+ VarianceParams(4, 5, &vpx_variance16x32_neon_dotprod),
+ VarianceParams(4, 4, &vpx_variance16x16_neon_dotprod),
+ VarianceParams(4, 3, &vpx_variance16x8_neon_dotprod),
+ VarianceParams(3, 4, &vpx_variance8x16_neon_dotprod),
+ VarianceParams(3, 3, &vpx_variance8x8_neon_dotprod),
+ VarianceParams(3, 2, &vpx_variance8x4_neon_dotprod),
+ VarianceParams(2, 3, &vpx_variance4x8_neon_dotprod),
+ VarianceParams(2, 2, &vpx_variance4x4_neon_dotprod)));
+#endif // HAVE_NEON_DOTPROD
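For readers decoding these parameter lists: the first two VarianceParams arguments are log2(width) and log2(height), as the paired function names above confirm, e.g.:

VarianceParams(5, 4, &vpx_variance32x16_neon_dotprod)  // (1 << 5) x (1 << 4) = 32x16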
+
INSTANTIATE_TEST_SUITE_P(
NEON, VpxSubpelVarianceTest,
::testing::Values(
return vpx_variance##w##x##h(tmp1, w, ref, ref_stride, sse); \
}
-#define SPECIALIZED_SUBPEL_VARIANCE_WXH_NEON(w, h, padding) \
- unsigned int vpx_sub_pixel_variance##w##x##h##_neon( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *ref, int ref_stride, unsigned int *sse) { \
- if (xoffset == 0) { \
- if (yoffset == 0) { \
- return vpx_variance##w##x##h##_neon(src, src_stride, ref, ref_stride, \
- sse); \
- } else if (yoffset == 4) { \
- uint8_t tmp[w * h]; \
- var_filter_block2d_avg(src, tmp, src_stride, src_stride, w, h); \
- return vpx_variance##w##x##h##_neon(tmp, w, ref, ref_stride, sse); \
- } else { \
- uint8_t tmp[w * h]; \
- var_filter_block2d_bil_w##w(src, tmp, src_stride, src_stride, h, \
- yoffset); \
- return vpx_variance##w##x##h##_neon(tmp, w, ref, ref_stride, sse); \
- } \
- } else if (xoffset == 4) { \
- uint8_t tmp0[w * (h + padding)]; \
- if (yoffset == 0) { \
- var_filter_block2d_avg(src, tmp0, src_stride, 1, w, h); \
- return vpx_variance##w##x##h##_neon(tmp0, w, ref, ref_stride, sse); \
- } else if (yoffset == 4) { \
- uint8_t tmp1[w * (h + padding)]; \
- var_filter_block2d_avg(src, tmp0, src_stride, 1, w, (h + padding)); \
- var_filter_block2d_avg(tmp0, tmp1, w, w, w, h); \
- return vpx_variance##w##x##h##_neon(tmp1, w, ref, ref_stride, sse); \
- } else { \
- uint8_t tmp1[w * (h + padding)]; \
- var_filter_block2d_avg(src, tmp0, src_stride, 1, w, (h + padding)); \
- var_filter_block2d_bil_w##w(tmp0, tmp1, w, w, h, yoffset); \
- return vpx_variance##w##x##h##_neon(tmp1, w, ref, ref_stride, sse); \
- } \
- } else { \
- uint8_t tmp0[w * (h + padding)]; \
- if (yoffset == 0) { \
- var_filter_block2d_bil_w##w(src, tmp0, src_stride, 1, h, xoffset); \
- return vpx_variance##w##x##h##_neon(tmp0, w, ref, ref_stride, sse); \
- } else if (yoffset == 4) { \
- uint8_t tmp1[w * h]; \
- var_filter_block2d_bil_w##w(src, tmp0, src_stride, 1, (h + padding), \
- xoffset); \
- var_filter_block2d_avg(tmp0, tmp1, w, w, w, h); \
- return vpx_variance##w##x##h##_neon(tmp1, w, ref, ref_stride, sse); \
- } else { \
- uint8_t tmp1[w * h]; \
- var_filter_block2d_bil_w##w(src, tmp0, src_stride, 1, (h + padding), \
- xoffset); \
- var_filter_block2d_bil_w##w(tmp0, tmp1, w, w, h, yoffset); \
- return vpx_variance##w##x##h##_neon(tmp1, w, ref, ref_stride, sse); \
- } \
- } \
+#define SPECIALIZED_SUBPEL_VARIANCE_WXH_NEON(w, h, padding) \
+ unsigned int vpx_sub_pixel_variance##w##x##h##_neon( \
+ const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref, int ref_stride, unsigned int *sse) { \
+ if (xoffset == 0) { \
+ if (yoffset == 0) { \
+ return vpx_variance##w##x##h(src, src_stride, ref, ref_stride, sse); \
+ } else if (yoffset == 4) { \
+ uint8_t tmp[w * h]; \
+ var_filter_block2d_avg(src, tmp, src_stride, src_stride, w, h); \
+ return vpx_variance##w##x##h(tmp, w, ref, ref_stride, sse); \
+ } else { \
+ uint8_t tmp[w * h]; \
+ var_filter_block2d_bil_w##w(src, tmp, src_stride, src_stride, h, \
+ yoffset); \
+ return vpx_variance##w##x##h(tmp, w, ref, ref_stride, sse); \
+ } \
+ } else if (xoffset == 4) { \
+ uint8_t tmp0[w * (h + padding)]; \
+ if (yoffset == 0) { \
+ var_filter_block2d_avg(src, tmp0, src_stride, 1, w, h); \
+ return vpx_variance##w##x##h(tmp0, w, ref, ref_stride, sse); \
+ } else if (yoffset == 4) { \
+ uint8_t tmp1[w * (h + padding)]; \
+ var_filter_block2d_avg(src, tmp0, src_stride, 1, w, (h + padding)); \
+ var_filter_block2d_avg(tmp0, tmp1, w, w, w, h); \
+ return vpx_variance##w##x##h(tmp1, w, ref, ref_stride, sse); \
+ } else { \
+ uint8_t tmp1[w * (h + padding)]; \
+ var_filter_block2d_avg(src, tmp0, src_stride, 1, w, (h + padding)); \
+ var_filter_block2d_bil_w##w(tmp0, tmp1, w, w, h, yoffset); \
+ return vpx_variance##w##x##h(tmp1, w, ref, ref_stride, sse); \
+ } \
+ } else { \
+ uint8_t tmp0[w * (h + padding)]; \
+ if (yoffset == 0) { \
+ var_filter_block2d_bil_w##w(src, tmp0, src_stride, 1, h, xoffset); \
+ return vpx_variance##w##x##h(tmp0, w, ref, ref_stride, sse); \
+ } else if (yoffset == 4) { \
+ uint8_t tmp1[w * h]; \
+ var_filter_block2d_bil_w##w(src, tmp0, src_stride, 1, (h + padding), \
+ xoffset); \
+ var_filter_block2d_avg(tmp0, tmp1, w, w, w, h); \
+ return vpx_variance##w##x##h(tmp1, w, ref, ref_stride, sse); \
+ } else { \
+ uint8_t tmp1[w * h]; \
+ var_filter_block2d_bil_w##w(src, tmp0, src_stride, 1, (h + padding), \
+ xoffset); \
+ var_filter_block2d_bil_w##w(tmp0, tmp1, w, w, h, yoffset); \
+ return vpx_variance##w##x##h(tmp1, w, ref, ref_stride, sse); \
+ } \
+ } \
}
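To make the rewritten macro's padding arithmetic concrete, here is a scalar sketch of the xoffset == 4, yoffset == 4 path (half-pel in both directions) for an 8x8 block, assuming the macro is instantiated with padding == 1 for this size. filter_avg_c and half_pel_8x8_c are hypothetical stand-ins; filter_avg_c mirrors the semantics of the file's var_filter_block2d_avg helper, a rounding average of each pixel with its neighbour pixel_step bytes away.

#include <stdint.h>

/* Hypothetical scalar stand-in for var_filter_block2d_avg: each output pixel
 * is the rounding average of src[c] and src[c + pixel_step]. */
static void filter_avg_c(const uint8_t *src, uint8_t *dst, int src_stride,
                         int pixel_step, int dst_width, int dst_height) {
  for (int r = 0; r < dst_height; ++r) {
    for (int c = 0; c < dst_width; ++c) {
      dst[c] = (uint8_t)((src[c] + src[c + pixel_step] + 1) >> 1);
    }
    src += src_stride;
    dst += dst_width;
  }
}

/* Half-pel in both directions: the horizontal pass (pixel_step == 1) must
 * produce h + padding == 9 rows because the vertical pass (pixel_step == w)
 * needs one more input row than it produces. */
static void half_pel_8x8_c(const uint8_t *src, int src_stride,
                           uint8_t out[8 * 8]) {
  uint8_t tmp0[8 * (8 + 1)];
  filter_avg_c(src, tmp0, src_stride, 1, 8, 8 + 1);
  filter_avg_c(tmp0, out, 8, 8, 8, 8);
}

The variance of the filtered block against ref then proceeds as in the fast path; note the macro now reaches it through the unsuffixed, rtcd-dispatched vpx_variance##w##x##h rather than hard-wiring the _neon kernel, so the dotprod variants introduced elsewhere in this patch are picked up automatically.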
// 4x<h> blocks are processed two rows at a time, so require an extra row of
// padding.
uint8_t tmp[w * h]; \
if (yoffset == 0) { \
avg_pred(src, tmp, source_stride, w, h, second_pred); \
- return vpx_variance##w##x##h##_neon(tmp, w, ref, ref_stride, sse); \
+ return vpx_variance##w##x##h(tmp, w, ref, ref_stride, sse); \
} else if (yoffset == 4) { \
avg_pred_var_filter_block2d_avg(src, tmp, source_stride, \
source_stride, w, h, second_pred); \
- return vpx_variance##w##x##h##_neon(tmp, w, ref, ref_stride, sse); \
+ return vpx_variance##w##x##h(tmp, w, ref, ref_stride, sse); \
} else { \
avg_pred_var_filter_block2d_bil_w##w( \
src, tmp, source_stride, source_stride, h, yoffset, second_pred); \
- return vpx_variance##w##x##h##_neon(tmp, w, ref, ref_stride, sse); \
+ return vpx_variance##w##x##h(tmp, w, ref, ref_stride, sse); \
} \
} else if (xoffset == 4) { \
uint8_t tmp0[w * (h + padding)]; \
if (yoffset == 0) { \
avg_pred_var_filter_block2d_avg(src, tmp0, source_stride, 1, w, h, \
second_pred); \
- return vpx_variance##w##x##h##_neon(tmp0, w, ref, ref_stride, sse); \
+ return vpx_variance##w##x##h(tmp0, w, ref, ref_stride, sse); \
} else if (yoffset == 4) { \
uint8_t tmp1[w * (h + padding)]; \
var_filter_block2d_avg(src, tmp0, source_stride, 1, w, (h + padding)); \
avg_pred_var_filter_block2d_avg(tmp0, tmp1, w, w, w, h, second_pred); \
- return vpx_variance##w##x##h##_neon(tmp1, w, ref, ref_stride, sse); \
+ return vpx_variance##w##x##h(tmp1, w, ref, ref_stride, sse); \
} else { \
uint8_t tmp1[w * (h + padding)]; \
var_filter_block2d_avg(src, tmp0, source_stride, 1, w, (h + padding)); \
avg_pred_var_filter_block2d_bil_w##w(tmp0, tmp1, w, w, h, yoffset, \
second_pred); \
- return vpx_variance##w##x##h##_neon(tmp1, w, ref, ref_stride, sse); \
+ return vpx_variance##w##x##h(tmp1, w, ref, ref_stride, sse); \
} \
} else { \
uint8_t tmp0[w * (h + padding)]; \
if (yoffset == 0) { \
avg_pred_var_filter_block2d_bil_w##w(src, tmp0, source_stride, 1, h, \
xoffset, second_pred); \
- return vpx_variance##w##x##h##_neon(tmp0, w, ref, ref_stride, sse); \
+ return vpx_variance##w##x##h(tmp0, w, ref, ref_stride, sse); \
} else if (yoffset == 4) { \
uint8_t tmp1[w * h]; \
var_filter_block2d_bil_w##w(src, tmp0, source_stride, 1, \
(h + padding), xoffset); \
avg_pred_var_filter_block2d_avg(tmp0, tmp1, w, w, w, h, second_pred); \
- return vpx_variance##w##x##h##_neon(tmp1, w, ref, ref_stride, sse); \
+ return vpx_variance##w##x##h(tmp1, w, ref, ref_stride, sse); \
} else { \
uint8_t tmp1[w * h]; \
var_filter_block2d_bil_w##w(src, tmp0, source_stride, 1, \
(h + padding), xoffset); \
avg_pred_var_filter_block2d_bil_w##w(tmp0, tmp1, w, w, h, yoffset, \
second_pred); \
- return vpx_variance##w##x##h##_neon(tmp1, w, ref, ref_stride, sse); \
+ return vpx_variance##w##x##h(tmp1, w, ref, ref_stride, sse); \
} \
} \
}
#include "vpx_dsp/arm/sum_neon.h"
#include "vpx_ports/mem.h"
-#if defined(__ARM_FEATURE_DOTPROD)
-
-// Process a block of width 4 four rows at a time.
-static INLINE void variance_4xh_neon(const uint8_t *src_ptr, int src_stride,
- const uint8_t *ref_ptr, int ref_stride,
- int h, uint32_t *sse, int *sum) {
- uint32x4_t src_sum = vdupq_n_u32(0);
- uint32x4_t ref_sum = vdupq_n_u32(0);
- uint32x4_t sse_u32 = vdupq_n_u32(0);
-
- int i = h;
- do {
- const uint8x16_t s = load_unaligned_u8q(src_ptr, src_stride);
- const uint8x16_t r = load_unaligned_u8q(ref_ptr, ref_stride);
-
- const uint8x16_t abs_diff = vabdq_u8(s, r);
- sse_u32 = vdotq_u32(sse_u32, abs_diff, abs_diff);
-
- src_sum = vdotq_u32(src_sum, s, vdupq_n_u8(1));
- ref_sum = vdotq_u32(ref_sum, r, vdupq_n_u8(1));
-
- src_ptr += 4 * src_stride;
- ref_ptr += 4 * ref_stride;
- i -= 4;
- } while (i != 0);
-
- *sum = horizontal_add_int32x4(
- vreinterpretq_s32_u32(vsubq_u32(src_sum, ref_sum)));
- *sse = horizontal_add_uint32x4(sse_u32);
-}
-
-// Process a block of width 8 two rows at a time.
-static INLINE void variance_8xh_neon(const uint8_t *src_ptr, int src_stride,
- const uint8_t *ref_ptr, int ref_stride,
- int h, uint32_t *sse, int *sum) {
- uint32x4_t src_sum = vdupq_n_u32(0);
- uint32x4_t ref_sum = vdupq_n_u32(0);
- uint32x4_t sse_u32 = vdupq_n_u32(0);
-
- int i = h;
- do {
- const uint8x16_t s =
- vcombine_u8(vld1_u8(src_ptr), vld1_u8(src_ptr + src_stride));
- const uint8x16_t r =
- vcombine_u8(vld1_u8(ref_ptr), vld1_u8(ref_ptr + ref_stride));
-
- const uint8x16_t abs_diff = vabdq_u8(s, r);
- sse_u32 = vdotq_u32(sse_u32, abs_diff, abs_diff);
-
- src_sum = vdotq_u32(src_sum, s, vdupq_n_u8(1));
- ref_sum = vdotq_u32(ref_sum, r, vdupq_n_u8(1));
-
- src_ptr += 2 * src_stride;
- ref_ptr += 2 * ref_stride;
- i -= 2;
- } while (i != 0);
-
- *sum = horizontal_add_int32x4(
- vreinterpretq_s32_u32(vsubq_u32(src_sum, ref_sum)));
- *sse = horizontal_add_uint32x4(sse_u32);
-}
-
-// Process a block of width 16 one row at a time.
-static INLINE void variance_16xh_neon(const uint8_t *src_ptr, int src_stride,
- const uint8_t *ref_ptr, int ref_stride,
- int h, uint32_t *sse, int *sum) {
- uint32x4_t src_sum = vdupq_n_u32(0);
- uint32x4_t ref_sum = vdupq_n_u32(0);
- uint32x4_t sse_u32 = vdupq_n_u32(0);
-
- int i = h;
- do {
- const uint8x16_t s = vld1q_u8(src_ptr);
- const uint8x16_t r = vld1q_u8(ref_ptr);
-
- const uint8x16_t abs_diff = vabdq_u8(s, r);
- sse_u32 = vdotq_u32(sse_u32, abs_diff, abs_diff);
-
- src_sum = vdotq_u32(src_sum, s, vdupq_n_u8(1));
- ref_sum = vdotq_u32(ref_sum, r, vdupq_n_u8(1));
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- } while (--i != 0);
-
- *sum = horizontal_add_int32x4(
- vreinterpretq_s32_u32(vsubq_u32(src_sum, ref_sum)));
- *sse = horizontal_add_uint32x4(sse_u32);
-}
-
-// Process a block of any size where the width is divisible by 16.
-static INLINE void variance_large_neon(const uint8_t *src_ptr, int src_stride,
- const uint8_t *ref_ptr, int ref_stride,
- int w, int h, uint32_t *sse, int *sum) {
- uint32x4_t src_sum = vdupq_n_u32(0);
- uint32x4_t ref_sum = vdupq_n_u32(0);
- uint32x4_t sse_u32 = vdupq_n_u32(0);
-
- int i = h;
- do {
- int j = 0;
- do {
- const uint8x16_t s = vld1q_u8(src_ptr + j);
- const uint8x16_t r = vld1q_u8(ref_ptr + j);
-
- const uint8x16_t abs_diff = vabdq_u8(s, r);
- sse_u32 = vdotq_u32(sse_u32, abs_diff, abs_diff);
-
- src_sum = vdotq_u32(src_sum, s, vdupq_n_u8(1));
- ref_sum = vdotq_u32(ref_sum, r, vdupq_n_u8(1));
-
- j += 16;
- } while (j < w);
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- } while (--i != 0);
-
- *sum = horizontal_add_int32x4(
- vreinterpretq_s32_u32(vsubq_u32(src_sum, ref_sum)));
- *sse = horizontal_add_uint32x4(sse_u32);
-}
-
-static INLINE void variance_32xh_neon(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride, int h,
- uint32_t *sse, int *sum) {
- variance_large_neon(src, src_stride, ref, ref_stride, 32, h, sse, sum);
-}
-
-static INLINE void variance_64xh_neon(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride, int h,
- uint32_t *sse, int *sum) {
- variance_large_neon(src, src_stride, ref, ref_stride, 64, h, sse, sum);
-}
-
-#else // !defined(__ARM_FEATURE_DOTPROD)
-
// Process a block of width 4 two rows at a time.
static INLINE void variance_4xh_neon(const uint8_t *src_ptr, int src_stride,
const uint8_t *ref_ptr, int ref_stride,
variance_large_neon(src, src_stride, ref, ref_stride, 64, h, 32, sse, sum);
}
-#endif // defined(__ARM_FEATURE_DOTPROD)
-
void vpx_get8x8var_neon(const uint8_t *src_ptr, int src_stride,
const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse, int *sum) {
VARIANCE_WXH_NEON(64, 32, 11)
VARIANCE_WXH_NEON(64, 64, 12)
+#undef VARIANCE_WXH_NEON
+
#if defined(__ARM_FEATURE_DOTPROD)
static INLINE unsigned int vpx_mse8xh_neon(const unsigned char *src_ptr,
--- /dev/null
+++ b/vpx_dsp/arm/variance_neon_dotprod.c
+/*
+ * Copyright (c) 2021 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_config.h"
+
+#include "vpx/vpx_integer.h"
+#include "vpx_dsp/arm/mem_neon.h"
+#include "vpx_dsp/arm/sum_neon.h"
+#include "vpx_ports/mem.h"
+
+// Process a block of width 4 four rows at a time.
+static INLINE void variance_4xh_neon_dotprod(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h,
+ uint32_t *sse, int *sum) {
+ uint32x4_t src_sum = vdupq_n_u32(0);
+ uint32x4_t ref_sum = vdupq_n_u32(0);
+ uint32x4_t sse_u32 = vdupq_n_u32(0);
+
+ int i = h;
+ do {
+ const uint8x16_t s = load_unaligned_u8q(src_ptr, src_stride);
+ const uint8x16_t r = load_unaligned_u8q(ref_ptr, ref_stride);
+
+ const uint8x16_t abs_diff = vabdq_u8(s, r);
+ sse_u32 = vdotq_u32(sse_u32, abs_diff, abs_diff);
+
+ src_sum = vdotq_u32(src_sum, s, vdupq_n_u8(1));
+ ref_sum = vdotq_u32(ref_sum, r, vdupq_n_u8(1));
+
+ src_ptr += 4 * src_stride;
+ ref_ptr += 4 * ref_stride;
+ i -= 4;
+ } while (i != 0);
+
+ *sum = horizontal_add_int32x4(
+ vreinterpretq_s32_u32(vsubq_u32(src_sum, ref_sum)));
+ *sse = horizontal_add_uint32x4(sse_u32);
+}
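The sums here reuse the dot-product instruction rather than widening adds: dotting a vector against vdupq_n_u8(1) folds each group of four bytes into one 32-bit lane, and dotting the absolute difference with itself accumulates squared differences. A scalar model of one vdotq_u32 step, with dot_u8x16_c as an illustrative name:

#include <stdint.h>

/* Scalar model of vdotq_u32(acc, x, y): each 32-bit lane of acc gains the dot
 * product of the corresponding four byte lanes of x and y. With y all ones
 * this is a byte sum; with y == x it is a sum of squares. */
static void dot_u8x16_c(uint32_t acc[4], const uint8_t x[16],
                        const uint8_t y[16]) {
  for (int lane = 0; lane < 4; ++lane) {
    for (int k = 0; k < 4; ++k) {
      acc[lane] += (uint32_t)x[4 * lane + k] * (uint32_t)y[4 * lane + k];
    }
  }
}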
+
+// Process a block of width 8 two rows at a time.
+static INLINE void variance_8xh_neon_dotprod(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h,
+ uint32_t *sse, int *sum) {
+ uint32x4_t src_sum = vdupq_n_u32(0);
+ uint32x4_t ref_sum = vdupq_n_u32(0);
+ uint32x4_t sse_u32 = vdupq_n_u32(0);
+
+ int i = h;
+ do {
+ const uint8x16_t s =
+ vcombine_u8(vld1_u8(src_ptr), vld1_u8(src_ptr + src_stride));
+ const uint8x16_t r =
+ vcombine_u8(vld1_u8(ref_ptr), vld1_u8(ref_ptr + ref_stride));
+
+ const uint8x16_t abs_diff = vabdq_u8(s, r);
+ sse_u32 = vdotq_u32(sse_u32, abs_diff, abs_diff);
+
+ src_sum = vdotq_u32(src_sum, s, vdupq_n_u8(1));
+ ref_sum = vdotq_u32(ref_sum, r, vdupq_n_u8(1));
+
+ src_ptr += 2 * src_stride;
+ ref_ptr += 2 * ref_stride;
+ i -= 2;
+ } while (i != 0);
+
+ *sum = horizontal_add_int32x4(
+ vreinterpretq_s32_u32(vsubq_u32(src_sum, ref_sum)));
+ *sse = horizontal_add_uint32x4(sse_u32);
+}
+
+// Process a block of width 16 one row at a time.
+static INLINE void variance_16xh_neon_dotprod(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h,
+ uint32_t *sse, int *sum) {
+ uint32x4_t src_sum = vdupq_n_u32(0);
+ uint32x4_t ref_sum = vdupq_n_u32(0);
+ uint32x4_t sse_u32 = vdupq_n_u32(0);
+
+ int i = h;
+ do {
+ const uint8x16_t s = vld1q_u8(src_ptr);
+ const uint8x16_t r = vld1q_u8(ref_ptr);
+
+ const uint8x16_t abs_diff = vabdq_u8(s, r);
+ sse_u32 = vdotq_u32(sse_u32, abs_diff, abs_diff);
+
+ src_sum = vdotq_u32(src_sum, s, vdupq_n_u8(1));
+ ref_sum = vdotq_u32(ref_sum, r, vdupq_n_u8(1));
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ } while (--i != 0);
+
+ *sum = horizontal_add_int32x4(
+ vreinterpretq_s32_u32(vsubq_u32(src_sum, ref_sum)));
+ *sse = horizontal_add_uint32x4(sse_u32);
+}
+
+// Process a block of any size where the width is divisible by 16.
+static INLINE void variance_large_neon_dotprod(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int w, int h,
+ uint32_t *sse, int *sum) {
+ uint32x4_t src_sum = vdupq_n_u32(0);
+ uint32x4_t ref_sum = vdupq_n_u32(0);
+ uint32x4_t sse_u32 = vdupq_n_u32(0);
+
+ int i = h;
+ do {
+ int j = 0;
+ do {
+ const uint8x16_t s = vld1q_u8(src_ptr + j);
+ const uint8x16_t r = vld1q_u8(ref_ptr + j);
+
+ const uint8x16_t abs_diff = vabdq_u8(s, r);
+ sse_u32 = vdotq_u32(sse_u32, abs_diff, abs_diff);
+
+ src_sum = vdotq_u32(src_sum, s, vdupq_n_u8(1));
+ ref_sum = vdotq_u32(ref_sum, r, vdupq_n_u8(1));
+
+ j += 16;
+ } while (j < w);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ } while (--i != 0);
+
+ *sum = horizontal_add_int32x4(
+ vreinterpretq_s32_u32(vsubq_u32(src_sum, ref_sum)));
+ *sse = horizontal_add_uint32x4(sse_u32);
+}
+
+static INLINE void variance_32xh_neon_dotprod(const uint8_t *src,
+ int src_stride,
+ const uint8_t *ref,
+ int ref_stride, int h,
+ uint32_t *sse, int *sum) {
+ variance_large_neon_dotprod(src, src_stride, ref, ref_stride, 32, h, sse,
+ sum);
+}
+
+static INLINE void variance_64xh_neon_dotprod(const uint8_t *src,
+ int src_stride,
+ const uint8_t *ref,
+ int ref_stride, int h,
+ uint32_t *sse, int *sum) {
+ variance_large_neon_dotprod(src, src_stride, ref, ref_stride, 64, h, sse,
+ sum);
+}
+
+void vpx_get8x8var_neon_dotprod(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ unsigned int *sse, int *sum) {
+ variance_8xh_neon_dotprod(src_ptr, src_stride, ref_ptr, ref_stride, 8, sse,
+ sum);
+}
+
+void vpx_get16x16var_neon_dotprod(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ unsigned int *sse, int *sum) {
+ variance_16xh_neon_dotprod(src_ptr, src_stride, ref_ptr, ref_stride, 16, sse,
+ sum);
+}
+
+#define VARIANCE_WXH_NEON_DOTPROD(w, h, shift) \
+ unsigned int vpx_variance##w##x##h##_neon_dotprod( \
+ const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
+ unsigned int *sse) { \
+ int sum; \
+ variance_##w##xh_neon_dotprod(src, src_stride, ref, ref_stride, h, sse, \
+ &sum); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) >> shift); \
+ }
+
+VARIANCE_WXH_NEON_DOTPROD(4, 4, 4)
+VARIANCE_WXH_NEON_DOTPROD(4, 8, 5)
+
+VARIANCE_WXH_NEON_DOTPROD(8, 4, 5)
+VARIANCE_WXH_NEON_DOTPROD(8, 8, 6)
+VARIANCE_WXH_NEON_DOTPROD(8, 16, 7)
+
+VARIANCE_WXH_NEON_DOTPROD(16, 8, 7)
+VARIANCE_WXH_NEON_DOTPROD(16, 16, 8)
+VARIANCE_WXH_NEON_DOTPROD(16, 32, 9)
+
+VARIANCE_WXH_NEON_DOTPROD(32, 16, 9)
+VARIANCE_WXH_NEON_DOTPROD(32, 32, 10)
+VARIANCE_WXH_NEON_DOTPROD(32, 64, 11)
+
+VARIANCE_WXH_NEON_DOTPROD(64, 32, 11)
+VARIANCE_WXH_NEON_DOTPROD(64, 64, 12)
+
+#undef VARIANCE_WXH_NEON_DOTPROD
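The subtraction in VARIANCE_WXH_NEON_DOTPROD is the standard identity variance = sse - sum^2 / (w * h); since every block here has w * h == 1 << shift pixels, the division reduces to ">> shift". A scalar reference of the same computation (variance_ref_c is illustrative, not part of the patch):

#include <stdint.h>

/* Scalar reference for variance = sse - sum^2 / (w * h). The kernels above
 * compute sum as (sum of src) - (sum of ref), which equals the sum of the
 * per-pixel differences accumulated here. */
static unsigned int variance_ref_c(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride, int w,
                                   int h, unsigned int *sse) {
  int64_t sum = 0;
  uint64_t sse64 = 0;
  for (int r = 0; r < h; ++r) {
    for (int c = 0; c < w; ++c) {
      const int diff = src[c] - ref[c];
      sum += diff;
      sse64 += (uint64_t)(diff * diff);
    }
    src += src_stride;
    ref += ref_stride;
  }
  *sse = (unsigned int)sse64;
  /* w * h is a power of two, so the macro's ">> shift" is this division. */
  return (unsigned int)(sse64 - (uint64_t)((sum * sum) / (w * h)));
}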
DSP_SRCS-$(HAVE_NEON) += arm/avg_pred_neon.c
DSP_SRCS-$(HAVE_NEON) += arm/subpel_variance_neon.c
DSP_SRCS-$(HAVE_NEON) += arm/variance_neon.c
+DSP_SRCS-$(HAVE_NEON_DOTPROD) += arm/variance_neon_dotprod.c
DSP_SRCS-$(HAVE_MSA) += mips/variance_msa.c
DSP_SRCS-$(HAVE_MSA) += mips/sub_pixel_variance_msa.c
# Variance
#
add_proto qw/unsigned int vpx_variance64x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance64x64 sse2 avx2 neon msa mmi vsx lsx/;
+ specialize qw/vpx_variance64x64 sse2 avx2 neon neon_dotprod msa mmi vsx lsx/;
add_proto qw/unsigned int vpx_variance64x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance64x32 sse2 avx2 neon msa mmi vsx/;
+ specialize qw/vpx_variance64x32 sse2 avx2 neon neon_dotprod msa mmi vsx/;
add_proto qw/unsigned int vpx_variance32x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance32x64 sse2 avx2 neon msa mmi vsx/;
+ specialize qw/vpx_variance32x64 sse2 avx2 neon neon_dotprod msa mmi vsx/;
add_proto qw/unsigned int vpx_variance32x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance32x32 sse2 avx2 neon msa mmi vsx lsx/;
+ specialize qw/vpx_variance32x32 sse2 avx2 neon neon_dotprod msa mmi vsx lsx/;
add_proto qw/unsigned int vpx_variance32x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance32x16 sse2 avx2 neon msa mmi vsx/;
+ specialize qw/vpx_variance32x16 sse2 avx2 neon neon_dotprod msa mmi vsx/;
add_proto qw/unsigned int vpx_variance16x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance16x32 sse2 avx2 neon msa mmi vsx/;
+ specialize qw/vpx_variance16x32 sse2 avx2 neon neon_dotprod msa mmi vsx/;
add_proto qw/unsigned int vpx_variance16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance16x16 sse2 avx2 neon msa mmi vsx lsx/;
+ specialize qw/vpx_variance16x16 sse2 avx2 neon neon_dotprod msa mmi vsx lsx/;
add_proto qw/unsigned int vpx_variance16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance16x8 sse2 avx2 neon msa mmi vsx/;
+ specialize qw/vpx_variance16x8 sse2 avx2 neon neon_dotprod msa mmi vsx/;
add_proto qw/unsigned int vpx_variance8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance8x16 sse2 avx2 neon msa mmi vsx/;
+ specialize qw/vpx_variance8x16 sse2 avx2 neon neon_dotprod msa mmi vsx/;
add_proto qw/unsigned int vpx_variance8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance8x8 sse2 avx2 neon msa mmi vsx lsx/;
+ specialize qw/vpx_variance8x8 sse2 avx2 neon neon_dotprod msa mmi vsx lsx/;
add_proto qw/unsigned int vpx_variance8x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance8x4 sse2 avx2 neon msa mmi vsx/;
+ specialize qw/vpx_variance8x4 sse2 avx2 neon neon_dotprod msa mmi vsx/;
add_proto qw/unsigned int vpx_variance4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance4x8 sse2 neon msa mmi vsx/;
+ specialize qw/vpx_variance4x8 sse2 neon neon_dotprod msa mmi vsx/;
add_proto qw/unsigned int vpx_variance4x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance4x4 sse2 neon msa mmi vsx/;
+ specialize qw/vpx_variance4x4 sse2 neon neon_dotprod msa mmi vsx/;
#
# Specialty Variance
#
add_proto qw/void vpx_get16x16var/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
- specialize qw/vpx_get16x16var sse2 avx2 neon msa vsx lsx/;
+ specialize qw/vpx_get16x16var sse2 avx2 neon neon_dotprod msa vsx lsx/;
add_proto qw/void vpx_get8x8var/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
- specialize qw/vpx_get8x8var sse2 neon msa vsx/;
+ specialize qw/vpx_get8x8var sse2 neon neon_dotprod msa vsx/;
add_proto qw/unsigned int vpx_mse16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_mse16x16 sse2 avx2 neon msa mmi vsx lsx/;
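For context on the specialize lines: the rtcd generator turns each unsuffixed prototype into a runtime-dispatched function pointer, initialised to the best compiled-in variant and promoted when CPU feature detection reports the dot-product extension. A simplified sketch of the shape of the generated header for one line; the names follow libvpx's rtcd conventions, but the exact emitted code is generator- and target-dependent:

unsigned int vpx_variance64x64_c(const uint8_t *, int, const uint8_t *, int,
                                 unsigned int *);
unsigned int vpx_variance64x64_neon(const uint8_t *, int, const uint8_t *, int,
                                    unsigned int *);
unsigned int vpx_variance64x64_neon_dotprod(const uint8_t *, int,
                                            const uint8_t *, int,
                                            unsigned int *);
RTCD_EXTERN unsigned int (*vpx_variance64x64)(const uint8_t *, int,
                                              const uint8_t *, int,
                                              unsigned int *);

/* ...and in the rtcd setup function, roughly: */
/*   vpx_variance64x64 = vpx_variance64x64_neon;           */
/*   if (flags & HAS_NEON_DOTPROD)                         */
/*     vpx_variance64x64 = vpx_variance64x64_neon_dotprod; */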