From 7009fe55a9a7aed3a3504c09c677de0326c8207b Mon Sep 17 00:00:00 2001 From: Jonathan Wright Date: Sat, 19 Aug 2023 20:59:40 +0100 Subject: [PATCH] Use run-time CPU feature detection for Neon DotProd SAD4D Arm Neon DotProd implementations of vpx_sad*4d currently need to be enabled at compile time since they're guarded by ifdef feature macros. Now that run-time feature detection has been enabled for Arm platforms, expose these implementations with distinct *neon_dotprod names in separate files and wire them up to the build system and rtcd.pl. Also add new test cases for the new DotProd functions. Change-Id: Ie99ee0b03ec488626f52c3f13e4111fe26cc5619 --- test/sad_test.cc | 30 +++++++ vpx_dsp/arm/sad4d_neon.c | 116 -------------------------- vpx_dsp/arm/sad4d_neon_dotprod.c | 176 +++++++++++++++++++++++++++++++++++++++ vpx_dsp/vpx_dsp.mk | 1 + vpx_dsp/vpx_dsp_rtcd_defs.pl | 32 +++---- 5 files changed, 223 insertions(+), 132 deletions(-) create mode 100644 vpx_dsp/arm/sad4d_neon_dotprod.c diff --git a/test/sad_test.cc b/test/sad_test.cc index 3f9c020..3530e66 100644 --- a/test/sad_test.cc +++ b/test/sad_test.cc @@ -1346,6 +1346,21 @@ const SadMxNx4Param x4d_neon_tests[] = { }; INSTANTIATE_TEST_SUITE_P(NEON, SADx4Test, ::testing::ValuesIn(x4d_neon_tests)); +#if HAVE_NEON_DOTPROD +const SadMxNx4Param x4d_neon_dotprod_tests[] = { + SadMxNx4Param(64, 64, &vpx_sad64x64x4d_neon_dotprod), + SadMxNx4Param(64, 32, &vpx_sad64x32x4d_neon_dotprod), + SadMxNx4Param(32, 64, &vpx_sad32x64x4d_neon_dotprod), + SadMxNx4Param(32, 32, &vpx_sad32x32x4d_neon_dotprod), + SadMxNx4Param(32, 16, &vpx_sad32x16x4d_neon_dotprod), + SadMxNx4Param(16, 32, &vpx_sad16x32x4d_neon_dotprod), + SadMxNx4Param(16, 16, &vpx_sad16x16x4d_neon_dotprod), + SadMxNx4Param(16, 8, &vpx_sad16x8x4d_neon_dotprod), +}; +INSTANTIATE_TEST_SUITE_P(NEON_DOTPROD, SADx4Test, + ::testing::ValuesIn(x4d_neon_dotprod_tests)); +#endif // HAVE_NEON_DOTPROD + const SadSkipMxNx4Param skip_x4d_neon_tests[] = { SadSkipMxNx4Param(64, 64, 
&vpx_sad_skip_64x64x4d_neon), SadSkipMxNx4Param(64, 32, &vpx_sad_skip_64x32x4d_neon), @@ -1401,6 +1416,21 @@ const SadSkipMxNx4Param skip_x4d_neon_tests[] = { }; INSTANTIATE_TEST_SUITE_P(NEON, SADSkipx4Test, ::testing::ValuesIn(skip_x4d_neon_tests)); + +#if HAVE_NEON_DOTPROD +const SadSkipMxNx4Param skip_x4d_neon_dotprod_tests[] = { + SadSkipMxNx4Param(64, 64, &vpx_sad_skip_64x64x4d_neon_dotprod), + SadSkipMxNx4Param(64, 32, &vpx_sad_skip_64x32x4d_neon_dotprod), + SadSkipMxNx4Param(32, 64, &vpx_sad_skip_32x64x4d_neon_dotprod), + SadSkipMxNx4Param(32, 32, &vpx_sad_skip_32x32x4d_neon_dotprod), + SadSkipMxNx4Param(32, 16, &vpx_sad_skip_32x16x4d_neon_dotprod), + SadSkipMxNx4Param(16, 32, &vpx_sad_skip_16x32x4d_neon_dotprod), + SadSkipMxNx4Param(16, 16, &vpx_sad_skip_16x16x4d_neon_dotprod), + SadSkipMxNx4Param(16, 8, &vpx_sad_skip_16x8x4d_neon_dotprod), +}; +INSTANTIATE_TEST_SUITE_P(NEON_DOTPROD, SADSkipx4Test, + ::testing::ValuesIn(skip_x4d_neon_dotprod_tests)); +#endif // HAVE_NEON_DOTPROD #endif // HAVE_NEON //------------------------------------------------------------------------------ diff --git a/vpx_dsp/arm/sad4d_neon.c b/vpx_dsp/arm/sad4d_neon.c index 3a548d0..713eec7 100644 --- a/vpx_dsp/arm/sad4d_neon.c +++ b/vpx_dsp/arm/sad4d_neon.c @@ -17,120 +17,6 @@ #include "vpx_dsp/arm/mem_neon.h" #include "vpx_dsp/arm/sum_neon.h" -#if defined(__ARM_FEATURE_DOTPROD) - -static INLINE void sad16_neon(uint8x16_t src, uint8x16_t ref, - uint32x4_t *const sad_sum) { - uint8x16_t abs_diff = vabdq_u8(src, ref); - *sad_sum = vdotq_u32(*sad_sum, abs_diff, vdupq_n_u8(1)); -} - -static INLINE void sad64xhx4d_neon(const uint8_t *src, int src_stride, - const uint8_t *const ref[4], int ref_stride, - uint32_t res[4], int h) { - uint32x4_t sum_lo[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0), - vdupq_n_u32(0) }; - uint32x4_t sum_hi[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0), - vdupq_n_u32(0) }; - uint32x4_t sum[4]; - - int i = 0; - do { - uint8x16_t s0, s1, s2, s3; - 
- s0 = vld1q_u8(src + i * src_stride); - sad16_neon(s0, vld1q_u8(ref[0] + i * ref_stride), &sum_lo[0]); - sad16_neon(s0, vld1q_u8(ref[1] + i * ref_stride), &sum_lo[1]); - sad16_neon(s0, vld1q_u8(ref[2] + i * ref_stride), &sum_lo[2]); - sad16_neon(s0, vld1q_u8(ref[3] + i * ref_stride), &sum_lo[3]); - - s1 = vld1q_u8(src + i * src_stride + 16); - sad16_neon(s1, vld1q_u8(ref[0] + i * ref_stride + 16), &sum_hi[0]); - sad16_neon(s1, vld1q_u8(ref[1] + i * ref_stride + 16), &sum_hi[1]); - sad16_neon(s1, vld1q_u8(ref[2] + i * ref_stride + 16), &sum_hi[2]); - sad16_neon(s1, vld1q_u8(ref[3] + i * ref_stride + 16), &sum_hi[3]); - - s2 = vld1q_u8(src + i * src_stride + 32); - sad16_neon(s2, vld1q_u8(ref[0] + i * ref_stride + 32), &sum_lo[0]); - sad16_neon(s2, vld1q_u8(ref[1] + i * ref_stride + 32), &sum_lo[1]); - sad16_neon(s2, vld1q_u8(ref[2] + i * ref_stride + 32), &sum_lo[2]); - sad16_neon(s2, vld1q_u8(ref[3] + i * ref_stride + 32), &sum_lo[3]); - - s3 = vld1q_u8(src + i * src_stride + 48); - sad16_neon(s3, vld1q_u8(ref[0] + i * ref_stride + 48), &sum_hi[0]); - sad16_neon(s3, vld1q_u8(ref[1] + i * ref_stride + 48), &sum_hi[1]); - sad16_neon(s3, vld1q_u8(ref[2] + i * ref_stride + 48), &sum_hi[2]); - sad16_neon(s3, vld1q_u8(ref[3] + i * ref_stride + 48), &sum_hi[3]); - - i++; - } while (i < h); - - sum[0] = vaddq_u32(sum_lo[0], sum_hi[0]); - sum[1] = vaddq_u32(sum_lo[1], sum_hi[1]); - sum[2] = vaddq_u32(sum_lo[2], sum_hi[2]); - sum[3] = vaddq_u32(sum_lo[3], sum_hi[3]); - - vst1q_u32(res, horizontal_add_4d_uint32x4(sum)); -} - -static INLINE void sad32xhx4d_neon(const uint8_t *src, int src_stride, - const uint8_t *const ref[4], int ref_stride, - uint32_t res[4], int h) { - uint32x4_t sum_lo[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0), - vdupq_n_u32(0) }; - uint32x4_t sum_hi[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0), - vdupq_n_u32(0) }; - uint32x4_t sum[4]; - - int i = 0; - do { - uint8x16_t s0, s1; - - s0 = vld1q_u8(src + i * src_stride); - sad16_neon(s0, 
vld1q_u8(ref[0] + i * ref_stride), &sum_lo[0]); - sad16_neon(s0, vld1q_u8(ref[1] + i * ref_stride), &sum_lo[1]); - sad16_neon(s0, vld1q_u8(ref[2] + i * ref_stride), &sum_lo[2]); - sad16_neon(s0, vld1q_u8(ref[3] + i * ref_stride), &sum_lo[3]); - - s1 = vld1q_u8(src + i * src_stride + 16); - sad16_neon(s1, vld1q_u8(ref[0] + i * ref_stride + 16), &sum_hi[0]); - sad16_neon(s1, vld1q_u8(ref[1] + i * ref_stride + 16), &sum_hi[1]); - sad16_neon(s1, vld1q_u8(ref[2] + i * ref_stride + 16), &sum_hi[2]); - sad16_neon(s1, vld1q_u8(ref[3] + i * ref_stride + 16), &sum_hi[3]); - - i++; - } while (i < h); - - sum[0] = vaddq_u32(sum_lo[0], sum_hi[0]); - sum[1] = vaddq_u32(sum_lo[1], sum_hi[1]); - sum[2] = vaddq_u32(sum_lo[2], sum_hi[2]); - sum[3] = vaddq_u32(sum_lo[3], sum_hi[3]); - - vst1q_u32(res, horizontal_add_4d_uint32x4(sum)); -} - -static INLINE void sad16xhx4d_neon(const uint8_t *src, int src_stride, - const uint8_t *const ref[4], int ref_stride, - uint32_t res[4], int h) { - uint32x4_t sum[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0), - vdupq_n_u32(0) }; - - int i = 0; - do { - const uint8x16_t s = vld1q_u8(src + i * src_stride); - sad16_neon(s, vld1q_u8(ref[0] + i * ref_stride), &sum[0]); - sad16_neon(s, vld1q_u8(ref[1] + i * ref_stride), &sum[1]); - sad16_neon(s, vld1q_u8(ref[2] + i * ref_stride), &sum[2]); - sad16_neon(s, vld1q_u8(ref[3] + i * ref_stride), &sum[3]); - - i++; - } while (i < h); - - vst1q_u32(res, horizontal_add_4d_uint32x4(sum)); -} - -#else // !defined(__ARM_FEATURE_DOTPROD)) - static INLINE void sad16_neon(uint8x16_t src, uint8x16_t ref, uint16x8_t *const sad_sum) { uint8x16_t abs_diff = vabdq_u8(src, ref); @@ -229,8 +115,6 @@ static INLINE void sad16xhx4d_neon(const uint8_t *src, int src_stride, vst1q_u32(res, horizontal_add_4d_uint16x8(sum)); } -#endif // defined(__ARM_FEATURE_DOTPROD) - static INLINE void sad8_neon(uint8x8_t src, uint8x8_t ref, uint16x8_t *const sad_sum) { uint8x8_t abs_diff = vabd_u8(src, ref); diff --git 
a/vpx_dsp/arm/sad4d_neon_dotprod.c b/vpx_dsp/arm/sad4d_neon_dotprod.c new file mode 100644 index 0000000..933fc48 --- /dev/null +++ b/vpx_dsp/arm/sad4d_neon_dotprod.c @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2021 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include <arm_neon.h> +#include <assert.h> + +#include "./vpx_config.h" +#include "./vpx_dsp_rtcd.h" +#include "vpx/vpx_integer.h" +#include "vpx_dsp/arm/mem_neon.h" +#include "vpx_dsp/arm/sum_neon.h" + +static INLINE void sad16_neon(uint8x16_t src, uint8x16_t ref, + uint32x4_t *const sad_sum) { + uint8x16_t abs_diff = vabdq_u8(src, ref); + *sad_sum = vdotq_u32(*sad_sum, abs_diff, vdupq_n_u8(1)); +} + +static INLINE void sad64xhx4d_neon_dotprod(const uint8_t *src, int src_stride, + const uint8_t *const ref[4], + int ref_stride, uint32_t res[4], + int h) { + uint32x4_t sum_lo[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0), + vdupq_n_u32(0) }; + uint32x4_t sum_hi[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0), + vdupq_n_u32(0) }; + uint32x4_t sum[4]; + + int i = 0; + do { + uint8x16_t s0, s1, s2, s3; + + s0 = vld1q_u8(src + i * src_stride); + sad16_neon(s0, vld1q_u8(ref[0] + i * ref_stride), &sum_lo[0]); + sad16_neon(s0, vld1q_u8(ref[1] + i * ref_stride), &sum_lo[1]); + sad16_neon(s0, vld1q_u8(ref[2] + i * ref_stride), &sum_lo[2]); + sad16_neon(s0, vld1q_u8(ref[3] + i * ref_stride), &sum_lo[3]); + + s1 = vld1q_u8(src + i * src_stride + 16); + sad16_neon(s1, vld1q_u8(ref[0] + i * ref_stride + 16), &sum_hi[0]); + sad16_neon(s1, vld1q_u8(ref[1] + i * ref_stride + 16), &sum_hi[1]); + sad16_neon(s1, vld1q_u8(ref[2] + i * ref_stride + 16), &sum_hi[2]); + sad16_neon(s1, vld1q_u8(ref[3] + i * 
ref_stride + 16), &sum_hi[3]); + + s2 = vld1q_u8(src + i * src_stride + 32); + sad16_neon(s2, vld1q_u8(ref[0] + i * ref_stride + 32), &sum_lo[0]); + sad16_neon(s2, vld1q_u8(ref[1] + i * ref_stride + 32), &sum_lo[1]); + sad16_neon(s2, vld1q_u8(ref[2] + i * ref_stride + 32), &sum_lo[2]); + sad16_neon(s2, vld1q_u8(ref[3] + i * ref_stride + 32), &sum_lo[3]); + + s3 = vld1q_u8(src + i * src_stride + 48); + sad16_neon(s3, vld1q_u8(ref[0] + i * ref_stride + 48), &sum_hi[0]); + sad16_neon(s3, vld1q_u8(ref[1] + i * ref_stride + 48), &sum_hi[1]); + sad16_neon(s3, vld1q_u8(ref[2] + i * ref_stride + 48), &sum_hi[2]); + sad16_neon(s3, vld1q_u8(ref[3] + i * ref_stride + 48), &sum_hi[3]); + + } while (++i < h); + + sum[0] = vaddq_u32(sum_lo[0], sum_hi[0]); + sum[1] = vaddq_u32(sum_lo[1], sum_hi[1]); + sum[2] = vaddq_u32(sum_lo[2], sum_hi[2]); + sum[3] = vaddq_u32(sum_lo[3], sum_hi[3]); + + vst1q_u32(res, horizontal_add_4d_uint32x4(sum)); +} + +static INLINE void sad32xhx4d_neon_dotprod(const uint8_t *src, int src_stride, + const uint8_t *const ref[4], + int ref_stride, uint32_t res[4], + int h) { + uint32x4_t sum_lo[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0), + vdupq_n_u32(0) }; + uint32x4_t sum_hi[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0), + vdupq_n_u32(0) }; + uint32x4_t sum[4]; + + int i = 0; + do { + uint8x16_t s0, s1; + + s0 = vld1q_u8(src + i * src_stride); + sad16_neon(s0, vld1q_u8(ref[0] + i * ref_stride), &sum_lo[0]); + sad16_neon(s0, vld1q_u8(ref[1] + i * ref_stride), &sum_lo[1]); + sad16_neon(s0, vld1q_u8(ref[2] + i * ref_stride), &sum_lo[2]); + sad16_neon(s0, vld1q_u8(ref[3] + i * ref_stride), &sum_lo[3]); + + s1 = vld1q_u8(src + i * src_stride + 16); + sad16_neon(s1, vld1q_u8(ref[0] + i * ref_stride + 16), &sum_hi[0]); + sad16_neon(s1, vld1q_u8(ref[1] + i * ref_stride + 16), &sum_hi[1]); + sad16_neon(s1, vld1q_u8(ref[2] + i * ref_stride + 16), &sum_hi[2]); + sad16_neon(s1, vld1q_u8(ref[3] + i * ref_stride + 16), &sum_hi[3]); + + } while (++i < 
h); + + sum[0] = vaddq_u32(sum_lo[0], sum_hi[0]); + sum[1] = vaddq_u32(sum_lo[1], sum_hi[1]); + sum[2] = vaddq_u32(sum_lo[2], sum_hi[2]); + sum[3] = vaddq_u32(sum_lo[3], sum_hi[3]); + + vst1q_u32(res, horizontal_add_4d_uint32x4(sum)); +} + +static INLINE void sad16xhx4d_neon_dotprod(const uint8_t *src, int src_stride, + const uint8_t *const ref[4], + int ref_stride, uint32_t res[4], + int h) { + uint32x4_t sum[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0), + vdupq_n_u32(0) }; + + int i = 0; + do { + const uint8x16_t s = vld1q_u8(src + i * src_stride); + sad16_neon(s, vld1q_u8(ref[0] + i * ref_stride), &sum[0]); + sad16_neon(s, vld1q_u8(ref[1] + i * ref_stride), &sum[1]); + sad16_neon(s, vld1q_u8(ref[2] + i * ref_stride), &sum[2]); + sad16_neon(s, vld1q_u8(ref[3] + i * ref_stride), &sum[3]); + + } while (++i < h); + + vst1q_u32(res, horizontal_add_4d_uint32x4(sum)); +} + +#define SAD_WXH_4D_NEON_DOTPROD(w, h) \ + void vpx_sad##w##x##h##x4d_neon_dotprod( \ + const uint8_t *src_ptr, int src_stride, \ + const uint8_t *const ref_array[4], int ref_stride, \ + uint32_t sad_array[4]) { \ + sad##w##xhx4d_neon_dotprod(src_ptr, src_stride, ref_array, ref_stride, \ + sad_array, (h)); \ + } + +SAD_WXH_4D_NEON_DOTPROD(16, 8) +SAD_WXH_4D_NEON_DOTPROD(16, 16) +SAD_WXH_4D_NEON_DOTPROD(16, 32) + +SAD_WXH_4D_NEON_DOTPROD(32, 16) +SAD_WXH_4D_NEON_DOTPROD(32, 32) +SAD_WXH_4D_NEON_DOTPROD(32, 64) + +SAD_WXH_4D_NEON_DOTPROD(64, 32) +SAD_WXH_4D_NEON_DOTPROD(64, 64) + +#undef SAD_WXH_4D_NEON_DOTPROD + +#define SAD_SKIP_WXH_4D_NEON_DOTPROD(w, h) \ + void vpx_sad_skip_##w##x##h##x4d_neon_dotprod( \ + const uint8_t *src_ptr, int src_stride, \ + const uint8_t *const ref_array[4], int ref_stride, \ + uint32_t sad_array[4]) { \ + sad##w##xhx4d_neon_dotprod(src_ptr, 2 * src_stride, ref_array, \ + 2 * ref_stride, sad_array, ((h) >> 1)); \ + sad_array[0] <<= 1; \ + sad_array[1] <<= 1; \ + sad_array[2] <<= 1; \ + sad_array[3] <<= 1; \ + } + +SAD_SKIP_WXH_4D_NEON_DOTPROD(16, 8) 
+SAD_SKIP_WXH_4D_NEON_DOTPROD(16, 16) +SAD_SKIP_WXH_4D_NEON_DOTPROD(16, 32) + +SAD_SKIP_WXH_4D_NEON_DOTPROD(32, 16) +SAD_SKIP_WXH_4D_NEON_DOTPROD(32, 32) +SAD_SKIP_WXH_4D_NEON_DOTPROD(32, 64) + +SAD_SKIP_WXH_4D_NEON_DOTPROD(64, 32) +SAD_SKIP_WXH_4D_NEON_DOTPROD(64, 64) + +#undef SAD_SKIP_WXH_4D_NEON_DOTPROD diff --git a/vpx_dsp/vpx_dsp.mk b/vpx_dsp/vpx_dsp.mk index d789353..feb48ee 100644 --- a/vpx_dsp/vpx_dsp.mk +++ b/vpx_dsp/vpx_dsp.mk @@ -373,6 +373,7 @@ DSP_SRCS-$(HAVE_SSE2) += x86/sum_squares_sse2.c DSP_SRCS-$(HAVE_MSA) += mips/sum_squares_msa.c DSP_SRCS-$(HAVE_NEON) += arm/sad4d_neon.c +DSP_SRCS-$(HAVE_NEON_DOTPROD) += arm/sad4d_neon_dotprod.c DSP_SRCS-$(HAVE_NEON) += arm/sad_neon.c DSP_SRCS-$(HAVE_NEON_DOTPROD) += arm/sad_neon_dotprod.c DSP_SRCS-$(HAVE_NEON) += arm/subtract_neon.c diff --git a/vpx_dsp/vpx_dsp_rtcd_defs.pl b/vpx_dsp/vpx_dsp_rtcd_defs.pl index 8383bdd..ff97e68 100644 --- a/vpx_dsp/vpx_dsp_rtcd_defs.pl +++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl @@ -928,28 +928,28 @@ specialize qw/vpx_sad4x4_avg neon msa sse2 mmi/; # Multi-block SAD, comparing a reference to N independent blocks # add_proto qw/void vpx_sad64x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad64x64x4d avx512 avx2 neon msa sse2 vsx mmi lsx/; +specialize qw/vpx_sad64x64x4d avx512 avx2 neon neon_dotprod msa sse2 vsx mmi lsx/; add_proto qw/void vpx_sad64x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad64x32x4d neon msa sse2 vsx mmi lsx/; +specialize qw/vpx_sad64x32x4d neon neon_dotprod msa sse2 vsx mmi lsx/; add_proto qw/void vpx_sad32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad32x64x4d neon msa sse2 vsx mmi lsx/; +specialize qw/vpx_sad32x64x4d neon neon_dotprod msa sse2 vsx mmi lsx/; add_proto 
qw/void vpx_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad32x32x4d avx2 neon msa sse2 vsx mmi lsx/; +specialize qw/vpx_sad32x32x4d avx2 neon neon_dotprod msa sse2 vsx mmi lsx/; add_proto qw/void vpx_sad32x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad32x16x4d neon msa sse2 vsx mmi/; +specialize qw/vpx_sad32x16x4d neon neon_dotprod msa sse2 vsx mmi/; add_proto qw/void vpx_sad16x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad16x32x4d neon msa sse2 vsx mmi/; +specialize qw/vpx_sad16x32x4d neon neon_dotprod msa sse2 vsx mmi/; add_proto qw/void vpx_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad16x16x4d neon msa sse2 vsx mmi lsx/; +specialize qw/vpx_sad16x16x4d neon neon_dotprod msa sse2 vsx mmi lsx/; add_proto qw/void vpx_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad16x8x4d neon msa sse2 vsx mmi/; +specialize qw/vpx_sad16x8x4d neon neon_dotprod msa sse2 vsx mmi/; add_proto qw/void vpx_sad8x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; specialize qw/vpx_sad8x16x4d neon msa sse2 mmi/; @@ -967,28 +967,28 @@ add_proto qw/void vpx_sad4x4x4d/, "const uint8_t *src_ptr, int src_stride, const specialize qw/vpx_sad4x4x4d neon msa sse2 mmi/; add_proto qw/void vpx_sad_skip_64x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad_skip_64x64x4d neon avx2 sse2/; +specialize qw/vpx_sad_skip_64x64x4d neon 
neon_dotprod avx2 sse2/; add_proto qw/void vpx_sad_skip_64x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad_skip_64x32x4d neon avx2 sse2/; +specialize qw/vpx_sad_skip_64x32x4d neon neon_dotprod avx2 sse2/; add_proto qw/void vpx_sad_skip_32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad_skip_32x64x4d neon avx2 sse2/; +specialize qw/vpx_sad_skip_32x64x4d neon neon_dotprod avx2 sse2/; add_proto qw/void vpx_sad_skip_32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad_skip_32x32x4d neon avx2 sse2/; +specialize qw/vpx_sad_skip_32x32x4d neon neon_dotprod avx2 sse2/; add_proto qw/void vpx_sad_skip_32x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad_skip_32x16x4d neon avx2 sse2/; +specialize qw/vpx_sad_skip_32x16x4d neon neon_dotprod avx2 sse2/; add_proto qw/void vpx_sad_skip_16x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad_skip_16x32x4d neon sse2/; +specialize qw/vpx_sad_skip_16x32x4d neon neon_dotprod sse2/; add_proto qw/void vpx_sad_skip_16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad_skip_16x16x4d neon sse2/; +specialize qw/vpx_sad_skip_16x16x4d neon neon_dotprod sse2/; add_proto qw/void vpx_sad_skip_16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; -specialize qw/vpx_sad_skip_16x8x4d neon sse2/; +specialize qw/vpx_sad_skip_16x8x4d neon neon_dotprod sse2/; add_proto qw/void vpx_sad_skip_8x16x4d/, "const 
uint8_t *src_ptr, int src_stride, const uint8_t *const ref_array[4], int ref_stride, uint32_t sad_array[4]"; specialize qw/vpx_sad_skip_8x16x4d neon sse2/; -- 2.7.4