From ced982640b981751ecce62e5d8276fa81ee230e3 Mon Sep 17 00:00:00 2001
From: James Zern
Date: Thu, 2 Jul 2015 12:01:16 -0700
Subject: [PATCH] Revert "mips msa vp9 avg subpel variance optimization"

This reverts commit 61774ad1c44c73ccde48a2e3456e86196965b5dc.

this change causes MSA/VP9SubpelAvgVarianceTest.Ref failures under
mips32r5el-msa-linux-gnu and mips64r6el-msa-linux-gnu

Change-Id: I7fb520c12b2a3b212d5e84b7619a380a48e49bb0
---
 test/variance_test.cc                   |   41 -
 vp9/common/vp9_rtcd_defs.pl             |   26 +-
 vp9/encoder/mips/msa/vp9_variance_msa.c | 1365 ++-----------------------------
 3 files changed, 78 insertions(+), 1354 deletions(-)

diff --git a/test/variance_test.cc b/test/variance_test.cc
index 614c4d9..c9dbcd4 100644
--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -2095,47 +2095,6 @@ INSTANTIATE_TEST_CASE_P(
                       make_tuple(5, 6, subpel_variance32x64_msa, 0),
                       make_tuple(6, 5, subpel_variance64x32_msa, 0),
                       make_tuple(6, 6, subpel_variance64x64_msa, 0)));
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_msa =
-    vp9_sub_pixel_avg_variance4x4_msa;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_msa =
-    vp9_sub_pixel_avg_variance4x8_msa;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_msa =
-    vp9_sub_pixel_avg_variance8x4_msa;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_msa =
-    vp9_sub_pixel_avg_variance8x8_msa;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_msa =
-    vp9_sub_pixel_avg_variance8x16_msa;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_msa =
-    vp9_sub_pixel_avg_variance16x8_msa;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_msa =
-    vp9_sub_pixel_avg_variance16x16_msa;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_msa =
-    vp9_sub_pixel_avg_variance16x32_msa;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_msa =
-    vp9_sub_pixel_avg_variance32x16_msa;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_msa =
-    vp9_sub_pixel_avg_variance32x32_msa;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_msa =
-    vp9_sub_pixel_avg_variance32x64_msa;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_msa =
-    vp9_sub_pixel_avg_variance64x32_msa;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_msa =
-    vp9_sub_pixel_avg_variance64x64_msa;
-INSTANTIATE_TEST_CASE_P(
-    MSA, VP9SubpelAvgVarianceTest,
-    ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_msa, 0),
-                      make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
-                      make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
-                      make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
-                      make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
-                      make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
-                      make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
-                      make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
-                      make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
-                      make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
-                      make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
-                      make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
-                      make_tuple(6, 6, subpel_avg_variance64x64_msa, 0)));
 #endif  // CONFIG_VP9_ENCODER
 #endif  // HAVE_MSA
 }  // namespace
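Note on the failure mode: the instantiations removed above feed VP9SubpelAvgVarianceTest, whose Ref case runs each optimized function and the C reference over the same pseudo-random buffers for every subpel phase and requires bit-exact agreement. A rough standalone sketch of that check for one size (hypothetical harness, not the test's own code; the 16x16 prototypes match the vp9_rtcd_defs.pl entries below):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

unsigned int vp9_sub_pixel_avg_variance16x16_c(
    const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset,
    const uint8_t *ref_ptr, int ref_stride, unsigned int *sse,
    const uint8_t *second_pred);
unsigned int vp9_sub_pixel_avg_variance16x16_msa(
    const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset,
    const uint8_t *ref_ptr, int ref_stride, unsigned int *sse,
    const uint8_t *second_pred);

static void check_ref_16x16(void) {
  /* src needs one extra row/column for the subpel taps */
  uint8_t src[18 * 18], ref[16 * 16], second[16 * 16];
  unsigned int i, x, y;
  for (i = 0; i < sizeof(src); ++i) src[i] = rand() & 0xff;
  for (i = 0; i < sizeof(ref); ++i) ref[i] = rand() & 0xff;
  for (i = 0; i < sizeof(second); ++i) second[i] = rand() & 0xff;
  for (x = 0; x < 8; ++x) {    /* sweep the bilinear phases, as the test does */
    for (y = 0; y < 8; ++y) {
      unsigned int sse_c, sse_opt;
      const unsigned int var_c = vp9_sub_pixel_avg_variance16x16_c(
          src, 18, x, y, ref, 16, &sse_c, second);
      const unsigned int var_opt = vp9_sub_pixel_avg_variance16x16_msa(
          src, 18, x, y, ref, 16, &sse_opt, second);
      assert(var_c == var_opt && sse_c == sse_opt);  /* Ref fails on mismatch */
    }
  }
}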
diff --git a/vp9/common/vp9_rtcd_defs.pl b/vp9/common/vp9_rtcd_defs.pl
index af2307e..2dd0e2d 100644
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -808,81 +808,81 @@ add_proto qw/unsigned int vp9_sub_pixel_variance64x64/, "const uint8_t *src_ptr,
 specialize qw/vp9_sub_pixel_variance64x64 avx2 neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance64x64 avx2 msa/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance64x64 avx2/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
 specialize qw/vp9_sub_pixel_variance32x64 msa/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance32x64 msa/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance32x64/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
 specialize qw/vp9_sub_pixel_variance64x32 msa/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance64x32 msa/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance64x32/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
 specialize qw/vp9_sub_pixel_variance32x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance32x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance32x16/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
 specialize qw/vp9_sub_pixel_variance16x32 msa/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance16x32 msa/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance16x32/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
 specialize qw/vp9_sub_pixel_variance32x32 avx2 neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance32x32 avx2 msa/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance32x32 avx2/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
 specialize qw/vp9_sub_pixel_variance16x16 neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance16x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance16x16/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
 specialize qw/vp9_sub_pixel_variance8x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance8x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance8x16/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
 specialize qw/vp9_sub_pixel_variance16x8 msa/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance16x8 msa/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance16x8/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
 specialize qw/vp9_sub_pixel_variance8x8 neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance8x8 msa/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance8x8/, "$sse2_x86inc", "$ssse3_x86inc";
 
 # TODO(jingning): need to convert 8x4/4x8 functions into mmx/sse form
 add_proto qw/unsigned int vp9_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
 specialize qw/vp9_sub_pixel_variance8x4 msa/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance8x4 msa/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance8x4/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
 specialize qw/vp9_sub_pixel_variance4x8 msa/, "$sse_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance4x8 msa/, "$sse_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance4x8/, "$sse_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
 specialize qw/vp9_sub_pixel_variance4x4 msa/, "$sse_x86inc", "$ssse3_x86inc";
 #vp9_sub_pixel_variance4x4_sse2=vp9_sub_pixel_variance4x4_wmt
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
-specialize qw/vp9_sub_pixel_avg_variance4x4 msa/, "$sse_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_avg_variance4x4/, "$sse_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_avg_8x8/, "const uint8_t *, int p";
 specialize qw/vp9_avg_8x8 sse2 neon msa/;
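For orientation, these perl entries drive run-time CPU dispatch: add_proto declares the prototype, and specialize lists the per-ISA implementations the generated vp9_rtcd.h may select, so dropping msa here is what detaches the MSA avg variants from the encoder. A simplified sketch of the dispatch the script emits (illustrative only, not the generated file verbatim; HAS_SSE2's value here is a stand-in for the real CPU-flag constant):

#include <stdint.h>

unsigned int vp9_sub_pixel_avg_variance16x16_c(
    const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset,
    const uint8_t *ref_ptr, int ref_stride, unsigned int *sse,
    const uint8_t *second_pred);
unsigned int vp9_sub_pixel_avg_variance16x16_sse2(
    const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset,
    const uint8_t *ref_ptr, int ref_stride, unsigned int *sse,
    const uint8_t *second_pred);

/* One function pointer per prototype; setup upgrades it from the C default. */
unsigned int (*vp9_sub_pixel_avg_variance16x16)(
    const uint8_t *, int, int, int, const uint8_t *, int, unsigned int *,
    const uint8_t *) = vp9_sub_pixel_avg_variance16x16_c;

#define HAS_SSE2 0x08  /* illustrative flag value */

static void setup_rtcd_internal(int cpu_flags) {
  vp9_sub_pixel_avg_variance16x16 = vp9_sub_pixel_avg_variance16x16_c;
  if (cpu_flags & HAS_SSE2)
    vp9_sub_pixel_avg_variance16x16 = vp9_sub_pixel_avg_variance16x16_sse2;
  /* with msa removed from specialize, no MSA assignment is emitted here */
}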
unsigned int *sse"; specialize qw/vp9_sub_pixel_variance4x8 msa/, "$sse_x86inc", "$ssse3_x86inc"; add_proto qw/unsigned int vp9_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; -specialize qw/vp9_sub_pixel_avg_variance4x8 msa/, "$sse_x86inc", "$ssse3_x86inc"; +specialize qw/vp9_sub_pixel_avg_variance4x8/, "$sse_x86inc", "$ssse3_x86inc"; add_proto qw/unsigned int vp9_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; specialize qw/vp9_sub_pixel_variance4x4 msa/, "$sse_x86inc", "$ssse3_x86inc"; #vp9_sub_pixel_variance4x4_sse2=vp9_sub_pixel_variance4x4_wmt add_proto qw/unsigned int vp9_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; -specialize qw/vp9_sub_pixel_avg_variance4x4 msa/, "$sse_x86inc", "$ssse3_x86inc"; +specialize qw/vp9_sub_pixel_avg_variance4x4/, "$sse_x86inc", "$ssse3_x86inc"; add_proto qw/unsigned int vp9_avg_8x8/, "const uint8_t *, int p"; specialize qw/vp9_avg_8x8 sse2 neon msa/; diff --git a/vp9/encoder/mips/msa/vp9_variance_msa.c b/vp9/encoder/mips/msa/vp9_variance_msa.c index 33fb496..fba5d60 100644 --- a/vp9/encoder/mips/msa/vp9_variance_msa.c +++ b/vp9/encoder/mips/msa/vp9_variance_msa.c @@ -49,357 +49,6 @@ DECLARE_ALIGNED(256, static const int8_t, vp9_bilinear_filters_msa[15][2]) = { #define VARIANCE_LARGE_WxH(sse, diff, shift) \ sse - (((int64_t)diff * diff) >> shift) -static uint32_t avg_sse_diff_4width_msa(const uint8_t *src_ptr, - int32_t src_stride, - const uint8_t *ref_ptr, - int32_t ref_stride, - const uint8_t *sec_pred, - int32_t height, - int32_t *diff) { - int32_t ht_cnt; - uint32_t src0, src1, src2, src3; - uint32_t ref0, ref1, ref2, ref3; - v16u8 pred, src = { 0 }; - v16u8 ref = { 0 }; - v8i16 avg = { 0 }; - v4i32 vec, var = { 0 }; - - for (ht_cnt = (height >> 2); ht_cnt--;) { - pred = LD_UB(sec_pred); - sec_pred += 16; - LW4(src_ptr, src_stride, src0, src1, src2, src3); - src_ptr += (4 * src_stride); - LW4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3); - ref_ptr += (4 * ref_stride); - - INSERT_W4_UB(src0, src1, src2, src3, src); - INSERT_W4_UB(ref0, ref1, ref2, ref3, ref); - - src = __msa_aver_u_b(src, pred); - CALC_MSE_AVG_B(src, ref, var, avg); - } - - vec = __msa_hadd_s_w(avg, avg); - *diff = HADD_SW_S32(vec); - - return HADD_SW_S32(var); -} - -static uint32_t avg_sse_diff_8width_msa(const uint8_t *src_ptr, - int32_t src_stride, - const uint8_t *ref_ptr, - int32_t ref_stride, - const uint8_t *sec_pred, - int32_t height, - int32_t *diff) { - int32_t ht_cnt; - v16u8 src0, src1, src2, src3; - v16u8 ref0, ref1, ref2, ref3; - v16u8 pred0, pred1; - v8i16 avg = { 0 }; - v4i32 vec, var = { 0 }; - - for (ht_cnt = (height >> 2); ht_cnt--;) { - LD_UB2(sec_pred, 16, pred0, pred1); - sec_pred += 32; - LD_UB4(src_ptr, src_stride, src0, src1, src2, src3); - src_ptr += (4 * src_stride); - LD_UB4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3); - ref_ptr += (4 * ref_stride); - - PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, - src0, src1, ref0, ref1); - AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1); - CALC_MSE_AVG_B(src0, ref0, var, avg); - CALC_MSE_AVG_B(src1, ref1, var, avg); - } - - vec = __msa_hadd_s_w(avg, avg); - *diff = HADD_SW_S32(vec); - - return HADD_SW_S32(var); -} - -static 
uint32_t avg_sse_diff_16width_msa(const uint8_t *src_ptr, - int32_t src_stride, - const uint8_t *ref_ptr, - int32_t ref_stride, - const uint8_t *sec_pred, - int32_t height, - int32_t *diff) { - int32_t ht_cnt; - v16u8 src, ref, pred; - v8i16 avg = { 0 }; - v4i32 vec, var = { 0 }; - - for (ht_cnt = (height >> 2); ht_cnt--;) { - pred = LD_UB(sec_pred); - sec_pred += 16; - src = LD_UB(src_ptr); - src_ptr += src_stride; - ref = LD_UB(ref_ptr); - ref_ptr += ref_stride; - src = __msa_aver_u_b(src, pred); - CALC_MSE_AVG_B(src, ref, var, avg); - - pred = LD_UB(sec_pred); - sec_pred += 16; - src = LD_UB(src_ptr); - src_ptr += src_stride; - ref = LD_UB(ref_ptr); - ref_ptr += ref_stride; - src = __msa_aver_u_b(src, pred); - CALC_MSE_AVG_B(src, ref, var, avg); - - pred = LD_UB(sec_pred); - sec_pred += 16; - src = LD_UB(src_ptr); - src_ptr += src_stride; - ref = LD_UB(ref_ptr); - ref_ptr += ref_stride; - src = __msa_aver_u_b(src, pred); - CALC_MSE_AVG_B(src, ref, var, avg); - - pred = LD_UB(sec_pred); - sec_pred += 16; - src = LD_UB(src_ptr); - src_ptr += src_stride; - ref = LD_UB(ref_ptr); - ref_ptr += ref_stride; - src = __msa_aver_u_b(src, pred); - CALC_MSE_AVG_B(src, ref, var, avg); - } - - vec = __msa_hadd_s_w(avg, avg); - *diff = HADD_SW_S32(vec); - - return HADD_SW_S32(var); -} - -static uint32_t avg_sse_diff_32width_msa(const uint8_t *src_ptr, - int32_t src_stride, - const uint8_t *ref_ptr, - int32_t ref_stride, - const uint8_t *sec_pred, - int32_t height, - int32_t *diff) { - int32_t ht_cnt; - v16u8 src0, src1, ref0, ref1, pred0, pred1; - v8i16 avg = { 0 }; - v4i32 vec, var = { 0 }; - - for (ht_cnt = (height >> 2); ht_cnt--;) { - LD_UB2(sec_pred, 16, pred0, pred1); - sec_pred += 32; - LD_UB2(src_ptr, 16, src0, src1); - src_ptr += src_stride; - LD_UB2(ref_ptr, 16, ref0, ref1); - ref_ptr += ref_stride; - AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1); - CALC_MSE_AVG_B(src0, ref0, var, avg); - CALC_MSE_AVG_B(src1, ref1, var, avg); - - LD_UB2(sec_pred, 16, pred0, pred1); - sec_pred += 32; - LD_UB2(src_ptr, 16, src0, src1); - src_ptr += src_stride; - LD_UB2(ref_ptr, 16, ref0, ref1); - ref_ptr += ref_stride; - AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1); - CALC_MSE_AVG_B(src0, ref0, var, avg); - CALC_MSE_AVG_B(src1, ref1, var, avg); - - LD_UB2(sec_pred, 16, pred0, pred1); - sec_pred += 32; - LD_UB2(src_ptr, 16, src0, src1); - src_ptr += src_stride; - LD_UB2(ref_ptr, 16, ref0, ref1); - ref_ptr += ref_stride; - AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1); - CALC_MSE_AVG_B(src0, ref0, var, avg); - CALC_MSE_AVG_B(src1, ref1, var, avg); - - LD_UB2(sec_pred, 16, pred0, pred1); - sec_pred += 32; - LD_UB2(src_ptr, 16, src0, src1); - src_ptr += src_stride; - LD_UB2(ref_ptr, 16, ref0, ref1); - ref_ptr += ref_stride; - AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1); - CALC_MSE_AVG_B(src0, ref0, var, avg); - CALC_MSE_AVG_B(src1, ref1, var, avg); - } - - vec = __msa_hadd_s_w(avg, avg); - *diff = HADD_SW_S32(vec); - - return HADD_SW_S32(var); -} - -static uint32_t avg_sse_diff_32x64_msa(const uint8_t *src_ptr, - int32_t src_stride, - const uint8_t *ref_ptr, - int32_t ref_stride, - const uint8_t *sec_pred, - int32_t *diff) { - int32_t ht_cnt; - v16u8 src0, src1, ref0, ref1, pred0, pred1; - v8i16 avg0 = { 0 }; - v8i16 avg1 = { 0 }; - v4i32 vec, var = { 0 }; - - for (ht_cnt = 16; ht_cnt--;) { - LD_UB2(sec_pred, 16, pred0, pred1); - sec_pred += 32; - LD_UB2(src_ptr, 16, src0, src1); - src_ptr += src_stride; - LD_UB2(ref_ptr, 16, ref0, ref1); - ref_ptr += ref_stride; - AVER_UB2_UB(src0, pred0, src1, 
pred1, src0, src1); - CALC_MSE_AVG_B(src0, ref0, var, avg0); - CALC_MSE_AVG_B(src1, ref1, var, avg1); - - LD_UB2(sec_pred, 16, pred0, pred1); - sec_pred += 32; - LD_UB2(src_ptr, 16, src0, src1); - src_ptr += src_stride; - LD_UB2(ref_ptr, 16, ref0, ref1); - ref_ptr += ref_stride; - AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1); - CALC_MSE_AVG_B(src0, ref0, var, avg0); - CALC_MSE_AVG_B(src1, ref1, var, avg1); - - LD_UB2(sec_pred, 16, pred0, pred1); - sec_pred += 32; - LD_UB2(src_ptr, 16, src0, src1); - src_ptr += src_stride; - LD_UB2(ref_ptr, 16, ref0, ref1); - ref_ptr += ref_stride; - AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1); - CALC_MSE_AVG_B(src0, ref0, var, avg0); - CALC_MSE_AVG_B(src1, ref1, var, avg1); - - LD_UB2(sec_pred, 16, pred0, pred1); - sec_pred += 32; - LD_UB2(src_ptr, 16, src0, src1); - src_ptr += src_stride; - LD_UB2(ref_ptr, 16, ref0, ref1); - ref_ptr += ref_stride; - AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1); - CALC_MSE_AVG_B(src0, ref0, var, avg0); - CALC_MSE_AVG_B(src1, ref1, var, avg1); - } - - vec = __msa_hadd_s_w(avg0, avg0); - vec += __msa_hadd_s_w(avg1, avg1); - *diff = HADD_SW_S32(vec); - - return HADD_SW_S32(var); -} - -static uint32_t avg_sse_diff_64x32_msa(const uint8_t *src_ptr, - int32_t src_stride, - const uint8_t *ref_ptr, - int32_t ref_stride, - const uint8_t *sec_pred, - int32_t *diff) { - int32_t ht_cnt; - v16u8 src0, src1, src2, src3; - v16u8 ref0, ref1, ref2, ref3; - v16u8 pred0, pred1, pred2, pred3; - v8i16 avg0 = { 0 }; - v8i16 avg1 = { 0 }; - v4i32 vec, var = { 0 }; - - for (ht_cnt = 16; ht_cnt--;) { - LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3); - sec_pred += 64; - LD_UB4(src_ptr, 16, src0, src1, src2, src3); - src_ptr += src_stride; - LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3); - ref_ptr += ref_stride; - AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, - src0, src1, src2, src3); - CALC_MSE_AVG_B(src0, ref0, var, avg0); - CALC_MSE_AVG_B(src2, ref2, var, avg0); - CALC_MSE_AVG_B(src1, ref1, var, avg1); - CALC_MSE_AVG_B(src3, ref3, var, avg1); - - LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3); - sec_pred += 64; - LD_UB4(src_ptr, 16, src0, src1, src2, src3); - src_ptr += src_stride; - LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3); - ref_ptr += ref_stride; - AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, - src0, src1, src2, src3); - CALC_MSE_AVG_B(src0, ref0, var, avg0); - CALC_MSE_AVG_B(src2, ref2, var, avg0); - CALC_MSE_AVG_B(src1, ref1, var, avg1); - CALC_MSE_AVG_B(src3, ref3, var, avg1); - } - - vec = __msa_hadd_s_w(avg0, avg0); - vec += __msa_hadd_s_w(avg1, avg1); - - *diff = HADD_SW_S32(vec); - - return HADD_SW_S32(var); -} - -static uint32_t avg_sse_diff_64x64_msa(const uint8_t *src_ptr, - int32_t src_stride, - const uint8_t *ref_ptr, - int32_t ref_stride, - const uint8_t *sec_pred, - int32_t *diff) { - int32_t ht_cnt; - v16u8 src0, src1, src2, src3; - v16u8 ref0, ref1, ref2, ref3; - v16u8 pred0, pred1, pred2, pred3; - v8i16 avg0 = { 0 }; - v8i16 avg1 = { 0 }; - v8i16 avg2 = { 0 }; - v8i16 avg3 = { 0 }; - v4i32 vec, var = { 0 }; - - for (ht_cnt = 32; ht_cnt--;) { - LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3); - sec_pred += 64; - LD_UB4(src_ptr, 16, src0, src1, src2, src3); - src_ptr += src_stride; - LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3); - ref_ptr += ref_stride; - AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, - src0, src1, src2, src3); - CALC_MSE_AVG_B(src0, ref0, var, avg0); - CALC_MSE_AVG_B(src1, ref1, var, avg1); - CALC_MSE_AVG_B(src2, ref2, var, avg2); - 
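Every helper deleted in the hunk above computes the same two accumulators at a different block size: average the source block with the compound predictor (rounding up, matching __msa_aver_u_b), then gather the sum of squared errors and the signed difference sum against the reference. A scalar model (hypothetical C helper, for orientation only):

#include <stdint.h>

static uint32_t avg_sse_diff_c(const uint8_t *src_ptr, int src_stride,
                               const uint8_t *ref_ptr, int ref_stride,
                               const uint8_t *sec_pred, int width, int height,
                               int32_t *diff) {
  uint32_t sse = 0;  /* accumulated like 'var' in the vector code */
  int32_t sum = 0;   /* accumulated like 'avg' in the vector code */
  int i, j;
  for (i = 0; i < height; ++i) {
    for (j = 0; j < width; ++j) {
      const int avg = (src_ptr[j] + sec_pred[j] + 1) >> 1;  /* aver_u_b */
      const int d = avg - ref_ptr[j];
      sum += d;
      sse += d * d;
    }
    src_ptr += src_stride;
    ref_ptr += ref_stride;
    sec_pred += width;  /* the second predictor is packed, stride == width */
  }
  *diff = sum;
  return sse;
}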
@@ -748,6 +397,7 @@ static uint32_t sub_pixel_sse_diff_16width_v_msa(const uint8_t *src,
     src0 = src4;
 
+    /* loop runs height/4 */
     CALC_MSE_AVG_B(out0, ref0, var, avg);
     CALC_MSE_AVG_B(out1, ref1, var, avg);
     CALC_MSE_AVG_B(out2, ref2, var, avg);
     CALC_MSE_AVG_B(out3, ref3, var, avg);
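The sub_pixel_sse_diff_* paths touched above (and their avg twins removed below) share one first-pass primitive: a 2-tap bilinear filter whose taps come from vp9_bilinear_filters_msa[offset - 1] and whose result is rounded by FILTER_BITS, then saturated to 8 bits. A scalar sketch of the horizontal pass (illustrative; it assumes the two taps sum to 1 << FILTER_BITS, which holds for the table at the top of the file):

#include <stdint.h>

static void bilinear_h_c(const uint8_t *src, int src_stride,
                         uint8_t *dst, int dst_stride,
                         int width, int height,
                         const int8_t *filter, int filter_bits) {
  const int rounding = 1 << (filter_bits - 1);
  int i, j;
  for (i = 0; i < height; ++i) {
    for (j = 0; j < width; ++j) {
      /* the DOTP + SRARI steps in the vector code */
      int v = (src[j] * filter[0] + src[j + 1] * filter[1] + rounding)
              >> filter_bits;
      dst[j] = (uint8_t)(v > 255 ? 255 : v);  /* MIN_UH4_UH / SAT_UH2_UH clamp */
    }
    src += src_stride;
    dst += dst_stride;
  }
}

The vertical pass is the same computation with src[j + 1] replaced by the pixel one row down, and the hv path feeds the horizontal output through the vertical filter.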
@@ -1050,962 +700,77 @@ static uint32_t sub_pixel_sse_diff_64width_hv_msa(const uint8_t *src,
   return sse;
 }
 
-static uint32_t sub_pixel_avg_sse_diff_4width_h_msa(const uint8_t *src,
-                                                    int32_t src_stride,
-                                                    const uint8_t *dst,
-                                                    int32_t dst_stride,
-                                                    const uint8_t *sec_pred,
-                                                    const int8_t *filter,
-                                                    int32_t height,
-                                                    int32_t *diff) {
-  int16_t filtval;
-  uint32_t loop_cnt;
-  uint32_t ref0, ref1, ref2, ref3;
-  v16u8 out, pred, filt0, ref = { 0 };
-  v16i8 src0, src1, src2, src3;
-  v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
-  v8u16 vec0, vec1, vec2, vec3;
-  v8u16 const255;
-  v8i16 avg = { 0 };
-  v4i32 vec, var = { 0 };
-
-  filtval = LH(filter);
-  filt0 = (v16u8)__msa_fill_h(filtval);
-
-  const255 = (v8u16)__msa_ldi_h(255);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src0, src1, src2, src3);
-    src += (4 * src_stride);
-    pred = LD_UB(sec_pred);
-    sec_pred += 16;
-    LW4(dst, dst_stride, ref0, ref1, ref2, ref3);
-    dst += (4 * dst_stride);
-
-    INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
-    VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
-                vec0, vec1, vec2, vec3);
-    SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-    MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
-    PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3,
-                src0, src1, src2, src3);
-    ILVEV_W2_SB(src0, src1, src2, src3, src0, src2);
-    out = (v16u8)__msa_ilvev_d((v2i64)src2, (v2i64)src0);
-    out = __msa_aver_u_b(out, pred);
-    CALC_MSE_AVG_B(out, ref, var, avg);
-  }
+#define VARIANCE_4Wx4H(sse, diff) VARIANCE_WxH(sse, diff, 4);
+#define VARIANCE_4Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 5);
+#define VARIANCE_8Wx4H(sse, diff) VARIANCE_WxH(sse, diff, 5);
+#define VARIANCE_8Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 6);
+#define VARIANCE_8Wx16H(sse, diff) VARIANCE_WxH(sse, diff, 7);
+#define VARIANCE_16Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 7);
+#define VARIANCE_16Wx16H(sse, diff) VARIANCE_WxH(sse, diff, 8);
 
-  vec = __msa_hadd_s_w(avg, avg);
-  *diff = HADD_SW_S32(vec);
+#define VARIANCE_16Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 9);
+#define VARIANCE_32Wx16H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 9);
+#define VARIANCE_32Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 10);
+#define VARIANCE_32Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
+#define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
+#define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12);
 
-  return HADD_SW_S32(var);
+#define VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht) \
+uint32_t vp9_sub_pixel_variance##wd##x##ht##_msa(const uint8_t *src, \
+                                                 int32_t src_stride, \
+                                                 int32_t xoffset, \
+                                                 int32_t yoffset, \
+                                                 const uint8_t *ref, \
+                                                 int32_t ref_stride, \
+                                                 uint32_t *sse) { \
+  int32_t diff; \
+  uint32_t var; \
+  const int8_t *h_filter = vp9_bilinear_filters_msa[xoffset - 1]; \
+  const int8_t *v_filter = vp9_bilinear_filters_msa[yoffset - 1]; \
+  \
+  if (yoffset) { \
+    if (xoffset) { \
+      *sse = sub_pixel_sse_diff_##wd##width_hv_msa(src, src_stride, \
+                                                   ref, ref_stride, \
+                                                   h_filter, v_filter, \
+                                                   ht, &diff); \
+    } else { \
+      *sse = sub_pixel_sse_diff_##wd##width_v_msa(src, src_stride, \
+                                                  ref, ref_stride, \
+                                                  v_filter, ht, &diff); \
+    } \
+    \
+    var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \
+  } else { \
+    if (xoffset) { \
+      *sse = sub_pixel_sse_diff_##wd##width_h_msa(src, src_stride, \
+                                                  ref, ref_stride, \
+                                                  h_filter, ht, &diff); \
+      \
+      var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \
+    } else { \
+      var = vpx_variance##wd##x##ht##_msa(src, src_stride, \
+                                          ref, ref_stride, sse); \
+    } \
+  } \
+  \
+  return var; \
 }
 
-static uint32_t sub_pixel_avg_sse_diff_8width_h_msa(const uint8_t *src,
-                                                    int32_t src_stride,
-                                                    const uint8_t *dst,
-                                                    int32_t dst_stride,
-                                                    const uint8_t *sec_pred,
-                                                    const int8_t *filter,
-                                                    int32_t height,
-                                                    int32_t *diff) {
-  int16_t filtval;
-  uint32_t loop_cnt;
-  v16u8 out, pred, filt0;
-  v16u8 ref0, ref1, ref2, ref3;
-  v16i8 src0, src1, src2, src3;
-  v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
-  v8u16 vec0, vec1, vec2, vec3;
-  v8u16 const255;
-  v8i16 avg = { 0 };
-  v4i32 vec, var = { 0 };
-
-  filtval = LH(filter);
-  filt0 = (v16u8)__msa_fill_h(filtval);
-
-  const255 = (v8u16)__msa_ldi_h(255);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src0, src1, src2, src3);
-    src += (4 * src_stride);
-    LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
-    dst += (4 * dst_stride);
-
-    PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
-    VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
-                vec0, vec1, vec2, vec3);
-    SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-    MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
-    PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3,
-                src0, src1, src2, src3);
-    out = (v16u8)__msa_ilvev_d((v2i64)src1, (v2i64)src0);
-
-    pred = LD_UB(sec_pred);
-    sec_pred += 16;
-    out = __msa_aver_u_b(out, pred);
-    CALC_MSE_AVG_B(out, ref0, var, avg);
-    out = (v16u8)__msa_ilvev_d((v2i64)src3, (v2i64)src2);
-    pred = LD_UB(sec_pred);
-    sec_pred += 16;
-    out = __msa_aver_u_b(out, pred);
-    CALC_MSE_AVG_B(out, ref1, var, avg);
-  }
-
-  vec = __msa_hadd_s_w(avg, avg);
-  *diff = HADD_SW_S32(vec);
-
-  return HADD_SW_S32(var);
-}
+VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 4);
+VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 8);
 
-static uint32_t subpel_avg_ssediff_16w_h_msa(const uint8_t *src,
-                                             int32_t src_stride,
-                                             const uint8_t *dst,
-                                             int32_t dst_stride,
-                                             const uint8_t *sec_pred,
-                                             const int8_t *filter,
-                                             int32_t height,
-                                             int32_t *diff,
-                                             int32_t width) {
-  int16_t filtval;
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
-  v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
-  v16u8 dst0, dst1, dst2, dst3;
-  v16u8 tmp0, tmp1, tmp2, tmp3;
-  v16u8 pred0, pred1, pred2, pred3, filt0;
-  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8u16 out0, out1, out2, out3, out4, out5, out6, out7;
-  v8u16 const255;
-  v8i16 avg = { 0 };
-  v4i32 vec, var = { 0 };
+VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 4);
+VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 8);
+VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 16);
 
-  filtval = LH(filter);
-  filt0 = (v16u8)__msa_fill_h(filtval);
+VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 8);
+VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 16);
+VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 32);
 
-  const255 = (v8u16)__msa_ldi_h(255);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src0, src2, src4, src6);
-    LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
-    src += (4 * src_stride);
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    dst += (4 * dst_stride);
-    LD_UB4(sec_pred, width, pred0, pred1, pred2, pred3);
-    sec_pred += (4 * width);
-
-    VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    VSHF_B2_UH(src4, src4, src5, src5, mask, mask, vec4, vec5);
-    VSHF_B2_UH(src6, src6, src7, src7, mask, mask, vec6, vec7);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
-                out0, out1, out2, out3);
-    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0,
-                out4, out5, out6, out7);
-    SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
-    SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
-    MIN_UH4_UH(out0, out1, out2, out3, const255);
-    MIN_UH4_UH(out4, out5, out6, out7, const255);
-    PCKEV_B4_UB(out1, out0, out3, out2, out5, out4, out7, out6,
-                tmp0, tmp1, tmp2, tmp3);
-    AVER_UB4_UB(tmp0, pred0, tmp1, pred1, tmp2, pred2, tmp3, pred3,
-                tmp0, tmp1, tmp2, tmp3);
-
-    CALC_MSE_AVG_B(tmp0, dst0, var, avg);
-    CALC_MSE_AVG_B(tmp1, dst1, var, avg);
-    CALC_MSE_AVG_B(tmp2, dst2, var, avg);
-    CALC_MSE_AVG_B(tmp3, dst3, var, avg);
-  }
-
-  vec = __msa_hadd_s_w(avg, avg);
-  *diff = HADD_SW_S32(vec);
-
-  return HADD_SW_S32(var);
-}
-
-static uint32_t sub_pixel_avg_sse_diff_16width_h_msa(const uint8_t *src,
-                                                     int32_t src_stride,
-                                                     const uint8_t *dst,
-                                                     int32_t dst_stride,
-                                                     const uint8_t *sec_pred,
-                                                     const int8_t *filter,
-                                                     int32_t height,
-                                                     int32_t *diff) {
-  return subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride,
-                                      sec_pred, filter, height, diff, 16);
-}
-
-static uint32_t sub_pixel_avg_sse_diff_32width_h_msa(const uint8_t *src,
-                                                     int32_t src_stride,
-                                                     const uint8_t *dst,
-                                                     int32_t dst_stride,
-                                                     const uint8_t *sec_pred,
-                                                     const int8_t *filter,
-                                                     int32_t height,
-                                                     int32_t *diff) {
-  uint32_t loop_cnt, sse = 0;
-  int32_t diff0[2];
-
-  for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
-    sse += subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride,
-                                        sec_pred, filter, height,
-                                        &diff0[loop_cnt], 32);
-    src += 16;
-    dst += 16;
-    sec_pred += 16;
-  }
-
-  *diff = diff0[0] + diff0[1];
-
-  return sse;
-}
-
-static uint32_t sub_pixel_avg_sse_diff_64width_h_msa(const uint8_t *src,
-                                                     int32_t src_stride,
-                                                     const uint8_t *dst,
-                                                     int32_t dst_stride,
-                                                     const uint8_t *sec_pred,
-                                                     const int8_t *filter,
-                                                     int32_t height,
-                                                     int32_t *diff) {
-  uint32_t loop_cnt, sse = 0;
-  int32_t diff0[4];
-
-  for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
-    sse += subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride,
-                                        sec_pred, filter, height,
-                                        &diff0[loop_cnt], 64);
-    src += 16;
-    dst += 16;
-    sec_pred += 16;
-  }
-
-  *diff = diff0[0] + diff0[1] + diff0[2] + diff0[3];
-
-  return sse;
-}
-
-static uint32_t sub_pixel_avg_sse_diff_4width_v_msa(const uint8_t *src,
-                                                    int32_t src_stride,
-                                                    const uint8_t *dst,
-                                                    int32_t dst_stride,
-                                                    const uint8_t *sec_pred,
-                                                    const int8_t *filter,
-                                                    int32_t height,
-                                                    int32_t *diff) {
-  int16_t filtval;
-  uint32_t loop_cnt;
-  uint32_t ref0, ref1, ref2, ref3;
-  v16u8 src0, src1, src2, src3, src4;
-  v16u8 src10_r, src32_r, src21_r, src43_r;
-  v16u8 out, pred, ref = { 0 };
-  v16u8 src2110, src4332, filt0;
-  v8i16 avg = { 0 };
-  v4i32 vec, var = { 0 };
-  v8u16 tmp0, tmp1;
-
-  filtval = LH(filter);
-  filt0 = (v16u8)__msa_fill_h(filtval);
-
-  src0 = LD_UB(src);
-  src += src_stride;
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_UB4(src, src_stride, src1, src2, src3, src4);
-    src += (4 * src_stride);
-    pred = LD_UB(sec_pred);
-    sec_pred += 16;
-    LW4(dst, dst_stride, ref0, ref1, ref2, ref3);
-    dst += (4 * dst_stride);
-
-    INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
-    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
-               src10_r, src21_r, src32_r, src43_r);
-    ILVR_D2_UB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
-    DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
-
-    out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
-    out = __msa_aver_u_b(out, pred);
-    CALC_MSE_AVG_B(out, ref, var, avg);
-    src0 = src4;
-  }
-
-  vec = __msa_hadd_s_w(avg, avg);
-  *diff = HADD_SW_S32(vec);
-
-  return HADD_SW_S32(var);
-}
-
-static uint32_t sub_pixel_avg_sse_diff_8width_v_msa(const uint8_t *src,
-                                                    int32_t src_stride,
-                                                    const uint8_t *dst,
-                                                    int32_t dst_stride,
-                                                    const uint8_t *sec_pred,
-                                                    const int8_t *filter,
-                                                    int32_t height,
-                                                    int32_t *diff) {
-  int16_t filtval;
-  uint32_t loop_cnt;
-  v16u8 src0, src1, src2, src3, src4;
-  v16u8 ref0, ref1, ref2, ref3;
-  v16u8 pred0, pred1, filt0;
-  v8u16 vec0, vec1, vec2, vec3;
-  v8u16 tmp0, tmp1, tmp2, tmp3;
-  v8i16 avg = { 0 };
-  v4i32 vec, var = { 0 };
-
-  filtval = LH(filter);
-  filt0 = (v16u8)__msa_fill_h(filtval);
-
-  src0 = LD_UB(src);
-  src += src_stride;
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_UB4(src, src_stride, src1, src2, src3, src4);
-    src += (4 * src_stride);
-    LD_UB2(sec_pred, 16, pred0, pred1);
-    sec_pred += 32;
-    LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
-    dst += (4 * dst_stride);
-    PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
-    ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3,
-               vec0, vec1, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
-                tmp0, tmp1, tmp2, tmp3);
-    SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-    SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
-    PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1);
-    AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
-    CALC_MSE_AVG_B(src0, ref0, var, avg);
-    CALC_MSE_AVG_B(src1, ref1, var, avg);
-
-    src0 = src4;
-  }
-
-  vec = __msa_hadd_s_w(avg, avg);
-  *diff = HADD_SW_S32(vec);
-
-  return HADD_SW_S32(var);
-}
-
-static uint32_t subpel_avg_ssediff_16w_v_msa(const uint8_t *src,
-                                             int32_t src_stride,
-                                             const uint8_t *dst,
-                                             int32_t dst_stride,
-                                             const uint8_t *sec_pred,
-                                             const int8_t *filter,
-                                             int32_t height,
-                                             int32_t *diff,
-                                             int32_t width) {
-  int16_t filtval;
-  uint32_t loop_cnt;
-  v16u8 ref0, ref1, ref2, ref3;
-  v16u8 pred0, pred1, pred2, pred3;
-  v16u8 src0, src1, src2, src3, src4;
-  v16u8 out0, out1, out2, out3, filt0;
-  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8u16 tmp0, tmp1, tmp2, tmp3;
-  v8i16 avg = { 0 };
-  v4i32 vec, var = { 0 };
-
-  filtval = LH(filter);
-  filt0 = (v16u8)__msa_fill_h(filtval);
-
-  src0 = LD_UB(src);
-  src += src_stride;
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_UB4(src, src_stride, src1, src2, src3, src4);
-    src += (4 * src_stride);
-    LD_UB4(sec_pred, width, pred0, pred1, pred2, pred3);
-    sec_pred += (4 * width);
-
-    ILVR_B2_UH(src1, src0, src2, src1, vec0, vec2);
-    ILVL_B2_UH(src1, src0, src2, src1, vec1, vec3);
-    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
-    out0 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
-
-    ILVR_B2_UH(src3, src2, src4, src3, vec4, vec6);
-    ILVL_B2_UH(src3, src2, src4, src3, vec5, vec7);
-    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    SAT_UH2_UH(tmp2, tmp3, 7);
-    out1 = (v16u8)__msa_pckev_b((v16i8)tmp3, (v16i8)tmp2);
-
-    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
-    out2 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
-
-    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    SAT_UH2_UH(tmp2, tmp3, 7);
-    out3 = (v16u8)__msa_pckev_b((v16i8)tmp3, (v16i8)tmp2);
-
-    src0 = src4;
-    LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
-    dst += (4 * dst_stride);
-
-    AVER_UB4_UB(out0, pred0, out1, pred1, out2, pred2, out3, pred3,
-                out0, out1, out2, out3);
-
-    CALC_MSE_AVG_B(out0, ref0, var, avg);
-    CALC_MSE_AVG_B(out1, ref1, var, avg);
-    CALC_MSE_AVG_B(out2, ref2, var, avg);
-    CALC_MSE_AVG_B(out3, ref3, var, avg);
-  }
-
-  vec = __msa_hadd_s_w(avg, avg);
-  *diff = HADD_SW_S32(vec);
-
-  return HADD_SW_S32(var);
-}
-
-static uint32_t sub_pixel_avg_sse_diff_16width_v_msa(const uint8_t *src,
-                                                     int32_t src_stride,
-                                                     const uint8_t *dst,
-                                                     int32_t dst_stride,
-                                                     const uint8_t *sec_pred,
-                                                     const int8_t *filter,
-                                                     int32_t height,
-                                                     int32_t *diff) {
-  return subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride,
-                                      sec_pred, filter, height, diff, 16);
-}
-
-static uint32_t sub_pixel_avg_sse_diff_32width_v_msa(const uint8_t *src,
-                                                     int32_t src_stride,
-                                                     const uint8_t *dst,
-                                                     int32_t dst_stride,
-                                                     const uint8_t *sec_pred,
-                                                     const int8_t *filter,
-                                                     int32_t height,
-                                                     int32_t *diff) {
-  uint32_t loop_cnt, sse = 0;
-  int32_t diff0[2];
-
-  for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
-    sse += subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride,
-                                        sec_pred, filter, height,
-                                        &diff0[loop_cnt], 32);
-    src += 16;
-    dst += 16;
-    sec_pred += 16;
-  }
-
-  *diff = diff0[0] + diff0[1];
-
-  return sse;
-}
-
-static uint32_t sub_pixel_avg_sse_diff_64width_v_msa(const uint8_t *src,
-                                                     int32_t src_stride,
-                                                     const uint8_t *dst,
-                                                     int32_t dst_stride,
-                                                     const uint8_t *sec_pred,
-                                                     const int8_t *filter,
-                                                     int32_t height,
-                                                     int32_t *diff) {
-  uint32_t loop_cnt, sse = 0;
-  int32_t diff0[4];
-
-  for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
-    sse += subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride,
-                                        sec_pred, filter, height,
-                                        &diff0[loop_cnt], 64);
-    src += 16;
-    dst += 16;
-    sec_pred += 16;
-  }
-
-  *diff = diff0[0] + diff0[1] + diff0[2] + diff0[3];
-
-  return sse;
-}
-
-static uint32_t sub_pixel_avg_sse_diff_4width_hv_msa(
-    const uint8_t *src, int32_t src_stride,
-    const uint8_t *dst, int32_t dst_stride,
-    const uint8_t *sec_pred,
-    const int8_t *filter_horiz, const int8_t *filter_vert,
-    int32_t height, int32_t *diff) {
-  int16_t filtval;
-  uint32_t loop_cnt;
-  uint32_t ref0, ref1, ref2, ref3;
-  v16u8 src0, src1, src2, src3, src4;
-  v16u8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20 };
-  v16u8 filt_hz, filt_vt, vec0, vec1;
-  v16u8 out, pred, ref = { 0 };
-  v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, tmp0, tmp1;
-  v8i16 avg = { 0 };
-  v4i32 vec, var = { 0 };
-
-  filtval = LH(filter_horiz);
-  filt_hz = (v16u8)__msa_fill_h(filtval);
-  filtval = LH(filter_vert);
-  filt_vt = (v16u8)__msa_fill_h(filtval);
-
-  src0 = LD_UB(src);
-  src += src_stride;
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_UB4(src, src_stride, src1, src2, src3, src4);
-    src += (4 * src_stride);
-    pred = LD_UB(sec_pred);
-    sec_pred += 16;
-    LW4(dst, dst_stride, ref0, ref1, ref2, ref3);
-    dst += (4 * dst_stride);
-    INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
-    hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, FILTER_BITS);
-    hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, FILTER_BITS);
-    hz_out4 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
-    hz_out1 = (v8u16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8);
-    hz_out3 = (v8u16)__msa_pckod_d((v2i64)hz_out4, (v2i64)hz_out2);
-    ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
-    DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
-    out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
-    out = __msa_aver_u_b(out, pred);
-    CALC_MSE_AVG_B(out, ref, var, avg);
-    src0 = src4;
-  }
-
-  vec = __msa_hadd_s_w(avg, avg);
-  *diff = HADD_SW_S32(vec);
-
-  return HADD_SW_S32(var);
-}
-
-static uint32_t sub_pixel_avg_sse_diff_8width_hv_msa(
-    const uint8_t *src, int32_t src_stride,
-    const uint8_t *dst, int32_t dst_stride,
-    const uint8_t *sec_pred,
-    const int8_t *filter_horiz, const int8_t *filter_vert,
-    int32_t height, int32_t *diff) {
-  int16_t filtval;
-  uint32_t loop_cnt;
-  v16u8 ref0, ref1, ref2, ref3;
-  v16u8 src0, src1, src2, src3, src4;
-  v16u8 pred0, pred1, out0, out1;
-  v16u8 filt_hz, filt_vt, vec0;
-  v16u8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
-  v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3;
-  v8i16 avg = { 0 };
-  v4i32 vec, var = { 0 };
-
-  filtval = LH(filter_horiz);
-  filt_hz = (v16u8)__msa_fill_h(filtval);
-  filtval = LH(filter_vert);
-  filt_vt = (v16u8)__msa_fill_h(filtval);
-
-  src0 = LD_UB(src);
-  src += src_stride;
-  hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_UB4(src, src_stride, src1, src2, src3, src4);
-    src += (4 * src_stride);
-    LD_UB2(sec_pred, 16, pred0, pred1);
-    sec_pred += 32;
-    LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
-    dst += (4 * dst_stride);
-
-    PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
-    hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
-
-    vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
-    tmp0 = __msa_dotp_u_h(vec0, filt_vt);
-    hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, FILTER_BITS);
-
-    vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
-    tmp1 = __msa_dotp_u_h(vec0, filt_vt);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
-    hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, FILTER_BITS);
-
-    vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
-    tmp2 = __msa_dotp_u_h(vec0, filt_vt);
-    hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
-
-    vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
-    tmp3 = __msa_dotp_u_h(vec0, filt_vt);
-
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    SAT_UH2_UH(tmp2, tmp3, 7);
-    PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
-    AVER_UB2_UB(out0, pred0, out1, pred1, out0, out1);
-
-    CALC_MSE_AVG_B(out0, ref0, var, avg);
-    CALC_MSE_AVG_B(out1, ref1, var, avg);
-  }
-
-  vec = __msa_hadd_s_w(avg, avg);
-  *diff = HADD_SW_S32(vec);
-
-  return HADD_SW_S32(var);
-}
-
-static uint32_t subpel_avg_ssediff_16w_hv_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              const uint8_t *dst,
-                                              int32_t dst_stride,
-                                              const uint8_t *sec_pred,
-                                              const int8_t *filter_horiz,
-                                              const int8_t *filter_vert,
-                                              int32_t height,
-                                              int32_t *diff,
-                                              int32_t width) {
-  int16_t filtval;
-  uint32_t loop_cnt;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
-  v16u8 ref0, ref1, ref2, ref3;
-  v16u8 pred0, pred1, pred2, pred3;
-  v16u8 out0, out1, out2, out3;
-  v16u8 filt_hz, filt_vt, vec0, vec1;
-  v16u8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
-  v8u16 hz_out0, hz_out1, hz_out2, hz_out3, tmp0, tmp1;
-  v8i16 avg = { 0 };
-  v4i32 vec, var = { 0 };
-
-  filtval = LH(filter_horiz);
-  filt_hz = (v16u8)__msa_fill_h(filtval);
-  filtval = LH(filter_vert);
-  filt_vt = (v16u8)__msa_fill_h(filtval);
-
-  LD_UB2(src, 8, src0, src1);
-  src += src_stride;
-
-  hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
-  hz_out2 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_UB4(src, src_stride, src0, src2, src4, src6);
-    LD_UB4(src + 8, src_stride, src1, src3, src5, src7);
-    src += (4 * src_stride);
-    LD_UB4(sec_pred, width, pred0, pred1, pred2, pred3);
-    sec_pred += (4 * width);
-
-    hz_out1 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
-    hz_out3 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
-    ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
-    DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
-    out0 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
-
-    hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, FILTER_BITS);
-    hz_out2 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, FILTER_BITS);
-    ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1);
-    DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
-    out1 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
-
-    hz_out1 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
-    hz_out3 = HORIZ_2TAP_FILT_UH(src5, src5, mask, filt_hz, FILTER_BITS);
-    ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
-    DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
-    out2 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
-
-    hz_out0 = HORIZ_2TAP_FILT_UH(src6, src6, mask, filt_hz, FILTER_BITS);
-    hz_out2 = HORIZ_2TAP_FILT_UH(src7, src7, mask, filt_hz, FILTER_BITS);
-    ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1);
-    DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
-    out3 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
-
-    LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
-    dst += (4 * dst_stride);
-
-    AVER_UB4_UB(out0, pred0, out1, pred1, out2, pred2, out3, pred3,
-                out0, out1, out2, out3);
-
-    CALC_MSE_AVG_B(out0, ref0, var, avg);
-    CALC_MSE_AVG_B(out1, ref1, var, avg);
-    CALC_MSE_AVG_B(out2, ref2, var, avg);
-    CALC_MSE_AVG_B(out3, ref3, var, avg);
-  }
-
-  vec = __msa_hadd_s_w(avg, avg);
-  *diff = HADD_SW_S32(vec);
-
-  return HADD_SW_S32(var);
-}
-
-static uint32_t sub_pixel_avg_sse_diff_16width_hv_msa(
-    const uint8_t *src, int32_t src_stride,
-    const uint8_t *dst, int32_t dst_stride,
-    const uint8_t *sec_pred,
-    const int8_t *filter_horiz, const int8_t *filter_vert,
-    int32_t height, int32_t *diff) {
-  return subpel_avg_ssediff_16w_hv_msa(src, src_stride, dst, dst_stride,
-                                       sec_pred, filter_horiz, filter_vert,
-                                       height, diff, 16);
-}
-
-static uint32_t sub_pixel_avg_sse_diff_32width_hv_msa(
-    const uint8_t *src, int32_t src_stride,
-    const uint8_t *dst, int32_t dst_stride,
-    const uint8_t *sec_pred,
-    const int8_t *filter_horiz, const int8_t *filter_vert,
-    int32_t height, int32_t *diff) {
-  uint32_t loop_cnt, sse = 0;
-  int32_t diff0[2];
-
-  for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
-    sse += subpel_avg_ssediff_16w_hv_msa(src, src_stride, dst, dst_stride,
-                                         sec_pred, filter_horiz, filter_vert,
-                                         height, &diff0[loop_cnt], 32);
-    src += 16;
-    dst += 16;
-    sec_pred += 16;
-  }
-
-  *diff = diff0[0] + diff0[1];
-
-  return sse;
-}
-
-static uint32_t sub_pixel_avg_sse_diff_64width_hv_msa(
-    const uint8_t *src, int32_t src_stride,
-    const uint8_t *dst, int32_t dst_stride,
-    const uint8_t *sec_pred,
-    const int8_t *filter_horiz, const int8_t *filter_vert,
-    int32_t height, int32_t *diff) {
-  uint32_t loop_cnt, sse = 0;
-  int32_t diff0[4];
-
-  for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
-    sse += subpel_avg_ssediff_16w_hv_msa(src, src_stride, dst, dst_stride,
-                                         sec_pred, filter_horiz, filter_vert,
-                                         height, &diff0[loop_cnt], 64);
-    src += 16;
-    dst += 16;
-    sec_pred += 16;
-  }
-
-  *diff = diff0[0] + diff0[1] + diff0[2] + diff0[3];
-
-  return sse;
-}
-
-#define VARIANCE_4Wx4H(sse, diff) VARIANCE_WxH(sse, diff, 4);
-#define VARIANCE_4Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 5);
-#define VARIANCE_8Wx4H(sse, diff) VARIANCE_WxH(sse, diff, 5);
-#define VARIANCE_8Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 6);
-#define VARIANCE_8Wx16H(sse, diff) VARIANCE_WxH(sse, diff, 7);
-#define VARIANCE_16Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 7);
-#define VARIANCE_16Wx16H(sse, diff) VARIANCE_WxH(sse, diff, 8);
-
-#define VARIANCE_16Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 9);
-#define VARIANCE_32Wx16H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 9);
-#define VARIANCE_32Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 10);
-#define VARIANCE_32Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
-#define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
-#define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12);
-
-#define VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht) \
-uint32_t vp9_sub_pixel_variance##wd##x##ht##_msa(const uint8_t *src, \
-                                                 int32_t src_stride, \
-                                                 int32_t xoffset, \
-                                                 int32_t yoffset, \
-                                                 const uint8_t *ref, \
-                                                 int32_t ref_stride, \
-                                                 uint32_t *sse) { \
-  int32_t diff; \
-  uint32_t var; \
-  const int8_t *h_filter = vp9_bilinear_filters_msa[xoffset - 1]; \
-  const int8_t *v_filter = vp9_bilinear_filters_msa[yoffset - 1]; \
-  \
-  if (yoffset) { \
-    if (xoffset) { \
-      *sse = sub_pixel_sse_diff_##wd##width_hv_msa(src, src_stride, \
-                                                   ref, ref_stride, \
-                                                   h_filter, v_filter, \
-                                                   ht, &diff); \
-    } else { \
-      *sse = sub_pixel_sse_diff_##wd##width_v_msa(src, src_stride, \
-                                                  ref, ref_stride, \
-                                                  v_filter, ht, &diff); \
-    } \
-    \
-    var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \
-  } else { \
-    if (xoffset) { \
-      *sse = sub_pixel_sse_diff_##wd##width_h_msa(src, src_stride, \
-                                                  ref, ref_stride, \
-                                                  h_filter, ht, &diff); \
-      \
-      var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \
-    } else { \
-      var = vpx_variance##wd##x##ht##_msa(src, src_stride, \
-                                          ref, ref_stride, sse); \
-    } \
-  } \
-  \
-  return var; \
-}
-
-VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 4);
-VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 8);
-
-VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 4);
-VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 8);
-VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 16);
-
-VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 8);
-VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 16);
-VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 32);
-
-VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 16);
-VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 32);
-VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 64);
+VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 16);
+VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 32);
+VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 64);
 
 VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 32);
 VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 64);
-
-#define VP9_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(wd, ht) \
-uint32_t vp9_sub_pixel_avg_variance##wd##x##ht##_msa( \
-    const uint8_t *src_ptr, int32_t src_stride, \
-    int32_t xoffset, int32_t yoffset, \
-    const uint8_t *ref_ptr, int32_t ref_stride, \
-    uint32_t *sse, const uint8_t *sec_pred) { \
-  int32_t diff; \
-  const int8_t *h_filter = vp9_bilinear_filters_msa[xoffset - 1]; \
-  const int8_t *v_filter = vp9_bilinear_filters_msa[yoffset - 1]; \
-  \
-  if (yoffset) { \
-    if (xoffset) { \
-      *sse = sub_pixel_avg_sse_diff_##wd##width_hv_msa(src_ptr, src_stride, \
-                                                       ref_ptr, ref_stride, \
-                                                       sec_pred, h_filter, \
-                                                       v_filter, ht, &diff); \
-    } else { \
-      *sse = sub_pixel_avg_sse_diff_##wd##width_v_msa(src_ptr, src_stride, \
-                                                      ref_ptr, ref_stride, \
-                                                      sec_pred, v_filter, \
-                                                      ht, &diff); \
-    } \
-  } else { \
-    if (xoffset) { \
-      *sse = sub_pixel_avg_sse_diff_##wd##width_h_msa(src_ptr, src_stride, \
-                                                      ref_ptr, ref_stride, \
-                                                      sec_pred, h_filter, \
-                                                      ht, &diff); \
-    } else { \
-      *sse = avg_sse_diff_##wd##width_msa(src_ptr, src_stride, \
-                                          ref_ptr, ref_stride, \
-                                          sec_pred, ht, &diff); \
-    } \
-  } \
-  \
-  return VARIANCE_##wd##Wx##ht##H(*sse, diff); \
-}
-
-VP9_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 4);
-VP9_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 8);
-
-VP9_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 4);
-VP9_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 8);
-VP9_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 16);
-
-VP9_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 8);
-VP9_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 16);
-VP9_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 32);
-
-VP9_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 16);
-VP9_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 32);
-
-uint32_t vp9_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
-                                             int32_t src_stride,
-                                             int32_t xoffset,
-                                             int32_t yoffset,
-                                             const uint8_t *ref_ptr,
-                                             int32_t ref_stride,
-                                             uint32_t *sse,
-                                             const uint8_t *sec_pred) {
-  int32_t diff;
-  const int8_t *h_filter = vp9_bilinear_filters_msa[xoffset - 1];
-  const int8_t *v_filter = vp9_bilinear_filters_msa[yoffset - 1];
-
-  if (yoffset) {
-    if (xoffset) {
-      *sse = sub_pixel_avg_sse_diff_32width_hv_msa(src_ptr, src_stride,
-                                                   ref_ptr, ref_stride,
-                                                   sec_pred, h_filter,
-                                                   v_filter, 64, &diff);
-    } else {
-      *sse = sub_pixel_avg_sse_diff_32width_v_msa(src_ptr, src_stride,
-                                                  ref_ptr, ref_stride,
-                                                  sec_pred, v_filter,
-                                                  64, &diff);
-    }
-  } else {
-    if (xoffset) {
-      *sse = sub_pixel_avg_sse_diff_32width_h_msa(src_ptr, src_stride,
-                                                  ref_ptr, ref_stride,
-                                                  sec_pred, h_filter,
-                                                  64, &diff);
-    } else {
-      *sse = avg_sse_diff_32x64_msa(src_ptr, src_stride, ref_ptr, ref_stride,
-                                    sec_pred, &diff);
-    }
-  }
-
-  return VARIANCE_32Wx64H(*sse, diff);
-}
-
-#define VP9_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(ht) \
-uint32_t vp9_sub_pixel_avg_variance64x##ht##_msa(const uint8_t *src_ptr, \
-                                                 int32_t src_stride, \
-                                                 int32_t xoffset, \
-                                                 int32_t yoffset, \
-                                                 const uint8_t *ref_ptr, \
-                                                 int32_t ref_stride, \
-                                                 uint32_t *sse, \
-                                                 const uint8_t *sec_pred) { \
-  int32_t diff; \
-  const int8_t *h_filter = vp9_bilinear_filters_msa[xoffset - 1]; \
-  const int8_t *v_filter = vp9_bilinear_filters_msa[yoffset - 1]; \
-  \
-  if (yoffset) { \
-    if (xoffset) { \
-      *sse = sub_pixel_avg_sse_diff_64width_hv_msa(src_ptr, src_stride, \
-                                                   ref_ptr, ref_stride, \
-                                                   sec_pred, h_filter, \
-                                                   v_filter, ht, &diff); \
-    } else { \
-      *sse = sub_pixel_avg_sse_diff_64width_v_msa(src_ptr, src_stride, \
-                                                  ref_ptr, ref_stride, \
-                                                  sec_pred, v_filter, \
-                                                  ht, &diff); \
-    } \
-  } else { \
-    if (xoffset) { \
-      *sse = sub_pixel_avg_sse_diff_64width_h_msa(src_ptr, src_stride, \
-                                                  ref_ptr, ref_stride, \
-                                                  sec_pred, h_filter, \
-                                                  ht, &diff); \
-    } else { \
-      *sse = avg_sse_diff_64x##ht##_msa(src_ptr, src_stride, \
-                                        ref_ptr, ref_stride, \
-                                        sec_pred, &diff); \
-    } \
-  } \
-  \
-  return VARIANCE_64Wx##ht##H(*sse, diff); \
-}
-
-VP9_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(32);
-VP9_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(64);
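The VARIANCE_*Wx*H macros used by both generated families reduce the two accumulators to the final value: variance = sse - sum^2 / (w*h), with the division done as a shift by log2(w*h); the LARGE variant (quoted as context in the first hunk) widens sum^2 to 64 bits first so the larger blocks cannot overflow. A worked scalar form (hypothetical helper, for orientation only):

#include <stdint.h>

static uint32_t block_variance(uint32_t sse, int32_t sum, int log2_pixels) {
  /* 4x4 -> 4, 16x16 -> 8, 64x64 -> 12: compare VARIANCE_64Wx64H's shift */
  return sse - (uint32_t)(((int64_t)sum * sum) >> log2_pixels);
}

For example, a 4x4 block with sse = 40 and sum = 8 gives 40 - (64 >> 4) = 36.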
-- 
2.7.4