From: Linfeng Zhang
Date: Tue, 7 Mar 2017 21:06:06 +0000 (-0800)
Subject: Update vpx_idct32x32_1024_add_neon()
X-Git-Tag: v1.7.0~652
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=77311e0dff5c5f561e0f6480bad5e276e22bf4ca;p=platform%2Fupstream%2Flibvpx.git

Update vpx_idct32x32_1024_add_neon()

Most are cosmetic changes. Speed is unchanged with clang 3.8 and about
5% faster with gcc 4.8.4.

Tried the strategy used in the 8x8 and 16x16 versions (whose operation
order follows the C code): speed improves with gcc but regresses with
clang.

Also tried removing store_in_output(), but speed regresses.

Change-Id: I93c8d284e90836f98962bb23d63a454cd40f776e
---

diff --git a/vpx_dsp/arm/idct32x32_add_neon.c b/vpx_dsp/arm/idct32x32_add_neon.c
index ae9457e..f47fc4e 100644
--- a/vpx_dsp/arm/idct32x32_add_neon.c
+++ b/vpx_dsp/arm/idct32x32_add_neon.c
@@ -16,146 +16,146 @@
 #include "vpx_dsp/arm/transpose_neon.h"
 #include "vpx_dsp/txfm_common.h"
 
-#define LOAD_FROM_TRANSPOSED(prev, first, second) \
-  q14s16 = vld1q_s16(trans_buf + first * 8);      \
-  q13s16 = vld1q_s16(trans_buf + second * 8);
-
-#define LOAD_FROM_OUTPUT(prev, first, second, qA, qB) \
-  qA = vld1q_s16(out + first * 32);                   \
-  qB = vld1q_s16(out + second * 32);
-
-#define STORE_IN_OUTPUT(prev, first, second, qA, qB) \
-  vst1q_s16(out + first * 32, qA);                   \
-  vst1q_s16(out + second * 32, qB);
-
-#define STORE_COMBINE_CENTER_RESULTS(r10, r9) \
-  __STORE_COMBINE_CENTER_RESULTS(r10, r9, stride, q6s16, q7s16, q8s16, q9s16);
-static INLINE void __STORE_COMBINE_CENTER_RESULTS(uint8_t *p1, uint8_t *p2,
-                                                  int stride, int16x8_t q6s16,
-                                                  int16x8_t q7s16,
-                                                  int16x8_t q8s16,
-                                                  int16x8_t q9s16) {
-  int16x4_t d8s16, d9s16, d10s16, d11s16;
-
-  d8s16 = vld1_s16((int16_t *)p1);
+static INLINE void load_from_transformed(const int16_t *const trans_buf,
+                                         const int first, const int second,
+                                         int16x8_t *const q0,
+                                         int16x8_t *const q1) {
+  *q0 = vld1q_s16(trans_buf + first * 8);
+  *q1 = vld1q_s16(trans_buf + second * 8);
+}
+
+static INLINE void load_from_output(const int16_t *const out, const int first,
+                                    const int second, int16x8_t *const q0,
+                                    int16x8_t *const q1) {
+  *q0 = vld1q_s16(out + first * 32);
+  *q1 = vld1q_s16(out + second * 32);
+}
+
+static INLINE void store_in_output(int16_t *const out, const int first,
+                                   const int second, const int16x8_t q0,
+                                   const int16x8_t q1) {
+  vst1q_s16(out + first * 32, q0);
+  vst1q_s16(out + second * 32, q1);
+}
+
+static INLINE void store_combine_center_results(uint8_t *p1, uint8_t *p2,
+                                                const int stride, int16x8_t q0,
+                                                int16x8_t q1, int16x8_t q2,
+                                                int16x8_t q3) {
+  int16x4_t d[4];
+
+  d[0] = vld1_s16((int16_t *)p1);
   p1 += stride;
-  d11s16 = vld1_s16((int16_t *)p2);
+  d[1] = vld1_s16((int16_t *)p1);
+  d[3] = vld1_s16((int16_t *)p2);
   p2 -= stride;
-  d9s16 = vld1_s16((int16_t *)p1);
-  d10s16 = vld1_s16((int16_t *)p2);
-
-  q7s16 = vrshrq_n_s16(q7s16, 6);
-  q8s16 = vrshrq_n_s16(q8s16, 6);
-  q9s16 = vrshrq_n_s16(q9s16, 6);
-  q6s16 = vrshrq_n_s16(q6s16, 6);
-
-  q7s16 = vreinterpretq_s16_u16(
-      vaddw_u8(vreinterpretq_u16_s16(q7s16), vreinterpret_u8_s16(d9s16)));
-  q8s16 = vreinterpretq_s16_u16(
-      vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_s16(d10s16)));
-  q9s16 = vreinterpretq_s16_u16(
-      vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_s16(d11s16)));
-  q6s16 = vreinterpretq_s16_u16(
-      vaddw_u8(vreinterpretq_u16_s16(q6s16), vreinterpret_u8_s16(d8s16)));
-
-  d9s16 = vreinterpret_s16_u8(vqmovun_s16(q7s16));
-  d10s16 = vreinterpret_s16_u8(vqmovun_s16(q8s16));
-  d11s16 = vreinterpret_s16_u8(vqmovun_s16(q9s16));
-  d8s16 = vreinterpret_s16_u8(vqmovun_s16(q6s16));
-
-  vst1_s16((int16_t *)p1, d9s16);
+  d[2] = vld1_s16((int16_t *)p2);
+
+  q0 = vrshrq_n_s16(q0, 6);
+  q1 = vrshrq_n_s16(q1, 6);
+  q2 = vrshrq_n_s16(q2, 6);
+  q3 = vrshrq_n_s16(q3, 6);
+
+  q0 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q0), vreinterpret_u8_s16(d[0])));
+  q1 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q1), vreinterpret_u8_s16(d[1])));
+  q2 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q2), vreinterpret_u8_s16(d[2])));
+  q3 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q3), vreinterpret_u8_s16(d[3])));
+
+  d[0] = vreinterpret_s16_u8(vqmovun_s16(q0));
+  d[1] = vreinterpret_s16_u8(vqmovun_s16(q1));
+  d[2] = vreinterpret_s16_u8(vqmovun_s16(q2));
+  d[3] = vreinterpret_s16_u8(vqmovun_s16(q3));
+
+  vst1_s16((int16_t *)p1, d[1]);
   p1 -= stride;
-  vst1_s16((int16_t *)p2, d10s16);
+  vst1_s16((int16_t *)p1, d[0]);
+  vst1_s16((int16_t *)p2, d[2]);
   p2 += stride;
-  vst1_s16((int16_t *)p1, d8s16);
-  vst1_s16((int16_t *)p2, d11s16);
+  vst1_s16((int16_t *)p2, d[3]);
 }
 
-#define STORE_COMBINE_EXTREME_RESULTS(r7, r6) \
-  __STORE_COMBINE_EXTREME_RESULTS(r7, r6, stride, q4s16, q5s16, q6s16, q7s16);
-static INLINE void __STORE_COMBINE_EXTREME_RESULTS(uint8_t *p1, uint8_t *p2,
-                                                   int stride, int16x8_t q4s16,
-                                                   int16x8_t q5s16,
-                                                   int16x8_t q6s16,
-                                                   int16x8_t q7s16) {
-  int16x4_t d4s16, d5s16, d6s16, d7s16;
+static INLINE void store_combine_extreme_results(uint8_t *p1, uint8_t *p2,
+                                                 const int stride,
+                                                 int16x8_t q0, int16x8_t q1,
+                                                 int16x8_t q2, int16x8_t q3) {
+  int16x4_t d[4];
 
-  d4s16 = vld1_s16((int16_t *)p1);
+  d[0] = vld1_s16((int16_t *)p1);
   p1 += stride;
-  d7s16 = vld1_s16((int16_t *)p2);
+  d[1] = vld1_s16((int16_t *)p1);
+  d[3] = vld1_s16((int16_t *)p2);
   p2 -= stride;
-  d5s16 = vld1_s16((int16_t *)p1);
-  d6s16 = vld1_s16((int16_t *)p2);
-
-  q5s16 = vrshrq_n_s16(q5s16, 6);
-  q6s16 = vrshrq_n_s16(q6s16, 6);
-  q7s16 = vrshrq_n_s16(q7s16, 6);
-  q4s16 = vrshrq_n_s16(q4s16, 6);
-
-  q5s16 = vreinterpretq_s16_u16(
-      vaddw_u8(vreinterpretq_u16_s16(q5s16), vreinterpret_u8_s16(d5s16)));
-  q6s16 = vreinterpretq_s16_u16(
-      vaddw_u8(vreinterpretq_u16_s16(q6s16), vreinterpret_u8_s16(d6s16)));
-  q7s16 = vreinterpretq_s16_u16(
-      vaddw_u8(vreinterpretq_u16_s16(q7s16), vreinterpret_u8_s16(d7s16)));
-  q4s16 = vreinterpretq_s16_u16(
-      vaddw_u8(vreinterpretq_u16_s16(q4s16), vreinterpret_u8_s16(d4s16)));
-
-  d5s16 = vreinterpret_s16_u8(vqmovun_s16(q5s16));
-  d6s16 = vreinterpret_s16_u8(vqmovun_s16(q6s16));
-  d7s16 = vreinterpret_s16_u8(vqmovun_s16(q7s16));
-  d4s16 = vreinterpret_s16_u8(vqmovun_s16(q4s16));
-
-  vst1_s16((int16_t *)p1, d5s16);
+  d[2] = vld1_s16((int16_t *)p2);
+
+  q0 = vrshrq_n_s16(q0, 6);
+  q1 = vrshrq_n_s16(q1, 6);
+  q2 = vrshrq_n_s16(q2, 6);
+  q3 = vrshrq_n_s16(q3, 6);
+
+  q0 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q0), vreinterpret_u8_s16(d[0])));
+  q1 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q1), vreinterpret_u8_s16(d[1])));
+  q2 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q2), vreinterpret_u8_s16(d[2])));
+  q3 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q3), vreinterpret_u8_s16(d[3])));
+
+  d[0] = vreinterpret_s16_u8(vqmovun_s16(q0));
+  d[1] = vreinterpret_s16_u8(vqmovun_s16(q1));
+  d[2] = vreinterpret_s16_u8(vqmovun_s16(q2));
+  d[3] = vreinterpret_s16_u8(vqmovun_s16(q3));
+
+  vst1_s16((int16_t *)p1, d[1]);
   p1 -= stride;
-  vst1_s16((int16_t *)p2, d6s16);
+  vst1_s16((int16_t *)p1, d[0]);
+  vst1_s16((int16_t *)p2, d[2]);
   p2 += stride;
-  vst1_s16((int16_t *)p2, d7s16);
-  vst1_s16((int16_t *)p1, d4s16);
+  vst1_s16((int16_t *)p2, d[3]);
 }
 
-#define DO_BUTTERFLY_STD(const_1, const_2, qA, qB) \
-  DO_BUTTERFLY(q14s16, q13s16, const_1, const_2, qA, qB);
-static INLINE void DO_BUTTERFLY(int16x8_t q14s16, int16x8_t q13s16,
-                                int16_t first_const, int16_t second_const,
-                                int16x8_t *qAs16, int16x8_t *qBs16) {
-  int16x4_t d30s16, d31s16;
-  int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q15s32;
-  int16x4_t dCs16, dDs16, dAs16, dBs16;
-
-  dCs16 = vget_low_s16(q14s16);
-  dDs16 = vget_high_s16(q14s16);
-  dAs16 = vget_low_s16(q13s16);
-  dBs16 = vget_high_s16(q13s16);
-
-  d30s16 = vdup_n_s16(first_const);
-  d31s16 = vdup_n_s16(second_const);
-
-  q8s32 = vmull_s16(dCs16, d30s16);
-  q10s32 = vmull_s16(dAs16, d31s16);
-  q9s32 = vmull_s16(dDs16, d30s16);
-  q11s32 = vmull_s16(dBs16, d31s16);
-  q12s32 = vmull_s16(dCs16, d31s16);
-
-  q8s32 = vsubq_s32(q8s32, q10s32);
-  q9s32 = vsubq_s32(q9s32, q11s32);
-
-  q10s32 = vmull_s16(dDs16, d31s16);
-  q11s32 = vmull_s16(dAs16, d30s16);
-  q15s32 = vmull_s16(dBs16, d30s16);
-
-  q11s32 = vaddq_s32(q12s32, q11s32);
-  q10s32 = vaddq_s32(q10s32, q15s32);
-
-  *qAs16 = vcombine_s16(vrshrn_n_s32(q8s32, DCT_CONST_BITS),
-                        vrshrn_n_s32(q9s32, DCT_CONST_BITS));
-  *qBs16 = vcombine_s16(vrshrn_n_s32(q11s32, DCT_CONST_BITS),
-                        vrshrn_n_s32(q10s32, DCT_CONST_BITS));
+static INLINE void do_butterfly(const int16x8_t qIn0, const int16x8_t qIn1,
+                                const int16_t first_const,
+                                const int16_t second_const,
+                                int16x8_t *const qOut0,
+                                int16x8_t *const qOut1) {
+  int32x4_t q[4];
+  int16x4_t d[6];
+
+  d[0] = vget_low_s16(qIn0);
+  d[1] = vget_high_s16(qIn0);
+  d[2] = vget_low_s16(qIn1);
+  d[3] = vget_high_s16(qIn1);
+
+  // Note: using v{mul, mla, mls}l_n_s16 here slows down 35% with gcc 4.9.
+  d[4] = vdup_n_s16(first_const);
+  d[5] = vdup_n_s16(second_const);
+
+  q[0] = vmull_s16(d[0], d[4]);
+  q[1] = vmull_s16(d[1], d[4]);
+  q[0] = vmlsl_s16(q[0], d[2], d[5]);
+  q[1] = vmlsl_s16(q[1], d[3], d[5]);
+
+  q[2] = vmull_s16(d[0], d[5]);
+  q[3] = vmull_s16(d[1], d[5]);
+  q[2] = vmlal_s16(q[2], d[2], d[4]);
+  q[3] = vmlal_s16(q[3], d[3], d[4]);
+
+  *qOut0 = vcombine_s16(vrshrn_n_s32(q[0], DCT_CONST_BITS),
+                        vrshrn_n_s32(q[1], DCT_CONST_BITS));
+  *qOut1 = vcombine_s16(vrshrn_n_s32(q[2], DCT_CONST_BITS),
+                        vrshrn_n_s32(q[3], DCT_CONST_BITS));
 }
 
-static INLINE void load_s16x8q(const int16_t *in, int16x8_t *s0, int16x8_t *s1,
-                               int16x8_t *s2, int16x8_t *s3, int16x8_t *s4,
-                               int16x8_t *s5, int16x8_t *s6, int16x8_t *s7) {
+static INLINE void load_s16x8q(const int16_t *in, int16x8_t *const s0,
+                               int16x8_t *const s1, int16x8_t *const s2,
+                               int16x8_t *const s3, int16x8_t *const s4,
+                               int16x8_t *const s5, int16x8_t *const s6,
+                               int16x8_t *const s7) {
   *s0 = vld1q_s16(in);
   in += 32;
   *s1 = vld1q_s16(in);
@@ -209,11 +209,10 @@ static INLINE void idct32_transpose_pair(const int16_t *input, int16_t *t_buf) {
 }
 
 #if CONFIG_VP9_HIGHBITDEPTH
-static INLINE void load_s16x8q_tran_low(const tran_low_t *in, int16x8_t *s0,
-                                        int16x8_t *s1, int16x8_t *s2,
-                                        int16x8_t *s3, int16x8_t *s4,
-                                        int16x8_t *s5, int16x8_t *s6,
-                                        int16x8_t *s7) {
+static INLINE void load_s16x8q_tran_low(
+    const tran_low_t *in, int16x8_t *const s0, int16x8_t *const s1,
+    int16x8_t *const s2, int16x8_t *const s3, int16x8_t *const s4,
+    int16x8_t *const s5, int16x8_t *const s6, int16x8_t *const s7) {
   *s0 = load_tran_low_to_s16q(in);
   in += 32;
   *s1 = load_tran_low_to_s16q(in);
@@ -245,185 +244,176 @@ static INLINE void idct32_transpose_pair_tran_low(const tran_low_t *input,
 #define idct32_transpose_pair_tran_low idct32_transpose_pair
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 
-static INLINE void idct32_bands_end_1st_pass(int16_t *out, int16x8_t q2s16,
-                                             int16x8_t q3s16, int16x8_t q6s16,
-                                             int16x8_t q7s16, int16x8_t q8s16,
-                                             int16x8_t q9s16, int16x8_t q10s16,
-                                             int16x8_t q11s16, int16x8_t q12s16,
-                                             int16x8_t q13s16, int16x8_t q14s16,
-                                             int16x8_t q15s16) {
-  int16x8_t q0s16, q1s16, q4s16, q5s16;
-
-  STORE_IN_OUTPUT(17, 16, 17, q6s16, q7s16);
-  STORE_IN_OUTPUT(17, 14, 15, q8s16, q9s16);
-
-  LOAD_FROM_OUTPUT(15, 30, 31, q0s16, q1s16);
-  q4s16 = vaddq_s16(q2s16, q1s16);
-  q5s16 = vaddq_s16(q3s16, q0s16);
-  q6s16 = vsubq_s16(q3s16, q0s16);
-  q7s16 = vsubq_s16(q2s16, q1s16);
-  STORE_IN_OUTPUT(31, 30, 31, q6s16, q7s16);
-  STORE_IN_OUTPUT(31, 0, 1, q4s16, q5s16);
-
-  LOAD_FROM_OUTPUT(1, 12, 13, q0s16, q1s16);
-  q2s16 = vaddq_s16(q10s16, q1s16);
-  q3s16 = vaddq_s16(q11s16, q0s16);
-  q4s16 = vsubq_s16(q11s16, q0s16);
-  q5s16 = vsubq_s16(q10s16, q1s16);
-
-  LOAD_FROM_OUTPUT(13, 18, 19, q0s16, q1s16);
-  q8s16 = vaddq_s16(q4s16, q1s16);
-  q9s16 = vaddq_s16(q5s16, q0s16);
-  q6s16 = vsubq_s16(q5s16, q0s16);
-  q7s16 = vsubq_s16(q4s16, q1s16);
-  STORE_IN_OUTPUT(19, 18, 19, q6s16, q7s16);
-  STORE_IN_OUTPUT(19, 12, 13, q8s16, q9s16);
-
-  LOAD_FROM_OUTPUT(13, 28, 29, q0s16, q1s16);
-  q4s16 = vaddq_s16(q2s16, q1s16);
-  q5s16 = vaddq_s16(q3s16, q0s16);
-  q6s16 = vsubq_s16(q3s16, q0s16);
-  q7s16 = vsubq_s16(q2s16, q1s16);
-  STORE_IN_OUTPUT(29, 28, 29, q6s16, q7s16);
-  STORE_IN_OUTPUT(29, 2, 3, q4s16, q5s16);
-
-  LOAD_FROM_OUTPUT(3, 10, 11, q0s16, q1s16);
-  q2s16 = vaddq_s16(q12s16, q1s16);
-  q3s16 = vaddq_s16(q13s16, q0s16);
-  q4s16 = vsubq_s16(q13s16, q0s16);
-  q5s16 = vsubq_s16(q12s16, q1s16);
-
-  LOAD_FROM_OUTPUT(11, 20, 21, q0s16, q1s16);
-  q8s16 = vaddq_s16(q4s16, q1s16);
-  q9s16 = vaddq_s16(q5s16, q0s16);
-  q6s16 = vsubq_s16(q5s16, q0s16);
-  q7s16 = vsubq_s16(q4s16, q1s16);
-  STORE_IN_OUTPUT(21, 20, 21, q6s16, q7s16);
-  STORE_IN_OUTPUT(21, 10, 11, q8s16, q9s16);
-
-  LOAD_FROM_OUTPUT(11, 26, 27, q0s16, q1s16);
-  q4s16 = vaddq_s16(q2s16, q1s16);
-  q5s16 = vaddq_s16(q3s16, q0s16);
-  q6s16 = vsubq_s16(q3s16, q0s16);
-  q7s16 = vsubq_s16(q2s16, q1s16);
-  STORE_IN_OUTPUT(27, 26, 27, q6s16, q7s16);
-  STORE_IN_OUTPUT(27, 4, 5, q4s16, q5s16);
-
-  LOAD_FROM_OUTPUT(5, 8, 9, q0s16, q1s16);
-  q2s16 = vaddq_s16(q14s16, q1s16);
-  q3s16 = vaddq_s16(q15s16, q0s16);
-  q4s16 = vsubq_s16(q15s16, q0s16);
-  q5s16 = vsubq_s16(q14s16, q1s16);
-
-  LOAD_FROM_OUTPUT(9, 22, 23, q0s16, q1s16);
-  q8s16 = vaddq_s16(q4s16, q1s16);
-  q9s16 = vaddq_s16(q5s16, q0s16);
-  q6s16 = vsubq_s16(q5s16, q0s16);
-  q7s16 = vsubq_s16(q4s16, q1s16);
-  STORE_IN_OUTPUT(23, 22, 23, q6s16, q7s16);
-  STORE_IN_OUTPUT(23, 8, 9, q8s16, q9s16);
-
-  LOAD_FROM_OUTPUT(9, 24, 25, q0s16, q1s16);
-  q4s16 = vaddq_s16(q2s16, q1s16);
-  q5s16 = vaddq_s16(q3s16, q0s16);
-  q6s16 = vsubq_s16(q3s16, q0s16);
-  q7s16 = vsubq_s16(q2s16, q1s16);
-  STORE_IN_OUTPUT(25, 24, 25, q6s16, q7s16);
-  STORE_IN_OUTPUT(25, 6, 7, q4s16, q5s16);
+static INLINE void idct32_bands_end_1st_pass(int16_t *const out,
+                                             int16x8_t *const q) {
+  store_in_output(out, 16, 17, q[6], q[7]);
+  store_in_output(out, 14, 15, q[8], q[9]);
+
+  load_from_output(out, 30, 31, &q[0], &q[1]);
+  q[4] = vaddq_s16(q[2], q[1]);
+  q[5] = vaddq_s16(q[3], q[0]);
+  q[6] = vsubq_s16(q[3], q[0]);
+  q[7] = vsubq_s16(q[2], q[1]);
+  store_in_output(out, 30, 31, q[6], q[7]);
+  store_in_output(out, 0, 1, q[4], q[5]);
+
+  load_from_output(out, 12, 13, &q[0], &q[1]);
+  q[2] = vaddq_s16(q[10], q[1]);
+  q[3] = vaddq_s16(q[11], q[0]);
+  q[4] = vsubq_s16(q[11], q[0]);
+  q[5] = vsubq_s16(q[10], q[1]);
+
+  load_from_output(out, 18, 19, &q[0], &q[1]);
+  q[8] = vaddq_s16(q[4], q[1]);
+  q[9] = vaddq_s16(q[5], q[0]);
+  q[6] = vsubq_s16(q[5], q[0]);
+  q[7] = vsubq_s16(q[4], q[1]);
+  store_in_output(out, 18, 19, q[6], q[7]);
+  store_in_output(out, 12, 13, q[8], q[9]);
+
+  load_from_output(out, 28, 29, &q[0], &q[1]);
+  q[4] = vaddq_s16(q[2], q[1]);
+  q[5] = vaddq_s16(q[3], q[0]);
+  q[6] = vsubq_s16(q[3], q[0]);
+  q[7] = vsubq_s16(q[2], q[1]);
+  store_in_output(out, 28, 29, q[6], q[7]);
+  store_in_output(out, 2, 3, q[4], q[5]);
+
+  load_from_output(out, 10, 11, &q[0], &q[1]);
+  q[2] = vaddq_s16(q[12], q[1]);
+  q[3] = vaddq_s16(q[13], q[0]);
+  q[4] = vsubq_s16(q[13], q[0]);
+  q[5] = vsubq_s16(q[12], q[1]);
+
+  load_from_output(out, 20, 21, &q[0], &q[1]);
+  q[8] = vaddq_s16(q[4], q[1]);
+  q[9] = vaddq_s16(q[5], q[0]);
+  q[6] = vsubq_s16(q[5], q[0]);
+  q[7] = vsubq_s16(q[4], q[1]);
+  store_in_output(out, 20, 21, q[6], q[7]);
+  store_in_output(out, 10, 11, q[8], q[9]);
+
+  load_from_output(out, 26, 27, &q[0], &q[1]);
+  q[4] = vaddq_s16(q[2], q[1]);
+  q[5] = vaddq_s16(q[3], q[0]);
+  q[6] = vsubq_s16(q[3], q[0]);
+  q[7] = vsubq_s16(q[2], q[1]);
+  store_in_output(out, 26, 27, q[6], q[7]);
+  store_in_output(out, 4, 5, q[4], q[5]);
+
+  load_from_output(out, 8, 9, &q[0], &q[1]);
+  q[2] = vaddq_s16(q[14], q[1]);
+  q[3] = vaddq_s16(q[15], q[0]);
+  q[4] = vsubq_s16(q[15], q[0]);
+  q[5] = vsubq_s16(q[14], q[1]);
+
+  load_from_output(out, 22, 23, &q[0], &q[1]);
+  q[8] = vaddq_s16(q[4], q[1]);
+  q[9] = vaddq_s16(q[5], q[0]);
+  q[6] = vsubq_s16(q[5], q[0]);
+  q[7] = vsubq_s16(q[4], q[1]);
+  store_in_output(out, 22, 23, q[6], q[7]);
+  store_in_output(out, 8, 9, q[8], q[9]);
+
+  load_from_output(out, 24, 25, &q[0], &q[1]);
+  q[4] = vaddq_s16(q[2], q[1]);
+  q[5] = vaddq_s16(q[3], q[0]);
+  q[6] = vsubq_s16(q[3], q[0]);
+  q[7] = vsubq_s16(q[2], q[1]);
+  store_in_output(out, 24, 25, q[6], q[7]);
+  store_in_output(out, 6, 7, q[4], q[5]);
 }
 
-static INLINE void idct32_bands_end_2nd_pass(
-    int16_t *out, uint8_t *dest, int stride, int16x8_t q2s16, int16x8_t q3s16,
-    int16x8_t q6s16, int16x8_t q7s16, int16x8_t q8s16, int16x8_t q9s16,
-    int16x8_t q10s16, int16x8_t q11s16, int16x8_t q12s16, int16x8_t q13s16,
-    int16x8_t q14s16, int16x8_t q15s16) {
-  uint8_t *r6 = dest + 31 * stride;
-  uint8_t *r7 = dest /* + 0 * stride*/;
-  uint8_t *r9 = dest + 15 * stride;
-  uint8_t *r10 = dest + 16 * stride;
-  int str2 = stride << 1;
-  int16x8_t q0s16, q1s16, q4s16, q5s16;
-
-  STORE_COMBINE_CENTER_RESULTS(r10, r9);
-  r10 += str2;
-  r9 -= str2;
-
-  LOAD_FROM_OUTPUT(17, 30, 31, q0s16, q1s16)
-  q4s16 = vaddq_s16(q2s16, q1s16);
-  q5s16 = vaddq_s16(q3s16, q0s16);
-  q6s16 = vsubq_s16(q3s16, q0s16);
-  q7s16 = vsubq_s16(q2s16, q1s16);
-  STORE_COMBINE_EXTREME_RESULTS(r7, r6);
-  r7 += str2;
-  r6 -= str2;
-
-  LOAD_FROM_OUTPUT(31, 12, 13, q0s16, q1s16)
-  q2s16 = vaddq_s16(q10s16, q1s16);
-  q3s16 = vaddq_s16(q11s16, q0s16);
-  q4s16 = vsubq_s16(q11s16, q0s16);
-  q5s16 = vsubq_s16(q10s16, q1s16);
-
-  LOAD_FROM_OUTPUT(13, 18, 19, q0s16, q1s16)
-  q8s16 = vaddq_s16(q4s16, q1s16);
-  q9s16 = vaddq_s16(q5s16, q0s16);
-  q6s16 = vsubq_s16(q5s16, q0s16);
-  q7s16 = vsubq_s16(q4s16, q1s16);
-  STORE_COMBINE_CENTER_RESULTS(r10, r9);
-  r10 += str2;
-  r9 -= str2;
-
-  LOAD_FROM_OUTPUT(19, 28, 29, q0s16, q1s16)
-  q4s16 = vaddq_s16(q2s16, q1s16);
-  q5s16 = vaddq_s16(q3s16, q0s16);
-  q6s16 = vsubq_s16(q3s16, q0s16);
-  q7s16 = vsubq_s16(q2s16, q1s16);
-  STORE_COMBINE_EXTREME_RESULTS(r7, r6);
-  r7 += str2;
-  r6 -= str2;
-
-  LOAD_FROM_OUTPUT(29, 10, 11, q0s16, q1s16)
-  q2s16 = vaddq_s16(q12s16, q1s16);
-  q3s16 = vaddq_s16(q13s16, q0s16);
-  q4s16 = vsubq_s16(q13s16, q0s16);
-  q5s16 = vsubq_s16(q12s16, q1s16);
-
-  LOAD_FROM_OUTPUT(11, 20, 21, q0s16, q1s16)
-  q8s16 = vaddq_s16(q4s16, q1s16);
-  q9s16 = vaddq_s16(q5s16, q0s16);
-  q6s16 = vsubq_s16(q5s16, q0s16);
-  q7s16 = vsubq_s16(q4s16, q1s16);
-  STORE_COMBINE_CENTER_RESULTS(r10, r9);
-  r10 += str2;
-  r9 -= str2;
-
-  LOAD_FROM_OUTPUT(21, 26, 27, q0s16, q1s16)
-  q4s16 = vaddq_s16(q2s16, q1s16);
-  q5s16 = vaddq_s16(q3s16, q0s16);
-  q6s16 = vsubq_s16(q3s16, q0s16);
-  q7s16 = vsubq_s16(q2s16, q1s16);
-  STORE_COMBINE_EXTREME_RESULTS(r7, r6);
-  r7 += str2;
-  r6 -= str2;
-
-  LOAD_FROM_OUTPUT(27, 8, 9, q0s16, q1s16)
-  q2s16 = vaddq_s16(q14s16, q1s16);
-  q3s16 = vaddq_s16(q15s16, q0s16);
-  q4s16 = vsubq_s16(q15s16, q0s16);
-  q5s16 = vsubq_s16(q14s16, q1s16);
-
-  LOAD_FROM_OUTPUT(9, 22, 23, q0s16, q1s16)
-  q8s16 = vaddq_s16(q4s16, q1s16);
-  q9s16 = vaddq_s16(q5s16, q0s16);
-  q6s16 = vsubq_s16(q5s16, q0s16);
-  q7s16 = vsubq_s16(q4s16, q1s16);
-  STORE_COMBINE_CENTER_RESULTS(r10, r9);
-
-  LOAD_FROM_OUTPUT(23, 24, 25, q0s16, q1s16)
-  q4s16 = vaddq_s16(q2s16, q1s16);
-  q5s16 = vaddq_s16(q3s16, q0s16);
-  q6s16 = vsubq_s16(q3s16, q0s16);
-  q7s16 = vsubq_s16(q2s16, q1s16);
-  STORE_COMBINE_EXTREME_RESULTS(r7, r6);
+static INLINE void idct32_bands_end_2nd_pass(const int16_t *const out,
+                                             uint8_t *const dest,
+                                             const int stride,
+                                             int16x8_t *const q) {
+  uint8_t *dest0 = dest + 0 * stride;
+  uint8_t *dest1 = dest + 31 * stride;
+  uint8_t *dest2 = dest + 16 * stride;
+  uint8_t *dest3 = dest + 15 * stride;
+  const int str2 = stride << 1;
+
+  store_combine_center_results(dest2, dest3, stride, q[6], q[7], q[8], q[9]);
+  dest2 += str2;
+  dest3 -= str2;
+
+  load_from_output(out, 30, 31, &q[0], &q[1]);
+  q[4] = vaddq_s16(q[2], q[1]);
+  q[5] = vaddq_s16(q[3], q[0]);
+  q[6] = vsubq_s16(q[3], q[0]);
+  q[7] = vsubq_s16(q[2], q[1]);
+  store_combine_extreme_results(dest0, dest1, stride, q[4], q[5], q[6], q[7]);
+  dest0 += str2;
+  dest1 -= str2;
+
+  load_from_output(out, 12, 13, &q[0], &q[1]);
+  q[2] = vaddq_s16(q[10], q[1]);
+  q[3] = vaddq_s16(q[11], q[0]);
+  q[4] = vsubq_s16(q[11], q[0]);
+  q[5] = vsubq_s16(q[10], q[1]);
+
+  load_from_output(out, 18, 19, &q[0], &q[1]);
+  q[8] = vaddq_s16(q[4], q[1]);
+  q[9] = vaddq_s16(q[5], q[0]);
+  q[6] = vsubq_s16(q[5], q[0]);
+  q[7] = vsubq_s16(q[4], q[1]);
+  store_combine_center_results(dest2, dest3, stride, q[6], q[7], q[8], q[9]);
+  dest2 += str2;
+  dest3 -= str2;
+
+  load_from_output(out, 28, 29, &q[0], &q[1]);
+  q[4] = vaddq_s16(q[2], q[1]);
+  q[5] = vaddq_s16(q[3], q[0]);
+  q[6] = vsubq_s16(q[3], q[0]);
+  q[7] = vsubq_s16(q[2], q[1]);
+  store_combine_extreme_results(dest0, dest1, stride, q[4], q[5], q[6], q[7]);
+  dest0 += str2;
+  dest1 -= str2;
+
+  load_from_output(out, 10, 11, &q[0], &q[1]);
+  q[2] = vaddq_s16(q[12], q[1]);
+  q[3] = vaddq_s16(q[13], q[0]);
+  q[4] = vsubq_s16(q[13], q[0]);
+  q[5] = vsubq_s16(q[12], q[1]);
+
+  load_from_output(out, 20, 21, &q[0], &q[1]);
+  q[8] = vaddq_s16(q[4], q[1]);
+  q[9] = vaddq_s16(q[5], q[0]);
+  q[6] = vsubq_s16(q[5], q[0]);
+  q[7] = vsubq_s16(q[4], q[1]);
+  store_combine_center_results(dest2, dest3, stride, q[6], q[7], q[8], q[9]);
+  dest2 += str2;
+  dest3 -= str2;
+
+  load_from_output(out, 26, 27, &q[0], &q[1]);
+  q[4] = vaddq_s16(q[2], q[1]);
+  q[5] = vaddq_s16(q[3], q[0]);
+  q[6] = vsubq_s16(q[3], q[0]);
+  q[7] = vsubq_s16(q[2], q[1]);
+  store_combine_extreme_results(dest0, dest1, stride, q[4], q[5], q[6], q[7]);
+  dest0 += str2;
+  dest1 -= str2;
+
+  load_from_output(out, 8, 9, &q[0], &q[1]);
+  q[2] = vaddq_s16(q[14], q[1]);
+  q[3] = vaddq_s16(q[15], q[0]);
+  q[4] = vsubq_s16(q[15], q[0]);
+  q[5] = vsubq_s16(q[14], q[1]);
+
+  load_from_output(out, 22, 23, &q[0], &q[1]);
+  q[8] = vaddq_s16(q[4], q[1]);
+  q[9] = vaddq_s16(q[5], q[0]);
+  q[6] = vsubq_s16(q[5], q[0]);
+  q[7] = vsubq_s16(q[4], q[1]);
+  store_combine_center_results(dest2, dest3, stride, q[6], q[7], q[8], q[9]);
+
+  load_from_output(out, 24, 25, &q[0], &q[1]);
+  q[4] = vaddq_s16(q[2], q[1]);
+  q[5] = vaddq_s16(q[3], q[0]);
+  q[6] = vsubq_s16(q[3], q[0]);
+  q[7] = vsubq_s16(q[2], q[1]);
+  store_combine_extreme_results(dest0, dest1, stride, q[4], q[5], q[6], q[7]);
 }
 
 void vpx_idct32x32_1024_add_neon(const tran_low_t *input, uint8_t *dest,
@@ -434,8 +424,7 @@ void vpx_idct32x32_1024_add_neon(const tran_low_t *input, uint8_t *dest,
   int16_t pass2[32 * 32];
   const int16_t *input_pass2 = pass1;  // input of pass2 is the result of pass1
   int16_t *out;
-  int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
-  int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
+  int16x8_t q[16];
 
   for (idct32_pass_loop = 0, out = pass1; idct32_pass_loop < 2;
        idct32_pass_loop++, out = pass2) {
@@ -453,235 +442,229 @@ void vpx_idct32x32_1024_add_neon(const tran_low_t *input, uint8_t *dest,
     // -----------------------------------------
     // generate 16,17,30,31
     // part of stage 1
-    LOAD_FROM_TRANSPOSED(0, 1, 31)
-    DO_BUTTERFLY_STD(cospi_31_64, cospi_1_64, &q0s16, &q2s16)
-    LOAD_FROM_TRANSPOSED(31, 17, 15)
-    DO_BUTTERFLY_STD(cospi_15_64, cospi_17_64, &q1s16, &q3s16)
+    load_from_transformed(trans_buf, 1, 31, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_31_64, cospi_1_64, &q[0], &q[2]);
+    load_from_transformed(trans_buf, 17, 15, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_15_64, cospi_17_64, &q[1], &q[3]);
     // part of stage 2
-    q4s16 = vaddq_s16(q0s16, q1s16);
-    q13s16 = vsubq_s16(q0s16, q1s16);
-    q6s16 = vaddq_s16(q2s16, q3s16);
-    q14s16 = vsubq_s16(q2s16, q3s16);
+    q[4] = vaddq_s16(q[0], q[1]);
+    q[13] = vsubq_s16(q[0], q[1]);
+    q[6] = vaddq_s16(q[2], q[3]);
+    q[14] = vsubq_s16(q[2], q[3]);
     // part of stage 3
-    DO_BUTTERFLY_STD(cospi_28_64, cospi_4_64, &q5s16, &q7s16)
+    do_butterfly(q[14], q[13], cospi_28_64, cospi_4_64, &q[5], &q[7]);
 
     // generate 18,19,28,29
     // part of stage 1
-    LOAD_FROM_TRANSPOSED(15, 9, 23)
-    DO_BUTTERFLY_STD(cospi_23_64, cospi_9_64, &q0s16, &q2s16)
-    LOAD_FROM_TRANSPOSED(23, 25, 7)
-    DO_BUTTERFLY_STD(cospi_7_64, cospi_25_64, &q1s16, &q3s16)
+    load_from_transformed(trans_buf, 9, 23, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_23_64, cospi_9_64, &q[0], &q[2]);
+    load_from_transformed(trans_buf, 25, 7, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_7_64, cospi_25_64, &q[1], &q[3]);
     // part of stage 2
-    q13s16 = vsubq_s16(q3s16, q2s16);
-    q3s16 = vaddq_s16(q3s16, q2s16);
-    q14s16 = vsubq_s16(q1s16, q0s16);
-    q2s16 = vaddq_s16(q1s16, q0s16);
+    q[13] = vsubq_s16(q[3], q[2]);
+    q[3] = vaddq_s16(q[3], q[2]);
+    q[14] = vsubq_s16(q[1], q[0]);
+    q[2] = vaddq_s16(q[1], q[0]);
    // part of stage 3
-    DO_BUTTERFLY_STD(-cospi_4_64, -cospi_28_64, &q1s16, &q0s16)
+    do_butterfly(q[14], q[13], -cospi_4_64, -cospi_28_64, &q[1], &q[0]);
     // part of stage 4
-    q8s16 = vaddq_s16(q4s16, q2s16);
-    q9s16 = vaddq_s16(q5s16, q0s16);
-    q10s16 = vaddq_s16(q7s16, q1s16);
-    q15s16 = vaddq_s16(q6s16, q3s16);
-    q13s16 = vsubq_s16(q5s16, q0s16);
-    q14s16 = vsubq_s16(q7s16, q1s16);
-    STORE_IN_OUTPUT(0, 16, 31, q8s16, q15s16)
-    STORE_IN_OUTPUT(31, 17, 30, q9s16, q10s16)
+    q[8] = vaddq_s16(q[4], q[2]);
+    q[9] = vaddq_s16(q[5], q[0]);
+    q[10] = vaddq_s16(q[7], q[1]);
+    q[15] = vaddq_s16(q[6], q[3]);
+    q[13] = vsubq_s16(q[5], q[0]);
+    q[14] = vsubq_s16(q[7], q[1]);
+    store_in_output(out, 16, 31, q[8], q[15]);
+    store_in_output(out, 17, 30, q[9], q[10]);
     // part of stage 5
-    DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q0s16, &q1s16)
-    STORE_IN_OUTPUT(30, 29, 18, q1s16, q0s16)
+    do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[0], &q[1]);
+    store_in_output(out, 29, 18, q[1], q[0]);
     // part of stage 4
-    q13s16 = vsubq_s16(q4s16, q2s16);
-    q14s16 = vsubq_s16(q6s16, q3s16);
+    q[13] = vsubq_s16(q[4], q[2]);
+    q[14] = vsubq_s16(q[6], q[3]);
     // part of stage 5
-    DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q4s16, &q6s16)
-    STORE_IN_OUTPUT(18, 19, 28, q4s16, q6s16)
+    do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[4], &q[6]);
+    store_in_output(out, 19, 28, q[4], q[6]);
 
     // -----------------------------------------
     // BLOCK B: 20-23,24-27
     // -----------------------------------------
     // generate 20,21,26,27
     // part of stage 1
-    LOAD_FROM_TRANSPOSED(7, 5, 27)
-    DO_BUTTERFLY_STD(cospi_27_64, cospi_5_64, &q0s16, &q2s16)
-    LOAD_FROM_TRANSPOSED(27, 21, 11)
-    DO_BUTTERFLY_STD(cospi_11_64, cospi_21_64, &q1s16, &q3s16)
+    load_from_transformed(trans_buf, 5, 27, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_27_64, cospi_5_64, &q[0], &q[2]);
+    load_from_transformed(trans_buf, 21, 11, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_11_64, cospi_21_64, &q[1], &q[3]);
     // part of stage 2
-    q13s16 = vsubq_s16(q0s16, q1s16);
-    q0s16 = vaddq_s16(q0s16, q1s16);
-    q14s16 = vsubq_s16(q2s16, q3s16);
-    q2s16 = vaddq_s16(q2s16, q3s16);
+    q[13] = vsubq_s16(q[0], q[1]);
+    q[0] = vaddq_s16(q[0], q[1]);
+    q[14] = vsubq_s16(q[2], q[3]);
+    q[2] = vaddq_s16(q[2], q[3]);
     // part of stage 3
-    DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16)
+    do_butterfly(q[14], q[13], cospi_12_64, cospi_20_64, &q[1], &q[3]);
 
     // generate 22,23,24,25
     // part of stage 1
-    LOAD_FROM_TRANSPOSED(11, 13, 19)
-    DO_BUTTERFLY_STD(cospi_19_64, cospi_13_64, &q5s16, &q7s16)
-    LOAD_FROM_TRANSPOSED(19, 29, 3)
-    DO_BUTTERFLY_STD(cospi_3_64, cospi_29_64, &q4s16, &q6s16)
+    load_from_transformed(trans_buf, 13, 19, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_19_64, cospi_13_64, &q[5], &q[7]);
+    load_from_transformed(trans_buf, 29, 3, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_3_64, cospi_29_64, &q[4], &q[6]);
     // part of stage 2
-    q14s16 = vsubq_s16(q4s16, q5s16);
-    q5s16 = vaddq_s16(q4s16, q5s16);
-    q13s16 = vsubq_s16(q6s16, q7s16);
-    q6s16 = vaddq_s16(q6s16, q7s16);
+    q[14] = vsubq_s16(q[4], q[5]);
+    q[5] = vaddq_s16(q[4], q[5]);
+    q[13] = vsubq_s16(q[6], q[7]);
+    q[6] = vaddq_s16(q[6], q[7]);
     // part of stage 3
-    DO_BUTTERFLY_STD(-cospi_20_64, -cospi_12_64, &q4s16, &q7s16)
+    do_butterfly(q[14], q[13], -cospi_20_64, -cospi_12_64, &q[4], &q[7]);
     // part of stage 4
-    q10s16 = vaddq_s16(q7s16, q1s16);
-    q11s16 = vaddq_s16(q5s16, q0s16);
-    q12s16 = vaddq_s16(q6s16, q2s16);
-    q15s16 = vaddq_s16(q4s16, q3s16);
+    q[10] = vaddq_s16(q[7], q[1]);
+    q[11] = vaddq_s16(q[5], q[0]);
+    q[12] = vaddq_s16(q[6], q[2]);
+    q[15] = vaddq_s16(q[4], q[3]);
     // part of stage 6
-    LOAD_FROM_OUTPUT(28, 16, 17, q14s16, q13s16)
-    q8s16 = vaddq_s16(q14s16, q11s16);
-    q9s16 = vaddq_s16(q13s16, q10s16);
-    q13s16 = vsubq_s16(q13s16, q10s16);
-    q11s16 = vsubq_s16(q14s16, q11s16);
-    STORE_IN_OUTPUT(17, 17, 16, q9s16, q8s16)
-    LOAD_FROM_OUTPUT(16, 30, 31, q14s16, q9s16)
-    q8s16 = vsubq_s16(q9s16, q12s16);
-    q10s16 = vaddq_s16(q14s16, q15s16);
-    q14s16 = vsubq_s16(q14s16, q15s16);
-    q12s16 = vaddq_s16(q9s16, q12s16);
-    STORE_IN_OUTPUT(31, 30, 31, q10s16, q12s16)
+    load_from_output(out, 16, 17, &q[14], &q[13]);
+    q[8] = vaddq_s16(q[14], q[11]);
+    q[9] = vaddq_s16(q[13], q[10]);
+    q[13] = vsubq_s16(q[13], q[10]);
+    q[11] = vsubq_s16(q[14], q[11]);
+    store_in_output(out, 17, 16, q[9], q[8]);
+    load_from_output(out, 30, 31, &q[14], &q[9]);
+    q[8] = vsubq_s16(q[9], q[12]);
+    q[10] = vaddq_s16(q[14], q[15]);
+    q[14] = vsubq_s16(q[14], q[15]);
+    q[12] = vaddq_s16(q[9], q[12]);
+    store_in_output(out, 30, 31, q[10], q[12]);
     // part of stage 7
-    DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)
-    STORE_IN_OUTPUT(31, 25, 22, q14s16, q13s16)
-    q13s16 = q11s16;
-    q14s16 = q8s16;
-    DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)
-    STORE_IN_OUTPUT(22, 24, 23, q14s16, q13s16)
+    do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[13], &q[14]);
+    store_in_output(out, 25, 22, q[14], q[13]);
+    do_butterfly(q[8], q[11], cospi_16_64, cospi_16_64, &q[13], &q[14]);
+    store_in_output(out, 24, 23, q[14], q[13]);
     // part of stage 4
-    q14s16 = vsubq_s16(q5s16, q0s16);
-    q13s16 = vsubq_s16(q6s16, q2s16);
-    DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q5s16, &q6s16);
-    q14s16 = vsubq_s16(q7s16, q1s16);
-    q13s16 = vsubq_s16(q4s16, q3s16);
-    DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q0s16, &q1s16);
+    q[14] = vsubq_s16(q[5], q[0]);
+    q[13] = vsubq_s16(q[6], q[2]);
+    do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[5], &q[6]);
+    q[14] = vsubq_s16(q[7], q[1]);
+    q[13] = vsubq_s16(q[4], q[3]);
+    do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[0], &q[1]);
     // part of stage 6
-    LOAD_FROM_OUTPUT(23, 18, 19, q14s16, q13s16)
-    q8s16 = vaddq_s16(q14s16, q1s16);
-    q9s16 = vaddq_s16(q13s16, q6s16);
-    q13s16 = vsubq_s16(q13s16, q6s16);
-    q1s16 = vsubq_s16(q14s16, q1s16);
-    STORE_IN_OUTPUT(19, 18, 19, q8s16, q9s16)
-    LOAD_FROM_OUTPUT(19, 28, 29, q8s16, q9s16)
-    q14s16 = vsubq_s16(q8s16, q5s16);
-    q10s16 = vaddq_s16(q8s16, q5s16);
-    q11s16 = vaddq_s16(q9s16, q0s16);
-    q0s16 = vsubq_s16(q9s16, q0s16);
-    STORE_IN_OUTPUT(29, 28, 29, q10s16, q11s16)
+    load_from_output(out, 18, 19, &q[14], &q[13]);
+    q[8] = vaddq_s16(q[14], q[1]);
+    q[9] = vaddq_s16(q[13], q[6]);
+    q[13] = vsubq_s16(q[13], q[6]);
+    q[1] = vsubq_s16(q[14], q[1]);
+    store_in_output(out, 18, 19, q[8], q[9]);
+    load_from_output(out, 28, 29, &q[8], &q[9]);
+    q[14] = vsubq_s16(q[8], q[5]);
+    q[10] = vaddq_s16(q[8], q[5]);
+    q[11] = vaddq_s16(q[9], q[0]);
+    q[0] = vsubq_s16(q[9], q[0]);
+    store_in_output(out, 28, 29, q[10], q[11]);
     // part of stage 7
-    DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)
-    STORE_IN_OUTPUT(29, 20, 27, q13s16, q14s16)
-    DO_BUTTERFLY(q0s16, q1s16, cospi_16_64, cospi_16_64, &q1s16, &q0s16);
-    STORE_IN_OUTPUT(27, 21, 26, q1s16, q0s16)
+    do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[13], &q[14]);
+    store_in_output(out, 20, 27, q[13], q[14]);
+    do_butterfly(q[0], q[1], cospi_16_64, cospi_16_64, &q[1], &q[0]);
+    store_in_output(out, 21, 26, q[1], q[0]);
 
     // -----------------------------------------
     // BLOCK C: 8-10,11-15
     // -----------------------------------------
     // generate 8,9,14,15
     // part of stage 2
-    LOAD_FROM_TRANSPOSED(3, 2, 30)
-    DO_BUTTERFLY_STD(cospi_30_64, cospi_2_64, &q0s16, &q2s16)
-    LOAD_FROM_TRANSPOSED(30, 18, 14)
-    DO_BUTTERFLY_STD(cospi_14_64, cospi_18_64, &q1s16, &q3s16)
+    load_from_transformed(trans_buf, 2, 30, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_30_64, cospi_2_64, &q[0], &q[2]);
+    load_from_transformed(trans_buf, 18, 14, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_14_64, cospi_18_64, &q[1], &q[3]);
     // part of stage 3
-    q13s16 = vsubq_s16(q0s16, q1s16);
-    q0s16 = vaddq_s16(q0s16, q1s16);
-    q14s16 = vsubq_s16(q2s16, q3s16);
-    q2s16 = vaddq_s16(q2s16, q3s16);
+    q[13] = vsubq_s16(q[0], q[1]);
+    q[0] = vaddq_s16(q[0], q[1]);
+    q[14] = vsubq_s16(q[2], q[3]);
+    q[2] = vaddq_s16(q[2], q[3]);
     // part of stage 4
-    DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q1s16, &q3s16)
+    do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[1], &q[3]);
 
     // generate 10,11,12,13
     // part of stage 2
-    LOAD_FROM_TRANSPOSED(14, 10, 22)
-    DO_BUTTERFLY_STD(cospi_22_64, cospi_10_64, &q5s16, &q7s16)
-    LOAD_FROM_TRANSPOSED(22, 26, 6)
-    DO_BUTTERFLY_STD(cospi_6_64, cospi_26_64, &q4s16, &q6s16)
+    load_from_transformed(trans_buf, 10, 22, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_22_64, cospi_10_64, &q[5], &q[7]);
+    load_from_transformed(trans_buf, 26, 6, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_6_64, cospi_26_64, &q[4], &q[6]);
     // part of stage 3
-    q14s16 = vsubq_s16(q4s16, q5s16);
-    q5s16 = vaddq_s16(q4s16, q5s16);
-    q13s16 = vsubq_s16(q6s16, q7s16);
-    q6s16 = vaddq_s16(q6s16, q7s16);
+    q[14] = vsubq_s16(q[4], q[5]);
+    q[5] = vaddq_s16(q[4], q[5]);
+    q[13] = vsubq_s16(q[6], q[7]);
+    q[6] = vaddq_s16(q[6], q[7]);
     // part of stage 4
-    DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q4s16, &q7s16)
+    do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[4], &q[7]);
     // part of stage 5
-    q8s16 = vaddq_s16(q0s16, q5s16);
-    q9s16 = vaddq_s16(q1s16, q7s16);
-    q13s16 = vsubq_s16(q1s16, q7s16);
-    q14s16 = vsubq_s16(q3s16, q4s16);
-    q10s16 = vaddq_s16(q3s16, q4s16);
-    q15s16 = vaddq_s16(q2s16, q6s16);
-    STORE_IN_OUTPUT(26, 8, 15, q8s16, q15s16)
-    STORE_IN_OUTPUT(15, 9, 14, q9s16, q10s16)
+    q[8] = vaddq_s16(q[0], q[5]);
+    q[9] = vaddq_s16(q[1], q[7]);
+    q[13] = vsubq_s16(q[1], q[7]);
+    q[14] = vsubq_s16(q[3], q[4]);
+    q[10] = vaddq_s16(q[3], q[4]);
+    q[15] = vaddq_s16(q[2], q[6]);
+    store_in_output(out, 8, 15, q[8], q[15]);
+    store_in_output(out, 9, 14, q[9], q[10]);
     // part of stage 6
-    DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)
-    STORE_IN_OUTPUT(14, 13, 10, q3s16, q1s16)
-    q13s16 = vsubq_s16(q0s16, q5s16);
-    q14s16 = vsubq_s16(q2s16, q6s16);
-    DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)
-    STORE_IN_OUTPUT(10, 11, 12, q1s16, q3s16)
+    do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]);
+    store_in_output(out, 13, 10, q[3], q[1]);
+    q[13] = vsubq_s16(q[0], q[5]);
+    q[14] = vsubq_s16(q[2], q[6]);
+    do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]);
+    store_in_output(out, 11, 12, q[1], q[3]);
 
     // -----------------------------------------
     // BLOCK D: 0-3,4-7
     // -----------------------------------------
     // generate 4,5,6,7
     // part of stage 3
-    LOAD_FROM_TRANSPOSED(6, 4, 28)
-    DO_BUTTERFLY_STD(cospi_28_64, cospi_4_64, &q0s16, &q2s16)
-    LOAD_FROM_TRANSPOSED(28, 20, 12)
-    DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16)
+    load_from_transformed(trans_buf, 4, 28, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_28_64, cospi_4_64, &q[0], &q[2]);
+    load_from_transformed(trans_buf, 20, 12, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_12_64, cospi_20_64, &q[1], &q[3]);
     // part of stage 4
-    q13s16 = vsubq_s16(q0s16, q1s16);
-    q0s16 = vaddq_s16(q0s16, q1s16);
-    q14s16 = vsubq_s16(q2s16, q3s16);
-    q2s16 = vaddq_s16(q2s16, q3s16);
+    q[13] = vsubq_s16(q[0], q[1]);
+    q[0] = vaddq_s16(q[0], q[1]);
+    q[14] = vsubq_s16(q[2], q[3]);
+    q[2] = vaddq_s16(q[2], q[3]);
     // part of stage 5
-    DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)
+    do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]);
 
     // generate 0,1,2,3
     // part of stage 4
-    LOAD_FROM_TRANSPOSED(12, 0, 16)
-    DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q5s16, &q7s16)
-    LOAD_FROM_TRANSPOSED(16, 8, 24)
-    DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q14s16, &q6s16)
+    load_from_transformed(trans_buf, 0, 16, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[5], &q[7]);
+    load_from_transformed(trans_buf, 8, 24, &q[14], &q[13]);
+    do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[14], &q[6]);
     // part of stage 5
-    q4s16 = vaddq_s16(q7s16, q6s16);
-    q7s16 = vsubq_s16(q7s16, q6s16);
-    q6s16 = vsubq_s16(q5s16, q14s16);
-    q5s16 = vaddq_s16(q5s16, q14s16);
+    q[4] = vaddq_s16(q[7], q[6]);
+    q[7] = vsubq_s16(q[7], q[6]);
+    q[6] = vsubq_s16(q[5], q[14]);
+    q[5] = vaddq_s16(q[5], q[14]);
     // part of stage 6
-    q8s16 = vaddq_s16(q4s16, q2s16);
-    q9s16 = vaddq_s16(q5s16, q3s16);
-    q10s16 = vaddq_s16(q6s16, q1s16);
-    q11s16 = vaddq_s16(q7s16, q0s16);
-    q12s16 = vsubq_s16(q7s16, q0s16);
-    q13s16 = vsubq_s16(q6s16, q1s16);
-    q14s16 = vsubq_s16(q5s16, q3s16);
-    q15s16 = vsubq_s16(q4s16, q2s16);
+    q[8] = vaddq_s16(q[4], q[2]);
+    q[9] = vaddq_s16(q[5], q[3]);
+    q[10] = vaddq_s16(q[6], q[1]);
+    q[11] = vaddq_s16(q[7], q[0]);
+    q[12] = vsubq_s16(q[7], q[0]);
+    q[13] = vsubq_s16(q[6], q[1]);
+    q[14] = vsubq_s16(q[5], q[3]);
+    q[15] = vsubq_s16(q[4], q[2]);
     // part of stage 7
-    LOAD_FROM_OUTPUT(12, 14, 15, q0s16, q1s16)
-    q2s16 = vaddq_s16(q8s16, q1s16);
-    q3s16 = vaddq_s16(q9s16, q0s16);
-    q4s16 = vsubq_s16(q9s16, q0s16);
-    q5s16 = vsubq_s16(q8s16, q1s16);
-    LOAD_FROM_OUTPUT(15, 16, 17, q0s16, q1s16)
-    q8s16 = vaddq_s16(q4s16, q1s16);
-    q9s16 = vaddq_s16(q5s16, q0s16);
-    q6s16 = vsubq_s16(q5s16, q0s16);
-    q7s16 = vsubq_s16(q4s16, q1s16);
+    load_from_output(out, 14, 15, &q[0], &q[1]);
+    q[2] = vaddq_s16(q[8], q[1]);
+    q[3] = vaddq_s16(q[9], q[0]);
+    q[4] = vsubq_s16(q[9], q[0]);
+    q[5] = vsubq_s16(q[8], q[1]);
+    load_from_output(out, 16, 17, &q[0], &q[1]);
+    q[8] = vaddq_s16(q[4], q[1]);
+    q[9] = vaddq_s16(q[5], q[0]);
+    q[6] = vsubq_s16(q[5], q[0]);
+    q[7] = vsubq_s16(q[4], q[1]);
 
     if (idct32_pass_loop == 0) {
-      idct32_bands_end_1st_pass(out, q2s16, q3s16, q6s16, q7s16, q8s16, q9s16,
-                                q10s16, q11s16, q12s16, q13s16, q14s16,
-                                q15s16);
+      idct32_bands_end_1st_pass(out, q);
     } else {
-      idct32_bands_end_2nd_pass(out, dest, stride, q2s16, q3s16, q6s16, q7s16,
-                                q8s16, q9s16, q10s16, q11s16, q12s16, q13s16,
-                                q14s16, q15s16);
+      idct32_bands_end_2nd_pass(out, dest, stride, q);
       dest += 8;
    }
  }
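
For readers following the patch: do_butterfly() vectorizes the standard
rotation used throughout the C inverse transform in vpx_dsp/inv_txfm.c. Below
is a minimal scalar sketch of that per-lane operation. The helper names here
(round_shift, butterfly) are local to the sketch, not part of the patch.

#include <stdint.h>

#define DCT_CONST_BITS 14

// Rounding right shift; mirrors dct_const_round_shift() in vpx_dsp.
static int16_t round_shift(int32_t x) {
  return (int16_t)((x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}

// out0 = round(in0 * c0 - in1 * c1); out1 = round(in0 * c1 + in1 * c0).
// This is what do_butterfly() computes per lane, with the two
// vrshrn_n_s32(..., DCT_CONST_BITS) narrowing steps playing the role of
// round_shift().
static void butterfly(const int16_t in0, const int16_t in1, const int16_t c0,
                      const int16_t c1, int16_t *const out0,
                      int16_t *const out1) {
  *out0 = round_shift((int32_t)in0 * c0 - (int32_t)in1 * c1);
  *out1 = round_shift((int32_t)in0 * c1 + (int32_t)in1 * c0);
}

Note how the new do_butterfly() folds the subtract/add into the multiplies
(vmull_s16 followed by vmlsl_s16/vmlal_s16), whereas the old DO_BUTTERFLY
issued separate vmull_s16 and vsubq_s32/vaddq_s32 instructions.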
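The in-code note in do_butterfly() refers to the by-scalar forms of the
widening multiply intrinsics. For comparison only, a sketch of that rejected
variant under the same operand layout as do_butterfly(); the helper name is
hypothetical, and per the comment this form measured about 35% slower with
gcc 4.9:

#include <arm_neon.h>

// By-scalar alternative to the vdup_n_s16() + vmull_s16()/vmlsl_s16()
// pairing used in do_butterfly(); computes in0 * c0 - in1 * c1 widened to
// 32 bits. The caller would narrow with vrshrn_n_s32(acc, DCT_CONST_BITS).
static inline int32x4_t butterfly_half_by_scalar(const int16x4_t in0,
                                                 const int16x4_t in1,
                                                 const int16_t c0,
                                                 const int16_t c1) {
  int32x4_t acc = vmull_n_s16(in0, c0);  // in0 * c0
  acc = vmlsl_n_s16(acc, in1, c1);       // acc -= in1 * c1
  return acc;
}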