Replace multiplication_and_add() with butterfly() in idct x86 code
author: Linfeng Zhang <linfengz@google.com>
Fri, 4 Aug 2017 00:46:21 +0000 (17:46 -0700)
committer: Linfeng Zhang <linfengz@google.com>
Fri, 4 Aug 2017 22:33:34 +0000 (15:33 -0700)
Change-Id: I266e45a3d75a5357c7d6e6f20ab5c6fdbfe4982e

vpx_dsp/x86/inv_txfm_sse2.c
vpx_dsp/x86/inv_txfm_sse2.h
vpx_dsp/x86/inv_txfm_ssse3.h

index 64e068c..2bf0464 100644 (file)
@@ -1001,20 +1001,20 @@ void iadst16_sse2(__m128i *in0, __m128i *in1) {
 
 #define IDCT32_34                                                              \
   /* Stage1 */                                                                 \
-  multiplication_and_add(in[1], zero, (int)cospi_31_64, (int)cospi_1_64,       \
-                         &stp1_16, &stp1_31);                                  \
-  multiplication_and_add(zero, in[7], (int)cospi_7_64, (int)cospi_25_64,       \
-                         &stp1_19, &stp1_28);                                  \
-  multiplication_and_add(in[5], zero, (int)cospi_27_64, (int)cospi_5_64,       \
-                         &stp1_20, &stp1_27);                                  \
-  multiplication_and_add(zero, in[3], (int)cospi_3_64, (int)cospi_29_64,       \
-                         &stp1_23, &stp1_24);                                  \
+  butterfly(in[1], zero, (int)cospi_31_64, (int)cospi_1_64, &stp1_16,          \
+            &stp1_31);                                                         \
+  butterfly(zero, in[7], (int)cospi_7_64, (int)cospi_25_64, &stp1_19,          \
+            &stp1_28);                                                         \
+  butterfly(in[5], zero, (int)cospi_27_64, (int)cospi_5_64, &stp1_20,          \
+            &stp1_27);                                                         \
+  butterfly(zero, in[3], (int)cospi_3_64, (int)cospi_29_64, &stp1_23,          \
+            &stp1_24);                                                         \
                                                                                \
   /* Stage2 */                                                                 \
-  multiplication_and_add(in[2], zero, (int)cospi_30_64, (int)cospi_2_64,       \
-                         &stp2_8, &stp2_15);                                   \
-  multiplication_and_add(zero, in[6], (int)cospi_6_64, (int)cospi_26_64,       \
-                         &stp2_11, &stp2_12);                                  \
+  butterfly(in[2], zero, (int)cospi_30_64, (int)cospi_2_64, &stp2_8,           \
+            &stp2_15);                                                         \
+  butterfly(zero, in[6], (int)cospi_6_64, (int)cospi_26_64, &stp2_11,          \
+            &stp2_12);                                                         \
                                                                                \
   stp2_16 = stp1_16;                                                           \
   stp2_19 = stp1_19;                                                           \
@@ -1029,23 +1029,22 @@ void iadst16_sse2(__m128i *in0, __m128i *in1) {
   stp2_31 = stp1_31;                                                           \
                                                                                \
   /* Stage3 */                                                                 \
-  multiplication_and_add(in[4], zero, (int)cospi_28_64, (int)cospi_4_64,       \
-                         &stp1_4, &stp1_7);                                    \
+  butterfly(in[4], zero, (int)cospi_28_64, (int)cospi_4_64, &stp1_4, &stp1_7); \
                                                                                \
   stp1_8 = stp2_8;                                                             \
   stp1_11 = stp2_11;                                                           \
   stp1_12 = stp2_12;                                                           \
   stp1_15 = stp2_15;                                                           \
                                                                                \
-  multiplication_and_add(stp1_31, stp1_16, (int)cospi_28_64, (int)cospi_4_64,  \
-                         &stp1_17, &stp1_30);                                  \
-  multiplication_and_add(stp1_19, stp1_28, -(int)cospi_4_64,                   \
-                         -(int)cospi_28_64, &stp1_29, &stp1_18);               \
+  butterfly(stp1_31, stp1_16, (int)cospi_28_64, (int)cospi_4_64, &stp1_17,     \
+            &stp1_30);                                                         \
+  butterfly(stp1_19, stp1_28, -(int)cospi_4_64, -(int)cospi_28_64, &stp1_29,   \
+            &stp1_18);                                                         \
                                                                                \
-  multiplication_and_add(stp1_27, stp1_20, (int)cospi_12_64, (int)cospi_20_64, \
-                         &stp1_21, &stp1_26);                                  \
-  multiplication_and_add(stp1_23, stp1_24, -(int)cospi_20_64,                  \
-                         -(int)cospi_12_64, &stp1_25, &stp1_22);               \
+  butterfly(stp1_27, stp1_20, (int)cospi_12_64, (int)cospi_20_64, &stp1_21,    \
+            &stp1_26);                                                         \
+  butterfly(stp1_23, stp1_24, -(int)cospi_20_64, -(int)cospi_12_64, &stp1_25,  \
+            &stp1_22);                                                         \
                                                                                \
   stp1_16 = stp2_16;                                                           \
   stp1_31 = stp2_31;                                                           \
@@ -1057,18 +1056,18 @@ void iadst16_sse2(__m128i *in0, __m128i *in1) {
   stp1_28 = stp2_28;                                                           \
                                                                                \
   /* Stage4 */                                                                 \
-  multiplication_and_add(in[0], zero, (int)cospi_16_64, (int)cospi_16_64,      \
-                         &stp2_1, &stp2_0);                                    \
+  butterfly(in[0], zero, (int)cospi_16_64, (int)cospi_16_64, &stp2_1,          \
+            &stp2_0);                                                          \
                                                                                \
   stp2_4 = stp1_4;                                                             \
   stp2_5 = stp1_4;                                                             \
   stp2_6 = stp1_7;                                                             \
   stp2_7 = stp1_7;                                                             \
                                                                                \
-  multiplication_and_add(stp2_15, stp2_8, (int)cospi_24_64, (int)cospi_8_64,   \
-                         &stp2_9, &stp2_14);                                   \
-  multiplication_and_add(stp2_11, stp2_12, -(int)cospi_8_64,                   \
-                         -(int)cospi_24_64, &stp2_13, &stp2_10);               \
+  butterfly(stp2_15, stp2_8, (int)cospi_24_64, (int)cospi_8_64, &stp2_9,       \
+            &stp2_14);                                                         \
+  butterfly(stp2_11, stp2_12, -(int)cospi_8_64, -(int)cospi_24_64, &stp2_13,   \
+            &stp2_10);                                                         \
                                                                                \
   stp2_8 = stp1_8;                                                             \
   stp2_15 = stp1_15;                                                           \
@@ -1098,8 +1097,8 @@ void iadst16_sse2(__m128i *in0, __m128i *in1) {
   stp1_1 = stp2_1;                                                             \
   stp1_2 = stp2_1;                                                             \
   stp1_3 = stp2_0;                                                             \
-  multiplication_and_add(stp2_6, stp2_5, (int)cospi_16_64, (int)cospi_16_64,   \
-                         &stp1_5, &stp1_6);                                    \
+  butterfly(stp2_6, stp2_5, (int)cospi_16_64, (int)cospi_16_64, &stp1_5,       \
+            &stp1_6);                                                          \
                                                                                \
   stp1_4 = stp2_4;                                                             \
   stp1_7 = stp2_7;                                                             \
@@ -1116,14 +1115,14 @@ void iadst16_sse2(__m128i *in0, __m128i *in1) {
   stp1_16 = stp2_16;                                                           \
   stp1_17 = stp2_17;                                                           \
                                                                                \
-  multiplication_and_add(stp2_29, stp2_18, (int)cospi_24_64, (int)cospi_8_64,  \
-                         &stp1_18, &stp1_29);                                  \
-  multiplication_and_add(stp2_28, stp2_19, (int)cospi_24_64, (int)cospi_8_64,  \
-                         &stp1_19, &stp1_28);                                  \
-  multiplication_and_add(stp2_20, stp2_27, -(int)cospi_8_64,                   \
-                         -(int)cospi_24_64, &stp1_27, &stp1_20);               \
-  multiplication_and_add(stp2_21, stp2_26, -(int)cospi_8_64,                   \
-                         -(int)cospi_24_64, &stp1_26, &stp1_21);               \
+  butterfly(stp2_29, stp2_18, (int)cospi_24_64, (int)cospi_8_64, &stp1_18,     \
+            &stp1_29);                                                         \
+  butterfly(stp2_28, stp2_19, (int)cospi_24_64, (int)cospi_8_64, &stp1_19,     \
+            &stp1_28);                                                         \
+  butterfly(stp2_20, stp2_27, -(int)cospi_8_64, -(int)cospi_24_64, &stp1_27,   \
+            &stp1_20);                                                         \
+  butterfly(stp2_21, stp2_26, -(int)cospi_8_64, -(int)cospi_24_64, &stp1_26,   \
+            &stp1_21);                                                         \
                                                                                \
   stp1_22 = stp2_22;                                                           \
   stp1_23 = stp2_23;                                                           \
@@ -1147,10 +1146,10 @@ void iadst16_sse2(__m128i *in0, __m128i *in1) {
   stp2_14 = stp1_14;                                                           \
   stp2_15 = stp1_15;                                                           \
                                                                                \
-  multiplication_and_add(stp1_13, stp1_10, (int)cospi_16_64, (int)cospi_16_64, \
-                         &stp2_10, &stp2_13);                                  \
-  multiplication_and_add(stp1_12, stp1_11, (int)cospi_16_64, (int)cospi_16_64, \
-                         &stp2_11, &stp2_12);                                  \
+  butterfly(stp1_13, stp1_10, (int)cospi_16_64, (int)cospi_16_64, &stp2_10,    \
+            &stp2_13);                                                         \
+  butterfly(stp1_12, stp1_11, (int)cospi_16_64, (int)cospi_16_64, &stp2_11,    \
+            &stp2_12);                                                         \
                                                                                \
   stp2_16 = _mm_add_epi16(stp1_16, stp1_23);                                   \
   stp2_17 = _mm_add_epi16(stp1_17, stp1_22);                                   \
@@ -1193,14 +1192,14 @@ void iadst16_sse2(__m128i *in0, __m128i *in1) {
   stp1_18 = stp2_18;                                                           \
   stp1_19 = stp2_19;                                                           \
                                                                                \
-  multiplication_and_add(stp2_27, stp2_20, (int)cospi_16_64, (int)cospi_16_64, \
-                         &stp1_20, &stp1_27);                                  \
-  multiplication_and_add(stp2_26, stp2_21, (int)cospi_16_64, (int)cospi_16_64, \
-                         &stp1_21, &stp1_26);                                  \
-  multiplication_and_add(stp2_25, stp2_22, (int)cospi_16_64, (int)cospi_16_64, \
-                         &stp1_22, &stp1_25);                                  \
-  multiplication_and_add(stp2_24, stp2_23, (int)cospi_16_64, (int)cospi_16_64, \
-                         &stp1_23, &stp1_24);                                  \
+  butterfly(stp2_27, stp2_20, (int)cospi_16_64, (int)cospi_16_64, &stp1_20,    \
+            &stp1_27);                                                         \
+  butterfly(stp2_26, stp2_21, (int)cospi_16_64, (int)cospi_16_64, &stp1_21,    \
+            &stp1_26);                                                         \
+  butterfly(stp2_25, stp2_22, (int)cospi_16_64, (int)cospi_16_64, &stp1_22,    \
+            &stp1_25);                                                         \
+  butterfly(stp2_24, stp2_23, (int)cospi_16_64, (int)cospi_16_64, &stp1_23,    \
+            &stp1_24);                                                         \
                                                                                \
   stp1_28 = stp2_28;                                                           \
   stp1_29 = stp2_29;                                                           \
index cdfc958..6666d17 100644 (file)
@@ -78,10 +78,9 @@ static INLINE __m128i idct_calc_wraplow_sse2(const __m128i in0,
 }
 
 // Multiply elements by constants and add them together.
-static INLINE void multiplication_and_add(const __m128i in0, const __m128i in1,
-                                          const int c0, const int c1,
-                                          __m128i *const res0,
-                                          __m128i *const res1) {
+static INLINE void butterfly(const __m128i in0, const __m128i in1, const int c0,
+                             const int c1, __m128i *const res0,
+                             __m128i *const res1) {
   const __m128i cst0 = pair_set_epi16(c0, -c1);
   const __m128i cst1 = pair_set_epi16(c1, c0);
   const __m128i lo = _mm_unpacklo_epi16(in0, in1);
@@ -90,8 +89,6 @@ static INLINE void multiplication_and_add(const __m128i in0, const __m128i in1,
   *res1 = idct_calc_wraplow_sse2(lo, hi, cst1);
 }
 
-#define butterfly multiplication_and_add
-
 // Functions to allow 8 bit optimisations to be used when profile 0 is used with
 // highbitdepth enabled
 static INLINE __m128i load_input_data4(const tran_low_t *data) {
@@ -315,16 +312,16 @@ static INLINE void idct8(const __m128i *const in /*in[8]*/,
   __m128i step1[8], step2[8];
 
   // stage 1
-  multiplication_and_add(in[1], in[7], (int)cospi_28_64, (int)cospi_4_64,
-                         &step1[4], &step1[7]);
-  multiplication_and_add(in[5], in[3], (int)cospi_12_64, (int)cospi_20_64,
-                         &step1[5], &step1[6]);
+  butterfly(in[1], in[7], (int)cospi_28_64, (int)cospi_4_64, &step1[4],
+            &step1[7]);
+  butterfly(in[5], in[3], (int)cospi_12_64, (int)cospi_20_64, &step1[5],
+            &step1[6]);
 
   // stage 2
-  multiplication_and_add(in[0], in[4], (int)cospi_16_64, (int)cospi_16_64,
-                         &step2[1], &step2[0]);
-  multiplication_and_add(in[2], in[6], (int)cospi_24_64, (int)cospi_8_64,
-                         &step2[2], &step2[3]);
+  butterfly(in[0], in[4], (int)cospi_16_64, (int)cospi_16_64, &step2[1],
+            &step2[0]);
+  butterfly(in[2], in[6], (int)cospi_24_64, (int)cospi_8_64, &step2[2],
+            &step2[3]);
 
   step2[4] = _mm_add_epi16(step1[4], step1[5]);
   step2[5] = _mm_sub_epi16(step1[4], step1[5]);
@@ -336,8 +333,8 @@ static INLINE void idct8(const __m128i *const in /*in[8]*/,
   step1[1] = _mm_add_epi16(step2[1], step2[2]);
   step1[2] = _mm_sub_epi16(step2[1], step2[2]);
   step1[3] = _mm_sub_epi16(step2[0], step2[3]);
-  multiplication_and_add(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64,
-                         &step1[5], &step1[6]);
+  butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
+            &step1[6]);
 
   // stage 4
   out[0] = _mm_add_epi16(step1[0], step2[7]);
@@ -412,20 +409,20 @@ static INLINE void idct16_8col(__m128i *const io /*io[16]*/) {
   __m128i step1[16], step2[16];
 
   // stage 2
-  multiplication_and_add(io[1], io[15], (int)cospi_30_64, (int)cospi_2_64,
-                         &step2[8], &step2[15]);
-  multiplication_and_add(io[9], io[7], (int)cospi_14_64, (int)cospi_18_64,
-                         &step2[9], &step2[14]);
-  multiplication_and_add(io[5], io[11], (int)cospi_22_64, (int)cospi_10_64,
-                         &step2[10], &step2[13]);
-  multiplication_and_add(io[13], io[3], (int)cospi_6_64, (int)cospi_26_64,
-                         &step2[11], &step2[12]);
+  butterfly(io[1], io[15], (int)cospi_30_64, (int)cospi_2_64, &step2[8],
+            &step2[15]);
+  butterfly(io[9], io[7], (int)cospi_14_64, (int)cospi_18_64, &step2[9],
+            &step2[14]);
+  butterfly(io[5], io[11], (int)cospi_22_64, (int)cospi_10_64, &step2[10],
+            &step2[13]);
+  butterfly(io[13], io[3], (int)cospi_6_64, (int)cospi_26_64, &step2[11],
+            &step2[12]);
 
   // stage 3
-  multiplication_and_add(io[2], io[14], (int)cospi_28_64, (int)cospi_4_64,
-                         &step1[4], &step1[7]);
-  multiplication_and_add(io[10], io[6], (int)cospi_12_64, (int)cospi_20_64,
-                         &step1[5], &step1[6]);
+  butterfly(io[2], io[14], (int)cospi_28_64, (int)cospi_4_64, &step1[4],
+            &step1[7]);
+  butterfly(io[10], io[6], (int)cospi_12_64, (int)cospi_20_64, &step1[5],
+            &step1[6]);
   step1[8] = _mm_add_epi16(step2[8], step2[9]);
   step1[9] = _mm_sub_epi16(step2[8], step2[9]);
   step1[10] = _mm_sub_epi16(step2[11], step2[10]);
@@ -436,14 +433,14 @@ static INLINE void idct16_8col(__m128i *const io /*io[16]*/) {
   step1[15] = _mm_add_epi16(step2[14], step2[15]);
 
   // stage 4
-  multiplication_and_add(io[0], io[8], (int)cospi_16_64, (int)cospi_16_64,
-                         &step2[1], &step2[0]);
-  multiplication_and_add(io[4], io[12], (int)cospi_24_64, (int)cospi_8_64,
-                         &step2[2], &step2[3]);
-  multiplication_and_add(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64,
-                         &step2[9], &step2[14]);
-  multiplication_and_add(step1[10], step1[13], -(int)cospi_8_64,
-                         -(int)cospi_24_64, &step2[13], &step2[10]);
+  butterfly(io[0], io[8], (int)cospi_16_64, (int)cospi_16_64, &step2[1],
+            &step2[0]);
+  butterfly(io[4], io[12], (int)cospi_24_64, (int)cospi_8_64, &step2[2],
+            &step2[3]);
+  butterfly(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64, &step2[9],
+            &step2[14]);
+  butterfly(step1[10], step1[13], -(int)cospi_8_64, -(int)cospi_24_64,
+            &step2[13], &step2[10]);
   step2[5] = _mm_sub_epi16(step1[4], step1[5]);
   step1[4] = _mm_add_epi16(step1[4], step1[5]);
   step2[6] = _mm_sub_epi16(step1[7], step1[6]);
@@ -458,8 +455,8 @@ static INLINE void idct16_8col(__m128i *const io /*io[16]*/) {
   step1[1] = _mm_add_epi16(step2[1], step2[2]);
   step1[2] = _mm_sub_epi16(step2[1], step2[2]);
   step1[3] = _mm_sub_epi16(step2[0], step2[3]);
-  multiplication_and_add(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64,
-                         &step1[5], &step1[6]);
+  butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
+            &step1[6]);
   step1[8] = _mm_add_epi16(step2[8], step2[11]);
   step1[9] = _mm_add_epi16(step2[9], step2[10]);
   step1[10] = _mm_sub_epi16(step2[9], step2[10]);
@@ -478,10 +475,10 @@ static INLINE void idct16_8col(__m128i *const io /*io[16]*/) {
   step2[5] = _mm_sub_epi16(step1[2], step1[5]);
   step2[6] = _mm_sub_epi16(step1[1], step1[6]);
   step2[7] = _mm_sub_epi16(step1[0], step1[7]);
-  multiplication_and_add(step1[13], step1[10], (int)cospi_16_64,
-                         (int)cospi_16_64, &step2[10], &step2[13]);
-  multiplication_and_add(step1[12], step1[11], (int)cospi_16_64,
-                         (int)cospi_16_64, &step2[11], &step2[12]);
+  butterfly(step1[13], step1[10], (int)cospi_16_64, (int)cospi_16_64,
+            &step2[10], &step2[13]);
+  butterfly(step1[12], step1[11], (int)cospi_16_64, (int)cospi_16_64,
+            &step2[11], &step2[12]);
 
   // stage 7
   io[0] = _mm_add_epi16(step2[0], step1[15]);
@@ -615,26 +612,26 @@ static INLINE void idct16x16_10_pass2(__m128i *const l /*l[8]*/,
   transpose_16bit_4x8(l, io);
 
   // stage 2
-  multiplication_and_add(io[1], zero, (int)cospi_30_64, (int)cospi_2_64,
-                         &step2[8], &step2[15]);
-  multiplication_and_add(zero, io[3], (int)cospi_6_64, (int)cospi_26_64,
-                         &step2[11], &step2[12]);
+  butterfly(io[1], zero, (int)cospi_30_64, (int)cospi_2_64, &step2[8],
+            &step2[15]);
+  butterfly(zero, io[3], (int)cospi_6_64, (int)cospi_26_64, &step2[11],
+            &step2[12]);
 
   // stage 3
-  multiplication_and_add(io[2], zero, (int)cospi_28_64, (int)cospi_4_64,
-                         &step1[4], &step1[7]);
+  butterfly(io[2], zero, (int)cospi_28_64, (int)cospi_4_64, &step1[4],
+            &step1[7]);
 
   // stage 4
-  multiplication_and_add(io[0], zero, (int)cospi_16_64, (int)cospi_16_64,
-                         &step1[1], &step1[0]);
-  multiplication_and_add(step2[15], step2[8], (int)cospi_24_64, (int)cospi_8_64,
-                         &step2[9], &step2[14]);
-  multiplication_and_add(step2[11], step2[12], -(int)cospi_8_64,
-                         -(int)cospi_24_64, &step2[13], &step2[10]);
+  butterfly(io[0], zero, (int)cospi_16_64, (int)cospi_16_64, &step1[1],
+            &step1[0]);
+  butterfly(step2[15], step2[8], (int)cospi_24_64, (int)cospi_8_64, &step2[9],
+            &step2[14]);
+  butterfly(step2[11], step2[12], -(int)cospi_8_64, -(int)cospi_24_64,
+            &step2[13], &step2[10]);
 
   // stage 5
-  multiplication_and_add(step1[7], step1[4], (int)cospi_16_64, (int)cospi_16_64,
-                         &step1[5], &step1[6]);
+  butterfly(step1[7], step1[4], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
+            &step1[6]);
   step1[8] = _mm_add_epi16(step2[8], step2[11]);
   step1[9] = _mm_add_epi16(step2[9], step2[10]);
   step1[10] = _mm_sub_epi16(step2[9], step2[10]);
@@ -653,10 +650,10 @@ static INLINE void idct16x16_10_pass2(__m128i *const l /*l[8]*/,
   step2[5] = _mm_sub_epi16(step1[1], step1[5]);
   step2[6] = _mm_sub_epi16(step1[1], step1[6]);
   step2[7] = _mm_sub_epi16(step1[0], step1[7]);
-  multiplication_and_add(step1[13], step1[10], (int)cospi_16_64,
-                         (int)cospi_16_64, &step2[10], &step2[13]);
-  multiplication_and_add(step1[12], step1[11], (int)cospi_16_64,
-                         (int)cospi_16_64, &step2[11], &step2[12]);
+  butterfly(step1[13], step1[10], (int)cospi_16_64, (int)cospi_16_64,
+            &step2[10], &step2[13]);
+  butterfly(step1[12], step1[11], (int)cospi_16_64, (int)cospi_16_64,
+            &step2[11], &step2[12]);
 
   // stage 7
   io[0] = _mm_add_epi16(step2[0], step1[15]);
index 3db712a..92d4c4e 100644 (file)
@@ -92,8 +92,8 @@ static INLINE void idct8x8_12_add_kernel_ssse3(__m128i *const io /* io[8] */) {
   step1[1] = _mm_add_epi16(step2[0], step2[2]);
   step1[2] = _mm_sub_epi16(step2[0], step2[2]);
   step1[3] = _mm_sub_epi16(step2[0], step2[3]);
-  multiplication_and_add(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64,
-                         &step1[5], &step1[6]);
+  butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
+            &step1[6]);
 
   // stage 4
   io[0] = _mm_add_epi16(step1[0], step2[7]);