Optimize 16x16 idct10 function
author Yunqing Wang <yunqingwang@google.com>
Thu, 21 Mar 2013 23:29:36 +0000 (16:29 -0700)
committer Yunqing Wang <yunqingwang@google.com>
Thu, 21 Mar 2013 23:36:01 +0000 (16:36 -0700)
Wrote an SSE2 version of the vp9_short_idct10_16x16 function. Compared
to the C version, the SSE2 version is 2.3X faster.

Change-Id: I314c4f09369648721798321eeed6f58e38857f26
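
For reference, the SSE2 code below relies on two fixed-point scaling steps that
mirror the C idct: each pair of _mm_madd_epi16 butterfly products is brought
back to 16-bit range by adding DCT_CONST_ROUNDING and shifting right by
DCT_CONST_BITS, and the final 16x16 output is rounded by 1 << 5 and shifted
right by 6. A minimal scalar sketch of those two steps, using hypothetical
helper names and assuming vp9's usual DCT_CONST_BITS of 14:

/* Per-butterfly rounding, applied after every _mm_madd_epi16 pair below.
 * Assumes DCT_CONST_BITS == 14, so DCT_CONST_ROUNDING == 1 << 13. */
static int dct_round_shift_sketch(int x) {
  return (x + (1 << 13)) >> 14;
}

/* Final per-sample rounding for the 16x16 idct: final_rounding is 1 << 5 and
 * each 16-bit lane is then arithmetic-shifted right by 6. */
static int final_round_shift_sketch(int x) {
  return (x + (1 << 5)) >> 6;
}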

vp9/common/vp9_rtcd_defs.sh
vp9/common/x86/vp9_idct_x86.c
vp9/decoder/vp9_dequantize.c

diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh
index 1c95ede..c1153ed 100644
--- a/vp9/common/vp9_rtcd_defs.sh
+++ b/vp9/common/vp9_rtcd_defs.sh
@@ -298,7 +298,7 @@ prototype void vp9_short_idct16x16 "int16_t *input, int16_t *output, int pitch"
 specialize vp9_short_idct16x16 sse2
 
 prototype void vp9_short_idct10_16x16 "int16_t *input, int16_t *output, int pitch"
-specialize vp9_short_idct10_16x16
+specialize vp9_short_idct10_16x16 sse2
 
 prototype void vp9_short_idct1_16x16 "int16_t *input, int16_t *output"
 specialize vp9_short_idct1_16x16
diff --git a/vp9/common/x86/vp9_idct_x86.c b/vp9/common/x86/vp9_idct_x86.c
index d172449..1a2c84a 100644
--- a/vp9/common/x86/vp9_idct_x86.c
+++ b/vp9/common/x86/vp9_idct_x86.c
@@ -265,6 +265,39 @@ void vp9_idct4_1d_sse2(int16_t *input, int16_t *output) {
     out7 = _mm_unpackhi_epi64(tr1_3, tr1_7); \
   }
 
+#define TRANSPOSE_4X8(in0, in1, in2, in3, in4, in5, in6, in7, \
+                      out0, out1, out2, out3, out4, out5, out6, out7) \
+  {                                                     \
+    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
+    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
+    const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5); \
+    const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7); \
+                                                        \
+    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
+    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
+    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \
+    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \
+                                                            \
+    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \
+    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \
+    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \
+    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \
+    out4 = out5 = out6 = out7 = zero; \
+  }
+
+#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1, out2, out3) \
+  {                                                     \
+    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
+    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
+    const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1); \
+    const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3); \
+                                                        \
+    in0 = _mm_unpacklo_epi32(tr0_0, tr0_1);  /* i1 i0 */  \
+    in1 = _mm_unpackhi_epi32(tr0_0, tr0_1);  /* i3 i2 */  \
+    in2 = _mm_unpacklo_epi32(tr0_2, tr0_3);  /* i5 i4 */  \
+    in3 = _mm_unpackhi_epi32(tr0_2, tr0_3);  /* i7 i6 */  \
+  }
+
 #define IDCT8x8_1D                                             \
   /* Stage1 */                                                 \
   {                                                            \
@@ -430,14 +463,14 @@ void vp9_short_idct8x8_sse2(int16_t *input, int16_t *output, int pitch) {
   }
 
   // Final rounding and shift
-  in0 = _mm_add_epi16(in0, final_rounding);
-  in1 = _mm_add_epi16(in1, final_rounding);
-  in2 = _mm_add_epi16(in2, final_rounding);
-  in3 = _mm_add_epi16(in3, final_rounding);
-  in4 = _mm_add_epi16(in4, final_rounding);
-  in5 = _mm_add_epi16(in5, final_rounding);
-  in6 = _mm_add_epi16(in6, final_rounding);
-  in7 = _mm_add_epi16(in7, final_rounding);
+  in0 = _mm_adds_epi16(in0, final_rounding);
+  in1 = _mm_adds_epi16(in1, final_rounding);
+  in2 = _mm_adds_epi16(in2, final_rounding);
+  in3 = _mm_adds_epi16(in3, final_rounding);
+  in4 = _mm_adds_epi16(in4, final_rounding);
+  in5 = _mm_adds_epi16(in5, final_rounding);
+  in6 = _mm_adds_epi16(in6, final_rounding);
+  in7 = _mm_adds_epi16(in7, final_rounding);
 
   in0 = _mm_srai_epi16(in0, 5);
   in1 = _mm_srai_epi16(in1, 5);
@@ -486,22 +519,12 @@ void vp9_short_idct10_8x8_sse2(int16_t *input, int16_t *output, int pitch) {
   in3 = _mm_load_si128((__m128i *)(input + 8 * 3));
 
   // 8x4 Transpose
-  {
-    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);
-    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);
-    const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1);
-    const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3);
-
-    in0 = _mm_unpacklo_epi32(tr0_0, tr0_1);  // i1 i0
-    in1 = _mm_unpacklo_epi32(tr0_2, tr0_3);  // i5 i4
-    in2 = _mm_unpackhi_epi32(tr0_0, tr0_1);  // i3 i2
-    in3 = _mm_unpackhi_epi32(tr0_2, tr0_3);  // i7 i6
-  }
+  TRANSPOSE_8X4(in0, in1, in2, in3, in0, in1, in2, in3)
 
   // Stage1
   {
     const __m128i lo_17 = _mm_unpackhi_epi16(in0, in3);
-    const __m128i lo_35 = _mm_unpackhi_epi16(in2, in1);
+    const __m128i lo_35 = _mm_unpackhi_epi16(in1, in2);
 
     tmp0 = _mm_madd_epi16(lo_17, stg1_0);
     tmp2 = _mm_madd_epi16(lo_17, stg1_1);
@@ -525,8 +548,8 @@ void vp9_short_idct10_8x8_sse2(int16_t *input, int16_t *output, int pitch) {
 
   // Stage2
   {
-    const __m128i lo_04 = _mm_unpacklo_epi16(in0, in1);
-    const __m128i lo_26 = _mm_unpacklo_epi16(in2, in3);
+    const __m128i lo_04 = _mm_unpacklo_epi16(in0, in2);
+    const __m128i lo_26 = _mm_unpacklo_epi16(in1, in3);
 
     tmp0 = _mm_madd_epi16(lo_04, stg2_0);
     tmp2 = _mm_madd_epi16(lo_04, stg2_1);
@@ -584,39 +607,21 @@ void vp9_short_idct10_8x8_sse2(int16_t *input, int16_t *output, int pitch) {
   in7 = _mm_subs_epi16(stp1_0, stp2_7);
 
   // Columns. 4x8 Transpose
-  {
-    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);
-    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);
-    const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5);
-    const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7);
-
-    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
-    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
-    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
-    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
-
-    in0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
-    in1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
-    in2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
-    in3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
-    in4 = _mm_setzero_si128();
-    in5 = _mm_setzero_si128();
-    in6 = _mm_setzero_si128();
-    in7 = _mm_setzero_si128();
-  }
+  TRANSPOSE_4X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                in4, in5, in6, in7)
 
   // 1D idct8x8
   IDCT8x8_1D
 
   // Final rounding and shift
-  in0 = _mm_add_epi16(in0, final_rounding);
-  in1 = _mm_add_epi16(in1, final_rounding);
-  in2 = _mm_add_epi16(in2, final_rounding);
-  in3 = _mm_add_epi16(in3, final_rounding);
-  in4 = _mm_add_epi16(in4, final_rounding);
-  in5 = _mm_add_epi16(in5, final_rounding);
-  in6 = _mm_add_epi16(in6, final_rounding);
-  in7 = _mm_add_epi16(in7, final_rounding);
+  in0 = _mm_adds_epi16(in0, final_rounding);
+  in1 = _mm_adds_epi16(in1, final_rounding);
+  in2 = _mm_adds_epi16(in2, final_rounding);
+  in3 = _mm_adds_epi16(in3, final_rounding);
+  in4 = _mm_adds_epi16(in4, final_rounding);
+  in5 = _mm_adds_epi16(in5, final_rounding);
+  in6 = _mm_adds_epi16(in6, final_rounding);
+  in7 = _mm_adds_epi16(in7, final_rounding);
 
   in0 = _mm_srai_epi16(in0, 5);
   in1 = _mm_srai_epi16(in1, 5);
@@ -638,6 +643,303 @@ void vp9_short_idct10_8x8_sse2(int16_t *input, int16_t *output, int pitch) {
   _mm_store_si128((__m128i *)(output + half_pitch * 7), in7);
 }
 
+#define IDCT16x16_1D                                       \
+  /* Stage2 */                                             \
+  {                                                        \
+    const __m128i lo_1_15 = _mm_unpacklo_epi16(in1, in15); \
+    const __m128i hi_1_15 = _mm_unpackhi_epi16(in1, in15); \
+    const __m128i lo_9_7 = _mm_unpacklo_epi16(in9, in7);   \
+    const __m128i hi_9_7 = _mm_unpackhi_epi16(in9, in7);   \
+    const __m128i lo_5_11 = _mm_unpacklo_epi16(in5, in11); \
+    const __m128i hi_5_11 = _mm_unpackhi_epi16(in5, in11); \
+    const __m128i lo_13_3 = _mm_unpacklo_epi16(in13, in3); \
+    const __m128i hi_13_3 = _mm_unpackhi_epi16(in13, in3); \
+                                            \
+    tmp0 = _mm_madd_epi16(lo_1_15, stg2_0); \
+    tmp1 = _mm_madd_epi16(hi_1_15, stg2_0); \
+    tmp2 = _mm_madd_epi16(lo_1_15, stg2_1); \
+    tmp3 = _mm_madd_epi16(hi_1_15, stg2_1); \
+    tmp4 = _mm_madd_epi16(lo_9_7, stg2_2);  \
+    tmp5 = _mm_madd_epi16(hi_9_7, stg2_2);  \
+    tmp6 = _mm_madd_epi16(lo_9_7, stg2_3);  \
+    tmp7 = _mm_madd_epi16(hi_9_7, stg2_3);  \
+                                          \
+    tmp0 = _mm_add_epi32(tmp0, rounding); \
+    tmp1 = _mm_add_epi32(tmp1, rounding); \
+    tmp2 = _mm_add_epi32(tmp2, rounding); \
+    tmp3 = _mm_add_epi32(tmp3, rounding); \
+    tmp4 = _mm_add_epi32(tmp4, rounding); \
+    tmp5 = _mm_add_epi32(tmp5, rounding); \
+    tmp6 = _mm_add_epi32(tmp6, rounding); \
+    tmp7 = _mm_add_epi32(tmp7, rounding); \
+                                          \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
+    tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
+    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
+    tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
+                                           \
+    stp2_8 = _mm_packs_epi32(tmp0, tmp1);  \
+    stp2_15 = _mm_packs_epi32(tmp2, tmp3); \
+    stp2_9 = _mm_packs_epi32(tmp4, tmp5);  \
+    stp2_14 = _mm_packs_epi32(tmp6, tmp7); \
+                                           \
+    tmp0 = _mm_madd_epi16(lo_5_11, stg2_4); \
+    tmp1 = _mm_madd_epi16(hi_5_11, stg2_4); \
+    tmp2 = _mm_madd_epi16(lo_5_11, stg2_5); \
+    tmp3 = _mm_madd_epi16(hi_5_11, stg2_5); \
+    tmp4 = _mm_madd_epi16(lo_13_3, stg2_6); \
+    tmp5 = _mm_madd_epi16(hi_13_3, stg2_6); \
+    tmp6 = _mm_madd_epi16(lo_13_3, stg2_7); \
+    tmp7 = _mm_madd_epi16(hi_13_3, stg2_7); \
+                                          \
+    tmp0 = _mm_add_epi32(tmp0, rounding); \
+    tmp1 = _mm_add_epi32(tmp1, rounding); \
+    tmp2 = _mm_add_epi32(tmp2, rounding); \
+    tmp3 = _mm_add_epi32(tmp3, rounding); \
+    tmp4 = _mm_add_epi32(tmp4, rounding); \
+    tmp5 = _mm_add_epi32(tmp5, rounding); \
+    tmp6 = _mm_add_epi32(tmp6, rounding); \
+    tmp7 = _mm_add_epi32(tmp7, rounding); \
+                                          \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
+    tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
+    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
+    tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
+                                           \
+    stp2_10 = _mm_packs_epi32(tmp0, tmp1); \
+    stp2_13 = _mm_packs_epi32(tmp2, tmp3); \
+    stp2_11 = _mm_packs_epi32(tmp4, tmp5); \
+    stp2_12 = _mm_packs_epi32(tmp6, tmp7); \
+  }                                        \
+                                           \
+  /* Stage3 */                             \
+  {                                        \
+    const __m128i lo_2_14 = _mm_unpacklo_epi16(in2, in14); \
+    const __m128i hi_2_14 = _mm_unpackhi_epi16(in2, in14); \
+    const __m128i lo_10_6 = _mm_unpacklo_epi16(in10, in6); \
+    const __m128i hi_10_6 = _mm_unpackhi_epi16(in10, in6); \
+                                            \
+    tmp0 = _mm_madd_epi16(lo_2_14, stg3_0); \
+    tmp1 = _mm_madd_epi16(hi_2_14, stg3_0); \
+    tmp2 = _mm_madd_epi16(lo_2_14, stg3_1); \
+    tmp3 = _mm_madd_epi16(hi_2_14, stg3_1); \
+    tmp4 = _mm_madd_epi16(lo_10_6, stg3_2); \
+    tmp5 = _mm_madd_epi16(hi_10_6, stg3_2); \
+    tmp6 = _mm_madd_epi16(lo_10_6, stg3_3); \
+    tmp7 = _mm_madd_epi16(hi_10_6, stg3_3); \
+                                          \
+    tmp0 = _mm_add_epi32(tmp0, rounding); \
+    tmp1 = _mm_add_epi32(tmp1, rounding); \
+    tmp2 = _mm_add_epi32(tmp2, rounding); \
+    tmp3 = _mm_add_epi32(tmp3, rounding); \
+    tmp4 = _mm_add_epi32(tmp4, rounding); \
+    tmp5 = _mm_add_epi32(tmp5, rounding); \
+    tmp6 = _mm_add_epi32(tmp6, rounding); \
+    tmp7 = _mm_add_epi32(tmp7, rounding); \
+                                          \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
+    tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
+    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
+    tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
+                                          \
+    stp1_4 = _mm_packs_epi32(tmp0, tmp1); \
+    stp1_7 = _mm_packs_epi32(tmp2, tmp3); \
+    stp1_5 = _mm_packs_epi32(tmp4, tmp5); \
+    stp1_6 = _mm_packs_epi32(tmp6, tmp7); \
+                                          \
+    stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9);  \
+    stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);    \
+    stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \
+    stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \
+                                               \
+    stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13); \
+    stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \
+    stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); \
+    stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \
+  }                                            \
+                                               \
+  /* Stage4 */                                 \
+  {                                            \
+    const __m128i lo_0_8 = _mm_unpacklo_epi16(in0, in8); \
+    const __m128i hi_0_8 = _mm_unpackhi_epi16(in0, in8); \
+    const __m128i lo_4_12 = _mm_unpacklo_epi16(in4, in12); \
+    const __m128i hi_4_12 = _mm_unpackhi_epi16(in4, in12); \
+                                                           \
+    const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \
+    const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
+                                           \
+    tmp0 = _mm_madd_epi16(lo_0_8, stg4_0); \
+    tmp1 = _mm_madd_epi16(hi_0_8, stg4_0); \
+    tmp2 = _mm_madd_epi16(lo_0_8, stg4_1); \
+    tmp3 = _mm_madd_epi16(hi_0_8, stg4_1); \
+    tmp4 = _mm_madd_epi16(lo_4_12, stg4_2); \
+    tmp5 = _mm_madd_epi16(hi_4_12, stg4_2); \
+    tmp6 = _mm_madd_epi16(lo_4_12, stg4_3); \
+    tmp7 = _mm_madd_epi16(hi_4_12, stg4_3); \
+                                          \
+    tmp0 = _mm_add_epi32(tmp0, rounding); \
+    tmp1 = _mm_add_epi32(tmp1, rounding); \
+    tmp2 = _mm_add_epi32(tmp2, rounding); \
+    tmp3 = _mm_add_epi32(tmp3, rounding); \
+    tmp4 = _mm_add_epi32(tmp4, rounding); \
+    tmp5 = _mm_add_epi32(tmp5, rounding); \
+    tmp6 = _mm_add_epi32(tmp6, rounding); \
+    tmp7 = _mm_add_epi32(tmp7, rounding); \
+                                          \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
+    tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
+    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
+    tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
+                                          \
+    stp2_0 = _mm_packs_epi32(tmp0, tmp1); \
+    stp2_1 = _mm_packs_epi32(tmp2, tmp3); \
+    stp2_2 = _mm_packs_epi32(tmp4, tmp5); \
+    stp2_3 = _mm_packs_epi32(tmp6, tmp7); \
+                                          \
+    stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \
+    stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \
+    stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \
+    stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \
+                                            \
+    tmp0 = _mm_madd_epi16(lo_9_14, stg4_4); \
+    tmp1 = _mm_madd_epi16(hi_9_14, stg4_4); \
+    tmp2 = _mm_madd_epi16(lo_9_14, stg4_5); \
+    tmp3 = _mm_madd_epi16(hi_9_14, stg4_5); \
+    tmp4 = _mm_madd_epi16(lo_10_13, stg4_6); \
+    tmp5 = _mm_madd_epi16(hi_10_13, stg4_6); \
+    tmp6 = _mm_madd_epi16(lo_10_13, stg4_7); \
+    tmp7 = _mm_madd_epi16(hi_10_13, stg4_7); \
+                                          \
+    tmp0 = _mm_add_epi32(tmp0, rounding); \
+    tmp1 = _mm_add_epi32(tmp1, rounding); \
+    tmp2 = _mm_add_epi32(tmp2, rounding); \
+    tmp3 = _mm_add_epi32(tmp3, rounding); \
+    tmp4 = _mm_add_epi32(tmp4, rounding); \
+    tmp5 = _mm_add_epi32(tmp5, rounding); \
+    tmp6 = _mm_add_epi32(tmp6, rounding); \
+    tmp7 = _mm_add_epi32(tmp7, rounding); \
+                                          \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
+    tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
+    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
+    tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
+                                           \
+    stp2_9 = _mm_packs_epi32(tmp0, tmp1);  \
+    stp2_14 = _mm_packs_epi32(tmp2, tmp3); \
+    stp2_10 = _mm_packs_epi32(tmp4, tmp5); \
+    stp2_13 = _mm_packs_epi32(tmp6, tmp7); \
+  }                                        \
+                                           \
+  /* Stage5 */                             \
+  {                                        \
+    const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
+    const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
+                                            \
+    stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \
+    stp1_1 = _mm_add_epi16(stp2_1, stp2_2); \
+    stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \
+    stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \
+                                           \
+    tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \
+    tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \
+    tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \
+    tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \
+                                          \
+    tmp0 = _mm_add_epi32(tmp0, rounding); \
+    tmp1 = _mm_add_epi32(tmp1, rounding); \
+    tmp2 = _mm_add_epi32(tmp2, rounding); \
+    tmp3 = _mm_add_epi32(tmp3, rounding); \
+                                          \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+                                          \
+    stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
+    stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
+                                          \
+    stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);  \
+    stp1_9 = _mm_add_epi16(stp2_9, stp2_10);    \
+    stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);   \
+    stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \
+                                                 \
+    stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \
+    stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);   \
+    stp1_14 = _mm_add_epi16(stp2_14, stp2_13);   \
+    stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \
+  }                                              \
+                                                 \
+  /* Stage6 */                                   \
+  {                                              \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
+    const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \
+    const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \
+                                            \
+    stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \
+    stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \
+    stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \
+    stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \
+    stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \
+    stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \
+    stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \
+    stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \
+                                             \
+    tmp0 = _mm_madd_epi16(lo_10_13, stg6_0); \
+    tmp1 = _mm_madd_epi16(hi_10_13, stg6_0); \
+    tmp2 = _mm_madd_epi16(lo_10_13, stg4_0); \
+    tmp3 = _mm_madd_epi16(hi_10_13, stg4_0); \
+    tmp4 = _mm_madd_epi16(lo_11_12, stg6_0); \
+    tmp5 = _mm_madd_epi16(hi_11_12, stg6_0); \
+    tmp6 = _mm_madd_epi16(lo_11_12, stg4_0); \
+    tmp7 = _mm_madd_epi16(hi_11_12, stg4_0); \
+                                          \
+    tmp0 = _mm_add_epi32(tmp0, rounding); \
+    tmp1 = _mm_add_epi32(tmp1, rounding); \
+    tmp2 = _mm_add_epi32(tmp2, rounding); \
+    tmp3 = _mm_add_epi32(tmp3, rounding); \
+    tmp4 = _mm_add_epi32(tmp4, rounding); \
+    tmp5 = _mm_add_epi32(tmp5, rounding); \
+    tmp6 = _mm_add_epi32(tmp6, rounding); \
+    tmp7 = _mm_add_epi32(tmp7, rounding); \
+                                                 \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
+    tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
+    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
+    tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
+                                           \
+    stp2_10 = _mm_packs_epi32(tmp0, tmp1); \
+    stp2_13 = _mm_packs_epi32(tmp2, tmp3); \
+    stp2_11 = _mm_packs_epi32(tmp4, tmp5); \
+    stp2_12 = _mm_packs_epi32(tmp6, tmp7); \
+  }
+
 void vp9_short_idct16x16_sse2(int16_t *input, int16_t *output, int pitch) {
   const int half_pitch = pitch >> 1;
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
@@ -731,301 +1033,7 @@ void vp9_short_idct16x16_sse2(int16_t *input, int16_t *output, int pitch) {
                     in12, in13, in14, in15);
     }
 
-    // Stage2
-    {
-      const __m128i lo_1_15 = _mm_unpacklo_epi16(in1, in15);
-      const __m128i hi_1_15 = _mm_unpackhi_epi16(in1, in15);
-      const __m128i lo_9_7 = _mm_unpacklo_epi16(in9, in7);
-      const __m128i hi_9_7 = _mm_unpackhi_epi16(in9, in7);
-      const __m128i lo_5_11 = _mm_unpacklo_epi16(in5, in11);
-      const __m128i hi_5_11 = _mm_unpackhi_epi16(in5, in11);
-      const __m128i lo_13_3 = _mm_unpacklo_epi16(in13, in3);
-      const __m128i hi_13_3 = _mm_unpackhi_epi16(in13, in3);
-
-      tmp0 = _mm_madd_epi16(lo_1_15, stg2_0);
-      tmp1 = _mm_madd_epi16(hi_1_15, stg2_0);
-      tmp2 = _mm_madd_epi16(lo_1_15, stg2_1);
-      tmp3 = _mm_madd_epi16(hi_1_15, stg2_1);
-      tmp4 = _mm_madd_epi16(lo_9_7, stg2_2);
-      tmp5 = _mm_madd_epi16(hi_9_7, stg2_2);
-      tmp6 = _mm_madd_epi16(lo_9_7, stg2_3);
-      tmp7 = _mm_madd_epi16(hi_9_7, stg2_3);
-
-      tmp0 = _mm_add_epi32(tmp0, rounding);
-      tmp1 = _mm_add_epi32(tmp1, rounding);
-      tmp2 = _mm_add_epi32(tmp2, rounding);
-      tmp3 = _mm_add_epi32(tmp3, rounding);
-      tmp4 = _mm_add_epi32(tmp4, rounding);
-      tmp5 = _mm_add_epi32(tmp5, rounding);
-      tmp6 = _mm_add_epi32(tmp6, rounding);
-      tmp7 = _mm_add_epi32(tmp7, rounding);
-
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-      tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
-      tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
-      tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
-      tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
-
-      stp2_8 = _mm_packs_epi32(tmp0, tmp1);
-      stp2_15 = _mm_packs_epi32(tmp2, tmp3);
-      stp2_9 = _mm_packs_epi32(tmp4, tmp5);
-      stp2_14 = _mm_packs_epi32(tmp6, tmp7);
-
-      tmp0 = _mm_madd_epi16(lo_5_11, stg2_4);
-      tmp1 = _mm_madd_epi16(hi_5_11, stg2_4);
-      tmp2 = _mm_madd_epi16(lo_5_11, stg2_5);
-      tmp3 = _mm_madd_epi16(hi_5_11, stg2_5);
-      tmp4 = _mm_madd_epi16(lo_13_3, stg2_6);
-      tmp5 = _mm_madd_epi16(hi_13_3, stg2_6);
-      tmp6 = _mm_madd_epi16(lo_13_3, stg2_7);
-      tmp7 = _mm_madd_epi16(hi_13_3, stg2_7);
-
-      tmp0 = _mm_add_epi32(tmp0, rounding);
-      tmp1 = _mm_add_epi32(tmp1, rounding);
-      tmp2 = _mm_add_epi32(tmp2, rounding);
-      tmp3 = _mm_add_epi32(tmp3, rounding);
-      tmp4 = _mm_add_epi32(tmp4, rounding);
-      tmp5 = _mm_add_epi32(tmp5, rounding);
-      tmp6 = _mm_add_epi32(tmp6, rounding);
-      tmp7 = _mm_add_epi32(tmp7, rounding);
-
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-      tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
-      tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
-      tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
-      tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
-
-      stp2_10 = _mm_packs_epi32(tmp0, tmp1);
-      stp2_13 = _mm_packs_epi32(tmp2, tmp3);
-      stp2_11 = _mm_packs_epi32(tmp4, tmp5);
-      stp2_12 = _mm_packs_epi32(tmp6, tmp7);
-    }
-
-    // Stage3
-    {
-      const __m128i lo_2_14 = _mm_unpacklo_epi16(in2, in14);
-      const __m128i hi_2_14 = _mm_unpackhi_epi16(in2, in14);
-      const __m128i lo_10_6 = _mm_unpacklo_epi16(in10, in6);
-      const __m128i hi_10_6 = _mm_unpackhi_epi16(in10, in6);
-
-      tmp0 = _mm_madd_epi16(lo_2_14, stg3_0);
-      tmp1 = _mm_madd_epi16(hi_2_14, stg3_0);
-      tmp2 = _mm_madd_epi16(lo_2_14, stg3_1);
-      tmp3 = _mm_madd_epi16(hi_2_14, stg3_1);
-      tmp4 = _mm_madd_epi16(lo_10_6, stg3_2);
-      tmp5 = _mm_madd_epi16(hi_10_6, stg3_2);
-      tmp6 = _mm_madd_epi16(lo_10_6, stg3_3);
-      tmp7 = _mm_madd_epi16(hi_10_6, stg3_3);
-
-      tmp0 = _mm_add_epi32(tmp0, rounding);
-      tmp1 = _mm_add_epi32(tmp1, rounding);
-      tmp2 = _mm_add_epi32(tmp2, rounding);
-      tmp3 = _mm_add_epi32(tmp3, rounding);
-      tmp4 = _mm_add_epi32(tmp4, rounding);
-      tmp5 = _mm_add_epi32(tmp5, rounding);
-      tmp6 = _mm_add_epi32(tmp6, rounding);
-      tmp7 = _mm_add_epi32(tmp7, rounding);
-
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-      tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
-      tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
-      tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
-      tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
-
-      stp1_4 = _mm_packs_epi32(tmp0, tmp1);
-      stp1_7 = _mm_packs_epi32(tmp2, tmp3);
-      stp1_5 = _mm_packs_epi32(tmp4, tmp5);
-      stp1_6 = _mm_packs_epi32(tmp6, tmp7);
-
-      stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9);
-      stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);
-      stp1_10 = _mm_sub_epi16(stp2_11, stp2_10);
-      stp1_11 = _mm_add_epi16(stp2_11, stp2_10);
-
-      stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13);
-      stp1_13 = _mm_sub_epi16(stp2_12, stp2_13);
-      stp1_14 = _mm_sub_epi16(stp2_15, stp2_14);
-      stp1_15 = _mm_add_epi16(stp2_15, stp2_14);
-    }
-
-    // Stage4
-    {
-      const __m128i lo_0_8 = _mm_unpacklo_epi16(in0, in8);
-      const __m128i hi_0_8 = _mm_unpackhi_epi16(in0, in8);
-      const __m128i lo_4_12 = _mm_unpacklo_epi16(in4, in12);
-      const __m128i hi_4_12 = _mm_unpackhi_epi16(in4, in12);
-
-      const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);
-      const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14);
-      const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
-      const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);
-
-      tmp0 = _mm_madd_epi16(lo_0_8, stg4_0);
-      tmp1 = _mm_madd_epi16(hi_0_8, stg4_0);
-      tmp2 = _mm_madd_epi16(lo_0_8, stg4_1);
-      tmp3 = _mm_madd_epi16(hi_0_8, stg4_1);
-      tmp4 = _mm_madd_epi16(lo_4_12, stg4_2);
-      tmp5 = _mm_madd_epi16(hi_4_12, stg4_2);
-      tmp6 = _mm_madd_epi16(lo_4_12, stg4_3);
-      tmp7 = _mm_madd_epi16(hi_4_12, stg4_3);
-
-      tmp0 = _mm_add_epi32(tmp0, rounding);
-      tmp1 = _mm_add_epi32(tmp1, rounding);
-      tmp2 = _mm_add_epi32(tmp2, rounding);
-      tmp3 = _mm_add_epi32(tmp3, rounding);
-      tmp4 = _mm_add_epi32(tmp4, rounding);
-      tmp5 = _mm_add_epi32(tmp5, rounding);
-      tmp6 = _mm_add_epi32(tmp6, rounding);
-      tmp7 = _mm_add_epi32(tmp7, rounding);
-
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-      tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
-      tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
-      tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
-      tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
-
-      stp2_0 = _mm_packs_epi32(tmp0, tmp1);
-      stp2_1 = _mm_packs_epi32(tmp2, tmp3);
-      stp2_2 = _mm_packs_epi32(tmp4, tmp5);
-      stp2_3 = _mm_packs_epi32(tmp6, tmp7);
-
-      stp2_4 = _mm_add_epi16(stp1_4, stp1_5);
-      stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);
-      stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);
-      stp2_7 = _mm_add_epi16(stp1_7, stp1_6);
-
-      tmp0 = _mm_madd_epi16(lo_9_14, stg4_4);
-      tmp1 = _mm_madd_epi16(hi_9_14, stg4_4);
-      tmp2 = _mm_madd_epi16(lo_9_14, stg4_5);
-      tmp3 = _mm_madd_epi16(hi_9_14, stg4_5);
-      tmp4 = _mm_madd_epi16(lo_10_13, stg4_6);
-      tmp5 = _mm_madd_epi16(hi_10_13, stg4_6);
-      tmp6 = _mm_madd_epi16(lo_10_13, stg4_7);
-      tmp7 = _mm_madd_epi16(hi_10_13, stg4_7);
-
-      tmp0 = _mm_add_epi32(tmp0, rounding);
-      tmp1 = _mm_add_epi32(tmp1, rounding);
-      tmp2 = _mm_add_epi32(tmp2, rounding);
-      tmp3 = _mm_add_epi32(tmp3, rounding);
-      tmp4 = _mm_add_epi32(tmp4, rounding);
-      tmp5 = _mm_add_epi32(tmp5, rounding);
-      tmp6 = _mm_add_epi32(tmp6, rounding);
-      tmp7 = _mm_add_epi32(tmp7, rounding);
-
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-      tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
-      tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
-      tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
-      tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
-
-      stp2_9 = _mm_packs_epi32(tmp0, tmp1);
-      stp2_14 = _mm_packs_epi32(tmp2, tmp3);
-      stp2_10 = _mm_packs_epi32(tmp4, tmp5);
-      stp2_13 = _mm_packs_epi32(tmp6, tmp7);
-    }
-
-    // Stage5
-    {
-      const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);
-      const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5);
-
-      stp1_0 = _mm_add_epi16(stp2_0, stp2_3);
-      stp1_1 = _mm_add_epi16(stp2_1, stp2_2);
-      stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);
-      stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);
-
-      tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);
-      tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);
-      tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);
-      tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);
-
-      tmp0 = _mm_add_epi32(tmp0, rounding);
-      tmp1 = _mm_add_epi32(tmp1, rounding);
-      tmp2 = _mm_add_epi32(tmp2, rounding);
-      tmp3 = _mm_add_epi32(tmp3, rounding);
-
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-
-      stp1_5 = _mm_packs_epi32(tmp0, tmp1);
-      stp1_6 = _mm_packs_epi32(tmp2, tmp3);
-
-      stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);
-      stp1_9 = _mm_add_epi16(stp2_9, stp2_10);
-      stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);
-      stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11);
-
-      stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0);
-      stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);
-      stp1_14 = _mm_add_epi16(stp2_14, stp2_13);
-      stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0);
-    }
-
-    // Stage6
-    {
-      const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
-      const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);
-      const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);
-      const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12);
-
-      stp2_0 = _mm_add_epi16(stp1_0, stp2_7);
-      stp2_1 = _mm_add_epi16(stp1_1, stp1_6);
-      stp2_2 = _mm_add_epi16(stp1_2, stp1_5);
-      stp2_3 = _mm_add_epi16(stp1_3, stp2_4);
-      stp2_4 = _mm_sub_epi16(stp1_3, stp2_4);
-      stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);
-      stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);
-      stp2_7 = _mm_sub_epi16(stp1_0, stp2_7);
-
-      tmp0 = _mm_madd_epi16(lo_10_13, stg6_0);
-      tmp1 = _mm_madd_epi16(hi_10_13, stg6_0);
-      tmp2 = _mm_madd_epi16(lo_10_13, stg4_0);
-      tmp3 = _mm_madd_epi16(hi_10_13, stg4_0);
-      tmp4 = _mm_madd_epi16(lo_11_12, stg6_0);
-      tmp5 = _mm_madd_epi16(hi_11_12, stg6_0);
-      tmp6 = _mm_madd_epi16(lo_11_12, stg4_0);
-      tmp7 = _mm_madd_epi16(hi_11_12, stg4_0);
-
-      tmp0 = _mm_add_epi32(tmp0, rounding);
-      tmp1 = _mm_add_epi32(tmp1, rounding);
-      tmp2 = _mm_add_epi32(tmp2, rounding);
-      tmp3 = _mm_add_epi32(tmp3, rounding);
-      tmp4 = _mm_add_epi32(tmp4, rounding);
-      tmp5 = _mm_add_epi32(tmp5, rounding);
-      tmp6 = _mm_add_epi32(tmp6, rounding);
-      tmp7 = _mm_add_epi32(tmp7, rounding);
-
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-      tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
-      tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
-      tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
-      tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
-
-      stp2_10 = _mm_packs_epi32(tmp0, tmp1);
-      stp2_13 = _mm_packs_epi32(tmp2, tmp3);
-      stp2_11 = _mm_packs_epi32(tmp4, tmp5);
-      stp2_12 = _mm_packs_epi32(tmp6, tmp7);
-    }
+    IDCT16x16_1D
 
     // Stage7
     if (i == 0) {
@@ -1084,22 +1092,22 @@ void vp9_short_idct16x16_sse2(int16_t *input, int16_t *output, int pitch) {
       in15 = _mm_sub_epi16(stp2_0, stp1_15);
 
       // Final rounding and shift
-      in0 = _mm_add_epi16(in0, final_rounding);
-      in1 = _mm_add_epi16(in1, final_rounding);
-      in2 = _mm_add_epi16(in2, final_rounding);
-      in3 = _mm_add_epi16(in3, final_rounding);
-      in4 = _mm_add_epi16(in4, final_rounding);
-      in5 = _mm_add_epi16(in5, final_rounding);
-      in6 = _mm_add_epi16(in6, final_rounding);
-      in7 = _mm_add_epi16(in7, final_rounding);
-      in8 = _mm_add_epi16(in8, final_rounding);
-      in9 = _mm_add_epi16(in9, final_rounding);
-      in10 = _mm_add_epi16(in10, final_rounding);
-      in11 = _mm_add_epi16(in11, final_rounding);
-      in12 = _mm_add_epi16(in12, final_rounding);
-      in13 = _mm_add_epi16(in13, final_rounding);
-      in14 = _mm_add_epi16(in14, final_rounding);
-      in15 = _mm_add_epi16(in15, final_rounding);
+      in0 = _mm_adds_epi16(in0, final_rounding);
+      in1 = _mm_adds_epi16(in1, final_rounding);
+      in2 = _mm_adds_epi16(in2, final_rounding);
+      in3 = _mm_adds_epi16(in3, final_rounding);
+      in4 = _mm_adds_epi16(in4, final_rounding);
+      in5 = _mm_adds_epi16(in5, final_rounding);
+      in6 = _mm_adds_epi16(in6, final_rounding);
+      in7 = _mm_adds_epi16(in7, final_rounding);
+      in8 = _mm_adds_epi16(in8, final_rounding);
+      in9 = _mm_adds_epi16(in9, final_rounding);
+      in10 = _mm_adds_epi16(in10, final_rounding);
+      in11 = _mm_adds_epi16(in11, final_rounding);
+      in12 = _mm_adds_epi16(in12, final_rounding);
+      in13 = _mm_adds_epi16(in13, final_rounding);
+      in14 = _mm_adds_epi16(in14, final_rounding);
+      in15 = _mm_adds_epi16(in15, final_rounding);
 
       in0 = _mm_srai_epi16(in0, 6);
       in1 = _mm_srai_epi16(in1, 6);
@@ -1140,4 +1148,362 @@ void vp9_short_idct16x16_sse2(int16_t *input, int16_t *output, int pitch) {
     }
   }
 }
+
+void vp9_short_idct10_16x16_sse2(int16_t *input, int16_t *output, int pitch) {
+  const int half_pitch = pitch >> 1;
+  const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+  const __m128i final_rounding = _mm_set1_epi16(1<<5);
+  const __m128i zero = _mm_setzero_si128();
+
+  const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64);
+  const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64);
+  const __m128i stg2_2 = pair_set_epi16(cospi_14_64, -cospi_18_64);
+  const __m128i stg2_3 = pair_set_epi16(cospi_18_64, cospi_14_64);
+  const __m128i stg2_4 = pair_set_epi16(cospi_22_64, -cospi_10_64);
+  const __m128i stg2_5 = pair_set_epi16(cospi_10_64, cospi_22_64);
+  const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64);
+  const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64);
+
+  const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+  const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+  const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64);
+  const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64);
+
+  const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+  const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+  const __m128i stg4_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+  const __m128i stg4_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+  const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+  const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64);
+  const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+  const __m128i stg4_7 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+
+  const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+
+  __m128i in0 = zero, in1 = zero, in2 = zero, in3 = zero, in4 = zero,
+          in5 = zero, in6 = zero, in7 = zero, in8 = zero, in9 = zero,
+          in10 = zero, in11 = zero, in12 = zero, in13 = zero,
+          in14 = zero, in15 = zero;
+  __m128i l0 = zero, l1 = zero, l2 = zero, l3 = zero, l4 = zero, l5 = zero,
+          l6 = zero, l7 = zero, l8 = zero, l9 = zero, l10 = zero, l11 = zero,
+          l12 = zero, l13 = zero, l14 = zero, l15 = zero;
+
+  __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
+          stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+          stp1_8_0, stp1_12_0;
+  __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
+          stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15;
+  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+  int i;
+
+  // 1-D idct. Load input data.
+  in0 = _mm_load_si128((__m128i *)input);
+  in8 = _mm_load_si128((__m128i *)(input + 8 * 1));
+  in1 = _mm_load_si128((__m128i *)(input + 8 * 2));
+  in9 = _mm_load_si128((__m128i *)(input + 8 * 3));
+  in2 = _mm_load_si128((__m128i *)(input + 8 * 4));
+  in10 = _mm_load_si128((__m128i *)(input + 8 * 5));
+  in3 = _mm_load_si128((__m128i *)(input + 8 * 6));
+  in11 = _mm_load_si128((__m128i *)(input + 8 * 7));
+
+  TRANSPOSE_8X4(in0, in1, in2, in3, in0, in1, in2, in3);
+  TRANSPOSE_8X4(in8, in9, in10, in11, in8, in9, in10, in11);
+
+  // Stage2
+  {
+    const __m128i lo_1_15 = _mm_unpackhi_epi16(in0, in11);
+    const __m128i lo_9_7 = _mm_unpackhi_epi16(in8, in3);
+    const __m128i lo_5_11 = _mm_unpackhi_epi16(in2, in9);
+    const __m128i lo_13_3 = _mm_unpackhi_epi16(in10, in1);
+
+    tmp0 = _mm_madd_epi16(lo_1_15, stg2_0);
+    tmp2 = _mm_madd_epi16(lo_1_15, stg2_1);
+    tmp4 = _mm_madd_epi16(lo_9_7, stg2_2);
+    tmp6 = _mm_madd_epi16(lo_9_7, stg2_3);
+    tmp1 = _mm_madd_epi16(lo_5_11, stg2_4);
+    tmp3 = _mm_madd_epi16(lo_5_11, stg2_5);
+    tmp5 = _mm_madd_epi16(lo_13_3, stg2_6);
+    tmp7 = _mm_madd_epi16(lo_13_3, stg2_7);
+
+    tmp0 = _mm_add_epi32(tmp0, rounding);
+    tmp2 = _mm_add_epi32(tmp2, rounding);
+    tmp4 = _mm_add_epi32(tmp4, rounding);
+    tmp6 = _mm_add_epi32(tmp6, rounding);
+    tmp1 = _mm_add_epi32(tmp1, rounding);
+    tmp3 = _mm_add_epi32(tmp3, rounding);
+    tmp5 = _mm_add_epi32(tmp5, rounding);
+    tmp7 = _mm_add_epi32(tmp7, rounding);
+
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
+    tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
+    tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
+
+    stp2_8 = _mm_packs_epi32(tmp0, zero);
+    stp2_15 = _mm_packs_epi32(tmp2, zero);
+    stp2_9 = _mm_packs_epi32(tmp4, zero);
+    stp2_14 = _mm_packs_epi32(tmp6, zero);
+
+    stp2_10 = _mm_packs_epi32(tmp1, zero);
+    stp2_13 = _mm_packs_epi32(tmp3, zero);
+    stp2_11 = _mm_packs_epi32(tmp5, zero);
+    stp2_12 = _mm_packs_epi32(tmp7, zero);
+  }
+
+  // Stage3
+  {
+    const __m128i lo_2_14 = _mm_unpacklo_epi16(in1, in11);
+    const __m128i lo_10_6 = _mm_unpacklo_epi16(in9, in3);
+
+    tmp0 = _mm_madd_epi16(lo_2_14, stg3_0);
+    tmp2 = _mm_madd_epi16(lo_2_14, stg3_1);
+    tmp4 = _mm_madd_epi16(lo_10_6, stg3_2);
+    tmp6 = _mm_madd_epi16(lo_10_6, stg3_3);
+
+    tmp0 = _mm_add_epi32(tmp0, rounding);
+    tmp2 = _mm_add_epi32(tmp2, rounding);
+    tmp4 = _mm_add_epi32(tmp4, rounding);
+    tmp6 = _mm_add_epi32(tmp6, rounding);
+
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+
+    stp1_4 = _mm_packs_epi32(tmp0, zero);
+    stp1_7 = _mm_packs_epi32(tmp2, zero);
+    stp1_5 = _mm_packs_epi32(tmp4, zero);
+    stp1_6 = _mm_packs_epi32(tmp6, zero);
+
+    stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9);
+    stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);
+    stp1_10 = _mm_sub_epi16(stp2_11, stp2_10);
+    stp1_11 = _mm_add_epi16(stp2_11, stp2_10);
+
+    stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13);
+    stp1_13 = _mm_sub_epi16(stp2_12, stp2_13);
+    stp1_14 = _mm_sub_epi16(stp2_15, stp2_14);
+    stp1_15 = _mm_add_epi16(stp2_15, stp2_14);
+  }
+
+  // Stage4
+  {
+    const __m128i lo_0_8 = _mm_unpacklo_epi16(in0, in8);
+    const __m128i lo_4_12 = _mm_unpacklo_epi16(in2, in10);
+    const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
+
+    tmp0 = _mm_madd_epi16(lo_0_8, stg4_0);
+    tmp2 = _mm_madd_epi16(lo_0_8, stg4_1);
+    tmp4 = _mm_madd_epi16(lo_4_12, stg4_2);
+    tmp6 = _mm_madd_epi16(lo_4_12, stg4_3);
+    tmp1 = _mm_madd_epi16(lo_9_14, stg4_4);
+    tmp3 = _mm_madd_epi16(lo_9_14, stg4_5);
+    tmp5 = _mm_madd_epi16(lo_10_13, stg4_6);
+    tmp7 = _mm_madd_epi16(lo_10_13, stg4_7);
+
+    tmp0 = _mm_add_epi32(tmp0, rounding);
+    tmp2 = _mm_add_epi32(tmp2, rounding);
+    tmp4 = _mm_add_epi32(tmp4, rounding);
+    tmp6 = _mm_add_epi32(tmp6, rounding);
+    tmp1 = _mm_add_epi32(tmp1, rounding);
+    tmp3 = _mm_add_epi32(tmp3, rounding);
+    tmp5 = _mm_add_epi32(tmp5, rounding);
+    tmp7 = _mm_add_epi32(tmp7, rounding);
+
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
+    tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
+    tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
+
+    stp2_0 = _mm_packs_epi32(tmp0, zero);
+    stp2_1 = _mm_packs_epi32(tmp2, zero);
+    stp2_2 = _mm_packs_epi32(tmp4, zero);
+    stp2_3 = _mm_packs_epi32(tmp6, zero);
+    stp2_9 = _mm_packs_epi32(tmp1, zero);
+    stp2_14 = _mm_packs_epi32(tmp3, zero);
+    stp2_10 = _mm_packs_epi32(tmp5, zero);
+    stp2_13 = _mm_packs_epi32(tmp7, zero);
+
+    stp2_4 = _mm_add_epi16(stp1_4, stp1_5);
+    stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);
+    stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);
+    stp2_7 = _mm_add_epi16(stp1_7, stp1_6);
+  }
+
+  // Stage5 and Stage6
+  {
+    stp1_0 = _mm_add_epi16(stp2_0, stp2_3);
+    stp1_1 = _mm_add_epi16(stp2_1, stp2_2);
+    stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);
+    stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);
+
+    stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);
+    stp1_9 = _mm_add_epi16(stp2_9, stp2_10);
+    stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);
+    stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11);
+
+    stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0);
+    stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);
+    stp1_14 = _mm_add_epi16(stp2_14, stp2_13);
+    stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0);
+  }
+
+  // Stage6
+  {
+    const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
+    const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);
+
+    tmp1 = _mm_madd_epi16(lo_6_5, stg4_1);
+    tmp3 = _mm_madd_epi16(lo_6_5, stg4_0);
+    tmp0 = _mm_madd_epi16(lo_10_13, stg6_0);
+    tmp2 = _mm_madd_epi16(lo_10_13, stg4_0);
+    tmp4 = _mm_madd_epi16(lo_11_12, stg6_0);
+    tmp6 = _mm_madd_epi16(lo_11_12, stg4_0);
+
+    tmp1 = _mm_add_epi32(tmp1, rounding);
+    tmp3 = _mm_add_epi32(tmp3, rounding);
+    tmp0 = _mm_add_epi32(tmp0, rounding);
+    tmp2 = _mm_add_epi32(tmp2, rounding);
+    tmp4 = _mm_add_epi32(tmp4, rounding);
+    tmp6 = _mm_add_epi32(tmp6, rounding);
+
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+
+    stp1_5 = _mm_packs_epi32(tmp1, zero);
+    stp1_6 = _mm_packs_epi32(tmp3, zero);
+    stp2_10 = _mm_packs_epi32(tmp0, zero);
+    stp2_13 = _mm_packs_epi32(tmp2, zero);
+    stp2_11 = _mm_packs_epi32(tmp4, zero);
+    stp2_12 = _mm_packs_epi32(tmp6, zero);
+
+    stp2_0 = _mm_add_epi16(stp1_0, stp2_7);
+    stp2_1 = _mm_add_epi16(stp1_1, stp1_6);
+    stp2_2 = _mm_add_epi16(stp1_2, stp1_5);
+    stp2_3 = _mm_add_epi16(stp1_3, stp2_4);
+    stp2_4 = _mm_sub_epi16(stp1_3, stp2_4);
+    stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);
+    stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);
+    stp2_7 = _mm_sub_epi16(stp1_0, stp2_7);
+  }
+
+  // Stage7. Left 8x16 only.
+  l0 = _mm_add_epi16(stp2_0, stp1_15);
+  l1 = _mm_add_epi16(stp2_1, stp1_14);
+  l2 = _mm_add_epi16(stp2_2, stp2_13);
+  l3 = _mm_add_epi16(stp2_3, stp2_12);
+  l4 = _mm_add_epi16(stp2_4, stp2_11);
+  l5 = _mm_add_epi16(stp2_5, stp2_10);
+  l6 = _mm_add_epi16(stp2_6, stp1_9);
+  l7 = _mm_add_epi16(stp2_7, stp1_8);
+  l8 = _mm_sub_epi16(stp2_7, stp1_8);
+  l9 = _mm_sub_epi16(stp2_6, stp1_9);
+  l10 = _mm_sub_epi16(stp2_5, stp2_10);
+  l11 = _mm_sub_epi16(stp2_4, stp2_11);
+  l12 = _mm_sub_epi16(stp2_3, stp2_12);
+  l13 = _mm_sub_epi16(stp2_2, stp2_13);
+  l14 = _mm_sub_epi16(stp2_1, stp1_14);
+  l15 = _mm_sub_epi16(stp2_0, stp1_15);
+
+  // 2-D idct. We do 2 8x16 blocks.
+  for (i = 0; i < 2; i++) {
+    if (i == 0)
+      TRANSPOSE_4X8(l0, l1, l2, l3, l4, l5, l6, l7, in0, in1, in2, in3, in4,
+                    in5, in6, in7);
+
+    if (i == 1)
+      TRANSPOSE_4X8(l8, l9, l10, l11, l12, l13, l14, l15, in0, in1, in2, in3,
+                    in4, in5, in6, in7);
+
+    in8 = in9 = in10 = in11 = in12 = in13 = in14 = in15 = zero;
+
+    IDCT16x16_1D
+
+    // Stage7
+    in0 = _mm_add_epi16(stp2_0, stp1_15);
+    in1 = _mm_add_epi16(stp2_1, stp1_14);
+    in2 = _mm_add_epi16(stp2_2, stp2_13);
+    in3 = _mm_add_epi16(stp2_3, stp2_12);
+    in4 = _mm_add_epi16(stp2_4, stp2_11);
+    in5 = _mm_add_epi16(stp2_5, stp2_10);
+    in6 = _mm_add_epi16(stp2_6, stp1_9);
+    in7 = _mm_add_epi16(stp2_7, stp1_8);
+    in8 = _mm_sub_epi16(stp2_7, stp1_8);
+    in9 = _mm_sub_epi16(stp2_6, stp1_9);
+    in10 = _mm_sub_epi16(stp2_5, stp2_10);
+    in11 = _mm_sub_epi16(stp2_4, stp2_11);
+    in12 = _mm_sub_epi16(stp2_3, stp2_12);
+    in13 = _mm_sub_epi16(stp2_2, stp2_13);
+    in14 = _mm_sub_epi16(stp2_1, stp1_14);
+    in15 = _mm_sub_epi16(stp2_0, stp1_15);
+
+    // Final rounding and shift
+    in0 = _mm_adds_epi16(in0, final_rounding);
+    in1 = _mm_adds_epi16(in1, final_rounding);
+    in2 = _mm_adds_epi16(in2, final_rounding);
+    in3 = _mm_adds_epi16(in3, final_rounding);
+    in4 = _mm_adds_epi16(in4, final_rounding);
+    in5 = _mm_adds_epi16(in5, final_rounding);
+    in6 = _mm_adds_epi16(in6, final_rounding);
+    in7 = _mm_adds_epi16(in7, final_rounding);
+    in8 = _mm_adds_epi16(in8, final_rounding);
+    in9 = _mm_adds_epi16(in9, final_rounding);
+    in10 = _mm_adds_epi16(in10, final_rounding);
+    in11 = _mm_adds_epi16(in11, final_rounding);
+    in12 = _mm_adds_epi16(in12, final_rounding);
+    in13 = _mm_adds_epi16(in13, final_rounding);
+    in14 = _mm_adds_epi16(in14, final_rounding);
+    in15 = _mm_adds_epi16(in15, final_rounding);
+
+    in0 = _mm_srai_epi16(in0, 6);
+    in1 = _mm_srai_epi16(in1, 6);
+    in2 = _mm_srai_epi16(in2, 6);
+    in3 = _mm_srai_epi16(in3, 6);
+    in4 = _mm_srai_epi16(in4, 6);
+    in5 = _mm_srai_epi16(in5, 6);
+    in6 = _mm_srai_epi16(in6, 6);
+    in7 = _mm_srai_epi16(in7, 6);
+    in8 = _mm_srai_epi16(in8, 6);
+    in9 = _mm_srai_epi16(in9, 6);
+    in10 = _mm_srai_epi16(in10, 6);
+    in11 = _mm_srai_epi16(in11, 6);
+    in12 = _mm_srai_epi16(in12, 6);
+    in13 = _mm_srai_epi16(in13, 6);
+    in14 = _mm_srai_epi16(in14, 6);
+    in15 = _mm_srai_epi16(in15, 6);
+
+    // Store results
+    _mm_store_si128((__m128i *)output, in0);
+    _mm_store_si128((__m128i *)(output + half_pitch * 1), in1);
+    _mm_store_si128((__m128i *)(output + half_pitch * 2), in2);
+    _mm_store_si128((__m128i *)(output + half_pitch * 3), in3);
+    _mm_store_si128((__m128i *)(output + half_pitch * 4), in4);
+    _mm_store_si128((__m128i *)(output + half_pitch * 5), in5);
+    _mm_store_si128((__m128i *)(output + half_pitch * 6), in6);
+    _mm_store_si128((__m128i *)(output + half_pitch * 7), in7);
+    _mm_store_si128((__m128i *)(output + half_pitch * 8), in8);
+    _mm_store_si128((__m128i *)(output + half_pitch * 9), in9);
+    _mm_store_si128((__m128i *)(output + half_pitch * 10), in10);
+    _mm_store_si128((__m128i *)(output + half_pitch * 11), in11);
+    _mm_store_si128((__m128i *)(output + half_pitch * 12), in12);
+    _mm_store_si128((__m128i *)(output + half_pitch * 13), in13);
+    _mm_store_si128((__m128i *)(output + half_pitch * 14), in14);
+    _mm_store_si128((__m128i *)(output + half_pitch * 15), in15);
+    output += 8;
+  }
+}
 #endif
diff --git a/vp9/decoder/vp9_dequantize.c b/vp9/decoder/vp9_dequantize.c
index d38f916..57eed17 100644
--- a/vp9/decoder/vp9_dequantize.c
+++ b/vp9/decoder/vp9_dequantize.c
@@ -315,7 +315,7 @@ void vp9_dequant_idct_add_16x16_c(int16_t *input, const int16_t *dq,
     input[48] *= dq[1];
 
     // the idct halves ( >> 1) the pitch
-    vp9_short_idct10_16x16_c(input, output, 32);
+    vp9_short_idct10_16x16(input, output, 32);
 
     input[0] = input[1] = input[2] = input[3] = 0;
     input[16] = input[17] = input[18] = 0;