2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
11 #include <emmintrin.h> // SSE2
12 #include "vp9/common/vp9_idct.h" // for cospi constants
13 #include "vpx_ports/mem.h"
15 void vp9_fdct4x4_sse2(const int16_t *input, int16_t *output, int stride) {
16 // The 2D transform is done with two passes which are actually pretty
17 // similar. In the first one, we transform the columns and transpose
18 // the results. In the second one, we transform the rows. To achieve that,
19 // as the first pass results are transposed, we tranpose the columns (that
20 // is the transposed rows) and transpose the results (so that it goes back
21 // in normal/row positions).
24 // When we use them, in one case, they are all the same. In all others
25 // it's a pair of them that we need to repeat four times. This is done
26 // by constructing the 32 bit constant corresponding to that pair.
27 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
28 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
29 const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
30 const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
31 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
32 const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
33 const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
34 const __m128i kOne = _mm_set1_epi16(1);
38 in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
39 in0 = _mm_unpacklo_epi64(in0, _mm_loadl_epi64((const __m128i *)
40 (input + 1 * stride)));
41 in1 = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
42 in1 = _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)
43 (input + 3 * stride)), in1);
46 in0 = _mm_slli_epi16(in0, 4);
47 in1 = _mm_slli_epi16(in1, 4);
48 // if (i == 0 && input[0]) input[0] += 1;
50 // The mask will only contain wether the first value is zero, all
51 // other comparison will fail as something shifted by 4 (above << 4)
52 // can never be equal to one. To increment in the non-zero case, we
53 // add the mask and one for the first element:
54 // - if zero, mask = -1, v = v - 1 + 1 = v
55 // - if non-zero, mask = 0, v = v + 0 + 1 = v + 1
56 __m128i mask = _mm_cmpeq_epi16(in0, k__nonzero_bias_a);
57 in0 = _mm_add_epi16(in0, mask);
58 in0 = _mm_add_epi16(in0, k__nonzero_bias_b);
61 // Do the two transform/transpose passes
62 for (pass = 0; pass < 2; ++pass) {
63 // Transform 1/2: Add/substract
64 const __m128i r0 = _mm_add_epi16(in0, in1);
65 const __m128i r1 = _mm_sub_epi16(in0, in1);
66 const __m128i r2 = _mm_unpacklo_epi64(r0, r1);
67 const __m128i r3 = _mm_unpackhi_epi64(r0, r1);
68 // Transform 1/2: Interleave to do the multiply by constants which gets us
70 const __m128i t0 = _mm_unpacklo_epi16(r2, r3);
71 const __m128i t2 = _mm_unpackhi_epi16(r2, r3);
72 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
73 const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
74 const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p08_p24);
75 const __m128i u6 = _mm_madd_epi16(t2, k__cospi_p24_m08);
76 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
77 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
78 const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
79 const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
80 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
81 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
82 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
83 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
84 // Combine and transpose
85 const __m128i res0 = _mm_packs_epi32(w0, w2);
86 const __m128i res1 = _mm_packs_epi32(w4, w6);
87 // 00 01 02 03 20 21 22 23
88 // 10 11 12 13 30 31 32 33
89 const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1);
90 const __m128i tr0_1 = _mm_unpackhi_epi16(res0, res1);
91 // 00 10 01 11 02 12 03 13
92 // 20 30 21 31 22 32 23 33
93 in0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
94 in1 = _mm_unpackhi_epi32(tr0_0, tr0_1);
95 in1 = _mm_shuffle_epi32(in1, 0x4E);
96 // 00 10 20 30 01 11 21 31 in0 contains 0 followed by 1
97 // 02 12 22 32 03 13 23 33 in1 contains 2 followed by 3
99 in1 = _mm_shuffle_epi32(in1, 0x4E);
100 // Post-condition output and store it (v + 1) >> 2, taking advantage
101 // of the fact 1/3 are stored just after 0/2.
103 __m128i out01 = _mm_add_epi16(in0, kOne);
104 __m128i out23 = _mm_add_epi16(in1, kOne);
105 out01 = _mm_srai_epi16(out01, 2);
106 out23 = _mm_srai_epi16(out23, 2);
107 _mm_storeu_si128((__m128i *)(output + 0 * 4), out01);
108 _mm_storeu_si128((__m128i *)(output + 2 * 4), out23);
112 static INLINE void load_buffer_4x4(const int16_t *input, __m128i *in,
114 const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
115 const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
118 in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
119 in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
120 in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
121 in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
123 in[0] = _mm_slli_epi16(in[0], 4);
124 in[1] = _mm_slli_epi16(in[1], 4);
125 in[2] = _mm_slli_epi16(in[2], 4);
126 in[3] = _mm_slli_epi16(in[3], 4);
128 mask = _mm_cmpeq_epi16(in[0], k__nonzero_bias_a);
129 in[0] = _mm_add_epi16(in[0], mask);
130 in[0] = _mm_add_epi16(in[0], k__nonzero_bias_b);
133 static INLINE void write_buffer_4x4(int16_t *output, __m128i *res) {
134 const __m128i kOne = _mm_set1_epi16(1);
135 __m128i in01 = _mm_unpacklo_epi64(res[0], res[1]);
136 __m128i in23 = _mm_unpacklo_epi64(res[2], res[3]);
137 __m128i out01 = _mm_add_epi16(in01, kOne);
138 __m128i out23 = _mm_add_epi16(in23, kOne);
139 out01 = _mm_srai_epi16(out01, 2);
140 out23 = _mm_srai_epi16(out23, 2);
141 _mm_store_si128((__m128i *)(output + 0 * 8), out01);
142 _mm_store_si128((__m128i *)(output + 1 * 8), out23);
145 static INLINE void transpose_4x4(__m128i *res) {
146 // Combine and transpose
147 // 00 01 02 03 20 21 22 23
148 // 10 11 12 13 30 31 32 33
149 const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
150 const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]);
152 // 00 10 01 11 02 12 03 13
153 // 20 30 21 31 22 32 23 33
154 res[0] = _mm_unpacklo_epi32(tr0_0, tr0_1);
155 res[2] = _mm_unpackhi_epi32(tr0_0, tr0_1);
157 // 00 10 20 30 01 11 21 31
158 // 02 12 22 32 03 13 23 33
159 // only use the first 4 16-bit integers
160 res[1] = _mm_unpackhi_epi64(res[0], res[0]);
161 res[3] = _mm_unpackhi_epi64(res[2], res[2]);
164 void fdct4_sse2(__m128i *in) {
165 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
166 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
167 const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
168 const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
169 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
172 u[0]=_mm_unpacklo_epi16(in[0], in[1]);
173 u[1]=_mm_unpacklo_epi16(in[3], in[2]);
175 v[0] = _mm_add_epi16(u[0], u[1]);
176 v[1] = _mm_sub_epi16(u[0], u[1]);
178 u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16); // 0
179 u[1] = _mm_madd_epi16(v[0], k__cospi_p16_m16); // 2
180 u[2] = _mm_madd_epi16(v[1], k__cospi_p08_p24); // 1
181 u[3] = _mm_madd_epi16(v[1], k__cospi_p24_m08); // 3
183 v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
184 v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
185 v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
186 v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
187 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
188 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
189 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
190 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
192 in[0] = _mm_packs_epi32(u[0], u[1]);
193 in[1] = _mm_packs_epi32(u[2], u[3]);
197 void fadst4_sse2(__m128i *in) {
198 const __m128i k__sinpi_p01_p02 = pair_set_epi16(sinpi_1_9, sinpi_2_9);
199 const __m128i k__sinpi_p04_m01 = pair_set_epi16(sinpi_4_9, -sinpi_1_9);
200 const __m128i k__sinpi_p03_p04 = pair_set_epi16(sinpi_3_9, sinpi_4_9);
201 const __m128i k__sinpi_m03_p02 = pair_set_epi16(-sinpi_3_9, sinpi_2_9);
202 const __m128i k__sinpi_p03_p03 = _mm_set1_epi16(sinpi_3_9);
203 const __m128i kZero = _mm_set1_epi16(0);
204 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
206 __m128i in7 = _mm_add_epi16(in[0], in[1]);
208 u[0] = _mm_unpacklo_epi16(in[0], in[1]);
209 u[1] = _mm_unpacklo_epi16(in[2], in[3]);
210 u[2] = _mm_unpacklo_epi16(in7, kZero);
211 u[3] = _mm_unpacklo_epi16(in[2], kZero);
212 u[4] = _mm_unpacklo_epi16(in[3], kZero);
214 v[0] = _mm_madd_epi16(u[0], k__sinpi_p01_p02); // s0 + s2
215 v[1] = _mm_madd_epi16(u[1], k__sinpi_p03_p04); // s4 + s5
216 v[2] = _mm_madd_epi16(u[2], k__sinpi_p03_p03); // x1
217 v[3] = _mm_madd_epi16(u[0], k__sinpi_p04_m01); // s1 - s3
218 v[4] = _mm_madd_epi16(u[1], k__sinpi_m03_p02); // -s4 + s6
219 v[5] = _mm_madd_epi16(u[3], k__sinpi_p03_p03); // s4
220 v[6] = _mm_madd_epi16(u[4], k__sinpi_p03_p03);
222 u[0] = _mm_add_epi32(v[0], v[1]);
223 u[1] = _mm_sub_epi32(v[2], v[6]);
224 u[2] = _mm_add_epi32(v[3], v[4]);
225 u[3] = _mm_sub_epi32(u[2], u[0]);
226 u[4] = _mm_slli_epi32(v[5], 2);
227 u[5] = _mm_sub_epi32(u[4], v[5]);
228 u[6] = _mm_add_epi32(u[3], u[5]);
230 v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
231 v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
232 v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
233 v[3] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
235 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
236 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
237 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
238 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
240 in[0] = _mm_packs_epi32(u[0], u[2]);
241 in[1] = _mm_packs_epi32(u[1], u[3]);
245 void vp9_fht4x4_sse2(const int16_t *input, int16_t *output,
246 int stride, int tx_type) {
251 vp9_fdct4x4_sse2(input, output, stride);
254 load_buffer_4x4(input, in, stride);
257 write_buffer_4x4(output, in);
260 load_buffer_4x4(input, in, stride);
263 write_buffer_4x4(output, in);
266 load_buffer_4x4(input, in, stride);
269 write_buffer_4x4(output, in);
277 void vp9_fdct8x8_sse2(const int16_t *input, int16_t *output, int stride) {
280 // When we use them, in one case, they are all the same. In all others
281 // it's a pair of them that we need to repeat four times. This is done
282 // by constructing the 32 bit constant corresponding to that pair.
283 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
284 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
285 const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
286 const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
287 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
288 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
289 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
290 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
291 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
293 __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
294 __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
295 __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
296 __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
297 __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
298 __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
299 __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
300 __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
301 // Pre-condition input (shift by two)
302 in0 = _mm_slli_epi16(in0, 2);
303 in1 = _mm_slli_epi16(in1, 2);
304 in2 = _mm_slli_epi16(in2, 2);
305 in3 = _mm_slli_epi16(in3, 2);
306 in4 = _mm_slli_epi16(in4, 2);
307 in5 = _mm_slli_epi16(in5, 2);
308 in6 = _mm_slli_epi16(in6, 2);
309 in7 = _mm_slli_epi16(in7, 2);
311 // We do two passes, first the columns, then the rows. The results of the
312 // first pass are transposed so that the same column code can be reused. The
313 // results of the second pass are also transposed so that the rows (processed
314 // as columns) are put back in row positions.
315 for (pass = 0; pass < 2; pass++) {
316 // To store results of each pass before the transpose.
317 __m128i res0, res1, res2, res3, res4, res5, res6, res7;
319 const __m128i q0 = _mm_add_epi16(in0, in7);
320 const __m128i q1 = _mm_add_epi16(in1, in6);
321 const __m128i q2 = _mm_add_epi16(in2, in5);
322 const __m128i q3 = _mm_add_epi16(in3, in4);
323 const __m128i q4 = _mm_sub_epi16(in3, in4);
324 const __m128i q5 = _mm_sub_epi16(in2, in5);
325 const __m128i q6 = _mm_sub_epi16(in1, in6);
326 const __m128i q7 = _mm_sub_epi16(in0, in7);
327 // Work on first four results
330 const __m128i r0 = _mm_add_epi16(q0, q3);
331 const __m128i r1 = _mm_add_epi16(q1, q2);
332 const __m128i r2 = _mm_sub_epi16(q1, q2);
333 const __m128i r3 = _mm_sub_epi16(q0, q3);
334 // Interleave to do the multiply by constants which gets us into 32bits
335 const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
336 const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
337 const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
338 const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
339 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
340 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
341 const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
342 const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16);
343 const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
344 const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08);
345 const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
346 const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24);
347 // dct_const_round_shift
348 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
349 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
350 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
351 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
352 const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
353 const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
354 const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
355 const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
356 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
357 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
358 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
359 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
360 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
361 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
362 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
363 const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
365 res0 = _mm_packs_epi32(w0, w1);
366 res4 = _mm_packs_epi32(w2, w3);
367 res2 = _mm_packs_epi32(w4, w5);
368 res6 = _mm_packs_epi32(w6, w7);
370 // Work on next four results
372 // Interleave to do the multiply by constants which gets us into 32bits
373 const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
374 const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
375 const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16);
376 const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16);
377 const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16);
378 const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16);
379 // dct_const_round_shift
380 const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING);
381 const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING);
382 const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING);
383 const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING);
384 const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS);
385 const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS);
386 const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS);
387 const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS);
389 const __m128i r0 = _mm_packs_epi32(s0, s1);
390 const __m128i r1 = _mm_packs_epi32(s2, s3);
392 const __m128i x0 = _mm_add_epi16(q4, r0);
393 const __m128i x1 = _mm_sub_epi16(q4, r0);
394 const __m128i x2 = _mm_sub_epi16(q7, r1);
395 const __m128i x3 = _mm_add_epi16(q7, r1);
396 // Interleave to do the multiply by constants which gets us into 32bits
397 const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
398 const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
399 const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
400 const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
401 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04);
402 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04);
403 const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28);
404 const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28);
405 const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20);
406 const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20);
407 const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12);
408 const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12);
409 // dct_const_round_shift
410 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
411 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
412 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
413 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
414 const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
415 const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
416 const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
417 const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
418 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
419 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
420 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
421 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
422 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
423 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
424 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
425 const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
427 res1 = _mm_packs_epi32(w0, w1);
428 res7 = _mm_packs_epi32(w2, w3);
429 res5 = _mm_packs_epi32(w4, w5);
430 res3 = _mm_packs_epi32(w6, w7);
432 // Transpose the 8x8.
434 // 00 01 02 03 04 05 06 07
435 // 10 11 12 13 14 15 16 17
436 // 20 21 22 23 24 25 26 27
437 // 30 31 32 33 34 35 36 37
438 // 40 41 42 43 44 45 46 47
439 // 50 51 52 53 54 55 56 57
440 // 60 61 62 63 64 65 66 67
441 // 70 71 72 73 74 75 76 77
442 const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1);
443 const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3);
444 const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1);
445 const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3);
446 const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5);
447 const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7);
448 const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5);
449 const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7);
450 // 00 10 01 11 02 12 03 13
451 // 20 30 21 31 22 32 23 33
452 // 04 14 05 15 06 16 07 17
453 // 24 34 25 35 26 36 27 37
454 // 40 50 41 51 42 52 43 53
455 // 60 70 61 71 62 72 63 73
456 // 54 54 55 55 56 56 57 57
457 // 64 74 65 75 66 76 67 77
458 const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
459 const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
460 const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
461 const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
462 const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
463 const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
464 const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
465 const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
466 // 00 10 20 30 01 11 21 31
467 // 40 50 60 70 41 51 61 71
468 // 02 12 22 32 03 13 23 33
469 // 42 52 62 72 43 53 63 73
470 // 04 14 24 34 05 15 21 36
471 // 44 54 64 74 45 55 61 76
472 // 06 16 26 36 07 17 27 37
473 // 46 56 66 76 47 57 67 77
474 in0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
475 in1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
476 in2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
477 in3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
478 in4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
479 in5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
480 in6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
481 in7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
482 // 00 10 20 30 40 50 60 70
483 // 01 11 21 31 41 51 61 71
484 // 02 12 22 32 42 52 62 72
485 // 03 13 23 33 43 53 63 73
486 // 04 14 24 34 44 54 64 74
487 // 05 15 25 35 45 55 65 75
488 // 06 16 26 36 46 56 66 76
489 // 07 17 27 37 47 57 67 77
492 // Post-condition output and store it
494 // Post-condition (division by two)
495 // division of two 16 bits signed numbers using shifts
496 // n / 2 = (n - (n >> 15)) >> 1
497 const __m128i sign_in0 = _mm_srai_epi16(in0, 15);
498 const __m128i sign_in1 = _mm_srai_epi16(in1, 15);
499 const __m128i sign_in2 = _mm_srai_epi16(in2, 15);
500 const __m128i sign_in3 = _mm_srai_epi16(in3, 15);
501 const __m128i sign_in4 = _mm_srai_epi16(in4, 15);
502 const __m128i sign_in5 = _mm_srai_epi16(in5, 15);
503 const __m128i sign_in6 = _mm_srai_epi16(in6, 15);
504 const __m128i sign_in7 = _mm_srai_epi16(in7, 15);
505 in0 = _mm_sub_epi16(in0, sign_in0);
506 in1 = _mm_sub_epi16(in1, sign_in1);
507 in2 = _mm_sub_epi16(in2, sign_in2);
508 in3 = _mm_sub_epi16(in3, sign_in3);
509 in4 = _mm_sub_epi16(in4, sign_in4);
510 in5 = _mm_sub_epi16(in5, sign_in5);
511 in6 = _mm_sub_epi16(in6, sign_in6);
512 in7 = _mm_sub_epi16(in7, sign_in7);
513 in0 = _mm_srai_epi16(in0, 1);
514 in1 = _mm_srai_epi16(in1, 1);
515 in2 = _mm_srai_epi16(in2, 1);
516 in3 = _mm_srai_epi16(in3, 1);
517 in4 = _mm_srai_epi16(in4, 1);
518 in5 = _mm_srai_epi16(in5, 1);
519 in6 = _mm_srai_epi16(in6, 1);
520 in7 = _mm_srai_epi16(in7, 1);
522 _mm_store_si128((__m128i *)(output + 0 * 8), in0);
523 _mm_store_si128((__m128i *)(output + 1 * 8), in1);
524 _mm_store_si128((__m128i *)(output + 2 * 8), in2);
525 _mm_store_si128((__m128i *)(output + 3 * 8), in3);
526 _mm_store_si128((__m128i *)(output + 4 * 8), in4);
527 _mm_store_si128((__m128i *)(output + 5 * 8), in5);
528 _mm_store_si128((__m128i *)(output + 6 * 8), in6);
529 _mm_store_si128((__m128i *)(output + 7 * 8), in7);
534 static INLINE void load_buffer_8x8(const int16_t *input, __m128i *in,
536 in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
537 in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
538 in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
539 in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
540 in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
541 in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
542 in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
543 in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
545 in[0] = _mm_slli_epi16(in[0], 2);
546 in[1] = _mm_slli_epi16(in[1], 2);
547 in[2] = _mm_slli_epi16(in[2], 2);
548 in[3] = _mm_slli_epi16(in[3], 2);
549 in[4] = _mm_slli_epi16(in[4], 2);
550 in[5] = _mm_slli_epi16(in[5], 2);
551 in[6] = _mm_slli_epi16(in[6], 2);
552 in[7] = _mm_slli_epi16(in[7], 2);
555 // right shift and rounding
556 static INLINE void right_shift_8x8(__m128i *res, int const bit) {
557 const __m128i kOne = _mm_set1_epi16(1);
558 const int bit_m02 = bit - 2;
559 __m128i sign0 = _mm_srai_epi16(res[0], 15);
560 __m128i sign1 = _mm_srai_epi16(res[1], 15);
561 __m128i sign2 = _mm_srai_epi16(res[2], 15);
562 __m128i sign3 = _mm_srai_epi16(res[3], 15);
563 __m128i sign4 = _mm_srai_epi16(res[4], 15);
564 __m128i sign5 = _mm_srai_epi16(res[5], 15);
565 __m128i sign6 = _mm_srai_epi16(res[6], 15);
566 __m128i sign7 = _mm_srai_epi16(res[7], 15);
569 __m128i k_const_rounding = _mm_slli_epi16(kOne, bit_m02);
570 res[0] = _mm_add_epi16(res[0], k_const_rounding);
571 res[1] = _mm_add_epi16(res[1], k_const_rounding);
572 res[2] = _mm_add_epi16(res[2], k_const_rounding);
573 res[3] = _mm_add_epi16(res[3], k_const_rounding);
574 res[4] = _mm_add_epi16(res[4], k_const_rounding);
575 res[5] = _mm_add_epi16(res[5], k_const_rounding);
576 res[6] = _mm_add_epi16(res[6], k_const_rounding);
577 res[7] = _mm_add_epi16(res[7], k_const_rounding);
580 res[0] = _mm_sub_epi16(res[0], sign0);
581 res[1] = _mm_sub_epi16(res[1], sign1);
582 res[2] = _mm_sub_epi16(res[2], sign2);
583 res[3] = _mm_sub_epi16(res[3], sign3);
584 res[4] = _mm_sub_epi16(res[4], sign4);
585 res[5] = _mm_sub_epi16(res[5], sign5);
586 res[6] = _mm_sub_epi16(res[6], sign6);
587 res[7] = _mm_sub_epi16(res[7], sign7);
589 res[0] = _mm_srai_epi16(res[0], bit);
590 res[1] = _mm_srai_epi16(res[1], bit);
591 res[2] = _mm_srai_epi16(res[2], bit);
592 res[3] = _mm_srai_epi16(res[3], bit);
593 res[4] = _mm_srai_epi16(res[4], bit);
594 res[5] = _mm_srai_epi16(res[5], bit);
595 res[6] = _mm_srai_epi16(res[6], bit);
596 res[7] = _mm_srai_epi16(res[7], bit);
600 static INLINE void write_buffer_8x8(int16_t *output, __m128i *res, int stride) {
601 _mm_store_si128((__m128i *)(output + 0 * stride), res[0]);
602 _mm_store_si128((__m128i *)(output + 1 * stride), res[1]);
603 _mm_store_si128((__m128i *)(output + 2 * stride), res[2]);
604 _mm_store_si128((__m128i *)(output + 3 * stride), res[3]);
605 _mm_store_si128((__m128i *)(output + 4 * stride), res[4]);
606 _mm_store_si128((__m128i *)(output + 5 * stride), res[5]);
607 _mm_store_si128((__m128i *)(output + 6 * stride), res[6]);
608 _mm_store_si128((__m128i *)(output + 7 * stride), res[7]);
611 // perform in-place transpose
612 static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
613 const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
614 const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
615 const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]);
616 const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]);
617 const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
618 const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);
619 const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]);
620 const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]);
621 // 00 10 01 11 02 12 03 13
622 // 20 30 21 31 22 32 23 33
623 // 04 14 05 15 06 16 07 17
624 // 24 34 25 35 26 36 27 37
625 // 40 50 41 51 42 52 43 53
626 // 60 70 61 71 62 72 63 73
627 // 44 54 45 55 46 56 47 57
628 // 64 74 65 75 66 76 67 77
629 const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
630 const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5);
631 const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
632 const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5);
633 const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3);
634 const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
635 const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3);
636 const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
637 // 00 10 20 30 01 11 21 31
638 // 40 50 60 70 41 51 61 71
639 // 02 12 22 32 03 13 23 33
640 // 42 52 62 72 43 53 63 73
641 // 04 14 24 34 05 15 25 35
642 // 44 54 64 74 45 55 65 75
643 // 06 16 26 36 07 17 27 37
644 // 46 56 66 76 47 57 67 77
645 res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
646 res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
647 res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
648 res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
649 res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5);
650 res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5);
651 res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
652 res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
653 // 00 10 20 30 40 50 60 70
654 // 01 11 21 31 41 51 61 71
655 // 02 12 22 32 42 52 62 72
656 // 03 13 23 33 43 53 63 73
657 // 04 14 24 34 44 54 64 74
658 // 05 15 25 35 45 55 65 75
659 // 06 16 26 36 46 56 66 76
660 // 07 17 27 37 47 57 67 77
663 void fdct8_sse2(__m128i *in) {
665 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
666 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
667 const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
668 const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
669 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
670 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
671 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
672 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
673 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
674 __m128i u0, u1, u2, u3, u4, u5, u6, u7;
675 __m128i v0, v1, v2, v3, v4, v5, v6, v7;
676 __m128i s0, s1, s2, s3, s4, s5, s6, s7;
679 s0 = _mm_add_epi16(in[0], in[7]);
680 s1 = _mm_add_epi16(in[1], in[6]);
681 s2 = _mm_add_epi16(in[2], in[5]);
682 s3 = _mm_add_epi16(in[3], in[4]);
683 s4 = _mm_sub_epi16(in[3], in[4]);
684 s5 = _mm_sub_epi16(in[2], in[5]);
685 s6 = _mm_sub_epi16(in[1], in[6]);
686 s7 = _mm_sub_epi16(in[0], in[7]);
688 u0 = _mm_add_epi16(s0, s3);
689 u1 = _mm_add_epi16(s1, s2);
690 u2 = _mm_sub_epi16(s1, s2);
691 u3 = _mm_sub_epi16(s0, s3);
692 // interleave and perform butterfly multiplication/addition
693 v0 = _mm_unpacklo_epi16(u0, u1);
694 v1 = _mm_unpackhi_epi16(u0, u1);
695 v2 = _mm_unpacklo_epi16(u2, u3);
696 v3 = _mm_unpackhi_epi16(u2, u3);
698 u0 = _mm_madd_epi16(v0, k__cospi_p16_p16);
699 u1 = _mm_madd_epi16(v1, k__cospi_p16_p16);
700 u2 = _mm_madd_epi16(v0, k__cospi_p16_m16);
701 u3 = _mm_madd_epi16(v1, k__cospi_p16_m16);
702 u4 = _mm_madd_epi16(v2, k__cospi_p24_p08);
703 u5 = _mm_madd_epi16(v3, k__cospi_p24_p08);
704 u6 = _mm_madd_epi16(v2, k__cospi_m08_p24);
705 u7 = _mm_madd_epi16(v3, k__cospi_m08_p24);
707 // shift and rounding
708 v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
709 v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
710 v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
711 v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
712 v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
713 v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
714 v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
715 v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
717 u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
718 u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
719 u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
720 u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
721 u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
722 u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
723 u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
724 u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
726 in[0] = _mm_packs_epi32(u0, u1);
727 in[2] = _mm_packs_epi32(u4, u5);
728 in[4] = _mm_packs_epi32(u2, u3);
729 in[6] = _mm_packs_epi32(u6, u7);
732 // interleave and perform butterfly multiplication/addition
733 u0 = _mm_unpacklo_epi16(s6, s5);
734 u1 = _mm_unpackhi_epi16(s6, s5);
735 v0 = _mm_madd_epi16(u0, k__cospi_p16_m16);
736 v1 = _mm_madd_epi16(u1, k__cospi_p16_m16);
737 v2 = _mm_madd_epi16(u0, k__cospi_p16_p16);
738 v3 = _mm_madd_epi16(u1, k__cospi_p16_p16);
740 // shift and rounding
741 u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
742 u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
743 u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
744 u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
746 v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
747 v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
748 v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
749 v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
751 u0 = _mm_packs_epi32(v0, v1);
752 u1 = _mm_packs_epi32(v2, v3);
755 s0 = _mm_add_epi16(s4, u0);
756 s1 = _mm_sub_epi16(s4, u0);
757 s2 = _mm_sub_epi16(s7, u1);
758 s3 = _mm_add_epi16(s7, u1);
761 u0 = _mm_unpacklo_epi16(s0, s3);
762 u1 = _mm_unpackhi_epi16(s0, s3);
763 u2 = _mm_unpacklo_epi16(s1, s2);
764 u3 = _mm_unpackhi_epi16(s1, s2);
766 v0 = _mm_madd_epi16(u0, k__cospi_p28_p04);
767 v1 = _mm_madd_epi16(u1, k__cospi_p28_p04);
768 v2 = _mm_madd_epi16(u2, k__cospi_p12_p20);
769 v3 = _mm_madd_epi16(u3, k__cospi_p12_p20);
770 v4 = _mm_madd_epi16(u2, k__cospi_m20_p12);
771 v5 = _mm_madd_epi16(u3, k__cospi_m20_p12);
772 v6 = _mm_madd_epi16(u0, k__cospi_m04_p28);
773 v7 = _mm_madd_epi16(u1, k__cospi_m04_p28);
775 // shift and rounding
776 u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
777 u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
778 u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
779 u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
780 u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING);
781 u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING);
782 u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING);
783 u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING);
785 v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
786 v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
787 v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
788 v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
789 v4 = _mm_srai_epi32(u4, DCT_CONST_BITS);
790 v5 = _mm_srai_epi32(u5, DCT_CONST_BITS);
791 v6 = _mm_srai_epi32(u6, DCT_CONST_BITS);
792 v7 = _mm_srai_epi32(u7, DCT_CONST_BITS);
794 in[1] = _mm_packs_epi32(v0, v1);
795 in[3] = _mm_packs_epi32(v4, v5);
796 in[5] = _mm_packs_epi32(v2, v3);
797 in[7] = _mm_packs_epi32(v6, v7);
800 array_transpose_8x8(in, in);
803 void fadst8_sse2(__m128i *in) {
805 const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
806 const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
807 const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
808 const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64);
809 const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64);
810 const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
811 const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64);
812 const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64);
813 const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
814 const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
815 const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
816 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
817 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
818 const __m128i k__const_0 = _mm_set1_epi16(0);
819 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
821 __m128i u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15;
822 __m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15;
823 __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;
824 __m128i s0, s1, s2, s3, s4, s5, s6, s7;
825 __m128i in0, in1, in2, in3, in4, in5, in6, in7;
827 // properly aligned for butterfly input
837 // column transformation
839 // interleave and multiply/add into 32-bit integer
840 s0 = _mm_unpacklo_epi16(in0, in1);
841 s1 = _mm_unpackhi_epi16(in0, in1);
842 s2 = _mm_unpacklo_epi16(in2, in3);
843 s3 = _mm_unpackhi_epi16(in2, in3);
844 s4 = _mm_unpacklo_epi16(in4, in5);
845 s5 = _mm_unpackhi_epi16(in4, in5);
846 s6 = _mm_unpacklo_epi16(in6, in7);
847 s7 = _mm_unpackhi_epi16(in6, in7);
849 u0 = _mm_madd_epi16(s0, k__cospi_p02_p30);
850 u1 = _mm_madd_epi16(s1, k__cospi_p02_p30);
851 u2 = _mm_madd_epi16(s0, k__cospi_p30_m02);
852 u3 = _mm_madd_epi16(s1, k__cospi_p30_m02);
853 u4 = _mm_madd_epi16(s2, k__cospi_p10_p22);
854 u5 = _mm_madd_epi16(s3, k__cospi_p10_p22);
855 u6 = _mm_madd_epi16(s2, k__cospi_p22_m10);
856 u7 = _mm_madd_epi16(s3, k__cospi_p22_m10);
857 u8 = _mm_madd_epi16(s4, k__cospi_p18_p14);
858 u9 = _mm_madd_epi16(s5, k__cospi_p18_p14);
859 u10 = _mm_madd_epi16(s4, k__cospi_p14_m18);
860 u11 = _mm_madd_epi16(s5, k__cospi_p14_m18);
861 u12 = _mm_madd_epi16(s6, k__cospi_p26_p06);
862 u13 = _mm_madd_epi16(s7, k__cospi_p26_p06);
863 u14 = _mm_madd_epi16(s6, k__cospi_p06_m26);
864 u15 = _mm_madd_epi16(s7, k__cospi_p06_m26);
867 w0 = _mm_add_epi32(u0, u8);
868 w1 = _mm_add_epi32(u1, u9);
869 w2 = _mm_add_epi32(u2, u10);
870 w3 = _mm_add_epi32(u3, u11);
871 w4 = _mm_add_epi32(u4, u12);
872 w5 = _mm_add_epi32(u5, u13);
873 w6 = _mm_add_epi32(u6, u14);
874 w7 = _mm_add_epi32(u7, u15);
875 w8 = _mm_sub_epi32(u0, u8);
876 w9 = _mm_sub_epi32(u1, u9);
877 w10 = _mm_sub_epi32(u2, u10);
878 w11 = _mm_sub_epi32(u3, u11);
879 w12 = _mm_sub_epi32(u4, u12);
880 w13 = _mm_sub_epi32(u5, u13);
881 w14 = _mm_sub_epi32(u6, u14);
882 w15 = _mm_sub_epi32(u7, u15);
884 // shift and rounding
885 v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING);
886 v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING);
887 v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING);
888 v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING);
889 v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING);
890 v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING);
891 v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING);
892 v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING);
893 v8 = _mm_add_epi32(w8, k__DCT_CONST_ROUNDING);
894 v9 = _mm_add_epi32(w9, k__DCT_CONST_ROUNDING);
895 v10 = _mm_add_epi32(w10, k__DCT_CONST_ROUNDING);
896 v11 = _mm_add_epi32(w11, k__DCT_CONST_ROUNDING);
897 v12 = _mm_add_epi32(w12, k__DCT_CONST_ROUNDING);
898 v13 = _mm_add_epi32(w13, k__DCT_CONST_ROUNDING);
899 v14 = _mm_add_epi32(w14, k__DCT_CONST_ROUNDING);
900 v15 = _mm_add_epi32(w15, k__DCT_CONST_ROUNDING);
902 u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
903 u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
904 u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
905 u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
906 u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
907 u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
908 u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
909 u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
910 u8 = _mm_srai_epi32(v8, DCT_CONST_BITS);
911 u9 = _mm_srai_epi32(v9, DCT_CONST_BITS);
912 u10 = _mm_srai_epi32(v10, DCT_CONST_BITS);
913 u11 = _mm_srai_epi32(v11, DCT_CONST_BITS);
914 u12 = _mm_srai_epi32(v12, DCT_CONST_BITS);
915 u13 = _mm_srai_epi32(v13, DCT_CONST_BITS);
916 u14 = _mm_srai_epi32(v14, DCT_CONST_BITS);
917 u15 = _mm_srai_epi32(v15, DCT_CONST_BITS);
919 // back to 16-bit and pack 8 integers into __m128i
920 in[0] = _mm_packs_epi32(u0, u1);
921 in[1] = _mm_packs_epi32(u2, u3);
922 in[2] = _mm_packs_epi32(u4, u5);
923 in[3] = _mm_packs_epi32(u6, u7);
924 in[4] = _mm_packs_epi32(u8, u9);
925 in[5] = _mm_packs_epi32(u10, u11);
926 in[6] = _mm_packs_epi32(u12, u13);
927 in[7] = _mm_packs_epi32(u14, u15);
930 s0 = _mm_add_epi16(in[0], in[2]);
931 s1 = _mm_add_epi16(in[1], in[3]);
932 s2 = _mm_sub_epi16(in[0], in[2]);
933 s3 = _mm_sub_epi16(in[1], in[3]);
934 u0 = _mm_unpacklo_epi16(in[4], in[5]);
935 u1 = _mm_unpackhi_epi16(in[4], in[5]);
936 u2 = _mm_unpacklo_epi16(in[6], in[7]);
937 u3 = _mm_unpackhi_epi16(in[6], in[7]);
939 v0 = _mm_madd_epi16(u0, k__cospi_p08_p24);
940 v1 = _mm_madd_epi16(u1, k__cospi_p08_p24);
941 v2 = _mm_madd_epi16(u0, k__cospi_p24_m08);
942 v3 = _mm_madd_epi16(u1, k__cospi_p24_m08);
943 v4 = _mm_madd_epi16(u2, k__cospi_m24_p08);
944 v5 = _mm_madd_epi16(u3, k__cospi_m24_p08);
945 v6 = _mm_madd_epi16(u2, k__cospi_p08_p24);
946 v7 = _mm_madd_epi16(u3, k__cospi_p08_p24);
948 w0 = _mm_add_epi32(v0, v4);
949 w1 = _mm_add_epi32(v1, v5);
950 w2 = _mm_add_epi32(v2, v6);
951 w3 = _mm_add_epi32(v3, v7);
952 w4 = _mm_sub_epi32(v0, v4);
953 w5 = _mm_sub_epi32(v1, v5);
954 w6 = _mm_sub_epi32(v2, v6);
955 w7 = _mm_sub_epi32(v3, v7);
957 v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING);
958 v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING);
959 v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING);
960 v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING);
961 v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING);
962 v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING);
963 v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING);
964 v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING);
966 u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
967 u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
968 u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
969 u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
970 u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
971 u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
972 u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
973 u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
975 // back to 16-bit intergers
976 s4 = _mm_packs_epi32(u0, u1);
977 s5 = _mm_packs_epi32(u2, u3);
978 s6 = _mm_packs_epi32(u4, u5);
979 s7 = _mm_packs_epi32(u6, u7);
982 u0 = _mm_unpacklo_epi16(s2, s3);
983 u1 = _mm_unpackhi_epi16(s2, s3);
984 u2 = _mm_unpacklo_epi16(s6, s7);
985 u3 = _mm_unpackhi_epi16(s6, s7);
987 v0 = _mm_madd_epi16(u0, k__cospi_p16_p16);
988 v1 = _mm_madd_epi16(u1, k__cospi_p16_p16);
989 v2 = _mm_madd_epi16(u0, k__cospi_p16_m16);
990 v3 = _mm_madd_epi16(u1, k__cospi_p16_m16);
991 v4 = _mm_madd_epi16(u2, k__cospi_p16_p16);
992 v5 = _mm_madd_epi16(u3, k__cospi_p16_p16);
993 v6 = _mm_madd_epi16(u2, k__cospi_p16_m16);
994 v7 = _mm_madd_epi16(u3, k__cospi_p16_m16);
996 u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
997 u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
998 u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
999 u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
1000 u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING);
1001 u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING);
1002 u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING);
1003 u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING);
1005 v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
1006 v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
1007 v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
1008 v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
1009 v4 = _mm_srai_epi32(u4, DCT_CONST_BITS);
1010 v5 = _mm_srai_epi32(u5, DCT_CONST_BITS);
1011 v6 = _mm_srai_epi32(u6, DCT_CONST_BITS);
1012 v7 = _mm_srai_epi32(u7, DCT_CONST_BITS);
1014 s2 = _mm_packs_epi32(v0, v1);
1015 s3 = _mm_packs_epi32(v2, v3);
1016 s6 = _mm_packs_epi32(v4, v5);
1017 s7 = _mm_packs_epi32(v6, v7);
1019 // FIXME(jingning): do subtract using bit inversion?
1021 in[1] = _mm_sub_epi16(k__const_0, s4);
1023 in[3] = _mm_sub_epi16(k__const_0, s2);
1025 in[5] = _mm_sub_epi16(k__const_0, s7);
1027 in[7] = _mm_sub_epi16(k__const_0, s1);
1030 array_transpose_8x8(in, in);
1033 void vp9_fht8x8_sse2(const int16_t *input, int16_t *output,
1034 int stride, int tx_type) {
1039 vp9_fdct8x8_sse2(input, output, stride);
1042 load_buffer_8x8(input, in, stride);
1045 right_shift_8x8(in, 1);
1046 write_buffer_8x8(output, in, 8);
1049 load_buffer_8x8(input, in, stride);
1052 right_shift_8x8(in, 1);
1053 write_buffer_8x8(output, in, 8);
1056 load_buffer_8x8(input, in, stride);
1059 right_shift_8x8(in, 1);
1060 write_buffer_8x8(output, in, 8);
1068 void vp9_fdct16x16_sse2(const int16_t *input, int16_t *output, int stride) {
1069 // The 2D transform is done with two passes which are actually pretty
1070 // similar. In the first one, we transform the columns and transpose
1071 // the results. In the second one, we transform the rows. To achieve that,
1072 // as the first pass results are transposed, we tranpose the columns (that
1073 // is the transposed rows) and transpose the results (so that it goes back
1074 // in normal/row positions).
1076 // We need an intermediate buffer between passes.
1077 DECLARE_ALIGNED_ARRAY(16, int16_t, intermediate, 256);
1078 const int16_t *in = input;
1079 int16_t *out = intermediate;
1081 // When we use them, in one case, they are all the same. In all others
1082 // it's a pair of them that we need to repeat four times. This is done
1083 // by constructing the 32 bit constant corresponding to that pair.
1084 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
1085 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
1086 const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
1087 const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
1088 const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
1089 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
1090 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
1091 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
1092 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
1093 const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64);
1094 const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64);
1095 const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
1096 const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
1097 const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64);
1098 const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64);
1099 const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
1100 const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
1101 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
1102 const __m128i kOne = _mm_set1_epi16(1);
1103 // Do the two transform/transpose passes
1104 for (pass = 0; pass < 2; ++pass) {
1105 // We process eight columns (transposed rows in second pass) at a time.
1107 for (column_start = 0; column_start < 16; column_start += 8) {
1108 __m128i in00, in01, in02, in03, in04, in05, in06, in07;
1109 __m128i in08, in09, in10, in11, in12, in13, in14, in15;
1110 __m128i input0, input1, input2, input3, input4, input5, input6, input7;
1111 __m128i step1_0, step1_1, step1_2, step1_3;
1112 __m128i step1_4, step1_5, step1_6, step1_7;
1113 __m128i step2_1, step2_2, step2_3, step2_4, step2_5, step2_6;
1114 __m128i step3_0, step3_1, step3_2, step3_3;
1115 __m128i step3_4, step3_5, step3_6, step3_7;
1116 __m128i res00, res01, res02, res03, res04, res05, res06, res07;
1117 __m128i res08, res09, res10, res11, res12, res13, res14, res15;
1118 // Load and pre-condition input.
1120 in00 = _mm_load_si128((const __m128i *)(in + 0 * stride));
1121 in01 = _mm_load_si128((const __m128i *)(in + 1 * stride));
1122 in02 = _mm_load_si128((const __m128i *)(in + 2 * stride));
1123 in03 = _mm_load_si128((const __m128i *)(in + 3 * stride));
1124 in04 = _mm_load_si128((const __m128i *)(in + 4 * stride));
1125 in05 = _mm_load_si128((const __m128i *)(in + 5 * stride));
1126 in06 = _mm_load_si128((const __m128i *)(in + 6 * stride));
1127 in07 = _mm_load_si128((const __m128i *)(in + 7 * stride));
1128 in08 = _mm_load_si128((const __m128i *)(in + 8 * stride));
1129 in09 = _mm_load_si128((const __m128i *)(in + 9 * stride));
1130 in10 = _mm_load_si128((const __m128i *)(in + 10 * stride));
1131 in11 = _mm_load_si128((const __m128i *)(in + 11 * stride));
1132 in12 = _mm_load_si128((const __m128i *)(in + 12 * stride));
1133 in13 = _mm_load_si128((const __m128i *)(in + 13 * stride));
1134 in14 = _mm_load_si128((const __m128i *)(in + 14 * stride));
1135 in15 = _mm_load_si128((const __m128i *)(in + 15 * stride));
1137 in00 = _mm_slli_epi16(in00, 2);
1138 in01 = _mm_slli_epi16(in01, 2);
1139 in02 = _mm_slli_epi16(in02, 2);
1140 in03 = _mm_slli_epi16(in03, 2);
1141 in04 = _mm_slli_epi16(in04, 2);
1142 in05 = _mm_slli_epi16(in05, 2);
1143 in06 = _mm_slli_epi16(in06, 2);
1144 in07 = _mm_slli_epi16(in07, 2);
1145 in08 = _mm_slli_epi16(in08, 2);
1146 in09 = _mm_slli_epi16(in09, 2);
1147 in10 = _mm_slli_epi16(in10, 2);
1148 in11 = _mm_slli_epi16(in11, 2);
1149 in12 = _mm_slli_epi16(in12, 2);
1150 in13 = _mm_slli_epi16(in13, 2);
1151 in14 = _mm_slli_epi16(in14, 2);
1152 in15 = _mm_slli_epi16(in15, 2);
1154 in00 = _mm_load_si128((const __m128i *)(in + 0 * 16));
1155 in01 = _mm_load_si128((const __m128i *)(in + 1 * 16));
1156 in02 = _mm_load_si128((const __m128i *)(in + 2 * 16));
1157 in03 = _mm_load_si128((const __m128i *)(in + 3 * 16));
1158 in04 = _mm_load_si128((const __m128i *)(in + 4 * 16));
1159 in05 = _mm_load_si128((const __m128i *)(in + 5 * 16));
1160 in06 = _mm_load_si128((const __m128i *)(in + 6 * 16));
1161 in07 = _mm_load_si128((const __m128i *)(in + 7 * 16));
1162 in08 = _mm_load_si128((const __m128i *)(in + 8 * 16));
1163 in09 = _mm_load_si128((const __m128i *)(in + 9 * 16));
1164 in10 = _mm_load_si128((const __m128i *)(in + 10 * 16));
1165 in11 = _mm_load_si128((const __m128i *)(in + 11 * 16));
1166 in12 = _mm_load_si128((const __m128i *)(in + 12 * 16));
1167 in13 = _mm_load_si128((const __m128i *)(in + 13 * 16));
1168 in14 = _mm_load_si128((const __m128i *)(in + 14 * 16));
1169 in15 = _mm_load_si128((const __m128i *)(in + 15 * 16));
1171 in00 = _mm_add_epi16(in00, kOne);
1172 in01 = _mm_add_epi16(in01, kOne);
1173 in02 = _mm_add_epi16(in02, kOne);
1174 in03 = _mm_add_epi16(in03, kOne);
1175 in04 = _mm_add_epi16(in04, kOne);
1176 in05 = _mm_add_epi16(in05, kOne);
1177 in06 = _mm_add_epi16(in06, kOne);
1178 in07 = _mm_add_epi16(in07, kOne);
1179 in08 = _mm_add_epi16(in08, kOne);
1180 in09 = _mm_add_epi16(in09, kOne);
1181 in10 = _mm_add_epi16(in10, kOne);
1182 in11 = _mm_add_epi16(in11, kOne);
1183 in12 = _mm_add_epi16(in12, kOne);
1184 in13 = _mm_add_epi16(in13, kOne);
1185 in14 = _mm_add_epi16(in14, kOne);
1186 in15 = _mm_add_epi16(in15, kOne);
1187 in00 = _mm_srai_epi16(in00, 2);
1188 in01 = _mm_srai_epi16(in01, 2);
1189 in02 = _mm_srai_epi16(in02, 2);
1190 in03 = _mm_srai_epi16(in03, 2);
1191 in04 = _mm_srai_epi16(in04, 2);
1192 in05 = _mm_srai_epi16(in05, 2);
1193 in06 = _mm_srai_epi16(in06, 2);
1194 in07 = _mm_srai_epi16(in07, 2);
1195 in08 = _mm_srai_epi16(in08, 2);
1196 in09 = _mm_srai_epi16(in09, 2);
1197 in10 = _mm_srai_epi16(in10, 2);
1198 in11 = _mm_srai_epi16(in11, 2);
1199 in12 = _mm_srai_epi16(in12, 2);
1200 in13 = _mm_srai_epi16(in13, 2);
1201 in14 = _mm_srai_epi16(in14, 2);
1202 in15 = _mm_srai_epi16(in15, 2);
1205 // Calculate input for the first 8 results.
1207 input0 = _mm_add_epi16(in00, in15);
1208 input1 = _mm_add_epi16(in01, in14);
1209 input2 = _mm_add_epi16(in02, in13);
1210 input3 = _mm_add_epi16(in03, in12);
1211 input4 = _mm_add_epi16(in04, in11);
1212 input5 = _mm_add_epi16(in05, in10);
1213 input6 = _mm_add_epi16(in06, in09);
1214 input7 = _mm_add_epi16(in07, in08);
1216 // Calculate input for the next 8 results.
1218 step1_0 = _mm_sub_epi16(in07, in08);
1219 step1_1 = _mm_sub_epi16(in06, in09);
1220 step1_2 = _mm_sub_epi16(in05, in10);
1221 step1_3 = _mm_sub_epi16(in04, in11);
1222 step1_4 = _mm_sub_epi16(in03, in12);
1223 step1_5 = _mm_sub_epi16(in02, in13);
1224 step1_6 = _mm_sub_epi16(in01, in14);
1225 step1_7 = _mm_sub_epi16(in00, in15);
1227 // Work on the first eight values; fdct8(input, even_results);
1230 const __m128i q0 = _mm_add_epi16(input0, input7);
1231 const __m128i q1 = _mm_add_epi16(input1, input6);
1232 const __m128i q2 = _mm_add_epi16(input2, input5);
1233 const __m128i q3 = _mm_add_epi16(input3, input4);
1234 const __m128i q4 = _mm_sub_epi16(input3, input4);
1235 const __m128i q5 = _mm_sub_epi16(input2, input5);
1236 const __m128i q6 = _mm_sub_epi16(input1, input6);
1237 const __m128i q7 = _mm_sub_epi16(input0, input7);
1238 // Work on first four results
1241 const __m128i r0 = _mm_add_epi16(q0, q3);
1242 const __m128i r1 = _mm_add_epi16(q1, q2);
1243 const __m128i r2 = _mm_sub_epi16(q1, q2);
1244 const __m128i r3 = _mm_sub_epi16(q0, q3);
1245 // Interleave to do the multiply by constants which gets us
1247 const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
1248 const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
1249 const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
1250 const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
1251 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
1252 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
1253 const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
1254 const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16);
1255 const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
1256 const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08);
1257 const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
1258 const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24);
1259 // dct_const_round_shift
1260 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
1261 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
1262 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
1263 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
1264 const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
1265 const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
1266 const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
1267 const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
1268 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
1269 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
1270 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
1271 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
1272 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
1273 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
1274 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
1275 const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
1277 res00 = _mm_packs_epi32(w0, w1);
1278 res08 = _mm_packs_epi32(w2, w3);
1279 res04 = _mm_packs_epi32(w4, w5);
1280 res12 = _mm_packs_epi32(w6, w7);
1282 // Work on next four results
1284 // Interleave to do the multiply by constants which gets us
1286 const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
1287 const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
1288 const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16);
1289 const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16);
1290 const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16);
1291 const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16);
1292 // dct_const_round_shift
1293 const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING);
1294 const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING);
1295 const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING);
1296 const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING);
1297 const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS);
1298 const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS);
1299 const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS);
1300 const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS);
1302 const __m128i r0 = _mm_packs_epi32(s0, s1);
1303 const __m128i r1 = _mm_packs_epi32(s2, s3);
1305 const __m128i x0 = _mm_add_epi16(q4, r0);
1306 const __m128i x1 = _mm_sub_epi16(q4, r0);
1307 const __m128i x2 = _mm_sub_epi16(q7, r1);
1308 const __m128i x3 = _mm_add_epi16(q7, r1);
1309 // Interleave to do the multiply by constants which gets us
1311 const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
1312 const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
1313 const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
1314 const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
1315 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04);
1316 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04);
1317 const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28);
1318 const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28);
1319 const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20);
1320 const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20);
1321 const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12);
1322 const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12);
1323 // dct_const_round_shift
1324 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
1325 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
1326 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
1327 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
1328 const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
1329 const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
1330 const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
1331 const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
1332 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
1333 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
1334 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
1335 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
1336 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
1337 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
1338 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
1339 const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
1341 res02 = _mm_packs_epi32(w0, w1);
1342 res14 = _mm_packs_epi32(w2, w3);
1343 res10 = _mm_packs_epi32(w4, w5);
1344 res06 = _mm_packs_epi32(w6, w7);
1347 // Work on the next eight values; step1 -> odd_results
1351 const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2);
1352 const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2);
1353 const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3);
1354 const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3);
1355 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_m16);
1356 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_m16);
1357 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p16_m16);
1358 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p16_m16);
1359 // dct_const_round_shift
1360 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
1361 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
1362 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
1363 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
1364 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
1365 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
1366 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
1367 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
1369 step2_2 = _mm_packs_epi32(w0, w1);
1370 step2_3 = _mm_packs_epi32(w2, w3);
1373 const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2);
1374 const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2);
1375 const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3);
1376 const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3);
1377 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
1378 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
1379 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p16_p16);
1380 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p16_p16);
1381 // dct_const_round_shift
1382 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
1383 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
1384 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
1385 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
1386 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
1387 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
1388 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
1389 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
1391 step2_5 = _mm_packs_epi32(w0, w1);
1392 step2_4 = _mm_packs_epi32(w2, w3);
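// step2_2 and step2_3 are the scaled differences (step1_5 - step1_2) and
// (step1_4 - step1_3) times cospi_16_64, while step2_5 and step2_4 are the
// corresponding sums times cospi_16_64; each product goes through
// dct_const_round_shift before being packed back to 16 bits.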
1396 step3_0 = _mm_add_epi16(step1_0, step2_3);
1397 step3_1 = _mm_add_epi16(step1_1, step2_2);
1398 step3_2 = _mm_sub_epi16(step1_1, step2_2);
1399 step3_3 = _mm_sub_epi16(step1_0, step2_3);
1400 step3_4 = _mm_sub_epi16(step1_7, step2_4);
1401 step3_5 = _mm_sub_epi16(step1_6, step2_5);
1402 step3_6 = _mm_add_epi16(step1_6, step2_5);
1403 step3_7 = _mm_add_epi16(step1_7, step2_4);
1407 const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6);
1408 const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6);
1409 const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5);
1410 const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5);
1411 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m08_p24);
1412 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m08_p24);
1413 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m24_m08);
1414 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m24_m08);
1415 // dct_const_round_shift
1416 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
1417 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
1418 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
1419 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
1420 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
1421 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
1422 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
1423 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
1425 step2_1 = _mm_packs_epi32(w0, w1);
1426 step2_2 = _mm_packs_epi32(w2, w3);
1429 const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6);
1430 const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6);
1431 const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5);
1432 const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5);
1433 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p24_p08);
1434 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p24_p08);
1435 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m08_p24);
1436 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m08_p24);
1437 // dct_const_round_shift
1438 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
1439 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
1440 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
1441 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
1442 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
1443 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
1444 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
1445 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
1447 step2_6 = _mm_packs_epi32(w0, w1);
1448 step2_5 = _mm_packs_epi32(w2, w3);
1452 step1_0 = _mm_add_epi16(step3_0, step2_1);
1453 step1_1 = _mm_sub_epi16(step3_0, step2_1);
1454 step1_2 = _mm_sub_epi16(step3_3, step2_2);
1455 step1_3 = _mm_add_epi16(step3_3, step2_2);
1456 step1_4 = _mm_add_epi16(step3_4, step2_5);
1457 step1_5 = _mm_sub_epi16(step3_4, step2_5);
1458 step1_6 = _mm_sub_epi16(step3_7, step2_6);
1459 step1_7 = _mm_add_epi16(step3_7, step2_6);
1463 const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7);
1464 const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7);
1465 const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6);
1466 const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6);
1467 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p30_p02);
1468 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p30_p02);
1469 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p14_p18);
1470 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p14_p18);
1471 // dct_const_round_shift
1472 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
1473 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
1474 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
1475 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
1476 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
1477 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
1478 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
1479 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
1481 res01 = _mm_packs_epi32(w0, w1);
1482 res09 = _mm_packs_epi32(w2, w3);
1485 const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5);
1486 const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5);
1487 const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4);
1488 const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4);
1489 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p22_p10);
1490 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p22_p10);
1491 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p06_p26);
1492 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p06_p26);
1493 // dct_const_round_shift
1494 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
1495 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
1496 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
1497 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
1498 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
1499 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
1500 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
1501 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
1503 res05 = _mm_packs_epi32(w0, w1);
1504 res13 = _mm_packs_epi32(w2, w3);
1507 const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5);
1508 const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5);
1509 const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4);
1510 const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4);
1511 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m10_p22);
1512 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m10_p22);
1513 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m26_p06);
1514 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m26_p06);
1515 // dct_const_round_shift
1516 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
1517 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
1518 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
1519 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
1520 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
1521 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
1522 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
1523 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
1525 res11 = _mm_packs_epi32(w0, w1);
1526 res03 = _mm_packs_epi32(w2, w3);
1529 const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7);
1530 const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7);
1531 const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6);
1532 const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6);
1533 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m02_p30);
1534 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m02_p30);
1535 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m18_p14);
1536 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m18_p14);
1537 // dct_const_round_shift
1538 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
1539 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
1540 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
1541 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
1542 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
1543 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
1544 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
1545 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
1547 res15 = _mm_packs_epi32(w0, w1);
1548 res07 = _mm_packs_epi32(w2, w3);
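// At this point res00..res15 hold all sixteen 1-D results for the eight
// columns being processed; the two 8x8 transposes below move them back into
// row order in the intermediate buffer.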
1551 // Transpose the results; do it as two 8x8 transposes.
1553 // 00 01 02 03 04 05 06 07
1554 // 10 11 12 13 14 15 16 17
1555 // 20 21 22 23 24 25 26 27
1556 // 30 31 32 33 34 35 36 37
1557 // 40 41 42 43 44 45 46 47
1558 // 50 51 52 53 54 55 56 57
1559 // 60 61 62 63 64 65 66 67
1560 // 70 71 72 73 74 75 76 77
1561 const __m128i tr0_0 = _mm_unpacklo_epi16(res00, res01);
1562 const __m128i tr0_1 = _mm_unpacklo_epi16(res02, res03);
1563 const __m128i tr0_2 = _mm_unpackhi_epi16(res00, res01);
1564 const __m128i tr0_3 = _mm_unpackhi_epi16(res02, res03);
1565 const __m128i tr0_4 = _mm_unpacklo_epi16(res04, res05);
1566 const __m128i tr0_5 = _mm_unpacklo_epi16(res06, res07);
1567 const __m128i tr0_6 = _mm_unpackhi_epi16(res04, res05);
1568 const __m128i tr0_7 = _mm_unpackhi_epi16(res06, res07);
1569 // 00 10 01 11 02 12 03 13
1570 // 20 30 21 31 22 32 23 33
1571 // 04 14 05 15 06 16 07 17
1572 // 24 34 25 35 26 36 27 37
1573 // 40 50 41 51 42 52 43 53
1574 // 60 70 61 71 62 72 63 73
1575 // 44 54 45 55 46 56 47 57
1576 // 64 74 65 75 66 76 67 77
1577 const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
1578 const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
1579 const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
1580 const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
1581 const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
1582 const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
1583 const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
1584 const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
1585 // 00 10 20 30 01 11 21 31
1586 // 40 50 60 70 41 51 61 71
1587 // 02 12 22 32 03 13 23 33
1588 // 42 52 62 72 43 53 63 73
1589 // 04 14 24 34 05 15 25 35
1590 // 44 54 64 74 45 55 65 75
1591 // 06 16 26 36 07 17 27 37
1592 // 46 56 66 76 47 57 67 77
1593 const __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
1594 const __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
1595 const __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
1596 const __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
1597 const __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
1598 const __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
1599 const __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
1600 const __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
1601 // 00 10 20 30 40 50 60 70
1602 // 01 11 21 31 41 51 61 71
1603 // 02 12 22 32 42 52 62 72
1604 // 03 13 23 33 43 53 63 73
1605 // 04 14 24 34 44 54 64 74
1606 // 05 15 25 35 45 55 65 75
1607 // 06 16 26 36 46 56 66 76
1608 // 07 17 27 37 47 57 67 77
1609 _mm_storeu_si128((__m128i *)(out + 0 * 16), tr2_0);
1610 _mm_storeu_si128((__m128i *)(out + 1 * 16), tr2_1);
1611 _mm_storeu_si128((__m128i *)(out + 2 * 16), tr2_2);
1612 _mm_storeu_si128((__m128i *)(out + 3 * 16), tr2_3);
1613 _mm_storeu_si128((__m128i *)(out + 4 * 16), tr2_4);
1614 _mm_storeu_si128((__m128i *)(out + 5 * 16), tr2_5);
1615 _mm_storeu_si128((__m128i *)(out + 6 * 16), tr2_6);
1616 _mm_storeu_si128((__m128i *)(out + 7 * 16), tr2_7);
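// The intermediate buffer is a 16x16 block of int16_t with a stride of 16,
// so each transposed row tr2_i of this first 8x8 block lands at
// out + i * 16; the second 8x8 block below is written at out + 8 + i * 16.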
1619 // 00 01 02 03 04 05 06 07
1620 // 10 11 12 13 14 15 16 17
1621 // 20 21 22 23 24 25 26 27
1622 // 30 31 32 33 34 35 36 37
1623 // 40 41 42 43 44 45 46 47
1624 // 50 51 52 53 54 55 56 57
1625 // 60 61 62 63 64 65 66 67
1626 // 70 71 72 73 74 75 76 77
1627 const __m128i tr0_0 = _mm_unpacklo_epi16(res08, res09);
1628 const __m128i tr0_1 = _mm_unpacklo_epi16(res10, res11);
1629 const __m128i tr0_2 = _mm_unpackhi_epi16(res08, res09);
1630 const __m128i tr0_3 = _mm_unpackhi_epi16(res10, res11);
1631 const __m128i tr0_4 = _mm_unpacklo_epi16(res12, res13);
1632 const __m128i tr0_5 = _mm_unpacklo_epi16(res14, res15);
1633 const __m128i tr0_6 = _mm_unpackhi_epi16(res12, res13);
1634 const __m128i tr0_7 = _mm_unpackhi_epi16(res14, res15);
1635 // 00 10 01 11 02 12 03 13
1636 // 20 30 21 31 22 32 23 33
1637 // 04 14 05 15 06 16 07 17
1638 // 24 34 25 35 26 36 27 37
1639 // 40 50 41 51 42 52 43 53
1640 // 60 70 61 71 62 72 63 73
1641 // 44 54 45 55 46 56 47 57
1642 // 64 74 65 75 66 76 67 77
1643 const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
1644 const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
1645 const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
1646 const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
1647 const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
1648 const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
1649 const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
1650 const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
1651 // 00 10 20 30 01 11 21 31
1652 // 40 50 60 70 41 51 61 71
1653 // 02 12 22 32 03 13 23 33
1654 // 42 52 62 72 43 53 63 73
1655 // 04 14 24 34 05 15 25 35
1656 // 44 54 64 74 45 55 65 75
1657 // 06 16 26 36 07 17 27 37
1658 // 46 56 66 76 47 57 67 77
1659 const __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
1660 const __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
1661 const __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
1662 const __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
1663 const __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
1664 const __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
1665 const __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
1666 const __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
1667 // 00 10 20 30 40 50 60 70
1668 // 01 11 21 31 41 51 61 71
1669 // 02 12 22 32 42 52 62 72
1670 // 03 13 23 33 43 53 63 73
1671 // 04 14 24 34 44 54 64 74
1672 // 05 15 25 35 45 55 65 75
1673 // 06 16 26 36 46 56 66 76
1674 // 07 17 27 37 47 57 67 77
1676 _mm_store_si128((__m128i *)(out + 8 + 0 * 16), tr2_0);
1677 _mm_store_si128((__m128i *)(out + 8 + 1 * 16), tr2_1);
1678 _mm_store_si128((__m128i *)(out + 8 + 2 * 16), tr2_2);
1679 _mm_store_si128((__m128i *)(out + 8 + 3 * 16), tr2_3);
1680 _mm_store_si128((__m128i *)(out + 8 + 4 * 16), tr2_4);
1681 _mm_store_si128((__m128i *)(out + 8 + 5 * 16), tr2_5);
1682 _mm_store_si128((__m128i *)(out + 8 + 6 * 16), tr2_6);
1683 _mm_store_si128((__m128i *)(out + 8 + 7 * 16), tr2_7);
1687 // Setup in/out for next pass.
1693 static INLINE void load_buffer_16x16(const int16_t* input, __m128i *in0,
1694 __m128i *in1, int stride) {
1695 // load first 8 columns
1696 load_buffer_8x8(input, in0, stride);
1697 load_buffer_8x8(input + 8 * stride, in0 + 8, stride);
1699 input += 8;
1700 // load second 8 columns
1701 load_buffer_8x8(input, in1, stride);
1702 load_buffer_8x8(input + 8 * stride, in1 + 8, stride);
1705 static INLINE void write_buffer_16x16(int16_t *output, __m128i *in0,
1706 __m128i *in1, int stride) {
1707 // write first 8 columns
1708 write_buffer_8x8(output, in0, stride);
1709 write_buffer_8x8(output + 8 * stride, in0 + 8, stride);
1710 // write second 8 columns
1711 output += 8;
1712 write_buffer_8x8(output, in1, stride);
1713 write_buffer_8x8(output + 8 * stride, in1 + 8, stride);
1716 static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
1717 __m128i tbuf[8];
1718 array_transpose_8x8(res0, res0);
1719 array_transpose_8x8(res1, tbuf);
1720 array_transpose_8x8(res0 + 8, res1);
1721 array_transpose_8x8(res1 + 8, res1 + 8);
1723 res0[8] = tbuf[0];
1724 res0[9] = tbuf[1];
1725 res0[10] = tbuf[2];
1726 res0[11] = tbuf[3];
1727 res0[12] = tbuf[4];
1728 res0[13] = tbuf[5];
1729 res0[14] = tbuf[6];
1730 res0[15] = tbuf[7];
1733 static INLINE void right_shift_16x16(__m128i *res0, __m128i *res1) {
1734 // perform rounding operations
1735 right_shift_8x8(res0, 2);
1736 right_shift_8x8(res0 + 8, 2);
1737 right_shift_8x8(res1, 2);
1738 right_shift_8x8(res1 + 8, 2);
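// right_shift_16x16 divides the intermediate results by 4 (an arithmetic
// shift by 2 with a rounding offset applied inside right_shift_8x8) between
// the two 1-D passes, mirroring the scaling of the two-pass C transform.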
1741 void fdct16_8col(__m128i *in) {
1742 // perform 16x16 1-D DCT for 8 columns
1743 __m128i i[8], s[8], p[8], t[8], u[16], v[16];
1744 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
1745 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
1746 const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
1747 const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
1748 const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
1749 const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
1750 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
1751 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
1752 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
1753 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
1754 const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64);
1755 const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64);
1756 const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
1757 const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
1758 const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64);
1759 const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64);
1760 const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
1761 const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
1762 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
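// Every butterfly below follows the same pattern: unpack two 16-bit rows
// into interleaved pairs, use _mm_madd_epi16 against a packed (c0, c1)
// constant to get a*c0 + b*c1 as 32-bit sums, then apply
// dct_const_round_shift and pack back to 16 bits. Roughly, per lane:
//   int32_t t = a * c0 + b * c1;
//   int16_t r = (int16_t)((t + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);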
1765 i[0] = _mm_add_epi16(in[0], in[15]);
1766 i[1] = _mm_add_epi16(in[1], in[14]);
1767 i[2] = _mm_add_epi16(in[2], in[13]);
1768 i[3] = _mm_add_epi16(in[3], in[12]);
1769 i[4] = _mm_add_epi16(in[4], in[11]);
1770 i[5] = _mm_add_epi16(in[5], in[10]);
1771 i[6] = _mm_add_epi16(in[6], in[9]);
1772 i[7] = _mm_add_epi16(in[7], in[8]);
1774 s[0] = _mm_sub_epi16(in[7], in[8]);
1775 s[1] = _mm_sub_epi16(in[6], in[9]);
1776 s[2] = _mm_sub_epi16(in[5], in[10]);
1777 s[3] = _mm_sub_epi16(in[4], in[11]);
1778 s[4] = _mm_sub_epi16(in[3], in[12]);
1779 s[5] = _mm_sub_epi16(in[2], in[13]);
1780 s[6] = _mm_sub_epi16(in[1], in[14]);
1781 s[7] = _mm_sub_epi16(in[0], in[15]);
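// i[0..7] are the symmetric sums in[k] + in[15 - k] that feed the even half
// (an 8-point DCT producing outputs 0, 2, ..., 14), and s[0..7] are the
// differences in[7 - k] - in[8 + k] that feed the odd half
// (outputs 1, 3, ..., 15).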
1783 p[0] = _mm_add_epi16(i[0], i[7]);
1784 p[1] = _mm_add_epi16(i[1], i[6]);
1785 p[2] = _mm_add_epi16(i[2], i[5]);
1786 p[3] = _mm_add_epi16(i[3], i[4]);
1787 p[4] = _mm_sub_epi16(i[3], i[4]);
1788 p[5] = _mm_sub_epi16(i[2], i[5]);
1789 p[6] = _mm_sub_epi16(i[1], i[6]);
1790 p[7] = _mm_sub_epi16(i[0], i[7]);
1792 u[0] = _mm_add_epi16(p[0], p[3]);
1793 u[1] = _mm_add_epi16(p[1], p[2]);
1794 u[2] = _mm_sub_epi16(p[1], p[2]);
1795 u[3] = _mm_sub_epi16(p[0], p[3]);
1797 v[0] = _mm_unpacklo_epi16(u[0], u[1]);
1798 v[1] = _mm_unpackhi_epi16(u[0], u[1]);
1799 v[2] = _mm_unpacklo_epi16(u[2], u[3]);
1800 v[3] = _mm_unpackhi_epi16(u[2], u[3]);
1802 u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16);
1803 u[1] = _mm_madd_epi16(v[1], k__cospi_p16_p16);
1804 u[2] = _mm_madd_epi16(v[0], k__cospi_p16_m16);
1805 u[3] = _mm_madd_epi16(v[1], k__cospi_p16_m16);
1806 u[4] = _mm_madd_epi16(v[2], k__cospi_p24_p08);
1807 u[5] = _mm_madd_epi16(v[3], k__cospi_p24_p08);
1808 u[6] = _mm_madd_epi16(v[2], k__cospi_m08_p24);
1809 u[7] = _mm_madd_epi16(v[3], k__cospi_m08_p24);
1811 v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
1812 v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
1813 v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
1814 v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
1815 v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
1816 v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
1817 v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
1818 v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
1820 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
1821 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
1822 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
1823 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
1824 u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
1825 u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
1826 u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
1827 u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
1829 in[0] = _mm_packs_epi32(u[0], u[1]);
1830 in[4] = _mm_packs_epi32(u[4], u[5]);
1831 in[8] = _mm_packs_epi32(u[2], u[3]);
1832 in[12] = _mm_packs_epi32(u[6], u[7]);
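// Even-half outputs 0, 4, 8, 12: in[0] and in[8] are
// ((p[0] + p[3]) +/- (p[1] + p[2])) * cospi_16_64, and in[4]/in[12] come
// from the 24/8 rotation of (p[1] - p[2], p[0] - p[3]), each passed through
// dct_const_round_shift.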
1834 u[0] = _mm_unpacklo_epi16(p[5], p[6]);
1835 u[1] = _mm_unpackhi_epi16(p[5], p[6]);
1836 v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16);
1837 v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16);
1838 v[2] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
1839 v[3] = _mm_madd_epi16(u[1], k__cospi_p16_p16);
1841 u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
1842 u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
1843 u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
1844 u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
1846 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
1847 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
1848 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
1849 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
1851 u[0] = _mm_packs_epi32(v[0], v[1]);
1852 u[1] = _mm_packs_epi32(v[2], v[3]);
1854 t[0] = _mm_add_epi16(p[4], u[0]);
1855 t[1] = _mm_sub_epi16(p[4], u[0]);
1856 t[2] = _mm_sub_epi16(p[7], u[1]);
1857 t[3] = _mm_add_epi16(p[7], u[1]);
1859 u[0] = _mm_unpacklo_epi16(t[0], t[3]);
1860 u[1] = _mm_unpackhi_epi16(t[0], t[3]);
1861 u[2] = _mm_unpacklo_epi16(t[1], t[2]);
1862 u[3] = _mm_unpackhi_epi16(t[1], t[2]);
1864 v[0] = _mm_madd_epi16(u[0], k__cospi_p28_p04);
1865 v[1] = _mm_madd_epi16(u[1], k__cospi_p28_p04);
1866 v[2] = _mm_madd_epi16(u[2], k__cospi_p12_p20);
1867 v[3] = _mm_madd_epi16(u[3], k__cospi_p12_p20);
1868 v[4] = _mm_madd_epi16(u[2], k__cospi_m20_p12);
1869 v[5] = _mm_madd_epi16(u[3], k__cospi_m20_p12);
1870 v[6] = _mm_madd_epi16(u[0], k__cospi_m04_p28);
1871 v[7] = _mm_madd_epi16(u[1], k__cospi_m04_p28);
1873 u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
1874 u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
1875 u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
1876 u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
1877 u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
1878 u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
1879 u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
1880 u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
1882 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
1883 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
1884 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
1885 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
1886 v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
1887 v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
1888 v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
1889 v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
1891 in[2] = _mm_packs_epi32(v[0], v[1]);
1892 in[6] = _mm_packs_epi32(v[4], v[5]);
1893 in[10] = _mm_packs_epi32(v[2], v[3]);
1894 in[14] = _mm_packs_epi32(v[6], v[7]);
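// Even-half outputs 2, 6, 10, 14: t[0]/t[3] are rotated by the 28/4 cospi
// pair (giving in[2] and in[14]) and t[1]/t[2] by the 12/20 pair (giving
// in[10] and in[6]), again via madd + dct_const_round_shift.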
1897 u[0] = _mm_unpacklo_epi16(s[2], s[5]);
1898 u[1] = _mm_unpackhi_epi16(s[2], s[5]);
1899 u[2] = _mm_unpacklo_epi16(s[3], s[4]);
1900 u[3] = _mm_unpackhi_epi16(s[3], s[4]);
1902 v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16);
1903 v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16);
1904 v[2] = _mm_madd_epi16(u[2], k__cospi_m16_p16);
1905 v[3] = _mm_madd_epi16(u[3], k__cospi_m16_p16);
1906 v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16);
1907 v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16);
1908 v[6] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
1909 v[7] = _mm_madd_epi16(u[1], k__cospi_p16_p16);
1911 u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
1912 u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
1913 u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
1914 u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
1915 u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
1916 u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
1917 u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
1918 u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
1920 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
1921 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
1922 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
1923 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
1924 v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
1925 v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
1926 v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
1927 v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
1929 t[2] = _mm_packs_epi32(v[0], v[1]);
1930 t[3] = _mm_packs_epi32(v[2], v[3]);
1931 t[4] = _mm_packs_epi32(v[4], v[5]);
1932 t[5] = _mm_packs_epi32(v[6], v[7]);
1935 p[0] = _mm_add_epi16(s[0], t[3]);
1936 p[1] = _mm_add_epi16(s[1], t[2]);
1937 p[2] = _mm_sub_epi16(s[1], t[2]);
1938 p[3] = _mm_sub_epi16(s[0], t[3]);
1939 p[4] = _mm_sub_epi16(s[7], t[4]);
1940 p[5] = _mm_sub_epi16(s[6], t[5]);
1941 p[6] = _mm_add_epi16(s[6], t[5]);
1942 p[7] = _mm_add_epi16(s[7], t[4]);
1945 u[0] = _mm_unpacklo_epi16(p[1], p[6]);
1946 u[1] = _mm_unpackhi_epi16(p[1], p[6]);
1947 u[2] = _mm_unpacklo_epi16(p[2], p[5]);
1948 u[3] = _mm_unpackhi_epi16(p[2], p[5]);
1950 v[0] = _mm_madd_epi16(u[0], k__cospi_m08_p24);
1951 v[1] = _mm_madd_epi16(u[1], k__cospi_m08_p24);
1952 v[2] = _mm_madd_epi16(u[2], k__cospi_m24_m08);
1953 v[3] = _mm_madd_epi16(u[3], k__cospi_m24_m08);
1954 v[4] = _mm_madd_epi16(u[2], k__cospi_m08_p24);
1955 v[5] = _mm_madd_epi16(u[3], k__cospi_m08_p24);
1956 v[6] = _mm_madd_epi16(u[0], k__cospi_p24_p08);
1957 v[7] = _mm_madd_epi16(u[1], k__cospi_p24_p08);
1959 u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
1960 u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
1961 u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
1962 u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
1963 u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
1964 u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
1965 u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
1966 u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
1968 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
1969 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
1970 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
1971 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
1972 v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
1973 v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
1974 v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
1975 v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
1977 t[1] = _mm_packs_epi32(v[0], v[1]);
1978 t[2] = _mm_packs_epi32(v[2], v[3]);
1979 t[5] = _mm_packs_epi32(v[4], v[5]);
1980 t[6] = _mm_packs_epi32(v[6], v[7]);
1983 s[0] = _mm_add_epi16(p[0], t[1]);
1984 s[1] = _mm_sub_epi16(p[0], t[1]);
1985 s[2] = _mm_sub_epi16(p[3], t[2]);
1986 s[3] = _mm_add_epi16(p[3], t[2]);
1987 s[4] = _mm_add_epi16(p[4], t[5]);
1988 s[5] = _mm_sub_epi16(p[4], t[5]);
1989 s[6] = _mm_sub_epi16(p[7], t[6]);
1990 s[7] = _mm_add_epi16(p[7], t[6]);
1993 u[0] = _mm_unpacklo_epi16(s[0], s[7]);
1994 u[1] = _mm_unpackhi_epi16(s[0], s[7]);
1995 u[2] = _mm_unpacklo_epi16(s[1], s[6]);
1996 u[3] = _mm_unpackhi_epi16(s[1], s[6]);
1997 u[4] = _mm_unpacklo_epi16(s[2], s[5]);
1998 u[5] = _mm_unpackhi_epi16(s[2], s[5]);
1999 u[6] = _mm_unpacklo_epi16(s[3], s[4]);
2000 u[7] = _mm_unpackhi_epi16(s[3], s[4]);
2002 v[0] = _mm_madd_epi16(u[0], k__cospi_p30_p02);
2003 v[1] = _mm_madd_epi16(u[1], k__cospi_p30_p02);
2004 v[2] = _mm_madd_epi16(u[2], k__cospi_p14_p18);
2005 v[3] = _mm_madd_epi16(u[3], k__cospi_p14_p18);
2006 v[4] = _mm_madd_epi16(u[4], k__cospi_p22_p10);
2007 v[5] = _mm_madd_epi16(u[5], k__cospi_p22_p10);
2008 v[6] = _mm_madd_epi16(u[6], k__cospi_p06_p26);
2009 v[7] = _mm_madd_epi16(u[7], k__cospi_p06_p26);
2010 v[8] = _mm_madd_epi16(u[6], k__cospi_m26_p06);
2011 v[9] = _mm_madd_epi16(u[7], k__cospi_m26_p06);
2012 v[10] = _mm_madd_epi16(u[4], k__cospi_m10_p22);
2013 v[11] = _mm_madd_epi16(u[5], k__cospi_m10_p22);
2014 v[12] = _mm_madd_epi16(u[2], k__cospi_m18_p14);
2015 v[13] = _mm_madd_epi16(u[3], k__cospi_m18_p14);
2016 v[14] = _mm_madd_epi16(u[0], k__cospi_m02_p30);
2017 v[15] = _mm_madd_epi16(u[1], k__cospi_m02_p30);
2019 u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
2020 u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
2021 u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
2022 u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
2023 u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
2024 u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
2025 u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
2026 u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
2027 u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING);
2028 u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING);
2029 u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING);
2030 u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING);
2031 u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING);
2032 u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING);
2033 u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING);
2034 u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING);
2036 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
2037 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
2038 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
2039 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
2040 v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
2041 v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
2042 v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
2043 v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
2044 v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
2045 v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
2046 v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
2047 v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
2048 v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
2049 v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
2050 v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
2051 v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
2053 in[1] = _mm_packs_epi32(v[0], v[1]);
2054 in[9] = _mm_packs_epi32(v[2], v[3]);
2055 in[5] = _mm_packs_epi32(v[4], v[5]);
2056 in[13] = _mm_packs_epi32(v[6], v[7]);
2057 in[3] = _mm_packs_epi32(v[8], v[9]);
2058 in[11] = _mm_packs_epi32(v[10], v[11]);
2059 in[7] = _mm_packs_epi32(v[12], v[13]);
2060 in[15] = _mm_packs_epi32(v[14], v[15]);
2063 void fadst16_8col(__m128i *in) {
2064 // perform 16x16 1-D ADST for 8 columns
2065 __m128i s[16], x[16], u[32], v[32];
2066 const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
2067 const __m128i k__cospi_p31_m01 = pair_set_epi16(cospi_31_64, -cospi_1_64);
2068 const __m128i k__cospi_p05_p27 = pair_set_epi16(cospi_5_64, cospi_27_64);
2069 const __m128i k__cospi_p27_m05 = pair_set_epi16(cospi_27_64, -cospi_5_64);
2070 const __m128i k__cospi_p09_p23 = pair_set_epi16(cospi_9_64, cospi_23_64);
2071 const __m128i k__cospi_p23_m09 = pair_set_epi16(cospi_23_64, -cospi_9_64);
2072 const __m128i k__cospi_p13_p19 = pair_set_epi16(cospi_13_64, cospi_19_64);
2073 const __m128i k__cospi_p19_m13 = pair_set_epi16(cospi_19_64, -cospi_13_64);
2074 const __m128i k__cospi_p17_p15 = pair_set_epi16(cospi_17_64, cospi_15_64);
2075 const __m128i k__cospi_p15_m17 = pair_set_epi16(cospi_15_64, -cospi_17_64);
2076 const __m128i k__cospi_p21_p11 = pair_set_epi16(cospi_21_64, cospi_11_64);
2077 const __m128i k__cospi_p11_m21 = pair_set_epi16(cospi_11_64, -cospi_21_64);
2078 const __m128i k__cospi_p25_p07 = pair_set_epi16(cospi_25_64, cospi_7_64);
2079 const __m128i k__cospi_p07_m25 = pair_set_epi16(cospi_7_64, -cospi_25_64);
2080 const __m128i k__cospi_p29_p03 = pair_set_epi16(cospi_29_64, cospi_3_64);
2081 const __m128i k__cospi_p03_m29 = pair_set_epi16(cospi_3_64, -cospi_29_64);
2082 const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64);
2083 const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64);
2084 const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64);
2085 const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64);
2086 const __m128i k__cospi_m28_p04 = pair_set_epi16(-cospi_28_64, cospi_4_64);
2087 const __m128i k__cospi_m12_p20 = pair_set_epi16(-cospi_12_64, cospi_20_64);
2088 const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
2089 const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
2090 const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
2091 const __m128i k__cospi_m16_m16 = _mm_set1_epi16(-cospi_16_64);
2092 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
2093 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
2094 const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
2095 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
2096 const __m128i kZero = _mm_set1_epi16(0);
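// The ADST is built from four stages of the same madd/round/shift butterfly
// used in the DCT: the first stage uses the 1/31, 5/27, 9/23, 13/19, 17/15,
// 21/11, 25/7 and 29/3 rotations, the second 4/28 and 20/12, the third 8/24,
// and the last +/-16; selected outputs are negated at the end via
// kZero - s[x].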
2098 u[0] = _mm_unpacklo_epi16(in[15], in[0]);
2099 u[1] = _mm_unpackhi_epi16(in[15], in[0]);
2100 u[2] = _mm_unpacklo_epi16(in[13], in[2]);
2101 u[3] = _mm_unpackhi_epi16(in[13], in[2]);
2102 u[4] = _mm_unpacklo_epi16(in[11], in[4]);
2103 u[5] = _mm_unpackhi_epi16(in[11], in[4]);
2104 u[6] = _mm_unpacklo_epi16(in[9], in[6]);
2105 u[7] = _mm_unpackhi_epi16(in[9], in[6]);
2106 u[8] = _mm_unpacklo_epi16(in[7], in[8]);
2107 u[9] = _mm_unpackhi_epi16(in[7], in[8]);
2108 u[10] = _mm_unpacklo_epi16(in[5], in[10]);
2109 u[11] = _mm_unpackhi_epi16(in[5], in[10]);
2110 u[12] = _mm_unpacklo_epi16(in[3], in[12]);
2111 u[13] = _mm_unpackhi_epi16(in[3], in[12]);
2112 u[14] = _mm_unpacklo_epi16(in[1], in[14]);
2113 u[15] = _mm_unpackhi_epi16(in[1], in[14]);
2115 v[0] = _mm_madd_epi16(u[0], k__cospi_p01_p31);
2116 v[1] = _mm_madd_epi16(u[1], k__cospi_p01_p31);
2117 v[2] = _mm_madd_epi16(u[0], k__cospi_p31_m01);
2118 v[3] = _mm_madd_epi16(u[1], k__cospi_p31_m01);
2119 v[4] = _mm_madd_epi16(u[2], k__cospi_p05_p27);
2120 v[5] = _mm_madd_epi16(u[3], k__cospi_p05_p27);
2121 v[6] = _mm_madd_epi16(u[2], k__cospi_p27_m05);
2122 v[7] = _mm_madd_epi16(u[3], k__cospi_p27_m05);
2123 v[8] = _mm_madd_epi16(u[4], k__cospi_p09_p23);
2124 v[9] = _mm_madd_epi16(u[5], k__cospi_p09_p23);
2125 v[10] = _mm_madd_epi16(u[4], k__cospi_p23_m09);
2126 v[11] = _mm_madd_epi16(u[5], k__cospi_p23_m09);
2127 v[12] = _mm_madd_epi16(u[6], k__cospi_p13_p19);
2128 v[13] = _mm_madd_epi16(u[7], k__cospi_p13_p19);
2129 v[14] = _mm_madd_epi16(u[6], k__cospi_p19_m13);
2130 v[15] = _mm_madd_epi16(u[7], k__cospi_p19_m13);
2131 v[16] = _mm_madd_epi16(u[8], k__cospi_p17_p15);
2132 v[17] = _mm_madd_epi16(u[9], k__cospi_p17_p15);
2133 v[18] = _mm_madd_epi16(u[8], k__cospi_p15_m17);
2134 v[19] = _mm_madd_epi16(u[9], k__cospi_p15_m17);
2135 v[20] = _mm_madd_epi16(u[10], k__cospi_p21_p11);
2136 v[21] = _mm_madd_epi16(u[11], k__cospi_p21_p11);
2137 v[22] = _mm_madd_epi16(u[10], k__cospi_p11_m21);
2138 v[23] = _mm_madd_epi16(u[11], k__cospi_p11_m21);
2139 v[24] = _mm_madd_epi16(u[12], k__cospi_p25_p07);
2140 v[25] = _mm_madd_epi16(u[13], k__cospi_p25_p07);
2141 v[26] = _mm_madd_epi16(u[12], k__cospi_p07_m25);
2142 v[27] = _mm_madd_epi16(u[13], k__cospi_p07_m25);
2143 v[28] = _mm_madd_epi16(u[14], k__cospi_p29_p03);
2144 v[29] = _mm_madd_epi16(u[15], k__cospi_p29_p03);
2145 v[30] = _mm_madd_epi16(u[14], k__cospi_p03_m29);
2146 v[31] = _mm_madd_epi16(u[15], k__cospi_p03_m29);
2148 u[0] = _mm_add_epi32(v[0], v[16]);
2149 u[1] = _mm_add_epi32(v[1], v[17]);
2150 u[2] = _mm_add_epi32(v[2], v[18]);
2151 u[3] = _mm_add_epi32(v[3], v[19]);
2152 u[4] = _mm_add_epi32(v[4], v[20]);
2153 u[5] = _mm_add_epi32(v[5], v[21]);
2154 u[6] = _mm_add_epi32(v[6], v[22]);
2155 u[7] = _mm_add_epi32(v[7], v[23]);
2156 u[8] = _mm_add_epi32(v[8], v[24]);
2157 u[9] = _mm_add_epi32(v[9], v[25]);
2158 u[10] = _mm_add_epi32(v[10], v[26]);
2159 u[11] = _mm_add_epi32(v[11], v[27]);
2160 u[12] = _mm_add_epi32(v[12], v[28]);
2161 u[13] = _mm_add_epi32(v[13], v[29]);
2162 u[14] = _mm_add_epi32(v[14], v[30]);
2163 u[15] = _mm_add_epi32(v[15], v[31]);
2164 u[16] = _mm_sub_epi32(v[0], v[16]);
2165 u[17] = _mm_sub_epi32(v[1], v[17]);
2166 u[18] = _mm_sub_epi32(v[2], v[18]);
2167 u[19] = _mm_sub_epi32(v[3], v[19]);
2168 u[20] = _mm_sub_epi32(v[4], v[20]);
2169 u[21] = _mm_sub_epi32(v[5], v[21]);
2170 u[22] = _mm_sub_epi32(v[6], v[22]);
2171 u[23] = _mm_sub_epi32(v[7], v[23]);
2172 u[24] = _mm_sub_epi32(v[8], v[24]);
2173 u[25] = _mm_sub_epi32(v[9], v[25]);
2174 u[26] = _mm_sub_epi32(v[10], v[26]);
2175 u[27] = _mm_sub_epi32(v[11], v[27]);
2176 u[28] = _mm_sub_epi32(v[12], v[28]);
2177 u[29] = _mm_sub_epi32(v[13], v[29]);
2178 u[30] = _mm_sub_epi32(v[14], v[30]);
2179 u[31] = _mm_sub_epi32(v[15], v[31]);
2181 v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
2182 v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
2183 v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
2184 v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
2185 v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
2186 v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
2187 v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
2188 v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
2189 v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
2190 v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
2191 v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
2192 v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
2193 v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
2194 v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
2195 v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
2196 v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
2197 v[16] = _mm_add_epi32(u[16], k__DCT_CONST_ROUNDING);
2198 v[17] = _mm_add_epi32(u[17], k__DCT_CONST_ROUNDING);
2199 v[18] = _mm_add_epi32(u[18], k__DCT_CONST_ROUNDING);
2200 v[19] = _mm_add_epi32(u[19], k__DCT_CONST_ROUNDING);
2201 v[20] = _mm_add_epi32(u[20], k__DCT_CONST_ROUNDING);
2202 v[21] = _mm_add_epi32(u[21], k__DCT_CONST_ROUNDING);
2203 v[22] = _mm_add_epi32(u[22], k__DCT_CONST_ROUNDING);
2204 v[23] = _mm_add_epi32(u[23], k__DCT_CONST_ROUNDING);
2205 v[24] = _mm_add_epi32(u[24], k__DCT_CONST_ROUNDING);
2206 v[25] = _mm_add_epi32(u[25], k__DCT_CONST_ROUNDING);
2207 v[26] = _mm_add_epi32(u[26], k__DCT_CONST_ROUNDING);
2208 v[27] = _mm_add_epi32(u[27], k__DCT_CONST_ROUNDING);
2209 v[28] = _mm_add_epi32(u[28], k__DCT_CONST_ROUNDING);
2210 v[29] = _mm_add_epi32(u[29], k__DCT_CONST_ROUNDING);
2211 v[30] = _mm_add_epi32(u[30], k__DCT_CONST_ROUNDING);
2212 v[31] = _mm_add_epi32(u[31], k__DCT_CONST_ROUNDING);
2214 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
2215 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
2216 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
2217 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
2218 u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
2219 u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
2220 u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
2221 u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
2222 u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
2223 u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
2224 u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
2225 u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
2226 u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
2227 u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
2228 u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
2229 u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
2230 u[16] = _mm_srai_epi32(v[16], DCT_CONST_BITS);
2231 u[17] = _mm_srai_epi32(v[17], DCT_CONST_BITS);
2232 u[18] = _mm_srai_epi32(v[18], DCT_CONST_BITS);
2233 u[19] = _mm_srai_epi32(v[19], DCT_CONST_BITS);
2234 u[20] = _mm_srai_epi32(v[20], DCT_CONST_BITS);
2235 u[21] = _mm_srai_epi32(v[21], DCT_CONST_BITS);
2236 u[22] = _mm_srai_epi32(v[22], DCT_CONST_BITS);
2237 u[23] = _mm_srai_epi32(v[23], DCT_CONST_BITS);
2238 u[24] = _mm_srai_epi32(v[24], DCT_CONST_BITS);
2239 u[25] = _mm_srai_epi32(v[25], DCT_CONST_BITS);
2240 u[26] = _mm_srai_epi32(v[26], DCT_CONST_BITS);
2241 u[27] = _mm_srai_epi32(v[27], DCT_CONST_BITS);
2242 u[28] = _mm_srai_epi32(v[28], DCT_CONST_BITS);
2243 u[29] = _mm_srai_epi32(v[29], DCT_CONST_BITS);
2244 u[30] = _mm_srai_epi32(v[30], DCT_CONST_BITS);
2245 u[31] = _mm_srai_epi32(v[31], DCT_CONST_BITS);
2247 s[0] = _mm_packs_epi32(u[0], u[1]);
2248 s[1] = _mm_packs_epi32(u[2], u[3]);
2249 s[2] = _mm_packs_epi32(u[4], u[5]);
2250 s[3] = _mm_packs_epi32(u[6], u[7]);
2251 s[4] = _mm_packs_epi32(u[8], u[9]);
2252 s[5] = _mm_packs_epi32(u[10], u[11]);
2253 s[6] = _mm_packs_epi32(u[12], u[13]);
2254 s[7] = _mm_packs_epi32(u[14], u[15]);
2255 s[8] = _mm_packs_epi32(u[16], u[17]);
2256 s[9] = _mm_packs_epi32(u[18], u[19]);
2257 s[10] = _mm_packs_epi32(u[20], u[21]);
2258 s[11] = _mm_packs_epi32(u[22], u[23]);
2259 s[12] = _mm_packs_epi32(u[24], u[25]);
2260 s[13] = _mm_packs_epi32(u[26], u[27]);
2261 s[14] = _mm_packs_epi32(u[28], u[29]);
2262 s[15] = _mm_packs_epi32(u[30], u[31]);
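// After the first-stage rotations, s[0..7] hold the rounded sums
// (v[i] + v[i + 16]) and s[8..15] the rounded differences, packed back to
// 16 bits.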
2265 u[0] = _mm_unpacklo_epi16(s[8], s[9]);
2266 u[1] = _mm_unpackhi_epi16(s[8], s[9]);
2267 u[2] = _mm_unpacklo_epi16(s[10], s[11]);
2268 u[3] = _mm_unpackhi_epi16(s[10], s[11]);
2269 u[4] = _mm_unpacklo_epi16(s[12], s[13]);
2270 u[5] = _mm_unpackhi_epi16(s[12], s[13]);
2271 u[6] = _mm_unpacklo_epi16(s[14], s[15]);
2272 u[7] = _mm_unpackhi_epi16(s[14], s[15]);
2274 v[0] = _mm_madd_epi16(u[0], k__cospi_p04_p28);
2275 v[1] = _mm_madd_epi16(u[1], k__cospi_p04_p28);
2276 v[2] = _mm_madd_epi16(u[0], k__cospi_p28_m04);
2277 v[3] = _mm_madd_epi16(u[1], k__cospi_p28_m04);
2278 v[4] = _mm_madd_epi16(u[2], k__cospi_p20_p12);
2279 v[5] = _mm_madd_epi16(u[3], k__cospi_p20_p12);
2280 v[6] = _mm_madd_epi16(u[2], k__cospi_p12_m20);
2281 v[7] = _mm_madd_epi16(u[3], k__cospi_p12_m20);
2282 v[8] = _mm_madd_epi16(u[4], k__cospi_m28_p04);
2283 v[9] = _mm_madd_epi16(u[5], k__cospi_m28_p04);
2284 v[10] = _mm_madd_epi16(u[4], k__cospi_p04_p28);
2285 v[11] = _mm_madd_epi16(u[5], k__cospi_p04_p28);
2286 v[12] = _mm_madd_epi16(u[6], k__cospi_m12_p20);
2287 v[13] = _mm_madd_epi16(u[7], k__cospi_m12_p20);
2288 v[14] = _mm_madd_epi16(u[6], k__cospi_p20_p12);
2289 v[15] = _mm_madd_epi16(u[7], k__cospi_p20_p12);
2291 u[0] = _mm_add_epi32(v[0], v[8]);
2292 u[1] = _mm_add_epi32(v[1], v[9]);
2293 u[2] = _mm_add_epi32(v[2], v[10]);
2294 u[3] = _mm_add_epi32(v[3], v[11]);
2295 u[4] = _mm_add_epi32(v[4], v[12]);
2296 u[5] = _mm_add_epi32(v[5], v[13]);
2297 u[6] = _mm_add_epi32(v[6], v[14]);
2298 u[7] = _mm_add_epi32(v[7], v[15]);
2299 u[8] = _mm_sub_epi32(v[0], v[8]);
2300 u[9] = _mm_sub_epi32(v[1], v[9]);
2301 u[10] = _mm_sub_epi32(v[2], v[10]);
2302 u[11] = _mm_sub_epi32(v[3], v[11]);
2303 u[12] = _mm_sub_epi32(v[4], v[12]);
2304 u[13] = _mm_sub_epi32(v[5], v[13]);
2305 u[14] = _mm_sub_epi32(v[6], v[14]);
2306 u[15] = _mm_sub_epi32(v[7], v[15]);
2308 v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
2309 v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
2310 v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
2311 v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
2312 v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
2313 v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
2314 v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
2315 v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
2316 v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
2317 v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
2318 v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
2319 v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
2320 v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
2321 v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
2322 v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
2323 v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
2325 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
2326 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
2327 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
2328 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
2329 u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
2330 u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
2331 u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
2332 u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
2333 u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
2334 u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
2335 u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
2336 u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
2337 u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
2338 u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
2339 u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
2340 u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
2342 x[0] = _mm_add_epi16(s[0], s[4]);
2343 x[1] = _mm_add_epi16(s[1], s[5]);
2344 x[2] = _mm_add_epi16(s[2], s[6]);
2345 x[3] = _mm_add_epi16(s[3], s[7]);
2346 x[4] = _mm_sub_epi16(s[0], s[4]);
2347 x[5] = _mm_sub_epi16(s[1], s[5]);
2348 x[6] = _mm_sub_epi16(s[2], s[6]);
2349 x[7] = _mm_sub_epi16(s[3], s[7]);
2350 x[8] = _mm_packs_epi32(u[0], u[1]);
2351 x[9] = _mm_packs_epi32(u[2], u[3]);
2352 x[10] = _mm_packs_epi32(u[4], u[5]);
2353 x[11] = _mm_packs_epi32(u[6], u[7]);
2354 x[12] = _mm_packs_epi32(u[8], u[9]);
2355 x[13] = _mm_packs_epi32(u[10], u[11]);
2356 x[14] = _mm_packs_epi32(u[12], u[13]);
2357 x[15] = _mm_packs_epi32(u[14], u[15]);
2360 u[0] = _mm_unpacklo_epi16(x[4], x[5]);
2361 u[1] = _mm_unpackhi_epi16(x[4], x[5]);
2362 u[2] = _mm_unpacklo_epi16(x[6], x[7]);
2363 u[3] = _mm_unpackhi_epi16(x[6], x[7]);
2364 u[4] = _mm_unpacklo_epi16(x[12], x[13]);
2365 u[5] = _mm_unpackhi_epi16(x[12], x[13]);
2366 u[6] = _mm_unpacklo_epi16(x[14], x[15]);
2367 u[7] = _mm_unpackhi_epi16(x[14], x[15]);
2369 v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24);
2370 v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24);
2371 v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08);
2372 v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08);
2373 v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08);
2374 v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08);
2375 v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24);
2376 v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24);
2377 v[8] = _mm_madd_epi16(u[4], k__cospi_p08_p24);
2378 v[9] = _mm_madd_epi16(u[5], k__cospi_p08_p24);
2379 v[10] = _mm_madd_epi16(u[4], k__cospi_p24_m08);
2380 v[11] = _mm_madd_epi16(u[5], k__cospi_p24_m08);
2381 v[12] = _mm_madd_epi16(u[6], k__cospi_m24_p08);
2382 v[13] = _mm_madd_epi16(u[7], k__cospi_m24_p08);
2383 v[14] = _mm_madd_epi16(u[6], k__cospi_p08_p24);
2384 v[15] = _mm_madd_epi16(u[7], k__cospi_p08_p24);
2386 u[0] = _mm_add_epi32(v[0], v[4]);
2387 u[1] = _mm_add_epi32(v[1], v[5]);
2388 u[2] = _mm_add_epi32(v[2], v[6]);
2389 u[3] = _mm_add_epi32(v[3], v[7]);
2390 u[4] = _mm_sub_epi32(v[0], v[4]);
2391 u[5] = _mm_sub_epi32(v[1], v[5]);
2392 u[6] = _mm_sub_epi32(v[2], v[6]);
2393 u[7] = _mm_sub_epi32(v[3], v[7]);
2394 u[8] = _mm_add_epi32(v[8], v[12]);
2395 u[9] = _mm_add_epi32(v[9], v[13]);
2396 u[10] = _mm_add_epi32(v[10], v[14]);
2397 u[11] = _mm_add_epi32(v[11], v[15]);
2398 u[12] = _mm_sub_epi32(v[8], v[12]);
2399 u[13] = _mm_sub_epi32(v[9], v[13]);
2400 u[14] = _mm_sub_epi32(v[10], v[14]);
2401 u[15] = _mm_sub_epi32(v[11], v[15]);
2403 u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
2404 u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
2405 u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
2406 u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
2407 u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
2408 u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
2409 u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
2410 u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
2411 u[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
2412 u[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
2413 u[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
2414 u[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
2415 u[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
2416 u[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
2417 u[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
2418 u[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
2420 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
2421 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
2422 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
2423 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
2424 v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
2425 v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
2426 v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
2427 v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
2428 v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
2429 v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
2430 v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
2431 v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
2432 v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
2433 v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
2434 v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
2435 v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
2437 s[0] = _mm_add_epi16(x[0], x[2]);
2438 s[1] = _mm_add_epi16(x[1], x[3]);
2439 s[2] = _mm_sub_epi16(x[0], x[2]);
2440 s[3] = _mm_sub_epi16(x[1], x[3]);
2441 s[4] = _mm_packs_epi32(v[0], v[1]);
2442 s[5] = _mm_packs_epi32(v[2], v[3]);
2443 s[6] = _mm_packs_epi32(v[4], v[5]);
2444 s[7] = _mm_packs_epi32(v[6], v[7]);
2445 s[8] = _mm_add_epi16(x[8], x[10]);
2446 s[9] = _mm_add_epi16(x[9], x[11]);
2447 s[10] = _mm_sub_epi16(x[8], x[10]);
2448 s[11] = _mm_sub_epi16(x[9], x[11]);
2449 s[12] = _mm_packs_epi32(v[8], v[9]);
2450 s[13] = _mm_packs_epi32(v[10], v[11]);
2451 s[14] = _mm_packs_epi32(v[12], v[13]);
2452 s[15] = _mm_packs_epi32(v[14], v[15]);
2455 u[0] = _mm_unpacklo_epi16(s[2], s[3]);
2456 u[1] = _mm_unpackhi_epi16(s[2], s[3]);
2457 u[2] = _mm_unpacklo_epi16(s[6], s[7]);
2458 u[3] = _mm_unpackhi_epi16(s[6], s[7]);
2459 u[4] = _mm_unpacklo_epi16(s[10], s[11]);
2460 u[5] = _mm_unpackhi_epi16(s[10], s[11]);
2461 u[6] = _mm_unpacklo_epi16(s[14], s[15]);
2462 u[7] = _mm_unpackhi_epi16(s[14], s[15]);
2464 v[0] = _mm_madd_epi16(u[0], k__cospi_m16_m16);
2465 v[1] = _mm_madd_epi16(u[1], k__cospi_m16_m16);
2466 v[2] = _mm_madd_epi16(u[0], k__cospi_p16_m16);
2467 v[3] = _mm_madd_epi16(u[1], k__cospi_p16_m16);
2468 v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16);
2469 v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16);
2470 v[6] = _mm_madd_epi16(u[2], k__cospi_m16_p16);
2471 v[7] = _mm_madd_epi16(u[3], k__cospi_m16_p16);
2472 v[8] = _mm_madd_epi16(u[4], k__cospi_p16_p16);
2473 v[9] = _mm_madd_epi16(u[5], k__cospi_p16_p16);
2474 v[10] = _mm_madd_epi16(u[4], k__cospi_m16_p16);
2475 v[11] = _mm_madd_epi16(u[5], k__cospi_m16_p16);
2476 v[12] = _mm_madd_epi16(u[6], k__cospi_m16_m16);
2477 v[13] = _mm_madd_epi16(u[7], k__cospi_m16_m16);
2478 v[14] = _mm_madd_epi16(u[6], k__cospi_p16_m16);
2479 v[15] = _mm_madd_epi16(u[7], k__cospi_p16_m16);
2481 u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
2482 u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
2483 u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
2484 u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
2485 u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
2486 u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
2487 u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
2488 u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
2489 u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING);
2490 u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING);
2491 u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING);
2492 u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING);
2493 u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING);
2494 u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING);
2495 u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING);
2496 u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING);
2498 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
2499 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
2500 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
2501 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
2502 v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
2503 v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
2504 v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
2505 v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
2506 v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
2507 v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
2508 v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
2509 v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
2510 v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
2511 v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
2512 v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
2513 v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
2515 in[0] = s[0];
2516 in[1] = _mm_sub_epi16(kZero, s[8]);
2517 in[2] = s[12];
2518 in[3] = _mm_sub_epi16(kZero, s[4]);
2519 in[4] = _mm_packs_epi32(v[4], v[5]);
2520 in[5] = _mm_packs_epi32(v[12], v[13]);
2521 in[6] = _mm_packs_epi32(v[8], v[9]);
2522 in[7] = _mm_packs_epi32(v[0], v[1]);
2523 in[8] = _mm_packs_epi32(v[2], v[3]);
2524 in[9] = _mm_packs_epi32(v[10], v[11]);
2525 in[10] = _mm_packs_epi32(v[14], v[15]);
2526 in[11] = _mm_packs_epi32(v[6], v[7]);
2527 in[12] = s[5];
2528 in[13] = _mm_sub_epi16(kZero, s[13]);
2529 in[14] = s[9];
2530 in[15] = _mm_sub_epi16(kZero, s[1]);
2533 void fdct16_sse2(__m128i *in0, __m128i *in1) {
2534 fdct16_8col(in0);
2535 fdct16_8col(in1);
2536 array_transpose_16x16(in0, in1);
2539 void fadst16_sse2(__m128i *in0, __m128i *in1) {
2540 fadst16_8col(in0);
2541 fadst16_8col(in1);
2542 array_transpose_16x16(in0, in1);
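// fdct16_sse2/fadst16_sse2 run the 1-D transform over both 8-column halves
// and then transpose the whole 16x16 block, so calling two of them with
// right_shift_16x16 in between yields the 2-D transform used by
// vp9_fht16x16_sse2 below.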
2545 void vp9_fht16x16_sse2(const int16_t *input, int16_t *output,
2546 int stride, int tx_type) {
2547 __m128i in0[16], in1[16];
2549 switch (tx_type) {
2550 case DCT_DCT:
2551 vp9_fdct16x16_sse2(input, output, stride);
2552 break;
2553 case ADST_DCT:
2554 load_buffer_16x16(input, in0, in1, stride);
2555 fadst16_sse2(in0, in1);
2556 right_shift_16x16(in0, in1);
2557 fdct16_sse2(in0, in1);
2558 write_buffer_16x16(output, in0, in1, 16);
2559 break;
2560 case DCT_ADST:
2561 load_buffer_16x16(input, in0, in1, stride);
2562 fdct16_sse2(in0, in1);
2563 right_shift_16x16(in0, in1);
2564 fadst16_sse2(in0, in1);
2565 write_buffer_16x16(output, in0, in1, 16);
2566 break;
2567 case ADST_ADST:
2568 load_buffer_16x16(input, in0, in1, stride);
2569 fadst16_sse2(in0, in1);
2570 right_shift_16x16(in0, in1);
2571 fadst16_sse2(in0, in1);
2572 write_buffer_16x16(output, in0, in1, 16);
2573 break;
2574 default:
2575 assert(0);
2576 break;
2577 }
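// The 32x32 forward DCTs below are generated by including the shared
// vp9_dct32x32_sse2.c template twice: with FDCT32x32_HIGH_PRECISION 0 it
// defines vp9_fdct32x32_rd_sse2, and with FDCT32x32_HIGH_PRECISION 1 it
// defines the full-precision vp9_fdct32x32_sse2.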
2580 #define FDCT32x32_2D vp9_fdct32x32_rd_sse2
2581 #define FDCT32x32_HIGH_PRECISION 0
2582 #include "vp9/encoder/x86/vp9_dct32x32_sse2.c"
2583 #undef FDCT32x32_2D
2584 #undef FDCT32x32_HIGH_PRECISION
2586 #define FDCT32x32_2D vp9_fdct32x32_sse2
2587 #define FDCT32x32_HIGH_PRECISION 1
2588 #include "vp9/encoder/x86/vp9_dct32x32_sse2.c" // NOLINT
2589 #undef FDCT32x32_2D
2590 #undef FDCT32x32_HIGH_PRECISION