/*
 * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <emmintrin.h>  // SSE2
#include "vp9/common/vp9_idct.h"  // for cospi constants
#include "vpx_ports/mem.h"
void vp9_fdct4x4_sse2(const int16_t *input, int16_t *output, int stride) {
  // The 2D transform is done with two passes which are actually pretty
  // similar. In the first one, we transform the columns and transpose
  // the results. In the second one, we transform the rows. To achieve that,
  // as the first pass results are transposed, we transpose the columns (that
  // is the transposed rows) and transpose the results (so that they go back
  // in normal/row positions).
  int pass;
  // Constants: when we use them, in one case, they are all the same. In all
  // others it's a pair of them that we need to repeat four times. This is
  // done by constructing the 32-bit constant corresponding to that pair.
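  // For example, k__cospi_p16_m16 below holds the 16-bit pair
  // {cospi_16_64, -cospi_16_64} repeated four times (assuming the usual
  // pair_set_epi16() from vp9_idct.h), so that
  //   _mm_madd_epi16(_mm_unpacklo_epi16(a, b), k__cospi_p16_m16)
  // computes a[i] * cospi_16_64 - b[i] * cospi_16_64 in each 32-bit lane.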
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
  const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
  const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
  const __m128i kOne = _mm_set1_epi16(1);
  __m128i in0, in1;
  in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
  in0 = _mm_unpacklo_epi64(
      in0, _mm_loadl_epi64((const __m128i *)(input + 1 * stride)));
  in1 = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
  in1 = _mm_unpacklo_epi64(
      _mm_loadl_epi64((const __m128i *)(input + 3 * stride)), in1);
  in0 = _mm_slli_epi16(in0, 4);
  in1 = _mm_slli_epi16(in1, 4);
  // if (i == 0 && input[0]) input[0] += 1;
  // The mask will only contain whether the first value is zero; all
  // other comparisons will fail as something shifted by 4 (above << 4)
  // can never be equal to one. To increment in the non-zero case, we
  // add the mask and one for the first element:
  // - if zero, mask = -1, v = v - 1 + 1 = v
  // - if non-zero, mask = 0, v = v + 0 + 1 = v + 1
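  // For example, if the first value is 0 after the shift, mask[0] = -1 and
  // 0 + (-1) + 1 = 0 stays put; if input[0] was 1 (so 16 after << 4),
  // mask[0] = 0 and 16 + 0 + 1 = 17 picks up the bias.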
  __m128i mask = _mm_cmpeq_epi16(in0, k__nonzero_bias_a);
  in0 = _mm_add_epi16(in0, mask);
  in0 = _mm_add_epi16(in0, k__nonzero_bias_b);
  // Do the two transform/transpose passes
  for (pass = 0; pass < 2; ++pass) {
    // Transform 1/2: Add/subtract
    const __m128i r0 = _mm_add_epi16(in0, in1);
    const __m128i r1 = _mm_sub_epi16(in0, in1);
    const __m128i r2 = _mm_unpacklo_epi64(r0, r1);
    const __m128i r3 = _mm_unpackhi_epi64(r0, r1);
    // Transform 1/2: Interleave to do the multiply by constants which gets
    // us into 32 bits.
    const __m128i t0 = _mm_unpacklo_epi16(r2, r3);
    const __m128i t2 = _mm_unpackhi_epi16(r2, r3);
    const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
    const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
    const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p08_p24);
    const __m128i u6 = _mm_madd_epi16(t2, k__cospi_p24_m08);
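    // The add/shift pairs below are the vector form of
    // dct_const_round_shift(): (x + DCT_CONST_ROUNDING) >> DCT_CONST_BITS
    // with DCT_CONST_ROUNDING = 1 << (DCT_CONST_BITS - 1), i.e. a
    // round-to-nearest right shift (per the vp9_idct.h definitions).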
    const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
    const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
    const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
    const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
    const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
    const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
    const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
    const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
    // Combine and transpose
    const __m128i res0 = _mm_packs_epi32(w0, w2);
    const __m128i res1 = _mm_packs_epi32(w4, w6);
    // 00 01 02 03 20 21 22 23
    // 10 11 12 13 30 31 32 33
    const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1);
    const __m128i tr0_1 = _mm_unpackhi_epi16(res0, res1);
    // 00 10 01 11 02 12 03 13
    // 20 30 21 31 22 32 23 33
    in0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
    in1 = _mm_unpackhi_epi32(tr0_0, tr0_1);
    in1 = _mm_shuffle_epi32(in1, 0x4E);
    // 00 10 20 30 01 11 21 31 in0 contains 0 followed by 1
    // 02 12 22 32 03 13 23 33 in1 contains 2 followed by 3
  }
  in1 = _mm_shuffle_epi32(in1, 0x4E);
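  // 0x4E is _MM_SHUFFLE(1, 0, 3, 2): swap the two 64-bit halves, putting
  // rows 2 and 3 back in natural order for the store below.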
  // Post-condition output and store it (v + 1) >> 2, taking advantage
  // of the fact that rows 1 and 3 are stored just after rows 0 and 2.
  __m128i out01 = _mm_add_epi16(in0, kOne);
  __m128i out23 = _mm_add_epi16(in1, kOne);
  out01 = _mm_srai_epi16(out01, 2);
  out23 = _mm_srai_epi16(out23, 2);
  _mm_storeu_si128((__m128i *)(output + 0 * 4), out01);
  _mm_storeu_si128((__m128i *)(output + 2 * 4), out23);
}

static INLINE void load_buffer_4x4(const int16_t *input, __m128i *in,
                                   int stride) {
  const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
  const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
  __m128i mask;
  in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
  in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
  in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
  in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
  in[0] = _mm_slli_epi16(in[0], 4);
  in[1] = _mm_slli_epi16(in[1], 4);
  in[2] = _mm_slli_epi16(in[2], 4);
  in[3] = _mm_slli_epi16(in[3], 4);
  mask = _mm_cmpeq_epi16(in[0], k__nonzero_bias_a);
  in[0] = _mm_add_epi16(in[0], mask);
  in[0] = _mm_add_epi16(in[0], k__nonzero_bias_b);
}

static INLINE void write_buffer_4x4(int16_t *output, __m128i *res) {
  const __m128i kOne = _mm_set1_epi16(1);
  __m128i in01 = _mm_unpacklo_epi64(res[0], res[1]);
  __m128i in23 = _mm_unpacklo_epi64(res[2], res[3]);
  __m128i out01 = _mm_add_epi16(in01, kOne);
  __m128i out23 = _mm_add_epi16(in23, kOne);
  out01 = _mm_srai_epi16(out01, 2);
  out23 = _mm_srai_epi16(out23, 2);
  _mm_store_si128((__m128i *)(output + 0 * 8), out01);
  _mm_store_si128((__m128i *)(output + 1 * 8), out23);
}

static INLINE void transpose_4x4(__m128i *res) {
  // Combine and transpose
  // 00 01 02 03 20 21 22 23
  // 10 11 12 13 30 31 32 33
  const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
  const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]);
  // 00 10 01 11 02 12 03 13
  // 20 30 21 31 22 32 23 33
  res[0] = _mm_unpacklo_epi32(tr0_0, tr0_1);
  res[2] = _mm_unpackhi_epi32(tr0_0, tr0_1);
  // 00 10 20 30 01 11 21 31
  // 02 12 22 32 03 13 23 33
  // only use the first 4 16-bit integers
  res[1] = _mm_unpackhi_epi64(res[0], res[0]);
  res[3] = _mm_unpackhi_epi64(res[2], res[2]);
}

void fdct4_1d_sse2(__m128i *in) {
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
  __m128i u[4], v[4];

  u[0] = _mm_unpacklo_epi16(in[0], in[1]);
  u[1] = _mm_unpacklo_epi16(in[3], in[2]);
  v[0] = _mm_add_epi16(u[0], u[1]);
  v[1] = _mm_sub_epi16(u[0], u[1]);
  u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16);  // 0
  u[1] = _mm_madd_epi16(v[0], k__cospi_p16_m16);  // 2
  u[2] = _mm_madd_epi16(v[1], k__cospi_p08_p24);  // 1
  u[3] = _mm_madd_epi16(v[1], k__cospi_p24_m08);  // 3
  v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
  v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
  v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
  v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
  u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
  u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
  u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
  u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
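  // u[0]/u[1] round to DCT outputs 0 and 2, u[2]/u[3] to outputs 1 and 3
  // (see the lane comments above), so the packs below leave rows 0|2 in
  // in[0] and rows 1|3 in in[1].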
  in[0] = _mm_packs_epi32(u[0], u[1]);
  in[1] = _mm_packs_epi32(u[2], u[3]);
  transpose_4x4(in);
}

void fadst4_1d_sse2(__m128i *in) {
  const __m128i k__sinpi_p01_p02 = pair_set_epi16(sinpi_1_9, sinpi_2_9);
  const __m128i k__sinpi_p04_m01 = pair_set_epi16(sinpi_4_9, -sinpi_1_9);
  const __m128i k__sinpi_p03_p04 = pair_set_epi16(sinpi_3_9, sinpi_4_9);
  const __m128i k__sinpi_m03_p02 = pair_set_epi16(-sinpi_3_9, sinpi_2_9);
  const __m128i k__sinpi_p03_p03 = _mm_set1_epi16(sinpi_3_9);
  const __m128i kZero = _mm_set1_epi16(0);
  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
  __m128i u[8], v[8];
  __m128i in7 = _mm_add_epi16(in[0], in[1]);
  u[0] = _mm_unpacklo_epi16(in[0], in[1]);
  u[1] = _mm_unpacklo_epi16(in[2], in[3]);
  u[2] = _mm_unpacklo_epi16(in7, kZero);
  u[3] = _mm_unpacklo_epi16(in[2], kZero);
  u[4] = _mm_unpacklo_epi16(in[3], kZero);
  v[0] = _mm_madd_epi16(u[0], k__sinpi_p01_p02);  // s0 + s2
  v[1] = _mm_madd_epi16(u[1], k__sinpi_p03_p04);  // s4 + s5
  v[2] = _mm_madd_epi16(u[2], k__sinpi_p03_p03);  // x1
  v[3] = _mm_madd_epi16(u[0], k__sinpi_p04_m01);  // s1 - s3
  v[4] = _mm_madd_epi16(u[1], k__sinpi_m03_p02);  // -s4 + s6
  v[5] = _mm_madd_epi16(u[3], k__sinpi_p03_p03);  // s4
  v[6] = _mm_madd_epi16(u[4], k__sinpi_p03_p03);
  u[0] = _mm_add_epi32(v[0], v[1]);
  u[1] = _mm_sub_epi32(v[2], v[6]);
  u[2] = _mm_add_epi32(v[3], v[4]);
  u[3] = _mm_sub_epi32(u[2], u[0]);
  u[4] = _mm_slli_epi32(v[5], 2);
  u[5] = _mm_sub_epi32(u[4], v[5]);
  u[6] = _mm_add_epi32(u[3], u[5]);
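  // As a reading aid against the scalar fadst4: u[0] = x0 + x3,
  // u[1] = sinpi_3_9 * (in0 + in1 - in3), u[2] = x2 - x3, and
  // u[6] = x2 - x0 + x3 (the 3 * s4 built in u[5] cancels the -2 * s4
  // accumulated in u[3]).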
  v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
  v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
  v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
  v[3] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
  u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
  u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
  u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
  u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
  in[0] = _mm_packs_epi32(u[0], u[2]);
  in[1] = _mm_packs_epi32(u[1], u[3]);
  transpose_4x4(in);
}

void vp9_short_fht4x4_sse2(const int16_t *input, int16_t *output,
                           int stride, int tx_type) {
  __m128i in[4];
  load_buffer_4x4(input, in, stride);
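  // A sketch of the tx_type dispatch done here (vp9 convention: 0 = DCT_DCT,
  // 1 = ADST_DCT, 2 = DCT_ADST, 3 = ADST_ADST), each 1-D helper transforming
  // and transposing in place:
  //   case 0: fdct4_1d_sse2(in);  fdct4_1d_sse2(in);  break;
  //   case 1: fadst4_1d_sse2(in); fdct4_1d_sse2(in);  break;
  //   case 2: fdct4_1d_sse2(in);  fadst4_1d_sse2(in); break;
  //   case 3: fadst4_1d_sse2(in); fadst4_1d_sse2(in); break;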
  write_buffer_4x4(output, in);
}

void vp9_fdct8x8_sse2(const int16_t *input, int16_t *output, int stride) {
  int pass;
  // Constants: when we use them, in one case, they are all the same. In all
  // others it's a pair of them that we need to repeat four times. This is
  // done by constructing the 32-bit constant corresponding to that pair.
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
  __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
  __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
  __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
  __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
  // Pre-condition input (shift by two)
  in0 = _mm_slli_epi16(in0, 2);
  in1 = _mm_slli_epi16(in1, 2);
  in2 = _mm_slli_epi16(in2, 2);
  in3 = _mm_slli_epi16(in3, 2);
  in4 = _mm_slli_epi16(in4, 2);
  in5 = _mm_slli_epi16(in5, 2);
  in6 = _mm_slli_epi16(in6, 2);
  in7 = _mm_slli_epi16(in7, 2);
  // We do two passes, first the columns, then the rows. The results of the
  // first pass are transposed so that the same column code can be reused. The
  // results of the second pass are also transposed so that the rows (processed
  // as columns) are put back in row positions.
  for (pass = 0; pass < 2; pass++) {
    // To store results of each pass before the transpose.
    __m128i res0, res1, res2, res3, res4, res5, res6, res7;
    const __m128i q0 = _mm_add_epi16(in0, in7);
    const __m128i q1 = _mm_add_epi16(in1, in6);
    const __m128i q2 = _mm_add_epi16(in2, in5);
    const __m128i q3 = _mm_add_epi16(in3, in4);
    const __m128i q4 = _mm_sub_epi16(in3, in4);
    const __m128i q5 = _mm_sub_epi16(in2, in5);
    const __m128i q6 = _mm_sub_epi16(in1, in6);
    const __m128i q7 = _mm_sub_epi16(in0, in7);
    // Work on first four results
    {
      const __m128i r0 = _mm_add_epi16(q0, q3);
      const __m128i r1 = _mm_add_epi16(q1, q2);
      const __m128i r2 = _mm_sub_epi16(q1, q2);
      const __m128i r3 = _mm_sub_epi16(q0, q3);
      // Interleave to do the multiply by constants which gets us into 32 bits
      const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
      const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
      const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
      const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
      const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
      const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
      const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
      const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16);
      const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
      const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08);
      const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
      const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24);
      // dct_const_round_shift
      const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
      const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
      const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
      const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
      const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
      const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
      const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
      const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
      const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
      const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
      const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
      const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
      const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
      const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
      const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
      const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
      res0 = _mm_packs_epi32(w0, w1);
      res4 = _mm_packs_epi32(w2, w3);
      res2 = _mm_packs_epi32(w4, w5);
      res6 = _mm_packs_epi32(w6, w7);
    }
    // Work on next four results
    {
      // Interleave to do the multiply by constants which gets us into 32 bits
      const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
      const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
      const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16);
      const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16);
      const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16);
      const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16);
      // dct_const_round_shift
      const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING);
      const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING);
      const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING);
      const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING);
      const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS);
      const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS);
      const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS);
      const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS);
      const __m128i r0 = _mm_packs_epi32(s0, s1);
      const __m128i r1 = _mm_packs_epi32(s2, s3);
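      // r0 = (q6 - q5) * cospi_16_64 and r1 = (q6 + q5) * cospi_16_64, both
      // dct_const_round_shift'ed: the 16/16 rotation feeding the odd
      // outputs below.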
      const __m128i x0 = _mm_add_epi16(q4, r0);
      const __m128i x1 = _mm_sub_epi16(q4, r0);
      const __m128i x2 = _mm_sub_epi16(q7, r1);
      const __m128i x3 = _mm_add_epi16(q7, r1);
      // Interleave to do the multiply by constants which gets us into 32 bits
      const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
      const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
      const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
      const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
      const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04);
      const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04);
      const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28);
      const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28);
      const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20);
      const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20);
      const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12);
      const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12);
      // dct_const_round_shift
      const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
      const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
      const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
      const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
      const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
      const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
      const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
      const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
      const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
      const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
      const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
      const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
      const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
      const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
      const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
      const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
      res1 = _mm_packs_epi32(w0, w1);
      res7 = _mm_packs_epi32(w2, w3);
      res5 = _mm_packs_epi32(w4, w5);
      res3 = _mm_packs_epi32(w6, w7);
    }
    // Transpose the 8x8.
    {
      // 00 01 02 03 04 05 06 07
      // 10 11 12 13 14 15 16 17
      // 20 21 22 23 24 25 26 27
      // 30 31 32 33 34 35 36 37
      // 40 41 42 43 44 45 46 47
      // 50 51 52 53 54 55 56 57
      // 60 61 62 63 64 65 66 67
      // 70 71 72 73 74 75 76 77
      const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1);
      const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3);
      const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1);
      const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3);
      const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5);
      const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7);
      const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5);
      const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7);
      // 00 10 01 11 02 12 03 13
      // 20 30 21 31 22 32 23 33
      // 04 14 05 15 06 16 07 17
      // 24 34 25 35 26 36 27 37
      // 40 50 41 51 42 52 43 53
      // 60 70 61 71 62 72 63 73
      // 44 54 45 55 46 56 47 57
      // 64 74 65 75 66 76 67 77
      const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
      const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
      const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
      const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
      const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
      const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
      const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
      const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
      // 00 10 20 30 01 11 21 31
      // 40 50 60 70 41 51 61 71
      // 02 12 22 32 03 13 23 33
      // 42 52 62 72 43 53 63 73
      // 04 14 24 34 05 15 25 35
      // 44 54 64 74 45 55 65 75
      // 06 16 26 36 07 17 27 37
      // 46 56 66 76 47 57 67 77
      in0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
      in1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
      in2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
      in3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
      in4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
      in5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
      in6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
      in7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
      // 00 10 20 30 40 50 60 70
      // 01 11 21 31 41 51 61 71
      // 02 12 22 32 42 52 62 72
      // 03 13 23 33 43 53 63 73
      // 04 14 24 34 44 54 64 74
      // 05 15 25 35 45 55 65 75
      // 06 16 26 36 46 56 66 76
      // 07 17 27 37 47 57 67 77
    }
  }
  // Post-condition output (division by two) and store it.
  // Signed 16-bit division by two using shifts:
  //    n / 2 = (n - (n >> 15)) >> 1
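  // e.g. n = -3: sign = -1 and (-3 - (-1)) >> 1 = -2 >> 1 = -1, matching
  // C's truncation toward zero, where a plain -3 >> 1 would give -2.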
  const __m128i sign_in0 = _mm_srai_epi16(in0, 15);
  const __m128i sign_in1 = _mm_srai_epi16(in1, 15);
  const __m128i sign_in2 = _mm_srai_epi16(in2, 15);
  const __m128i sign_in3 = _mm_srai_epi16(in3, 15);
  const __m128i sign_in4 = _mm_srai_epi16(in4, 15);
  const __m128i sign_in5 = _mm_srai_epi16(in5, 15);
  const __m128i sign_in6 = _mm_srai_epi16(in6, 15);
  const __m128i sign_in7 = _mm_srai_epi16(in7, 15);
  in0 = _mm_sub_epi16(in0, sign_in0);
  in1 = _mm_sub_epi16(in1, sign_in1);
  in2 = _mm_sub_epi16(in2, sign_in2);
  in3 = _mm_sub_epi16(in3, sign_in3);
  in4 = _mm_sub_epi16(in4, sign_in4);
  in5 = _mm_sub_epi16(in5, sign_in5);
  in6 = _mm_sub_epi16(in6, sign_in6);
  in7 = _mm_sub_epi16(in7, sign_in7);
  in0 = _mm_srai_epi16(in0, 1);
  in1 = _mm_srai_epi16(in1, 1);
  in2 = _mm_srai_epi16(in2, 1);
  in3 = _mm_srai_epi16(in3, 1);
  in4 = _mm_srai_epi16(in4, 1);
  in5 = _mm_srai_epi16(in5, 1);
  in6 = _mm_srai_epi16(in6, 1);
  in7 = _mm_srai_epi16(in7, 1);
  _mm_store_si128((__m128i *)(output + 0 * 8), in0);
  _mm_store_si128((__m128i *)(output + 1 * 8), in1);
  _mm_store_si128((__m128i *)(output + 2 * 8), in2);
  _mm_store_si128((__m128i *)(output + 3 * 8), in3);
  _mm_store_si128((__m128i *)(output + 4 * 8), in4);
  _mm_store_si128((__m128i *)(output + 5 * 8), in5);
  _mm_store_si128((__m128i *)(output + 6 * 8), in6);
  _mm_store_si128((__m128i *)(output + 7 * 8), in7);
}

static INLINE void load_buffer_8x8(const int16_t *input, __m128i *in,
                                   int stride) {
  in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
  in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
  in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
  in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
  in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
  in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
  in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
  in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
  in[0] = _mm_slli_epi16(in[0], 2);
  in[1] = _mm_slli_epi16(in[1], 2);
  in[2] = _mm_slli_epi16(in[2], 2);
  in[3] = _mm_slli_epi16(in[3], 2);
  in[4] = _mm_slli_epi16(in[4], 2);
  in[5] = _mm_slli_epi16(in[5], 2);
  in[6] = _mm_slli_epi16(in[6], 2);
  in[7] = _mm_slli_epi16(in[7], 2);
}

// right shift and rounding
static INLINE void right_shift_8x8(__m128i *res, int const bit) {
  const __m128i kOne = _mm_set1_epi16(1);
  const int bit_m02 = bit - 2;
  __m128i sign0 = _mm_srai_epi16(res[0], 15);
  __m128i sign1 = _mm_srai_epi16(res[1], 15);
  __m128i sign2 = _mm_srai_epi16(res[2], 15);
  __m128i sign3 = _mm_srai_epi16(res[3], 15);
  __m128i sign4 = _mm_srai_epi16(res[4], 15);
  __m128i sign5 = _mm_srai_epi16(res[5], 15);
  __m128i sign6 = _mm_srai_epi16(res[6], 15);
  __m128i sign7 = _mm_srai_epi16(res[7], 15);
  if (bit_m02 >= 0) {
    __m128i k_const_rounding = _mm_slli_epi16(kOne, bit_m02);
    res[0] = _mm_add_epi16(res[0], k_const_rounding);
    res[1] = _mm_add_epi16(res[1], k_const_rounding);
    res[2] = _mm_add_epi16(res[2], k_const_rounding);
    res[3] = _mm_add_epi16(res[3], k_const_rounding);
    res[4] = _mm_add_epi16(res[4], k_const_rounding);
    res[5] = _mm_add_epi16(res[5], k_const_rounding);
    res[6] = _mm_add_epi16(res[6], k_const_rounding);
    res[7] = _mm_add_epi16(res[7], k_const_rounding);
  }
  res[0] = _mm_sub_epi16(res[0], sign0);
  res[1] = _mm_sub_epi16(res[1], sign1);
  res[2] = _mm_sub_epi16(res[2], sign2);
  res[3] = _mm_sub_epi16(res[3], sign3);
  res[4] = _mm_sub_epi16(res[4], sign4);
  res[5] = _mm_sub_epi16(res[5], sign5);
  res[6] = _mm_sub_epi16(res[6], sign6);
  res[7] = _mm_sub_epi16(res[7], sign7);
  res[0] = _mm_srai_epi16(res[0], bit);
  res[1] = _mm_srai_epi16(res[1], bit);
  res[2] = _mm_srai_epi16(res[2], bit);
  res[3] = _mm_srai_epi16(res[3], bit);
  res[4] = _mm_srai_epi16(res[4], bit);
  res[5] = _mm_srai_epi16(res[5], bit);
  res[6] = _mm_srai_epi16(res[6], bit);
  res[7] = _mm_srai_epi16(res[7], bit);
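  // With bit == 1 (as used by vp9_short_fht8x8_sse2 below), bit - 2 < 0 so
  // no rounding constant is added and this is n / 2 truncating toward zero,
  // the same (n - (n >> 15)) >> 1 trick used in vp9_fdct8x8_sse2 above.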
}

static INLINE void write_buffer_8x8(int16_t *output, __m128i *res,
                                    int stride) {
  _mm_store_si128((__m128i *)(output + 0 * stride), res[0]);
  _mm_store_si128((__m128i *)(output + 1 * stride), res[1]);
  _mm_store_si128((__m128i *)(output + 2 * stride), res[2]);
  _mm_store_si128((__m128i *)(output + 3 * stride), res[3]);
  _mm_store_si128((__m128i *)(output + 4 * stride), res[4]);
  _mm_store_si128((__m128i *)(output + 5 * stride), res[5]);
  _mm_store_si128((__m128i *)(output + 6 * stride), res[6]);
  _mm_store_si128((__m128i *)(output + 7 * stride), res[7]);
}

// perform in-place transpose
static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
  const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]);
  const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]);
  const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
  const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);
  const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]);
  const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]);
  // 00 10 01 11 02 12 03 13
  // 20 30 21 31 22 32 23 33
  // 04 14 05 15 06 16 07 17
  // 24 34 25 35 26 36 27 37
  // 40 50 41 51 42 52 43 53
  // 60 70 61 71 62 72 63 73
  // 44 54 45 55 46 56 47 57
  // 64 74 65 75 66 76 67 77
  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5);
  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
  const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5);
  const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3);
  const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3);
  const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
  // 00 10 20 30 01 11 21 31
  // 40 50 60 70 41 51 61 71
  // 02 12 22 32 03 13 23 33
  // 42 52 62 72 43 53 63 73
  // 04 14 24 34 05 15 25 35
  // 44 54 64 74 45 55 65 75
  // 06 16 26 36 07 17 27 37
  // 46 56 66 76 47 57 67 77
  res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
  res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
  res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
  res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
  res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5);
  res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5);
  res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
  res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
  // 00 10 20 30 40 50 60 70
  // 01 11 21 31 41 51 61 71
  // 02 12 22 32 42 52 62 72
  // 03 13 23 33 43 53 63 73
  // 04 14 24 34 44 54 64 74
  // 05 15 25 35 45 55 65 75
  // 06 16 26 36 46 56 66 76
  // 07 17 27 37 47 57 67 77
}

void fdct8_1d_sse2(__m128i *in) {
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
  __m128i u0, u1, u2, u3, u4, u5, u6, u7;
  __m128i v0, v1, v2, v3, v4, v5, v6, v7;
  __m128i s0, s1, s2, s3, s4, s5, s6, s7;
  s0 = _mm_add_epi16(in[0], in[7]);
  s1 = _mm_add_epi16(in[1], in[6]);
  s2 = _mm_add_epi16(in[2], in[5]);
  s3 = _mm_add_epi16(in[3], in[4]);
  s4 = _mm_sub_epi16(in[3], in[4]);
  s5 = _mm_sub_epi16(in[2], in[5]);
  s6 = _mm_sub_epi16(in[1], in[6]);
  s7 = _mm_sub_epi16(in[0], in[7]);
  u0 = _mm_add_epi16(s0, s3);
  u1 = _mm_add_epi16(s1, s2);
  u2 = _mm_sub_epi16(s1, s2);
  u3 = _mm_sub_epi16(s0, s3);
  // interleave and perform butterfly multiplication/addition
  v0 = _mm_unpacklo_epi16(u0, u1);
  v1 = _mm_unpackhi_epi16(u0, u1);
  v2 = _mm_unpacklo_epi16(u2, u3);
  v3 = _mm_unpackhi_epi16(u2, u3);
  u0 = _mm_madd_epi16(v0, k__cospi_p16_p16);
  u1 = _mm_madd_epi16(v1, k__cospi_p16_p16);
  u2 = _mm_madd_epi16(v0, k__cospi_p16_m16);
  u3 = _mm_madd_epi16(v1, k__cospi_p16_m16);
  u4 = _mm_madd_epi16(v2, k__cospi_p24_p08);
  u5 = _mm_madd_epi16(v3, k__cospi_p24_p08);
  u6 = _mm_madd_epi16(v2, k__cospi_m08_p24);
  u7 = _mm_madd_epi16(v3, k__cospi_m08_p24);
  // shift and rounding
  v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
  v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
  v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
  v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
  v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
  v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
  v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
  v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
  u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
  u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
  u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
  u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
  u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
  u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
  u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
  u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
  in[0] = _mm_packs_epi32(u0, u1);
  in[2] = _mm_packs_epi32(u4, u5);
  in[4] = _mm_packs_epi32(u2, u3);
  in[6] = _mm_packs_epi32(u6, u7);
  // interleave and perform butterfly multiplication/addition
  u0 = _mm_unpacklo_epi16(s6, s5);
  u1 = _mm_unpackhi_epi16(s6, s5);
  v0 = _mm_madd_epi16(u0, k__cospi_p16_m16);
  v1 = _mm_madd_epi16(u1, k__cospi_p16_m16);
  v2 = _mm_madd_epi16(u0, k__cospi_p16_p16);
  v3 = _mm_madd_epi16(u1, k__cospi_p16_p16);
  // shift and rounding
  u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
  u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
  u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
  u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
  v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
  v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
  v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
  v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
  u0 = _mm_packs_epi32(v0, v1);
  u1 = _mm_packs_epi32(v2, v3);
  s0 = _mm_add_epi16(s4, u0);
  s1 = _mm_sub_epi16(s4, u0);
  s2 = _mm_sub_epi16(s7, u1);
  s3 = _mm_add_epi16(s7, u1);
  u0 = _mm_unpacklo_epi16(s0, s3);
  u1 = _mm_unpackhi_epi16(s0, s3);
  u2 = _mm_unpacklo_epi16(s1, s2);
  u3 = _mm_unpackhi_epi16(s1, s2);
  v0 = _mm_madd_epi16(u0, k__cospi_p28_p04);
  v1 = _mm_madd_epi16(u1, k__cospi_p28_p04);
  v2 = _mm_madd_epi16(u2, k__cospi_p12_p20);
  v3 = _mm_madd_epi16(u3, k__cospi_p12_p20);
  v4 = _mm_madd_epi16(u2, k__cospi_m20_p12);
  v5 = _mm_madd_epi16(u3, k__cospi_m20_p12);
  v6 = _mm_madd_epi16(u0, k__cospi_m04_p28);
  v7 = _mm_madd_epi16(u1, k__cospi_m04_p28);
  // shift and rounding
  u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
  u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
  u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
  u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
  u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING);
  u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING);
  u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING);
  u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING);
  v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
  v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
  v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
  v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
  v4 = _mm_srai_epi32(u4, DCT_CONST_BITS);
  v5 = _mm_srai_epi32(u5, DCT_CONST_BITS);
  v6 = _mm_srai_epi32(u6, DCT_CONST_BITS);
  v7 = _mm_srai_epi32(u7, DCT_CONST_BITS);
  in[1] = _mm_packs_epi32(v0, v1);
  in[3] = _mm_packs_epi32(v4, v5);
  in[5] = _mm_packs_epi32(v2, v3);
  in[7] = _mm_packs_epi32(v6, v7);
  array_transpose_8x8(in, in);
}

void fadst8_1d_sse2(__m128i *in) {
  const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
  const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
  const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
  const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64);
  const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64);
  const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
  const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64);
  const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64);
  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
  const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i k__const_0 = _mm_set1_epi16(0);
  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
  __m128i u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15;
  __m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15;
  __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;
  __m128i s0, s1, s2, s3, s4, s5, s6, s7;
  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
  // properly aligned for butterfly input
  in0 = in[7];
  in1 = in[0];
  in2 = in[5];
  in3 = in[2];
  in4 = in[3];
  in5 = in[4];
  in6 = in[1];
  in7 = in[6];
  // column transformation
  // interleave and multiply/add into 32-bit integer
  s0 = _mm_unpacklo_epi16(in0, in1);
  s1 = _mm_unpackhi_epi16(in0, in1);
  s2 = _mm_unpacklo_epi16(in2, in3);
  s3 = _mm_unpackhi_epi16(in2, in3);
  s4 = _mm_unpacklo_epi16(in4, in5);
  s5 = _mm_unpackhi_epi16(in4, in5);
  s6 = _mm_unpacklo_epi16(in6, in7);
  s7 = _mm_unpackhi_epi16(in6, in7);
  u0 = _mm_madd_epi16(s0, k__cospi_p02_p30);
  u1 = _mm_madd_epi16(s1, k__cospi_p02_p30);
  u2 = _mm_madd_epi16(s0, k__cospi_p30_m02);
  u3 = _mm_madd_epi16(s1, k__cospi_p30_m02);
  u4 = _mm_madd_epi16(s2, k__cospi_p10_p22);
  u5 = _mm_madd_epi16(s3, k__cospi_p10_p22);
  u6 = _mm_madd_epi16(s2, k__cospi_p22_m10);
  u7 = _mm_madd_epi16(s3, k__cospi_p22_m10);
  u8 = _mm_madd_epi16(s4, k__cospi_p18_p14);
  u9 = _mm_madd_epi16(s5, k__cospi_p18_p14);
  u10 = _mm_madd_epi16(s4, k__cospi_p14_m18);
  u11 = _mm_madd_epi16(s5, k__cospi_p14_m18);
  u12 = _mm_madd_epi16(s6, k__cospi_p26_p06);
  u13 = _mm_madd_epi16(s7, k__cospi_p26_p06);
  u14 = _mm_madd_epi16(s6, k__cospi_p06_m26);
  u15 = _mm_madd_epi16(s7, k__cospi_p06_m26);
  w0 = _mm_add_epi32(u0, u8);
  w1 = _mm_add_epi32(u1, u9);
  w2 = _mm_add_epi32(u2, u10);
  w3 = _mm_add_epi32(u3, u11);
  w4 = _mm_add_epi32(u4, u12);
  w5 = _mm_add_epi32(u5, u13);
  w6 = _mm_add_epi32(u6, u14);
  w7 = _mm_add_epi32(u7, u15);
  w8 = _mm_sub_epi32(u0, u8);
  w9 = _mm_sub_epi32(u1, u9);
  w10 = _mm_sub_epi32(u2, u10);
  w11 = _mm_sub_epi32(u3, u11);
  w12 = _mm_sub_epi32(u4, u12);
  w13 = _mm_sub_epi32(u5, u13);
  w14 = _mm_sub_epi32(u6, u14);
  w15 = _mm_sub_epi32(u7, u15);
  // shift and rounding
  v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING);
  v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING);
  v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING);
  v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING);
  v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING);
  v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING);
  v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING);
  v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING);
  v8 = _mm_add_epi32(w8, k__DCT_CONST_ROUNDING);
  v9 = _mm_add_epi32(w9, k__DCT_CONST_ROUNDING);
  v10 = _mm_add_epi32(w10, k__DCT_CONST_ROUNDING);
  v11 = _mm_add_epi32(w11, k__DCT_CONST_ROUNDING);
  v12 = _mm_add_epi32(w12, k__DCT_CONST_ROUNDING);
  v13 = _mm_add_epi32(w13, k__DCT_CONST_ROUNDING);
  v14 = _mm_add_epi32(w14, k__DCT_CONST_ROUNDING);
  v15 = _mm_add_epi32(w15, k__DCT_CONST_ROUNDING);
  u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
  u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
  u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
  u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
  u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
  u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
  u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
  u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
  u8 = _mm_srai_epi32(v8, DCT_CONST_BITS);
  u9 = _mm_srai_epi32(v9, DCT_CONST_BITS);
  u10 = _mm_srai_epi32(v10, DCT_CONST_BITS);
  u11 = _mm_srai_epi32(v11, DCT_CONST_BITS);
  u12 = _mm_srai_epi32(v12, DCT_CONST_BITS);
  u13 = _mm_srai_epi32(v13, DCT_CONST_BITS);
  u14 = _mm_srai_epi32(v14, DCT_CONST_BITS);
  u15 = _mm_srai_epi32(v15, DCT_CONST_BITS);
  // back to 16-bit and pack 8 integers into __m128i
  in[0] = _mm_packs_epi32(u0, u1);
  in[1] = _mm_packs_epi32(u2, u3);
  in[2] = _mm_packs_epi32(u4, u5);
  in[3] = _mm_packs_epi32(u6, u7);
  in[4] = _mm_packs_epi32(u8, u9);
  in[5] = _mm_packs_epi32(u10, u11);
  in[6] = _mm_packs_epi32(u12, u13);
  in[7] = _mm_packs_epi32(u14, u15);
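  // in[0..7] now hold x0..x7 of the scalar fadst8 after its first stage,
  // i.e. dct_const_round_shift(s[i] +/- s[i + 4]), packed back to 16 bits.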
  s0 = _mm_add_epi16(in[0], in[2]);
  s1 = _mm_add_epi16(in[1], in[3]);
  s2 = _mm_sub_epi16(in[0], in[2]);
  s3 = _mm_sub_epi16(in[1], in[3]);
  u0 = _mm_unpacklo_epi16(in[4], in[5]);
  u1 = _mm_unpackhi_epi16(in[4], in[5]);
  u2 = _mm_unpacklo_epi16(in[6], in[7]);
  u3 = _mm_unpackhi_epi16(in[6], in[7]);
  v0 = _mm_madd_epi16(u0, k__cospi_p08_p24);
  v1 = _mm_madd_epi16(u1, k__cospi_p08_p24);
  v2 = _mm_madd_epi16(u0, k__cospi_p24_m08);
  v3 = _mm_madd_epi16(u1, k__cospi_p24_m08);
  v4 = _mm_madd_epi16(u2, k__cospi_m24_p08);
  v5 = _mm_madd_epi16(u3, k__cospi_m24_p08);
  v6 = _mm_madd_epi16(u2, k__cospi_p08_p24);
  v7 = _mm_madd_epi16(u3, k__cospi_p08_p24);
  w0 = _mm_add_epi32(v0, v4);
  w1 = _mm_add_epi32(v1, v5);
  w2 = _mm_add_epi32(v2, v6);
  w3 = _mm_add_epi32(v3, v7);
  w4 = _mm_sub_epi32(v0, v4);
  w5 = _mm_sub_epi32(v1, v5);
  w6 = _mm_sub_epi32(v2, v6);
  w7 = _mm_sub_epi32(v3, v7);
  v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING);
  v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING);
  v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING);
  v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING);
  v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING);
  v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING);
  v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING);
  v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING);
  u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
  u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
  u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
  u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
  u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
  u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
  u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
  u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
  // back to 16-bit integers
  s4 = _mm_packs_epi32(u0, u1);
  s5 = _mm_packs_epi32(u2, u3);
  s6 = _mm_packs_epi32(u4, u5);
  s7 = _mm_packs_epi32(u6, u7);
  u0 = _mm_unpacklo_epi16(s2, s3);
  u1 = _mm_unpackhi_epi16(s2, s3);
  u2 = _mm_unpacklo_epi16(s6, s7);
  u3 = _mm_unpackhi_epi16(s6, s7);
  v0 = _mm_madd_epi16(u0, k__cospi_p16_p16);
  v1 = _mm_madd_epi16(u1, k__cospi_p16_p16);
  v2 = _mm_madd_epi16(u0, k__cospi_p16_m16);
  v3 = _mm_madd_epi16(u1, k__cospi_p16_m16);
  v4 = _mm_madd_epi16(u2, k__cospi_p16_p16);
  v5 = _mm_madd_epi16(u3, k__cospi_p16_p16);
  v6 = _mm_madd_epi16(u2, k__cospi_p16_m16);
  v7 = _mm_madd_epi16(u3, k__cospi_p16_m16);
  u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
  u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
  u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
  u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
  u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING);
  u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING);
  u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING);
  u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING);
  v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
  v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
  v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
  v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
  v4 = _mm_srai_epi32(u4, DCT_CONST_BITS);
  v5 = _mm_srai_epi32(u5, DCT_CONST_BITS);
  v6 = _mm_srai_epi32(u6, DCT_CONST_BITS);
  v7 = _mm_srai_epi32(u7, DCT_CONST_BITS);
  s2 = _mm_packs_epi32(v0, v1);
  s3 = _mm_packs_epi32(v2, v3);
  s6 = _mm_packs_epi32(v4, v5);
  s7 = _mm_packs_epi32(v6, v7);
  // FIXME(jingning): do subtract using bit inversion?
  in[0] = s0;
  in[1] = _mm_sub_epi16(k__const_0, s4);
  in[2] = s6;
  in[3] = _mm_sub_epi16(k__const_0, s2);
  in[4] = s3;
  in[5] = _mm_sub_epi16(k__const_0, s7);
  in[6] = s5;
  in[7] = _mm_sub_epi16(k__const_0, s1);
  array_transpose_8x8(in, in);
}

void vp9_short_fht8x8_sse2(const int16_t *input, int16_t *output,
                           int stride, int tx_type) {
  __m128i in[8];
  load_buffer_8x8(input, in, stride);
  // tx_type dispatch as sketched for vp9_short_fht4x4_sse2 above, with the
  // 8-point helpers; e.g. case 3 (ADST_ADST) runs fadst8_1d_sse2(in) twice.
  right_shift_8x8(in, 1);
  write_buffer_8x8(output, in, 8);
}

void vp9_fdct16x16_sse2(const int16_t *input, int16_t *output, int stride) {
  // The 2D transform is done with two passes which are actually pretty
  // similar. In the first one, we transform the columns and transpose
  // the results. In the second one, we transform the rows. To achieve that,
  // as the first pass results are transposed, we transpose the columns (that
  // is the transposed rows) and transpose the results (so that they go back
  // in normal/row positions).
  int pass;
  // We need an intermediate buffer between passes.
  DECLARE_ALIGNED_ARRAY(16, int16_t, intermediate, 256);
  const int16_t *in = input;
  int16_t *out = intermediate;
  // Constants: when we use them, in one case, they are all the same. In all
  // others it's a pair of them that we need to repeat four times. This is
  // done by constructing the 32-bit constant corresponding to that pair.
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
  const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
  const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64);
  const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64);
  const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
  const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
  const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64);
  const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64);
  const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
  const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
  const __m128i kOne = _mm_set1_epi16(1);
  // Do the two transform/transpose passes
  for (pass = 0; pass < 2; ++pass) {
    // We process eight columns (transposed rows in second pass) at a time.
    int column_start;
    for (column_start = 0; column_start < 16; column_start += 8) {
      __m128i in00, in01, in02, in03, in04, in05, in06, in07;
      __m128i in08, in09, in10, in11, in12, in13, in14, in15;
      __m128i input0, input1, input2, input3, input4, input5, input6, input7;
      __m128i step1_0, step1_1, step1_2, step1_3;
      __m128i step1_4, step1_5, step1_6, step1_7;
      __m128i step2_1, step2_2, step2_3, step2_4, step2_5, step2_6;
      __m128i step3_0, step3_1, step3_2, step3_3;
      __m128i step3_4, step3_5, step3_6, step3_7;
      __m128i res00, res01, res02, res03, res04, res05, res06, res07;
      __m128i res08, res09, res10, res11, res12, res13, res14, res15;
      // Load and pre-condition input.
      if (0 == pass) {
        in00 = _mm_load_si128((const __m128i *)(in + 0 * stride));
        in01 = _mm_load_si128((const __m128i *)(in + 1 * stride));
        in02 = _mm_load_si128((const __m128i *)(in + 2 * stride));
        in03 = _mm_load_si128((const __m128i *)(in + 3 * stride));
        in04 = _mm_load_si128((const __m128i *)(in + 4 * stride));
        in05 = _mm_load_si128((const __m128i *)(in + 5 * stride));
        in06 = _mm_load_si128((const __m128i *)(in + 6 * stride));
        in07 = _mm_load_si128((const __m128i *)(in + 7 * stride));
        in08 = _mm_load_si128((const __m128i *)(in + 8 * stride));
        in09 = _mm_load_si128((const __m128i *)(in + 9 * stride));
        in10 = _mm_load_si128((const __m128i *)(in + 10 * stride));
        in11 = _mm_load_si128((const __m128i *)(in + 11 * stride));
        in12 = _mm_load_si128((const __m128i *)(in + 12 * stride));
        in13 = _mm_load_si128((const __m128i *)(in + 13 * stride));
        in14 = _mm_load_si128((const __m128i *)(in + 14 * stride));
        in15 = _mm_load_si128((const __m128i *)(in + 15 * stride));
        in00 = _mm_slli_epi16(in00, 2);
        in01 = _mm_slli_epi16(in01, 2);
        in02 = _mm_slli_epi16(in02, 2);
        in03 = _mm_slli_epi16(in03, 2);
        in04 = _mm_slli_epi16(in04, 2);
        in05 = _mm_slli_epi16(in05, 2);
        in06 = _mm_slli_epi16(in06, 2);
        in07 = _mm_slli_epi16(in07, 2);
        in08 = _mm_slli_epi16(in08, 2);
        in09 = _mm_slli_epi16(in09, 2);
        in10 = _mm_slli_epi16(in10, 2);
        in11 = _mm_slli_epi16(in11, 2);
        in12 = _mm_slli_epi16(in12, 2);
        in13 = _mm_slli_epi16(in13, 2);
        in14 = _mm_slli_epi16(in14, 2);
        in15 = _mm_slli_epi16(in15, 2);
      } else {
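        // Second pass: re-read this pass' source from the intermediate
        // buffer (16-wide rows) and fold in the cross-pass (x + 1) >> 2
        // rounding before transforming again.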
        in00 = _mm_load_si128((const __m128i *)(in + 0 * 16));
        in01 = _mm_load_si128((const __m128i *)(in + 1 * 16));
        in02 = _mm_load_si128((const __m128i *)(in + 2 * 16));
        in03 = _mm_load_si128((const __m128i *)(in + 3 * 16));
        in04 = _mm_load_si128((const __m128i *)(in + 4 * 16));
        in05 = _mm_load_si128((const __m128i *)(in + 5 * 16));
        in06 = _mm_load_si128((const __m128i *)(in + 6 * 16));
        in07 = _mm_load_si128((const __m128i *)(in + 7 * 16));
        in08 = _mm_load_si128((const __m128i *)(in + 8 * 16));
        in09 = _mm_load_si128((const __m128i *)(in + 9 * 16));
        in10 = _mm_load_si128((const __m128i *)(in + 10 * 16));
        in11 = _mm_load_si128((const __m128i *)(in + 11 * 16));
        in12 = _mm_load_si128((const __m128i *)(in + 12 * 16));
        in13 = _mm_load_si128((const __m128i *)(in + 13 * 16));
        in14 = _mm_load_si128((const __m128i *)(in + 14 * 16));
        in15 = _mm_load_si128((const __m128i *)(in + 15 * 16));
        in00 = _mm_add_epi16(in00, kOne);
        in01 = _mm_add_epi16(in01, kOne);
        in02 = _mm_add_epi16(in02, kOne);
        in03 = _mm_add_epi16(in03, kOne);
        in04 = _mm_add_epi16(in04, kOne);
        in05 = _mm_add_epi16(in05, kOne);
        in06 = _mm_add_epi16(in06, kOne);
        in07 = _mm_add_epi16(in07, kOne);
        in08 = _mm_add_epi16(in08, kOne);
        in09 = _mm_add_epi16(in09, kOne);
        in10 = _mm_add_epi16(in10, kOne);
        in11 = _mm_add_epi16(in11, kOne);
        in12 = _mm_add_epi16(in12, kOne);
        in13 = _mm_add_epi16(in13, kOne);
        in14 = _mm_add_epi16(in14, kOne);
        in15 = _mm_add_epi16(in15, kOne);
        in00 = _mm_srai_epi16(in00, 2);
        in01 = _mm_srai_epi16(in01, 2);
        in02 = _mm_srai_epi16(in02, 2);
        in03 = _mm_srai_epi16(in03, 2);
        in04 = _mm_srai_epi16(in04, 2);
        in05 = _mm_srai_epi16(in05, 2);
        in06 = _mm_srai_epi16(in06, 2);
        in07 = _mm_srai_epi16(in07, 2);
        in08 = _mm_srai_epi16(in08, 2);
        in09 = _mm_srai_epi16(in09, 2);
        in10 = _mm_srai_epi16(in10, 2);
        in11 = _mm_srai_epi16(in11, 2);
        in12 = _mm_srai_epi16(in12, 2);
        in13 = _mm_srai_epi16(in13, 2);
        in14 = _mm_srai_epi16(in14, 2);
        in15 = _mm_srai_epi16(in15, 2);
      }
      // Calculate input for the first 8 results.
      input0 = _mm_add_epi16(in00, in15);
      input1 = _mm_add_epi16(in01, in14);
      input2 = _mm_add_epi16(in02, in13);
      input3 = _mm_add_epi16(in03, in12);
      input4 = _mm_add_epi16(in04, in11);
      input5 = _mm_add_epi16(in05, in10);
      input6 = _mm_add_epi16(in06, in09);
      input7 = _mm_add_epi16(in07, in08);
      // Calculate input for the next 8 results.
      step1_0 = _mm_sub_epi16(in07, in08);
      step1_1 = _mm_sub_epi16(in06, in09);
      step1_2 = _mm_sub_epi16(in05, in10);
      step1_3 = _mm_sub_epi16(in04, in11);
      step1_4 = _mm_sub_epi16(in03, in12);
      step1_5 = _mm_sub_epi16(in02, in13);
      step1_6 = _mm_sub_epi16(in01, in14);
      step1_7 = _mm_sub_epi16(in00, in15);
      // Work on the first eight values; fdct8_1d(input, even_results);
      {
        const __m128i q0 = _mm_add_epi16(input0, input7);
        const __m128i q1 = _mm_add_epi16(input1, input6);
        const __m128i q2 = _mm_add_epi16(input2, input5);
        const __m128i q3 = _mm_add_epi16(input3, input4);
        const __m128i q4 = _mm_sub_epi16(input3, input4);
        const __m128i q5 = _mm_sub_epi16(input2, input5);
        const __m128i q6 = _mm_sub_epi16(input1, input6);
        const __m128i q7 = _mm_sub_epi16(input0, input7);
        // Work on first four results
        {
          const __m128i r0 = _mm_add_epi16(q0, q3);
          const __m128i r1 = _mm_add_epi16(q1, q2);
          const __m128i r2 = _mm_sub_epi16(q1, q2);
          const __m128i r3 = _mm_sub_epi16(q0, q3);
          // Interleave to do the multiply by constants which gets us
          // into 32 bits.
          const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
          const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
          const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
          const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
          const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
          const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
          const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
          const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16);
          const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
          const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08);
          const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
          const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24);
          // dct_const_round_shift
          const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
          const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
          const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
          const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
          const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
          const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
          const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
          const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
          const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
          const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
          const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
          const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
          const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
          const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
          const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
          const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
          res00 = _mm_packs_epi32(w0, w1);
          res08 = _mm_packs_epi32(w2, w3);
          res04 = _mm_packs_epi32(w4, w5);
          res12 = _mm_packs_epi32(w6, w7);
        }
        // Work on next four results
        {
          // Interleave to do the multiply by constants which gets us
          // into 32 bits.
          const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
          const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
          const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16);
          const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16);
          const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16);
          const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16);
          // dct_const_round_shift
          const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING);
          const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING);
          const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING);
          const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING);
          const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS);
          const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS);
          const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS);
          const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS);
          const __m128i r0 = _mm_packs_epi32(s0, s1);
          const __m128i r1 = _mm_packs_epi32(s2, s3);
          const __m128i x0 = _mm_add_epi16(q4, r0);
          const __m128i x1 = _mm_sub_epi16(q4, r0);
          const __m128i x2 = _mm_sub_epi16(q7, r1);
          const __m128i x3 = _mm_add_epi16(q7, r1);
          // Interleave to do the multiply by constants which gets us
          // into 32 bits.
          const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
          const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
          const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
          const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
          const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04);
          const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04);
          const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28);
          const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28);
          const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20);
          const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20);
          const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12);
          const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12);
          // dct_const_round_shift
          const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
          const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
          const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
          const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
          const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
          const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
          const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
          const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
          const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
          const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
          const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
          const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
          const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
          const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
          const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
          const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
          res02 = _mm_packs_epi32(w0, w1);
          res14 = _mm_packs_epi32(w2, w3);
          res10 = _mm_packs_epi32(w4, w5);
          res06 = _mm_packs_epi32(w6, w7);
        }
      }
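      // The even-indexed results res00..res14 are now the 8-point DCT of
      // the even half, mirroring the per-pass body of vp9_fdct8x8_sse2.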
1337 // Work on the next eight values; step1 -> odd_results
          const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2);
          const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2);
          const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3);
          const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3);
          const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_m16);
          const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_m16);
          const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p16_m16);
          const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p16_m16);
          // dct_const_round_shift
          const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
          const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
          const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
          const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
          const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
          const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
          const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
          const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);

          step2_2 = _mm_packs_epi32(w0, w1);
          step2_3 = _mm_packs_epi32(w2, w3);
        }
        {
          const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2);
          const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2);
          const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3);
          const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3);
          const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
          const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
          const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p16_p16);
          const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p16_p16);
          // dct_const_round_shift
          const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
          const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
          const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
          const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
          const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
          const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
          const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
          const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);

          step2_5 = _mm_packs_epi32(w0, w1);
          step2_4 = _mm_packs_epi32(w2, w3);
        }
        // step 3
        {
          step3_0 = _mm_add_epi16(step1_0, step2_3);
          step3_1 = _mm_add_epi16(step1_1, step2_2);
          step3_2 = _mm_sub_epi16(step1_1, step2_2);
          step3_3 = _mm_sub_epi16(step1_0, step2_3);
          step3_4 = _mm_sub_epi16(step1_7, step2_4);
          step3_5 = _mm_sub_epi16(step1_6, step2_5);
          step3_6 = _mm_add_epi16(step1_6, step2_5);
          step3_7 = _mm_add_epi16(step1_7, step2_4);
        }
        // step 4
        {
          const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6);
          const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6);
          const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5);
          const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5);
          const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m08_p24);
          const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m08_p24);
          const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m24_m08);
          const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m24_m08);
          // dct_const_round_shift
          const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
          const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
          const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
          const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
          const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
          const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
          const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
          const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);

          step2_1 = _mm_packs_epi32(w0, w1);
          step2_2 = _mm_packs_epi32(w2, w3);
        }
        {
          const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6);
          const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6);
          const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5);
          const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5);
          const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p24_p08);
          const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p24_p08);
          const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m08_p24);
          const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m08_p24);
          // dct_const_round_shift
          const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
          const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
          const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
          const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
          const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
          const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
          const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
          const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);

          step2_6 = _mm_packs_epi32(w0, w1);
          step2_5 = _mm_packs_epi32(w2, w3);
        }
        // step 5
        {
          step1_0 = _mm_add_epi16(step3_0, step2_1);
          step1_1 = _mm_sub_epi16(step3_0, step2_1);
          step1_2 = _mm_sub_epi16(step3_3, step2_2);
          step1_3 = _mm_add_epi16(step3_3, step2_2);
          step1_4 = _mm_add_epi16(step3_4, step2_5);
          step1_5 = _mm_sub_epi16(step3_4, step2_5);
          step1_6 = _mm_sub_epi16(step3_7, step2_6);
          step1_7 = _mm_add_epi16(step3_7, step2_6);
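          // step 5 is the last add/subtract butterfly; its outputs feed
          // the four rotations below that produce the odd output rows.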
        }
        // step 6
        {
          const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7);
          const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7);
          const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6);
          const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6);
          const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p30_p02);
          const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p30_p02);
          const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p14_p18);
          const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p14_p18);
          // dct_const_round_shift
          const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
          const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
          const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
          const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
          const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
          const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
          const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
          const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);

          res01 = _mm_packs_epi32(w0, w1);
          res09 = _mm_packs_epi32(w2, w3);
        }
        {
          const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5);
          const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5);
          const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4);
          const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4);
          const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p22_p10);
          const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p22_p10);
          const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p06_p26);
          const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p06_p26);
          // dct_const_round_shift
          const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
          const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
          const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
          const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
          const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
          const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
          const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
          const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);

          res05 = _mm_packs_epi32(w0, w1);
          res13 = _mm_packs_epi32(w2, w3);
        }
        {
          const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5);
          const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5);
          const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4);
          const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4);
          const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m10_p22);
          const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m10_p22);
          const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m26_p06);
          const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m26_p06);
          // dct_const_round_shift
          const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
          const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
          const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
          const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
          const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
          const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
          const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
          const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);

          res11 = _mm_packs_epi32(w0, w1);
          res03 = _mm_packs_epi32(w2, w3);
        }
        {
          const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7);
          const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7);
          const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6);
          const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6);
          const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m02_p30);
          const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m02_p30);
          const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m18_p14);
          const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m18_p14);
          // dct_const_round_shift
          const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
          const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
          const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
          const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
          const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
          const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
          const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
          const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);

          res15 = _mm_packs_epi32(w0, w1);
          res07 = _mm_packs_epi32(w2, w3);
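          // The four rotations above emitted the odd output rows in the
          // order 1/9, 5/13, 11/3 and 15/7, matching the cospi pairs of
          // the scalar fdct16.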
        }
      }
      // Transpose the results, do it as two 8x8 transposes.
      {
        // 00 01 02 03 04 05 06 07
        // 10 11 12 13 14 15 16 17
        // 20 21 22 23 24 25 26 27
        // 30 31 32 33 34 35 36 37
        // 40 41 42 43 44 45 46 47
        // 50 51 52 53 54 55 56 57
        // 60 61 62 63 64 65 66 67
        // 70 71 72 73 74 75 76 77
        const __m128i tr0_0 = _mm_unpacklo_epi16(res00, res01);
        const __m128i tr0_1 = _mm_unpacklo_epi16(res02, res03);
        const __m128i tr0_2 = _mm_unpackhi_epi16(res00, res01);
        const __m128i tr0_3 = _mm_unpackhi_epi16(res02, res03);
        const __m128i tr0_4 = _mm_unpacklo_epi16(res04, res05);
        const __m128i tr0_5 = _mm_unpacklo_epi16(res06, res07);
        const __m128i tr0_6 = _mm_unpackhi_epi16(res04, res05);
        const __m128i tr0_7 = _mm_unpackhi_epi16(res06, res07);
        // 00 10 01 11 02 12 03 13
        // 20 30 21 31 22 32 23 33
        // 04 14 05 15 06 16 07 17
        // 24 34 25 35 26 36 27 37
        // 40 50 41 51 42 52 43 53
        // 60 70 61 71 62 72 63 73
        // 44 54 45 55 46 56 47 57
        // 64 74 65 75 66 76 67 77
        const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
        const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
        const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
        const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
        const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
        const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
        const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
        const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
        // 00 10 20 30 01 11 21 31
        // 40 50 60 70 41 51 61 71
        // 02 12 22 32 03 13 23 33
        // 42 52 62 72 43 53 63 73
        // 04 14 24 34 05 15 25 35
        // 44 54 64 74 45 55 65 75
        // 06 16 26 36 07 17 27 37
        // 46 56 66 76 47 57 67 77
        const __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
        const __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
        const __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
        const __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
        const __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
        const __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
        const __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
        const __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
        // 00 10 20 30 40 50 60 70
        // 01 11 21 31 41 51 61 71
        // 02 12 22 32 42 52 62 72
        // 03 13 23 33 43 53 63 73
        // 04 14 24 34 44 54 64 74
        // 05 15 25 35 45 55 65 75
        // 06 16 26 36 46 56 66 76
        // 07 17 27 37 47 57 67 77
        _mm_storeu_si128((__m128i *)(out + 0 * 16), tr2_0);
        _mm_storeu_si128((__m128i *)(out + 1 * 16), tr2_1);
        _mm_storeu_si128((__m128i *)(out + 2 * 16), tr2_2);
        _mm_storeu_si128((__m128i *)(out + 3 * 16), tr2_3);
        _mm_storeu_si128((__m128i *)(out + 4 * 16), tr2_4);
        _mm_storeu_si128((__m128i *)(out + 5 * 16), tr2_5);
        _mm_storeu_si128((__m128i *)(out + 6 * 16), tr2_6);
        _mm_storeu_si128((__m128i *)(out + 7 * 16), tr2_7);
      }
      {
        // 00 01 02 03 04 05 06 07
        // 10 11 12 13 14 15 16 17
        // 20 21 22 23 24 25 26 27
        // 30 31 32 33 34 35 36 37
        // 40 41 42 43 44 45 46 47
        // 50 51 52 53 54 55 56 57
        // 60 61 62 63 64 65 66 67
        // 70 71 72 73 74 75 76 77
        const __m128i tr0_0 = _mm_unpacklo_epi16(res08, res09);
        const __m128i tr0_1 = _mm_unpacklo_epi16(res10, res11);
        const __m128i tr0_2 = _mm_unpackhi_epi16(res08, res09);
        const __m128i tr0_3 = _mm_unpackhi_epi16(res10, res11);
        const __m128i tr0_4 = _mm_unpacklo_epi16(res12, res13);
        const __m128i tr0_5 = _mm_unpacklo_epi16(res14, res15);
        const __m128i tr0_6 = _mm_unpackhi_epi16(res12, res13);
        const __m128i tr0_7 = _mm_unpackhi_epi16(res14, res15);
        // 00 10 01 11 02 12 03 13
        // 20 30 21 31 22 32 23 33
        // 04 14 05 15 06 16 07 17
        // 24 34 25 35 26 36 27 37
        // 40 50 41 51 42 52 43 53
        // 60 70 61 71 62 72 63 73
        // 44 54 45 55 46 56 47 57
        // 64 74 65 75 66 76 67 77
        const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
        const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
        const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
        const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
        const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
        const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
        const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
        const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
        // 00 10 20 30 01 11 21 31
        // 40 50 60 70 41 51 61 71
        // 02 12 22 32 03 13 23 33
        // 42 52 62 72 43 53 63 73
        // 04 14 24 34 05 15 25 35
        // 44 54 64 74 45 55 65 75
        // 06 16 26 36 07 17 27 37
        // 46 56 66 76 47 57 67 77
        const __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
        const __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
        const __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
        const __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
        const __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
        const __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
        const __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
        const __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
        // 00 10 20 30 40 50 60 70
        // 01 11 21 31 41 51 61 71
        // 02 12 22 32 42 52 62 72
        // 03 13 23 33 43 53 63 73
        // 04 14 24 34 44 54 64 74
        // 05 15 25 35 45 55 65 75
        // 06 16 26 36 46 56 66 76
        // 07 17 27 37 47 57 67 77

        _mm_store_si128((__m128i *)(out + 8 + 0 * 16), tr2_0);
        _mm_store_si128((__m128i *)(out + 8 + 1 * 16), tr2_1);
        _mm_store_si128((__m128i *)(out + 8 + 2 * 16), tr2_2);
        _mm_store_si128((__m128i *)(out + 8 + 3 * 16), tr2_3);
        _mm_store_si128((__m128i *)(out + 8 + 4 * 16), tr2_4);
        _mm_store_si128((__m128i *)(out + 8 + 5 * 16), tr2_5);
        _mm_store_si128((__m128i *)(out + 8 + 6 * 16), tr2_6);
        _mm_store_si128((__m128i *)(out + 8 + 7 * 16), tr2_7);
      }
      out += 8 * 16;
    }
    // Setup in/out for next pass.
    in = intermediate;
    out = output;
  }
}

static INLINE void load_buffer_16x16(const int16_t* input, __m128i *in0,
                                     __m128i *in1, int stride) {
  // load first 8 columns
  load_buffer_8x8(input, in0, stride);
  load_buffer_8x8(input + 8 * stride, in0 + 8, stride);

  input += 8;
  // load second 8 columns
  load_buffer_8x8(input, in1, stride);
  load_buffer_8x8(input + 8 * stride, in1 + 8, stride);
}

static INLINE void write_buffer_16x16(int16_t *output, __m128i *in0,
                                      __m128i *in1, int stride) {
  // write first 8 columns
  write_buffer_8x8(output, in0, stride);
  write_buffer_8x8(output + 8 * stride, in0 + 8, stride);
  // write second 8 columns
  output += 8;
  write_buffer_8x8(output, in1, stride);
  write_buffer_8x8(output + 8 * stride, in1 + 8, stride);
}

static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
  __m128i tbuf[8];
  array_transpose_8x8(res0, res0);
  // Transpose res1's top 8x8 into tbuf first so the in-place quadrant
  // swap below does not clobber it before it is copied back into res0.
  array_transpose_8x8(res1, tbuf);
  array_transpose_8x8(res0 + 8, res1);
  array_transpose_8x8(res1 + 8, res1 + 8);

  res0[8] = tbuf[0];
  res0[9] = tbuf[1];
  res0[10] = tbuf[2];
  res0[11] = tbuf[3];
  res0[12] = tbuf[4];
  res0[13] = tbuf[5];
  res0[14] = tbuf[6];
  res0[15] = tbuf[7];
}

static INLINE void right_shift_16x16(__m128i *res0, __m128i *res1) {
  // divide the intermediate results by 4 with rounding, between the two
  // 1-D passes
  right_shift_8x8(res0, 2);
  right_shift_8x8(res0 + 8, 2);
  right_shift_8x8(res1, 2);
  right_shift_8x8(res1 + 8, 2);
}

void fdct16_1d_8col(__m128i *in) {
  // perform a 16-point 1-D DCT on each of eight 16-bit columns
  __m128i i[8], s[8], p[8], t[8], u[16], v[16];
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
  const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
  const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64);
  const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64);
  const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
  const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
  const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64);
  const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64);
  const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
  const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
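
  // The column transform below mirrors the scalar fdct16: the input is
  // folded into even/odd halves, then each unpack/madd/round/shift/pack
  // group applies one dct_const_round_shift() rotation to all eight
  // columns at once.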

  // Calculate input for the first 8 results.
  i[0] = _mm_add_epi16(in[0], in[15]);
  i[1] = _mm_add_epi16(in[1], in[14]);
  i[2] = _mm_add_epi16(in[2], in[13]);
  i[3] = _mm_add_epi16(in[3], in[12]);
  i[4] = _mm_add_epi16(in[4], in[11]);
  i[5] = _mm_add_epi16(in[5], in[10]);
  i[6] = _mm_add_epi16(in[6], in[9]);
  i[7] = _mm_add_epi16(in[7], in[8]);
  // Calculate input for the next 8 results.
  s[0] = _mm_sub_epi16(in[7], in[8]);
  s[1] = _mm_sub_epi16(in[6], in[9]);
  s[2] = _mm_sub_epi16(in[5], in[10]);
  s[3] = _mm_sub_epi16(in[4], in[11]);
  s[4] = _mm_sub_epi16(in[3], in[12]);
  s[5] = _mm_sub_epi16(in[2], in[13]);
  s[6] = _mm_sub_epi16(in[1], in[14]);
  s[7] = _mm_sub_epi16(in[0], in[15]);

  p[0] = _mm_add_epi16(i[0], i[7]);
  p[1] = _mm_add_epi16(i[1], i[6]);
  p[2] = _mm_add_epi16(i[2], i[5]);
  p[3] = _mm_add_epi16(i[3], i[4]);
  p[4] = _mm_sub_epi16(i[3], i[4]);
  p[5] = _mm_sub_epi16(i[2], i[5]);
  p[6] = _mm_sub_epi16(i[1], i[6]);
  p[7] = _mm_sub_epi16(i[0], i[7]);

  u[0] = _mm_add_epi16(p[0], p[3]);
  u[1] = _mm_add_epi16(p[1], p[2]);
  u[2] = _mm_sub_epi16(p[1], p[2]);
  u[3] = _mm_sub_epi16(p[0], p[3]);

  v[0] = _mm_unpacklo_epi16(u[0], u[1]);
  v[1] = _mm_unpackhi_epi16(u[0], u[1]);
  v[2] = _mm_unpacklo_epi16(u[2], u[3]);
  v[3] = _mm_unpackhi_epi16(u[2], u[3]);

  u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16);
  u[1] = _mm_madd_epi16(v[1], k__cospi_p16_p16);
  u[2] = _mm_madd_epi16(v[0], k__cospi_p16_m16);
  u[3] = _mm_madd_epi16(v[1], k__cospi_p16_m16);
  u[4] = _mm_madd_epi16(v[2], k__cospi_p24_p08);
  u[5] = _mm_madd_epi16(v[3], k__cospi_p24_p08);
  u[6] = _mm_madd_epi16(v[2], k__cospi_m08_p24);
  u[7] = _mm_madd_epi16(v[3], k__cospi_m08_p24);

  v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
  v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
  v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
  v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
  v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
  v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
  v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
  v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);

  u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
  u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
  u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
  u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
  u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
  u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
  u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
  u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);

  in[0] = _mm_packs_epi32(u[0], u[1]);
  in[4] = _mm_packs_epi32(u[4], u[5]);
  in[8] = _mm_packs_epi32(u[2], u[3]);
  in[12] = _mm_packs_epi32(u[6], u[7]);
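  // Output rows 0, 4, 8 and 12 are now final: they are the 4-point DCT of
  // the even half, written straight to their natural positions.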

  u[0] = _mm_unpacklo_epi16(p[5], p[6]);
  u[1] = _mm_unpackhi_epi16(p[5], p[6]);
  v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16);
  v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p16_p16);

  u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
  u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
  u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
  u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);

  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);

  u[0] = _mm_packs_epi32(v[0], v[1]);
  u[1] = _mm_packs_epi32(v[2], v[3]);

  t[0] = _mm_add_epi16(p[4], u[0]);
  t[1] = _mm_sub_epi16(p[4], u[0]);
  t[2] = _mm_sub_epi16(p[7], u[1]);
  t[3] = _mm_add_epi16(p[7], u[1]);

  u[0] = _mm_unpacklo_epi16(t[0], t[3]);
  u[1] = _mm_unpackhi_epi16(t[0], t[3]);
  u[2] = _mm_unpacklo_epi16(t[1], t[2]);
  u[3] = _mm_unpackhi_epi16(t[1], t[2]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p28_p04);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p28_p04);
  v[2] = _mm_madd_epi16(u[2], k__cospi_p12_p20);
  v[3] = _mm_madd_epi16(u[3], k__cospi_p12_p20);
  v[4] = _mm_madd_epi16(u[2], k__cospi_m20_p12);
  v[5] = _mm_madd_epi16(u[3], k__cospi_m20_p12);
  v[6] = _mm_madd_epi16(u[0], k__cospi_m04_p28);
  v[7] = _mm_madd_epi16(u[1], k__cospi_m04_p28);

  u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
  u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
  u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
  u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
  u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
  u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
  u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
  u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);

  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
  v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
  v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
  v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
  v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);

  in[2] = _mm_packs_epi32(v[0], v[1]);
  in[6] = _mm_packs_epi32(v[4], v[5]);
  in[10] = _mm_packs_epi32(v[2], v[3]);
  in[14] = _mm_packs_epi32(v[6], v[7]);

  // step 2
  u[0] = _mm_unpacklo_epi16(s[2], s[5]);
  u[1] = _mm_unpackhi_epi16(s[2], s[5]);
  u[2] = _mm_unpacklo_epi16(s[3], s[4]);
  u[3] = _mm_unpackhi_epi16(s[3], s[4]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16);
  v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16);
  v[2] = _mm_madd_epi16(u[2], k__cospi_m16_p16);
  v[3] = _mm_madd_epi16(u[3], k__cospi_m16_p16);
  v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16);
  v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16);
  v[6] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
  v[7] = _mm_madd_epi16(u[1], k__cospi_p16_p16);

  u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
  u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
  u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
  u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
  u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
  u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
  u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
  u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);

  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
  v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
  v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
  v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
  v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);

  t[2] = _mm_packs_epi32(v[0], v[1]);
  t[3] = _mm_packs_epi32(v[2], v[3]);
  t[4] = _mm_packs_epi32(v[4], v[5]);
  t[5] = _mm_packs_epi32(v[6], v[7]);

  // step 3
  p[0] = _mm_add_epi16(s[0], t[3]);
  p[1] = _mm_add_epi16(s[1], t[2]);
  p[2] = _mm_sub_epi16(s[1], t[2]);
  p[3] = _mm_sub_epi16(s[0], t[3]);
  p[4] = _mm_sub_epi16(s[7], t[4]);
  p[5] = _mm_sub_epi16(s[6], t[5]);
  p[6] = _mm_add_epi16(s[6], t[5]);
  p[7] = _mm_add_epi16(s[7], t[4]);

  // step 4
  u[0] = _mm_unpacklo_epi16(p[1], p[6]);
  u[1] = _mm_unpackhi_epi16(p[1], p[6]);
  u[2] = _mm_unpacklo_epi16(p[2], p[5]);
  u[3] = _mm_unpackhi_epi16(p[2], p[5]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_m08_p24);
  v[1] = _mm_madd_epi16(u[1], k__cospi_m08_p24);
  v[2] = _mm_madd_epi16(u[2], k__cospi_m24_m08);
  v[3] = _mm_madd_epi16(u[3], k__cospi_m24_m08);
  v[4] = _mm_madd_epi16(u[2], k__cospi_m08_p24);
  v[5] = _mm_madd_epi16(u[3], k__cospi_m08_p24);
  v[6] = _mm_madd_epi16(u[0], k__cospi_p24_p08);
  v[7] = _mm_madd_epi16(u[1], k__cospi_p24_p08);

  u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
  u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
  u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
  u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
  u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
  u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
  u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
  u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);

  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
  v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
  v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
  v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
  v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);

  t[1] = _mm_packs_epi32(v[0], v[1]);
  t[2] = _mm_packs_epi32(v[2], v[3]);
  t[5] = _mm_packs_epi32(v[4], v[5]);
  t[6] = _mm_packs_epi32(v[6], v[7]);

  // step 5
  s[0] = _mm_add_epi16(p[0], t[1]);
  s[1] = _mm_sub_epi16(p[0], t[1]);
  s[2] = _mm_sub_epi16(p[3], t[2]);
  s[3] = _mm_add_epi16(p[3], t[2]);
  s[4] = _mm_add_epi16(p[4], t[5]);
  s[5] = _mm_sub_epi16(p[4], t[5]);
  s[6] = _mm_sub_epi16(p[7], t[6]);
  s[7] = _mm_add_epi16(p[7], t[6]);

  // step 6
  u[0] = _mm_unpacklo_epi16(s[0], s[7]);
  u[1] = _mm_unpackhi_epi16(s[0], s[7]);
  u[2] = _mm_unpacklo_epi16(s[1], s[6]);
  u[3] = _mm_unpackhi_epi16(s[1], s[6]);
  u[4] = _mm_unpacklo_epi16(s[2], s[5]);
  u[5] = _mm_unpackhi_epi16(s[2], s[5]);
  u[6] = _mm_unpacklo_epi16(s[3], s[4]);
  u[7] = _mm_unpackhi_epi16(s[3], s[4]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p30_p02);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p30_p02);
  v[2] = _mm_madd_epi16(u[2], k__cospi_p14_p18);
  v[3] = _mm_madd_epi16(u[3], k__cospi_p14_p18);
  v[4] = _mm_madd_epi16(u[4], k__cospi_p22_p10);
  v[5] = _mm_madd_epi16(u[5], k__cospi_p22_p10);
  v[6] = _mm_madd_epi16(u[6], k__cospi_p06_p26);
  v[7] = _mm_madd_epi16(u[7], k__cospi_p06_p26);
  v[8] = _mm_madd_epi16(u[6], k__cospi_m26_p06);
  v[9] = _mm_madd_epi16(u[7], k__cospi_m26_p06);
  v[10] = _mm_madd_epi16(u[4], k__cospi_m10_p22);
  v[11] = _mm_madd_epi16(u[5], k__cospi_m10_p22);
  v[12] = _mm_madd_epi16(u[2], k__cospi_m18_p14);
  v[13] = _mm_madd_epi16(u[3], k__cospi_m18_p14);
  v[14] = _mm_madd_epi16(u[0], k__cospi_m02_p30);
  v[15] = _mm_madd_epi16(u[1], k__cospi_m02_p30);

  u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
  u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
  u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
  u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
  u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
  u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
  u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
  u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
  u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING);
  u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING);
  u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING);
  u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING);
  u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING);
  u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING);
  u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING);
  u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING);

  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
  v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
  v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
  v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
  v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
  v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
  v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
  v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
  v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
  v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
  v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
  v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
  v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);

  in[1] = _mm_packs_epi32(v[0], v[1]);
  in[9] = _mm_packs_epi32(v[2], v[3]);
  in[5] = _mm_packs_epi32(v[4], v[5]);
  in[13] = _mm_packs_epi32(v[6], v[7]);
  in[3] = _mm_packs_epi32(v[8], v[9]);
  in[11] = _mm_packs_epi32(v[10], v[11]);
  in[7] = _mm_packs_epi32(v[12], v[13]);
  in[15] = _mm_packs_epi32(v[14], v[15]);
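  // All 16 output rows now sit in natural order in in[0..15], each
  // register still holding eight 16-bit columns.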
}

void fadst16_1d_8col(__m128i *in) {
  // perform a 16-point 1-D ADST on each of eight 16-bit columns
  __m128i s[16], x[16], u[32], v[32];
  const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
  const __m128i k__cospi_p31_m01 = pair_set_epi16(cospi_31_64, -cospi_1_64);
  const __m128i k__cospi_p05_p27 = pair_set_epi16(cospi_5_64, cospi_27_64);
  const __m128i k__cospi_p27_m05 = pair_set_epi16(cospi_27_64, -cospi_5_64);
  const __m128i k__cospi_p09_p23 = pair_set_epi16(cospi_9_64, cospi_23_64);
  const __m128i k__cospi_p23_m09 = pair_set_epi16(cospi_23_64, -cospi_9_64);
  const __m128i k__cospi_p13_p19 = pair_set_epi16(cospi_13_64, cospi_19_64);
  const __m128i k__cospi_p19_m13 = pair_set_epi16(cospi_19_64, -cospi_13_64);
  const __m128i k__cospi_p17_p15 = pair_set_epi16(cospi_17_64, cospi_15_64);
  const __m128i k__cospi_p15_m17 = pair_set_epi16(cospi_15_64, -cospi_17_64);
  const __m128i k__cospi_p21_p11 = pair_set_epi16(cospi_21_64, cospi_11_64);
  const __m128i k__cospi_p11_m21 = pair_set_epi16(cospi_11_64, -cospi_21_64);
  const __m128i k__cospi_p25_p07 = pair_set_epi16(cospi_25_64, cospi_7_64);
  const __m128i k__cospi_p07_m25 = pair_set_epi16(cospi_7_64, -cospi_25_64);
  const __m128i k__cospi_p29_p03 = pair_set_epi16(cospi_29_64, cospi_3_64);
  const __m128i k__cospi_p03_m29 = pair_set_epi16(cospi_3_64, -cospi_29_64);
  const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64);
  const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64);
  const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64);
  const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64);
  const __m128i k__cospi_m28_p04 = pair_set_epi16(-cospi_28_64, cospi_4_64);
  const __m128i k__cospi_m12_p20 = pair_set_epi16(-cospi_12_64, cospi_20_64);
  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
  const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
  const __m128i k__cospi_m16_m16 = _mm_set1_epi16(-cospi_16_64);
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
  const __m128i kZero = _mm_set1_epi16(0);

  u[0] = _mm_unpacklo_epi16(in[15], in[0]);
  u[1] = _mm_unpackhi_epi16(in[15], in[0]);
  u[2] = _mm_unpacklo_epi16(in[13], in[2]);
  u[3] = _mm_unpackhi_epi16(in[13], in[2]);
  u[4] = _mm_unpacklo_epi16(in[11], in[4]);
  u[5] = _mm_unpackhi_epi16(in[11], in[4]);
  u[6] = _mm_unpacklo_epi16(in[9], in[6]);
  u[7] = _mm_unpackhi_epi16(in[9], in[6]);
  u[8] = _mm_unpacklo_epi16(in[7], in[8]);
  u[9] = _mm_unpackhi_epi16(in[7], in[8]);
  u[10] = _mm_unpacklo_epi16(in[5], in[10]);
  u[11] = _mm_unpackhi_epi16(in[5], in[10]);
  u[12] = _mm_unpacklo_epi16(in[3], in[12]);
  u[13] = _mm_unpackhi_epi16(in[3], in[12]);
  u[14] = _mm_unpacklo_epi16(in[1], in[14]);
  u[15] = _mm_unpackhi_epi16(in[1], in[14]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p01_p31);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p01_p31);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p31_m01);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p31_m01);
  v[4] = _mm_madd_epi16(u[2], k__cospi_p05_p27);
  v[5] = _mm_madd_epi16(u[3], k__cospi_p05_p27);
  v[6] = _mm_madd_epi16(u[2], k__cospi_p27_m05);
  v[7] = _mm_madd_epi16(u[3], k__cospi_p27_m05);
  v[8] = _mm_madd_epi16(u[4], k__cospi_p09_p23);
  v[9] = _mm_madd_epi16(u[5], k__cospi_p09_p23);
  v[10] = _mm_madd_epi16(u[4], k__cospi_p23_m09);
  v[11] = _mm_madd_epi16(u[5], k__cospi_p23_m09);
  v[12] = _mm_madd_epi16(u[6], k__cospi_p13_p19);
  v[13] = _mm_madd_epi16(u[7], k__cospi_p13_p19);
  v[14] = _mm_madd_epi16(u[6], k__cospi_p19_m13);
  v[15] = _mm_madd_epi16(u[7], k__cospi_p19_m13);
  v[16] = _mm_madd_epi16(u[8], k__cospi_p17_p15);
  v[17] = _mm_madd_epi16(u[9], k__cospi_p17_p15);
  v[18] = _mm_madd_epi16(u[8], k__cospi_p15_m17);
  v[19] = _mm_madd_epi16(u[9], k__cospi_p15_m17);
  v[20] = _mm_madd_epi16(u[10], k__cospi_p21_p11);
  v[21] = _mm_madd_epi16(u[11], k__cospi_p21_p11);
  v[22] = _mm_madd_epi16(u[10], k__cospi_p11_m21);
  v[23] = _mm_madd_epi16(u[11], k__cospi_p11_m21);
  v[24] = _mm_madd_epi16(u[12], k__cospi_p25_p07);
  v[25] = _mm_madd_epi16(u[13], k__cospi_p25_p07);
  v[26] = _mm_madd_epi16(u[12], k__cospi_p07_m25);
  v[27] = _mm_madd_epi16(u[13], k__cospi_p07_m25);
  v[28] = _mm_madd_epi16(u[14], k__cospi_p29_p03);
  v[29] = _mm_madd_epi16(u[15], k__cospi_p29_p03);
  v[30] = _mm_madd_epi16(u[14], k__cospi_p03_m29);
  v[31] = _mm_madd_epi16(u[15], k__cospi_p03_m29);
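  // Stage 1 butterflies: the rotated pairs are combined as v[i] + v[i + 16]
  // and v[i] - v[i + 16] below, staying in 32 bits until the single
  // rounding shift that follows.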

  u[0] = _mm_add_epi32(v[0], v[16]);
  u[1] = _mm_add_epi32(v[1], v[17]);
  u[2] = _mm_add_epi32(v[2], v[18]);
  u[3] = _mm_add_epi32(v[3], v[19]);
  u[4] = _mm_add_epi32(v[4], v[20]);
  u[5] = _mm_add_epi32(v[5], v[21]);
  u[6] = _mm_add_epi32(v[6], v[22]);
  u[7] = _mm_add_epi32(v[7], v[23]);
  u[8] = _mm_add_epi32(v[8], v[24]);
  u[9] = _mm_add_epi32(v[9], v[25]);
  u[10] = _mm_add_epi32(v[10], v[26]);
  u[11] = _mm_add_epi32(v[11], v[27]);
  u[12] = _mm_add_epi32(v[12], v[28]);
  u[13] = _mm_add_epi32(v[13], v[29]);
  u[14] = _mm_add_epi32(v[14], v[30]);
  u[15] = _mm_add_epi32(v[15], v[31]);
  u[16] = _mm_sub_epi32(v[0], v[16]);
  u[17] = _mm_sub_epi32(v[1], v[17]);
  u[18] = _mm_sub_epi32(v[2], v[18]);
  u[19] = _mm_sub_epi32(v[3], v[19]);
  u[20] = _mm_sub_epi32(v[4], v[20]);
  u[21] = _mm_sub_epi32(v[5], v[21]);
  u[22] = _mm_sub_epi32(v[6], v[22]);
  u[23] = _mm_sub_epi32(v[7], v[23]);
  u[24] = _mm_sub_epi32(v[8], v[24]);
  u[25] = _mm_sub_epi32(v[9], v[25]);
  u[26] = _mm_sub_epi32(v[10], v[26]);
  u[27] = _mm_sub_epi32(v[11], v[27]);
  u[28] = _mm_sub_epi32(v[12], v[28]);
  u[29] = _mm_sub_epi32(v[13], v[29]);
  u[30] = _mm_sub_epi32(v[14], v[30]);
  u[31] = _mm_sub_epi32(v[15], v[31]);

  v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
  v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
  v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
  v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
  v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
  v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
  v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
  v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
  v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
  v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
  v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
  v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
  v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
  v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
  v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
  v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
  v[16] = _mm_add_epi32(u[16], k__DCT_CONST_ROUNDING);
  v[17] = _mm_add_epi32(u[17], k__DCT_CONST_ROUNDING);
  v[18] = _mm_add_epi32(u[18], k__DCT_CONST_ROUNDING);
  v[19] = _mm_add_epi32(u[19], k__DCT_CONST_ROUNDING);
  v[20] = _mm_add_epi32(u[20], k__DCT_CONST_ROUNDING);
  v[21] = _mm_add_epi32(u[21], k__DCT_CONST_ROUNDING);
  v[22] = _mm_add_epi32(u[22], k__DCT_CONST_ROUNDING);
  v[23] = _mm_add_epi32(u[23], k__DCT_CONST_ROUNDING);
  v[24] = _mm_add_epi32(u[24], k__DCT_CONST_ROUNDING);
  v[25] = _mm_add_epi32(u[25], k__DCT_CONST_ROUNDING);
  v[26] = _mm_add_epi32(u[26], k__DCT_CONST_ROUNDING);
  v[27] = _mm_add_epi32(u[27], k__DCT_CONST_ROUNDING);
  v[28] = _mm_add_epi32(u[28], k__DCT_CONST_ROUNDING);
  v[29] = _mm_add_epi32(u[29], k__DCT_CONST_ROUNDING);
  v[30] = _mm_add_epi32(u[30], k__DCT_CONST_ROUNDING);
  v[31] = _mm_add_epi32(u[31], k__DCT_CONST_ROUNDING);

  u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
  u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
  u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
  u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
  u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
  u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
  u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
  u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
  u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
  u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
  u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
  u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
  u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
  u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
  u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
  u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
  u[16] = _mm_srai_epi32(v[16], DCT_CONST_BITS);
  u[17] = _mm_srai_epi32(v[17], DCT_CONST_BITS);
  u[18] = _mm_srai_epi32(v[18], DCT_CONST_BITS);
  u[19] = _mm_srai_epi32(v[19], DCT_CONST_BITS);
  u[20] = _mm_srai_epi32(v[20], DCT_CONST_BITS);
  u[21] = _mm_srai_epi32(v[21], DCT_CONST_BITS);
  u[22] = _mm_srai_epi32(v[22], DCT_CONST_BITS);
  u[23] = _mm_srai_epi32(v[23], DCT_CONST_BITS);
  u[24] = _mm_srai_epi32(v[24], DCT_CONST_BITS);
  u[25] = _mm_srai_epi32(v[25], DCT_CONST_BITS);
  u[26] = _mm_srai_epi32(v[26], DCT_CONST_BITS);
  u[27] = _mm_srai_epi32(v[27], DCT_CONST_BITS);
  u[28] = _mm_srai_epi32(v[28], DCT_CONST_BITS);
  u[29] = _mm_srai_epi32(v[29], DCT_CONST_BITS);
  u[30] = _mm_srai_epi32(v[30], DCT_CONST_BITS);
  u[31] = _mm_srai_epi32(v[31], DCT_CONST_BITS);

  s[0] = _mm_packs_epi32(u[0], u[1]);
  s[1] = _mm_packs_epi32(u[2], u[3]);
  s[2] = _mm_packs_epi32(u[4], u[5]);
  s[3] = _mm_packs_epi32(u[6], u[7]);
  s[4] = _mm_packs_epi32(u[8], u[9]);
  s[5] = _mm_packs_epi32(u[10], u[11]);
  s[6] = _mm_packs_epi32(u[12], u[13]);
  s[7] = _mm_packs_epi32(u[14], u[15]);
  s[8] = _mm_packs_epi32(u[16], u[17]);
  s[9] = _mm_packs_epi32(u[18], u[19]);
  s[10] = _mm_packs_epi32(u[20], u[21]);
  s[11] = _mm_packs_epi32(u[22], u[23]);
  s[12] = _mm_packs_epi32(u[24], u[25]);
  s[13] = _mm_packs_epi32(u[26], u[27]);
  s[14] = _mm_packs_epi32(u[28], u[29]);
  s[15] = _mm_packs_epi32(u[30], u[31]);

  // stage 2
  u[0] = _mm_unpacklo_epi16(s[8], s[9]);
  u[1] = _mm_unpackhi_epi16(s[8], s[9]);
  u[2] = _mm_unpacklo_epi16(s[10], s[11]);
  u[3] = _mm_unpackhi_epi16(s[10], s[11]);
  u[4] = _mm_unpacklo_epi16(s[12], s[13]);
  u[5] = _mm_unpackhi_epi16(s[12], s[13]);
  u[6] = _mm_unpacklo_epi16(s[14], s[15]);
  u[7] = _mm_unpackhi_epi16(s[14], s[15]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p04_p28);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p04_p28);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p28_m04);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p28_m04);
  v[4] = _mm_madd_epi16(u[2], k__cospi_p20_p12);
  v[5] = _mm_madd_epi16(u[3], k__cospi_p20_p12);
  v[6] = _mm_madd_epi16(u[2], k__cospi_p12_m20);
  v[7] = _mm_madd_epi16(u[3], k__cospi_p12_m20);
  v[8] = _mm_madd_epi16(u[4], k__cospi_m28_p04);
  v[9] = _mm_madd_epi16(u[5], k__cospi_m28_p04);
  v[10] = _mm_madd_epi16(u[4], k__cospi_p04_p28);
  v[11] = _mm_madd_epi16(u[5], k__cospi_p04_p28);
  v[12] = _mm_madd_epi16(u[6], k__cospi_m12_p20);
  v[13] = _mm_madd_epi16(u[7], k__cospi_m12_p20);
  v[14] = _mm_madd_epi16(u[6], k__cospi_p20_p12);
  v[15] = _mm_madd_epi16(u[7], k__cospi_p20_p12);

  u[0] = _mm_add_epi32(v[0], v[8]);
  u[1] = _mm_add_epi32(v[1], v[9]);
  u[2] = _mm_add_epi32(v[2], v[10]);
  u[3] = _mm_add_epi32(v[3], v[11]);
  u[4] = _mm_add_epi32(v[4], v[12]);
  u[5] = _mm_add_epi32(v[5], v[13]);
  u[6] = _mm_add_epi32(v[6], v[14]);
  u[7] = _mm_add_epi32(v[7], v[15]);
  u[8] = _mm_sub_epi32(v[0], v[8]);
  u[9] = _mm_sub_epi32(v[1], v[9]);
  u[10] = _mm_sub_epi32(v[2], v[10]);
  u[11] = _mm_sub_epi32(v[3], v[11]);
  u[12] = _mm_sub_epi32(v[4], v[12]);
  u[13] = _mm_sub_epi32(v[5], v[13]);
  u[14] = _mm_sub_epi32(v[6], v[14]);
  u[15] = _mm_sub_epi32(v[7], v[15]);

  v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
  v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
  v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
  v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
  v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
  v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
  v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
  v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
  v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
  v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
  v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
  v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
  v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
  v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
  v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
  v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);

  u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
  u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
  u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
  u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
  u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
  u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
  u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
  u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
  u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
  u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
  u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
  u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
  u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
  u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
  u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
  u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);

  x[0] = _mm_add_epi16(s[0], s[4]);
  x[1] = _mm_add_epi16(s[1], s[5]);
  x[2] = _mm_add_epi16(s[2], s[6]);
  x[3] = _mm_add_epi16(s[3], s[7]);
  x[4] = _mm_sub_epi16(s[0], s[4]);
  x[5] = _mm_sub_epi16(s[1], s[5]);
  x[6] = _mm_sub_epi16(s[2], s[6]);
  x[7] = _mm_sub_epi16(s[3], s[7]);
  x[8] = _mm_packs_epi32(u[0], u[1]);
  x[9] = _mm_packs_epi32(u[2], u[3]);
  x[10] = _mm_packs_epi32(u[4], u[5]);
  x[11] = _mm_packs_epi32(u[6], u[7]);
  x[12] = _mm_packs_epi32(u[8], u[9]);
  x[13] = _mm_packs_epi32(u[10], u[11]);
  x[14] = _mm_packs_epi32(u[12], u[13]);
  x[15] = _mm_packs_epi32(u[14], u[15]);

  // stage 3
  u[0] = _mm_unpacklo_epi16(x[4], x[5]);
  u[1] = _mm_unpackhi_epi16(x[4], x[5]);
  u[2] = _mm_unpacklo_epi16(x[6], x[7]);
  u[3] = _mm_unpackhi_epi16(x[6], x[7]);
  u[4] = _mm_unpacklo_epi16(x[12], x[13]);
  u[5] = _mm_unpackhi_epi16(x[12], x[13]);
  u[6] = _mm_unpacklo_epi16(x[14], x[15]);
  u[7] = _mm_unpackhi_epi16(x[14], x[15]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08);
  v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08);
  v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08);
  v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24);
  v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24);
  v[8] = _mm_madd_epi16(u[4], k__cospi_p08_p24);
  v[9] = _mm_madd_epi16(u[5], k__cospi_p08_p24);
  v[10] = _mm_madd_epi16(u[4], k__cospi_p24_m08);
  v[11] = _mm_madd_epi16(u[5], k__cospi_p24_m08);
  v[12] = _mm_madd_epi16(u[6], k__cospi_m24_p08);
  v[13] = _mm_madd_epi16(u[7], k__cospi_m24_p08);
  v[14] = _mm_madd_epi16(u[6], k__cospi_p08_p24);
  v[15] = _mm_madd_epi16(u[7], k__cospi_p08_p24);

  u[0] = _mm_add_epi32(v[0], v[4]);
  u[1] = _mm_add_epi32(v[1], v[5]);
  u[2] = _mm_add_epi32(v[2], v[6]);
  u[3] = _mm_add_epi32(v[3], v[7]);
  u[4] = _mm_sub_epi32(v[0], v[4]);
  u[5] = _mm_sub_epi32(v[1], v[5]);
  u[6] = _mm_sub_epi32(v[2], v[6]);
  u[7] = _mm_sub_epi32(v[3], v[7]);
  u[8] = _mm_add_epi32(v[8], v[12]);
  u[9] = _mm_add_epi32(v[9], v[13]);
  u[10] = _mm_add_epi32(v[10], v[14]);
  u[11] = _mm_add_epi32(v[11], v[15]);
  u[12] = _mm_sub_epi32(v[8], v[12]);
  u[13] = _mm_sub_epi32(v[9], v[13]);
  u[14] = _mm_sub_epi32(v[10], v[14]);
  u[15] = _mm_sub_epi32(v[11], v[15]);

  u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
  u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
  u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
  u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
  u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
  u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
  u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
  u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
  u[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
  u[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
  u[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
  u[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
  u[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
  u[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
  u[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
  u[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);

  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
  v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
  v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
  v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
  v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
  v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
  v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
  v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
  v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
  v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
  v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
  v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
  v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);

  s[0] = _mm_add_epi16(x[0], x[2]);
  s[1] = _mm_add_epi16(x[1], x[3]);
  s[2] = _mm_sub_epi16(x[0], x[2]);
  s[3] = _mm_sub_epi16(x[1], x[3]);
  s[4] = _mm_packs_epi32(v[0], v[1]);
  s[5] = _mm_packs_epi32(v[2], v[3]);
  s[6] = _mm_packs_epi32(v[4], v[5]);
  s[7] = _mm_packs_epi32(v[6], v[7]);
  s[8] = _mm_add_epi16(x[8], x[10]);
  s[9] = _mm_add_epi16(x[9], x[11]);
  s[10] = _mm_sub_epi16(x[8], x[10]);
  s[11] = _mm_sub_epi16(x[9], x[11]);
  s[12] = _mm_packs_epi32(v[8], v[9]);
  s[13] = _mm_packs_epi32(v[10], v[11]);
  s[14] = _mm_packs_epi32(v[12], v[13]);
  s[15] = _mm_packs_epi32(v[14], v[15]);

  // stage 4
  u[0] = _mm_unpacklo_epi16(s[2], s[3]);
  u[1] = _mm_unpackhi_epi16(s[2], s[3]);
  u[2] = _mm_unpacklo_epi16(s[6], s[7]);
  u[3] = _mm_unpackhi_epi16(s[6], s[7]);
  u[4] = _mm_unpacklo_epi16(s[10], s[11]);
  u[5] = _mm_unpackhi_epi16(s[10], s[11]);
  u[6] = _mm_unpacklo_epi16(s[14], s[15]);
  u[7] = _mm_unpackhi_epi16(s[14], s[15]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_m16_m16);
  v[1] = _mm_madd_epi16(u[1], k__cospi_m16_m16);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p16_m16);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p16_m16);
  v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16);
  v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16);
  v[6] = _mm_madd_epi16(u[2], k__cospi_m16_p16);
  v[7] = _mm_madd_epi16(u[3], k__cospi_m16_p16);
  v[8] = _mm_madd_epi16(u[4], k__cospi_p16_p16);
  v[9] = _mm_madd_epi16(u[5], k__cospi_p16_p16);
  v[10] = _mm_madd_epi16(u[4], k__cospi_m16_p16);
  v[11] = _mm_madd_epi16(u[5], k__cospi_m16_p16);
  v[12] = _mm_madd_epi16(u[6], k__cospi_m16_m16);
  v[13] = _mm_madd_epi16(u[7], k__cospi_m16_m16);
  v[14] = _mm_madd_epi16(u[6], k__cospi_p16_m16);
  v[15] = _mm_madd_epi16(u[7], k__cospi_p16_m16);

  u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
  u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
  u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
  u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
  u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
  u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
  u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
  u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
  u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING);
  u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING);
  u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING);
  u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING);
  u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING);
  u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING);
  u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING);
  u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING);

  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
  v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
  v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
  v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
  v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
  v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
  v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
  v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
  v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
  v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
  v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
  v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
  v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);

  in[0] = s[0];
  in[1] = _mm_sub_epi16(kZero, s[8]);
  in[2] = s[12];
  in[3] = _mm_sub_epi16(kZero, s[4]);
  in[4] = _mm_packs_epi32(v[4], v[5]);
  in[5] = _mm_packs_epi32(v[12], v[13]);
  in[6] = _mm_packs_epi32(v[8], v[9]);
  in[7] = _mm_packs_epi32(v[0], v[1]);
  in[8] = _mm_packs_epi32(v[2], v[3]);
  in[9] = _mm_packs_epi32(v[10], v[11]);
  in[10] = _mm_packs_epi32(v[14], v[15]);
  in[11] = _mm_packs_epi32(v[6], v[7]);
  in[12] = s[5];
  in[13] = _mm_sub_epi16(kZero, s[13]);
  in[14] = s[9];
  in[15] = _mm_sub_epi16(kZero, s[1]);
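  // The (kZero - s[x]) terms implement the sign flips on outputs 1, 3,
  // 13 and 15 of the scalar fadst16.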
}

void fdct16_1d_sse2(__m128i *in0, __m128i *in1) {
  fdct16_1d_8col(in0);
  fdct16_1d_8col(in1);
  array_transpose_16x16(in0, in1);
}

void fadst16_1d_sse2(__m128i *in0, __m128i *in1) {
  fadst16_1d_8col(in0);
  fadst16_1d_8col(in1);
  array_transpose_16x16(in0, in1);
}

void vp9_short_fht16x16_sse2(const int16_t *input, int16_t *output,
                             int stride, int tx_type) {
  __m128i in0[16], in1[16];
  load_buffer_16x16(input, in0, in1, stride);
  switch (tx_type) {
    case 0:  // DCT_DCT
      fdct16_1d_sse2(in0, in1);
      right_shift_16x16(in0, in1);
      fdct16_1d_sse2(in0, in1);
      break;
    case 1:  // ADST_DCT
      fadst16_1d_sse2(in0, in1);
      right_shift_16x16(in0, in1);
      fdct16_1d_sse2(in0, in1);
      break;
    case 2:  // DCT_ADST
      fdct16_1d_sse2(in0, in1);
      right_shift_16x16(in0, in1);
      fadst16_1d_sse2(in0, in1);
      break;
    case 3:  // ADST_ADST
      fadst16_1d_sse2(in0, in1);
      right_shift_16x16(in0, in1);
      fadst16_1d_sse2(in0, in1);
      break;
    default:
      assert(0);
      break;
  }
  write_buffer_16x16(output, in0, in1, 16);
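  // The result is a packed 16x16 coefficient block, hence the fixed output
  // stride of 16 regardless of the input stride.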
}

#define FDCT32x32_2D vp9_fdct32x32_rd_sse2
#define FDCT32x32_HIGH_PRECISION 0
#include "vp9/encoder/x86/vp9_dct32x32_sse2.c"
#undef  FDCT32x32_2D
#undef  FDCT32x32_HIGH_PRECISION

#define FDCT32x32_2D vp9_fdct32x32_sse2
#define FDCT32x32_HIGH_PRECISION 1
#include "vp9/encoder/x86/vp9_dct32x32_sse2.c"  // NOLINT
#undef  FDCT32x32_2D
#undef  FDCT32x32_HIGH_PRECISION
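
// The 32x32 forward DCT is stamped out twice from the same template:
// vp9_fdct32x32_rd_sse2 with FDCT32x32_HIGH_PRECISION 0 (the reduced
// precision variant intended for rate-distortion search) and
// vp9_fdct32x32_sse2 with FDCT32x32_HIGH_PRECISION 1.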