1 /********************************************************************
3 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. *
4 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
5 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
6 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
8 * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2009 *
9 * by the Xiph.Org Foundation and contributors http://www.xiph.org/ *
11 ********************************************************************
14 last mod: $Id: mmxidct.c 16503 2009-08-22 18:14:02Z giles $
16 ********************************************************************/
18 /*MMX acceleration of Theora's iDCT.
19 Originally written by Rudolf Marek, based on code from On2's VP3.*/
23 #if defined(OC_X86_ASM)
25 /*These are offsets into the table of constants below.*/
26 /*7 rows of cosines, in order: pi/16 * (1 ... 7).*/
27 #define OC_COSINE_OFFSET (0)
/*A row of 8's: 7 cosine rows * 4 entries * 2 bytes each = 56 bytes in.
  Added via OC_8 in the column iDCT, presumably as a rounding bias before the
  final shifts -- TODO confirm against the elided shift instructions.*/
29 #define OC_EIGHT_OFFSET (56)
33 /*A table of constants used by the MMX routines.*/
34 static const __declspec(align(16))ogg_uint16_t
35 OC_IDCT_CONSTS[(7+1)*4]={
36 (ogg_uint16_t)OC_C1S7,(ogg_uint16_t)OC_C1S7,
37 (ogg_uint16_t)OC_C1S7,(ogg_uint16_t)OC_C1S7,
38 (ogg_uint16_t)OC_C2S6,(ogg_uint16_t)OC_C2S6,
39 (ogg_uint16_t)OC_C2S6,(ogg_uint16_t)OC_C2S6,
40 (ogg_uint16_t)OC_C3S5,(ogg_uint16_t)OC_C3S5,
41 (ogg_uint16_t)OC_C3S5,(ogg_uint16_t)OC_C3S5,
42 (ogg_uint16_t)OC_C4S4,(ogg_uint16_t)OC_C4S4,
43 (ogg_uint16_t)OC_C4S4,(ogg_uint16_t)OC_C4S4,
44 (ogg_uint16_t)OC_C5S3,(ogg_uint16_t)OC_C5S3,
45 (ogg_uint16_t)OC_C5S3,(ogg_uint16_t)OC_C5S3,
46 (ogg_uint16_t)OC_C6S2,(ogg_uint16_t)OC_C6S2,
47 (ogg_uint16_t)OC_C6S2,(ogg_uint16_t)OC_C6S2,
48 (ogg_uint16_t)OC_C7S1,(ogg_uint16_t)OC_C7S1,
49 (ogg_uint16_t)OC_C7S1,(ogg_uint16_t)OC_C7S1,
54 #define OC_IDCT_BEGIN __asm{ \
55 __asm movq mm2,OC_I(3) \
56 __asm movq mm6,OC_C(3) \
58 __asm movq mm7,OC_J(5) \
59 __asm pmulhw mm4,mm6 \
60 __asm movq mm1,OC_C(5) \
61 __asm pmulhw mm6,mm7 \
63 __asm pmulhw mm1,mm2 \
64 __asm movq mm3,OC_I(1) \
65 __asm pmulhw mm5,mm7 \
66 __asm movq mm0,OC_C(1) \
70 __asm movq mm1,OC_J(7) \
73 __asm pmulhw mm0,mm3 \
75 __asm pmulhw mm5,mm1 \
76 __asm movq mm7,OC_C(7) \
79 __asm pmulhw mm3,mm7 \
80 __asm movq mm2,OC_I(2) \
81 __asm pmulhw mm7,mm1 \
84 __asm pmulhw mm2,OC_C(2) \
86 __asm movq mm5,OC_J(6) \
90 __asm pmulhw mm5,OC_C(2) \
92 __asm pmulhw mm1,OC_C(6) \
98 __asm pmulhw mm7,OC_C(6) \
100 __asm movq OC_I(1),mm4 \
101 __asm psubw mm1,mm5 \
102 __asm movq mm4,OC_C(4) \
104 __asm pmulhw mm3,mm4 \
105 __asm paddw mm7,mm2 \
106 __asm movq OC_I(2),mm6 \
108 __asm movq mm6,OC_I(0) \
109 __asm pmulhw mm0,mm4 \
110 __asm paddw mm5,mm3 \
111 __asm movq mm3,OC_J(4) \
112 __asm psubw mm5,mm1 \
113 __asm paddw mm2,mm0 \
114 __asm psubw mm6,mm3 \
116 __asm pmulhw mm6,mm4 \
117 __asm paddw mm3,mm3 \
118 __asm paddw mm1,mm1 \
119 __asm paddw mm3,mm0 \
120 __asm paddw mm1,mm5 \
121 __asm pmulhw mm4,mm3 \
122 __asm paddw mm6,mm0 \
123 __asm psubw mm6,mm2 \
124 __asm paddw mm2,mm2 \
125 __asm movq mm0,OC_I(1) \
126 __asm paddw mm2,mm6 \
127 __asm paddw mm4,mm3 \
128 __asm psubw mm2,mm1 \
132 #define OC_ROW_IDCT __asm{ \
135 __asm movq mm3,OC_I(2) \
137 __asm psubw mm4,mm7 \
139 __asm paddw mm1,mm1 \
141 __asm paddw mm7,mm7 \
143 __asm paddw mm1,mm2 \
145 __asm paddw mm7,mm4 \
147 __asm psubw mm4,mm3 \
148 __asm paddw mm3,mm3 \
150 __asm psubw mm6,mm5 \
151 __asm paddw mm5,mm5 \
153 __asm paddw mm3,mm4 \
155 __asm paddw mm5,mm6 \
157 __asm psubw mm7,mm0 \
158 __asm paddw mm0,mm0 \
160 __asm movq OC_I(1),mm1 \
162 __asm paddw mm0,mm7 \
165 /*The following macro does two 4x4 transposes in place.
188 I(0) I(1) I(2) I(3) is the transpose of r0 I(1) r2 r3.
189 J(4) J(5) J(6) J(7) is the transpose of r4 r5 r6 r7.
191 Since r1 is free at entry, we calculate the Js first.*/
193 #define OC_TRANSPOSE __asm{ \
195 __asm punpcklwd mm4,mm5 \
196 __asm movq OC_I(0),mm0 \
197 __asm punpckhwd mm1,mm5 \
199 __asm punpcklwd mm6,mm7 \
201 __asm punpckldq mm4,mm6 \
202 __asm punpckhdq mm5,mm6 \
204 __asm movq OC_J(4),mm4 \
205 __asm punpckhwd mm0,mm7 \
206 __asm movq OC_J(5),mm5 \
207 __asm punpckhdq mm6,mm0 \
208 __asm movq mm4,OC_I(0) \
209 __asm punpckldq mm1,mm0 \
210 __asm movq mm5,OC_I(1) \
212 __asm movq OC_J(7),mm6 \
213 __asm punpcklwd mm0,mm5 \
214 __asm movq OC_J(6),mm1 \
215 __asm punpckhwd mm4,mm5 \
217 __asm punpcklwd mm2,mm3 \
219 __asm punpckldq mm0,mm2 \
220 __asm punpckhdq mm1,mm2 \
222 __asm movq OC_I(0),mm0 \
223 __asm punpckhwd mm5,mm3 \
224 __asm movq OC_I(1),mm1 \
225 __asm punpckhdq mm4,mm5 \
226 __asm punpckldq mm2,mm5 \
227 __asm movq OC_I(3),mm4 \
228 __asm movq OC_I(2),mm2 \
232 #define OC_COLUMN_IDCT __asm{ \
234 __asm paddw mm2,OC_8 \
236 __asm paddw mm1,mm1 \
238 __asm paddw mm1,mm2 \
242 __asm psubw mm4,mm7 \
246 __asm movq mm3,OC_I(2) \
248 __asm paddw mm7,mm7 \
249 /*Store NR2 at I(2).*/ \
250 __asm movq OC_I(2),mm2 \
252 __asm paddw mm7,mm4 \
253 /*Store NR1 at I(1).*/ \
254 __asm movq OC_I(1),mm1 \
256 __asm psubw mm4,mm3 \
257 __asm paddw mm4,OC_8 \
259 __asm paddw mm3,mm3 \
261 __asm paddw mm3,mm4 \
265 __asm psubw mm6,mm5 \
268 __asm paddw mm6,OC_8 \
270 __asm paddw mm5,mm5 \
272 __asm paddw mm5,mm6 \
275 /*Store NR4 at J(4).*/ \
276 __asm movq OC_J(4),mm4 \
279 /*Store NR3 at I(3).*/ \
280 __asm movq OC_I(3),mm3 \
282 __asm psubw mm7,mm0 \
283 __asm paddw mm7,OC_8 \
285 __asm paddw mm0,mm0 \
287 __asm paddw mm0,mm7 \
290 /*Store NR6 at J(6).*/ \
291 __asm movq OC_J(6),mm6 \
294 /*Store NR5 at J(5).*/ \
295 __asm movq OC_J(5),mm5 \
296 /*Store NR7 at J(7).*/ \
297 __asm movq OC_J(7),mm7 \
298 /*Store NR0 at I(0).*/ \
299 __asm movq OC_I(0),mm0 \
/*Address of the _i'th quadword (8 bytes) past byte offset _m in the constant
  table pointed to by the CONSTS register.*/
302 #define OC_MID(_m,_i) [CONSTS+_m+(_i)*8]
/*The quadword holding four copies of the cosine constant for pi/16*_i
  (OC_IDCT_CONSTS rows are in order OC_C1S7 ... OC_C7S1).*/
303 #define OC_C(_i) OC_MID(OC_COSINE_OFFSET,_i-1)
/*The quadword stored after the cosine rows (see OC_EIGHT_OFFSET).*/
304 #define OC_8 OC_MID(OC_EIGHT_OFFSET,0)
306 static void oc_idct8x8_slow(ogg_int16_t _y[64]){
307 /*This routine accepts an 8x8 matrix, but in partially transposed form.
308 Every 4x4 block is transposed.*/
312 mov CONSTS,offset OC_IDCT_CONSTS
/*Row-transform addressing for the first four rows: each row of the 8x8
  ogg_int16_t matrix is 16 bytes wide.  OC_I(_k) is the low quadword (first
  four coefficients) of row _k; OC_J(_k) is the high quadword of row _k-4,
  i.e., the last four coefficients of rows 0...3.*/
314 #define OC_I(_k) [Y+_k*16]
315 #define OC_J(_k) [Y+(_k-4)*16+8]
/*Same addressing shifted down 64 bytes (four 16-byte rows): the row
  transform for rows 4...7 of the matrix.*/
320 #define OC_I(_k) [Y+(_k*16)+64]
321 #define OC_J(_k) [Y+(_k-4)*16+72]
/*Column-transform addressing for the left half: OC_J aliases OC_I, so
  indices 4...7 simply address rows 4...7's low quadwords directly.*/
326 #define OC_I(_k) [Y+_k*16]
327 #define OC_J(_k) OC_I(_k)
/*Column-transform addressing for the right half (8 bytes into each row).*/
331 #define OC_I(_k) [Y+_k*16+8]
332 #define OC_J(_k) OC_I(_k)
342 #define OC_IDCT_BEGIN_10 __asm{ \
343 __asm movq mm2,OC_I(3) \
345 __asm movq mm6,OC_C(3) \
347 __asm movq mm1,OC_C(5) \
348 __asm pmulhw mm4,mm6 \
349 __asm movq mm3,OC_I(1) \
350 __asm pmulhw mm1,mm2 \
351 __asm movq mm0,OC_C(1) \
352 __asm paddw mm4,mm2 \
354 __asm paddw mm2,mm1 \
355 __asm movq mm5,OC_I(2) \
356 __asm pmulhw mm0,mm3 \
358 __asm paddw mm0,mm3 \
359 __asm pmulhw mm3,OC_C(7) \
360 __asm psubw mm6,mm2 \
361 __asm pmulhw mm5,OC_C(2) \
362 __asm psubw mm0,mm4 \
363 __asm movq mm7,OC_I(2) \
364 __asm paddw mm4,mm4 \
365 __asm paddw mm7,mm5 \
366 __asm paddw mm4,mm0 \
367 __asm pmulhw mm1,OC_C(6) \
368 __asm psubw mm3,mm6 \
369 __asm movq OC_I(1),mm4 \
370 __asm paddw mm6,mm6 \
371 __asm movq mm4,OC_C(4) \
372 __asm paddw mm6,mm3 \
374 __asm pmulhw mm3,mm4 \
375 __asm movq OC_I(2),mm6 \
377 __asm movq mm6,OC_I(0) \
378 __asm pmulhw mm0,mm4 \
379 __asm paddw mm5,mm3 \
380 __asm paddw mm2,mm0 \
381 __asm psubw mm5,mm1 \
382 __asm pmulhw mm6,mm4 \
383 __asm paddw mm6,OC_I(0) \
384 __asm paddw mm1,mm1 \
386 __asm paddw mm1,mm5 \
387 __asm psubw mm6,mm2 \
388 __asm paddw mm2,mm2 \
389 __asm movq mm0,OC_I(1) \
390 __asm paddw mm2,mm6 \
391 __asm psubw mm2,mm1 \
396 #define OC_ROW_IDCT_10 __asm{ \
399 __asm movq mm3,OC_I(2) \
401 __asm psubw mm4,mm7 \
403 __asm paddw mm1,mm1 \
405 __asm paddw mm7,mm7 \
407 __asm paddw mm1,mm2 \
409 __asm paddw mm7,mm4 \
411 __asm psubw mm4,mm3 \
412 __asm paddw mm3,mm3 \
414 __asm psubw mm6,mm5 \
415 __asm paddw mm5,mm5 \
417 __asm paddw mm3,mm4 \
419 __asm paddw mm5,mm6 \
421 __asm psubw mm7,mm0 \
422 __asm paddw mm0,mm0 \
424 __asm movq OC_I(1),mm1 \
426 __asm paddw mm0,mm7 \
430 #define OC_COLUMN_IDCT_10 __asm{ \
432 __asm paddw mm2,OC_8 \
434 __asm paddw mm1,mm1 \
436 __asm paddw mm1,mm2 \
440 __asm psubw mm4,mm7 \
444 __asm movq mm3,OC_I(2) \
446 __asm paddw mm7,mm7 \
447 /*Store NR2 at I(2).*/ \
448 __asm movq OC_I(2),mm2 \
450 __asm paddw mm7,mm4 \
451 /*Store NR1 at I(1).*/ \
452 __asm movq OC_I(1),mm1 \
454 __asm psubw mm4,mm3 \
455 __asm paddw mm4,OC_8 \
457 __asm paddw mm3,mm3 \
459 __asm paddw mm3,mm4 \
463 __asm psubw mm6,mm5 \
466 __asm paddw mm6,OC_8 \
468 __asm paddw mm5,mm5 \
470 __asm paddw mm5,mm6 \
473 /*Store NR4 at J(4).*/ \
474 __asm movq OC_J(4),mm4 \
477 /*Store NR3 at I(3).*/ \
478 __asm movq OC_I(3),mm3 \
480 __asm psubw mm7,mm0 \
481 __asm paddw mm7,OC_8 \
483 __asm paddw mm0,mm0 \
485 __asm paddw mm0,mm7 \
488 /*Store NR6 at J(6).*/ \
489 __asm movq OC_J(6),mm6 \
492 /*Store NR5 at J(5).*/ \
493 __asm movq OC_J(5),mm5 \
494 /*Store NR7 at J(7).*/ \
495 __asm movq OC_J(7),mm7 \
496 /*Store NR0 at I(0).*/ \
497 __asm movq OC_I(0),mm0 \
500 static void oc_idct8x8_10(ogg_int16_t _y[64]){
504 mov CONSTS,offset OC_IDCT_CONSTS
/*Row-transform addressing (only rows 0...3 matter in the 10-coefficient
  case): low quadword of row _k, high quadword of row _k-4.*/
506 #define OC_I(_k) [Y+_k*16]
507 #define OC_J(_k) [Y+(_k-4)*16+8]
508 /*Done with dequant, descramble, and partial transpose.
509 Now do the iDCT itself.*/
/*Column-transform addressing for the left half; OC_J aliases OC_I so
  indices 4...7 address rows 4...7 directly.*/
514 #define OC_I(_k) [Y+_k*16]
515 #define OC_J(_k) OC_I(_k)
/*Column-transform addressing for the right half (8 bytes into each row).*/
519 #define OC_I(_k) [Y+_k*16+8]
520 #define OC_J(_k) OC_I(_k)
529 /*Performs an inverse 8x8 Type-II DCT transform.
530 The input is assumed to be scaled by a factor of 4 relative to orthonormal
531 version of the transform.*/
532 void oc_idct8x8_mmx(ogg_int16_t _y[64],int _last_zzi){
533 /*_last_zzi is subtly different from an actual count of the number of
534 coefficients we decoded for this block.
535 It contains the value of zzi BEFORE the final token in the block was
536 decoded.
537 In most cases this is an EOB token (the continuation of an EOB run from a
538 previous block counts), and so this is the same as the coefficient count.
539 However, in the case that the last token was NOT an EOB token, but filled
540 the block up with exactly 64 coefficients, _last_zzi will be less than 64.
541 Provided the last token was not a pure zero run, the minimum value it can
542 be is 46, and so that doesn't affect any of the cases in this routine.
543 However, if the last token WAS a pure zero run of length 63, then _last_zzi
544 will be 1 while the number of coefficients decoded is 64.
545 Thus, we will trigger the following special case, where the real
546 coefficient count would not.
547 Note also that a zero run of length 64 will give _last_zzi a value of 0,
548 but we still process the DC coefficient, which might have a non-zero value
549 due to DC prediction.
550 Although convoluted, this is arguably the correct behavior: it allows us to
551 use a smaller transform when the block ends with a long zero run instead
552 of a normal EOB token.
553 It could be smarter... multiple separate zero runs at the end of a block
554 will fool it, but an encoder that generates these really deserves what it
555 gets.
556 Needless to say we inherited this approach from VP3.*/
557 /*Perform the iDCT.*/
558 if(_last_zzi<10)oc_idct8x8_10(_y);
559 else oc_idct8x8_slow(_y);