From dabcdbc493b02d3b477cb056beaf0fb7a1879c1e Mon Sep 17 00:00:00 2001
From: Michael Niedermayer
Date: Mon, 5 Nov 2001 18:50:58 +0000
Subject: [PATCH] yuy2toyv12 fixed and speedup

Originally committed as revision 2725 to svn://svn.mplayerhq.hu/mplayer/trunk/postproc
---
 postproc/rgb2rgb.c          | 171 ++++++++++++++++++++++++++++----------------
 postproc/rgb2rgb.h          |   3 +-
 postproc/rgb2rgb_template.c | 171 ++++++++++++++++++++++++++++----------------
 3 files changed, 220 insertions(+), 125 deletions(-)

diff --git a/postproc/rgb2rgb.c b/postproc/rgb2rgb.c
index 5d538ec..566a785 100644
--- a/postproc/rgb2rgb.c
+++ b/postproc/rgb2rgb.c
@@ -291,7 +291,8 @@ void palette8torgb15(const uint8_t *src, uint8_t *dst, unsigned num_pixels, cons
 }
 /**
  *
- * width must be a multiple of 16 for the MMX version
+ * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
+ * problem for anyone then tell me, and ill fix it)
  */
 void yv12toyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
 	int width, int height, int lumStride, int chromStride, int dstStride)
@@ -359,70 +360,116 @@ asm( EMMS"	\n\t"
 #endif
 }
-void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, unsigned num_pixels)
+/**
+ *
+ * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
+ * problem for anyone then tell me, and ill fix it)
+ */
+void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+	int width, int height, int lumStride, int chromStride, int srcStride)
 {
+	int y;
+	const int chromWidth= width>>1;
+	for(y=0; y>1)
-		: "memory", "%eax"
-	);
+		asm volatile(
+			"xorl %%eax, %%eax		\n\t"
+			"pcmpeqw %%mm7, %%mm7		\n\t"
+			"psrlw $8, %%mm7		\n\t" // FF,00,FF,00...
+			"1:				\n\t"
+			PREFETCH" 64(%0, %%eax, 4)	\n\t"
+			"movq (%0, %%eax, 4), %%mm0	\n\t" // YUYV YUYV(0)
+			"movq 8(%0, %%eax, 4), %%mm1	\n\t" // YUYV YUYV(4)
+			"movq %%mm0, %%mm2		\n\t" // YUYV YUYV(0)
+			"movq %%mm1, %%mm3		\n\t" // YUYV YUYV(4)
+			"psrlw $8, %%mm0		\n\t" // U0V0 U0V0(0)
+			"psrlw $8, %%mm1		\n\t" // U0V0 U0V0(4)
+			"pand %%mm7, %%mm2		\n\t" // Y0Y0 Y0Y0(0)
+			"pand %%mm7, %%mm3		\n\t" // Y0Y0 Y0Y0(4)
+			"packuswb %%mm1, %%mm0		\n\t" // UVUV UVUV(0)
+			"packuswb %%mm3, %%mm2		\n\t" // YYYY YYYY(0)
+
+			MOVNTQ" %%mm2, (%1, %%eax, 2)	\n\t"
+
+			"movq 16(%0, %%eax, 4), %%mm1	\n\t" // YUYV YUYV(8)
+			"movq 24(%0, %%eax, 4), %%mm2	\n\t" // YUYV YUYV(12)
+			"movq %%mm1, %%mm3		\n\t" // YUYV YUYV(8)
+			"movq %%mm2, %%mm4		\n\t" // YUYV YUYV(12)
+			"psrlw $8, %%mm1		\n\t" // U0V0 U0V0(8)
+			"psrlw $8, %%mm2		\n\t" // U0V0 U0V0(12)
+			"pand %%mm7, %%mm3		\n\t" // Y0Y0 Y0Y0(8)
+			"pand %%mm7, %%mm4		\n\t" // Y0Y0 Y0Y0(12)
+			"packuswb %%mm2, %%mm1		\n\t" // UVUV UVUV(8)
+			"packuswb %%mm4, %%mm3		\n\t" // YYYY YYYY(8)
+
+			MOVNTQ" %%mm3, 8(%1, %%eax, 2)	\n\t"
+
+			"movq %%mm0, %%mm2		\n\t" // UVUV UVUV(0)
+			"movq %%mm1, %%mm3		\n\t" // UVUV UVUV(8)
+			"psrlw $8, %%mm0		\n\t" // V0V0 V0V0(0)
+			"psrlw $8, %%mm1		\n\t" // V0V0 V0V0(8)
+			"pand %%mm7, %%mm2		\n\t" // U0U0 U0U0(0)
+			"pand %%mm7, %%mm3		\n\t" // U0U0 U0U0(8)
+			"packuswb %%mm1, %%mm0		\n\t" // VVVV VVVV(0)
+			"packuswb %%mm3, %%mm2		\n\t" // UUUU UUUU(0)
+
+			MOVNTQ" %%mm0, (%3, %%eax)	\n\t"
+			MOVNTQ" %%mm2, (%2, %%eax)	\n\t"
+
+			"addl $8, %%eax			\n\t"
+			"cmpl %4, %%eax			\n\t"
+			" jb 1b				\n\t"
+
+			"1:				\n\t"
+			PREFETCH" 64(%0, %%eax, 4)	\n\t"
+			"movq (%0, %%eax, 4), %%mm0	\n\t" // YUYV YUYV(0)
+			"movq 8(%0, %%eax, 4), %%mm1	\n\t" // YUYV YUYV(4)
+			"movq 16(%0, %%eax, 4), %%mm2	\n\t" // YUYV YUYV(8)
+			"movq 24(%0, %%eax, 4), %%mm3	\n\t" // YUYV YUYV(12)
YUYV(12) + "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0) + "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4) + "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8) + "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12) + "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0) + "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8) + + MOVNTQ" %%mm0, (%1, %%eax, 2) \n\t" + MOVNTQ" %%mm2, 8(%1, %%eax, 2) \n\t" + + "addl $8, %%eax \n\t" + "cmpl %5, %%eax \n\t" + " jb 1b \n\t" + + ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "r" (chromWidth), "m"(width) + : "memory", "%eax" + ); #else - int i; - num_pixels>>=1; - for(i=0; i>1; + for(y=0; y>1) - : "memory", "%eax" - ); + asm volatile( + "xorl %%eax, %%eax \n\t" + "pcmpeqw %%mm7, %%mm7 \n\t" + "psrlw $8, %%mm7 \n\t" // FF,00,FF,00... + "1: \n\t" + PREFETCH" 64(%0, %%eax, 4) \n\t" + "movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0) + "movq 8(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(4) + "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0) + "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4) + "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0) + "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4) + "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0) + "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4) + "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0) + "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0) + + MOVNTQ" %%mm2, (%1, %%eax, 2) \n\t" + + "movq 16(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(8) + "movq 24(%0, %%eax, 4), %%mm2 \n\t" // YUYV YUYV(12) + "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8) + "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12) + "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8) + "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12) + "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8) + "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12) + "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8) + "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8) + + MOVNTQ" %%mm3, 8(%1, %%eax, 2) \n\t" + + "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0) + "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8) + "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0) + "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8) + "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0) + "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8) + "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0) + "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0) + + MOVNTQ" %%mm0, (%3, %%eax) \n\t" + MOVNTQ" %%mm2, (%2, %%eax) \n\t" + + "addl $8, %%eax \n\t" + "cmpl %4, %%eax \n\t" + " jb 1b \n\t" + + "1: \n\t" + PREFETCH" 64(%0, %%eax, 4) \n\t" + "movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0) + "movq 8(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(4) + "movq 16(%0, %%eax, 4), %%mm2 \n\t" // YUYV YUYV(8) + "movq 24(%0, %%eax, 4), %%mm3 \n\t" // YUYV YUYV(12) + "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0) + "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4) + "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8) + "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12) + "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0) + "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8) + + MOVNTQ" %%mm0, (%1, %%eax, 2) \n\t" + MOVNTQ" %%mm2, 8(%1, %%eax, 2) \n\t" + + "addl $8, %%eax \n\t" + "cmpl %5, %%eax \n\t" + " jb 1b \n\t" + + ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "r" (chromWidth), "m"(width) + : "memory", "%eax" + ); #else - int i; - num_pixels>>=1; - for(i=0; i