From: Diego Biurrun
Date: Wed, 25 Dec 2013 15:48:32 +0000 (+0100)
Subject: dsputilenc_mmx: K&R formatting cosmetics
X-Git-Tag: v11_alpha1~952
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a36947c167d7278b891453083b57dc56b7a7f5c5;p=platform%2Fupstream%2Flibav.git

dsputilenc_mmx: K&R formatting cosmetics
---

diff --git a/libavcodec/x86/dsputilenc_mmx.c b/libavcodec/x86/dsputilenc_mmx.c
index e21ae24..2c32037 100644
--- a/libavcodec/x86/dsputilenc_mmx.c
+++ b/libavcodec/x86/dsputilenc_mmx.c
@@ -34,450 +34,479 @@
 void ff_get_pixels_mmx(int16_t *block, const uint8_t *pixels, int line_size);
 void ff_get_pixels_sse2(int16_t *block, const uint8_t *pixels, int line_size);
-void ff_diff_pixels_mmx(int16_t *block, const uint8_t *s1, const uint8_t *s2, int stride);
-int ff_pix_sum16_mmx(uint8_t * pix, int line_size);
+void ff_diff_pixels_mmx(int16_t *block, const uint8_t *s1, const uint8_t *s2,
+                        int stride);
+int ff_pix_sum16_mmx(uint8_t *pix, int line_size);
 int ff_pix_norm1_mmx(uint8_t *pix, int line_size);

 #if HAVE_INLINE_ASM

-static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+static int sse8_mmx(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+{
     int tmp;
-    __asm__ volatile (
-        "movl %4,%%ecx\n"
-        "shr $1,%%ecx\n"
-        "pxor %%mm0,%%mm0\n"    /* mm0 = 0 */
-        "pxor %%mm7,%%mm7\n"    /* mm7 holds the sum */
-        "1:\n"
-        "movq (%0),%%mm1\n"     /* mm1 = pix1[0][0-7] */
-        "movq (%1),%%mm2\n"     /* mm2 = pix2[0][0-7] */
-        "movq (%0,%3),%%mm3\n"  /* mm3 = pix1[1][0-7] */
-        "movq (%1,%3),%%mm4\n"  /* mm4 = pix2[1][0-7] */
-
-        /* todo: mm1-mm2, mm3-mm4 */
-        /* algo: subtract mm1 from mm2 with saturation and vice versa */
-        /*       OR the results to get absolute difference */
-        "movq %%mm1,%%mm5\n"
-        "movq %%mm3,%%mm6\n"
-        "psubusb %%mm2,%%mm1\n"
-        "psubusb %%mm4,%%mm3\n"
-        "psubusb %%mm5,%%mm2\n"
-        "psubusb %%mm6,%%mm4\n"
-
-        "por %%mm1,%%mm2\n"
-        "por %%mm3,%%mm4\n"
-
-        /* now convert to 16-bit vectors so we can square them */
-        "movq %%mm2,%%mm1\n"
-        "movq %%mm4,%%mm3\n"
-
-        "punpckhbw %%mm0,%%mm2\n"
-        "punpckhbw %%mm0,%%mm4\n"
-        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
-        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
-
-        "pmaddwd %%mm2,%%mm2\n"
-        "pmaddwd %%mm4,%%mm4\n"
-        "pmaddwd %%mm1,%%mm1\n"
-        "pmaddwd %%mm3,%%mm3\n"
-
-        "lea (%0,%3,2), %0\n"   /* pix1 += 2*line_size */
-        "lea (%1,%3,2), %1\n"   /* pix2 += 2*line_size */
-
-        "paddd %%mm2,%%mm1\n"
-        "paddd %%mm4,%%mm3\n"
-        "paddd %%mm1,%%mm7\n"
-        "paddd %%mm3,%%mm7\n"
-
-        "decl %%ecx\n"
-        "jnz 1b\n"
-
-        "movq %%mm7,%%mm1\n"
-        "psrlq $32, %%mm7\n"    /* shift hi dword to lo */
-        "paddd %%mm7,%%mm1\n"
-        "movd %%mm1,%2\n"
-        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
-        : "r" ((x86_reg)line_size) , "m" (h)
-        : "%ecx");
+
+    __asm__ volatile (
+        "movl %4, %%ecx \n"
+        "shr $1, %%ecx \n"
+        "pxor %%mm0, %%mm0 \n"      /* mm0 = 0 */
+        "pxor %%mm7, %%mm7 \n"      /* mm7 holds the sum */
+        "1: \n"
+        "movq (%0), %%mm1 \n"       /* mm1 = pix1[0][0 - 7] */
+        "movq (%1), %%mm2 \n"       /* mm2 = pix2[0][0 - 7] */
+        "movq (%0, %3), %%mm3 \n"   /* mm3 = pix1[1][0 - 7] */
+        "movq (%1, %3), %%mm4 \n"   /* mm4 = pix2[1][0 - 7] */
+
+        /* todo: mm1-mm2, mm3-mm4 */
+        /* algo: subtract mm1 from mm2 with saturation and vice versa */
+        /*       OR the results to get absolute difference */
+        "movq %%mm1, %%mm5 \n"
+        "movq %%mm3, %%mm6 \n"
+        "psubusb %%mm2, %%mm1 \n"
+        "psubusb %%mm4, %%mm3 \n"
+        "psubusb %%mm5, %%mm2 \n"
+        "psubusb %%mm6, %%mm4 \n"
+
+        "por %%mm1, %%mm2 \n"
+        "por %%mm3, %%mm4 \n"
+
+        /* now convert to 16-bit vectors so we can square them */
+        "movq %%mm2, %%mm1 \n"
+        "movq %%mm4, %%mm3 \n"
+
+        "punpckhbw %%mm0, %%mm2 \n"
+        "punpckhbw %%mm0, %%mm4 \n"
+        "punpcklbw %%mm0, %%mm1 \n" /* mm1 now spread over (mm1, mm2) */
+        "punpcklbw %%mm0, %%mm3 \n" /* mm4 now spread over (mm3, mm4) */
+
+        "pmaddwd %%mm2, %%mm2 \n"
+        "pmaddwd %%mm4, %%mm4 \n"
+        "pmaddwd %%mm1, %%mm1 \n"
+        "pmaddwd %%mm3, %%mm3 \n"
+
+        "lea (%0, %3, 2), %0 \n"    /* pix1 += 2 * line_size */
+        "lea (%1, %3, 2), %1 \n"    /* pix2 += 2 * line_size */
+
+        "paddd %%mm2, %%mm1 \n"
+        "paddd %%mm4, %%mm3 \n"
+        "paddd %%mm1, %%mm7 \n"
+        "paddd %%mm3, %%mm7 \n"
+
+        "decl %%ecx \n"
+        "jnz 1b \n"
+
+        "movq %%mm7, %%mm1 \n"
+        "psrlq $32, %%mm7 \n"       /* shift hi dword to lo */
+        "paddd %%mm7, %%mm1 \n"
+        "movd %%mm1, %2 \n"
+        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
+        : "r" ((x86_reg) line_size), "m" (h)
+        : "%ecx");
+
     return tmp;
 }
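[Editor's note] For readers following the asm: sse8_mmx computes a plain sum of squared differences over an 8-pixel-wide block, using saturating subtracts to get |a - b| and pmaddwd to square-and-accumulate. A minimal scalar C sketch of the same computation; the name sse8_ref is illustrative, not part of the patch:

    #include <stdint.h>

    static int sse8_ref(const uint8_t *pix1, const uint8_t *pix2,
                        int line_size, int h)
    {
        int sum = 0;
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < 8; x++) {
                int d = pix1[x] - pix2[x];   /* per-pixel difference  */
                sum  += d * d;               /* squared and summed    */
            }
            pix1 += line_size;               /* advance one scanline  */
            pix2 += line_size;
        }
        return sum;
    }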

-static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+static int sse16_mmx(void *v, uint8_t *pix1, uint8_t *pix2,
+                     int line_size, int h)
+{
     int tmp;
-    __asm__ volatile (
-        "movl %4,%%ecx\n"
-        "pxor %%mm0,%%mm0\n"    /* mm0 = 0 */
-        "pxor %%mm7,%%mm7\n"    /* mm7 holds the sum */
-        "1:\n"
-        "movq (%0),%%mm1\n"     /* mm1 = pix1[0-7] */
-        "movq (%1),%%mm2\n"     /* mm2 = pix2[0-7] */
-        "movq 8(%0),%%mm3\n"    /* mm3 = pix1[8-15] */
-        "movq 8(%1),%%mm4\n"    /* mm4 = pix2[8-15] */
-
-        /* todo: mm1-mm2, mm3-mm4 */
-        /* algo: subtract mm1 from mm2 with saturation and vice versa */
-        /*       OR the results to get absolute difference */
-        "movq %%mm1,%%mm5\n"
-        "movq %%mm3,%%mm6\n"
-        "psubusb %%mm2,%%mm1\n"
-        "psubusb %%mm4,%%mm3\n"
-        "psubusb %%mm5,%%mm2\n"
-        "psubusb %%mm6,%%mm4\n"
-
-        "por %%mm1,%%mm2\n"
-        "por %%mm3,%%mm4\n"
-
-        /* now convert to 16-bit vectors so we can square them */
-        "movq %%mm2,%%mm1\n"
-        "movq %%mm4,%%mm3\n"
-
-        "punpckhbw %%mm0,%%mm2\n"
-        "punpckhbw %%mm0,%%mm4\n"
-        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
-        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
-
-        "pmaddwd %%mm2,%%mm2\n"
-        "pmaddwd %%mm4,%%mm4\n"
-        "pmaddwd %%mm1,%%mm1\n"
-        "pmaddwd %%mm3,%%mm3\n"
-
-        "add %3,%0\n"
-        "add %3,%1\n"
-
-        "paddd %%mm2,%%mm1\n"
-        "paddd %%mm4,%%mm3\n"
-        "paddd %%mm1,%%mm7\n"
-        "paddd %%mm3,%%mm7\n"
-
-        "decl %%ecx\n"
-        "jnz 1b\n"
-
-        "movq %%mm7,%%mm1\n"
-        "psrlq $32, %%mm7\n"    /* shift hi dword to lo */
-        "paddd %%mm7,%%mm1\n"
-        "movd %%mm1,%2\n"
-        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
-        : "r" ((x86_reg)line_size) , "m" (h)
-        : "%ecx");
+
+    __asm__ volatile (
+        "movl %4, %%ecx\n"
+        "pxor %%mm0, %%mm0\n"       /* mm0 = 0 */
+        "pxor %%mm7, %%mm7\n"       /* mm7 holds the sum */
+        "1:\n"
+        "movq (%0), %%mm1\n"        /* mm1 = pix1[0 - 7] */
+        "movq (%1), %%mm2\n"        /* mm2 = pix2[0 - 7] */
+        "movq 8(%0), %%mm3\n"       /* mm3 = pix1[8 - 15] */
+        "movq 8(%1), %%mm4\n"       /* mm4 = pix2[8 - 15] */
+
+        /* todo: mm1-mm2, mm3-mm4 */
+        /* algo: subtract mm1 from mm2 with saturation and vice versa */
+        /*       OR the results to get absolute difference */
+        "movq %%mm1, %%mm5\n"
+        "movq %%mm3, %%mm6\n"
+        "psubusb %%mm2, %%mm1\n"
+        "psubusb %%mm4, %%mm3\n"
+        "psubusb %%mm5, %%mm2\n"
+        "psubusb %%mm6, %%mm4\n"
+
+        "por %%mm1, %%mm2\n"
+        "por %%mm3, %%mm4\n"
+
+        /* now convert to 16-bit vectors so we can square them */
+        "movq %%mm2, %%mm1\n"
+        "movq %%mm4, %%mm3\n"
+
+        "punpckhbw %%mm0, %%mm2\n"
+        "punpckhbw %%mm0, %%mm4\n"
+        "punpcklbw %%mm0, %%mm1\n"  /* mm1 now spread over (mm1, mm2) */
+        "punpcklbw %%mm0, %%mm3\n"  /* mm4 now spread over (mm3, mm4) */
+
+        "pmaddwd %%mm2, %%mm2\n"
+        "pmaddwd %%mm4, %%mm4\n"
+        "pmaddwd %%mm1, %%mm1\n"
+        "pmaddwd %%mm3, %%mm3\n"
+
+        "add %3, %0\n"
+        "add %3, %1\n"
+
+        "paddd %%mm2, %%mm1\n"
+        "paddd %%mm4, %%mm3\n"
+        "paddd %%mm1, %%mm7\n"
+        "paddd %%mm3, %%mm7\n"
+
+        "decl %%ecx\n"
+        "jnz 1b\n"
+
+        "movq %%mm7, %%mm1\n"
+        "psrlq $32, %%mm7\n"        /* shift hi dword to lo */
+        "paddd %%mm7, %%mm1\n"
+        "movd %%mm1, %2\n"
+        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
+        : "r" ((x86_reg) line_size), "m" (h)
+        : "%ecx");
+
     return tmp;
 }
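[Editor's note] The psubusb/psubusb/por sequence in both SSE functions implements |a - b| for unsigned bytes as (a -sat b) | (b -sat a): one of the two saturating differences is always zero, so the OR yields the absolute difference. A scalar sketch of that identity (illustrative only):

    #include <stdint.h>

    static uint8_t absdiff_u8(uint8_t a, uint8_t b)
    {
        uint8_t d1 = a > b ? a - b : 0;  /* a -sat b, as psubusb */
        uint8_t d2 = b > a ? b - a : 0;  /* b -sat a             */
        return d1 | d2;                  /* one side is 0, so OR == |a - b| */
    }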
"pmaddwd %%mm3, %%mm3\n" + + "add %3, %0\n" + "add %3, %1\n" + + "paddd %%mm2, %%mm1\n" + "paddd %%mm4, %%mm3\n" + "paddd %%mm1, %%mm7\n" + "paddd %%mm3, %%mm7\n" + + "decl %%ecx\n" + "jnz 1b\n" + + "movq %%mm7, %%mm1\n" + "psrlq $32, %%mm7\n" /* shift hi dword to lo */ + "paddd %%mm7, %%mm1\n" + "movd %%mm1, %2\n" + : "+r" (pix1), "+r" (pix2), "=r" (tmp) + : "r" ((x86_reg) line_size), "m" (h) + : "%ecx"); + return tmp; } -static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) { +static int hf_noise8_mmx(uint8_t *pix1, int line_size, int h) +{ int tmp; - __asm__ volatile ( - "movl %3,%%ecx\n" - "pxor %%mm7,%%mm7\n" - "pxor %%mm6,%%mm6\n" - - "movq (%0),%%mm0\n" - "movq %%mm0, %%mm1\n" - "psllq $8, %%mm0\n" - "psrlq $8, %%mm1\n" - "psrlq $8, %%mm0\n" - "movq %%mm0, %%mm2\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm0\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm2\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm0\n" - "psubw %%mm3, %%mm2\n" - - "add %2,%0\n" - - "movq (%0),%%mm4\n" - "movq %%mm4, %%mm1\n" - "psllq $8, %%mm4\n" - "psrlq $8, %%mm1\n" - "psrlq $8, %%mm4\n" - "movq %%mm4, %%mm5\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm4\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm5\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm4\n" - "psubw %%mm3, %%mm5\n" - "psubw %%mm4, %%mm0\n" - "psubw %%mm5, %%mm2\n" - "pxor %%mm3, %%mm3\n" - "pxor %%mm1, %%mm1\n" - "pcmpgtw %%mm0, %%mm3\n\t" - "pcmpgtw %%mm2, %%mm1\n\t" - "pxor %%mm3, %%mm0\n" - "pxor %%mm1, %%mm2\n" - "psubw %%mm3, %%mm0\n" - "psubw %%mm1, %%mm2\n" - "paddw %%mm0, %%mm2\n" - "paddw %%mm2, %%mm6\n" - - "add %2,%0\n" - "1:\n" - - "movq (%0),%%mm0\n" - "movq %%mm0, %%mm1\n" - "psllq $8, %%mm0\n" - "psrlq $8, %%mm1\n" - "psrlq $8, %%mm0\n" - "movq %%mm0, %%mm2\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm0\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm2\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm0\n" - "psubw %%mm3, %%mm2\n" - "psubw %%mm0, %%mm4\n" - "psubw %%mm2, %%mm5\n" - "pxor %%mm3, %%mm3\n" - "pxor %%mm1, %%mm1\n" - "pcmpgtw %%mm4, %%mm3\n\t" - "pcmpgtw %%mm5, %%mm1\n\t" - "pxor %%mm3, %%mm4\n" - "pxor %%mm1, %%mm5\n" - "psubw %%mm3, %%mm4\n" - "psubw %%mm1, %%mm5\n" - "paddw %%mm4, %%mm5\n" - "paddw %%mm5, %%mm6\n" - - "add %2,%0\n" - - "movq (%0),%%mm4\n" - "movq %%mm4, %%mm1\n" - "psllq $8, %%mm4\n" - "psrlq $8, %%mm1\n" - "psrlq $8, %%mm4\n" - "movq %%mm4, %%mm5\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm4\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm5\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm4\n" - "psubw %%mm3, %%mm5\n" - "psubw %%mm4, %%mm0\n" - "psubw %%mm5, %%mm2\n" - "pxor %%mm3, %%mm3\n" - "pxor %%mm1, %%mm1\n" - "pcmpgtw %%mm0, %%mm3\n\t" - "pcmpgtw %%mm2, %%mm1\n\t" - "pxor %%mm3, %%mm0\n" - "pxor %%mm1, %%mm2\n" - "psubw %%mm3, %%mm0\n" - "psubw %%mm1, %%mm2\n" - "paddw %%mm0, %%mm2\n" - "paddw %%mm2, %%mm6\n" - - "add %2,%0\n" - "subl $2, %%ecx\n" - " jnz 1b\n" - - "movq %%mm6, %%mm0\n" - "punpcklwd %%mm7,%%mm0\n" - "punpckhwd %%mm7,%%mm6\n" - "paddd %%mm0, %%mm6\n" - - "movq %%mm6,%%mm0\n" - "psrlq $32, %%mm6\n" - "paddd %%mm6,%%mm0\n" - "movd %%mm0,%1\n" - : "+r" (pix1), "=r"(tmp) - : "r" ((x86_reg)line_size) , "g" (h-2) - : "%ecx"); - return tmp; + + __asm__ volatile ( + "movl %3, %%ecx\n" + "pxor %%mm7, %%mm7\n" + "pxor %%mm6, %%mm6\n" + + "movq (%0), %%mm0\n" + "movq %%mm0, %%mm1\n" + "psllq $8, %%mm0\n" + "psrlq $8, %%mm1\n" + "psrlq $8, %%mm0\n" + "movq %%mm0, %%mm2\n" + "movq %%mm1, %%mm3\n" + "punpcklbw %%mm7, 
%%mm0\n" + "punpcklbw %%mm7, %%mm1\n" + "punpckhbw %%mm7, %%mm2\n" + "punpckhbw %%mm7, %%mm3\n" + "psubw %%mm1, %%mm0\n" + "psubw %%mm3, %%mm2\n" + + "add %2, %0\n" + + "movq (%0), %%mm4\n" + "movq %%mm4, %%mm1\n" + "psllq $8, %%mm4\n" + "psrlq $8, %%mm1\n" + "psrlq $8, %%mm4\n" + "movq %%mm4, %%mm5\n" + "movq %%mm1, %%mm3\n" + "punpcklbw %%mm7, %%mm4\n" + "punpcklbw %%mm7, %%mm1\n" + "punpckhbw %%mm7, %%mm5\n" + "punpckhbw %%mm7, %%mm3\n" + "psubw %%mm1, %%mm4\n" + "psubw %%mm3, %%mm5\n" + "psubw %%mm4, %%mm0\n" + "psubw %%mm5, %%mm2\n" + "pxor %%mm3, %%mm3\n" + "pxor %%mm1, %%mm1\n" + "pcmpgtw %%mm0, %%mm3\n\t" + "pcmpgtw %%mm2, %%mm1\n\t" + "pxor %%mm3, %%mm0\n" + "pxor %%mm1, %%mm2\n" + "psubw %%mm3, %%mm0\n" + "psubw %%mm1, %%mm2\n" + "paddw %%mm0, %%mm2\n" + "paddw %%mm2, %%mm6\n" + + "add %2, %0\n" + "1:\n" + + "movq (%0), %%mm0\n" + "movq %%mm0, %%mm1\n" + "psllq $8, %%mm0\n" + "psrlq $8, %%mm1\n" + "psrlq $8, %%mm0\n" + "movq %%mm0, %%mm2\n" + "movq %%mm1, %%mm3\n" + "punpcklbw %%mm7, %%mm0\n" + "punpcklbw %%mm7, %%mm1\n" + "punpckhbw %%mm7, %%mm2\n" + "punpckhbw %%mm7, %%mm3\n" + "psubw %%mm1, %%mm0\n" + "psubw %%mm3, %%mm2\n" + "psubw %%mm0, %%mm4\n" + "psubw %%mm2, %%mm5\n" + "pxor %%mm3, %%mm3\n" + "pxor %%mm1, %%mm1\n" + "pcmpgtw %%mm4, %%mm3\n\t" + "pcmpgtw %%mm5, %%mm1\n\t" + "pxor %%mm3, %%mm4\n" + "pxor %%mm1, %%mm5\n" + "psubw %%mm3, %%mm4\n" + "psubw %%mm1, %%mm5\n" + "paddw %%mm4, %%mm5\n" + "paddw %%mm5, %%mm6\n" + + "add %2, %0\n" + + "movq (%0), %%mm4\n" + "movq %%mm4, %%mm1\n" + "psllq $8, %%mm4\n" + "psrlq $8, %%mm1\n" + "psrlq $8, %%mm4\n" + "movq %%mm4, %%mm5\n" + "movq %%mm1, %%mm3\n" + "punpcklbw %%mm7, %%mm4\n" + "punpcklbw %%mm7, %%mm1\n" + "punpckhbw %%mm7, %%mm5\n" + "punpckhbw %%mm7, %%mm3\n" + "psubw %%mm1, %%mm4\n" + "psubw %%mm3, %%mm5\n" + "psubw %%mm4, %%mm0\n" + "psubw %%mm5, %%mm2\n" + "pxor %%mm3, %%mm3\n" + "pxor %%mm1, %%mm1\n" + "pcmpgtw %%mm0, %%mm3\n\t" + "pcmpgtw %%mm2, %%mm1\n\t" + "pxor %%mm3, %%mm0\n" + "pxor %%mm1, %%mm2\n" + "psubw %%mm3, %%mm0\n" + "psubw %%mm1, %%mm2\n" + "paddw %%mm0, %%mm2\n" + "paddw %%mm2, %%mm6\n" + + "add %2, %0\n" + "subl $2, %%ecx\n" + " jnz 1b\n" + + "movq %%mm6, %%mm0\n" + "punpcklwd %%mm7, %%mm0\n" + "punpckhwd %%mm7, %%mm6\n" + "paddd %%mm0, %%mm6\n" + + "movq %%mm6, %%mm0\n" + "psrlq $32, %%mm6\n" + "paddd %%mm6, %%mm0\n" + "movd %%mm0, %1\n" + : "+r" (pix1), "=r" (tmp) + : "r" ((x86_reg) line_size), "g" (h - 2) + : "%ecx"); + + return tmp; } -static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) { +static int hf_noise16_mmx(uint8_t *pix1, int line_size, int h) +{ int tmp; - uint8_t * pix= pix1; - __asm__ volatile ( - "movl %3,%%ecx\n" - "pxor %%mm7,%%mm7\n" - "pxor %%mm6,%%mm6\n" - - "movq (%0),%%mm0\n" - "movq 1(%0),%%mm1\n" - "movq %%mm0, %%mm2\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm0\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm2\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm0\n" - "psubw %%mm3, %%mm2\n" - - "add %2,%0\n" - - "movq (%0),%%mm4\n" - "movq 1(%0),%%mm1\n" - "movq %%mm4, %%mm5\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm4\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm5\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm4\n" - "psubw %%mm3, %%mm5\n" - "psubw %%mm4, %%mm0\n" - "psubw %%mm5, %%mm2\n" - "pxor %%mm3, %%mm3\n" - "pxor %%mm1, %%mm1\n" - "pcmpgtw %%mm0, %%mm3\n\t" - "pcmpgtw %%mm2, %%mm1\n\t" - "pxor %%mm3, %%mm0\n" - "pxor %%mm1, %%mm2\n" - "psubw %%mm3, %%mm0\n" - "psubw %%mm1, %%mm2\n" - "paddw %%mm0, %%mm2\n" - "paddw %%mm2, 
%%mm6\n" - - "add %2,%0\n" - "1:\n" - - "movq (%0),%%mm0\n" - "movq 1(%0),%%mm1\n" - "movq %%mm0, %%mm2\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm0\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm2\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm0\n" - "psubw %%mm3, %%mm2\n" - "psubw %%mm0, %%mm4\n" - "psubw %%mm2, %%mm5\n" - "pxor %%mm3, %%mm3\n" - "pxor %%mm1, %%mm1\n" - "pcmpgtw %%mm4, %%mm3\n\t" - "pcmpgtw %%mm5, %%mm1\n\t" - "pxor %%mm3, %%mm4\n" - "pxor %%mm1, %%mm5\n" - "psubw %%mm3, %%mm4\n" - "psubw %%mm1, %%mm5\n" - "paddw %%mm4, %%mm5\n" - "paddw %%mm5, %%mm6\n" - - "add %2,%0\n" - - "movq (%0),%%mm4\n" - "movq 1(%0),%%mm1\n" - "movq %%mm4, %%mm5\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm4\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm5\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm4\n" - "psubw %%mm3, %%mm5\n" - "psubw %%mm4, %%mm0\n" - "psubw %%mm5, %%mm2\n" - "pxor %%mm3, %%mm3\n" - "pxor %%mm1, %%mm1\n" - "pcmpgtw %%mm0, %%mm3\n\t" - "pcmpgtw %%mm2, %%mm1\n\t" - "pxor %%mm3, %%mm0\n" - "pxor %%mm1, %%mm2\n" - "psubw %%mm3, %%mm0\n" - "psubw %%mm1, %%mm2\n" - "paddw %%mm0, %%mm2\n" - "paddw %%mm2, %%mm6\n" - - "add %2,%0\n" - "subl $2, %%ecx\n" - " jnz 1b\n" - - "movq %%mm6, %%mm0\n" - "punpcklwd %%mm7,%%mm0\n" - "punpckhwd %%mm7,%%mm6\n" - "paddd %%mm0, %%mm6\n" - - "movq %%mm6,%%mm0\n" - "psrlq $32, %%mm6\n" - "paddd %%mm6,%%mm0\n" - "movd %%mm0,%1\n" - : "+r" (pix1), "=r"(tmp) - : "r" ((x86_reg)line_size) , "g" (h-2) - : "%ecx"); - return tmp + hf_noise8_mmx(pix+8, line_size, h); + uint8_t *pix = pix1; + + __asm__ volatile ( + "movl %3, %%ecx\n" + "pxor %%mm7, %%mm7\n" + "pxor %%mm6, %%mm6\n" + + "movq (%0), %%mm0\n" + "movq 1(%0), %%mm1\n" + "movq %%mm0, %%mm2\n" + "movq %%mm1, %%mm3\n" + "punpcklbw %%mm7, %%mm0\n" + "punpcklbw %%mm7, %%mm1\n" + "punpckhbw %%mm7, %%mm2\n" + "punpckhbw %%mm7, %%mm3\n" + "psubw %%mm1, %%mm0\n" + "psubw %%mm3, %%mm2\n" + + "add %2, %0\n" + + "movq (%0), %%mm4\n" + "movq 1(%0), %%mm1\n" + "movq %%mm4, %%mm5\n" + "movq %%mm1, %%mm3\n" + "punpcklbw %%mm7, %%mm4\n" + "punpcklbw %%mm7, %%mm1\n" + "punpckhbw %%mm7, %%mm5\n" + "punpckhbw %%mm7, %%mm3\n" + "psubw %%mm1, %%mm4\n" + "psubw %%mm3, %%mm5\n" + "psubw %%mm4, %%mm0\n" + "psubw %%mm5, %%mm2\n" + "pxor %%mm3, %%mm3\n" + "pxor %%mm1, %%mm1\n" + "pcmpgtw %%mm0, %%mm3\n\t" + "pcmpgtw %%mm2, %%mm1\n\t" + "pxor %%mm3, %%mm0\n" + "pxor %%mm1, %%mm2\n" + "psubw %%mm3, %%mm0\n" + "psubw %%mm1, %%mm2\n" + "paddw %%mm0, %%mm2\n" + "paddw %%mm2, %%mm6\n" + + "add %2, %0\n" + "1:\n" + + "movq (%0), %%mm0\n" + "movq 1(%0), %%mm1\n" + "movq %%mm0, %%mm2\n" + "movq %%mm1, %%mm3\n" + "punpcklbw %%mm7, %%mm0\n" + "punpcklbw %%mm7, %%mm1\n" + "punpckhbw %%mm7, %%mm2\n" + "punpckhbw %%mm7, %%mm3\n" + "psubw %%mm1, %%mm0\n" + "psubw %%mm3, %%mm2\n" + "psubw %%mm0, %%mm4\n" + "psubw %%mm2, %%mm5\n" + "pxor %%mm3, %%mm3\n" + "pxor %%mm1, %%mm1\n" + "pcmpgtw %%mm4, %%mm3\n\t" + "pcmpgtw %%mm5, %%mm1\n\t" + "pxor %%mm3, %%mm4\n" + "pxor %%mm1, %%mm5\n" + "psubw %%mm3, %%mm4\n" + "psubw %%mm1, %%mm5\n" + "paddw %%mm4, %%mm5\n" + "paddw %%mm5, %%mm6\n" + + "add %2, %0\n" + + "movq (%0), %%mm4\n" + "movq 1(%0), %%mm1\n" + "movq %%mm4, %%mm5\n" + "movq %%mm1, %%mm3\n" + "punpcklbw %%mm7, %%mm4\n" + "punpcklbw %%mm7, %%mm1\n" + "punpckhbw %%mm7, %%mm5\n" + "punpckhbw %%mm7, %%mm3\n" + "psubw %%mm1, %%mm4\n" + "psubw %%mm3, %%mm5\n" + "psubw %%mm4, %%mm0\n" + "psubw %%mm5, %%mm2\n" + "pxor %%mm3, %%mm3\n" + "pxor %%mm1, %%mm1\n" + "pcmpgtw %%mm0, %%mm3\n\t" + "pcmpgtw %%mm2, 
%%mm1\n\t" + "pxor %%mm3, %%mm0\n" + "pxor %%mm1, %%mm2\n" + "psubw %%mm3, %%mm0\n" + "psubw %%mm1, %%mm2\n" + "paddw %%mm0, %%mm2\n" + "paddw %%mm2, %%mm6\n" + + "add %2, %0\n" + "subl $2, %%ecx\n" + " jnz 1b\n" + + "movq %%mm6, %%mm0\n" + "punpcklwd %%mm7, %%mm0\n" + "punpckhwd %%mm7, %%mm6\n" + "paddd %%mm0, %%mm6\n" + + "movq %%mm6, %%mm0\n" + "psrlq $32, %%mm6\n" + "paddd %%mm6, %%mm0\n" + "movd %%mm0, %1\n" + : "+r" (pix1), "=r" (tmp) + : "r" ((x86_reg) line_size), "g" (h - 2) + : "%ecx"); + + return tmp + hf_noise8_mmx(pix + 8, line_size, h); } -static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { +static int nsse16_mmx(void *p, uint8_t *pix1, uint8_t *pix2, + int line_size, int h) +{ MpegEncContext *c = p; int score1, score2; - if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h); - else score1 = sse16_mmx(c, pix1, pix2, line_size, h); - score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h); - - if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight; - else return score1 + FFABS(score2)*8; + if (c) + score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h); + else + score1 = sse16_mmx(c, pix1, pix2, line_size, h); + score2 = hf_noise16_mmx(pix1, line_size, h) - + hf_noise16_mmx(pix2, line_size, h); + + if (c) + return score1 + FFABS(score2) * c->avctx->nsse_weight; + else + return score1 + FFABS(score2) * 8; } -static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { +static int nsse8_mmx(void *p, uint8_t *pix1, uint8_t *pix2, + int line_size, int h) +{ MpegEncContext *c = p; - int score1= sse8_mmx(c, pix1, pix2, line_size, h); - int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h); - - if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight; - else return score1 + FFABS(score2)*8; + int score1 = sse8_mmx(c, pix1, pix2, line_size, h); + int score2 = hf_noise8_mmx(pix1, line_size, h) - + hf_noise8_mmx(pix2, line_size, h); + + if (c) + return score1 + FFABS(score2) * c->avctx->nsse_weight; + else + return score1 + FFABS(score2) * 8; } -static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) { +static int vsad_intra16_mmx(void *v, uint8_t *pix, uint8_t *dummy, + int line_size, int h) +{ int tmp; - assert( (((int)pix) & 7) == 0); - assert((line_size &7) ==0); - -#define SUM(in0, in1, out0, out1) \ - "movq (%0), %%mm2\n"\ - "movq 8(%0), %%mm3\n"\ - "add %2,%0\n"\ - "movq %%mm2, " #out0 "\n"\ - "movq %%mm3, " #out1 "\n"\ - "psubusb " #in0 ", %%mm2\n"\ - "psubusb " #in1 ", %%mm3\n"\ - "psubusb " #out0 ", " #in0 "\n"\ - "psubusb " #out1 ", " #in1 "\n"\ - "por %%mm2, " #in0 "\n"\ - "por %%mm3, " #in1 "\n"\ - "movq " #in0 ", %%mm2\n"\ - "movq " #in1 ", %%mm3\n"\ - "punpcklbw %%mm7, " #in0 "\n"\ - "punpcklbw %%mm7, " #in1 "\n"\ - "punpckhbw %%mm7, %%mm2\n"\ - "punpckhbw %%mm7, %%mm3\n"\ - "paddw " #in1 ", " #in0 "\n"\ - "paddw %%mm3, %%mm2\n"\ - "paddw %%mm2, " #in0 "\n"\ - "paddw " #in0 ", %%mm6\n" - - - __asm__ volatile ( - "movl %3,%%ecx\n" - "pxor %%mm6,%%mm6\n" - "pxor %%mm7,%%mm7\n" - "movq (%0),%%mm0\n" - "movq 8(%0),%%mm1\n" - "add %2,%0\n" - "jmp 2f\n" - "1:\n" - - SUM(%%mm4, %%mm5, %%mm0, %%mm1) - "2:\n" - SUM(%%mm0, %%mm1, %%mm4, %%mm5) - - "subl $2, %%ecx\n" - "jnz 1b\n" - - "movq %%mm6,%%mm0\n" - "psrlq $32, %%mm6\n" - "paddw %%mm6,%%mm0\n" - "movq %%mm0,%%mm6\n" - "psrlq $16, %%mm0\n" - "paddw %%mm6,%%mm0\n" - "movd %%mm0,%1\n" - : "+r" (pix), "=r"(tmp) - : "r" ((x86_reg)line_size) , "m" (h) - : "%ecx"); + assert((((int) 

-static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
+static int vsad_intra16_mmx(void *v, uint8_t *pix, uint8_t *dummy,
+                            int line_size, int h)
+{
     int tmp;
-    assert( (((int)pix) & 7) == 0);
-    assert((line_size &7) ==0);
-
-#define SUM(in0, in1, out0, out1) \
-    "movq (%0), %%mm2\n"\
-    "movq 8(%0), %%mm3\n"\
-    "add %2,%0\n"\
-    "movq %%mm2, " #out0 "\n"\
-    "movq %%mm3, " #out1 "\n"\
-    "psubusb " #in0 ", %%mm2\n"\
-    "psubusb " #in1 ", %%mm3\n"\
-    "psubusb " #out0 ", " #in0 "\n"\
-    "psubusb " #out1 ", " #in1 "\n"\
-    "por %%mm2, " #in0 "\n"\
-    "por %%mm3, " #in1 "\n"\
-    "movq " #in0 ", %%mm2\n"\
-    "movq " #in1 ", %%mm3\n"\
-    "punpcklbw %%mm7, " #in0 "\n"\
-    "punpcklbw %%mm7, " #in1 "\n"\
-    "punpckhbw %%mm7, %%mm2\n"\
-    "punpckhbw %%mm7, %%mm3\n"\
-    "paddw " #in1 ", " #in0 "\n"\
-    "paddw %%mm3, %%mm2\n"\
-    "paddw %%mm2, " #in0 "\n"\
-    "paddw " #in0 ", %%mm6\n"
-
-
-    __asm__ volatile (
-        "movl %3,%%ecx\n"
-        "pxor %%mm6,%%mm6\n"
-        "pxor %%mm7,%%mm7\n"
-        "movq (%0),%%mm0\n"
-        "movq 8(%0),%%mm1\n"
-        "add %2,%0\n"
-        "jmp 2f\n"
-        "1:\n"
-
-        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
-        "2:\n"
-        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
-
-        "subl $2, %%ecx\n"
-        "jnz 1b\n"
-
-        "movq %%mm6,%%mm0\n"
-        "psrlq $32, %%mm6\n"
-        "paddw %%mm6,%%mm0\n"
-        "movq %%mm0,%%mm6\n"
-        "psrlq $16, %%mm0\n"
-        "paddw %%mm6,%%mm0\n"
-        "movd %%mm0,%1\n"
-        : "+r" (pix), "=r"(tmp)
-        : "r" ((x86_reg)line_size) , "m" (h)
-        : "%ecx");
+    assert((((int) pix) & 7) == 0);
+    assert((line_size & 7) == 0);
+
+#define SUM(in0, in1, out0, out1) \
+    "movq (%0), %%mm2\n" \
+    "movq 8(%0), %%mm3\n" \
+    "add %2,%0\n" \
+    "movq %%mm2, " #out0 "\n" \
+    "movq %%mm3, " #out1 "\n" \
+    "psubusb " #in0 ", %%mm2\n" \
+    "psubusb " #in1 ", %%mm3\n" \
+    "psubusb " #out0 ", " #in0 "\n" \
+    "psubusb " #out1 ", " #in1 "\n" \
+    "por %%mm2, " #in0 "\n" \
+    "por %%mm3, " #in1 "\n" \
+    "movq " #in0 ", %%mm2\n" \
+    "movq " #in1 ", %%mm3\n" \
+    "punpcklbw %%mm7, " #in0 "\n" \
+    "punpcklbw %%mm7, " #in1 "\n" \
+    "punpckhbw %%mm7, %%mm2\n" \
+    "punpckhbw %%mm7, %%mm3\n" \
+    "paddw " #in1 ", " #in0 "\n" \
+    "paddw %%mm3, %%mm2\n" \
+    "paddw %%mm2, " #in0 "\n" \
+    "paddw " #in0 ", %%mm6\n"
+
+
+    __asm__ volatile (
+        "movl %3, %%ecx\n"
+        "pxor %%mm6, %%mm6\n"
+        "pxor %%mm7, %%mm7\n"
+        "movq (%0), %%mm0\n"
+        "movq 8(%0), %%mm1\n"
+        "add %2, %0\n"
+        "jmp 2f\n"
+        "1:\n"
+
+        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
+        "2:\n"
+        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
+
+        "subl $2, %%ecx\n"
+        "jnz 1b\n"
+
+        "movq %%mm6, %%mm0\n"
+        "psrlq $32, %%mm6\n"
+        "paddw %%mm6, %%mm0\n"
+        "movq %%mm0, %%mm6\n"
+        "psrlq $16, %%mm0\n"
+        "paddw %%mm6, %%mm0\n"
+        "movd %%mm0, %1\n"
+        : "+r" (pix), "=r" (tmp)
+        : "r" ((x86_reg) line_size), "m" (h)
+        : "%ecx");
+
     return tmp & 0xFFFF;
 }
 #undef SUM
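[Editor's note] vsad_intra16 sums the absolute differences between vertically adjacent pixels of a single 16-pixel-wide block; the SUM macro processes one new row against the previously loaded one. A scalar reference of what the loop accumulates (name and bounds are this editor's illustration):

    #include <stdint.h>
    #include <stdlib.h>

    static int vsad_intra16_ref(const uint8_t *pix, int line_size, int h)
    {
        int sum = 0;
        for (int y = 0; y < h - 1; y++) {
            for (int x = 0; x < 16; x++)
                sum += abs(pix[x] - pix[x + line_size]);
            pix += line_size;
        }
        return sum & 0xFFFF;   /* the asm keeps 16-bit partial sums */
    }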
"psubusb " #out1 ", " #in1 "\n"\ - "por %%mm2, " #in0 "\n"\ - "por %%mm3, " #in1 "\n"\ - "movq " #in0 ", %%mm2\n"\ - "movq " #in1 ", %%mm3\n"\ - "punpcklbw %%mm7, " #in0 "\n"\ - "punpcklbw %%mm7, " #in1 "\n"\ - "punpckhbw %%mm7, %%mm2\n"\ - "punpckhbw %%mm7, %%mm3\n"\ - "paddw " #in1 ", " #in0 "\n"\ - "paddw %%mm3, %%mm2\n"\ - "paddw %%mm2, " #in0 "\n"\ - "paddw " #in0 ", %%mm6\n" - - - __asm__ volatile ( - "movl %4,%%ecx\n" - "pxor %%mm6,%%mm6\n" - "pcmpeqw %%mm7,%%mm7\n" - "psllw $15, %%mm7\n" - "packsswb %%mm7, %%mm7\n" - "movq (%0),%%mm0\n" - "movq (%1),%%mm2\n" - "movq 8(%0),%%mm1\n" - "movq 8(%1),%%mm3\n" - "add %3,%0\n" - "add %3,%1\n" - "psubb %%mm2, %%mm0\n" - "psubb %%mm3, %%mm1\n" - "pxor %%mm7, %%mm0\n" - "pxor %%mm7, %%mm1\n" - "jmp 2f\n" - "1:\n" - - SUM(%%mm4, %%mm5, %%mm0, %%mm1) - "2:\n" - SUM(%%mm0, %%mm1, %%mm4, %%mm5) - - "subl $2, %%ecx\n" - "jnz 1b\n" - - "movq %%mm6,%%mm0\n" - "psrlq $32, %%mm6\n" - "paddw %%mm6,%%mm0\n" - "movq %%mm0,%%mm6\n" - "psrlq $16, %%mm0\n" - "paddw %%mm6,%%mm0\n" - "movd %%mm0,%2\n" - : "+r" (pix1), "+r" (pix2), "=r"(tmp) - : "r" ((x86_reg)line_size) , "m" (h) - : "%ecx"); + assert((((int) pix1) & 7) == 0); + assert((((int) pix2) & 7) == 0); + assert((line_size & 7) == 0); + +#define SUM(in0, in1, out0, out1) \ + "movq (%0), %%mm2\n" \ + "movq (%1), " #out0 "\n" \ + "movq 8(%0), %%mm3\n" \ + "movq 8(%1), " #out1 "\n" \ + "add %3, %0\n" \ + "add %3, %1\n" \ + "psubb " #out0 ", %%mm2\n" \ + "psubb " #out1 ", %%mm3\n" \ + "pxor %%mm7, %%mm2\n" \ + "pxor %%mm7, %%mm3\n" \ + "movq %%mm2, " #out0 "\n" \ + "movq %%mm3, " #out1 "\n" \ + "psubusb " #in0 ", %%mm2\n" \ + "psubusb " #in1 ", %%mm3\n" \ + "psubusb " #out0 ", " #in0 "\n" \ + "psubusb " #out1 ", " #in1 "\n" \ + "por %%mm2, " #in0 "\n" \ + "por %%mm3, " #in1 "\n" \ + "movq " #in0 ", %%mm2\n" \ + "movq " #in1 ", %%mm3\n" \ + "punpcklbw %%mm7, " #in0 "\n" \ + "punpcklbw %%mm7, " #in1 "\n" \ + "punpckhbw %%mm7, %%mm2\n" \ + "punpckhbw %%mm7, %%mm3\n" \ + "paddw " #in1 ", " #in0 "\n" \ + "paddw %%mm3, %%mm2\n" \ + "paddw %%mm2, " #in0 "\n" \ + "paddw " #in0 ", %%mm6\n" + + + __asm__ volatile ( + "movl %4, %%ecx\n" + "pxor %%mm6, %%mm6\n" + "pcmpeqw %%mm7, %%mm7\n" + "psllw $15, %%mm7\n" + "packsswb %%mm7, %%mm7\n" + "movq (%0), %%mm0\n" + "movq (%1), %%mm2\n" + "movq 8(%0), %%mm1\n" + "movq 8(%1), %%mm3\n" + "add %3, %0\n" + "add %3, %1\n" + "psubb %%mm2, %%mm0\n" + "psubb %%mm3, %%mm1\n" + "pxor %%mm7, %%mm0\n" + "pxor %%mm7, %%mm1\n" + "jmp 2f\n" + "1:\n" + + SUM(%%mm4, %%mm5, %%mm0, %%mm1) + "2:\n" + SUM(%%mm0, %%mm1, %%mm4, %%mm5) + + "subl $2, %%ecx\n" + "jnz 1b\n" + + "movq %%mm6, %%mm0\n" + "psrlq $32, %%mm6\n" + "paddw %%mm6, %%mm0\n" + "movq %%mm0, %%mm6\n" + "psrlq $16, %%mm0\n" + "paddw %%mm6, %%mm0\n" + "movd %%mm0, %2\n" + : "+r" (pix1), "+r" (pix2), "=r" (tmp) + : "r" ((x86_reg) line_size), "m" (h) + : "%ecx"); + return tmp & 0x7FFF; } #undef SUM @@ -607,63 +640,66 @@ static int vsad16_mmxext(void *v, uint8_t *pix1, uint8_t *pix2, { int tmp; - assert( (((int)pix1) & 7) == 0); - assert( (((int)pix2) & 7) == 0); - assert((line_size &7) ==0); - -#define SUM(in0, in1, out0, out1) \ - "movq (%0)," #out0 "\n"\ - "movq (%1),%%mm2\n"\ - "movq 8(%0)," #out1 "\n"\ - "movq 8(%1),%%mm3\n"\ - "add %3,%0\n"\ - "add %3,%1\n"\ - "psubb %%mm2, " #out0 "\n"\ - "psubb %%mm3, " #out1 "\n"\ - "pxor %%mm7, " #out0 "\n"\ - "pxor %%mm7, " #out1 "\n"\ - "psadbw " #out0 ", " #in0 "\n"\ - "psadbw " #out1 ", " #in1 "\n"\ - "paddw " #in1 ", " #in0 "\n"\ - "paddw " #in0 ", %%mm6\n" - - __asm__ volatile ( - 
"movl %4,%%ecx\n" - "pxor %%mm6,%%mm6\n" - "pcmpeqw %%mm7,%%mm7\n" - "psllw $15, %%mm7\n" - "packsswb %%mm7, %%mm7\n" - "movq (%0),%%mm0\n" - "movq (%1),%%mm2\n" - "movq 8(%0),%%mm1\n" - "movq 8(%1),%%mm3\n" - "add %3,%0\n" - "add %3,%1\n" - "psubb %%mm2, %%mm0\n" - "psubb %%mm3, %%mm1\n" - "pxor %%mm7, %%mm0\n" - "pxor %%mm7, %%mm1\n" - "jmp 2f\n" - "1:\n" - - SUM(%%mm4, %%mm5, %%mm0, %%mm1) - "2:\n" - SUM(%%mm0, %%mm1, %%mm4, %%mm5) - - "subl $2, %%ecx\n" - "jnz 1b\n" - - "movd %%mm6,%2\n" - : "+r" (pix1), "+r" (pix2), "=r"(tmp) - : "r" ((x86_reg)line_size) , "m" (h) - : "%ecx"); + assert((((int) pix1) & 7) == 0); + assert((((int) pix2) & 7) == 0); + assert((line_size & 7) == 0); + +#define SUM(in0, in1, out0, out1) \ + "movq (%0), " #out0 "\n" \ + "movq (%1), %%mm2\n" \ + "movq 8(%0), " #out1 "\n" \ + "movq 8(%1), %%mm3\n" \ + "add %3, %0\n" \ + "add %3, %1\n" \ + "psubb %%mm2, " #out0 "\n" \ + "psubb %%mm3, " #out1 "\n" \ + "pxor %%mm7, " #out0 "\n" \ + "pxor %%mm7, " #out1 "\n" \ + "psadbw " #out0 ", " #in0 "\n" \ + "psadbw " #out1 ", " #in1 "\n" \ + "paddw " #in1 ", " #in0 "\n" \ + "paddw " #in0 ", %%mm6\n " + + __asm__ volatile ( + "movl %4, %%ecx\n" + "pxor %%mm6, %%mm6\n" + "pcmpeqw %%mm7, %%mm7\n" + "psllw $15, %%mm7\n" + "packsswb %%mm7, %%mm7\n" + "movq (%0), %%mm0\n" + "movq (%1), %%mm2\n" + "movq 8(%0), %%mm1\n" + "movq 8(%1), %%mm3\n" + "add %3, %0\n" + "add %3, %1\n" + "psubb %%mm2, %%mm0\n" + "psubb %%mm3, %%mm1\n" + "pxor %%mm7, %%mm0\n" + "pxor %%mm7, %%mm1\n" + "jmp 2f\n" + "1:\n" + + SUM(%%mm4, %%mm5, %%mm0, %%mm1) + "2:\n" + SUM(%%mm0, %%mm1, %%mm4, %%mm5) + + "subl $2, %%ecx\n" + "jnz 1b\n" + + "movd %%mm6, %2\n" + : "+r" (pix1), "+r" (pix2), "=r" (tmp) + : "r" ((x86_reg) line_size), "m" (h) + : "%ecx"); + return tmp; } #undef SUM -static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){ - x86_reg i=0; - __asm__ volatile( +static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w) +{ + x86_reg i = 0; + + __asm__ volatile ( "1: \n\t" "movq (%2, %0), %%mm0 \n\t" "movq (%1, %0), %%mm1 \n\t" @@ -677,20 +713,20 @@ static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){ "cmp %4, %0 \n\t" " jb 1b \n\t" : "+r" (i) - : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w-15) - ); - for(; iget_pixels = ff_get_pixels_mmx; c->diff_pixels = ff_diff_pixels_mmx; - c->pix_sum = ff_pix_sum16_mmx; - - c->pix_norm1 = ff_pix_norm1_mmx; + c->pix_sum = ff_pix_sum16_mmx; + c->pix_norm1 = ff_pix_norm1_mmx; } + if (EXTERNAL_SSE2(cpu_flags)) if (bit_depth <= 8) c->get_pixels = ff_get_pixels_sse2; @@ -970,20 +1013,20 @@ av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx) (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX)) c->fdct = ff_fdct_mmx; - c->diff_bytes= diff_bytes_mmx; - c->sum_abs_dctelem= sum_abs_dctelem_mmx; + c->diff_bytes = diff_bytes_mmx; + c->sum_abs_dctelem = sum_abs_dctelem_mmx; - c->sse[0] = sse16_mmx; - c->sse[1] = sse8_mmx; - c->vsad[4]= vsad_intra16_mmx; + c->sse[0] = sse16_mmx; + c->sse[1] = sse8_mmx; + c->vsad[4] = vsad_intra16_mmx; c->nsse[0] = nsse16_mmx; c->nsse[1] = nsse8_mmx; - if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ - c->vsad[0] = vsad16_mmx; - c->try_8x8basis= try_8x8basis_mmx; + if (!(avctx->flags & CODEC_FLAG_BITEXACT)) { + c->vsad[0] = vsad16_mmx; + c->try_8x8basis = try_8x8basis_mmx; } - c->add_8x8basis= add_8x8basis_mmx; + c->add_8x8basis = add_8x8basis_mmx; c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx; } @@ -1003,7 +1046,7 @@ av_cold void ff_dsputilenc_init_mmx(DSPContext *c, 
@@ -1003,7 +1046,7 @@ av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx)
         c->sum_abs_dctelem = sum_abs_dctelem_mmxext;
         c->vsad[4]         = vsad_intra16_mmxext;

-        if (!(avctx->flags & CODEC_FLAG_BITEXACT)){
+        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
             c->vsad[0] = vsad16_mmxext;
         }

@@ -1015,7 +1058,7 @@ av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx)
             (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
             c->fdct = ff_fdct_sse2;

-        c->sum_abs_dctelem= sum_abs_dctelem_sse2;
+        c->sum_abs_dctelem = sum_abs_dctelem_sse2;
     }

 #if HAVE_SSSE3_INLINE