#undef MMREG_SIZE
#undef PAVGB
-#if HAVE_SSE2
+#if COMPILE_TEMPLATE_SSE2
#define MMREG_SIZE 16
#else
#define MMREG_SIZE 8
#endif
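/* A note on the rename: this template is compiled once per CPU feature
 * level by its includer, which defines exactly one COMPILE_TEMPLATE_*
 * switch (plus the RENAME() macro used below) before each pass. Reusing
 * configure's global HAVE_* macros for this would clash, since those
 * must keep the same value across every pass. A minimal sketch of the
 * includer side, details hypothetical:
 *
 *     #define COMPILE_TEMPLATE_MMX 1
 *     #define RENAME(a) a ## _MMX
 *     #include "rgb2rgb_template.c"
 *     #undef COMPILE_TEMPLATE_MMX
 *     #undef RENAME
 */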
-#if HAVE_AMD3DNOW
+#if COMPILE_TEMPLATE_AMD3DNOW
#define PREFETCH "prefetch"
#define PAVGB "pavgusb"
-#elif HAVE_MMX2
+#elif COMPILE_TEMPLATE_MMX2
#define PREFETCH "prefetchnta"
#define PAVGB "pavgb"
#else
#define PREFETCH " # nop"
#endif
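/* When neither 3DNow! nor MMX2 is available, PREFETCH degrades to a
 * comment inside the asm string, so call sites below can emit it
 * unconditionally, e.g.:
 *
 *     __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
 */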
-#if HAVE_AMD3DNOW
+#if COMPILE_TEMPLATE_AMD3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
#endif
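/* EMMS matters because the MMX registers alias the x87 floating-point
 * stack: every routine below executes EMMS (femms here) after its MMX
 * section so later FPU code sees a clean register state, e.g.:
 *
 *     __asm__ volatile(EMMS" \n\t":::"memory");
 */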
-#if HAVE_MMX2
+#if COMPILE_TEMPLATE_MMX2
#define MOVNTQ "movntq"
#define SFENCE "sfence"
#else
uint8_t *dest = dst;
const uint8_t *s = src;
const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end;
#endif
end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 23;
__asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory");
uint8_t *dest = dst;
const uint8_t *s = src;
const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end;
#endif
end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 31;
while (s < mm_end) {
register const uint8_t *end;
const uint8_t *mm_end;
end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s));
__asm__ volatile("movq %0, %%mm4"::"m"(mask15s));
mm_end = end - 15;
register const uint8_t *end;
const uint8_t *mm_end;
end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s));
__asm__ volatile("movq %0, %%mm7"::"m"(mask15rg));
__asm__ volatile("movq %0, %%mm6"::"m"(mask15b));
{
const uint8_t *s = src;
const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
mm_end = end - 15;
#if 1 // This is faster only if multiplies are reasonably fast (FIXME: figure out on which CPUs that holds; on Athlon it is slightly faster).
__asm__ volatile(
{
const uint8_t *s = src;
const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile(
"movq %0, %%mm7 \n\t"
{
const uint8_t *s = src;
const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
mm_end = end - 15;
#if 1 // This is faster only if multiplies are reasonably fast (FIXME: figure out on which CPUs that holds; on Athlon it is slightly faster).
__asm__ volatile(
{
const uint8_t *s = src;
const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile(
"movq %0, %%mm7 \n\t"
{
const uint8_t *s = src;
const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile(
"movq %0, %%mm7 \n\t"
{
const uint8_t *s = src;
const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile(
"movq %0, %%mm7 \n\t"
{
const uint8_t *s = src;
const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile(
"movq %0, %%mm7 \n\t"
{
const uint8_t *s = src;
const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile(
"movq %0, %%mm7 \n\t"
static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{
const uint16_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint16_t *mm_end;
#endif
uint8_t *d = dst;
const uint16_t *s = (const uint16_t*)src;
end = s + src_size/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 7;
while (s < mm_end) {
static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{
const uint16_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint16_t *mm_end;
#endif
uint8_t *d = (uint8_t *)dst;
const uint16_t *s = (const uint16_t *)src;
end = s + src_size/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 7;
while (s < mm_end) {
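/* Both 16-bit -> 24-bit unpackers expand each 5/6-bit field to 8 bits
 * by shifting it into the top of a byte. Hedged scalar form of the
 * per-pixel work (RGB555 case; RGB565 would use 0x7E0/0xF800 and
 * >>3/>>8 for green and red):
 *
 *     register uint16_t bgr = *s++;
 *     *d++ = (bgr & 0x1F)   << 3;   // blue
 *     *d++ = (bgr & 0x3E0)  >> 2;   // green
 *     *d++ = (bgr & 0x7C00) >> 7;   // red
 */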
static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size)
{
const uint16_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint16_t *mm_end;
#endif
uint8_t *d = dst;
const uint16_t *s = (const uint16_t *)src;
end = s + src_size/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
__asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
__asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size)
{
const uint16_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
const uint16_t *mm_end;
#endif
uint8_t *d = dst;
const uint16_t *s = (const uint16_t*)src;
end = s + src_size/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
__asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
__asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
x86_reg idx = 15 - src_size;
const uint8_t *s = src-idx;
uint8_t *d = dst-idx;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(
"test %0, %0 \n\t"
"jns 2f \n\t"
PREFETCH" 32(%1, %0) \n\t"
"movq (%1, %0), %%mm0 \n\t"
"movq 8(%1, %0), %%mm1 \n\t"
-# if HAVE_MMX2
+# if COMPILE_TEMPLATE_MMX2
"pshufw $177, %%mm0, %%mm3 \n\t"
"pshufw $177, %%mm1, %%mm5 \n\t"
"pand %%mm7, %%mm0 \n\t"
static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{
unsigned i;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
x86_reg mmx_size= 23 - src_size;
__asm__ volatile (
"test %%"REG_a", %%"REG_a" \n\t"
long y;
const x86_reg chromWidth= width>>1;
for (y=0; y<height; y++) {
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
__asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t"
ysrc += lumStride;
dst += dstStride;
}
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__(EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
long y;
const x86_reg chromWidth= width>>1;
for (y=0; y<height; y++) {
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
__asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t"
ysrc += lumStride;
dst += dstStride;
}
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__(EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
long y;
const x86_reg chromWidth= width>>1;
for (y=0; y<height; y+=2) {
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t"
ydst += lumStride;
src += srcStride;
}
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
dst+= dstStride;
for (y=1; y<srcHeight; y++) {
-#if HAVE_MMX2 || HAVE_AMD3DNOW
+#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
const x86_reg mmxSize= srcWidth&~15;
__asm__ volatile(
"mov %4, %%"REG_a" \n\t"
}
#endif
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
long y;
const x86_reg chromWidth= width>>1;
for (y=0; y<height; y+=2) {
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t"
ydst += lumStride;
src += srcStride;
}
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
{
long y;
const x86_reg chromWidth= width>>1;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
for (y=0; y<height-2; y+=2) {
long i;
for (i=0; i<2; i++) {
"1: \n\t"
PREFETCH" 64(%0, %%"REG_d") \n\t"
PREFETCH" 64(%1, %%"REG_d") \n\t"
-#if HAVE_MMX2 || HAVE_AMD3DNOW
+#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
"movq (%0, %%"REG_d"), %%mm0 \n\t"
"movq (%1, %%"REG_d"), %%mm1 \n\t"
"movq 6(%0, %%"REG_d"), %%mm2 \n\t"
"packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
"psraw $7, %%mm0 \n\t"
-#if HAVE_MMX2 || HAVE_AMD3DNOW
+#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
"movq 12(%0, %%"REG_d"), %%mm4 \n\t"
"movq 12(%1, %%"REG_d"), %%mm1 \n\t"
"movq 18(%0, %%"REG_d"), %%mm2 \n\t"
for (h=0; h < height; h++) {
long w;
-#if HAVE_MMX
-#if HAVE_SSE2
+#if COMPILE_TEMPLATE_MMX
+#if COMPILE_TEMPLATE_SSE2
__asm__(
"xor %%"REG_a", %%"REG_a" \n\t"
"1: \n\t"
src1 += src1Stride;
src2 += src2Stride;
}
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__(
EMMS" \n\t"
SFENCE" \n\t"
x86_reg y;
long x,w,h;
w=width/2; h=height/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__ volatile(
PREFETCH" %0 \n\t"
PREFETCH" %1 \n\t"
const uint8_t* s1=src1+srcStride1*(y>>1);
uint8_t* d=dst1+dstStride1*y;
x=0;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
for (;x<w-31;x+=32) {
__asm__ volatile(
PREFETCH" 32%1 \n\t"
const uint8_t* s2=src2+srcStride2*(y>>1);
uint8_t* d=dst2+dstStride2*y;
x=0;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
for (;x<w-31;x+=32) {
__asm__ volatile(
PREFETCH" 32%1 \n\t"
#endif
for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
}
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__(
EMMS" \n\t"
SFENCE" \n\t"
const uint8_t* vp=src3+srcStride3*(y>>2);
uint8_t* d=dst+dstStride*y;
x=0;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
for (;x<w-7;x+=8) {
__asm__ volatile(
PREFETCH" 32(%1, %0) \n\t"
d[8*x+7] = vp[x];
}
}
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__(
EMMS" \n\t"
SFENCE" \n\t"
src += 2*count;
count= - count;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
if(count <= -16) {
count += 15;
__asm__ volatile(
dst1+= count;
src += 4*count;
count= - count;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
if(count <= -8) {
count += 7;
__asm__ volatile(
dst1+= count;
src += 4*count;
count= - count;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
if(count <= -8) {
count += 7;
__asm__ volatile(
src += srcStride;
ydst+= lumStride;
}
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__(
EMMS" \n\t"
SFENCE" \n\t"
udst+= chromStride;
vdst+= chromStride;
}
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__(
EMMS" \n\t"
SFENCE" \n\t"
src += srcStride;
ydst+= lumStride;
}
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__(
EMMS" \n\t"
SFENCE" \n\t"
udst+= chromStride;
vdst+= chromStride;
}
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
__asm__(
EMMS" \n\t"
SFENCE" \n\t"