extern const uint8_t ff_reverse[256];
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
// avoid +32 for shift optimization (gcc should do that ...)
static inline int32_t NEG_SSR32( int32_t a, int8_t s){
asm ("sarl %1, %0\n\t"
#endif
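/* A portable sketch of what the asm above computes (NEG_SSR32_c is a
 * hypothetical name, not from the tree): an arithmetic shift right by
 * 32-s.  The x86 version can pass -s directly because SARL only looks at
 * the low 5 bits of its count, so the "+32" never has to be computed. */
static inline int32_t NEG_SSR32_c(int32_t a, int8_t s){
    return a >> (32 - s);
}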
/* used to avoid misaligned exceptions on some archs (alpha, ...) */
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
# define unaligned16(a) (*(const uint16_t*)(a))
# define unaligned32(a) (*(const uint32_t*)(a))
# define unaligned64(a) (*(const uint64_t*)(a))
unaligned(32)
unaligned(64)
#undef unaligned
-#endif /* defined(ARCH_X86) || defined(ARCH_X86_64) */
+#endif /* defined(ARCH_X86) */
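/* On strict-alignment targets (the alpha case mentioned above) a safe
 * fallback is to copy the bytes instead of dereferencing a possibly
 * misaligned pointer.  A minimal illustrative sketch, assuming <string.h>
 * and a hypothetical name; not a claim about the fallback the tree uses: */
static inline uint32_t unaligned32_safe(const void *a){
    uint32_t v;
    memcpy(&v, a, sizeof(v)); /* compilers lower this to a plain load where legal */
    return v;
}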
#ifndef ALT_BITSTREAM_WRITER
static inline void put_bits(PutBitContext *s, int n, unsigned int value)
static inline void put_bits(PutBitContext *s, int n, unsigned int value)
{
# ifdef ALIGNED_BITSTREAM_WRITER
-# if defined(ARCH_X86) || defined(ARCH_X86_64)
+# if defined(ARCH_X86)
asm volatile(
"movl %0, %%ecx \n\t"
"xorl %%eax, %%eax \n\t"
s->index= index;
# endif
# else //ALIGNED_BITSTREAM_WRITER
-# if defined(ARCH_X86) || defined(ARCH_X86_64)
+# if defined(ARCH_X86)
asm volatile(
"movl $7, %%ecx \n\t"
"andl %0, %%ecx \n\t"
name##_bit_count-= 32;\
}\
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
# define SKIP_CACHE(name, gb, num)\
asm(\
"shldl %2, %1, %0 \n\t"\
necessitate modifying mpegvideo.c. The problem comes from the
fact that they decided to store the quantized DC (which would lead
to problems if Q could vary !) */
-#if (defined(ARCH_X86) || defined(ARCH_X86_64)) && !defined PIC
+#if (defined(ARCH_X86)) && !defined PIC
asm volatile(
"movl %3, %%eax \n\t"
"shrl $1, %%eax \n\t"
# define LEGACY_REGS "=q"
#endif
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
static always_inline uint16_t bswap_16(uint16_t x)
{
__asm("rorw $8, %0" :
return r.ll;
#endif
}
-#endif /* defined(ARCH_X86) || defined(ARCH_X86_64) */
+#endif /* defined(ARCH_X86) */
#endif /* !HAVE_BYTESWAP_H */
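/* For compilers/targets without the asm above, plain C byte swaps work as a
 * fallback.  A minimal sketch (names mirror the functions above but are
 * hypothetical; this is illustrative, not the generic path from bswap.h): */
static always_inline uint16_t bswap_16_c(uint16_t x){
    return (x >> 8) | (x << 8);
}
static always_inline uint32_t bswap_32_c(uint32_t x){
    x = ((x << 8) & 0xff00ff00) | ((x >> 8) & 0x00ff00ff); /* swap bytes within halves */
    return (x >> 16) | (x << 16);                          /* swap the halves */
}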
}\
}
-#if defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_POWERPC)
+#if defined(ARCH_X86) || defined(ARCH_POWERPC)
#if defined(ARCH_X86_64)
static inline uint64_t read_time(void)
{
extern const uint32_t inverse[256];
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
# define FASTDIV(a,b) \
({\
int ret,dmy;\
return ret;
}
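/* The trick behind FASTDIV above can also be written in portable C: a
 * division by b becomes a multiply with a precomputed, suitably rounded
 * 2^32/b from inverse[] plus a shift.  A hedged sketch (FASTDIV_C is a
 * hypothetical name; the rounding stored in inverse[] is assumed here): */
#define FASTDIV_C(a,b) ((uint32_t)((((uint64_t)(a)) * inverse[b]) >> 32))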
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
#define MASK_ABS(mask, level)\
asm volatile(\
"cdq \n\t"\
#define TEMP_STRIDE 8
//#define NUM_BLOCKS_AT_ONCE 16 //not used yet
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
static uint64_t __attribute__((aligned(8))) attribute_used w05= 0x0005000500050005LL;
static uint64_t __attribute__((aligned(8))) attribute_used w04= 0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) attribute_used w20= 0x0020002000200020LL;
};
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
static inline void prefetchnta(void *p)
{
asm volatile( "prefetchnta (%0)\n\t"
#endif //HAVE_ALTIVEC
#endif //ARCH_POWERPC
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
#if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
#define COMPILE_MMX
#if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
#define COMPILE_3DNOW
#endif
-#endif /* defined(ARCH_X86) || defined(ARCH_X86_64) */
+#endif /* defined(ARCH_X86) */
#undef HAVE_MMX
#undef HAVE_MMX2
// difference wouldn't be measurable here but it's much better because
// someone might exchange the cpu without restarting mplayer ;)
#ifdef RUNTIME_CPUDETECT
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
// ordered by speed, fastest first
if(c->cpuCaps & PP_CPU_CAPS_MMX2)
postProcess_MMX2(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
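/* Standalone sketch of the runtime-dispatch idea used above: pick the
 * fastest available implementation from the detected capability bits once,
 * then call through a function pointer.  All names below are hypothetical,
 * purely to illustrate the pattern. */
#include <stdio.h>

#define CAP_MMX2  0x1
#define CAP_3DNOW 0x2

static void process_mmx2(void)  { puts("mmx2 path"); }
static void process_3dnow(void) { puts("3dnow path"); }
static void process_c(void)     { puts("plain C path"); }

static void (*select_path(unsigned caps))(void)
{
    /* ordered by speed, fastest first */
    if (caps & CAP_MMX2)  return process_mmx2;
    if (caps & CAP_3DNOW) return process_3dnow;
    return process_c;
}

int main(void)
{
    void (*process)(void) = select_path(CAP_3DNOW);
    process(); /* prints "3dnow path" */
    return 0;
}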