"vmulps (%5), %%ymm0 , %%ymm0 \n\t"
#endif
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmovups (%2,%0,4), %%ymm5 \n\t" // 4 complex values from x
- ".align 2 \n\t"
+ ".p2align 1 \n\t"
"vmovups 32(%2,%0,4), %%ymm7 \n\t" // 4 complex values from x
"vmovups 64(%2,%0,4), %%ymm9 \n\t" // 4 complex values from x
"vmovups 96(%2,%0,4), %%ymm11 \n\t" // 4 complex values from x
"vaddps %%ymm10, %%ymm11, %%ymm11 \n\t"
"vmovups %%ymm5 , (%3,%0,4) \n\t"
- ".align 2 \n\t"
+ ".p2align 1 \n\t"
"vmovups %%ymm7 , 32(%3,%0,4) \n\t"
"vmovups %%ymm9 , 64(%3,%0,4) \n\t"
"vmovups %%ymm11, 96(%3,%0,4) \n\t"
"vxorps %%ymm6, %%ymm6, %%ymm6 \n\t"
"vxorps %%ymm7, %%ymm7, %%ymm7 \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmovups (%2,%0,4), %%ymm8 \n\t" // 2 * x
"vmovups 32(%2,%0,4), %%ymm9 \n\t" // 2 * x
"subq $16, %1 \n\t"
"jz 2f \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmulpd %%ymm4, %%ymm0, %%ymm4 \n\t"
"vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t"
"vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmovups (%2,%0,8), %%ymm12 \n\t" // 2 * x
"vmovups 32(%2,%0,8), %%ymm13 \n\t" // 2 * x
"subq $8, %1 \n\t"
"jz 2f \n\t"
- ".align 8 \n\t"
+ ".p2align 3 \n\t"
"1: \n\t"
"vmulpd %%xmm4, %%xmm0, %%xmm4 \n\t"
"subq $1 , %0 \n\t"
"jz 2f \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"prefetcht0 640(%1) \n\t"
"cmpq $0, %0 \n\t"
"je 2f \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmovups %%xmm0 ,-128(%1) \n\t"
"vbroadcastsd 16(%8), %%ymm6 \n\t" // temp1[1]
"vbroadcastsd 24(%8), %%ymm7 \n\t" // temp1[1]
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmovups (%3,%0,8), %%ymm9 \n\t" // 2 * y
"vbroadcastsd 24(%8), %%ymm7 \n\t" // temp1[1]
"xorq %0,%0 \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmovups (%3,%0,8), %%ymm9 \n\t" // 2 * y
"subq $32, %1 \n\t"
"jz 2f \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmulps %%ymm4, %%ymm0, %%ymm4 \n\t"
"vxorps %%ymm6, %%ymm6, %%ymm6 \n\t"
"vxorps %%ymm7, %%ymm7, %%ymm7 \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmovups (%2,%0,4), %%ymm12 \n\t" // 2 * x
"vmovups 32(%2,%0,4), %%ymm13 \n\t" // 2 * x
"je 4f \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vxorps %%ymm4 , %%ymm4 , %%ymm4 \n\t"
"vxorps %%ymm5 , %%ymm5 , %%ymm5 \n\t"
"je 4f \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vxorps %%ymm4 , %%ymm4 , %%ymm4 \n\t"
"vxorps %%ymm5 , %%ymm5 , %%ymm5 \n\t"
"je 4f \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"prefetcht0 384(%2,%0,4) \n\t"
"vmovups (%2,%0,4), %%ymm12 \n\t" // 8 * x
"subq $16, %1 \n\t"
"jz 2f \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmulps %%xmm4, %%xmm0, %%xmm4 \n\t"
"vbroadcastss 8(%8), %%xmm6 \n\t" // temp1[1]
"vbroadcastss 12(%8), %%xmm7 \n\t" // temp1[1]
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmovups (%3,%0,4), %%xmm9 \n\t" // 2 * y
"vbroadcastss 8(%8), %%ymm6 \n\t" // temp1[1]
"vbroadcastss 12(%8), %%ymm7 \n\t" // temp1[1]
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmovups (%3,%0,4), %%ymm9 \n\t" // 2 * y
"vbroadcastss 12(%8), %%ymm7 \n\t" // temp1[1]
"xorq %0,%0 \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmovups (%3,%0,4), %%ymm9 \n\t" // 2 * y
"vmulpd (%5), %%ymm0 , %%ymm0 \n\t"
#endif
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmovups (%2,%0,8), %%ymm5 \n\t" // 4 complex values from x
- ".align 2 \n\t"
+ ".p2align 1 \n\t"
"vmovups 32(%2,%0,8), %%ymm7 \n\t" // 4 complex values from x
"vmovups 64(%2,%0,8), %%ymm9 \n\t" // 4 complex values from x
"vmovups 96(%2,%0,8), %%ymm11 \n\t" // 4 complex values from x
"vaddpd %%ymm10, %%ymm11, %%ymm11 \n\t"
"vmovups %%ymm5 , (%3,%0,8) \n\t"
- ".align 2 \n\t"
+ ".p2align 1 \n\t"
"vmovups %%ymm7 , 32(%3,%0,8) \n\t"
"vmovups %%ymm9 , 64(%3,%0,8) \n\t"
"vmovups %%ymm11, 96(%3,%0,8) \n\t"
"vmulpd (%5), %%ymm0 , %%ymm0 \n\t"
#endif
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"prefetcht0 512(%2,%0,8) \n\t"
"prefetcht0 576(%2,%0,8) \n\t"
"vmovups (%2,%0,8), %%ymm5 \n\t" // 4 complex values from x
- ".align 2 \n\t"
+ ".p2align 1 \n\t"
"vmovups 32(%2,%0,8), %%ymm7 \n\t" // 4 complex values from x
"vmovups 64(%2,%0,8), %%ymm9 \n\t" // 4 complex values from x
"vmovups 96(%2,%0,8), %%ymm11 \n\t" // 4 complex values from x
"vaddpd %%ymm10, %%ymm11, %%ymm11 \n\t"
"vmovups %%ymm5 , (%3,%0,8) \n\t"
- ".align 2 \n\t"
+ ".p2align 1 \n\t"
"vmovups %%ymm7 , 32(%3,%0,8) \n\t"
"vmovups %%ymm9 , 64(%3,%0,8) \n\t"
"vmovups %%ymm11, 96(%3,%0,8) \n\t"
"vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t"
"vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"vmovups (%2,%0,8), %%ymm8 \n\t" // 2 * x
"vmovups 32(%2,%0,8), %%ymm9 \n\t" // 2 * x
"vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t"
"vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t"
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
"prefetcht0 512(%2,%0,8) \n\t"
"vmovups (%2,%0,8), %%ymm8 \n\t" // 2 * x
"vbroadcastsd 56(%2), %%ymm7 \n\t" // imag part x3
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
//"prefetcht0 256(%4,%0,8) \n\t"
"vbroadcastsd 16(%2), %%ymm2 \n\t" // real part x1
"vbroadcastsd 24(%2), %%ymm3 \n\t" // imag part x1
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
// "prefetcht0 256(%4,%0,8) \n\t"
"vbroadcastsd (%2), %%ymm0 \n\t" // real part x0
"vbroadcastsd 8(%2), %%ymm1 \n\t" // imag part x0
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
// "prefetcht0 256(%4,%0,8) \n\t"
"vbroadcastsd (%4), %%ymm0 \n\t" // alpha_r
"vbroadcastsd (%5), %%ymm1 \n\t" // alpha_i
- ".align 16 \n\t"
+ ".p2align 4 \n\t"
"1: \n\t"
// "prefetcht0 192(%2,%0,8) \n\t"
"vmovups (%2,%0,8), %%ymm8 \n\t" // 2 complex values from src