#define movupd movups
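/*
 * movups and movupd perform the same unaligned 16-byte copy; the movups
 * encoding is one byte shorter (no 0x66 prefix) and, on the AMD cores this
 * kernel targets, mixing the float and double move forms typically carries
 * no bypass penalty, so a single mnemonic is used throughout.
 */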
\r
#define KERNEL1(xx) \
- vfmaddpd %xmm8,%xmm1,%xmm0,%xmm8 ;\
- vmovaps %xmm2, %xmm0 ;\
- vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\
- vfmaddpd %xmm9,%xmm3,%xmm0,%xmm9 ;\
+ vmovups -16 * SIZE(AO, %rax, 4),%xmm0 ;\
+ vfmaddpd %xmm8,%xmm0,%xmm1,%xmm8 ;\
+ vmovaps %xmm2,%xmm0 ;\
+ vmovddup -16 * SIZE(BO, %rax, 4), %xmm1 ;\
+ vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 ;\
+ vfmaddpd %xmm9,%xmm0,%xmm3,%xmm9 ;\
+ vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\
vfmaddpd %xmm12,%xmm2,%xmm1,%xmm12 ;\
- vmovddup -14 * SIZE(BO, %rax, 4), %xmm1 ;\
vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\
+ vmovddup -14 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup -13 * SIZE(BO, %rax, 4), %xmm3 ;\
- vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\
- vfmaddpd %xmm11,%xmm3,%xmm0,%xmm11 ;\
- vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
+ vfmaddpd %xmm10,%xmm0,%xmm1,%xmm10 ;\
+ vfmaddpd %xmm11,%xmm0,%xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\
- vmovups -12 * SIZE(AO, %rax, 4), %xmm0 ;\
+ vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
+ vmovups -12 * SIZE(AO, %rax, 4), %xmm0 ;\
+ vmovups -10 * SIZE(AO, %rax, 4),%xmm2 ;\
vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\
- vmovaps %xmm0, %xmm2
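/*
 * The reordered KERNEL1 pairs the FMAs that read each A register
 * (xmm8-xmm11 consume %xmm0, xmm12-xmm15 consume %xmm2) and aims to issue
 * every load as soon as the register it overwrites is free. The vfmaddpd
 * operand swaps are cosmetic: in AT&T syntax,
 * vfmaddpd %xmm8,%xmm0,%xmm1,%xmm8 computes xmm8 = xmm1*xmm0 + xmm8,
 * so exchanging the two multiplicand operands cannot change the result.
 */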
\r
#define KERNEL2(xx) \
- vfmaddpd %xmm8,%xmm1, %xmm0,%xmm8 ;\
- vmovaps %xmm2, %xmm0 ;\
- vmovups -10 * SIZE(AO, %rax, 4),%xmm2 ;\
-/*A*/ vmovups (AO, %rax, 4), %xmm6 ;\
+ vmovups -8 * SIZE(AO, %rax, 4),%xmm4 ;\
+ vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\
+ vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\
vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\
- vfmaddpd %xmm9,%xmm3, %xmm0,%xmm9 ;\
- vmovddup -10 * SIZE(BO, %rax, 4), %xmm1 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
+ vmovddup -10 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup -9 * SIZE(BO, %rax, 4), %xmm3 ;\
- vfmaddpd %xmm10,%xmm1, %xmm0,%xmm10 ;\
+ vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\
+ vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\
-/**/ vmovddup (BO, %rax, 4), %xmm1 ;\
- vfmaddpd %xmm11,%xmm3, %xmm0,%xmm11 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
+ vmovddup -8 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\
- vmovaps %xmm4, %xmm2
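/*
 * KERNEL2 through KERNEL8 below get the same treatment. Note also that the
 * far preloads (tagged A for the A matrix) each move two stages later,
 * e.g. the (AO, %rax, 4) load of %xmm6 moves from KERNEL2 to KERNEL4, so
 * each block is fetched closer to the iteration that consumes it.
 */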
\r
#define KERNEL3(xx) \
- vfmaddpd %xmm8,%xmm5, %xmm4, %xmm8 ;\
- vmovaps %xmm2, %xmm4 ;\
- vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\
+ vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\
+ vfmaddpd %xmm8, %xmm4, %xmm5, %xmm8 ;\
+ vfmaddpd %xmm9, %xmm4, %xmm3,%xmm9 ;\
vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\
- vfmaddpd %xmm9,%xmm3, %xmm4,%xmm9 ;\
- vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
+ vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup -5 * SIZE(BO, %rax, 4), %xmm3 ;\
- vfmaddpd %xmm10,%xmm5, %xmm4,%xmm10 ;\
+ vfmaddpd %xmm10,%xmm4, %xmm5,%xmm10 ;\
+ vfmaddpd %xmm11,%xmm4, %xmm3, %xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\
- vfmaddpd %xmm11,%xmm3, %xmm4, %xmm11 ;\
- vmovups -4 * SIZE(AO, %rax, 4), %xmm4 ;\
+ vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
+ vmovups -4 * SIZE(AO, %rax, 4), %xmm4 ;\
+ vmovups -2 * SIZE(AO, %rax, 4),%xmm2 ;\
vmovddup -4 * SIZE(BO, %rax, 4), %xmm5 ;\
- vfmaddpd %xmm15,%xmm2,%xmm3,%xmm15 ;\
vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\
- vmovaps %xmm4, %xmm2

#define KERNEL4(xx) \
- vfmaddpd %xmm8,%xmm5, %xmm4,%xmm8 ;\
- vmovaps %xmm2, %xmm4 ;\
- vmovups -2 * SIZE(AO, %rax, 4),%xmm2 ;\
+ vfmaddpd %xmm8,%xmm4, %xmm5,%xmm8 ;\
+ vfmaddpd %xmm9,%xmm4, %xmm3,%xmm9 ;\
vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\
-/*A*/ vmovups 8 * SIZE(AO, %rax, 4), %xmm7 ;\
- vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\
- vfmaddpd %xmm9,%xmm3, %xmm4,%xmm9 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
+ vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup -1 * SIZE(BO, %rax, 4), %xmm3 ;\
- vfmaddpd %xmm10,%xmm5, %xmm4,%xmm10 ;\
+ vfmaddpd %xmm10,%xmm4, %xmm5,%xmm10 ;\
+ vfmaddpd %xmm11,%xmm4, %xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\
-/**/ vmovddup 8 * SIZE(BO, %rax, 4), %xmm5 ;\
- vfmaddpd %xmm11,%xmm3, %xmm4,%xmm11 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
+/*A*/ vmovups (AO, %rax, 4), %xmm6 ;\
+ vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\
vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\
- vmovaps %xmm6, %xmm2
+/**/ vmovddup (BO, %rax, 4), %xmm1 ;\
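/*
 * Besides its own eight FMAs, KERNEL4 now starts the next pipeline stage:
 * the %xmm6 (A) and %xmm1 (B) values loaded here feed the first FMAs of
 * KERNEL5, presumably issued this early to hide the load latency behind
 * the in-flight multiplies.
 */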
\r
#define KERNEL5(xx) \
- vfmaddpd %xmm8,%xmm1, %xmm6,%xmm8 ;\
- vmovaps %xmm2, %xmm6 ;\
- vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\
+ vfmaddpd %xmm8,%xmm6, %xmm1,%xmm8 ;\
+ vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\
vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\
- vmovddup 2 * SIZE(BO, %rax, 4), %xmm1 ;\
- vfmaddpd %xmm9,%xmm3, %xmm6,%xmm9 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
+ vmovddup 2 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup 3 * SIZE(BO, %rax, 4), %xmm3 ;\
- vfmaddpd %xmm10,%xmm1, %xmm6,%xmm10 ;\
+ vfmaddpd %xmm10,%xmm6, %xmm1,%xmm10 ;\
+ vfmaddpd %xmm11,%xmm6, %xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\
- vfmaddpd %xmm11,%xmm3, %xmm6,%xmm11 ;\
- vmovups 4 * SIZE(AO, %rax, 4), %xmm6 ;\
- vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
- vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\
- vmovaps %xmm6, %xmm2
+ vmovups 4 * SIZE(AO, %rax, 4), %xmm6 ;\
+ vmovups 6 * SIZE(AO, %rax, 4),%xmm2 ;\
+ vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\
+ vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\

#define KERNEL6(xx) \
- vfmaddpd %xmm8,%xmm1, %xmm6,%xmm8 ;\
- vmovaps %xmm2, %xmm6 ;\
- vmovups 6 * SIZE(AO, %rax, 4),%xmm2 ;\
+ vfmaddpd %xmm8,%xmm6, %xmm1,%xmm8 ;\
+ vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\
vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\
-/*A*/ vmovups 16 * SIZE(AO, %rax, 4), %xmm0 ;\
- vmovddup 6 * SIZE(BO, %rax, 4), %xmm1 ;\
- vfmaddpd %xmm9,%xmm3, %xmm6,%xmm9 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
+ vmovddup 6 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup 7 * SIZE(BO, %rax, 4), %xmm3 ;\
- vfmaddpd %xmm10,%xmm1, %xmm6,%xmm10 ;\
+ vfmaddpd %xmm10,%xmm6, %xmm1,%xmm10 ;\
+ vfmaddpd %xmm11,%xmm6, %xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\
-/**/ vmovddup 16 * SIZE(BO, %rax, 4), %xmm1 ;\
- vfmaddpd %xmm11,%xmm3, %xmm6,%xmm11 ;\
- vfmaddpd %xmm15,%xmm2,%xmm3,%xmm15 ;\
+ vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
+/*A*/ vmovups 8 * SIZE(AO, %rax, 4), %xmm7 ;\
+ vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\
+/**/ vmovddup 8 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\
- vmovaps %xmm7, %xmm2

#define KERNEL7(xx) \
- vfmaddpd %xmm8,%xmm5, %xmm7,%xmm8 ;\
- vmovaps %xmm2, %xmm7 ;\
- vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\
+ vfmaddpd %xmm8,%xmm7, %xmm5,%xmm8 ;\
+ vfmaddpd %xmm9,%xmm7, %xmm3,%xmm9 ;\
vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\
- vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\
- vfmaddpd %xmm9,%xmm3, %xmm7,%xmm9 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
+ vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup 11 * SIZE(BO, %rax, 4), %xmm3 ;\
- vfmaddpd %xmm10,%xmm5, %xmm7,%xmm10 ;\
+ vfmaddpd %xmm10,%xmm7, %xmm5,%xmm10 ;\
+ vfmaddpd %xmm11,%xmm7, %xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\
- vfmaddpd %xmm11,%xmm3, %xmm7,%xmm11 ;\
- vmovups 12 * SIZE(AO, %rax, 4), %xmm7 ;\
- vmovddup 12 * SIZE(BO, %rax, 4), %xmm5 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
+ vmovups 12 * SIZE(AO, %rax, 4), %xmm7 ;\
+ vmovups 14 * SIZE(AO, %rax, 4),%xmm2 ;\
+ vmovddup 12 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\
- vmovaps %xmm7, %xmm2

#define KERNEL8(xx) \
- vfmaddpd %xmm8,%xmm5, %xmm7,%xmm8 ;\
- vmovaps %xmm2, %xmm7 ;\
- vmovups 14 * SIZE(AO, %rax, 4),%xmm2 ;\
-/*A*/ vmovups 24 * SIZE(AO, %rax, 4), %xmm4 ;\
+ vfmaddpd %xmm8,%xmm7, %xmm5,%xmm8 ;\
+ vfmaddpd %xmm9,%xmm7, %xmm3,%xmm9 ;\
vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\
- vfmaddpd %xmm9,%xmm3, %xmm7,%xmm9 ;\
- vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\
vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\
+ vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup 15 * SIZE(BO, %rax, 4), %xmm3 ;\
- vfmaddpd %xmm10,%xmm5, %xmm7,%xmm10 ;\
+ vfmaddpd %xmm10,%xmm7, %xmm5,%xmm10 ;\
+ vfmaddpd %xmm11,%xmm7, %xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\
-/**/ vmovddup 24 * SIZE(BO, %rax, 4), %xmm5 ;\
- vfmaddpd %xmm11,%xmm3, %xmm7,%xmm11 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
+/*A*/ vmovups 16 * SIZE(AO, %rax, 4), %xmm0 ;\
+ vmovddup 16 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\
vmovaps %xmm0, %xmm2 ;\
addq $8 * SIZE, %rax ;\
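/*
 * All eight macros address A and B through the same (AO, %rax, 4) and
 * (BO, %rax, 4) forms, so the index is stepped once per unrolled group,
 * here at the end of KERNEL8; this addq also sets the flags that the
 * je after each group in the main loop tests.
 */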
\r
#define KERNEL_SUB1(xx) \
+ vmovddup -15 * SIZE(BO), %xmm3 ;\
+ vmovups -16 * SIZE(AO),%xmm0 ;\
vfmaddpd %xmm8, %xmm1, %xmm0,%xmm8 ;\
vmovapd %xmm2, %xmm0 ;\
vmovups -14 * SIZE(AO),%xmm2 ;\
vfmaddpd %xmm8,%xmm1, %xmm0,%xmm8 ;\
vmovaps %xmm2, %xmm0 ;\
vmovups -10 * SIZE(AO),%xmm2 ;\
+ vmovups -8 * SIZE(AO),%xmm4 ;\
vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\
vfmaddpd %xmm9,%xmm3, %xmm0,%xmm9 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
vmovddup -10 * SIZE(BO), %xmm1 ;\
vmovddup -9 * SIZE(BO), %xmm3 ;\
+ vmovddup -8 * SIZE(BO), %xmm5 ;\
vfmaddpd %xmm10,%xmm1, %xmm0,%xmm10 ;\
vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\
vfmaddpd %xmm11,%xmm3, %xmm0,%xmm11 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
- vmovups (AO), %xmm0 ;\
- vmovddup (BO), %xmm1 ;\
vmovddup -7 * SIZE(BO), %xmm3 ;\
vmovaps %xmm4, %xmm2

vfmaddpd %xmm8,%xmm5, %xmm4,%xmm8 ;\
vmovaps %xmm2, %xmm4 ;\
vmovups -2 * SIZE(AO),%xmm2 ;\
+ vmovups (AO), %xmm0 ;\
vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
vmovddup -2 * SIZE(BO), %xmm5 ;\
vfmaddpd %xmm9,%xmm3, %xmm4,%xmm9 ;\
vmovddup -1 * SIZE(BO), %xmm3 ;\
+ vmovddup (BO), %xmm1 ;\
vfmaddpd %xmm10,%xmm5, %xmm4,%xmm10 ;\
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\
vfmaddpd %xmm11,%xmm3, %xmm4,%xmm11 ;\
leaq (B, %rax, 4), BO
#endif

- vzeroall
- prefetcht0 256(CO1)
- prefetcht0 320(CO1)
- prefetcht0 256(CO2)
- prefetcht0 320(CO2)
+ vxorpd %xmm8, %xmm8,%xmm8
+ vxorpd %xmm9, %xmm9,%xmm9
+ vxorpd %xmm10, %xmm10,%xmm10
+ vxorpd %xmm11, %xmm11,%xmm11
+ vxorpd %xmm12, %xmm12,%xmm12
+ vxorpd %xmm13, %xmm13,%xmm13
+ vxorpd %xmm14, %xmm14,%xmm14
+ vxorpd %xmm15, %xmm15,%xmm15
+
+ prefetcht0 (CO1)
+ prefetcht0 8*SIZE(CO1)
+ prefetcht0 (CO1,LDC)
+ prefetcht0 8*SIZE(CO1,LDC)
+ prefetcht0 (CO2)
+ prefetcht0 8*SIZE(CO2)
+ prefetcht0 (CO2,LDC)
+ prefetcht0 8*SIZE(CO2,LDC)
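/*
 * vzeroall clears all sixteen ymm registers and is comparatively expensive
 * on some AMD parts; the explicit vxorpd sequence zeroes only the eight
 * accumulators, and xor-with-self is a dependency-breaking zero idiom.
 * The new prefetcht0 block touches two cache lines in each of the four C
 * columns this tile will store to (CO1, CO1+LDC, CO2, CO2+LDC), instead
 * of two fixed byte offsets past CO1 and CO2.
 */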
vmovups -16 * SIZE(AO), %xmm0
vmovddup -16 * SIZE(BO), %xmm1
vmovddup -15 * SIZE(BO), %xmm3
- vmovups -8 * SIZE(AO), %xmm4
- vmovddup -8 * SIZE(BO), %xmm5

vmovaps %xmm0, %xmm2
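/*
 * Main loop: each block in .L12 below runs one full 8x-unrolled pass
 * (KERNEL1 through KERNEL8), with prefetches reaching 24 and 32 elements
 * ahead of the current A and B positions; the addq inside KERNEL8 updates
 * the trip count whose flags the following je .L15 tests.
 */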
\r
\r
.align 16
.L12:
- prefetcht0 (AO,%rax,4)
- prefetcht0 (BO,%rax,4)
+ prefetcht0 24*SIZE(AO,%rax,4)
+ prefetcht0 32*SIZE(AO,%rax,4)
+ prefetcht0 24*SIZE(BO,%rax,4)
+ prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0)
KERNEL2(16 * 0)
KERNEL3(16 * 0)
KERNEL4(16 * 0)
KERNEL5(16 * 0)
KERNEL6(16 * 0)
KERNEL7(16 * 0)
KERNEL8(16 * 0)
NOBRANCH
je .L15
+ prefetcht0 24*SIZE(AO,%rax,4)
+ prefetcht0 32*SIZE(AO,%rax,4)
+ prefetcht0 24*SIZE(BO,%rax,4)
+ prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0)
KERNEL2(16 * 0)
KERNEL3(16 * 0)
KERNEL4(16 * 0)
KERNEL5(16 * 0)
KERNEL6(16 * 0)
KERNEL7(16 * 0)
KERNEL8(16 * 0)
NOBRANCH
je .L15
+ prefetcht0 24*SIZE(AO,%rax,4)
+ prefetcht0 32*SIZE(AO,%rax,4)
+ prefetcht0 24*SIZE(BO,%rax,4)
+ prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0)
KERNEL2(16 * 0)
KERNEL3(16 * 0)
KERNEL4(16 * 0)
KERNEL5(16 * 0)
KERNEL6(16 * 0)
KERNEL7(16 * 0)
KERNEL8(16 * 0)
NOBRANCH
je .L15
+ prefetcht0 24*SIZE(AO,%rax,4)
+ prefetcht0 32*SIZE(AO,%rax,4)
+ prefetcht0 24*SIZE(BO,%rax,4)
+ prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0)
KERNEL2(16 * 0)
KERNEL3(16 * 0)
KERNEL4(16 * 0)
KERNEL5(16 * 0)
KERNEL6(16 * 0)
KERNEL7(16 * 0)
KERNEL8(16 * 0)
NOBRANCH
je .L15
+ prefetcht0 24*SIZE(AO,%rax,4)
+ prefetcht0 32*SIZE(AO,%rax,4)
+ prefetcht0 24*SIZE(BO,%rax,4)
+ prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0)
KERNEL2(16 * 0)
KERNEL3(16 * 0)
KERNEL4(16 * 0)
KERNEL5(16 * 0)
KERNEL6(16 * 0)
KERNEL7(16 * 0)
KERNEL8(16 * 0)
NOBRANCH
je .L15
+ prefetcht0 24*SIZE(AO,%rax,4)
+ prefetcht0 32*SIZE(AO,%rax,4)
+ prefetcht0 24*SIZE(BO,%rax,4)
+ prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0)
KERNEL2(16 * 0)
KERNEL3(16 * 0)
KERNEL4(16 * 0)
KERNEL5(16 * 0)
KERNEL6(16 * 0)
KERNEL7(16 * 0)
KERNEL8(16 * 0)
NOBRANCH
je .L15
+ prefetcht0 24*SIZE(AO,%rax,4)
+ prefetcht0 32*SIZE(AO,%rax,4)
+ prefetcht0 24*SIZE(BO,%rax,4)
+ prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0)
KERNEL2(16 * 0)
KERNEL3(16 * 0)
KERNEL4(16 * 0)
KERNEL5(16 * 0)
KERNEL6(16 * 0)
KERNEL7(16 * 0)
KERNEL8(16 * 0)
NOBRANCH
je .L15
+ prefetcht0 24*SIZE(AO,%rax,4)
+ prefetcht0 32*SIZE(AO,%rax,4)
+ prefetcht0 24*SIZE(BO,%rax,4)
+ prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0)
KERNEL2(16 * 0)
KERNEL3(16 * 0)
KERNEL4(16 * 0)
KERNEL5(16 * 0)
KERNEL6(16 * 0)
KERNEL7(16 * 0)
KERNEL8(16 * 0)

#endif

- .align 2
vmovups %xmm8, (CO1)
vmovups %xmm12, 2 * SIZE(CO1)
- .align 2
vmovups %xmm9, (CO1, LDC)
vmovups %xmm13, 2 * SIZE(CO1, LDC)
- .align 2
vmovups %xmm10, (CO2)
vmovups %xmm14, 2 * SIZE(CO2)
- .align 2
vmovups %xmm11, (CO2, LDC)
vmovups %xmm15, 2 * SIZE(CO2, LDC)
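/*
 * Writeback of the 4x4 tile: xmm8/xmm12 hold column 0 (CO1), xmm9/xmm13
 * column 1 (CO1 + LDC), xmm10/xmm14 column 2 (CO2), and xmm11/xmm15
 * column 3 (CO2 + LDC), two doubles per register.
 */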
\r
vxorps %xmm13, %xmm13,%xmm13
vmovups -16 * SIZE(AO), %xmm0
vmovups -8 * SIZE(AO), %xmm4
- // prefetcht0 256(CO1)
- // prefetcht0 320(CO1)
- // prefetcht0 256(CO2)
- // prefetcht0 320(CO2)
- // prefetchnta 24 * SIZE(CO1)
- // prefetchnta 32 * SIZE(CO1)
- // prefetchw 3 * SIZE(CO1)
vmovups %xmm0, %xmm2
- // prefetchw 3 * SIZE(CO2)
- // prefetchnta -16 * SIZE(BB)
- // prefetch -16 * SIZE(BB)
subq $-8 * SIZE, BB

#ifndef TRMMKERNEL