/* or implied, of The University of Texas at Austin. */\r
/*********************************************************************/\r
\r
+/*********************************************************************\r
+* Changelog:\r
+*\r
+* 2013/04/15 Saar\r
+* Prefetch for A and B\r
+* unrolling of the inner loop
+* using generic versions for ncopy and tcopy\r
+* moved vmovddup ALPHA, %xmm7 down\r
+* define A_PR1 192\r
+* define B_PR1 512\r
+**********************************************************************/\r
+\r
+/*********************************************************************\r
+* 2013/04/12 Saar\r
+* Performance:\r
+* 3584x3584:  89   GFLOPS with 8 threads on 4 modules
+*             76   GFLOPS with 4 threads on 4 modules
+*             53   GFLOPS with 4 threads on 2 modules
+*             46   GFLOPS with 2 threads on 2 modules
+*             28   GFLOPS with 2 threads on 1 module
+*             23.6 GFLOPS with 1 thread on 1 module
+*********************************************************************/\r
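+
+/*********************************************************************
+* Rough C sketch (an editorial reading of this kernel, not taken from
+* the original sources) of what one 4x4 micro-tile computation does;
+* "a" and "b" stand for the packed A and B panels:
+*
+*   double c[4][4] = {0};                 // accumulators xmm8..xmm15
+*   for (k = 0; k < K; k++)
+*     for (j = 0; j < 4; j++)             // B values, vmovddup broadcasts
+*       for (i = 0; i < 4; i++)           // A values, vmovups pairs
+*         c[i][j] += a[k][i] * b[k][j];   // vfmaddpd
+*
+*   then, for all i, j:
+*   C[i][j] = alpha * c[i][j] + C[i][j];  // write-back via vfmaddpd on CO1/CO2
+**********************************************************************/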
+\r
#define ASSEMBLER\r
#include "common.h"\r
\r
#define movapd movaps\r
#define movupd movups\r
\r
+#define A_PR1 192\r
+#define B_PR1 512\r
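+// A_PR1/B_PR1 are prefetch distances in bytes: prefetcht0 in the main loop
+// touches data A_PR1 bytes ahead of the current A position and B_PR1 bytes
+// ahead of the current B position.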
+\r
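+// KERNEL1..KERNEL8 each perform one k-step of the 4x4 micro-tile update.
+// The tile is accumulated in xmm8-xmm15 (two doubles per register); A is
+// loaded in pairs with vmovups and B is broadcast with vmovddup.  The FMA4
+// vfmaddpd instructions are interleaved with loads that run ahead of their
+// use: xmm6, xmm7 and xmm1 carry preloaded A/B values into the following
+// macro.  The index advance (addq $8 * SIZE, %rax) was moved out of
+// KERNEL8 into the unrolled loop bodies below.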
#define KERNEL1(xx) \\r
- vmovups -16 * SIZE(AO, %rax, 4),%xmm0 ;\\r
- vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\\r
- vmovddup -16 * SIZE(BO, %rax, 4), %xmm1 ;\\r
vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\\r
+ vfmaddpd %xmm8,%xmm6,%xmm7,%xmm8 ;\\r
+ vfmaddpd %xmm12,%xmm2,%xmm7,%xmm12 ;\\r
vmovddup -14 * SIZE(BO, %rax, 4), %xmm5 ;\\r
- vfmaddpd %xmm8,%xmm0,%xmm1,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm2,%xmm1,%xmm12 ;\\r
- vmovddup -13 * SIZE(BO, %rax, 4), %xmm7 ;\\r
- vmovups -12 * SIZE(AO, %rax, 4), %xmm4 ;\\r
- vmovups -10 * SIZE(AO, %rax, 4),%xmm6 ;\\r
- vfmaddpd %xmm9,%xmm0,%xmm3,%xmm9 ;\\r
+ vmovddup -13 * SIZE(BO, %rax, 4), %xmm4 ;\\r
+ vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\\r
vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\\r
- vfmaddpd %xmm10,%xmm0,%xmm5,%xmm10 ;\\r
+ vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\\r
+ vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\\r
+ vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\\r
+ vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\\r
+ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
- vfmaddpd %xmm11,%xmm0,%xmm7,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm2, %xmm7,%xmm15 ;\\r
+ vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\\r
\r
#define KERNEL2(xx) \\r
- vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\\r
+ vmovups -12 * SIZE(AO, %rax, 4), %xmm2 ;\\r
+ vmovups -10 * SIZE(AO, %rax, 4),%xmm0 ;\\r
vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm8, %xmm2, %xmm1,%xmm8 ;\\r
+ vfmaddpd %xmm12,%xmm0, %xmm1,%xmm12 ;\\r
vmovddup -10 * SIZE(BO, %rax, 4), %xmm5 ;\\r
- vmovddup -9 * SIZE(BO, %rax, 4), %xmm7 ;\\r
- vfmaddpd %xmm8, %xmm4, %xmm1,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm6, %xmm1,%xmm12 ;\\r
- vfmaddpd %xmm9, %xmm4, %xmm3,%xmm9 ;\\r
- vfmaddpd %xmm13,%xmm6, %xmm3,%xmm13 ;\\r
- vfmaddpd %xmm10,%xmm4, %xmm5,%xmm10 ;\\r
- vfmaddpd %xmm14,%xmm6, %xmm5,%xmm14 ;\\r
- vfmaddpd %xmm11,%xmm4, %xmm7,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm6, %xmm7,%xmm15 ;\\r
+ vfmaddpd %xmm9, %xmm2, %xmm3,%xmm9 ;\\r
+ vfmaddpd %xmm13,%xmm0, %xmm3,%xmm13 ;\\r
+ vmovddup -9 * SIZE(BO, %rax, 4), %xmm4 ;\\r
+ vfmaddpd %xmm10,%xmm2, %xmm5,%xmm10 ;\\r
+ vfmaddpd %xmm14,%xmm0, %xmm5,%xmm14 ;\\r
+ vfmaddpd %xmm11,%xmm2, %xmm4,%xmm11 ;\\r
+ vfmaddpd %xmm15,%xmm0, %xmm4,%xmm15 ;\\r
\r
#define KERNEL3(xx) \\r
- vmovups -8 * SIZE(AO, %rax, 4),%xmm0 ;\\r
vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\\r
- vmovddup -8 * SIZE(BO, %rax, 4), %xmm1 ;\\r
vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm8, %xmm6, %xmm7, %xmm8 ;\\r
+ vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\\r
vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\\r
- vmovddup -5 * SIZE(BO, %rax, 4), %xmm7 ;\\r
- vfmaddpd %xmm8, %xmm0, %xmm1, %xmm8 ;\\r
- vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\\r
- vmovups -4 * SIZE(AO, %rax, 4), %xmm4 ;\\r
- vmovups -2 * SIZE(AO, %rax, 4),%xmm6 ;\\r
- vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\\r
+ vmovddup -5 * SIZE(BO, %rax, 4), %xmm4 ;\\r
+ vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\\r
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\\r
- vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\\r
+ vmovddup (BO, %rax, 4), %xmm7 ;\\r
+ vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\\r
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
- vfmaddpd %xmm11,%xmm0, %xmm7, %xmm11 ;\\r
- vfmaddpd %xmm15,%xmm2, %xmm7,%xmm15 ;\\r
+ vmovddup -4 * SIZE(BO, %rax, 4), %xmm1 ;\\r
+ vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\\r
+ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
+ vmovups (AO, %rax, 4), %xmm6 ;\\r
\r
#define KERNEL4(xx) \\r
- vmovddup -4 * SIZE(BO, %rax, 4), %xmm1 ;\\r
+ vmovups -4 * SIZE(AO, %rax, 4), %xmm2 ;\\r
+ vmovups -2 * SIZE(AO, %rax, 4),%xmm0 ;\\r
vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm8,%xmm2, %xmm1,%xmm8 ;\\r
+ vfmaddpd %xmm12,%xmm0, %xmm1 ,%xmm12;\\r
vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\\r
- vmovddup -1 * SIZE(BO, %rax, 4), %xmm7 ;\\r
- vfmaddpd %xmm8,%xmm4, %xmm1,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm6, %xmm1 ,%xmm12;\\r
- vfmaddpd %xmm9,%xmm4, %xmm3,%xmm9 ;\\r
- vfmaddpd %xmm13,%xmm6, %xmm3,%xmm13 ;\\r
- vfmaddpd %xmm10,%xmm4, %xmm5,%xmm10 ;\\r
- vfmaddpd %xmm14,%xmm6, %xmm5,%xmm14 ;\\r
- vfmaddpd %xmm11,%xmm4, %xmm7,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm6, %xmm7,%xmm15 ;\\r
+ vfmaddpd %xmm9,%xmm2, %xmm3,%xmm9 ;\\r
+ vfmaddpd %xmm13,%xmm0, %xmm3,%xmm13 ;\\r
+ vmovddup -1 * SIZE(BO, %rax, 4), %xmm4 ;\\r
+ vfmaddpd %xmm10,%xmm2, %xmm5,%xmm10 ;\\r
+ vfmaddpd %xmm14,%xmm0, %xmm5,%xmm14 ;\\r
+ vfmaddpd %xmm11,%xmm2, %xmm4,%xmm11 ;\\r
+ vfmaddpd %xmm15,%xmm0, %xmm4,%xmm15 ;\\r
\r
#define KERNEL5(xx) \\r
- vmovups (AO, %rax, 4), %xmm0 ;\\r
vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\\r
- vmovddup (BO, %rax, 4), %xmm1 ;\\r
vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\\r
+ vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\\r
vmovddup 2 * SIZE(BO, %rax, 4), %xmm5 ;\\r
- vmovddup 3 * SIZE(BO, %rax, 4), %xmm7 ;\\r
- vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\\r
- vmovups 4 * SIZE(AO, %rax, 4), %xmm4 ;\\r
- vmovups 6 * SIZE(AO, %rax, 4),%xmm6 ;\\r
- vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\\r
+ vmovddup 3 * SIZE(BO, %rax, 4), %xmm4 ;\\r
+ vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\\r
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\\r
- vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\\r
+ vmovddup 8 * SIZE(BO, %rax, 4), %xmm7 ;\\r
+ vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\\r
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
- vfmaddpd %xmm11,%xmm0, %xmm7,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm2, %xmm7,%xmm15 ;\\r
+ vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\\r
+ vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\\r
+ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
+ vmovups 8 * SIZE(AO, %rax, 4), %xmm6 ;\\r
\r
#define KERNEL6(xx) \\r
- vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\\r
+ vmovups 4 * SIZE(AO, %rax, 4), %xmm2 ;\\r
+ vmovups 6 * SIZE(AO, %rax, 4),%xmm0 ;\\r
vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm8,%xmm2, %xmm1,%xmm8 ;\\r
+ vfmaddpd %xmm12,%xmm0, %xmm1,%xmm12 ;\\r
vmovddup 6 * SIZE(BO, %rax, 4), %xmm5 ;\\r
- vmovddup 7 * SIZE(BO, %rax, 4), %xmm7 ;\\r
- vfmaddpd %xmm8,%xmm4, %xmm1,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm6, %xmm1,%xmm12 ;\\r
- vfmaddpd %xmm9,%xmm4, %xmm3,%xmm9 ;\\r
- vfmaddpd %xmm13,%xmm6, %xmm3,%xmm13 ;\\r
- vfmaddpd %xmm10,%xmm4, %xmm5,%xmm10 ;\\r
- vfmaddpd %xmm14,%xmm6, %xmm5,%xmm14 ;\\r
- vfmaddpd %xmm11,%xmm4, %xmm7,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm6, %xmm7,%xmm15 ;\\r
+ vfmaddpd %xmm9,%xmm2, %xmm3,%xmm9 ;\\r
+ vfmaddpd %xmm13,%xmm0, %xmm3,%xmm13 ;\\r
+ vmovddup 7 * SIZE(BO, %rax, 4), %xmm4 ;\\r
+ vfmaddpd %xmm10,%xmm2, %xmm5,%xmm10 ;\\r
+ vfmaddpd %xmm14,%xmm0, %xmm5,%xmm14 ;\\r
+ vfmaddpd %xmm11,%xmm2, %xmm4,%xmm11 ;\\r
+ vfmaddpd %xmm15,%xmm0, %xmm4,%xmm15 ;\\r
\r
#define KERNEL7(xx) \\r
- vmovups 8 * SIZE(AO, %rax, 4), %xmm0 ;\\r
vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\\r
- vmovddup 8 * SIZE(BO, %rax, 4), %xmm1 ;\\r
vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\\r
+ vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\\r
vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\\r
- vmovddup 11 * SIZE(BO, %rax, 4), %xmm7 ;\\r
- vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\\r
- vmovups 12 * SIZE(AO, %rax, 4), %xmm4 ;\\r
- vmovups 14 * SIZE(AO, %rax, 4), %xmm6 ;\\r
- vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\\r
+ vmovddup 11 * SIZE(BO, %rax, 4), %xmm4 ;\\r
+ vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\\r
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\\r
- vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\\r
+ vmovddup 16 * SIZE(BO, %rax, 4), %xmm7 ;\\r
+ vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\\r
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
- vfmaddpd %xmm11,%xmm0, %xmm7,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm2, %xmm7,%xmm15 ;\\r
+ vmovddup 12 * SIZE(BO, %rax, 4), %xmm1 ;\\r
+ vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\\r
+ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
+ vmovups 16 * SIZE(AO, %rax, 4),%xmm6 ;\\r
\r
#define KERNEL8(xx) \\r
- vmovddup 12 * SIZE(BO, %rax, 4), %xmm1 ;\\r
+ vmovups 12 * SIZE(AO, %rax, 4), %xmm2 ;\\r
+ vmovups 14 * SIZE(AO, %rax, 4), %xmm0 ;\\r
vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm8,%xmm2, %xmm1,%xmm8 ;\\r
+ vfmaddpd %xmm12,%xmm0, %xmm1,%xmm12 ;\\r
vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\\r
- vmovddup 15 * SIZE(BO, %rax, 4), %xmm7 ;\\r
- vfmaddpd %xmm8,%xmm4, %xmm1,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm6, %xmm1,%xmm12 ;\\r
- vfmaddpd %xmm9,%xmm4, %xmm3,%xmm9 ;\\r
- vfmaddpd %xmm13, %xmm6, %xmm3,%xmm13 ;\\r
- vfmaddpd %xmm10,%xmm4, %xmm5,%xmm10 ;\\r
- vfmaddpd %xmm14,%xmm6, %xmm5,%xmm14 ;\\r
- vfmaddpd %xmm11,%xmm4, %xmm7,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm6, %xmm7,%xmm15 ;\\r
- addq $8 * SIZE, %rax ;\\r
+ vfmaddpd %xmm9,%xmm2, %xmm3,%xmm9 ;\\r
+ vfmaddpd %xmm13, %xmm0, %xmm3,%xmm13 ;\\r
+ vmovddup 15 * SIZE(BO, %rax, 4), %xmm4 ;\\r
+ vfmaddpd %xmm10,%xmm2, %xmm5,%xmm10 ;\\r
+ vfmaddpd %xmm14,%xmm0, %xmm5,%xmm14 ;\\r
+ vfmaddpd %xmm11,%xmm2, %xmm4,%xmm11 ;\\r
+ vfmaddpd %xmm15,%xmm0, %xmm4,%xmm15 ;\\r
\r
#define KERNEL_SUB1(xx) \\r
vmovups -16 * SIZE(AO),%xmm0 ;\\r
movq A, AO # aoffset = a\r
\r
movq K, %rax\r
- salq $BASE_SHIFT + 2, %rax\r
+	salq	$BASE_SHIFT + 2, %rax		# rax = K * 4 * SIZE  (K << 5 for double)
leaq (B, %rax), BB\r
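+	// BB = B + K * 4 * SIZE (presumably a prefetch pointer for the packed B panel)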
\r
movq M, I\r
leaq (B, %rax, 4), BO\r
#endif \r
\r
+ \r
+\r
vxorpd %xmm8, %xmm8,%xmm8\r
vxorpd %xmm9, %xmm9,%xmm9\r
vxorpd %xmm10, %xmm10,%xmm10\r
vxorpd %xmm14, %xmm14,%xmm14\r
vxorpd %xmm15, %xmm15,%xmm15\r
\r
- prefetcht0 (CO1)\r
- prefetcht0 (CO1,LDC)\r
- prefetcht0 (CO2)\r
- prefetcht0 (CO2,LDC)\r
+ // prefetchw (CO1)\r
+ // prefetchw (CO1,LDC)\r
+ // prefetchw (CO2)\r
+ // prefetchw (CO2,LDC)\r
\r
#ifndef TRMMKERNEL\r
movq K, %rax\r
leaq (AO, %rax, 4), AO\r
leaq (BO, %rax, 4), BO\r
negq %rax\r
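+	// %rax now counts up from a negative value toward zero; the unrolled
+	// loop below exits when it reaches zero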
- NOBRANCH\r
je .L15\r
// ALIGN_4\r
\r
- .align 16\r
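+	// preload the first A pair (xmm6) and the first B broadcast (xmm7);
+	// the KERNEL macros keep refreshing these ahead of their use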
+ vmovups -16 * SIZE(AO, %rax, 4),%xmm6 \r
+ vmovddup -16 * SIZE(BO, %rax, 4), %xmm7 \r
\r
-#define PR1 16\r
-#define PR2 24\r
+ .align 16\r
\r
.L12:\r
- prefetcht0 PR1*SIZE(AO,%rax,4)\r
- prefetcht0 PR2*SIZE(AO,%rax,4)\r
- prefetcht0 PR1*SIZE(BO,%rax,4)\r
- prefetcht0 PR2*SIZE(BO,%rax,4)\r
- KERNEL1(16 * 0)\r
- KERNEL2(16 * 0)\r
- KERNEL3(16 * 0)\r
- KERNEL4(16 * 0)\r
- KERNEL5(16 * 0)\r
- KERNEL6(16 * 0)\r
- KERNEL7(16 * 0)\r
- KERNEL8(16 * 0)\r
- jl .L12\r
- ALIGN_4\r
+\r
+#ifndef SMP \r
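+// single-threaded build: the eight-kernel body is replicated four times,
+// each copy prefetching A_PR1/B_PR1 bytes ahead of A and B in 64-byte
+// steps and testing for loop exit after advancing the index, presumably
+// to keep branch overhead low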
+\r
+ prefetcht0 A_PR1(AO,%rax,4)\r
+ prefetcht0 B_PR1(BO,%rax,4)\r
+ KERNEL1(16 * 0)\r
+ KERNEL2(16 * 0)\r
+ prefetcht0 A_PR1+64(AO,%rax,4)\r
+ prefetcht0 B_PR1+64(BO,%rax,4)\r
+ KERNEL3(16 * 0)\r
+ KERNEL4(16 * 0)\r
+ prefetcht0 A_PR1+128(AO,%rax,4)\r
+ prefetcht0 B_PR1+128(BO,%rax,4)\r
+ KERNEL5(16 * 0)\r
+ KERNEL6(16 * 0)\r
+ prefetcht0 A_PR1+192(AO,%rax,4)\r
+ prefetcht0 B_PR1+192(BO,%rax,4)\r
+ KERNEL7(16 * 0)\r
+ KERNEL8(16 * 0)\r
+\r
+ addq $8 * SIZE, %rax \r
+ je .L15\r
+\r
+ prefetcht0 A_PR1(AO,%rax,4)\r
+ prefetcht0 B_PR1(BO,%rax,4)\r
+ KERNEL1(16 * 0)\r
+ KERNEL2(16 * 0)\r
+ prefetcht0 A_PR1+64(AO,%rax,4)\r
+ prefetcht0 B_PR1+64(BO,%rax,4)\r
+ KERNEL3(16 * 0)\r
+ KERNEL4(16 * 0)\r
+ prefetcht0 A_PR1+128(AO,%rax,4)\r
+ prefetcht0 B_PR1+128(BO,%rax,4)\r
+ KERNEL5(16 * 0)\r
+ KERNEL6(16 * 0)\r
+ prefetcht0 A_PR1+192(AO,%rax,4)\r
+ prefetcht0 B_PR1+192(BO,%rax,4)\r
+ KERNEL7(16 * 0)\r
+ KERNEL8(16 * 0)\r
+\r
+ addq $8 * SIZE, %rax \r
+ je .L15\r
+\r
+\r
+ prefetcht0 A_PR1(AO,%rax,4)\r
+ prefetcht0 B_PR1(BO,%rax,4)\r
+ KERNEL1(16 * 0)\r
+ KERNEL2(16 * 0)\r
+ prefetcht0 A_PR1+64(AO,%rax,4)\r
+ prefetcht0 B_PR1+64(BO,%rax,4)\r
+ KERNEL3(16 * 0)\r
+ KERNEL4(16 * 0)\r
+ prefetcht0 A_PR1+128(AO,%rax,4)\r
+ prefetcht0 B_PR1+128(BO,%rax,4)\r
+ KERNEL5(16 * 0)\r
+ KERNEL6(16 * 0)\r
+ prefetcht0 A_PR1+192(AO,%rax,4)\r
+ prefetcht0 B_PR1+192(BO,%rax,4)\r
+ KERNEL7(16 * 0)\r
+ KERNEL8(16 * 0)\r
+\r
+ addq $8 * SIZE, %rax \r
+ je .L15\r
+\r
+ prefetcht0 A_PR1(AO,%rax,4)\r
+ prefetcht0 B_PR1(BO,%rax,4)\r
+ KERNEL1(16 * 0)\r
+ KERNEL2(16 * 0)\r
+ prefetcht0 A_PR1+64(AO,%rax,4)\r
+ prefetcht0 B_PR1+64(BO,%rax,4)\r
+ KERNEL3(16 * 0)\r
+ KERNEL4(16 * 0)\r
+ prefetcht0 A_PR1+128(AO,%rax,4)\r
+ prefetcht0 B_PR1+128(BO,%rax,4)\r
+ KERNEL5(16 * 0)\r
+ KERNEL6(16 * 0)\r
+ prefetcht0 A_PR1+192(AO,%rax,4)\r
+ prefetcht0 B_PR1+192(BO,%rax,4)\r
+ KERNEL7(16 * 0)\r
+ KERNEL8(16 * 0)\r
+\r
+ addq $8 * SIZE, %rax \r
+ jnz .L12\r
+\r
+ .align 16\r
+#else\r
+#ifdef OPTMODULE\r
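+// SMP build with OPTMODULE: same prefetching, but a single copy of the
+// eight-kernel body per loop iteration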
+\r
+ prefetcht0 A_PR1(AO,%rax,4)\r
+ prefetcht0 B_PR1(BO,%rax,4)\r
+ KERNEL1(16 * 0)\r
+ KERNEL2(16 * 0)\r
+ prefetcht0 A_PR1+64(AO,%rax,4)\r
+ prefetcht0 B_PR1+64(BO,%rax,4)\r
+ KERNEL3(16 * 0)\r
+ KERNEL4(16 * 0)\r
+ prefetcht0 A_PR1+128(AO,%rax,4)\r
+ prefetcht0 B_PR1+128(BO,%rax,4)\r
+ KERNEL5(16 * 0)\r
+ KERNEL6(16 * 0)\r
+ prefetcht0 A_PR1+192(AO,%rax,4)\r
+ prefetcht0 B_PR1+192(BO,%rax,4)\r
+ KERNEL7(16 * 0)\r
+ KERNEL8(16 * 0)\r
+\r
+ addq $8 * SIZE, %rax \r
+ je .L15\r
+ jmp .L12\r
+ .align 16\r
+\r
+#else\r
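+	// generic SMP path: one eight-kernel body per iteration, no prefetching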
+ KERNEL1(16 * 0)\r
+ KERNEL2(16 * 0)\r
+ KERNEL3(16 * 0)\r
+ KERNEL4(16 * 0)\r
+ KERNEL5(16 * 0)\r
+ KERNEL6(16 * 0)\r
+ KERNEL7(16 * 0)\r
+ KERNEL8(16 * 0)\r
+\r
+ addq $8 * SIZE, %rax \r
+ je .L15\r
+ jmp .L12\r
+ .align 16\r
+\r
+#endif\r
+#endif\r
+\r
\r
.L15:\r
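+	// tail: broadcast ALPHA and process the k iterations left over from the
+	// unrolled loop; the accumulators are then merged into C as
+	// alpha * AB + C by the vfmaddpd block on CO1/CO2 below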
- vmovddup ALPHA, %xmm7\r
\r
#ifndef TRMMKERNEL\r
movq K, %rax\r
#else\r
movq KKK, %rax\r
#endif\r
+ vmovddup ALPHA, %xmm7\r
	andq	$3, %rax			# if (k & 3)
je .L19\r
\r
\r
vfmaddpd (CO1),%xmm7, %xmm8,%xmm8\r
vfmaddpd 2 * SIZE(CO1),%xmm7, %xmm12,%xmm12\r
- .align 2\r
vfmaddpd (CO1, LDC),%xmm7, %xmm9,%xmm9\r
vfmaddpd 2 * SIZE(CO1, LDC),%xmm7, %xmm13,%xmm13\r
- .align 2\r
vfmaddpd (CO2),%xmm7, %xmm10,%xmm10\r
vfmaddpd 2 * SIZE(CO2),%xmm7, %xmm14,%xmm14\r
- .align 2\r
vfmaddpd (CO2, LDC),%xmm7, %xmm11,%xmm11\r
vfmaddpd 2 * SIZE(CO2, LDC),%xmm7, %xmm15,%xmm15\r
\r