* 53 GFLOPS with 4 threads on 2 modules\r
* 46 GFLOPS with 2 threads on 2 modules\r
* 28 GFLOPS with 2 threads on 1 module\r
-* 23,6 GFLOPS with 1 thread on 1 module\r
+* 23.1 GFLOPS with 1 thread on 1 module
*********************************************************************/\r
\r
#define ASSEMBLER\r
#define B_PR1 512\r
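+ /* B_PR1: prefetch distance, in bytes, ahead of the current position in
+    the B panel (used below as prefetcht0 B_PR1(BO,%rax,4)) */

+ /* KERNEL1..KERNEL8: one k-step each of the 4x4 double-precision tile,
+    unrolled 8x. Each step issues eight FMA4 vfmaddpd (acc = a*b + acc)
+    into the accumulators xmm8-xmm15. The reordering below interleaves
+    every vmovups/vmovddup with FMAs and hoists each macro's leading A/B
+    loads (xmm0, xmm3) into the tail of the preceding macro, so operands
+    are already in flight when they are consumed. */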
\r
#define KERNEL1(xx) \\r
- vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 ;\\r
vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\\r
- vfmaddpd %xmm8,%xmm6,%xmm7,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm2,%xmm7,%xmm12 ;\\r
+ vfmaddpd %xmm8,%xmm6,%xmm7,%xmm8 ;\\r
vmovddup -14 * SIZE(BO, %rax, 4), %xmm5 ;\\r
+ vfmaddpd %xmm12,%xmm2,%xmm7,%xmm12 ;\\r
vmovddup -13 * SIZE(BO, %rax, 4), %xmm4 ;\\r
- vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\\r
- vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\\r
- vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\\r
- vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\\r
- vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\\r
+ vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\\r
vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\\r
- vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
- vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
- vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\\r
+ vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\\r
+ vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\\r
+ vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\\r
+ vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\\r
+ vmovups -12 * SIZE(AO, %rax, 4),%xmm0 ;\\r
+ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
+ vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\\r
+ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
\r
#define KERNEL2(xx) \\r
- vmovups -12 * SIZE(AO, %rax, 4), %xmm2 ;\\r
- vmovups -10 * SIZE(AO, %rax, 4),%xmm0 ;\\r
- vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\\r
- vfmaddpd %xmm8, %xmm2, %xmm1,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm0, %xmm1,%xmm12 ;\\r
+ vmovups -10 * SIZE(AO, %rax, 4), %xmm2 ;\\r
+ vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\\r
+ vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\\r
vmovddup -10 * SIZE(BO, %rax, 4), %xmm5 ;\\r
- vfmaddpd %xmm9, %xmm2, %xmm3,%xmm9 ;\\r
- vfmaddpd %xmm13,%xmm0, %xmm3,%xmm13 ;\\r
+ vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\\r
+ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\\r
vmovddup -9 * SIZE(BO, %rax, 4), %xmm4 ;\\r
- vfmaddpd %xmm10,%xmm2, %xmm5,%xmm10 ;\\r
- vfmaddpd %xmm14,%xmm0, %xmm5,%xmm14 ;\\r
- vfmaddpd %xmm11,%xmm2, %xmm4,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm0, %xmm4,%xmm15 ;\\r
+ vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\\r
+ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
+ vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\\r
+ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
\r
#define KERNEL3(xx) \\r
- vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\\r
- vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\\r
- vfmaddpd %xmm8, %xmm6, %xmm7, %xmm8 ;\\r
- vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\\r
+ vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\\r
+ vfmaddpd %xmm8, %xmm6, %xmm7, %xmm8 ;\\r
vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\\r
+ vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\\r
vmovddup -5 * SIZE(BO, %rax, 4), %xmm4 ;\\r
- vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\\r
- vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\\r
+ vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\\r
vmovddup (BO, %rax, 4), %xmm7 ;\\r
- vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\\r
- vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
+ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\\r
vmovddup -4 * SIZE(BO, %rax, 4), %xmm1 ;\\r
- vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\\r
- vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
+ vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\\r
+ vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\\r
+ vmovups -4 * SIZE(AO, %rax, 4),%xmm0 ;\\r
+ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
vmovups (AO, %rax, 4), %xmm6 ;\\r
+ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
\r
#define KERNEL4(xx) \\r
- vmovups -4 * SIZE(AO, %rax, 4), %xmm2 ;\\r
- vmovups -2 * SIZE(AO, %rax, 4),%xmm0 ;\\r
- vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\\r
- vfmaddpd %xmm8,%xmm2, %xmm1,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm0, %xmm1 ,%xmm12;\\r
+ vmovups -2 * SIZE(AO, %rax, 4), %xmm2 ;\\r
+ vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\\r
+	vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\
vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\\r
- vfmaddpd %xmm9,%xmm2, %xmm3,%xmm9 ;\\r
- vfmaddpd %xmm13,%xmm0, %xmm3,%xmm13 ;\\r
+ vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\\r
+ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\\r
vmovddup -1 * SIZE(BO, %rax, 4), %xmm4 ;\\r
- vfmaddpd %xmm10,%xmm2, %xmm5,%xmm10 ;\\r
- vfmaddpd %xmm14,%xmm0, %xmm5,%xmm14 ;\\r
- vfmaddpd %xmm11,%xmm2, %xmm4,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm0, %xmm4,%xmm15 ;\\r
+ vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\\r
+ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
+ vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\\r
+ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
\r
#define KERNEL5(xx) \\r
vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\\r
- vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\\r
- vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\\r
+ vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\\r
vmovddup 2 * SIZE(BO, %rax, 4), %xmm5 ;\\r
+ vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\\r
vmovddup 3 * SIZE(BO, %rax, 4), %xmm4 ;\\r
- vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\\r
- vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\\r
+ vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\\r
vmovddup 8 * SIZE(BO, %rax, 4), %xmm7 ;\\r
- vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\\r
- vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
- vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\\r
- vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
+ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\\r
+ vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\\r
+ vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\\r
+ vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\\r
+ vmovups 4 * SIZE(AO, %rax, 4),%xmm0 ;\\r
+ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
vmovups 8 * SIZE(AO, %rax, 4), %xmm6 ;\\r
+ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
\r
#define KERNEL6(xx) \\r
- vmovups 4 * SIZE(AO, %rax, 4), %xmm2 ;\\r
- vmovups 6 * SIZE(AO, %rax, 4),%xmm0 ;\\r
- vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\\r
- vfmaddpd %xmm8,%xmm2, %xmm1,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm0, %xmm1,%xmm12 ;\\r
+ vmovups 6 * SIZE(AO, %rax, 4), %xmm2 ;\\r
+ vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\\r
+ vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\\r
vmovddup 6 * SIZE(BO, %rax, 4), %xmm5 ;\\r
- vfmaddpd %xmm9,%xmm2, %xmm3,%xmm9 ;\\r
- vfmaddpd %xmm13,%xmm0, %xmm3,%xmm13 ;\\r
+ vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\\r
+ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\\r
vmovddup 7 * SIZE(BO, %rax, 4), %xmm4 ;\\r
- vfmaddpd %xmm10,%xmm2, %xmm5,%xmm10 ;\\r
- vfmaddpd %xmm14,%xmm0, %xmm5,%xmm14 ;\\r
- vfmaddpd %xmm11,%xmm2, %xmm4,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm0, %xmm4,%xmm15 ;\\r
+ vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\\r
+ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
+ vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\\r
+ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
\r
#define KERNEL7(xx) \\r
vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\\r
- vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\\r
- vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\\r
+ vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\\r
vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\\r
+ vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\\r
vmovddup 11 * SIZE(BO, %rax, 4), %xmm4 ;\\r
- vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\\r
- vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\\r
+ vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\\r
vmovddup 16 * SIZE(BO, %rax, 4), %xmm7 ;\\r
- vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\\r
- vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
+ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\\r
vmovddup 12 * SIZE(BO, %rax, 4), %xmm1 ;\\r
- vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
+ vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\\r
+ vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\\r
+ vmovups 12 * SIZE(AO, %rax, 4), %xmm0 ;\\r
+ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
vmovups 16 * SIZE(AO, %rax, 4),%xmm6 ;\\r
+ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
\r
#define KERNEL8(xx) \\r
- vmovups 12 * SIZE(AO, %rax, 4), %xmm2 ;\\r
- vmovups 14 * SIZE(AO, %rax, 4), %xmm0 ;\\r
- vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\\r
- vfmaddpd %xmm8,%xmm2, %xmm1,%xmm8 ;\\r
- vfmaddpd %xmm12,%xmm0, %xmm1,%xmm12 ;\\r
+ vmovups 14 * SIZE(AO, %rax, 4), %xmm2 ;\\r
+ vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\\r
+ vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\\r
vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\\r
- vfmaddpd %xmm9,%xmm2, %xmm3,%xmm9 ;\\r
- vfmaddpd %xmm13, %xmm0, %xmm3,%xmm13 ;\\r
+ vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\\r
+ vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\\r
vmovddup 15 * SIZE(BO, %rax, 4), %xmm4 ;\\r
- vfmaddpd %xmm10,%xmm2, %xmm5,%xmm10 ;\\r
- vfmaddpd %xmm14,%xmm0, %xmm5,%xmm14 ;\\r
- vfmaddpd %xmm11,%xmm2, %xmm4,%xmm11 ;\\r
- vfmaddpd %xmm15,%xmm0, %xmm4,%xmm15 ;\\r
+ vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\\r
+ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\\r
+ vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\\r
+ vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\\r
+ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\\r
\r
#define KERNEL_SUB1(xx) \\r
vmovups -16 * SIZE(AO),%xmm0 ;\\r
\r
vmovups -16 * SIZE(AO, %rax, 4),%xmm6 \r
vmovddup -16 * SIZE(BO, %rax, 4), %xmm7 \r
+ vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 \r
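+ /* xmm3 is now preloaded as well: KERNEL1 no longer loads -15*SIZE(BO)
+    itself, so the value must already be in flight when the loop is
+    entered (KERNEL8 reloads it at 17*SIZE for the next pass) */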
\r
.align 16\r
\r
.L12:\r
\r
-#ifndef SMP \r
-\r
- prefetcht0 A_PR1(AO,%rax,4)\r
- prefetcht0 B_PR1(BO,%rax,4)\r
- KERNEL1(16 * 0)\r
- KERNEL2(16 * 0)\r
- prefetcht0 A_PR1+64(AO,%rax,4)\r
- prefetcht0 B_PR1+64(BO,%rax,4)\r
- KERNEL3(16 * 0)\r
- KERNEL4(16 * 0)\r
- prefetcht0 A_PR1+128(AO,%rax,4)\r
- prefetcht0 B_PR1+128(BO,%rax,4)\r
- KERNEL5(16 * 0)\r
- KERNEL6(16 * 0)\r
- prefetcht0 A_PR1+192(AO,%rax,4)\r
- prefetcht0 B_PR1+192(BO,%rax,4)\r
- KERNEL7(16 * 0)\r
- KERNEL8(16 * 0)\r
-\r
- addq $8 * SIZE, %rax \r
- je .L15\r
-\r
- prefetcht0 A_PR1(AO,%rax,4)\r
- prefetcht0 B_PR1(BO,%rax,4)\r
- KERNEL1(16 * 0)\r
- KERNEL2(16 * 0)\r
- prefetcht0 A_PR1+64(AO,%rax,4)\r
- prefetcht0 B_PR1+64(BO,%rax,4)\r
- KERNEL3(16 * 0)\r
- KERNEL4(16 * 0)\r
- prefetcht0 A_PR1+128(AO,%rax,4)\r
- prefetcht0 B_PR1+128(BO,%rax,4)\r
- KERNEL5(16 * 0)\r
- KERNEL6(16 * 0)\r
- prefetcht0 A_PR1+192(AO,%rax,4)\r
- prefetcht0 B_PR1+192(BO,%rax,4)\r
- KERNEL7(16 * 0)\r
- KERNEL8(16 * 0)\r
-\r
- addq $8 * SIZE, %rax \r
- je .L15\r
-\r
-\r
- prefetcht0 A_PR1(AO,%rax,4)\r
- prefetcht0 B_PR1(BO,%rax,4)\r
- KERNEL1(16 * 0)\r
- KERNEL2(16 * 0)\r
- prefetcht0 A_PR1+64(AO,%rax,4)\r
- prefetcht0 B_PR1+64(BO,%rax,4)\r
- KERNEL3(16 * 0)\r
- KERNEL4(16 * 0)\r
- prefetcht0 A_PR1+128(AO,%rax,4)\r
- prefetcht0 B_PR1+128(BO,%rax,4)\r
- KERNEL5(16 * 0)\r
- KERNEL6(16 * 0)\r
- prefetcht0 A_PR1+192(AO,%rax,4)\r
- prefetcht0 B_PR1+192(BO,%rax,4)\r
- KERNEL7(16 * 0)\r
- KERNEL8(16 * 0)\r
-\r
- addq $8 * SIZE, %rax \r
- je .L15\r
-\r
- prefetcht0 A_PR1(AO,%rax,4)\r
- prefetcht0 B_PR1(BO,%rax,4)\r
- KERNEL1(16 * 0)\r
- KERNEL2(16 * 0)\r
- prefetcht0 A_PR1+64(AO,%rax,4)\r
- prefetcht0 B_PR1+64(BO,%rax,4)\r
- KERNEL3(16 * 0)\r
- KERNEL4(16 * 0)\r
- prefetcht0 A_PR1+128(AO,%rax,4)\r
- prefetcht0 B_PR1+128(BO,%rax,4)\r
- KERNEL5(16 * 0)\r
- KERNEL6(16 * 0)\r
- prefetcht0 A_PR1+192(AO,%rax,4)\r
- prefetcht0 B_PR1+192(BO,%rax,4)\r
- KERNEL7(16 * 0)\r
- KERNEL8(16 * 0)\r
-\r
- addq $8 * SIZE, %rax \r
- jnz .L12\r
-\r
- .align 16\r
-#else\r
-#ifdef OPTMODULE\r
+#if defined(OPTBYMODULE) || !defined(SMP)\r
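+ /* in non-SMP builds, and in SMP builds compiled with OPTBYMODULE,
+    prefetch the A and B streams A_PR1/B_PR1 bytes ahead at the top of
+    each pass through the unrolled loop */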
\r
prefetcht0 A_PR1(AO,%rax,4)\r
prefetcht0 B_PR1(BO,%rax,4)\r
.align 16\r
\r
#endif\r
-#endif\r
\r
\r
.L15:\r