From: wernsaar
Date: Mon, 4 Mar 2013 16:37:38 +0000 (+0100)
Subject: new dgemm_kernel for bulldozer
X-Git-Tag: v0.2.9.rc1~82^2~19
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=9405f26f4b4feedf67a35b8aa0ce3e4882474d58;p=platform%2Fupstream%2Fopenblas.git

new dgemm_kernel for bulldozer
---

diff --git a/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S b/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S
index b06b07e..9d0c613 100644
--- a/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S
+++ b/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S
@@ -88,151 +88,142 @@
 #define movupd movups

 #define KERNEL1(xx) \
-	vfmaddpd	%xmm8,%xmm1,%xmm0,%xmm8 ;\
-	vmovaps	%xmm2, %xmm0 ;\
-	vmovups	-14 * SIZE(AO, %rax, 4),%xmm2 ;\
-	vfmaddpd	%xmm9,%xmm3,%xmm0,%xmm9 ;\
+	vmovups	-16 * SIZE(AO, %rax, 4),%xmm0 ;\
+	vfmaddpd	%xmm8,%xmm0,%xmm1,%xmm8 ;\
+	vmovaps	%xmm2,%xmm0 ;\
+	vmovddup	-16 * SIZE(BO, %rax, 4), %xmm1 ;\
+	vmovddup	-15 * SIZE(BO, %rax, 4), %xmm3 ;\
+	vfmaddpd	%xmm9,%xmm0,%xmm3,%xmm9 ;\
+	vmovups	-14 * SIZE(AO, %rax, 4),%xmm2 ;\
 	vfmaddpd	%xmm12,%xmm2,%xmm1,%xmm12 ;\
-	vmovddup	-14 * SIZE(BO, %rax, 4), %xmm1 ;\
 	vfmaddpd	%xmm13,%xmm2,%xmm3,%xmm13 ;\
+	vmovddup	-14 * SIZE(BO, %rax, 4), %xmm1 ;\
 	vmovddup	-13 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vfmaddpd	%xmm10,%xmm1,%xmm0,%xmm10 ;\
-	vfmaddpd	%xmm11,%xmm3,%xmm0,%xmm11 ;\
-	vfmaddpd	%xmm15,%xmm2, %xmm3,%xmm15 ;\
+	vfmaddpd	%xmm10,%xmm0,%xmm1,%xmm10 ;\
+	vfmaddpd	%xmm11,%xmm0,%xmm3,%xmm11 ;\
 	vfmaddpd	%xmm14,%xmm2, %xmm1,%xmm14 ;\
-	vmovups	-12 * SIZE(AO, %rax, 4), %xmm0 ;\
+	vfmaddpd	%xmm15,%xmm2, %xmm3,%xmm15 ;\
+	vmovups	-12 * SIZE(AO, %rax, 4), %xmm0 ;\
+	vmovups	-10 * SIZE(AO, %rax, 4),%xmm2 ;\
 	vmovddup	-12 * SIZE(BO, %rax, 4), %xmm1 ;\
 	vmovddup	-11 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vmovaps	%xmm0, %xmm2

 #define KERNEL2(xx) \
-	vfmaddpd	%xmm8,%xmm1, %xmm0,%xmm8 ;\
-	vmovaps	%xmm2, %xmm0 ;\
-	vmovups	-10 * SIZE(AO, %rax, 4),%xmm2 ;\
-/*A*/	vmovups	(AO, %rax, 4), %xmm6 ;\
+	vmovups	-8 * SIZE(AO, %rax, 4),%xmm4 ;\
+	vfmaddpd	%xmm8, %xmm0, %xmm1,%xmm8 ;\
+	vfmaddpd	%xmm9, %xmm0, %xmm3,%xmm9 ;\
 	vfmaddpd	%xmm12,%xmm2, %xmm1,%xmm12 ;\
-	vfmaddpd	%xmm9,%xmm3, %xmm0,%xmm9 ;\
-	vmovddup	-10 * SIZE(BO, %rax, 4), %xmm1 ;\
 	vfmaddpd	%xmm13,%xmm2, %xmm3,%xmm13 ;\
+	vmovddup	-10 * SIZE(BO, %rax, 4), %xmm1 ;\
 	vmovddup	-9 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vfmaddpd	%xmm10,%xmm1, %xmm0,%xmm10 ;\
+	vfmaddpd	%xmm10,%xmm0, %xmm1,%xmm10 ;\
+	vfmaddpd	%xmm11,%xmm0, %xmm3,%xmm11 ;\
 	vfmaddpd	%xmm14,%xmm2, %xmm1,%xmm14 ;\
-/**/	vmovddup	(BO, %rax, 4), %xmm1 ;\
-	vfmaddpd	%xmm11,%xmm3, %xmm0,%xmm11 ;\
 	vfmaddpd	%xmm15,%xmm2, %xmm3,%xmm15 ;\
+	vmovddup	-8 * SIZE(BO, %rax, 4), %xmm5 ;\
 	vmovddup	-7 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vmovaps	%xmm4, %xmm2

 #define KERNEL3(xx) \
-	vfmaddpd	%xmm8,%xmm5, %xmm4, %xmm8 ;\
-	vmovaps	%xmm2, %xmm4 ;\
-	vmovups	-6 * SIZE(AO, %rax, 4),%xmm2 ;\
+	vmovups	-6 * SIZE(AO, %rax, 4),%xmm2 ;\
+	vfmaddpd	%xmm8, %xmm4, %xmm5, %xmm8 ;\
+	vfmaddpd	%xmm9, %xmm4, %xmm3,%xmm9 ;\
 	vfmaddpd	%xmm12,%xmm2, %xmm5,%xmm12 ;\
-	vfmaddpd	%xmm9,%xmm3, %xmm4,%xmm9 ;\
-	vmovddup	-6 * SIZE(BO, %rax, 4), %xmm5 ;\
 	vfmaddpd	%xmm13,%xmm2, %xmm3,%xmm13 ;\
+	vmovddup	-6 * SIZE(BO, %rax, 4), %xmm5 ;\
 	vmovddup	-5 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vfmaddpd	%xmm10,%xmm5, %xmm4,%xmm10 ;\
+	vfmaddpd	%xmm10,%xmm4, %xmm5,%xmm10 ;\
+	vfmaddpd	%xmm11,%xmm4, %xmm3, %xmm11 ;\
 	vfmaddpd	%xmm14,%xmm2, %xmm5,%xmm14 ;\
-	vfmaddpd	%xmm11,%xmm3, %xmm4, %xmm11 ;\
-	vmovups	-4 * SIZE(AO, %rax, 4), %xmm4 ;\
+	vfmaddpd	%xmm15,%xmm2, %xmm3,%xmm15 ;\
+	vmovups	-4 * SIZE(AO, %rax, 4), %xmm4 ;\
+	vmovups	-2 * SIZE(AO, %rax, 4),%xmm2 ;\
 	vmovddup	-4 * SIZE(BO, %rax, 4), %xmm5 ;\
-	vfmaddpd	%xmm15,%xmm2,%xmm3,%xmm15 ;\
 	vmovddup	-3 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vmovaps	%xmm4, %xmm2

 #define KERNEL4(xx) \
-	vfmaddpd	%xmm8,%xmm5, %xmm4,%xmm8 ;\
-	vmovaps	%xmm2, %xmm4 ;\
-	vmovups	-2 * SIZE(AO, %rax, 4),%xmm2 ;\
+	vfmaddpd	%xmm8,%xmm4, %xmm5,%xmm8 ;\
+	vfmaddpd	%xmm9,%xmm4, %xmm3,%xmm9 ;\
 	vfmaddpd	%xmm12,%xmm2, %xmm5 ,%xmm12;\
-/*A*/	vmovups	8 * SIZE(AO, %rax, 4), %xmm7 ;\
-	vmovddup	-2 * SIZE(BO, %rax, 4), %xmm5 ;\
-	vfmaddpd	%xmm9,%xmm3, %xmm4,%xmm9 ;\
 	vfmaddpd	%xmm13,%xmm2, %xmm3,%xmm13 ;\
+	vmovddup	-2 * SIZE(BO, %rax, 4), %xmm5 ;\
 	vmovddup	-1 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vfmaddpd	%xmm10,%xmm5, %xmm4,%xmm10 ;\
+	vfmaddpd	%xmm10,%xmm4, %xmm5,%xmm10 ;\
+	vfmaddpd	%xmm11,%xmm4, %xmm3,%xmm11 ;\
 	vfmaddpd	%xmm14,%xmm2, %xmm5,%xmm14 ;\
-/**/	vmovddup	8 * SIZE(BO, %rax, 4), %xmm5 ;\
-	vfmaddpd	%xmm11,%xmm3, %xmm4,%xmm11 ;\
 	vfmaddpd	%xmm15,%xmm2, %xmm3,%xmm15 ;\
+/*A*/	vmovups	(AO, %rax, 4), %xmm6 ;\
+	vmovups	2 * SIZE(AO, %rax, 4),%xmm2 ;\
 	vmovddup	1 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vmovaps	%xmm6, %xmm2
+/**/	vmovddup	(BO, %rax, 4), %xmm1 ;\

 #define KERNEL5(xx) \
-	vfmaddpd	%xmm8,%xmm1, %xmm6,%xmm8 ;\
-	vmovaps	%xmm2, %xmm6 ;\
-	vmovups	2 * SIZE(AO, %rax, 4),%xmm2 ;\
+	vfmaddpd	%xmm8,%xmm6, %xmm1,%xmm8 ;\
+	vfmaddpd	%xmm9,%xmm6, %xmm3,%xmm9 ;\
 	vfmaddpd	%xmm12,%xmm2, %xmm1,%xmm12 ;\
-	vmovddup	2 * SIZE(BO, %rax, 4), %xmm1 ;\
-	vfmaddpd	%xmm9,%xmm3, %xmm6,%xmm9 ;\
 	vfmaddpd	%xmm13,%xmm2, %xmm3,%xmm13 ;\
+	vmovddup	2 * SIZE(BO, %rax, 4), %xmm1 ;\
 	vmovddup	3 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vfmaddpd	%xmm10,%xmm1, %xmm6,%xmm10 ;\
+	vfmaddpd	%xmm10,%xmm6, %xmm1,%xmm10 ;\
+	vfmaddpd	%xmm11,%xmm6, %xmm3,%xmm11 ;\
 	vfmaddpd	%xmm14,%xmm2, %xmm1,%xmm14 ;\
-	vfmaddpd	%xmm11,%xmm3, %xmm6,%xmm11 ;\
-	vmovups	4 * SIZE(AO, %rax, 4), %xmm6 ;\
-	vmovddup	4 * SIZE(BO, %rax, 4), %xmm1 ;\
 	vfmaddpd	%xmm15,%xmm2, %xmm3,%xmm15 ;\
-	vmovddup	5 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vmovaps	%xmm6, %xmm2
+	vmovups	4 * SIZE(AO, %rax, 4), %xmm6 ;\
+	vmovups	6 * SIZE(AO, %rax, 4),%xmm2 ;\
+	vmovddup	4 * SIZE(BO, %rax, 4), %xmm1 ;\
+	vmovddup	5 * SIZE(BO, %rax, 4), %xmm3 ;\

 #define KERNEL6(xx) \
-	vfmaddpd	%xmm8,%xmm1, %xmm6,%xmm8 ;\
-	vmovaps	%xmm2, %xmm6 ;\
-	vmovups	6 * SIZE(AO, %rax, 4),%xmm2 ;\
+	vfmaddpd	%xmm8,%xmm6, %xmm1,%xmm8 ;\
+	vfmaddpd	%xmm9,%xmm6, %xmm3,%xmm9 ;\
 	vfmaddpd	%xmm12,%xmm2, %xmm1,%xmm12 ;\
-/*A*/	vmovups	16 * SIZE(AO, %rax, 4), %xmm0 ;\
-	vmovddup	6 * SIZE(BO, %rax, 4), %xmm1 ;\
-	vfmaddpd	%xmm9,%xmm3, %xmm6,%xmm9 ;\
 	vfmaddpd	%xmm13,%xmm2, %xmm3,%xmm13 ;\
+	vmovddup	6 * SIZE(BO, %rax, 4), %xmm1 ;\
 	vmovddup	7 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vfmaddpd	%xmm10,%xmm1, %xmm6,%xmm10 ;\
+	vfmaddpd	%xmm10,%xmm6, %xmm1,%xmm10 ;\
+	vfmaddpd	%xmm11,%xmm6, %xmm3,%xmm11 ;\
 	vfmaddpd	%xmm14,%xmm2, %xmm1,%xmm14 ;\
-/**/	vmovddup	16 * SIZE(BO, %rax, 4), %xmm1 ;\
-	vfmaddpd	%xmm11,%xmm3, %xmm6,%xmm11 ;\
-	vfmaddpd	%xmm15,%xmm2,%xmm3,%xmm15 ;\
+	vfmaddpd	%xmm15,%xmm2, %xmm3,%xmm15 ;\
+/*A*/	vmovups	8 * SIZE(AO, %rax, 4), %xmm7 ;\
+	vmovups	10 * SIZE(AO, %rax, 4),%xmm2 ;\
+/**/	vmovddup	8 * SIZE(BO, %rax, 4), %xmm5 ;\
 	vmovddup	9 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vmovaps	%xmm7, %xmm2

 #define KERNEL7(xx) \
-	vfmaddpd	%xmm8,%xmm5, %xmm7,%xmm8 ;\
-	vmovaps	%xmm2, %xmm7 ;\
-	vmovups	10 * SIZE(AO, %rax, 4),%xmm2 ;\
+	vfmaddpd	%xmm8,%xmm7, %xmm5,%xmm8 ;\
+	vfmaddpd	%xmm9,%xmm7, %xmm3,%xmm9 ;\
 	vfmaddpd	%xmm12,%xmm2, %xmm5,%xmm12 ;\
-	vmovddup	10 * SIZE(BO, %rax, 4), %xmm5 ;\
-	vfmaddpd	%xmm9,%xmm3, %xmm7,%xmm9 ;\
 	vfmaddpd	%xmm13,%xmm2, %xmm3,%xmm13 ;\
+	vmovddup	10 * SIZE(BO, %rax, 4), %xmm5 ;\
 	vmovddup	11 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vfmaddpd	%xmm10,%xmm5, %xmm7,%xmm10 ;\
+	vfmaddpd	%xmm10,%xmm7, %xmm5,%xmm10 ;\
+	vfmaddpd	%xmm11,%xmm7, %xmm3,%xmm11 ;\
 	vfmaddpd	%xmm14,%xmm2, %xmm5,%xmm14 ;\
-	vfmaddpd	%xmm11,%xmm3, %xmm7,%xmm11 ;\
-	vmovups	12 * SIZE(AO, %rax, 4), %xmm7 ;\
-	vmovddup	12 * SIZE(BO, %rax, 4), %xmm5 ;\
 	vfmaddpd	%xmm15,%xmm2, %xmm3,%xmm15 ;\
+	vmovups	12 * SIZE(AO, %rax, 4), %xmm7 ;\
+	vmovups	14 * SIZE(AO, %rax, 4),%xmm2 ;\
+	vmovddup	12 * SIZE(BO, %rax, 4), %xmm5 ;\
 	vmovddup	13 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vmovaps	%xmm7, %xmm2

 #define KERNEL8(xx) \
-	vfmaddpd	%xmm8,%xmm5, %xmm7,%xmm8 ;\
-	vmovaps	%xmm2, %xmm7 ;\
-	vmovups	14 * SIZE(AO, %rax, 4),%xmm2 ;\
-/*A*/	vmovups	24 * SIZE(AO, %rax, 4), %xmm4 ;\
+	vfmaddpd	%xmm8,%xmm7, %xmm5,%xmm8 ;\
+	vfmaddpd	%xmm9,%xmm7, %xmm3,%xmm9 ;\
 	vfmaddpd	%xmm12,%xmm2, %xmm5,%xmm12 ;\
-	vfmaddpd	%xmm9,%xmm3, %xmm7,%xmm9 ;\
-	vmovddup	14 * SIZE(BO, %rax, 4), %xmm5 ;\
 	vfmaddpd	%xmm13, %xmm2, %xmm3,%xmm13 ;\
+	vmovddup	14 * SIZE(BO, %rax, 4), %xmm5 ;\
 	vmovddup	15 * SIZE(BO, %rax, 4), %xmm3 ;\
-	vfmaddpd	%xmm10,%xmm5, %xmm7,%xmm10 ;\
+	vfmaddpd	%xmm10,%xmm7, %xmm5,%xmm10 ;\
+	vfmaddpd	%xmm11,%xmm7, %xmm3,%xmm11 ;\
 	vfmaddpd	%xmm14,%xmm2, %xmm5,%xmm14 ;\
-/**/	vmovddup	24 * SIZE(BO, %rax, 4), %xmm5 ;\
-	vfmaddpd	%xmm11,%xmm3, %xmm7,%xmm11 ;\
 	vfmaddpd	%xmm15,%xmm2, %xmm3,%xmm15 ;\
+/*A*/	vmovups	16 * SIZE(AO, %rax, 4), %xmm0 ;\
+	vmovddup	16 * SIZE(BO, %rax, 4), %xmm1 ;\
 	vmovddup	17 * SIZE(BO, %rax, 4), %xmm3 ;\
 	vmovaps	%xmm0, %xmm2 ;\
 	addq	$8 * SIZE, %rax ;\

 #define KERNEL_SUB1(xx) \
+	vmovddup	-15 * SIZE(BO), %xmm3 ;\
+	vmovups	-16 * SIZE(AO),%xmm0 ;\
 	vfmaddpd	%xmm8, %xmm1, %xmm0,%xmm8 ;\
 	vmovapd	%xmm2, %xmm0 ;\
 	vmovups	-14 * SIZE(AO),%xmm2 ;\
@@ -255,17 +246,17 @@
 	vfmaddpd	%xmm8,%xmm1, %xmm0,%xmm8 ;\
 	vmovaps	%xmm2, %xmm0 ;\
 	vmovups	-10 * SIZE(AO),%xmm2 ;\
+	vmovups	-8 * SIZE(AO),%xmm4 ;\
 	vfmaddpd	%xmm12,%xmm2, %xmm1,%xmm12 ;\
 	vfmaddpd	%xmm9,%xmm3, %xmm0,%xmm9 ;\
 	vfmaddpd	%xmm13,%xmm2, %xmm3,%xmm13 ;\
 	vmovddup	-10 * SIZE(BO), %xmm1 ;\
 	vmovddup	-9 * SIZE(BO), %xmm3 ;\
+	vmovddup	-8 * SIZE(BO), %xmm5 ;\
 	vfmaddpd	%xmm10,%xmm1, %xmm0,%xmm10 ;\
 	vfmaddpd	%xmm14,%xmm2, %xmm1,%xmm14 ;\
 	vfmaddpd	%xmm11,%xmm3, %xmm0,%xmm11 ;\
 	vfmaddpd	%xmm15,%xmm2, %xmm3,%xmm15 ;\
-	vmovups	(AO), %xmm0 ;\
-	vmovddup	(BO), %xmm1 ;\
 	vmovddup	-7 * SIZE(BO), %xmm3 ;\
 	vmovaps	%xmm4, %xmm2

@@ -291,11 +282,13 @@
 	vfmaddpd	%xmm8,%xmm5, %xmm4,%xmm8 ;\
 	vmovaps	%xmm2, %xmm4 ;\
 	vmovups	-2 * SIZE(AO),%xmm2 ;\
+	vmovups	(AO), %xmm0 ;\
 	vfmaddpd	%xmm12,%xmm2, %xmm5,%xmm12 ;\
 	vfmaddpd	%xmm13,%xmm2, %xmm3,%xmm13 ;\
 	vmovddup	-2 * SIZE(BO), %xmm5 ;\
 	vfmaddpd	%xmm9,%xmm3, %xmm4,%xmm9 ;\
 	vmovddup	-1 * SIZE(BO), %xmm3 ;\
+	vmovddup	(BO), %xmm1 ;\
 	vfmaddpd	%xmm10,%xmm5, %xmm4,%xmm10 ;\
 	vfmaddpd	%xmm14,%xmm2, %xmm5,%xmm14 ;\
 	vfmaddpd	%xmm11,%xmm3, %xmm4,%xmm11 ;\
@@ -407,16 +400,26 @@
 	leaq	(B, %rax, 4), BO
 #endif

-	vzeroall
-	prefetcht0	256(CO1)
-	prefetcht0	320(CO1)
-	prefetcht0	256(CO2)
-	prefetcht0	320(CO2)
+	vxorpd	%xmm8, %xmm8,%xmm8
+	vxorpd	%xmm9, %xmm9,%xmm9
+	vxorpd	%xmm10, %xmm10,%xmm10
+	vxorpd	%xmm11, %xmm11,%xmm11
+	vxorpd	%xmm12, %xmm12,%xmm12
+	vxorpd	%xmm13, %xmm13,%xmm13
+	vxorpd	%xmm14, %xmm14,%xmm14
+	vxorpd	%xmm15, %xmm15,%xmm15
+
+	prefetcht0	(CO1)
+	prefetcht0	8*SIZE(CO1)
+	prefetcht0	(CO1,LDC)
+	prefetcht0	8*SIZE(CO1,LDC)
+	prefetcht0	(CO2)
+	prefetcht0	8*SIZE(CO2)
+	prefetcht0	(CO2,LDC)
+	prefetcht0	8*(CO2,LDC)

 	vmovups	-16 * SIZE(AO), %xmm0
 	vmovddup	-16 * SIZE(BO), %xmm1
 	vmovddup	-15 * SIZE(BO), %xmm3
-	vmovups	-8 * SIZE(AO), %xmm4
-	vmovddup	-8 * SIZE(BO), %xmm5
 	vmovaps	%xmm0, %xmm2

@@ -448,8 +451,10 @@
 	.align 16

 .L12:
-	prefetcht0 (AO,%rax,4)
-	prefetcht0 (BO,%rax,4)
+	prefetcht0	24*SIZE(AO,%rax,4)
+	prefetcht0	32*SIZE(AO,%rax,4)
+	prefetcht0	24*SIZE(BO,%rax,4)
+	prefetcht0	32*SIZE(BO,%rax,4)
 	KERNEL1(16 * 0)
 	KERNEL2(16 * 0)
 	KERNEL3(16 * 0)
@@ -460,6 +465,10 @@
 	KERNEL8(16 * 0)
 	NOBRANCH
 	je	.L15
+	prefetcht0	24*SIZE(AO,%rax,4)
+	prefetcht0	32*SIZE(AO,%rax,4)
+	prefetcht0	24*SIZE(BO,%rax,4)
+	prefetcht0	32*SIZE(BO,%rax,4)
 	KERNEL1(16 * 0)
 	KERNEL2(16 * 0)
 	KERNEL3(16 * 0)
@@ -470,6 +479,10 @@
 	KERNEL8(16 * 0)
 	NOBRANCH
 	je	.L15
+	prefetcht0	24*SIZE(AO,%rax,4)
+	prefetcht0	32*SIZE(AO,%rax,4)
+	prefetcht0	24*SIZE(BO,%rax,4)
+	prefetcht0	32*SIZE(BO,%rax,4)
 	KERNEL1(16 * 0)
 	KERNEL2(16 * 0)
 	KERNEL3(16 * 0)
@@ -480,6 +493,10 @@
 	KERNEL8(16 * 0)
 	NOBRANCH
 	je	.L15
+	prefetcht0	24*SIZE(AO,%rax,4)
+	prefetcht0	32*SIZE(AO,%rax,4)
+	prefetcht0	24*SIZE(BO,%rax,4)
+	prefetcht0	32*SIZE(BO,%rax,4)
 	KERNEL1(16 * 0)
 	KERNEL2(16 * 0)
 	KERNEL3(16 * 0)
@@ -490,6 +507,10 @@
 	KERNEL8(16 * 0)
 	NOBRANCH
 	je	.L15
+	prefetcht0	24*SIZE(AO,%rax,4)
+	prefetcht0	32*SIZE(AO,%rax,4)
+	prefetcht0	24*SIZE(BO,%rax,4)
+	prefetcht0	32*SIZE(BO,%rax,4)
 	KERNEL1(16 * 0)
 	KERNEL2(16 * 0)
 	KERNEL3(16 * 0)
@@ -500,6 +521,10 @@
 	KERNEL8(16 * 0)
 	NOBRANCH
 	je	.L15
+	prefetcht0	24*SIZE(AO,%rax,4)
+	prefetcht0	32*SIZE(AO,%rax,4)
+	prefetcht0	24*SIZE(BO,%rax,4)
+	prefetcht0	32*SIZE(BO,%rax,4)
 	KERNEL1(16 * 0)
 	KERNEL2(16 * 0)
 	KERNEL3(16 * 0)
@@ -510,6 +535,10 @@
 	KERNEL8(16 * 0)
 	NOBRANCH
 	je	.L15
+	prefetcht0	24*SIZE(AO,%rax,4)
+	prefetcht0	32*SIZE(AO,%rax,4)
+	prefetcht0	24*SIZE(BO,%rax,4)
+	prefetcht0	32*SIZE(BO,%rax,4)
 	KERNEL1(16 * 0)
 	KERNEL2(16 * 0)
 	KERNEL3(16 * 0)
@@ -520,6 +549,10 @@
 	KERNEL8(16 * 0)
 	NOBRANCH
 	je	.L15
+	prefetcht0	24*SIZE(AO,%rax,4)
+	prefetcht0	32*SIZE(AO,%rax,4)
+	prefetcht0	24*SIZE(BO,%rax,4)
+	prefetcht0	32*SIZE(BO,%rax,4)
 	KERNEL1(16 * 0)
 	KERNEL2(16 * 0)
 	KERNEL3(16 * 0)
@@ -619,16 +652,12 @@
 #endif


-	.align 2
 	vmovups	%xmm8, (CO1)
 	vmovups	%xmm12, 2 * SIZE(CO1)
-	.align 2
 	vmovups	%xmm9, (CO1, LDC)
 	vmovups	%xmm13, 2 * SIZE(CO1, LDC)
-	.align 2
 	vmovups	%xmm10, (CO2)
 	vmovups	%xmm14, 2 * SIZE(CO2)
-	.align 2
 	vmovups	%xmm11, (CO2, LDC)
 	vmovups	%xmm15, 2 * SIZE(CO2, LDC)

@@ -1019,17 +1048,7 @@
 	vxorps	%xmm13, %xmm13,%xmm13
 	vmovups	-16 * SIZE(AO), %xmm0
 	vmovups	-8 * SIZE(AO), %xmm4
-	// prefetcht0 256(CO1)
-	// prefetcht0 320(CO1)
-	// prefetcht0 256(CO2)
-	// prefetcht0 320(CO2)
-	// prefetchnta 24 * SIZE(CO1)
-	// prefetchnta 32 * SIZE(CO1)
-	// prefetchw 3 * SIZE(CO1)
 	vmovups	%xmm0, %xmm2
-	// prefetchw 3 * SIZE(CO2)
-	// prefetchnta -16 * SIZE(BB)
-	// prefetch -16 * SIZE(BB)
 	subq	$-8 * SIZE, BB

 #ifndef TRMMKERNEL