From: wernsaar Date: Sat, 3 Aug 2013 09:43:25 +0000 (+0200) Subject: repaired trmm bug in sgemm_kernel_16x2_bulldozer.S X-Git-Tag: v0.2.9.rc1~31^2~1^2^2~5 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e45a347cd2d2dc36f4425fa2796c0f2e731bea51;p=platform%2Fupstream%2Fopenblas.git repaired trmm bug in sgemm_kernel_16x2_bulldozer.S --- diff --git a/kernel/x86_64/sgemm_kernel_16x2_bulldozer.S b/kernel/x86_64/sgemm_kernel_16x2_bulldozer.S index f02a1df..2a034f0 100644 --- a/kernel/x86_64/sgemm_kernel_16x2_bulldozer.S +++ b/kernel/x86_64/sgemm_kernel_16x2_bulldozer.S @@ -981,6 +981,8 @@ /*******************************************************************************************/ +#if !defined(TRMMKERNEL) + PROLOGUE PROFCODE @@ -1016,16 +1018,11 @@ movq OLD_B, B movq OLD_C, C movq OLD_LDC, LDC -#ifdef TRMMKERNEL - movsd OLD_OFFSET, %xmm12 -#endif + vmovaps %xmm3, %xmm0 #else movq STACKSIZE + 8(%rsp), LDC -#ifdef TRMMKERNEL - movsd STACKSIZE + 16(%rsp), %xmm12 -#endif #endif @@ -1059,15 +1056,6 @@ movq %rax, Ndiv6 // N / 6 movq %rdx, Nmod6 // N % 6 - - -#ifdef TRMMKERNEL - vmovsd %xmm12, OFFSET - vmovsd %xmm12, KK -#ifndef LEFT - negq KK -#endif -#endif movq Ndiv6, J cmpq $0, J @@ -1248,10 +1236,6 @@ leaq (C, LDC, 2), C leaq (C, LDC, 1), C // c += 3 * ldc -#if defined(TRMMKERNEL) && defined(LEFT) - movq OFFSET, %rax - movq %rax, KK -#endif movq A, AO // aoffset = a addq $32 * SIZE, AO @@ -1263,40 +1247,12 @@ ALIGN_4 .L6_11: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) leaq BUFFER1, BO // first buffer to BO addq $6 * SIZE, BO -#else - movq KK, %rax - leaq BUFFER1, BO // first buffer to BO - addq $6 * SIZE, BO - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $4, %rax // rax = rax * 16 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - vzeroall -#ifndef TRMMKERNEL movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $16, %rax // number of values in AO -#else - addq $3, %rax // number of values in BO -#endif - movq %rax, KKK -#endif andq $-8, %rax // K = K - ( K % 8 ) je .L6_16 @@ -1343,11 +1299,7 @@ ALIGN_4 .L6_16: -#ifndef TRMMKERNEL movq K, %rax -#else - movq KKK, %rax -#endif andq $7, %rax # if (k & 1) je .L6_19 @@ -1375,8 +1327,6 @@ vbroadcastss ALPHA, %xmm0 -#ifndef TRMMKERNEL - vfmaddps (CO1),%xmm0, %xmm4,%xmm4 vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 vfmaddps 8 * SIZE(CO1),%xmm0, %xmm10,%xmm10 @@ -1392,23 +1342,6 @@ vfmaddps 8 * SIZE(CO1, LDC, 2),%xmm0, %xmm12,%xmm12 vfmaddps 12 * SIZE(CO1, LDC, 2),%xmm0, %xmm15,%xmm15 -#else - vmulps %xmm0, %xmm4,%xmm4 - vmulps %xmm0, %xmm7,%xmm7 - vmulps %xmm0, %xmm10,%xmm10 - vmulps %xmm0, %xmm13,%xmm13 - - vmulps %xmm0, %xmm5,%xmm5 - vmulps %xmm0, %xmm8,%xmm8 - vmulps %xmm0, %xmm11,%xmm11 - vmulps %xmm0, %xmm14,%xmm14 - - vmulps %xmm0, %xmm6,%xmm6 - vmulps %xmm0, %xmm9,%xmm9 - vmulps %xmm0, %xmm12,%xmm12 - vmulps %xmm0, %xmm15,%xmm15 - -#endif vmovups %xmm4 , (CO1) vmovups %xmm7 , 4 * SIZE(CO1) @@ -1425,21 +1358,6 @@ vmovups %xmm12, 8 * SIZE(CO1, LDC, 2) vmovups %xmm15,12 * SIZE(CO1, LDC, 2) -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; 
number of values - leaq (BO, BI, SIZE), BO - salq $4, %rax // rax = rax * 16 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $16, KK -#endif addq $16 * SIZE, CO1 # coffset += 16 decq I # i -- @@ -1462,41 +1380,12 @@ /**************************************************************************/ .L6_20_1: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - leaq BUFFER1, BO // first buffer to BO - addq $6 * SIZE, BO -#else - movq KK, %rax leaq BUFFER1, BO // first buffer to BO addq $6 * SIZE, BO - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $3, %rax // rax = rax * 8 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - vzeroall -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $8, %rax // number of values in A -#else - addq $3, %rax // number of values in BO -#endif - movq %rax, KKK -#endif - andq $-8, %rax je .L6_20_6 @@ -1543,11 +1432,7 @@ ALIGN_4 .L6_20_6: -#ifndef TRMMKERNEL movq K, %rax -#else - movq KKK, %rax -#endif andq $7, %rax # if (k & 1) je .L6_20_9 @@ -1575,8 +1460,6 @@ vbroadcastss ALPHA, %xmm0 -#ifndef TRMMKERNEL - vfmaddps (CO1),%xmm0, %xmm4,%xmm4 vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 @@ -1586,17 +1469,6 @@ vfmaddps (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 vfmaddps 4 * SIZE(CO1, LDC, 2),%xmm0, %xmm9,%xmm9 -#else - vmulps %xmm0, %xmm4,%xmm4 - vmulps %xmm0, %xmm7,%xmm7 - - vmulps %xmm0, %xmm5,%xmm5 - vmulps %xmm0, %xmm8,%xmm8 - - vmulps %xmm0, %xmm6,%xmm6 - vmulps %xmm0, %xmm9,%xmm9 - -#endif vmovups %xmm4 , (CO1) vmovups %xmm7 , 4 * SIZE(CO1) @@ -1608,22 +1480,6 @@ vmovups %xmm9 , 4 * SIZE(CO1, LDC, 2) -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $3, %rax // rax = rax * 8 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $8, KK -#endif - addq $8 * SIZE, CO1 # coffset += 8 ALIGN_4 @@ -1638,41 +1494,12 @@ ALIGN_4 .L6_21: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) leaq BUFFER1, BO // first buffer to BO addq $6 * SIZE, BO -#else - movq KK, %rax - leaq BUFFER1, BO // first buffer to BO - addq $6 * SIZE, BO - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $2, %rax // rax = rax * 4 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - vzeroall -#ifndef TRMMKERNEL movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $4, %rax // number of values in A -#else - addq $3, %rax // number of values in BO -#endif - movq %rax, KKK -#endif - andq $-8, %rax je .L6_26 @@ -1719,11 +1546,7 @@ ALIGN_4 .L6_26: -#ifndef TRMMKERNEL movq K, %rax -#else - movq KKK, %rax -#endif andq $7, %rax # if (k & 1) je .L6_29 @@ -1751,40 +1574,15 @@ vbroadcastss ALPHA, %xmm0 -#ifndef TRMMKERNEL - vfmaddps (CO1),%xmm0, 
%xmm4,%xmm4 vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 vfmaddps (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 -#else - vmulps %xmm0, %xmm4,%xmm4 - vmulps %xmm0, %xmm5,%xmm5 - vmulps %xmm0, %xmm6,%xmm6 - -#endif vmovups %xmm4 , (CO1) vmovups %xmm5 , (CO1, LDC) vmovups %xmm6 , (CO1, LDC, 2) - -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $2, %rax // rax = rax * 4 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $4, KK -#endif - addq $4 * SIZE, CO1 # coffset += 4 ALIGN_4 @@ -1796,41 +1594,12 @@ ALIGN_4 .L6_31: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) leaq BUFFER1, BO // first buffer to BO addq $6 * SIZE, BO -#else - movq KK, %rax - leaq BUFFER1, BO // first buffer to BO - addq $6 * SIZE, BO - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $1, %rax // rax = rax * 2 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - vzeroall -#ifndef TRMMKERNEL movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $2, %rax // number of values in AO -#else - addq $3, %rax // number of values in BO -#endif - movq %rax, KKK -#endif - andq $-8, %rax je .L6_36 @@ -1877,11 +1646,7 @@ ALIGN_4 .L6_36: -#ifndef TRMMKERNEL movq K, %rax -#else - movq KKK, %rax -#endif andq $7, %rax # if (k & 1) je .L6_39 @@ -1909,8 +1674,6 @@ vmovss ALPHA, %xmm0 -#ifndef TRMMKERNEL - vfmaddss (CO1),%xmm0, %xmm4,%xmm4 vfmaddss 1 * SIZE(CO1),%xmm0, %xmm8,%xmm8 vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 @@ -1918,15 +1681,6 @@ vfmaddss (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 vfmaddss 1 * SIZE(CO1, LDC, 2),%xmm0, %xmm12,%xmm12 -#else - vmulss %xmm0, %xmm4,%xmm4 - vmulss %xmm0, %xmm8,%xmm8 - vmulss %xmm0, %xmm5,%xmm5 - vmulss %xmm0, %xmm10,%xmm10 - vmulss %xmm0, %xmm6,%xmm6 - vmulss %xmm0, %xmm12,%xmm12 - -#endif vmovss %xmm4 , (CO1) vmovss %xmm8 , 1 * SIZE(CO1) @@ -1935,22 +1689,6 @@ vmovss %xmm6 , (CO1, LDC, 2) vmovss %xmm12, 1 * SIZE(CO1, LDC, 2) -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $1, %rax // rax = rax * 2 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $2, KK -#endif - addq $2 * SIZE, CO1 # coffset += 2 ALIGN_4 @@ -1961,39 +1699,12 @@ ALIGN_4 .L6_41: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - leaq BUFFER1, BO // first buffer to BO - addq $6 * SIZE, BO -#else - movq KK, %rax leaq BUFFER1, BO // first buffer to BO addq $6 * SIZE, BO - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - leaq (AO, %rax, SIZE), AO -#endif - vzeroall -#ifndef TRMMKERNEL movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, 
KKK -#else - movq KK, %rax -#ifdef LEFT - addq $1, %rax // number of values in AO -#else - addq $3, %rax // number of values in BO -#endif - movq %rax, KKK -#endif andq $-8, %rax je .L6_46 @@ -2036,11 +1747,7 @@ ALIGN_4 .L6_46: -#ifndef TRMMKERNEL movq K, %rax -#else - movq KKK, %rax -#endif andq $7, %rax # if (k & 1) je .L6_49 @@ -2067,38 +1774,15 @@ vmovss ALPHA, %xmm0 -#ifndef TRMMKERNEL - vfmaddss (CO1),%xmm0, %xmm4,%xmm4 vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 vfmaddss (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 -#else - vmulss %xmm0, %xmm4,%xmm4 - vmulss %xmm0, %xmm5,%xmm5 - vmulss %xmm0, %xmm6,%xmm6 - -#endif vmovss %xmm4 , (CO1) vmovss %xmm5 , (CO1, LDC) vmovss %xmm6 , (CO1, LDC, 2) -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - leaq (AO, %rax, SIZE), AO -#endif - - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $1, KK -#endif - addq $1 * SIZE, CO1 # coffset += 1 ALIGN_4 @@ -2112,10 +1796,6 @@ leaq (C, LDC, 2), C leaq (C, LDC, 1), C // c += 3 * ldc -#if defined(TRMMKERNEL) && defined(LEFT) - movq OFFSET, %rax - movq %rax, KK -#endif movq A, AO // aoffset = a addq $32 * SIZE, AO @@ -2127,40 +1807,12 @@ ALIGN_4 .L7_11: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - leaq BUFFER2, BO // second buffer to BO - addq $6 * SIZE, BO -#else - movq KK, %rax leaq BUFFER2, BO // second buffer to BO addq $6 * SIZE, BO - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $4, %rax // rax = rax * 16 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - vzeroall -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $16, %rax // number of values in AO -#else - addq $3, %rax // number of values in BO -#endif - movq %rax, KKK -#endif andq $-8, %rax // K = K - ( K % 8 ) je .L7_16 @@ -2207,11 +1859,7 @@ ALIGN_4 .L7_16: -#ifndef TRMMKERNEL movq K, %rax -#else - movq KKK, %rax -#endif andq $7, %rax # if (k & 1) je .L7_19 @@ -2239,8 +1887,6 @@ vbroadcastss ALPHA, %xmm0 -#ifndef TRMMKERNEL - vfmaddps (CO1),%xmm0, %xmm4,%xmm4 vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 vfmaddps 8 * SIZE(CO1),%xmm0, %xmm10,%xmm10 @@ -2256,23 +1902,6 @@ vfmaddps 8 * SIZE(CO1, LDC, 2),%xmm0, %xmm12,%xmm12 vfmaddps 12 * SIZE(CO1, LDC, 2),%xmm0, %xmm15,%xmm15 -#else - vmulps %xmm0, %xmm4,%xmm4 - vmulps %xmm0, %xmm7,%xmm7 - vmulps %xmm0, %xmm10,%xmm10 - vmulps %xmm0, %xmm13,%xmm13 - - vmulps %xmm0, %xmm5,%xmm5 - vmulps %xmm0, %xmm8,%xmm8 - vmulps %xmm0, %xmm11,%xmm11 - vmulps %xmm0, %xmm14,%xmm14 - - vmulps %xmm0, %xmm6,%xmm6 - vmulps %xmm0, %xmm9,%xmm9 - vmulps %xmm0, %xmm12,%xmm12 - vmulps %xmm0, %xmm15,%xmm15 - -#endif vmovups %xmm4 , (CO1) vmovups %xmm7 , 4 * SIZE(CO1) @@ -2289,21 +1918,6 @@ vmovups %xmm12, 8 * SIZE(CO1, LDC, 2) vmovups %xmm15,12 * SIZE(CO1, LDC, 2) -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $4, %rax // rax = rax * 16 ; number of 
values - leaq (AO, %rax, SIZE), AO -#endif - - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $16, KK -#endif addq $16 * SIZE, CO1 # coffset += 16 decq I # i -- @@ -2326,41 +1940,12 @@ /**************************************************************************/ .L7_20_1: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - leaq BUFFER2, BO // first buffer to BO - addq $6 * SIZE, BO -#else - movq KK, %rax leaq BUFFER2, BO // first buffer to BO addq $6 * SIZE, BO - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $3, %rax // rax = rax * 8 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - vzeroall -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $8, %rax // number of values in A -#else - addq $3, %rax // number of values in BO -#endif - movq %rax, KKK -#endif - andq $-8, %rax je .L7_20_6 @@ -2407,11 +1992,7 @@ ALIGN_4 .L7_20_6: -#ifndef TRMMKERNEL movq K, %rax -#else - movq KKK, %rax -#endif andq $7, %rax # if (k & 1) je .L7_20_9 @@ -2434,12 +2015,10 @@ jl .L7_20_7 ALIGN_4 - .L7_20_9: - vbroadcastss ALPHA, %xmm0 -#ifndef TRMMKERNEL + vbroadcastss ALPHA, %xmm0 vfmaddps (CO1),%xmm0, %xmm4,%xmm4 vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 @@ -2450,18 +2029,6 @@ vfmaddps (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 vfmaddps 4 * SIZE(CO1, LDC, 2),%xmm0, %xmm9,%xmm9 -#else - vmulps %xmm0, %xmm4,%xmm4 - vmulps %xmm0, %xmm7,%xmm7 - - vmulps %xmm0, %xmm5,%xmm5 - vmulps %xmm0, %xmm8,%xmm8 - - vmulps %xmm0, %xmm6,%xmm6 - vmulps %xmm0, %xmm9,%xmm9 - -#endif - vmovups %xmm4 , (CO1) vmovups %xmm7 , 4 * SIZE(CO1) @@ -2472,22 +2039,6 @@ vmovups %xmm9 , 4 * SIZE(CO1, LDC, 2) -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $3, %rax // rax = rax * 8 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $8, KK -#endif - addq $8 * SIZE, CO1 # coffset += 8 ALIGN_4 @@ -2502,41 +2053,12 @@ ALIGN_4 .L7_21: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - leaq BUFFER2, BO // second buffer to BO - addq $6 * SIZE, BO -#else - movq KK, %rax leaq BUFFER2, BO // second buffer to BO addq $6 * SIZE, BO - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $2, %rax // rax = rax * 4 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - vzeroall -#ifndef TRMMKERNEL movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $4, %rax // number of values in A -#else - addq $3, %rax // number of values in BO -#endif - movq %rax, KKK -#endif - andq $-8, %rax je .L7_26 @@ -2583,11 +2105,7 @@ ALIGN_4 .L7_26: -#ifndef TRMMKERNEL movq K, %rax -#else - movq KKK, %rax -#endif andq $7, %rax # if (k & 1) je .L7_29 @@ -2615,40 +2133,14 @@ vbroadcastss ALPHA, %xmm0 -#ifndef TRMMKERNEL - vfmaddps (CO1),%xmm0, %xmm4,%xmm4 vfmaddps (CO1, 
LDC),%xmm0, %xmm5,%xmm5 vfmaddps (CO1, LDC, 2),%xmm0, %xmm6 ,%xmm6 -#else - vmulps %xmm0, %xmm4,%xmm4 - vmulps %xmm0, %xmm5,%xmm5 - vmulps %xmm0, %xmm6,%xmm6 - -#endif - vmovups %xmm4 , (CO1) vmovups %xmm5 , (CO1, LDC) vmovups %xmm6 , (CO1, LDC, 2) - -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $2, %rax // rax = rax * 4 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $4, KK -#endif - addq $4 * SIZE, CO1 # coffset += 4 ALIGN_4 @@ -2660,41 +2152,12 @@ ALIGN_4 .L7_31: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - leaq BUFFER2, BO // second buffer to BO - addq $6 * SIZE, BO -#else - movq KK, %rax leaq BUFFER2, BO // second buffer to BO addq $6 * SIZE, BO - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $1, %rax // rax = rax * 2 ; number of values - leaq (AO, %rax, SIZE), AO -#endif - vzeroall -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $2, %rax // number of values in AO -#else - addq $3, %rax // number of values in BO -#endif - movq %rax, KKK -#endif - andq $-8, %rax je .L7_36 @@ -2741,11 +2204,7 @@ ALIGN_4 .L7_36: -#ifndef TRMMKERNEL movq K, %rax -#else - movq KKK, %rax -#endif andq $7, %rax # if (k & 1) je .L7_39 @@ -2773,8 +2232,6 @@ vmovss ALPHA, %xmm0 -#ifndef TRMMKERNEL - vfmaddss (CO1),%xmm0, %xmm4,%xmm4 vfmaddss 1 * SIZE(CO1),%xmm0, %xmm8,%xmm8 vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 @@ -2782,16 +2239,6 @@ vfmaddss (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 vfmaddss 1 * SIZE(CO1, LDC, 2),%xmm0, %xmm12,%xmm12 -#else - vmulss %xmm0, %xmm4,%xmm4 - vmulss %xmm0, %xmm8,%xmm8 - vmulss %xmm0, %xmm5,%xmm5 - vmulss %xmm0, %xmm10,%xmm10 - vmulss %xmm0, %xmm6,%xmm6 - vmulss %xmm0, %xmm12,%xmm12 - -#endif - vmovss %xmm4 , (CO1) vmovss %xmm8 , 1 * SIZE(CO1) vmovss %xmm5 , (CO1, LDC) @@ -2799,70 +2246,1113 @@ vmovss %xmm6 , (CO1, LDC, 2) vmovss %xmm12, 1 * SIZE(CO1, LDC, 2) -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - movq %rax, BI // Index for BO + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L7_40: + testq $1, M + jz .L7_60 // to next 3 lines of N + + ALIGN_4 + +.L7_41: + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L7_46 + movq %rax, BI // Index for BO leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - salq $1, %rax // rax = rax * 2 ; number of values - leaq (AO, %rax, SIZE), AO -#endif + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L7_42: + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + je .L7_46 + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + je .L7_46 + + jmp .L7_42 + ALIGN_4 + +.L7_46: + 
movq K, %rax + + andq $7, %rax # if (k & 1) + je .L7_49 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L7_47: + + KERNEL1x3_SUB(xxx) + addq $3, BI + addq $1, %rax + jl .L7_47 + ALIGN_4 + + +.L7_49: + + vmovss ALPHA, %xmm0 + + vfmaddss (CO1),%xmm0, %xmm4,%xmm4 + vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddss (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + + vmovss %xmm4 , (CO1) + vmovss %xmm5 , (CO1, LDC) + vmovss %xmm6 , (CO1, LDC, 2) + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + + +.L7_60: + + decq J // j -- + jg .L6_01 + + +.L2_0: + cmpq $0, Nmod6 // N % 6 == 0 + je .L999 + +/************************************************************************************************ +* Loop for Nmod6 / 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + sarq $1, J // j = j / 2 + je .L1_0 + ALIGN_4 + +.L2_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L2_02b: + + vmovsd (BO1), %xmm0 + vmovsd %xmm0, (BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO + decq %rax + jnz .L2_02b + +.L2_02c: + + movq BO1, B // next offset of B + +.L2_10: + movq C, CO1 + leaq (C, LDC, 2), C // c += 2 * ldc + + + movq A, AO // aoffset = a + addq $32 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L2_20 + + ALIGN_4 + +.L2_11: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax // K = K - ( K % 8 ) + je .L2_16 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_12: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL16x2_1(xxx) + KERNEL16x2_2(xxx) + KERNEL16x2_3(xxx) + KERNEL16x2_4(xxx) + + KERNEL16x2_1(xxx) + KERNEL16x2_2(xxx) + KERNEL16x2_3(xxx) + KERNEL16x2_4(xxx) + + je .L2_16 + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL16x2_1(xxx) + KERNEL16x2_2(xxx) + KERNEL16x2_3(xxx) + KERNEL16x2_4(xxx) + + KERNEL16x2_1(xxx) + KERNEL16x2_2(xxx) + KERNEL16x2_3(xxx) + KERNEL16x2_4(xxx) + + je .L2_16 + + jmp .L2_12 + ALIGN_4 + +.L2_16: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L2_19 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_17: + + KERNEL16x2_SUB(xxx) + addq $2, BI + addq $16, %rax + jl .L2_17 + ALIGN_4 + + +.L2_19: + + vbroadcastss ALPHA, %xmm0 + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + vfmaddps 8 * SIZE(CO1),%xmm0, %xmm10,%xmm10 + vfmaddps 12 * SIZE(CO1),%xmm0, %xmm13,%xmm13 + + vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddps 4 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + vfmaddps 8 * SIZE(CO1, LDC),%xmm0, %xmm11,%xmm11 + vfmaddps 12 * SIZE(CO1, LDC),%xmm0, %xmm14,%xmm14 + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 4 * SIZE(CO1) + vmovups %xmm10, 8 * SIZE(CO1) + vmovups %xmm13,12 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 4 * SIZE(CO1, LDC) + vmovups %xmm11, 8 * SIZE(CO1, LDC) + vmovups %xmm14,12 * SIZE(CO1, LDC) + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L2_11 + ALIGN_4 + +/************************************************************************** +* Rest of M 
+***************************************************************************/ +.L2_20: + // Test rest of M + + testq $15, M + jz .L2_60 // to next 3 lines of N + + testq $8, M + jz .L2_21pre + ALIGN_4 + +/**************************************************************************/ + +.L2_20_1: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L2_20_6 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_20_2: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL8x2_1(xxx) + KERNEL8x2_2(xxx) + KERNEL8x2_3(xxx) + KERNEL8x2_4(xxx) + + KERNEL8x2_1(xxx) + KERNEL8x2_2(xxx) + KERNEL8x2_3(xxx) + KERNEL8x2_4(xxx) + + je .L2_20_6 + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL8x2_1(xxx) + KERNEL8x2_2(xxx) + KERNEL8x2_3(xxx) + KERNEL8x2_4(xxx) + + KERNEL8x2_1(xxx) + KERNEL8x2_2(xxx) + KERNEL8x2_3(xxx) + KERNEL8x2_4(xxx) + + je .L2_20_6 + + jmp .L2_20_2 + ALIGN_4 + +.L2_20_6: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L2_20_9 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_20_7: + + KERNEL8x2_SUB(xxx) + addq $2, BI + addq $8, %rax + jl .L2_20_7 + ALIGN_4 + + +.L2_20_9: + + vbroadcastss ALPHA, %xmm0 + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + + vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddps 4 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 4 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 4 * SIZE(CO1, LDC) + + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L2_21pre: + + testq $4, M + jz .L2_30 + ALIGN_4 + +.L2_21: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L2_26 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 1 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_22: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + je .L2_26 + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + je .L2_26 + + jmp .L2_22 + ALIGN_4 + +.L2_26: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L2_29 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_27: + + KERNEL4x2_SUB(xxx) + addq $2, BI + addq $4, %rax + jl .L2_27 + ALIGN_4 + + +.L2_29: + + vbroadcastss ALPHA, %xmm0 + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 + + vmovups %xmm4 , (CO1) + vmovups %xmm5 , (CO1, LDC) + + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L2_30: + testq $2, M + jz .L2_40 + + ALIGN_4 + +.L2_31: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO 
+ + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L2_36 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_32: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + je .L2_36 + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + je .L2_36 + + jmp .L2_32 + ALIGN_4 + +.L2_36: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L2_39 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_37: + + KERNEL2x2_SUB(xxx) + addq $2, BI + addq $2, %rax + jl .L2_37 + ALIGN_4 + + +.L2_39: + + vmovss ALPHA, %xmm0 + + vfmaddss (CO1),%xmm0, %xmm4,%xmm4 + vfmaddss 1 * SIZE(CO1),%xmm0, %xmm8,%xmm8 + vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddss 1 * SIZE(CO1, LDC),%xmm0, %xmm10,%xmm10 + + vmovss %xmm4 , (CO1) + vmovss %xmm8 , 1 * SIZE(CO1) + vmovss %xmm5 , (CO1, LDC) + vmovss %xmm10, 1 * SIZE(CO1, LDC) + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L2_40: + testq $1, M + jz .L2_60 // to next 2 lines of N + + ALIGN_4 + +.L2_41: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L2_46 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_42: + + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + je .L2_46 + + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + je .L2_46 + + jmp .L2_42 + ALIGN_4 + +.L2_46: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L2_49 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_47: + + KERNEL1x2_SUB(xxx) + addq $2, BI + addq $1, %rax + jl .L2_47 + ALIGN_4 + + +.L2_49: + + vmovss ALPHA, %xmm0 + + vfmaddss (CO1),%xmm0, %xmm4,%xmm4 + vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 + + vmovss %xmm4 , (CO1) + vmovss %xmm5 , (CO1, LDC) + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + +.L2_60: + + decq J // j -- + jg .L2_01 // next 2 lines of N + + + +.L1_0: + +/************************************************************************************************ +* Loop for Nmod6 % 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + andq $1, J // j % 2 + je .L999 + ALIGN_4 + +.L1_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L1_02b: + + vmovss (BO1), %xmm0 + vmovss %xmm0, (BO) + addq $1*SIZE,BO1 + addq $1*SIZE,BO + decq %rax + jnz .L1_02b + +.L1_02c: + + movq BO1, B // next offset of B + +.L1_10: + movq C, CO1 + leaq (C, LDC, 1), C // c += 1 * ldc + + + movq A, AO // aoffset = a + addq $32 * 
SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L1_20 + + ALIGN_4 + +.L1_11: + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax // K = K - ( K % 8 ) + je .L1_16 + movq %rax, BI // Index for BO + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_12: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL16x1_1(xxx) + KERNEL16x1_2(xxx) + KERNEL16x1_3(xxx) + KERNEL16x1_4(xxx) + + KERNEL16x1_1(xxx) + KERNEL16x1_2(xxx) + KERNEL16x1_3(xxx) + KERNEL16x1_4(xxx) + + je .L1_16 + + KERNEL16x1_1(xxx) + KERNEL16x1_2(xxx) + KERNEL16x1_3(xxx) + KERNEL16x1_4(xxx) + + KERNEL16x1_1(xxx) + KERNEL16x1_2(xxx) + KERNEL16x1_3(xxx) + KERNEL16x1_4(xxx) + + je .L1_16 + + jmp .L1_12 + ALIGN_4 + +.L1_16: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L1_19 + + movq %rax, BI // Index for BO + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_17: + + KERNEL16x1_SUB(xxx) + addq $1, BI + addq $16, %rax + jl .L1_17 + ALIGN_4 + + +.L1_19: + + vbroadcastss ALPHA, %xmm0 + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + vfmaddps 8 * SIZE(CO1),%xmm0, %xmm10,%xmm10 + vfmaddps 12 * SIZE(CO1),%xmm0, %xmm13,%xmm13 + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 4 * SIZE(CO1) + vmovups %xmm10, 8 * SIZE(CO1) + vmovups %xmm13,12 * SIZE(CO1) + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L1_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L1_20: + // Test rest of M + + testq $15, M + jz .L999 + + testq $8, M + jz .L1_21pre + ALIGN_4 + +/**************************************************************************/ + +.L1_20_1: + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L1_20_6 + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_20_2: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL8x1_1(xxx) + KERNEL8x1_2(xxx) + KERNEL8x1_3(xxx) + KERNEL8x1_4(xxx) + + KERNEL8x1_1(xxx) + KERNEL8x1_2(xxx) + KERNEL8x1_3(xxx) + KERNEL8x1_4(xxx) + + je .L1_20_6 + + KERNEL8x1_1(xxx) + KERNEL8x1_2(xxx) + KERNEL8x1_3(xxx) + KERNEL8x1_4(xxx) + + KERNEL8x1_1(xxx) + KERNEL8x1_2(xxx) + KERNEL8x1_3(xxx) + KERNEL8x1_4(xxx) + + je .L1_20_6 + + jmp .L1_20_2 + ALIGN_4 + +.L1_20_6: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L1_20_9 + + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_20_7: + + KERNEL8x1_SUB(xxx) + addq $1, BI + addq $8, %rax + jl .L1_20_7 + ALIGN_4 + + +.L1_20_9: + + vbroadcastss ALPHA, %xmm0 + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 4 * SIZE(CO1) + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L1_21pre: + + testq $4, M + jz .L1_30 + ALIGN_4 + +.L1_21: + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L1_26 + movq %rax, BI // Index for BO + + salq $2, %rax // rax = 
rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_22: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + je .L1_26 + + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + je .L1_26 + + jmp .L1_22 + ALIGN_4 + +.L1_26: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L1_29 + + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_27: + + KERNEL4x1_SUB(xxx) + addq $1, BI + addq $4, %rax + jl .L1_27 + ALIGN_4 + + +.L1_29: + + vbroadcastss ALPHA, %xmm0 + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + + vmovups %xmm4 , (CO1) + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L1_30: + testq $2, M + jz .L1_40 + + ALIGN_4 + +.L1_31: + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L1_36 + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_32: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + je .L1_36 + + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + je .L1_36 + + jmp .L1_32 + ALIGN_4 + +.L1_36: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L1_39 + + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_37: + + KERNEL2x1_SUB(xxx) + addq $1, BI + addq $2, %rax + jl .L1_37 + ALIGN_4 + + +.L1_39: + + vmovss ALPHA, %xmm0 + + vfmaddss (CO1),%xmm0, %xmm4,%xmm4 + vfmaddss 1 * SIZE(CO1),%xmm0, %xmm8,%xmm8 -#if defined(TRMMKERNEL) && defined(LEFT) - addq $2, KK -#endif + vmovss %xmm4 , (CO1) + vmovss %xmm8 , 1 * SIZE(CO1) addq $2 * SIZE, CO1 # coffset += 2 ALIGN_4 -.L7_40: +.L1_40: testq $1, M - jz .L7_60 // to next 3 lines of N + jz .L999 ALIGN_4 -.L7_41: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - leaq BUFFER2, BO // second buffer to BO - addq $6 * SIZE, BO -#else - movq KK, %rax - leaq BUFFER2, BO // second buffer to BO - addq $6 * SIZE, BO - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - leaq (AO, %rax, SIZE), AO -#endif - +.L1_41: + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO vzeroall -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $1, %rax // number of values in AO -#else - addq $3, %rax // number of values in BO -#endif - movq %rax, KKK -#endif andq $-8, %rax - je .L7_46 + je .L1_46 movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values leaq (AO, %rax, SIZE), AO leaq (BO, BI, SIZE), BO @@ -2870,47 +3360,42 @@ negq %rax ALIGN_4 
-.L7_42: +.L1_42: - KERNEL1x3_1(xxx) - KERNEL1x3_2(xxx) - KERNEL1x3_3(xxx) - KERNEL1x3_4(xxx) + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) - KERNEL1x3_1(xxx) - KERNEL1x3_2(xxx) - KERNEL1x3_3(xxx) - KERNEL1x3_4(xxx) + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) - je .L7_46 + je .L1_46 - KERNEL1x3_1(xxx) - KERNEL1x3_2(xxx) - KERNEL1x3_3(xxx) - KERNEL1x3_4(xxx) + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) - KERNEL1x3_1(xxx) - KERNEL1x3_2(xxx) - KERNEL1x3_3(xxx) - KERNEL1x3_4(xxx) + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) - je .L7_46 + je .L1_46 - jmp .L7_42 + jmp .L1_42 ALIGN_4 -.L7_46: -#ifndef TRMMKERNEL +.L1_46: movq K, %rax -#else - movq KKK, %rax -#endif andq $7, %rax # if (k & 1) - je .L7_49 + je .L1_49 movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values leaq (AO, %rax, SIZE), AO leaq (BO, BI, SIZE), BO @@ -2918,72 +3403,152 @@ negq %rax ALIGN_4 -.L7_47: +.L1_47: - KERNEL1x3_SUB(xxx) - addq $3, BI + KERNEL1x1_SUB(xxx) + addq $1, BI addq $1, %rax - jl .L7_47 + jl .L1_47 ALIGN_4 -.L7_49: +.L1_49: vmovss ALPHA, %xmm0 -#ifndef TRMMKERNEL - vfmaddss (CO1),%xmm0, %xmm4,%xmm4 - vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 - vfmaddss (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 -#else - vmulss %xmm0, %xmm4,%xmm4 - vmulss %xmm0, %xmm5,%xmm5 - vmulss %xmm0, %xmm6,%xmm6 + vmovss %xmm4 , (CO1) + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + +.L999: + movq SP, %rsp + movq (%rsp), %rbx + movq 8(%rsp), %rbp + movq 16(%rsp), %r12 + movq 24(%rsp), %r13 + movq 32(%rsp), %r14 + movq 40(%rsp), %r15 +#ifdef WINDOWS_ABI + movq 48(%rsp), %rdi + movq 56(%rsp), %rsi + movups 64(%rsp), %xmm6 + movups 80(%rsp), %xmm7 + movups 96(%rsp), %xmm8 + movups 112(%rsp), %xmm9 + movups 128(%rsp), %xmm10 + movups 144(%rsp), %xmm11 + movups 160(%rsp), %xmm12 + movups 176(%rsp), %xmm13 + movups 192(%rsp), %xmm14 + movups 208(%rsp), %xmm15 #endif - vmovss %xmm4 , (CO1) - vmovss %xmm5 , (CO1, LDC) - vmovss %xmm6 , (CO1, LDC, 2) + addq $STACKSIZE, %rsp + ret -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - movq %rax, BI // Index for BO - leaq (BI,BI,2), BI // BI = BI * 3 ; number of values - leaq (BO, BI, SIZE), BO - leaq (AO, %rax, SIZE), AO -#endif + EPILOGUE -#if defined(TRMMKERNEL) && defined(LEFT) - addq $1, KK -#endif +#else +/************************************************************************************* +* TRMM Kernel +*************************************************************************************/ - addq $1 * SIZE, CO1 # coffset += 1 - ALIGN_4 + + PROLOGUE + PROFCODE + subq $STACKSIZE, %rsp + movq %rbx, (%rsp) + movq %rbp, 8(%rsp) + movq %r12, 16(%rsp) + movq %r13, 24(%rsp) + movq %r14, 32(%rsp) + movq %r15, 40(%rsp) + vzeroupper -.L7_60: +#ifdef WINDOWS_ABI + movq %rdi, 48(%rsp) + movq %rsi, 56(%rsp) + movups %xmm6, 64(%rsp) + movups %xmm7, 80(%rsp) + movups %xmm8, 96(%rsp) + movups %xmm9, 112(%rsp) + movups %xmm10, 128(%rsp) + movups %xmm11, 144(%rsp) + movups %xmm12, 160(%rsp) + movups %xmm13, 176(%rsp) + movups %xmm14, 192(%rsp) + movups %xmm15, 208(%rsp) - decq J // j -- - jg .L6_01 + movq ARG1, OLD_M + movq ARG2, OLD_N + movq ARG3, OLD_K + movq OLD_A, A + movq OLD_B, B + movq OLD_C, C + movq OLD_LDC, LDC +#ifdef TRMMKERNEL + movsd OLD_OFFSET, %xmm12 +#endif + vmovaps %xmm3, %xmm0 + +#else + movq STACKSIZE + 8(%rsp), LDC +#ifdef TRMMKERNEL + movsd 
STACKSIZE + 16(%rsp), %xmm12 +#endif +#endif -.L2_0: - cmpq $0, Nmod6 // N % 6 == 0 + movq %rsp, SP # save old stack + subq $128 + L_BUFFER_SIZE, %rsp + andq $-4096, %rsp # align stack + + STACK_TOUCH + + cmpq $0, OLD_M je .L999 -/************************************************************************************************ -* Loop for Nmod6 / 2 > 0 -*************************************************************************************************/ + cmpq $0, OLD_N + je .L999 - movq Nmod6, J - sarq $1, J // j = j / 2 + cmpq $0, OLD_K + je .L999 + + movq OLD_M, M + movq OLD_N, N + movq OLD_K, K + + vmovsd %xmm0, ALPHA + + salq $BASE_SHIFT, LDC + + movq N, %rax + xorq %rdx, %rdx + movq $2, %rdi + divq %rdi // N / 6 + movq %rax, Ndiv6 // N / 6 + movq %rdx, Nmod6 // N % 6 + + + +#ifdef TRMMKERNEL + vmovsd %xmm12, OFFSET + vmovsd %xmm12, KK +#ifndef LEFT + negq KK +#endif +#endif + + movq Ndiv6, J + cmpq $0, J je .L1_0 ALIGN_4 @@ -3831,6 +4396,9 @@ .L2_60: +#if defined(TRMMKERNEL) && !defined(LEFT) + addq $2, KK +#endif decq J // j -- jg .L2_01 // next 2 lines of N @@ -4655,3 +5223,9 @@ ret EPILOGUE + + + + + +#endif
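
For context on what the separated TRMM path above keeps doing: the dedicated TRMM kernel retains the OFFSET/KK bookkeeping that the plain GEMM path no longer needs, and the hunk at .L2_60 restores the per-two-column advance of KK ("addq $2, KK") for the !LEFT case. Below is a minimal C sketch, under the usual OpenBLAS TRMM-kernel conventions, of that per-tile K-range and KK accounting. It is an illustrative sketch only, not code from this commit, and every name in it (trmm_tile_range, trmm_driver_outline, mr, nr, kk) is hypothetical.

/*
 * Sketch (assumption-based) of the KK/OFFSET bookkeeping a TRMM
 * micro-kernel performs for each MR x NR tile.  "kk" mirrors the KK
 * register variable in the assembly; mr = 16 and nr = 2 correspond to
 * this kernel's unrolling.  left/transa mirror LEFT/TRANSA.
 */
#include <stddef.h>

typedef struct {
    long k_skip;   /* values of K skipped at the start of the tile      */
    long k_count;  /* values of K actually accumulated for the tile     */
} trmm_k_range;

static trmm_k_range trmm_tile_range(int left, int transa,
                                    long k, long kk, long mr, long nr)
{
    trmm_k_range r;
    if ((left && !transa) || (!left && transa)) {
        /* Triangular part lies behind kk: skip kk values, use the rest. */
        r.k_skip  = kk;
        r.k_count = k - kk;
    } else {
        /* Triangular part lies ahead: start at 0, stop after kk + tile. */
        r.k_skip  = 0;
        r.k_count = kk + (left ? mr : nr);
    }
    return r;
}

/*
 * Driver outline: kk starts at OFFSET (negated when the triangular
 * matrix is on the right, i.e. !LEFT), is reset per column panel when
 * LEFT, advances by MR per row tile when LEFT, and by NR per column
 * panel when !LEFT -- the latter is what "addq $2, KK" after .L2_60
 * corresponds to in the two-column loop.
 */
static void trmm_driver_outline(long m, long n, long k, long offset,
                                int left, int transa, long mr, long nr)
{
    long kk = left ? offset : -offset;
    for (long j = 0; j < n; j += nr) {
        if (left)
            kk = offset;                    /* reset for each column panel */
        for (long i = 0; i < m; i += mr) {
            trmm_k_range r = trmm_tile_range(left, transa, k, kk, mr, nr);
            (void)r;                        /* ...run the micro-kernel over r... */
            if (left)
                kk += mr;
        }
        if (!left)
            kk += nr;
    }
}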