From 1397b47197f1176984186b3520ab189501b7b0eb Mon Sep 17 00:00:00 2001 From: Ashwin Sekhar T K Date: Mon, 2 Nov 2015 18:58:28 +0530 Subject: [PATCH] Optimized zgemm kernel for CORTEXA57 --- kernel/arm64/KERNEL.CORTEXA57 | 8 + kernel/arm64/zgemm_kernel_4x4.S | 1617 +++++++++++++++++++++++++++++++++++++++ param.h | 10 +- 3 files changed, 1630 insertions(+), 5 deletions(-) create mode 100644 kernel/arm64/zgemm_kernel_4x4.S diff --git a/kernel/arm64/KERNEL.CORTEXA57 b/kernel/arm64/KERNEL.CORTEXA57 index ac9908d..fd7f505 100644 --- a/kernel/arm64/KERNEL.CORTEXA57 +++ b/kernel/arm64/KERNEL.CORTEXA57 @@ -63,6 +63,7 @@ ZGEMVTKERNEL = zgemv_t.S STRMMKERNEL = ../generic/trmmkernel_4x4.c DTRMMKERNEL = ../generic/trmmkernel_4x4.c CTRMMKERNEL = ../generic/ztrmmkernel_4x4.c +ZTRMMKERNEL = ../generic/ztrmmkernel_4x4.c SGEMMKERNEL = sgemm_kernel_4x4.S SGEMMONCOPY = ../generic/gemm_ncopy_4.c @@ -81,3 +82,10 @@ CGEMMONCOPY = ../generic/zgemm_ncopy_4.c CGEMMOTCOPY = ../generic/zgemm_tcopy_4.c CGEMMONCOPYOBJ = cgemm_oncopy.o CGEMMOTCOPYOBJ = cgemm_otcopy.o + +ZGEMMKERNEL = zgemm_kernel_4x4.S +ZGEMMONCOPY = ../generic/zgemm_ncopy_4.c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_4.c +ZGEMMONCOPYOBJ = zgemm_oncopy.o +ZGEMMOTCOPYOBJ = zgemm_otcopy.o + diff --git a/kernel/arm64/zgemm_kernel_4x4.S b/kernel/arm64/zgemm_kernel_4x4.S new file mode 100644 index 0000000..56a8bba --- /dev/null +++ b/kernel/arm64/zgemm_kernel_4x4.S @@ -0,0 +1,1617 @@ +/******************************************************************************* +Copyright (c) 2015, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*******************************************************************************/
+
+#define ASSEMBLER
+#include "common.h"
+
+/*                  x0           x1           x2            d0             d1           x3         x4         x5         x6    */
+/* int CNAME(BLASLONG bm, BLASLONG bn, BLASLONG bk, FLOAT alpha_r, FLOAT alpha_i, FLOAT* ba, FLOAT* bb, FLOAT* C, BLASLONG ldc) */
+
+#define origM x0
+#define origN x1
+#define origK x2
+#define origPA x3
+#define origPB x4
+#define pC x5
+#define LDC x6
+#define temp x7
+#define counterL x8
+#define counterI x9
+#define counterJ x10
+#define pB x11
+#define pCRow0 x12
+#define pCRow1 x13
+#define pCRow2 x14
+#define pA x15
+#define alpha_save_R x16
+#define alpha_save_I x17
+
+#define alpha0_R d10
+#define alphaV0_R v10.d[0]
+#define alpha0_I d11
+#define alphaV0_I v11.d[0]
+
+#define alpha1_R d14
+#define alphaV1_R v14.d[0]
+#define alpha1_I d15
+#define alphaV1_I v15.d[0]
+
+
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT)
+#define OP_rr fmla
+#define OP_ii fmls
+#define OP_ri fmla
+#define OP_ir fmla
+#elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
+#define OP_rr fmla
+#define OP_ii fmla
+#define OP_ri fmls
+#define OP_ir fmla
+#elif defined(RN) || defined(RT) || defined(CN) || defined(CT)
+#define OP_rr fmla
+#define OP_ii fmla
+#define OP_ri fmla
+#define OP_ir fmls
+#elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
+#define OP_rr fmla
+#define OP_ii fmls
+#define OP_ri fmls
+#define OP_ir fmls
+#endif
+
+// 00 origM
+// 01 origN
+// 02 origK
+// 03 origPA
+// 04 origPB
+// 05 pC
+// 06 origLDC -> LDC
+// 07 offset -> temp
+// 08 counterL
+// 09 counterI
+// 10 counterJ
+// 11 pB
+// 12 pCRow0
+// 13 pCRow1
+// 14 pCRow2
+// 15 pA
+// 16 alpha_save_R
+// 17 alpha_save_I
+// 18 must save
+// 19 must save
+// 20 must save
+// 21 must save
+// 22 must save
+// 23 must save
+// 24 must save
+// 25 must save
+// 26 must save
+// 27 must save
+// 28 must save
+// 29 frame
+// 30 link
+// 31 sp
+
+//v00 ALPHA_R -> pA00_R, pA01_R
+//v01 ALPHA_I -> pA00_I, pA01_I
+//v02 pA02_R, pA03_R
+//v03 pA02_I, pA03_I
+//v04 pA10_R, pA11_R
+//v05 pA10_I, pA11_I
+//v06 pA12_R, pA13_R
+//v07 pA12_I, pA13_I
+//v08 must save pB00_R, pB01_R
+//v09 must save pB00_I, pB01_I
+//v10 must save pB02_R, pB03_R OR ALPHA0_R
+//v11 must save pB02_I, pB03_I OR ALPHA0_I
+//v12 must save pB10_R, pB11_R
+//v13 must save pB10_I, pB11_I
+//v14 must save pB12_R, pB13_R OR ALPHA1_R
+//v15 must save pB12_I, pB13_I OR ALPHA1_I
+//v16 must save pC00_R, pC01_R
+//v17 must save pC00_I, pC01_I
+//v18 pC02_R, pC03_R
+//v19 pC02_I, pC03_I
+//v20 pC10_R, pC11_R
+//v21 pC10_I, pC11_I
+//v22 pC12_R, pC13_R
+//v23 pC12_I, pC13_I
+//v24 pC20_R, pC21_R
+//v25 pC20_I, pC21_I
+//v26 pC22_R, pC23_R
+//v27 pC22_I, pC23_I
+//v28 pC30_R, pC31_R
+//v29 pC30_I, pC31_I
+//v30 pC32_R, pC33_R
+//v31 pC32_I, pC33_I
+
+/*******************************************************************************
+* Macro definitions
+*******************************************************************************/
+
+.macro INIT4x4
+	fmov d16, xzr
+	fmov d17, d16
+	fmov d18, d17
+	fmov d19, d16
+	fmov d20, d17
+	fmov d21, d16
+	fmov d22, d17
+	fmov d23, d16
+	fmov d24, d17
+	fmov d25, d16
+	fmov d26, d17
+	fmov d27, d16
+	fmov d28, d17
+	fmov d29, d16
+	fmov d30, d17
+	fmov d31, d16
+.endm
+
+.macro KERNEL4x4_I
+	ld2 {v8.2d, v9.2d}, [pB]
+	add pB, pB, #32
+	ld2 {v10.2d, v11.2d}, [pB]
+	add pB, pB, #32
+	ld2 {v0.2d, v1.2d}, [pA]
+	add pA, pA, #32
+	ld2 {v2.2d, v3.2d}, [pA]
+	add pA, pA, #32
+
+	fmul v16.2d, v0.2d, v8.2d[0]
+	OP_ii v16.2d, v1.2d, v9.2d[0]
+	fmul v17.2d, v0.2d, v9.2d[0]
+#if 
defined(NR) || defined(NC) || defined(TR) || defined(TC) || \ + defined(RR) || defined(RC) || defined(CR) || defined(CC) + fneg v17.2d, v17.2d +#endif + OP_ir v17.2d, v1.2d, v8.2d[0] + + fmul v18.2d, v2.2d, v8.2d[0] + OP_ii v18.2d, v3.2d, v9.2d[0] + fmul v19.2d, v2.2d, v9.2d[0] +#if defined(NR) || defined(NC) || defined(TR) || defined(TC) || \ + defined(RR) || defined(RC) || defined(CR) || defined(CC) + fneg v19.2d, v19.2d +#endif + OP_ir v19.2d, v3.2d, v8.2d[0] + + fmul v20.2d, v0.2d, v8.2d[1] + OP_ii v20.2d, v1.2d, v9.2d[1] + fmul v21.2d, v0.2d, v9.2d[1] +#if defined(NR) || defined(NC) || defined(TR) || defined(TC) || \ + defined(RR) || defined(RC) || defined(CR) || defined(CC) + fneg v21.2d, v21.2d +#endif + OP_ir v21.2d, v1.2d, v8.2d[1] + + fmul v22.2d, v2.2d, v8.2d[1] + OP_ii v22.2d, v3.2d, v9.2d[1] + fmul v23.2d, v2.2d, v9.2d[1] +#if defined(NR) || defined(NC) || defined(TR) || defined(TC) || \ + defined(RR) || defined(RC) || defined(CR) || defined(CC) + fneg v23.2d, v23.2d +#endif + OP_ir v23.2d, v3.2d, v8.2d[1] + + fmul v24.2d, v0.2d, v10.2d[0] + OP_ii v24.2d, v1.2d, v11.2d[0] + fmul v25.2d, v0.2d, v11.2d[0] +#if defined(NR) || defined(NC) || defined(TR) || defined(TC) || \ + defined(RR) || defined(RC) || defined(CR) || defined(CC) + fneg v25.2d, v25.2d +#endif + OP_ir v25.2d, v1.2d, v10.2d[0] + + fmul v26.2d, v2.2d, v10.2d[0] + OP_ii v26.2d, v3.2d, v11.2d[0] + fmul v27.2d, v2.2d, v11.2d[0] +#if defined(NR) || defined(NC) || defined(TR) || defined(TC) || \ + defined(RR) || defined(RC) || defined(CR) || defined(CC) + fneg v27.2d, v27.2d +#endif + OP_ir v27.2d, v3.2d, v10.2d[0] + + fmul v28.2d, v0.2d, v10.2d[1] + OP_ii v28.2d, v1.2d, v11.2d[1] + fmul v29.2d, v0.2d, v11.2d[1] +#if defined(NR) || defined(NC) || defined(TR) || defined(TC) || \ + defined(RR) || defined(RC) || defined(CR) || defined(CC) + fneg v29.2d, v29.2d +#endif + OP_ir v29.2d, v1.2d, v10.2d[1] + + fmul v30.2d, v2.2d, v10.2d[1] + OP_ii v30.2d, v3.2d, v11.2d[1] + fmul v31.2d, v2.2d, v11.2d[1] +#if defined(NR) || defined(NC) || defined(TR) || defined(TC) || \ + defined(RR) || defined(RC) || defined(CR) || defined(CC) + fneg v31.2d, v31.2d +#endif + OP_ir v31.2d, v3.2d, v10.2d[1] + + ld2 {v12.2d, v13.2d}, [pB] + add pB, pB, #32 + ld2 {v14.2d, v15.2d}, [pB] + add pB, pB, #32 + ld2 {v4.2d, v5.2d} , [pA] + add pA, pA, #32 + ld2 {v6.2d, v7.2d} , [pA] + add pA, pA, #32 +.endm + +.macro KERNEL4x4_M1 + OP_rr v16.2d, v0.2d, v8.2d[0] + OP_ii v16.2d, v1.2d, v9.2d[0] + OP_ri v17.2d, v0.2d, v9.2d[0] + OP_ir v17.2d, v1.2d, v8.2d[0] + + ld2 {v12.2d, v13.2d}, [pB] // For next round + add pB, pB, #32 + + OP_rr v18.2d, v2.2d, v8.2d[0] + OP_ii v18.2d, v3.2d, v9.2d[0] + OP_ri v19.2d, v2.2d, v9.2d[0] + OP_ir v19.2d, v3.2d, v8.2d[0] + + ld2 {v14.2d, v15.2d}, [pB] // For next round + add pB, pB, #32 + + OP_rr v20.2d, v0.2d, v8.2d[1] + OP_ii v20.2d, v1.2d, v9.2d[1] + OP_ri v21.2d, v0.2d, v9.2d[1] + OP_ir v21.2d, v1.2d, v8.2d[1] + + ld2 {v4.2d, v5.2d} , [pA] // For next round + add pA, pA, #32 + + OP_rr v22.2d, v2.2d, v8.2d[1] + OP_ii v22.2d, v3.2d, v9.2d[1] + OP_ri v23.2d, v2.2d, v9.2d[1] + OP_ir v23.2d, v3.2d, v8.2d[1] + + ld2 {v6.2d, v7.2d} , [pA] // For next round + add pA, pA, #32 + + OP_rr v24.2d, v0.2d, v10.2d[0] + OP_ii v24.2d, v1.2d, v11.2d[0] + OP_ri v25.2d, v0.2d, v11.2d[0] + OP_ir v25.2d, v1.2d, v10.2d[0] + + prfm PLDL1KEEP, [pA, #512] + + OP_rr v26.2d, v2.2d, v10.2d[0] + OP_ii v26.2d, v3.2d, v11.2d[0] + OP_ri v27.2d, v2.2d, v11.2d[0] + OP_ir v27.2d, v3.2d, v10.2d[0] + + prfm PLDL1KEEP, [pB, #512] + + OP_rr v28.2d, v0.2d, v10.2d[1] 
+ OP_ii v28.2d, v1.2d, v11.2d[1] + OP_ri v29.2d, v0.2d, v11.2d[1] + OP_ir v29.2d, v1.2d, v10.2d[1] + + OP_rr v30.2d, v2.2d, v10.2d[1] + OP_ii v30.2d, v3.2d, v11.2d[1] + OP_ri v31.2d, v2.2d, v11.2d[1] + OP_ir v31.2d, v3.2d, v10.2d[1] +.endm + +.macro KERNEL4x4_M2 + OP_rr v16.2d, v4.2d, v12.2d[0] + OP_ii v16.2d, v5.2d, v13.2d[0] + OP_ri v17.2d, v4.2d, v13.2d[0] + OP_ir v17.2d, v5.2d, v12.2d[0] + + ld2 {v8.2d, v9.2d}, [pB] // For next round + add pB, pB, #32 + + OP_rr v18.2d, v6.2d, v12.2d[0] + OP_ii v18.2d, v7.2d, v13.2d[0] + OP_ri v19.2d, v6.2d, v13.2d[0] + OP_ir v19.2d, v7.2d, v12.2d[0] + + ld2 {v10.2d, v11.2d}, [pB] // For next round + add pB, pB, #32 + + OP_rr v20.2d, v4.2d, v12.2d[1] + OP_ii v20.2d, v5.2d, v13.2d[1] + OP_ri v21.2d, v4.2d, v13.2d[1] + OP_ir v21.2d, v5.2d, v12.2d[1] + + ld2 {v0.2d, v1.2d}, [pA] // For next round + add pA, pA, #32 + + OP_rr v22.2d, v6.2d, v12.2d[1] + OP_ii v22.2d, v7.2d, v13.2d[1] + OP_ri v23.2d, v6.2d, v13.2d[1] + OP_ir v23.2d, v7.2d, v12.2d[1] + + ld2 {v2.2d, v3.2d}, [pA] // For next round + add pA, pA, #32 + + OP_rr v24.2d, v4.2d, v14.2d[0] + OP_ii v24.2d, v5.2d, v15.2d[0] + OP_ri v25.2d, v4.2d, v15.2d[0] + OP_ir v25.2d, v5.2d, v14.2d[0] + + prfm PLDL1KEEP, [pA, #512] + + OP_rr v26.2d, v6.2d, v14.2d[0] + OP_ii v26.2d, v7.2d, v15.2d[0] + OP_ri v27.2d, v6.2d, v15.2d[0] + OP_ir v27.2d, v7.2d, v14.2d[0] + + prfm PLDL1KEEP, [pB, #512] + + OP_rr v28.2d, v4.2d, v14.2d[1] + OP_ii v28.2d, v5.2d, v15.2d[1] + OP_ri v29.2d, v4.2d, v15.2d[1] + OP_ir v29.2d, v5.2d, v14.2d[1] + + OP_rr v30.2d, v6.2d, v14.2d[1] + OP_ii v30.2d, v7.2d, v15.2d[1] + OP_ri v31.2d, v6.2d, v15.2d[1] + OP_ir v31.2d, v7.2d, v14.2d[1] +.endm + +.macro KERNEL4x4_E + OP_rr v16.2d, v4.2d, v12.2d[0] + OP_ii v16.2d, v5.2d, v13.2d[0] + OP_ri v17.2d, v4.2d, v13.2d[0] + OP_ir v17.2d, v5.2d, v12.2d[0] + + OP_rr v18.2d, v6.2d, v12.2d[0] + OP_ii v18.2d, v7.2d, v13.2d[0] + OP_ri v19.2d, v6.2d, v13.2d[0] + OP_ir v19.2d, v7.2d, v12.2d[0] + + OP_rr v20.2d, v4.2d, v12.2d[1] + OP_ii v20.2d, v5.2d, v13.2d[1] + OP_ri v21.2d, v4.2d, v13.2d[1] + OP_ir v21.2d, v5.2d, v12.2d[1] + + OP_rr v22.2d, v6.2d, v12.2d[1] + OP_ii v22.2d, v7.2d, v13.2d[1] + OP_ri v23.2d, v6.2d, v13.2d[1] + OP_ir v23.2d, v7.2d, v12.2d[1] + + OP_rr v24.2d, v4.2d, v14.2d[0] + OP_ii v24.2d, v5.2d, v15.2d[0] + OP_ri v25.2d, v4.2d, v15.2d[0] + OP_ir v25.2d, v5.2d, v14.2d[0] + + OP_rr v26.2d, v6.2d, v14.2d[0] + OP_ii v26.2d, v7.2d, v15.2d[0] + OP_ri v27.2d, v6.2d, v15.2d[0] + OP_ir v27.2d, v7.2d, v14.2d[0] + + OP_rr v28.2d, v4.2d, v14.2d[1] + OP_ii v28.2d, v5.2d, v15.2d[1] + OP_ri v29.2d, v4.2d, v15.2d[1] + OP_ir v29.2d, v5.2d, v14.2d[1] + + OP_rr v30.2d, v6.2d, v14.2d[1] + OP_ii v30.2d, v7.2d, v15.2d[1] + OP_ri v31.2d, v6.2d, v15.2d[1] + OP_ir v31.2d, v7.2d, v14.2d[1] +.endm + +.macro KERNEL4x4_SUB + ld2 {v8.2d, v9.2d}, [pB] + add pB, pB, #32 + ld2 {v10.2d, v11.2d}, [pB] + add pB, pB, #32 + ld2 {v0.2d, v1.2d}, [pA] + add pA, pA, #32 + ld2 {v2.2d, v3.2d}, [pA] + add pA, pA, #32 + + OP_rr v16.2d, v0.2d, v8.2d[0] + OP_ii v16.2d, v1.2d, v9.2d[0] + OP_ri v17.2d, v0.2d, v9.2d[0] + OP_ir v17.2d, v1.2d, v8.2d[0] + + OP_rr v18.2d, v2.2d, v8.2d[0] + OP_ii v18.2d, v3.2d, v9.2d[0] + OP_ri v19.2d, v2.2d, v9.2d[0] + OP_ir v19.2d, v3.2d, v8.2d[0] + + OP_rr v20.2d, v0.2d, v8.2d[1] + OP_ii v20.2d, v1.2d, v9.2d[1] + OP_ri v21.2d, v0.2d, v9.2d[1] + OP_ir v21.2d, v1.2d, v8.2d[1] + + OP_rr v22.2d, v2.2d, v8.2d[1] + OP_ii v22.2d, v3.2d, v9.2d[1] + OP_ri v23.2d, v2.2d, v9.2d[1] + OP_ir v23.2d, v3.2d, v8.2d[1] + + OP_rr v24.2d, v0.2d, v10.2d[0] + OP_ii v24.2d, v1.2d, 
v11.2d[0] + OP_ri v25.2d, v0.2d, v11.2d[0] + OP_ir v25.2d, v1.2d, v10.2d[0] + + OP_rr v26.2d, v2.2d, v10.2d[0] + OP_ii v26.2d, v3.2d, v11.2d[0] + OP_ri v27.2d, v2.2d, v11.2d[0] + OP_ir v27.2d, v3.2d, v10.2d[0] + + OP_rr v28.2d, v0.2d, v10.2d[1] + OP_ii v28.2d, v1.2d, v11.2d[1] + OP_ri v29.2d, v0.2d, v11.2d[1] + OP_ir v29.2d, v1.2d, v10.2d[1] + + OP_rr v30.2d, v2.2d, v10.2d[1] + OP_ii v30.2d, v3.2d, v11.2d[1] + OP_ri v31.2d, v2.2d, v11.2d[1] + OP_ir v31.2d, v3.2d, v10.2d[1] +.endm + +.macro SAVE4x4 + fmov alpha0_R, alpha_save_R + fmov alpha0_I, alpha_save_I + fmov alpha1_R, alpha0_R + fmov alpha1_I, alpha0_I + + mov pCRow1, pCRow0 + + ld2 {v0.2d, v1.2d}, [pCRow1] + fmla v0.2d, v16.2d, alphaV0_R + fmls v0.2d, v17.2d, alphaV0_I + fmla v1.2d, v16.2d, alphaV1_I + fmla v1.2d, v17.2d, alphaV1_R + st2 {v0.2d, v1.2d}, [pCRow1] + add pCRow2, pCRow1, #32 + ld2 {v2.2d, v3.2d}, [pCRow2] + fmla v2.2d, v18.2d, alphaV0_R + fmls v2.2d, v19.2d, alphaV0_I + fmla v3.2d, v18.2d, alphaV1_I + fmla v3.2d, v19.2d, alphaV1_R + st2 {v2.2d, v3.2d}, [pCRow2] + + add pCRow1, pCRow1, LDC + ld2 {v4.2d, v5.2d}, [pCRow1] + fmla v4.2d, v20.2d, alphaV0_R + fmls v4.2d, v21.2d, alphaV0_I + fmla v5.2d, v20.2d, alphaV1_I + fmla v5.2d, v21.2d, alphaV1_R + st2 {v4.2d, v5.2d}, [pCRow1] + add pCRow2, pCRow1, #32 + ld2 {v6.2d, v7.2d}, [pCRow2] + fmla v6.2d, v22.2d, alphaV0_R + fmls v6.2d, v23.2d, alphaV0_I + fmla v7.2d, v22.2d, alphaV1_I + fmla v7.2d, v23.2d, alphaV1_R + st2 {v6.2d, v7.2d}, [pCRow2] + + add pCRow1, pCRow1, LDC + ld2 {v0.2d, v1.2d}, [pCRow1] + fmla v0.2d, v24.2d, alphaV0_R + fmls v0.2d, v25.2d, alphaV0_I + fmla v1.2d, v24.2d, alphaV1_I + fmla v1.2d, v25.2d, alphaV1_R + st2 {v0.2d, v1.2d}, [pCRow1] + add pCRow2, pCRow1, #32 + ld2 {v2.2d, v3.2d}, [pCRow2] + fmla v2.2d, v26.2d, alphaV0_R + fmls v2.2d, v27.2d, alphaV0_I + fmla v3.2d, v26.2d, alphaV1_I + fmla v3.2d, v27.2d, alphaV1_R + st2 {v2.2d, v3.2d}, [pCRow2] + + add pCRow1, pCRow1, LDC + + ld2 {v4.2d, v5.2d}, [pCRow1] + fmla v4.2d, v28.2d, alphaV0_R + fmls v4.2d, v29.2d, alphaV0_I + fmla v5.2d, v28.2d, alphaV1_I + fmla v5.2d, v29.2d, alphaV1_R + st2 {v4.2d, v5.2d}, [pCRow1] + add pCRow2, pCRow1, #32 + ld2 {v6.2d, v7.2d}, [pCRow2] + fmla v6.2d, v30.2d, alphaV0_R + fmls v6.2d, v31.2d, alphaV0_I + fmla v7.2d, v30.2d, alphaV1_I + fmla v7.2d, v31.2d, alphaV1_R + st2 {v6.2d, v7.2d}, [pCRow2] + + add pCRow0, pCRow0, #64 +.endm + +/******************************************************************************/ + +.macro INIT2x4 + fmov d16, xzr + fmov d17, xzr + fmov d20, d16 + fmov d21, d17 + fmov d24, d16 + fmov d25, d17 + fmov d28, d16 + fmov d29, d17 +.endm + +.macro KERNEL2x4_SUB + ld2 {v8.2d, v9.2d}, [pB] + add pB, pB, #32 + ld2 {v10.2d, v11.2d}, [pB] + add pB, pB, #32 + + ld2 {v0.2d, v1.2d}, [pA] + add pA, pA, #32 + + OP_rr v16.2d, v0.2d, v8.2d[0] + OP_ii v16.2d, v1.2d, v9.2d[0] + OP_ri v17.2d, v0.2d, v9.2d[0] + OP_ir v17.2d, v1.2d, v8.2d[0] + + OP_rr v20.2d, v0.2d, v8.2d[1] + OP_ii v20.2d, v1.2d, v9.2d[1] + OP_ri v21.2d, v0.2d, v9.2d[1] + OP_ir v21.2d, v1.2d, v8.2d[1] + + OP_rr v24.2d, v0.2d, v10.2d[0] + OP_ii v24.2d, v1.2d, v11.2d[0] + OP_ri v25.2d, v0.2d, v11.2d[0] + OP_ir v25.2d, v1.2d, v10.2d[0] + + OP_rr v28.2d, v0.2d, v10.2d[1] + OP_ii v28.2d, v1.2d, v11.2d[1] + OP_ri v29.2d, v0.2d, v11.2d[1] + OP_ir v29.2d, v1.2d, v10.2d[1] +.endm + +.macro SAVE2x4 + fmov alpha0_R, alpha_save_R + fmov alpha0_I, alpha_save_I + fmov alpha1_R, alpha0_R + fmov alpha1_I, alpha0_I + + mov pCRow1, pCRow0 + + ld2 {v0.2d, v1.2d}, [pCRow1] + fmla v0.2d, v16.2d, alphaV0_R + fmls v0.2d, 
v17.2d, alphaV0_I + fmla v1.2d, v16.2d, alphaV1_I + fmla v1.2d, v17.2d, alphaV1_R + st2 {v0.2d, v1.2d}, [pCRow1] + + add pCRow1, pCRow1, LDC + + ld2 {v4.2d, v5.2d}, [pCRow1] + fmla v4.2d, v20.2d, alphaV0_R + fmls v4.2d, v21.2d, alphaV0_I + fmla v5.2d, v20.2d, alphaV1_I + fmla v5.2d, v21.2d, alphaV1_R + st2 {v4.2d, v5.2d}, [pCRow1] + + add pCRow1, pCRow1, LDC + + ld2 {v0.2d, v1.2d}, [pCRow1] + fmla v0.2d, v24.2d, alphaV0_R + fmls v0.2d, v25.2d, alphaV0_I + fmla v1.2d, v24.2d, alphaV1_I + fmla v1.2d, v25.2d, alphaV1_R + st2 {v0.2d, v1.2d}, [pCRow1] + + add pCRow1, pCRow1, LDC + + ld2 {v4.2d, v5.2d}, [pCRow1] + fmla v4.2d, v28.2d, alphaV0_R + fmls v4.2d, v29.2d, alphaV0_I + fmla v5.2d, v28.2d, alphaV1_I + fmla v5.2d, v29.2d, alphaV1_R + st2 {v4.2d, v5.2d}, [pCRow1] + + add pCRow0, pCRow0, #32 +.endm + +/******************************************************************************/ + +.macro INIT1x4 + fmov d16, xzr + fmov d17, xzr + fmov d20, d16 + fmov d21, d17 + fmov d24, d16 + fmov d25, d17 + fmov d28, d16 + fmov d29, d17 +.endm + +.macro KERNEL1x4_SUB + ld2 {v8.2d, v9.2d}, [pB] + add pB, pB, #32 + ld2 {v10.2d, v11.2d}, [pB] + add pB, pB, #32 + ld2 {v0.d, v1.d}[0], [pA] + add pA, pA, #16 + + OP_rr d16, d0, v8.2d[0] + OP_ii d16, d1, v9.2d[0] + OP_ri d17, d0, v9.2d[0] + OP_ir d17, d1, v8.2d[0] + + OP_rr d20, d0, v8.2d[1] + OP_ii d20, d1, v9.2d[1] + OP_ri d21, d0, v9.2d[1] + OP_ir d21, d1, v8.2d[1] + + OP_rr d24, d0, v10.2d[0] + OP_ii d24, d1, v11.2d[0] + OP_ri d25, d0, v11.2d[0] + OP_ir d25, d1, v10.2d[0] + + OP_rr d28, d0, v10.2d[1] + OP_ii d28, d1, v11.2d[1] + OP_ri d29, d0, v11.2d[1] + OP_ir d29, d1, v10.2d[1] +.endm + +.macro SAVE1x4 + fmov alpha0_R, alpha_save_R + fmov alpha0_I, alpha_save_I + fmov alpha1_R, alpha0_R + fmov alpha1_I, alpha0_I + + mov pCRow1, pCRow0 + + ld2 {v0.d, v1.d}[0], [pCRow1] + fmla d0, d16, alphaV0_R + fmls d0, d17, alphaV0_I + fmla d1, d16, alphaV1_I + fmla d1, d17, alphaV1_R + st2 {v0.d, v1.d}[0], [pCRow1] + + add pCRow1, pCRow1, LDC + + ld2 {v4.d, v5.d}[0], [pCRow1] + fmla d4, d20, alphaV0_R + fmls d4, d21, alphaV0_I + fmla d5, d20, alphaV1_I + fmla d5, d21, alphaV1_R + st2 {v4.d, v5.d}[0], [pCRow1] + + add pCRow1, pCRow1, LDC + + ld2 {v0.d, v1.d}[0], [pCRow1] + fmla d0, d24, alphaV0_R + fmls d0, d25, alphaV0_I + fmla d1, d24, alphaV1_I + fmla d1, d25, alphaV1_R + st2 {v0.d, v1.d}[0], [pCRow1] + + add pCRow1, pCRow1, LDC + + ld2 {v4.d, v5.d}[0], [pCRow1] + fmla d4, d28, alphaV0_R + fmls d4, d29, alphaV0_I + fmla d5, d28, alphaV1_I + fmla d5, d29, alphaV1_R + st2 {v4.d, v5.d}[0], [pCRow1] + + add pCRow0, pCRow0, #16 +.endm + +/******************************************************************************/ + +.macro INIT4x2 + fmov d16, xzr + fmov d17, xzr + fmov d18, d16 + fmov d19, d17 + fmov d20, d16 + fmov d21, d17 + fmov d22, d16 + fmov d23, d17 +.endm + +.macro KERNEL4x2_SUB + ld2 {v8.2d, v9.2d}, [pB] + add pB, pB, #32 + ld2 {v0.2d, v1.2d}, [pA] + add pA, pA, #32 + ld2 {v2.2d, v3.2d}, [pA] + add pA, pA, #32 + + OP_rr v16.2d, v0.2d, v8.2d[0] + OP_ii v16.2d, v1.2d, v9.2d[0] + OP_ri v17.2d, v0.2d, v9.2d[0] + OP_ir v17.2d, v1.2d, v8.2d[0] + + OP_rr v18.2d, v2.2d, v8.2d[0] + OP_ii v18.2d, v3.2d, v9.2d[0] + OP_ri v19.2d, v2.2d, v9.2d[0] + OP_ir v19.2d, v3.2d, v8.2d[0] + + OP_rr v20.2d, v0.2d, v8.2d[1] + OP_ii v20.2d, v1.2d, v9.2d[1] + OP_ri v21.2d, v0.2d, v9.2d[1] + OP_ir v21.2d, v1.2d, v8.2d[1] + + OP_rr v22.2d, v2.2d, v8.2d[1] + OP_ii v22.2d, v3.2d, v9.2d[1] + OP_ri v23.2d, v2.2d, v9.2d[1] + OP_ir v23.2d, v3.2d, v8.2d[1] +.endm + +.macro SAVE4x2 + fmov 
alpha0_R, alpha_save_R + fmov alpha0_I, alpha_save_I + fmov alpha1_R, alpha0_R + fmov alpha1_I, alpha0_I + + mov pCRow1, pCRow0 + + ld2 {v0.2d, v1.2d}, [pCRow1] + fmla v0.2d, v16.2d, alphaV0_R + fmls v0.2d, v17.2d, alphaV0_I + fmla v1.2d, v16.2d, alphaV1_I + fmla v1.2d, v17.2d, alphaV1_R + st2 {v0.2d, v1.2d}, [pCRow1] + add pCRow2, pCRow1, #32 + ld2 {v2.2d, v3.2d}, [pCRow2] + fmla v2.2d, v18.2d, alphaV0_R + fmls v2.2d, v19.2d, alphaV0_I + fmla v3.2d, v18.2d, alphaV1_I + fmla v3.2d, v19.2d, alphaV1_R + st2 {v2.2d, v3.2d}, [pCRow2] + + add pCRow1, pCRow1, LDC + + ld2 {v4.2d, v5.2d}, [pCRow1] + fmla v4.2d, v20.2d, alphaV0_R + fmls v4.2d, v21.2d, alphaV0_I + fmla v5.2d, v20.2d, alphaV1_I + fmla v5.2d, v21.2d, alphaV1_R + st2 {v4.2d, v5.2d}, [pCRow1] + add pCRow2, pCRow1, #32 + ld2 {v6.2d, v7.2d}, [pCRow2] + fmla v6.2d, v22.2d, alphaV0_R + fmls v6.2d, v23.2d, alphaV0_I + fmla v7.2d, v22.2d, alphaV1_I + fmla v7.2d, v23.2d, alphaV1_R + st2 {v6.2d, v7.2d}, [pCRow2] + + add pCRow0, pCRow0, #64 +.endm + +/******************************************************************************/ + +.macro INIT2x2 + fmov d16, xzr + fmov d17, xzr + fmov d20, d16 + fmov d21, d17 +.endm + +.macro KERNEL2x2_SUB + ld2 {v8.2d, v9.2d}, [pB] + add pB, pB, #32 + ld2 {v0.2d, v1.2d}, [pA] + add pA, pA, #32 + + OP_rr v16.2d, v0.2d, v8.2d[0] + OP_ii v16.2d, v1.2d, v9.2d[0] + OP_ri v17.2d, v0.2d, v9.2d[0] + OP_ir v17.2d, v1.2d, v8.2d[0] + + OP_rr v20.2d, v0.2d, v8.2d[1] + OP_ii v20.2d, v1.2d, v9.2d[1] + OP_ri v21.2d, v0.2d, v9.2d[1] + OP_ir v21.2d, v1.2d, v8.2d[1] +.endm + +.macro SAVE2x2 + fmov alpha0_R, alpha_save_R + fmov alpha0_I, alpha_save_I + fmov alpha1_R, alpha0_R + fmov alpha1_I, alpha0_I + + mov pCRow1, pCRow0 + + ld2 {v0.2d, v1.2d}, [pCRow1] + fmla v0.2d, v16.2d, alphaV0_R + fmls v0.2d, v17.2d, alphaV0_I + fmla v1.2d, v16.2d, alphaV1_I + fmla v1.2d, v17.2d, alphaV1_R + st2 {v0.2d, v1.2d}, [pCRow1] + + add pCRow1, pCRow1, LDC + + ld2 {v4.2d, v5.2d}, [pCRow1] + fmla v4.2d, v20.2d, alphaV0_R + fmls v4.2d, v21.2d, alphaV0_I + fmla v5.2d, v20.2d, alphaV1_I + fmla v5.2d, v21.2d, alphaV1_R + st2 {v4.2d, v5.2d}, [pCRow1] + + add pCRow0, pCRow0, #32 +.endm + +/******************************************************************************/ + +.macro INIT1x2 + fmov d16, xzr + fmov d17, xzr + fmov d20, xzr + fmov d21, xzr +.endm + +.macro KERNEL1x2_SUB + ld2 {v8.2d, v9.2d}, [pB] + add pB, pB, #32 + ld2 {v0.d, v1.d}[0], [pA] + add pA, pA, #16 + + OP_rr d16, d0, v8.2d[0] + OP_ii d16, d1, v9.2d[0] + OP_ri d17, d0, v9.2d[0] + OP_ir d17, d1, v8.2d[0] + + OP_rr d20, d0, v8.2d[1] + OP_ii d20, d1, v9.2d[1] + OP_ri d21, d0, v9.2d[1] + OP_ir d21, d1, v8.2d[1] +.endm + +.macro SAVE1x2 + fmov alpha0_R, alpha_save_R + fmov alpha0_I, alpha_save_I + fmov alpha1_R, alpha0_R + fmov alpha1_I, alpha0_I + + mov pCRow1, pCRow0 + + ld2 {v0.d, v1.d}[0], [pCRow1] + fmla d0, d16, alphaV0_R + fmls d0, d17, alphaV0_I + fmla d1, d16, alphaV1_I + fmla d1, d17, alphaV1_R + st2 {v0.d, v1.d}[0], [pCRow1] + + add pCRow1, pCRow1, LDC + + ld2 {v4.d, v5.d}[0], [pCRow1] + fmla d4, d20, alphaV0_R + fmls d4, d21, alphaV0_I + fmla d5, d20, alphaV1_I + fmla d5, d21, alphaV1_R + st2 {v4.d, v5.d}[0], [pCRow1] + + add pCRow0, pCRow0, #16 +.endm + +/******************************************************************************/ + +.macro INIT4x1 + fmov d16, xzr + fmov d17, d16 + fmov d18, d16 + fmov d19, d17 +.endm + +.macro KERNEL4x1_SUB + ld2 {v8.d, v9.d}[0], [pB] + add pB, pB, #16 + ld2 {v0.2d, v1.2d}, [pA] + add pA, pA, #32 + ld2 {v2.2d, v3.2d}, [pA] + add pA, pA, 
#32 + + OP_rr v16.2d, v0.2d, v8.d[0] + OP_ii v16.2d, v1.2d, v9.d[0] + OP_ri v17.2d, v0.2d, v9.d[0] + OP_ir v17.2d, v1.2d, v8.d[0] + + OP_rr v18.2d, v2.2d, v8.d[0] + OP_ii v18.2d, v3.2d, v9.d[0] + OP_ri v19.2d, v2.2d, v9.d[0] + OP_ir v19.2d, v3.2d, v8.d[0] +.endm + +.macro SAVE4x1 + fmov alpha0_R, alpha_save_R + fmov alpha0_I, alpha_save_I + fmov alpha1_R, alpha0_R + fmov alpha1_I, alpha0_I + + mov pCRow1, pCRow0 + + ld2 {v0.2d, v1.2d}, [pCRow1] + fmla v0.2d, v16.2d, alphaV0_R + fmls v0.2d, v17.2d, alphaV0_I + fmla v1.2d, v16.2d, alphaV1_I + fmla v1.2d, v17.2d, alphaV1_R + st2 {v0.2d, v1.2d}, [pCRow1] + add pCRow2, pCRow1, #32 + ld2 {v2.2d, v3.2d}, [pCRow2] + fmla v2.2d, v18.2d, alphaV0_R + fmls v2.2d, v19.2d, alphaV0_I + fmla v3.2d, v18.2d, alphaV1_I + fmla v3.2d, v19.2d, alphaV1_R + st2 {v2.2d, v3.2d}, [pCRow2] + + add pCRow0, pCRow0, #64 +.endm + +/******************************************************************************/ + +.macro INIT2x1 + fmov d16, xzr + fmov d17, xzr +.endm + +.macro KERNEL2x1_SUB + ld2 {v8.d, v9.d}[0], [pB] + add pB, pB, #16 + ld2 {v0.2d, v1.2d}, [pA] + add pA, pA, #32 + + OP_rr v16.2d, v0.2d, v8.d[0] + OP_ii v16.2d, v1.2d, v9.d[0] + OP_ri v17.2d, v0.2d, v9.d[0] + OP_ir v17.2d, v1.2d, v8.d[0] +.endm + +.macro SAVE2x1 + fmov alpha0_R, alpha_save_R + fmov alpha0_I, alpha_save_I + fmov alpha1_R, alpha0_R + fmov alpha1_I, alpha0_I + + mov pCRow1, pCRow0 + + ld2 {v0.2d, v1.2d}, [pCRow1] + fmla v0.2d, v16.2d, alphaV0_R + fmls v0.2d, v17.2d, alphaV0_I + fmla v1.2d, v16.2d, alphaV1_I + fmla v1.2d, v17.2d, alphaV1_R + st2 {v0.2d, v1.2d}, [pCRow1] + + add pCRow0, pCRow0, #32 + +.endm + +/******************************************************************************/ + +.macro INIT1x1 + fmov d16, xzr + fmov d17, xzr +.endm + +.macro KERNEL1x1_SUB + ld2 {v8.d, v9.d}[0], [pB] + add pB, pB, #16 + ld2 {v0.d, v1.d}[0], [pA] + add pA, pA, #16 + + OP_rr d16, d0, v8.d[0] + OP_ii d16, d1, v9.d[0] + OP_ri d17, d0, v9.d[0] + OP_ir d17, d1, v8.d[0] +.endm + +.macro SAVE1x1 + fmov alpha0_R, alpha_save_R + fmov alpha0_I, alpha_save_I + fmov alpha1_R, alpha0_R + fmov alpha1_I, alpha0_I + + mov pCRow1, pCRow0 + + ld2 {v0.d, v1.d}[0], [pCRow1] + fmla d0, d16, alphaV0_R + fmls d0, d17, alphaV0_I + fmla d1, d16, alphaV1_I + fmla d1, d17, alphaV1_R + st2 {v0.d, v1.d}[0], [pCRow1] + + add pCRow0, pCRow0, #16 +.endm + +/******************************************************************************* +* End of macro definitions +*******************************************************************************/ + + PROLOGUE + + .align 5 + add sp, sp, #-(11 * 16) + stp d8, d9, [sp, #(0 * 16)] + stp d10, d11, [sp, #(1 * 16)] + stp d12, d13, [sp, #(2 * 16)] + stp d14, d15, [sp, #(3 * 16)] + stp d16, d17, [sp, #(4 * 16)] + stp x18, x19, [sp, #(5 * 16)] + stp x20, x21, [sp, #(6 * 16)] + stp x22, x23, [sp, #(7 * 16)] + stp x24, x25, [sp, #(8 * 16)] + stp x26, x27, [sp, #(9 * 16)] + str x28, [sp, #(10 * 16)] + + fmov alpha_save_R, d0 + fmov alpha_save_I, d1 + + lsl LDC, LDC, #4 // ldc = ldc * 2 * 8 + + mov pB, origPB + + mov counterJ, origN + asr counterJ, counterJ, #2 // J = J / 4 + cmp counterJ, #0 + ble zgemm_kernel_L2_BEGIN + +zgemm_kernel_L4_BEGIN: + mov pCRow0, pC // pCRow0 = C + add pC, pC, LDC, lsl #2 + mov pA, origPA // pA = start of A array + +zgemm_kernel_L4_M4_BEGIN: + + mov counterI, origM + asr counterI, counterI, #2 // counterI = counterI / 4 + cmp counterI, #0 + ble zgemm_kernel_L4_M2_BEGIN + +zgemm_kernel_L4_M4_20: + + mov pB, origPB + asr counterL , origK, #1 // L = K / 2 + cmp counterL 
, #2 // is there at least 4 to do? + blt zgemm_kernel_L4_M4_32 + + KERNEL4x4_I // do one in the K + KERNEL4x4_M2 // do another in the K + + subs counterL, counterL, #2 // subtract 2 + ble zgemm_kernel_L4_M4_22a + .align 5 + +zgemm_kernel_L4_M4_22: + + KERNEL4x4_M1 + KERNEL4x4_M2 + + subs counterL, counterL, #1 + bgt zgemm_kernel_L4_M4_22 + + +zgemm_kernel_L4_M4_22a: + + KERNEL4x4_M1 + KERNEL4x4_E + + b zgemm_kernel_L4_M4_44 + +zgemm_kernel_L4_M4_32: + + tst counterL, #1 + ble zgemm_kernel_L4_M4_40 + + KERNEL4x4_I + KERNEL4x4_E + + b zgemm_kernel_L4_M4_44 + + +zgemm_kernel_L4_M4_40: + + INIT4x4 + +zgemm_kernel_L4_M4_44: + + ands counterL , origK, #1 + ble zgemm_kernel_L4_M4_100 + +zgemm_kernel_L4_M4_46: + KERNEL4x4_SUB + +zgemm_kernel_L4_M4_100: + + SAVE4x4 + +zgemm_kernel_L4_M4_END: + subs counterI, counterI, #1 + bne zgemm_kernel_L4_M4_20 + +zgemm_kernel_L4_M2_BEGIN: + + mov counterI, origM + tst counterI , #3 + ble zgemm_kernel_L4_END + + tst counterI, #2 // counterI = counterI / 2 + ble zgemm_kernel_L4_M1_BEGIN + +zgemm_kernel_L4_M2_20: + + INIT2x4 + + mov pB, origPB + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble zgemm_kernel_L4_M2_40 + +zgemm_kernel_L4_M2_22: + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L4_M2_22 + + +zgemm_kernel_L4_M2_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble zgemm_kernel_L4_M2_100 + +zgemm_kernel_L4_M2_42: + + KERNEL2x4_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L4_M2_42 + +zgemm_kernel_L4_M2_100: + + SAVE2x4 + +zgemm_kernel_L4_M2_END: + + +zgemm_kernel_L4_M1_BEGIN: + + tst counterI, #1 // counterI = counterI % 2 + ble zgemm_kernel_L4_END + +zgemm_kernel_L4_M1_20: + + INIT1x4 + + mov pB, origPB + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble zgemm_kernel_L4_M1_40 + +zgemm_kernel_L4_M1_22: + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L4_M1_22 + + +zgemm_kernel_L4_M1_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble zgemm_kernel_L4_M1_100 + +zgemm_kernel_L4_M1_42: + + KERNEL1x4_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L4_M1_42 + +zgemm_kernel_L4_M1_100: + + SAVE1x4 + + +zgemm_kernel_L4_END: + + lsl temp, origK, #6 + add origPB, origPB, temp // B = B + K * 4 * 8 * 2 + + subs counterJ, counterJ , #1 // j-- + bgt zgemm_kernel_L4_BEGIN + + +/******************************************************************************/ + +zgemm_kernel_L2_BEGIN: // less than 2 left in N direction + + mov counterJ , origN + tst counterJ , #3 + ble zgemm_kernel_L999 + + tst counterJ , #2 + ble zgemm_kernel_L1_BEGIN + + mov pCRow0, pC // pCRow0 = pC + + add pC,pC,LDC, lsl #1 + + mov pA, origPA // pA = A + + + +zgemm_kernel_L2_M4_BEGIN: + + mov counterI, origM + asr counterI, counterI, #2 // counterI = counterI / 4 + cmp counterI,#0 + ble zgemm_kernel_L2_M2_BEGIN + +zgemm_kernel_L2_M4_20: + + INIT4x2 + + mov pB, origPB + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL,#0 + ble zgemm_kernel_L2_M4_40 + .align 5 + +zgemm_kernel_L2_M4_22: + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L2_M4_22 + + +zgemm_kernel_L2_M4_40: + + ands counterL , origK, 
#7 // counterL = counterL % 8 + ble zgemm_kernel_L2_M4_100 + +zgemm_kernel_L2_M4_42: + + KERNEL4x2_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L2_M4_42 + +zgemm_kernel_L2_M4_100: + + SAVE4x2 + +zgemm_kernel_L2_M4_END: + + subs counterI, counterI, #1 + bgt zgemm_kernel_L2_M4_20 + + +zgemm_kernel_L2_M2_BEGIN: + + mov counterI, origM + tst counterI , #3 + ble zgemm_kernel_L2_END + + tst counterI, #2 // counterI = counterI / 2 + ble zgemm_kernel_L2_M1_BEGIN + +zgemm_kernel_L2_M2_20: + + INIT2x2 + + mov pB, origPB + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL,#0 + ble zgemm_kernel_L2_M2_40 + +zgemm_kernel_L2_M2_22: + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L2_M2_22 + + +zgemm_kernel_L2_M2_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble zgemm_kernel_L2_M2_100 + +zgemm_kernel_L2_M2_42: + + KERNEL2x2_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L2_M2_42 + +zgemm_kernel_L2_M2_100: + + SAVE2x2 + +zgemm_kernel_L2_M2_END: + + +zgemm_kernel_L2_M1_BEGIN: + + tst counterI, #1 // counterI = counterI % 2 + ble zgemm_kernel_L2_END + +zgemm_kernel_L2_M1_20: + + INIT1x2 + + mov pB, origPB + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL, #0 + ble zgemm_kernel_L2_M1_40 + +zgemm_kernel_L2_M1_22: + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L2_M1_22 + + +zgemm_kernel_L2_M1_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble zgemm_kernel_L2_M1_100 + +zgemm_kernel_L2_M1_42: + + KERNEL1x2_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L2_M1_42 + +zgemm_kernel_L2_M1_100: + + SAVE1x2 + + +zgemm_kernel_L2_END: + lsl temp, origK, #5 + add origPB, origPB, temp // B = B + K * 2 * 8 * 2 + +/******************************************************************************/ + +zgemm_kernel_L1_BEGIN: + + mov counterJ , origN + tst counterJ , #1 + ble zgemm_kernel_L999 // done + + + mov pCRow0, pC // pCRow0 = C + add pC , pC , LDC // Update pC to point to next + + mov pA, origPA // pA = A + + + +zgemm_kernel_L1_M4_BEGIN: + + mov counterI, origM + asr counterI, counterI, #2 // counterI = counterI / 4 + cmp counterI, #0 + ble zgemm_kernel_L1_M2_BEGIN + +zgemm_kernel_L1_M4_20: + + INIT4x1 + + mov pB, origPB + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble zgemm_kernel_L1_M4_40 + .align 5 + +zgemm_kernel_L1_M4_22: + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L1_M4_22 + + +zgemm_kernel_L1_M4_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble zgemm_kernel_L1_M4_100 + +zgemm_kernel_L1_M4_42: + + KERNEL4x1_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L1_M4_42 + +zgemm_kernel_L1_M4_100: + + SAVE4x1 + +zgemm_kernel_L1_M4_END: + + subs counterI, counterI, #1 + bgt zgemm_kernel_L1_M4_20 + + +zgemm_kernel_L1_M2_BEGIN: + + mov counterI, origM + tst counterI , #3 + ble zgemm_kernel_L1_END + + tst counterI, #2 // counterI = counterI / 2 + ble zgemm_kernel_L1_M1_BEGIN + +zgemm_kernel_L1_M2_20: + + INIT2x1 + + mov pB, origPB + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble zgemm_kernel_L1_M2_40 + +zgemm_kernel_L1_M2_22: + + KERNEL2x1_SUB + 
KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L1_M2_22 + + +zgemm_kernel_L1_M2_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble zgemm_kernel_L1_M2_100 + +zgemm_kernel_L1_M2_42: + + KERNEL2x1_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L1_M2_42 + +zgemm_kernel_L1_M2_100: + + SAVE2x1 + +zgemm_kernel_L1_M2_END: + + +zgemm_kernel_L1_M1_BEGIN: + + tst counterI, #1 // counterI = counterI % 2 + ble zgemm_kernel_L1_END + +zgemm_kernel_L1_M1_20: + + INIT1x1 + + mov pB, origPB + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble zgemm_kernel_L1_M1_40 + +zgemm_kernel_L1_M1_22: + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L1_M1_22 + + +zgemm_kernel_L1_M1_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble zgemm_kernel_L1_M1_100 + +zgemm_kernel_L1_M1_42: + + KERNEL1x1_SUB + + subs counterL, counterL, #1 + bgt zgemm_kernel_L1_M1_42 + +zgemm_kernel_L1_M1_100: + + SAVE1x1 + + +zgemm_kernel_L1_END: + + +zgemm_kernel_L999: + mov x0, #0 // set return value + ldp d8, d9, [sp, #(0 * 16)] + ldp d10, d11, [sp, #(1 * 16)] + ldp d12, d13, [sp, #(2 * 16)] + ldp d14, d15, [sp, #(3 * 16)] + ldp d16, d17, [sp, #(4 * 16)] + ldp x18, x19, [sp, #(5 * 16)] + ldp x20, x21, [sp, #(6 * 16)] + ldp x22, x23, [sp, #(7 * 16)] + ldp x24, x25, [sp, #(8 * 16)] + ldp x26, x27, [sp, #(9 * 16)] + ldr x28, [sp, #(10 * 16)] + add sp, sp, #(11*16) + ret + + EPILOGUE + diff --git a/param.h b/param.h index cedb6d2..2fe3b8a 100644 --- a/param.h +++ b/param.h @@ -2235,23 +2235,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define CGEMM_DEFAULT_UNROLL_M 4 #define CGEMM_DEFAULT_UNROLL_N 4 -#define ZGEMM_DEFAULT_UNROLL_M 2 -#define ZGEMM_DEFAULT_UNROLL_N 2 +#define ZGEMM_DEFAULT_UNROLL_M 4 +#define ZGEMM_DEFAULT_UNROLL_N 4 #define SGEMM_DEFAULT_P 128 #define DGEMM_DEFAULT_P 256 #define CGEMM_DEFAULT_P 256 -#define ZGEMM_DEFAULT_P 64 +#define ZGEMM_DEFAULT_P 128 #define SGEMM_DEFAULT_Q 240 #define DGEMM_DEFAULT_Q 1024 #define CGEMM_DEFAULT_Q 1024 -#define ZGEMM_DEFAULT_Q 120 +#define ZGEMM_DEFAULT_Q 512 #define SGEMM_DEFAULT_R 12288 #define DGEMM_DEFAULT_R 4096 #define CGEMM_DEFAULT_R 4096 -#define ZGEMM_DEFAULT_R 4096 +#define ZGEMM_DEFAULT_R 2048 #define SYMV_P 16 -- 2.7.4
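
A note on the arithmetic, for reviewers: the conjugation-dependent OP_rr/OP_ii/OP_ri/OP_ir
macros pick fmla or fmls so that each micro-tile entry accumulates op(A)*op(B) with the
correct signs, and the SAVE* macros then apply the complex alpha to the accumulators before
storing back to C. The scalar C sketch below is a minimal model of that arithmetic
(illustrative only; zgemm_micro_ref is not an OpenBLAS symbol — the kernel performs the same
operations on 2-wide NEON double vectors, with A and B pre-packed by zgemm_ncopy_4.c /
zgemm_tcopy_4.c and ld2/st2 de-interleaving real and imaginary parts):

    #include <stddef.h>

    /* Sign pattern selected by the OP_* defines in the kernel:
     *   case           OP_rr  OP_ii  OP_ri  OP_ir    computes
     *   NN/NT/TN/TT    fmla   fmls   fmla   fmla     a * b
     *   NR/NC/TR/TC    fmla   fmla   fmls   fmla     a * conj(b)
     *   RN/RT/CN/CT    fmla   fmla   fmla   fmls     conj(a) * b
     *   RR/RC/CR/CC    fmla   fmls   fmls   fmls     conj(a) * conj(b)
     * This sketch shows the NN case. */
    static void zgemm_micro_ref(size_t K,
                                const double *A,   /* packed (a_r, a_i) pairs */
                                const double *B,   /* packed (b_r, b_i) pairs */
                                double alpha_r, double alpha_i,
                                double *C)         /* one (c_r, c_i) pair */
    {
        double acc_r = 0.0, acc_i = 0.0;

        for (size_t k = 0; k < K; k++) {
            double a_r = A[2 * k], a_i = A[2 * k + 1];
            double b_r = B[2 * k], b_i = B[2 * k + 1];

            acc_r += a_r * b_r;   /* OP_rr */
            acc_r -= a_i * b_i;   /* OP_ii: fmls in the NN case */
            acc_i += a_r * b_i;   /* OP_ri: fmla in the NN case */
            acc_i += a_i * b_r;   /* OP_ir: fmla in the NN case */
        }

        /* SAVE*: C += alpha * acc with complex alpha; C is assumed to have
         * been scaled by beta before the kernel runs. */
        C[0] += acc_r * alpha_r - acc_i * alpha_i;
        C[1] += acc_r * alpha_i + acc_i * alpha_r;
    }

The KERNEL4x4_I/_M1/_M2/_E split is a software pipeline over the main K loop: _I issues the
first round of products with fmul (plus fneg of the imaginary accumulators in the conjugated
cases, where OP_ri is fmls and there is no prior value to subtract from) and preloads the
second operand register set; _M1 computes from v0-v3 and v8-v11 while loading v4-v7 and
v12-v15 for the next iteration, _M2 does the converse; _E drains the pipeline without issuing
further loads.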