DYNAMIC_CORE = KATMAI COPPERMINE NORTHWOOD PRESCOTT BANIAS \
CORE2 PENRYN DUNNINGTON NEHALEM ATHLON OPTERON OPTERON_SSE3 BARCELONA BOBCAT ATOM NANO
ifneq ($(NO_AVX), 1)
-DYNAMIC_CORE += SANDYBRIDGE BULLDOZER PILEDRIVER
+DYNAMIC_CORE += SANDYBRIDGE BULLDOZER PILEDRIVER HASWELL
endif
endif
ifeq ($(ARCH), x86_64)
DYNAMIC_CORE = PRESCOTT CORE2 PENRYN DUNNINGTON NEHALEM OPTERON OPTERON_SSE3 BARCELONA BOBCAT ATOM NANO
ifneq ($(NO_AVX), 1)
-DYNAMIC_CORE += SANDYBRIDGE BULLDOZER PILEDRIVER
+DYNAMIC_CORE += SANDYBRIDGE BULLDOZER PILEDRIVER HASWELL
endif
endif
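# HASWELL is only added to DYNAMIC_CORE when AVX is available (NO_AVX != 1),
# since its kernels assume AVX and FMA3 support in both the CPU and the toolchain.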
#define CORE_BOBCAT 21
#define CORE_BULLDOZER 22
#define CORE_PILEDRIVER 23
-#define CORE_HASWELL CORE_SANDYBRIDGE
+#define CORE_HASWELL 24
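+/* HASWELL now has its own core id rather than aliasing CORE_SANDYBRIDGE. */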
#define HAVE_SSE (1 << 0)
#define HAVE_SSE2 (1 << 1)
#define CPUTYPE_BOBCAT 45
#define CPUTYPE_BULLDOZER 46
#define CPUTYPE_PILEDRIVER 47
-// this define is because BLAS doesn't have haswell specific optimizations yet
-#define CPUTYPE_HASWELL CPUTYPE_SANDYBRIDGE
+#define CPUTYPE_HASWELL 48
#endif
"BOBCAT",
"BULLDOZER",
"PILEDRIVER",
+ "HASWELL",
};
static char *lowercpuname[] = {
"bobcat",
"bulldozer",
"piledriver",
+ "haswell",
};
static char *corename[] = {
"BOBCAT",
"BULLDOZER",
"PILEDRIVER",
+ "HASWELL",
};
static char *corename_lower[] = {
"bobcat",
"bulldozer",
"piledriver",
+ "haswell",
};
for(jjs = js; jjs < js + min_j; jjs += min_jj){
min_jj = min_j + js - jjs;
-#if defined(BULLDOZER) && defined(ARCH_X86_64) && !defined(XDOUBLE) && !defined(COMPLEX)
- if (min_jj >= 12*GEMM_UNROLL_N) min_jj = 12*GEMM_UNROLL_N;
- else
+#if defined(HASWELL) && defined(ARCH_X86_64) && !defined(XDOUBLE) && !defined(COMPLEX)
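+ /* Cap min_jj at 6x or 3x GEMM_UNROLL_N so each pass over the packed
+    A panel covers more columns of B before falling back to 1x. */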
if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N;
else
if (min_jj >= 3*GEMM_UNROLL_N) min_jj = 3*GEMM_UNROLL_N;
else
if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N;
+
#else
if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N;
for(jjs = xxx; jjs < MIN(n_to, xxx + div_n); jjs += min_jj){
min_jj = MIN(n_to, xxx + div_n) - jjs;
-#if defined(BULLDOZER) && defined(ARCH_X86_64) && !defined(XDOUBLE) && !defined(COMPLEX)
- if (min_jj >= 12*GEMM_UNROLL_N) min_jj = 12*GEMM_UNROLL_N;
- else
+#if defined(HASWELL) && defined(ARCH_X86_64) && !defined(XDOUBLE) && !defined(COMPLEX)
if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N;
else
if (min_jj >= 3*GEMM_UNROLL_N) min_jj = 3*GEMM_UNROLL_N;
extern gotoblas_t gotoblas_SANDYBRIDGE;
extern gotoblas_t gotoblas_BULLDOZER;
extern gotoblas_t gotoblas_PILEDRIVER;
+extern gotoblas_t gotoblas_HASWELL;
#else
//Use NEHALEM kernels for sandy bridge
#define gotoblas_SANDYBRIDGE gotoblas_NEHALEM
+#define gotoblas_HASWELL gotoblas_NEHALEM
#define gotoblas_BULLDOZER gotoblas_BARCELONA
#define gotoblas_PILEDRIVER gotoblas_BARCELONA
#endif
-//Use sandy bridge kernels for haswell.
-#define gotoblas_HASWELL gotoblas_SANDYBRIDGE
+
#define VENDOR_INTEL 1
#define VENDOR_AMD 2
"Bobcat",
"Bulldozer",
"Piledriver",
+ "Haswell",
};
char *gotoblas_corename(void) {
if (gotoblas == &gotoblas_SANDYBRIDGE) return corename[16];
if (gotoblas == &gotoblas_BOBCAT) return corename[17];
if (gotoblas == &gotoblas_BULLDOZER) return corename[18];
- if (gotoblas == &gotoblas_PILEDRIVER) return corename[19];
+ if (gotoblas == &gotoblas_PILEDRIVER) return corename[19];
+ if (gotoblas == &gotoblas_HASWELL) return corename[20];
return corename[0];
}
#define CORENAME "SANDYBRIDGE"
#endif
+#ifdef FORCE_HASWELL
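+/* Selected via "make TARGET=HASWELL", like the other FORCE_* blocks in this file. */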
+#define FORCE
+#define FORCE_INTEL
+#define ARCHITECTURE "X86"
+#define SUBARCHITECTURE "HASWELL"
+#define ARCHCONFIG "-DHASWELL " \
+ "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 " \
+ "-DL2_SIZE=262144 -DL2_LINESIZE=64 " \
+ "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \
+ "-DHAVE_CMOV -DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 -DHAVE_AVX " \
+ "-DFMA3"
+#define LIBNAME "haswell"
+#define CORENAME "HASWELL"
+#endif
+
#ifdef FORCE_ATOM
#define FORCE
#define FORCE_INTEL
--- /dev/null
+include $(KERNELDIR)/KERNEL.PENRYN
#define PREFETCHSIZE (8 * 21 + 4)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHSIZE (8 * 21 + 4)
#endif
#define PREFETCHSIZE (8 * 21 + 4)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHSIZE (8 * 21 + 4)
#endif
#define PREFETCHSIZE (8 * 21 + 4)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHSIZE (8 * 21 + 4)
#endif
#define PREFETCHSIZE (8 * 21 + 4)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHSIZE (8 * 21 + 4)
#endif
#define PREFETCHSIZE (8 * 21 + 4)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHSIZE (8 * 21 + 4)
#endif
#define PREFETCHSIZE (8 * 21 + 4)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHSIZE (8 * 21 + 4)
#endif
#define PREFETCHSIZE 84
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht1
#define PREFETCHSIZE 84
#endif
#define PREFETCHSIZE 84
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht1
#define PREFETCHSIZE 84
#endif
#define PREFETCHSIZE 84
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht1
#define PREFETCHSIZE 84
#endif
#define PREFETCHSIZE 84
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht1
#define PREFETCHSIZE 84
#endif
#define PREFETCHSIZE 84
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht1
#define PREFETCHSIZE 84
#endif
--- /dev/null
+SGEMMKERNEL = sgemm_kernel_16x4_haswell.S
+SGEMMINCOPY = ../generic/gemm_ncopy_16.c
+SGEMMITCOPY = ../generic/gemm_tcopy_16.c
+SGEMMONCOPY = ../generic/gemm_ncopy_4.c
+SGEMMOTCOPY = ../generic/gemm_tcopy_4.c
+SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX)
+SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX)
+SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX)
+SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX)
+
+DGEMMKERNEL = dgemm_kernel_4x4_haswell.S
+DGEMMINCOPY =
+DGEMMITCOPY =
+DGEMMONCOPY = ../generic/gemm_ncopy_4.c
+DGEMMOTCOPY = ../generic/gemm_tcopy_4.c
+DGEMMINCOPYOBJ =
+DGEMMITCOPYOBJ =
+DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX)
+DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX)
+
+CGEMMKERNEL = cgemm_kernel_8x2_haswell.S
+CGEMMINCOPY = ../generic/zgemm_ncopy_8.c
+CGEMMITCOPY = ../generic/zgemm_tcopy_8.c
+CGEMMONCOPY = ../generic/zgemm_ncopy_2.c
+CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c
+CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX)
+CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX)
+CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX)
+CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX)
+
+ZGEMMKERNEL = zgemm_kernel_4x2_haswell.S
+ZGEMMINCOPY = ../generic/zgemm_ncopy_4.c
+ZGEMMITCOPY = ../generic/zgemm_tcopy_4.c
+ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c
+ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c
+ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX)
+ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX)
+ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX)
+ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX)
+
+STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
+STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
+STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
+STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
+
+DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
+DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
+DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
+DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
+
+CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
+CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
+CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
+CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
+
+ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
+ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
+ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
+ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
+
+CGEMM3MKERNEL = zgemm3m_kernel_4x8_nehalem.S
+ZGEMM3MKERNEL = zgemm3m_kernel_2x8_nehalem.S
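+# No Haswell-specific GEMM3M kernels yet; CGEMM3M/ZGEMM3M reuse the Nehalem ones.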
+
--- /dev/null
+/*********************************************************************************\r
+Copyright (c) 2013, The OpenBLAS Project\r
+All rights reserved.\r
+Redistribution and use in source and binary forms, with or without\r
+modification, are permitted provided that the following conditions are\r
+met:\r
+1. Redistributions of source code must retain the above copyright\r
+notice, this list of conditions and the following disclaimer.\r
+2. Redistributions in binary form must reproduce the above copyright\r
+notice, this list of conditions and the following disclaimer in\r
+the documentation and/or other materials provided with the\r
+distribution.\r
+3. Neither the name of the OpenBLAS project nor the names of\r
+its contributors may be used to endorse or promote products\r
+derived from this software without specific prior written permission.\r
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\r
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE\r
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\r
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\r
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\r
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\r
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+**********************************************************************************/\r
+\r
+/*********************************************************************\r
+* 2013/11/13 Saar\r
+* BLASTEST : OK\r
+* CTEST : OK\r
+* TEST : OK\r
+*\r
+* 2013/10/28 Saar\r
+* Parameter:\r
+* CGEMM_DEFAULT_UNROLL_N 2\r
+* CGEMM_DEFAULT_UNROLL_M 8\r
+* CGEMM_DEFAULT_P 384\r
+* CGEMM_DEFAULT_Q 192\r
+* A_PR1 512\r
+* B_PR1 512\r
+*\r
+* Performance at 6912x6912x6912:\r
+* 1 thread: 84 GFLOPS (SANDYBRIDGE: 60) (MKL: 86)\r
+* 2 threads: 153 GFLOPS (SANDYBRIDGE: 114) (MKL: 155)\r
+* 3 threads: 224 GFLOPS (SANDYBRIDGE: 162) (MKL: 222)\r
+* 4 threads: 278 GFLOPS (SANDYBRIDGE: 223) (MKL: 279)\r
+*\r
+*\r
+*********************************************************************/\r
+\r
+\r
+\r
+#define ASSEMBLER\r
+#include "common.h"\r
+ \r
+#define OLD_M %rdi\r
+#define OLD_N %rsi\r
+#define M %r13\r
+#define J %r14\r
+#define OLD_K %rdx\r
+\r
+#define A %rcx\r
+#define B %r8\r
+#define C %r9\r
+#define LDC %r10\r
+ \r
+#define I %r11\r
+#define AO %rdi\r
+#define BO %rsi\r
+#define CO1 %r15\r
+#define K %r12\r
+#define BI %rbp\r
+#define SP %rbx\r
+\r
+#define BO1 %rdi\r
+#define BO2 %r15\r
+\r
+#ifndef WINDOWS_ABI\r
+\r
+#define STACKSIZE 96\r
+\r
+#else\r
+\r
+#define STACKSIZE 320\r
+\r
+#define OLD_ALPHA_I 40 + STACKSIZE(%rsp)\r
+#define OLD_A 48 + STACKSIZE(%rsp)\r
+#define OLD_B 56 + STACKSIZE(%rsp)\r
+#define OLD_C 64 + STACKSIZE(%rsp)\r
+#define OLD_LDC 72 + STACKSIZE(%rsp)\r
+#define OLD_OFFSET 80 + STACKSIZE(%rsp)\r
+\r
+#endif\r
+\r
+#define L_BUFFER_SIZE 512*8*4\r
+#define LB2_OFFSET 512*8*2\r
+\r
+#define Ndiv6 24(%rsp)\r
+#define Nmod6 32(%rsp)\r
+#define N 40(%rsp)\r
+#define ALPHA_R 48(%rsp)\r
+#define ALPHA_I 56(%rsp)\r
+#define OFFSET 64(%rsp)\r
+#define KK 72(%rsp)\r
+#define KKK 80(%rsp)\r
+#define BUFFER1 128(%rsp)\r
+#define BUFFER2 LB2_OFFSET+128(%rsp)\r
+\r
+#if defined(OS_WINDOWS)\r
+#if L_BUFFER_SIZE > 16384\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 4(%rsp);\\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 12288\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 8192\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 4096\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 1(%rsp);\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
+\r
+\r
+#if defined(BULLDOZER)\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT)\r
+\r
+#define VFMADDPS_R( y0,y1,y2 ) vfmaddps y0,y1,y2,y0\r
+\r
+#define VFMADDPS_I( y0,y1,y2 ) vfmaddps y0,y1,y2,y0\r
+\r
+#elif defined(RN) || defined(RT) || defined(CN) || defined(CT)\r
+\r
+#define VFMADDPS_R( y0,y1,y2 ) vfnmaddps y0,y1,y2,y0\r
+\r
+#define VFMADDPS_I( y0,y1,y2 ) vfmaddps y0,y1,y2,y0\r
+\r
+#elif defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+#define VFMADDPS_R( y0,y1,y2 ) vfmaddps y0,y1,y2,y0\r
+\r
+#define VFMADDPS_I( y0,y1,y2 ) vfnmaddps y0,y1,y2,y0\r
+\r
+#else\r
+\r
+#define VFMADDPS_R( y0,y1,y2 ) vfnmaddps y0,y1,y2,y0\r
+\r
+#define VFMADDPS_I( y0,y1,y2 ) vfnmaddps y0,y1,y2,y0\r
+\r
+#endif\r
+\r
+#else\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT)\r
+\r
+#define VFMADDPS_R( y0,y1,y2 ) vfmadd231ps y1,y2,y0\r
+\r
+#define VFMADDPS_I( y0,y1,y2 ) vfmadd231ps y1,y2,y0\r
+\r
+#elif defined(RN) || defined(RT) || defined(CN) || defined(CT)\r
+\r
+#define VFMADDPS_R( y0,y1,y2 ) vfnmadd231ps y1,y2,y0\r
+\r
+#define VFMADDPS_I( y0,y1,y2 ) vfmadd231ps y1,y2,y0\r
+\r
+#elif defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+#define VFMADDPS_R( y0,y1,y2 ) vfmadd231ps y1,y2,y0\r
+\r
+#define VFMADDPS_I( y0,y1,y2 ) vfnmadd231ps y1,y2,y0\r
+\r
+#else\r
+\r
+#define VFMADDPS_R( y0,y1,y2 ) vfnmadd231ps y1,y2,y0\r
+\r
+#define VFMADDPS_I( y0,y1,y2 ) vfnmadd231ps y1,y2,y0\r
+\r
+#endif\r
+\r
+#endif\r
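+// Both expansions compute y0 += y1 * y2 elementwise. Bulldozer uses the\r
+// 4-operand FMA4 encoding (vfmaddps); the non-BULLDOZER path used by\r
+// Haswell uses the 3-operand FMA3 encoding (vfmadd231ps). The vfnm*\r
+// forms negate the product, giving the sign pattern the complex\r
+// multiply needs for each conjugate/transpose case.\r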
+\r
+\r
+#define A_PR1 512\r
+#define B_PR1 512\r
+\r
+/***************************************************************************************************************************/\r
+\r
+.macro KERNEL8x2_SUB\r
+\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vbroadcastss -8 * SIZE(BO, BI, SIZE), %ymm4\r
+ VFMADDPS_R( %ymm8,%ymm4,%ymm0 )\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm1\r
+ VFMADDPS_R( %ymm12,%ymm4,%ymm1 )\r
+ vbroadcastss -7 * SIZE(BO, BI, SIZE), %ymm5\r
+ VFMADDPS_I( %ymm9,%ymm5,%ymm0 )\r
+ VFMADDPS_I( %ymm13,%ymm5,%ymm1 )\r
+ vbroadcastss -6 * SIZE(BO, BI, SIZE), %ymm6\r
+ VFMADDPS_R( %ymm10,%ymm6,%ymm0 )\r
+ VFMADDPS_R( %ymm14,%ymm6,%ymm1 )\r
+ vbroadcastss -5 * SIZE(BO, BI, SIZE), %ymm7\r
+ VFMADDPS_I( %ymm11,%ymm7,%ymm0 )\r
+ VFMADDPS_I( %ymm15,%ymm7,%ymm1 )\r
+ addq $4 , BI \r
+ addq $16, %rax \r
+.endm\r
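+// One k-step of the 8x2 block: ymm0/ymm1 hold 8 complex floats of A,\r
+// ymm4-ymm7 broadcast the real and imaginary parts of the two B values,\r
+// and ymm8-ymm15 accumulate the real/imaginary partial products.\r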
+\r
+.macro SAVE8x2\r
+\r
+ vbroadcastss ALPHA_R, %ymm0\r
+ vbroadcastss ALPHA_I, %ymm1\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %ymm9 , %ymm9, %ymm9\r
+ vshufps $0xb1, %ymm11, %ymm11, %ymm11\r
+ vshufps $0xb1, %ymm13, %ymm13, %ymm13\r
+ vshufps $0xb1, %ymm15, %ymm15, %ymm15\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %ymm9, %ymm8 , %ymm8\r
+ vaddsubps %ymm11,%ymm10, %ymm10\r
+ vaddsubps %ymm13,%ymm12, %ymm12\r
+ vaddsubps %ymm15,%ymm14, %ymm14\r
+\r
+ vshufps $0xb1, %ymm8 , %ymm8, %ymm9\r
+ vshufps $0xb1, %ymm10, %ymm10, %ymm11\r
+ vshufps $0xb1, %ymm12, %ymm12, %ymm13\r
+ vshufps $0xb1, %ymm14, %ymm14, %ymm15\r
+\r
+#else\r
+ vaddsubps %ymm8, %ymm9 ,%ymm9\r
+ vaddsubps %ymm10, %ymm11,%ymm11\r
+ vaddsubps %ymm12, %ymm13,%ymm13\r
+ vaddsubps %ymm14, %ymm15,%ymm15\r
+\r
+ vmovaps %ymm9, %ymm8\r
+ vmovaps %ymm11, %ymm10\r
+ vmovaps %ymm13, %ymm12\r
+ vmovaps %ymm15, %ymm14\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %ymm9 , %ymm9, %ymm9\r
+ vshufps $0xb1, %ymm11, %ymm11, %ymm11\r
+ vshufps $0xb1, %ymm13, %ymm13, %ymm13\r
+ vshufps $0xb1, %ymm15, %ymm15, %ymm15\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %ymm8 , %ymm0, %ymm8\r
+ vmulps %ymm10, %ymm0, %ymm10\r
+ vmulps %ymm12, %ymm0, %ymm12\r
+ vmulps %ymm14, %ymm0, %ymm14\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %ymm9 , %ymm1, %ymm9\r
+ vmulps %ymm11, %ymm1, %ymm11\r
+ vmulps %ymm13, %ymm1, %ymm13\r
+ vmulps %ymm15, %ymm1, %ymm15\r
+\r
+ vaddsubps %ymm9, %ymm8 , %ymm8\r
+ vaddsubps %ymm11,%ymm10, %ymm10\r
+ vaddsubps %ymm13,%ymm12, %ymm12\r
+ vaddsubps %ymm15,%ymm14, %ymm14\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %ymm8 , %ymm8\r
+ vaddps 8 * SIZE(CO1), %ymm12, %ymm12\r
+\r
+ vaddps (CO1, LDC), %ymm10, %ymm10\r
+ vaddps 8 * SIZE(CO1, LDC), %ymm14, %ymm14\r
+\r
+#endif\r
+\r
+ vmovups %ymm8 , (CO1)\r
+ vmovups %ymm12 , 8 * SIZE(CO1)\r
+\r
+ vmovups %ymm10 , (CO1, LDC)\r
+ vmovups %ymm14 , 8 * SIZE(CO1, LDC)\r
+\r
+ prefetcht0 64(CO1)\r
+ prefetcht0 64(CO1, LDC)\r
+\r
+.endm\r
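+// SAVE8x2 combines the partial products into the complex result:\r
+// vshufps $0xb1 swaps each (re,im) pair, then vaddsubps subtracts in the\r
+// even (real) lanes and adds in the odd (imaginary) lanes, yielding\r
+// re = a_re*b_re - a_im*b_im and im = a_im*b_re + a_re*b_im before the\r
+// alpha scaling and the store to C.\r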
+\r
+/***************************************************************************************************************************/\r
+\r
+.macro KERNEL4x2_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -8 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPS_R( %xmm8,%xmm4,%xmm0 )\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %xmm1\r
+ VFMADDPS_R( %xmm12,%xmm4,%xmm1 )\r
+ vbroadcastss -7 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPS_I( %xmm9,%xmm5,%xmm0 )\r
+ VFMADDPS_I( %xmm13,%xmm5,%xmm1 )\r
+ vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm6\r
+ VFMADDPS_R( %xmm10,%xmm6,%xmm0 )\r
+ VFMADDPS_R( %xmm14,%xmm6,%xmm1 )\r
+ vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm7\r
+ VFMADDPS_I( %xmm11,%xmm7,%xmm0 )\r
+ VFMADDPS_I( %xmm15,%xmm7,%xmm1 )\r
+ addq $4, BI \r
+ addq $8, %rax \r
+.endm\r
+\r
+.macro SAVE4x2\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $0xb1, %xmm11, %xmm11, %xmm11\r
+ vshufps $0xb1, %xmm13, %xmm13, %xmm13\r
+ vshufps $0xb1, %xmm15, %xmm15, %xmm15\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+ vaddsubps %xmm13,%xmm12, %xmm12\r
+ vaddsubps %xmm15,%xmm14, %xmm14\r
+\r
+ vshufps $0xb1, %xmm8 , %xmm8, %xmm9\r
+ vshufps $0xb1, %xmm10, %xmm10, %xmm11\r
+ vshufps $0xb1, %xmm12, %xmm12, %xmm13\r
+ vshufps $0xb1, %xmm14, %xmm14, %xmm15\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+ vaddsubps %xmm10, %xmm11,%xmm11\r
+ vaddsubps %xmm12, %xmm13,%xmm13\r
+ vaddsubps %xmm14, %xmm15,%xmm15\r
+\r
+ vmovaps %xmm9, %xmm8\r
+ vmovaps %xmm11, %xmm10\r
+ vmovaps %xmm13, %xmm12\r
+ vmovaps %xmm15, %xmm14\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $0xb1, %xmm11, %xmm11, %xmm11\r
+ vshufps $0xb1, %xmm13, %xmm13, %xmm13\r
+ vshufps $0xb1, %xmm15, %xmm15, %xmm15\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+ vmulps %xmm10, %xmm0, %xmm10\r
+ vmulps %xmm12, %xmm0, %xmm12\r
+ vmulps %xmm14, %xmm0, %xmm14\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+ vmulps %xmm11, %xmm1, %xmm11\r
+ vmulps %xmm13, %xmm1, %xmm13\r
+ vmulps %xmm15, %xmm1, %xmm15\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+ vaddsubps %xmm13,%xmm12, %xmm12\r
+ vaddsubps %xmm15,%xmm14, %xmm14\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %xmm8 , %xmm8\r
+ vaddps 4 * SIZE(CO1), %xmm12, %xmm12\r
+\r
+ vaddps (CO1, LDC), %xmm10, %xmm10\r
+ vaddps 4 * SIZE(CO1, LDC), %xmm14, %xmm14\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+ vmovups %xmm12 , 4 * SIZE(CO1)\r
+\r
+ vmovups %xmm10 , (CO1, LDC)\r
+ vmovups %xmm14 , 4 * SIZE(CO1, LDC)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
+\r
+.macro KERNEL2x2_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -8 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPS_R( %xmm8,%xmm4,%xmm0 )\r
+ vbroadcastss -7 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPS_I( %xmm9,%xmm5,%xmm0 )\r
+ vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm6\r
+ VFMADDPS_R( %xmm10,%xmm6,%xmm0 )\r
+ vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm7\r
+ VFMADDPS_I( %xmm11,%xmm7,%xmm0 )\r
+ addq $4, BI \r
+ addq $4, %rax \r
+.endm\r
+\r
+.macro SAVE2x2\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $0xb1, %xmm11, %xmm11, %xmm11\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+\r
+ vshufps $0xb1, %xmm8 , %xmm8, %xmm9\r
+ vshufps $0xb1, %xmm10, %xmm10, %xmm11\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+ vaddsubps %xmm10, %xmm11,%xmm11\r
+\r
+ vmovaps %xmm9, %xmm8\r
+ vmovaps %xmm11, %xmm10\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $0xb1, %xmm11, %xmm11, %xmm11\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+ vmulps %xmm10, %xmm0, %xmm10\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+ vmulps %xmm11, %xmm1, %xmm11\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %xmm8 , %xmm8\r
+\r
+ vaddps (CO1, LDC), %xmm10, %xmm10\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+\r
+ vmovups %xmm10 , (CO1, LDC)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
+\r
+.macro KERNEL1x2_SUB\r
+ vmovsd -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -8 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPS_R( %xmm8,%xmm4,%xmm0 )\r
+ vbroadcastss -7 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPS_I( %xmm9,%xmm5,%xmm0 )\r
+ vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm6\r
+ VFMADDPS_R( %xmm10,%xmm6,%xmm0 )\r
+ vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm7\r
+ VFMADDPS_I( %xmm11,%xmm7,%xmm0 )\r
+ addq $4, BI \r
+ addq $2, %rax \r
+.endm\r
+\r
+.macro SAVE1x2\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $0xb1, %xmm11, %xmm11, %xmm11\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+\r
+ vshufps $0xb1, %xmm8 , %xmm8, %xmm9\r
+ vshufps $0xb1, %xmm10, %xmm10, %xmm11\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+ vaddsubps %xmm10, %xmm11,%xmm11\r
+\r
+ vmovaps %xmm9, %xmm8\r
+ vmovaps %xmm11, %xmm10\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $0xb1, %xmm11, %xmm11, %xmm11\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+ vmulps %xmm10, %xmm0, %xmm10\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+ vmulps %xmm11, %xmm1, %xmm11\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vmovsd (CO1), %xmm14\r
+ vaddps %xmm14, %xmm8 , %xmm8\r
+\r
+ vmovsd (CO1, LDC), %xmm15\r
+ vaddps %xmm15, %xmm10, %xmm10\r
+\r
+#endif\r
+\r
+ vmovsd %xmm8 , (CO1)\r
+ vmovsd %xmm10 , (CO1, LDC)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
+\r
+.macro KERNEL8x1_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm1\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %ymm4\r
+ VFMADDPS_R( %ymm8,%ymm4,%ymm0 )\r
+ VFMADDPS_R( %ymm12,%ymm4,%ymm1 )\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %ymm5\r
+ VFMADDPS_I( %ymm9,%ymm5,%ymm0 )\r
+ VFMADDPS_I( %ymm13,%ymm5,%ymm1 )\r
+ addq $2 , BI \r
+ addq $16, %rax \r
+.endm\r
+\r
+.macro SAVE8x1\r
+\r
+ vbroadcastss ALPHA_R, %ymm0\r
+ vbroadcastss ALPHA_I, %ymm1\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %ymm9 , %ymm9, %ymm9\r
+ vshufps $0xb1, %ymm13, %ymm13, %ymm13\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %ymm9, %ymm8 , %ymm8\r
+ vaddsubps %ymm13,%ymm12, %ymm12\r
+\r
+ vshufps $0xb1, %ymm8 , %ymm8, %ymm9\r
+ vshufps $0xb1, %ymm12, %ymm12, %ymm13\r
+\r
+#else\r
+ vaddsubps %ymm8, %ymm9 ,%ymm9\r
+ vaddsubps %ymm12, %ymm13,%ymm13\r
+\r
+ vmovaps %ymm9, %ymm8\r
+ vmovaps %ymm13, %ymm12\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %ymm9 , %ymm9, %ymm9\r
+ vshufps $0xb1, %ymm13, %ymm13, %ymm13\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %ymm8 , %ymm0, %ymm8\r
+ vmulps %ymm12, %ymm0, %ymm12\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %ymm9 , %ymm1, %ymm9\r
+ vmulps %ymm13, %ymm1, %ymm13\r
+\r
+ vaddsubps %ymm9, %ymm8 , %ymm8\r
+ vaddsubps %ymm13,%ymm12, %ymm12\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %ymm8 , %ymm8\r
+ vaddps 8 * SIZE(CO1), %ymm12, %ymm12\r
+\r
+#endif\r
+\r
+ vmovups %ymm8 , (CO1)\r
+ vmovups %ymm12 , 8 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+/************************************************************************************************/\r
+\r
+.macro KERNEL4x1_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPS_R( %xmm8,%xmm4,%xmm0 )\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %xmm1\r
+ VFMADDPS_R( %xmm12,%xmm4,%xmm1 )\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPS_I( %xmm9,%xmm5,%xmm0 )\r
+ VFMADDPS_I( %xmm13,%xmm5,%xmm1 )\r
+ addq $2, BI \r
+ addq $8, %rax \r
+.endm\r
+\r
+.macro SAVE4x1\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $0xb1, %xmm13, %xmm13, %xmm13\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm13,%xmm12, %xmm12\r
+\r
+ vshufps $0xb1, %xmm8 , %xmm8, %xmm9\r
+ vshufps $0xb1, %xmm12, %xmm12, %xmm13\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+ vaddsubps %xmm12, %xmm13,%xmm13\r
+\r
+ vmovaps %xmm9, %xmm8\r
+ vmovaps %xmm13, %xmm12\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $0xb1, %xmm13, %xmm13, %xmm13\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+ vmulps %xmm12, %xmm0, %xmm12\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+ vmulps %xmm13, %xmm1, %xmm13\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm13,%xmm12, %xmm12\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %xmm8 , %xmm8\r
+ vaddps 4 * SIZE(CO1), %xmm12, %xmm12\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+ vmovups %xmm12 , 4 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
+\r
+.macro KERNEL2x1_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPS_R( %xmm8,%xmm4,%xmm0 )\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPS_I( %xmm9,%xmm5,%xmm0 )\r
+ addq $2, BI \r
+ addq $4, %rax \r
+.endm\r
+\r
+.macro SAVE2x1\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+\r
+ vshufps $0xb1, %xmm8 , %xmm8, %xmm9\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+\r
+ vmovaps %xmm9, %xmm8\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %xmm8 , %xmm8\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
+\r
+.macro KERNEL1x1_SUB\r
+ vmovsd -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPS_R( %xmm8,%xmm4,%xmm0 )\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPS_I( %xmm9,%xmm5,%xmm0 )\r
+ addq $2, BI \r
+ addq $2, %rax \r
+.endm\r
+\r
+.macro SAVE1x1\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+\r
+ vshufps $0xb1, %xmm8 , %xmm8, %xmm9\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+\r
+ vmovaps %xmm9, %xmm8\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vmovsd (CO1), %xmm14\r
+ vaddps %xmm14, %xmm8 , %xmm8\r
+\r
+#endif\r
+\r
+ vmovsd %xmm8 , (CO1)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
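+// Convention: KERNELMxN_SUB performs one k-step of an MxN sub-block and\r
+// SAVEMxN applies alpha and writes the block back to C (the C read is\r
+// skipped for TRMM). The driver below peels M as 8/4/2/1 and N as 2/1.\r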
+\r
+\r
+\r
+\r
+ PROLOGUE\r
+ PROFCODE\r
+ \r
+ subq $STACKSIZE, %rsp\r
+ movq %rbx, (%rsp)\r
+ movq %rbp, 8(%rsp)\r
+ movq %r12, 16(%rsp)\r
+ movq %r13, 24(%rsp)\r
+ movq %r14, 32(%rsp)\r
+ movq %r15, 40(%rsp)\r
+\r
+ vzeroupper\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq %rdi, 48(%rsp)\r
+ movq %rsi, 56(%rsp)\r
+ movups %xmm6, 64(%rsp)\r
+ movups %xmm7, 80(%rsp)\r
+ movups %xmm8, 96(%rsp)\r
+ movups %xmm9, 112(%rsp)\r
+ movups %xmm10, 128(%rsp)\r
+ movups %xmm11, 144(%rsp)\r
+ movups %xmm12, 160(%rsp)\r
+ movups %xmm13, 176(%rsp)\r
+ movups %xmm14, 192(%rsp)\r
+ movups %xmm15, 208(%rsp)\r
+\r
+ movq ARG1, OLD_M\r
+ movq ARG2, OLD_N\r
+ movq ARG3, OLD_K\r
+ movq OLD_A, A\r
+ movq OLD_B, B\r
+ movq OLD_C, C\r
+ movq OLD_LDC, LDC\r
+#ifdef TRMMKERNEL\r
+ movsd OLD_OFFSET, %xmm12\r
+#endif\r
+ vmovaps %xmm3, %xmm0\r
+\r
+#else\r
+ movq STACKSIZE + 8(%rsp), LDC\r
+#ifdef TRMMKERNEL\r
+ movsd STACKSIZE + 16(%rsp), %xmm12\r
+#endif\r
+\r
+#endif\r
+\r
+ movq %rsp, SP # save old stack\r
+ subq $128 + L_BUFFER_SIZE, %rsp\r
+ andq $-4096, %rsp # align stack\r
+\r
+ STACK_TOUCH\r
+\r
+ cmpq $0, OLD_M\r
+ je .L999\r
+\r
+ cmpq $0, OLD_N\r
+ je .L999\r
+\r
+ cmpq $0, OLD_K\r
+ je .L999\r
+\r
+ movq OLD_M, M\r
+ movq OLD_N, N\r
+ movq OLD_K, K\r
+\r
+ vmovss %xmm0, ALPHA_R\r
+ vmovss %xmm1, ALPHA_I\r
+\r
+ salq $ZBASE_SHIFT, LDC\r
+\r
+ movq N, %rax\r
+ xorq %rdx, %rdx\r
+ movq $2, %rdi\r
+ divq %rdi // N / 2\r
+ movq %rax, Ndiv6 // N / 2\r
+ movq %rdx, Nmod6 // N % 2\r
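+	// The Ndiv6/Nmod6 names appear to be inherited from kernels that split\r
+	// N by 6; this kernel splits N into two-column panels plus a remainder.\r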
+\r
+ \r
+\r
+#ifdef TRMMKERNEL\r
+ vmovsd %xmm12, OFFSET\r
+ vmovsd %xmm12, KK\r
+#ifndef LEFT\r
+ negq KK\r
+#endif \r
+#endif\r
+\r
+.L2_0:\r
+\r
+ movq Ndiv6, J\r
+ cmpq $0, J\r
+ je .L1_0\r
+ ALIGN_4\r
+\r
+\r
+\r
+.L2_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
+.L2_02b:\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups %xmm0, (BO)\r
+ addq $4*SIZE,BO1\r
+ addq $4*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_02b\r
+\r
+.L2_02c:\r
+\r
+ movq BO1, B // next offset of B\r
+\r
+.L2_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C // c += 2 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $3, I // i = (m >> 3)\r
+ je .L2_4_10\r
+\r
+ ALIGN_4\r
+/**********************************************************************************************************/\r
+\r
+.L2_8_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $8, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_8_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_8_12:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+\r
+ je .L2_8_16\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+\r
+ je .L2_8_16\r
+\r
+ jmp .L2_8_12\r
+ ALIGN_4\r
+\r
+.L2_8_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L2_8_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_8_17:\r
+\r
+ KERNEL8x2_SUB\r
+\r
+ jl .L2_8_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_8_19:\r
+\r
+ SAVE8x2\r
+\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $8, KK\r
+#endif\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L2_8_11\r
+ ALIGN_4 \r
+\r
+\r
+/**********************************************************************************************************/\r
+\r
+\r
+\r
+\r
+.L2_4_10:\r
+ testq $7, M \r
+ jz .L2_4_60 // to next 2 lines of N\r
+\r
+ testq $4, M \r
+ jz .L2_4_20\r
+ ALIGN_4\r
+\r
+\r
+.L2_4_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_4_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_12:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ je .L2_4_16\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ je .L2_4_16\r
+\r
+ jmp .L2_4_12\r
+ ALIGN_4\r
+\r
+.L2_4_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L2_4_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_17:\r
+\r
+ KERNEL4x2_SUB\r
+\r
+ jl .L2_4_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_4_19:\r
+\r
+ SAVE4x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4 \r
+\r
+\r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+\r
+.L2_4_20:\r
+\r
+ testq $2, M \r
+ jz .L2_4_40\r
+ ALIGN_4\r
+\r
+.L2_4_21:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_4_26\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_22:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ je .L2_4_26\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ je .L2_4_26\r
+\r
+ jmp .L2_4_22\r
+ ALIGN_4\r
+\r
+.L2_4_26:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L2_4_29\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_27:\r
+\r
+ KERNEL2x2_SUB\r
+\r
+ jl .L2_4_27\r
+ ALIGN_4\r
+\r
+\r
+.L2_4_29:\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $0xb1, %xmm11, %xmm11, %xmm11\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+\r
+ vshufps $0xb1, %xmm8 , %xmm8, %xmm9\r
+ vshufps $0xb1, %xmm10, %xmm10, %xmm11\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+ vaddsubps %xmm10, %xmm11,%xmm11\r
+\r
+ vmovaps %xmm9, %xmm8\r
+ vmovaps %xmm11, %xmm10\r
+\r
+	// swap real and imaginary parts of each complex value\r
+ vshufps $0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $0xb1, %xmm11, %xmm11, %xmm11\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+ vmulps %xmm10, %xmm0, %xmm10\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+ vmulps %xmm11, %xmm1, %xmm11\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %xmm8 , %xmm8\r
+\r
+ vaddps (CO1, LDC), %xmm10, %xmm10\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+\r
+ vmovups %xmm10 , (CO1, LDC)\r
+\r
+\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ decq I # i --\r
+ jg .L2_4_21\r
+ ALIGN_4 \r
+\r
+\r
+\r
+/**************************************************************************/\r
+.L2_4_40:\r
+ testq $1, M \r
+ jz .L2_4_60 // to next 2 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L2_4_41:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_4_46\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_42:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ je .L2_4_46\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ je .L2_4_46\r
+\r
+ jmp .L2_4_42\r
+ ALIGN_4\r
+\r
+.L2_4_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L2_4_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_47:\r
+\r
+ KERNEL1x2_SUB\r
+\r
+ jl .L2_4_47\r
+ ALIGN_4\r
+\r
+\r
+.L2_4_49:\r
+\r
+ SAVE1x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK\r
+#endif\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ decq I # i --\r
+ jg .L2_4_41\r
+ ALIGN_4 \r
+\r
+\r
+\r
+ \r
+.L2_4_60:\r
+#if defined(TRMMKERNEL) && !defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ decq J // j --\r
+ jg .L2_01 // next 2 lines of N\r
+\r
+\r
+\r
+.L1_0:\r
+\r
+/************************************************************************************************\r
+* Loop for the remaining column (Nmod6 = N % 2 > 0)\r
+*************************************************************************************************/\r
+\r
+ movq Nmod6, J \r
+ andq $1, J // j % 2\r
+ je .L999\r
+ ALIGN_4\r
+\r
+.L1_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
+.L1_02b:\r
+\r
+ vmovsd (BO1), %xmm0\r
+ vmovsd %xmm0, (BO)\r
+ addq $2*SIZE,BO1\r
+ addq $2*SIZE,BO\r
+ decq %rax\r
+ jnz .L1_02b\r
+\r
+.L1_02c:\r
+\r
+ movq BO1, B // next offset of B\r
+\r
+.L1_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 1), C // c += 1 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $3, I // i = (m >> 3)\r
+ je .L1_4_10\r
+\r
+ ALIGN_4\r
+\r
+/**************************************************************************************************/\r
+\r
+.L1_8_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $8, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_8_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_8_12:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+\r
+ je .L1_8_16\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+\r
+ je .L1_8_16\r
+\r
+ jmp .L1_8_12\r
+ ALIGN_4\r
+\r
+.L1_8_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L1_8_19\r
+\r
+ movq %rax, BI // Index for BO\r
+	leaq	( ,BI,2), BI		// BI = BI * 2 ; number of values\r
+\r
+ salq $4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_8_17:\r
+\r
+ KERNEL8x1_SUB\r
+\r
+ jl .L1_8_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_8_19:\r
+\r
+ SAVE8x1\r
+\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $8, KK\r
+#endif\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L1_8_11\r
+ ALIGN_4 \r
+\r
+\r
+\r
+/**************************************************************************************************/\r
+.L1_4_10:\r
+\r
+ testq $7, M \r
+ jz .L999\r
+\r
+ testq $4, M \r
+ jz .L1_4_20\r
+\r
+\r
+.L1_4_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_4_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_12:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ je .L1_4_16\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ je .L1_4_16\r
+\r
+ jmp .L1_4_12\r
+ ALIGN_4\r
+\r
+.L1_4_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L1_4_19\r
+\r
+ movq %rax, BI // Index for BO\r
+	leaq	( ,BI,2), BI		// BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_17:\r
+\r
+ KERNEL4x1_SUB\r
+\r
+ jl .L1_4_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_4_19:\r
+\r
+ SAVE4x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4 \r
+\r
+\r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+\r
+.L1_4_20:\r
+\r
+ testq $2, M \r
+ jz .L1_4_40\r
+ ALIGN_4\r
+\r
+.L1_4_21:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_4_26\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_22:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ je .L1_4_26\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ je .L1_4_26\r
+\r
+ jmp .L1_4_22\r
+ ALIGN_4\r
+\r
+.L1_4_26:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L1_4_29\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_27:\r
+\r
+ KERNEL2x1_SUB\r
+\r
+ jl .L1_4_27\r
+ ALIGN_4\r
+\r
+\r
+.L1_4_29:\r
+\r
+ SAVE2x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4 \r
+\r
+\r
+\r
+/**************************************************************************/\r
+.L1_4_40:\r
+ testq $1, M \r
+ jz .L999 // all rows done; jump to epilogue\r
+\r
+ ALIGN_4\r
+\r
+.L1_4_41:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_4_46\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_42:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ je .L1_4_46\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ je .L1_4_46\r
+\r
+ jmp .L1_4_42\r
+ ALIGN_4\r
+\r
+.L1_4_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_4_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_47:\r
+\r
+ KERNEL1x1_SUB\r
+\r
+ jl .L1_4_47\r
+ ALIGN_4\r
+\r
+\r
+.L1_4_49:\r
+\r
+ SAVE1x1\r
+\r
+\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK\r
+#endif\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4 \r
+\r
+\r
+.L999:\r
+ movq SP, %rsp\r
+ movq (%rsp), %rbx\r
+ movq 8(%rsp), %rbp\r
+ movq 16(%rsp), %r12\r
+ movq 24(%rsp), %r13\r
+ movq 32(%rsp), %r14\r
+ movq 40(%rsp), %r15\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq 48(%rsp), %rdi\r
+ movq 56(%rsp), %rsi\r
+ movups 64(%rsp), %xmm6\r
+ movups 80(%rsp), %xmm7\r
+ movups 96(%rsp), %xmm8\r
+ movups 112(%rsp), %xmm9\r
+ movups 128(%rsp), %xmm10\r
+ movups 144(%rsp), %xmm11\r
+ movups 160(%rsp), %xmm12\r
+ movups 176(%rsp), %xmm13\r
+ movups 192(%rsp), %xmm14\r
+ movups 208(%rsp), %xmm15\r
+#endif\r
+\r
+ addq $STACKSIZE, %rsp\r
+ ret\r
+\r
+ EPILOGUE\r
--- /dev/null
+/*********************************************************************/\r
+/* Copyright 2009, 2010 The University of Texas at Austin. */\r
+/* All rights reserved. */\r
+/* */\r
+/* Redistribution and use in source and binary forms, with or */\r
+/* without modification, are permitted provided that the following */\r
+/* conditions are met: */\r
+/* */\r
+/* 1. Redistributions of source code must retain the above */\r
+/* copyright notice, this list of conditions and the following */\r
+/* disclaimer. */\r
+/* */\r
+/* 2. Redistributions in binary form must reproduce the above */\r
+/* copyright notice, this list of conditions and the following */\r
+/* disclaimer in the documentation and/or other materials */\r
+/* provided with the distribution. */\r
+/* */\r
+/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */\r
+/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */\r
+/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */\r
+/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */\r
+/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */\r
+/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */\r
+/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */\r
+/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */\r
+/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */\r
+/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */\r
+/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */\r
+/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */\r
+/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\r
+/* POSSIBILITY OF SUCH DAMAGE. */\r
+/* */\r
+/* The views and conclusions contained in the software and */\r
+/* documentation are those of the authors and should not be */\r
+/* interpreted as representing official policies, either expressed */\r
+/* or implied, of The University of Texas at Austin. */\r
+/*********************************************************************/\r
+\r
+/*********************************************************************\r
+* 2013/10/20 Saar\r
+* BLASTEST : OK\r
+* CTEST : OK\r
+* TEST : OK\r
+\r
+*\r
+*\r
+* 2013/10/20 Saar\r
+* Parameter:\r
+* DGEMM_DEFAULT_UNROLL_N 2\r
+* DGEMM_DEFAULT_UNROLL_M 16\r
+* DGEMM_DEFAULT_P 192\r
+* DGEMM_DEFAULT_Q 128\r
+* A_PR1 512\r
+*\r
+*\r
+* Performance without prefetch of B:\r
+* 1 thread: 45.8 GFLOPS (MKL: 45)\r
+* 2 threads: 80.0 GFLOPS (MKL: 91)\r
+* 4 threads: 135.0 GFLOPS (MKL: 135)\r
+*********************************************************************/\r
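+\r
+/* A rough C sketch (hypothetical names, not part of this file) of the\r
+   16x3 micro-tile that the inner kernels below compute with AVX2 FMAs:\r
+\r
+       for (k = 0; k < K; k++)           // one KERNEL16x3_* step per k\r
+           for (i = 0; i < 16; i++)      // 16 rows of packed A\r
+               for (j = 0; j < 3; j++)   // 3 columns of packed B\r
+                   c[i][j] += a[k][i] * b[k][j];\r
+\r
+   SAVE16x3 then applies alpha and adds the tile into C. */\r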
+\r
+\r
+#define ASSEMBLER\r
+#include "common.h"\r
+ \r
+#define OLD_M %rdi\r
+#define OLD_N %rsi\r
+#define M %r13\r
+#define J %r14\r
+#define OLD_K %rdx\r
+\r
+#define A %rcx\r
+#define B %r8\r
+#define C %r9\r
+#define LDC %r10\r
+ \r
+#define I %r11\r
+#define AO %rdi\r
+#define BO %rsi\r
+#define CO1 %r15\r
+#define K %r12\r
+#define BI %rbp\r
+#define SP %rbx\r
+\r
+#define BO1 %rdi\r
+#define BO2 %r15\r
+\r
+#ifndef WINDOWS_ABI\r
+\r
+#define STACKSIZE 96\r
+\r
+#else\r
+\r
+#define STACKSIZE 256\r
+\r
+#define OLD_A 40 + STACKSIZE(%rsp)\r
+#define OLD_B 48 + STACKSIZE(%rsp)\r
+#define OLD_C 56 + STACKSIZE(%rsp)\r
+#define OLD_LDC 64 + STACKSIZE(%rsp)\r
+#define OLD_OFFSET 72 + STACKSIZE(%rsp)\r
+\r
+#endif\r
+\r
+#define L_BUFFER_SIZE 512*8*4\r
+#define LB2_OFFSET 512*8*2\r
+\r
+#define Ndiv6 24(%rsp)\r
+#define Nmod6 32(%rsp)\r
+#define N 40(%rsp)\r
+#define ALPHA 48(%rsp)\r
+#define OFFSET 56(%rsp)\r
+#define KK 64(%rsp)\r
+#define KKK 72(%rsp)\r
+#define BUFFER1 128(%rsp)\r
+#define BUFFER2 LB2_OFFSET+128(%rsp)\r
+\r
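+// Windows commits stack pages one guard page at a time, so after the large\r
+// rsp adjustment STACK_TOUCH writes to each 4K page of the buffer region to\r
+// fault the pages in safely before they are used.\r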
+#if defined(OS_WINDOWS)\r
+#if L_BUFFER_SIZE > 16384\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 4(%rsp);\\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 12288\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 8192\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 4096\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 1(%rsp);\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
+\r
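+// FMA abstraction: Bulldozer provides the 4-operand FMA4 instructions\r
+// (vfmaddpd/vfmaddsd), while Haswell provides the 3-operand FMA3 forms\r
+// (vfmadd231pd/vfmadd231sd). Either way the macros compute y0 += y1 * y2,\r
+// so the kernels below are written once against VFMADD231PD_/VFMADD231SD_.\r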
+#if defined(BULLDOZER)\r
+\r
+.macro VFMADD231PD_ y0,y1,y2\r
+ vfmaddpd \y0,\y1,\y2,\y0\r
+.endm\r
+\r
+.macro VFMADD231SD_ x0,x1,x2\r
+ vfmaddsd \x0,\x1,\x2,\x0\r
+.endm\r
+\r
+#else\r
+\r
+.macro VFMADD231PD_ y0,y1,y2\r
+ vfmadd231pd \y2,\y1,\y0\r
+.endm\r
+\r
+.macro VFMADD231SD_ x0,x1,x2\r
+ vfmadd231sd \x2,\x1,\x0\r
+.endm\r
+\r
+#endif\r
+\r
+\r
+#define A_PR1 512\r
+#define B_PR1 256\r
+\r
+/*******************************************************************************************\r
+* 3 lines of N\r
+*******************************************************************************************/\r
+\r
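+// One k-iteration of the 16x3 kernel: four 256-bit loads of A (16 doubles\r
+// through ymm0) times three broadcast B values (ymm1-ymm3), accumulated\r
+// into the twelve tile registers ymm4-ymm15.\r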
+.macro KERNEL16x3_SUBN\r
+ prefetcht0 A_PR1(AO)\r
+ vbroadcastsd -12 * SIZE(BO), %ymm1\r
+ vmovaps -16 * SIZE(AO), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -11 * SIZE(BO), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -10 * SIZE(BO), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovaps -12 * SIZE(AO), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 A_PR1+64(AO)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ vmovaps -8 * SIZE(AO), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm12,%ymm3,%ymm0\r
+ vmovaps -4 * SIZE(AO), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm15,%ymm3,%ymm0\r
+ addq $3*SIZE , BO \r
+ addq $16*SIZE, AO\r
+.endm\r
+\r
+\r
+.macro KERNEL8x3_SUBN\r
+ //prefetcht0 A_PR1(AO)\r
+ vbroadcastsd -12 * SIZE(BO), %ymm1\r
+ vmovaps -16 * SIZE(AO), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -11 * SIZE(BO), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -10 * SIZE(BO), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovaps -12 * SIZE(AO), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ //prefetcht0 A_PR1+64(AO)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ prefetcht0 B_PR1(BO)\r
+ addq $3*SIZE , BO \r
+ addq $8*SIZE, AO\r
+.endm\r
+\r
+.macro KERNEL4x3_SUBN\r
+ vbroadcastsd -12 * SIZE(BO), %ymm1\r
+ vmovaps -16 * SIZE(AO), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -11 * SIZE(BO), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -10 * SIZE(BO), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ addq $3*SIZE , BO \r
+ addq $4*SIZE, AO\r
+.endm\r
+\r
+.macro KERNEL2x3_SUBN\r
+ vmovsd -12 * SIZE(BO), %xmm1\r
+ vmovsd -16 * SIZE(AO), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -11 * SIZE(BO), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -10 * SIZE(BO), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ vmovsd -15 * SIZE(AO), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ VFMADD231SD_ %xmm12,%xmm3,%xmm0\r
+ addq $3*SIZE , BO \r
+ addq $2*SIZE, AO\r
+.endm\r
+\r
+.macro KERNEL1x3_SUBN\r
+ vmovsd -12 * SIZE(BO), %xmm1\r
+ vmovsd -16 * SIZE(AO), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -11 * SIZE(BO), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -10 * SIZE(BO), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ addq $3*SIZE , BO \r
+ addq $1*SIZE, AO\r
+.endm\r
+\r
+\r
+\r
+\r
+\r
+\r
+/******************************************************************************************/\r
+\r
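+// KERNEL16x3_1..KERNEL16x3_4 form a software-pipelined 4-step unroll: each\r
+// step pre-broadcasts the next step's B values, and step 4 advances BI by\r
+// 12 (4 k-steps * 3 values) and %rax by 64 (4 k-steps * 16 values).\r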
+.macro KERNEL16x3_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 64+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm12,%ymm3,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm1\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm15,%ymm3,%ymm0\r
+.endm\r
+\r
+\r
+\r
+\r
+.macro KERNEL16x3_2\r
+ prefetcht0 128+A_PR1(AO, %rax, SIZE)\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ prefetcht0 A_PR1+64(AO,%rax,SIZE)\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ prefetcht0 192+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm12,%ymm3,%ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm15,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x3_3\r
+ prefetcht0 256+A_PR1(AO, %rax, SIZE)\r
+ vmovups 0 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups 4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 320+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ vmovups 8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm12,%ymm3,%ymm0\r
+ vmovups 12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm1\r
+ vbroadcastsd 4 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm15,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x3_4\r
+ prefetcht0 384+A_PR1(AO, %rax, SIZE)\r
+ vmovups 16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd 5 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups 20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 448+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ vmovups 24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ addq $12, BI \r
+ VFMADD231PD_ %ymm12,%ymm3,%ymm0\r
+ vmovups 28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ addq $64, %rax \r
+ VFMADD231PD_ %ymm15,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x3_SUB\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm12,%ymm3,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm15,%ymm3,%ymm0\r
+ addq $3 , BI \r
+ addq $16, %rax \r
+.endm\r
+\r
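+// SAVE16x3: scale the 12 accumulators by alpha and, except in the TRMM\r
+// kernel, add the existing C values before storing the 16x3 tile back.\r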
+.macro SAVE16x3\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+ vmulpd %ymm0 , %ymm10, %ymm10\r
+ vmulpd %ymm0 , %ymm13, %ymm13\r
+\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+ vmulpd %ymm0 , %ymm8 , %ymm8\r
+ vmulpd %ymm0 , %ymm11, %ymm11\r
+ vmulpd %ymm0 , %ymm14, %ymm14\r
+\r
+ vmulpd %ymm0 , %ymm6 , %ymm6\r
+ vmulpd %ymm0 , %ymm9 , %ymm9\r
+ vmulpd %ymm0 , %ymm12, %ymm12\r
+ vmulpd %ymm0 , %ymm15, %ymm15\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd 4 * SIZE(CO1), %ymm7,%ymm7\r
+ vaddpd 8 * SIZE(CO1), %ymm10,%ymm10\r
+ vaddpd 12 * SIZE(CO1), %ymm13,%ymm13\r
+\r
+ vaddpd (CO1, LDC), %ymm5,%ymm5\r
+ vaddpd 4 * SIZE(CO1, LDC), %ymm8,%ymm8\r
+ vaddpd 8 * SIZE(CO1, LDC), %ymm11,%ymm11\r
+ vaddpd 12 * SIZE(CO1, LDC), %ymm14,%ymm14\r
+\r
+ vaddpd (CO1, LDC, 2), %ymm6,%ymm6\r
+ vaddpd 4 * SIZE(CO1, LDC, 2), %ymm9,%ymm9\r
+ vaddpd 8 * SIZE(CO1, LDC, 2), %ymm12,%ymm12\r
+ vaddpd 12 * SIZE(CO1, LDC, 2), %ymm15,%ymm15\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm7 , 4 * SIZE(CO1)\r
+ vmovups %ymm10, 8 * SIZE(CO1)\r
+ vmovups %ymm13,12 * SIZE(CO1)\r
+\r
+ vmovups %ymm5 , (CO1, LDC)\r
+ vmovups %ymm8 , 4 * SIZE(CO1, LDC)\r
+ vmovups %ymm11, 8 * SIZE(CO1, LDC)\r
+ vmovups %ymm14,12 * SIZE(CO1, LDC)\r
+\r
+ vmovups %ymm6 , (CO1, LDC, 2)\r
+ vmovups %ymm9 , 4 * SIZE(CO1, LDC, 2)\r
+ vmovups %ymm12, 8 * SIZE(CO1, LDC, 2)\r
+ vmovups %ymm15,12 * SIZE(CO1, LDC, 2)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL8x3_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x3_2\r
+ prefetcht0 64+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x3_3\r
+ prefetcht0 128+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x3_4\r
+ prefetcht0 192+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 4 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd 5 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ addq $12, BI\r
+ addq $32, %rax\r
+.endm\r
+\r
+.macro KERNEL8x3_SUB\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ addq $3 , BI\r
+ addq $8 , %rax\r
+.endm\r
+\r
+.macro SAVE8x3\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+ vmulpd %ymm0 , %ymm8 , %ymm8\r
+\r
+ vmulpd %ymm0 , %ymm6 , %ymm6\r
+ vmulpd %ymm0 , %ymm9 , %ymm9\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd 4 * SIZE(CO1), %ymm7,%ymm7\r
+\r
+ vaddpd (CO1, LDC), %ymm5,%ymm5\r
+ vaddpd 4 * SIZE(CO1, LDC), %ymm8,%ymm8\r
+\r
+ vaddpd (CO1, LDC, 2), %ymm6,%ymm6\r
+ vaddpd 4 * SIZE(CO1, LDC, 2), %ymm9,%ymm9\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm7 , 4 * SIZE(CO1)\r
+\r
+ vmovups %ymm5 , (CO1, LDC)\r
+ vmovups %ymm8 , 4 * SIZE(CO1, LDC)\r
+\r
+ vmovups %ymm6 , (CO1, LDC, 2)\r
+ vmovups %ymm9 , 4 * SIZE(CO1, LDC, 2)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL4x3_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x3_2\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x3_3\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x3_4\r
+ vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 4 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd 5 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ addq $12, BI\r
+ addq $16, %rax\r
+.endm\r
+\r
+.macro KERNEL4x3_SUB\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ addq $3 , BI\r
+ addq $4 , %rax\r
+.endm\r
+\r
+.macro SAVE4x3\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+ vmulpd %ymm0 , %ymm6 , %ymm6\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd (CO1, LDC), %ymm5,%ymm5\r
+ vaddpd (CO1, LDC, 2), %ymm6,%ymm6\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm5 , (CO1, LDC)\r
+ vmovups %ymm6 , (CO1, LDC, 2)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL2x3_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vmovsd -6 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -5 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ VFMADD231SD_ %xmm12,%xmm3,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x3_2\r
+ vmovsd -3 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -1 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ VFMADD231SD_ %xmm12,%xmm3,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x3_3\r
+ vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -28 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 1 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd 2 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ vmovsd -27 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ VFMADD231SD_ %xmm12,%xmm3,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x3_4\r
+ vmovsd 3 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -26 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 4 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd 5 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ vmovsd -25 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ VFMADD231SD_ %xmm12,%xmm3,%xmm0\r
+ addq $12, BI\r
+ addq $8, %rax\r
+.endm\r
+\r
+.macro KERNEL2x3_SUB\r
+ vmovsd -6 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -5 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ VFMADD231SD_ %xmm12,%xmm3,%xmm0\r
+ addq $3 , BI\r
+ addq $2 , %rax\r
+.endm\r
+\r
+.macro SAVE2x3\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm8 , %xmm8\r
+ vmulsd %xmm0 , %xmm5 , %xmm5\r
+ vmulsd %xmm0 , %xmm10, %xmm10\r
+ vmulsd %xmm0 , %xmm6 , %xmm6\r
+ vmulsd %xmm0 , %xmm12, %xmm12\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4,%xmm4\r
+ vaddsd 1 * SIZE(CO1), %xmm8,%xmm8\r
+ vaddsd (CO1, LDC), %xmm5,%xmm5\r
+ vaddsd 1 * SIZE(CO1, LDC), %xmm10,%xmm10\r
+ vaddsd (CO1, LDC, 2), %xmm6,%xmm6\r
+ vaddsd 1 * SIZE(CO1, LDC, 2), %xmm12,%xmm12\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm8 , 1 * SIZE(CO1)\r
+ vmovsd %xmm5 , (CO1, LDC)\r
+ vmovsd %xmm10, 1 * SIZE(CO1, LDC)\r
+ vmovsd %xmm6 , (CO1, LDC, 2)\r
+ vmovsd %xmm12, 1 * SIZE(CO1, LDC, 2)\r
+\r
+.endm\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL1x3_1\r
+ vmovsd -6 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -5 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x3_2\r
+ vmovsd -3 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -1 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x3_3\r
+ vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 1 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd 2 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x3_4\r
+ vmovsd 3 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 4 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd 5 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ addq $12, BI\r
+ addq $4, %rax\r
+.endm\r
+\r
+.macro KERNEL1x3_SUB\r
+ vmovsd -6 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -5 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ addq $3 , BI\r
+ addq $1 , %rax\r
+.endm\r
+\r
+.macro SAVE1x3\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm5 , %xmm5\r
+ vmulsd %xmm0 , %xmm6 , %xmm6\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4,%xmm4\r
+ vaddsd (CO1, LDC), %xmm5,%xmm5\r
+ vaddsd (CO1, LDC, 2), %xmm6,%xmm6\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm5 , (CO1, LDC)\r
+ vmovsd %xmm6 , (CO1, LDC, 2)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+/*******************************************************************************************\r
+* 2 lines of N\r
+*******************************************************************************************/\r
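+// Two-column kernels, used when only 2 columns of B remain: same row\r
+// blocking as above, with B contributing 2 values per k-iteration.\r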
+\r
+.macro KERNEL16x2_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 64+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x2_2\r
+ prefetcht0 128+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 192+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x2_3\r
+ prefetcht0 256+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups 0 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups 4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 320+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ vmovups 8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ vmovups 12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x2_4\r
+ prefetcht0 384+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups 16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups 20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 448+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ vmovups 24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ vmovups 28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ addq $8, BI\r
+ addq $64, %rax\r
+.endm\r
+\r
+.macro KERNEL16x2_SUB\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ addq $2, BI\r
+ addq $16, %rax\r
+.endm\r
+\r
+.macro SAVE16x2\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+ vmulpd %ymm0 , %ymm10, %ymm10\r
+ vmulpd %ymm0 , %ymm13, %ymm13\r
+\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+ vmulpd %ymm0 , %ymm8 , %ymm8\r
+ vmulpd %ymm0 , %ymm11, %ymm11\r
+ vmulpd %ymm0 , %ymm14, %ymm14\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd 4 * SIZE(CO1), %ymm7,%ymm7\r
+ vaddpd 8 * SIZE(CO1), %ymm10,%ymm10\r
+ vaddpd 12 * SIZE(CO1), %ymm13,%ymm13\r
+\r
+ vaddpd (CO1, LDC), %ymm5,%ymm5\r
+ vaddpd 4 * SIZE(CO1, LDC), %ymm8,%ymm8\r
+ vaddpd 8 * SIZE(CO1, LDC), %ymm11,%ymm11\r
+ vaddpd 12 * SIZE(CO1, LDC), %ymm14,%ymm14\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm7 , 4 * SIZE(CO1)\r
+ vmovups %ymm10, 8 * SIZE(CO1)\r
+ vmovups %ymm13,12 * SIZE(CO1)\r
+\r
+ vmovups %ymm5 , (CO1, LDC)\r
+ vmovups %ymm8 , 4 * SIZE(CO1, LDC)\r
+ vmovups %ymm11, 8 * SIZE(CO1, LDC)\r
+ vmovups %ymm14,12 * SIZE(CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL8x2_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x2_2\r
+ prefetcht0 64+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x2_3\r
+ prefetcht0 128+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x2_4\r
+ prefetcht0 192+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ addq $8, BI \r
+ addq $32, %rax \r
+.endm\r
+\r
+.macro KERNEL8x2_SUB\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ addq $2, BI \r
+ addq $8 , %rax \r
+.endm\r
+\r
+.macro SAVE8x2\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+ vmulpd %ymm0 , %ymm8 , %ymm8\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd 4 * SIZE(CO1), %ymm7,%ymm7\r
+\r
+ vaddpd (CO1, LDC), %ymm5,%ymm5\r
+ vaddpd 4 * SIZE(CO1, LDC), %ymm8,%ymm8\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm7 , 4 * SIZE(CO1)\r
+\r
+ vmovups %ymm5 , (CO1, LDC)\r
+ vmovups %ymm8 , 4 * SIZE(CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL4x2_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x2_2\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x2_3\r
+ prefetcht0 64+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x2_4\r
+ vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ addq $8, BI \r
+ addq $16, %rax \r
+.endm\r
+\r
+.macro KERNEL4x2_SUB\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ addq $2, BI \r
+ addq $4 , %rax \r
+.endm\r
+\r
+.macro SAVE4x2\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd (CO1, LDC), %ymm5,%ymm5\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm5 , (CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL2x2_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -3 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x2_2\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -1 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x2_3\r
+ vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -28 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 1 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -27 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x2_4\r
+ vmovsd 2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -26 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 3 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -25 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ addq $8, BI \r
+ addq $8, %rax \r
+.endm\r
+\r
+.macro KERNEL2x2_SUB\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -3 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ addq $2, BI \r
+ addq $2, %rax \r
+.endm\r
+\r
+.macro SAVE2x2\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm8 , %xmm8\r
+ vmulsd %xmm0 , %xmm5 , %xmm5\r
+ vmulsd %xmm0 , %xmm10, %xmm10\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4,%xmm4\r
+ vaddsd 1 * SIZE(CO1), %xmm8,%xmm8\r
+ vaddsd (CO1, LDC), %xmm5,%xmm5\r
+ vaddsd 1 * SIZE(CO1, LDC), %xmm10,%xmm10\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm8 , 1 * SIZE(CO1)\r
+ vmovsd %xmm5 , (CO1, LDC)\r
+ vmovsd %xmm10, 1 * SIZE(CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL1x2_1\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -3 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x2_2\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -1 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x2_3\r
+ vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 1 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x2_4\r
+ vmovsd 2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 3 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ addq $8, BI \r
+ addq $4, %rax \r
+.endm\r
+\r
+.macro KERNEL1x2_SUB\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -3 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ addq $2, BI \r
+ addq $1, %rax \r
+.endm\r
+\r
+.macro SAVE1x2\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm5 , %xmm5\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4,%xmm4\r
+ vaddsd (CO1, LDC), %xmm5,%xmm5\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm5 , (CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+/*******************************************************************************************\r
+* 1 line of N\r
+*******************************************************************************************/\r
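+// Single-column kernels for the last leftover column of N: one B broadcast\r
+// per k-iteration, one column of C accumulators.\r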
+\r
+.macro KERNEL16x1_1\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x1_2\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x1_3\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups 0 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups 4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ vmovups 8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ vmovups 12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x1_4\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups 16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups 20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ vmovups 24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ vmovups 28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ addq $4, BI \r
+ addq $64, %rax \r
+.endm\r
+\r
+.macro KERNEL16x1_SUB\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ addq $1, BI \r
+ addq $16, %rax \r
+.endm\r
+\r
+.macro SAVE16x1\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+ vmulpd %ymm0 , %ymm10, %ymm10\r
+ vmulpd %ymm0 , %ymm13, %ymm13\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd 4 * SIZE(CO1), %ymm7,%ymm7\r
+ vaddpd 8 * SIZE(CO1), %ymm10,%ymm10\r
+ vaddpd 12 * SIZE(CO1), %ymm13,%ymm13\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm7 , 4 * SIZE(CO1)\r
+ vmovups %ymm10, 8 * SIZE(CO1)\r
+ vmovups %ymm13,12 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL8x1_1\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x1_2\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x1_3\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x1_4\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ addq $4, BI \r
+ addq $32, %rax \r
+.endm\r
+\r
+.macro KERNEL8x1_SUB\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ addq $1, BI \r
+ addq $8 , %rax \r
+.endm\r
+\r
+.macro SAVE8x1\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd 4 * SIZE(CO1), %ymm7,%ymm7\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm7 , 4 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL4x1_1\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x1_2\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x1_3\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x1_4\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ addq $4, BI \r
+ addq $16, %rax \r
+.endm\r
+\r
+.macro KERNEL4x1_SUB\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ addq $1, BI \r
+ addq $4 , %rax \r
+.endm\r
+\r
+.macro SAVE4x1\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL2x1_1\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x1_2\r
+ vmovsd -1 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x1_3\r
+ vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -28 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -27 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x1_4\r
+ vmovsd 1 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -26 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -25 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ addq $4, BI \r
+ addq $8, %rax \r
+.endm\r
+\r
+.macro KERNEL2x1_SUB\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ addq $1, BI \r
+ addq $2 , %rax \r
+.endm\r
+\r
+.macro SAVE2x1\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm8 , %xmm8\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4,%xmm4\r
+ vaddsd 1 * SIZE(CO1), %xmm8,%xmm8\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm8 , 1 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL1x1_1\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x1_2\r
+ vmovsd -1 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x1_3\r
+ vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x1_4\r
+ vmovsd 1 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ addq $4, BI \r
+ addq $4, %rax \r
+.endm\r
+\r
+.macro KERNEL1x1_SUB\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ addq $1, BI \r
+ addq $1 , %rax \r
+.endm\r
+\r
+.macro SAVE1x1\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4,%xmm4\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+\r
+ PROLOGUE\r
+ PROFCODE\r
+ \r
+ subq $STACKSIZE, %rsp\r
+ movq %rbx, (%rsp)\r
+ movq %rbp, 8(%rsp)\r
+ movq %r12, 16(%rsp)\r
+ movq %r13, 24(%rsp)\r
+ movq %r14, 32(%rsp)\r
+ movq %r15, 40(%rsp)\r
+\r
+ vzeroupper\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq %rdi, 48(%rsp)\r
+ movq %rsi, 56(%rsp)\r
+ movups %xmm6, 64(%rsp)\r
+ movups %xmm7, 80(%rsp)\r
+ movups %xmm8, 96(%rsp)\r
+ movups %xmm9, 112(%rsp)\r
+ movups %xmm10, 128(%rsp)\r
+ movups %xmm11, 144(%rsp)\r
+ movups %xmm12, 160(%rsp)\r
+ movups %xmm13, 176(%rsp)\r
+ movups %xmm14, 192(%rsp)\r
+ movups %xmm15, 208(%rsp)\r
+\r
+ movq ARG1, OLD_M\r
+ movq ARG2, OLD_N\r
+ movq ARG3, OLD_K\r
+ movq OLD_A, A\r
+ movq OLD_B, B\r
+ movq OLD_C, C\r
+ movq OLD_LDC, LDC\r
+\r
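+ // On Windows x64 the fourth argument (alpha) arrives in %xmm3; move it\r
+ // to %xmm0, where the SysV path and the common code below expect it.\r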
+ vmovaps %xmm3, %xmm0\r
+\r
+#else\r
+ movq STACKSIZE + 8(%rsp), LDC\r
+\r
+#endif\r
+\r
+ movq %rsp, SP # save old stack\r
+ subq $128 + L_BUFFER_SIZE, %rsp\r
+ andq $-4096, %rsp # align stack\r
+\r
+ STACK_TOUCH\r
+\r
+ cmpq $0, OLD_M\r
+ je .L999\r
+\r
+ cmpq $0, OLD_N\r
+ je .L999\r
+\r
+ cmpq $0, OLD_K\r
+ je .L999\r
+\r
+ movq OLD_M, M\r
+ movq OLD_N, N\r
+ movq OLD_K, K\r
+\r
+ vmovsd %xmm0, ALPHA\r
+\r
+ salq $BASE_SHIFT, LDC\r
+\r
+ movq N, %rax\r
+ xorq %rdx, %rdx\r
+ movq $6, %rdi\r
+ divq %rdi // N / 6\r
+ movq %rax, Ndiv6 // N / 6\r
+ movq %rdx, Nmod6 // N % 6\r
+\r
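+ // N is walked in blocks of 6 columns (Ndiv6): each block is repacked\r
+ // below into BUFFER1 (columns 0-2) and BUFFER2 (columns 3-5) so the\r
+ // 3-column kernels can stream B; the Nmod6 remainder is handled by the\r
+ // narrower kernels afterwards.\r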
+\r
+ movq Ndiv6, J\r
+ cmpq $0, J\r
+ je .L2_0\r
+ ALIGN_4\r
+\r
+.L6_01:\r
+ // copy to sub buffer\r
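+ // B comes in packed 2-column panels (2 values per k). The loop below\r
+ // interleaves a pair from the first panel (BO1) with the even elements\r
+ // of the second panel (BO2), writing 3 consecutive values per k into\r
+ // BUFFER1 (columns 0, 1 and 2 of the 6-column block).\r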
+ movq K, %rax\r
+ salq $1,%rax // K * 2 ; read 2 values\r
+ movq B, BO1\r
+ leaq (B,%rax, SIZE), BO2 // next offset to BO2\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ sarq $3 , %rax // K / 8\r
+ jz .L6_01a_2\r
+ ALIGN_4\r
+\r
+.L6_01a_1:\r
+\r
+ prefetcht0 512(BO1)\r
+ prefetcht0 512(BO2)\r
+ prefetchw 512(BO)\r
+\r
+\r
+ vmovups 0 * SIZE(BO1), %xmm0\r
+ vmovups 2 * SIZE(BO1), %xmm2\r
+ vmovups 4 * SIZE(BO1), %xmm4\r
+ vmovups 6 * SIZE(BO1), %xmm6\r
+ vmovsd 0 * SIZE(BO2), %xmm1\r
+ vmovsd 2 * SIZE(BO2), %xmm3\r
+ vmovsd 4 * SIZE(BO2), %xmm5\r
+ vmovsd 6 * SIZE(BO2), %xmm7\r
+ vmovups %xmm0, 0*SIZE(BO)\r
+ vmovsd %xmm1, 2*SIZE(BO)\r
+ vmovups %xmm2, 3*SIZE(BO)\r
+ vmovsd %xmm3, 5*SIZE(BO)\r
+ vmovups %xmm4, 6*SIZE(BO)\r
+ vmovsd %xmm5, 8*SIZE(BO)\r
+ vmovups %xmm6, 9*SIZE(BO)\r
+ vmovsd %xmm7,11*SIZE(BO)\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO2\r
+ addq $12*SIZE,BO\r
+\r
+ vmovups 0 * SIZE(BO1), %xmm0\r
+ vmovups 2 * SIZE(BO1), %xmm2\r
+ vmovups 4 * SIZE(BO1), %xmm4\r
+ vmovups 6 * SIZE(BO1), %xmm6\r
+ vmovsd 0 * SIZE(BO2), %xmm1\r
+ vmovsd 2 * SIZE(BO2), %xmm3\r
+ vmovsd 4 * SIZE(BO2), %xmm5\r
+ vmovsd 6 * SIZE(BO2), %xmm7\r
+ vmovups %xmm0, 0*SIZE(BO)\r
+ vmovsd %xmm1, 2*SIZE(BO)\r
+ vmovups %xmm2, 3*SIZE(BO)\r
+ vmovsd %xmm3, 5*SIZE(BO)\r
+ vmovups %xmm4, 6*SIZE(BO)\r
+ vmovsd %xmm5, 8*SIZE(BO)\r
+ vmovups %xmm6, 9*SIZE(BO)\r
+ vmovsd %xmm7,11*SIZE(BO)\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO2\r
+ addq $12*SIZE,BO\r
+\r
+ decq %rax\r
+ jnz .L6_01a_1\r
+\r
+\r
+\r
+.L6_01a_2:\r
+\r
+ movq K, %rax\r
+ andq $7, %rax // K % 8\r
+ jz .L6_02c\r
+ ALIGN_4\r
+\r
+\r
+.L6_02b:\r
+\r
+ vmovups 0 * SIZE(BO1), %xmm0\r
+ vmovsd 0 * SIZE(BO2), %xmm2\r
+ vmovups %xmm0, 0*SIZE(BO)\r
+ vmovsd %xmm2, 2*SIZE(BO)\r
+ addq $2*SIZE,BO1\r
+ addq $2*SIZE,BO2\r
+ addq $3*SIZE,BO\r
+ decq %rax\r
+ jnz .L6_02b\r
+\r
+.L6_02c:\r
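+\r
+ // Same repacking for columns 3-5: the odd elements of the second panel\r
+ // (via BO1) supply column 3, and the third panel (BO2) supplies columns\r
+ // 4 and 5, again written as 3 values per k into BUFFER2.\r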
+\r
+ movq K, %rax\r
+ salq $1,%rax // K * 2\r
+ leaq (B,%rax, SIZE), BO1 // next offset to BO1\r
+ leaq (BO1,%rax, SIZE), BO2 // next offset to BO2\r
+ leaq BUFFER2, BO // second buffer to BO\r
+ movq K, %rax\r
+ sarq $3 , %rax // K / 8\r
+ jz .L6_02c_2\r
+ ALIGN_4\r
+\r
+.L6_02c_1:\r
+\r
+ prefetcht0 512(BO2)\r
+ prefetchw 512(BO)\r
+\r
+ vmovups 0 * SIZE(BO2), %xmm0\r
+ vmovups 2 * SIZE(BO2), %xmm2\r
+ vmovups 4 * SIZE(BO2), %xmm4\r
+ vmovups 6 * SIZE(BO2), %xmm6\r
+ vmovsd 1 * SIZE(BO1), %xmm1\r
+ vmovsd 3 * SIZE(BO1), %xmm3\r
+ vmovsd 5 * SIZE(BO1), %xmm5\r
+ vmovsd 7 * SIZE(BO1), %xmm7\r
+ vmovsd %xmm1, 0*SIZE(BO)\r
+ vmovups %xmm0, 1*SIZE(BO)\r
+ vmovsd %xmm3, 3*SIZE(BO)\r
+ vmovups %xmm2, 4*SIZE(BO)\r
+ vmovsd %xmm5, 6*SIZE(BO)\r
+ vmovups %xmm4, 7*SIZE(BO)\r
+ vmovsd %xmm7, 9*SIZE(BO)\r
+ vmovups %xmm6,10*SIZE(BO)\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO2\r
+ addq $12*SIZE,BO\r
+\r
+\r
+ vmovups 0 * SIZE(BO2), %xmm0\r
+ vmovups 2 * SIZE(BO2), %xmm2\r
+ vmovups 4 * SIZE(BO2), %xmm4\r
+ vmovups 6 * SIZE(BO2), %xmm6\r
+ vmovsd 1 * SIZE(BO1), %xmm1\r
+ vmovsd 3 * SIZE(BO1), %xmm3\r
+ vmovsd 5 * SIZE(BO1), %xmm5\r
+ vmovsd 7 * SIZE(BO1), %xmm7\r
+ vmovsd %xmm1, 0*SIZE(BO)\r
+ vmovups %xmm0, 1*SIZE(BO)\r
+ vmovsd %xmm3, 3*SIZE(BO)\r
+ vmovups %xmm2, 4*SIZE(BO)\r
+ vmovsd %xmm5, 6*SIZE(BO)\r
+ vmovups %xmm4, 7*SIZE(BO)\r
+ vmovsd %xmm7, 9*SIZE(BO)\r
+ vmovups %xmm6,10*SIZE(BO)\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO2\r
+ addq $12*SIZE,BO\r
+\r
+ decq %rax\r
+ jnz .L6_02c_1\r
+\r
+\r
+.L6_02c_2:\r
+\r
+ movq K, %rax\r
+ andq $7, %rax // K % 8\r
+ jz .L6_03c\r
+ ALIGN_4\r
+\r
+.L6_03b:\r
+\r
+ vmovsd 1*SIZE(BO1), %xmm0\r
+ vmovups 0*SIZE(BO2), %xmm1\r
+ vmovsd %xmm0, 0*SIZE(BO)\r
+ vmovups %xmm1, 1*SIZE(BO)\r
+ addq $2*SIZE,BO1\r
+ addq $2*SIZE,BO2\r
+ addq $3*SIZE,BO\r
+ decq %rax\r
+ jnz .L6_03b\r
+\r
+\r
+.L6_03c:\r
+\r
+ movq BO2, B // next offset of B\r
+\r
+.L6_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C \r
+ leaq (C, LDC, 1), C // c += 3 * ldc\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L6_20\r
+\r
+ ALIGN_4\r
+\r
+.L6_11:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ prefetcht0 (CO1)\r
+ prefetcht0 (CO1,LDC,1)\r
+ prefetcht0 (CO1,LDC,2)\r
+ prefetcht0 64(CO1)\r
+ prefetcht0 64(CO1,LDC,1)\r
+ prefetcht0 64(CO1,LDC,2)\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ sarq $1, %rax // K / 2\r
+ je .L6_16\r
+\r
+ ALIGN_5\r
+\r
+.L6_12:\r
+/*\r
+ prefetcht0 B_PR1(BO)\r
+ prefetcht0 B_PR1+64(BO)\r
+ prefetcht0 B_PR1+128(BO)\r
+*/\r
+ KERNEL16x3_SUBN\r
+ KERNEL16x3_SUBN\r
+/*\r
+ KERNEL16x3_SUBN\r
+ KERNEL16x3_SUBN\r
+\r
+ KERNEL16x3_SUBN\r
+ KERNEL16x3_SUBN\r
+ KERNEL16x3_SUBN\r
+ KERNEL16x3_SUBN\r
+*/\r
+ dec %rax\r
+ jne .L6_12\r
+\r
+.L6_16:\r
+ movq K, %rax\r
+\r
+ andq $1, %rax # if (k & 1)\r
+ je .L6_19\r
+\r
+ ALIGN_4\r
+\r
+.L6_17:\r
+\r
+ KERNEL16x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L6_17\r
+ ALIGN_4\r
+\r
+\r
+.L6_19:\r
+\r
+ SAVE16x3\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L6_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L6_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+ jz .L7_10 // to next 3 lines of N\r
+\r
+ testq $8, M \r
+ jz .L6_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L6_20_1:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax\r
+ je .L6_20_6\r
+\r
+ ALIGN_4\r
+\r
+.L6_20_2:\r
+\r
+ KERNEL8x3_SUBN\r
+ KERNEL8x3_SUBN\r
+ KERNEL8x3_SUBN\r
+ KERNEL8x3_SUBN\r
+\r
+ KERNEL8x3_SUBN\r
+ KERNEL8x3_SUBN\r
+ KERNEL8x3_SUBN\r
+ KERNEL8x3_SUBN\r
+ dec %rax\r
+ jne .L6_20_2\r
+ ALIGN_4\r
+\r
+.L6_20_6:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L6_20_9\r
+\r
+\r
+ ALIGN_4\r
+\r
+.L6_20_7:\r
+\r
+ KERNEL8x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L6_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L6_20_9:\r
+\r
+ SAVE8x3\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L6_21pre:\r
+\r
+ testq $4, M \r
+ jz .L6_30\r
+ ALIGN_4\r
+\r
+.L6_21:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax\r
+ je .L6_26\r
+\r
+ ALIGN_4\r
+\r
+.L6_22:\r
+\r
+ KERNEL4x3_SUBN\r
+ KERNEL4x3_SUBN\r
+ KERNEL4x3_SUBN\r
+ KERNEL4x3_SUBN\r
+\r
+ KERNEL4x3_SUBN\r
+ KERNEL4x3_SUBN\r
+ KERNEL4x3_SUBN\r
+ KERNEL4x3_SUBN\r
+ dec %rax\r
+ jne .L6_22\r
+ ALIGN_4\r
+\r
+.L6_26:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L6_29\r
+\r
+ ALIGN_4\r
+\r
+.L6_27:\r
+\r
+ KERNEL4x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L6_27\r
+ ALIGN_4\r
+\r
+\r
+.L6_29:\r
+\r
+ SAVE4x3\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L6_30:\r
+ testq $2, M \r
+ jz .L6_40\r
+\r
+ ALIGN_4\r
+\r
+.L6_31:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax\r
+ je .L6_36\r
+ ALIGN_4\r
+\r
+.L6_32:\r
+\r
+ KERNEL2x3_SUBN\r
+ KERNEL2x3_SUBN\r
+ KERNEL2x3_SUBN\r
+ KERNEL2x3_SUBN\r
+\r
+ KERNEL2x3_SUBN\r
+ KERNEL2x3_SUBN\r
+ KERNEL2x3_SUBN\r
+ KERNEL2x3_SUBN\r
+ dec %rax\r
+ jne .L6_32\r
+ ALIGN_4\r
+\r
+.L6_36:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L6_39\r
+\r
+ ALIGN_4\r
+\r
+.L6_37:\r
+\r
+ KERNEL2x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L6_37\r
+ ALIGN_4\r
+\r
+\r
+.L6_39:\r
+\r
+ SAVE2x3\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L6_40:\r
+ testq $1, M \r
+ jz .L7_10 // to next 3 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L6_41:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+	sarq $3, %rax\r
+ je .L6_46\r
+\r
+ ALIGN_4\r
+\r
+.L6_42:\r
+\r
+ KERNEL1x3_SUBN\r
+ KERNEL1x3_SUBN\r
+ KERNEL1x3_SUBN\r
+ KERNEL1x3_SUBN\r
+\r
+ KERNEL1x3_SUBN\r
+ KERNEL1x3_SUBN\r
+ KERNEL1x3_SUBN\r
+ KERNEL1x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L6_42\r
+ ALIGN_4\r
+\r
+.L6_46:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L6_49\r
+\r
+ ALIGN_4\r
+\r
+.L6_47:\r
+\r
+ KERNEL1x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L6_47\r
+ ALIGN_4\r
+\r
+\r
+.L6_49:\r
+\r
+ SAVE1x3\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+\r
+\r
+/***************************************************************************************************************/\r
+\r
+.L7_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C \r
+ leaq (C, LDC, 1), C // c += 3 * ldc\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L7_20\r
+\r
+ ALIGN_4\r
+\r
+.L7_11:\r
+ leaq BUFFER2, BO // second buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ prefetcht0 (CO1)\r
+ prefetcht0 (CO1,LDC,1)\r
+ prefetcht0 (CO1,LDC,2)\r
+ prefetcht0 64(CO1)\r
+ prefetcht0 64(CO1,LDC,1)\r
+ prefetcht0 64(CO1,LDC,2)\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax // K / 8\r
+ je .L7_16\r
+ ALIGN_5\r
+\r
+.L7_12:\r
+/*\r
+ prefetcht0 B_PR1(BO)\r
+ prefetcht0 B_PR1+64(BO)\r
+ prefetcht0 B_PR1+128(BO)\r
+*/\r
+ KERNEL16x3_SUBN\r
+ KERNEL16x3_SUBN\r
+ KERNEL16x3_SUBN\r
+ KERNEL16x3_SUBN\r
+\r
+ KERNEL16x3_SUBN\r
+ KERNEL16x3_SUBN\r
+ KERNEL16x3_SUBN\r
+ KERNEL16x3_SUBN\r
+ dec %rax\r
+ jne .L7_12\r
+ ALIGN_4\r
+\r
+.L7_16:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L7_19\r
+\r
+ ALIGN_5\r
+\r
+.L7_17:\r
+\r
+ KERNEL16x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L7_17\r
+\r
+\r
+.L7_19:\r
+\r
+ SAVE16x3\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L7_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L7_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+	jz	.L7_60		// to next 6 lines of N\r
+\r
+ testq $8, M \r
+ jz .L7_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L7_20_1:\r
+	leaq BUFFER2, BO		// second buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax\r
+ je .L7_20_6\r
+\r
+ ALIGN_4\r
+\r
+.L7_20_2:\r
+\r
+ KERNEL8x3_SUBN\r
+ KERNEL8x3_SUBN\r
+ KERNEL8x3_SUBN\r
+ KERNEL8x3_SUBN\r
+\r
+ KERNEL8x3_SUBN\r
+ KERNEL8x3_SUBN\r
+ KERNEL8x3_SUBN\r
+ KERNEL8x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L7_20_2\r
+ ALIGN_4\r
+\r
+.L7_20_6:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L7_20_9\r
+\r
+ ALIGN_4\r
+\r
+.L7_20_7:\r
+\r
+ KERNEL8x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L7_20_7\r
+ ALIGN_4\r
+\r
+.L7_20_9:\r
+\r
+ SAVE8x3\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L7_21pre:\r
+\r
+ testq $4, M \r
+ jz .L7_30\r
+ ALIGN_4\r
+\r
+.L7_21:\r
+ leaq BUFFER2, BO // second buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax\r
+ je .L7_26\r
+\r
+ ALIGN_4\r
+\r
+.L7_22:\r
+\r
+ KERNEL4x3_SUBN\r
+ KERNEL4x3_SUBN\r
+ KERNEL4x3_SUBN\r
+ KERNEL4x3_SUBN\r
+\r
+ KERNEL4x3_SUBN\r
+ KERNEL4x3_SUBN\r
+ KERNEL4x3_SUBN\r
+ KERNEL4x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L7_22\r
+ ALIGN_4\r
+\r
+.L7_26:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L7_29\r
+\r
+ ALIGN_4\r
+\r
+.L7_27:\r
+\r
+ KERNEL4x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L7_27\r
+ ALIGN_4\r
+\r
+\r
+.L7_29:\r
+\r
+ SAVE4x3\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L7_30:\r
+ testq $2, M \r
+ jz .L7_40\r
+\r
+ ALIGN_4\r
+\r
+.L7_31:\r
+ leaq BUFFER2, BO // second buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax\r
+ je .L7_36\r
+\r
+ ALIGN_4\r
+\r
+.L7_32:\r
+\r
+ KERNEL2x3_SUBN\r
+ KERNEL2x3_SUBN\r
+ KERNEL2x3_SUBN\r
+ KERNEL2x3_SUBN\r
+\r
+ KERNEL2x3_SUBN\r
+ KERNEL2x3_SUBN\r
+ KERNEL2x3_SUBN\r
+ KERNEL2x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L7_32\r
+ ALIGN_4\r
+\r
+.L7_36:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L7_39\r
+\r
+ ALIGN_4\r
+\r
+.L7_37:\r
+\r
+ KERNEL2x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L7_37\r
+ ALIGN_4\r
+\r
+\r
+.L7_39:\r
+\r
+ SAVE2x3\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L7_40:\r
+ testq $1, M \r
+	jz	.L7_60		// to next 6 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L7_41:\r
+ leaq BUFFER2, BO // second buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax\r
+ je .L7_46\r
+\r
+ ALIGN_4\r
+\r
+.L7_42:\r
+ KERNEL1x3_SUBN\r
+ KERNEL1x3_SUBN\r
+ KERNEL1x3_SUBN\r
+ KERNEL1x3_SUBN\r
+\r
+ KERNEL1x3_SUBN\r
+ KERNEL1x3_SUBN\r
+ KERNEL1x3_SUBN\r
+ KERNEL1x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L7_42\r
+ ALIGN_4\r
+\r
+.L7_46:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L7_49\r
+\r
+ ALIGN_4\r
+\r
+.L7_47:\r
+\r
+ KERNEL1x3_SUBN\r
+\r
+ dec %rax\r
+ jne .L7_47\r
+ ALIGN_4\r
+\r
+\r
+.L7_49:\r
+\r
+ SAVE1x3\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+\r
+.L7_60:\r
+\r
+ decq J // j --\r
+ jg .L6_01\r
+\r
+\r
+.L2_0:\r
+ cmpq $0, Nmod6 // N % 6 == 0\r
+ je .L999\r
+\r
+/************************************************************************************************\r
+* Loop for Nmod6 / 2 > 0\r
+*************************************************************************************************/\r
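+// The N%6 tail is split in two: this loop takes Nmod6/2 passes of two\r
+// columns, and the Nmod6%2 loop below (.L1_x) takes a final single column.\r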
+\r
+ movq Nmod6, J \r
+ sarq $1, J // j = j / 2\r
+ je .L1_0\r
+ ALIGN_4\r
+\r
+.L2_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ sarq $2, %rax // K / 4\r
+ jz .L2_01b\r
+ ALIGN_4\r
+\r
+.L2_01a:\r
+ prefetcht0 512(BO1)\r
+ prefetchw 512(BO)\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups 2*SIZE(BO1), %xmm1\r
+ vmovups 4*SIZE(BO1), %xmm2\r
+ vmovups 6*SIZE(BO1), %xmm3\r
+\r
+ vmovups %xmm0, (BO)\r
+ vmovups %xmm1, 2*SIZE(BO)\r
+ vmovups %xmm2, 4*SIZE(BO)\r
+ vmovups %xmm3, 6*SIZE(BO)\r
+\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_01a\r
+\r
+\r
+.L2_01b:\r
+\r
+ movq K, %rax\r
+ andq $3, %rax // K % 4\r
+ jz .L2_02d\r
+ ALIGN_4\r
+\r
+.L2_02c:\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups %xmm0, (BO)\r
+ addq $2*SIZE,BO1\r
+ addq $2*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_02c\r
+\r
+.L2_02d:\r
+\r
+ movq BO1, B // next offset of B\r
+\r
+.L2_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C // c += 2 * ldc\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $32 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L2_20\r
+\r
+ ALIGN_4\r
+\r
+.L2_11:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_16\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
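+// Loop idiom: AO/BO were advanced past the whole block above, and %rax/BI\r
+// were negated, so they count up toward zero while the KERNEL macros index\r
+// as (AO,%rax,SIZE)/(BO,BI,SIZE). The "je"/"jl" branches test the flags\r
+// left by the counter updates inside the macros.\r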
+\r
+.L2_12:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ je .L2_16\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ je .L2_16\r
+\r
+ jmp .L2_12\r
+ ALIGN_4\r
+\r
+.L2_16:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L2_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_17:\r
+\r
+ KERNEL16x2_SUB\r
+\r
+ jl .L2_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_19:\r
+\r
+ SAVE16x2\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L2_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L2_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+	jz	.L2_60		// to next 2 lines of N\r
+\r
+ testq $8, M \r
+ jz .L2_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L2_20_1:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L2_20_6\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_20_2:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ je .L2_20_6\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ je .L2_20_6\r
+\r
+ jmp .L2_20_2\r
+ ALIGN_4\r
+\r
+.L2_20_6:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L2_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_20_7:\r
+\r
+ KERNEL8x2_SUB\r
+\r
+ jl .L2_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L2_20_9:\r
+\r
+ SAVE8x2\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L2_21pre:\r
+\r
+ testq $4, M \r
+ jz .L2_30\r
+ ALIGN_4\r
+\r
+.L2_21:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L2_26\r
+ movq %rax, BI // Index for BO\r
+	leaq	(BI,BI,1), BI	//  BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_22:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ je .L2_26\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ je .L2_26\r
+\r
+ jmp .L2_22\r
+ ALIGN_4\r
+\r
+.L2_26:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L2_29\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_27:\r
+\r
+ KERNEL4x2_SUB\r
+\r
+ jl .L2_27\r
+ ALIGN_4\r
+\r
+\r
+.L2_29:\r
+\r
+ SAVE4x2\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L2_30:\r
+ testq $2, M \r
+ jz .L2_40\r
+\r
+ ALIGN_4\r
+\r
+.L2_31:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L2_36\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_32:\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ je .L2_36\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ je .L2_36\r
+\r
+ jmp .L2_32\r
+ ALIGN_4\r
+\r
+.L2_36:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L2_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_37:\r
+\r
+ KERNEL2x2_SUB\r
+\r
+ jl .L2_37\r
+ ALIGN_4\r
+\r
+\r
+.L2_39:\r
+\r
+ SAVE2x2\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L2_40:\r
+ testq $1, M \r
+ jz .L2_60 // to next 2 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L2_41:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L2_46\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_42:\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ je .L2_46\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ je .L2_46\r
+\r
+ jmp .L2_42\r
+ ALIGN_4\r
+\r
+.L2_46:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L2_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_47:\r
+\r
+ KERNEL1x2_SUB\r
+\r
+ jl .L2_47\r
+ ALIGN_4\r
+\r
+\r
+.L2_49:\r
+\r
+ SAVE1x2\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+.L2_60:\r
+\r
+ decq J // j --\r
+ jg .L2_01 // next 2 lines of N\r
+\r
+\r
+\r
+.L1_0:\r
+\r
+/************************************************************************************************\r
+* Loop for Nmod6 % 2 > 0\r
+*************************************************************************************************/\r
+\r
+ movq Nmod6, J \r
+ andq $1, J // j % 2\r
+ je .L999\r
+ ALIGN_4\r
+\r
+.L1_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
+.L1_02b:\r
+\r
+ vmovsd (BO1), %xmm0\r
+ vmovsd %xmm0, (BO)\r
+ addq $1*SIZE,BO1\r
+ addq $1*SIZE,BO\r
+ decq %rax\r
+ jnz .L1_02b\r
+\r
+.L1_02c:\r
+\r
+ movq BO1, B // next offset of B\r
+\r
+.L1_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 1), C // c += 1 * ldc\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $32 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L1_20\r
+\r
+ ALIGN_4\r
+\r
+.L1_11:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_16\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_12:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ je .L1_16\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ je .L1_16\r
+\r
+ jmp .L1_12\r
+ ALIGN_4\r
+\r
+.L1_16:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L1_19\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_17:\r
+\r
+ KERNEL16x1_SUB\r
+\r
+ jl .L1_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_19:\r
+\r
+ SAVE16x1\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L1_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L1_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+ jz .L999\r
+\r
+ testq $8, M \r
+ jz .L1_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L1_20_1:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L1_20_6\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_20_2:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ je .L1_20_6\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ je .L1_20_6\r
+\r
+ jmp .L1_20_2\r
+ ALIGN_4\r
+\r
+.L1_20_6:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L1_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_20_7:\r
+\r
+ KERNEL8x1_SUB\r
+\r
+ jl .L1_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L1_20_9:\r
+\r
+ SAVE8x1\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L1_21pre:\r
+\r
+ testq $4, M \r
+ jz .L1_30\r
+ ALIGN_4\r
+\r
+.L1_21:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L1_26\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_22:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ je .L1_26\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ je .L1_26\r
+\r
+ jmp .L1_22\r
+ ALIGN_4\r
+\r
+.L1_26:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L1_29\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_27:\r
+\r
+ KERNEL4x1_SUB\r
+\r
+ jl .L1_27\r
+ ALIGN_4\r
+\r
+\r
+.L1_29:\r
+\r
+ SAVE4x1\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L1_30:\r
+ testq $2, M \r
+ jz .L1_40\r
+\r
+ ALIGN_4\r
+\r
+.L1_31:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L1_36\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_32:\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ je .L1_36\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ je .L1_36\r
+\r
+ jmp .L1_32\r
+ ALIGN_4\r
+\r
+.L1_36:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L1_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_37:\r
+\r
+ KERNEL2x1_SUB\r
+\r
+ jl .L1_37\r
+ ALIGN_4\r
+\r
+\r
+.L1_39:\r
+\r
+ SAVE2x1\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L1_40:\r
+ testq $1, M \r
+ jz .L999\r
+\r
+ ALIGN_4\r
+\r
+.L1_41:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L1_46\r
+ movq %rax, BI // Index for BO\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_42:\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ je .L1_46\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ je .L1_46\r
+\r
+ jmp .L1_42\r
+ ALIGN_4\r
+\r
+.L1_46:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L1_49\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_47:\r
+\r
+ KERNEL1x1_SUB\r
+\r
+ jl .L1_47\r
+ ALIGN_4\r
+\r
+\r
+.L1_49:\r
+\r
+ SAVE1x1\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+.L999:\r
+ movq SP, %rsp\r
+ movq (%rsp), %rbx\r
+ movq 8(%rsp), %rbp\r
+ movq 16(%rsp), %r12\r
+ movq 24(%rsp), %r13\r
+ movq 32(%rsp), %r14\r
+ movq 40(%rsp), %r15\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq 48(%rsp), %rdi\r
+ movq 56(%rsp), %rsi\r
+ movups 64(%rsp), %xmm6\r
+ movups 80(%rsp), %xmm7\r
+ movups 96(%rsp), %xmm8\r
+ movups 112(%rsp), %xmm9\r
+ movups 128(%rsp), %xmm10\r
+ movups 144(%rsp), %xmm11\r
+ movups 160(%rsp), %xmm12\r
+ movups 176(%rsp), %xmm13\r
+ movups 192(%rsp), %xmm14\r
+ movups 208(%rsp), %xmm15\r
+#endif\r
+\r
+ addq $STACKSIZE, %rsp\r
+ ret\r
+\r
+ EPILOGUE\r
+\r
+\r
+#else\r
+/*************************************************************************************\r
+* TRMM Kernel\r
+*************************************************************************************/\r
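+// The TRMM path reuses the same packing and micro-kernels as the GEMM\r
+// path above; the extra OFFSET/KK/KKK bookkeeping below limits each tile\r
+// to the triangular part of the operand.\r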
+\r
+\r
+ PROLOGUE\r
+ PROFCODE\r
+ \r
+ subq $STACKSIZE, %rsp\r
+ movq %rbx, (%rsp)\r
+ movq %rbp, 8(%rsp)\r
+ movq %r12, 16(%rsp)\r
+ movq %r13, 24(%rsp)\r
+ movq %r14, 32(%rsp)\r
+ movq %r15, 40(%rsp)\r
+\r
+ vzeroupper\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq %rdi, 48(%rsp)\r
+ movq %rsi, 56(%rsp)\r
+ movups %xmm6, 64(%rsp)\r
+ movups %xmm7, 80(%rsp)\r
+ movups %xmm8, 96(%rsp)\r
+ movups %xmm9, 112(%rsp)\r
+ movups %xmm10, 128(%rsp)\r
+ movups %xmm11, 144(%rsp)\r
+ movups %xmm12, 160(%rsp)\r
+ movups %xmm13, 176(%rsp)\r
+ movups %xmm14, 192(%rsp)\r
+ movups %xmm15, 208(%rsp)\r
+\r
+ movq ARG1, OLD_M\r
+ movq ARG2, OLD_N\r
+ movq ARG3, OLD_K\r
+ movq OLD_A, A\r
+ movq OLD_B, B\r
+ movq OLD_C, C\r
+ movq OLD_LDC, LDC\r
+#ifdef TRMMKERNEL\r
+ movsd OLD_OFFSET, %xmm12\r
+#endif\r
+ vmovaps %xmm3, %xmm0\r
+\r
+#else\r
+ movq STACKSIZE + 8(%rsp), LDC\r
+#ifdef TRMMKERNEL\r
+ movsd STACKSIZE + 16(%rsp), %xmm12\r
+#endif\r
+\r
+#endif\r
+\r
+ movq %rsp, SP # save old stack\r
+ subq $128 + L_BUFFER_SIZE, %rsp\r
+ andq $-4096, %rsp # align stack\r
+\r
+ STACK_TOUCH\r
+\r
+ cmpq $0, OLD_M\r
+ je .L999\r
+\r
+ cmpq $0, OLD_N\r
+ je .L999\r
+\r
+ cmpq $0, OLD_K\r
+ je .L999\r
+\r
+ movq OLD_M, M\r
+ movq OLD_N, N\r
+ movq OLD_K, K\r
+\r
+ vmovsd %xmm0, ALPHA\r
+\r
+ salq $BASE_SHIFT, LDC\r
+\r
+ movq N, %rax\r
+ xorq %rdx, %rdx\r
+ movq $2, %rdi\r
+	divq	%rdi		//    N / 2\r
+	movq	%rax, Ndiv6	//    N / 2 (name kept from the 6-wide variant)\r
+	movq	%rdx, Nmod6	//    N % 2\r
+\r
+ \r
+\r
+#ifdef TRMMKERNEL\r
+ vmovsd %xmm12, OFFSET\r
+ vmovsd %xmm12, KK\r
+#ifndef LEFT\r
+ negq KK\r
+#endif \r
+#endif\r
+\r
+ movq Ndiv6, J\r
+ cmpq $0, J\r
+ je .L1_0\r
+ ALIGN_4\r
+\r
+.L2_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ sarq $2, %rax // K / 4\r
+ jz .L2_01b\r
+ ALIGN_4\r
+\r
+.L2_01a:\r
+ prefetcht0 512(BO1)\r
+ prefetchw 512(BO)\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups 2*SIZE(BO1), %xmm1\r
+ vmovups 4*SIZE(BO1), %xmm2\r
+ vmovups 6*SIZE(BO1), %xmm3\r
+\r
+ vmovups %xmm0, (BO)\r
+ vmovups %xmm1, 2*SIZE(BO)\r
+ vmovups %xmm2, 4*SIZE(BO)\r
+ vmovups %xmm3, 6*SIZE(BO)\r
+\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_01a\r
+\r
+\r
+.L2_01b:\r
+\r
+ movq K, %rax\r
+ andq $3, %rax // K % 4\r
+ jz .L2_02d\r
+ ALIGN_4\r
+\r
+.L2_02c:\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups %xmm0, (BO)\r
+ addq $2*SIZE,BO1\r
+ addq $2*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_02c\r
+\r
+.L2_02d:\r
+\r
+ movq BO1, B // next offset of B\r
+\r
+.L2_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C // c += 2 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $32 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L2_20\r
+\r
+ ALIGN_4\r
+\r
+.L2_11:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $16, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
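+/* Effective inner length for this tile (a sketch of the selection above,\r
+   illustration only; 16 is the row height of this tile, 2 the panel width):\r
+\r
+       if (!TRMMKERNEL)          kkk = K;            // plain GEMM\r
+       else if (LEFT != TRANSA)  kkk = K - kk;       // tail of the panel\r
+       else                      kkk = kk + (LEFT ? 16 : 2); // head\r
+*/\r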
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_16\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_12:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ je .L2_16\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ je .L2_16\r
+\r
+ jmp .L2_12\r
+ ALIGN_4\r
+\r
+.L2_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L2_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_17:\r
+\r
+ KERNEL16x2_SUB\r
+\r
+ jl .L2_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_19:\r
+\r
+ SAVE16x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $16, KK \r
+#endif\r
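+// After a shortened TRMM tile, the block above walks AO/BO over the\r
+// (K - KKK) iterations that were skipped, and KK grows by the tile\r
+// height so the next row block starts at the proper diagonal offset.\r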
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L2_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L2_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+	jz	.L2_60		// to next 2 lines of N\r
+\r
+ testq $8, M \r
+ jz .L2_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L2_20_1:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $8, %rax // number of values in A\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L2_20_6\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_20_2:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ je .L2_20_6\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ je .L2_20_6\r
+\r
+ jmp .L2_20_2\r
+ ALIGN_4\r
+\r
+.L2_20_6:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L2_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_20_7:\r
+\r
+ KERNEL8x2_SUB\r
+\r
+ jl .L2_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L2_20_9:\r
+\r
+ SAVE8x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $8, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L2_21pre:\r
+\r
+ testq $4, M \r
+ jz .L2_30\r
+ ALIGN_4\r
+\r
+.L2_21:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in A\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L2_26\r
+ movq %rax, BI // Index for BO\r
+	leaq	(BI,BI,1), BI	//  BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_22:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ je .L2_26\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ je .L2_26\r
+\r
+ jmp .L2_22\r
+ ALIGN_4\r
+\r
+.L2_26:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L2_29\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_27:\r
+\r
+ KERNEL4x2_SUB\r
+\r
+ jl .L2_27\r
+ ALIGN_4\r
+\r
+\r
+.L2_29:\r
+\r
+ SAVE4x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L2_30:\r
+ testq $2, M \r
+ jz .L2_40\r
+\r
+ ALIGN_4\r
+\r
+.L2_31:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L2_36\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_32:\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ je .L2_36\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ je .L2_36\r
+\r
+ jmp .L2_32\r
+ ALIGN_4\r
+\r
+.L2_36:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L2_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_37:\r
+\r
+ KERNEL2x2_SUB\r
+\r
+ jl .L2_37\r
+ ALIGN_4\r
+\r
+\r
+.L2_39:\r
+\r
+ SAVE2x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L2_40:\r
+ testq $1, M \r
+ jz .L2_60 // to next 2 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L2_41:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ andq $-8, %rax\r
+ je .L2_46\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_42:\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ je .L2_46\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ je .L2_46\r
+\r
+ jmp .L2_42\r
+ ALIGN_4\r
+\r
+.L2_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L2_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_47:\r
+\r
+ KERNEL1x2_SUB\r
+\r
+ jl .L2_47\r
+ ALIGN_4\r
+\r
+\r
+.L2_49:\r
+\r
+ SAVE1x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK\r
+#endif\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+\r
+\r
+ \r
+.L2_60:\r
+#if defined(TRMMKERNEL) && !defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ decq J // j --\r
+ jg .L2_01 // next 2 lines of N\r
+\r
+\r
+\r
+.L1_0:\r
+\r
+/************************************************************************************************\r
+* Loop for Nmod6 % 2 > 0\r
+*************************************************************************************************/\r
+\r
+ movq Nmod6, J \r
+ andq $1, J // j % 2\r
+ je .L999\r
+ ALIGN_4\r
+\r
+.L1_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
+.L1_02b:\r
+\r
+ vmovsd (BO1), %xmm0\r
+ vmovsd %xmm0, (BO)\r
+ addq $1*SIZE,BO1\r
+ addq $1*SIZE,BO\r
+ decq %rax\r
+ jnz .L1_02b\r
+\r
+.L1_02c:\r
+\r
+ movq BO1, B // next offset of B\r
+\r
+.L1_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 1), C // c += 1 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $32 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L1_20\r
+\r
+ ALIGN_4\r
+\r
+.L1_11:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $16, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_16\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_12:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ je .L1_16\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ je .L1_16\r
+\r
+ jmp .L1_12\r
+ ALIGN_4\r
+\r
+.L1_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L1_19\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_17:\r
+\r
+ KERNEL16x1_SUB\r
+\r
+ jl .L1_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_19:\r
+\r
+ SAVE16x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $16, KK \r
+#endif\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L1_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L1_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+ jz .L999\r
+\r
+ testq $8, M \r
+ jz .L1_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L1_20_1:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $8, %rax // number of values in A\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L1_20_6\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_20_2:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ je .L1_20_6\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ je .L1_20_6\r
+\r
+ jmp .L1_20_2\r
+ ALIGN_4\r
+\r
+.L1_20_6:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L1_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_20_7:\r
+\r
+ KERNEL8x1_SUB\r
+\r
+ jl .L1_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L1_20_9:\r
+\r
+ SAVE8x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $8, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L1_21pre:\r
+\r
+ testq $4, M \r
+ jz .L1_30\r
+ ALIGN_4\r
+\r
+.L1_21:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in A\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L1_26\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_22:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ je .L1_26\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ je .L1_26\r
+\r
+ jmp .L1_22\r
+ ALIGN_4\r
+\r
+.L1_26:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L1_29\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_27:\r
+\r
+ KERNEL4x1_SUB\r
+\r
+ jl .L1_27\r
+ ALIGN_4\r
+\r
+\r
+.L1_29:\r
+\r
+ SAVE4x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L1_30:\r
+ testq $2, M \r
+ jz .L1_40\r
+\r
+ ALIGN_4\r
+\r
+.L1_31:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L1_36\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_32:\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ je .L1_36\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ je .L1_36\r
+\r
+ jmp .L1_32\r
+ ALIGN_4\r
+\r
+.L1_36:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L1_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_37:\r
+\r
+ KERNEL2x1_SUB\r
+\r
+ jl .L1_37\r
+ ALIGN_4\r
+\r
+\r
+.L1_39:\r
+\r
+ SAVE2x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L1_40:\r
+ testq $1, M \r
+ jz .L999\r
+\r
+ ALIGN_4\r
+\r
+.L1_41:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ andq $-8, %rax\r
+ je .L1_46\r
+ movq %rax, BI // Index for BO\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_42:\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ je .L1_46\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ je .L1_46\r
+\r
+ jmp .L1_42\r
+ ALIGN_4\r
+\r
+.L1_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L1_49\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_47:\r
+\r
+ KERNEL1x1_SUB\r
+\r
+ jl .L1_47\r
+ ALIGN_4\r
+\r
+\r
+.L1_49:\r
+\r
+ SAVE1x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO \r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK\r
+#endif\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+.L999:\r
+ movq SP, %rsp\r
+ movq (%rsp), %rbx\r
+ movq 8(%rsp), %rbp\r
+ movq 16(%rsp), %r12\r
+ movq 24(%rsp), %r13\r
+ movq 32(%rsp), %r14\r
+ movq 40(%rsp), %r15\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq 48(%rsp), %rdi\r
+ movq 56(%rsp), %rsi\r
+ movups 64(%rsp), %xmm6\r
+ movups 80(%rsp), %xmm7\r
+ movups 96(%rsp), %xmm8\r
+ movups 112(%rsp), %xmm9\r
+ movups 128(%rsp), %xmm10\r
+ movups 144(%rsp), %xmm11\r
+ movups 160(%rsp), %xmm12\r
+ movups 176(%rsp), %xmm13\r
+ movups 192(%rsp), %xmm14\r
+ movups 208(%rsp), %xmm15\r
+#endif\r
+\r
+ addq $STACKSIZE, %rsp\r
+ ret\r
+\r
+ EPILOGUE\r
+\r
+\r
+\r
+\r
+\r
+#endif\r
--- /dev/null
+/*********************************************************************************\r
+Copyright (c) 2013, The OpenBLAS Project\r
+All rights reserved.\r
+Redistribution and use in source and binary forms, with or without\r
+modification, are permitted provided that the following conditions are\r
+met:\r
+1. Redistributions of source code must retain the above copyright\r
+notice, this list of conditions and the following disclaimer.\r
+2. Redistributions in binary form must reproduce the above copyright\r
+notice, this list of conditions and the following disclaimer in\r
+the documentation and/or other materials provided with the\r
+distribution.\r
+3. Neither the name of the OpenBLAS project nor the names of\r
+its contributors may be used to endorse or promote products\r
+derived from this software without specific prior written permission.\r
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\r
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE\r
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\r
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\r
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\r
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\r
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+**********************************************************************************/\r
+\r
+\r
+/*********************************************************************\r
+* 2013/10/28 Saar\r
+* BLASTEST : OK\r
+* CTEST : OK\r
+* TEST : OK\r
+\r
+*\r
+*\r
+* 2013/10/27 Saar\r
+* Parameter:\r
+* DGEMM_DEFAULT_UNROLL_N 4\r
+* DGEMM_DEFAULT_UNROLL_M 4\r
+* DGEMM_DEFAULT_P 512\r
+* DGEMM_DEFAULT_Q 256\r
+* A_PR1 512\r
+* B_PR1 512\r
+*\r
+*\r
+* Performance at 9216x9216x9216:\r
+* 1 thread: 53.3 GFLOPS (MKL: 54)\r
+* 2 threads: 100.0 GFLOPS (MKL: 97)\r
+* 3 threads: 147.0 GFLOPS (MKL: 133)\r
+* 4 threads: 184.0 GFLOPS (MKL: 170)\r
+*********************************************************************/\r
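
For orientation, the parameters listed above follow the usual OpenBLAS blocking scheme: DGEMM_DEFAULT_P and DGEMM_DEFAULT_Q size the packed panels of A and B, and the 4x12 register tile implemented below is the innermost unit of work. A minimal C sketch of that loop nest, with illustrative names that are not part of this patch:

    /* Hedged sketch: how the blocking parameters relate to the loop
     * nest. P and Q mirror DGEMM_DEFAULT_P/Q above; micro_kernel_4x12
     * stands for the assembly kernel in this file. Illustrative only. */
    enum { P = 512, Q = 256, MR = 4, NR = 12 };

    void dgemm_blocked(long m, long n, long k)
    {
        for (long kk = 0; kk < k; kk += Q)        /* pack B(kk..kk+Q, :) */
            for (long ii = 0; ii < m; ii += P)    /* pack A(ii.., kk..)  */
                for (long j = 0; j < n; j += NR)  /* 12 columns of C     */
                    for (long i = ii; i < ii + P && i < m; i += MR)
                        /* micro_kernel_4x12(i, j, kk, ...); */ ;
    }
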
+\r
+\r
+#define ASSEMBLER\r
+#include "common.h"\r
+ \r
+#define OLD_M %rdi\r
+#define OLD_N %rsi\r
+#define M %r13\r
+#define J %r14\r
+#define OLD_K %rdx\r
+\r
+#define A %rcx\r
+#define B %r8\r
+#define C %r9\r
+#define LDC %r10\r
+ \r
+#define I %r11\r
+#define AO %rdi\r
+#define BO %rsi\r
+#define CO1 %r15\r
+#define K %r12\r
+#define SP %rbx\r
+\r
+#define BO1 %rdi\r
+#define BO2 %r15\r
+#define BO3 %rbp\r
+\r
+#ifndef WINDOWS_ABI\r
+\r
+#define STACKSIZE 96\r
+\r
+#else\r
+\r
+#define STACKSIZE 256\r
+\r
+#define OLD_A 40 + STACKSIZE(%rsp)\r
+#define OLD_B 48 + STACKSIZE(%rsp)\r
+#define OLD_C 56 + STACKSIZE(%rsp)\r
+#define OLD_LDC 64 + STACKSIZE(%rsp)\r
+#define OLD_OFFSET 72 + STACKSIZE(%rsp)\r
+\r
+#endif\r
+\r
+#define L_BUFFER_SIZE 512*8*12\r
+\r
+#define Ndiv12 24(%rsp)\r
+#define Nmod12 32(%rsp)\r
+#define N 40(%rsp)\r
+#define ALPHA 48(%rsp)\r
+#define OFFSET 56(%rsp)\r
+#define KK 64(%rsp)\r
+#define KKK 72(%rsp)\r
+#define BUFFER1 128(%rsp)\r
+\r
+#if defined(OS_WINDOWS)\r
+#if L_BUFFER_SIZE > 16384\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 4(%rsp);\\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 12288\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 8192\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 4096\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 1(%rsp);\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
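
STACK_TOUCH exists because Windows grows the stack one guard page at a time: after the large downward stack adjustment below, each 4 KiB page of the local buffer has to be written, highest page first, which is exactly what the movl stores do. A hedged C equivalent:

    /* Hedged sketch of STACK_TOUCH: after a large downward stack
     * adjustment on Windows, write one word into each 4 KiB page,
     * highest page first, so the guard page advances one page at a
     * time instead of faulting. */
    static void stack_touch(volatile char *new_sp, unsigned long pages)
    {
        while (pages--)                       /* mirrors the movl $0 stores */
            new_sp[4096 * (pages + 1)] = 0;   /* 4096*N(%rsp), descending   */
    }
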
+\r
+#define A_PR1 512\r
+#define B_PR1 512\r
+\r
+/*******************************************************************************************\r
+* Macro definitions\r
+*******************************************************************************************/\r
+\r
+.macro INIT4x12\r
+\r
+ vxorpd %ymm4 , %ymm4 , %ymm4\r
+ vxorpd %ymm5 , %ymm5 , %ymm5\r
+ vxorpd %ymm6 , %ymm6 , %ymm6\r
+ vxorpd %ymm7 , %ymm7 , %ymm7\r
+ vxorpd %ymm8 , %ymm8 , %ymm8\r
+ vxorpd %ymm9 , %ymm9 , %ymm9\r
+ vxorpd %ymm10, %ymm10, %ymm10\r
+ vxorpd %ymm11, %ymm11, %ymm11\r
+ vxorpd %ymm12, %ymm12, %ymm12\r
+ vxorpd %ymm13, %ymm13, %ymm13\r
+ vxorpd %ymm14, %ymm14, %ymm14\r
+ vxorpd %ymm15, %ymm15, %ymm15\r
+\r
+.endm\r
+\r
+.macro KERNEL4x12_I\r
+ prefetcht0 A_PR1(AO)\r
+ vmovups -12 * SIZE(BO), %ymm1\r
+ prefetcht0 B_PR1(BO)\r
+ vmovups -16 * SIZE(AO), %ymm0\r
+ prefetcht0 B_PR1+64(BO)\r
+ vmovups -8 * SIZE(BO), %ymm2\r
+ prefetcht0 B_PR1+128(BO)\r
+ vmovups -4 * SIZE(BO), %ymm3\r
+ vmulpd %ymm0 ,%ymm1 , %ymm4\r
+ prefetcht0 B_PR1+192(BO)\r
+ vmulpd %ymm0 ,%ymm2 , %ymm8\r
+ vmulpd %ymm0 ,%ymm3 , %ymm12\r
+ prefetcht0 B_PR1+256(BO)\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vmulpd %ymm0 ,%ymm1 , %ymm5\r
+ vmulpd %ymm0 ,%ymm2 , %ymm9\r
+ vmulpd %ymm0 ,%ymm3 , %ymm13\r
+ vpermpd $0x1b, %ymm0 , %ymm0\r
+ vmulpd %ymm0 ,%ymm1 , %ymm6\r
+ vmulpd %ymm0 ,%ymm2 , %ymm10\r
+\r
+ addq $12*SIZE, BO\r
+ vmulpd %ymm0 ,%ymm3 , %ymm14\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vmulpd %ymm0 ,%ymm1 , %ymm7\r
+ vmovups -12 * SIZE(BO), %ymm1\r
+ vmulpd %ymm0 ,%ymm2 , %ymm11\r
+ vmovups -8 * SIZE(BO), %ymm2\r
+ vmulpd %ymm0 ,%ymm3 , %ymm15\r
+ vmovups -4 * SIZE(BO), %ymm3\r
+\r
+.endm\r
+\r
+.macro KERNEL4x12_M1\r
+ prefetcht0 A_PR1(AO)\r
+ vmovups -16 * SIZE(AO), %ymm0\r
+ prefetcht0 B_PR1(BO)\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm4\r
+ prefetcht0 B_PR1+64(BO)\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm8\r
+ prefetcht0 B_PR1+128(BO)\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm12\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm5\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm9\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm13\r
+ vpermpd $0x1b, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm6\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm10\r
+\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm14\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm7\r
+ vmovups -12 * SIZE(BO), %ymm1\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm11\r
+ vmovups -8 * SIZE(BO), %ymm2\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm15\r
+ vmovups -4 * SIZE(BO), %ymm3\r
+\r
+.endm\r
+\r
+.macro KERNEL4x12_M2\r
+ vmovups -12 * SIZE(AO), %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm4\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm8\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm12\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm5\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm9\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm13\r
+ vpermpd $0x1b, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm6\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm10\r
+\r
+ addq $8*SIZE, AO\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm14\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm7\r
+ vmovups 0 * SIZE(BO), %ymm1\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm11\r
+ vmovups 4 * SIZE(BO), %ymm2\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm15\r
+ vmovups 8 * SIZE(BO), %ymm3\r
+ addq $24*SIZE, BO\r
+.endm\r
+\r
+\r
+.macro KERNEL4x12_E\r
+ vmovups -12 * SIZE(AO), %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm4\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm8\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm12\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm5\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm9\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm13\r
+ vpermpd $0x1b, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm6\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm10\r
+\r
+ addq $8*SIZE, AO\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm14\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm7\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm11\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm15\r
+ addq $12*SIZE, BO\r
+.endm\r
+\r
+.macro KERNEL4x12_SUB\r
+ vmovups -12 * SIZE(BO), %ymm1\r
+ vmovups -16 * SIZE(AO), %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm4\r
+ vmovups -8 * SIZE(BO), %ymm2\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm8\r
+ vmovups -4 * SIZE(BO), %ymm3\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm12\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm5\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm9\r
+ addq $12*SIZE, BO\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm13\r
+ vpermpd $0x1b, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm6\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm10\r
+ addq $4*SIZE, AO\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm14\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm7\r
+ vfmadd231pd %ymm0 ,%ymm2 , %ymm11\r
+ vfmadd231pd %ymm0 ,%ymm3 , %ymm15\r
+\r
+.endm\r
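
KERNEL4x12_SUB above is one k-step of the 4x12 tile: twelve ymm accumulators (ymm4-ymm15), each holding four doubles of one C column. The assembly keeps A in a single ymm register and rotates it with vpermpd $0xb1 / $0x1b between FMAs, so each accumulator holds lane-rotated products that SAVE4x12 later restores. As a plainer illustration of the same arithmetic, here is a hedged broadcast-based sketch (not the file's exact lane layout):

    #include <immintrin.h>

    /* Hedged sketch of one k-step: acc[j] += A[0..3] * B[j] for the
     * twelve columns. The real kernel avoids the broadcasts by rotating
     * the A vector with vpermpd and untangling lanes in SAVE4x12. */
    static void kernel4x12_step(const double *a, const double *b,
                                __m256d acc[12])
    {
        __m256d av = _mm256_loadu_pd(a);              /* four rows of A    */
        for (int j = 0; j < 12; j++) {
            __m256d bj = _mm256_broadcast_sd(b + j);  /* B[j] in all lanes */
            acc[j] = _mm256_fmadd_pd(av, bj, acc[j]);
        }
    }
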
+\r
+\r
+.macro SAVE4x12\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+ vmulpd %ymm0 , %ymm6 , %ymm6\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+\r
+ vmulpd %ymm0 , %ymm8 , %ymm8\r
+ vmulpd %ymm0 , %ymm9 , %ymm9\r
+ vmulpd %ymm0 , %ymm10, %ymm10\r
+ vmulpd %ymm0 , %ymm11, %ymm11\r
+\r
+ vmulpd %ymm0 , %ymm12, %ymm12\r
+ vmulpd %ymm0 , %ymm13, %ymm13\r
+ vmulpd %ymm0 , %ymm14, %ymm14\r
+ vmulpd %ymm0 , %ymm15, %ymm15\r
+\r
+ vpermpd $0xb1 , %ymm5, %ymm5\r
+ vpermpd $0xb1 , %ymm7, %ymm7\r
+\r
+ vblendpd $0x0a, %ymm5, %ymm4, %ymm0\r
+ vblendpd $0x05, %ymm5, %ymm4, %ymm1\r
+ vblendpd $0x0a, %ymm7, %ymm6, %ymm2\r
+ vblendpd $0x05, %ymm7, %ymm6, %ymm3\r
+\r
+ vpermpd $0x1b , %ymm2, %ymm2\r
+ vpermpd $0x1b , %ymm3, %ymm3\r
+ vpermpd $0xb1 , %ymm2, %ymm2\r
+ vpermpd $0xb1 , %ymm3, %ymm3\r
+\r
+ vblendpd $0x03, %ymm0, %ymm2 , %ymm4\r
+ vblendpd $0x03, %ymm1, %ymm3 , %ymm5\r
+ vblendpd $0x03, %ymm2, %ymm0 , %ymm6\r
+ vblendpd $0x03, %ymm3, %ymm1 , %ymm7\r
+\r
+ leaq (CO1, LDC, 2), %rax \r
+ \r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4, %ymm4\r
+ vaddpd (CO1, LDC), %ymm5, %ymm5\r
+ vaddpd (%rax), %ymm6, %ymm6\r
+ vaddpd (%rax, LDC), %ymm7, %ymm7\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm5 , (CO1, LDC)\r
+ vmovups %ymm6 , (%rax)\r
+ vmovups %ymm7 , (%rax, LDC)\r
+\r
+ prefetcht0 32(CO1)\r
+ prefetcht0 32(CO1,LDC)\r
+ prefetcht0 32(%rax)\r
+ prefetcht0 32(%rax,LDC)\r
+\r
+ vpermpd $0xb1 , %ymm9 , %ymm9\r
+ vpermpd $0xb1 , %ymm11, %ymm11\r
+\r
+ vblendpd $0x0a, %ymm9 , %ymm8 , %ymm0\r
+ vblendpd $0x05, %ymm9 , %ymm8 , %ymm1\r
+ vblendpd $0x0a, %ymm11, %ymm10, %ymm2\r
+ vblendpd $0x05, %ymm11, %ymm10, %ymm3\r
+\r
+ vpermpd $0x1b , %ymm2, %ymm2\r
+ vpermpd $0x1b , %ymm3, %ymm3\r
+ vpermpd $0xb1 , %ymm2, %ymm2\r
+ vpermpd $0xb1 , %ymm3, %ymm3\r
+\r
+ vblendpd $0x03, %ymm0, %ymm2 , %ymm4\r
+ vblendpd $0x03, %ymm1, %ymm3 , %ymm5\r
+ vblendpd $0x03, %ymm2, %ymm0 , %ymm6\r
+ vblendpd $0x03, %ymm3, %ymm1 , %ymm7\r
+\r
+\r
+ leaq (%rax, LDC, 2), %rax\r
+ leaq (%rax, LDC, 2), %rbp\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (%rax), %ymm4, %ymm4\r
+ vaddpd (%rax, LDC), %ymm5, %ymm5\r
+ vaddpd (%rbp), %ymm6, %ymm6\r
+ vaddpd (%rbp, LDC), %ymm7, %ymm7\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (%rax)\r
+ vmovups %ymm5 , (%rax, LDC)\r
+ vmovups %ymm6 , (%rbp)\r
+ vmovups %ymm7 , (%rbp, LDC)\r
+\r
+ prefetcht0 32(%rax)\r
+ prefetcht0 32(%rax,LDC)\r
+ prefetcht0 32(%rbp)\r
+ prefetcht0 32(%rbp,LDC)\r
+\r
+ vpermpd $0xb1 , %ymm13, %ymm13\r
+ vpermpd $0xb1 , %ymm15, %ymm15\r
+\r
+ vblendpd $0x0a, %ymm13, %ymm12, %ymm0\r
+ vblendpd $0x05, %ymm13, %ymm12, %ymm1\r
+ vblendpd $0x0a, %ymm15, %ymm14, %ymm2\r
+ vblendpd $0x05, %ymm15, %ymm14, %ymm3\r
+\r
+ vpermpd $0x1b , %ymm2, %ymm2\r
+ vpermpd $0x1b , %ymm3, %ymm3\r
+ vpermpd $0xb1 , %ymm2, %ymm2\r
+ vpermpd $0xb1 , %ymm3, %ymm3\r
+\r
+ vblendpd $0x03, %ymm0, %ymm2 , %ymm4\r
+ vblendpd $0x03, %ymm1, %ymm3 , %ymm5\r
+ vblendpd $0x03, %ymm2, %ymm0 , %ymm6\r
+ vblendpd $0x03, %ymm3, %ymm1 , %ymm7\r
+\r
+\r
+ leaq (%rax, LDC, 4), %rax\r
+ leaq (%rbp, LDC, 4), %rbp\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (%rax), %ymm4, %ymm4\r
+ vaddpd (%rax, LDC), %ymm5, %ymm5\r
+ vaddpd (%rbp), %ymm6, %ymm6\r
+ vaddpd (%rbp, LDC), %ymm7, %ymm7\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (%rax)\r
+ vmovups %ymm5 , (%rax, LDC)\r
+ vmovups %ymm6 , (%rbp)\r
+ vmovups %ymm7 , (%rbp, LDC)\r
+\r
+ prefetcht0 32(%rax)\r
+ prefetcht0 32(%rax,LDC)\r
+ prefetcht0 32(%rbp)\r
+ prefetcht0 32(%rbp,LDC)\r
+\r
+ addq $4*SIZE, CO1\r
+.endm\r
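
The vpermpd/vblendpd sequence in SAVE4x12 only restores the lane order that the kernel's rotations introduced; the net effect per 4x12 tile is C += alpha * acc (OpenBLAS applies beta in a separate pass before the kernel runs, which is why the non-TRMM path simply adds). A hedged sketch of that net effect, shuffles omitted:

    #include <immintrin.h>

    /* Hedged sketch of SAVE4x12's net effect: scale the accumulators by
     * alpha and add them into the 4x12 tile of C (column-major, leading
     * dimension ldc). */
    static void save4x12(double *c, long ldc, double alpha,
                         const __m256d acc[12])
    {
        __m256d av = _mm256_set1_pd(alpha);
        for (int j = 0; j < 12; j++) {
            __m256d cv = _mm256_loadu_pd(c + j * ldc);
            cv = _mm256_fmadd_pd(av, acc[j], cv);     /* c += alpha*acc */
            _mm256_storeu_pd(c + j * ldc, cv);
        }
    }
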
+\r
+/******************************************************************************************/\r
+\r
+.macro INIT2x12\r
+\r
+ vxorpd %xmm4 , %xmm4 , %xmm4\r
+ vxorpd %xmm5 , %xmm5 , %xmm5\r
+ vxorpd %xmm6 , %xmm6 , %xmm6\r
+ vxorpd %xmm7 , %xmm7 , %xmm7\r
+ vxorpd %xmm8 , %xmm8 , %xmm8\r
+ vxorpd %xmm9 , %xmm9 , %xmm9\r
+ vxorpd %xmm10, %xmm10, %xmm10\r
+ vxorpd %xmm11, %xmm11, %xmm11\r
+ vxorpd %xmm12, %xmm12, %xmm12\r
+ vxorpd %xmm13, %xmm13, %xmm13\r
+ vxorpd %xmm14, %xmm14, %xmm14\r
+ vxorpd %xmm15, %xmm15, %xmm15\r
+\r
+.endm\r
+\r
+.macro KERNEL2x12_SUB\r
+ vmovups -16 * SIZE(AO), %xmm0\r
+ vmovddup -12 * SIZE(BO), %xmm1\r
+ vmovddup -11 * SIZE(BO), %xmm2\r
+ vmovddup -10 * SIZE(BO), %xmm3\r
+ vfmadd231pd %xmm0 ,%xmm1 , %xmm4\r
+ vmovddup -9 * SIZE(BO), %xmm1\r
+ vfmadd231pd %xmm0 ,%xmm2 , %xmm5\r
+ vmovddup -8 * SIZE(BO), %xmm2\r
+ vfmadd231pd %xmm0 ,%xmm3 , %xmm6\r
+ vmovddup -7 * SIZE(BO), %xmm3\r
+ vfmadd231pd %xmm0 ,%xmm1 , %xmm7\r
+ vmovddup -6 * SIZE(BO), %xmm1\r
+ vfmadd231pd %xmm0 ,%xmm2 , %xmm8\r
+ vmovddup -5 * SIZE(BO), %xmm2\r
+ vfmadd231pd %xmm0 ,%xmm3 , %xmm9\r
+ vmovddup -4 * SIZE(BO), %xmm3\r
+ vfmadd231pd %xmm0 ,%xmm1 , %xmm10\r
+ vmovddup -3 * SIZE(BO), %xmm1\r
+ vfmadd231pd %xmm0 ,%xmm2 , %xmm11\r
+ vmovddup -2 * SIZE(BO), %xmm2\r
+ vfmadd231pd %xmm0 ,%xmm3 , %xmm12\r
+ vmovddup -1 * SIZE(BO), %xmm3\r
+ vfmadd231pd %xmm0 ,%xmm1 , %xmm13\r
+ addq $12*SIZE, BO\r
+ vfmadd231pd %xmm0 ,%xmm2 , %xmm14\r
+ addq $2*SIZE, AO\r
+ vfmadd231pd %xmm0 ,%xmm3 , %xmm15\r
+\r
+.endm\r
+\r
+.macro SAVE2x12\r
+\r
+ vmovddup ALPHA, %xmm0\r
+\r
+ vmulpd %xmm0 , %xmm4 , %xmm4\r
+ vmulpd %xmm0 , %xmm5 , %xmm5\r
+ vmulpd %xmm0 , %xmm6 , %xmm6\r
+ vmulpd %xmm0 , %xmm7 , %xmm7\r
+\r
+ vmulpd %xmm0 , %xmm8 , %xmm8\r
+ vmulpd %xmm0 , %xmm9 , %xmm9\r
+ vmulpd %xmm0 , %xmm10, %xmm10\r
+ vmulpd %xmm0 , %xmm11, %xmm11\r
+\r
+ vmulpd %xmm0 , %xmm12, %xmm12\r
+ vmulpd %xmm0 , %xmm13, %xmm13\r
+ vmulpd %xmm0 , %xmm14, %xmm14\r
+ vmulpd %xmm0 , %xmm15, %xmm15\r
+\r
+\r
+ leaq (CO1, LDC, 2), %rax \r
+ \r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %xmm4, %xmm4\r
+ vaddpd (CO1, LDC), %xmm5, %xmm5\r
+ vaddpd (%rax), %xmm6, %xmm6\r
+ vaddpd (%rax, LDC), %xmm7, %xmm7\r
+\r
+#endif\r
+\r
+ vmovups %xmm4 , (CO1)\r
+ vmovups %xmm5 , (CO1, LDC)\r
+ vmovups %xmm6 , (%rax)\r
+ vmovups %xmm7 , (%rax, LDC)\r
+\r
+\r
+ leaq (%rax, LDC, 2), %rax\r
+ leaq (%rax, LDC, 2), %rbp\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (%rax), %xmm8 , %xmm4\r
+ vaddpd (%rax, LDC), %xmm9 , %xmm5\r
+ vaddpd (%rbp), %xmm10, %xmm6\r
+ vaddpd (%rbp, LDC), %xmm11, %xmm7\r
+\r
+#endif\r
+\r
+ vmovups %xmm4 , (%rax)\r
+ vmovups %xmm5 , (%rax, LDC)\r
+ vmovups %xmm6 , (%rbp)\r
+ vmovups %xmm7 , (%rbp, LDC)\r
+\r
+\r
+ leaq (%rax, LDC, 4), %rax\r
+ leaq (%rbp, LDC, 4), %rbp\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (%rax), %xmm12, %xmm4\r
+ vaddpd (%rax, LDC), %xmm13, %xmm5\r
+ vaddpd (%rbp), %xmm14, %xmm6\r
+ vaddpd (%rbp, LDC), %xmm15, %xmm7\r
+\r
+#endif\r
+\r
+ vmovups %xmm4 , (%rax)\r
+ vmovups %xmm5 , (%rax, LDC)\r
+ vmovups %xmm6 , (%rbp)\r
+ vmovups %xmm7 , (%rbp, LDC)\r
+\r
+ addq $2*SIZE, CO1\r
+.endm\r
+\r
+\r
+/******************************************************************************************/\r
+\r
+.macro INIT1x12\r
+\r
+ vxorpd %xmm4 , %xmm4 , %xmm4\r
+ vxorpd %xmm5 , %xmm5 , %xmm5\r
+ vxorpd %xmm6 , %xmm6 , %xmm6\r
+ vxorpd %xmm7 , %xmm7 , %xmm7\r
+ vxorpd %xmm8 , %xmm8 , %xmm8\r
+ vxorpd %xmm9 , %xmm9 , %xmm9\r
+ vxorpd %xmm10, %xmm10, %xmm10\r
+ vxorpd %xmm11, %xmm11, %xmm11\r
+ vxorpd %xmm12, %xmm12, %xmm12\r
+ vxorpd %xmm13, %xmm13, %xmm13\r
+ vxorpd %xmm14, %xmm14, %xmm14\r
+ vxorpd %xmm15, %xmm15, %xmm15\r
+\r
+.endm\r
+\r
+.macro KERNEL1x12_SUB\r
+ vmovsd -16 * SIZE(AO), %xmm0\r
+ vmovsd -12 * SIZE(BO), %xmm1\r
+ vmovsd -11 * SIZE(BO), %xmm2\r
+ vmovsd -10 * SIZE(BO), %xmm3\r
+ vfmadd231sd %xmm0 ,%xmm1 , %xmm4\r
+ vmovsd -9 * SIZE(BO), %xmm1\r
+ vfmadd231sd %xmm0 ,%xmm2 , %xmm5\r
+ vmovsd -8 * SIZE(BO), %xmm2\r
+ vfmadd231sd %xmm0 ,%xmm3 , %xmm6\r
+ vmovsd -7 * SIZE(BO), %xmm3\r
+ vfmadd231sd %xmm0 ,%xmm1 , %xmm7\r
+ vmovsd -6 * SIZE(BO), %xmm1\r
+ vfmadd231sd %xmm0 ,%xmm2 , %xmm8\r
+ vmovsd -5 * SIZE(BO), %xmm2\r
+ vfmadd231sd %xmm0 ,%xmm3 , %xmm9\r
+ vmovsd -4 * SIZE(BO), %xmm3\r
+ vfmadd231sd %xmm0 ,%xmm1 , %xmm10\r
+ vmovsd -3 * SIZE(BO), %xmm1\r
+ vfmadd231sd %xmm0 ,%xmm2 , %xmm11\r
+ vmovsd -2 * SIZE(BO), %xmm2\r
+ vfmadd231sd %xmm0 ,%xmm3 , %xmm12\r
+ vmovsd -1 * SIZE(BO), %xmm3\r
+ vfmadd231sd %xmm0 ,%xmm1 , %xmm13\r
+ addq $12*SIZE, BO\r
+ vfmadd231sd %xmm0 ,%xmm2 , %xmm14\r
+ addq $1*SIZE, AO\r
+ vfmadd231sd %xmm0 ,%xmm3 , %xmm15\r
+\r
+.endm\r
+\r
+.macro SAVE1x12\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm5 , %xmm5\r
+ vmulsd %xmm0 , %xmm6 , %xmm6\r
+ vmulsd %xmm0 , %xmm7 , %xmm7\r
+\r
+ vmulsd %xmm0 , %xmm8 , %xmm8\r
+ vmulsd %xmm0 , %xmm9 , %xmm9\r
+ vmulsd %xmm0 , %xmm10, %xmm10\r
+ vmulsd %xmm0 , %xmm11, %xmm11\r
+\r
+ vmulsd %xmm0 , %xmm12, %xmm12\r
+ vmulsd %xmm0 , %xmm13, %xmm13\r
+ vmulsd %xmm0 , %xmm14, %xmm14\r
+ vmulsd %xmm0 , %xmm15, %xmm15\r
+\r
+\r
+ leaq (CO1, LDC, 2), %rax \r
+ \r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4, %xmm4\r
+ vaddsd (CO1, LDC), %xmm5, %xmm5\r
+ vaddsd (%rax), %xmm6, %xmm6\r
+ vaddsd (%rax, LDC), %xmm7, %xmm7\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm5 , (CO1, LDC)\r
+ vmovsd %xmm6 , (%rax)\r
+ vmovsd %xmm7 , (%rax, LDC)\r
+\r
+\r
+ leaq (%rax, LDC, 2), %rax\r
+ leaq (%rax, LDC, 2), %rbp\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (%rax), %xmm8 , %xmm4\r
+ vaddsd (%rax, LDC), %xmm9 , %xmm5\r
+ vaddsd (%rbp), %xmm10, %xmm6\r
+ vaddsd (%rbp, LDC), %xmm11, %xmm7\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (%rax)\r
+ vmovsd %xmm5 , (%rax, LDC)\r
+ vmovsd %xmm6 , (%rbp)\r
+ vmovsd %xmm7 , (%rbp, LDC)\r
+\r
+\r
+ leaq (%rax, LDC, 4), %rax\r
+ leaq (%rbp, LDC, 4), %rbp\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (%rax), %xmm12, %xmm4\r
+ vaddsd (%rax, LDC), %xmm13, %xmm5\r
+ vaddsd (%rbp), %xmm14, %xmm6\r
+ vaddsd (%rbp, LDC), %xmm15, %xmm7\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (%rax)\r
+ vmovsd %xmm5 , (%rax, LDC)\r
+ vmovsd %xmm6 , (%rbp)\r
+ vmovsd %xmm7 , (%rbp, LDC)\r
+\r
+ addq $1*SIZE, CO1\r
+.endm\r
+\r
+\r
+\r
+\r
+/******************************************************************************************/\r
+/******************************************************************************************/\r
+\r
+.macro INIT4x4\r
+\r
+ vxorpd %ymm4 , %ymm4 , %ymm4\r
+ vxorpd %ymm5 , %ymm5 , %ymm5\r
+ vxorpd %ymm6 , %ymm6 , %ymm6\r
+ vxorpd %ymm7 , %ymm7 , %ymm7\r
+\r
+.endm\r
+\r
+.macro KERNEL4x4_I\r
+ prefetcht0 A_PR1(AO)\r
+ vmovups -12 * SIZE(BO), %ymm1\r
+ vmovups -16 * SIZE(AO), %ymm0\r
+ vmulpd %ymm0 ,%ymm1 , %ymm4\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vmulpd %ymm0 ,%ymm1 , %ymm5\r
+ vpermpd $0x1b, %ymm0 , %ymm0\r
+ vmulpd %ymm0 ,%ymm1 , %ymm6\r
+\r
+ addq $4*SIZE, BO\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vmulpd %ymm0 ,%ymm1 , %ymm7\r
+ vmovups -12 * SIZE(BO), %ymm1\r
+\r
+.endm\r
+\r
+.macro KERNEL4x4_M1\r
+ prefetcht0 A_PR1(AO)\r
+ vmovups -16 * SIZE(AO), %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm4\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm5\r
+ vpermpd $0x1b, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm6\r
+\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm7\r
+ vmovups -12 * SIZE(BO), %ymm1\r
+\r
+.endm\r
+\r
+.macro KERNEL4x4_M2\r
+ vmovups -12 * SIZE(AO), %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm4\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm5\r
+ vpermpd $0x1b, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm6\r
+\r
+ addq $8*SIZE, AO\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm7\r
+ vmovups -8 * SIZE(BO), %ymm1\r
+ addq $8*SIZE, BO\r
+.endm\r
+\r
+\r
+.macro KERNEL4x4_E\r
+ vmovups -12 * SIZE(AO), %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm4\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm5\r
+ vpermpd $0x1b, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm6\r
+\r
+ addq $8*SIZE, AO\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm7\r
+ addq $4*SIZE, BO\r
+.endm\r
+\r
+.macro KERNEL4x4_SUB\r
+ vmovups -12 * SIZE(BO), %ymm1\r
+ vmovups -16 * SIZE(AO), %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm4\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm5\r
+ addq $4*SIZE, BO\r
+ vpermpd $0x1b, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm6\r
+ addq $4*SIZE, AO\r
+ vpermpd $0xb1, %ymm0 , %ymm0\r
+ vfmadd231pd %ymm0 ,%ymm1 , %ymm7\r
+\r
+.endm\r
+\r
+.macro SAVE4x4\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+ vmulpd %ymm0 , %ymm6 , %ymm6\r
+\r
+ vpermpd $0xb1 , %ymm5, %ymm5\r
+ vpermpd $0xb1 , %ymm7, %ymm7\r
+\r
+ vblendpd $0x0a, %ymm5, %ymm4, %ymm0\r
+ vblendpd $0x05, %ymm5, %ymm4, %ymm1\r
+ vblendpd $0x0a, %ymm7, %ymm6, %ymm2\r
+ vblendpd $0x05, %ymm7, %ymm6, %ymm3\r
+\r
+ vpermpd $0x1b , %ymm2, %ymm2\r
+ vpermpd $0x1b , %ymm3, %ymm3\r
+ vpermpd $0xb1 , %ymm2, %ymm2\r
+ vpermpd $0xb1 , %ymm3, %ymm3\r
+\r
+ vblendpd $0x03, %ymm0, %ymm2 , %ymm4\r
+ vblendpd $0x03, %ymm1, %ymm3 , %ymm5\r
+ vblendpd $0x03, %ymm2, %ymm0 , %ymm6\r
+ vblendpd $0x03, %ymm3, %ymm1 , %ymm7\r
+\r
+ leaq (CO1, LDC, 2), %rax \r
+ \r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4, %ymm4\r
+ vaddpd (CO1, LDC), %ymm5, %ymm5\r
+ vaddpd (%rax), %ymm6, %ymm6\r
+ vaddpd (%rax, LDC), %ymm7, %ymm7\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm5 , (CO1, LDC)\r
+ vmovups %ymm6 , (%rax)\r
+ vmovups %ymm7 , (%rax, LDC)\r
+\r
+ addq $4*SIZE, CO1\r
+.endm\r
+\r
+/******************************************************************************************/\r
+/******************************************************************************************/\r
+\r
+.macro INIT2x4\r
+\r
+ vxorpd %xmm4 , %xmm4 , %xmm4\r
+ vxorpd %xmm5 , %xmm5 , %xmm5\r
+ vxorpd %xmm6 , %xmm6 , %xmm6\r
+ vxorpd %xmm7 , %xmm7 , %xmm7\r
+\r
+.endm\r
+\r
+\r
+.macro KERNEL2x4_SUB\r
+ vmovddup -12 * SIZE(BO), %xmm1\r
+ vmovups -16 * SIZE(AO), %xmm0\r
+ vmovddup -11 * SIZE(BO), %xmm2\r
+ vfmadd231pd %xmm0 ,%xmm1 , %xmm4\r
+ vmovddup -10 * SIZE(BO), %xmm3\r
+ vfmadd231pd %xmm0 ,%xmm2 , %xmm5\r
+ vmovddup -9 * SIZE(BO), %xmm8\r
+ vfmadd231pd %xmm0 ,%xmm3 , %xmm6\r
+ addq $4*SIZE, BO\r
+ vfmadd231pd %xmm0 ,%xmm8 , %xmm7\r
+ addq $2*SIZE, AO\r
+\r
+.endm\r
+\r
+\r
+.macro SAVE2x4\r
+\r
+ vmovddup ALPHA, %xmm0\r
+\r
+ vmulpd %xmm0 , %xmm4 , %xmm4\r
+ vmulpd %xmm0 , %xmm5 , %xmm5\r
+ vmulpd %xmm0 , %xmm6 , %xmm6\r
+ vmulpd %xmm0 , %xmm7 , %xmm7\r
+\r
+ leaq (CO1, LDC, 2), %rax \r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %xmm4, %xmm4\r
+ vaddpd (CO1, LDC), %xmm5, %xmm5\r
+ vaddpd (%rax), %xmm6, %xmm6\r
+ vaddpd (%rax, LDC), %xmm7, %xmm7\r
+\r
+#endif\r
+\r
+ vmovups %xmm4 , (CO1)\r
+ vmovups %xmm5 , (CO1, LDC)\r
+ vmovups %xmm6 , (%rax)\r
+ vmovups %xmm7 , (%rax, LDC)\r
+\r
+ addq $2*SIZE, CO1\r
+.endm\r
+\r
+/******************************************************************************************/\r
+/******************************************************************************************/\r
+\r
+.macro INIT1x4\r
+\r
+ vxorpd %xmm4 , %xmm4 , %xmm4\r
+ vxorpd %xmm5 , %xmm5 , %xmm5\r
+ vxorpd %xmm6 , %xmm6 , %xmm6\r
+ vxorpd %xmm7 , %xmm7 , %xmm7\r
+\r
+.endm\r
+\r
+\r
+.macro KERNEL1x4_SUB\r
+ vmovsd -12 * SIZE(BO), %xmm1\r
+ vmovsd -16 * SIZE(AO), %xmm0\r
+ vmovsd -11 * SIZE(BO), %xmm2\r
+ vfmadd231sd %xmm0 ,%xmm1 , %xmm4\r
+ vmovsd -10 * SIZE(BO), %xmm3\r
+ vfmadd231sd %xmm0 ,%xmm2 , %xmm5\r
+ vmovsd -9 * SIZE(BO), %xmm8\r
+ vfmadd231sd %xmm0 ,%xmm3 , %xmm6\r
+ addq $4*SIZE, BO\r
+ vfmadd231sd %xmm0 ,%xmm8 , %xmm7\r
+ addq $1*SIZE, AO\r
+\r
+.endm\r
+\r
+\r
+.macro SAVE1x4\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm5 , %xmm5\r
+ vmulsd %xmm0 , %xmm6 , %xmm6\r
+ vmulsd %xmm0 , %xmm7 , %xmm7\r
+\r
+ leaq (CO1, LDC, 2), %rax \r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4, %xmm4\r
+ vaddsd (CO1, LDC), %xmm5, %xmm5\r
+ vaddsd (%rax), %xmm6, %xmm6\r
+ vaddsd (%rax, LDC), %xmm7, %xmm7\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm5 , (CO1, LDC)\r
+ vmovsd %xmm6 , (%rax)\r
+ vmovsd %xmm7 , (%rax, LDC)\r
+\r
+ addq $1*SIZE, CO1\r
+.endm\r
+\r
+\r
+/******************************************************************************************/\r
+/******************************************************************************************/\r
+\r
+.macro INIT4x2\r
+\r
+ vxorpd %xmm4 , %xmm4 , %xmm4\r
+ vxorpd %xmm5 , %xmm5 , %xmm5\r
+ vxorpd %xmm6 , %xmm6 , %xmm6\r
+ vxorpd %xmm7 , %xmm7 , %xmm7\r
+\r
+.endm\r
+\r
+\r
+.macro KERNEL4x2_SUB\r
+ vmovddup -12 * SIZE(BO), %xmm2\r
+ vmovups -16 * SIZE(AO), %xmm0\r
+ vmovups -14 * SIZE(AO), %xmm1\r
+ vmovddup -11 * SIZE(BO), %xmm3\r
+ vfmadd231pd %xmm0 ,%xmm2 , %xmm4\r
+ vfmadd231pd %xmm1 ,%xmm2 , %xmm5\r
+ vfmadd231pd %xmm0 ,%xmm3 , %xmm6\r
+ vfmadd231pd %xmm1 ,%xmm3 , %xmm7\r
+ addq $2*SIZE, BO\r
+ addq $4*SIZE, AO\r
+\r
+.endm\r
+\r
+\r
+.macro SAVE4x2\r
+\r
+ vmovddup ALPHA, %xmm0\r
+\r
+ vmulpd %xmm0 , %xmm4 , %xmm4\r
+ vmulpd %xmm0 , %xmm5 , %xmm5\r
+ vmulpd %xmm0 , %xmm6 , %xmm6\r
+ vmulpd %xmm0 , %xmm7 , %xmm7\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1) , %xmm4, %xmm4\r
+ vaddpd 2 * SIZE(CO1) , %xmm5, %xmm5\r
+ vaddpd (CO1, LDC), %xmm6, %xmm6\r
+ vaddpd 2 * SIZE(CO1, LDC), %xmm7, %xmm7\r
+\r
+#endif\r
+\r
+ vmovups %xmm4 , (CO1)\r
+ vmovups %xmm5 , 2 * SIZE(CO1)\r
+ vmovups %xmm6 , (CO1, LDC)\r
+ vmovups %xmm7 , 2 * SIZE(CO1, LDC)\r
+\r
+ addq $4*SIZE, CO1\r
+.endm\r
+\r
+\r
+/******************************************************************************************/\r
+/******************************************************************************************/\r
+\r
+.macro INIT2x2\r
+\r
+ vxorpd %xmm4 , %xmm4 , %xmm4\r
+ vxorpd %xmm6 , %xmm6 , %xmm6\r
+\r
+.endm\r
+\r
+\r
+.macro KERNEL2x2_SUB\r
+ vmovddup -12 * SIZE(BO), %xmm2\r
+ vmovups -16 * SIZE(AO), %xmm0\r
+ vmovddup -11 * SIZE(BO), %xmm3\r
+ vfmadd231pd %xmm0 ,%xmm2 , %xmm4\r
+ vfmadd231pd %xmm0 ,%xmm3 , %xmm6\r
+ addq $2*SIZE, BO\r
+ addq $2*SIZE, AO\r
+\r
+.endm\r
+\r
+\r
+.macro SAVE2x2\r
+\r
+ vmovddup ALPHA, %xmm0\r
+\r
+ vmulpd %xmm0 , %xmm4 , %xmm4\r
+ vmulpd %xmm0 , %xmm6 , %xmm6\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1) , %xmm4, %xmm4\r
+ vaddpd (CO1, LDC), %xmm6, %xmm6\r
+\r
+#endif\r
+\r
+ vmovups %xmm4 , (CO1)\r
+ vmovups %xmm6 , (CO1, LDC)\r
+\r
+ addq $2*SIZE, CO1\r
+.endm\r
+\r
+/******************************************************************************************/\r
+/******************************************************************************************/\r
+\r
+.macro INIT1x2\r
+\r
+ vxorpd %xmm4 , %xmm4 , %xmm4\r
+ vxorpd %xmm5 , %xmm5 , %xmm5\r
+\r
+.endm\r
+\r
+\r
+.macro KERNEL1x2_SUB\r
+ vmovsd -12 * SIZE(BO), %xmm1\r
+ vmovsd -16 * SIZE(AO), %xmm0\r
+ vmovsd -11 * SIZE(BO), %xmm2\r
+ vfmadd231sd %xmm0 ,%xmm1 , %xmm4\r
+ vfmadd231sd %xmm0 ,%xmm2 , %xmm5\r
+ addq $2*SIZE, BO\r
+ addq $1*SIZE, AO\r
+\r
+.endm\r
+\r
+\r
+.macro SAVE1x2\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm5 , %xmm5\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4, %xmm4\r
+ vaddsd (CO1, LDC), %xmm5, %xmm5\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm5 , (CO1, LDC)\r
+\r
+ addq $1*SIZE, CO1\r
+.endm\r
+\r
+\r
+/******************************************************************************************/\r
+/******************************************************************************************/\r
+\r
+.macro INIT4x1\r
+\r
+ vxorpd %xmm4 , %xmm4 , %xmm4\r
+ vxorpd %xmm5 , %xmm5 , %xmm5\r
+\r
+.endm\r
+\r
+\r
+.macro KERNEL4x1_SUB\r
+ vmovddup -12 * SIZE(BO), %xmm2\r
+ vmovups -16 * SIZE(AO), %xmm0\r
+ vmovups -14 * SIZE(AO), %xmm1\r
+ vfmadd231pd %xmm0 ,%xmm2 , %xmm4\r
+ vfmadd231pd %xmm1 ,%xmm2 , %xmm5\r
+ addq $1*SIZE, BO\r
+ addq $4*SIZE, AO\r
+\r
+.endm\r
+\r
+\r
+.macro SAVE4x1\r
+\r
+ vmovddup ALPHA, %xmm0\r
+\r
+ vmulpd %xmm0 , %xmm4 , %xmm4\r
+ vmulpd %xmm0 , %xmm5 , %xmm5\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1) , %xmm4, %xmm4\r
+ vaddpd 2 * SIZE(CO1) , %xmm5, %xmm5\r
+\r
+#endif\r
+\r
+ vmovups %xmm4 , (CO1)\r
+ vmovups %xmm5 , 2 * SIZE(CO1)\r
+\r
+ addq $4*SIZE, CO1\r
+.endm\r
+\r
+\r
+/******************************************************************************************/\r
+/******************************************************************************************/\r
+\r
+.macro INIT2x1\r
+\r
+ vxorpd %xmm4 , %xmm4 , %xmm4\r
+\r
+.endm\r
+\r
+\r
+.macro KERNEL2x1_SUB\r
+ vmovddup -12 * SIZE(BO), %xmm2\r
+ vmovups -16 * SIZE(AO), %xmm0\r
+ vfmadd231pd %xmm0 ,%xmm2 , %xmm4\r
+ addq $1*SIZE, BO\r
+ addq $2*SIZE, AO\r
+\r
+.endm\r
+\r
+\r
+.macro SAVE2x1\r
+\r
+ vmovddup ALPHA, %xmm0\r
+\r
+ vmulpd %xmm0 , %xmm4 , %xmm4\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1) , %xmm4, %xmm4\r
+\r
+#endif\r
+\r
+ vmovups %xmm4 , (CO1)\r
+\r
+ addq $2*SIZE, CO1\r
+.endm\r
+\r
+\r
+/******************************************************************************************/\r
+/******************************************************************************************/\r
+\r
+.macro INIT1x1\r
+\r
+ vxorpd %xmm4 , %xmm4 , %xmm4\r
+\r
+.endm\r
+\r
+\r
+.macro KERNEL1x1_SUB\r
+ vmovsd -12 * SIZE(BO), %xmm1\r
+ vmovsd -16 * SIZE(AO), %xmm0\r
+ vfmadd231sd %xmm0 ,%xmm1 , %xmm4\r
+ addq $1*SIZE, BO\r
+ addq $1*SIZE, AO\r
+\r
+.endm\r
+\r
+\r
+.macro SAVE1x1\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4, %xmm4\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+\r
+ addq $1*SIZE, CO1\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
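From here on the file contains two entry points selected at build time: the plain GEMM kernel, and (under TRMMKERNEL, in the #else branch further down) the TRMM variant that carries a diagonal offset. For reference, hedged C-level signatures in the usual OpenBLAS kernel convention (names illustrative, not confirmed by this patch):

    /* Hedged: approximate C signatures of the two entry points this
     * file provides, following the common OpenBLAS kernel convention.
     * a and b point at packed panels; the TRMM variant adds an offset. */
    typedef long BLASLONG;
    int dgemm_kernel(BLASLONG m, BLASLONG n, BLASLONG k, double alpha,
                     double *a, double *b, double *c, BLASLONG ldc);
    int dtrmm_kernel(BLASLONG m, BLASLONG n, BLASLONG k, double alpha,
                     double *a, double *b, double *c, BLASLONG ldc,
                     BLASLONG offset);
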
+#if !defined(TRMMKERNEL)\r
+\r
+\r
+ PROLOGUE\r
+ PROFCODE\r
+ \r
+ subq $STACKSIZE, %rsp\r
+ movq %rbx, (%rsp)\r
+ movq %rbp, 8(%rsp)\r
+ movq %r12, 16(%rsp)\r
+ movq %r13, 24(%rsp)\r
+ movq %r14, 32(%rsp)\r
+ movq %r15, 40(%rsp)\r
+\r
+ vzeroupper\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq %rdi, 48(%rsp)\r
+ movq %rsi, 56(%rsp)\r
+ vmovups %xmm6, 64(%rsp)\r
+ vmovups %xmm7, 80(%rsp)\r
+ vmovups %xmm8, 96(%rsp)\r
+ vmovups %xmm9, 112(%rsp)\r
+ vmovups %xmm10, 128(%rsp)\r
+ vmovups %xmm11, 144(%rsp)\r
+ vmovups %xmm12, 160(%rsp)\r
+ vmovups %xmm13, 176(%rsp)\r
+ vmovups %xmm14, 192(%rsp)\r
+ vmovups %xmm15, 208(%rsp)\r
+\r
+ movq ARG1, OLD_M\r
+ movq ARG2, OLD_N\r
+ movq ARG3, OLD_K\r
+ movq OLD_A, A\r
+ movq OLD_B, B\r
+ movq OLD_C, C\r
+ movq OLD_LDC, LDC\r
+\r
+ vmovups %xmm3, %xmm0\r
+\r
+#else\r
+ movq STACKSIZE + 8(%rsp), LDC\r
+\r
+#endif\r
+\r
+ movq %rsp, SP # save old stack\r
+ subq $128 + L_BUFFER_SIZE, %rsp\r
+ andq $-4096, %rsp # align stack\r
+\r
+ STACK_TOUCH\r
+\r
+ cmpq $0, OLD_M\r
+ je .L999\r
+\r
+ cmpq $0, OLD_N\r
+ je .L999\r
+\r
+ cmpq $0, OLD_K\r
+ je .L999\r
+\r
+ movq OLD_M, M\r
+ movq OLD_N, N\r
+ movq OLD_K, K\r
+\r
+ vmovsd %xmm0, ALPHA\r
+\r
+ salq $BASE_SHIFT, LDC\r
+\r
+ movq N, %rax\r
+ xorq %rdx, %rdx\r
+ movq $12, %rdi\r
+ divq %rdi // N / 12\r
+ movq %rax, Ndiv12 // N / 12\r
+ movq %rdx, Nmod12 // N % 12\r
+\r
+\r
+ movq Ndiv12, J\r
+ cmpq $0, J\r
+ je .L4_0\r
+ ALIGN_4\r
+\r
+.L12_01:\r
+ // copy to sub buffer\r
+ movq K, %rax\r
+ salq $2,%rax // K * 4 ; elements in one 4-wide panel of B\r
+ movq B, BO1\r
+ leaq (B,%rax, SIZE), BO2 // next offset to BO2\r
+ leaq (BO2,%rax, SIZE), BO3 // next offset to BO3\r
+\r
+\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ sarq $1 , %rax // K / 2\r
+ jz .L12_01a_2\r
+ ALIGN_4\r
+\r
+.L12_01a_1:\r
+\r
+ prefetcht0 512(BO1)\r
+ prefetcht0 512(BO2)\r
+ prefetcht0 512(BO3)\r
+ prefetchw 512(BO)\r
+\r
+\r
+ vmovups 0 * SIZE(BO1), %ymm1\r
+ vmovups 4 * SIZE(BO1), %ymm5\r
+ vmovups 0 * SIZE(BO2), %ymm2\r
+ vmovups 4 * SIZE(BO2), %ymm6\r
+ vmovups 0 * SIZE(BO3), %ymm3\r
+ vmovups 4 * SIZE(BO3), %ymm7\r
+\r
+ vmovups %ymm1, 0 * SIZE(BO)\r
+ vmovups %ymm2, 4 * SIZE(BO)\r
+ vmovups %ymm3, 8 * SIZE(BO)\r
+\r
+ vmovups %ymm5, 12 * SIZE(BO)\r
+ vmovups %ymm6, 16 * SIZE(BO)\r
+ vmovups %ymm7, 20 * SIZE(BO)\r
+\r
+ addq $8 * SIZE ,BO1\r
+ addq $8 * SIZE ,BO2\r
+ addq $8 * SIZE ,BO3\r
+ addq $24 *SIZE ,BO\r
+\r
+ decq %rax\r
+ jnz .L12_01a_1\r
+\r
+\r
+\r
+.L12_01a_2:\r
+\r
+ movq K, %rax\r
+ andq $1, %rax // K % 2\r
+ jz .L12_03c\r
+ ALIGN_4\r
+\r
+\r
+.L12_02b:\r
+\r
+ vmovups 0 * SIZE(BO1), %ymm1\r
+ vmovups 0 * SIZE(BO2), %ymm2\r
+ vmovups 0 * SIZE(BO3), %ymm3\r
+ vmovups %ymm1, 0 * SIZE(BO)\r
+ vmovups %ymm2, 4 * SIZE(BO)\r
+ vmovups %ymm3, 8 * SIZE(BO)\r
+ addq $4*SIZE,BO1\r
+ addq $4*SIZE,BO2\r
+ addq $4*SIZE,BO3\r
+ addq $12*SIZE,BO\r
+ decq %rax\r
+ jnz .L12_02b\r
+\r
+.L12_03c:\r
+\r
+ movq BO3, B // next offset of B\r
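
The copy loop that ends here re-packs B for the 12-wide path: B arrives as three consecutive K x 4 panels (BO1, BO2, BO3), and the loop interleaves them into BUFFER1 so each k-step of the kernel reads twelve contiguous values. A hedged C sketch:

    /* Hedged sketch of the .L12_01 packing: interleave three 4-wide
     * panels of B into one 12-wide buffer, one k-row at a time. */
    static void pack_b12(const double *b, long k, double *buf)
    {
        const double *b1 = b, *b2 = b + 4 * k, *b3 = b + 8 * k;
        for (long i = 0; i < k; i++) {
            for (int j = 0; j < 4; j++) {
                buf[j]     = b1[j];   /* vmovups 0*SIZE(BO1) -> 0*SIZE(BO) */
                buf[4 + j] = b2[j];   /* BO2 -> 4*SIZE(BO)                 */
                buf[8 + j] = b3[j];   /* BO3 -> 8*SIZE(BO)                 */
            }
            b1 += 4; b2 += 4; b3 += 4; buf += 12;
        }
    }
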
+\r
+.L12_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 8), C \r
+ leaq (C, LDC, 4), C // c += 12 * ldc\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $2, I // i = m / 4\r
+ je .L12_20\r
+\r
+ ALIGN_4\r
+\r
+.L12_11:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax // K / 8\r
+ cmpq $2, %rax\r
+\r
+ jl .L12_13\r
+\r
+\r
+ KERNEL4x12_I\r
+ KERNEL4x12_M2\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_M2\r
+\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_M2\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_M2\r
+\r
+ subq $2, %rax\r
+ je .L12_12a\r
+\r
+ .align 32\r
+\r
+.L12_12:\r
+\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_M2\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_M2\r
+\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_M2\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_M2\r
+\r
+ dec %rax\r
+ jne .L12_12\r
+\r
+.L12_12a:\r
+\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_M2\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_M2\r
+\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_M2\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_E\r
+\r
+ jmp .L12_16\r
+\r
+\r
+.L12_13:\r
+\r
+ test $1, %rax\r
+ jz .L12_14\r
+\r
+ KERNEL4x12_I\r
+ KERNEL4x12_M2\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_M2\r
+\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_M2\r
+ KERNEL4x12_M1\r
+ KERNEL4x12_E\r
+\r
+ jmp .L12_16\r
+\r
+\r
+.L12_14:\r
+\r
+ INIT4x12\r
+\r
+\r
+.L12_16:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L12_19\r
+\r
+ ALIGN_4\r
+\r
+.L12_17:\r
+\r
+ KERNEL4x12_SUB\r
+\r
+ dec %rax\r
+ jne .L12_17\r
+ ALIGN_4\r
+\r
+\r
+.L12_19:\r
+\r
+ SAVE4x12\r
+\r
+ decq I # i --\r
+ jne .L12_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L12_20:\r
+ // Test rest of M\r
+\r
+ testq $3, M\r
+ jz .L12_100 // to next 12 lines of N\r
+\r
+\r
+.L12_30:\r
+ testq $2, M \r
+ jz .L12_40\r
+\r
+ ALIGN_4\r
+\r
+.L12_31:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ INIT2x12\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax\r
+ je .L12_36\r
+ ALIGN_4\r
+\r
+.L12_32:\r
+\r
+ KERNEL2x12_SUB\r
+ KERNEL2x12_SUB\r
+ KERNEL2x12_SUB\r
+ KERNEL2x12_SUB\r
+\r
+ KERNEL2x12_SUB\r
+ KERNEL2x12_SUB\r
+ KERNEL2x12_SUB\r
+ KERNEL2x12_SUB\r
+\r
+ dec %rax\r
+ jne .L12_32\r
+ ALIGN_4\r
+\r
+.L12_36:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L12_39\r
+\r
+ ALIGN_4\r
+\r
+.L12_37:\r
+\r
+ KERNEL2x12_SUB\r
+\r
+ dec %rax\r
+ jne .L12_37\r
+ ALIGN_4\r
+\r
+\r
+.L12_39:\r
+\r
+ SAVE2x12\r
+\r
+ ALIGN_4\r
+\r
+.L12_40:\r
+ testq $1, M \r
+ jz .L12_100 // to next 12 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L12_41:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ INIT1x12\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3,%rax\r
+ je .L12_46\r
+\r
+ ALIGN_4\r
+\r
+.L12_42:\r
+\r
+ KERNEL1x12_SUB\r
+ KERNEL1x12_SUB\r
+ KERNEL1x12_SUB\r
+ KERNEL1x12_SUB\r
+\r
+ KERNEL1x12_SUB\r
+ KERNEL1x12_SUB\r
+ KERNEL1x12_SUB\r
+ KERNEL1x12_SUB\r
+\r
+\r
+ dec %rax\r
+ jne .L12_42\r
+ ALIGN_4\r
+\r
+.L12_46:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L12_49\r
+\r
+ ALIGN_4\r
+\r
+.L12_47:\r
+\r
+ KERNEL1x12_SUB\r
+\r
+ dec %rax\r
+ jne .L12_47\r
+ ALIGN_4\r
+\r
+\r
+.L12_49:\r
+\r
+ SAVE1x12\r
+\r
+ ALIGN_4\r
+ \r
+.L12_100:\r
+\r
+ decq J // j --\r
+ jg .L12_01\r
+\r
+\r
+.L4_0:\r
+\r
+ cmpq $0, Nmod12 // N % 12 == 0\r
+ je .L999\r
+\r
+ movq Nmod12, J \r
+ sarq $2, J // j = j / 4\r
+ je .L2_0\r
+\r
+.L4_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 4), C // c += 4 * ldc\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $2, I // i = m / 4\r
+ je .L4_20\r
+\r
+ ALIGN_4\r
+\r
+.L4_11:\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax // K / 8\r
+ cmpq $2, %rax\r
+ jl .L4_13\r
+\r
+\r
+ KERNEL4x4_I\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+\r
+ subq $2, %rax\r
+ je .L4_12a\r
+\r
+ .align 32\r
+\r
+.L4_12:\r
+\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+\r
+ dec %rax\r
+ jne .L4_12\r
+\r
+.L4_12a:\r
+\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_E\r
+\r
+ jmp .L4_16\r
+\r
+\r
+.L4_13:\r
+\r
+ test $1, %rax\r
+ jz .L4_14\r
+\r
+ KERNEL4x4_I\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_E\r
+\r
+ jmp .L4_16\r
+\r
+\r
+.L4_14:\r
+\r
+ INIT4x4\r
+\r
+\r
+.L4_16:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L4_19\r
+\r
+ ALIGN_4\r
+\r
+.L4_17:\r
+\r
+ KERNEL4x4_SUB\r
+\r
+ dec %rax\r
+ jne .L4_17\r
+ ALIGN_4\r
+\r
+\r
+.L4_19:\r
+\r
+ SAVE4x4\r
+\r
+ decq I # i --\r
+ jg .L4_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L4_20:\r
+ // Test rest of M\r
+\r
+ testq $3, M\r
+ jz .L4_100 // to next 4 lines of N\r
+\r
+\r
+.L4_30:\r
+ testq $2, M \r
+ jz .L4_40\r
+\r
+ ALIGN_4\r
+\r
+.L4_31:\r
+ movq B, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ INIT2x4\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax\r
+ je .L4_36\r
+ ALIGN_4\r
+\r
+.L4_32:\r
+\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+\r
+ dec %rax\r
+ jne .L4_32\r
+ ALIGN_4\r
+\r
+.L4_36:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L4_39\r
+\r
+ ALIGN_4\r
+\r
+.L4_37:\r
+\r
+ KERNEL2x4_SUB\r
+\r
+ dec %rax\r
+ jne .L4_37\r
+\r
+\r
+.L4_39:\r
+\r
+ SAVE2x4\r
+\r
+.L4_40:\r
+ testq $1, M \r
+ jz .L4_100 // to next 4 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L4_41:\r
+ movq B, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ INIT1x4\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3,%rax\r
+ je .L4_46\r
+\r
+ ALIGN_4\r
+\r
+.L4_42:\r
+\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+\r
+ dec %rax\r
+ jne .L4_42\r
+ ALIGN_4\r
+\r
+.L4_46:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L4_49\r
+\r
+ ALIGN_4\r
+\r
+.L4_47:\r
+\r
+ KERNEL1x4_SUB\r
+\r
+ dec %rax\r
+ jne .L4_47\r
+ ALIGN_4\r
+\r
+\r
+.L4_49:\r
+\r
+ SAVE1x4\r
+\r
+ ALIGN_4\r
+ \r
+.L4_100:\r
+\r
+ movq K, %rax\r
+ salq $2, %rax // * 4\r
+ leaq (B , %rax, SIZE), B\r
+ decq J // j --\r
+ jg .L4_10\r
+\r
+\r
+\r
+\r
+/***************************************************************************************************************/\r
+\r
+.L2_0:\r
+\r
+ movq Nmod12, J \r
+ testq $2, J\r
+ je .L1_0\r
+\r
+.L2_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C // c += 2 * ldc\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $2, I // i = m / 4\r
+ je .L2_20\r
+\r
+ ALIGN_4\r
+\r
+.L2_11:\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+\r
+ INIT4x2\r
+\r
+ movq K, %rax\r
+ sarq $3, %rax // K / 8\r
+\r
+ je .L2_16\r
+\r
+ .align 32\r
+\r
+.L2_12:\r
+\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ dec %rax\r
+ jne .L2_12\r
+\r
+\r
+.L2_16:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_19\r
+\r
+ ALIGN_4\r
+\r
+.L2_17:\r
+\r
+ KERNEL4x2_SUB\r
+\r
+ dec %rax\r
+ jne .L2_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_19:\r
+\r
+ SAVE4x2\r
+\r
+ decq I # i --\r
+ jg .L2_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L2_20:\r
+ // Test rest of M\r
+\r
+ testq $3, M\r
+ jz .L2_100 // to next 2 lines of N\r
+\r
+\r
+.L2_30:\r
+ testq $2, M \r
+ jz .L2_40\r
+\r
+ ALIGN_4\r
+\r
+.L2_31:\r
+ movq B, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ INIT2x2\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax\r
+ je .L2_36\r
+ ALIGN_4\r
+\r
+.L2_32:\r
+\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ dec %rax\r
+ jne .L2_32\r
+\r
+.L2_36:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_39\r
+\r
+ ALIGN_4\r
+\r
+.L2_37:\r
+\r
+ KERNEL2x2_SUB\r
+\r
+ dec %rax\r
+ jne .L2_37\r
+\r
+\r
+.L2_39:\r
+\r
+ SAVE2x2\r
+\r
+.L2_40:\r
+ testq $1, M \r
+ jz .L2_100 // to next 2 lines of N\r
+\r
+.L2_41:\r
+ movq B, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ INIT1x2\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3,%rax\r
+ je .L2_46\r
+\r
+ ALIGN_4\r
+\r
+.L2_42:\r
+\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ dec %rax\r
+ jne .L2_42\r
+\r
+.L2_46:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_49\r
+\r
+ ALIGN_4\r
+\r
+.L2_47:\r
+\r
+ KERNEL1x2_SUB\r
+\r
+ dec %rax\r
+ jne .L2_47\r
+\r
+.L2_49:\r
+\r
+ SAVE1x2\r
+\r
+.L2_100:\r
+\r
+ movq K, %rax\r
+ salq $1, %rax // * 2\r
+ leaq (B , %rax, SIZE), B\r
+\r
+/***************************************************************************************************************/\r
+\r
+.L1_0:\r
+\r
+ movq Nmod12, J \r
+ testq $1, J\r
+ je .L999\r
+\r
+.L1_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 1), C // c += 1 * ldc\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $2, I // i = m / 4\r
+ je .L1_20\r
+\r
+ ALIGN_4\r
+\r
+.L1_11:\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+\r
+ INIT4x1\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax // K / 8\r
+ je .L1_16\r
+\r
+ .align 32\r
+\r
+.L1_12:\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ dec %rax\r
+ jne .L1_12\r
+\r
+\r
+.L1_16:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_19\r
+\r
+ ALIGN_4\r
+\r
+.L1_17:\r
+\r
+ KERNEL4x1_SUB\r
+\r
+ dec %rax\r
+ jne .L1_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_19:\r
+\r
+ SAVE4x1\r
+\r
+ decq I # i --\r
+ jg .L1_11\r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L1_20:\r
+ // Test rest of M\r
+\r
+ testq $3, M\r
+ jz .L1_100 \r
+\r
+\r
+.L1_30:\r
+ testq $2, M \r
+ jz .L1_40\r
+\r
+ ALIGN_4\r
+\r
+.L1_31:\r
+ movq B, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ INIT2x1\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3, %rax\r
+ je .L1_36\r
+ ALIGN_4\r
+\r
+.L1_32:\r
+\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+\r
+ dec %rax\r
+ jne .L1_32\r
+\r
+.L1_36:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_39\r
+\r
+ ALIGN_4\r
+\r
+.L1_37:\r
+\r
+ KERNEL2x1_SUB\r
+\r
+ dec %rax\r
+ jne .L1_37\r
+\r
+.L1_39:\r
+\r
+ SAVE2x1\r
+\r
+.L1_40:\r
+ testq $1, M \r
+ jz .L1_100 // to end of N\r
+\r
+\r
+.L1_41:\r
+ movq B, BO // first buffer to BO\r
+ addq $12 * SIZE, BO\r
+\r
+ INIT1x1\r
+\r
+ movq K, %rax\r
+\r
+ sarq $3,%rax\r
+ je .L1_46\r
+\r
+ ALIGN_4\r
+\r
+.L1_42:\r
+\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ dec %rax\r
+ jne .L1_42\r
+\r
+.L1_46:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_49\r
+\r
+ ALIGN_4\r
+\r
+.L1_47:\r
+\r
+ KERNEL1x1_SUB\r
+\r
+ dec %rax\r
+ jne .L1_47\r
+\r
+\r
+.L1_49:\r
+\r
+ SAVE1x1\r
+\r
+.L1_100:\r
+\r
+\r
+\r
+\r
+.L999:\r
+ vzeroupper\r
+\r
+ movq SP, %rsp\r
+ movq (%rsp), %rbx\r
+ movq 8(%rsp), %rbp\r
+ movq 16(%rsp), %r12\r
+ movq 24(%rsp), %r13\r
+ movq 32(%rsp), %r14\r
+ movq 40(%rsp), %r15\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq 48(%rsp), %rdi\r
+ movq 56(%rsp), %rsi\r
+ vmovups 64(%rsp), %xmm6\r
+ vmovups 80(%rsp), %xmm7\r
+ vmovups 96(%rsp), %xmm8\r
+ vmovups 112(%rsp), %xmm9\r
+ vmovups 128(%rsp), %xmm10\r
+ vmovups 144(%rsp), %xmm11\r
+ vmovups 160(%rsp), %xmm12\r
+ vmovups 176(%rsp), %xmm13\r
+ vmovups 192(%rsp), %xmm14\r
+ vmovups 208(%rsp), %xmm15\r
+#endif\r
+\r
+ addq $STACKSIZE, %rsp\r
+ ret\r
+\r
+ EPILOGUE\r
+\r
+\r
+#else\r
+/*************************************************************************************\r
+* TRMM Kernel\r
+*************************************************************************************/\r
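
In this variant, KK tracks the diagonal offset of the current tile and KKK holds the effective inner-product length; the preprocessor blocks around each tile advance AO/BO past the structurally zero part of the triangular factor and trim K accordingly. A hedged sketch of the length computation that the movq/subq/addq blocks below implement:

    /* Hedged sketch of the KKK computation for an MRxNR tile, mirroring
     * the #if/#elif blocks below (LEFT: triangular factor on the left;
     * TRANSA: it is transposed). */
    static long trmm_tile_k(long k, long kk, int left, int transa,
                            long mr, long nr)
    {
        if ((left && !transa) || (!left && transa))
            return k - kk;                 /* use the tail of the panel */
        return kk + (left ? mr : nr);      /* use the head of the panel */
    }
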
+\r
+\r
+ PROLOGUE\r
+ PROFCODE\r
+ \r
+ subq $STACKSIZE, %rsp\r
+ movq %rbx, (%rsp)\r
+ movq %rbp, 8(%rsp)\r
+ movq %r12, 16(%rsp)\r
+ movq %r13, 24(%rsp)\r
+ movq %r14, 32(%rsp)\r
+ movq %r15, 40(%rsp)\r
+\r
+ vzeroupper\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq %rdi, 48(%rsp)\r
+ movq %rsi, 56(%rsp)\r
+ vmovups %xmm6, 64(%rsp)\r
+ vmovups %xmm7, 80(%rsp)\r
+ vmovups %xmm8, 96(%rsp)\r
+ vmovups %xmm9, 112(%rsp)\r
+ vmovups %xmm10, 128(%rsp)\r
+ vmovups %xmm11, 144(%rsp)\r
+ vmovups %xmm12, 160(%rsp)\r
+ vmovups %xmm13, 176(%rsp)\r
+ vmovups %xmm14, 192(%rsp)\r
+ vmovups %xmm15, 208(%rsp)\r
+\r
+ movq ARG1, OLD_M\r
+ movq ARG2, OLD_N\r
+ movq ARG3, OLD_K\r
+ movq OLD_A, A\r
+ movq OLD_B, B\r
+ movq OLD_C, C\r
+ movq OLD_LDC, LDC\r
+#ifdef TRMMKERNEL\r
+ vmovsd OLD_OFFSET, %xmm12\r
+#endif\r
+ vmovups %xmm3, %xmm0\r
+\r
+#else\r
+ movq STACKSIZE + 8(%rsp), LDC\r
+#ifdef TRMMKERNEL\r
+ vmovsd STACKSIZE + 16(%rsp), %xmm12\r
+#endif\r
+\r
+#endif\r
+\r
+ movq %rsp, SP # save old stack\r
+ subq $128 + L_BUFFER_SIZE, %rsp\r
+ andq $-4096, %rsp # align stack\r
+\r
+ STACK_TOUCH\r
+\r
+ cmpq $0, OLD_M\r
+ je .L999\r
+\r
+ cmpq $0, OLD_N\r
+ je .L999\r
+\r
+ cmpq $0, OLD_K\r
+ je .L999\r
+\r
+ movq OLD_M, M\r
+ movq OLD_N, N\r
+ movq OLD_K, K\r
+\r
+ vmovsd %xmm0, ALPHA\r
+\r
+ salq $BASE_SHIFT, LDC\r
+\r
+ movq N, %rax\r
+ xorq %rdx, %rdx\r
+ movq $4, %rdi\r
+ divq %rdi // N / 4\r
+ movq %rax, Ndiv12 // N / 4\r
+ movq %rdx, Nmod12 // N % 4\r
+\r
+#ifdef TRMMKERNEL\r
+ vmovsd %xmm12, OFFSET\r
+ vmovsd %xmm12, KK\r
+#ifndef LEFT\r
+ negq KK\r
+#endif \r
+#endif\r
+\r
+\r
+\r
+ movq Ndiv12, J\r
+ cmpq $0, J\r
+ je .L2_0\r
+ ALIGN_4\r
+\r
+.L4_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 4), C // c += 4 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $2, I // i = m / 4\r
+ je .L4_20\r
+\r
+ ALIGN_4\r
+\r
+.L4_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+#else\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+ movq KK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO,%rax,4), BO // add number of values in B\r
+ leaq (AO,%rax,4), AO // add number of values in A\r
+#endif\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in AO\r
+#else\r
+ addq $4, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ sarq $3, %rax // K / 8\r
+ cmpq $2, %rax\r
+ jl .L4_13\r
+\r
+\r
+ KERNEL4x4_I\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+\r
+ subq $2, %rax\r
+ je .L4_12a\r
+\r
+ .align 32\r
+\r
+.L4_12:\r
+\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+\r
+ dec %rax\r
+ jne .L4_12\r
+\r
+.L4_12a:\r
+\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_E\r
+\r
+ jmp .L4_16\r
+\r
+\r
+.L4_13:\r
+\r
+ test $1, %rax\r
+ jz .L4_14\r
+\r
+ KERNEL4x4_I\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_M2\r
+ KERNEL4x4_M1\r
+ KERNEL4x4_E\r
+\r
+ jmp .L4_16\r
+\r
+\r
+.L4_14:\r
+\r
+ INIT4x4\r
+\r
+\r
+.L4_16:\r
+ movq KKK, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L4_19\r
+\r
+ ALIGN_4\r
+\r
+.L4_17:\r
+\r
+ KERNEL4x4_SUB\r
+\r
+ dec %rax\r
+ jne .L4_17\r
+ ALIGN_4\r
+\r
+\r
+.L4_19:\r
+\r
+ SAVE4x4\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO, %rax, 4), BO // number of values in B\r
+ leaq (AO, %rax, 4), AO // number of values in A\r
+#endif\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK // number of values in A\r
+#endif\r
+\r
+ decq I # i --\r
+ jg .L4_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L4_20:\r
+ // Test rest of M\r
+\r
+ testq $3, M\r
+ jz .L4_100 // to next 4 lines of N\r
+\r
+\r
+.L4_30:\r
+ testq $2, M \r
+ jz .L4_40\r
+\r
+ ALIGN_4\r
+\r
+.L4_31:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+#else\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+ movq KK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO,%rax,4), BO // add number of values in B\r
+ leaq (AO,%rax,2), AO // add number of values in A\r
+#endif\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $4, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ INIT2x4\r
+\r
+ sarq $3, %rax\r
+ je .L4_36\r
+ ALIGN_4\r
+\r
+.L4_32:\r
+\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+\r
+ dec %rax\r
+ jne .L4_32\r
+ ALIGN_4\r
+\r
+.L4_36:\r
+ movq KKK, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L4_39\r
+\r
+ ALIGN_4\r
+\r
+.L4_37:\r
+\r
+ KERNEL2x4_SUB\r
+\r
+ dec %rax\r
+ jne .L4_37\r
+\r
+\r
+.L4_39:\r
+\r
+ SAVE2x4\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO, %rax, 4), BO // number of values in B\r
+ leaq (AO, %rax, 2), AO // number of values in A\r
+#endif\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK // number of values in A\r
+#endif\r
+\r
+\r
+.L4_40:\r
+ testq $1, M \r
+ jz .L4_100 // to next 4 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L4_41:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+#else\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+ movq KK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO,%rax,4), BO // add number of values in B\r
+ leaq (AO,%rax,1), AO // add number of values in A\r
+#endif\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $4, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ INIT1x4\r
+\r
+ sarq $3,%rax\r
+ je .L4_46\r
+\r
+ ALIGN_4\r
+\r
+.L4_42:\r
+\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+\r
+ dec %rax\r
+ jne .L4_42\r
+ ALIGN_4\r
+\r
+.L4_46:\r
+ movq KKK, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L4_49\r
+\r
+ ALIGN_4\r
+\r
+.L4_47:\r
+\r
+ KERNEL1x4_SUB\r
+\r
+ dec %rax\r
+ jne .L4_47\r
+ ALIGN_4\r
+\r
+\r
+.L4_49:\r
+\r
+ SAVE1x4\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO, %rax, 4), BO // number of values in B\r
+ leaq (AO, %rax, 1), AO // number of values in A\r
+#endif\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK // number of values in A\r
+#endif\r
+\r
+.L4_100:\r
+\r
+#if defined(TRMMKERNEL) && !defined(LEFT)\r
+ addq $4, KK // number of values in B\r
+#endif\r
+\r
+\r
+ movq K, %rax\r
+ salq $2, %rax // * 4\r
+ leaq (B , %rax, SIZE), B\r
+ decq J // j --\r
+ jg .L4_10\r
+\r
+\r
+\r
+\r
+/***************************************************************************************************************/\r
+\r
+.L2_0:\r
+\r
+ movq Nmod12, J \r
+ testq $2, J\r
+ je .L1_0\r
+\r
+.L2_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C // c += 2 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $2, I // i = m / 4\r
+ je .L2_20\r
+\r
+ ALIGN_4\r
+\r
+.L2_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+#else\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+ movq KK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO,%rax,2), BO // add number of values in B\r
+ leaq (AO,%rax,4), AO // add number of values in A\r
+#endif\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ INIT4x2\r
+\r
+ sarq $3, %rax // K / 8\r
+\r
+ je .L2_16\r
+\r
+ .align 32\r
+\r
+.L2_12:\r
+\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ dec %rax\r
+ jne .L2_12\r
+\r
+\r
+.L2_16:\r
+ movq KKK, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_19\r
+\r
+ ALIGN_4\r
+\r
+.L2_17:\r
+\r
+ KERNEL4x2_SUB\r
+\r
+ dec %rax\r
+ jne .L2_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_19:\r
+\r
+ SAVE4x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO, %rax, 2), BO // number of values in B\r
+ leaq (AO, %rax, 4), AO // number of values in A\r
+#endif\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK // number of values in A\r
+#endif\r
+\r
+\r
+ decq I # i --\r
+ jg .L2_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L2_20:\r
+ // Test rest of M\r
+\r
+ testq $3, M\r
+ jz .L2_100 // to next 2 lines of N\r
+\r
+\r
+.L2_30:\r
+ testq $2, M \r
+ jz .L2_40\r
+\r
+ ALIGN_4\r
+\r
+.L2_31:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+#else\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+ movq KK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO,%rax,2), BO // add number of values in B\r
+ leaq (AO,%rax,2), AO // add number of values in A\r
+#endif\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ INIT2x2\r
+\r
+ sarq $3, %rax\r
+ je .L2_36\r
+ ALIGN_4\r
+\r
+.L2_32:\r
+\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ dec %rax\r
+ jne .L2_32\r
+\r
+.L2_36:\r
+ movq KKK, %rax\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L2_39\r
+\r
+ ALIGN_4\r
+\r
+.L2_37:\r
+\r
+ KERNEL2x2_SUB\r
+\r
+ dec %rax\r
+ jne .L2_37\r
+\r
+\r
+.L2_39:\r
+\r
+ SAVE2x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+	salq	$3, %rax		// rax * SIZE\r
+ leaq (BO, %rax, 2), BO // number of values in B\r
+ leaq (AO, %rax, 2), AO // number of values in A\r
+#endif\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK // number of values in A\r
+#endif\r
+\r
+\r
+.L2_40:\r
+ testq $1, M \r
+	jz	.L2_100			// no single row left; done with the 2-column block\r
+\r
+.L2_41:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+#else\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+ movq KK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO,%rax,2), BO // add number of values in B\r
+ leaq (AO,%rax,1), AO // add number of values in A\r
+#endif\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ INIT1x2\r
+\r
+ sarq $3,%rax\r
+ je .L2_46\r
+\r
+ ALIGN_4\r
+\r
+.L2_42:\r
+\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ dec %rax\r
+ jne .L2_42\r
+\r
+.L2_46:\r
+ movq KKK, %rax\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L2_49\r
+\r
+ ALIGN_4\r
+\r
+.L2_47:\r
+\r
+ KERNEL1x2_SUB\r
+\r
+ dec %rax\r
+ jne .L2_47\r
+\r
+.L2_49:\r
+\r
+ SAVE1x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO, %rax, 2), BO // number of values in B\r
+ leaq (AO, %rax, 1), AO // number of values in A\r
+#endif\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK // number of values in A\r
+#endif\r
+\r
+\r
+.L2_100:\r
+\r
+\r
+#if defined(TRMMKERNEL) && !defined(LEFT)\r
+ addq $2, KK // number of values in B\r
+#endif\r
+\r
+ movq K, %rax\r
+ salq $1, %rax // * 2\r
+ leaq (B , %rax, SIZE), B\r
+\r
+/***************************************************************************************************************/\r
+\r
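+// N remainder: final single column when bit 0 of Nmod12 is set\r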
+.L1_0:\r
+\r
+ movq Nmod12, J \r
+ testq $1, J\r
+ je .L999\r
+\r
+.L1_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 1), C // c += 1 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $2, I // i = m / 4\r
+ je .L1_20\r
+\r
+ ALIGN_4\r
+\r
+.L1_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+#else\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+ movq KK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO,%rax,1), BO // add number of values in B\r
+ leaq (AO,%rax,4), AO // add number of values in A\r
+#endif\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ INIT4x1\r
+\r
+ sarq $3, %rax // K / 8\r
+ je .L1_16\r
+\r
+ .align 32\r
+\r
+.L1_12:\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ dec %rax\r
+ jne .L1_12\r
+\r
+\r
+.L1_16:\r
+ movq KKK, %rax\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L1_19\r
+\r
+ ALIGN_4\r
+\r
+.L1_17:\r
+\r
+ KERNEL4x1_SUB\r
+\r
+ dec %rax\r
+ jne .L1_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_19:\r
+\r
+ SAVE4x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO, %rax, 1), BO // number of values in B\r
+ leaq (AO, %rax, 4), AO // number of values in A\r
+#endif\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK // number of values in A\r
+#endif\r
+\r
+\r
+ decq I # i --\r
+ jg .L1_11\r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L1_20:\r
+ // Test rest of M\r
+\r
+ testq $3, M\r
+ jz .L1_100 \r
+\r
+\r
+.L1_30:\r
+ testq $2, M \r
+ jz .L1_40\r
+\r
+ ALIGN_4\r
+\r
+.L1_31:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+#else\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+ movq KK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO,%rax,1), BO // add number of values in B\r
+ leaq (AO,%rax,2), AO // add number of values in A\r
+#endif\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ INIT2x1\r
+\r
+ sarq $3, %rax\r
+ je .L1_36\r
+ ALIGN_4\r
+\r
+.L1_32:\r
+\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+\r
+ dec %rax\r
+ jne .L1_32\r
+\r
+.L1_36:\r
+ movq KKK, %rax\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L1_39\r
+\r
+ ALIGN_4\r
+\r
+.L1_37:\r
+\r
+ KERNEL2x1_SUB\r
+\r
+ dec %rax\r
+ jne .L1_37\r
+\r
+.L1_39:\r
+\r
+ SAVE2x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO, %rax, 1), BO // number of values in B\r
+ leaq (AO, %rax, 2), AO // number of values in A\r
+#endif\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK // number of values in A\r
+#endif\r
+\r
+\r
+.L1_40:\r
+ testq $1, M \r
+	jz	.L1_100			// no single row left; done with the last column\r
+\r
+\r
+.L1_41:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+#else\r
+ movq B, BO \r
+ addq $12 * SIZE, BO\r
+ movq KK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO,%rax,1), BO // add number of values in B\r
+ leaq (AO,%rax,1), AO // add number of values in A\r
+#endif\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ INIT1x1\r
+\r
+ sarq $3,%rax\r
+ je .L1_46\r
+\r
+ ALIGN_4\r
+\r
+.L1_42:\r
+\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ dec %rax\r
+ jne .L1_42\r
+\r
+.L1_46:\r
+ movq KKK, %rax\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L1_49\r
+\r
+ ALIGN_4\r
+\r
+.L1_47:\r
+\r
+ KERNEL1x1_SUB\r
+\r
+ dec %rax\r
+ jne .L1_47\r
+\r
+\r
+.L1_49:\r
+\r
+ SAVE1x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ salq $3, %rax // rax * SIZE\r
+ leaq (BO, %rax, 1), BO // number of values in B\r
+ leaq (AO, %rax, 1), AO // number of values in A\r
+#endif\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK // number of values in A\r
+#endif\r
+\r
+\r
+\r
+.L1_100:\r
+\r
+\r
+#if defined(TRMMKERNEL) && !defined(LEFT)\r
+ addq $1, KK // number of values in B\r
+#endif\r
+\r
+\r
+\r
+.L999:\r
+\r
+ vzeroupper\r
+\r
+ movq SP, %rsp\r
+ movq (%rsp), %rbx\r
+ movq 8(%rsp), %rbp\r
+ movq 16(%rsp), %r12\r
+ movq 24(%rsp), %r13\r
+ movq 32(%rsp), %r14\r
+ movq 40(%rsp), %r15\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq 48(%rsp), %rdi\r
+ movq 56(%rsp), %rsi\r
+ vmovups 64(%rsp), %xmm6\r
+ vmovups 80(%rsp), %xmm7\r
+ vmovups 96(%rsp), %xmm8\r
+ vmovups 112(%rsp), %xmm9\r
+ vmovups 128(%rsp), %xmm10\r
+ vmovups 144(%rsp), %xmm11\r
+ vmovups 160(%rsp), %xmm12\r
+ vmovups 176(%rsp), %xmm13\r
+ vmovups 192(%rsp), %xmm14\r
+ vmovups 208(%rsp), %xmm15\r
+#endif\r
+\r
+ addq $STACKSIZE, %rsp\r
+ ret\r
+\r
+ EPILOGUE\r
+\r
+\r
+\r
+\r
+\r
+#endif\r
--- /dev/null
+/*********************************************************************************\r
+Copyright (c) 2013, The OpenBLAS Project\r
+All rights reserved.\r
+Redistribution and use in source and binary forms, with or without\r
+modification, are permitted provided that the following conditions are\r
+met:\r
+1. Redistributions of source code must retain the above copyright\r
+notice, this list of conditions and the following disclaimer.\r
+2. Redistributions in binary form must reproduce the above copyright\r
+notice, this list of conditions and the following disclaimer in\r
+the documentation and/or other materials provided with the\r
+distribution.\r
+3. Neither the name of the OpenBLAS project nor the names of\r
+its contributors may be used to endorse or promote products\r
+derived from this software without specific prior written permission.\r
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\r
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE\r
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\r
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\r
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\r
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\r
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+**********************************************************************************/\r
+\r
+/*********************************************************************\r
+* 2013/11/13 Saar\r
+* BLASTEST : OK\r
+* CTEST : OK\r
+* TEST : OK\r
+*\r
+* 2013/10/28 Saar\r
+* Parameter:\r
+* SGEMM_DEFAULT_UNROLL_N 4\r
+* SGEMM_DEFAULT_UNROLL_M 16\r
+* SGEMM_DEFAULT_P 768\r
+* SGEMM_DEFAULT_Q 384\r
+* A_PR1 512\r
+* B_PR1 512\r
+* \r
+*\r
+* Performance at 9216x9216x9216:\r
+* 1 thread: 86 GFLOPS (SANDYBRIDGE: 59) (MKL: 83)\r
+* 2 threads: 157 GFLOPS (SANDYBRIDGE: 116) (MKL: 155)\r
+* 3 threads: 235 GFLOPS (SANDYBRIDGE: 165) (MKL: 230)\r
+* 4 threads: 288 GFLOPS (SANDYBRIDGE: 223) (MKL: 267)\r
+*\r
+*********************************************************************/\r
+\r
+#define ASSEMBLER\r
+#include "common.h"\r
+ \r
+#define OLD_M %rdi\r
+#define OLD_N %rsi\r
+#define M %r13\r
+#define J %r14\r
+#define OLD_K %rdx\r
+\r
+#define A %rcx\r
+#define B %r8\r
+#define C %r9\r
+#define LDC %r10\r
+ \r
+#define I %r11\r
+#define AO %rdi\r
+#define BO %rsi\r
+#define CO1 %r15\r
+#define K %r12\r
+#define BI %rbp\r
+#define SP %rbx\r
+\r
+#define BO1 %rdi\r
+#define CO2 %rdx\r
+\r
+#ifndef WINDOWS_ABI\r
+\r
+#define STACKSIZE 96\r
+\r
+#else\r
+\r
+#define STACKSIZE 256\r
+\r
+#define OLD_A 40 + STACKSIZE(%rsp)\r
+#define OLD_B 48 + STACKSIZE(%rsp)\r
+#define OLD_C 56 + STACKSIZE(%rsp)\r
+#define OLD_LDC 64 + STACKSIZE(%rsp)\r
+#define OLD_OFFSET 72 + STACKSIZE(%rsp)\r
+\r
+#endif\r
+\r
+#define L_BUFFER_SIZE 512*8*4\r
+#define LB2_OFFSET 512*8*2\r
+\r
+#define Ndiv6 24(%rsp)\r
+#define Nmod6 32(%rsp)\r
+#define N 40(%rsp)\r
+#define ALPHA 48(%rsp)\r
+#define OFFSET 56(%rsp)\r
+#define KK 64(%rsp)\r
+#define KKK 72(%rsp)\r
+#define BUFFER1 128(%rsp)\r
+#define BUFFER2 LB2_OFFSET+128(%rsp)\r
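+\r
+// Scalar locals sit at fixed offsets above the aligned stack pointer;\r
+// BUFFER1/BUFFER2 are packed-B scratch panels 8 KiB apart inside the\r
+// 16 KiB L_BUFFER_SIZE area.\r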
+\r
+#if defined(OS_WINDOWS)\r
+#if L_BUFFER_SIZE > 16384\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 4(%rsp);\\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 12288\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 8192\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 4096\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 1(%rsp);\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
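+\r
+// Windows commits stack pages one guard page at a time, so STACK_TOUCH\r
+// writes to each 4 KiB page of the local buffer before the kernel\r
+// indexes into it.\r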
+\r
+#if defined(BULLDOZER)\r
+\r
+#define VFMADD231PS_( y0,y1,y2 ) vfmaddps y0,y1,y2,y0\r
+\r
+#define VFMADD231SS_( x0,x1,x2 ) vfmaddss x0,x1,x2,x0\r
+\r
+#else\r
+\r
+#define VFMADD231PS_( y0,y1,y2 ) vfmadd231ps y1,y2,y0\r
+\r
+#define VFMADD231SS_( x0,x1,x2 ) vfmadd231ss x1,x2,x0\r
+\r
+#endif\r
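+\r
+// Both macro variants compute dst += src1 * src2 elementwise, e.g. (a sketch):\r
+//   VFMADD231PS_( %ymm4,%ymm2,%ymm0 )  ->  ymm4 += ymm2 * ymm0\r
+// Bulldozer expands to 4-operand FMA4 (vfmaddps), Haswell to FMA3 (vfmadd231ps).\r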
+\r
+\r
+#define A_PR1 512\r
+#define B_PR1 512\r
+\r
+/*******************************************************************************************\r
+* 4 lines of N\r
+*******************************************************************************************/\r
+\r
+.macro KERNEL16x4_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm1\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %ymm2\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PS_( %ymm4,%ymm2,%ymm0 )\r
+ VFMADD231PS_( %ymm5,%ymm2,%ymm1 )\r
+ VFMADD231PS_( %ymm6,%ymm3,%ymm0 )\r
+ VFMADD231PS_( %ymm7,%ymm3,%ymm1 )\r
+ vbroadcastss -2 * SIZE(BO, BI, SIZE), %ymm2\r
+ vbroadcastss -1 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PS_( %ymm8,%ymm2,%ymm0 )\r
+ VFMADD231PS_( %ymm9,%ymm2,%ymm1 )\r
+ VFMADD231PS_( %ymm10,%ymm3,%ymm0 )\r
+ VFMADD231PS_( %ymm11,%ymm3,%ymm1 )\r
+ addq $4 , BI \r
+ addq $16, %rax \r
+.endm\r
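+\r
+// In effect each KERNEL16x4_SUB performs one rank-1 update of the 16x4 tile:\r
+//   for j = 0..3:  c[0:16, j] += a[0:16] * b[j]\r
+// with a in ymm0/ymm1, broadcast b[j] in ymm2/ymm3, and sums in ymm4..ymm11.\r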
+\r
+.macro SAVE16x4\r
+\r
+ vbroadcastss ALPHA, %ymm0\r
+\r
+ vmulps %ymm0 , %ymm4 , %ymm4\r
+ vmulps %ymm0 , %ymm5 , %ymm5\r
+ vmulps %ymm0 , %ymm6 , %ymm6\r
+ vmulps %ymm0 , %ymm7 , %ymm7\r
+ vmulps %ymm0 , %ymm8 , %ymm8\r
+ vmulps %ymm0 , %ymm9 , %ymm9\r
+ vmulps %ymm0 , %ymm10, %ymm10\r
+ vmulps %ymm0 , %ymm11, %ymm11\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %ymm4,%ymm4\r
+ vaddps 8 * SIZE(CO1), %ymm5,%ymm5\r
+\r
+ vaddps (CO1, LDC), %ymm6,%ymm6\r
+ vaddps 8 * SIZE(CO1, LDC), %ymm7,%ymm7\r
+\r
+ vaddps (CO2), %ymm8,%ymm8\r
+ vaddps 8 * SIZE(CO2), %ymm9,%ymm9\r
+\r
+ vaddps (CO2, LDC), %ymm10,%ymm10\r
+ vaddps 8 * SIZE(CO2, LDC), %ymm11,%ymm11\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm5 , 8 * SIZE(CO1)\r
+\r
+ vmovups %ymm6 , (CO1, LDC)\r
+ vmovups %ymm7 , 8 * SIZE(CO1, LDC)\r
+\r
+ vmovups %ymm8 , (CO2)\r
+ vmovups %ymm9 , 8 * SIZE(CO2)\r
+\r
+ vmovups %ymm10, (CO2, LDC)\r
+ vmovups %ymm11, 8 * SIZE(CO2, LDC)\r
+\r
+ prefetcht0 64(CO1)\r
+ prefetcht0 64(CO1, LDC)\r
+ prefetcht0 64(CO2)\r
+ prefetcht0 64(CO2, LDC)\r
+\r
+.endm\r
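+\r
+// SAVE16x4 scales the eight accumulators by alpha, adds the existing C tile\r
+// only for the plain GEMM build (TRMM writes its result without accumulating),\r
+// stores the tile back, and prefetches the next C cache lines.\r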
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL8x4_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %ymm2\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PS_( %ymm4,%ymm2,%ymm0 )\r
+ VFMADD231PS_( %ymm6,%ymm3,%ymm0 )\r
+ vbroadcastss -2 * SIZE(BO, BI, SIZE), %ymm2\r
+ vbroadcastss -1 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PS_( %ymm8,%ymm2,%ymm0 )\r
+ VFMADD231PS_( %ymm10,%ymm3,%ymm0 )\r
+ addq $4 , BI \r
+ addq $8 , %rax \r
+.endm\r
+\r
+.macro SAVE8x4\r
+\r
+ vbroadcastss ALPHA, %ymm0\r
+\r
+ vmulps %ymm0 , %ymm4 , %ymm4\r
+ vmulps %ymm0 , %ymm6 , %ymm6\r
+ vmulps %ymm0 , %ymm8 , %ymm8\r
+ vmulps %ymm0 , %ymm10, %ymm10\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %ymm4,%ymm4\r
+ vaddps (CO1, LDC), %ymm6,%ymm6\r
+ vaddps (CO2), %ymm8,%ymm8\r
+ vaddps (CO2, LDC), %ymm10,%ymm10\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm6 , (CO1, LDC)\r
+ vmovups %ymm8 , (CO2)\r
+ vmovups %ymm10, (CO2, LDC)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL4x4_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm2\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231PS_( %xmm4,%xmm2,%xmm0 )\r
+ VFMADD231PS_( %xmm6,%xmm3,%xmm0 )\r
+ vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm2\r
+ vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231PS_( %xmm8,%xmm2,%xmm0 )\r
+ VFMADD231PS_( %xmm10,%xmm3,%xmm0 )\r
+ addq $4 , BI \r
+ addq $4 , %rax \r
+.endm\r
+\r
+.macro SAVE4x4\r
+\r
+ vbroadcastss ALPHA, %xmm0\r
+\r
+ vmulps %xmm0 , %xmm4 , %xmm4\r
+ vmulps %xmm0 , %xmm6 , %xmm6\r
+ vmulps %xmm0 , %xmm8 , %xmm8\r
+ vmulps %xmm0 , %xmm10, %xmm10\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %xmm4,%xmm4\r
+ vaddps (CO1, LDC), %xmm6,%xmm6\r
+ vaddps (CO2), %xmm8,%xmm8\r
+ vaddps (CO2, LDC), %xmm10,%xmm10\r
+\r
+#endif\r
+\r
+ vmovups %xmm4 , (CO1)\r
+ vmovups %xmm6 , (CO1, LDC)\r
+ vmovups %xmm8 , (CO2)\r
+ vmovups %xmm10, (CO2, LDC)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL2x4_SUB\r
+ vmovss -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vmovss -15 * SIZE(AO, %rax, SIZE), %xmm1\r
+ vmovss -4 * SIZE(BO, BI, SIZE), %xmm2\r
+ vmovss -3 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SS_( %xmm4,%xmm2,%xmm0 )\r
+ VFMADD231SS_( %xmm5,%xmm2,%xmm1 )\r
+ VFMADD231SS_( %xmm6,%xmm3,%xmm0 )\r
+ VFMADD231SS_( %xmm7,%xmm3,%xmm1 )\r
+ vmovss -2 * SIZE(BO, BI, SIZE), %xmm2\r
+ vmovss -1 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SS_( %xmm8,%xmm2,%xmm0 )\r
+ VFMADD231SS_( %xmm9,%xmm2,%xmm1 )\r
+ VFMADD231SS_( %xmm10,%xmm3,%xmm0 )\r
+ VFMADD231SS_( %xmm11,%xmm3,%xmm1 )\r
+ addq $4 , BI \r
+ addq $2, %rax \r
+.endm\r
+\r
+.macro SAVE2x4\r
+\r
+ vmovss ALPHA, %xmm0\r
+\r
+ vmulss %xmm0 , %xmm4 , %xmm4\r
+ vmulss %xmm0 , %xmm5 , %xmm5\r
+ vmulss %xmm0 , %xmm6 , %xmm6\r
+ vmulss %xmm0 , %xmm7 , %xmm7\r
+ vmulss %xmm0 , %xmm8 , %xmm8\r
+ vmulss %xmm0 , %xmm9 , %xmm9\r
+ vmulss %xmm0 , %xmm10, %xmm10\r
+ vmulss %xmm0 , %xmm11, %xmm11\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %xmm4,%xmm4\r
+ vaddps 1 * SIZE(CO1), %xmm5,%xmm5\r
+\r
+ vaddps (CO1, LDC), %xmm6,%xmm6\r
+ vaddps 1 * SIZE(CO1, LDC), %xmm7,%xmm7\r
+\r
+ vaddps (CO2), %xmm8,%xmm8\r
+ vaddps 1 * SIZE(CO2), %xmm9,%xmm9\r
+\r
+ vaddps (CO2, LDC), %xmm10,%xmm10\r
+ vaddps 1 * SIZE(CO2, LDC), %xmm11,%xmm11\r
+\r
+#endif\r
+\r
+ vmovss %xmm4 , (CO1)\r
+ vmovss %xmm5 , 1 * SIZE(CO1)\r
+\r
+ vmovss %xmm6 , (CO1, LDC)\r
+ vmovss %xmm7 , 1 * SIZE(CO1, LDC)\r
+\r
+ vmovss %xmm8 , (CO2)\r
+ vmovss %xmm9 , 1 * SIZE(CO2)\r
+\r
+ vmovss %xmm10, (CO2, LDC)\r
+ vmovss %xmm11, 1 * SIZE(CO2, LDC)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL1x4_SUB\r
+ vmovss -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vmovss -4 * SIZE(BO, BI, SIZE), %xmm2\r
+ vmovss -3 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SS_( %xmm4,%xmm2,%xmm0 )\r
+ VFMADD231SS_( %xmm6,%xmm3,%xmm0 )\r
+ vmovss -2 * SIZE(BO, BI, SIZE), %xmm2\r
+ vmovss -1 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SS_( %xmm8,%xmm2,%xmm0 )\r
+ VFMADD231SS_( %xmm10,%xmm3,%xmm0 )\r
+ addq $4 , BI \r
+ addq $1, %rax \r
+.endm\r
+\r
+.macro SAVE1x4\r
+\r
+ vmovss ALPHA, %xmm0\r
+\r
+ vmulss %xmm0 , %xmm4 , %xmm4\r
+ vmulss %xmm0 , %xmm6 , %xmm6\r
+ vmulss %xmm0 , %xmm8 , %xmm8\r
+ vmulss %xmm0 , %xmm10, %xmm10\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %xmm4,%xmm4\r
+ vaddps (CO1, LDC), %xmm6,%xmm6\r
+ vaddps (CO2), %xmm8,%xmm8\r
+ vaddps (CO2, LDC), %xmm10,%xmm10\r
+\r
+#endif\r
+\r
+ vmovss %xmm4 , (CO1)\r
+ vmovss %xmm6 , (CO1, LDC)\r
+ vmovss %xmm8 , (CO2)\r
+ vmovss %xmm10, (CO2, LDC)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+/*******************************************************************************************\r
+* 2 lines of N\r
+*******************************************************************************************/\r
+\r
+.macro KERNEL16x2_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm1\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %ymm2\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PS_( %ymm4,%ymm2,%ymm0 )\r
+ VFMADD231PS_( %ymm5,%ymm2,%ymm1 )\r
+ VFMADD231PS_( %ymm6,%ymm3,%ymm0 )\r
+ VFMADD231PS_( %ymm7,%ymm3,%ymm1 )\r
+ addq $2 , BI \r
+ addq $16, %rax \r
+.endm\r
+\r
+.macro SAVE16x2\r
+\r
+ vbroadcastss ALPHA, %ymm0\r
+\r
+ vmulps %ymm0 , %ymm4 , %ymm4\r
+ vmulps %ymm0 , %ymm5 , %ymm5\r
+ vmulps %ymm0 , %ymm6 , %ymm6\r
+ vmulps %ymm0 , %ymm7 , %ymm7\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %ymm4,%ymm4\r
+ vaddps 8 * SIZE(CO1), %ymm5,%ymm5\r
+\r
+ vaddps (CO1, LDC), %ymm6,%ymm6\r
+ vaddps 8 * SIZE(CO1, LDC), %ymm7,%ymm7\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm5 , 8 * SIZE(CO1)\r
+\r
+ vmovups %ymm6 , (CO1, LDC)\r
+ vmovups %ymm7 , 8 * SIZE(CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL8x2_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %ymm2\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PS_( %ymm4,%ymm2,%ymm0 )\r
+ VFMADD231PS_( %ymm6,%ymm3,%ymm0 )\r
+ addq $2 , BI \r
+ addq $8 , %rax \r
+.endm\r
+\r
+.macro SAVE8x2\r
+\r
+ vbroadcastss ALPHA, %ymm0\r
+\r
+ vmulps %ymm0 , %ymm4 , %ymm4\r
+ vmulps %ymm0 , %ymm6 , %ymm6\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %ymm4,%ymm4\r
+ vaddps (CO1, LDC), %ymm6,%ymm6\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm6 , (CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL4x2_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm2\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231PS_( %xmm4,%xmm2,%xmm0 )\r
+ VFMADD231PS_( %xmm6,%xmm3,%xmm0 )\r
+ addq $2 , BI \r
+ addq $4 , %rax \r
+.endm\r
+\r
+.macro SAVE4x2\r
+\r
+ vbroadcastss ALPHA, %xmm0\r
+\r
+ vmulps %xmm0 , %xmm4 , %xmm4\r
+ vmulps %xmm0 , %xmm6 , %xmm6\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %xmm4,%xmm4\r
+ vaddps (CO1, LDC), %xmm6,%xmm6\r
+\r
+#endif\r
+\r
+ vmovups %xmm4 , (CO1)\r
+ vmovups %xmm6 , (CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL2x2_SUB\r
+ vmovss -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vmovss -15 * SIZE(AO, %rax, SIZE), %xmm1\r
+ vmovss -4 * SIZE(BO, BI, SIZE), %xmm2\r
+ vmovss -3 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SS_( %xmm4,%xmm2,%xmm0 )\r
+ VFMADD231SS_( %xmm5,%xmm2,%xmm1 )\r
+ VFMADD231SS_( %xmm6,%xmm3,%xmm0 )\r
+ VFMADD231SS_( %xmm7,%xmm3,%xmm1 )\r
+ addq $2 , BI \r
+ addq $2, %rax \r
+.endm\r
+\r
+.macro SAVE2x2\r
+\r
+ vmovss ALPHA, %xmm0\r
+\r
+ vmulss %xmm0 , %xmm4 , %xmm4\r
+ vmulss %xmm0 , %xmm5 , %xmm5\r
+ vmulss %xmm0 , %xmm6 , %xmm6\r
+ vmulss %xmm0 , %xmm7 , %xmm7\r
+\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %xmm4,%xmm4\r
+ vaddps 1 * SIZE(CO1), %xmm5,%xmm5\r
+\r
+ vaddps (CO1, LDC), %xmm6,%xmm6\r
+ vaddps 1 * SIZE(CO1, LDC), %xmm7,%xmm7\r
+\r
+#endif\r
+\r
+ vmovss %xmm4 , (CO1)\r
+ vmovss %xmm5 , 1 * SIZE(CO1)\r
+\r
+ vmovss %xmm6 , (CO1, LDC)\r
+ vmovss %xmm7 , 1 * SIZE(CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL1x2_SUB\r
+ vmovss -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vmovss -4 * SIZE(BO, BI, SIZE), %xmm2\r
+ vmovss -3 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SS_( %xmm4,%xmm2,%xmm0 )\r
+ VFMADD231SS_( %xmm6,%xmm3,%xmm0 )\r
+ addq $2 , BI \r
+ addq $1, %rax \r
+.endm\r
+\r
+.macro SAVE1x2\r
+\r
+ vmovss ALPHA, %xmm0\r
+\r
+ vmulss %xmm0 , %xmm4 , %xmm4\r
+ vmulss %xmm0 , %xmm6 , %xmm6\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %xmm4,%xmm4\r
+ vaddps (CO1, LDC), %xmm6,%xmm6\r
+\r
+#endif\r
+\r
+ vmovss %xmm4 , (CO1)\r
+ vmovss %xmm6 , (CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+/*******************************************************************************************\r
+* 1 line of N\r
+*******************************************************************************************/\r
+\r
+.macro KERNEL16x1_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm1\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PS_( %ymm4,%ymm2,%ymm0 )\r
+ VFMADD231PS_( %ymm5,%ymm2,%ymm1 )\r
+ addq $1 , BI \r
+ addq $16, %rax \r
+.endm\r
+\r
+.macro SAVE16x1\r
+\r
+ vbroadcastss ALPHA, %ymm0\r
+\r
+ vmulps %ymm0 , %ymm4 , %ymm4\r
+ vmulps %ymm0 , %ymm5 , %ymm5\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %ymm4,%ymm4\r
+ vaddps 8 * SIZE(CO1), %ymm5,%ymm5\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm5 , 8 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL8x1_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PS_( %ymm4,%ymm2,%ymm0 )\r
+ addq $1 , BI \r
+ addq $8 , %rax \r
+.endm\r
+\r
+.macro SAVE8x1\r
+\r
+ vbroadcastss ALPHA, %ymm0\r
+\r
+ vmulps %ymm0 , %ymm4 , %ymm4\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %ymm4,%ymm4\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL4x1_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231PS_( %xmm4,%xmm2,%xmm0 )\r
+ addq $1 , BI \r
+ addq $4 , %rax \r
+.endm\r
+\r
+.macro SAVE4x1\r
+\r
+ vbroadcastss ALPHA, %xmm0\r
+\r
+ vmulps %xmm0 , %xmm4 , %xmm4\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %xmm4,%xmm4\r
+\r
+#endif\r
+\r
+ vmovups %xmm4 , (CO1)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL2x1_SUB\r
+ vmovss -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vmovss -15 * SIZE(AO, %rax, SIZE), %xmm1\r
+ vmovss -4 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SS_( %xmm4,%xmm2,%xmm0 )\r
+ VFMADD231SS_( %xmm5,%xmm2,%xmm1 )\r
+ addq $1 , BI \r
+ addq $2, %rax \r
+.endm\r
+\r
+.macro SAVE2x1\r
+\r
+ vmovss ALPHA, %xmm0\r
+\r
+ vmulss %xmm0 , %xmm4 , %xmm4\r
+ vmulss %xmm0 , %xmm5 , %xmm5\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %xmm4,%xmm4\r
+ vaddps 1 * SIZE(CO1), %xmm5,%xmm5\r
+\r
+#endif\r
+\r
+ vmovss %xmm4 , (CO1)\r
+ vmovss %xmm5 , 1 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL1x1_SUB\r
+ vmovss -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vmovss -4 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SS_( %xmm4,%xmm2,%xmm0 )\r
+ addq $1 , BI \r
+ addq $1, %rax \r
+.endm\r
+\r
+.macro SAVE1x1\r
+\r
+ vmovss ALPHA, %xmm0\r
+\r
+ vmulss %xmm0 , %xmm4 , %xmm4\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddps (CO1), %xmm4,%xmm4\r
+\r
+#endif\r
+\r
+ vmovss %xmm4 , (CO1)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+/*************************************************************************************\r
+* TRMM Kernel\r
+*************************************************************************************/\r
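+\r
+// Shared prologue: the same entry code serves the GEMM and TRMM builds;\r
+// the TRMMKERNEL conditionals below select the offset bookkeeping.\r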
+\r
+\r
+ PROLOGUE\r
+ PROFCODE\r
+ \r
+ subq $STACKSIZE, %rsp\r
+ movq %rbx, (%rsp)\r
+ movq %rbp, 8(%rsp)\r
+ movq %r12, 16(%rsp)\r
+ movq %r13, 24(%rsp)\r
+ movq %r14, 32(%rsp)\r
+ movq %r15, 40(%rsp)\r
+\r
+ vzeroupper\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq %rdi, 48(%rsp)\r
+ movq %rsi, 56(%rsp)\r
+ movups %xmm6, 64(%rsp)\r
+ movups %xmm7, 80(%rsp)\r
+ movups %xmm8, 96(%rsp)\r
+ movups %xmm9, 112(%rsp)\r
+ movups %xmm10, 128(%rsp)\r
+ movups %xmm11, 144(%rsp)\r
+ movups %xmm12, 160(%rsp)\r
+ movups %xmm13, 176(%rsp)\r
+ movups %xmm14, 192(%rsp)\r
+ movups %xmm15, 208(%rsp)\r
+\r
+ movq ARG1, OLD_M\r
+ movq ARG2, OLD_N\r
+ movq ARG3, OLD_K\r
+ movq OLD_A, A\r
+ movq OLD_B, B\r
+ movq OLD_C, C\r
+ movq OLD_LDC, LDC\r
+#ifdef TRMMKERNEL\r
+ vmovsd OLD_OFFSET, %xmm12\r
+#endif\r
+ vmovaps %xmm3, %xmm0\r
+\r
+#else\r
+ movq STACKSIZE + 8(%rsp), LDC\r
+#ifdef TRMMKERNEL\r
+ movsd STACKSIZE + 16(%rsp), %xmm12\r
+#endif\r
+\r
+#endif\r
+\r
+ movq %rsp, SP # save old stack\r
+ subq $128 + L_BUFFER_SIZE, %rsp\r
+ andq $-4096, %rsp # align stack\r
+\r
+ STACK_TOUCH\r
+\r
+ cmpq $0, OLD_M\r
+ je .L999\r
+\r
+ cmpq $0, OLD_N\r
+ je .L999\r
+\r
+ cmpq $0, OLD_K\r
+ je .L999\r
+\r
+ movq OLD_M, M\r
+ movq OLD_N, N\r
+ movq OLD_K, K\r
+\r
+ vmovss %xmm0, ALPHA\r
+\r
+ salq $BASE_SHIFT, LDC\r
+\r
+ movq N, %rax\r
+ xorq %rdx, %rdx\r
+ movq $4, %rdi\r
+ divq %rdi // N / 4\r
+ movq %rax, Ndiv6 // N / 4\r
+ movq %rdx, Nmod6 // N % 4\r
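+	// despite their names (apparently kept from a 6-column variant),\r
+	// Ndiv6/Nmod6 hold N/4 and N%4 with this 4-column blocking\r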
+\r
+ \r
+\r
+#ifdef TRMMKERNEL\r
+ vmovsd %xmm12, OFFSET\r
+ vmovsd %xmm12, KK\r
+#ifndef LEFT\r
+ negq KK\r
+#endif \r
+#endif\r
+\r
+ movq Ndiv6, J\r
+ cmpq $0, J\r
+ je .L2_0\r
+ ALIGN_4\r
+\r
+/*******************************************************************************************/\r
+\r
+.L4_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ sarq $2, %rax // K / 4\r
+ jz .L4_01b\r
+ ALIGN_4\r
+\r
+\r
+.L4_01a:\r
+ prefetcht0 512(BO1)\r
+ prefetchw 512(BO)\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups 4*SIZE(BO1), %xmm1\r
+ vmovups 8*SIZE(BO1), %xmm2\r
+ vmovups 12*SIZE(BO1), %xmm3\r
+\r
+ vmovups %xmm0, (BO)\r
+ vmovups %xmm1, 4*SIZE(BO)\r
+ vmovups %xmm2, 8*SIZE(BO)\r
+ vmovups %xmm3,12*SIZE(BO)\r
+\r
+ addq $16*SIZE,BO1\r
+ addq $16*SIZE,BO\r
+ decq %rax\r
+ jnz .L4_01a\r
+\r
+\r
+.L4_01b:\r
+\r
+ movq K, %rax\r
+ andq $3, %rax // K % 4\r
+ jz .L4_02d\r
+ ALIGN_4\r
+\r
+.L4_02c:\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups %xmm0, (BO)\r
+ addq $4*SIZE,BO1\r
+ addq $4*SIZE,BO\r
+ decq %rax\r
+ jnz .L4_02c\r
+\r
+.L4_02d:\r
+\r
+ movq BO1, B // next offset of B\r
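+// BUFFER1 now holds the packed 4-column B panel; B points at the next panel\r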
+\r
+.L4_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), CO2 \r
+ leaq (C, LDC, 4), C // c += 4 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L4_20\r
+\r
+ ALIGN_4\r
+\r
+.L4_11:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI, 4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $16, %rax // number of values in AO\r
+#else\r
+ addq $4, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L4_16\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4) , BI // BI = BI * 4 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
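+	// AO/BO now point past the panels; BI and %rax count up from negative\r
+	// offsets, so the addq inside each KERNEL16x4_SUB sets ZF on the last step\r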
+ ALIGN_4\r
+\r
+.L4_12:\r
+\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ prefetcht0 B_PR1(BO, BI , SIZE)\r
+ KERNEL16x4_SUB\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ KERNEL16x4_SUB\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ KERNEL16x4_SUB\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ KERNEL16x4_SUB\r
+\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ prefetcht0 B_PR1(BO, BI , SIZE)\r
+ KERNEL16x4_SUB\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ KERNEL16x4_SUB\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ KERNEL16x4_SUB\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ KERNEL16x4_SUB\r
+\r
+ je .L4_16\r
+\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ prefetcht0 B_PR1(BO, BI , SIZE)\r
+ KERNEL16x4_SUB\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ KERNEL16x4_SUB\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ KERNEL16x4_SUB\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ KERNEL16x4_SUB\r
+\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ prefetcht0 B_PR1(BO, BI , SIZE)\r
+ KERNEL16x4_SUB\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ KERNEL16x4_SUB\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ KERNEL16x4_SUB\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ KERNEL16x4_SUB\r
+\r
+ je .L4_16\r
+\r
+ jmp .L4_12\r
+ ALIGN_4\r
+\r
+.L4_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L4_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L4_17:\r
+\r
+ KERNEL16x4_SUB\r
+\r
+ jl .L4_17\r
+ ALIGN_4\r
+\r
+\r
+.L4_19:\r
+\r
+ SAVE16x4\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI, 4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $16, KK \r
+#endif\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ addq $16 * SIZE, CO2 # coffset += 16\r
+ decq I # i --\r
+ jg .L4_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L4_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+	jz	.L4_60			// to next 4 lines of N\r
+\r
+ testq $8, M \r
+ jz .L4_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L4_20_1:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI, 4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $8, %rax // number of values in A\r
+#else\r
+ addq $4, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L4_20_6\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L4_20_2:\r
+\r
+ KERNEL8x4_SUB\r
+ KERNEL8x4_SUB\r
+ KERNEL8x4_SUB\r
+ KERNEL8x4_SUB\r
+\r
+ KERNEL8x4_SUB\r
+ KERNEL8x4_SUB\r
+ KERNEL8x4_SUB\r
+ KERNEL8x4_SUB\r
+\r
+ je .L4_20_6\r
+\r
+ KERNEL8x4_SUB\r
+ KERNEL8x4_SUB\r
+ KERNEL8x4_SUB\r
+ KERNEL8x4_SUB\r
+\r
+ KERNEL8x4_SUB\r
+ KERNEL8x4_SUB\r
+ KERNEL8x4_SUB\r
+ KERNEL8x4_SUB\r
+\r
+ je .L4_20_6\r
+\r
+ jmp .L4_20_2\r
+ ALIGN_4\r
+\r
+.L4_20_6:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L4_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L4_20_7:\r
+\r
+ KERNEL8x4_SUB\r
+\r
+ jl .L4_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L4_20_9:\r
+\r
+ SAVE8x4\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI, 4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $8, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ addq $8 * SIZE, CO2 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L4_21pre:\r
+\r
+ testq $4, M \r
+ jz .L4_30\r
+ ALIGN_4\r
+\r
+.L4_21:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI, 4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in A\r
+#else\r
+ addq $4, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L4_26\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L4_22:\r
+\r
+ KERNEL4x4_SUB\r
+ KERNEL4x4_SUB\r
+ KERNEL4x4_SUB\r
+ KERNEL4x4_SUB\r
+\r
+ KERNEL4x4_SUB\r
+ KERNEL4x4_SUB\r
+ KERNEL4x4_SUB\r
+ KERNEL4x4_SUB\r
+\r
+ je .L4_26\r
+\r
+ KERNEL4x4_SUB\r
+ KERNEL4x4_SUB\r
+ KERNEL4x4_SUB\r
+ KERNEL4x4_SUB\r
+\r
+ KERNEL4x4_SUB\r
+ KERNEL4x4_SUB\r
+ KERNEL4x4_SUB\r
+ KERNEL4x4_SUB\r
+\r
+ je .L4_26\r
+\r
+ jmp .L4_22\r
+ ALIGN_4\r
+\r
+.L4_26:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L4_29\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L4_27:\r
+\r
+ KERNEL4x4_SUB\r
+\r
+ jl .L4_27\r
+ ALIGN_4\r
+\r
+\r
+.L4_29:\r
+\r
+ SAVE4x4\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI, 4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ addq $4 * SIZE, CO2 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L4_30:\r
+ testq $2, M \r
+ jz .L4_40\r
+\r
+ ALIGN_4\r
+\r
+.L4_31:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI, 4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $4, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L4_36\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L4_32:\r
+\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+\r
+ je .L4_36\r
+\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+ KERNEL2x4_SUB\r
+\r
+ je .L4_36\r
+\r
+ jmp .L4_32\r
+ ALIGN_4\r
+\r
+.L4_36:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L4_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI, 4), BI // BI = BI * 4 ; number of values\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L4_37:\r
+\r
+ KERNEL2x4_SUB\r
+\r
+ jl .L4_37\r
+ ALIGN_4\r
+\r
+\r
+.L4_39:\r
+\r
+ SAVE2x4\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI, 4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ addq $2 * SIZE, CO2 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L4_40:\r
+ testq $1, M \r
+ jz .L4_60 // to next 4 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L4_41:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI, 4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $4, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ andq $-8, %rax\r
+ je .L4_46\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L4_42:\r
+\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+\r
+ je .L4_46\r
+\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+ KERNEL1x4_SUB\r
+\r
+ je .L4_46\r
+\r
+ jmp .L4_42\r
+ ALIGN_4\r
+\r
+.L4_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L4_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L4_47:\r
+\r
+ KERNEL1x4_SUB\r
+\r
+ jl .L4_47\r
+ ALIGN_4\r
+\r
+\r
+.L4_49:\r
+\r
+ SAVE1x4\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI, 4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK\r
+#endif\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ addq $1 * SIZE, CO2 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+\r
+\r
+ \r
+.L4_60:\r
+#if defined(TRMMKERNEL) && !defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ decq J // j --\r
+ jg .L4_01 // next 4 lines of N\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+.L2_0:\r
+\r
+ movq Nmod6, J \r
+ andq $3, J // j % 4\r
+ je .L999\r
+\r
+ movq Nmod6, J \r
+	andq	$2, J			// j & 2\r
+ je .L1_0\r
+\r
+.L2_01:\r
+\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ sarq $2, %rax // K / 4\r
+ jz .L2_01b\r
+ ALIGN_4\r
+\r
+.L2_01a:\r
+\r
+ vmovsd (BO1), %xmm0\r
+ vmovsd 2*SIZE(BO1), %xmm1\r
+ vmovsd 4*SIZE(BO1), %xmm2\r
+ vmovsd 6*SIZE(BO1), %xmm3\r
+\r
+ vmovsd %xmm0, (BO)\r
+ vmovsd %xmm1, 2*SIZE(BO)\r
+ vmovsd %xmm2, 4*SIZE(BO)\r
+ vmovsd %xmm3, 6*SIZE(BO)\r
+\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_01a\r
+\r
+\r
+.L2_01b:\r
+\r
+ movq K, %rax\r
+ andq $3, %rax // K % 4\r
+ jz .L2_02d\r
+ ALIGN_4\r
+\r
+.L2_02c:\r
+\r
+ vmovsd (BO1), %xmm0\r
+ vmovsd %xmm0, (BO)\r
+ addq $2*SIZE,BO1\r
+ addq $2*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_02c\r
+\r
+.L2_02d:\r
+\r
+ movq BO1, B // next offset of B\r
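+// BUFFER1 now holds the packed 2-column B panel; B points at the next panel\r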
+\r
+.L2_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C // c += 2 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L2_20\r
+\r
+ ALIGN_4\r
+\r
+.L2_11:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $16, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_16\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_12:\r
+\r
+ KERNEL16x2_SUB\r
+ KERNEL16x2_SUB\r
+ KERNEL16x2_SUB\r
+ KERNEL16x2_SUB\r
+\r
+ KERNEL16x2_SUB\r
+ KERNEL16x2_SUB\r
+ KERNEL16x2_SUB\r
+ KERNEL16x2_SUB\r
+\r
+ je .L2_16\r
+\r
+ KERNEL16x2_SUB\r
+ KERNEL16x2_SUB\r
+ KERNEL16x2_SUB\r
+ KERNEL16x2_SUB\r
+\r
+ KERNEL16x2_SUB\r
+ KERNEL16x2_SUB\r
+ KERNEL16x2_SUB\r
+ KERNEL16x2_SUB\r
+\r
+ je .L2_16\r
+\r
+ jmp .L2_12\r
+ ALIGN_4\r
+\r
+.L2_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L2_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_17:\r
+\r
+ KERNEL16x2_SUB\r
+\r
+ jl .L2_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_19:\r
+\r
+ SAVE16x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $16, KK \r
+#endif\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L2_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L2_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+ jz .L2_60 // to next 2 lines of N\r
+\r
+ testq $8, M \r
+ jz .L2_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L2_20_1:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $8, %rax // number of values in A\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L2_20_6\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_20_2:\r
+\r
+\r
+ KERNEL8x2_SUB\r
+ KERNEL8x2_SUB\r
+ KERNEL8x2_SUB\r
+ KERNEL8x2_SUB\r
+\r
+ KERNEL8x2_SUB\r
+ KERNEL8x2_SUB\r
+ KERNEL8x2_SUB\r
+ KERNEL8x2_SUB\r
+\r
+ je .L2_20_6\r
+\r
+ KERNEL8x2_SUB\r
+ KERNEL8x2_SUB\r
+ KERNEL8x2_SUB\r
+ KERNEL8x2_SUB\r
+\r
+ KERNEL8x2_SUB\r
+ KERNEL8x2_SUB\r
+ KERNEL8x2_SUB\r
+ KERNEL8x2_SUB\r
+\r
+ je .L2_20_6\r
+\r
+ jmp .L2_20_2\r
+ ALIGN_4\r
+\r
+.L2_20_6:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L2_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_20_7:\r
+\r
+ KERNEL8x2_SUB\r
+\r
+ jl .L2_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L2_20_9:\r
+\r
+ SAVE8x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $8, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L2_21pre:\r
+\r
+ testq $4, M \r
+ jz .L2_30\r
+ ALIGN_4\r
+\r
+.L2_21:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in A\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L2_26\r
+ movq %rax, BI // Index for BO\r
+	leaq	(BI,BI,1), BI		// BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_22:\r
+\r
+\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ je .L2_26\r
+\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ je .L2_26\r
+\r
+ jmp .L2_22\r
+ ALIGN_4\r
+\r
+.L2_26:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L2_29\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_27:\r
+\r
+ KERNEL4x2_SUB\r
+\r
+ jl .L2_27\r
+ ALIGN_4\r
+\r
+\r
+.L2_29:\r
+\r
+ SAVE4x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L2_30:\r
+ testq $2, M \r
+ jz .L2_40\r
+\r
+ ALIGN_4\r
+\r
+.L2_31:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L2_36\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_32:\r
+\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ je .L2_36\r
+\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ je .L2_36\r
+\r
+ jmp .L2_32\r
+ ALIGN_4\r
+\r
+.L2_36:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+        andq $7, %rax # if (k & 7)\r
+ je .L2_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_37:\r
+\r
+ KERNEL2x2_SUB\r
+\r
+ jl .L2_37\r
+ ALIGN_4\r
+\r
+\r
+.L2_39:\r
+\r
+ SAVE2x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L2_40:\r
+ testq $1, M \r
+ jz .L2_60 // to next 2 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L2_41:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ andq $-8, %rax\r
+ je .L2_46\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_42:\r
+\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ je .L2_46\r
+\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ je .L2_46\r
+\r
+ jmp .L2_42\r
+ ALIGN_4\r
+\r
+.L2_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+        andq $7, %rax # if (k & 7)\r
+ je .L2_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_47:\r
+\r
+ KERNEL1x2_SUB\r
+\r
+ jl .L2_47\r
+ ALIGN_4\r
+\r
+\r
+.L2_49:\r
+\r
+ SAVE1x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK\r
+#endif\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+\r
+\r
+ \r
+.L2_60:\r
+#if defined(TRMMKERNEL) && !defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+\r
+\r
+\r
+.L1_0:\r
+\r
+/************************************************************************************************\r
+* Loop for Nmod6 % 2 > 0\r
+*************************************************************************************************/\r
+\r
+ movq Nmod6, J \r
+ andq $1, J // j % 2\r
+ je .L999\r
+ ALIGN_4\r
+\r
+.L1_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
+.L1_02b:\r
+\r
+ vmovss (BO1), %xmm0\r
+ vmovss %xmm0, (BO)\r
+ addq $1*SIZE,BO1\r
+ addq $1*SIZE,BO\r
+ decq %rax\r
+ jnz .L1_02b\r
+\r
+.L1_02c:\r
+\r
+ movq BO1, B // next offset of B\r
+\r
+.L1_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 1), C // c += 1 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L1_20\r
+\r
+ ALIGN_4\r
+\r
+.L1_11:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $16, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_16\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_12:\r
+\r
+ KERNEL16x1_SUB\r
+ KERNEL16x1_SUB\r
+ KERNEL16x1_SUB\r
+ KERNEL16x1_SUB\r
+\r
+ KERNEL16x1_SUB\r
+ KERNEL16x1_SUB\r
+ KERNEL16x1_SUB\r
+ KERNEL16x1_SUB\r
+\r
+ je .L1_16\r
+\r
+ KERNEL16x1_SUB\r
+ KERNEL16x1_SUB\r
+ KERNEL16x1_SUB\r
+ KERNEL16x1_SUB\r
+\r
+ KERNEL16x1_SUB\r
+ KERNEL16x1_SUB\r
+ KERNEL16x1_SUB\r
+ KERNEL16x1_SUB\r
+\r
+ je .L1_16\r
+\r
+ jmp .L1_12\r
+ ALIGN_4\r
+\r
+.L1_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+        andq $7, %rax # if (k & 7)\r
+ je .L1_19\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_17:\r
+\r
+ KERNEL16x1_SUB\r
+\r
+ jl .L1_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_19:\r
+\r
+ SAVE16x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $16, KK \r
+#endif\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L1_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L1_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+ jz .L999\r
+\r
+ testq $8, M \r
+ jz .L1_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L1_20_1:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $8, %rax // number of values in A\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L1_20_6\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_20_2:\r
+\r
+ KERNEL8x1_SUB\r
+ KERNEL8x1_SUB\r
+ KERNEL8x1_SUB\r
+ KERNEL8x1_SUB\r
+\r
+ KERNEL8x1_SUB\r
+ KERNEL8x1_SUB\r
+ KERNEL8x1_SUB\r
+ KERNEL8x1_SUB\r
+\r
+ je .L1_20_6\r
+\r
+ KERNEL8x1_SUB\r
+ KERNEL8x1_SUB\r
+ KERNEL8x1_SUB\r
+ KERNEL8x1_SUB\r
+\r
+ KERNEL8x1_SUB\r
+ KERNEL8x1_SUB\r
+ KERNEL8x1_SUB\r
+ KERNEL8x1_SUB\r
+\r
+ je .L1_20_6\r
+\r
+ jmp .L1_20_2\r
+ ALIGN_4\r
+\r
+.L1_20_6:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+        andq $7, %rax # if (k & 7)\r
+ je .L1_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_20_7:\r
+\r
+ KERNEL8x1_SUB\r
+\r
+ jl .L1_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L1_20_9:\r
+\r
+ SAVE8x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $8, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L1_21pre:\r
+\r
+ testq $4, M \r
+ jz .L1_30\r
+ ALIGN_4\r
+\r
+.L1_21:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in A\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L1_26\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_22:\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ je .L1_26\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ je .L1_26\r
+\r
+ jmp .L1_22\r
+ ALIGN_4\r
+\r
+.L1_26:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+        andq $7, %rax # if (k & 7)\r
+ je .L1_29\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_27:\r
+\r
+ KERNEL4x1_SUB\r
+\r
+ jl .L1_27\r
+ ALIGN_4\r
+\r
+\r
+.L1_29:\r
+\r
+ SAVE4x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L1_30:\r
+ testq $2, M \r
+ jz .L1_40\r
+\r
+ ALIGN_4\r
+\r
+.L1_31:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L1_36\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_32:\r
+\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ je .L1_36\r
+\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ je .L1_36\r
+\r
+ jmp .L1_32\r
+ ALIGN_4\r
+\r
+.L1_36:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+        andq $7, %rax # if (k & 7)\r
+ je .L1_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_37:\r
+\r
+ KERNEL2x1_SUB\r
+\r
+ jl .L1_37\r
+ ALIGN_4\r
+\r
+\r
+.L1_39:\r
+\r
+ SAVE2x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L1_40:\r
+ testq $1, M \r
+ jz .L999\r
+\r
+ ALIGN_4\r
+\r
+.L1_41:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ andq $-8, %rax\r
+ je .L1_46\r
+ movq %rax, BI // Index for BO\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_42:\r
+\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ je .L1_46\r
+\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ je .L1_46\r
+\r
+ jmp .L1_42\r
+ ALIGN_4\r
+\r
+.L1_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+        andq $7, %rax # if (k & 7)\r
+ je .L1_49\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_47:\r
+\r
+ KERNEL1x1_SUB\r
+\r
+ jl .L1_47\r
+ ALIGN_4\r
+\r
+\r
+.L1_49:\r
+\r
+ SAVE1x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO \r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK\r
+#endif\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+.L999:\r
+ movq SP, %rsp\r
+ movq (%rsp), %rbx\r
+ movq 8(%rsp), %rbp\r
+ movq 16(%rsp), %r12\r
+ movq 24(%rsp), %r13\r
+ movq 32(%rsp), %r14\r
+ movq 40(%rsp), %r15\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq 48(%rsp), %rdi\r
+ movq 56(%rsp), %rsi\r
+ movups 64(%rsp), %xmm6\r
+ movups 80(%rsp), %xmm7\r
+ movups 96(%rsp), %xmm8\r
+ movups 112(%rsp), %xmm9\r
+ movups 128(%rsp), %xmm10\r
+ movups 144(%rsp), %xmm11\r
+ movups 160(%rsp), %xmm12\r
+ movups 176(%rsp), %xmm13\r
+ movups 192(%rsp), %xmm14\r
+ movups 208(%rsp), %xmm15\r
+#endif\r
+\r
+ addq $STACKSIZE, %rsp\r
+ ret\r
+\r
+ EPILOGUE\r
+\r
+\r
+\r
+\r
+\r
#define PREFETCHSIZE (16 * 12)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHW prefetcht0
#define PREFETCHSIZE (16 * 12)
#define PREFETCHSIZE (16 * 12)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHW prefetcht0
#define PREFETCHSIZE (16 * 12)
#define PREFETCHSIZE (16 * 12)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHW prefetcht0
#define PREFETCHSIZE (16 * 12)
#define PREFETCHSIZE (16 * 12)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHW prefetcht0
#define PREFETCHSIZE (16 * 24)
--- /dev/null
+/*********************************************************************************\r
+Copyright (c) 2013, The OpenBLAS Project\r
+All rights reserved.\r
+Redistribution and use in source and binary forms, with or without\r
+modification, are permitted provided that the following conditions are\r
+met:\r
+1. Redistributions of source code must retain the above copyright\r
+notice, this list of conditions and the following disclaimer.\r
+2. Redistributions in binary form must reproduce the above copyright\r
+notice, this list of conditions and the following disclaimer in\r
+the documentation and/or other materials provided with the\r
+distribution.\r
+3. Neither the name of the OpenBLAS project nor the names of\r
+its contributors may be used to endorse or promote products\r
+derived from this software without specific prior written permission.\r
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\r
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE\r
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\r
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\r
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\r
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\r
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+**********************************************************************************/\r
+\r
+/********************************************************************************\r
+* 2013/11/13 Saar\r
+* BLASTEST : OK\r
+* CTEST : OK\r
+* TEST : OK\r
+*\r
+* 2013/10/28 Saar\r
+* Parameter:\r
+* ZGEMM_DEFAULT_UNROLL_N 2\r
+* ZGEMM_DEFAULT_UNROLL_M 4 \r
+* ZGEMM_DEFAULT_P 256\r
+* ZGEMM_DEFAULT_Q 128\r
+* A_PR1 512\r
+* B_PR1 512\r
+*\r
+*\r
+* Performance at 4608x4608x4608:\r
+* 1 thread: 43 GFLOPS (SANDYBRIDGE: 29) (MKL: 53)\r
+* 2 threads: 85 GFLOPS (SANDYBRIDGE: 59) (MKL: 100)\r
+* 3 threads: 122 GFLOPS (SANDYBRIDGE: 86) (MKL: 138)\r
+* 4 threads: 156 GFLOPS (SANDYBRIDGE: 108) (MKL: 172)\r
+*\r
+********************************************************************************/\r
+\r
+\r
+#define ASSEMBLER\r
+#include "common.h"\r
+ \r
+#define OLD_M %rdi\r
+#define OLD_N %rsi\r
+#define M %r13\r
+#define J %r14\r
+#define OLD_K %rdx\r
+\r
+#define A %rcx\r
+#define B %r8\r
+#define C %r9\r
+#define LDC %r10\r
+ \r
+#define I %r11\r
+#define AO %rdi\r
+#define BO %rsi\r
+#define CO1 %r15\r
+#define K %r12\r
+#define BI %rbp\r
+#define SP %rbx\r
+\r
+#define BO1 %rdi\r
+#define BO2 %r15\r
+\r
+#ifndef WINDOWS_ABI\r
+\r
+#define STACKSIZE 96\r
+\r
+#else\r
+\r
+#define STACKSIZE 320\r
+\r
+#define OLD_ALPHA_I 40 + STACKSIZE(%rsp)\r
+#define OLD_A 48 + STACKSIZE(%rsp)\r
+#define OLD_B 56 + STACKSIZE(%rsp)\r
+#define OLD_C 64 + STACKSIZE(%rsp)\r
+#define OLD_LDC 72 + STACKSIZE(%rsp)\r
+#define OLD_OFFSET 80 + STACKSIZE(%rsp)\r
+\r
+#endif\r
+\r
+#define L_BUFFER_SIZE 512*8*4\r
+#define LB2_OFFSET 512*8*2\r
+\r
+#define Ndiv6 24(%rsp)\r
+#define Nmod6 32(%rsp)\r
+#define N 40(%rsp)\r
+#define ALPHA_R 48(%rsp)\r
+#define ALPHA_I 56(%rsp)\r
+#define OFFSET 64(%rsp)\r
+#define KK 72(%rsp)\r
+#define KKK 80(%rsp)\r
+#define BUFFER1 128(%rsp)\r
+#define BUFFER2 LB2_OFFSET+128(%rsp)\r
+\r
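+// Windows commits stack memory one 4 KB guard page at a time, so a stack\r
+// frame larger than a page must touch each page in order before use.\r
+// STACK_TOUCH below writes one dword per 4 KB of L_BUFFER_SIZE to\r
+// pre-fault the pages used by BUFFER1/BUFFER2.\r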
+#if defined(OS_WINDOWS)\r
+#if L_BUFFER_SIZE > 16384\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 4(%rsp);\\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 12288\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 8192\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 4096\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 1(%rsp);\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
+\r
+\r
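+// VFMADDPD_R / VFMADDPD_I select the fused multiply-add flavor for the\r
+// real and imaginary partial products: 4-operand FMA4 (vfmaddpd/vfnmaddpd)\r
+// on Bulldozer, 3-operand FMA3 (vfmadd231pd/vfnmadd231pd) otherwise, which\r
+// is what Haswell provides. The negated (vfnmadd) forms supply the sign\r
+// flips required by the conjugated transpose cases.\r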
+#if defined(BULLDOZER) \r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT)\r
+\r
+#define VFMADDPD_R( y0,y1,y2 ) vfmaddpd y0,y1,y2,y0\r
+\r
+#define VFMADDPD_I( y0,y1,y2 ) vfmaddpd y0,y1,y2,y0\r
+\r
+#elif defined(RN) || defined(RT) || defined(CN) || defined(CT)\r
+\r
+#define VFMADDPD_R( y0,y1,y2 ) vfnmaddpd y0,y1,y2,y0\r
+\r
+#define VFMADDPD_I( y0,y1,y2 ) vfmaddpd y0,y1,y2,y0\r
+\r
+#elif defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+#define VFMADDPD_R( y0,y1,y2 ) vfmaddpd y0,y1,y2,y0\r
+\r
+#define VFMADDPD_I( y0,y1,y2 ) vfnmaddpd y0,y1,y2,y0\r
+\r
+#else\r
+\r
+#define VFMADDPD_R( y0,y1,y2 ) vfnmaddpd y0,y1,y2,y0\r
+\r
+#define VFMADDPD_I( y0,y1,y2 ) vfnmaddpd y0,y1,y2,y0\r
+\r
+#endif\r
+\r
+#else\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT)\r
+\r
+#define VFMADDPD_R( y0,y1,y2 ) vfmadd231pd y1,y2,y0\r
+\r
+#define VFMADDPD_I( y0,y1,y2 ) vfmadd231pd y1,y2,y0\r
+\r
+#elif defined(RN) || defined(RT) || defined(CN) || defined(CT)\r
+\r
+#define VFMADDPD_R( y0,y1,y2 ) vfnmadd231pd y1,y2,y0\r
+\r
+#define VFMADDPD_I( y0,y1,y2 ) vfmadd231pd y1,y2,y0\r
+\r
+#elif defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+#define VFMADDPD_R( y0,y1,y2 ) vfmadd231pd y1,y2,y0\r
+\r
+#define VFMADDPD_I( y0,y1,y2 ) vfnmadd231pd y1,y2,y0\r
+\r
+#else\r
+\r
+#define VFMADDPD_R( y0,y1,y2 ) vfnmadd231pd y1,y2,y0\r
+\r
+#define VFMADDPD_I( y0,y1,y2 ) vfnmadd231pd y1,y2,y0\r
+\r
+#endif\r
+\r
+#endif\r
+\r
+#define A_PR1 512\r
+#define B_PR1 512\r
+/***************************************************************************************************/\r
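+// KERNEL4x2_SUB: one rank-1 update of a 4x2 block of double-complex C.\r
+// ymm0/ymm1 load 4 complex values of A; ymm4..ymm7 broadcast the real and\r
+// imaginary parts of 2 complex values of B. Column 0 accumulates into\r
+// ymm8/ymm12 (real) and ymm9/ymm13 (imaginary), column 1 into ymm10/ymm14\r
+// and ymm11/ymm15; BI and %rax then advance to the next k iteration.\r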
+\r
+.macro KERNEL4x2_SUB\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm1\r
+\r
+ vbroadcastsd -8 * SIZE(BO, BI, SIZE), %ymm4\r
+ vbroadcastsd -7 * SIZE(BO, BI, SIZE), %ymm5\r
+ VFMADDPD_R( %ymm8 ,%ymm4,%ymm0 )\r
+ VFMADDPD_R( %ymm12,%ymm4,%ymm1 )\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm6\r
+ VFMADDPD_I( %ymm9 ,%ymm5,%ymm0 )\r
+ VFMADDPD_I( %ymm13,%ymm5,%ymm1 )\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm7\r
+ VFMADDPD_R( %ymm10,%ymm6,%ymm0 )\r
+ VFMADDPD_R( %ymm14,%ymm6,%ymm1 )\r
+ VFMADDPD_I( %ymm11,%ymm7,%ymm0 )\r
+ VFMADDPD_I( %ymm15,%ymm7,%ymm1 )\r
+\r
+ addq $4, BI \r
+ addq $8, %rax \r
+.endm\r
+\r
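+// SAVE4x2: merge the separately accumulated real and imaginary partial\r
+// sums with vaddsubpd, scale by the complex scalar alpha (ALPHA_R/ALPHA_I),\r
+// and store to C; for plain GEMM the old contents of C are added first,\r
+// while TRMM (TRMMKERNEL) overwrites C.\r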
+.macro SAVE4x2\r
+\r
+ vbroadcastsd ALPHA_R, %ymm0\r
+ vbroadcastsd ALPHA_I, %ymm1\r
+\r
+        // swap the two 64-bit halves of each 128-bit lane\r
+ vshufpd $0x05, %ymm9 , %ymm9, %ymm9\r
+ vshufpd $0x05, %ymm11, %ymm11, %ymm11\r
+ vshufpd $0x05, %ymm13, %ymm13, %ymm13\r
+ vshufpd $0x05, %ymm15, %ymm15, %ymm15\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubpd %ymm9, %ymm8 , %ymm8\r
+ vaddsubpd %ymm11,%ymm10, %ymm10\r
+ vaddsubpd %ymm13,%ymm12, %ymm12\r
+ vaddsubpd %ymm15,%ymm14, %ymm14\r
+\r
+ vshufpd $0x05, %ymm8 , %ymm8, %ymm9\r
+ vshufpd $0x05, %ymm10, %ymm10, %ymm11\r
+ vshufpd $0x05, %ymm12, %ymm12, %ymm13\r
+ vshufpd $0x05, %ymm14, %ymm14, %ymm15\r
+\r
+#else\r
+ vaddsubpd %ymm8, %ymm9 ,%ymm9\r
+ vaddsubpd %ymm10, %ymm11,%ymm11\r
+ vaddsubpd %ymm12, %ymm13,%ymm13\r
+ vaddsubpd %ymm14, %ymm15,%ymm15\r
+\r
+ vmovapd %ymm9, %ymm8\r
+ vmovapd %ymm11, %ymm10\r
+ vmovapd %ymm13, %ymm12\r
+ vmovapd %ymm15, %ymm14\r
+\r
+        // swap the two 64-bit halves of each 128-bit lane\r
+ vshufpd $0x05, %ymm9 , %ymm9, %ymm9\r
+ vshufpd $0x05, %ymm11, %ymm11, %ymm11\r
+ vshufpd $0x05, %ymm13, %ymm13, %ymm13\r
+ vshufpd $0x05, %ymm15, %ymm15, %ymm15\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulpd %ymm8 , %ymm0, %ymm8\r
+ vmulpd %ymm10, %ymm0, %ymm10\r
+ vmulpd %ymm12, %ymm0, %ymm12\r
+ vmulpd %ymm14, %ymm0, %ymm14\r
+\r
+ // multiply with ALPHA_I\r
+ vmulpd %ymm9 , %ymm1, %ymm9\r
+ vmulpd %ymm11, %ymm1, %ymm11\r
+ vmulpd %ymm13, %ymm1, %ymm13\r
+ vmulpd %ymm15, %ymm1, %ymm15\r
+\r
+ vaddsubpd %ymm9, %ymm8 , %ymm8\r
+ vaddsubpd %ymm11,%ymm10, %ymm10\r
+ vaddsubpd %ymm13,%ymm12, %ymm12\r
+ vaddsubpd %ymm15,%ymm14, %ymm14\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddpd (CO1), %ymm8 , %ymm8\r
+ vaddpd 4 * SIZE(CO1), %ymm12, %ymm12\r
+\r
+ vaddpd (CO1, LDC), %ymm10, %ymm10\r
+ vaddpd 4 * SIZE(CO1, LDC), %ymm14, %ymm14\r
+\r
+#endif\r
+\r
+ vmovups %ymm8 , (CO1)\r
+ vmovups %ymm12 , 4 * SIZE(CO1)\r
+\r
+ vmovups %ymm10 , (CO1, LDC)\r
+ vmovups %ymm14 , 4 * SIZE(CO1, LDC)\r
+\r
+ prefetcht0 64(CO1)\r
+ prefetcht0 64(CO1, LDC)\r
+\r
+.endm\r
+\r
+/***************************************************************************************************/\r
+\r
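+// KERNEL2x2_SUB and KERNEL1x2_SUB below are xmm-width variants of the same\r
+// scheme for the M-remainder cases (2 and 1 rows); vmovddup broadcasts the\r
+// real or imaginary part of B into both 64-bit halves of an xmm register.\r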
+.macro KERNEL2x2_SUB\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vmovddup -8 * SIZE(BO, BI, SIZE), %xmm4\r
+ vmovups -6 * SIZE(AO, %rax, SIZE), %xmm1\r
+ VFMADDPD_R( %xmm8,%xmm4,%xmm0 )\r
+ VFMADDPD_R( %xmm12,%xmm4,%xmm1 )\r
+ vmovddup -7 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPD_I( %xmm9,%xmm5,%xmm0 )\r
+ VFMADDPD_I( %xmm13,%xmm5,%xmm1 )\r
+ vmovddup -6 * SIZE(BO, BI, SIZE), %xmm6\r
+ VFMADDPD_R( %xmm10,%xmm6,%xmm0 )\r
+ VFMADDPD_R( %xmm14,%xmm6,%xmm1 )\r
+ vmovddup -5 * SIZE(BO, BI, SIZE), %xmm7\r
+ VFMADDPD_I( %xmm11,%xmm7,%xmm0 )\r
+ VFMADDPD_I( %xmm15,%xmm7,%xmm1 )\r
+ addq $4, BI \r
+ addq $4, %rax \r
+.endm\r
+\r
+.macro SAVE2x2\r
+\r
+ vmovddup ALPHA_R, %xmm0\r
+ vmovddup ALPHA_I, %xmm1\r
+\r
+        // swap high and low 64 bits\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+ vshufpd $0x01, %xmm11, %xmm11, %xmm11\r
+ vshufpd $0x01, %xmm13, %xmm13, %xmm13\r
+ vshufpd $0x01, %xmm15, %xmm15, %xmm15\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubpd %xmm9, %xmm8 , %xmm8\r
+ vaddsubpd %xmm11,%xmm10, %xmm10\r
+ vaddsubpd %xmm13,%xmm12, %xmm12\r
+ vaddsubpd %xmm15,%xmm14, %xmm14\r
+\r
+ vshufpd $0x01, %xmm8 , %xmm8, %xmm9\r
+ vshufpd $0x01, %xmm10, %xmm10, %xmm11\r
+ vshufpd $0x01, %xmm12, %xmm12, %xmm13\r
+ vshufpd $0x01, %xmm14, %xmm14, %xmm15\r
+\r
+#else\r
+ vaddsubpd %xmm8, %xmm9 ,%xmm9\r
+ vaddsubpd %xmm10, %xmm11,%xmm11\r
+ vaddsubpd %xmm12, %xmm13,%xmm13\r
+ vaddsubpd %xmm14, %xmm15,%xmm15\r
+\r
+ vmovapd %xmm9, %xmm8\r
+ vmovapd %xmm11, %xmm10\r
+ vmovapd %xmm13, %xmm12\r
+ vmovapd %xmm15, %xmm14\r
+\r
+        // swap high and low 64 bits\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+ vshufpd $0x01, %xmm11, %xmm11, %xmm11\r
+ vshufpd $0x01, %xmm13, %xmm13, %xmm13\r
+ vshufpd $0x01, %xmm15, %xmm15, %xmm15\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulpd %xmm8 , %xmm0, %xmm8\r
+ vmulpd %xmm10, %xmm0, %xmm10\r
+ vmulpd %xmm12, %xmm0, %xmm12\r
+ vmulpd %xmm14, %xmm0, %xmm14\r
+\r
+ // multiply with ALPHA_I\r
+ vmulpd %xmm9 , %xmm1, %xmm9\r
+ vmulpd %xmm11, %xmm1, %xmm11\r
+ vmulpd %xmm13, %xmm1, %xmm13\r
+ vmulpd %xmm15, %xmm1, %xmm15\r
+\r
+ vaddsubpd %xmm9, %xmm8 , %xmm8\r
+ vaddsubpd %xmm11,%xmm10, %xmm10\r
+ vaddsubpd %xmm13,%xmm12, %xmm12\r
+ vaddsubpd %xmm15,%xmm14, %xmm14\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddpd (CO1), %xmm8 , %xmm8\r
+ vaddpd 2 * SIZE(CO1), %xmm12, %xmm12\r
+\r
+ vaddpd (CO1, LDC), %xmm10, %xmm10\r
+ vaddpd 2 * SIZE(CO1, LDC), %xmm14, %xmm14\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+ vmovups %xmm12 , 2 * SIZE(CO1)\r
+\r
+ vmovups %xmm10 , (CO1, LDC)\r
+ vmovups %xmm14 , 2 * SIZE(CO1, LDC)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
+\r
+/************************************************************************************************/\r
+\r
+\r
+.macro KERNEL1x2_SUB\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vmovddup -8 * SIZE(BO, BI, SIZE), %xmm4\r
+ vmovddup -7 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPD_R( %xmm8,%xmm4,%xmm0 )\r
+ VFMADDPD_I( %xmm9,%xmm5,%xmm0 )\r
+ vmovddup -6 * SIZE(BO, BI, SIZE), %xmm6\r
+ vmovddup -5 * SIZE(BO, BI, SIZE), %xmm7\r
+ VFMADDPD_R( %xmm10,%xmm6,%xmm0 )\r
+ VFMADDPD_I( %xmm11,%xmm7,%xmm0 )\r
+ addq $4, BI \r
+ addq $2, %rax \r
+.endm\r
+\r
+.macro SAVE1x2\r
+\r
+ vmovddup ALPHA_R, %xmm0\r
+ vmovddup ALPHA_I, %xmm1\r
+\r
+        // swap high and low 64 bits\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+ vshufpd $0x01, %xmm11, %xmm11, %xmm11\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubpd %xmm9, %xmm8 , %xmm8\r
+ vaddsubpd %xmm11,%xmm10, %xmm10\r
+\r
+ vshufpd $0x01, %xmm8 , %xmm8, %xmm9\r
+ vshufpd $0x01, %xmm10, %xmm10, %xmm11\r
+\r
+#else\r
+ vaddsubpd %xmm8, %xmm9, %xmm9\r
+ vaddsubpd %xmm10,%xmm11, %xmm11\r
+\r
+ vmovapd %xmm9, %xmm8\r
+ vmovapd %xmm11, %xmm10\r
+\r
+        // swap high and low 64 bits\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+ vshufpd $0x01, %xmm11, %xmm11, %xmm11\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulpd %xmm8 , %xmm0, %xmm8\r
+ vmulpd %xmm10, %xmm0, %xmm10\r
+\r
+ // multiply with ALPHA_I\r
+ vmulpd %xmm9 , %xmm1, %xmm9\r
+ vmulpd %xmm11, %xmm1, %xmm11\r
+\r
+ vaddsubpd %xmm9, %xmm8 , %xmm8\r
+ vaddsubpd %xmm11,%xmm10, %xmm10\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddpd (CO1), %xmm8 , %xmm8\r
+ vaddpd (CO1, LDC), %xmm10, %xmm10\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+ vmovups %xmm10 , (CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+/************************************************************************************************/\r
+\r
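+// KERNEL4x1_SUB and the 2x1/1x1 variants below handle the last column when\r
+// N is odd: only one complex value of B (one real and one imaginary\r
+// broadcast) is needed per update.\r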
+.macro KERNEL4x1_SUB\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm1\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE) , %ymm4\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE) , %ymm5\r
+ VFMADDPD_R( %ymm8 ,%ymm4,%ymm0 )\r
+ VFMADDPD_R( %ymm12,%ymm4,%ymm1 )\r
+ VFMADDPD_I( %ymm9 ,%ymm5,%ymm0 )\r
+ VFMADDPD_I( %ymm13,%ymm5,%ymm1 )\r
+\r
+ addq $2, BI \r
+ addq $8, %rax \r
+.endm\r
+\r
+.macro SAVE4x1\r
+\r
+ vbroadcastsd ALPHA_R, %ymm0\r
+ vbroadcastsd ALPHA_I, %ymm1\r
+\r
+        // swap the two 64-bit halves of each 128-bit lane\r
+ vshufpd $0x05, %ymm9 , %ymm9, %ymm9\r
+ vshufpd $0x05, %ymm13, %ymm13, %ymm13\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubpd %ymm9, %ymm8 , %ymm8\r
+ vaddsubpd %ymm13,%ymm12 , %ymm12\r
+\r
+ vshufpd $0x05, %ymm8 , %ymm8, %ymm9\r
+ vshufpd $0x05, %ymm12, %ymm12, %ymm13\r
+\r
+#else\r
+ vaddsubpd %ymm8, %ymm9 , %ymm9\r
+ vaddsubpd %ymm12,%ymm13, %ymm13\r
+\r
+ vmovapd %ymm9, %ymm8\r
+ vmovapd %ymm13, %ymm12\r
+\r
+        // swap the two 64-bit halves of each 128-bit lane\r
+ vshufpd $0x05, %ymm9 , %ymm9, %ymm9\r
+ vshufpd $0x05, %ymm13, %ymm13, %ymm13\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulpd %ymm8 , %ymm0, %ymm8\r
+ vmulpd %ymm12, %ymm0, %ymm12\r
+\r
+ // multiply with ALPHA_I\r
+ vmulpd %ymm9 , %ymm1, %ymm9\r
+ vmulpd %ymm13, %ymm1, %ymm13\r
+\r
+ vaddsubpd %ymm9, %ymm8 , %ymm8\r
+ vaddsubpd %ymm13, %ymm12, %ymm12\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddpd (CO1), %ymm8 , %ymm8\r
+ vaddpd 4 * SIZE(CO1), %ymm12, %ymm12\r
+\r
+#endif\r
+\r
+ vmovups %ymm8 , (CO1)\r
+ vmovups %ymm12 ,4 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+\r
+/************************************************************************************************/\r
+\r
+.macro KERNEL2x1_SUB\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPD_R( %xmm8,%xmm4,%xmm0 )\r
+ vmovups -6 * SIZE(AO, %rax, SIZE), %xmm1\r
+ VFMADDPD_R( %xmm12,%xmm4,%xmm1 )\r
+ vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPD_I( %xmm9,%xmm5,%xmm0 )\r
+ VFMADDPD_I( %xmm13,%xmm5,%xmm1 )\r
+ addq $2, BI \r
+ addq $4, %rax \r
+.endm\r
+\r
+.macro SAVE2x1\r
+\r
+ vmovddup ALPHA_R, %xmm0\r
+ vmovddup ALPHA_I, %xmm1\r
+\r
+        // swap high and low 64 bits\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+ vshufpd $0x01, %xmm13, %xmm13, %xmm13\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubpd %xmm9, %xmm8 , %xmm8\r
+ vaddsubpd %xmm13,%xmm12 , %xmm12\r
+\r
+ vshufpd $0x01, %xmm8 , %xmm8, %xmm9\r
+ vshufpd $0x01, %xmm12, %xmm12, %xmm13\r
+\r
+#else\r
+ vaddsubpd %xmm8, %xmm9 , %xmm9\r
+ vaddsubpd %xmm12,%xmm13, %xmm13\r
+\r
+ vmovapd %xmm9, %xmm8\r
+ vmovapd %xmm13, %xmm12\r
+\r
+        // swap high and low 64 bits\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+ vshufpd $0x01, %xmm13, %xmm13, %xmm13\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulpd %xmm8 , %xmm0, %xmm8\r
+ vmulpd %xmm12, %xmm0, %xmm12\r
+\r
+ // multiply with ALPHA_I\r
+ vmulpd %xmm9 , %xmm1, %xmm9\r
+ vmulpd %xmm13, %xmm1, %xmm13\r
+\r
+ vaddsubpd %xmm9, %xmm8 , %xmm8\r
+ vaddsubpd %xmm13, %xmm12, %xmm12\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddpd (CO1), %xmm8 , %xmm8\r
+ vaddpd 2 * SIZE(CO1), %xmm12, %xmm12\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+ vmovups %xmm12 , 2 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+/************************************************************************************************/\r
+\r
+.macro KERNEL1x1_SUB\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPD_R( %xmm8,%xmm4,%xmm0 )\r
+ vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPD_I( %xmm9,%xmm5,%xmm0 )\r
+ addq $2, BI \r
+ addq $2, %rax \r
+.endm\r
+\r
+.macro SAVE1x1\r
+\r
+ vmovddup ALPHA_R, %xmm0\r
+ vmovddup ALPHA_I, %xmm1\r
+\r
+        // swap high and low 64 bits\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubpd %xmm9, %xmm8, %xmm8\r
+\r
+ vshufpd $0x01, %xmm8 , %xmm8, %xmm9\r
+\r
+#else\r
+ vaddsubpd %xmm8, %xmm9, %xmm9\r
+\r
+ vmovapd %xmm9, %xmm8\r
+\r
+        // swap high and low 64 bits\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulpd %xmm8 , %xmm0, %xmm8\r
+\r
+ // multiply with ALPHA_I\r
+ vmulpd %xmm9 , %xmm1, %xmm9\r
+\r
+ vaddsubpd %xmm9 ,%xmm8, %xmm8\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddpd (CO1), %xmm8 , %xmm8\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+\r
+.endm\r
+\r
+\r
+/************************************************************************************************/\r
+\r
+\r
+\r
+\r
+ PROLOGUE\r
+ PROFCODE\r
+ \r
+ subq $STACKSIZE, %rsp\r
+ movq %rbx, (%rsp)\r
+ movq %rbp, 8(%rsp)\r
+ movq %r12, 16(%rsp)\r
+ movq %r13, 24(%rsp)\r
+ movq %r14, 32(%rsp)\r
+ movq %r15, 40(%rsp)\r
+\r
+ vzeroupper\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq %rdi, 48(%rsp)\r
+ movq %rsi, 56(%rsp)\r
+ movups %xmm6, 64(%rsp)\r
+ movups %xmm7, 80(%rsp)\r
+ movups %xmm8, 96(%rsp)\r
+ movups %xmm9, 112(%rsp)\r
+ movups %xmm10, 128(%rsp)\r
+ movups %xmm11, 144(%rsp)\r
+ movups %xmm12, 160(%rsp)\r
+ movups %xmm13, 176(%rsp)\r
+ movups %xmm14, 192(%rsp)\r
+ movups %xmm15, 208(%rsp)\r
+\r
+ movq ARG1, OLD_M\r
+ movq ARG2, OLD_N\r
+ movq ARG3, OLD_K\r
+ movq OLD_A, A\r
+ movq OLD_B, B\r
+ movq OLD_C, C\r
+ movq OLD_LDC, LDC\r
+#ifdef TRMMKERNEL\r
+ movsd OLD_OFFSET, %xmm12\r
+#endif\r
+ vmovaps %xmm3, %xmm0\r
+\r
+#else\r
+ movq STACKSIZE + 8(%rsp), LDC\r
+#ifdef TRMMKERNEL\r
+ movsd STACKSIZE + 16(%rsp), %xmm12\r
+#endif\r
+\r
+#endif\r
+\r
+ movq %rsp, SP # save old stack\r
+ subq $128 + L_BUFFER_SIZE, %rsp\r
+ andq $-4096, %rsp # align stack\r
+\r
+ STACK_TOUCH\r
+\r
+ cmpq $0, OLD_M\r
+ je .L999\r
+\r
+ cmpq $0, OLD_N\r
+ je .L999\r
+\r
+ cmpq $0, OLD_K\r
+ je .L999\r
+\r
+ movq OLD_M, M\r
+ movq OLD_N, N\r
+ movq OLD_K, K\r
+\r
+ vmovsd %xmm0, ALPHA_R\r
+ vmovsd %xmm1, ALPHA_I\r
+\r
+ salq $ZBASE_SHIFT, LDC\r
+\r
+ movq N, %rax\r
+ xorq %rdx, %rdx\r
+ movq $2, %rdi\r
+ divq %rdi // N / 2\r
+ movq %rax, Ndiv6 // N / 2\r
+ movq %rdx, Nmod6 // N % 2\r
+\r
+ \r
+\r
+#ifdef TRMMKERNEL\r
+ vmovsd %xmm12, OFFSET\r
+ vmovsd %xmm12, KK\r
+#ifndef LEFT\r
+ negq KK\r
+#endif \r
+#endif\r
+\r
+.L2_00_0:\r
+\r
+ movq Ndiv6, J\r
+ cmpq $0, J\r
+ je .L1_2_0\r
+ ALIGN_4\r
+\r
+\r
+\r
+.L2_00_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
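+// Pack the current two columns of B into the on-stack BUFFER1: each pass\r
+// of the loop copies one row of the 2-column strip (2 complex values,\r
+// i.e. 4 doubles), so the kernels can stream B with unit stride.\r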
+.L2_00_02b:\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups 2 * SIZE(BO1), %xmm1\r
+ vmovups %xmm0, (BO)\r
+ vmovups %xmm1, 2 * SIZE(BO)\r
+ addq $4*SIZE,BO1\r
+ addq $4*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_00_02b\r
+\r
+.L2_00_02c:\r
+\r
+ movq BO1, B // next offset of B\r
+\r
+\r
+.L2_00_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C // c += 2 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $8 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $2, I // i = (m >> 2)\r
+ je .L2_2_10\r
+\r
+ ALIGN_4\r
+\r
+/******************************************************************************************************************/\r
+\r
+.L2_4_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_4_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_12:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+\r
+ je .L2_4_16\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+\r
+ je .L2_4_16\r
+\r
+ jmp .L2_4_12\r
+ ALIGN_4\r
+\r
+.L2_4_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+        andq $7, %rax # if (k & 7)\r
+ je .L2_4_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_17:\r
+\r
+ KERNEL4x2_SUB\r
+\r
+ jl .L2_4_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_4_19:\r
+\r
+ SAVE4x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ decq I # i --\r
+ jg .L2_4_11\r
+ ALIGN_4 \r
+\r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+\r
+\r
+/******************************************************************************************************************/\r
+.L2_2_10:\r
+ testq $2, M \r
+        jz .L2_2_40 // to rest of M\r
+\r
+.L2_2_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_2_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_2_12:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ je .L2_2_16\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ je .L2_2_16\r
+\r
+ jmp .L2_2_12\r
+ ALIGN_4\r
+\r
+.L2_2_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+        andq $7, %rax # if (k & 7)\r
+ je .L2_2_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_2_17:\r
+\r
+ KERNEL2x2_SUB\r
+\r
+ jl .L2_2_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_2_19:\r
+\r
+ SAVE2x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4 \r
+\r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L2_2_40:\r
+ testq $1, M \r
+ jz .L2_2_60 // to next 2 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L2_2_41:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_2_46\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_2_42:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ je .L2_2_46\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ je .L2_2_46\r
+\r
+ jmp .L2_2_42\r
+ ALIGN_4\r
+\r
+.L2_2_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+        andq $7, %rax # if (k & 7)\r
+ je .L2_2_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_2_47:\r
+\r
+ KERNEL1x2_SUB\r
+\r
+ jl .L2_2_47\r
+ ALIGN_4\r
+\r
+\r
+.L2_2_49:\r
+\r
+ SAVE1x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK\r
+#endif\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ decq I # i --\r
+ jg .L2_2_41\r
+ ALIGN_4 \r
+\r
+\r
+\r
+ \r
+.L2_2_60:\r
+#if defined(TRMMKERNEL) && !defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ decq J // j --\r
+ jg .L2_00_01 // next 2 lines of N\r
+\r
+\r
+\r
+.L1_2_0:\r
+\r
+/************************************************************************************************\r
+* Loop for Nmod6 % 2 > 0\r
+*************************************************************************************************/\r
+\r
+ movq Nmod6, J \r
+ andq $1, J // j % 2\r
+ je .L999\r
+ ALIGN_4\r
+\r
+.L1_00_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
+.L1_00_02b:\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups %xmm0, (BO)\r
+ addq $2*SIZE,BO1\r
+ addq $2*SIZE,BO\r
+ decq %rax\r
+ jnz .L1_00_02b\r
+\r
+.L1_00_02c:\r
+\r
+ movq BO1, B // next offset of B\r
+\r
+.L1_00_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 1), C // c += 1 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $8 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $2, I // i = (m >> 2)\r
+ je .L1_2_10\r
+\r
+ ALIGN_4\r
+\r
+/*******************************************************************************************************/\r
+\r
+\r
+.L1_4_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_4_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_12:\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ je .L1_4_16\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ je .L1_4_16\r
+\r
+ jmp .L1_4_12\r
+ ALIGN_4\r
+\r
+.L1_4_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+        andq $7, %rax # if (k & 7)\r
+ je .L1_4_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_17:\r
+\r
+ KERNEL4x1_SUB\r
+\r
+ jl .L1_4_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_4_19:\r
+\r
+ SAVE4x1\r
+\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ decq I # i --\r
+ jg .L1_4_11\r
+ ALIGN_4 \r
+\r
+\r
+\r
+\r
+/*******************************************************************************************************/\r
+.L1_2_10:\r
+ testq $2, M \r
+ jz .L1_2_40\r
+\r
+\r
+.L1_2_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_2_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_2_12:
+
+ prefetcht0 A_PR1(AO,%rax,SIZE)
+ prefetcht0 B_PR1(BO,BI,SIZE)
+ KERNEL2x1_SUB
+ KERNEL2x1_SUB
+ prefetcht0 A_PR1(AO,%rax,SIZE)
+ KERNEL2x1_SUB
+ KERNEL2x1_SUB
+
+ prefetcht0 B_PR1(BO,BI,SIZE)
+ KERNEL2x1_SUB
+ KERNEL2x1_SUB
+ prefetcht0 A_PR1(AO,%rax,SIZE)
+ KERNEL2x1_SUB
+ KERNEL2x1_SUB
+
+ je .L1_2_16
+
+ prefetcht0 B_PR1(BO,BI,SIZE)
+ KERNEL2x1_SUB
+ KERNEL2x1_SUB
+ prefetcht0 A_PR1(AO,%rax,SIZE)
+ KERNEL2x1_SUB
+ KERNEL2x1_SUB
+
+ prefetcht0 B_PR1(BO,BI,SIZE)
+ KERNEL2x1_SUB
+ KERNEL2x1_SUB
+ prefetcht0 A_PR1(AO,%rax,SIZE)
+ KERNEL2x1_SUB
+ KERNEL2x1_SUB
+
+ je .L1_2_16
+
+ jmp .L1_2_12
+ ALIGN_4
+
+.L1_2_16:
+#ifndef TRMMKERNEL
+ movq K, %rax
+#else
+ movq KKK, %rax
+#endif
+
+ andq $7, %rax # if (k & 7)
+ je .L1_2_19
+
+ movq %rax, BI // Index for BO
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values
+
+ salq $2, %rax // rax = rax * 4 ; number of values
+ leaq (AO, %rax, SIZE), AO
+ leaq (BO, BI, SIZE), BO
+ negq BI
+ negq %rax
+ ALIGN_4
+
+.L1_2_17:
+
+ KERNEL2x1_SUB
+
+ jl .L1_2_17
+ ALIGN_4
+
+
+.L1_2_19:
+
+ SAVE2x1
+
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
+ movq K, %rax
+ subq KKK, %rax
+ movq %rax, BI // Index for BO
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values
+ leaq (BO, BI, SIZE), BO
+ salq $2, %rax // rax = rax * 4 ; number of values
+ leaq (AO, %rax, SIZE), AO
+#endif
+
+
+#if defined(TRMMKERNEL) && defined(LEFT)
+ addq $2, KK
+#endif
+
+ addq $4 * SIZE, CO1 # coffset += 4
+
+ ALIGN_4
+
+
+/**************************************************************************
+* Rest of M
+***************************************************************************/
+.L1_2_40:
+ testq $1, M
+ jz .L999
+
+ ALIGN_4
+
+.L1_2_41:
+
+#if !defined(TRMMKERNEL) || \
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
+ leaq BUFFER1, BO // first buffer to BO
+ addq $4 * SIZE, BO
+#else
+ movq KK, %rax
+ leaq BUFFER1, BO // first buffer to BO
+ addq $4 * SIZE, BO
+ movq %rax, BI // Index for BO
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values
+ leaq (BO, BI, SIZE), BO
+ salq $1, %rax // rax = rax * 2 ; number of values
+ leaq (AO, %rax, SIZE), AO
+#endif
+
+ vzeroall
+
+#ifndef TRMMKERNEL
+ movq K, %rax
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
+ movq K, %rax
+ subq KK, %rax
+ movq %rax, KKK
+#else
+ movq KK, %rax
+#ifdef LEFT
+ addq $1, %rax // number of values in AO
+#else
+ addq $1, %rax // number of values in BO
+#endif
+ movq %rax, KKK
+#endif
+
+
+ andq $-8, %rax // K = K - ( K % 8 )
+ je .L1_2_46
+ movq %rax, BI // Index for BO
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values
+
+ salq $1, %rax // rax = rax * 2 ; number of values
+ leaq (AO, %rax, SIZE), AO
+ leaq (BO, BI, SIZE), BO
+ negq BI
+ negq %rax
+ ALIGN_4
+
+.L1_2_42:
+
+ prefetcht0 A_PR1(AO,%rax,SIZE)
+ prefetcht0 B_PR1(BO,BI,SIZE)
+ KERNEL1x1_SUB
+ KERNEL1x1_SUB
+ KERNEL1x1_SUB
+ KERNEL1x1_SUB
+
+ prefetcht0 A_PR1(AO,%rax,SIZE)
+ prefetcht0 B_PR1(BO,BI,SIZE)
+ KERNEL1x1_SUB
+ KERNEL1x1_SUB
+ KERNEL1x1_SUB
+ KERNEL1x1_SUB
+
+ je .L1_2_46
+
+ prefetcht0 A_PR1(AO,%rax,SIZE)
+ prefetcht0 B_PR1(BO,BI,SIZE)
+ KERNEL1x1_SUB
+ KERNEL1x1_SUB
+ KERNEL1x1_SUB
+ KERNEL1x1_SUB
+
+ prefetcht0 A_PR1(AO,%rax,SIZE)
+ prefetcht0 B_PR1(BO,BI,SIZE)
+ KERNEL1x1_SUB
+ KERNEL1x1_SUB
+ KERNEL1x1_SUB
+ KERNEL1x1_SUB
+
+ je .L1_2_46
+
+ jmp .L1_2_42
+ ALIGN_4
+
+.L1_2_46:
+#ifndef TRMMKERNEL
+ movq K, %rax
+#else
+ movq KKK, %rax
+#endif
+
+ andq $7, %rax # if (k & 7)
+ je .L1_2_49
+
+ movq %rax, BI // Index for BO
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values
+
+ salq $1, %rax // rax = rax * 2 ; number of values
+ leaq (AO, %rax, SIZE), AO
+ leaq (BO, BI, SIZE), BO
+ negq BI
+ negq %rax
+ ALIGN_4
+
+.L1_2_47:
+
+ KERNEL1x1_SUB
+
+ jl .L1_2_47
+ ALIGN_4
+
+
+.L1_2_49:
+
+ SAVE1x1
+
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
+ movq K, %rax
+ subq KKK, %rax
+ movq %rax, BI // Index for BO
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values
+ leaq (BO, BI, SIZE), BO
+ salq $1, %rax // rax = rax * 2 ; number of values
+ leaq (AO, %rax, SIZE), AO
+#endif
+
+
+#if defined(TRMMKERNEL) && defined(LEFT)
+ addq $1, KK
+#endif
+
+ addq $2 * SIZE, CO1 # coffset += 2
+ decq I # i --
+ jg .L1_2_41
+ ALIGN_4
+
+
+
+
+
+
+.L999:
+ movq SP, %rsp
+ movq (%rsp), %rbx
+ movq 8(%rsp), %rbp
+ movq 16(%rsp), %r12
+ movq 24(%rsp), %r13
+ movq 32(%rsp), %r14
+ movq 40(%rsp), %r15
+
+#ifdef WINDOWS_ABI
+ movq 48(%rsp), %rdi
+ movq 56(%rsp), %rsi
+ movups 64(%rsp), %xmm6
+ movups 80(%rsp), %xmm7
+ movups 96(%rsp), %xmm8
+ movups 112(%rsp), %xmm9
+ movups 128(%rsp), %xmm10
+ movups 144(%rsp), %xmm11
+ movups 160(%rsp), %xmm12
+ movups 176(%rsp), %xmm13
+ movups 192(%rsp), %xmm14
+ movups 208(%rsp), %xmm15
+#endif
+
+ addq $STACKSIZE, %rsp
+ ret
+
+ EPILOGUE
#define PREFETCHSIZE (16 * 24)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHW prefetcht0
#define PREFETCHSIZE (16 * 24)
#define PREFETCHSIZE (16 * 24)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHW prefetcht0
#define PREFETCHSIZE (16 * 24)
#define PREFETCHSIZE (16 * 24)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHW prefetcht0
#define PREFETCHSIZE (16 * 24)
#define PREFETCHSIZE (16 * 24)
#endif
-#if defined(NEHALEM) || defined(SANDYBRIDGE)
+#if defined(NEHALEM) || defined(NEHALEM_OPTIMIZATION)
#define PREFETCH prefetcht0
#define PREFETCHW prefetcht0
#define PREFETCHSIZE (16 * 24)
#endif
+#ifdef HASWELL
+
+#define SNUMOPT 8
+#define DNUMOPT 4
+
+#define GEMM_DEFAULT_OFFSET_A 0
+#define GEMM_DEFAULT_OFFSET_B 0
+#define GEMM_DEFAULT_ALIGN 0x03fffUL
+
+#define SYMV_P 8
+
+#define SWITCH_RATIO 4
+
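+/* The UNROLL_M/N values below are the register-blocking shape of the
+   GEMM micro-kernel: on x86_64 this selects 16x4 for SGEMM, 4x4 for
+   DGEMM, 8x2 for CGEMM and 4x2 for ZGEMM. */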
+#ifdef ARCH_X86
+
+#define SGEMM_DEFAULT_UNROLL_M 4
+#define DGEMM_DEFAULT_UNROLL_M 2
+#define QGEMM_DEFAULT_UNROLL_M 2
+#define CGEMM_DEFAULT_UNROLL_M 2
+#define ZGEMM_DEFAULT_UNROLL_M 1
+#define XGEMM_DEFAULT_UNROLL_M 1
+
+#define SGEMM_DEFAULT_UNROLL_N 4
+#define DGEMM_DEFAULT_UNROLL_N 4
+#define QGEMM_DEFAULT_UNROLL_N 2
+#define CGEMM_DEFAULT_UNROLL_N 2
+#define ZGEMM_DEFAULT_UNROLL_N 2
+#define XGEMM_DEFAULT_UNROLL_N 1
+
+#else
+
+#define SGEMM_DEFAULT_UNROLL_M 16
+#define DGEMM_DEFAULT_UNROLL_M 4
+#define QGEMM_DEFAULT_UNROLL_M 2
+#define CGEMM_DEFAULT_UNROLL_M 8
+#define ZGEMM_DEFAULT_UNROLL_M 4
+#define XGEMM_DEFAULT_UNROLL_M 1
+
+#define SGEMM_DEFAULT_UNROLL_N 4
+#define DGEMM_DEFAULT_UNROLL_N 4
+#define QGEMM_DEFAULT_UNROLL_N 2
+#define CGEMM_DEFAULT_UNROLL_N 2
+#define ZGEMM_DEFAULT_UNROLL_N 2
+#define XGEMM_DEFAULT_UNROLL_N 1
+
+#endif
+
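+/* P, Q and R are the cache-blocking sizes used by the level-3 drivers:
+   P blocks the M direction, Q the K direction (a P x Q panel of A is
+   packed to stay cache-resident) and R the N direction. */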
+#ifdef ARCH_X86
+
+#define SGEMM_DEFAULT_P 512
+#define SGEMM_DEFAULT_R sgemm_r
+#define DGEMM_DEFAULT_P 512
+#define DGEMM_DEFAULT_R dgemm_r
+#define QGEMM_DEFAULT_P 504
+#define QGEMM_DEFAULT_R qgemm_r
+#define CGEMM_DEFAULT_P 128
+#define CGEMM_DEFAULT_R 1024
+#define ZGEMM_DEFAULT_P 512
+#define ZGEMM_DEFAULT_R zgemm_r
+#define XGEMM_DEFAULT_P 252
+#define XGEMM_DEFAULT_R xgemm_r
+#define SGEMM_DEFAULT_Q 256
+#define DGEMM_DEFAULT_Q 256
+#define QGEMM_DEFAULT_Q 128
+#define CGEMM_DEFAULT_Q 256
+#define ZGEMM_DEFAULT_Q 192
+#define XGEMM_DEFAULT_Q 128
+
+#else
+
+#define SGEMM_DEFAULT_P 768
+#define DGEMM_DEFAULT_P 512
+#define CGEMM_DEFAULT_P 384
+#define ZGEMM_DEFAULT_P 256
+
+#define SGEMM_DEFAULT_Q 384
+#define DGEMM_DEFAULT_Q 256
+#define CGEMM_DEFAULT_Q 192
+#define ZGEMM_DEFAULT_Q 128
+
+#define SGEMM_DEFAULT_R sgemm_r
+//#define DGEMM_DEFAULT_R dgemm_r
+#define DGEMM_DEFAULT_R 13824
+#define CGEMM_DEFAULT_R cgemm_r
+#define ZGEMM_DEFAULT_R zgemm_r
+
+#define QGEMM_DEFAULT_Q 128
+#define QGEMM_DEFAULT_P 504
+#define QGEMM_DEFAULT_R qgemm_r
+#define XGEMM_DEFAULT_P 252
+#define XGEMM_DEFAULT_R xgemm_r
+#define XGEMM_DEFAULT_Q 128
+
+#define CGEMM3M_DEFAULT_UNROLL_N 4
+#define CGEMM3M_DEFAULT_UNROLL_M 8
+#define ZGEMM3M_DEFAULT_UNROLL_N 2
+#define ZGEMM3M_DEFAULT_UNROLL_M 8
+#endif
+
+
+#endif
+
#ifdef ATOM