--- /dev/null
+/*********************************************************************/\r
+/* Copyright 2009, 2010 The University of Texas at Austin. */\r
+/* All rights reserved. */\r
+/* */\r
+/* Redistribution and use in source and binary forms, with or */\r
+/* without modification, are permitted provided that the following */\r
+/* conditions are met: */\r
+/* */\r
+/* 1. Redistributions of source code must retain the above */\r
+/* copyright notice, this list of conditions and the following */\r
+/* disclaimer. */\r
+/* */\r
+/* 2. Redistributions in binary form must reproduce the above */\r
+/* copyright notice, this list of conditions and the following */\r
+/* disclaimer in the documentation and/or other materials */\r
+/* provided with the distribution. */\r
+/* */\r
+/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */\r
+/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */\r
+/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */\r
+/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */\r
+/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */\r
+/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */\r
+/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */\r
+/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */\r
+/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */\r
+/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */\r
+/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */\r
+/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */\r
+/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\r
+/* POSSIBILITY OF SUCH DAMAGE. */\r
+/* */\r
+/* The views and conclusions contained in the software and */\r
+/* documentation are those of the authors and should not be */\r
+/* interpreted as representing official policies, either expressed */\r
+/* or implied, of The University of Texas at Austin. */\r
+/*********************************************************************/\r
+\r
+/*********************************************************************\r
+* 2013/08/15 Saar\r
+* Parameters:\r
+* DGEMM_DEFAULT_UNROLL_N 3\r
+* DGEMM_DEFAULT_UNROLL_M 16\r
+* DGEMM_DEFAULT_P 384\r
+* DGEMM_DEFAULT_Q 168\r
+*\r
+* BLASTEST: OK\r
+*\r
+* Performance:\r
+* 1 thread: 2.31 times faster than the sandybridge kernel\r
+* 4 threads: 2.26 times faster than the sandybridge kernel\r
+*\r
+* Compiles for FMA3: OK\r
+*\r
+*********************************************************************/\r
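+\r
+/*********************************************************************\r
+* Overview (a sketch; the C below is illustrative only and not part\r
+* of the build): this kernel computes the double precision update\r
+* C := alpha * A * B + C, blocked as 16x3 tiles of C held in ymm\r
+* registers. One k-step of a 16x3 tile is roughly:\r
+*\r
+*   for (int j = 0; j < 3; j++)      // b: packed B, 3 values per k\r
+*     for (int i = 0; i < 16; i++)   // a: packed A, 16 values per k\r
+*       acc[i][j] += a[i] * b[j];    // one FMA lane each\r
+*\r
+* acc lives in ymm4..ymm15 (four doubles per register), a is loaded\r
+* into ymm0, and b is broadcast into ymm1..ymm3.\r
+*********************************************************************/\r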
+\r
+\r
+#define ASSEMBLER\r
+#include "common.h"\r
+ \r
+#define OLD_M %rdi\r
+#define OLD_N %rsi\r
+#define M %r13\r
+#define J %r14\r
+#define OLD_K %rdx\r
+\r
+#define A %rcx\r
+#define B %r8\r
+#define C %r9\r
+#define LDC %r10\r
+ \r
+#define I %r11\r
+#define AO %rdi\r
+#define BO %rsi\r
+#define CO1 %r15\r
+#define K %r12\r
+#define BI %rbp\r
+#define SP %rbx\r
+\r
+#define BO1 %rdi\r
+#define BO2 %r15\r
+\r
+#ifndef WINDOWS_ABI\r
+\r
+#define STACKSIZE 96\r
+\r
+#else\r
+\r
+#define STACKSIZE 256\r
+\r
+#define OLD_A 40 + STACKSIZE(%rsp)\r
+#define OLD_B 48 + STACKSIZE(%rsp)\r
+#define OLD_C 56 + STACKSIZE(%rsp)\r
+#define OLD_LDC 64 + STACKSIZE(%rsp)\r
+#define OLD_OFFSET 72 + STACKSIZE(%rsp)\r
+\r
+#endif\r
+\r
+#define L_BUFFER_SIZE 512*8*4\r
+#define LB2_OFFSET 512*8*2\r
+\r
+#define Ndiv6 24(%rsp)\r
+#define Nmod6 32(%rsp)\r
+#define N 40(%rsp)\r
+#define ALPHA 48(%rsp)\r
+#define OFFSET 56(%rsp)\r
+#define KK 64(%rsp)\r
+#define KKK 72(%rsp)\r
+#define BUFFER1 128(%rsp)\r
+#define BUFFER2 LB2_OFFSET+128(%rsp)\r
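+\r
+/* Local frame layout (offsets from the 4096-byte aligned %rsp):\r
+   scalars such as Ndiv6, N, ALPHA and the TRMM offsets occupy the\r
+   first 128 bytes; BUFFER1 and BUFFER2 follow, LB2_OFFSET bytes\r
+   each, and hold the two repacked 3-column panels of B. */\r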
+\r
+#if defined(OS_WINDOWS)\r
+#if L_BUFFER_SIZE > 16384\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 4(%rsp);\\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 12288\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 8192\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 4096\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 1(%rsp);\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
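+\r
+/* Windows commits stack pages one guard page at a time, so a frame\r
+   larger than 4 KB must touch every page before use, nearest page\r
+   first. STACK_TOUCH stores one dummy dword per 4 KB page of the\r
+   buffer area; as a sketch in C (given char *rsp):\r
+\r
+       for (long off = 4 * 4096; off >= 4096; off -= 4096)\r
+           *(volatile int *)(rsp + off) = 0;\r
+*/\r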
+\r
+#if defined(BULLDOZER)\r
+\r
+.macro VFMADD231PD_ y0,y1,y2\r
+ vfmaddpd \y0,\y1,\y2,\y0\r
+.endm\r
+\r
+.macro VFMADD231SD_ x0,x1,x2\r
+ vfmaddsd \x0,\x1,\x2,\x0\r
+.endm\r
+\r
+#else\r
+\r
+.macro VFMADD231PD_ y0,y1,y2\r
+	vfmadd231pd	\y2,\y1,\y0\r
+.endm\r
+\r
+.macro VFMADD231SD_ x0,x1,x2\r
+	vfmadd231sd	\x2,\x1,\x0\r
+.endm\r
+\r
+#endif\r
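+\r
+/* Both variants accumulate into their first argument:\r
+   VFMADD231PD_ acc, b, a computes acc = b * a + acc with a single\r
+   rounding. BULLDOZER only provides the 4-operand FMA4 form\r
+   (vfmaddpd); all newer targets use FMA3 (vfmadd231pd), where the\r
+   231 encoding means dst = src2 * src3 + dst in Intel operand\r
+   order. */\r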
+\r
+\r
+#define A_PR1 384\r
+#define B_PR1 192\r
+\r
+/*******************************************************************************************\r
+* 3 lines of N\r
+*******************************************************************************************/\r
+\r
+.macro KERNEL16x3_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 64+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm12,%ymm3,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm1\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm15,%ymm3,%ymm0\r
+.endm\r
+\r
+\r
+\r
+\r
+.macro KERNEL16x3_2\r
+ prefetcht0 128+A_PR1(AO, %rax, SIZE)\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ prefetcht0 A_PR1+64(AO,%rax,SIZE)\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ prefetcht0 192+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm12,%ymm3,%ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm15,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x3_3\r
+ prefetcht0 256+A_PR1(AO, %rax, SIZE)\r
+ vmovups 0 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups 4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 320+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ vmovups 8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm12,%ymm3,%ymm0\r
+ vmovups 12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm1\r
+ vbroadcastsd 4 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm15,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x3_4\r
+ prefetcht0 384+A_PR1(AO, %rax, SIZE)\r
+ vmovups 16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd 5 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups 20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 448+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ vmovups 24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ addq $12, BI \r
+ VFMADD231PD_ %ymm12,%ymm3,%ymm0\r
+ vmovups 28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ addq $64, %rax \r
+ VFMADD231PD_ %ymm15,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x3_SUB\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm12,%ymm3,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm15,%ymm3,%ymm0\r
+ addq $3 , BI \r
+ addq $16, %rax \r
+.endm\r
+\r
+.macro SAVE16x3\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+ vmulpd %ymm0 , %ymm10, %ymm10\r
+ vmulpd %ymm0 , %ymm13, %ymm13\r
+\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+ vmulpd %ymm0 , %ymm8 , %ymm8\r
+ vmulpd %ymm0 , %ymm11, %ymm11\r
+ vmulpd %ymm0 , %ymm14, %ymm14\r
+\r
+ vmulpd %ymm0 , %ymm6 , %ymm6\r
+ vmulpd %ymm0 , %ymm9 , %ymm9\r
+ vmulpd %ymm0 , %ymm12, %ymm12\r
+ vmulpd %ymm0 , %ymm15, %ymm15\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd 4 * SIZE(CO1), %ymm7,%ymm7\r
+ vaddpd 8 * SIZE(CO1), %ymm10,%ymm10\r
+ vaddpd 12 * SIZE(CO1), %ymm13,%ymm13\r
+\r
+ vaddpd (CO1, LDC), %ymm5,%ymm5\r
+ vaddpd 4 * SIZE(CO1, LDC), %ymm8,%ymm8\r
+ vaddpd 8 * SIZE(CO1, LDC), %ymm11,%ymm11\r
+ vaddpd 12 * SIZE(CO1, LDC), %ymm14,%ymm14\r
+\r
+ vaddpd (CO1, LDC, 2), %ymm6,%ymm6\r
+ vaddpd 4 * SIZE(CO1, LDC, 2), %ymm9,%ymm9\r
+ vaddpd 8 * SIZE(CO1, LDC, 2), %ymm12,%ymm12\r
+ vaddpd 12 * SIZE(CO1, LDC, 2), %ymm15,%ymm15\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm7 , 4 * SIZE(CO1)\r
+ vmovups %ymm10, 8 * SIZE(CO1)\r
+ vmovups %ymm13,12 * SIZE(CO1)\r
+\r
+ vmovups %ymm5 , (CO1, LDC)\r
+ vmovups %ymm8 , 4 * SIZE(CO1, LDC)\r
+ vmovups %ymm11, 8 * SIZE(CO1, LDC)\r
+ vmovups %ymm14,12 * SIZE(CO1, LDC)\r
+\r
+ vmovups %ymm6 , (CO1, LDC, 2)\r
+ vmovups %ymm9 , 4 * SIZE(CO1, LDC, 2)\r
+ vmovups %ymm12, 8 * SIZE(CO1, LDC, 2)\r
+ vmovups %ymm15,12 * SIZE(CO1, LDC, 2)\r
+\r
+.endm\r
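+\r
+/* SAVE16x3 scales the 48 accumulators by alpha and adds the current\r
+   contents of C; the load/add pair is compiled out for the TRMM\r
+   kernel, which overwrites C. As a sketch in C:\r
+\r
+       for (int j = 0; j < 3; j++)\r
+           for (int i = 0; i < 16; i++)\r
+               C[i + j*ldc] = alpha * acc[i][j] + C[i + j*ldc];\r
+*/\r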
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL8x3_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x3_2\r
+ prefetcht0 64+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x3_3\r
+ prefetcht0 128+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x3_4\r
+ prefetcht0 192+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 4 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd 5 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ addq $12, BI\r
+ addq $32, %rax\r
+.endm\r
+\r
+.macro KERNEL8x3_SUB\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ VFMADD231PD_ %ymm9,%ymm3,%ymm0\r
+ addq $3 , BI\r
+ addq $8 , %rax\r
+.endm\r
+\r
+.macro SAVE8x3\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+ vmulpd %ymm0 , %ymm8 , %ymm8\r
+\r
+ vmulpd %ymm0 , %ymm6 , %ymm6\r
+ vmulpd %ymm0 , %ymm9 , %ymm9\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd 4 * SIZE(CO1), %ymm7,%ymm7\r
+\r
+ vaddpd (CO1, LDC), %ymm5,%ymm5\r
+ vaddpd 4 * SIZE(CO1, LDC), %ymm8,%ymm8\r
+\r
+ vaddpd (CO1, LDC, 2), %ymm6,%ymm6\r
+ vaddpd 4 * SIZE(CO1, LDC, 2), %ymm9,%ymm9\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm7 , 4 * SIZE(CO1)\r
+\r
+ vmovups %ymm5 , (CO1, LDC)\r
+ vmovups %ymm8 , 4 * SIZE(CO1, LDC)\r
+\r
+ vmovups %ymm6 , (CO1, LDC, 2)\r
+ vmovups %ymm9 , 4 * SIZE(CO1, LDC, 2)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL4x3_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x3_2\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x3_3\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x3_4\r
+ vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 4 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd 5 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ addq $12, BI\r
+ addq $16, %rax\r
+.endm\r
+\r
+.macro KERNEL4x3_SUB\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3\r
+ VFMADD231PD_ %ymm6,%ymm3,%ymm0\r
+ addq $3 , BI\r
+ addq $4 , %rax\r
+.endm\r
+\r
+.macro SAVE4x3\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+ vmulpd %ymm0 , %ymm6 , %ymm6\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd (CO1, LDC), %ymm5,%ymm5\r
+ vaddpd (CO1, LDC, 2), %ymm6,%ymm6\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm5 , (CO1, LDC)\r
+ vmovups %ymm6 , (CO1, LDC, 2)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL2x3_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vmovsd -6 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -5 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ VFMADD231SD_ %xmm12,%xmm3,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x3_2\r
+ vmovsd -3 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -1 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ VFMADD231SD_ %xmm12,%xmm3,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x3_3\r
+ vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -28 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 1 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd 2 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ vmovsd -27 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ VFMADD231SD_ %xmm12,%xmm3,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x3_4\r
+ vmovsd 3 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -26 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 4 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd 5 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ vmovsd -25 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ VFMADD231SD_ %xmm12,%xmm3,%xmm0\r
+ addq $12, BI\r
+ addq $8, %rax\r
+.endm\r
+\r
+.macro KERNEL2x3_SUB\r
+ vmovsd -6 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -5 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ VFMADD231SD_ %xmm12,%xmm3,%xmm0\r
+ addq $3 , BI\r
+ addq $2 , %rax\r
+.endm\r
+\r
+.macro SAVE2x3\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm8 , %xmm8\r
+ vmulsd %xmm0 , %xmm5 , %xmm5\r
+ vmulsd %xmm0 , %xmm10, %xmm10\r
+ vmulsd %xmm0 , %xmm6 , %xmm6\r
+ vmulsd %xmm0 , %xmm12, %xmm12\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4,%xmm4\r
+ vaddsd 1 * SIZE(CO1), %xmm8,%xmm8\r
+ vaddsd (CO1, LDC), %xmm5,%xmm5\r
+ vaddsd 1 * SIZE(CO1, LDC), %xmm10,%xmm10\r
+ vaddsd (CO1, LDC, 2), %xmm6,%xmm6\r
+ vaddsd 1 * SIZE(CO1, LDC, 2), %xmm12,%xmm12\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm8 , 1 * SIZE(CO1)\r
+ vmovsd %xmm5 , (CO1, LDC)\r
+ vmovsd %xmm10, 1 * SIZE(CO1, LDC)\r
+ vmovsd %xmm6 , (CO1, LDC, 2)\r
+ vmovsd %xmm12, 1 * SIZE(CO1, LDC, 2)\r
+\r
+.endm\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL1x3_1\r
+ vmovsd -6 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -5 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x3_2\r
+ vmovsd -3 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -1 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x3_3\r
+ vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 1 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd 2 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x3_4\r
+ vmovsd 3 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 4 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd 5 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ addq $12, BI\r
+ addq $4, %rax\r
+.endm\r
+\r
+.macro KERNEL1x3_SUB\r
+ vmovsd -6 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -5 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm3\r
+ VFMADD231SD_ %xmm6,%xmm3,%xmm0\r
+ addq $3 , BI\r
+ addq $1 , %rax\r
+.endm\r
+\r
+.macro SAVE1x3\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm5 , %xmm5\r
+ vmulsd %xmm0 , %xmm6 , %xmm6\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4,%xmm4\r
+ vaddsd (CO1, LDC), %xmm5,%xmm5\r
+ vaddsd (CO1, LDC, 2), %xmm6,%xmm6\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm5 , (CO1, LDC)\r
+ vmovsd %xmm6 , (CO1, LDC, 2)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+/*******************************************************************************************\r
+* 2 lines of N\r
+*******************************************************************************************/\r
+\r
+.macro KERNEL16x2_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 64+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x2_2\r
+ prefetcht0 128+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 192+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x2_3\r
+ prefetcht0 256+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups 0 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups 4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 320+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ vmovups 8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ vmovups 12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x2_4\r
+ prefetcht0 384+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups 16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups 20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ prefetcht0 448+A_PR1(AO, %rax, SIZE)\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ vmovups 24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ vmovups 28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ addq $8, BI\r
+ addq $64, %rax\r
+.endm\r
+\r
+.macro KERNEL16x2_SUB\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm11,%ymm2,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm14,%ymm2,%ymm0\r
+ addq $2, BI\r
+ addq $16, %rax\r
+.endm\r
+\r
+.macro SAVE16x2\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+ vmulpd %ymm0 , %ymm10, %ymm10\r
+ vmulpd %ymm0 , %ymm13, %ymm13\r
+\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+ vmulpd %ymm0 , %ymm8 , %ymm8\r
+ vmulpd %ymm0 , %ymm11, %ymm11\r
+ vmulpd %ymm0 , %ymm14, %ymm14\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd 4 * SIZE(CO1), %ymm7,%ymm7\r
+ vaddpd 8 * SIZE(CO1), %ymm10,%ymm10\r
+ vaddpd 12 * SIZE(CO1), %ymm13,%ymm13\r
+\r
+ vaddpd (CO1, LDC), %ymm5,%ymm5\r
+ vaddpd 4 * SIZE(CO1, LDC), %ymm8,%ymm8\r
+ vaddpd 8 * SIZE(CO1, LDC), %ymm11,%ymm11\r
+ vaddpd 12 * SIZE(CO1, LDC), %ymm14,%ymm14\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm7 , 4 * SIZE(CO1)\r
+ vmovups %ymm10, 8 * SIZE(CO1)\r
+ vmovups %ymm13,12 * SIZE(CO1)\r
+\r
+ vmovups %ymm5 , (CO1, LDC)\r
+ vmovups %ymm8 , 4 * SIZE(CO1, LDC)\r
+ vmovups %ymm11, 8 * SIZE(CO1, LDC)\r
+ vmovups %ymm14,12 * SIZE(CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL8x2_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x2_2\r
+ prefetcht0 64+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x2_3\r
+ prefetcht0 128+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x2_4\r
+ prefetcht0 192+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ addq $8, BI \r
+ addq $32, %rax \r
+.endm\r
+\r
+.macro KERNEL8x2_SUB\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ VFMADD231PD_ %ymm8,%ymm2,%ymm0\r
+ addq $2, BI \r
+ addq $8 , %rax \r
+.endm\r
+\r
+.macro SAVE8x2\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+ vmulpd %ymm0 , %ymm8 , %ymm8\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd 4 * SIZE(CO1), %ymm7,%ymm7\r
+\r
+ vaddpd (CO1, LDC), %ymm5,%ymm5\r
+ vaddpd 4 * SIZE(CO1, LDC), %ymm8,%ymm8\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm7 , 4 * SIZE(CO1)\r
+\r
+ vmovups %ymm5 , (CO1, LDC)\r
+ vmovups %ymm8 , 4 * SIZE(CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL4x2_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x2_2\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x2_3\r
+ prefetcht0 64+A_PR1(AO, %rax, SIZE)\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x2_4\r
+ vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ addq $8, BI \r
+ addq $16, %rax \r
+.endm\r
+\r
+.macro KERNEL4x2_SUB\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2\r
+ VFMADD231PD_ %ymm5,%ymm2,%ymm0\r
+ addq $2, BI \r
+ addq $4 , %rax \r
+.endm\r
+\r
+.macro SAVE4x2\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm5 , %ymm5\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd (CO1, LDC), %ymm5,%ymm5\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm5 , (CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL2x2_1\r
+ prefetcht0 A_PR1(AO, %rax, SIZE)\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -3 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x2_2\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -1 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x2_3\r
+ vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -28 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 1 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -27 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x2_4\r
+ vmovsd 2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -26 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 3 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -25 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ addq $8, BI \r
+ addq $8, %rax \r
+.endm\r
+\r
+.macro KERNEL2x2_SUB\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -3 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ VFMADD231SD_ %xmm10,%xmm2,%xmm0\r
+ addq $2, BI \r
+ addq $2, %rax \r
+.endm\r
+\r
+.macro SAVE2x2\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm8 , %xmm8\r
+ vmulsd %xmm0 , %xmm5 , %xmm5\r
+ vmulsd %xmm0 , %xmm10, %xmm10\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4,%xmm4\r
+ vaddsd 1 * SIZE(CO1), %xmm8,%xmm8\r
+ vaddsd (CO1, LDC), %xmm5,%xmm5\r
+ vaddsd 1 * SIZE(CO1, LDC), %xmm10,%xmm10\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm8 , 1 * SIZE(CO1)\r
+ vmovsd %xmm5 , (CO1, LDC)\r
+ vmovsd %xmm10, 1 * SIZE(CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL1x2_1\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -3 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x2_2\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -1 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x2_3\r
+ vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 1 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x2_4\r
+ vmovsd 2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd 3 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ addq $8, BI \r
+ addq $4, %rax \r
+.endm\r
+\r
+.macro KERNEL1x2_SUB\r
+ vmovsd -4 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -3 * SIZE(BO, BI, SIZE), %xmm2\r
+ VFMADD231SD_ %xmm5,%xmm2,%xmm0\r
+ addq $2, BI \r
+ addq $1, %rax \r
+.endm\r
+\r
+.macro SAVE1x2\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm5 , %xmm5\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4,%xmm4\r
+ vaddsd (CO1, LDC), %xmm5,%xmm5\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm5 , (CO1, LDC)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+/*******************************************************************************************\r
+* 1 line of N\r
+*******************************************************************************************/\r
+\r
+.macro KERNEL16x1_1\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x1_2\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x1_3\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups 0 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups 4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ vmovups 8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ vmovups 12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL16x1_4\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups 16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups 20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ vmovups 24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ vmovups 28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ addq $4, BI \r
+ addq $64, %rax \r
+.endm\r
+\r
+.macro KERNEL16x1_SUB\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm10,%ymm1,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm13,%ymm1,%ymm0\r
+ addq $1, BI \r
+ addq $16, %rax \r
+.endm\r
+\r
+.macro SAVE16x1\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+ vmulpd %ymm0 , %ymm10, %ymm10\r
+ vmulpd %ymm0 , %ymm13, %ymm13\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd 4 * SIZE(CO1), %ymm7,%ymm7\r
+ vaddpd 8 * SIZE(CO1), %ymm10,%ymm10\r
+ vaddpd 12 * SIZE(CO1), %ymm13,%ymm13\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm7 , 4 * SIZE(CO1)\r
+ vmovups %ymm10, 8 * SIZE(CO1)\r
+ vmovups %ymm13,12 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL8x1_1\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x1_2\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x1_3\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL8x1_4\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ addq $4, BI \r
+ addq $32, %rax \r
+.endm\r
+\r
+.macro KERNEL8x1_SUB\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm7,%ymm1,%ymm0\r
+ addq $1, BI \r
+ addq $8 , %rax \r
+.endm\r
+\r
+.macro SAVE8x1\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+ vmulpd %ymm0 , %ymm7 , %ymm7\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+ vaddpd 4 * SIZE(CO1), %ymm7,%ymm7\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+ vmovups %ymm7 , 4 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL4x1_1\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x1_2\r
+ vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x1_3\r
+ vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+.endm\r
+\r
+.macro KERNEL4x1_4\r
+ vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ addq $4, BI \r
+ addq $16, %rax \r
+.endm\r
+\r
+.macro KERNEL4x1_SUB\r
+ vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1\r
+ vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0\r
+ VFMADD231PD_ %ymm4,%ymm1,%ymm0\r
+ addq $1, BI \r
+ addq $4 , %rax \r
+.endm\r
+\r
+.macro SAVE4x1\r
+\r
+ vbroadcastsd ALPHA, %ymm0\r
+\r
+ vmulpd %ymm0 , %ymm4 , %ymm4\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddpd (CO1), %ymm4,%ymm4\r
+\r
+#endif\r
+\r
+ vmovups %ymm4 , (CO1)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL2x1_1\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x1_2\r
+ vmovsd -1 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x1_3\r
+ vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -28 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -27 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+.endm\r
+\r
+.macro KERNEL2x1_4\r
+ vmovsd 1 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -26 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -25 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ addq $4, BI \r
+ addq $8, %rax \r
+.endm\r
+\r
+.macro KERNEL2x1_SUB\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm8,%xmm1,%xmm0\r
+ addq $1, BI \r
+ addq $2 , %rax \r
+.endm\r
+\r
+.macro SAVE2x1\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+ vmulsd %xmm0 , %xmm8 , %xmm8\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4,%xmm4\r
+ vaddsd 1 * SIZE(CO1), %xmm8,%xmm8\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+ vmovsd %xmm8 , 1 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
+\r
+.macro KERNEL1x1_1\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x1_2\r
+ vmovsd -1 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x1_3\r
+ vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+.endm\r
+\r
+.macro KERNEL1x1_4\r
+ vmovsd 1 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ addq $4, BI \r
+ addq $4, %rax \r
+.endm\r
+\r
+.macro KERNEL1x1_SUB\r
+ vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1\r
+ vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0\r
+ VFMADD231SD_ %xmm4,%xmm1,%xmm0\r
+ addq $1, BI \r
+ addq $1 , %rax \r
+.endm\r
+\r
+.macro SAVE1x1\r
+\r
+ vmovsd ALPHA, %xmm0\r
+\r
+ vmulsd %xmm0 , %xmm4 , %xmm4\r
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+ vaddsd (CO1), %xmm4,%xmm4\r
+\r
+#endif\r
+\r
+ vmovsd %xmm4 , (CO1)\r
+\r
+.endm\r
+\r
+\r
+/*******************************************************************************************/\r
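+\r
+/* Code below: prologue and argument handling for both ABIs, then\r
+   the N/6 outer loop; labels .L6_* and .L7_* cover the two 3-column\r
+   halves of each 6-column block, and the N % 6 tail columns are\r
+   handled from .L2_0 onward. */\r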
+\r
+#if !defined(TRMMKERNEL)\r
+\r
+\r
+ PROLOGUE\r
+ PROFCODE\r
+ \r
+ subq $STACKSIZE, %rsp\r
+ movq %rbx, (%rsp)\r
+ movq %rbp, 8(%rsp)\r
+ movq %r12, 16(%rsp)\r
+ movq %r13, 24(%rsp)\r
+ movq %r14, 32(%rsp)\r
+ movq %r15, 40(%rsp)\r
+\r
+ vzeroupper\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq %rdi, 48(%rsp)\r
+ movq %rsi, 56(%rsp)\r
+ movups %xmm6, 64(%rsp)\r
+ movups %xmm7, 80(%rsp)\r
+ movups %xmm8, 96(%rsp)\r
+ movups %xmm9, 112(%rsp)\r
+ movups %xmm10, 128(%rsp)\r
+ movups %xmm11, 144(%rsp)\r
+ movups %xmm12, 160(%rsp)\r
+ movups %xmm13, 176(%rsp)\r
+ movups %xmm14, 192(%rsp)\r
+ movups %xmm15, 208(%rsp)\r
+\r
+ movq ARG1, OLD_M\r
+ movq ARG2, OLD_N\r
+ movq ARG3, OLD_K\r
+ movq OLD_A, A\r
+ movq OLD_B, B\r
+ movq OLD_C, C\r
+ movq OLD_LDC, LDC\r
+\r
+ vmovaps %xmm3, %xmm0\r
+\r
+#else\r
+ movq STACKSIZE + 8(%rsp), LDC\r
+\r
+#endif\r
+\r
+ movq %rsp, SP # save old stack\r
+ subq $128 + L_BUFFER_SIZE, %rsp\r
+ andq $-4096, %rsp # align stack\r
+\r
+ STACK_TOUCH\r
+\r
+ cmpq $0, OLD_M\r
+ je .L999\r
+\r
+ cmpq $0, OLD_N\r
+ je .L999\r
+\r
+ cmpq $0, OLD_K\r
+ je .L999\r
+\r
+ movq OLD_M, M\r
+ movq OLD_N, N\r
+ movq OLD_K, K\r
+\r
+ vmovsd %xmm0, ALPHA\r
+\r
+ salq $BASE_SHIFT, LDC\r
+\r
+ movq N, %rax\r
+ xorq %rdx, %rdx\r
+ movq $6, %rdi\r
+ divq %rdi // N / 6\r
+ movq %rax, Ndiv6 // N / 6\r
+ movq %rdx, Nmod6 // N % 6\r
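+\r
+	// Each J iteration consumes 6 columns of B, repacked into two\r
+	// 3-wide panels (BUFFER1 and BUFFER2) that are processed by two\r
+	// 16x3 passes (.L6_10 and .L7_10); the remaining N % 6 columns\r
+	// are handled by the 2- and 1-column kernels.\r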
+\r
+\r
+ movq Ndiv6, J\r
+ cmpq $0, J\r
+ je .L2_0\r
+ ALIGN_4\r
+\r
+.L6_01:\r
+ // copy to sub buffer\r
+ movq K, %rax\r
+ salq $1,%rax // K * 2 ; read 2 values\r
+ movq B, BO1\r
+ leaq (B,%rax, SIZE), BO2 // next offset to BO2\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ sarq $3 , %rax // K / 8\r
+ jz .L6_01a_2\r
+ ALIGN_4\r
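+\r
+/* Packing pass 1 (a sketch): B holds K x 2 panels; this loop\r
+   interleaves columns 0 and 1 (from BO1) with column 2 (from BO2)\r
+   into BUFFER1 as K consecutive triples, unrolled by 8. In C:\r
+\r
+       for (long l = 0; l < K; l++) {\r
+           buffer1[3*l + 0] = bo1[2*l + 0];   // column 0\r
+           buffer1[3*l + 1] = bo1[2*l + 1];   // column 1\r
+           buffer1[3*l + 2] = bo2[2*l + 0];   // column 2\r
+       }\r
+*/\r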
+\r
+.L6_01a_1:\r
+\r
+ prefetcht0 512(BO1)\r
+ prefetcht0 512(BO2)\r
+ prefetchw 512(BO)\r
+\r
+\r
+ vmovups 0 * SIZE(BO1), %xmm0\r
+ vmovups 2 * SIZE(BO1), %xmm2\r
+ vmovups 4 * SIZE(BO1), %xmm4\r
+ vmovups 6 * SIZE(BO1), %xmm6\r
+ vmovsd 0 * SIZE(BO2), %xmm1\r
+ vmovsd 2 * SIZE(BO2), %xmm3\r
+ vmovsd 4 * SIZE(BO2), %xmm5\r
+ vmovsd 6 * SIZE(BO2), %xmm7\r
+ vmovups %xmm0, 0*SIZE(BO)\r
+ vmovsd %xmm1, 2*SIZE(BO)\r
+ vmovups %xmm2, 3*SIZE(BO)\r
+ vmovsd %xmm3, 5*SIZE(BO)\r
+ vmovups %xmm4, 6*SIZE(BO)\r
+ vmovsd %xmm5, 8*SIZE(BO)\r
+ vmovups %xmm6, 9*SIZE(BO)\r
+ vmovsd %xmm7,11*SIZE(BO)\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO2\r
+ addq $12*SIZE,BO\r
+\r
+ vmovups 0 * SIZE(BO1), %xmm0\r
+ vmovups 2 * SIZE(BO1), %xmm2\r
+ vmovups 4 * SIZE(BO1), %xmm4\r
+ vmovups 6 * SIZE(BO1), %xmm6\r
+ vmovsd 0 * SIZE(BO2), %xmm1\r
+ vmovsd 2 * SIZE(BO2), %xmm3\r
+ vmovsd 4 * SIZE(BO2), %xmm5\r
+ vmovsd 6 * SIZE(BO2), %xmm7\r
+ vmovups %xmm0, 0*SIZE(BO)\r
+ vmovsd %xmm1, 2*SIZE(BO)\r
+ vmovups %xmm2, 3*SIZE(BO)\r
+ vmovsd %xmm3, 5*SIZE(BO)\r
+ vmovups %xmm4, 6*SIZE(BO)\r
+ vmovsd %xmm5, 8*SIZE(BO)\r
+ vmovups %xmm6, 9*SIZE(BO)\r
+ vmovsd %xmm7,11*SIZE(BO)\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO2\r
+ addq $12*SIZE,BO\r
+\r
+ decq %rax\r
+ jnz .L6_01a_1\r
+\r
+\r
+\r
+.L6_01a_2:\r
+\r
+ movq K, %rax\r
+ andq $7, %rax // K % 8\r
+ jz .L6_02c\r
+ ALIGN_4\r
+\r
+\r
+.L6_02b:\r
+\r
+ vmovups 0 * SIZE(BO1), %xmm0\r
+ vmovsd 0 * SIZE(BO2), %xmm2\r
+ vmovups %xmm0, 0*SIZE(BO)\r
+ vmovsd %xmm2, 2*SIZE(BO)\r
+ addq $2*SIZE,BO1\r
+ addq $2*SIZE,BO2\r
+ addq $3*SIZE,BO\r
+ decq %rax\r
+ jnz .L6_02b\r
+\r
+.L6_02c:\r
+\r
+ movq K, %rax\r
+ salq $1,%rax // K * 2\r
+ leaq (B,%rax, SIZE), BO1 // next offset to BO1\r
+ leaq (BO1,%rax, SIZE), BO2 // next offset to BO2\r
+ leaq BUFFER2, BO // second buffer to BO\r
+ movq K, %rax\r
+ sarq $3 , %rax // K / 8\r
+ jz .L6_02c_2\r
+ ALIGN_4\r
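+\r
+/* Packing pass 2 (a sketch): BO1 now points at the K x 2 panel\r
+   holding columns 2 and 3, BO2 at the panel holding columns 4 and\r
+   5; BUFFER2 receives columns 3, 4 and 5 as K triples. In C:\r
+\r
+       for (long l = 0; l < K; l++) {\r
+           buffer2[3*l + 0] = bo1[2*l + 1];   // column 3\r
+           buffer2[3*l + 1] = bo2[2*l + 0];   // column 4\r
+           buffer2[3*l + 2] = bo2[2*l + 1];   // column 5\r
+       }\r
+*/\r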
+\r
+.L6_02c_1:\r
+\r
+ prefetcht0 512(BO2)\r
+ prefetchw 512(BO)\r
+\r
+ vmovups 0 * SIZE(BO2), %xmm0\r
+ vmovups 2 * SIZE(BO2), %xmm2\r
+ vmovups 4 * SIZE(BO2), %xmm4\r
+ vmovups 6 * SIZE(BO2), %xmm6\r
+ vmovsd 1 * SIZE(BO1), %xmm1\r
+ vmovsd 3 * SIZE(BO1), %xmm3\r
+ vmovsd 5 * SIZE(BO1), %xmm5\r
+ vmovsd 7 * SIZE(BO1), %xmm7\r
+ vmovsd %xmm1, 0*SIZE(BO)\r
+ vmovups %xmm0, 1*SIZE(BO)\r
+ vmovsd %xmm3, 3*SIZE(BO)\r
+ vmovups %xmm2, 4*SIZE(BO)\r
+ vmovsd %xmm5, 6*SIZE(BO)\r
+ vmovups %xmm4, 7*SIZE(BO)\r
+ vmovsd %xmm7, 9*SIZE(BO)\r
+ vmovups %xmm6,10*SIZE(BO)\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO2\r
+ addq $12*SIZE,BO\r
+\r
+\r
+ vmovups 0 * SIZE(BO2), %xmm0\r
+ vmovups 2 * SIZE(BO2), %xmm2\r
+ vmovups 4 * SIZE(BO2), %xmm4\r
+ vmovups 6 * SIZE(BO2), %xmm6\r
+ vmovsd 1 * SIZE(BO1), %xmm1\r
+ vmovsd 3 * SIZE(BO1), %xmm3\r
+ vmovsd 5 * SIZE(BO1), %xmm5\r
+ vmovsd 7 * SIZE(BO1), %xmm7\r
+ vmovsd %xmm1, 0*SIZE(BO)\r
+ vmovups %xmm0, 1*SIZE(BO)\r
+ vmovsd %xmm3, 3*SIZE(BO)\r
+ vmovups %xmm2, 4*SIZE(BO)\r
+ vmovsd %xmm5, 6*SIZE(BO)\r
+ vmovups %xmm4, 7*SIZE(BO)\r
+ vmovsd %xmm7, 9*SIZE(BO)\r
+ vmovups %xmm6,10*SIZE(BO)\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO2\r
+ addq $12*SIZE,BO\r
+\r
+ decq %rax\r
+ jnz .L6_02c_1\r
+\r
+\r
+.L6_02c_2:\r
+\r
+ movq K, %rax\r
+ andq $7, %rax // K % 8\r
+ jz .L6_03c\r
+ ALIGN_4\r
+\r
+.L6_03b:\r
+\r
+ vmovsd 1*SIZE(BO1), %xmm0\r
+ vmovups 0*SIZE(BO2), %xmm1\r
+ vmovsd %xmm0, 0*SIZE(BO)\r
+ vmovups %xmm1, 1*SIZE(BO)\r
+ addq $2*SIZE,BO1\r
+ addq $2*SIZE,BO2\r
+ addq $3*SIZE,BO\r
+ decq %rax\r
+ jnz .L6_03b\r
+\r
+\r
+.L6_03c:\r
+\r
+ movq BO2, B // next offset of B\r
+\r
+.L6_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C \r
+ leaq (C, LDC, 1), C // c += 3 * ldc\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $32 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L6_20\r
+\r
+ ALIGN_4\r
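+\r
+/* M is consumed as full 16-row tiles first; the remainder is then\r
+   peeled by testing the individual bits of M (8, 4, 2, 1 rows),\r
+   each with its own kernel and save macro. */\r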
+\r
+.L6_11:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $6 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L6_16\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
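+\r
+/* Index trick used by all the unrolled loops (a sketch): AO and BO\r
+   are first advanced past the end of the data and %rax / BI are\r
+   negated, so the same addq that steps the offsets also sets the\r
+   flags tested by je / jl. In C:\r
+\r
+       a += count;                       // one block past the end\r
+       for (long i = -count; i != 0; i += 16)\r
+           process(&a[i]);               // negative offset walks up\r
+*/\r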
+\r
+.L6_12:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x3_1\r
+ KERNEL16x3_2\r
+ KERNEL16x3_3\r
+ prefetcht0 B_PR1+64(BO,BI,8)\r
+ KERNEL16x3_4\r
+\r
+ KERNEL16x3_1\r
+ KERNEL16x3_2\r
+ prefetcht0 B_PR1+32(BO,BI,8)\r
+ KERNEL16x3_3\r
+ KERNEL16x3_4\r
+\r
+ je .L6_16\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x3_1\r
+ KERNEL16x3_2\r
+ KERNEL16x3_3\r
+ prefetcht0 B_PR1+64(BO,BI,8)\r
+ KERNEL16x3_4\r
+\r
+ KERNEL16x3_1\r
+ KERNEL16x3_2\r
+ prefetcht0 B_PR1+32(BO,BI,8)\r
+ KERNEL16x3_3\r
+ KERNEL16x3_4\r
+\r
+ je .L6_16\r
+\r
+ jmp .L6_12\r
+ ALIGN_4\r
+\r
+.L6_16:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L6_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L6_17:\r
+\r
+ KERNEL16x3_SUB\r
+\r
+ jl .L6_17\r
+ ALIGN_4\r
+\r
+\r
+.L6_19:\r
+\r
+ SAVE16x3\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L6_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L6_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+ jz .L7_10 // to next 3 lines of N\r
+\r
+ testq $8, M \r
+ jz .L6_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L6_20_1:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $6 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L6_20_6\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L6_20_2:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x3_1\r
+ KERNEL8x3_2\r
+ KERNEL8x3_3\r
+ prefetcht0 B_PR1+64(BO,BI,8)\r
+ KERNEL8x3_4\r
+\r
+ KERNEL8x3_1\r
+ KERNEL8x3_2\r
+ prefetcht0 B_PR1+32(BO,BI,8)\r
+ KERNEL8x3_3\r
+ KERNEL8x3_4\r
+\r
+ je .L6_20_6\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x3_1\r
+ KERNEL8x3_2\r
+ KERNEL8x3_3\r
+ prefetcht0 B_PR1+64(BO,BI,8)\r
+ KERNEL8x3_4\r
+\r
+ KERNEL8x3_1\r
+ KERNEL8x3_2\r
+ prefetcht0 B_PR1+32(BO,BI,8)\r
+ KERNEL8x3_3\r
+ KERNEL8x3_4\r
+\r
+ je .L6_20_6\r
+\r
+ jmp .L6_20_2\r
+ ALIGN_4\r
+\r
+.L6_20_6:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L6_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L6_20_7:\r
+\r
+ KERNEL8x3_SUB\r
+\r
+ jl .L6_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L6_20_9:\r
+\r
+ SAVE8x3\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L6_21pre:\r
+\r
+ testq $4, M \r
+ jz .L6_30\r
+ ALIGN_4\r
+\r
+.L6_21:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $6 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L6_26\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L6_22:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x3_1\r
+ KERNEL4x3_2\r
+ KERNEL4x3_3\r
+ prefetcht0 B_PR1+64(BO,BI,8)\r
+ KERNEL4x3_4\r
+\r
+ KERNEL4x3_1\r
+ KERNEL4x3_2\r
+ prefetcht0 B_PR1+32(BO,BI,8)\r
+ KERNEL4x3_3\r
+ KERNEL4x3_4\r
+\r
+ je .L6_26\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x3_1\r
+ KERNEL4x3_2\r
+ KERNEL4x3_3\r
+ prefetcht0 B_PR1+64(BO,BI,8)\r
+ KERNEL4x3_4\r
+\r
+ KERNEL4x3_1\r
+ KERNEL4x3_2\r
+ prefetcht0 B_PR1+32(BO,BI,8)\r
+ KERNEL4x3_3\r
+ KERNEL4x3_4\r
+\r
+ je .L6_26\r
+\r
+ jmp .L6_22\r
+ ALIGN_4\r
+\r
+.L6_26:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L6_29\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L6_27:\r
+\r
+ KERNEL4x3_SUB\r
+\r
+ jl .L6_27\r
+ ALIGN_4\r
+\r
+\r
+.L6_29:\r
+\r
+ SAVE4x3\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L6_30:\r
+ testq $2, M \r
+ jz .L6_40\r
+\r
+ ALIGN_4\r
+\r
+.L6_31:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $6 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L6_36\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L6_32:\r
+\r
+ KERNEL2x3_1\r
+ KERNEL2x3_2\r
+ KERNEL2x3_3\r
+ KERNEL2x3_4\r
+\r
+ KERNEL2x3_1\r
+ KERNEL2x3_2\r
+ KERNEL2x3_3\r
+ KERNEL2x3_4\r
+\r
+ je .L6_36\r
+\r
+ KERNEL2x3_1\r
+ KERNEL2x3_2\r
+ KERNEL2x3_3\r
+ KERNEL2x3_4\r
+\r
+ KERNEL2x3_1\r
+ KERNEL2x3_2\r
+ KERNEL2x3_3\r
+ KERNEL2x3_4\r
+\r
+ je .L6_36\r
+\r
+ jmp .L6_32\r
+ ALIGN_4\r
+\r
+.L6_36:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L6_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L6_37:\r
+\r
+ KERNEL2x3_SUB\r
+\r
+ jl .L6_37\r
+ ALIGN_4\r
+\r
+\r
+.L6_39:\r
+\r
+ SAVE2x3\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L6_40:\r
+ testq $1, M \r
+ jz .L7_10 // to next 3 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L6_41:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $6 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L6_46\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L6_42:\r
+\r
+ KERNEL1x3_1\r
+ KERNEL1x3_2\r
+ KERNEL1x3_3\r
+ KERNEL1x3_4\r
+\r
+ KERNEL1x3_1\r
+ KERNEL1x3_2\r
+ KERNEL1x3_3\r
+ KERNEL1x3_4\r
+\r
+ je .L6_46\r
+\r
+ KERNEL1x3_1\r
+ KERNEL1x3_2\r
+ KERNEL1x3_3\r
+ KERNEL1x3_4\r
+\r
+ KERNEL1x3_1\r
+ KERNEL1x3_2\r
+ KERNEL1x3_3\r
+ KERNEL1x3_4\r
+\r
+ je .L6_46\r
+\r
+ jmp .L6_42\r
+ ALIGN_4\r
+\r
+.L6_46:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 1)\r
+ je .L6_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L6_47:\r
+\r
+ KERNEL1x3_SUB\r
+\r
+ jl .L6_47\r
+ ALIGN_4\r
+\r
+\r
+.L6_49:\r
+\r
+ SAVE1x3\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+\r
+\r
+/***************************************************************************************************************/\r
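+// Second pass over M for this 6-wide block of N: same structure as the\r
+// .L6_xx loops above, but B is read from BUFFER2, which holds the second\r
+// three packed columns.\r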
+\r
+.L7_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C \r
+ leaq (C, LDC, 1), C // c += 3 * ldc\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $32 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L7_20\r
+\r
+ ALIGN_4\r
+\r
+.L7_11:\r
+ leaq BUFFER2, BO // second buffer to BO\r
+ addq $6 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L7_16\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L7_12:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x3_1\r
+ KERNEL16x3_2\r
+ KERNEL16x3_3\r
+ prefetcht0 B_PR1+64(BO,BI,8)\r
+ KERNEL16x3_4\r
+\r
+ KERNEL16x3_1\r
+ KERNEL16x3_2\r
+ prefetcht0 B_PR1+32(BO,BI,8)\r
+ KERNEL16x3_3\r
+ KERNEL16x3_4\r
+\r
+ je .L7_16\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x3_1\r
+ KERNEL16x3_2\r
+ KERNEL16x3_3\r
+ prefetcht0 B_PR1+64(BO,BI,8)\r
+ KERNEL16x3_4\r
+\r
+ KERNEL16x3_1\r
+ KERNEL16x3_2\r
+ prefetcht0 B_PR1+32(BO,BI,8)\r
+ KERNEL16x3_3\r
+ KERNEL16x3_4\r
+\r
+ je .L7_16\r
+\r
+ jmp .L7_12\r
+ ALIGN_4\r
+\r
+.L7_16:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L7_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L7_17:\r
+\r
+ KERNEL16x3_SUB\r
+\r
+ jl .L7_17\r
+ ALIGN_4\r
+\r
+\r
+.L7_19:\r
+\r
+ SAVE16x3\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L7_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
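+// The remainder M % 16 is peeled bit by bit: 8-, 4-, 2- and 1-row kernels\r
+// in turn, each guarded by a testq on the corresponding bit of M.\r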
+.L7_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+ jz .L7_60 // to next 3 lines of N\r
+\r
+ testq $8, M \r
+ jz .L7_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L7_20_1:\r
+ leaq BUFFER2, BO // second buffer to BO\r
+ addq $6 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L7_20_6\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L7_20_2:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x3_1\r
+ KERNEL8x3_2\r
+ KERNEL8x3_3\r
+ prefetcht0 B_PR1+64(BO,BI,8)\r
+ KERNEL8x3_4\r
+\r
+ KERNEL8x3_1\r
+ KERNEL8x3_2\r
+ prefetcht0 B_PR1+32(BO,BI,8)\r
+ KERNEL8x3_3\r
+ KERNEL8x3_4\r
+\r
+ je .L7_20_6\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x3_1\r
+ KERNEL8x3_2\r
+ KERNEL8x3_3\r
+ prefetcht0 B_PR1+64(BO,BI,8)\r
+ KERNEL8x3_4\r
+\r
+ KERNEL8x3_1\r
+ KERNEL8x3_2\r
+ prefetcht0 B_PR1+128(BO,BI,8)\r
+ KERNEL8x3_3\r
+ KERNEL8x3_4\r
+\r
+ je .L7_20_6\r
+\r
+ jmp .L7_20_2\r
+ ALIGN_4\r
+\r
+.L7_20_6:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L7_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L7_20_7:\r
+\r
+ KERNEL8x3_SUB\r
+\r
+ jl .L7_20_7\r
+ ALIGN_4\r
+\r
+.L7_20_9:\r
+\r
+ SAVE8x3\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L7_21pre:\r
+\r
+ testq $4, M \r
+ jz .L7_30\r
+ ALIGN_4\r
+\r
+.L7_21:\r
+ leaq BUFFER2, BO // second buffer to BO\r
+ addq $6 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L7_26\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L7_22:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x3_1\r
+ KERNEL4x3_2\r
+ KERNEL4x3_3\r
+ prefetcht0 B_PR1+64(BO,BI,8)\r
+ KERNEL4x3_4\r
+\r
+ KERNEL4x3_1\r
+ KERNEL4x3_2\r
+ prefetcht0 B_PR1+32(BO,BI,8)\r
+ KERNEL4x3_3\r
+ KERNEL4x3_4\r
+\r
+ je .L7_26\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x3_1\r
+ KERNEL4x3_2\r
+ KERNEL4x3_3\r
+ prefetcht0 B_PR1+64(BO,BI,8)\r
+ KERNEL4x3_4\r
+\r
+ KERNEL4x3_1\r
+ KERNEL4x3_2\r
+ prefetcht0 B_PR1+32(BO,BI,8)\r
+ KERNEL4x3_3\r
+ KERNEL4x3_4\r
+\r
+ je .L7_26\r
+\r
+ jmp .L7_22\r
+ ALIGN_4\r
+\r
+.L7_26:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L7_29\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L7_27:\r
+\r
+ KERNEL4x3_SUB\r
+\r
+ jl .L7_27\r
+ ALIGN_4\r
+\r
+\r
+.L7_29:\r
+\r
+ SAVE4x3\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L7_30:\r
+ testq $2, M \r
+ jz .L7_40\r
+\r
+ ALIGN_4\r
+\r
+.L7_31:\r
+ leaq BUFFER2, BO // second buffer to BO\r
+ addq $6 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L7_36\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L7_32:\r
+\r
+ KERNEL2x3_1\r
+ KERNEL2x3_2\r
+ KERNEL2x3_3\r
+ KERNEL2x3_4\r
+\r
+ KERNEL2x3_1\r
+ KERNEL2x3_2\r
+ KERNEL2x3_3\r
+ KERNEL2x3_4\r
+\r
+ je .L7_36\r
+\r
+ KERNEL2x3_1\r
+ KERNEL2x3_2\r
+ KERNEL2x3_3\r
+ KERNEL2x3_4\r
+\r
+ KERNEL2x3_1\r
+ KERNEL2x3_2\r
+ KERNEL2x3_3\r
+ KERNEL2x3_4\r
+\r
+ je .L7_36\r
+\r
+ jmp .L7_32\r
+ ALIGN_4\r
+\r
+.L7_36:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L7_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L7_37:\r
+\r
+ KERNEL2x3_SUB\r
+\r
+ jl .L7_37\r
+ ALIGN_4\r
+\r
+\r
+.L7_39:\r
+\r
+ SAVE2x3\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L7_40:\r
+ testq $1, M \r
+ jz .L7_60 // to next 3 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L7_41:\r
+ leaq BUFFER2, BO // second buffer to BO\r
+ addq $6 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L7_46\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L7_42:\r
+\r
+ KERNEL1x3_1\r
+ KERNEL1x3_2\r
+ KERNEL1x3_3\r
+ KERNEL1x3_4\r
+\r
+ KERNEL1x3_1\r
+ KERNEL1x3_2\r
+ KERNEL1x3_3\r
+ KERNEL1x3_4\r
+\r
+ je .L7_46\r
+\r
+ KERNEL1x3_1\r
+ KERNEL1x3_2\r
+ KERNEL1x3_3\r
+ KERNEL1x3_4\r
+\r
+ KERNEL1x3_1\r
+ KERNEL1x3_2\r
+ KERNEL1x3_3\r
+ KERNEL1x3_4\r
+\r
+ je .L7_46\r
+\r
+ jmp .L7_42\r
+ ALIGN_4\r
+\r
+.L7_46:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L7_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,2), BI // BI = BI * 3 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L7_47:\r
+\r
+ KERNEL1x3_SUB\r
+\r
+ jl .L7_47\r
+ ALIGN_4\r
+\r
+\r
+.L7_49:\r
+\r
+ SAVE1x3\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+\r
+.L7_60:\r
+\r
+ decq J // j --\r
+ jg .L6_01\r
+\r
+\r
+.L2_0:\r
+ cmpq $0, Nmod6 // N % 6 == 0\r
+ je .L999\r
+\r
+/************************************************************************************************\r
+* Loop for Nmod6 / 2 > 0\r
+*************************************************************************************************/\r
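+// Nmod6 is in the range 0..5: leftover column pairs are handled here with\r
+// the *x2 kernels (Nmod6 / 2 iterations); a final odd column, if any,\r
+// falls through to the *x1 kernels at .L1_0.\r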
+\r
+ movq Nmod6, J \r
+ sarq $1, J // j = j / 2\r
+ je .L1_0\r
+ ALIGN_4\r
+\r
+.L2_01:\r
+ // copy to sub buffer\r
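+ // Pack the 2-column B panel into the aligned BUFFER1: four rows\r
+ // (8 elements) per iteration, with a tail at .L2_02c for the\r
+ // K % 4 remaining rows.\r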
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ sarq $2, %rax // K / 4\r
+ jz .L2_01b\r
+ ALIGN_4\r
+\r
+.L2_01a:\r
+ prefetcht0 512(BO1)\r
+ prefetchw 512(BO)\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups 2*SIZE(BO1), %xmm1\r
+ vmovups 4*SIZE(BO1), %xmm2\r
+ vmovups 6*SIZE(BO1), %xmm3\r
+\r
+ vmovups %xmm0, (BO)\r
+ vmovups %xmm1, 2*SIZE(BO)\r
+ vmovups %xmm2, 4*SIZE(BO)\r
+ vmovups %xmm3, 6*SIZE(BO)\r
+\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_01a\r
+\r
+\r
+.L2_01b:\r
+\r
+ movq K, %rax\r
+ andq $3, %rax // K % 4\r
+ jz .L2_02d\r
+ ALIGN_4\r
+\r
+.L2_02c:\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups %xmm0, (BO)\r
+ addq $2*SIZE,BO1\r
+ addq $2*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_02c\r
+\r
+.L2_02d:\r
+\r
+ movq BO1, B // advance B past the copied panel\r
+\r
+.L2_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C // c += 2 * ldc\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $32 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L2_20\r
+\r
+ ALIGN_4\r
+\r
+.L2_11:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_16\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_12:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ je .L2_16\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ je .L2_16\r
+\r
+ jmp .L2_12\r
+ ALIGN_4\r
+\r
+.L2_16:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_17:\r
+\r
+ KERNEL16x2_SUB\r
+\r
+ jl .L2_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_19:\r
+\r
+ SAVE16x2\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L2_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L2_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+ jz .L2_60 // to next 2 lines of N\r
+\r
+ testq $8, M \r
+ jz .L2_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L2_20_1:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L2_20_6\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_20_2:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ je .L2_20_6\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ je .L2_20_6\r
+\r
+ jmp .L2_20_2\r
+ ALIGN_4\r
+\r
+.L2_20_6:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_20_7:\r
+\r
+ KERNEL8x2_SUB\r
+\r
+ jl .L2_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L2_20_9:\r
+\r
+ SAVE8x2\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L2_21pre:\r
+\r
+ testq $4, M \r
+ jz .L2_30\r
+ ALIGN_4\r
+\r
+.L2_21:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L2_26\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_22:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ je .L2_26\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ je .L2_26\r
+\r
+ jmp .L2_22\r
+ ALIGN_4\r
+\r
+.L2_26:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_29\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_27:\r
+\r
+ KERNEL4x2_SUB\r
+\r
+ jl .L2_27\r
+ ALIGN_4\r
+\r
+\r
+.L2_29:\r
+\r
+ SAVE4x2\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L2_30:\r
+ testq $2, M \r
+ jz .L2_40\r
+\r
+ ALIGN_4\r
+\r
+.L2_31:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L2_36\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_32:\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ je .L2_36\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ je .L2_36\r
+\r
+ jmp .L2_32\r
+ ALIGN_4\r
+\r
+.L2_36:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_37:\r
+\r
+ KERNEL2x2_SUB\r
+\r
+ jl .L2_37\r
+ ALIGN_4\r
+\r
+\r
+.L2_39:\r
+\r
+ SAVE2x2\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L2_40:\r
+ testq $1, M \r
+ jz .L2_60 // to next 2 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L2_41:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L2_46\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_42:\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ je .L2_46\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ je .L2_46\r
+\r
+ jmp .L2_42\r
+ ALIGN_4\r
+\r
+.L2_46:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_47:\r
+\r
+ KERNEL1x2_SUB\r
+\r
+ jl .L2_47\r
+ ALIGN_4\r
+\r
+\r
+.L2_49:\r
+\r
+ SAVE1x2\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+.L2_60:\r
+\r
+ decq J // j --\r
+ jg .L2_01 // next 2 lines of N\r
+\r
+\r
+\r
+.L1_0:\r
+\r
+/************************************************************************************************\r
+* Loop for Nmod6 % 2 > 0\r
+*************************************************************************************************/\r
+\r
+ movq Nmod6, J \r
+ andq $1, J // j % 2\r
+ je .L999\r
+ ALIGN_4\r
+\r
+.L1_01:\r
+ // copy to sub buffer\r
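+ // Single leftover column: copy the K elements of B into BUFFER1\r
+ // one row at a time.\r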
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
+.L1_02b:\r
+\r
+ vmovsd (BO1), %xmm0\r
+ vmovsd %xmm0, (BO)\r
+ addq $1*SIZE,BO1\r
+ addq $1*SIZE,BO\r
+ decq %rax\r
+ jnz .L1_02b\r
+\r
+.L1_02c:\r
+\r
+ movq BO1, B // advance B past the copied panel\r
+\r
+.L1_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 1), C // c += 1 * ldc\r
+\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $32 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L1_20\r
+\r
+ ALIGN_4\r
+\r
+.L1_11:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_16\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_12:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ je .L1_16\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ je .L1_16\r
+\r
+ jmp .L1_12\r
+ ALIGN_4\r
+\r
+.L1_16:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_19\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_17:\r
+\r
+ KERNEL16x1_SUB\r
+\r
+ jl .L1_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_19:\r
+\r
+ SAVE16x1\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L1_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L1_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+ jz .L999\r
+\r
+ testq $8, M \r
+ jz .L1_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L1_20_1:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L1_20_6\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_20_2:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ je .L1_20_6\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ je .L1_20_6\r
+\r
+ jmp .L1_20_2\r
+ ALIGN_4\r
+\r
+.L1_20_6:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_20_7:\r
+\r
+ KERNEL8x1_SUB\r
+\r
+ jl .L1_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L1_20_9:\r
+\r
+ SAVE8x1\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L1_21pre:\r
+\r
+ testq $4, M \r
+ jz .L1_30\r
+ ALIGN_4\r
+\r
+.L1_21:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L1_26\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_22:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ je .L1_26\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ je .L1_26\r
+\r
+ jmp .L1_22\r
+ ALIGN_4\r
+\r
+.L1_26:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_29\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_27:\r
+\r
+ KERNEL4x1_SUB\r
+\r
+ jl .L1_27\r
+ ALIGN_4\r
+\r
+\r
+.L1_29:\r
+\r
+ SAVE4x1\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L1_30:\r
+ testq $2, M \r
+ jz .L1_40\r
+\r
+ ALIGN_4\r
+\r
+.L1_31:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L1_36\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_32:\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ je .L1_36\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ je .L1_36\r
+\r
+ jmp .L1_32\r
+ ALIGN_4\r
+\r
+.L1_36:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_37:\r
+\r
+ KERNEL2x1_SUB\r
+\r
+ jl .L1_37\r
+ ALIGN_4\r
+\r
+\r
+.L1_39:\r
+\r
+ SAVE2x1\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L1_40:\r
+ testq $1, M \r
+ jz .L999\r
+\r
+ ALIGN_4\r
+\r
+.L1_41:\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+\r
+ vzeroall\r
+\r
+ movq K, %rax\r
+\r
+ andq $-8, %rax\r
+ je .L1_46\r
+ movq %rax, BI // Index for BO\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_42:\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ je .L1_46\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ je .L1_46\r
+\r
+ jmp .L1_42\r
+ ALIGN_4\r
+\r
+.L1_46:\r
+ movq K, %rax\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_49\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_47:\r
+\r
+ KERNEL1x1_SUB\r
+\r
+ jl .L1_47\r
+ ALIGN_4\r
+\r
+\r
+.L1_49:\r
+\r
+ SAVE1x1\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+.L999:\r
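+ // Common exit: restore the stack pointer saved in SP, then the\r
+ // callee-saved registers stored in the prologue.\r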
+ movq SP, %rsp\r
+ movq (%rsp), %rbx\r
+ movq 8(%rsp), %rbp\r
+ movq 16(%rsp), %r12\r
+ movq 24(%rsp), %r13\r
+ movq 32(%rsp), %r14\r
+ movq 40(%rsp), %r15\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq 48(%rsp), %rdi\r
+ movq 56(%rsp), %rsi\r
+ movups 64(%rsp), %xmm6\r
+ movups 80(%rsp), %xmm7\r
+ movups 96(%rsp), %xmm8\r
+ movups 112(%rsp), %xmm9\r
+ movups 128(%rsp), %xmm10\r
+ movups 144(%rsp), %xmm11\r
+ movups 160(%rsp), %xmm12\r
+ movups 176(%rsp), %xmm13\r
+ movups 192(%rsp), %xmm14\r
+ movups 208(%rsp), %xmm15\r
+#endif\r
+\r
+ addq $STACKSIZE, %rsp\r
+ ret\r
+\r
+ EPILOGUE\r
+\r
+\r
+#else\r
+/*************************************************************************************\r
+* TRMM Kernel\r
+*************************************************************************************/\r
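+// Same micro-kernels as the GEMM path above, with extra OFFSET / KK / KKK\r
+// bookkeeping so each tile accumulates only over the triangular part of\r
+// the operand: KK tracks the diagonal offset, KKK the effective inner\r
+// length of the current tile, and LEFT / TRANSA select which operand is\r
+// triangular and from which side the ranges are taken. Note that this\r
+// path unrolls N by 2 rather than 6.\r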
+\r
+\r
+ PROLOGUE\r
+ PROFCODE\r
+ \r
+ subq $STACKSIZE, %rsp\r
+ movq %rbx, (%rsp)\r
+ movq %rbp, 8(%rsp)\r
+ movq %r12, 16(%rsp)\r
+ movq %r13, 24(%rsp)\r
+ movq %r14, 32(%rsp)\r
+ movq %r15, 40(%rsp)\r
+\r
+ vzeroupper\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq %rdi, 48(%rsp)\r
+ movq %rsi, 56(%rsp)\r
+ movups %xmm6, 64(%rsp)\r
+ movups %xmm7, 80(%rsp)\r
+ movups %xmm8, 96(%rsp)\r
+ movups %xmm9, 112(%rsp)\r
+ movups %xmm10, 128(%rsp)\r
+ movups %xmm11, 144(%rsp)\r
+ movups %xmm12, 160(%rsp)\r
+ movups %xmm13, 176(%rsp)\r
+ movups %xmm14, 192(%rsp)\r
+ movups %xmm15, 208(%rsp)\r
+\r
+ movq ARG1, OLD_M\r
+ movq ARG2, OLD_N\r
+ movq ARG3, OLD_K\r
+ movq OLD_A, A\r
+ movq OLD_B, B\r
+ movq OLD_C, C\r
+ movq OLD_LDC, LDC\r
+#ifdef TRMMKERNEL\r
+ movsd OLD_OFFSET, %xmm12\r
+#endif\r
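+ // alpha is passed in %xmm3 under the Windows x64 calling convention\r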
+ vmovaps %xmm3, %xmm0\r
+\r
+#else\r
+ movq STACKSIZE + 8(%rsp), LDC\r
+#ifdef TRMMKERNEL\r
+ movsd STACKSIZE + 16(%rsp), %xmm12\r
+#endif\r
+\r
+#endif\r
+\r
+ movq %rsp, SP # save old stack\r
+ subq $128 + L_BUFFER_SIZE, %rsp\r
+ andq $-4096, %rsp # align stack\r
+\r
+ STACK_TOUCH\r
+\r
+ cmpq $0, OLD_M\r
+ je .L999\r
+\r
+ cmpq $0, OLD_N\r
+ je .L999\r
+\r
+ cmpq $0, OLD_K\r
+ je .L999\r
+\r
+ movq OLD_M, M\r
+ movq OLD_N, N\r
+ movq OLD_K, K\r
+\r
+ vmovsd %xmm0, ALPHA\r
+\r
+ salq $BASE_SHIFT, LDC\r
+\r
+ movq N, %rax\r
+ xorq %rdx, %rdx\r
+ movq $2, %rdi\r
+ divq %rdi // N / 2\r
+ movq %rax, Ndiv6 // N / 2 (this path unrolls N by 2; name kept from the 6-unrolled GEMM path)\r
+ movq %rdx, Nmod6 // N % 2\r
+\r
+ \r
+\r
+#ifdef TRMMKERNEL\r
+ vmovsd %xmm12, OFFSET\r
+ vmovsd %xmm12, KK\r
+#ifndef LEFT\r
+ negq KK\r
+#endif \r
+#endif\r
+\r
+ movq Ndiv6, J\r
+ cmpq $0, J\r
+ je .L1_0\r
+ ALIGN_4\r
+\r
+.L2_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ sarq $2, %rax // K / 4\r
+ jz .L2_01b\r
+ ALIGN_4\r
+\r
+.L2_01a:\r
+ prefetcht0 512(BO1)\r
+ prefetchw 512(BO)\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups 2*SIZE(BO1), %xmm1\r
+ vmovups 4*SIZE(BO1), %xmm2\r
+ vmovups 6*SIZE(BO1), %xmm3\r
+\r
+ vmovups %xmm0, (BO)\r
+ vmovups %xmm1, 2*SIZE(BO)\r
+ vmovups %xmm2, 4*SIZE(BO)\r
+ vmovups %xmm3, 6*SIZE(BO)\r
+\r
+ addq $8*SIZE,BO1\r
+ addq $8*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_01a\r
+\r
+\r
+.L2_01b:\r
+\r
+ movq K, %rax\r
+ andq $3, %rax // K % 4\r
+ jz .L2_02d\r
+ ALIGN_4\r
+\r
+.L2_02c:\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups %xmm0, (BO)\r
+ addq $2*SIZE,BO1\r
+ addq $2*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_02c\r
+\r
+.L2_02d:\r
+\r
+ movq BO1, B // advance B past the copied panel\r
+\r
+.L2_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C // c += 2 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $32 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L2_20\r
+\r
+ ALIGN_4\r
+\r
+.L2_11:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $16, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
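+ // In the TRMM cases %rax (saved to KKK) is the effective inner length\r
+ // for this tile: either K - KK, or KK plus the tile dimension,\r
+ // depending on which side of the diagonal still contributes.\r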
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_16\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_12:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ je .L2_16\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x2_1\r
+ KERNEL16x2_2\r
+ KERNEL16x2_3\r
+ KERNEL16x2_4\r
+\r
+ je .L2_16\r
+\r
+ jmp .L2_12\r
+ ALIGN_4\r
+\r
+.L2_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_17:\r
+\r
+ KERNEL16x2_SUB\r
+\r
+ jl .L2_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_19:\r
+\r
+ SAVE16x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
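+ // For TRMM, the block above advances AO and BO past the K - KKK unused\r
+ // packed values so they point at the start of the next panels.\r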
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $16, KK \r
+#endif\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L2_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L2_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+ jz .L2_60 // to next 2 lines of N\r
+\r
+ testq $8, M \r
+ jz .L2_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L2_20_1:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $8, %rax // number of values in A\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L2_20_6\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_20_2:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ je .L2_20_6\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x2_1\r
+ KERNEL8x2_2\r
+ KERNEL8x2_3\r
+ KERNEL8x2_4\r
+\r
+ je .L2_20_6\r
+\r
+ jmp .L2_20_2\r
+ ALIGN_4\r
+\r
+.L2_20_6:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_20_7:\r
+\r
+ KERNEL8x2_SUB\r
+\r
+ jl .L2_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L2_20_9:\r
+\r
+ SAVE8x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $8, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L2_21pre:\r
+\r
+ testq $4, M \r
+ jz .L2_30\r
+ ALIGN_4\r
+\r
+.L2_21:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in A\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L2_26\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_22:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ je .L2_26\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x2_1\r
+ KERNEL4x2_2\r
+ KERNEL4x2_3\r
+ KERNEL4x2_4\r
+\r
+ je .L2_26\r
+\r
+ jmp .L2_22\r
+ ALIGN_4\r
+\r
+.L2_26:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_29\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_27:\r
+\r
+ KERNEL4x2_SUB\r
+\r
+ jl .L2_27\r
+ ALIGN_4\r
+\r
+\r
+.L2_29:\r
+\r
+ SAVE4x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L2_30:\r
+ testq $2, M \r
+ jz .L2_40\r
+\r
+ ALIGN_4\r
+\r
+.L2_31:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L2_36\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_32:\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ je .L2_36\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ KERNEL2x2_1\r
+ KERNEL2x2_2\r
+ KERNEL2x2_3\r
+ KERNEL2x2_4\r
+\r
+ je .L2_36\r
+\r
+ jmp .L2_32\r
+ ALIGN_4\r
+\r
+.L2_36:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_37:\r
+\r
+ KERNEL2x2_SUB\r
+\r
+ jl .L2_37\r
+ ALIGN_4\r
+\r
+\r
+.L2_39:\r
+\r
+ SAVE2x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L2_40:\r
+ testq $1, M \r
+ jz .L2_60 // to next 2 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L2_41:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ andq $-8, %rax\r
+ je .L2_46\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_42:\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ je .L2_46\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ KERNEL1x2_1\r
+ KERNEL1x2_2\r
+ KERNEL1x2_3\r
+ KERNEL1x2_4\r
+\r
+ je .L2_46\r
+\r
+ jmp .L2_42\r
+ ALIGN_4\r
+\r
+.L2_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L2_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_47:\r
+\r
+ KERNEL1x2_SUB\r
+\r
+ jl .L2_47\r
+ ALIGN_4\r
+\r
+\r
+.L2_49:\r
+\r
+ SAVE1x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BI,BI,1), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO \r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK\r
+#endif\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+\r
+\r
+ \r
+.L2_60:\r
+#if defined(TRMMKERNEL) && !defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ decq J // j --\r
+ jg .L2_01 // next 2 lines of N\r
+\r
+\r
+\r
+.L1_0:\r
+\r
+/************************************************************************************************\r
+* Loop for Nmod6 % 2 > 0\r
+*************************************************************************************************/\r
+\r
+ movq Nmod6, J \r
+ andq $1, J // j % 2\r
+ je .L999\r
+ ALIGN_4\r
+\r
+.L1_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
+.L1_02b:\r
+\r
+ vmovsd (BO1), %xmm0\r
+ vmovsd %xmm0, (BO)\r
+ addq $1*SIZE,BO1\r
+ addq $1*SIZE,BO\r
+ decq %rax\r
+ jnz .L1_02b\r
+\r
+.L1_02c:\r
+\r
+ movq BO1, B // advance B past the copied panel\r
+\r
+.L1_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 1), C // c += 1 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $32 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $4, I // i = (m >> 4)\r
+ je .L1_20\r
+\r
+ ALIGN_4\r
+\r
+.L1_11:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $16, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_16\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_12:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ je .L1_16\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ KERNEL16x1_1\r
+ KERNEL16x1_2\r
+ KERNEL16x1_3\r
+ KERNEL16x1_4\r
+\r
+ je .L1_16\r
+\r
+ jmp .L1_12\r
+ ALIGN_4\r
+\r
+.L1_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_19\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_17:\r
+\r
+ KERNEL16x1_SUB\r
+\r
+ jl .L1_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_19:\r
+\r
+ SAVE16x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $4, %rax // rax = rax * 16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $16, KK \r
+#endif\r
+\r
+ addq $16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L1_11\r
+ ALIGN_4 \r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L1_20:\r
+ // Test rest of M\r
+\r
+ testq $15, M\r
+ jz .L999\r
+\r
+ testq $8, M \r
+ jz .L1_21pre\r
+ ALIGN_4\r
+\r
+/**************************************************************************/\r
+\r
+.L1_20_1:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $8, %rax // number of values in A\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L1_20_6\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_20_2:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ je .L1_20_6\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ KERNEL8x1_1\r
+ KERNEL8x1_2\r
+ KERNEL8x1_3\r
+ KERNEL8x1_4\r
+\r
+ je .L1_20_6\r
+\r
+ jmp .L1_20_2\r
+ ALIGN_4\r
+\r
+.L1_20_6:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_20_9\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_20_7:\r
+\r
+ KERNEL8x1_SUB\r
+\r
+ jl .L1_20_7\r
+ ALIGN_4\r
+\r
+\r
+.L1_20_9:\r
+\r
+ SAVE8x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $8, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4\r
+ \r
+\r
+\r
+/**************************************************************************/\r
+\r
+.L1_21pre:\r
+\r
+ testq $4, M \r
+ jz .L1_30\r
+ ALIGN_4\r
+\r
+.L1_21:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in A\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L1_26\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_22:\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ je .L1_26\r
+\r
+ prefetcht0 B_PR1(BO,BI,8)\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ KERNEL4x1_1\r
+ KERNEL4x1_2\r
+ KERNEL4x1_3\r
+ KERNEL4x1_4\r
+\r
+ je .L1_26\r
+\r
+ jmp .L1_22\r
+ ALIGN_4\r
+\r
+.L1_26:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_29\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_27:\r
+\r
+ KERNEL4x1_SUB\r
+\r
+ jl .L1_27\r
+ ALIGN_4\r
+\r
+\r
+.L1_29:\r
+\r
+ SAVE4x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4\r
+ \r
+\r
+.L1_30:\r
+ testq $2, M \r
+ jz .L1_40\r
+\r
+ ALIGN_4\r
+\r
+.L1_31:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax\r
+ je .L1_36\r
+ movq %rax, BI // Index for BO\r
+\r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_32:\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ je .L1_36\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ KERNEL2x1_1\r
+ KERNEL2x1_2\r
+ KERNEL2x1_3\r
+ KERNEL2x1_4\r
+\r
+ je .L1_36\r
+\r
+ jmp .L1_32\r
+ ALIGN_4\r
+\r
+.L1_36:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_39\r
+\r
+ movq %rax, BI // Index for BO\r
+ \r
+ salq $1, %rax // rax = rax *2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_37:\r
+\r
+ KERNEL2x1_SUB\r
+\r
+ jl .L1_37\r
+ ALIGN_4\r
+\r
+\r
+.L1_39:\r
+\r
+ SAVE2x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO \r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4\r
+\r
+.L1_40:\r
+ testq $1, M \r
+ jz .L999\r
+\r
+ ALIGN_4\r
+\r
+.L1_41:\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $2 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+ andq $-8, %rax\r
+ je .L1_46\r
+ movq %rax, BI // Index for BO\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_42:\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ je .L1_46\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ KERNEL1x1_1\r
+ KERNEL1x1_2\r
+ KERNEL1x1_3\r
+ KERNEL1x1_4\r
+\r
+ je .L1_46\r
+\r
+ jmp .L1_42\r
+ ALIGN_4\r
+\r
+.L1_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+ andq $7, %rax # if (k & 7)\r
+ je .L1_49\r
+\r
+ movq %rax, BI // Index for BO\r
+\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_47:\r
+\r
+ KERNEL1x1_SUB\r
+\r
+ jl .L1_47\r
+ ALIGN_4\r
+\r
+\r
+.L1_49:\r
+\r
+ SAVE1x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax \r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq (BO, BI, SIZE), BO \r
+ leaq (AO, %rax, SIZE), AO\r
+#endif \r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK\r
+#endif\r
+\r
+ addq $1 * SIZE, CO1 # coffset += 1\r
+ ALIGN_4\r
+ \r
+\r
+.L999:\r
+ movq SP, %rsp\r
+ movq (%rsp), %rbx\r
+ movq 8(%rsp), %rbp\r
+ movq 16(%rsp), %r12\r
+ movq 24(%rsp), %r13\r
+ movq 32(%rsp), %r14\r
+ movq 40(%rsp), %r15\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq 48(%rsp), %rdi\r
+ movq 56(%rsp), %rsi\r
+ movups 64(%rsp), %xmm6\r
+ movups 80(%rsp), %xmm7\r
+ movups 96(%rsp), %xmm8\r
+ movups 112(%rsp), %xmm9\r
+ movups 128(%rsp), %xmm10\r
+ movups 144(%rsp), %xmm11\r
+ movups 160(%rsp), %xmm12\r
+ movups 176(%rsp), %xmm13\r
+ movups 192(%rsp), %xmm14\r
+ movups 208(%rsp), %xmm15\r
+#endif\r
+\r
+ addq $STACKSIZE, %rsp\r
+ ret\r
+\r
+ EPILOGUE\r
+\r
+\r
+\r
+\r
+\r
+#endif\r