--- /dev/null
+/*********************************************************************/\r
+/* Copyright 2009, 2010 The University of Texas at Austin. */\r
+/* All rights reserved. */\r
+/* */\r
+/* Redistribution and use in source and binary forms, with or */\r
+/* without modification, are permitted provided that the following */\r
+/* conditions are met: */\r
+/* */\r
+/* 1. Redistributions of source code must retain the above */\r
+/* copyright notice, this list of conditions and the following */\r
+/* disclaimer. */\r
+/* */\r
+/* 2. Redistributions in binary form must reproduce the above */\r
+/* copyright notice, this list of conditions and the following */\r
+/* disclaimer in the documentation and/or other materials */\r
+/* provided with the distribution. */\r
+/* */\r
+/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */\r
+/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */\r
+/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */\r
+/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */\r
+/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */\r
+/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */\r
+/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */\r
+/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */\r
+/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */\r
+/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */\r
+/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */\r
+/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */\r
+/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\r
+/* POSSIBILITY OF SUCH DAMAGE. */\r
+/* */\r
+/* The views and conclusions contained in the software and */\r
+/* documentation are those of the authors and should not be */\r
+/* interpreted as representing official policies, either expressed */\r
+/* or implied, of The University of Texas at Austin. */\r
+/*********************************************************************/\r
+\r
+\r
+#define ASSEMBLER\r
+#include "common.h"\r
+ \r
+#define OLD_M %rdi\r
+#define OLD_N %rsi\r
+#define M %r13\r
+#define J %r14\r
+#define OLD_K %rdx\r
+\r
+#define A %rcx\r
+#define B %r8\r
+#define C %r9\r
+#define LDC %r10\r
+ \r
+#define I %r11\r
+#define AO %rdi\r
+#define BO %rsi\r
+#define CO1 %r15\r
+#define K %r12\r
+#define BI %rbp\r
+#define SP %rbx\r
+\r
+#define BO1 %rdi\r
+#define BO2 %r15\r
+\r
+#ifndef WINDOWS_ABI\r
+\r
+#define STACKSIZE 96\r
+\r
+#else\r
+\r
+#define STACKSIZE 320\r
+\r
+#define OLD_ALPHA_I 40 + STACKSIZE(%rsp)\r
+#define OLD_A 48 + STACKSIZE(%rsp)\r
+#define OLD_B 56 + STACKSIZE(%rsp)\r
+#define OLD_C 64 + STACKSIZE(%rsp)\r
+#define OLD_LDC 72 + STACKSIZE(%rsp)\r
+#define OLD_OFFSET 80 + STACKSIZE(%rsp)\r
+\r
+#endif\r
+\r
+#define L_BUFFER_SIZE 512*8*4\r
+#define LB2_OFFSET 512*8*2\r
+\r
+#define Ndiv6 24(%rsp)\r
+#define Nmod6 32(%rsp)\r
+#define N 40(%rsp)\r
+#define ALPHA_R 48(%rsp)\r
+#define ALPHA_I 56(%rsp)\r
+#define OFFSET 64(%rsp)\r
+#define KK 72(%rsp)\r
+#define KKK 80(%rsp)\r
+#define BUFFER1 128(%rsp)\r
+#define BUFFER2 LB2_OFFSET+128(%rsp)\r
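+\r
+/* Frame layout relative to the aligned %rsp below: a few scalar slots\r
+   (Ndiv6 ... KKK) in the first 128 bytes, then the packed copy of B in\r
+   BUFFER1 at offset 128 and a second buffer at 128 + LB2_OFFSET.\r
+   L_BUFFER_SIZE (512*8*4 = 16 KB) covers both buffers. */\r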
+\r
+#if defined(OS_WINDOWS)\r
+#if L_BUFFER_SIZE > 16384\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 4(%rsp);\\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 12288\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 3(%rsp);\\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 8192\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 2(%rsp);\\r
+ movl $0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 4096\r
+#define STACK_TOUCH \\r
+ movl $0, 4096 * 1(%rsp);\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
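+\r
+/* STACK_TOUCH probes the newly reserved stack area with one store per\r
+   4 KB page.  Windows commits stack through a single guard page, so after\r
+   a large downward %rsp adjustment each page must be touched in order\r
+   before the packing buffer is used; other systems need no probing. */\r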
+\r
+#if defined(BULLDOZER)\r
+\r
+.macro VFMADD231PD_ y0,y1,y2\r
+ vfmaddpd \y0,\y1,\y2,\y0\r
+.endm\r
+\r
+.macro VFMADD231SD_ x0,x1,x2\r
+ vfmaddsd \x0,\x1,\x2,\x0\r
+.endm\r
+\r
+#else\r
+\r
+.macro VFMADD231PD_ y0,y1,y2\r
+	vfmadd231pd \y2,\y1,\y0\r
+.endm\r
+\r
+.macro VFMADD231SD_ x0,x1,x2\r
+	vfmadd231sd \x2,\x1,\x0\r
+.endm\r
+\r
+#endif\r
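+\r
+/* Operand order note: this is AT&T syntax, so the destination is the\r
+   last operand.  The FMA4 (BULLDOZER) form vfmaddpd \y0,\y1,\y2,\y0\r
+   computes y0 = y2*y1 + y0; the FMA3 equivalents are therefore written\r
+   vfmadd231pd \y2,\y1,\y0 so that they accumulate into the same y0. */\r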
+\r
+#if defined(BULLDOZER)\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT)\r
+\r
+.macro VFMADDPD_R y0,y1,y2\r
+ vfmaddpd \y0,\y1,\y2,\y0\r
+.endm\r
+\r
+.macro VFMADDPD_I y0,y1,y2\r
+ vfmaddpd \y0,\y1,\y2,\y0\r
+.endm\r
+\r
+#define VFMADD_R vfmaddpd\r
+#define VFMADD_I vfmaddpd\r
+\r
+#elif defined(RN) || defined(RT) || defined(CN) || defined(CT)\r
+\r
+.macro VFMADDPD_R y0,y1,y2\r
+ vfnmaddpd \y0,\y1,\y2,\y0\r
+.endm\r
+\r
+.macro VFMADDPD_I y0,y1,y2\r
+ vfmaddpd \y0,\y1,\y2,\y0\r
+.endm\r
+\r
+#define VFMADD_R vfnmaddpd\r
+#define VFMADD_I vfmaddpd\r
+\r
+#elif defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+.macro VFMADDPD_R y0,y1,y2\r
+ vfmaddpd \y0,\y1,\y2,\y0\r
+.endm\r
+\r
+.macro VFMADDPD_I y0,y1,y2\r
+ vfnmaddpd \y0,\y1,\y2,\y0\r
+.endm\r
+\r
+#define VFMADD_R vfmaddpd\r
+#define VFMADD_I vfnmaddpd\r
+\r
+#else\r
+\r
+.macro VFMADDPD_R y0,y1,y2\r
+ vfnmaddpd \y0,\y1,\y2,\y0\r
+.endm\r
+\r
+.macro VFMADDPD_I y0,y1,y2\r
+ vfnmaddpd \y0,\y1,\y2,\y0\r
+.endm\r
+\r
+#define VFMADD_R vfnmaddpd\r
+#define VFMADD_I vfnmaddpd\r
+\r
+#endif\r
+\r
+#else\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT)\r
+\r
+.macro VFMADDPD_R y0,y1,y2\r
+	vfmadd231pd \y2,\y1,\y0\r
+.endm\r
+\r
+.macro VFMADDPD_I y0,y1,y2\r
+	vfmadd231pd \y2,\y1,\y0\r
+.endm\r
+\r
+// FMA3 has no 4-operand form; map the 4-operand call sites (which always\r
+// pass x3 == x0) onto vfmadd231pd, accumulating into \x0.\r
+.macro VFMADD_R x0,x1,x2,x3\r
+	vfmadd231pd \x2,\x1,\x0\r
+.endm\r
+\r
+.macro VFMADD_I x0,x1,x2,x3\r
+	vfmadd231pd \x2,\x1,\x0\r
+.endm\r
+\r
+#elif defined(RN) || defined(RT) || defined(CN) || defined(CT)\r
+\r
+.macro VFMADDPD_R y0,y1,y2\r
+	vfnmadd231pd \y2,\y1,\y0\r
+.endm\r
+\r
+.macro VFMADDPD_I y0,y1,y2\r
+	vfmadd231pd \y2,\y1,\y0\r
+.endm\r
+\r
+.macro VFMADD_R x0,x1,x2,x3\r
+	vfnmadd231pd \x2,\x1,\x0\r
+.endm\r
+\r
+.macro VFMADD_I x0,x1,x2,x3\r
+	vfmadd231pd \x2,\x1,\x0\r
+.endm\r
+\r
+#elif defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+.macro VFMADDPD_R y0,y1,y2\r
+	vfmadd231pd \y2,\y1,\y0\r
+.endm\r
+\r
+.macro VFMADDPD_I y0,y1,y2\r
+	vfnmadd231pd \y2,\y1,\y0\r
+.endm\r
+\r
+.macro VFMADD_R x0,x1,x2,x3\r
+	vfmadd231pd \x2,\x1,\x0\r
+.endm\r
+\r
+.macro VFMADD_I x0,x1,x2,x3\r
+	vfnmadd231pd \x2,\x1,\x0\r
+.endm\r
+\r
+#else\r
+\r
+.macro VFMADDPD_R y0,y1,y2\r
+	vfnmadd231pd \y2,\y1,\y0\r
+.endm\r
+\r
+.macro VFMADDPD_I y0,y1,y2\r
+	vfnmadd231pd \y2,\y1,\y0\r
+.endm\r
+\r
+.macro VFMADD_R x0,x1,x2,x3\r
+	vfnmadd231pd \x2,\x1,\x0\r
+.endm\r
+\r
+.macro VFMADD_I x0,x1,x2,x3\r
+	vfnmadd231pd \x2,\x1,\x0\r
+.endm\r
+\r
+#endif\r
+\r
+#endif\r
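+\r
+/* The two-letter build flags encode conjugation: the first letter is the\r
+   op on A (N/T plain, R/C conjugated), the second the op on B.  Negating\r
+   the real-part FMA (VFMADD_R), the imaginary-part FMA (VFMADD_I), or\r
+   both flips the sign contributed by a_i or b_i, so the shared vaddsubpd\r
+   combine in the SAVE code produces a*b, conj(a)*b, a*conj(b), or\r
+   conj(a)*conj(b) for the respective case. */\r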
+\r
+#define A_PR1 384\r
+#define B_PR1 192\r
+/***************************************************************************************************/\r
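+\r
+/* Kernel naming: KERNEL<m>x<n>_SUB updates an <m>-row by <n>-column block\r
+   of C for one k iteration; the _1.._4 variants are the same body\r
+   unrolled four ways with fixed displacements and a single index update\r
+   in _4.  SIZE is one double, so a complex value occupies 2*SIZE. */\r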
+\r
+.macro KERNEL4x2_SUB\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm1\r
+\r
+ vbroadcastsd -8 * SIZE(BO, BI, SIZE), %ymm4\r
+ vbroadcastsd -7 * SIZE(BO, BI, SIZE), %ymm5\r
+ VFMADDPD_R %ymm8 ,%ymm4,%ymm0\r
+ VFMADDPD_R %ymm12,%ymm4,%ymm1\r
+ vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm6\r
+ VFMADDPD_I %ymm9 ,%ymm5,%ymm0\r
+ VFMADDPD_I %ymm13,%ymm5,%ymm1\r
+ vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm7\r
+ VFMADDPD_R %ymm10,%ymm6,%ymm0\r
+ VFMADDPD_R %ymm14,%ymm6,%ymm1\r
+ VFMADDPD_I %ymm11,%ymm7,%ymm0\r
+ VFMADDPD_I %ymm15,%ymm7,%ymm1\r
+\r
+ addq $4, BI \r
+ addq $8, %rax \r
+.endm\r
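+\r
+/* 4x2: two ymm loads fetch four complex elements of A, and b_r/b_i of the\r
+   two B columns are broadcast into ymm4..ymm7.  ymm8..ymm15 hold the\r
+   eight partial sums.  BI advances by 4 doubles (two complex B values)\r
+   and %rax by 8 doubles (four complex A values) per k iteration. */\r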
+\r
+.macro SAVE4x2\r
+\r
+ vbroadcastsd ALPHA_R, %ymm0\r
+ vbroadcastsd ALPHA_I, %ymm1\r
+\r
+	// swap the 8-byte halves of each 128-bit lane (real <-> imaginary)\r
+ vshufpd $0x05, %ymm9 , %ymm9, %ymm9\r
+ vshufpd $0x05, %ymm11, %ymm11, %ymm11\r
+ vshufpd $0x05, %ymm13, %ymm13, %ymm13\r
+ vshufpd $0x05, %ymm15, %ymm15, %ymm15\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubpd %ymm9, %ymm8 , %ymm8\r
+ vaddsubpd %ymm11,%ymm10, %ymm10\r
+ vaddsubpd %ymm13,%ymm12, %ymm12\r
+ vaddsubpd %ymm15,%ymm14, %ymm14\r
+\r
+ vshufpd $0x05, %ymm8 , %ymm8, %ymm9\r
+ vshufpd $0x05, %ymm10, %ymm10, %ymm11\r
+ vshufpd $0x05, %ymm12, %ymm12, %ymm13\r
+ vshufpd $0x05, %ymm14, %ymm14, %ymm15\r
+\r
+#else\r
+ vaddsubpd %ymm8, %ymm9 ,%ymm9\r
+ vaddsubpd %ymm10, %ymm11,%ymm11\r
+ vaddsubpd %ymm12, %ymm13,%ymm13\r
+ vaddsubpd %ymm14, %ymm15,%ymm15\r
+\r
+ vmovapd %ymm9, %ymm8\r
+ vmovapd %ymm11, %ymm10\r
+ vmovapd %ymm13, %ymm12\r
+ vmovapd %ymm15, %ymm14\r
+\r
+	// swap the 8-byte halves of each 128-bit lane (real <-> imaginary)\r
+ vshufpd $0x05, %ymm9 , %ymm9, %ymm9\r
+ vshufpd $0x05, %ymm11, %ymm11, %ymm11\r
+ vshufpd $0x05, %ymm13, %ymm13, %ymm13\r
+ vshufpd $0x05, %ymm15, %ymm15, %ymm15\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulpd %ymm8 , %ymm0, %ymm8\r
+ vmulpd %ymm10, %ymm0, %ymm10\r
+ vmulpd %ymm12, %ymm0, %ymm12\r
+ vmulpd %ymm14, %ymm0, %ymm14\r
+\r
+ // multiply with ALPHA_I\r
+ vmulpd %ymm9 , %ymm1, %ymm9\r
+ vmulpd %ymm11, %ymm1, %ymm11\r
+ vmulpd %ymm13, %ymm1, %ymm13\r
+ vmulpd %ymm15, %ymm1, %ymm15\r
+\r
+ vaddsubpd %ymm9, %ymm8 , %ymm8\r
+ vaddsubpd %ymm11,%ymm10, %ymm10\r
+ vaddsubpd %ymm13,%ymm12, %ymm12\r
+ vaddsubpd %ymm15,%ymm14, %ymm14\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddpd (CO1), %ymm8 , %ymm8\r
+ vaddpd 4 * SIZE(CO1), %ymm12, %ymm12\r
+\r
+ vaddpd (CO1, LDC), %ymm10, %ymm10\r
+ vaddpd 4 * SIZE(CO1, LDC), %ymm14, %ymm14\r
+\r
+#endif\r
+\r
+ vmovups %ymm8 , (CO1)\r
+ vmovups %ymm12 , 4 * SIZE(CO1)\r
+\r
+ vmovups %ymm10 , (CO1, LDC)\r
+ vmovups %ymm14 , 4 * SIZE(CO1, LDC)\r
+\r
+\r
+\r
+.endm\r
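+\r
+/* Sketch of the alpha scaling above, for the NN-type case (the #else\r
+   branch differs only in which register pair feeds vaddsubpd).  Per\r
+   complex element, in [even|odd] lanes:\r
+     ymm8 = [ a_r*b_r | a_i*b_r ]   (A times broadcast b_r)\r
+     ymm9 = [ a_r*b_i | a_i*b_i ]   (A times broadcast b_i)\r
+   After the first shuffle ymm9 = [ a_i*b_i | a_r*b_i ], so\r
+     vaddsubpd ymm9, ymm8 -> [ a_r*b_r - a_i*b_i | a_i*b_r + a_r*b_i ],\r
+   i.e. [ Re(a*b) | Im(a*b) ].  With ymm9 = [ Im | Re ] from the second\r
+   shuffle, the multiplies and final vaddsubpd give\r
+     [ alpha_r*Re - alpha_i*Im | alpha_r*Im + alpha_i*Re ],\r
+   the real and imaginary parts of alpha*(a*b). */\r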
+\r
+/***************************************************************************************************/\r
+#define KERNEL2x2_1(xx) \\r
+ prefetcht0 A_PR1(AO,%rax,SIZE) ;\\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup -8 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovups -6 * SIZE(AO, %rax, SIZE), %xmm1 ;\\r
+ VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\\r
+ vmovddup -7 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\\r
+ vmovddup -6 * SIZE(BO, BI, SIZE), %xmm6 ;\\r
+ VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\\r
+ VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\\r
+ vmovddup -5 * SIZE(BO, BI, SIZE), %xmm7 ;\\r
+ VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\\r
+ VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\\r
+\r
+#define KERNEL2x2_2(xx) \\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovups -2 * SIZE(AO, %rax, SIZE), %xmm1 ;\\r
+ VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\\r
+ vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\\r
+ vmovddup -2 * SIZE(BO, BI, SIZE), %xmm6 ;\\r
+ VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\\r
+ VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\\r
+ vmovddup -1 * SIZE(BO, BI, SIZE), %xmm7 ;\\r
+ VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\\r
+ VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\\r
+\r
+#define KERNEL2x2_3(xx) \\r
+ prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\\r
+ vmovups 0 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup 0 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovups 2 * SIZE(AO, %rax, SIZE), %xmm1 ;\\r
+ VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\\r
+ vmovddup 1 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\\r
+ vmovddup 2 * SIZE(BO, BI, SIZE), %xmm6 ;\\r
+ VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\\r
+ VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\\r
+ vmovddup 3 * SIZE(BO, BI, SIZE), %xmm7 ;\\r
+ VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\\r
+ VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\\r
+\r
+#define KERNEL2x2_4(xx) \\r
+ vmovups 4 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup 4 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovups 6 * SIZE(AO, %rax, SIZE), %xmm1 ;\\r
+ VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\\r
+ vmovddup 5 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\\r
+ vmovddup 6 * SIZE(BO, BI, SIZE), %xmm6 ;\\r
+ VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\\r
+ VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\\r
+ vmovddup 7 * SIZE(BO, BI, SIZE), %xmm7 ;\\r
+ VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\\r
+ VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\\r
+ addq $16, BI ;\\r
+ addq $16, %rax ;\\r
+\r
+\r
+#define KERNEL2x2_SUB(xx) \\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup -8 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovups -6 * SIZE(AO, %rax, SIZE), %xmm1 ;\\r
+ VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\\r
+ vmovddup -7 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\\r
+ vmovddup -6 * SIZE(BO, BI, SIZE), %xmm6 ;\\r
+ VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\\r
+ VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\\r
+ vmovddup -5 * SIZE(BO, BI, SIZE), %xmm7 ;\\r
+ VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\\r
+ VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\\r
+ addq $4, BI ;\\r
+ addq $4, %rax ;\\r
+\r
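+/* The 2x2/1x2/2x1/1x1 kernels keep one complex value per xmm register:\r
+   vmovddup broadcasts b_r or b_i into both 8-byte lanes, and the\r
+   4-operand VFMADD_R/VFMADD_I forms accumulate into their first argument\r
+   (the call sites repeat it as the fourth).  As in the ymm kernels, BI\r
+   counts B doubles and %rax counts A doubles. */\r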
+/************************************************************************************************/\r
+\r
+/************************************************************************************************/\r
+\r
+#define KERNEL1x2_1(xx) \\r
+ prefetcht0 A_PR1(AO,%rax,SIZE) ;\\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup -8 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovddup -7 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ vmovddup -6 * SIZE(BO, BI, SIZE), %xmm6 ;\\r
+ VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\\r
+ vmovddup -5 * SIZE(BO, BI, SIZE), %xmm7 ;\\r
+ VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\\r
+\r
+#define KERNEL1x2_2(xx) \\r
+ vmovups -6 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ vmovddup -2 * SIZE(BO, BI, SIZE), %xmm6 ;\\r
+ VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\\r
+ vmovddup -1 * SIZE(BO, BI, SIZE), %xmm7 ;\\r
+ VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\\r
+\r
+#define KERNEL1x2_3(xx) \\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup 0 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovddup 1 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ vmovddup 2 * SIZE(BO, BI, SIZE), %xmm6 ;\\r
+ VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\\r
+ vmovddup 3 * SIZE(BO, BI, SIZE), %xmm7 ;\\r
+ VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\\r
+\r
+#define KERNEL1x2_4(xx) \\r
+ vmovups -2 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup 4 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovddup 5 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ vmovddup 6 * SIZE(BO, BI, SIZE), %xmm6 ;\\r
+ VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\\r
+ vmovddup 7 * SIZE(BO, BI, SIZE), %xmm7 ;\\r
+ VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\\r
+ addq $16, BI ;\\r
+ addq $8 , %rax ;\\r
+\r
+\r
+#define KERNEL1x2_SUB(xx) \\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup -8 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovddup -7 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ vmovddup -6 * SIZE(BO, BI, SIZE), %xmm6 ;\\r
+ VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\\r
+ vmovddup -5 * SIZE(BO, BI, SIZE), %xmm7 ;\\r
+ VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\\r
+ addq $4, BI ;\\r
+ addq $2, %rax ;\\r
+\r
+/************************************************************************************************/\r
+\r
+.macro KERNEL4x1_SUB\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %ymm1\r
+ vbroadcastsd -4 * SIZE(BO, BI, SIZE) , %ymm4\r
+ vbroadcastsd -3 * SIZE(BO, BI, SIZE) , %ymm5\r
+ VFMADDPD_R %ymm8 ,%ymm4,%ymm0\r
+ VFMADDPD_R %ymm12,%ymm4,%ymm1\r
+ VFMADDPD_I %ymm9 ,%ymm5,%ymm0\r
+ VFMADDPD_I %ymm13,%ymm5,%ymm1\r
+\r
+ addq $2, BI \r
+ addq $8, %rax \r
+.endm\r
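+\r
+/* 4x1: a single B column, so only b_r/b_i are broadcast and four\r
+   accumulators (ymm8/ymm9, ymm12/ymm13) are live; BI advances by\r
+   2 doubles (one complex B value) per k iteration. */\r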
+\r
+.macro SAVE4x1\r
+\r
+ vbroadcastsd ALPHA_R, %ymm0\r
+ vbroadcastsd ALPHA_I, %ymm1\r
+\r
+	// swap the 8-byte halves of each 128-bit lane (real <-> imaginary)\r
+ vshufpd $0x05, %ymm9 , %ymm9, %ymm9\r
+ vshufpd $0x05, %ymm13, %ymm13, %ymm13\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubpd %ymm9, %ymm8 , %ymm8\r
+ vaddsubpd %ymm13,%ymm12 , %ymm12\r
+\r
+ vshufpd $0x05, %ymm8 , %ymm8, %ymm9\r
+ vshufpd $0x05, %ymm12, %ymm12, %ymm13\r
+\r
+#else\r
+ vaddsubpd %ymm8, %ymm9 , %ymm9\r
+ vaddsubpd %ymm12,%ymm13, %ymm13\r
+\r
+ vmovapd %ymm9, %ymm8\r
+ vmovapd %ymm13, %ymm12\r
+\r
+	// swap the 8-byte halves of each 128-bit lane (real <-> imaginary)\r
+ vshufpd $0x05, %ymm9 , %ymm9, %ymm9\r
+ vshufpd $0x05, %ymm13, %ymm13, %ymm13\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulpd %ymm8 , %ymm0, %ymm8\r
+ vmulpd %ymm12, %ymm0, %ymm12\r
+\r
+ // multiply with ALPHA_I\r
+ vmulpd %ymm9 , %ymm1, %ymm9\r
+ vmulpd %ymm13, %ymm1, %ymm13\r
+\r
+ vaddsubpd %ymm9, %ymm8 , %ymm8\r
+ vaddsubpd %ymm13, %ymm12, %ymm12\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddpd (CO1), %ymm8 , %ymm8\r
+ vaddpd 4 * SIZE(CO1), %ymm12, %ymm12\r
+\r
+#endif\r
+\r
+ vmovups %ymm8 , (CO1)\r
+ vmovups %ymm12 ,4 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+\r
+/************************************************************************************************/\r
+\r
+#define KERNEL2x1_1(xx) \\r
+ prefetcht0 A_PR1(AO,%rax,SIZE) ;\\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovups -6 * SIZE(AO, %rax, SIZE), %xmm1 ;\\r
+ VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\\r
+ vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\\r
+\r
+#define KERNEL2x1_2(xx) \\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup -2 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovups -2 * SIZE(AO, %rax, SIZE), %xmm1 ;\\r
+ VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\\r
+ vmovddup -1 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\\r
+\r
+#define KERNEL2x1_3(xx) \\r
+ prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\\r
+ vmovups 0 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup 0 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovups 2 * SIZE(AO, %rax, SIZE), %xmm1 ;\\r
+ VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\\r
+ vmovddup 1 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\\r
+\r
+#define KERNEL2x1_4(xx) \\r
+ vmovups 4 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup 2 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovups 6 * SIZE(AO, %rax, SIZE), %xmm1 ;\\r
+ VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\\r
+ vmovddup 3 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\\r
+ addq $8, BI ;\\r
+ addq $16, %rax ;\\r
+\r
+\r
+#define KERNEL2x1_SUB(xx) \\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovups -6 * SIZE(AO, %rax, SIZE), %xmm1 ;\\r
+ VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\\r
+ vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\\r
+ addq $2, BI ;\\r
+ addq $4, %rax ;\\r
+\r
+\r
+/************************************************************************************************/\r
+\r
+#define KERNEL1x1_1(xx) \\r
+ prefetcht0 A_PR1(AO,%rax,SIZE) ;\\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+\r
+#define KERNEL1x1_2(xx) \\r
+ vmovups -6 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup -2 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovddup -1 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+\r
+#define KERNEL1x1_3(xx) \\r
+ vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup 0 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovddup 1 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+\r
+#define KERNEL1x1_4(xx) \\r
+ vmovups -2 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup 2 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovddup 3 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ addq $8, BI ;\\r
+ addq $8, %rax ;\\r
+\r
+\r
+#define KERNEL1x1_SUB(xx) \\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\\r
+ vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4 ;\\r
+ VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\\r
+ vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5 ;\\r
+ VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\\r
+ addq $2, BI ;\\r
+ addq $2, %rax ;\\r
+\r
+\r
+/************************************************************************************************/\r
+\r
+\r
+\r
+\r
+ PROLOGUE\r
+ PROFCODE\r
+ \r
+ subq $STACKSIZE, %rsp\r
+ movq %rbx, (%rsp)\r
+ movq %rbp, 8(%rsp)\r
+ movq %r12, 16(%rsp)\r
+ movq %r13, 24(%rsp)\r
+ movq %r14, 32(%rsp)\r
+ movq %r15, 40(%rsp)\r
+\r
+ vzeroupper\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq %rdi, 48(%rsp)\r
+ movq %rsi, 56(%rsp)\r
+ movups %xmm6, 64(%rsp)\r
+ movups %xmm7, 80(%rsp)\r
+ movups %xmm8, 96(%rsp)\r
+ movups %xmm9, 112(%rsp)\r
+ movups %xmm10, 128(%rsp)\r
+ movups %xmm11, 144(%rsp)\r
+ movups %xmm12, 160(%rsp)\r
+ movups %xmm13, 176(%rsp)\r
+ movups %xmm14, 192(%rsp)\r
+ movups %xmm15, 208(%rsp)\r
+\r
+ movq ARG1, OLD_M\r
+ movq ARG2, OLD_N\r
+ movq ARG3, OLD_K\r
+ movq OLD_A, A\r
+ movq OLD_B, B\r
+ movq OLD_C, C\r
+ movq OLD_LDC, LDC\r
+#ifdef TRMMKERNEL\r
+ movsd OLD_OFFSET, %xmm12\r
+#endif\r
+	vmovaps	%xmm3, %xmm0\r
+	vmovsd	OLD_ALPHA_I, %xmm1\r
+\r
+#else\r
+ movq STACKSIZE + 8(%rsp), LDC\r
+#ifdef TRMMKERNEL\r
+ movsd STACKSIZE + 16(%rsp), %xmm12\r
+#endif\r
+\r
+#endif\r
+\r
+ movq %rsp, SP # save old stack\r
+ subq $128 + L_BUFFER_SIZE, %rsp\r
+ andq $-4096, %rsp # align stack\r
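+	// %rsp now sits below the 128-byte scalar area plus the packing\r
+	// buffer, rounded down to a 4 KB boundary; SP keeps the caller\r
+	// frame for the restore at .L999.\r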
+\r
+ STACK_TOUCH\r
+\r
+ cmpq $0, OLD_M\r
+ je .L999\r
+\r
+ cmpq $0, OLD_N\r
+ je .L999\r
+\r
+ cmpq $0, OLD_K\r
+ je .L999\r
+\r
+ movq OLD_M, M\r
+ movq OLD_N, N\r
+ movq OLD_K, K\r
+\r
+ vmovsd %xmm0, ALPHA_R\r
+ vmovsd %xmm1, ALPHA_I\r
+\r
+ salq $ZBASE_SHIFT, LDC\r
+\r
+ movq N, %rax\r
+ xorq %rdx, %rdx\r
+ movq $2, %rdi\r
+ divq %rdi // N / 2\r
+ movq %rax, Ndiv6 // N / 2\r
+ movq %rdx, Nmod6 // N % 2\r
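+	// The Ndiv6/Nmod6 names are presumably kept from a kernel that\r
+	// unrolled N by 6; here the slots hold N/2 and N%2.\r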
+\r
+ \r
+\r
+#ifdef TRMMKERNEL\r
+ vmovsd %xmm12, OFFSET\r
+ vmovsd %xmm12, KK\r
+#ifndef LEFT\r
+ negq KK\r
+#endif \r
+#endif\r
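+\r
+/* TRMM bookkeeping, as in the other OpenBLAS trmm kernels: OFFSET holds\r
+   the signed diagonal offset and KK the number of k values valid for the\r
+   current panel; KK is advanced at the end of each M block (LEFT) or\r
+   N block (not LEFT). */\r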
+\r
+.L2_00_0:\r
+\r
+ movq Ndiv6, J\r
+ cmpq $0, J\r
+ je .L1_2_0\r
+ ALIGN_4\r
+\r
+\r
+\r
+.L2_00_01:\r
+ // copy to sub buffer\r
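+	// Each iteration moves one k-row of the 2-column B panel\r
+	// (4 doubles = 2 complex values) into BUFFER1, so the micro\r
+	// kernels can stream B sequentially.\r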
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
+.L2_00_02b:\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups 2 * SIZE(BO1), %xmm1\r
+ vmovups %xmm0, (BO)\r
+ vmovups %xmm1, 2 * SIZE(BO)\r
+ addq $4*SIZE,BO1\r
+ addq $4*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_00_02b\r
+\r
+.L2_00_02c:\r
+\r
+ movq BO1, B // next offset of B\r
+\r
+\r
+.L2_00_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C // c += 2 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $8 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $2, I // i = (m >> 2)\r
+ je .L2_2_10\r
+\r
+ ALIGN_4\r
+\r
+/******************************************************************************************************************/\r
+\r
+.L2_4_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
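+	// TRMM only: KKK is the effective k trip count for this tile\r
+	// (K - KK, or KK plus the tile width, depending on LEFT/TRANSA),\r
+	// so just the triangular part of the panel gets multiplied.\r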
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_4_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
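+	// AO/BO were advanced past the block and %rax/BI run from -count\r
+	// up toward zero; the addq updates inside KERNEL4x2_SUB set the\r
+	// flags, so the je exits below fire exactly when the stream is\r
+	// consumed, and jl in the tail loop repeats while the index is\r
+	// still negative.\r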
+\r
+.L2_4_12:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+\r
+ je .L2_4_16\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI ,SIZE)\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+\r
+ je .L2_4_16\r
+\r
+ jmp .L2_4_12\r
+ ALIGN_4\r
+\r
+.L2_4_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L2_4_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_17:\r
+\r
+ KERNEL4x2_SUB\r
+\r
+ jl .L2_4_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_4_19:\r
+\r
+ SAVE4x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ decq I # i --\r
+ jg .L2_4_11\r
+ ALIGN_4 \r
+\r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+\r
+\r
+/******************************************************************************************************************/\r
+.L2_2_10:\r
+ testq $2, M \r
+	jz	.L2_2_40		// no 2-row remainder; check M & 1\r
+\r
+.L2_2_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_2_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_2_12:\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_1(xxx)\r
+ KERNEL2x2_2(xxx)\r
+ prefetcht0 B_PR1+64(BO,BI,SIZE)\r
+ KERNEL2x2_3(xxx)\r
+ KERNEL2x2_4(xxx)\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_1(xxx)\r
+ KERNEL2x2_2(xxx)\r
+ prefetcht0 B_PR1+64(BO,BI,SIZE)\r
+ KERNEL2x2_3(xxx)\r
+ KERNEL2x2_4(xxx)\r
+\r
+ je .L2_2_16\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_1(xxx)\r
+ KERNEL2x2_2(xxx)\r
+ prefetcht0 B_PR1+64(BO,BI,SIZE)\r
+ KERNEL2x2_3(xxx)\r
+ KERNEL2x2_4(xxx)\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_1(xxx)\r
+ KERNEL2x2_2(xxx)\r
+ prefetcht0 B_PR1+64(BO,BI,SIZE)\r
+ KERNEL2x2_3(xxx)\r
+ KERNEL2x2_4(xxx)\r
+\r
+ je .L2_2_16\r
+\r
+ jmp .L2_2_12\r
+ ALIGN_4\r
+\r
+.L2_2_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L2_2_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_2_17:\r
+\r
+ KERNEL2x2_SUB(xxx)\r
+ jl .L2_2_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_2_19:\r
+\r
+ vmovddup ALPHA_R, %xmm0\r
+ vmovddup ALPHA_I, %xmm1\r
+\r
+	// swap high and low 64 bits (real <-> imaginary)\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+ vshufpd $0x01, %xmm11, %xmm11, %xmm11\r
+ vshufpd $0x01, %xmm13, %xmm13, %xmm13\r
+ vshufpd $0x01, %xmm15, %xmm15, %xmm15\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubpd %xmm9, %xmm8 , %xmm8\r
+ vaddsubpd %xmm11,%xmm10, %xmm10\r
+ vaddsubpd %xmm13,%xmm12, %xmm12\r
+ vaddsubpd %xmm15,%xmm14, %xmm14\r
+\r
+ vshufpd $0x01, %xmm8 , %xmm8, %xmm9\r
+ vshufpd $0x01, %xmm10, %xmm10, %xmm11\r
+ vshufpd $0x01, %xmm12, %xmm12, %xmm13\r
+ vshufpd $0x01, %xmm14, %xmm14, %xmm15\r
+\r
+#else\r
+ vaddsubpd %xmm8, %xmm9 ,%xmm9\r
+ vaddsubpd %xmm10, %xmm11,%xmm11\r
+ vaddsubpd %xmm12, %xmm13,%xmm13\r
+ vaddsubpd %xmm14, %xmm15,%xmm15\r
+\r
+ vmovapd %xmm9, %xmm8\r
+ vmovapd %xmm11, %xmm10\r
+ vmovapd %xmm13, %xmm12\r
+ vmovapd %xmm15, %xmm14\r
+\r
+	// swap high and low 64 bits (real <-> imaginary)\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+ vshufpd $0x01, %xmm11, %xmm11, %xmm11\r
+ vshufpd $0x01, %xmm13, %xmm13, %xmm13\r
+ vshufpd $0x01, %xmm15, %xmm15, %xmm15\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulpd %xmm8 , %xmm0, %xmm8\r
+ vmulpd %xmm10, %xmm0, %xmm10\r
+ vmulpd %xmm12, %xmm0, %xmm12\r
+ vmulpd %xmm14, %xmm0, %xmm14\r
+\r
+ // multiply with ALPHA_I\r
+ vmulpd %xmm9 , %xmm1, %xmm9\r
+ vmulpd %xmm11, %xmm1, %xmm11\r
+ vmulpd %xmm13, %xmm1, %xmm13\r
+ vmulpd %xmm15, %xmm1, %xmm15\r
+\r
+ vaddsubpd %xmm9, %xmm8 , %xmm8\r
+ vaddsubpd %xmm11,%xmm10, %xmm10\r
+ vaddsubpd %xmm13,%xmm12, %xmm12\r
+ vaddsubpd %xmm15,%xmm14, %xmm14\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddpd (CO1), %xmm8 , %xmm8\r
+ vaddpd 2 * SIZE(CO1), %xmm12, %xmm12\r
+\r
+ vaddpd (CO1, LDC), %xmm10, %xmm10\r
+ vaddpd 2 * SIZE(CO1, LDC), %xmm14, %xmm14\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+ vmovups %xmm12 , 2 * SIZE(CO1)\r
+\r
+ vmovups %xmm10 , (CO1, LDC)\r
+ vmovups %xmm14 , 2 * SIZE(CO1, LDC)\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4 \r
+\r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L2_2_40:\r
+ testq $1, M \r
+ jz .L2_2_60 // to next 2 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L2_2_41:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L2_2_46\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_2_42:\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_1(xxx)\r
+ KERNEL1x2_2(xxx)\r
+ prefetcht0 B_PR1+64(BO,BI,SIZE)\r
+ KERNEL1x2_3(xxx)\r
+ KERNEL1x2_4(xxx)\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_1(xxx)\r
+ KERNEL1x2_2(xxx)\r
+ prefetcht0 B_PR1+64(BO,BI,SIZE)\r
+ KERNEL1x2_3(xxx)\r
+ KERNEL1x2_4(xxx)\r
+\r
+ je .L2_2_46\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_1(xxx)\r
+ KERNEL1x2_2(xxx)\r
+ prefetcht0 B_PR1+64(BO,BI,SIZE)\r
+ KERNEL1x2_3(xxx)\r
+ KERNEL1x2_4(xxx)\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_1(xxx)\r
+ KERNEL1x2_2(xxx)\r
+ prefetcht0 B_PR1+64(BO,BI,SIZE)\r
+ KERNEL1x2_3(xxx)\r
+ KERNEL1x2_4(xxx)\r
+\r
+ je .L2_2_46\r
+\r
+ jmp .L2_2_42\r
+ ALIGN_4\r
+\r
+.L2_2_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L2_2_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_2_47:\r
+\r
+ KERNEL1x2_SUB(xxx)\r
+ jl .L2_2_47\r
+ ALIGN_4\r
+\r
+\r
+.L2_2_49:\r
+\r
+ vmovddup ALPHA_R, %xmm0\r
+ vmovddup ALPHA_I, %xmm1\r
+\r
+	// swap high and low 64 bits (real <-> imaginary)\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+ vshufpd $0x01, %xmm11, %xmm11, %xmm11\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubpd %xmm9, %xmm8 , %xmm8\r
+ vaddsubpd %xmm11,%xmm10, %xmm10\r
+\r
+ vshufpd $0x01, %xmm8 , %xmm8, %xmm9\r
+ vshufpd $0x01, %xmm10, %xmm10, %xmm11\r
+\r
+#else\r
+ vaddsubpd %xmm8, %xmm9, %xmm9\r
+ vaddsubpd %xmm10,%xmm11, %xmm11\r
+\r
+ vmovapd %xmm9, %xmm8\r
+ vmovapd %xmm11, %xmm10\r
+\r
+	// swap high and low 64 bits (real <-> imaginary)\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+ vshufpd $0x01, %xmm11, %xmm11, %xmm11\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulpd %xmm8 , %xmm0, %xmm8\r
+ vmulpd %xmm10, %xmm0, %xmm10\r
+\r
+ // multiply with ALPHA_I\r
+ vmulpd %xmm9 , %xmm1, %xmm9\r
+ vmulpd %xmm11, %xmm1, %xmm11\r
+\r
+ vaddsubpd %xmm9, %xmm8 , %xmm8\r
+ vaddsubpd %xmm11,%xmm10, %xmm10\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddpd (CO1), %xmm8 , %xmm8\r
+ vaddpd (CO1, LDC), %xmm10, %xmm10\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+ vmovups %xmm10 , (CO1, LDC)\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK\r
+#endif\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ decq I # i --\r
+ jg .L2_2_41\r
+ ALIGN_4 \r
+\r
+\r
+\r
+ \r
+.L2_2_60:\r
+#if defined(TRMMKERNEL) && !defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ decq J // j --\r
+ jg .L2_00_01 // next 2 lines of N\r
+\r
+\r
+\r
+.L1_2_0:\r
+\r
+/************************************************************************************************\r
+* Loop for the remaining single column of N (N % 2 == 1)\r
+*************************************************************************************************/\r
+\r
+ movq Nmod6, J \r
+ andq $1, J // j % 2\r
+ je .L999\r
+ ALIGN_4\r
+\r
+.L1_00_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
+.L1_00_02b:\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups %xmm0, (BO)\r
+ addq $2*SIZE,BO1\r
+ addq $2*SIZE,BO\r
+ decq %rax\r
+ jnz .L1_00_02b\r
+\r
+.L1_00_02c:\r
+\r
+ movq BO1, B // next offset of B\r
+\r
+.L1_00_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 1), C // c += 1 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $8 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $2, I // i = (m >> 2)\r
+ je .L1_2_10\r
+\r
+ ALIGN_4\r
+\r
+/*******************************************************************************************************/\r
+\r
+\r
+.L1_4_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $4, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_4_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_12:\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ je .L1_4_16\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ je .L1_4_16\r
+\r
+ jmp .L1_4_12\r
+ ALIGN_4\r
+\r
+.L1_4_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L1_4_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_17:\r
+\r
+ KERNEL4x1_SUB\r
+\r
+ jl .L1_4_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_4_19:\r
+\r
+ SAVE4x1\r
+\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $4, KK\r
+#endif\r
+\r
+ addq $8 * SIZE, CO1 # coffset += 8\r
+ decq I # i --\r
+ jg .L1_4_11\r
+ ALIGN_4 \r
+\r
+\r
+\r
+\r
+/*******************************************************************************************************/\r
+.L1_2_10:\r
+ testq $2, M \r
+ jz .L1_2_40\r
+\r
+\r
+.L1_2_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $2, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_2_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_2_12:\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x1_1(xxx)\r
+ KERNEL2x1_2(xxx)\r
+ KERNEL2x1_3(xxx)\r
+ KERNEL2x1_4(xxx)\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x1_1(xxx)\r
+ KERNEL2x1_2(xxx)\r
+ KERNEL2x1_3(xxx)\r
+ KERNEL2x1_4(xxx)\r
+\r
+ je .L1_2_16\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x1_1(xxx)\r
+ KERNEL2x1_2(xxx)\r
+ KERNEL2x1_3(xxx)\r
+ KERNEL2x1_4(xxx)\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x1_1(xxx)\r
+ KERNEL2x1_2(xxx)\r
+ KERNEL2x1_3(xxx)\r
+ KERNEL2x1_4(xxx)\r
+\r
+ je .L1_2_16\r
+\r
+ jmp .L1_2_12\r
+ ALIGN_4\r
+\r
+.L1_2_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L1_2_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_2_17:\r
+\r
+ KERNEL2x1_SUB(xxx)\r
+ jl .L1_2_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_2_19:\r
+\r
+ vmovddup ALPHA_R, %xmm0\r
+ vmovddup ALPHA_I, %xmm1\r
+\r
+	// swap high and low 64 bits (real <-> imaginary)\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+ vshufpd $0x01, %xmm13, %xmm13, %xmm13\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubpd %xmm9, %xmm8 , %xmm8\r
+ vaddsubpd %xmm13,%xmm12 , %xmm12\r
+\r
+ vshufpd $0x01, %xmm8 , %xmm8, %xmm9\r
+ vshufpd $0x01, %xmm12, %xmm12, %xmm13\r
+\r
+#else\r
+ vaddsubpd %xmm8, %xmm9 , %xmm9\r
+ vaddsubpd %xmm12,%xmm13, %xmm13\r
+\r
+ vmovapd %xmm9, %xmm8\r
+ vmovapd %xmm13, %xmm12\r
+\r
+	// swap high and low 64 bits (real <-> imaginary)\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+ vshufpd $0x01, %xmm13, %xmm13, %xmm13\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulpd %xmm8 , %xmm0, %xmm8\r
+ vmulpd %xmm12, %xmm0, %xmm12\r
+\r
+ // multiply with ALPHA_I\r
+ vmulpd %xmm9 , %xmm1, %xmm9\r
+ vmulpd %xmm13, %xmm1, %xmm13\r
+\r
+ vaddsubpd %xmm9, %xmm8 , %xmm8\r
+ vaddsubpd %xmm13, %xmm12, %xmm12\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddpd (CO1), %xmm8 , %xmm8\r
+ vaddpd 2 * SIZE(CO1), %xmm12, %xmm12\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+ vmovups %xmm12 , 2 * SIZE(CO1)\r
+\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $2, KK\r
+#endif\r
+\r
+ addq $4 * SIZE, CO1 # coffset += 4\r
+\r
+ ALIGN_4 \r
+\r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+.L1_2_40:\r
+ testq $1, M \r
+ jz .L999\r
+\r
+ ALIGN_4\r
+\r
+.L1_2_41:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $1, %rax // number of values in AO\r
+#else\r
+ addq $1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $-8, %rax // K = K - ( K % 8 )\r
+ je .L1_2_46\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_2_42:\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x1_1(xxx)\r
+ KERNEL1x1_2(xxx)\r
+ KERNEL1x1_3(xxx)\r
+ KERNEL1x1_4(xxx)\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x1_1(xxx)\r
+ KERNEL1x1_2(xxx)\r
+ KERNEL1x1_3(xxx)\r
+ KERNEL1x1_4(xxx)\r
+\r
+ je .L1_2_46\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x1_1(xxx)\r
+ KERNEL1x1_2(xxx)\r
+ KERNEL1x1_3(xxx)\r
+ KERNEL1x1_4(xxx)\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x1_1(xxx)\r
+ KERNEL1x1_2(xxx)\r
+ KERNEL1x1_3(xxx)\r
+ KERNEL1x1_4(xxx)\r
+\r
+ je .L1_2_46\r
+\r
+ jmp .L1_2_42\r
+ ALIGN_4\r
+\r
+.L1_2_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$7, %rax		# if (k & 7)\r
+ je .L1_2_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_2_47:\r
+\r
+ KERNEL1x1_SUB(xxx)\r
+ jl .L1_2_47\r
+ ALIGN_4\r
+\r
+\r
+.L1_2_49:\r
+\r
+ vmovddup ALPHA_R, %xmm0\r
+ vmovddup ALPHA_I, %xmm1\r
+\r
+	// swap high and low 64 bits (real <-> imaginary)\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubpd %xmm9, %xmm8, %xmm8\r
+\r
+ vshufpd $0x01, %xmm8 , %xmm8, %xmm9\r
+\r
+#else\r
+ vaddsubpd %xmm8, %xmm9, %xmm9\r
+\r
+ vmovapd %xmm9, %xmm8\r
+\r
+	// swap high and low 64 bits (real <-> imaginary)\r
+ vshufpd $0x01, %xmm9 , %xmm9, %xmm9\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulpd %xmm8 , %xmm0, %xmm8\r
+\r
+ // multiply with ALPHA_I\r
+ vmulpd %xmm9 , %xmm1, %xmm9\r
+\r
+ vaddsubpd %xmm9 ,%xmm8, %xmm8\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddpd (CO1), %xmm8 , %xmm8\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $1, KK\r
+#endif\r
+\r
+ addq $2 * SIZE, CO1 # coffset += 2\r
+ decq I # i --\r
+ jg .L1_2_41\r
+ ALIGN_4 \r
+\r
+\r
+\r
+\r
+\r
+\r
+.L999:\r
+ movq SP, %rsp\r
+ movq (%rsp), %rbx\r
+ movq 8(%rsp), %rbp\r
+ movq 16(%rsp), %r12\r
+ movq 24(%rsp), %r13\r
+ movq 32(%rsp), %r14\r
+ movq 40(%rsp), %r15\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq 48(%rsp), %rdi\r
+ movq 56(%rsp), %rsi\r
+ movups 64(%rsp), %xmm6\r
+ movups 80(%rsp), %xmm7\r
+ movups 96(%rsp), %xmm8\r
+ movups 112(%rsp), %xmm9\r
+ movups 128(%rsp), %xmm10\r
+ movups 144(%rsp), %xmm11\r
+ movups 160(%rsp), %xmm12\r
+ movups 176(%rsp), %xmm13\r
+ movups 192(%rsp), %xmm14\r
+ movups 208(%rsp), %xmm15\r
+#endif\r
+\r
+ addq $STACKSIZE, %rsp\r
+ ret\r
+\r
+ EPILOGUE\r