--- /dev/null
+/*********************************************************************************\r
+Copyright (c) 2013, The OpenBLAS Project\r
+All rights reserved.\r
+Redistribution and use in source and binary forms, with or without\r
+modification, are permitted provided that the following conditions are\r
+met:\r
+1. Redistributions of source code must retain the above copyright\r
+notice, this list of conditions and the following disclaimer.\r
+2. Redistributions in binary form must reproduce the above copyright\r
+notice, this list of conditions and the following disclaimer in\r
+the documentation and/or other materials provided with the\r
+distribution.\r
+3. Neither the name of the OpenBLAS project nor the names of\r
+its contributors may be used to endorse or promote products\r
+derived from this software without specific prior written permission.\r
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\r
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE\r
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\r
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\r
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\r
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\r
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+**********************************************************************************/\r
+\r
+\r
+#define ASSEMBLER\r
+#include "common.h"\r
+ \r
+#define OLD_M %rdi\r
+#define OLD_N %rsi\r
+#define M %r13\r
+#define J %r14\r
+#define OLD_K %rdx\r
+\r
+#define A %rcx\r
+#define B %r8\r
+#define C %r9\r
+#define LDC %r10\r
+ \r
+#define I %r11\r
+#define AO %rdi\r
+#define BO %rsi\r
+#define CO1 %r15\r
+#define K %r12\r
+#define BI %rbp\r
+#define SP %rbx\r
+\r
+#define BO1 %rdi\r
+#define BO2 %r15\r
+\r
+#ifndef WINDOWS_ABI\r
+\r
+#define STACKSIZE 96\r
+\r
+#else\r
+\r
+#define STACKSIZE 320\r
+\r
+#define OLD_ALPHA_I 40 + STACKSIZE(%rsp)\r
+#define OLD_A 48 + STACKSIZE(%rsp)\r
+#define OLD_B 56 + STACKSIZE(%rsp)\r
+#define OLD_C 64 + STACKSIZE(%rsp)\r
+#define OLD_LDC 72 + STACKSIZE(%rsp)\r
+#define OLD_OFFSET 80 + STACKSIZE(%rsp)\r
+\r
+#endif\r
+\r
+#define L_BUFFER_SIZE 8192\r
+\r
+#define Ndiv6 24(%rsp)\r
+#define Nmod6 32(%rsp)\r
+#define N 40(%rsp)\r
+#define ALPHA_R 48(%rsp)\r
+#define ALPHA_I 56(%rsp)\r
+#define OFFSET 64(%rsp)\r
+#define KK 72(%rsp)\r
+#define KKK 80(%rsp)\r
+#define BUFFER1 128(%rsp)\r
+\r
+#if defined(OS_WINDOWS)\r
+#if L_BUFFER_SIZE > 16384\r
+#define STACK_TOUCH \\r
+ movl $ 0, 4096 * 4(%rsp);\\r
+ movl $ 0, 4096 * 3(%rsp);\\r
+ movl $ 0, 4096 * 2(%rsp);\\r
+ movl $ 0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 12288\r
+#define STACK_TOUCH \\r
+ movl $ 0, 4096 * 3(%rsp);\\r
+ movl $ 0, 4096 * 2(%rsp);\\r
+ movl $ 0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 8192\r
+#define STACK_TOUCH \\r
+ movl $ 0, 4096 * 2(%rsp);\\r
+ movl $ 0, 4096 * 1(%rsp);\r
+#elif L_BUFFER_SIZE > 4096\r
+#define STACK_TOUCH \\r
+ movl $ 0, 4096 * 1(%rsp);\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
+#else\r
+#define STACK_TOUCH\r
+#endif\r
+\r
+\r
+\r
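+/*********************************************************************************
+* The VFMADDPS_* macros emulate a fused multiply-add with a separate multiply
+* and add/subtract pair (vmulps + vaddps/vsubps), using ymm2/ymm3 (or
+* xmm2/xmm3 for the 128-bit variants) as scratch registers. The four #if
+* branches select the signs of the real (_R/_YR) and imaginary (_I/_YI)
+* accumulations according to the conjugation case chosen by the NN/NT/.../CC
+* macros: both positive, real negated, imaginary negated, or both negated.
+*********************************************************************************/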
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT)\r
+\r
+#define VFMADDPS_YR( y0,y1,y2 ) \\r
+ vmulps y1,y2,%ymm2;\\r
+ vaddps y0,%ymm2,y0\r
+\r
+#define VFMADDPS_YI( y0,y1,y2 ) \\r
+ vmulps y1,y2,%ymm3;\\r
+ vaddps y0,%ymm3,y0\r
+\r
+#define VFMADDPS_R( y0,y1,y2 ) \\r
+ vmulps y1,y2,%xmm2;\\r
+ vaddps y0,%xmm2,y0\r
+\r
+#define VFMADDPS_I( y0,y1,y2 ) \\r
+ vmulps y1,y2,%xmm3;\\r
+ vaddps y0,%xmm3,y0\r
+\r
+\r
+#elif defined(RN) || defined(RT) || defined(CN) || defined(CT)\r
+\r
+#define VFMADDPS_YR( y0,y1,y2 ) \\r
+ vmulps y1,y2,%ymm2;\\r
+ vsubps %ymm2,y0,y0\r
+\r
+#define VFMADDPS_YI( y0,y1,y2 ) \\r
+ vmulps y1,y2,%ymm3;\\r
+ vaddps y0,%ymm3,y0\r
+\r
+#define VFMADDPS_R( y0,y1,y2 ) \\r
+ vmulps y1,y2,%xmm2;\\r
+ vsubps %xmm2,y0,y0\r
+\r
+#define VFMADDPS_I( y0,y1,y2 ) \\r
+ vmulps y1,y2,%xmm3;\\r
+ vaddps y0,%xmm3,y0\r
+\r
+\r
+#elif defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+#define VFMADDPS_YR( y0,y1,y2 ) \\r
+ vmulps y1,y2,%ymm2;\\r
+ vaddps y0,%ymm2,y0\r
+\r
+#define VFMADDPS_YI( y0,y1,y2 ) \\r
+ vmulps y1,y2,%ymm3;\\r
+ vsubps %ymm3,y0,y0\r
+\r
+#define VFMADDPS_R( y0,y1,y2 ) \\r
+ vmulps y1,y2,%xmm2;\\r
+ vaddps y0,%xmm2,y0\r
+\r
+#define VFMADDPS_I( y0,y1,y2 ) \\r
+ vmulps y1,y2,%xmm3;\\r
+ vsubps %xmm3,y0,y0\r
+\r
+\r
+#else\r
+\r
+#define VFMADDPS_YR( y0,y1,y2 ) \\r
+ vmulps y1,y2,%ymm2;\\r
+ vsubps %ymm2,y0,y0\r
+\r
+#define VFMADDPS_YI( y0,y1,y2 ) \\r
+ vmulps y1,y2,%ymm3;\\r
+ vsubps %ymm3,y0,y0\r
+\r
+#define VFMADDPS_R( y0,y1,y2 ) \\r
+ vmulps y1,y2,%xmm2;\\r
+ vsubps %xmm2,y0,y0\r
+\r
+#define VFMADDPS_I( y0,y1,y2 ) \\r
+ vmulps y1,y2,%xmm3;\\r
+ vsubps %xmm3,y0,y0\r
+\r
+\r
+#endif\r
+\r
+\r
+#define A_PR1 512\r
+#define B_PR1 512\r
+\r
+/***************************************************************************************************************************/\r
+\r
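+// KERNEL8x2_SUB performs one k-step of the 8x2 micro-kernel: it loads 8
+// complex floats of A into two ymm registers, broadcasts the real and
+// imaginary parts of 2 complex values of B, and accumulates the partial
+// products into ymm8-ymm15 (separate real/imaginary accumulators for each
+// of the 2 columns). AO/BO are indexed with negative offsets that count up
+// toward zero, so the trailing addq also sets the flags tested by the
+// remainder loops' jl.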
+.macro KERNEL8x2_SUB\r
+\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vbroadcastss -8 * SIZE(BO, BI, SIZE), %ymm4\r
+ VFMADDPS_YR( %ymm8,%ymm4,%ymm0 )\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm1\r
+ VFMADDPS_YR( %ymm12,%ymm4,%ymm1 )\r
+ vbroadcastss -7 * SIZE(BO, BI, SIZE), %ymm5\r
+ VFMADDPS_YI( %ymm9,%ymm5,%ymm0 )\r
+ VFMADDPS_YI( %ymm13,%ymm5,%ymm1 )\r
+ vbroadcastss -6 * SIZE(BO, BI, SIZE), %ymm6\r
+ VFMADDPS_YR( %ymm10,%ymm6,%ymm0 )\r
+ VFMADDPS_YR( %ymm14,%ymm6,%ymm1 )\r
+ vbroadcastss -5 * SIZE(BO, BI, SIZE), %ymm7\r
+ VFMADDPS_YI( %ymm11,%ymm7,%ymm0 )\r
+ VFMADDPS_YI( %ymm15,%ymm7,%ymm1 )\r
+ addq $ 4 , BI \r
+ addq $ 16, %rax \r
+.endm\r
+\r
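+// SAVE8x2 finishes the complex arithmetic for an 8x2 tile: vshufps $0xb1
+// swaps the real and imaginary 4-byte halves of every complex value, and
+// vaddsubps (subtract in even lanes, add in odd lanes) folds the real and
+// imaginary accumulators into full complex products. The tile is then
+// scaled by alpha = ALPHA_R + i*ALPHA_I with the same shuffle/addsub
+// pattern and, unless built as a TRMM kernel, added to the existing C.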
+.macro SAVE8x2\r
+\r
+ vbroadcastss ALPHA_R, %ymm0\r
+ vbroadcastss ALPHA_I, %ymm1\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %ymm9 , %ymm9, %ymm9\r
+ vshufps $ 0xb1, %ymm11, %ymm11, %ymm11\r
+ vshufps $ 0xb1, %ymm13, %ymm13, %ymm13\r
+ vshufps $ 0xb1, %ymm15, %ymm15, %ymm15\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %ymm9, %ymm8 , %ymm8\r
+ vaddsubps %ymm11,%ymm10, %ymm10\r
+ vaddsubps %ymm13,%ymm12, %ymm12\r
+ vaddsubps %ymm15,%ymm14, %ymm14\r
+\r
+ vshufps $ 0xb1, %ymm8 , %ymm8, %ymm9\r
+ vshufps $ 0xb1, %ymm10, %ymm10, %ymm11\r
+ vshufps $ 0xb1, %ymm12, %ymm12, %ymm13\r
+ vshufps $ 0xb1, %ymm14, %ymm14, %ymm15\r
+\r
+#else\r
+ vaddsubps %ymm8, %ymm9 ,%ymm9\r
+ vaddsubps %ymm10, %ymm11,%ymm11\r
+ vaddsubps %ymm12, %ymm13,%ymm13\r
+ vaddsubps %ymm14, %ymm15,%ymm15\r
+\r
+ vmovaps %ymm9, %ymm8\r
+ vmovaps %ymm11, %ymm10\r
+ vmovaps %ymm13, %ymm12\r
+ vmovaps %ymm15, %ymm14\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %ymm9 , %ymm9, %ymm9\r
+ vshufps $ 0xb1, %ymm11, %ymm11, %ymm11\r
+ vshufps $ 0xb1, %ymm13, %ymm13, %ymm13\r
+ vshufps $ 0xb1, %ymm15, %ymm15, %ymm15\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %ymm8 , %ymm0, %ymm8\r
+ vmulps %ymm10, %ymm0, %ymm10\r
+ vmulps %ymm12, %ymm0, %ymm12\r
+ vmulps %ymm14, %ymm0, %ymm14\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %ymm9 , %ymm1, %ymm9\r
+ vmulps %ymm11, %ymm1, %ymm11\r
+ vmulps %ymm13, %ymm1, %ymm13\r
+ vmulps %ymm15, %ymm1, %ymm15\r
+\r
+ vaddsubps %ymm9, %ymm8 , %ymm8\r
+ vaddsubps %ymm11,%ymm10, %ymm10\r
+ vaddsubps %ymm13,%ymm12, %ymm12\r
+ vaddsubps %ymm15,%ymm14, %ymm14\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %ymm8 , %ymm8\r
+ vaddps 8 * SIZE(CO1), %ymm12, %ymm12\r
+\r
+ vaddps (CO1, LDC), %ymm10, %ymm10\r
+ vaddps 8 * SIZE(CO1, LDC), %ymm14, %ymm14\r
+\r
+#endif\r
+\r
+ vmovups %ymm8 , (CO1)\r
+ vmovups %ymm12 , 8 * SIZE(CO1)\r
+\r
+ vmovups %ymm10 , (CO1, LDC)\r
+ vmovups %ymm14 , 8 * SIZE(CO1, LDC)\r
+\r
+ prefetcht0 64(CO1)\r
+ prefetcht0 64(CO1, LDC)\r
+\r
+.endm\r
+\r
+/***************************************************************************************************************************/\r
+\r
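+// KERNEL4x2_SUB / SAVE4x2: same scheme as the 8x2 pair above, but for a
+// 4x2 tile held in 128-bit xmm registers (4 complex floats of A per k-step).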
+.macro KERNEL4x2_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -8 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPS_R( %xmm8,%xmm4,%xmm0 )\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %xmm1\r
+ VFMADDPS_R( %xmm12,%xmm4,%xmm1 )\r
+ vbroadcastss -7 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPS_I( %xmm9,%xmm5,%xmm0 )\r
+ VFMADDPS_I( %xmm13,%xmm5,%xmm1 )\r
+ vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm6\r
+ VFMADDPS_R( %xmm10,%xmm6,%xmm0 )\r
+ VFMADDPS_R( %xmm14,%xmm6,%xmm1 )\r
+ vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm7\r
+ VFMADDPS_I( %xmm11,%xmm7,%xmm0 )\r
+ VFMADDPS_I( %xmm15,%xmm7,%xmm1 )\r
+ addq $ 4, BI \r
+ addq $ 8, %rax \r
+.endm\r
+\r
+.macro SAVE4x2\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $ 0xb1, %xmm11, %xmm11, %xmm11\r
+ vshufps $ 0xb1, %xmm13, %xmm13, %xmm13\r
+ vshufps $ 0xb1, %xmm15, %xmm15, %xmm15\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+ vaddsubps %xmm13,%xmm12, %xmm12\r
+ vaddsubps %xmm15,%xmm14, %xmm14\r
+\r
+ vshufps $ 0xb1, %xmm8 , %xmm8, %xmm9\r
+ vshufps $ 0xb1, %xmm10, %xmm10, %xmm11\r
+ vshufps $ 0xb1, %xmm12, %xmm12, %xmm13\r
+ vshufps $ 0xb1, %xmm14, %xmm14, %xmm15\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+ vaddsubps %xmm10, %xmm11,%xmm11\r
+ vaddsubps %xmm12, %xmm13,%xmm13\r
+ vaddsubps %xmm14, %xmm15,%xmm15\r
+\r
+ vmovaps %xmm9, %xmm8\r
+ vmovaps %xmm11, %xmm10\r
+ vmovaps %xmm13, %xmm12\r
+ vmovaps %xmm15, %xmm14\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $ 0xb1, %xmm11, %xmm11, %xmm11\r
+ vshufps $ 0xb1, %xmm13, %xmm13, %xmm13\r
+ vshufps $ 0xb1, %xmm15, %xmm15, %xmm15\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+ vmulps %xmm10, %xmm0, %xmm10\r
+ vmulps %xmm12, %xmm0, %xmm12\r
+ vmulps %xmm14, %xmm0, %xmm14\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+ vmulps %xmm11, %xmm1, %xmm11\r
+ vmulps %xmm13, %xmm1, %xmm13\r
+ vmulps %xmm15, %xmm1, %xmm15\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+ vaddsubps %xmm13,%xmm12, %xmm12\r
+ vaddsubps %xmm15,%xmm14, %xmm14\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %xmm8 , %xmm8\r
+ vaddps 4 * SIZE(CO1), %xmm12, %xmm12\r
+\r
+ vaddps (CO1, LDC), %xmm10, %xmm10\r
+ vaddps 4 * SIZE(CO1, LDC), %xmm14, %xmm14\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+ vmovups %xmm12 , 4 * SIZE(CO1)\r
+\r
+ vmovups %xmm10 , (CO1, LDC)\r
+ vmovups %xmm14 , 4 * SIZE(CO1, LDC)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
+\r
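+// KERNEL2x2_SUB / SAVE2x2: 2x2 tile; a single xmm register holds the two
+// complex floats of A.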
+.macro KERNEL2x2_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -8 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPS_R( %xmm8,%xmm4,%xmm0 )\r
+ vbroadcastss -7 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPS_I( %xmm9,%xmm5,%xmm0 )\r
+ vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm6\r
+ VFMADDPS_R( %xmm10,%xmm6,%xmm0 )\r
+ vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm7\r
+ VFMADDPS_I( %xmm11,%xmm7,%xmm0 )\r
+ addq $ 4, BI \r
+ addq $ 4, %rax \r
+.endm\r
+\r
+.macro SAVE2x2\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+ // swap high and low 4 bytes\r
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $ 0xb1, %xmm11, %xmm11, %xmm11\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+\r
+ vshufps $ 0xb1, %xmm8 , %xmm8, %xmm9\r
+ vshufps $ 0xb1, %xmm10, %xmm10, %xmm11\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+ vaddsubps %xmm10, %xmm11,%xmm11\r
+\r
+ vmovaps %xmm9, %xmm8\r
+ vmovaps %xmm11, %xmm10\r
+\r
+ // swap high and low 4 bytes\r
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $ 0xb1, %xmm11, %xmm11, %xmm11\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+ vmulps %xmm10, %xmm0, %xmm10\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+ vmulps %xmm11, %xmm1, %xmm11\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %xmm8 , %xmm8\r
+\r
+ vaddps (CO1, LDC), %xmm10, %xmm10\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+\r
+ vmovups %xmm10 , (CO1, LDC)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
+\r
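+// KERNEL1x2_SUB / SAVE1x2: 1x2 tile; vmovsd loads and stores one complex
+// float (an 8-byte real/imaginary pair) in the low half of an xmm register.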
+.macro KERNEL1x2_SUB\r
+ vmovsd -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -8 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPS_R( %xmm8,%xmm4,%xmm0 )\r
+ vbroadcastss -7 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPS_I( %xmm9,%xmm5,%xmm0 )\r
+ vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm6\r
+ VFMADDPS_R( %xmm10,%xmm6,%xmm0 )\r
+ vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm7\r
+ VFMADDPS_I( %xmm11,%xmm7,%xmm0 )\r
+ addq $ 4, BI \r
+ addq $ 2, %rax \r
+.endm\r
+\r
+.macro SAVE1x2\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $ 0xb1, %xmm11, %xmm11, %xmm11\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+\r
+ vshufps $ 0xb1, %xmm8 , %xmm8, %xmm9\r
+ vshufps $ 0xb1, %xmm10, %xmm10, %xmm11\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+ vaddsubps %xmm10, %xmm11,%xmm11\r
+\r
+ vmovaps %xmm9, %xmm8\r
+ vmovaps %xmm11, %xmm10\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $ 0xb1, %xmm11, %xmm11, %xmm11\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+ vmulps %xmm10, %xmm0, %xmm10\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+ vmulps %xmm11, %xmm1, %xmm11\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vmovsd (CO1), %xmm14\r
+ vaddps %xmm14, %xmm8 , %xmm8\r
+\r
+ vmovsd (CO1, LDC), %xmm15\r
+ vaddps %xmm15, %xmm10, %xmm10\r
+\r
+#endif\r
+\r
+ vmovsd %xmm8 , (CO1)\r
+ vmovsd %xmm10 , (CO1, LDC)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
+\r
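+// The *x1 kernels below handle the single remaining column when N is odd;
+// B advances by one complex value (2 floats, addq $2 to BI) per k-step.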
+.macro KERNEL8x1_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0\r
+ vmovups -8 * SIZE(AO, %rax, SIZE), %ymm1\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %ymm4\r
+ VFMADDPS_YR( %ymm8,%ymm4,%ymm0 )\r
+ VFMADDPS_YR( %ymm12,%ymm4,%ymm1 )\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %ymm5\r
+ VFMADDPS_YI( %ymm9,%ymm5,%ymm0 )\r
+ VFMADDPS_YI( %ymm13,%ymm5,%ymm1 )\r
+ addq $ 2 , BI \r
+ addq $ 16, %rax \r
+.endm\r
+\r
+.macro SAVE8x1\r
+\r
+ vbroadcastss ALPHA_R, %ymm0\r
+ vbroadcastss ALPHA_I, %ymm1\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %ymm9 , %ymm9, %ymm9\r
+ vshufps $ 0xb1, %ymm13, %ymm13, %ymm13\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %ymm9, %ymm8 , %ymm8\r
+ vaddsubps %ymm13,%ymm12, %ymm12\r
+\r
+ vshufps $ 0xb1, %ymm8 , %ymm8, %ymm9\r
+ vshufps $ 0xb1, %ymm12, %ymm12, %ymm13\r
+\r
+#else\r
+ vaddsubps %ymm8, %ymm9 ,%ymm9\r
+ vaddsubps %ymm12, %ymm13,%ymm13\r
+\r
+ vmovaps %ymm9, %ymm8\r
+ vmovaps %ymm13, %ymm12\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %ymm9 , %ymm9, %ymm9\r
+ vshufps $ 0xb1, %ymm13, %ymm13, %ymm13\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %ymm8 , %ymm0, %ymm8\r
+ vmulps %ymm12, %ymm0, %ymm12\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %ymm9 , %ymm1, %ymm9\r
+ vmulps %ymm13, %ymm1, %ymm13\r
+\r
+ vaddsubps %ymm9, %ymm8 , %ymm8\r
+ vaddsubps %ymm13,%ymm12, %ymm12\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %ymm8 , %ymm8\r
+ vaddps 8 * SIZE(CO1), %ymm12, %ymm12\r
+\r
+#endif\r
+\r
+ vmovups %ymm8 , (CO1)\r
+ vmovups %ymm12 , 8 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+\r
+/************************************************************************************************/\r
+\r
+.macro KERNEL4x1_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPS_R( %xmm8,%xmm4,%xmm0 )\r
+ vmovups -12 * SIZE(AO, %rax, SIZE), %xmm1\r
+ VFMADDPS_R( %xmm12,%xmm4,%xmm1 )\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPS_I( %xmm9,%xmm5,%xmm0 )\r
+ VFMADDPS_I( %xmm13,%xmm5,%xmm1 )\r
+ addq $ 2, BI \r
+ addq $ 8, %rax \r
+.endm\r
+\r
+.macro SAVE4x1\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+ // swap high and low 4 bytes\r
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $ 0xb1, %xmm13, %xmm13, %xmm13\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm13,%xmm12, %xmm12\r
+\r
+ vshufps $ 0xb1, %xmm8 , %xmm8, %xmm9\r
+ vshufps $ 0xb1, %xmm12, %xmm12, %xmm13\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+ vaddsubps %xmm12, %xmm13,%xmm13\r
+\r
+ vmovaps %xmm9, %xmm8\r
+ vmovaps %xmm13, %xmm12\r
+\r
+ // swap high and low 4 bytes\r
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $ 0xb1, %xmm13, %xmm13, %xmm13\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+ vmulps %xmm12, %xmm0, %xmm12\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+ vmulps %xmm13, %xmm1, %xmm13\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm13,%xmm12, %xmm12\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %xmm8 , %xmm8\r
+ vaddps 4 * SIZE(CO1), %xmm12, %xmm12\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+ vmovups %xmm12 , 4 * SIZE(CO1)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
+\r
+.macro KERNEL2x1_SUB\r
+ vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPS_R( %xmm8,%xmm4,%xmm0 )\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPS_I( %xmm9,%xmm5,%xmm0 )\r
+ addq $ 2, BI \r
+ addq $ 4, %rax \r
+.endm\r
+\r
+.macro SAVE2x1\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+\r
+ vshufps $ 0xb1, %xmm8 , %xmm8, %xmm9\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+\r
+ vmovaps %xmm9, %xmm8\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %xmm8 , %xmm8\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
+\r
+.macro KERNEL1x1_SUB\r
+ vmovsd -16 * SIZE(AO, %rax, SIZE), %xmm0\r
+ vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4\r
+ VFMADDPS_R( %xmm8,%xmm4,%xmm0 )\r
+ vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5\r
+ VFMADDPS_I( %xmm9,%xmm5,%xmm0 )\r
+ addq $ 2, BI \r
+ addq $ 2, %rax \r
+.endm\r
+\r
+.macro SAVE1x1\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+\r
+ vshufps $ 0xb1, %xmm8 , %xmm8, %xmm9\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+\r
+ vmovaps %xmm9, %xmm8\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vmovsd (CO1), %xmm14\r
+ vaddps %xmm14, %xmm8 , %xmm8\r
+\r
+#endif\r
+\r
+ vmovsd %xmm8 , (CO1)\r
+\r
+.endm\r
+\r
+/************************************************************************************************/\r
+\r
+\r
+\r
+\r
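+// Prologue: reserve the STACKSIZE frame and save the callee-saved
+// registers (plus rdi/rsi/xmm6-xmm15 on Windows), fetch the remaining
+// arguments per ABI, then carve an aligned scratch area of L_BUFFER_SIZE
+// below the frame that holds the packed copy of B (BUFFER1).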
+ PROLOGUE\r
+ PROFCODE\r
+ \r
+ subq $ STACKSIZE, %rsp\r
+ movq %rbx, (%rsp)\r
+ movq %rbp, 8(%rsp)\r
+ movq %r12, 16(%rsp)\r
+ movq %r13, 24(%rsp)\r
+ movq %r14, 32(%rsp)\r
+ movq %r15, 40(%rsp)\r
+\r
+ vzeroupper\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq %rdi, 48(%rsp)\r
+ movq %rsi, 56(%rsp)\r
+ vmovups %xmm6, 64(%rsp)\r
+ vmovups %xmm7, 80(%rsp)\r
+ vmovups %xmm8, 96(%rsp)\r
+ vmovups %xmm9, 112(%rsp)\r
+ vmovups %xmm10, 128(%rsp)\r
+ vmovups %xmm11, 144(%rsp)\r
+ vmovups %xmm12, 160(%rsp)\r
+ vmovups %xmm13, 176(%rsp)\r
+ vmovups %xmm14, 192(%rsp)\r
+ vmovups %xmm15, 208(%rsp)\r
+\r
+ movq ARG1, OLD_M\r
+ movq ARG2, OLD_N\r
+ movq ARG3, OLD_K\r
+ movq OLD_A, A\r
+ movq OLD_B, B\r
+ movq OLD_C, C\r
+ movq OLD_LDC, LDC\r
+#ifdef TRMMKERNEL\r
+ movsd OLD_OFFSET, %xmm12\r
+#endif\r
+ vmovaps %xmm3, %xmm0\r
+ vmovsd OLD_ALPHA_I, %xmm1\r
+\r
+#else\r
+ movq STACKSIZE + 8(%rsp), LDC\r
+#ifdef TRMMKERNEL\r
+ movsd STACKSIZE + 16(%rsp), %xmm12\r
+#endif\r
+\r
+#endif\r
+\r
+ movq %rsp, SP # save old stack\r
+ subq $ 128 + L_BUFFER_SIZE, %rsp\r
+ andq $ -4096, %rsp # align stack\r
+\r
+ STACK_TOUCH\r
+\r
+ cmpq $ 0, OLD_M\r
+ je .L999\r
+\r
+ cmpq $ 0, OLD_N\r
+ je .L999\r
+\r
+ cmpq $ 0, OLD_K\r
+ je .L999\r
+\r
+ movq OLD_M, M\r
+ movq OLD_N, N\r
+ movq OLD_K, K\r
+\r
+ vmovss %xmm0, ALPHA_R\r
+ vmovss %xmm1, ALPHA_I\r
+\r
+ salq $ ZBASE_SHIFT, LDC\r
+\r
+ movq N, %rax\r
+ xorq %rdx, %rdx\r
+ movq $ 2, %rdi\r
+ divq %rdi // N / 2\r
+ movq %rax, Ndiv6 // N / 2\r
+ movq %rdx, Nmod6 // N % 2\r
+\r
+ \r
+\r
+#ifdef TRMMKERNEL\r
+ vmovsd %xmm12, OFFSET\r
+ vmovsd %xmm12, KK\r
+#ifndef LEFT\r
+ negq KK\r
+#endif \r
+#endif\r
+\r
+.L2_0:\r
+\r
+ movq Ndiv6, J\r
+ cmpq $ 0, J\r
+ je .L1_0\r
+ ALIGN_4\r
+\r
+\r
+\r
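+// For each block of 2 columns, B is first copied (K iterations of 2
+// complex values = 4 floats each) into the on-stack BUFFER1, so the
+// inner kernels read B contiguously.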
+.L2_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
+.L2_02b:\r
+\r
+ vmovups (BO1), %xmm0\r
+ vmovups %xmm0, (BO)\r
+ addq $ 4*SIZE,BO1\r
+ addq $ 4*SIZE,BO\r
+ decq %rax\r
+ jnz .L2_02b\r
+\r
+.L2_02c:\r
+\r
+	movq	BO1, B	// B now points to the next panel
+\r
+.L2_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 2), C // c += 2 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $ 16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $ 3, I // i = (m >> 3)\r
+ je .L2_4_10\r
+\r
+ ALIGN_4\r
+/**********************************************************************************************************/\r
+\r
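+// Main 8x2 loop over M. The k-loop is unrolled by 8: AO/BO are advanced
+// past the whole block up front and %rax/BI are negated, so the kernel
+// macros index upward through negative offsets until they reach zero.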
+.L2_8_11:\r
+\r
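+// For TRMM, KK tracks the diagonal offset of the current tile and KKK the
+// number of k-iterations that apply to it; the plain GEMM build uses K.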
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $ 8, %rax // number of values in AO\r
+#else\r
+ addq $ 2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $ -8, %rax // K = K - ( K % 8 )\r
+ je .L2_8_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $ 4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_8_12:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+\r
+ je .L2_8_16\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x2_SUB\r
+\r
+ je .L2_8_16\r
+\r
+ jmp .L2_8_12\r
+ ALIGN_4\r
+\r
+.L2_8_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$ 7, %rax		# if (k & 7)
+ je .L2_8_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $ 4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_8_17:\r
+\r
+ KERNEL8x2_SUB\r
+\r
+ jl .L2_8_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_8_19:\r
+\r
+ SAVE8x2\r
+\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $ 8, KK\r
+#endif\r
+\r
+ addq $ 16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L2_8_11\r
+ ALIGN_4 \r
+\r
+\r
+/**********************************************************************************************************/\r
+\r
+\r
+\r
+\r
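+// Remainder of M for the 2-column block: handle 4, 2 and 1 leftover rows
+// in turn (M & 7 after the 8-wide main loop).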
+.L2_4_10:\r
+ testq $ 7, M \r
+ jz .L2_4_60 // to next 2 lines of N\r
+\r
+ testq $ 4, M \r
+ jz .L2_4_20\r
+ ALIGN_4\r
+\r
+\r
+.L2_4_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $ 4, %rax // number of values in AO\r
+#else\r
+ addq $ 2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $ -8, %rax // K = K - ( K % 8 )\r
+ je .L2_4_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $ 3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_12:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ je .L2_4_16\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x2_SUB\r
+ KERNEL4x2_SUB\r
+\r
+ je .L2_4_16\r
+\r
+ jmp .L2_4_12\r
+ ALIGN_4\r
+\r
+.L2_4_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$ 7, %rax		# if (k & 7)
+ je .L2_4_19\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $ 3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_17:\r
+\r
+ KERNEL4x2_SUB\r
+\r
+ jl .L2_4_17\r
+ ALIGN_4\r
+\r
+\r
+.L2_4_19:\r
+\r
+ SAVE4x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $ 4, KK\r
+#endif\r
+\r
+ addq $ 8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4 \r
+\r
+\r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+\r
+.L2_4_20:\r
+\r
+ testq $ 2, M \r
+ jz .L2_4_40\r
+ ALIGN_4\r
+\r
+.L2_4_21:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $ 2, %rax // number of values in AO\r
+#else\r
+ addq $ 2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $ -8, %rax // K = K - ( K % 8 )\r
+ je .L2_4_26\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $ 2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_22:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ je .L2_4_26\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+ KERNEL2x2_SUB\r
+\r
+ je .L2_4_26\r
+\r
+ jmp .L2_4_22\r
+ ALIGN_4\r
+\r
+.L2_4_26:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$ 7, %rax		# if (k & 7)
+ je .L2_4_29\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $ 2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_27:\r
+\r
+ KERNEL2x2_SUB\r
+\r
+ jl .L2_4_27\r
+ ALIGN_4\r
+\r
+\r
+.L2_4_29:\r
+\r
+ vbroadcastss ALPHA_R, %xmm0\r
+ vbroadcastss ALPHA_I, %xmm1\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $ 0xb1, %xmm11, %xmm11, %xmm11\r
+\r
+#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \\r
+ defined(NR) || defined(NC) || defined(TR) || defined(TC)\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+\r
+ vshufps $ 0xb1, %xmm8 , %xmm8, %xmm9\r
+ vshufps $ 0xb1, %xmm10, %xmm10, %xmm11\r
+\r
+#else\r
+ vaddsubps %xmm8, %xmm9 ,%xmm9\r
+ vaddsubps %xmm10, %xmm11,%xmm11\r
+\r
+ vmovaps %xmm9, %xmm8\r
+ vmovaps %xmm11, %xmm10\r
+\r
+	// swap high and low 4 bytes
+ vshufps $ 0xb1, %xmm9 , %xmm9, %xmm9\r
+ vshufps $ 0xb1, %xmm11, %xmm11, %xmm11\r
+\r
+#endif\r
+\r
+ // multiply with ALPHA_R\r
+ vmulps %xmm8 , %xmm0, %xmm8\r
+ vmulps %xmm10, %xmm0, %xmm10\r
+\r
+ // multiply with ALPHA_I\r
+ vmulps %xmm9 , %xmm1, %xmm9\r
+ vmulps %xmm11, %xmm1, %xmm11\r
+\r
+ vaddsubps %xmm9, %xmm8 , %xmm8\r
+ vaddsubps %xmm11,%xmm10, %xmm10\r
+\r
+\r
+\r
+#ifndef TRMMKERNEL\r
+\r
+ vaddps (CO1), %xmm8 , %xmm8\r
+\r
+ vaddps (CO1, LDC), %xmm10, %xmm10\r
+\r
+#endif\r
+\r
+ vmovups %xmm8 , (CO1)\r
+\r
+ vmovups %xmm10 , (CO1, LDC)\r
+\r
+\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $ 2, KK\r
+#endif\r
+\r
+ addq $ 4 * SIZE, CO1 # coffset += 4\r
+ decq I # i --\r
+ jg .L2_4_21\r
+ ALIGN_4 \r
+\r
+\r
+\r
+/**************************************************************************/\r
+.L2_4_40:\r
+ testq $ 1, M \r
+ jz .L2_4_60 // to next 2 lines of N\r
+\r
+ ALIGN_4\r
+\r
+.L2_4_41:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 8 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 8 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $ 1, %rax // number of values in AO\r
+#else\r
+ addq $ 2, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $ -8, %rax // K = K - ( K % 8 )\r
+ je .L2_4_46\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $ 1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_42:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ je .L2_4_46\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+ KERNEL1x2_SUB\r
+\r
+ je .L2_4_46\r
+\r
+ jmp .L2_4_42\r
+ ALIGN_4\r
+\r
+.L2_4_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$ 7, %rax		# if (k & 7)
+ je .L2_4_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+\r
+ salq $ 1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L2_4_47:\r
+\r
+ KERNEL1x2_SUB\r
+\r
+ jl .L2_4_47\r
+ ALIGN_4\r
+\r
+\r
+.L2_4_49:\r
+\r
+ SAVE1x2\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,4), BI // BI = BI * 4 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $ 1, KK\r
+#endif\r
+\r
+ addq $ 2 * SIZE, CO1 # coffset += 2\r
+ decq I # i --\r
+ jg .L2_4_41\r
+ ALIGN_4 \r
+\r
+\r
+\r
+ \r
+.L2_4_60:\r
+#if defined(TRMMKERNEL) && !defined(LEFT)\r
+ addq $ 2, KK\r
+#endif\r
+\r
+ decq J // j --\r
+ jg .L2_01 // next 2 lines of N\r
+\r
+\r
+\r
+.L1_0:\r
+\r
+/************************************************************************************************\r
+* Loop for N % 2 > 0 (remaining single column)
+*************************************************************************************************/\r
+\r
+ movq Nmod6, J \r
+ andq $ 1, J // j % 2\r
+ je .L999\r
+ ALIGN_4\r
+\r
+.L1_01:\r
+ // copy to sub buffer\r
+ movq B, BO1\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ movq K, %rax\r
+ ALIGN_4\r
+\r
+.L1_02b:\r
+\r
+ vmovsd (BO1), %xmm0\r
+ vmovsd %xmm0, (BO)\r
+ addq $ 2*SIZE,BO1\r
+ addq $ 2*SIZE,BO\r
+ decq %rax\r
+ jnz .L1_02b\r
+\r
+.L1_02c:\r
+\r
+	movq	BO1, B	// B now points to the next panel
+\r
+.L1_10:\r
+ movq C, CO1\r
+ leaq (C, LDC, 1), C // c += 1 * ldc\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ movq OFFSET, %rax\r
+ movq %rax, KK\r
+#endif\r
+ \r
+ movq A, AO // aoffset = a\r
+ addq $ 16 * SIZE, AO\r
+\r
+ movq M, I\r
+ sarq $ 3, I // i = (m >> 3)\r
+ je .L1_4_10\r
+\r
+ ALIGN_4\r
+\r
+/**************************************************************************************************/\r
+\r
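+// Main 8x1 loop over M for the odd last column; the structure mirrors the
+// 8x2 loop above, with B advancing 2 floats per k-step.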
+.L1_8_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $ 8, %rax // number of values in AO\r
+#else\r
+ addq $ 1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $ -8, %rax // K = K - ( K % 8 )\r
+ je .L1_8_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $ 4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_8_12:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+\r
+ je .L1_8_16\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL8x1_SUB\r
+\r
+ je .L1_8_16\r
+\r
+ jmp .L1_8_12\r
+ ALIGN_4\r
+\r
+.L1_8_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$ 7, %rax		# if (k & 7)
+ je .L1_8_19\r
+\r
+ movq %rax, BI // Index for BO\r
+	leaq	( ,BI,2), BI	// BI = BI * 2 ; number of values
+\r
+ salq $ 4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_8_17:\r
+\r
+ KERNEL8x1_SUB\r
+\r
+ jl .L1_8_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_8_19:\r
+\r
+ SAVE8x1\r
+\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 4, %rax // rax = rax *16 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $ 8, KK\r
+#endif\r
+\r
+ addq $ 16 * SIZE, CO1 # coffset += 16\r
+ decq I # i --\r
+ jg .L1_8_11\r
+ ALIGN_4 \r
+\r
+\r
+\r
+/**************************************************************************************************/\r
+.L1_4_10:\r
+\r
+ testq $ 7, M \r
+ jz .L999\r
+\r
+ testq $ 4, M \r
+ jz .L1_4_20\r
+\r
+\r
+.L1_4_11:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $ 4, %rax // number of values in AO\r
+#else\r
+ addq $ 1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $ -8, %rax // K = K - ( K % 8 )\r
+ je .L1_4_16\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $ 3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_12:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ je .L1_4_16\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL4x1_SUB\r
+ KERNEL4x1_SUB\r
+\r
+ je .L1_4_16\r
+\r
+ jmp .L1_4_12\r
+ ALIGN_4\r
+\r
+.L1_4_16:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$ 7, %rax		# if (k & 7)
+ je .L1_4_19\r
+\r
+ movq %rax, BI // Index for BO\r
+	leaq	( ,BI,2), BI	// BI = BI * 2 ; number of values
+\r
+ salq $ 3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_17:\r
+\r
+ KERNEL4x1_SUB\r
+\r
+ jl .L1_4_17\r
+ ALIGN_4\r
+\r
+\r
+.L1_4_19:\r
+\r
+ SAVE4x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 3, %rax // rax = rax * 8 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $ 4, KK\r
+#endif\r
+\r
+ addq $ 8 * SIZE, CO1 # coffset += 8\r
+ ALIGN_4 \r
+\r
+\r
+\r
+/**************************************************************************\r
+* Rest of M \r
+***************************************************************************/\r
+\r
+.L1_4_20:\r
+\r
+ testq $ 2, M \r
+ jz .L1_4_40\r
+ ALIGN_4\r
+\r
+.L1_4_21:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $ 2, %rax // number of values in AO\r
+#else\r
+ addq $ 1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $ -8, %rax // K = K - ( K % 8 )\r
+ je .L1_4_26\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $ 2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_22:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ je .L1_4_26\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+ KERNEL2x1_SUB\r
+\r
+ je .L1_4_26\r
+\r
+ jmp .L1_4_22\r
+ ALIGN_4\r
+\r
+.L1_4_26:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$ 7, %rax		# if (k & 7)
+ je .L1_4_29\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2; number of values\r
+\r
+ salq $ 2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_27:\r
+\r
+ KERNEL2x1_SUB\r
+\r
+ jl .L1_4_27\r
+ ALIGN_4\r
+\r
+\r
+.L1_4_29:\r
+\r
+ SAVE2x1\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 2, %rax // rax = rax * 4 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $ 2, KK\r
+#endif\r
+\r
+ addq $ 4 * SIZE, CO1 # coffset += 4\r
+ ALIGN_4 \r
+\r
+\r
+\r
+/**************************************************************************/\r
+.L1_4_40:\r
+ testq $ 1, M \r
+	jz	.L999	// done
+\r
+ ALIGN_4\r
+\r
+.L1_4_41:\r
+\r
+#if !defined(TRMMKERNEL) || \\r
+ (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 4 * SIZE, BO\r
+#else\r
+ movq KK, %rax\r
+ leaq BUFFER1, BO // first buffer to BO\r
+ addq $ 4 * SIZE, BO\r
+ movq %rax, BI // Index for BO\r
+ leaq (,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+ vzeroall\r
+\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))\r
+ movq K, %rax\r
+ subq KK, %rax\r
+ movq %rax, KKK\r
+#else\r
+ movq KK, %rax\r
+#ifdef LEFT\r
+ addq $ 1, %rax // number of values in AO\r
+#else\r
+ addq $ 1, %rax // number of values in BO\r
+#endif\r
+ movq %rax, KKK\r
+#endif\r
+\r
+\r
+ andq $ -8, %rax // K = K - ( K % 8 )\r
+ je .L1_4_46\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $ 1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_42:\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ je .L1_4_46\r
+\r
+ prefetcht0 A_PR1(AO,%rax,SIZE)\r
+ prefetcht0 B_PR1(BO,BI,SIZE)\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+ KERNEL1x1_SUB\r
+\r
+ je .L1_4_46\r
+\r
+ jmp .L1_4_42\r
+ ALIGN_4\r
+\r
+.L1_4_46:\r
+#ifndef TRMMKERNEL\r
+ movq K, %rax\r
+#else\r
+ movq KKK, %rax\r
+#endif\r
+\r
+	andq	$ 7, %rax		# if (k & 7)
+ je .L1_4_49\r
+\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+\r
+ salq $ 1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+ leaq (BO, BI, SIZE), BO\r
+ negq BI\r
+ negq %rax\r
+ ALIGN_4\r
+\r
+.L1_4_47:\r
+\r
+ KERNEL1x1_SUB\r
+\r
+ jl .L1_4_47\r
+ ALIGN_4\r
+\r
+\r
+.L1_4_49:\r
+\r
+ SAVE1x1\r
+\r
+\r
+\r
+#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \\r
+ (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))\r
+ movq K, %rax\r
+ subq KKK, %rax\r
+ movq %rax, BI // Index for BO\r
+ leaq ( ,BI,2), BI // BI = BI * 2 ; number of values\r
+ leaq (BO, BI, SIZE), BO\r
+ salq $ 1, %rax // rax = rax * 2 ; number of values\r
+ leaq (AO, %rax, SIZE), AO\r
+#endif\r
+\r
+\r
+#if defined(TRMMKERNEL) && defined(LEFT)\r
+ addq $ 1, KK\r
+#endif\r
+\r
+ addq $ 2 * SIZE, CO1 # coffset += 2\r
+ ALIGN_4 \r
+\r
+\r
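+// Epilogue: restore the saved stack pointer and callee-saved registers
+// (plus rdi/rsi and xmm6-xmm15 on Windows), then return.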
+.L999:\r
+ vzeroupper\r
+\r
+ movq SP, %rsp\r
+ movq (%rsp), %rbx\r
+ movq 8(%rsp), %rbp\r
+ movq 16(%rsp), %r12\r
+ movq 24(%rsp), %r13\r
+ movq 32(%rsp), %r14\r
+ movq 40(%rsp), %r15\r
+\r
+#ifdef WINDOWS_ABI\r
+ movq 48(%rsp), %rdi\r
+ movq 56(%rsp), %rsi\r
+ vmovups 64(%rsp), %xmm6\r
+ vmovups 80(%rsp), %xmm7\r
+ vmovups 96(%rsp), %xmm8\r
+ vmovups 112(%rsp), %xmm9\r
+ vmovups 128(%rsp), %xmm10\r
+ vmovups 144(%rsp), %xmm11\r
+ vmovups 160(%rsp), %xmm12\r
+ vmovups 176(%rsp), %xmm13\r
+ vmovups 192(%rsp), %xmm14\r
+ vmovups 208(%rsp), %xmm15\r
+#endif\r
+\r
+ addq $ STACKSIZE, %rsp\r
+ ret\r
+\r
+ EPILOGUE\r