From 00f33c0134da349d9809ff8d9ad334d43381b91e Mon Sep 17 00:00:00 2001
From: wernsaar
Date: Mon, 11 Nov 2013 14:20:59 +0100
Subject: [PATCH] added asum_kernel for all precisions and complex

---
 kernel/arm/KERNEL.ARMV7 |   8 +-
 kernel/arm/asum_vfpv3.S | 481 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 485 insertions(+), 4 deletions(-)
 create mode 100644 kernel/arm/asum_vfpv3.S

diff --git a/kernel/arm/KERNEL.ARMV7 b/kernel/arm/KERNEL.ARMV7
index a6b1d67..10db665 100644
--- a/kernel/arm/KERNEL.ARMV7
+++ b/kernel/arm/KERNEL.ARMV7
@@ -35,10 +35,10 @@ DSWAPKERNEL = ../arm/swap.c
 CSWAPKERNEL = ../arm/zswap.c
 ZSWAPKERNEL = ../arm/zswap.c
 
-SASUMKERNEL = ../arm/asum.c
-DASUMKERNEL = ../arm/asum.c
-CASUMKERNEL = ../arm/zasum.c
-ZASUMKERNEL = ../arm/zasum.c
+SASUMKERNEL = asum_vfpv3.S
+DASUMKERNEL = asum_vfpv3.S
+CASUMKERNEL = asum_vfpv3.S
+ZASUMKERNEL = asum_vfpv3.S
 
 SAXPYKERNEL = ../arm/axpy.c
 DAXPYKERNEL = ../arm/axpy.c
diff --git a/kernel/arm/asum_vfpv3.S b/kernel/arm/asum_vfpv3.S
new file mode 100644
index 0000000..2b6ceb1
--- /dev/null
+++ b/kernel/arm/asum_vfpv3.S
@@ -0,0 +1,481 @@
+/***************************************************************************
+Copyright (c) 2013, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+/**************************************************************************************
+* 2013/11/11 Saar
+* BLASTEST : OK
+* CTEST : OK
+* TEST : OK
+*
+**************************************************************************************/
+
+#define ASSEMBLER
+#include "common.h"
+
+#define STACKSIZE 256
+
+#define	N	r0
+#define	X	r1
+#define	INC_X	r2
+
+
+#define	I	r12
+
+#define	X_PRE	512
+
+/**************************************************************************************
+* Macro definitions
+**************************************************************************************/
+
+#if !defined(COMPLEX)
+
+#if defined(DOUBLE)
+
+.macro KERNEL_F4
+
+	pld	[ X, #X_PRE ]
+	fldmiad	X!, { d4 - d5 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	vabs.f64	d5, d5
+	fldmiad	X!, { d6 - d7 }
+	vabs.f64	d6, d6
+	vadd.f64	d1 , d1, d5
+	vabs.f64	d7, d7
+	vadd.f64	d0 , d0, d6
+	vadd.f64	d1 , d1, d7
+
+.endm
+
+.macro KERNEL_F1
+
+	fldmiad	X!, { d4 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+
+.endm
+
+
+.macro KERNEL_S4
+
+	fldmiad	X, { d4 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	add	X, X, INC_X
+
+	fldmiad	X, { d4 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	add	X, X, INC_X
+
+	fldmiad	X, { d4 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	add	X, X, INC_X
+
+	fldmiad	X, { d4 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	add	X, X, INC_X
+
+.endm
+
+
+.macro KERNEL_S1
+
+	fldmiad	X, { d4 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	add	X, X, INC_X
+
+.endm
+
+#else
+
+.macro KERNEL_F4
+
+	fldmias	X!, { s4 - s5 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	vabs.f32	s5, s5
+	fldmias	X!, { s6 - s7 }
+	vabs.f32	s6, s6
+	vadd.f32	s1 , s1, s5
+	vabs.f32	s7, s7
+	vadd.f32	s0 , s0, s6
+	vadd.f32	s1 , s1, s7
+
+.endm
+
+.macro KERNEL_F1
+
+	fldmias	X!, { s4 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+
+.endm
+
+
+.macro KERNEL_S4
+
+	fldmias	X, { s4 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	add	X, X, INC_X
+
+	fldmias	X, { s4 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	add	X, X, INC_X
+
+	fldmias	X, { s4 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	add	X, X, INC_X
+
+	fldmias	X, { s4 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	add	X, X, INC_X
+
+.endm
+
+
+.macro KERNEL_S1
+
+	fldmias	X, { s4 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	add	X, X, INC_X
+
+.endm
+
+
+#endif
+
+#else
+
+#if defined(DOUBLE)
+
+.macro KERNEL_F4
+
+	pld	[ X, #X_PRE ]
+	fldmiad	X!, { d4 - d5 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	vabs.f64	d5, d5
+	fldmiad	X!, { d6 - d7 }
+	vabs.f64	d6, d6
+	vadd.f64	d1 , d1, d5
+	vabs.f64	d7, d7
+	vadd.f64	d0 , d0, d6
+	vadd.f64	d1 , d1, d7
+
+	pld	[ X, #X_PRE ]
+	fldmiad	X!, { d4 - d5 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	vabs.f64	d5, d5
+	fldmiad	X!, { d6 - d7 }
+	vabs.f64	d6, d6
+	vadd.f64	d1 , d1, d5
+	vabs.f64	d7, d7
+	vadd.f64	d0 , d0, d6
+	vadd.f64	d1 , d1, d7
+
+
+.endm
+
+.macro KERNEL_F1
+
+	fldmiad	X!, { d4 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+
+	fldmiad	X!, { d4 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+
+
+.endm
+
+
+.macro KERNEL_S4
+
+	fldmiad	X, { d4 - d5 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	vabs.f64	d5, d5
+	vadd.f64	d0 , d0, d5
+	add	X, X, INC_X
+
+	fldmiad	X, { d4 - d5 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	vabs.f64	d5, d5
+	vadd.f64	d0 , d0, d5
+	add	X, X, INC_X
+
+	fldmiad	X, { d4 - d5 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	vabs.f64	d5, d5
+	vadd.f64	d0 , d0, d5
+	add	X, X, INC_X
+
+	fldmiad	X, { d4 - d5 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	vabs.f64	d5, d5
+	vadd.f64	d0 , d0, d5
+	add	X, X, INC_X
+
+.endm
+
+
+.macro KERNEL_S1
+
+	fldmiad	X, { d4 - d5 }
+	vabs.f64	d4, d4
+	vadd.f64	d0 , d0, d4
+	vabs.f64	d5, d5
+	vadd.f64	d0 , d0, d5
+	add	X, X, INC_X
+
+.endm
+
+#else
+
+.macro KERNEL_F4
+
+	pld	[ X, #X_PRE ]
+	fldmias	X!, { s4 - s5 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	vabs.f32	s5, s5
+	fldmias	X!, { s6 - s7 }
+	vabs.f32	s6, s6
+	vadd.f32	s1 , s1, s5
+	vabs.f32	s7, s7
+	vadd.f32	s0 , s0, s6
+	vadd.f32	s1 , s1, s7
+
+	fldmias	X!, { s4 - s5 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	vabs.f32	s5, s5
+	fldmias	X!, { s6 - s7 }
+	vabs.f32	s6, s6
+	vadd.f32	s1 , s1, s5
+	vabs.f32	s7, s7
+	vadd.f32	s0 , s0, s6
+	vadd.f32	s1 , s1, s7
+
+
+.endm
+
+.macro KERNEL_F1
+
+	fldmias	X!, { s4 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+
+	fldmias	X!, { s4 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+
+.endm
+
+
+.macro KERNEL_S4
+
+	fldmias	X, { s4 - s5 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	vabs.f32	s5, s5
+	vadd.f32	s0 , s0, s5
+	add	X, X, INC_X
+
+	fldmias	X, { s4 - s5 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	vabs.f32	s5, s5
+	vadd.f32	s0 , s0, s5
+	add	X, X, INC_X
+
+	fldmias	X, { s4 - s5 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	vabs.f32	s5, s5
+	vadd.f32	s0 , s0, s5
+	add	X, X, INC_X
+
+	fldmias	X, { s4 - s5 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	vabs.f32	s5, s5
+	vadd.f32	s0 , s0, s5
+	add	X, X, INC_X
+
+.endm
+
+
+.macro KERNEL_S1
+
+	fldmias	X, { s4 - s5 }
+	vabs.f32	s4, s4
+	vadd.f32	s0 , s0, s4
+	vabs.f32	s5, s5
+	vadd.f32	s0 , s0, s5
+	add	X, X, INC_X
+
+.endm
+
+#endif
+
+#endif
+
+/**************************************************************************************
+* End of macro definitions
+**************************************************************************************/
+
+	PROLOGUE
+
+	.align 5
+
+#if defined(DOUBLE)
+	vsub.f64	d0 , d0 , d0
+	vsub.f64	d1 , d1 , d1
+#else
+	vsub.f32	s0 , s0 , s0
+	vsub.f32	s1 , s1 , s1
+#endif
+
+	cmp	N, #0
+	ble	asum_kernel_L999
+
+	cmp	INC_X, #0
+	beq	asum_kernel_L999
+
+	cmp	INC_X, #1
+	bne	asum_kernel_S_BEGIN
+
+
+asum_kernel_F_BEGIN:
+
+	asrs	I, N, #2	// I = N / 4
+	ble	asum_kernel_F1
+
+	.align 5
+
+asum_kernel_F4:
+
+#if !defined(DOUBLE) && !defined(COMPLEX)
+	pld	[ X, #X_PRE ]
+#endif
+	KERNEL_F4
+
+	subs	I, I, #1
+	ble	asum_kernel_F1
+
+	KERNEL_F4
+
+	subs	I, I, #1
+	bne	asum_kernel_F4
+
+asum_kernel_F1:
+
+	ands	I, N, #3
+	ble	asum_kernel_L999
+
+asum_kernel_F10:
+
+	KERNEL_F1
+
+	subs	I, I, #1
+	bne	asum_kernel_F10
+
+	b	asum_kernel_L999
+
+asum_kernel_S_BEGIN:
+
+#if defined(COMPLEX)
+
+#if defined(DOUBLE)
+	lsl	INC_X, INC_X, #4	// INC_X * SIZE * 2
+#else
+	lsl	INC_X, INC_X, #3	// INC_X * SIZE * 2
+#endif
+
+#else
+
+#if defined(DOUBLE)
+	lsl	INC_X, INC_X, #3	// INC_X * SIZE
+#else
+	lsl	INC_X, INC_X, #2	// INC_X * SIZE
+#endif
+
+#endif
+
+	asrs	I, N, #2	// I = N / 4
+	ble	asum_kernel_S1
+
+	.align 5
+
+asum_kernel_S4:
+
+	KERNEL_S4
+
+	subs	I, I, #1
+	bne	asum_kernel_S4
+
+asum_kernel_S1:
+
+	ands	I, N, #3
+	ble	asum_kernel_L999
+
+asum_kernel_S10:
+
+	KERNEL_S1
+
+	subs	I, I, #1
+	bne	asum_kernel_S10
+
+
+asum_kernel_L999:
+
+
+#if defined(DOUBLE)
+	vadd.f64	d0 , d0, d1	// set return value
+#else
+	vadd.f32	s0 , s0, s1	// set return value
+#endif
+
+	bx	lr
+
+	EPILOGUE
+
-- 
2.7.4
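
For reference, the reduction these kernels compute is the BLAS ASUM: the sum of absolute values of the vector elements, where the complex variants add |Re| + |Im| for each element (that is why the COMPLEX kernels above run a paired vabs/vadd sequence per element). The following is a minimal C sketch of that reduction, not part of the patch; the function names and the use of double throughout are illustrative only, and the plain-C kernels previously referenced in KERNEL.ARMV7 (../arm/asum.c, ../arm/zasum.c) may differ in detail.

```c
#include <math.h>
#include <stddef.h>

/* Sketch of the real reduction (SASUM/DASUM semantics):
   result = sum of |x[i]| over n elements with stride inc_x. */
static double asum_ref(size_t n, const double *x, size_t inc_x)
{
	double sum = 0.0;
	for (size_t i = 0; i < n; i++)
		sum += fabs(x[i * inc_x]);
	return sum;
}

/* Sketch of the complex reduction (CASUM/ZASUM semantics):
   each element contributes |Re| + |Im|; x holds interleaved
   real/imaginary parts, and inc_x counts complex elements. */
static double zasum_ref(size_t n, const double *x, size_t inc_x)
{
	double sum = 0.0;
	for (size_t i = 0; i < n; i++)
		sum += fabs(x[2 * i * inc_x]) + fabs(x[2 * i * inc_x + 1]);
	return sum;
}
```

The assembly splits this single running sum across two accumulators (d0/d1 or s0/s1) inside the unrolled KERNEL_F4 path to shorten the dependency chain, and folds them into one return value at asum_kernel_L999.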