added optimized dgemv_n kernel for POWER8
Author:     Werner Saar <wernsaar@googlemail.com>
AuthorDate: Wed, 30 Mar 2016 09:10:53 +0000 (11:10 +0200)
Commit:     Werner Saar <wernsaar@googlemail.com>
CommitDate: Wed, 30 Mar 2016 09:10:53 +0000 (11:10 +0200)
kernel/power/KERNEL.POWER8
kernel/power/dgemv_n.c [new file with mode: 0644]
kernel/power/dgemv_n_microk_power8.c [new file with mode: 0644]

diff --git a/kernel/power/KERNEL.POWER8 b/kernel/power/KERNEL.POWER8
index 890842e..b37a421 100644
@@ -147,7 +147,7 @@ ZSWAPKERNEL  = zswap.c
 #
 
 #SGEMVNKERNEL = ../arm/gemv_n.c
-#DGEMVNKERNEL = ../arm/gemv_n.c
+DGEMVNKERNEL = dgemv_n.c
 #CGEMVNKERNEL = ../arm/zgemv_n.c
 #ZGEMVNKERNEL = ../arm/zgemv_n.c
 #
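The DGEMVNKERNEL variable selects the kernel used for double-precision, non-transposed GEMV; with this change POWER8 builds pick up the new dgemv_n.c instead of the generic C version. For reference, a minimal sketch of the operation the kernel computes -- illustrative code only, not the OpenBLAS calling convention:

    /* y := y + alpha * A * x, with A m-by-n, column-major, leading
     * dimension lda.  dgemv_n.c below computes the same thing,
     * blocked by NBMAX rows and four columns at a time.
     */
    static void dgemv_n_ref(int m, int n, double alpha,
                            const double *a, int lda,
                            const double *x, double *y)
    {
        int i, j;

        for (j = 0; j < n; j++) {
            double xj = alpha * x[j];
            for (i = 0; i < m; i++)
                y[i] += a[i + j * lda] * xj;
        }
    }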
diff --git a/kernel/power/dgemv_n.c b/kernel/power/dgemv_n.c
new file mode 100644
index 0000000..812d09d
--- /dev/null
@@ -0,0 +1,426 @@
+/***************************************************************************
+Copyright (c) 2013-2016, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+/**************************************************************************************
+* 2016/03/30 Werner Saar (wernsaar@googlemail.com)
+*       BLASTEST               : OK
+*       CTEST                  : OK
+*       TEST                   : OK
+*       LAPACK-TEST            : OK
+**************************************************************************************/
+
+
+
+#include "common.h"
+
+
+#if defined(POWER8)
+#include "dgemv_n_microk_power8.c"
+#endif
+
+
+#define NBMAX 4096             /* rows of A processed per outer block */
+
+#ifndef HAVE_KERNEL_4x4
+
+static void dgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, FLOAT *alpha)
+{
+       BLASLONG i;
+       FLOAT *a0,*a1,*a2,*a3;
+       FLOAT x[4]  __attribute__ ((aligned (16)));
+       a0 = ap[0];
+       a1 = ap[1];
+       a2 = ap[2];
+       a3 = ap[3];
+
+       for ( i=0; i<4; i++)
+               x[i] = xo[i] * *alpha;
+
+       for ( i=0; i< n; i+=4 )
+       {
+               y[i] += a0[i]*x[0] + a1[i]*x[1] + a2[i]*x[2] + a3[i]*x[3];              
+               y[i+1] += a0[i+1]*x[0] + a1[i+1]*x[1] + a2[i+1]*x[2] + a3[i+1]*x[3];            
+               y[i+2] += a0[i+2]*x[0] + a1[i+2]*x[1] + a2[i+2]*x[2] + a3[i+2]*x[3];            
+               y[i+3] += a0[i+3]*x[0] + a1[i+3]*x[1] + a2[i+3]*x[2] + a3[i+3]*x[3];            
+       }
+}
+
+#endif
+
+#ifndef HAVE_KERNEL_4x2
+
+static void dgemv_kernel_4x2(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, FLOAT *alpha)
+{
+       BLASLONG i;
+       FLOAT *a0,*a1;
+       FLOAT x[4]  __attribute__ ((aligned (16)));
+       a0 = ap[0];
+       a1 = ap[1];
+
+       for ( i=0; i<2; i++)
+               x[i] = xo[i] * *alpha;
+
+       for ( i=0; i< n; i+=4 )
+       {
+               y[i] += a0[i]*x[0] + a1[i]*x[1];                
+               y[i+1] += a0[i+1]*x[0] + a1[i+1]*x[1];          
+               y[i+2] += a0[i+2]*x[0] + a1[i+2]*x[1];          
+               y[i+3] += a0[i+3]*x[0] + a1[i+3]*x[1];          
+       }
+}
+
+
+#endif
+
+#ifndef HAVE_KERNEL_4x1
+
+static void dgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *xo, FLOAT *y, FLOAT *alpha)
+{
+       BLASLONG i;
+       FLOAT *a0;
+       FLOAT x[4]  __attribute__ ((aligned (16)));
+       a0 = ap;
+
+       for ( i=0; i<1; i++)
+               x[i] = xo[i] * *alpha;
+
+       for ( i=0; i< n; i+=4 )
+       {
+               y[i] += a0[i]*x[0];             
+               y[i+1] += a0[i+1]*x[0];         
+               y[i+2] += a0[i+2]*x[0];         
+               y[i+3] += a0[i+3]*x[0];         
+       }
+}
+
+
+#endif
+
+
+/* Accumulate the contiguous ybuffer into a strided y vector.
+ * The caller only needs this when inc_dest != 1; with unit
+ * stride the kernels write into y directly.
+ */
+static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest)
+{
+       BLASLONG i;
+
+       if ( inc_dest == 1 ) return;
+
+       for ( i=0; i<n; i++ )
+       {
+               *dest += *src;
+               src++;
+               dest += inc_dest;
+       }
+}
+
+int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
+{
+
+       BLASLONG i;
+       BLASLONG j;
+       FLOAT *a_ptr;
+       FLOAT *x_ptr;
+       FLOAT *y_ptr;
+       BLASLONG n1;
+       BLASLONG m1;
+       BLASLONG m2;
+       BLASLONG m3;
+       BLASLONG n2;
+       BLASLONG lda4 = lda << 2;       /* element stride of four columns */
+       FLOAT *ap[4] __attribute__ ((aligned (16)));
+       FLOAT xbuffer[8] __attribute__ ((aligned (16)));
+       FLOAT alpha_r[4] __attribute__ ((aligned (16)));
+       FLOAT *ybuffer;
+
+       alpha_r[0] = alpha;
+
+        if ( m < 1 ) return(0);
+        if ( n < 1 ) return(0);
+
+       ybuffer = buffer;
+       
+       n1 = n >> 2 ;
+       n2 = n &  3 ;
+
+        m3 = m & 3  ;                  /* leftover rows (0..3) for the scalar tail */
+        m1 = m & -4 ;                  /* rows handled by the vector kernels */
+        m2 = (m & (NBMAX-1)) - m3 ;    /* size of the final partial row block */
+
+       y_ptr = y;
+
+       BLASLONG NB = NBMAX;
+
+       while ( NB == NBMAX )           /* row blocks; the final partial block runs once with NB = m2 */
+       {
+               
+               m1 -= NB;
+               if ( m1 < 0)
+               {
+                       if ( m2 == 0 ) break;   
+                       NB = m2;
+               }
+               
+               a_ptr = a;
+               x_ptr = x;
+               
+               ap[0] = a_ptr;
+               ap[1] = a_ptr + lda;
+               ap[2] = ap[1] + lda;
+               ap[3] = ap[2] + lda;
+
+               if ( inc_y != 1 )
+                       memset(ybuffer,0,NB*8);         /* 8 == sizeof(double) */
+               else
+                       ybuffer = y_ptr;
+
+               if ( inc_x == 1 )
+               {
+
+
+                       for( i = 0; i < n1 ; i++)
+                       {
+                               dgemv_kernel_4x4(NB,ap,x_ptr,ybuffer,alpha_r);
+                               ap[0] += lda4; 
+                               ap[1] += lda4; 
+                               ap[2] += lda4; 
+                               ap[3] += lda4; 
+                               a_ptr += lda4;
+                               x_ptr += 4;     
+                       }
+
+                       if ( n2 & 2 )
+                       {
+                               dgemv_kernel_4x2(NB,ap,x_ptr,ybuffer,alpha_r);
+                               a_ptr += lda*2;
+                               x_ptr += 2;     
+                       }
+
+
+                       if ( n2 & 1 )
+                       {
+                               dgemv_kernel_4x1(NB,a_ptr,x_ptr,ybuffer,alpha_r);
+                               a_ptr += lda;
+                               x_ptr += 1;     
+
+                       }
+
+
+               }
+               else
+               {
+
+                       for( i = 0; i < n1 ; i++)
+                       {
+                               xbuffer[0] = x_ptr[0];
+                               x_ptr += inc_x; 
+                               xbuffer[1] =  x_ptr[0];
+                               x_ptr += inc_x; 
+                               xbuffer[2] =  x_ptr[0];
+                               x_ptr += inc_x; 
+                               xbuffer[3] = x_ptr[0];
+                               x_ptr += inc_x; 
+                               dgemv_kernel_4x4(NB,ap,xbuffer,ybuffer,alpha_r);
+                               ap[0] += lda4; 
+                               ap[1] += lda4; 
+                               ap[2] += lda4; 
+                               ap[3] += lda4; 
+                               a_ptr += lda4;
+                       }
+
+                       for( i = 0; i < n2 ; i++)
+                       {
+                               xbuffer[0] = x_ptr[0];
+                               x_ptr += inc_x; 
+                               dgemv_kernel_4x1(NB,a_ptr,xbuffer,ybuffer,alpha_r);
+                               a_ptr += lda;
+
+                       }
+
+               }
+
+               a     += NB;
+               if ( inc_y != 1 )
+               {
+                       add_y(NB,ybuffer,y_ptr,inc_y);
+                       y_ptr += NB * inc_y;
+               }
+               else
+                       y_ptr += NB ;
+
+       }
+
+       if ( m3 == 0 ) return(0);
+
+       if ( m3 == 3 )
+       {
+               a_ptr = a;
+               x_ptr = x;
+               FLOAT temp0 = 0.0;
+               FLOAT temp1 = 0.0;
+               FLOAT temp2 = 0.0;
+               if ( lda == 3 && inc_x ==1 )
+               {
+
+                       for( i = 0; i < ( n & -4 ); i+=4 )
+                       {
+
+                               temp0 += a_ptr[0] * x_ptr[0] + a_ptr[3] * x_ptr[1];
+                               temp1 += a_ptr[1] * x_ptr[0] + a_ptr[4] * x_ptr[1];
+                               temp2 += a_ptr[2] * x_ptr[0] + a_ptr[5] * x_ptr[1];
+
+                               temp0 += a_ptr[6] * x_ptr[2] + a_ptr[9]  * x_ptr[3];
+                               temp1 += a_ptr[7] * x_ptr[2] + a_ptr[10] * x_ptr[3];
+                               temp2 += a_ptr[8] * x_ptr[2] + a_ptr[11] * x_ptr[3];
+
+                               a_ptr += 12;
+                               x_ptr += 4;
+                       }
+
+                       for( ; i < n; i++ )
+                       {
+                               temp0 += a_ptr[0] * x_ptr[0];
+                               temp1 += a_ptr[1] * x_ptr[0];
+                               temp2 += a_ptr[2] * x_ptr[0];
+                               a_ptr += 3;
+                               x_ptr ++;
+                       }
+
+               }
+               else
+               {
+
+                       for( i = 0; i < n; i++ )
+                       {
+                               temp0 += a_ptr[0] * x_ptr[0];
+                               temp1 += a_ptr[1] * x_ptr[0];
+                               temp2 += a_ptr[2] * x_ptr[0];
+                               a_ptr += lda;
+                               x_ptr += inc_x;
+
+
+                       }
+
+               }
+               y_ptr[0] += alpha * temp0;
+               y_ptr += inc_y;
+               y_ptr[0] += alpha * temp1;
+               y_ptr += inc_y;
+               y_ptr[0] += alpha * temp2;
+               return(0);
+       }
+
+
+       if ( m3 == 2 )
+       {
+               a_ptr = a;
+               x_ptr = x;
+               FLOAT temp0 = 0.0;
+               FLOAT temp1 = 0.0;
+               if ( lda == 2 && inc_x ==1 )
+               {
+
+                       for( i = 0; i < (n & -4) ; i+=4 )
+                       {
+                               temp0 += a_ptr[0] * x_ptr[0] + a_ptr[2] * x_ptr[1];
+                               temp1 += a_ptr[1] * x_ptr[0] + a_ptr[3] * x_ptr[1];
+                               temp0 += a_ptr[4] * x_ptr[2] + a_ptr[6] * x_ptr[3];
+                               temp1 += a_ptr[5] * x_ptr[2] + a_ptr[7] * x_ptr[3];
+                               a_ptr += 8;
+                               x_ptr += 4;
+
+                       }
+
+
+                       for( ; i < n; i++ )
+                       {
+                               temp0 += a_ptr[0]   * x_ptr[0];
+                               temp1 += a_ptr[1]   * x_ptr[0];
+                               a_ptr += 2;
+                               x_ptr ++;
+                       }
+
+               }
+               else
+               {
+
+                       for( i = 0; i < n; i++ )
+                       {
+                               temp0 += a_ptr[0] * x_ptr[0];
+                               temp1 += a_ptr[1] * x_ptr[0];
+                               a_ptr += lda;
+                               x_ptr += inc_x;
+
+
+                       }
+
+               }
+               y_ptr[0] += alpha * temp0;
+               y_ptr += inc_y;
+               y_ptr[0] += alpha * temp1;
+               return(0);
+       }
+
+       if ( m3 == 1 )
+       {
+               a_ptr = a;
+               x_ptr = x;
+               FLOAT temp = 0.0;
+               if ( lda == 1 && inc_x ==1 )
+               {
+
+                       for( i = 0; i < (n & -4); i+=4 )
+                       {
+                               temp += a_ptr[i] * x_ptr[i] + a_ptr[i+1] * x_ptr[i+1] + a_ptr[i+2] * x_ptr[i+2] + a_ptr[i+3] * x_ptr[i+3];
+       
+                       }
+
+                       for( ; i < n; i++ )
+                       {
+                               temp += a_ptr[i] * x_ptr[i];
+                       }
+
+               }
+               else
+               {
+
+                       for( i = 0; i < n; i++ )
+                       {
+                               temp += a_ptr[0] * x_ptr[0];
+                               a_ptr += lda;
+                               x_ptr += inc_x;
+                       }
+
+               }
+               y_ptr[0] += alpha * temp;
+               return(0);
+       }
+
+
+       return(0);
+}
+
+
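The driver above splits the m rows into blocks of at most NBMAX = 4096 and walks the n columns four at a time, so one call to dgemv_kernel_4x4 applies, for a row block of size NB,

    y[i] += a0[i]*x[j] + a1[i]*x[j+1] + a2[i]*x[j+2] + a3[i]*x[j+3],   i = 0 .. NB-1

where x has already been scaled by alpha once per call. Leftover columns (n mod 4) go to the 4x2 and 4x1 kernels, leftover rows (m mod 4) to the scalar tail of CNAME, and non-unit inc_x/inc_y are staged through xbuffer/ybuffer. On POWER8 only the 4x4 kernel is overridden, by the VSX micro-kernel in the next file.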
diff --git a/kernel/power/dgemv_n_microk_power8.c b/kernel/power/dgemv_n_microk_power8.c
new file mode 100644
index 0000000..9eabe55
--- /dev/null
@@ -0,0 +1,301 @@
+/***************************************************************************
+Copyright (c) 2013-2016, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+/**************************************************************************************
+* 2016/03/30 Werner Saar (wernsaar@googlemail.com)
+*       BLASTEST               : OK
+*       CTEST                  : OK
+*       TEST                   : OK
+*       LAPACK-TEST            : OK
+**************************************************************************************/
+
+#define HAVE_KERNEL_4x4 1
+
+static void dgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, FLOAT *alpha) __attribute__ ((noinline));
+
+static void dgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, FLOAT *alpha)
+{
+       BLASLONG i=n;
+       BLASLONG o8  = 8;       /* byte offsets for indexed VSX loads */
+       BLASLONG o16 = 16;
+       BLASLONG o24 = 24;
+       BLASLONG pre = 384;     /* prefetch distance for dcbt, in bytes */
+
+       FLOAT *a0,*a1,*a2,*a3;
+       FLOAT *y1=y+1;          /* biased by one element; rebased by -8 bytes in the asm */
+       FLOAT x[4]  __attribute__ ((aligned (16)));
+       a0 = ap[0]+1;
+       a1 = ap[1]+1;
+       a2 = ap[2]+1;
+       a3 = ap[3]+1;
+
+       x[0]=xo[0] * *alpha;    /* pre-scale x by alpha once */
+       x[1]=xo[1] * *alpha;
+       x[2]=xo[2] * *alpha;
+       x[3]=xo[3] * *alpha;
+
+
+       __asm__  __volatile__
+       (
+       "lxvdsx         32, 0 , %1                          \n\t"       // x0
+       "lxvdsx         33,%3 , %1                          \n\t"       // x1
+       "lxvdsx         34,%4 , %1                          \n\t"       // x2
+       "lxvdsx         35,%5 , %1                          \n\t"       // x3
+       "addi           %2 , %2 , -8                        \n\t"
+       "addi           %6 , %6 , -8                        \n\t"
+       "addi           %7 , %7 , -8                        \n\t"
+       "addi           %8 , %8 , -8                        \n\t"
+       "addi           %9 , %9 , -8                        \n\t"
+       
+       "lxvd2x         48, 0, %6                           \n\t"       // a0[0], a0[1] 
+       "lxvd2x         49,%4, %6                           \n\t"       // a0[2], a0[3] 
+
+       "lxvd2x         50, 0, %7                           \n\t"       // a1[0], a1[1] 
+       "lxvd2x         51,%4, %7                           \n\t"       // a1[2], a1[3] 
+
+       "lxvd2x         52, 0, %8                           \n\t"       // a2[0], a2[1] 
+       "lxvd2x         53,%4, %8                           \n\t"       // a2[2], a2[3] 
+
+       "lxvd2x         54, 0, %9                           \n\t"       // a3[0], a3[1] 
+       "lxvd2x         55,%4, %9                           \n\t"       // a3[2], a3[3] 
+
+       "addi           %6, %6, 32                          \n\t"
+       "addi           %7, %7, 32                          \n\t"
+       "addi           %8, %8, 32                          \n\t"
+       "addi           %9, %9, 32                          \n\t"
+
+       "addic.         %0 , %0 , -4                         \n\t"
+       "ble            2f                                   \n\t"
+
+       ".align 5                                           \n\t"
+       "1:                                                 \n\t"
+
+       "dcbt           %2, %10                             \n\t"
+
+       "lxvd2x         40, 0, %2                           \n\t"       // y0, y1
+       "lxvd2x         41,%4, %2                           \n\t"       // y2, y3
+       
+       "dcbt           %6, %10                             \n\t"
+       "dcbt           %7, %10                             \n\t"
+       "dcbt           %8, %10                             \n\t"
+       "dcbt           %9, %10                             \n\t"
+
+       "xvmaddadp      40, 48, 32                          \n\t"
+       "xvmaddadp      41, 49, 32                          \n\t"
+
+       "lxvd2x         48, 0, %6                           \n\t"       // a0[0], a0[1] 
+       "lxvd2x         49,%4, %6                           \n\t"       // a0[2], a0[3] 
+
+       "xvmaddadp      40, 50, 33                          \n\t"
+       "addi           %6, %6, 32                          \n\t"
+       "xvmaddadp      41, 51, 33                          \n\t"
+
+       "lxvd2x         50, 0, %7                           \n\t"       // a1[0], a1[1] 
+       "lxvd2x         51,%4, %7                           \n\t"       // a1[2], a1[3] 
+
+       "xvmaddadp      40, 52, 34                          \n\t"
+       "addi           %7, %7, 32                          \n\t"
+       "xvmaddadp      41, 53, 34                          \n\t"
+
+       "lxvd2x         52, 0, %8                           \n\t"       // a2[0], a2[1] 
+       "lxvd2x         53,%4, %8                           \n\t"       // a2[2], a2[3] 
+
+       "xvmaddadp      40, 54, 35                          \n\t"
+       "addi           %8, %8, 32                          \n\t"
+       "xvmaddadp      41, 55, 35                          \n\t"
+
+       "stxvd2x        40, 0, %2                           \n\t"       // y0, y1
+       "stxvd2x        41,%4, %2                           \n\t"       // y2, y3
+
+       "lxvd2x         54, 0, %9                           \n\t"       // a3[0], a3[1] 
+       "lxvd2x         55,%4, %9                           \n\t"       // a3[2], a3[3] 
+
+       "addi           %9, %9, 32                          \n\t"
+       "addi           %2, %2, 32                          \n\t"
+
+       "addic.         %0 , %0 , -4                         \n\t"
+       "ble            2f                                   \n\t"
+
+
+       "lxvd2x         40, 0, %2                           \n\t"       // y0, y1
+       "lxvd2x         41,%4, %2                           \n\t"       // y2, y3
+       
+       "xvmaddadp      40, 48, 32                          \n\t"
+       "xvmaddadp      41, 49, 32                          \n\t"
+
+       "lxvd2x         48, 0, %6                           \n\t"       // a0[0], a0[1] 
+       "lxvd2x         49,%4, %6                           \n\t"       // a0[2], a0[3] 
+
+       "xvmaddadp      40, 50, 33                          \n\t"
+       "addi           %6, %6, 32                          \n\t"
+       "xvmaddadp      41, 51, 33                          \n\t"
+
+       "lxvd2x         50, 0, %7                           \n\t"       // a1[0], a1[1] 
+       "lxvd2x         51,%4, %7                           \n\t"       // a1[2], a1[3] 
+
+       "xvmaddadp      40, 52, 34                          \n\t"
+       "addi           %7, %7, 32                          \n\t"
+       "xvmaddadp      41, 53, 34                          \n\t"
+
+       "lxvd2x         52, 0, %8                           \n\t"       // a2[0], a2[1] 
+       "lxvd2x         53,%4, %8                           \n\t"       // a2[2], a2[3] 
+
+       "xvmaddadp      40, 54, 35                          \n\t"
+       "addi           %8, %8, 32                          \n\t"
+       "xvmaddadp      41, 55, 35                          \n\t"
+
+       "stxvd2x        40, 0, %2                           \n\t"       // y0, y1
+       "stxvd2x        41,%4, %2                           \n\t"       // y2, y3
+
+       "lxvd2x         54, 0, %9                           \n\t"       // a3[0], a3[1] 
+       "lxvd2x         55,%4, %9                           \n\t"       // a3[2], a3[3] 
+
+       "addi           %9, %9, 32                          \n\t"
+       "addi           %2, %2, 32                          \n\t"
+
+       "addic.         %0 , %0 , -4                         \n\t"
+       "ble            2f                                   \n\t"
+
+
+       "lxvd2x         40, 0, %2                           \n\t"       // y0, y1
+       "lxvd2x         41,%4, %2                           \n\t"       // y2, y3
+       
+       "xvmaddadp      40, 48, 32                          \n\t"
+       "xvmaddadp      41, 49, 32                          \n\t"
+
+       "lxvd2x         48, 0, %6                           \n\t"       // a0[0], a0[1] 
+       "lxvd2x         49,%4, %6                           \n\t"       // a0[2], a0[3] 
+
+       "xvmaddadp      40, 50, 33                          \n\t"
+       "addi           %6, %6, 32                          \n\t"
+       "xvmaddadp      41, 51, 33                          \n\t"
+
+       "lxvd2x         50, 0, %7                           \n\t"       // a1[0], a1[1] 
+       "lxvd2x         51,%4, %7                           \n\t"       // a1[2], a1[3] 
+
+       "xvmaddadp      40, 52, 34                          \n\t"
+       "addi           %7, %7, 32                          \n\t"
+       "xvmaddadp      41, 53, 34                          \n\t"
+
+       "lxvd2x         52, 0, %8                           \n\t"       // a2[0], a2[1] 
+       "lxvd2x         53,%4, %8                           \n\t"       // a2[2], a2[3] 
+
+       "xvmaddadp      40, 54, 35                          \n\t"
+       "addi           %8, %8, 32                          \n\t"
+       "xvmaddadp      41, 55, 35                          \n\t"
+
+       "stxvd2x        40, 0, %2                           \n\t"       // y0, y1
+       "stxvd2x        41,%4, %2                           \n\t"       // y2, y3
+
+       "lxvd2x         54, 0, %9                           \n\t"       // a3[0], a3[1] 
+       "lxvd2x         55,%4, %9                           \n\t"       // a3[2], a3[3] 
+
+       "addi           %9, %9, 32                          \n\t"
+       "addi           %2, %2, 32                          \n\t"
+
+       "addic.         %0 , %0 , -4                         \n\t"
+       "ble            2f                                   \n\t"
+
+
+       "lxvd2x         40, 0, %2                           \n\t"       // y0, y1
+       "lxvd2x         41,%4, %2                           \n\t"       // y2, y3
+       
+       "xvmaddadp      40, 48, 32                          \n\t"
+       "xvmaddadp      41, 49, 32                          \n\t"
+
+       "lxvd2x         48, 0, %6                           \n\t"       // a0[0], a0[1] 
+       "lxvd2x         49,%4, %6                           \n\t"       // a0[2], a0[3] 
+
+       "xvmaddadp      40, 50, 33                          \n\t"
+       "addi           %6, %6, 32                          \n\t"
+       "xvmaddadp      41, 51, 33                          \n\t"
+
+       "lxvd2x         50, 0, %7                           \n\t"       // a1[0], a1[1] 
+       "lxvd2x         51,%4, %7                           \n\t"       // a1[2], a1[3] 
+
+       "xvmaddadp      40, 52, 34                          \n\t"
+       "addi           %7, %7, 32                          \n\t"
+       "xvmaddadp      41, 53, 34                          \n\t"
+
+       "lxvd2x         52, 0, %8                           \n\t"       // a2[0], a2[1] 
+       "lxvd2x         53,%4, %8                           \n\t"       // a2[2], a2[3] 
+
+       "xvmaddadp      40, 54, 35                          \n\t"
+       "addi           %8, %8, 32                          \n\t"
+       "xvmaddadp      41, 55, 35                          \n\t"
+
+       "stxvd2x        40, 0, %2                           \n\t"       // y0, y1
+       "stxvd2x        41,%4, %2                           \n\t"       // y2, y3
+
+       "lxvd2x         54, 0, %9                           \n\t"       // a3[0], a3[1] 
+       "lxvd2x         55,%4, %9                           \n\t"       // a3[2], a3[3] 
+
+       "addi           %9, %9, 32                          \n\t"
+       "addi           %2, %2, 32                          \n\t"
+
+       "addic.         %0 , %0 , -4                         \n\t"
+       "bgt            1b                                   \n\t"
+
+       "2:                                                  \n\t"
+
+       "lxvd2x         40, 0, %2                           \n\t"       // y0, y1
+       "lxvd2x         41,%4, %2                           \n\t"       // y2, y3
+
+       "xvmaddadp      40, 48, 32                          \n\t"
+       "xvmaddadp      41, 49, 32                          \n\t"
+
+       "xvmaddadp      40, 50, 33                          \n\t"
+       "xvmaddadp      41, 51, 33                          \n\t"
+
+       "xvmaddadp      40, 52, 34                          \n\t"
+       "xvmaddadp      41, 53, 34                          \n\t"
+
+       "xvmaddadp      40, 54, 35                          \n\t"
+       "xvmaddadp      41, 55, 35                          \n\t"
+
+       "stxvd2x        40, 0, %2                           \n\t"       // y0, y1
+       "stxvd2x        41,%4, %2                           \n\t"       // y2, y3
+
+       :
+       : "r" (i),      // 0
+         "r" (x),      // 1
+         "r" (y1),     // 2
+         "r" (o8),     // 3
+         "r" (o16),    // 4
+         "r" (o24),    // 5
+         "r" (a0),     // 6
+         "r" (a1),     // 7
+         "r" (a2),     // 8
+         "r" (a3),     // 9
+         "r" (pre)     // 10
+       : "cr0", "%0", "%2", "%6", "%7", "%8", "%9", "memory"
+       );
+
+} 
+
+
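The micro-kernel keeps the four alpha-scaled x values splatted across VSX registers with lxvdsx, streams four doubles from each of the four column pointers plus four elements of y per unrolled step with lxvd2x, accumulates with xvmaddadp fused multiply-adds, and prefetches 384 bytes ahead with dcbt. A minimal caller whose shape (m and n multiples of four, unit strides) lands in this 4x4 path through the public CBLAS interface -- the sizes and values are arbitrary, and this assumes an OpenBLAS build with TARGET=POWER8:

    /* y := 1.0*A*x + 0.0*y with A 4x4, column-major; any
     * non-transposed double-precision GEMV goes through dgemv_n.c.
     */
    #include <stdio.h>
    #include <cblas.h>

    int main(void)
    {
        double A[16], x[4] = { 1, 1, 1, 1 }, y[4] = { 0, 0, 0, 0 };
        int k;

        for (k = 0; k < 16; k++)        /* entries 1..16, column-major */
            A[k] = k + 1;

        cblas_dgemv(CblasColMajor, CblasNoTrans, 4, 4,
                    1.0, A, 4, x, 1, 0.0, y, 1);

        printf("%g %g %g %g\n", y[0], y[1], y[2], y[3]);  /* 28 32 36 40 */
        return 0;
    }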