Set NBMAX=4096 for sgemv_n; added an sgemv_n 8x8 kernel for future use
author: Ubuntu <quickwritereader@gmail.com>
Mon, 4 Feb 2019 06:57:11 +0000 (06:57 +0000)
committer: Ubuntu <quickwritereader@gmail.com>
Mon, 4 Feb 2019 06:57:11 +0000 (06:57 +0000)
kernel/power/sgemv_n.c
kernel/power/sgemv_n_8.c [new file with mode: 0644]
kernel/power/sgemv_t_8.c

index 56f08c2..9704757 100644 (file)
@@ -28,7 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 \r
 #include "common.h"\r
 \r
-#define NBMAX 2048\r
+#define NBMAX 4096\r
 \r
 static void sgemv_kernel_4x8(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, BLASLONG lda4, FLOAT *alpha)\r
 {\r
diff --git a/kernel/power/sgemv_n_8.c b/kernel/power/sgemv_n_8.c
new file mode 100644 (file)
index 0000000..d05b08f
--- /dev/null
@@ -0,0 +1,507 @@
+/***************************************************************************\r
+Copyright (c) 2019, The OpenBLAS Project\r
+All rights reserved.\r
+Redistribution and use in source and binary forms, with or without\r
+modification, are permitted provided that the following conditions are\r
+met:\r
+1. Redistributions of source code must retain the above copyright\r
+notice, this list of conditions and the following disclaimer.\r
+2. Redistributions in binary form must reproduce the above copyright\r
+notice, this list of conditions and the following disclaimer in\r
+the documentation and/or other materials provided with the\r
+distribution.\r
+3. Neither the name of the OpenBLAS project nor the names of\r
+its contributors may be used to endorse or promote products\r
+derived from this software without specific prior written permission.\r
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\r
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE\r
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\r
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\r
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\r
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\r
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+*****************************************************************************/\r
+\r
+\r
+#include "common.h"\r
+\r
+#define NBMAX 4096\r
+\r
+/* 8-column panel kernel: y[0:n-1] += alpha * A(:,0:7) * x[0:7].\r
+   ap[0..3] point at columns 0..3; columns 4..7 are reached by adding\r
+   lda4 (an element offset of four columns) to each of them.\r
+   NOTE(review): the loop advances by two 4-float vectors per pass, so\r
+   n is assumed to be a multiple of 8 — the caller guarantees this via\r
+   its NB blocking; confirm before reusing this kernel elsewhere. */\r
+static void sgemv_kernel_8x8(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, BLASLONG lda4, FLOAT *alpha)\r
+{\r
+\r
+    BLASLONG i;\r
+       FLOAT *a0,*a1,*a2,*a3,*b0,*b1,*b2,*b3; \r
+    FLOAT x0,x1,x2,x3,x4,x5,x6,x7;\r
+       a0 = ap[0];\r
+       a1 = ap[1];\r
+       a2 = ap[2];\r
+       a3 = ap[3]; \r
+    /* b0..b3 = columns 4..7, lda4 elements past columns 0..3 */\r
+    b0 = a0 + lda4 ;\r
+       b1 = a1 + lda4 ;\r
+       b2 = a2 + lda4 ;\r
+       b3 = a3 + lda4 ;\r
+    /* fold alpha into the eight x values once, outside the loop */\r
+    x0 = xo[0] * *alpha;\r
+    x1 = xo[1] * *alpha;\r
+    x2 = xo[2] * *alpha;\r
+    x3 = xo[3] * *alpha;\r
+    x4 = xo[4] * *alpha;\r
+    x5 = xo[5] * *alpha;\r
+    x6 = xo[6] * *alpha;\r
+    x7 = xo[7] * *alpha;\r
+    /* reinterpret the column and y pointers as 4-float vector pointers;\r
+       assumes the usual 16-byte alignment of BLAS buffers — TODO confirm */\r
+    __vector float* va0 = (__vector float*)a0;\r
+    __vector float* va1 = (__vector float*)a1;\r
+    __vector float* va2 = (__vector float*)a2;\r
+    __vector float* va3 = (__vector float*)a3;\r
+    __vector float* vb0 = (__vector float*)b0;\r
+    __vector float* vb1 = (__vector float*)b1;\r
+    __vector float* vb2 = (__vector float*)b2;\r
+    __vector float* vb3 = (__vector float*)b3; \r
+    \r
+    /* splat each scaled x value across a vector register */\r
+    register __vector float   v_x0 = {x0,x0,x0,x0};\r
+    register __vector float   v_x1 = {x1,x1,x1,x1};\r
+    register __vector float   v_x2 = {x2,x2,x2,x2};\r
+    register __vector float   v_x3 = {x3,x3,x3,x3};\r
+    register __vector float   v_x4 = {x4,x4,x4,x4};\r
+    register __vector float   v_x5 = {x5,x5,x5,x5};\r
+    register __vector float   v_x6 = {x6,x6,x6,x6};\r
+    register __vector float   v_x7 = {x7,x7,x7,x7};\r
+    __vector float* v_y =(__vector float*)y;   \r

+    /* each pass updates 8 consecutive y elements (two vectors) */\r
+    for ( i=0; i< n/4; i+=2)\r
+    {\r
+        register __vector float vy_1=v_y[i];\r
+        register __vector float vy_2=v_y[i+1];\r
+        /* load both vectors of all 8 columns before accumulating,\r
+           giving the compiler freedom to schedule the loads */\r
+        register __vector float va0_1=va0[i] ; \r
+        register __vector float va0_2=va0[i+1] ; \r
+        register __vector float va1_1=va1[i] ; \r
+        register __vector float va1_2=va1[i+1] ; \r
+        register __vector float va2_1=va2[i] ; \r
+        register __vector float va2_2=va2[i+1] ; \r
+        register __vector float va3_1=va3[i] ; \r
+        register __vector float va3_2=va3[i+1] ;\r
+        register __vector float vb0_1=vb0[i] ; \r
+        register __vector float vb0_2=vb0[i+1] ; \r
+        register __vector float vb1_1=vb1[i] ; \r
+        register __vector float vb1_2=vb1[i+1] ; \r
+        register __vector float vb2_1=vb2[i] ; \r
+        register __vector float vb2_2=vb2[i+1] ; \r
+        register __vector float vb3_1=vb3[i] ; \r
+        register __vector float vb3_2=vb3[i+1] ;         \r
+        vy_1   += v_x0 * va0_1  +  v_x1 * va1_1  + v_x2 * va2_1  + v_x3 * va3_1 ;\r
+        vy_1   += v_x4 * vb0_1   +  v_x5 * vb1_1   + v_x6 * vb2_1   + v_x7 * vb3_1 ;\r
+        vy_2   +=  v_x0 * va0_2   +  v_x1 * va1_2   + v_x2 * va2_2   + v_x3 * va3_2 ; \r
+        vy_2   += v_x4 * vb0_2   +  v_x5 * vb1_2   + v_x6 * vb2_2   + v_x7 * vb3_2 ;\r
+        v_y[i] =vy_1;\r
+        v_y[i+1] =vy_2;   \r
+    }\r
+\r
+}\r
+        \r
+/* 4-column panel kernel: y[0:n-1] += alpha * A(:,0:3) * x[0:3].\r
+   Same structure as the 8x8 kernel but over the four columns in\r
+   ap[0..3] only.  n is assumed to be a multiple of 8 (two 4-float\r
+   vectors per loop pass) — guaranteed by the caller's blocking. */\r
+static void sgemv_kernel_8x4(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, FLOAT *alpha)\r
+{\r
+    BLASLONG i;\r
+    FLOAT x0,x1,x2,x3;\r
+    /* fold alpha into x once, then splat across vector registers */\r
+    x0 = xo[0] * *alpha;\r
+    x1 = xo[1] * *alpha;\r
+    x2 = xo[2] * *alpha;\r
+    x3 = xo[3] * *alpha;\r
+    __vector float   v_x0 = {x0,x0,x0,x0};\r
+    __vector float   v_x1 = {x1,x1,x1,x1};\r
+    __vector float   v_x2 = {x2,x2,x2,x2};\r
+    __vector float   v_x3 = {x3,x3,x3,x3};\r
+    __vector float* v_y =(__vector float*)y;      \r
+    __vector float* va0 = (__vector float*)ap[0];\r
+    __vector float* va1 = (__vector float*)ap[1];\r
+    __vector float* va2 = (__vector float*)ap[2];\r
+    __vector float* va3 = (__vector float*)ap[3]; \r

+    /* each pass updates 8 consecutive y elements (two vectors) */\r
+    for ( i=0; i< n/4; i+=2 )\r
+    {\r
+        register __vector float vy_1=v_y[i];\r
+        register __vector float vy_2=v_y[i+1];\r
+        register __vector float va0_1=va0[i] ; \r
+        register __vector float va0_2=va0[i+1] ; \r
+        register __vector float va1_1=va1[i] ; \r
+        register __vector float va1_2=va1[i+1] ; \r
+        register __vector float va2_1=va2[i] ; \r
+        register __vector float va2_2=va2[i+1] ; \r
+        register __vector float va3_1=va3[i] ; \r
+        register __vector float va3_2=va3[i+1] ;      \r
+        vy_1   += v_x0 * va0_1  +  v_x1 * va1_1  + v_x2 * va2_1  + v_x3 * va3_1 ;\r
+        vy_2   +=  v_x0 * va0_2   +  v_x1 * va1_2   + v_x2 * va2_2   + v_x3 * va3_2 ;\r
+        v_y[i] =vy_1;\r
+        v_y[i+1] =vy_2;   \r
+    }\r
+  \r
+} \r
+\r
+/* 2-column panel kernel: y[0:n-1] += alpha * A(:,0:1) * x[0:1].\r
+   Uses the columns in ap[0] and ap[1]; n assumed to be a multiple\r
+   of 8, as in the other kernels (two vectors per pass). */\r
+static void sgemv_kernel_8x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT *alpha)\r
+{\r
+\r
+    BLASLONG i;\r
+    FLOAT x0,x1;\r
+    /* fold alpha into x once, then splat */\r
+    x0 = x[0] * *alpha;\r
+    x1 = x[1] * *alpha; \r
+    __vector float   v_x0 = {x0,x0,x0,x0};\r
+    __vector float   v_x1 = {x1,x1,x1,x1}; \r
+    __vector float* v_y =(__vector float*)y;      \r
+    __vector float* va0 = (__vector float*)ap[0];\r
+    __vector float* va1 = (__vector float*)ap[1]; \r

+    for ( i=0; i< n/4; i+=2 )\r
+    { \r
+        v_y[i]   += v_x0 * va0[i]   +  v_x1 * va1[i] ;\r
+        v_y[i+1]  += v_x0 * va0[i+1]   +  v_x1 * va1[i+1] ;     \r
+    }\r
+\r
+} \r
\r
\r
+/* Single-column kernel: y[0:n-1] += alpha * x[0] * A(:,0).\r
+   ap points directly at the column (not an array of pointers).\r
+   n assumed to be a multiple of 8, as in the other kernels. */\r
+static void sgemv_kernel_8x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT *alpha)\r
+{\r
+\r
+    BLASLONG i;\r
+    FLOAT x0 ;\r
+    x0 = x[0] * *alpha; \r
+    __vector float   v_x0 = {x0,x0,x0,x0}; \r
+    __vector float* v_y =(__vector float*)y;      \r
+    __vector float* va0 = (__vector float*)ap; \r

+    for ( i=0; i< n/4; i+=2 )\r
+    { \r
+        v_y[i]   += v_x0 * va0[i]   ;\r
+        v_y[i+1] +=   v_x0 * va0[i+1]   ;        \r
+    }\r
+\r
+}\r
\r
+/* Accumulate the contiguous temporary result src[0:n-1] into the\r
+   strided destination vector: dest[i*inc_dest] += src[i].\r
+   Used when inc_y != 1, after the kernels have written into a\r
+   contiguous ybuffer. */\r
+static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest)\r
+{\r
+    BLASLONG i;\r
+        \r
+    for ( i=0; i<n; i++ ){\r
+            *dest += *src;\r
+            src++;\r
+            dest += inc_dest;\r
+    }\r
+    return;\r
+     \r
+\r
+}\r
+\r
+/* SGEMV N-transpose driver: y += alpha * A * x for a column-major\r
+   m-by-n matrix A with leading dimension lda, x strided by inc_x and\r
+   y strided by inc_y.  dummy1 is unused (BLAS kernel ABI slot).\r
+   Strategy: block the rows into chunks of NB (= NBMAX, last chunk\r
+   possibly m2 < NBMAX), and within each row block sweep the columns\r
+   with the 8x8/8x4/8x2/8x1 vector kernels; row counts that are not a\r
+   multiple of 8 (m3 = m & 7) are finished with scalar tail loops.\r
+   NOTE(review): beta scaling of y is presumably applied by the\r
+   interface layer before this kernel is called — confirm. */\r
+int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)\r
+{\r
+       BLASLONG i;\r
+       FLOAT *a_ptr;\r
+       FLOAT *x_ptr;\r
+       FLOAT *y_ptr;\r
+       FLOAT *ap[4];\r
+       BLASLONG n1;\r
+       BLASLONG m1;\r
+       BLASLONG m2;\r
+       BLASLONG m3;\r
+       BLASLONG n2;\r
+       BLASLONG lda4 =  lda << 2;\r
+       BLASLONG lda8 =  lda << 3;\r
+       FLOAT xbuffer[8],*ybuffer;\r
+\r
+        if ( m < 1 ) return(0);\r
+        if ( n < 1 ) return(0);\r
+\r
+       ybuffer = buffer;\r
+       \r
+       /* column blocking: with contiguous x the 8-wide kernel is usable\r
+          (n1 groups of 8, n2 = n % 8 remainder); with strided x the\r
+          widest kernel fed from xbuffer is 4 columns (n1 groups of 4) */\r
+        if ( inc_x == 1 )\r
+       {\r
+               n1 = n >> 3 ;\r
+               n2 = n &  7 ;\r
+       }\r
+       else\r
+       {\r
+               n1 = n >> 2 ;\r
+               n2 = n &  3 ;\r
+\r
+       }\r
+        \r
+       /* m3: rows left over after the 8-row kernels (scalar tails);\r
+          m1: rows handled in full NBMAX blocks; m2: rows of the final\r
+          partial block (still a multiple of 8) */\r
+        m3 = m & 7  ;\r
+        m1 = m - m3;\r
+        m2 = (m & (NBMAX-1)) - m3 ;\r
+\r
+\r
+       y_ptr = y;\r
+\r
+       BLASLONG NB = NBMAX;\r
+\r
+       /* loop over row blocks; the loop exits after the first block\r
+          whose size NB was reduced below NBMAX (or via the break) */\r
+       while ( NB == NBMAX )\r
+       {\r
+               \r
+               m1 -= NB;\r
+               if ( m1 < 0)\r
+               {\r
+                       if ( m2 == 0 ) break;   \r
+                       NB = m2;\r
+               }\r
+               \r
+               a_ptr = a;\r
+               x_ptr = x;\r
+               \r
+               /* ap[0..3]: first four columns of the current row block */\r
+               ap[0] = a_ptr;\r
+               ap[1] = a_ptr + lda;\r
+               ap[2] = ap[1] + lda;\r
+               ap[3] = ap[2] + lda;\r
+\r
+               /* strided y: accumulate into a zeroed contiguous buffer,\r
+                  scattered back by add_y() below; contiguous y is updated\r
+                  in place.  NOTE(review): the 4 here is sizeof(FLOAT) for\r
+                  single precision — would need sizeof(FLOAT) if this file\r
+                  were ever adapted for double. */\r
+               if ( inc_y != 1 )\r
+                       memset(ybuffer,0,NB*4);\r
+               else\r
+                       ybuffer = y_ptr;\r
+\r
+               if ( inc_x == 1 )\r
+               {\r
+\r
+\r
+                       /* contiguous x: 8 columns at a time */\r
+                       for( i = 0; i < n1 ; i++)\r
+                       {\r
+                               sgemv_kernel_8x8(NB,ap,x_ptr,ybuffer,lda4,&alpha);\r
+                               ap[0] += lda8; \r
+                               ap[1] += lda8; \r
+                               ap[2] += lda8; \r
+                               ap[3] += lda8; \r
+                               a_ptr += lda8;\r
+                               x_ptr += 8;     \r
+                       }\r
+\r
+\r
+                       /* remaining n2 (< 8) columns: 4, then 2, then 1 */\r
+                       if ( n2 & 4 )\r
+                       {\r
+                               sgemv_kernel_8x4(NB,ap,x_ptr,ybuffer,&alpha);\r
+                               ap[0] += lda4; \r
+                               ap[1] += lda4; \r
+                               ap[2] += lda4; \r
+                               ap[3] += lda4; \r
+                               a_ptr += lda4;\r
+                               x_ptr += 4;     \r
+                       }\r
+\r
+                       if ( n2 & 2 )\r
+                       {\r
+                               sgemv_kernel_8x2(NB,ap,x_ptr,ybuffer,&alpha);\r
+                               a_ptr += lda*2;\r
+                               x_ptr += 2;     \r
+                       }\r
+\r
+\r
+                       if ( n2 & 1 )\r
+                       {\r
+                               /* single column: a_ptr (kept in sync with ap[0]\r
+                                  above) already points at it */\r
+                               sgemv_kernel_8x1(NB,a_ptr,x_ptr,ybuffer,&alpha); \r
+                a_ptr += lda;\r
+                x_ptr += 1;   \r
+                       }\r
+\r
+\r
+               }\r
+               else\r
+               {\r
+\r
+                       /* strided x: gather four x values into xbuffer and\r
+                          use the 4-column kernel */\r
+                       for( i = 0; i < n1 ; i++)\r
+                       {\r
+                               xbuffer[0] = x_ptr[0];\r
+                               x_ptr += inc_x; \r
+                               xbuffer[1] =  x_ptr[0];\r
+                               x_ptr += inc_x; \r
+                               xbuffer[2] =  x_ptr[0];\r
+                               x_ptr += inc_x; \r
+                               xbuffer[3] = x_ptr[0];\r
+                               x_ptr += inc_x; \r
+                               sgemv_kernel_8x4(NB,ap,xbuffer,ybuffer,&alpha);\r
+                               ap[0] += lda4; \r
+                               ap[1] += lda4; \r
+                               ap[2] += lda4; \r
+                               ap[3] += lda4; \r
+                               a_ptr += lda4;\r
+                       }\r
+\r
+                       /* leftover columns one at a time */\r
+                       for( i = 0; i < n2 ; i++)\r
+                       {\r
+                               xbuffer[0] = x_ptr[0];\r
+                               x_ptr += inc_x; \r
+                               sgemv_kernel_8x1(NB,a_ptr,xbuffer,ybuffer,&alpha);\r
+                               a_ptr += lda;\r
+\r
+                       }\r
+\r
+               }\r
+\r
+               /* advance to the next row block */\r
+               a     += NB;\r
+               if ( inc_y != 1 )\r
+               {\r
+                       add_y(NB,ybuffer,y_ptr,inc_y);\r
+                       y_ptr += NB * inc_y;\r
+               }\r
+               else\r
+                       y_ptr += NB ;\r
+\r
+       }\r
+\r
+        \r
+       /* scalar tails for the last m3 (= m & 7) rows: dot each remaining\r
+          row with x.  Each tail has a fast path for the fully-contiguous\r
+          case (lda == row count, inc_x == 1). */\r
+       if ( m3 & 4 )\r
+       {\r
+               a_ptr = a;\r
+               x_ptr = x;\r
+               FLOAT temp0 = 0.0;\r
+               FLOAT temp1 = 0.0;\r
+               FLOAT temp2 = 0.0;\r
+               FLOAT temp3 = 0.0;              \r
+               if ( lda == 4 && inc_x ==1 )\r
+               {\r
+\r
+                       /* unrolled by 4 columns (n & -4 clears the low bits) */\r
+                       for( i = 0; i < ( n & -4 ); i+=4 )\r
+                       {\r
+\r
+                               temp0 += a_ptr[0] * x_ptr[0] + a_ptr[4] * x_ptr[1];\r
+                               temp1 += a_ptr[1] * x_ptr[0] + a_ptr[5] * x_ptr[1];\r
+                               temp2 += a_ptr[2] * x_ptr[0] + a_ptr[6] * x_ptr[1];\r
+                               temp3 += a_ptr[3] * x_ptr[0] + a_ptr[7] * x_ptr[1];\r
+\r
+                               temp0 += a_ptr[8] * x_ptr[2] + a_ptr[12]  * x_ptr[3];\r
+                               temp1 += a_ptr[9] * x_ptr[2] + a_ptr[13] * x_ptr[3];\r
+                               temp2 += a_ptr[10] * x_ptr[2] + a_ptr[14] * x_ptr[3];\r
+                               temp3 += a_ptr[11] * x_ptr[2] + a_ptr[15] * x_ptr[3];\r
+\r
+                               a_ptr += 16;\r
+                               x_ptr += 4;\r
+                       }\r
+\r
+                       for( ; i < n; i++ )\r
+                       {\r
+                               temp0 += a_ptr[0] * x_ptr[0];\r
+                               temp1 += a_ptr[1] * x_ptr[0];\r
+                               temp2 += a_ptr[2] * x_ptr[0];\r
+                               temp3 += a_ptr[3] * x_ptr[0] ;\r
+                               a_ptr +=4;\r
+                               x_ptr ++;\r
+                       }\r
+\r
+               }\r
+               else\r
+               {\r
+\r
+                       /* generic strided path */\r
+                       for( i = 0; i < n; i++ )\r
+                       {\r
+                               temp0 += a_ptr[0] * x_ptr[0];\r
+                               temp1 += a_ptr[1] * x_ptr[0];\r
+                               temp2 += a_ptr[2] * x_ptr[0];\r
+                               temp3 += a_ptr[3] * x_ptr[0];\r
+                               a_ptr += lda;\r
+                               x_ptr += inc_x;\r
+\r
+\r
+                       }\r
+\r
+               }\r
+               y_ptr[0] += alpha * temp0;\r
+               y_ptr += inc_y;\r
+               y_ptr[0] += alpha * temp1;\r
+               y_ptr += inc_y;\r
+               y_ptr[0] += alpha * temp2;\r
+               y_ptr += inc_y;\r
+               y_ptr[0] += alpha * temp3; \r
+               y_ptr += inc_y;\r
+        a     += 4;\r
+       }\r
+\r
+\r
+       if ( m3 & 2 )\r
+       {\r
+               a_ptr = a;\r
+               x_ptr = x;\r
+               FLOAT temp0 = 0.0;\r
+               FLOAT temp1 = 0.0;\r
+               if ( lda == 2 && inc_x ==1 )\r
+               {\r
+\r
+                       for( i = 0; i < (n & -4) ; i+=4 )\r
+                       {\r
+                               temp0 += a_ptr[0] * x_ptr[0] + a_ptr[2] * x_ptr[1];\r
+                               temp1 += a_ptr[1] * x_ptr[0] + a_ptr[3] * x_ptr[1];\r
+                               temp0 += a_ptr[4] * x_ptr[2] + a_ptr[6] * x_ptr[3];\r
+                               temp1 += a_ptr[5] * x_ptr[2] + a_ptr[7] * x_ptr[3];\r
+                               a_ptr += 8;\r
+                               x_ptr += 4;\r
+\r
+                       }\r
+\r
+\r
+                       for( ; i < n; i++ )\r
+                       {\r
+                               temp0 += a_ptr[0]   * x_ptr[0];\r
+                               temp1 += a_ptr[1]   * x_ptr[0];\r
+                               a_ptr += 2;\r
+                               x_ptr ++;\r
+                       }\r
+\r
+               }\r
+               else\r
+               {\r
+\r
+                       for( i = 0; i < n; i++ )\r
+                       {\r
+                               temp0 += a_ptr[0] * x_ptr[0];\r
+                               temp1 += a_ptr[1] * x_ptr[0];\r
+                               a_ptr += lda;\r
+                               x_ptr += inc_x;\r
+\r
+\r
+                       }\r
+\r
+               }\r
+               y_ptr[0] += alpha * temp0;\r
+               y_ptr += inc_y;\r
+               y_ptr[0] += alpha * temp1;\r
+               y_ptr += inc_y;\r
+        a     += 2;\r
+       }\r
+\r
+       if ( m3 & 1 )\r
+       {\r
+               a_ptr = a;\r
+               x_ptr = x;\r
+               FLOAT temp = 0.0;\r
+               if ( lda == 1 && inc_x ==1 )\r
+               {\r
+\r
+                       /* row vector dot product, unrolled by 4 */\r
+                       for( i = 0; i < (n & -4); i+=4 )\r
+                       {\r
+                               temp += a_ptr[i] * x_ptr[i] + a_ptr[i+1] * x_ptr[i+1] + a_ptr[i+2] * x_ptr[i+2] + a_ptr[i+3] * x_ptr[i+3];\r
+       \r
+                       }\r
+\r
+                       for( ; i < n; i++ )\r
+                       {\r
+                               temp += a_ptr[i] * x_ptr[i];\r
+                       }\r
+\r
+               }\r
+               else\r
+               {\r
+\r
+                       for( i = 0; i < n; i++ )\r
+                       {\r
+                               temp += a_ptr[0] * x_ptr[0];\r
+                               a_ptr += lda;\r
+                               x_ptr += inc_x;\r
+                       }\r
+\r
+               }\r
+               y_ptr[0] += alpha * temp;\r


+       }\r
+\r
+\r
+       return(0);\r
+}\r
+\r
+\r
index c9f9282..e426f36 100644 (file)
@@ -27,7 +27,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 \r
 #include "common.h"\r
 #include <stdio.h>\r
-#define NBMAX 2048\r
+#define NBMAX 4096\r
 \r
 #include <altivec.h> \r
  \r