// arm_compute v18.05: src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a53.cpp
/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifdef __arm__

#include <arm_neon.h>

#include "../../asmlib.hpp"

// Kernel implementation.
//
// Assume that "Apanel" points to a chunk of A blocks (each size 6xK) in read-order.
// Assume that "Bpanel" points to a chunk of B blocks (each size 8xK) in read-order.
// Assume that "Cpanel" points to a chunk of C output blocks (each size
// 8x6), the chunks being arranged in a row major fashion.
//
// Note that the intent of this is that either ablocks or bblocks will be 1
// - this construction allows the output loop to proceed in either order.

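// For reference, a plain C++ sketch of what one (A block, B block) pair
// computes, following the packed layout described above.  This is
// illustrative only; "reference_block" is a hypothetical name and the
// function is not part of the build:
//
// static void reference_block(const float *Apanel, const float *Bpanel,
//                             float *Cpanel, int K)
// {
//     for(int i = 0; i < 6; i++)         // 6 rows from the A block
//     {
//         for(int j = 0; j < 8; j++)     // 8 columns from the B block
//         {
//             float acc = 0.0f;
//             for(int k = 0; k < K; k++)
//             {
//                 // Packed panels: 6 A values per k step, 8 B values per k step.
//                 acc += Apanel[k * 6 + i] * Bpanel[k * 8 + j];
//             }
//             Cpanel[i * 8 + j] = acc;   // C block stored as 6 rows of 8
//         }
//     }
// }
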
namespace arm_gemm
{
void a32_sgemm_8x6_a53(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K)
{
    const float *a_ptr = Apanel;
    float       *c_ptr = Cpanel;

    for(int yb = 0; yb < ablocks; yb++)
    {
        const float *a_ptr0 = a_ptr;
        const float *b_ptr  = Bpanel;

        for(int xb = 0; xb < bblocks; xb++)
        {
            a_ptr     = a_ptr0;
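            // K is consumed four k-steps at a time: the main loop runs
            // ceil(K/4) - 1 times and the detached tail handles the last
            // "tails" k-steps, where tails is in 1..4.  For example, K = 10
            // gives k = 2 full iterations (8 k-steps) and tails = 2.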
            int tails = (K & 3);
            if(tails == 0)
            {
                tails = 4;
            }
            int k = ((K + 3) / 4) - 1;

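            // Register usage: q4-q9 accumulate columns 0-3 of C rows 0-5 and
            // q10-q15 accumulate columns 4-7; q0-q1 carry A values and q2-q3
            // carry B values.  Loads are split into vldr plus paired ldr/vmov,
            // which appears to be the point of this A53-specific variant: it
            // lets the Cortex-A53 dual-issue integer loads alongside NEON work.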
            __asm __volatile(
                "vmov.i32    q4, #0\n"
                "vld1.32    {d0-d1}, [%[a_ptr] :64]\n"
                "vmov.i32    q5, #0\n"
                "vld1.32    {d4-d5}, [%[b_ptr] :128]\n"
                "vmov.i32    q6, #0\n"
                "ldr        r0, [%[a_ptr], #0x10]\n"
                "vmov.i32    q7, #0\n"
                "ldr        r1, [%[a_ptr], #0x14]\n"
                "vmov.i32    q8, #0\n"
                ASM_PREFETCH("[%[a_ptr], #0x40]")
                "vmov.i32    q9, #0\n"
                ASM_PREFETCH("[%[b_ptr], #0x40]")
                "vmov.i32    q10, #0\n"
                ASM_PREFETCH("[%[a_ptr], #0x80]")
                "vmov.i32    q11, #0\n"
                ASM_PREFETCH("[%[b_ptr], #0x80]")
                "vmov.i32    q12, #0\n"
                "vmov.i32    q13, #0\n"
                ASM_PREFETCH("[%[a_ptr], #0xC0]")
                "vmov.i32    q14, #0\n"
                ASM_PREFETCH("[%[b_ptr], #0xC0]")
                "vmov.i32    q15, #0\n"
                "cmp        %[k], #0\n"
                "beq        6f\n"

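                // Main loop: four k-steps per iteration, software-pipelined
                // so the loads for the next step issue under the multiplies
                // of the current one.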
                "1:\n"
                // Unroll 0
                "vldr        d6, [%[b_ptr], #0x10]\n"
                "vmov        d2, r0, r1\n"
                "vmla.f32    q4, q2, d0[0]\n"
                "ldr        r0, [%[b_ptr], #0x18]\n"
                "vmla.f32    q5, q2, d0[1]\n"
                "ldr        r1, [%[b_ptr], #0x1C]\n"
                "vmla.f32    q6, q2, d1[0]\n"

                "vldr        d3, [%[a_ptr], #0x18]\n"
                "vmov        d7, r0, r1\n"
                "vmla.f32    q7, q2, d1[1]\n"
                ASM_PREFETCH("[%[a_ptr], #0x100]")
                "vmla.f32    q8, q2, d2[0]\n"
                "vmla.f32    q9, q2, d2[1]\n"

                "vldr        d4, [%[b_ptr], #0x20]\n"
                "vmla.f32    q10, q3, d0[0]\n"
                "ldr        r0, [%[b_ptr], #0x28]\n"
                "vmla.f32    q11, q3, d0[1]\n"
                "ldr        r1, [%[b_ptr], #0x2C]\n"
                "vmla.f32    q12, q3, d1[0]\n"

                "vldr        d0, [%[a_ptr], #0x20]\n"
                "vmov        d5, r0, r1\n"
                "vmla.f32    q13, q3, d1[1]\n"
                "ldr        r0, [%[a_ptr], #0x28]\n"
                "vmla.f32    q14, q3, d2[0]\n"
                "ldr        r1, [%[a_ptr], #0x2C]\n"
                "vmla.f32    q15, q3, d2[1]\n"

                // Unroll 1
                "vldr        d6, [%[b_ptr], #0x30]\n"
                "vmov        d1, r0, r1\n"
                "vmla.f32    q4, q2, d3[0]\n"
                "ldr        r0, [%[b_ptr], #0x38]\n"
                "vmla.f32    q5, q2, d3[1]\n"
                "ldr        r1, [%[b_ptr], #0x3C]\n"
                "vmla.f32    q6, q2, d0[0]\n"

                "vldr        d2, [%[a_ptr], #0x30]\n"
                "vmov        d7, r0, r1\n"
                "vmla.f32    q7, q2, d0[1]\n"
                ASM_PREFETCH("[%[b_ptr], #0x100]")
                "vmla.f32    q8, q2, d1[0]\n"
                "vmla.f32    q9, q2, d1[1]\n"

                "vldr        d4, [%[b_ptr], #0x40]\n"
                "vmla.f32    q10, q3, d3[0]\n"
                "ldr        r0, [%[b_ptr], #0x48]\n"
                "vmla.f32    q11, q3, d3[1]\n"
                "ldr        r1, [%[b_ptr], #0x4C]\n"
                "vmla.f32    q12, q3, d0[0]\n"

                "vldr        d3, [%[a_ptr], #0x38]\n"
                "vmov        d5, r0, r1\n"
                "vmla.f32    q13, q3, d0[1]\n"
                "ldr        r0, [%[a_ptr], #0x40]\n"
                "vmla.f32    q14, q3, d1[0]\n"
                "ldr        r1, [%[a_ptr], #0x44]\n"
                "vmla.f32    q15, q3, d1[1]\n"

                // Unroll 2
                "vldr        d6, [%[b_ptr], #0x50]\n"
                "vmov        d0, r0, r1\n"
                "vmla.f32    q4, q2, d2[0]\n"
                "ldr        r0, [%[b_ptr], #0x58]\n"
                "vmla.f32    q5, q2, d2[1]\n"
                "ldr        r1, [%[b_ptr], #0x5C]\n"
                "vmla.f32    q6, q2, d3[0]\n"

                "vldr        d1, [%[a_ptr], #0x48]\n"
                "vmov        d7, r0, r1\n"
                "vmla.f32    q7, q2, d3[1]\n"
                ASM_PREFETCH("[%[a_ptr], #0x140]")
                "vmla.f32    q8, q2, d0[0]\n"
                "vmla.f32    q9, q2, d0[1]\n"

                "vldr        d4, [%[b_ptr], #0x60]\n"
                "vmla.f32    q10, q3, d2[0]\n"
                "ldr        r0, [%[b_ptr], #0x68]\n"
                "vmla.f32    q11, q3, d2[1]\n"
                "ldr        r1, [%[b_ptr], #0x6C]\n"
                "vmla.f32    q12, q3, d3[0]\n"

                "vldr        d2, [%[a_ptr], #0x50]\n"
                "vmov        d5, r0, r1\n"
                "vmla.f32    q13, q3, d3[1]\n"
                "ldr        r0, [%[a_ptr], #0x58]\n"
                "vmla.f32    q14, q3, d0[0]\n"
                "ldr        r1, [%[a_ptr], #0x5C]\n"
                "vmla.f32    q15, q3, d0[1]\n"
                "add        %[a_ptr], %[a_ptr], #0x60\n"

                // Unroll 3
                "vldr        d6, [%[b_ptr], #0x70]\n"
                "vmov        d3, r0, r1\n"
                "vmla.f32    q4, q2, d1[0]\n"
                "ldr        r0, [%[b_ptr], #0x78]\n"
                "vmla.f32    q5, q2, d1[1]\n"
                "ldr        r1, [%[b_ptr], #0x7C]\n"
                "vmla.f32    q6, q2, d2[0]\n"
                "add        %[b_ptr], %[b_ptr], #0x80\n"

                "vldr        d0, [%[a_ptr], #0x00]\n"
                "vmov        d7, r0, r1\n"
                "vmla.f32    q7, q2, d2[1]\n"
                ASM_PREFETCH("[%[b_ptr], #0xC0]")
                "vmla.f32    q8, q2, d3[0]\n"
                "vmla.f32    q9, q2, d3[1]\n"

                "vldr        d4, [%[b_ptr], #0x00]\n"
                "vmla.f32    q10, q3, d1[0]\n"
                "ldr        r0, [%[b_ptr], #0x08]\n"
                "vmla.f32    q11, q3, d1[1]\n"
                "ldr        r1, [%[b_ptr], #0x0C]\n"
                "vmla.f32    q12, q3, d2[0]\n"
                "subs        %[k], %[k], #1\n"

                "vldr        d1, [%[a_ptr], #0x08]\n"
                "vmov        d5, r0, r1\n"
                "vmla.f32    q13, q3, d2[1]\n"
                "ldr        r0, [%[a_ptr], #0x10]\n"
                "vmla.f32    q14, q3, d3[0]\n"
                "ldr        r1, [%[a_ptr], #0x14]\n"
                "vmla.f32    q15, q3, d3[1]\n"
                "bne        1b\n"

                // "tails" indicates how many multiply blocks are needed at
                // the end; it is always 1-4 inclusive.  Bail out to the
                // alternative tail immediately if it's 1.
                "6:\n"
                "subs        %[tails], %[tails], #1\n"
                "beq        3f\n"

                // Detached final iteration - for now adapt the generic
                // tails rather than reimplementing for A53.

                // Unroll 0
                "vmov        d2, r0, r1\n"
                "add        %[a_ptr], %[a_ptr], #0x18\n"
                "vmla.f32    q4, q2, d0[0]\n"
                "vld1.32    {d3}, [%[a_ptr] :64]!\n"
                "vmla.f32    q5, q2, d0[1]\n"
                "add        %[b_ptr], %[b_ptr], #0x10\n"
                "vmla.f32    q6, q2, d1[0]\n"
                "vld1.32    {d6-d7}, [%[b_ptr] :128]!\n"
                "vmla.f32    q7, q2, d1[1]\n"
                "vmla.f32    q8, q2, d2[0]\n"
                "subs        %[tails], %[tails], #1\n"
                "vmla.f32    q9, q2, d2[1]\n"
                "vld1.32    {d4-d5}, [%[b_ptr] :128]!\n"

                "vmla.f32    q10, q3, d0[0]\n"
                "vmla.f32    q11, q3, d0[1]\n"
                "vmla.f32    q12, q3, d1[0]\n"
                "vmla.f32    q13, q3, d1[1]\n"
                "vld1.32    {d0-d1}, [%[a_ptr] :64]!\n"
                "vmla.f32    q14, q3, d2[0]\n"
                "vmla.f32    q15, q3, d2[1]\n"
                "vld1.32    {d6-d7}, [%[b_ptr] :128]!\n"
                "beq        4f\n"

                // Unroll 1
                "vmla.f32    q4, q2, d3[0]\n"
                "vmla.f32    q5, q2, d3[1]\n"
                "subs        %[tails], %[tails], #1\n"
                "vmla.f32    q6, q2, d0[0]\n"
                "vmla.f32    q7, q2, d0[1]\n"
                "vmla.f32    q8, q2, d1[0]\n"
                "vmla.f32    q9, q2, d1[1]\n"
                "vld1.32    {d4-d5}, [%[b_ptr] :128]!\n"

                "vmla.f32    q10, q3, d3[0]\n"
                "vmla.f32    q11, q3, d3[1]\n"
                "vld1.32    {d2-d3}, [%[a_ptr] :64]!\n"
                "vmla.f32    q12, q3, d0[0]\n"
                "vmla.f32    q13, q3, d0[1]\n"
                "vmla.f32    q14, q3, d1[0]\n"
                "vmla.f32    q15, q3, d1[1]\n"
                "vld1.32    {d6-d7}, [%[b_ptr] :128]!\n"
                "beq        5f\n"

                // Unroll 2
                "vld1.32    {d0-d1}, [%[a_ptr] :64]!\n"
                "vmla.f32    q4, q2, d2[0]\n"
                "vmla.f32    q5, q2, d2[1]\n"
                "vmla.f32    q6, q2, d3[0]\n"
                "vmla.f32    q7, q2, d3[1]\n"
                "vmla.f32    q8, q2, d0[0]\n"
                "vmla.f32    q9, q2, d0[1]\n"
                "vld1.32    {d4-d5}, [%[b_ptr] :128]!\n"

                "vmla.f32    q10, q3, d2[0]\n"
                "vmla.f32    q11, q3, d2[1]\n"
                "vmla.f32    q12, q3, d3[0]\n"
                "vmla.f32    q13, q3, d3[1]\n"
                "vld1.32    {d2-d3}, [%[a_ptr] :64]!\n"
                "vmla.f32    q14, q3, d0[0]\n"
                "vmla.f32    q15, q3, d0[1]\n"
                "vld1.32    {d6-d7}, [%[b_ptr] :128]!\n"

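                // From here on, stores of finished accumulators are
                // interleaved with the remaining multiplies to hide the
                // store latency behind useful work.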
                // Unroll 3
                "vmla.f32    q4, q2, d1[0]\n"
                "vmla.f32    q10, q3, d1[0]\n"
                "vst1.32    {d8-d9}, [%[c_ptr] :128]!\n"
                "vmla.f32    q5, q2, d1[1]\n"
                "vst1.32    {d20-d21}, [%[c_ptr] :128]!\n"
                "vmla.f32    q11, q3, d1[1]\n"
                "vst1.32    {d10-d11}, [%[c_ptr] :128]!\n"
                "vmla.f32    q6, q2, d2[0]\n"
                "vst1.32    {d22-d23}, [%[c_ptr] :128]!\n"
                "vmla.f32    q12, q3, d2[0]\n"
                "vst1.32    {d12-d13}, [%[c_ptr] :128]!\n"
                "vmla.f32    q7, q2, d2[1]\n"
                "vst1.32    {d24-d25}, [%[c_ptr] :128]!\n"
                "vmla.f32    q13, q3, d2[1]\n"
                "vst1.32    {d14-d15}, [%[c_ptr] :128]!\n"
                "vmla.f32    q8, q2, d3[0]\n"
                "vst1.32    {d26-d27}, [%[c_ptr] :128]!\n"
                "vmla.f32    q14, q3, d3[0]\n"
                "vst1.32    {d16-d17}, [%[c_ptr] :128]!\n"
                "vmla.f32    q9, q2, d3[1]\n"
                "vst1.32    {d28-d29}, [%[c_ptr] :128]!\n"
                "vmla.f32    q15, q3, d3[1]\n"
                "vst1.32    {d18-d19}, [%[c_ptr] :128]!\n"
                "b        2f\n"

                // tails==1 final tail
                "3:\n"
                "vmov        d2, r0, r1\n"
                "add        %[b_ptr], %[b_ptr], #0x10\n"
                "vmla.f32    q4, q2, d0[0]\n"
                "add        %[a_ptr], %[a_ptr], #0x18\n"
                "vmla.f32    q5, q2, d0[1]\n"
                "vld1.32    {d6-d7}, [%[b_ptr] :128]!\n"
                "vmla.f32    q6, q2, d1[0]\n"
                "vst1.32    {d8-d9}, [%[c_ptr] :128]!\n"
                "vmla.f32    q10, q3, d0[0]\n"
                "vst1.32    {d20-d21}, [%[c_ptr] :128]!\n"
                "vmla.f32    q11, q3, d0[1]\n"
                "vst1.32    {d10-d11}, [%[c_ptr] :128]!\n"
                "vmla.f32    q12, q3, d1[0]\n"
                "vst1.32    {d22-d23}, [%[c_ptr] :128]!\n"
                "vmla.f32    q7, q2, d1[1]\n"
                "vst1.32    {d12-d13}, [%[c_ptr] :128]!\n"
                "vmla.f32    q13, q3, d1[1]\n"
                "vst1.32    {d24-d25}, [%[c_ptr] :128]!\n"
                "vmla.f32    q8, q2, d2[0]\n"
                "vst1.32    {d14-d15}, [%[c_ptr] :128]!\n"
                "vmla.f32    q14, q3, d2[0]\n"
                "vst1.32    {d26-d27}, [%[c_ptr] :128]!\n"
                "vmla.f32    q9, q2, d2[1]\n"
                "vst1.32    {d16-d17}, [%[c_ptr] :128]!\n"
                "vmla.f32    q15, q3, d2[1]\n"
                "vst1.32    {d28-d29}, [%[c_ptr] :128]!\n"
                "vst1.32    {d18-d19}, [%[c_ptr] :128]!\n"
                "b        2f\n"

                // tails==2 final tail
                "4:\n"
                "vmla.f32    q4, q2, d3[0]\n"
                "vmla.f32    q10, q3, d3[0]\n"
                "vst1.32    {d8-d9}, [%[c_ptr] :128]!\n"
                "vmla.f32    q5, q2, d3[1]\n"
                "vst1.32    {d20-d21}, [%[c_ptr] :128]!\n"
                "vmla.f32    q11, q3, d3[1]\n"
                "vst1.32    {d10-d11}, [%[c_ptr] :128]!\n"
                "vmla.f32    q6, q2, d0[0]\n"
                "vst1.32    {d22-d23}, [%[c_ptr] :128]!\n"
                "vmla.f32    q12, q3, d0[0]\n"
                "vst1.32    {d12-d13}, [%[c_ptr] :128]!\n"
                "vmla.f32    q7, q2, d0[1]\n"
                "vst1.32    {d24-d25}, [%[c_ptr] :128]!\n"
                "vmla.f32    q13, q3, d0[1]\n"
                "vst1.32    {d14-d15}, [%[c_ptr] :128]!\n"
                "vmla.f32    q8, q2, d1[0]\n"
                "vst1.32    {d26-d27}, [%[c_ptr] :128]!\n"
                "vmla.f32    q14, q3, d1[0]\n"
                "vst1.32    {d16-d17}, [%[c_ptr] :128]!\n"
                "vmla.f32    q9, q2, d1[1]\n"
                "vst1.32    {d28-d29}, [%[c_ptr] :128]!\n"
                "vmla.f32    q15, q3, d1[1]\n"
                "vst1.32    {d18-d19}, [%[c_ptr] :128]!\n"
                "b        2f\n"

                // tails==3 final tail
                "5:\n"
                "vmla.f32    q4, q2, d2[0]\n"
                "vld1.32    {d0}, [%[a_ptr] :64]!\n"
                "vmla.f32    q5, q2, d2[1]\n"
                "vmla.f32    q6, q2, d3[0]\n"
                "vst1.32    {d8-d9}, [%[c_ptr] :128]!\n"
                "vmla.f32    q10, q3, d2[0]\n"
                "vst1.32    {d20-d21}, [%[c_ptr] :128]!\n"
                "vmla.f32    q11, q3, d2[1]\n"
                "vst1.32    {d10-d11}, [%[c_ptr] :128]!\n"
                "vmla.f32    q12, q3, d3[0]\n"
                "vst1.32    {d22-d23}, [%[c_ptr] :128]!\n"
                "vmla.f32    q7, q2, d3[1]\n"
                "vst1.32    {d12-d13}, [%[c_ptr] :128]!\n"
                "vmla.f32    q13, q3, d3[1]\n"
                "vst1.32    {d24-d25}, [%[c_ptr] :128]!\n"
                "vmla.f32    q8, q2, d0[0]\n"
                "vst1.32    {d14-d15}, [%[c_ptr] :128]!\n"
                "vmla.f32    q14, q3, d0[0]\n"
                "vst1.32    {d26-d27}, [%[c_ptr] :128]!\n"
                "vmla.f32    q9, q2, d0[1]\n"
                "vst1.32    {d16-d17}, [%[c_ptr] :128]!\n"
                "vmla.f32    q15, q3, d0[1]\n"
                "vst1.32    {d28-d29}, [%[c_ptr] :128]!\n"
                "vst1.32    {d18-d19}, [%[c_ptr] :128]!\n"

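                // All tails converge here to store the final accumulator (q15).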
                "2:\n"
                "vst1.32    {d30-d31}, [%[c_ptr] :128]!\n"
                : [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr), [c_ptr] "+r"(c_ptr), [k] "+r"(k), [tails] "+r"(tails)
                :
                // The flags are updated (cmp/subs) and memory is read and
                // written through the pointer operands, so "cc" and "memory"
                // must be clobbered as well as the registers used.
                : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", "r0", "r1", "cc", "memory");
        }
    }
}

} // namespace arm_gemm

#endif // __arm__