/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifdef __arm__

#include <arm_neon.h>

#include "../../asmlib.hpp"

// Kernel implementation.
//
// Assume that "Apanel" points to a chunk of A blocks (each size 6xK) in read-order.
// Assume that "Bpanel" points to a chunk of B blocks (each size 8xK) in read-order.
// Assume that "Cpanel" points to a chunk of C output blocks (each size
// 8x6), the chunks being arranged in a row major fashion.
//
// Note that the intent of this is that either ablocks or bblocks will be 1
// - this construction allows the output loop to proceed in either order.
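//
// For intuition, a plain-C sketch of what one A block / B block pair
// contributes, given the packed layouts described above (this helper is
// purely illustrative and not part of the library):
//
//     static void sgemm_8x6_block_ref(const float *a, const float *b,
//                                     float *c, int K)
//     {
//         float out[6][8] = { { 0.0f } };
//         // a[] holds 6 values per K step, b[] holds 8 values per K step.
//         for(int k = 0; k < K; k++)
//             for(int r = 0; r < 6; r++)
//                 for(int col = 0; col < 8; col++)
//                     out[r][col] += a[k * 6 + r] * b[k * 8 + col];
//         // The asm keeps out[][] in q4-q15 and writes it out row by row.
//         for(int r = 0; r < 6; r++)
//             for(int col = 0; col < 8; col++)
//                 *c++ = out[r][col];
//     }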

namespace arm_gemm
{
void a32_sgemm_8x6_a53(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K)
{
    const float *a_ptr = Apanel;
    float *c_ptr       = Cpanel;

    for(int yb = 0; yb < ablocks; yb++)
    {
        const float *a_ptr0 = a_ptr;
        const float *b_ptr  = Bpanel;

        for(int xb = 0; xb < bblocks; xb++)
        {
            a_ptr = a_ptr0;
            int tails = (K & 3);
            if(tails == 0)
            {
                tails = 4;
            }
            int k = ((K + 3) / 4) - 1;
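
            // K is consumed four multiply blocks at a time: "k" full
            // iterations of the main loop plus a detached final iteration
            // of "tails" (1-4) blocks. E.g. K=10 gives k=2 and tails=2
            // (2*4 + 2 = 10); K=8 gives k=1 and tails=4 (1*4 + 4 = 8).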
64 "vld1.32 {d0-d1}, [%[a_ptr] :64]\n"
66 "vld1.32 {d4-d5}, [%[b_ptr] :128]\n"
68 "ldr r0, [%[a_ptr], #0x10]\n"
70 "ldr r1, [%[a_ptr], #0x14]\n"
71 "vmov.i32 q8, #0\n" ASM_PREFETCH("[%[a_ptr], #0x40]") "vmov.i32 q9, #0\n" ASM_PREFETCH("[%[b_ptr], #0x40]") "vmov.i32 q10, #0\n" ASM_PREFETCH("[%[a_ptr], #0x80]") "vmov.i32 q11, #0\n"
72 ASM_PREFETCH("[%[b_ptr], #0x80]")
74 "vmov.i32 q13, #0\n" ASM_PREFETCH("[%[a_ptr], #0xC0]") "vmov.i32 q14, #0\n" ASM_PREFETCH("[%[b_ptr], #0XC0]")
81 "vldr d6, [%[b_ptr], #0x10]\n"
83 "vmla.f32 q4, q2, d0[0]\n"
84 "ldr r0, [%[b_ptr], #0x18]\n"
85 "vmla.f32 q5, q2, d0[1]\n"
86 "ldr r1, [%[b_ptr], #0x1C]\n"
87 "vmla.f32 q6, q2, d1[0]\n"
89 "vldr d3, [%[a_ptr], #0x18]\n"
91 "vmla.f32 q7, q2, d1[1]\n" ASM_PREFETCH("[%[a_ptr], #0x100]")
92 "vmla.f32 q8, q2, d2[0]\n"
93 "vmla.f32 q9, q2, d2[1]\n"
95 "vldr d4, [%[b_ptr], #0x20]\n"
96 "vmla.f32 q10, q3, d0[0]\n"
97 "ldr r0, [%[b_ptr], #0x28]\n"
98 "vmla.f32 q11, q3, d0[1]\n"
99 "ldr r1, [%[b_ptr], #0x2C]\n"
100 "vmla.f32 q12, q3, d1[0]\n"
102 "vldr d0, [%[a_ptr], #0x20]\n"
104 "vmla.f32 q13, q3, d1[1]\n"
105 "ldr r0, [%[a_ptr], #0x28]\n"
106 "vmla.f32 q14, q3, d2[0]\n"
107 "ldr r1, [%[a_ptr], #0x2C]\n"
108 "vmla.f32 q15, q3, d2[1]\n"
111 "vldr d6, [%[b_ptr], #0x30]\n"
113 "vmla.f32 q4, q2, d3[0]\n"
114 "ldr r0, [%[b_ptr], #0x38]\n"
115 "vmla.f32 q5, q2, d3[1]\n"
116 "ldr r1, [%[b_ptr], #0x3C]\n"
117 "vmla.f32 q6, q2, d0[0]\n"
119 "vldr d2, [%[a_ptr], #0x30]\n"
121 "vmla.f32 q7, q2, d0[1]\n" ASM_PREFETCH("[%[b_ptr], #0x100]")
122 "vmla.f32 q8, q2, d1[0]\n"
123 "vmla.f32 q9, q2, d1[1]\n"
125 "vldr d4, [%[b_ptr], #0x40]\n"
126 "vmla.f32 q10, q3, d3[0]\n"
127 "ldr r0, [%[b_ptr], #0x48]\n"
128 "vmla.f32 q11, q3, d3[1]\n"
129 "ldr r1, [%[b_ptr], #0x4C]\n"
130 "vmla.f32 q12, q3, d0[0]\n"
132 "vldr d3, [%[a_ptr], #0x38]\n"
134 "vmla.f32 q13, q3, d0[1]\n"
135 "ldr r0, [%[a_ptr], #0x40]\n"
136 "vmla.f32 q14, q3, d1[0]\n"
137 "ldr r1, [%[a_ptr], #0x44]\n"
138 "vmla.f32 q15, q3, d1[1]\n"
141 "vldr d6, [%[b_ptr], #0x50]\n"
143 "vmla.f32 q4, q2, d2[0]\n"
144 "ldr r0, [%[b_ptr], #0x58]\n"
145 "vmla.f32 q5, q2, d2[1]\n"
146 "ldr r1, [%[b_ptr], #0x5C]\n"
147 "vmla.f32 q6, q2, d3[0]\n"
149 "vldr d1, [%[a_ptr], #0x48]\n"
151 "vmla.f32 q7, q2, d3[1]\n" ASM_PREFETCH("[%[a_ptr], #0x140]")
152 "vmla.f32 q8, q2, d0[0]\n"
153 "vmla.f32 q9, q2, d0[1]\n"
155 "vldr d4, [%[b_ptr], #0x60]\n"
156 "vmla.f32 q10, q3, d2[0]\n"
157 "ldr r0, [%[b_ptr], #0x68]\n"
158 "vmla.f32 q11, q3, d2[1]\n"
159 "ldr r1, [%[b_ptr], #0x6C]\n"
160 "vmla.f32 q12, q3, d3[0]\n"
162 "vldr d2, [%[a_ptr], #0x50]\n"
164 "vmla.f32 q13, q3, d3[1]\n"
165 "ldr r0, [%[a_ptr], #0x58]\n"
166 "vmla.f32 q14, q3, d0[0]\n"
167 "ldr r1, [%[a_ptr], #0x5C]\n"
168 "vmla.f32 q15, q3, d0[1]\n"
169 "add %[a_ptr], %[a_ptr], #0x60\n"
172 "vldr d6, [%[b_ptr], #0x70]\n"
174 "vmla.f32 q4, q2, d1[0]\n"
175 "ldr r0, [%[b_ptr], #0x78]\n"
176 "vmla.f32 q5, q2, d1[1]\n"
177 "ldr r1, [%[b_ptr], #0x7C]\n"
178 "vmla.f32 q6, q2, d2[0]\n"
179 "add %[b_ptr], %[b_ptr], #0x80\n"
181 "vldr d0, [%[a_ptr], #0x00]\n"
183 "vmla.f32 q7, q2, d2[1]\n" ASM_PREFETCH("[%[b_ptr], #0xC0]")
184 "vmla.f32 q8, q2, d3[0]\n"
185 "vmla.f32 q9, q2, d3[1]\n"
187 "vldr d4, [%[b_ptr], #0x00]\n"
188 "vmla.f32 q10, q3, d1[0]\n"
189 "ldr r0, [%[b_ptr], #0x08]\n"
190 "vmla.f32 q11, q3, d1[1]\n"
191 "ldr r1, [%[b_ptr], #0x0C]\n"
192 "vmla.f32 q12, q3, d2[0]\n"
193 "subs %[k], %[k], #1\n"
195 "vldr d1, [%[a_ptr], #0x08]\n"
197 "vmla.f32 q13, q3, d2[1]\n"
198 "ldr r0, [%[a_ptr], #0x10]\n"
199 "vmla.f32 q14, q3, d3[0]\n"
200 "ldr r1, [%[a_ptr], #0x14]\n"
201 "vmla.f32 q15, q3, d3[1]\n"
204 // "Tails" shows how many multiply blocks are needed at the
205 // end, must be 1-4 inclusive. Bail out to alternative tail
206 // immediately if it's 1.
208 "subs %[tails], %[tails], #1\n"

                // Detached final iteration - for now adapt the generic
                // tails rather than reimplementing for A53.
216 "add %[a_ptr], %[a_ptr], #0x18\n"
217 "vmla.f32 q4, q2, d0[0]\n"
218 "vld1.32 {d3}, [%[a_ptr] :64]!\n"
219 "vmla.f32 q5, q2, d0[1]\n"
220 "add %[b_ptr], %[b_ptr], #0x10\n"
221 "vmla.f32 q6, q2, d1[0]\n"
222 "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
223 "vmla.f32 q7, q2, d1[1]\n"
224 "vmla.f32 q8, q2, d2[0]\n"
225 "subs %[tails], %[tails], #1\n"
226 "vmla.f32 q9, q2, d2[1]\n"
227 "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
229 "vmla.f32 q10, q3, d0[0]\n"
230 "vmla.f32 q11, q3, d0[1]\n"
231 "vmla.f32 q12, q3, d1[0]\n"
232 "vmla.f32 q13, q3, d1[1]\n"
233 "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
234 "vmla.f32 q14, q3, d2[0]\n"
235 "vmla.f32 q15, q3, d2[1]\n"
236 "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
240 "vmla.f32 q4, q2, d3[0]\n"
241 "vmla.f32 q5, q2, d3[1]\n"
242 "subs %[tails], %[tails], #1\n"
243 "vmla.f32 q6, q2, d0[0]\n"
244 "vmla.f32 q7, q2, d0[1]\n"
245 "vmla.f32 q8, q2, d1[0]\n"
246 "vmla.f32 q9, q2, d1[1]\n"
247 "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
249 "vmla.f32 q10, q3, d3[0]\n"
250 "vmla.f32 q11, q3, d3[1]\n"
251 "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
252 "vmla.f32 q12, q3, d0[0]\n"
253 "vmla.f32 q13, q3, d0[1]\n"
254 "vmla.f32 q14, q3, d1[0]\n"
255 "vmla.f32 q15, q3, d1[1]\n"
256 "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
260 "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
261 "vmla.f32 q4, q2, d2[0]\n"
262 "vmla.f32 q5, q2, d2[1]\n"
263 "vmla.f32 q6, q2, d3[0]\n"
264 "vmla.f32 q7, q2, d3[1]\n"
265 "vmla.f32 q8, q2, d0[0]\n"
266 "vmla.f32 q9, q2, d0[1]\n"
267 "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
269 "vmla.f32 q10, q3, d2[0]\n"
270 "vmla.f32 q11, q3, d2[1]\n"
271 "vmla.f32 q12, q3, d3[0]\n"
272 "vmla.f32 q13, q3, d3[1]\n"
273 "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
274 "vmla.f32 q14, q3, d0[0]\n"
275 "vmla.f32 q15, q3, d0[1]\n"
276 "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
279 "vmla.f32 q4, q2, d1[0]\n"
280 "vmla.f32 q10, q3, d1[0]\n"
281 "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
282 "vmla.f32 q5, q2, d1[1]\n"
283 "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
284 "vmla.f32 q11, q3, d1[1]\n"
285 "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
286 "vmla.f32 q6, q2, d2[0]\n"
287 "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
288 "vmla.f32 q12, q3, d2[0]\n"
289 "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
290 "vmla.f32 q7, q2, d2[1]\n"
291 "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
292 "vmla.f32 q13, q3, d2[1]\n"
293 "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
294 "vmla.f32 q8, q2, d3[0]\n"
295 "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
296 "vmla.f32 q14, q3, d3[0]\n"
297 "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
298 "vmla.f32 q9, q2, d3[1]\n"
299 "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
300 "vmla.f32 q15, q3, d3[1]\n"
301 "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"

                // tails==1 final tail
                "3:\n"
                "vmov d2, r0, r1\n"
                "add %[b_ptr], %[b_ptr], #0x10\n"
308 "vmla.f32 q4, q2, d0[0]\n"
309 "add %[a_ptr], %[a_ptr], #0x18\n"
310 "vmla.f32 q5, q2, d0[1]\n"
311 "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
312 "vmla.f32 q6, q2, d1[0]\n"
313 "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
314 "vmla.f32 q10, q3, d0[0]\n"
315 "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
316 "vmla.f32 q11, q3, d0[1]\n"
317 "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
318 "vmla.f32 q12, q3, d1[0]\n"
319 "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
320 "vmla.f32 q7, q2, d1[1]\n"
321 "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
322 "vmla.f32 q13, q3, d1[1]\n"
323 "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
324 "vmla.f32 q8, q2, d2[0]\n"
325 "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
326 "vmla.f32 q14, q3, d2[0]\n"
327 "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
328 "vmla.f32 q9, q2, d2[1]\n"
329 "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
330 "vmla.f32 q15, q3, d2[1]\n"
331 "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
332 "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"

                // tails==2 final tail
                "4:\n"
337 "vmla.f32 q4, q2, d3[0]\n"
338 "vmla.f32 q10, q3, d3[0]\n"
339 "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
340 "vmla.f32 q5, q2, d3[1]\n"
341 "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
342 "vmla.f32 q11, q3, d3[1]\n"
343 "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
344 "vmla.f32 q6, q2, d0[0]\n"
345 "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
346 "vmla.f32 q12, q3, d0[0]\n"
347 "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
348 "vmla.f32 q7, q2, d0[1]\n"
349 "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
350 "vmla.f32 q13, q3, d0[1]\n"
351 "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
352 "vmla.f32 q8, q2, d1[0]\n"
353 "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
354 "vmla.f32 q14, q3, d1[0]\n"
355 "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
356 "vmla.f32 q9, q2, d1[1]\n"
357 "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
358 "vmla.f32 q15, q3, d1[1]\n"
359 "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"

                // tails==3 final tail
                "5:\n"
364 "vmla.f32 q4, q2, d2[0]\n"
365 "vld1.32 {d0}, [%[a_ptr] :64]!\n"
366 "vmla.f32 q5, q2, d2[1]\n"
367 "vmla.f32 q6, q2, d3[0]\n"
368 "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
369 "vmla.f32 q10, q3, d2[0]\n"
370 "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
371 "vmla.f32 q11, q3, d2[1]\n"
372 "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
373 "vmla.f32 q12, q3, d3[0]\n"
374 "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
375 "vmla.f32 q7, q2, d3[1]\n"
376 "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
377 "vmla.f32 q13, q3, d3[1]\n"
378 "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
379 "vmla.f32 q8, q2, d0[0]\n"
380 "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
381 "vmla.f32 q14, q3, d0[0]\n"
382 "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
383 "vmla.f32 q9, q2, d0[1]\n"
384 "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
385 "vmla.f32 q15, q3, d0[1]\n"
386 "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
387 "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
390 "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n"
                : [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr), [c_ptr] "+r"(c_ptr), [k] "+r"(k), [tails] "+r"(tails)
                :
                : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", "r0", "r1", "cc", "memory");
        }
    }
}

} // namespace arm_gemm

#endif // __arm__