/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifdef __aarch64__

#include <arm_neon.h>

#include "../../asmlib.hpp"

namespace arm_gemm
{
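// SGEMM kernel producing a 12x8 block of output per inner-loop pass, tuned
// for the Cortex-A55r1. Apanel and Bpanel are pre-packed operand panels and
// K is the shared dimension; the ASM_PREFETCH* helpers come from asmlib.hpp.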
void a64_sgemm_asimd_12x8_a55r1(const float *Apanel, const float *Bpanel, float *Cpanel, const int ablocks, const int bblocks, const int K)
{
    const float *a_ptr = Apanel;
    float       *c_ptr = Cpanel;
    // Fix up for odd lengths - set a flag if K is odd, but make
    // sure we round up the iteration count.
    int oddk    = (K & 1);
    int k_iters = ((K + 1) / 2) - 1;
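    // Each pass of the main asm loop below consumes two iterations of K (the
    // two unrolled halves), so ceil(K/2) passes are needed in total; the final
    // pass is peeled off into the even/odd tail code, hence the "- 1".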
    for(int yb = 0; yb < ablocks; yb++)
    {
        const float *a_ptr0 = a_ptr;
        const float *b_ptr  = Bpanel;

        for(int xb = 0; xb < bblocks; xb++)
        {
            a_ptr = a_ptr0;
            int k = k_iters;
            register float32x4_t a0 asm("v0");
            register float32x4_t a1 asm("v1");
            register float32x4_t b0 asm("v2");
            register float32x4_t b1 asm("v3");
            register float32x4_t b2 asm("v4");
            register float32x4_t a0a asm("v5");
            register float32x4_t a1a asm("v6");
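            // Pinning the operand vectors to v0-v6 keeps the compiler's
            // register allocation away from v8-v31, which the asm below uses
            // as the 24 accumulators of the 12x8 output block.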
            __asm __volatile(
                // Initialize result registers, load initial operands, prime prefetches.
                "movi v8.4s, #0x0\n"
                "ldr %q[a0], [%[a_ptr]]\n"
                "movi v9.4s, #0x0\n"
                "ldr %q[b0], [%[b_ptr]]\n"
                "movi v10.4s, #0x0\n"
                "ldr %q[a1], [%[a_ptr], #16]\n"
                "movi v11.4s, #0x0\n"
                "ldr %q[b1], [%[b_ptr], #16]\n"
70 "movi v12.4s, #0x0\n" ASM_PREFETCH("[%[b_ptr], #64]") "movi v13.4s, #0x0\n" ASM_PREFETCH("[%[a_ptr], #64]") "movi v14.4s, #0x0\n" ASM_PREFETCH("[%[b_ptr], #128]") "movi v15.4s, #0x0\n"
71 ASM_PREFETCH("[%[a_ptr], #128]") "movi v16.4s, #0x0\n" ASM_PREFETCH("[%[b_ptr], #192]") "movi v17.4s, #0x0\n" ASM_PREFETCH("[%[b_ptr], #256]")
73 "movi v19.4s, #0x0\n" ASM_PREFETCH("[%[a_ptr], #192]")
75 "movi v21.4s, #0x0\n" ASM_PREFETCH("[%[b_ptr], #320]")
77 "movi v23.4s, #0x0\n" ASM_PREFETCH("[%[a_ptr], #256]")
79 "movi v25.4s, #0x0\n" ASM_PREFETCH("[%[b_ptr], #384]")
81 "movi v27.4s, #0x0\n" ASM_PREFETCH("[%[b_ptr], #448]")
83 "movi v29.4s, #0x0\n" ASM_PREFETCH("[%[a_ptr], #384]")
85 "movi v31.4s, #0x0\n" ASM_PREFETCH("[%[b_ptr], #512]")
                // The loop is offset by these two instructions which must
                // always be executed.
                "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
                "ldr %d[b2], [%[b_ptr], #32]\n"

                // Skip loop if we are doing zero iterations of it.
                "cbz %w[k], 4f\n"

                // Loop proper
                "1:\n"
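                // Throughout the loop, operand loads are split into a 64-bit
                // "ldr %d" of the low half plus a GPR load ("ldr x20") that is
                // moved into the high half with "ins". On the in-order A55
                // these narrower loads can dual-issue alongside the fmlas,
                // which a single 128-bit load would not; this is the apparent
                // motivation for the pattern.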
97 "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
98 "ldr x20, [%[b_ptr], #40]\n"
99 "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
100 "subs %w[k], %w[k], #1\n"
101 "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
102 "ldr %d[a0a], [%[a_ptr], #32]\n"
104 "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
105 "ins %[b2].d[1], x20\n"
106 "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
107 "ldr x20, [%[a_ptr], #40]\n"
108 "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
109 "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
110 "ldr %d[a1a], [%[a_ptr], #48]\n"
112 "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
113 "ins %[a0a].d[1], x20\n"
114 "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
115 "ldr x20, [%[a_ptr], #56]\n"
116 "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
117 "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
118 "ldr %d[b0], [%[b_ptr], #48]\n"
120 "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
121 "ins %[a1a].d[1], x20\n"
122 "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
123 "ldr x20, [%[b_ptr], #56]\n"
124 "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
125 "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
126 "ldr %d[b1], [%[b_ptr], #64]\n"
128 "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
129 "ins %[b0].d[1], x20\n"
130 "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
131 "ldr x20, [%[b_ptr], #72]\n"
132 "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
133 "fmla v27.4s, %[b2].4s, %[a0].s[3]\n" ASM_PREFETCH("[%[a_ptr], #448]")
135 "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
136 "fmla v29.4s, %[b2].4s, %[a1].s[1]\n" ASM_PREFETCH("[%[b_ptr], #576]")
137 "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
138 "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
141 "ldr %d[b2], [%[b_ptr], #80]\n"
143 "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
144 "ins %[b1].d[1], x20\n"
145 "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
146 "ldr x20, [%[b_ptr], #88]\n"
147 "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
148 "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
149 "ldr %d[a0], [%[a_ptr], #64]\n"
151 "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
152 "ins %[b2].d[1], x20\n"
153 "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
154 "ldr x20, [%[a_ptr], #72]\n"
155 "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
156 "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
157 "ldr %d[a1], [%[a_ptr], #80]\n"
159 "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
160 "ins %[a0].d[1], x20\n"
161 "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
162 "ldr x20, [%[a_ptr], #88]\n"
163 "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
164 "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
165 "ldr %d[b0], [%[b_ptr], #96]\n"
167 "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
168 "ins %[a1].d[1], x20\n"
169 "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
170 "ldr x20, [%[b_ptr], #104]\n"
171 "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
172 "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
173 "ldr %d[b1], [%[b_ptr], #112]\n"
175 "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
176 "ins %[b0].d[1], x20\n"
177 "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
178 "ldr x20, [%[b_ptr], #120]\n"
179 "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
181 "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
182 "add %[a_ptr], %[a_ptr], #64\n"
184 "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n" ASM_PREFETCH("[%[b_ptr], #640]")
185 "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
186 "add %[b_ptr], %[b_ptr], #96\n"
187 "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
188 "ins %[b1].d[1], x20\n"
189 "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
190 "ldr %d[b2], [%[b_ptr], #32]\n"
192 "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
195 // Branch here if K=1 or 2. Do the right thing for odd/even at the end.
198 // Start final iteration - branch off to "odd" code before we load a0a.
199 "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
200 "ldr x20, [%[b_ptr], #40]\n"
201 "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
202 "cbnz %w[oddk], 2f\n"
                // Even K continuation
                "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
                "ldr %d[a0a], [%[a_ptr], #32]\n"

                "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
                "ins %[b2].d[1], x20\n"
                "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
                "ldr x20, [%[a_ptr], #40]\n"
                "fmla v14.4s, %[b0].4s, %[a1].s[2]\n" ASM_PREFETCHW("[%[c_ptr]]")
                "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
                "ldr %d[a1a], [%[a_ptr], #48]\n"

                "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
                "ins %[a0a].d[1], x20\n"
                "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
                "ldr x20, [%[a_ptr], #56]\n"
                "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
                "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
                "ldr %d[b0], [%[b_ptr], #48]\n"

                "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
                "ins %[a1a].d[1], x20\n"
                "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
                "ldr x20, [%[b_ptr], #56]\n"
                "fmla v22.4s, %[b1].4s, %[a1].s[2]\n" ASM_PREFETCHW("[%[c_ptr], #64]")
                "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"

                "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
                "fmla v25.4s, %[b2].4s, %[a0].s[1]\n" ASM_PREFETCHW("[%[c_ptr], #128]")
                "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
                "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
                "ldr %d[b1], [%[b_ptr], #64]\n"

                "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
                "ins %[b0].d[1], x20\n"
                "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
                "ldr x20, [%[b_ptr], #72]\n"
                "fmla v30.4s, %[b2].4s, %[a1].s[2]\n" ASM_PREFETCHW("[%[c_ptr], #192]")
                "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
                "ldr %d[b2], [%[b_ptr], #80]\n"

                "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
                "ins %[b1].d[1], x20\n"
                "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
                "ldr x20, [%[b_ptr], #88]\n"
                "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
                "ins %[b2].d[1], x20\n"

                "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n" ASM_PREFETCHW("[%[c_ptr], #256]")
                "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
                "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
                "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n" ASM_PREFETCHW("[%[c_ptr], #320]")
                "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
                "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n" ASM_PREFETCHWL2("[%[c_ptr], #384]")
                "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
                "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n" ASM_PREFETCHWL2("[%[c_ptr], #448]")
                "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
                "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
                "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n" ASM_PREFETCHWL2("[%[c_ptr], #512]")
                "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
                "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n" ASM_PREFETCHWL2("[%[c_ptr], #576]")
                "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
                "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
                "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n" ASM_PREFETCHWL2("[%[c_ptr], #640]")
                "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
                "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n" ASM_PREFETCHWL2("[%[c_ptr], #704]")
                "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
                "add %[a_ptr], %[a_ptr], #64\n"
                "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
                "add %[b_ptr], %[b_ptr], #96\n"
                "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
                "b 3f\n"

                // Odd K continuation
                "2:\n"
279 "fmla v11.4s, %[b0].4s, %[a0].s[3]\n" ASM_PREFETCHW("[%[c_ptr]]")
280 "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
281 "ins %[b2].d[1], x20\n"
282 "fmla v13.4s, %[b0].4s, %[a1].s[1]\n" ASM_PREFETCHW("[%[c_ptr], #64]")
283 "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
284 "add %[a_ptr], %[a_ptr], #32\n"
285 "fmla v15.4s, %[b0].4s, %[a1].s[3]\n" ASM_PREFETCHW("[%[c_ptr], #128]")
286 "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
287 "add %[b_ptr], %[b_ptr], #48\n"
288 "fmla v17.4s, %[b1].4s, %[a0].s[1]\n" ASM_PREFETCHW("[%[c_ptr], #192]")
289 "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
290 "fmla v19.4s, %[b1].4s, %[a0].s[3]\n" ASM_PREFETCHW("[%[c_ptr], #256]")
291 "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
292 "fmla v21.4s, %[b1].4s, %[a1].s[1]\n" ASM_PREFETCHW("[%[c_ptr], #320]")
293 "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
294 "fmla v23.4s, %[b1].4s, %[a1].s[3]\n" ASM_PREFETCHWL2("[%[c_ptr], #384]")
295 "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
296 "fmla v25.4s, %[b2].4s, %[a0].s[1]\n" ASM_PREFETCHWL2("[%[c_ptr], #448]")
297 "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
298 "fmla v27.4s, %[b2].4s, %[a0].s[3]\n" ASM_PREFETCHWL2("[%[c_ptr], #512]") "fmla v28.4s, %[b2].4s, %[a1].s[0]\n" ASM_PREFETCHWL2("[%[c_ptr], #576]") "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
299 ASM_PREFETCHWL2("[%[c_ptr], #640]") "fmla v30.4s, %[b2].4s, %[a1].s[2]\n" ASM_PREFETCHWL2("[%[c_ptr], #704]")
300 "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
304 "str q8, [%[c_ptr]]\n"
305 "str q16, [%[c_ptr], #16]\n"
306 "str q24, [%[c_ptr], #32]\n"
307 "str q9, [%[c_ptr], #48]\n"
308 "str q17, [%[c_ptr], #64]\n"
309 "str q25, [%[c_ptr], #80]\n"
310 "str q10, [%[c_ptr], #96]\n"
311 "str q18, [%[c_ptr], #112]\n"
312 "str q26, [%[c_ptr], #128]\n"
313 "str q11, [%[c_ptr], #144]\n"
314 "str q19, [%[c_ptr], #160]\n"
315 "str q27, [%[c_ptr], #176]\n"
316 "str q12, [%[c_ptr], #192]\n"
317 "str q20, [%[c_ptr], #208]\n"
318 "str q28, [%[c_ptr], #224]\n"
319 "str q13, [%[c_ptr], #240]\n"
320 "str q21, [%[c_ptr], #256]\n"
321 "str q29, [%[c_ptr], #272]\n"
322 "str q14, [%[c_ptr], #288]\n"
323 "str q22, [%[c_ptr], #304]\n"
324 "str q30, [%[c_ptr], #320]\n"
325 "str q15, [%[c_ptr], #336]\n"
326 "str q23, [%[c_ptr], #352]\n"
327 "str q31, [%[c_ptr], #368]\n"
328 "add %[c_ptr], %[c_ptr], #384\n"
330 [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr), [c_ptr] "+r"(c_ptr),
331 [a0] "+w"(a0), [a1] "+w"(a1), [a0a] "+w"(a0a), [a1a] "+w"(a1a),
332 [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2), [k] "+r"(k)
334 : "x20", "x21", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18",
335 "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc");
} // namespace arm_gemm

#endif // __aarch64__