#if defined(__arm__) && !defined(OPENSSL_NO_ASM)

# This implementation was taken from the public domain, neon2 version in
# SUPERCOP by D. J. Bernstein and Peter Schwabe.
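
# Note: Poly1305 works modulo p = 2^130 - 5. This code keeps 130-bit values
# in five 26-bit limbs,
#
#   h = h0 + h1*2^26 + h2*2^52 + h3*2^78 + h4*2^104,  0 <= hi < 2^26,
#
# and uses NEON to process two 16-byte blocks per loop iteration. Because
# 2^130 = 5 (mod p), carries out of the top limb fold back in multiplied by
# 5, which is why scaled copies of the key limbs (5y12, 5y34, 5z12, 5z34)
# are precomputed below.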
# qhasm: int32 input_2

# qhasm: int32 input_3

# qhasm: stack32 input_4

# qhasm: stack32 input_5

# qhasm: stack32 input_6

# qhasm: stack32 input_7

# qhasm: int32 caller_r4

# qhasm: int32 caller_r5

# qhasm: int32 caller_r6

# qhasm: int32 caller_r7

# qhasm: int32 caller_r8

# qhasm: int32 caller_r9

# qhasm: int32 caller_r10

# qhasm: int32 caller_r11

# qhasm: int32 caller_r12

# qhasm: int32 caller_r14

# qhasm: reg128 caller_q4

# qhasm: reg128 caller_q5

# qhasm: reg128 caller_q6

# qhasm: reg128 caller_q7

# qhasm: stack128 y0_stack

# qhasm: stack128 y12_stack

# qhasm: stack128 y34_stack

# qhasm: stack128 5y12_stack

# qhasm: stack128 5y34_stack

# qhasm: stack128 z0_stack

# qhasm: stack128 z12_stack

# qhasm: stack128 z34_stack

# qhasm: stack128 5z12_stack

# qhasm: stack128 5z34_stack

# qhasm: stack128 two24
# qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks
.fpu neon
.global openssl_poly1305_neon2_blocks
.hidden openssl_poly1305_neon2_blocks
.type openssl_poly1305_neon2_blocks STT_FUNC
openssl_poly1305_neon2_blocks:
vpush {q4,q5,q6,q7}
mov r12,sp
sub sp,sp,#192
and sp,sp,#0xffffffe0
# qhasm: len = input_3
# asm 1: mov >len=int32#4,<input_3=int32#4
# asm 2: mov >len=r3,<input_3=r3
mov r3,r3

# qhasm: y0 = mem64[input_1]y0[1]; input_1 += 8
# asm 1: vld1.8 {<y0=reg128#1%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<y0=d0},[<input_1=r1]!
vld1.8 {d0},[r1]!

# qhasm: y12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y12=d2->y12=d3},[<input_1=r1]!
vld1.8 {d2-d3},[r1]!

# qhasm: y34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y34=d4->y34=d5},[<input_1=r1]!
vld1.8 {d4-d5},[r1]!

# qhasm: input_1 += 8
# asm 1: add >input_1=int32#2,<input_1=int32#2,#8
# asm 2: add >input_1=r1,<input_1=r1,#8
add r1,r1,#8

# qhasm: z0 = mem64[input_1]z0[1]; input_1 += 8
# asm 1: vld1.8 {<z0=reg128#4%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<z0=d6},[<input_1=r1]!
vld1.8 {d6},[r1]!

# qhasm: z12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z12=reg128#5%bot->z12=reg128#5%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z12=d8->z12=d9},[<input_1=r1]!
vld1.8 {d8-d9},[r1]!

# qhasm: z34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z34=d10->z34=d11},[<input_1=r1]!
vld1.8 {d10-d11},[r1]!

# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#7,#0xffffffff
# asm 2: vmov.i64 >mask=q6,#0xffffffff
vmov.i64 q6,#0xffffffff

# qhasm: 2x u4 = 0xff
# asm 1: vmov.i64 >u4=reg128#8,#0xff
# asm 2: vmov.i64 >u4=q7,#0xff
vmov.i64 q7,#0xff

# qhasm: x01 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x01=d16->x01=d17},[<input_0=r0,: 128]!
vld1.8 {d16-d17},[r0,: 128]!

# qhasm: x23 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x23=reg128#10%bot->x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x23=d18->x23=d19},[<input_0=r0,: 128]!
vld1.8 {d18-d19},[r0,: 128]!

# qhasm: x4 aligned= mem64[input_0]x4[1]
# asm 1: vld1.8 {<x4=reg128#11%bot},[<input_0=int32#1,: 64]
# asm 2: vld1.8 {<x4=d20},[<input_0=r0,: 64]
vld1.8 {d20},[r0,: 64]

# qhasm: input_0 -= 32
# asm 1: sub >input_0=int32#1,<input_0=int32#1,#32
# asm 2: sub >input_0=r0,<input_0=r0,#32
sub r0,r0,#32

# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#7,<mask=reg128#7,#6
# asm 2: vshr.u64 >mask=q6,<mask=q6,#6
vshr.u64 q6,q6,#6

# qhasm: 2x u4 unsigned>>= 7
# asm 1: vshr.u64 >u4=reg128#8,<u4=reg128#8,#7
# asm 2: vshr.u64 >u4=q7,<u4=q7,#7
vshr.u64 q7,q7,#7

# qhasm: 4x 5y12 = y12 << 2
# asm 1: vshl.i32 >5y12=reg128#12,<y12=reg128#2,#2
# asm 2: vshl.i32 >5y12=q11,<y12=q1,#2
vshl.i32 q11,q1,#2

# qhasm: 4x 5y34 = y34 << 2
# asm 1: vshl.i32 >5y34=reg128#13,<y34=reg128#3,#2
# asm 2: vshl.i32 >5y34=q12,<y34=q2,#2
vshl.i32 q12,q2,#2

# qhasm: 4x 5y12 += y12
# asm 1: vadd.i32 >5y12=reg128#12,<5y12=reg128#12,<y12=reg128#2
# asm 2: vadd.i32 >5y12=q11,<5y12=q11,<y12=q1
vadd.i32 q11,q11,q1

# qhasm: 4x 5y34 += y34
# asm 1: vadd.i32 >5y34=reg128#13,<5y34=reg128#13,<y34=reg128#3
# asm 2: vadd.i32 >5y34=q12,<5y34=q12,<y34=q2
vadd.i32 q12,q12,q2

# qhasm: 2x u4 <<= 24
# asm 1: vshl.i64 >u4=reg128#8,<u4=reg128#8,#24
# asm 2: vshl.i64 >u4=q7,<u4=q7,#24
vshl.i64 q7,q7,#24

# qhasm: 4x 5z12 = z12 << 2
# asm 1: vshl.i32 >5z12=reg128#14,<z12=reg128#5,#2
# asm 2: vshl.i32 >5z12=q13,<z12=q4,#2
vshl.i32 q13,q4,#2

# qhasm: 4x 5z34 = z34 << 2
# asm 1: vshl.i32 >5z34=reg128#15,<z34=reg128#6,#2
# asm 2: vshl.i32 >5z34=q14,<z34=q5,#2
vshl.i32 q14,q5,#2

# qhasm: 4x 5z12 += z12
# asm 1: vadd.i32 >5z12=reg128#14,<5z12=reg128#14,<z12=reg128#5
# asm 2: vadd.i32 >5z12=q13,<5z12=q13,<z12=q4
vadd.i32 q13,q13,q4

# qhasm: 4x 5z34 += z34
# asm 1: vadd.i32 >5z34=reg128#15,<5z34=reg128#15,<z34=reg128#6
# asm 2: vadd.i32 >5z34=q14,<5z34=q14,<z34=q5
vadd.i32 q14,q14,q5
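
# Note: 5*v is built as (v << 2) + v, so the 5y12/5y34/5z12/5z34 tables
# above cost only a vector shift and add each instead of a multiply.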
# qhasm: new y0_stack

# qhasm: new y12_stack

# qhasm: new y34_stack

# qhasm: new 5y12_stack

# qhasm: new 5y34_stack

# qhasm: new z0_stack

# qhasm: new z12_stack

# qhasm: new z34_stack

# qhasm: new 5z12_stack

# qhasm: new 5z34_stack
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0

# qhasm: mem128[ptr] aligned= u4
# asm 1: vst1.8 {<u4=reg128#8%bot-<u4=reg128#8%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<u4=d14-<u4=d15},[<ptr=r1,: 128]
vst1.8 {d14-d15},[r1,: 128]

# asm 1: vmov >r4=reg128#16,<u4=reg128#8
# asm 2: vmov >r4=q15,<u4=q7
vmov q15,q7

# asm 1: vmov >r0=reg128#8,<u4=reg128#8
# asm 2: vmov >r0=q7,<u4=q7
vmov q7,q7

# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#2,<y0_stack=stack128#2
# asm 2: lea >ptr=r1,<y0_stack=[sp,#16]
add r1,sp,#16

# qhasm: mem128[ptr] aligned= y0
# asm 1: vst1.8 {<y0=reg128#1%bot-<y0=reg128#1%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y0=d0-<y0=d1},[<ptr=r1,: 128]
vst1.8 {d0-d1},[r1,: 128]

# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#2,<y12_stack=stack128#3
# asm 2: lea >ptr=r1,<y12_stack=[sp,#32]
add r1,sp,#32

# qhasm: mem128[ptr] aligned= y12
# asm 1: vst1.8 {<y12=reg128#2%bot-<y12=reg128#2%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y12=d2-<y12=d3},[<ptr=r1,: 128]
vst1.8 {d2-d3},[r1,: 128]

# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#2,<y34_stack=stack128#4
# asm 2: lea >ptr=r1,<y34_stack=[sp,#48]
add r1,sp,#48

# qhasm: mem128[ptr] aligned= y34
# asm 1: vst1.8 {<y34=reg128#3%bot-<y34=reg128#3%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y34=d4-<y34=d5},[<ptr=r1,: 128]
vst1.8 {d4-d5},[r1,: 128]

# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#2,<z0_stack=stack128#7
# asm 2: lea >ptr=r1,<z0_stack=[sp,#96]
add r1,sp,#96

# qhasm: mem128[ptr] aligned= z0
# asm 1: vst1.8 {<z0=reg128#4%bot-<z0=reg128#4%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z0=d6-<z0=d7},[<ptr=r1,: 128]
vst1.8 {d6-d7},[r1,: 128]

# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#2,<z12_stack=stack128#8
# asm 2: lea >ptr=r1,<z12_stack=[sp,#112]
add r1,sp,#112

# qhasm: mem128[ptr] aligned= z12
# asm 1: vst1.8 {<z12=reg128#5%bot-<z12=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z12=d8-<z12=d9},[<ptr=r1,: 128]
vst1.8 {d8-d9},[r1,: 128]

# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#2,<z34_stack=stack128#9
# asm 2: lea >ptr=r1,<z34_stack=[sp,#128]
add r1,sp,#128

# qhasm: mem128[ptr] aligned= z34
# asm 1: vst1.8 {<z34=reg128#6%bot-<z34=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z34=d10-<z34=d11},[<ptr=r1,: 128]
vst1.8 {d10-d11},[r1,: 128]

# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#2,<5y12_stack=stack128#5
# asm 2: lea >ptr=r1,<5y12_stack=[sp,#64]
add r1,sp,#64

# qhasm: mem128[ptr] aligned= 5y12
# asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y12=d22-<5y12=d23},[<ptr=r1,: 128]
vst1.8 {d22-d23},[r1,: 128]

# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#2,<5y34_stack=stack128#6
# asm 2: lea >ptr=r1,<5y34_stack=[sp,#80]
add r1,sp,#80

# qhasm: mem128[ptr] aligned= 5y34
# asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y34=d24-<5y34=d25},[<ptr=r1,: 128]
vst1.8 {d24-d25},[r1,: 128]

# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#2,<5z12_stack=stack128#10
# asm 2: lea >ptr=r1,<5z12_stack=[sp,#144]
add r1,sp,#144

# qhasm: mem128[ptr] aligned= 5z12
# asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z12=d26-<5z12=d27},[<ptr=r1,: 128]
vst1.8 {d26-d27},[r1,: 128]

# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#2,<5z34_stack=stack128#11
# asm 2: lea >ptr=r1,<5z34_stack=[sp,#160]
add r1,sp,#160

# qhasm: mem128[ptr] aligned= 5z34
# asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z34=d28-<5z34=d29},[<ptr=r1,: 128]
vst1.8 {d28-d29},[r1,: 128]

# qhasm: unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64
# qhasm: goto below64bytes if !unsigned>
bls ._below64bytes

# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#3,#32
# asm 2: add >input_2=r1,<input_2=r2,#32
add r1,r2,#32

# qhasm: mainloop2:
._mainloop2:
# qhasm: c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#1%bot->c01=reg128#1%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c01=d0->c01=d1},[<input_2=r1]!
vld1.8 {d0-d1},[r1]!

# qhasm: c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#2%bot->c23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c23=d2->c23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!

# qhasm: r4[0,1] += x01[0] unsigned* z34[2]; r4[2,3] += x01[1] unsigned* z34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%bot,<z34=reg128#6%top
# asm 2: vmlal.u32 <r4=q15,<x01=d16,<z34=d11
vmlal.u32 q15,d16,d11

# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#3,<z12_stack=stack128#8
# asm 2: lea >ptr=r2,<z12_stack=[sp,#112]
add r2,sp,#112

# qhasm: z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>z12=reg128#3%bot->z12=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z12=d4->z12=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]

# qhasm: r4[0,1] += x01[2] unsigned* z34[0]; r4[2,3] += x01[3] unsigned* z34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%top,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r4=q15,<x01=d17,<z34=d10
vmlal.u32 q15,d17,d10

# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#3,<z0_stack=stack128#7
# asm 2: lea >ptr=r2,<z0_stack=[sp,#96]
add r2,sp,#96

# qhasm: z0 aligned= mem128[ptr]
# asm 1: vld1.8 {>z0=reg128#4%bot->z0=reg128#4%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z0=d6->z0=d7},[<ptr=r2,: 128]
vld1.8 {d6-d7},[r2,: 128]

# qhasm: r4[0,1] += x23[0] unsigned* z12[2]; r4[2,3] += x23[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<x23=d18,<z12=d5
vmlal.u32 q15,d18,d5

# qhasm: c01 c23 = c01[0]c01[1]c01[2]c23[2]c23[0]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%top,<c23=reg128#2%top
# asm 2: vtrn.32 <c01=d1,<c23=d3
vtrn.32 d1,d3

# qhasm: r4[0,1] += x23[2] unsigned* z12[0]; r4[2,3] += x23[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<x23=d19,<z12=d4
vmlal.u32 q15,d19,d4

# qhasm: r4[0,1] += x4[0] unsigned* z0[0]; r4[2,3] += x4[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x4=reg128#11%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r4=q15,<x4=d20,<z0=d6
vmlal.u32 q15,d20,d6

# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#5,<c23=reg128#2%top,#18
# asm 2: vshll.u32 >r3=q4,<c23=d3,#18
vshll.u32 q4,d3,#18

# qhasm: c01 c23 = c01[0]c23[0]c01[2]c01[3]c01[1]c23[1]c23[2]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%bot,<c23=reg128#2%bot
# asm 2: vtrn.32 <c01=d0,<c23=d2
vtrn.32 d0,d2

# qhasm: r3[0,1] += x01[0] unsigned* z34[0]; r3[2,3] += x01[1] unsigned* z34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%bot,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r3=q4,<x01=d16,<z34=d10
vmlal.u32 q4,d16,d10

# qhasm: r3[0,1] += x01[2] unsigned* z12[2]; r3[2,3] += x01[3] unsigned* z12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%top,<z12=reg128#3%top
# asm 2: vmlal.u32 <r3=q4,<x01=d17,<z12=d5
vmlal.u32 q4,d17,d5

# qhasm: r0 = r0[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#8%bot,<r0=reg128#8%bot,<c01=reg128#1%bot,#1
# asm 2: vext.32 <r0=d14,<r0=d14,<c01=d0,#1
vext.32 d14,d14,d0,#1

# qhasm: r3[0,1] += x23[0] unsigned* z12[0]; r3[2,3] += x23[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d18,<z12=d4
vmlal.u32 q4,d18,d4

# qhasm: input_2 -= 64
# asm 1: sub >input_2=int32#2,<input_2=int32#2,#64
# asm 2: sub >input_2=r1,<input_2=r1,#64
sub r1,r1,#64

# qhasm: r3[0,1] += x23[2] unsigned* z0[0]; r3[2,3] += x23[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d19,<z0=d6
vmlal.u32 q4,d19,d6

# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#3,<5z34_stack=stack128#11
# asm 2: lea >ptr=r2,<5z34_stack=[sp,#160]
add r2,sp,#160

# qhasm: 5z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z34=d10->5z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]

# qhasm: r3[0,1] += x4[0] unsigned* 5z34[2]; r3[2,3] += x4[1] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x4=reg128#11%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r3=q4,<x4=d20,<5z34=d11
vmlal.u32 q4,d20,d11

# qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#8,<r0=reg128#8
# asm 2: vrev64.i32 >r0=q7,<r0=q7
vrev64.i32 q7,q7

# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#14,<c01=reg128#1%top,#12
# asm 2: vshll.u32 >r2=q13,<c01=d1,#12
vshll.u32 q13,d1,#12

# qhasm: d01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d01=reg128#12%bot->d01=reg128#12%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d01=d22->d01=d23},[<input_2=r1]!
vld1.8 {d22-d23},[r1]!

# qhasm: r2[0,1] += x01[0] unsigned* z12[2]; r2[2,3] += x01[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r2=q13,<x01=d16,<z12=d5
vmlal.u32 q13,d16,d5

# qhasm: r2[0,1] += x01[2] unsigned* z12[0]; r2[2,3] += x01[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r2=q13,<x01=d17,<z12=d4
vmlal.u32 q13,d17,d4

# qhasm: r2[0,1] += x23[0] unsigned* z0[0]; r2[2,3] += x23[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r2=q13,<x23=d18,<z0=d6
vmlal.u32 q13,d18,d6

# qhasm: r2[0,1] += x23[2] unsigned* 5z34[2]; r2[2,3] += x23[3] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r2=q13,<x23=d19,<5z34=d11
vmlal.u32 q13,d19,d11

# qhasm: r2[0,1] += x4[0] unsigned* 5z34[0]; r2[2,3] += x4[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x4=reg128#11%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r2=q13,<x4=d20,<5z34=d10
vmlal.u32 q13,d20,d10

# qhasm: r0 = r0[0,1]c01[1]r0[2]
# asm 1: vext.32 <r0=reg128#8%top,<c01=reg128#1%bot,<r0=reg128#8%top,#1
# asm 2: vext.32 <r0=d15,<c01=d0,<r0=d15,#1
vext.32 d15,d0,d15,#1

# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#15,<c23=reg128#2%bot,#6
# asm 2: vshll.u32 >r1=q14,<c23=d2,#6
vshll.u32 q14,d2,#6

# qhasm: r1[0,1] += x01[0] unsigned* z12[0]; r1[2,3] += x01[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d16,<z12=d4
vmlal.u32 q14,d16,d4

# qhasm: r1[0,1] += x01[2] unsigned* z0[0]; r1[2,3] += x01[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d17,<z0=d6
vmlal.u32 q14,d17,d6

# qhasm: r1[0,1] += x23[0] unsigned* 5z34[2]; r1[2,3] += x23[1] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r1=q14,<x23=d18,<5z34=d11
vmlal.u32 q14,d18,d11

# qhasm: r1[0,1] += x23[2] unsigned* 5z34[0]; r1[2,3] += x23[3] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%top,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r1=q14,<x23=d19,<5z34=d10
vmlal.u32 q14,d19,d10

# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#3,<5z12_stack=stack128#10
# asm 2: lea >ptr=r2,<5z12_stack=[sp,#144]
add r2,sp,#144

# qhasm: 5z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z12=reg128#1%bot->5z12=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z12=d0->5z12=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]

# qhasm: r1[0,1] += x4[0] unsigned* 5z12[2]; r1[2,3] += x4[1] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x4=reg128#11%bot,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r1=q14,<x4=d20,<5z12=d1
vmlal.u32 q14,d20,d1

# qhasm: d23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d23=reg128#2%bot->d23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d23=d2->d23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!

# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32

# qhasm: r0[0,1] += x4[0] unsigned* 5z12[0]; r0[2,3] += x4[1] unsigned* 5z12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x4=reg128#11%bot,<5z12=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<x4=d20,<5z12=d0
vmlal.u32 q7,d20,d0

# qhasm: r0[0,1] += x23[0] unsigned* 5z34[0]; r0[2,3] += x23[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r0=q7,<x23=d18,<5z34=d10
vmlal.u32 q7,d18,d10

# qhasm: d01 d23 = d01[0] d23[0] d01[1] d23[1]
# asm 1: vswp <d23=reg128#2%bot,<d01=reg128#12%top
# asm 2: vswp <d23=d2,<d01=d23
vswp d2,d23

# qhasm: r0[0,1] += x23[2] unsigned* 5z12[2]; r0[2,3] += x23[3] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%top,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r0=q7,<x23=d19,<5z12=d1
vmlal.u32 q7,d19,d1

# qhasm: r0[0,1] += x01[0] unsigned* z0[0]; r0[2,3] += x01[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r0=q7,<x01=d16,<z0=d6
vmlal.u32 q7,d16,d6

# qhasm: 2x v4 = d23 unsigned>> 40
# asm 1: vshr.u64 >v4=reg128#4,<d23=reg128#2,#40
# asm 2: vshr.u64 >v4=q3,<d23=q1,#40
vshr.u64 q3,q1,#40

# qhasm: mid = d01[1]d23[0] mid[2,3]
# asm 1: vext.32 <mid=reg128#1%bot,<d01=reg128#12%bot,<d23=reg128#2%bot,#1
# asm 2: vext.32 <mid=d0,<d01=d22,<d23=d2,#1
vext.32 d0,d22,d2,#1

# qhasm: v23[2] = d23[0,1] unsigned>> 14; v23[3] = d23[2,3] unsigned>> 14
# asm 1: vshrn.u64 <v23=reg128#10%top,<d23=reg128#2,#14
# asm 2: vshrn.u64 <v23=d19,<d23=q1,#14
vshrn.u64 d19,q1,#14

# qhasm: mid = mid[0,1] d01[3]d23[2]
# asm 1: vext.32 <mid=reg128#1%top,<d01=reg128#12%top,<d23=reg128#2%top,#1
# asm 2: vext.32 <mid=d1,<d01=d23,<d23=d3,#1
vext.32 d1,d23,d3,#1

# qhasm: v01[2] = d01[0,1] unsigned>> 26; v01[3] = d01[2,3] unsigned>> 26
# asm 1: vshrn.u64 <v01=reg128#11%top,<d01=reg128#12,#26
# asm 2: vshrn.u64 <v01=d21,<d01=q11,#26
vshrn.u64 d21,q11,#26

# qhasm: v01 = d01[1]d01[0] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<d01=reg128#12%bot,<d01=reg128#12%bot,#1
# asm 2: vext.32 <v01=d20,<d01=d22,<d01=d22,#1
vext.32 d20,d22,d22,#1

# qhasm: r0[0,1] += x01[2] unsigned* 5z34[2]; r0[2,3] += x01[3] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r0=q7,<x01=d17,<5z34=d11
vmlal.u32 q7,d17,d11

# qhasm: v01 = v01[1]d01[2] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<v01=reg128#11%bot,<d01=reg128#12%top,#1
# asm 2: vext.32 <v01=d20,<v01=d20,<d01=d23,#1
vext.32 d20,d20,d23,#1

# qhasm: v23[0] = mid[0,1] unsigned>> 20; v23[1] = mid[2,3] unsigned>> 20
# asm 1: vshrn.u64 <v23=reg128#10%bot,<mid=reg128#1,#20
# asm 2: vshrn.u64 <v23=d18,<mid=q0,#20
vshrn.u64 d18,q0,#20

# qhasm: v4 = v4[0]v4[2]v4[1]v4[3]
# asm 1: vtrn.32 <v4=reg128#4%bot,<v4=reg128#4%top
# asm 2: vtrn.32 <v4=d6,<v4=d7
vtrn.32 d6,d7

# qhasm: 4x v01 &= 0x03ffffff
# asm 1: vand.i32 <v01=reg128#11,#0x03ffffff
# asm 2: vand.i32 <v01=q10,#0x03ffffff
vand.i32 q10,#0x03ffffff

# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#3,<y34_stack=stack128#4
# asm 2: lea >ptr=r2,<y34_stack=[sp,#48]
add r2,sp,#48

# qhasm: y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y34=d4->y34=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]

# qhasm: 4x v23 &= 0x03ffffff
# asm 1: vand.i32 <v23=reg128#10,#0x03ffffff
# asm 2: vand.i32 <v23=q9,#0x03ffffff
vand.i32 q9,#0x03ffffff

# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#3,<y12_stack=stack128#3
# asm 2: lea >ptr=r2,<y12_stack=[sp,#32]
add r2,sp,#32

# qhasm: y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y12=d2->y12=d3},[<ptr=r2,: 128]
vld1.8 {d2-d3},[r2,: 128]

# qhasm: 4x v4 |= 0x01000000
# asm 1: vorr.i32 <v4=reg128#4,#0x01000000
# asm 2: vorr.i32 <v4=q3,#0x01000000
vorr.i32 q3,#0x01000000
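
# Note: v4 holds the top limbs of the two message blocks; OR-ing in
# 0x01000000 sets the 2^128 padding bit of each 16-byte block, which lands
# at bit 128 - 104 = 24 of the fifth 26-bit limb.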
# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#3,<y0_stack=stack128#2
# asm 2: lea >ptr=r2,<y0_stack=[sp,#16]
add r2,sp,#16

# qhasm: y0 aligned= mem128[ptr]
# asm 1: vld1.8 {>y0=reg128#1%bot->y0=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y0=d0->y0=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]

# qhasm: r4[0,1] += v01[0] unsigned* y34[2]; r4[2,3] += v01[1] unsigned* y34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<v01=d20,<y34=d5
vmlal.u32 q15,d20,d5

# qhasm: r4[0,1] += v01[2] unsigned* y34[0]; r4[2,3] += v01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<v01=d21,<y34=d4
vmlal.u32 q15,d21,d4

# qhasm: r4[0,1] += v23[0] unsigned* y12[2]; r4[2,3] += v23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q15,<v23=d18,<y12=d3
vmlal.u32 q15,d18,d3

# qhasm: r4[0,1] += v23[2] unsigned* y12[0]; r4[2,3] += v23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q15,<v23=d19,<y12=d2
vmlal.u32 q15,d19,d2

# qhasm: r4[0,1] += v4[0] unsigned* y0[0]; r4[2,3] += v4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v4=reg128#4%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q15,<v4=d6,<y0=d0
vmlal.u32 q15,d6,d0

# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#3,<5y34_stack=stack128#6
# asm 2: lea >ptr=r2,<5y34_stack=[sp,#80]
add r2,sp,#80

# qhasm: 5y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y34=d24->5y34=d25},[<ptr=r2,: 128]
vld1.8 {d24-d25},[r2,: 128]

# qhasm: r3[0,1] += v01[0] unsigned* y34[0]; r3[2,3] += v01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<v01=d20,<y34=d4
vmlal.u32 q4,d20,d4

# qhasm: r3[0,1] += v01[2] unsigned* y12[2]; r3[2,3] += v01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q4,<v01=d21,<y12=d3
vmlal.u32 q4,d21,d3

# qhasm: r3[0,1] += v23[0] unsigned* y12[0]; r3[2,3] += v23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d18,<y12=d2
vmlal.u32 q4,d18,d2

# qhasm: r3[0,1] += v23[2] unsigned* y0[0]; r3[2,3] += v23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d19,<y0=d0
vmlal.u32 q4,d19,d0

# qhasm: r3[0,1] += v4[0] unsigned* 5y34[2]; r3[2,3] += v4[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v4=reg128#4%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q4,<v4=d6,<5y34=d25
vmlal.u32 q4,d6,d25

# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#3,<5y12_stack=stack128#5
# asm 2: lea >ptr=r2,<5y12_stack=[sp,#64]
add r2,sp,#64

# qhasm: 5y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y12=d22->5y12=d23},[<ptr=r2,: 128]
vld1.8 {d22-d23},[r2,: 128]

# qhasm: r0[0,1] += v4[0] unsigned* 5y12[0]; r0[2,3] += v4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v4=reg128#4%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q7,<v4=d6,<5y12=d22
vmlal.u32 q7,d6,d22

# qhasm: r0[0,1] += v23[0] unsigned* 5y34[0]; r0[2,3] += v23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q7,<v23=d18,<5y34=d24
vmlal.u32 q7,d18,d24

# qhasm: r0[0,1] += v23[2] unsigned* 5y12[2]; r0[2,3] += v23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q7,<v23=d19,<5y12=d23
vmlal.u32 q7,d19,d23

# qhasm: r0[0,1] += v01[0] unsigned* y0[0]; r0[2,3] += v01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<v01=d20,<y0=d0
vmlal.u32 q7,d20,d0

# qhasm: r0[0,1] += v01[2] unsigned* 5y34[2]; r0[2,3] += v01[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q7,<v01=d21,<5y34=d25
vmlal.u32 q7,d21,d25

# qhasm: r1[0,1] += v01[0] unsigned* y12[0]; r1[2,3] += v01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d20,<y12=d2
vmlal.u32 q14,d20,d2

# qhasm: r1[0,1] += v01[2] unsigned* y0[0]; r1[2,3] += v01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d21,<y0=d0
vmlal.u32 q14,d21,d0

# qhasm: r1[0,1] += v23[0] unsigned* 5y34[2]; r1[2,3] += v23[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q14,<v23=d18,<5y34=d25
vmlal.u32 q14,d18,d25

# qhasm: r1[0,1] += v23[2] unsigned* 5y34[0]; r1[2,3] += v23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q14,<v23=d19,<5y34=d24
vmlal.u32 q14,d19,d24

# qhasm: r1[0,1] += v4[0] unsigned* 5y12[2]; r1[2,3] += v4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v4=reg128#4%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q14,<v4=d6,<5y12=d23
vmlal.u32 q14,d6,d23

# qhasm: r2[0,1] += v01[0] unsigned* y12[2]; r2[2,3] += v01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q13,<v01=d20,<y12=d3
vmlal.u32 q13,d20,d3

# qhasm: r2[0,1] += v01[2] unsigned* y12[0]; r2[2,3] += v01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q13,<v01=d21,<y12=d2
vmlal.u32 q13,d21,d2

# qhasm: r2[0,1] += v23[0] unsigned* y0[0]; r2[2,3] += v23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q13,<v23=d18,<y0=d0
vmlal.u32 q13,d18,d0

# qhasm: r2[0,1] += v23[2] unsigned* 5y34[2]; r2[2,3] += v23[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q13,<v23=d19,<5y34=d25
vmlal.u32 q13,d19,d25

# qhasm: r2[0,1] += v4[0] unsigned* 5y34[0]; r2[2,3] += v4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v4=reg128#4%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q13,<v4=d6,<5y34=d24
vmlal.u32 q13,d6,d24

# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#3,<two24=stack128#1
# asm 2: lea >ptr=r2,<two24=[sp,#0]
add r2,sp,#0

# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#4,<r0=reg128#8,#26
# asm 2: vshr.u64 >t1=q3,<r0=q7,#26
vshr.u64 q3,q7,#26

# asm 1: sub >len=int32#4,<len=int32#4,#64
# asm 2: sub >len=r3,<len=r3,#64
sub r3,r3,#64

# asm 1: vand >r0=reg128#6,<r0=reg128#8,<mask=reg128#7
# asm 2: vand >r0=q5,<r0=q7,<mask=q6
vand q5,q7,q6

# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#15,<t1=reg128#4
# asm 2: vadd.i64 >r1=q3,<r1=q14,<t1=q3
vadd.i64 q3,q14,q3

# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q7,<r3=q4,#26
vshr.u64 q7,q4,#26

# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6

# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#8,<r4=reg128#16,<t4=reg128#8
# asm 2: vadd.i64 >x4=q7,<r4=q15,<t4=q7
vadd.i64 q7,q15,q7

# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#16%bot->r4=reg128#16%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r4=d30->r4=d31},[<ptr=r2,: 128]
vld1.8 {d30-d31},[r2,: 128]

# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#9,<r1=reg128#4,#26
# asm 2: vshr.u64 >t2=q8,<r1=q3,#26
vshr.u64 q8,q3,#26

# asm 1: vand >r1=reg128#4,<r1=reg128#4,<mask=reg128#7
# asm 2: vand >r1=q3,<r1=q3,<mask=q6
vand q3,q3,q6

# qhasm: 2x t0 = x4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#10,<x4=reg128#8,#26
# asm 2: vshr.u64 >t0=q9,<x4=q7,#26
vshr.u64 q9,q7,#26

# asm 1: vadd.i64 >r2=reg128#9,<r2=reg128#14,<t2=reg128#9
# asm 2: vadd.i64 >r2=q8,<r2=q13,<t2=q8
vadd.i64 q8,q13,q8

# asm 1: vand >x4=reg128#11,<x4=reg128#8,<mask=reg128#7
# asm 2: vand >x4=q10,<x4=q7,<mask=q6
vand q10,q7,q6

# qhasm: 2x x01 = r0 + t0
# asm 1: vadd.i64 >x01=reg128#6,<r0=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q5,<r0=q5,<t0=q9
vadd.i64 q5,q5,q9

# qhasm: r0 aligned= mem128[ptr]
# asm 1: vld1.8 {>r0=reg128#8%bot->r0=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r0=d14->r0=d15},[<ptr=r2,: 128]
vld1.8 {d14-d15},[r2,: 128]

# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#3,<z34_stack=stack128#9
# asm 2: lea >ptr=r2,<z34_stack=[sp,#128]
add r2,sp,#128

# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#10,<t0=reg128#10,#2
# asm 2: vshl.i64 >t0=q9,<t0=q9,#2
vshl.i64 q9,q9,#2

# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#9,#26
# asm 2: vshr.u64 >t3=q13,<r2=q8,#26
vshr.u64 q13,q8,#26

# qhasm: 2x x01 += t0
# asm 1: vadd.i64 >x01=reg128#15,<x01=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q14,<x01=q5,<t0=q9
vadd.i64 q14,q5,q9

# qhasm: z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z34=d10->z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]

# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#9,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q8,<mask=q6
vand q9,q8,q6

# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#5,<r3=reg128#5,<t3=reg128#14
# asm 2: vadd.i64 >r3=q4,<r3=q4,<t3=q13
vadd.i64 q4,q4,q13

# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32

# qhasm: 2x t1 = x01 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#14,<x01=reg128#15,#26
# asm 2: vshr.u64 >t1=q13,<x01=q14,#26
vshr.u64 q13,q14,#26

# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19

# qhasm: x01 = x01 & mask
# asm 1: vand >x01=reg128#9,<x01=reg128#15,<mask=reg128#7
# asm 2: vand >x01=q8,<x01=q14,<mask=q6
vand q8,q14,q6

# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#4,<t1=reg128#14
# asm 2: vadd.i64 >r1=q3,<r1=q3,<t1=q13
vadd.i64 q3,q3,q13

# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#14,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q13,<r3=q4,#26
vshr.u64 q13,q4,#26

# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17

# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6

# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7

# qhasm: 2x x4 += t4
# asm 1: vadd.i64 >x4=reg128#11,<x4=reg128#11,<t4=reg128#14
# asm 2: vadd.i64 >x4=q10,<x4=q10,<t4=q13
vadd.i64 q10,q10,q13

# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#5%bot,<r3=reg128#5%top
# asm 2: vtrn.32 <r3=d8,<r3=d9
vtrn.32 d8,d9

# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0

# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#5%bot,<r3=reg128#5%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d8,<r3=d8,#0
vext.32 d19,d8,d8,#0

# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21

# qhasm: unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64

# qhasm: goto mainloop2 if unsigned>
bhi ._mainloop2

# qhasm: input_2 -= 32
# asm 1: sub >input_2=int32#3,<input_2=int32#2,#32
# asm 2: sub >input_2=r2,<input_2=r1,#32
sub r2,r1,#32

# qhasm: below64bytes:
._below64bytes:

# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32

# qhasm: goto end if !unsigned>
bls ._end

# qhasm: mainloop:
._mainloop:
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0

# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#5%bot->r4=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>r4=d8->r4=d9},[<ptr=r1,: 128]
vld1.8 {d8-d9},[r1,: 128]

# qhasm: u4 aligned= mem128[ptr]
# asm 1: vld1.8 {>u4=reg128#6%bot->u4=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>u4=d10->u4=d11},[<ptr=r1,: 128]
vld1.8 {d10-d11},[r1,: 128]

# qhasm: c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#8%bot->c01=reg128#8%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c01=d14->c01=d15},[<input_2=r2]!
vld1.8 {d14-d15},[r2]!

# qhasm: r4[0,1] += x01[0] unsigned* y34[2]; r4[2,3] += x01[1] unsigned* y34[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q4,<x01=d16,<y34=d5
vmlal.u32 q4,d16,d5

# qhasm: c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_2=r2]!
vld1.8 {d26-d27},[r2]!

# qhasm: r4[0,1] += x01[2] unsigned* y34[0]; r4[2,3] += x01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q4,<x01=d17,<y34=d4
vmlal.u32 q4,d17,d4

# qhasm: r0 = u4[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#4%bot,<u4=reg128#6%bot,<c01=reg128#8%bot,#1
# asm 2: vext.32 <r0=d6,<u4=d10,<c01=d14,#1
vext.32 d6,d10,d14,#1

# qhasm: r4[0,1] += x23[0] unsigned* y12[2]; r4[2,3] += x23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q4,<x23=d18,<y12=d3
vmlal.u32 q4,d18,d3

# qhasm: r0 = r0[0,1]u4[1]c23[0]
# asm 1: vext.32 <r0=reg128#4%top,<u4=reg128#6%bot,<c23=reg128#14%bot,#1
# asm 2: vext.32 <r0=d7,<u4=d10,<c23=d26,#1
vext.32 d7,d10,d26,#1

# qhasm: r4[0,1] += x23[2] unsigned* y12[0]; r4[2,3] += x23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q4,<x23=d19,<y12=d2
vmlal.u32 q4,d19,d2

# qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#4,<r0=reg128#4
# asm 2: vrev64.i32 >r0=q3,<r0=q3
vrev64.i32 q3,q3

# qhasm: r4[0,1] += x4[0] unsigned* y0[0]; r4[2,3] += x4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x4=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q4,<x4=d20,<y0=d0
vmlal.u32 q4,d20,d0

# qhasm: r0[0,1] += x4[0] unsigned* 5y12[0]; r0[2,3] += x4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x4=reg128#11%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q3,<x4=d20,<5y12=d22
vmlal.u32 q3,d20,d22

# qhasm: r0[0,1] += x23[0] unsigned* 5y34[0]; r0[2,3] += x23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q3,<x23=d18,<5y34=d24
vmlal.u32 q3,d18,d24

# qhasm: r0[0,1] += x23[2] unsigned* 5y12[2]; r0[2,3] += x23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q3,<x23=d19,<5y12=d23
vmlal.u32 q3,d19,d23

# qhasm: c01 c23 = c01[0]c23[0]c01[2]c23[2]c01[1]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#8,<c23=reg128#14
# asm 2: vtrn.32 <c01=q7,<c23=q13
vtrn.32 q7,q13

# qhasm: r0[0,1] += x01[0] unsigned* y0[0]; r0[2,3] += x01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q3,<x01=d16,<y0=d0
vmlal.u32 q3,d16,d0

# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#6,<c23=reg128#14%top,#18
# asm 2: vshll.u32 >r3=q5,<c23=d27,#18
vshll.u32 q5,d27,#18

# qhasm: r0[0,1] += x01[2] unsigned* 5y34[2]; r0[2,3] += x01[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q3,<x01=d17,<5y34=d25
vmlal.u32 q3,d17,d25

# qhasm: r3[0,1] += x01[0] unsigned* y34[0]; r3[2,3] += x01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q5,<x01=d16,<y34=d4
vmlal.u32 q5,d16,d4

# qhasm: r3[0,1] += x01[2] unsigned* y12[2]; r3[2,3] += x01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q5,<x01=d17,<y12=d3
vmlal.u32 q5,d17,d3

# qhasm: r3[0,1] += x23[0] unsigned* y12[0]; r3[2,3] += x23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d18,<y12=d2
vmlal.u32 q5,d18,d2

# qhasm: r3[0,1] += x23[2] unsigned* y0[0]; r3[2,3] += x23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d19,<y0=d0
vmlal.u32 q5,d19,d0

# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#14,<c23=reg128#14%bot,#6
# asm 2: vshll.u32 >r1=q13,<c23=d26,#6
vshll.u32 q13,d26,#6

# qhasm: r3[0,1] += x4[0] unsigned* 5y34[2]; r3[2,3] += x4[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x4=reg128#11%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q5,<x4=d20,<5y34=d25
vmlal.u32 q5,d20,d25

# qhasm: r1[0,1] += x01[0] unsigned* y12[0]; r1[2,3] += x01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d16,<y12=d2
vmlal.u32 q13,d16,d2

# qhasm: r1[0,1] += x01[2] unsigned* y0[0]; r1[2,3] += x01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d17,<y0=d0
vmlal.u32 q13,d17,d0

# qhasm: r1[0,1] += x23[0] unsigned* 5y34[2]; r1[2,3] += x23[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q13,<x23=d18,<5y34=d25
vmlal.u32 q13,d18,d25

# qhasm: r1[0,1] += x23[2] unsigned* 5y34[0]; r1[2,3] += x23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q13,<x23=d19,<5y34=d24
vmlal.u32 q13,d19,d24

# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#8,<c01=reg128#8%top,#12
# asm 2: vshll.u32 >r2=q7,<c01=d15,#12
vshll.u32 q7,d15,#12

# qhasm: r1[0,1] += x4[0] unsigned* 5y12[2]; r1[2,3] += x4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x4=reg128#11%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q13,<x4=d20,<5y12=d23
vmlal.u32 q13,d20,d23

# qhasm: r2[0,1] += x01[0] unsigned* y12[2]; r2[2,3] += x01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q7,<x01=d16,<y12=d3
vmlal.u32 q7,d16,d3

# qhasm: r2[0,1] += x01[2] unsigned* y12[0]; r2[2,3] += x01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q7,<x01=d17,<y12=d2
vmlal.u32 q7,d17,d2

# qhasm: r2[0,1] += x23[0] unsigned* y0[0]; r2[2,3] += x23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q7,<x23=d18,<y0=d0
vmlal.u32 q7,d18,d0

# qhasm: r2[0,1] += x23[2] unsigned* 5y34[2]; r2[2,3] += x23[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q7,<x23=d19,<5y34=d25
vmlal.u32 q7,d19,d25

# qhasm: r2[0,1] += x4[0] unsigned* 5y34[0]; r2[2,3] += x4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x4=reg128#11%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q7,<x4=d20,<5y34=d24
vmlal.u32 q7,d20,d24

# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#9,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q8,<r0=q3,#26
vshr.u64 q8,q3,#26

# asm 1: vand >r0=reg128#4,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >r0=q3,<r0=q3,<mask=q6
vand q3,q3,q6

# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#9,<r1=reg128#14,<t1=reg128#9
# asm 2: vadd.i64 >r1=q8,<r1=q13,<t1=q8
vadd.i64 q8,q13,q8

# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#10,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q9,<r3=q5,#26
vshr.u64 q9,q5,#26

# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6

# qhasm: 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#5,<r4=reg128#5,<t4=reg128#10
# asm 2: vadd.i64 >r4=q4,<r4=q4,<t4=q9
vadd.i64 q4,q4,q9

# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#9,#26
# asm 2: vshr.u64 >t2=q9,<r1=q8,#26
vshr.u64 q9,q8,#26

# asm 1: vand >r1=reg128#11,<r1=reg128#9,<mask=reg128#7
# asm 2: vand >r1=q10,<r1=q8,<mask=q6
vand q10,q8,q6

# qhasm: 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#9,<r4=reg128#5,#26
# asm 2: vshr.u64 >t0=q8,<r4=q4,#26
vshr.u64 q8,q4,#26

# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#8,<r2=reg128#8,<t2=reg128#10
# asm 2: vadd.i64 >r2=q7,<r2=q7,<t2=q9
vadd.i64 q7,q7,q9

# asm 1: vand >r4=reg128#5,<r4=reg128#5,<mask=reg128#7
# asm 2: vand >r4=q4,<r4=q4,<mask=q6
vand q4,q4,q6

# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8

# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#9,<t0=reg128#9,#2
# asm 2: vshl.i64 >t0=q8,<t0=q8,#2
vshl.i64 q8,q8,#2

# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#8,#26
# asm 2: vshr.u64 >t3=q13,<r2=q7,#26
vshr.u64 q13,q7,#26

# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8
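
# Note: the three steps above implement the 2^130 = 5 (mod p) wraparound:
# the carry t0 out of limb 4 is added to limb 0 once, and then once more
# shifted left by 2, so limb 0 gains 5*t0 in total.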
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#8,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q7,<mask=q6
vand q9,q7,q6

# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#6,<r3=reg128#6,<t3=reg128#14
# asm 2: vadd.i64 >r3=q5,<r3=q5,<t3=q13
vadd.i64 q5,q5,q13

# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#8,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q7,<r0=q3,#26
vshr.u64 q7,q3,#26

# qhasm: x01 = r0 & mask
# asm 1: vand >x01=reg128#9,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >x01=q8,<r0=q3,<mask=q6
vand q8,q3,q6

# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#11,<t1=reg128#8
# asm 2: vadd.i64 >r1=q3,<r1=q10,<t1=q7
vadd.i64 q3,q10,q7

# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q7,<r3=q5,#26
vshr.u64 q7,q5,#26

# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6

# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#11,<r4=reg128#5,<t4=reg128#8
# asm 2: vadd.i64 >x4=q10,<r4=q4,<t4=q7
vadd.i64 q10,q4,q7

# asm 1: sub >len=int32#4,<len=int32#4,#32
# asm 2: sub >len=r3,<len=r3,#32
sub r3,r3,#32

# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17

# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19

# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7

# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#6%bot,<r3=reg128#6%top
# asm 2: vtrn.32 <r3=d10,<r3=d11
vtrn.32 d10,d11

# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21

# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0

# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#6%bot,<r3=reg128#6%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d10,<r3=d10,#0
vext.32 d19,d10,d10,#0

# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32

# qhasm: goto mainloop if unsigned>
bhi ._mainloop

# qhasm: end:
._end:

# qhasm: mem128[input_0] = x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#9%bot-<x01=reg128#9%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x01=d16-<x01=d17},[<input_0=r0]!
vst1.8 {d16-d17},[r0]!

# qhasm: mem128[input_0] = x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0]!
vst1.8 {d18-d19},[r0]!

# qhasm: mem64[input_0] = x4[0]
# asm 1: vst1.8 <x4=reg128#11%bot,[<input_0=int32#1]
# asm 2: vst1.8 <x4=d20,[<input_0=r0]
vst1.8 d20,[r0]

# asm 1: mov >len=int32#1,<len=int32#4
# asm 2: mov >len=r0,<len=r3
mov r0,r3

# qhasm: qpopreturn len
mov sp,r12
vpop {q4,q5,q6,q7}
bx lr
# qhasm: int32 input_0

# qhasm: int32 input_1

# qhasm: int32 input_2

# qhasm: int32 input_3

# qhasm: stack32 input_4

# qhasm: stack32 input_5

# qhasm: stack32 input_6

# qhasm: stack32 input_7

# qhasm: int32 caller_r4

# qhasm: int32 caller_r5

# qhasm: int32 caller_r6

# qhasm: int32 caller_r7

# qhasm: int32 caller_r8

# qhasm: int32 caller_r9

# qhasm: int32 caller_r10

# qhasm: int32 caller_r11

# qhasm: int32 caller_r12

# qhasm: int32 caller_r14

# qhasm: reg128 caller_q4

# qhasm: reg128 caller_q5

# qhasm: reg128 caller_q6

# qhasm: reg128 caller_q7
# qhasm: reg128 _5y01

# qhasm: reg128 _5y23

# qhasm: reg128 _5y4

# qhasm: reg128 mask
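
# Note: this helper computes h = (x + c) * y modulo 2^130 - 5 on two
# independent inputs at once, in the same five-limb radix-2^26 layout as
# above; _5y01, _5y23 and _5y4 hold the precomputed 5*y limbs used when
# folding the high product terms back down.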
# qhasm: enter crypto_onetimeauth_poly1305_neon2_addmulmod
.global openssl_poly1305_neon2_addmulmod
.hidden openssl_poly1305_neon2_addmulmod
.type openssl_poly1305_neon2_addmulmod STT_FUNC
openssl_poly1305_neon2_addmulmod:
# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#1,#0xffffffff
# asm 2: vmov.i64 >mask=q0,#0xffffffff
vmov.i64 q0,#0xffffffff

# qhasm: y01 aligned= mem128[input_2];input_2+=16
# asm 1: vld1.8 {>y01=reg128#2%bot->y01=reg128#2%top},[<input_2=int32#3,: 128]!
# asm 2: vld1.8 {>y01=d2->y01=d3},[<input_2=r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!

# qhasm: 4x _5y01 = y01 << 2
# asm 1: vshl.i32 >_5y01=reg128#3,<y01=reg128#2,#2
# asm 2: vshl.i32 >_5y01=q2,<y01=q1,#2
vshl.i32 q2,q1,#2

# qhasm: y23 aligned= mem128[input_2];input_2+=16
# asm 1: vld1.8 {>y23=reg128#4%bot->y23=reg128#4%top},[<input_2=int32#3,: 128]!
# asm 2: vld1.8 {>y23=d6->y23=d7},[<input_2=r2,: 128]!
vld1.8 {d6-d7},[r2,: 128]!

# qhasm: 4x _5y23 = y23 << 2
# asm 1: vshl.i32 >_5y23=reg128#9,<y23=reg128#4,#2
# asm 2: vshl.i32 >_5y23=q8,<y23=q3,#2
vshl.i32 q8,q3,#2

# qhasm: y4 aligned= mem64[input_2]y4[1]
# asm 1: vld1.8 {<y4=reg128#10%bot},[<input_2=int32#3,: 64]
# asm 2: vld1.8 {<y4=d18},[<input_2=r2,: 64]
vld1.8 {d18},[r2,: 64]

# qhasm: 4x _5y4 = y4 << 2
# asm 1: vshl.i32 >_5y4=reg128#11,<y4=reg128#10,#2
# asm 2: vshl.i32 >_5y4=q10,<y4=q9,#2
vshl.i32 q10,q9,#2

# qhasm: x01 aligned= mem128[input_1];input_1+=16
# asm 1: vld1.8 {>x01=reg128#12%bot->x01=reg128#12%top},[<input_1=int32#2,: 128]!
# asm 2: vld1.8 {>x01=d22->x01=d23},[<input_1=r1,: 128]!
vld1.8 {d22-d23},[r1,: 128]!

# qhasm: 4x _5y01 += y01
# asm 1: vadd.i32 >_5y01=reg128#3,<_5y01=reg128#3,<y01=reg128#2
# asm 2: vadd.i32 >_5y01=q2,<_5y01=q2,<y01=q1
vadd.i32 q2,q2,q1

# qhasm: x23 aligned= mem128[input_1];input_1+=16
# asm 1: vld1.8 {>x23=reg128#13%bot->x23=reg128#13%top},[<input_1=int32#2,: 128]!
# asm 2: vld1.8 {>x23=d24->x23=d25},[<input_1=r1,: 128]!
vld1.8 {d24-d25},[r1,: 128]!

# qhasm: 4x _5y23 += y23
# asm 1: vadd.i32 >_5y23=reg128#9,<_5y23=reg128#9,<y23=reg128#4
# asm 2: vadd.i32 >_5y23=q8,<_5y23=q8,<y23=q3
vadd.i32 q8,q8,q3

# qhasm: 4x _5y4 += y4
# asm 1: vadd.i32 >_5y4=reg128#11,<_5y4=reg128#11,<y4=reg128#10
# asm 2: vadd.i32 >_5y4=q10,<_5y4=q10,<y4=q9
vadd.i32 q10,q10,q9

# qhasm: c01 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c01=reg128#14%bot->c01=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c01=d26->c01=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!

# qhasm: 4x x01 += c01
# asm 1: vadd.i32 >x01=reg128#12,<x01=reg128#12,<c01=reg128#14
# asm 2: vadd.i32 >x01=q11,<x01=q11,<c01=q13
vadd.i32 q11,q11,q13

# qhasm: c23 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!

# qhasm: 4x x23 += c23
# asm 1: vadd.i32 >x23=reg128#13,<x23=reg128#13,<c23=reg128#14
# asm 2: vadd.i32 >x23=q12,<x23=q12,<c23=q13
vadd.i32 q12,q12,q13

# qhasm: x4 aligned= mem64[input_1]x4[1]
# asm 1: vld1.8 {<x4=reg128#14%bot},[<input_1=int32#2,: 64]
# asm 2: vld1.8 {<x4=d26},[<input_1=r1,: 64]
vld1.8 {d26},[r1,: 64]

# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#1,<mask=reg128#1,#6
# asm 2: vshr.u64 >mask=q0,<mask=q0,#6
vshr.u64 q0,q0,#6

# qhasm: c4 aligned= mem64[input_3]c4[1]
# asm 1: vld1.8 {<c4=reg128#15%bot},[<input_3=int32#4,: 64]
# asm 2: vld1.8 {<c4=d28},[<input_3=r3,: 64]
vld1.8 {d28},[r3,: 64]

# qhasm: 4x x4 += c4
# asm 1: vadd.i32 >x4=reg128#14,<x4=reg128#14,<c4=reg128#15
# asm 2: vadd.i32 >x4=q13,<x4=q13,<c4=q14
vadd.i32 q13,q13,q14

# qhasm: r0[0,1] = x01[0] unsigned* y01[0]; r0[2,3] = x01[1] unsigned* y01[1]
# asm 1: vmull.u32 >r0=reg128#15,<x01=reg128#12%bot,<y01=reg128#2%bot
# asm 2: vmull.u32 >r0=q14,<x01=d22,<y01=d2
vmull.u32 q14,d22,d2

# qhasm: r0[0,1] += x01[2] unsigned* _5y4[0]; r0[2,3] += x01[3] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x01=reg128#12%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r0=q14,<x01=d23,<_5y4=d20
vmlal.u32 q14,d23,d20

# qhasm: r0[0,1] += x23[0] unsigned* _5y23[2]; r0[2,3] += x23[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r0=q14,<x23=d24,<_5y23=d17
vmlal.u32 q14,d24,d17

# qhasm: r0[0,1] += x23[2] unsigned* _5y23[0]; r0[2,3] += x23[3] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%top,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r0=q14,<x23=d25,<_5y23=d16
vmlal.u32 q14,d25,d16

# qhasm: r0[0,1] += x4[0] unsigned* _5y01[2]; r0[2,3] += x4[1] unsigned* _5y01[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x4=reg128#14%bot,<_5y01=reg128#3%top
# asm 2: vmlal.u32 <r0=q14,<x4=d26,<_5y01=d5
vmlal.u32 q14,d26,d5

# qhasm: r1[0,1] = x01[0] unsigned* y01[2]; r1[2,3] = x01[1] unsigned* y01[3]
# asm 1: vmull.u32 >r1=reg128#3,<x01=reg128#12%bot,<y01=reg128#2%top
# asm 2: vmull.u32 >r1=q2,<x01=d22,<y01=d3
vmull.u32 q2,d22,d3

# qhasm: r1[0,1] += x01[2] unsigned* y01[0]; r1[2,3] += x01[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x01=reg128#12%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r1=q2,<x01=d23,<y01=d2
vmlal.u32 q2,d23,d2

# qhasm: r1[0,1] += x23[0] unsigned* _5y4[0]; r1[2,3] += x23[1] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r1=q2,<x23=d24,<_5y4=d20
vmlal.u32 q2,d24,d20

# qhasm: r1[0,1] += x23[2] unsigned* _5y23[2]; r1[2,3] += x23[3] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%top,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r1=q2,<x23=d25,<_5y23=d17
vmlal.u32 q2,d25,d17

# qhasm: r1[0,1] += x4[0] unsigned* _5y23[0]; r1[2,3] += x4[1] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x4=reg128#14%bot,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r1=q2,<x4=d26,<_5y23=d16
vmlal.u32 q2,d26,d16

# qhasm: r2[0,1] = x01[0] unsigned* y23[0]; r2[2,3] = x01[1] unsigned* y23[1]
# asm 1: vmull.u32 >r2=reg128#16,<x01=reg128#12%bot,<y23=reg128#4%bot
# asm 2: vmull.u32 >r2=q15,<x01=d22,<y23=d6
vmull.u32 q15,d22,d6

# qhasm: r2[0,1] += x01[2] unsigned* y01[2]; r2[2,3] += x01[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x01=reg128#12%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r2=q15,<x01=d23,<y01=d3
vmlal.u32 q15,d23,d3

# qhasm: r2[0,1] += x23[0] unsigned* y01[0]; r2[2,3] += x23[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d24,<y01=d2
vmlal.u32 q15,d24,d2

# qhasm: r2[0,1] += x23[2] unsigned* _5y4[0]; r2[2,3] += x23[3] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d25,<_5y4=d20
vmlal.u32 q15,d25,d20

# qhasm: r2[0,1] += x4[0] unsigned* _5y23[2]; r2[2,3] += x4[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x4=reg128#14%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r2=q15,<x4=d26,<_5y23=d17
vmlal.u32 q15,d26,d17

# qhasm: r3[0,1] = x01[0] unsigned* y23[2]; r3[2,3] = x01[1] unsigned* y23[3]
# asm 1: vmull.u32 >r3=reg128#9,<x01=reg128#12%bot,<y23=reg128#4%top
# asm 2: vmull.u32 >r3=q8,<x01=d22,<y23=d7
vmull.u32 q8,d22,d7

# qhasm: r3[0,1] += x01[2] unsigned* y23[0]; r3[2,3] += x01[3] unsigned* y23[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x01=reg128#12%top,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r3=q8,<x01=d23,<y23=d6
vmlal.u32 q8,d23,d6

# qhasm: r3[0,1] += x23[0] unsigned* y01[2]; r3[2,3] += x23[1] unsigned* y01[3]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%bot,<y01=reg128#2%top
# asm 2: vmlal.u32 <r3=q8,<x23=d24,<y01=d3
vmlal.u32 q8,d24,d3

# qhasm: r3[0,1] += x23[2] unsigned* y01[0]; r3[2,3] += x23[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r3=q8,<x23=d25,<y01=d2
vmlal.u32 q8,d25,d2

# qhasm: r3[0,1] += x4[0] unsigned* _5y4[0]; r3[2,3] += x4[1] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x4=reg128#14%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r3=q8,<x4=d26,<_5y4=d20
vmlal.u32 q8,d26,d20

# qhasm: r4[0,1] = x01[0] unsigned* y4[0]; r4[2,3] = x01[1] unsigned* y4[1]
# asm 1: vmull.u32 >r4=reg128#10,<x01=reg128#12%bot,<y4=reg128#10%bot
# asm 2: vmull.u32 >r4=q9,<x01=d22,<y4=d18
vmull.u32 q9,d22,d18

# qhasm: r4[0,1] += x01[2] unsigned* y23[2]; r4[2,3] += x01[3] unsigned* y23[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x01=reg128#12%top,<y23=reg128#4%top
# asm 2: vmlal.u32 <r4=q9,<x01=d23,<y23=d7
vmlal.u32 q9,d23,d7

# qhasm: r4[0,1] += x23[0] unsigned* y23[0]; r4[2,3] += x23[1] unsigned* y23[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%bot,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r4=q9,<x23=d24,<y23=d6
vmlal.u32 q9,d24,d6

# qhasm: r4[0,1] += x23[2] unsigned* y01[2]; r4[2,3] += x23[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r4=q9,<x23=d25,<y01=d3
vmlal.u32 q9,d25,d3

# qhasm: r4[0,1] += x4[0] unsigned* y01[0]; r4[2,3] += x4[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x4=reg128#14%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r4=q9,<x4=d26,<y01=d2
vmlal.u32 q9,d26,d2

# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#2,<r0=reg128#15,#26
# asm 2: vshr.u64 >t1=q1,<r0=q14,#26
vshr.u64 q1,q14,#26

# asm 1: vand >r0=reg128#4,<r0=reg128#15,<mask=reg128#1
# asm 2: vand >r0=q3,<r0=q14,<mask=q0
vand q3,q14,q0

# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#3,<t1=reg128#2
# asm 2: vadd.i64 >r1=q1,<r1=q2,<t1=q1
vadd.i64 q1,q2,q1

# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#3,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q2,<r3=q8,#26
vshr.u64 q2,q8,#26

# asm 1: vand >r3=reg128#9,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q8,<r3=q8,<mask=q0
vand q8,q8,q0

# qhasm: 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#3,<r4=reg128#10,<t4=reg128#3
# asm 2: vadd.i64 >r4=q2,<r4=q9,<t4=q2
vadd.i64 q2,q9,q2

# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#2,#26
# asm 2: vshr.u64 >t2=q9,<r1=q1,#26
vshr.u64 q9,q1,#26

# asm 1: vand >r1=reg128#2,<r1=reg128#2,<mask=reg128#1
# asm 2: vand >r1=q1,<r1=q1,<mask=q0
vand q1,q1,q0

# qhasm: 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#11,<r4=reg128#3,#26
# asm 2: vshr.u64 >t0=q10,<r4=q2,#26
vshr.u64 q10,q2,#26

# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#10,<r2=reg128#16,<t2=reg128#10
# asm 2: vadd.i64 >r2=q9,<r2=q15,<t2=q9
vadd.i64 q9,q15,q9

# asm 1: vand >r4=reg128#3,<r4=reg128#3,<mask=reg128#1
# asm 2: vand >r4=q2,<r4=q2,<mask=q0
vand q2,q2,q0

# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10

# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#11,<t0=reg128#11,#2
# asm 2: vshl.i64 >t0=q10,<t0=q10,#2
vshl.i64 q10,q10,#2

# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#12,<r2=reg128#10,#26
# asm 2: vshr.u64 >t3=q11,<r2=q9,#26
vshr.u64 q11,q9,#26

# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10

# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#10,<mask=reg128#1
# asm 2: vand >x23=q9,<r2=q9,<mask=q0
vand q9,q9,q0

# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#9,<r3=reg128#9,<t3=reg128#12
# asm 2: vadd.i64 >r3=q8,<r3=q8,<t3=q11
vadd.i64 q8,q8,q11

# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#11,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q10,<r0=q3,#26
vshr.u64 q10,q3,#26

# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19

# qhasm: x01 = r0 & mask
# asm 1: vand >x01=reg128#4,<r0=reg128#4,<mask=reg128#1
# asm 2: vand >x01=q3,<r0=q3,<mask=q0
vand q3,q3,q0

# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#2,<t1=reg128#11
# asm 2: vadd.i64 >r1=q1,<r1=q1,<t1=q10
vadd.i64 q1,q1,q10

# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#11,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q10,<r3=q8,#26
vshr.u64 q10,q8,#26

# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#4%bot,<x01=reg128#4%top
# asm 2: vtrn.32 <x01=d6,<x01=d7
vtrn.32 d6,d7

# asm 1: vand >r3=reg128#1,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q0,<r3=q8,<mask=q0
vand q0,q8,q0

# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#2%bot,<r1=reg128#2%top
# asm 2: vtrn.32 <r1=d2,<r1=d3
vtrn.32 d2,d3

# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#3,<r4=reg128#3,<t4=reg128#11
# asm 2: vadd.i64 >x4=q2,<r4=q2,<t4=q10
vadd.i64 q2,q2,q10

# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#1%bot,<r3=reg128#1%top
# asm 2: vtrn.32 <r3=d0,<r3=d1
vtrn.32 d0,d1

# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#4%top,<r1=reg128#2%bot,<r1=reg128#2%bot,#0
# asm 2: vext.32 <x01=d7,<r1=d2,<r1=d2,#0
vext.32 d7,d2,d2,#0

# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#1%bot,<r3=reg128#1%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d0,<r3=d0,#0
vext.32 d19,d0,d0,#0

# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#3%bot,<x4=reg128#3%top
# asm 2: vtrn.32 <x4=d4,<x4=d5
vtrn.32 d4,d5

# qhasm: mem128[input_0] aligned= x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#4%bot-<x01=reg128#4%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x01=d6-<x01=d7},[<input_0=r0,: 128]!
vst1.8 {d6-d7},[r0,: 128]!

# qhasm: mem128[input_0] aligned= x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0,: 128]!
vst1.8 {d18-d19},[r0,: 128]!

# qhasm: mem64[input_0] aligned= x4[0]
# asm 1: vst1.8 <x4=reg128#3%bot,[<input_0=int32#1,: 64]
# asm 2: vst1.8 <x4=d4,[<input_0=r0,: 64]
vst1.8 d4,[r0,: 64]

# qhasm: return
bx lr

#endif /* __arm__ && !OPENSSL_NO_ASM */