# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# Montgomery multiplication for ARMv4.
#
# Performance improvement naturally varies among CPU implementations
# and compilers. The code was observed to provide a +65-35% improvement
# [depending on key length, less for longer keys] on ARM920T, and
# +115-80% on Intel IXP425. This is compared to the pre-bn_mul_mont
# code base and compiler-generated code with inlined umull and even
# umlal instructions. In other words, this code gains no "advantage"
# from some "secret" instruction unavailable to the compiler.
#
# The code is interoperable with the Thumb ISA and is rather compact,
# less than 1/2KB. A Windows CE port would be trivial, as it's
# exclusively about decorations; the ABI and instruction syntax are
# identical.
# Add a NEON code path, which handles lengths divisible by 8. RSA/DSA
# performance improvement on Cortex-A8 is ~45-100% depending on key
# length, more for longer keys. On Cortex-A15 the span is ~10-105%.
# On Snapdragon S4 the improvement was measured to vary from ~70% to
# an incredible ~380%, yes, 4.8x faster, for RSA4096 sign. But this is
# rather because the original integer-only code seems to perform
# suboptimally on S4. The situation on Cortex-A9 is unfortunately
# different. It's being looked into, but the trouble is that
# performance for vectors longer than 256 bits is actually a couple
# of percent worse than for the integer-only code. The NEON code is
# nevertheless chosen for execution on all NEON-capable processors,
# because the gain on others outweighs the marginal loss on Cortex-A9.
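#
# For reference, what both code paths compute is the usual word-wise
# Montgomery multiplication with interleaved reduction. The sketch below
# is this commentary's own illustration, not used by the generator; it
# assumes Math::BigInt operands, a 32-bit word size, and $n0 already
# being -n^-1 mod 2^32 as supplied by the caller.
sub mont_mul_ref {
	my ($a,$b,$n,$n0,$num)=@_;	# $a,$b,$n are Math::BigInt objects
	require Math::BigInt;
	my $t=Math::BigInt->new(0);
	for (my $i=0;$i<$num;$i++) {
		my $bi=($b>>(32*$i))&0xffffffff;	# next 32-bit word of b
		$t+=$a*$bi;				# accumulate a*b[i]
		my $m=(($t&0xffffffff)*$n0)&0xffffffff;	# Montgomery multiplier
		$t=($t+$n*$m)>>32;	# t+m*n is exactly divisible by 2^32
	}
	return $t>=$n ? $t-$n : $t;	# final conditional subtraction
}
# The result is a*b*2^(-32*$num) mod n, i.e. the Montgomery product that
# bn_mul_mont is expected to return.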
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
$num="r0";	# starts as num argument, but holds &tp[num-1]
$bp="r2"; $bi="r2"; $rp="r2";
###########	# r9 is reserved by ELF as platform specific, e.g. TLS pointer
$alo="r10";	# sl, gcc uses it to keep @GOT
###########	# r13 is stack pointer
###########	# r15 is program counter

#### argument block layout relative to &tp[num-1], a.k.a. $num
# ap permanently resides in r1
# np permanently resides in r3
$_num="$num,#15*4";	$_bpend=$_num;
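# A sketch of the frame the prologue below builds, reconstructed from
# the stmdb/sub sequence (offsets relative to $num once it points at
# &tp[num-1]):
#
#	$num,#15*4	num		(second stack-passed argument)
#	$num,#14*4	n0		(first stack-passed argument)
#	$num,#13*4	bp		(saved r2)
#	$num,#12*4	rp		(saved r0)
#	$num,#2*4	r4-r12,lr	(10 saved registers)
#	$num,#1*4	tp[num]		(the extra dword)
#	sp...$num	tp[0..num-1]	(alloca-ed temporary vector)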
.word	OPENSSL_armcap_P-bn_mul_mont
.type	bn_mul_mont,%function
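@ The routine implements OpenSSL's bn_mul_mont hook:
@	int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap,
@			const BN_ULONG *bp, const BN_ULONG *np,
@			const BN_ULONG *n0, int num);
@ r0-r3 carry the four pointers; n0 and num arrive on the stack.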
	ldr	ip,[sp,#4]		@ load num
	stmdb	sp!,{r0,r2}		@ sp points at argument block
	ldr	r2,.LOPENSSL_armcap
	tst	r0,#1			@ NEON available?
	mov	$num,ip			@ load num
	stmdb	sp!,{r4-r12,lr}		@ save 10 registers
	mov	$num,$num,lsl#2		@ rescale $num for byte count
	sub	sp,sp,$num		@ alloca(4*num)
	sub	sp,sp,#4		@ +extra dword
	sub	$num,$num,#4		@ "num=num-1"
	add	$tp,$bp,$num		@ &bp[num-1]
	add	$num,sp,$num		@ $num to point at &tp[num-1]
	ldr	$bi,[$bp]		@ bp[0]
	ldr	$aj,[$ap],#4		@ ap[0],ap++
	ldr	$nj,[$np],#4		@ np[0],np++
	str	$tp,[$_bpend]		@ save &bp[num]
	umull	$alo,$ahi,$aj,$bi	@ ap[0]*bp[0]
	str	$n0,[$_n0]		@ save n0 value
	mul	$n0,$alo,$n0		@ "tp[0]"*n0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"tp[0]"
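@ Classic Montgomery trick: since n0 == -np[0]^-1 mod 2^32, the
@ multiplier computed by the mul above satisfies
@	tp[0] + $n0*np[0] == 0 (mod 2^32),
@ so this umlal annihilates the low word in $alo and only the
@ carry words need to be propagated forward.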
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	ldr	$nj,[$np],#4		@ np[j],np++
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[0]
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	ldr	$tp,[$_bp]		@ restore bp
	ldr	$n0,[$_n0]		@ restore n0
	str	$nlo,[$num]		@ tp[num-1]=
	str	$nhi,[$num,#4]		@ tp[num]=

	sub	$tj,$num,sp		@ "original" $num-1 value
	sub	$ap,$ap,$tj		@ "rewind" ap to &ap[1]
	ldr	$bi,[$tp,#4]!		@ *(++bp)
	sub	$np,$np,$tj		@ "rewind" np to &np[1]
	ldr	$aj,[$ap,#-4]		@ ap[0]
	ldr	$alo,[sp]		@ tp[0]
	ldr	$nj,[$np,#-4]		@ np[0]
	ldr	$tj,[sp,#4]		@ tp[1]

	umlal	$alo,$ahi,$aj,$bi	@ ap[0]*bp[i]+tp[0]
	str	$tp,[$_bp]		@ save bp
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"tp[0]"

	ldr	$aj,[$ap],#4		@ ap[j],ap++
	adds	$alo,$ahi,$tj		@ +=tp[j]
	ldr	$nj,[$np],#4		@ np[j],np++
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[i]
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	ldr	$tj,[$tp,#8]		@ tp[j+1]
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++

	ldr	$tp,[$_bp]		@ restore bp
	ldr	$n0,[$_n0]		@ restore n0
	ldr	$tj,[$_bpend]		@ restore &bp[num]
	str	$nlo,[$num]		@ tp[num-1]=
	str	$nhi,[$num,#4]		@ tp[num]=

	ldr	$rp,[$_rp]		@ pull rp
	add	$num,$num,#4		@ $num to point at &tp[num]
	sub	$aj,$num,sp		@ "original" num value
	mov	$tp,sp			@ "rewind" $tp
	mov	$ap,$tp			@ "borrow" $ap
	sub	$np,$np,$aj		@ "rewind" $np to &np[0]

	subs	$tj,$tj,$tj		@ "clear" carry flag
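@ After the last outer iteration the result in tp is at most one np
@ too large, so a single conditional subtraction suffices: compute
@ tp-np into rp while tracking the borrow, then select between tp
@ and rp below.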
.Lsub:	ldr	$tj,[$tp],#4
	sbcs	$tj,$tj,$nj		@ tp[j]-np[j]
	str	$tj,[$rp],#4		@ rp[j]=
	teq	$tp,$num		@ preserve carry
	sbcs	$nhi,$nhi,#0		@ topmost carry
	mov	$tp,sp			@ "rewind" $tp
	sub	$rp,$rp,$aj		@ "rewind" $rp

	orr	$ap,$ap,$np		@ ap=borrow?tp:rp
.Lcopy:	ldr	$tj,[$ap],#4		@ copy or in-place refresh
	str	sp,[$tp],#4		@ zap tp
	add	sp,$num,#4		@ skip over tp[num+1]
	ldmia	sp!,{r4-r12,lr}		@ restore registers
	add	sp,sp,#2*4		@ skip over {r0,r2}
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
.size	bn_mul_mont,.-bn_mul_mont
sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";   }
sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
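# e.g. &Dlo("q6") is "d12" and &Dhi("q6") is "d13", mirroring how each
# NEON quadword register aliases an even/odd pair of doubleword registers.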
my ($A0,$A1,$A2,$A3)=map("d$_",(0..3));
my ($N0,$N1,$N2,$N3)=map("d$_",(4..7));
my ($Z,$Temp)=("q4","q5");
my ($A0xB,$A1xB,$A2xB,$A3xB,$A4xB,$A5xB,$A6xB,$A7xB)=map("q$_",(6..13));
my ($Bi,$Ni,$M0)=map("d$_",(28..31));
my $temp=&Dlo($Temp);

my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5));
my ($tinptr,$toutptr,$inner,$outer)=map("r$_",(6..9));
.type	bn_mul8x_mont_neon,%function
	vstmdb	sp!,{d8-d15}		@ ABI specification says so
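					@ (AAPCS designates d8-d15 callee-saved)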
	ldmia	ip,{r4-r5}		@ load rest of parameter block
	vld1.32	{${Bi}[0]}, [$bptr,:32]!
	sub	$toutptr,$toutptr,$num,lsl#4
	vld1.32	{$A0-$A3}, [$aptr]!	@ can't specify :32 :-(
	and	$toutptr,$toutptr,#-64
	vld1.32	{${M0}[0]}, [$n0,:32]
	mov	sp,$toutptr		@ alloca
	veor	$zero,$zero,$zero

	vmull.u32	$A0xB,$Bi,${A0}[0]
	vmull.u32	$A1xB,$Bi,${A0}[1]
	vmull.u32	$A2xB,$Bi,${A1}[0]
	vshl.i64	$temp,`&Dhi("$A0xB")`,#16
	vmull.u32	$A3xB,$Bi,${A1}[1]
	vadd.u64	$temp,$temp,`&Dlo("$A0xB")`
	veor	$zero,$zero,$zero
	vmul.u32	$Ni,$temp,$M0
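	@ The vshl/vadd pair above folds the two halves, kept 16 bits
	@ apart in this redundant representation, back into the low 32
	@ bits of the running sum; the vmul then forms Ni = sum*n0 mod
	@ 2^32, the same Montgomery multiplier the integer-only path
	@ derives with mul.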
	vmull.u32	$A4xB,$Bi,${A2}[0]
	vld1.32	{$N0-$N3}, [$nptr]!
	vmull.u32	$A5xB,$Bi,${A2}[1]
	vmull.u32	$A6xB,$Bi,${A3}[0]
	vmull.u32	$A7xB,$Bi,${A3}[1]

	@ special case for num=8, everything is in register bank...
	vmlal.u32	$A0xB,$Ni,${N0}[0]
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vmlal.u32	$A3xB,$Ni,${N1}[1]
	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vmlal.u32	$A7xB,$Ni,${N3}[1]

	vshr.u64	$temp,$temp,#16
	vadd.u64	$temp,$temp,`&Dhi("$Temp")`
	vshr.u64	$temp,$temp,#16

	vld1.32	{${Bi}[0]}, [$bptr,:32]!
	veor	$zero,$zero,$zero
	vadd.u64	`&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp
	vmlal.u32	$A0xB,$Bi,${A0}[0]
	vmlal.u32	$A1xB,$Bi,${A0}[1]
	vmlal.u32	$A2xB,$Bi,${A1}[0]
	vshl.i64	$temp,`&Dhi("$A0xB")`,#16
	vmlal.u32	$A3xB,$Bi,${A1}[1]
	vadd.u64	$temp,$temp,`&Dlo("$A0xB")`
	veor	$zero,$zero,$zero
	subs	$outer,$outer,#1
	vmul.u32	$Ni,$temp,$M0

	vmlal.u32	$A4xB,$Bi,${A2}[0]
	vmlal.u32	$A5xB,$Bi,${A2}[1]
	vmlal.u32	$A6xB,$Bi,${A3}[0]
	vmlal.u32	$A7xB,$Bi,${A3}[1]

	vmlal.u32	$A0xB,$Ni,${N0}[0]
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vmlal.u32	$A3xB,$Ni,${N1}[1]
	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vmlal.u32	$A7xB,$Ni,${N3}[1]

	vshr.u64	$temp,$temp,#16
	vadd.u64	$temp,$temp,`&Dhi("$Temp")`
	vshr.u64	$temp,$temp,#16

	vadd.u64	`&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp
	vshr.u64	$temp,`&Dlo("$A0xB")`,#16
	vadd.u64	`&Dhi("$A0xB")`,`&Dhi("$A0xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A0xB")`,#16
	vzip.16	`&Dlo("$A0xB")`,`&Dhi("$A0xB")`
	vmlal.u32	$A0xB,$Ni,${N0}[0]
	vld1.32	{$A0-$A3}, [$aptr]!
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	subs	$inner,$inner,#8
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vmlal.u32	$A3xB,$Ni,${N1}[1]
	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vld1.32	{$N0-$N1}, [$nptr]!
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vst1.64	{$A0xB-$A1xB}, [$toutptr,:256]!
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vmlal.u32	$A7xB,$Ni,${N3}[1]
	vst1.64	{$A2xB-$A3xB}, [$toutptr,:256]!

	vmull.u32	$A0xB,$Bi,${A0}[0]
	vld1.32	{$N2-$N3}, [$nptr]!
	vmull.u32	$A1xB,$Bi,${A0}[1]
	vst1.64	{$A4xB-$A5xB}, [$toutptr,:256]!
	vmull.u32	$A2xB,$Bi,${A1}[0]
	vmull.u32	$A3xB,$Bi,${A1}[1]
	vst1.64	{$A6xB-$A7xB}, [$toutptr,:256]!
	vmull.u32	$A4xB,$Bi,${A2}[0]
	vmull.u32	$A5xB,$Bi,${A2}[1]
	vmull.u32	$A6xB,$Bi,${A3}[0]
	vmull.u32	$A7xB,$Bi,${A3}[1]

	vmlal.u32	$A0xB,$Ni,${N0}[0]
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	sub	$aptr,$aptr,$num,lsl#2	@ rewind $aptr
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vld1.64	{$Temp}, [sp,:128]
	vmlal.u32	$A3xB,$Ni,${N1}[1]
	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vst1.64	{$A0xB-$A1xB}, [$toutptr,:256]!
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vshr.u64	$temp,$temp,#16
	vld1.64	{$A0xB}, [$tinptr, :128]!
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vst1.64	{$A2xB-$A3xB}, [$toutptr,:256]!
	vmlal.u32	$A7xB,$Ni,${N3}[1]
	vst1.64	{$A4xB-$A5xB}, [$toutptr,:256]!
	vadd.u64	$temp,$temp,`&Dhi("$Temp")`
	vst1.64	{$A6xB-$A7xB}, [$toutptr,:256]!
	vld1.64	{$A1xB-$A2xB}, [$tinptr, :256]!
	vst1.64	{$Z}, [$toutptr,:128]
	vshr.u64	$temp,$temp,#16
	vld1.32	{${Bi}[0]}, [$bptr,:32]!
	sub	$nptr,$nptr,$num,lsl#2	@ rewind $nptr
	vld1.32	{$A0-$A3}, [$aptr]!
	veor	$zero,$zero,$zero
	vadd.u64	`&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp
	vmlal.u32	$A0xB,$Bi,${A0}[0]
	vld1.64	{$A3xB-$A4xB},[$tinptr,:256]!
	vmlal.u32	$A1xB,$Bi,${A0}[1]
	vmlal.u32	$A2xB,$Bi,${A1}[0]
	vld1.64	{$A5xB-$A6xB},[$tinptr,:256]!
	vmlal.u32	$A3xB,$Bi,${A1}[1]

	vshl.i64	$temp,`&Dhi("$A0xB")`,#16
	veor	$zero,$zero,$zero
	vadd.u64	$temp,$temp,`&Dlo("$A0xB")`
	vld1.64	{$A7xB},[$tinptr,:128]!
	vmul.u32	$Ni,$temp,$M0

	vmlal.u32	$A4xB,$Bi,${A2}[0]
	vld1.32	{$N0-$N3}, [$nptr]!
	vmlal.u32	$A5xB,$Bi,${A2}[1]
	vmlal.u32	$A6xB,$Bi,${A3}[0]
	vmlal.u32	$A7xB,$Bi,${A3}[1]

	vmlal.u32	$A0xB,$Ni,${N0}[0]
	vld1.32	{$A0-$A3}, [$aptr]!
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	subs	$inner,$inner,#8
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vmlal.u32	$A3xB,$Ni,${N1}[1]
	vst1.64	{$A0xB-$A1xB}, [$toutptr,:256]!
	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vld1.64	{$A0xB}, [$tinptr, :128]!
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vst1.64	{$A2xB-$A3xB}, [$toutptr,:256]!
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vld1.64	{$A1xB-$A2xB}, [$tinptr, :256]!
	vmlal.u32	$A7xB,$Ni,${N3}[1]
	vst1.64	{$A4xB-$A5xB}, [$toutptr,:256]!

	vmlal.u32	$A0xB,$Bi,${A0}[0]
	vld1.64	{$A3xB-$A4xB}, [$tinptr, :256]!
	vmlal.u32	$A1xB,$Bi,${A0}[1]
	vst1.64	{$A6xB-$A7xB}, [$toutptr,:256]!
	vmlal.u32	$A2xB,$Bi,${A1}[0]
	vld1.64	{$A5xB-$A6xB}, [$tinptr, :256]!
	vmlal.u32	$A3xB,$Bi,${A1}[1]
	vld1.32	{$N0-$N3}, [$nptr]!
	vmlal.u32	$A4xB,$Bi,${A2}[0]
	vld1.64	{$A7xB}, [$tinptr, :128]!
	vmlal.u32	$A5xB,$Bi,${A2}[1]
	vmlal.u32	$A6xB,$Bi,${A3}[0]
	vmlal.u32	$A7xB,$Bi,${A3}[1]

	vmlal.u32	$A0xB,$Ni,${N0}[0]
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	sub	$aptr,$aptr,$num,lsl#2	@ rewind $aptr
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vld1.64	{$Temp}, [sp,:128]
	vmlal.u32	$A3xB,$Ni,${N1}[1]
	subs	$outer,$outer,#1
	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vst1.64	{$A0xB-$A1xB}, [$toutptr,:256]!
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vld1.64	{$A0xB}, [$tinptr, :128]!
	vshr.u64	$temp,$temp,#16
	vst1.64	{$A2xB-$A3xB}, [$toutptr,:256]!
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vld1.64	{$A1xB-$A2xB}, [$tinptr, :256]!
	vmlal.u32	$A7xB,$Ni,${N3}[1]

	vst1.64	{$A4xB-$A5xB}, [$toutptr,:256]!
	vadd.u64	$temp,$temp,`&Dhi("$Temp")`
	vst1.64	{$A6xB-$A7xB}, [$toutptr,:256]!
	vshr.u64	$temp,$temp,#16
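	@ The tail below folds the redundant accumulators back into
	@ canonical 32-bit limbs: each 64-bit lane holds digits kept 16
	@ bits apart, so carries are shifted out 16 bits at a time, and
	@ vzip.16 pairs the two low 16-bit halves into the output word
	@ that gets stored.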
	vadd.u64	`&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp
	vld1.64	{$A3xB-$A4xB}, [$tinptr, :256]!
	vshr.u64	$temp,`&Dlo("$A0xB")`,#16
	vadd.u64	`&Dhi("$A0xB")`,`&Dhi("$A0xB")`,$temp
	vld1.64	{$A5xB-$A6xB}, [$tinptr, :256]!
	vshr.u64	$temp,`&Dhi("$A0xB")`,#16
	vld1.64	{$A7xB}, [$tinptr, :128]!
	vzip.16	`&Dlo("$A0xB")`,`&Dhi("$A0xB")`

	vadd.u64	`&Dlo("$A1xB")`,`&Dlo("$A1xB")`,$temp
	vst1.32	{`&Dlo("$A0xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A1xB")`,#16
	vadd.u64	`&Dhi("$A1xB")`,`&Dhi("$A1xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A1xB")`,#16
	vzip.16	`&Dlo("$A1xB")`,`&Dhi("$A1xB")`

	vadd.u64	`&Dlo("$A2xB")`,`&Dlo("$A2xB")`,$temp
	vst1.32	{`&Dlo("$A1xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A2xB")`,#16
	vadd.u64	`&Dhi("$A2xB")`,`&Dhi("$A2xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A2xB")`,#16
	vzip.16	`&Dlo("$A2xB")`,`&Dhi("$A2xB")`

	vadd.u64	`&Dlo("$A3xB")`,`&Dlo("$A3xB")`,$temp
	vst1.32	{`&Dlo("$A2xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A3xB")`,#16
	vadd.u64	`&Dhi("$A3xB")`,`&Dhi("$A3xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A3xB")`,#16
	vzip.16	`&Dlo("$A3xB")`,`&Dhi("$A3xB")`

	vadd.u64	`&Dlo("$A4xB")`,`&Dlo("$A4xB")`,$temp
	vst1.32	{`&Dlo("$A3xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A4xB")`,#16
	vadd.u64	`&Dhi("$A4xB")`,`&Dhi("$A4xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A4xB")`,#16
	vzip.16	`&Dlo("$A4xB")`,`&Dhi("$A4xB")`

	vadd.u64	`&Dlo("$A5xB")`,`&Dlo("$A5xB")`,$temp
	vst1.32	{`&Dlo("$A4xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A5xB")`,#16
	vadd.u64	`&Dhi("$A5xB")`,`&Dhi("$A5xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A5xB")`,#16
	vzip.16	`&Dlo("$A5xB")`,`&Dhi("$A5xB")`

	vadd.u64	`&Dlo("$A6xB")`,`&Dlo("$A6xB")`,$temp
	vst1.32	{`&Dlo("$A5xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A6xB")`,#16
	vadd.u64	`&Dhi("$A6xB")`,`&Dhi("$A6xB")`,$temp
	vld1.64	{$A0xB}, [$tinptr, :128]!
	vshr.u64	$temp,`&Dhi("$A6xB")`,#16
	vzip.16	`&Dlo("$A6xB")`,`&Dhi("$A6xB")`

	vadd.u64	`&Dlo("$A7xB")`,`&Dlo("$A7xB")`,$temp
	vst1.32	{`&Dlo("$A6xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A7xB")`,#16
	vadd.u64	`&Dhi("$A7xB")`,`&Dhi("$A7xB")`,$temp
	vld1.64	{$A1xB-$A2xB}, [$tinptr, :256]!
	vshr.u64	$temp,`&Dhi("$A7xB")`,#16
	vzip.16	`&Dlo("$A7xB")`,`&Dhi("$A7xB")`
	subs	$inner,$inner,#8
	vst1.32	{`&Dlo("$A7xB")`[0]}, [$toutptr, :32]!
	vst1.32	{${temp}[0]}, [$toutptr, :32]	@ top-most bit
	sub	$nptr,$nptr,$num,lsl#2		@ rewind $nptr
	subs	$aptr,sp,#0			@ clear carry flag
	add	$bptr,sp,$num,lsl#2

	ldmia	$aptr!, {r4-r7}
	ldmia	$nptr!, {r8-r11}
	teq	$aptr,$bptr			@ preserves carry
	stmia	$rptr!, {r8-r11}

	ldr	r10, [$aptr]			@ load top-most bit
	sub	r11,$bptr,sp			@ this is num*4
	sub	$rptr,$rptr,r11			@ rewind $rptr
	mov	$nptr,$bptr			@ second 3/4th of frame
	sbcs	r10,r10,#0			@ result is carry flag

	ldmia	$aptr!, {r4-r7}
	ldmia	$rptr, {r8-r11}
	vst1.64	{q0-q1}, [$nptr,:256]!		@ wipe
	vst1.64	{q0-q1}, [$nptr,:256]!		@ wipe
	stmia	$rptr!, {r8-r11}
	ldmia	$rptr, {r8-r11}
	vst1.64	{q0-q1}, [$aptr,:256]!		@ wipe
	vst1.64	{q0-q1}, [$nptr,:256]!		@ wipe
	teq	$aptr,$bptr			@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_copy_n_zap
.size	bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
.asciz	"Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.comm	OPENSSL_armcap_P,4,4
$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4