#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# January 2007.

# Montgomery multiplication for ARMv4.
#
# Performance improvement naturally varies among CPU implementations
# and compilers. The code was observed to provide a +35-65% improvement
# [depending on key length, less for longer keys] on ARM920T, and
# +80-115% on Intel IXP425. This is relative to the pre-bn_mul_mont
# code base, i.e. compiler-generated code with in-lined umull and even
# umlal instructions. In other words, this code doesn't owe its
# "advantage" to some "secret" instruction the compiler cannot use.
#
# The code is interoperable with the Thumb ISA and is rather compact,
# less than 1/2KB. A Windows CE port would be trivial, as it's
# exclusively about decorations; the ABI and instruction syntax are
# identical.

# November 2013
#
# Add a NEON code path, which handles lengths divisible by 8. RSA/DSA
# performance improvement on Cortex-A8 is ~45-100% depending on key
# length, more for longer keys. On Cortex-A15 the span is ~10-105%.
# On Snapdragon S4 the improvement was measured to vary from ~70% to
# an incredible ~380%, i.e. 4.8x faster, for RSA4096 sign. But this
# is largely because the original integer-only code seems to perform
# suboptimally on S4. The situation on Cortex-A9 is unfortunately
# different. It's being looked into, but the trouble is that
# performance for vectors longer than 256 bits is actually a couple
# of percent worse than for the integer-only code. The NEON path is
# nevertheless chosen for execution on all NEON-capable processors,
# because the gain on the others outweighs the marginal loss on
# Cortex-A9.

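# For reference, below is what both code paths compute: the classic
# word-wise Montgomery reduction, t = a*b*2^(-32*num) mod n. A minimal
# sketch in plain Perl (illustrative only, not used by this module;
# mont_mul_ref and its Math::BigInt operands are assumptions for the
# sketch, with n0 = -n^(-1) mod 2^32 as the caller computes it):
#
#	use Math::BigInt;
#	sub mont_mul_ref {
#	    my ($a,$b,$n,$n0,$num) = @_;	# Math::BigInt a,b,n; scalar n0,num
#	    my $t = $a * $b;
#	    for (1..$num) {			# one step per 32-bit limb
#		my $m = (($t & 0xffffffff) * $n0) & 0xffffffff;
#		$t = ($t + $m * $n) >> 32;	# bottom limb cancels to zero
#	    }
#	    $t -= $n if $t >= $n;		# final conditional subtraction
#	    return $t;				# a*b*2^(-32*num) mod n
#	}
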
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";

$num="r0";	# starts as num argument, but holds &tp[num-1]
$ap="r1";
$bp="r2"; $bi="r2"; $rp="r2";
$np="r3";
$tp="r4";
$aj="r5";
$nj="r6";
$tj="r7";
$n0="r8";
###########	# r9 is reserved by ELF as platform specific, e.g. TLS pointer
$alo="r10";	# sl, gcc uses it to keep @GOT
$ahi="r11";	# fp
$nlo="r12";	# ip
###########	# r13 is stack pointer
$nhi="r14";	# lr
###########	# r15 is program counter

#### argument block layout relative to &tp[num-1], a.k.a. $num
$_rp="$num,#12*4";
# ap permanently resides in r1
$_bp="$num,#13*4";
# np permanently resides in r3
$_n0="$num,#14*4";
$_num="$num,#15*4";	$_bpend=$_num;

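#### for orientation, the frame the prologue builds (a descriptive
#### sketch derived from the offsets above, relative to &tp[num-1],
#### i.e. $num):
####	sp		tp[0..num], num+1 words of scratch
####	$num+#2*4	saved r4-r12,lr, 10 words
####	$num+#12*4	stashed r0 (rp) and r2 (bp)
####	$num+#14*4	caller's stack arguments, &n0 and num
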
$code=<<___;
#include "arm_arch.h"

.text
.code	32

#if __ARM_ARCH__>=7
.align	5
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-bn_mul_mont
#endif

.global	bn_mul_mont
.hidden	bn_mul_mont
.type	bn_mul_mont,%function

.align	5
bn_mul_mont:
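	@ per the AAPCS: r0=rp, r1=ap, r2=bp, r3=np, with the remaining
	@ arguments on the stack, the n0 pointer at [sp] and num at [sp,#4]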
	ldr	ip,[sp,#4]		@ load num
	stmdb	sp!,{r0,r2}		@ sp points at argument block
#if __ARM_ARCH__>=7
	tst	ip,#7
	bne	.Lialu
	adr	r0,bn_mul_mont
	ldr	r2,.LOPENSSL_armcap
	ldr	r0,[r0,r2]
	tst	r0,#1			@ NEON available?
	ldmia	sp, {r0,r2}
	beq	.Lialu
	add	sp,sp,#8
	b	bn_mul8x_mont_neon
.align	4
.Lialu:
#endif
	cmp	ip,#2
	mov	$num,ip			@ load num
	movlt	r0,#0
	addlt	sp,sp,#2*4
	blt	.Labrt

	stmdb	sp!,{r4-r12,lr}		@ save 10 registers

	mov	$num,$num,lsl#2		@ rescale $num for byte count
	sub	sp,sp,$num		@ alloca(4*num)
	sub	sp,sp,#4		@ +extra dword
	sub	$num,$num,#4		@ "num=num-1"
	add	$tp,$bp,$num		@ &bp[num-1]

	add	$num,sp,$num		@ $num to point at &tp[num-1]
	ldr	$n0,[$_n0]		@ &n0
	ldr	$bi,[$bp]		@ bp[0]
	ldr	$aj,[$ap],#4		@ ap[0],ap++
	ldr	$nj,[$np],#4		@ np[0],np++
	ldr	$n0,[$n0]		@ *n0
	str	$tp,[$_bpend]		@ save &bp[num]

	umull	$alo,$ahi,$aj,$bi	@ ap[0]*bp[0]
	str	$n0,[$_n0]		@ save n0 value
	mul	$n0,$alo,$n0		@ "tp[0]"*n0
	mov	$nlo,#0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"t[0]"
	mov	$tp,sp

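	@ 1st outer iteration: accumulate ap[j]*bp[0] (carried in alo/ahi)
	@ and np[j]*n0 (carried in nlo/nhi) word by word, storing tp[]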
.L1st:
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	mov	$alo,$ahi
	ldr	$nj,[$np],#4		@ np[j],np++
	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[0]
	mov	$nhi,#0
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	adds	$nlo,$nlo,$alo
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	adc	$nlo,$nhi,#0
	cmp	$tp,$num
	bne	.L1st

	adds	$nlo,$nlo,$ahi
	ldr	$tp,[$_bp]		@ restore bp
	mov	$nhi,#0
	ldr	$n0,[$_n0]		@ restore n0
	adc	$nhi,$nhi,#0
	str	$nlo,[$num]		@ tp[num-1]=
	str	$nhi,[$num,#4]		@ tp[num]=

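	@ remaining outer iterations: for each bp[i], add ap[]*bp[i] into
	@ tp[], then fold in np[]*n0 so the bottom word cancels and tp[]
	@ effectively shifts down one word, as in the reference sketch above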
.Louter:
	sub	$tj,$num,sp		@ "original" $num-1 value
	sub	$ap,$ap,$tj		@ "rewind" ap to &ap[1]
	ldr	$bi,[$tp,#4]!		@ *(++bp)
	sub	$np,$np,$tj		@ "rewind" np to &np[1]
	ldr	$aj,[$ap,#-4]		@ ap[0]
	ldr	$alo,[sp]		@ tp[0]
	ldr	$nj,[$np,#-4]		@ np[0]
	ldr	$tj,[sp,#4]		@ tp[1]

	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[0]*bp[i]+tp[0]
	str	$tp,[$_bp]		@ save bp
	mul	$n0,$alo,$n0
	mov	$nlo,#0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"tp[0]"
	mov	$tp,sp

.Linner:
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	adds	$alo,$ahi,$tj		@ +=tp[j]
	ldr	$nj,[$np],#4		@ np[j],np++
	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[i]
	mov	$nhi,#0
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	adc	$ahi,$ahi,#0
	ldr	$tj,[$tp,#8]		@ tp[j+1]
	adds	$nlo,$nlo,$alo
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	adc	$nlo,$nhi,#0
	cmp	$tp,$num
	bne	.Linner

	adds	$nlo,$nlo,$ahi
	mov	$nhi,#0
	ldr	$tp,[$_bp]		@ restore bp
	adc	$nhi,$nhi,#0
	ldr	$n0,[$_n0]		@ restore n0
	adds	$nlo,$nlo,$tj
	ldr	$tj,[$_bpend]		@ restore &bp[num]
	adc	$nhi,$nhi,#0
	str	$nlo,[$num]		@ tp[num-1]=
	str	$nhi,[$num,#4]		@ tp[num]=

	cmp	$tp,$tj
	bne	.Louter

	ldr	$rp,[$_rp]		@ pull rp
	add	$num,$num,#4		@ $num to point at &tp[num]
	sub	$aj,$num,sp		@ "original" num value
	mov	$tp,sp			@ "rewind" $tp
	mov	$ap,$tp			@ "borrow" $ap
	sub	$np,$np,$aj		@ "rewind" $np to &np[0]

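	@ final step: conditionally subtract np[]. tp-np is written to rp
	@ with the borrow tracked; the borrow then forms an all-ones/zero
	@ mask so tp or rp is selected below without a data-dependent branch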
	subs	$tj,$tj,$tj		@ "clear" carry flag
.Lsub:	ldr	$tj,[$tp],#4
	ldr	$nj,[$np],#4
	sbcs	$tj,$tj,$nj		@ tp[j]-np[j]
	str	$tj,[$rp],#4		@ rp[j]=
	teq	$tp,$num		@ preserve carry
	bne	.Lsub
	sbcs	$nhi,$nhi,#0		@ topmost carry
	mov	$tp,sp			@ "rewind" $tp
	sub	$rp,$rp,$aj		@ "rewind" $rp

	and	$ap,$tp,$nhi
	bic	$np,$rp,$nhi
	orr	$ap,$ap,$np		@ ap=borrow?tp:rp

.Lcopy:	ldr	$tj,[$ap],#4		@ copy or in-place refresh
	str	sp,[$tp],#4		@ zap tp
	str	$tj,[$rp],#4
	cmp	$tp,$num
	bne	.Lcopy

	add	sp,$num,#4		@ skip over tp[num+1]
	ldmia	sp!,{r4-r12,lr}		@ restore registers
	add	sp,sp,#2*4		@ skip over {r0,r2}
	mov	r0,#1
.Labrt:	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
.size	bn_mul_mont,.-bn_mul_mont
___
{
sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }

my ($A0,$A1,$A2,$A3)=map("d$_",(0..3));
my ($N0,$N1,$N2,$N3)=map("d$_",(4..7));
my ($Z,$Temp)=("q4","q5");
my ($A0xB,$A1xB,$A2xB,$A3xB,$A4xB,$A5xB,$A6xB,$A7xB)=map("q$_",(6..13));
my ($Bi,$Ni,$M0)=map("d$_",(28..31));
my $zero=&Dlo($Z);
my $temp=&Dlo($Temp);

my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5));
my ($tinptr,$toutptr,$inner,$outer)=map("r$_",(6..9));

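# A note on the strategy below: bp[i] and the reduction multiplier Ni
# are split into 16-bit halves spread across 32-bit lanes (vzip.16
# against a zero vector), so each vmull/vmlal.u32 product is at most
# 48 bits wide and the 64-bit accumulators can absorb many iterations
# before carries have to be propagated; the .LNEON_tail* code then
# rebuilds proper 32-bit words with shift-by-16/add/vzip.16 sequences.
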
$code.=<<___;
#if __ARM_ARCH__>=7
.fpu	neon

.type	bn_mul8x_mont_neon,%function
.align	5
bn_mul8x_mont_neon:
	mov	ip,sp
	stmdb	sp!,{r4-r11}
	vstmdb	sp!,{d8-d15}		@ ABI specification says so
	ldmia	ip,{r4-r5}		@ load rest of parameter block

	sub		$toutptr,sp,#16
	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	sub		$toutptr,$toutptr,$num,lsl#4
	vld1.32		{$A0-$A3},  [$aptr]!		@ can't specify :32 :-(
	and		$toutptr,$toutptr,#-64
	vld1.32		{${M0}[0]}, [$n0,:32]
	mov		sp,$toutptr			@ alloca
	veor		$zero,$zero,$zero
	subs		$inner,$num,#8
	vzip.16		$Bi,$zero

	vmull.u32	$A0xB,$Bi,${A0}[0]
	vmull.u32	$A1xB,$Bi,${A0}[1]
	vmull.u32	$A2xB,$Bi,${A1}[0]
	vshl.i64	$temp,`&Dhi("$A0xB")`,#16
	vmull.u32	$A3xB,$Bi,${A1}[1]

	vadd.u64	$temp,$temp,`&Dlo("$A0xB")`
	veor		$zero,$zero,$zero
	vmul.u32	$Ni,$temp,$M0

	vmull.u32	$A4xB,$Bi,${A2}[0]
	 vld1.32	{$N0-$N3}, [$nptr]!
	vmull.u32	$A5xB,$Bi,${A2}[1]
	vmull.u32	$A6xB,$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmull.u32	$A7xB,$Bi,${A3}[1]

	bne	.LNEON_1st

	@ special case for num=8, everything is in register bank...

	vmlal.u32	$A0xB,$Ni,${N0}[0]
	sub		$outer,$num,#1
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vmlal.u32	$A3xB,$Ni,${N1}[1]

	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vmov		$Temp,$A0xB
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vmov		$A0xB,$A1xB
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vmov		$A1xB,$A2xB
	vmlal.u32	$A7xB,$Ni,${N3}[1]
	vmov		$A2xB,$A3xB
	vmov		$A3xB,$A4xB
	vshr.u64	$temp,$temp,#16
	vmov		$A4xB,$A5xB
	vmov		$A5xB,$A6xB
	vadd.u64	$temp,$temp,`&Dhi("$Temp")`
	vmov		$A6xB,$A7xB
	veor		$A7xB,$A7xB
	vshr.u64	$temp,$temp,#16

	b	.LNEON_outer8

.align	4
.LNEON_outer8:
	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	veor		$zero,$zero,$zero
	vzip.16		$Bi,$zero
	vadd.u64	`&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp

	vmlal.u32	$A0xB,$Bi,${A0}[0]
	vmlal.u32	$A1xB,$Bi,${A0}[1]
	vmlal.u32	$A2xB,$Bi,${A1}[0]
	vshl.i64	$temp,`&Dhi("$A0xB")`,#16
	vmlal.u32	$A3xB,$Bi,${A1}[1]

	vadd.u64	$temp,$temp,`&Dlo("$A0xB")`
	veor		$zero,$zero,$zero
	subs		$outer,$outer,#1
	vmul.u32	$Ni,$temp,$M0

	vmlal.u32	$A4xB,$Bi,${A2}[0]
	vmlal.u32	$A5xB,$Bi,${A2}[1]
	vmlal.u32	$A6xB,$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmlal.u32	$A7xB,$Bi,${A3}[1]

	vmlal.u32	$A0xB,$Ni,${N0}[0]
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vmlal.u32	$A3xB,$Ni,${N1}[1]

	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vmov		$Temp,$A0xB
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vmov		$A0xB,$A1xB
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vmov		$A1xB,$A2xB
	vmlal.u32	$A7xB,$Ni,${N3}[1]
	vmov		$A2xB,$A3xB
	vmov		$A3xB,$A4xB
	vshr.u64	$temp,$temp,#16
	vmov		$A4xB,$A5xB
	vmov		$A5xB,$A6xB
	vadd.u64	$temp,$temp,`&Dhi("$Temp")`
	vmov		$A6xB,$A7xB
	veor		$A7xB,$A7xB
	vshr.u64	$temp,$temp,#16

	bne	.LNEON_outer8

	vadd.u64	`&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp
	mov		$toutptr,sp
	vshr.u64	$temp,`&Dlo("$A0xB")`,#16
	mov		$inner,$num
	vadd.u64	`&Dhi("$A0xB")`,`&Dhi("$A0xB")`,$temp
	add		$tinptr,sp,#16
	vshr.u64	$temp,`&Dhi("$A0xB")`,#16
	vzip.16		`&Dlo("$A0xB")`,`&Dhi("$A0xB")`

	b	.LNEON_tail2

.align	4
.LNEON_1st:
	vmlal.u32	$A0xB,$Ni,${N0}[0]
	 vld1.32	{$A0-$A3}, [$aptr]!
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	subs		$inner,$inner,#8
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vmlal.u32	$A3xB,$Ni,${N1}[1]

	vmlal.u32	$A4xB,$Ni,${N2}[0]
	 vld1.32	{$N0-$N1}, [$nptr]!
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	 vst1.64	{$A0xB-$A1xB}, [$toutptr,:256]!
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vmlal.u32	$A7xB,$Ni,${N3}[1]
	 vst1.64	{$A2xB-$A3xB}, [$toutptr,:256]!

	vmull.u32	$A0xB,$Bi,${A0}[0]
	 vld1.32	{$N2-$N3}, [$nptr]!
	vmull.u32	$A1xB,$Bi,${A0}[1]
	 vst1.64	{$A4xB-$A5xB}, [$toutptr,:256]!
	vmull.u32	$A2xB,$Bi,${A1}[0]
	vmull.u32	$A3xB,$Bi,${A1}[1]
	 vst1.64	{$A6xB-$A7xB}, [$toutptr,:256]!

	vmull.u32	$A4xB,$Bi,${A2}[0]
	vmull.u32	$A5xB,$Bi,${A2}[1]
	vmull.u32	$A6xB,$Bi,${A3}[0]
	vmull.u32	$A7xB,$Bi,${A3}[1]

	bne	.LNEON_1st

	vmlal.u32	$A0xB,$Ni,${N0}[0]
	add		$tinptr,sp,#16
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	sub		$aptr,$aptr,$num,lsl#2		@ rewind $aptr
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	 vld1.64	{$Temp}, [sp,:128]
	vmlal.u32	$A3xB,$Ni,${N1}[1]
	sub		$outer,$num,#1

	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vst1.64		{$A0xB-$A1xB}, [$toutptr,:256]!
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vshr.u64	$temp,$temp,#16
	 vld1.64	{$A0xB},       [$tinptr, :128]!
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vst1.64		{$A2xB-$A3xB}, [$toutptr,:256]!
	vmlal.u32	$A7xB,$Ni,${N3}[1]

	vst1.64		{$A4xB-$A5xB}, [$toutptr,:256]!
	vadd.u64	$temp,$temp,`&Dhi("$Temp")`
	veor		$Z,$Z,$Z
	vst1.64		{$A6xB-$A7xB}, [$toutptr,:256]!
	 vld1.64	{$A1xB-$A2xB}, [$tinptr, :256]!
	vst1.64		{$Z},          [$toutptr,:128]
	vshr.u64	$temp,$temp,#16

	b		.LNEON_outer

.align	4
.LNEON_outer:
	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	sub		$nptr,$nptr,$num,lsl#2		@ rewind $nptr
	vld1.32		{$A0-$A3},  [$aptr]!
	veor		$zero,$zero,$zero
	mov		$toutptr,sp
	vzip.16		$Bi,$zero
	sub		$inner,$num,#8
	vadd.u64	`&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp

	vmlal.u32	$A0xB,$Bi,${A0}[0]
	 vld1.64	{$A3xB-$A4xB},[$tinptr,:256]!
	vmlal.u32	$A1xB,$Bi,${A0}[1]
	vmlal.u32	$A2xB,$Bi,${A1}[0]
	 vld1.64	{$A5xB-$A6xB},[$tinptr,:256]!
	vmlal.u32	$A3xB,$Bi,${A1}[1]

	vshl.i64	$temp,`&Dhi("$A0xB")`,#16
	veor		$zero,$zero,$zero
	vadd.u64	$temp,$temp,`&Dlo("$A0xB")`
	 vld1.64	{$A7xB},[$tinptr,:128]!
	vmul.u32	$Ni,$temp,$M0

	vmlal.u32	$A4xB,$Bi,${A2}[0]
	 vld1.32	{$N0-$N3}, [$nptr]!
	vmlal.u32	$A5xB,$Bi,${A2}[1]
	vmlal.u32	$A6xB,$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmlal.u32	$A7xB,$Bi,${A3}[1]

.LNEON_inner:
	vmlal.u32	$A0xB,$Ni,${N0}[0]
	 vld1.32	{$A0-$A3}, [$aptr]!
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	 subs		$inner,$inner,#8
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vmlal.u32	$A3xB,$Ni,${N1}[1]
	vst1.64		{$A0xB-$A1xB}, [$toutptr,:256]!

	vmlal.u32	$A4xB,$Ni,${N2}[0]
	 vld1.64	{$A0xB},       [$tinptr, :128]!
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vst1.64		{$A2xB-$A3xB}, [$toutptr,:256]!
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	 vld1.64	{$A1xB-$A2xB}, [$tinptr, :256]!
	vmlal.u32	$A7xB,$Ni,${N3}[1]
	vst1.64		{$A4xB-$A5xB}, [$toutptr,:256]!

	vmlal.u32	$A0xB,$Bi,${A0}[0]
	 vld1.64	{$A3xB-$A4xB}, [$tinptr, :256]!
	vmlal.u32	$A1xB,$Bi,${A0}[1]
	vst1.64		{$A6xB-$A7xB}, [$toutptr,:256]!
	vmlal.u32	$A2xB,$Bi,${A1}[0]
	 vld1.64	{$A5xB-$A6xB}, [$tinptr, :256]!
	vmlal.u32	$A3xB,$Bi,${A1}[1]
	 vld1.32	{$N0-$N3}, [$nptr]!

	vmlal.u32	$A4xB,$Bi,${A2}[0]
	 vld1.64	{$A7xB},       [$tinptr, :128]!
	vmlal.u32	$A5xB,$Bi,${A2}[1]
	vmlal.u32	$A6xB,$Bi,${A3}[0]
	vmlal.u32	$A7xB,$Bi,${A3}[1]

	bne	.LNEON_inner

	vmlal.u32	$A0xB,$Ni,${N0}[0]
	add		$tinptr,sp,#16
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	sub		$aptr,$aptr,$num,lsl#2		@ rewind $aptr
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	 vld1.64	{$Temp}, [sp,:128]
	vmlal.u32	$A3xB,$Ni,${N1}[1]
	subs		$outer,$outer,#1

	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vst1.64		{$A0xB-$A1xB}, [$toutptr,:256]!
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	 vld1.64	{$A0xB},       [$tinptr, :128]!
	vshr.u64	$temp,$temp,#16
	vst1.64		{$A2xB-$A3xB}, [$toutptr,:256]!
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	 vld1.64	{$A1xB-$A2xB}, [$tinptr, :256]!
	vmlal.u32	$A7xB,$Ni,${N3}[1]

	vst1.64		{$A4xB-$A5xB}, [$toutptr,:256]!
	vadd.u64	$temp,$temp,`&Dhi("$Temp")`
	vst1.64		{$A6xB-$A7xB}, [$toutptr,:256]!
	vshr.u64	$temp,$temp,#16

	bne	.LNEON_outer

	mov		$toutptr,sp
	mov		$inner,$num

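	@ propagate the deferred carries: each 64-bit lane holds a partial
	@ sum in 16-bit-radix form; add the running carry, peel the next
	@ carry off with a 16-bit shift, and vzip the halves back into words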
.LNEON_tail:
	vadd.u64	`&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp
	vld1.64		{$A3xB-$A4xB}, [$tinptr, :256]!
	vshr.u64	$temp,`&Dlo("$A0xB")`,#16
	vadd.u64	`&Dhi("$A0xB")`,`&Dhi("$A0xB")`,$temp
	vld1.64		{$A5xB-$A6xB}, [$tinptr, :256]!
	vshr.u64	$temp,`&Dhi("$A0xB")`,#16
	vld1.64		{$A7xB},       [$tinptr, :128]!
	vzip.16		`&Dlo("$A0xB")`,`&Dhi("$A0xB")`

.LNEON_tail2:
	vadd.u64	`&Dlo("$A1xB")`,`&Dlo("$A1xB")`,$temp
	vst1.32		{`&Dlo("$A0xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A1xB")`,#16
	vadd.u64	`&Dhi("$A1xB")`,`&Dhi("$A1xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A1xB")`,#16
	vzip.16		`&Dlo("$A1xB")`,`&Dhi("$A1xB")`

	vadd.u64	`&Dlo("$A2xB")`,`&Dlo("$A2xB")`,$temp
	vst1.32		{`&Dlo("$A1xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A2xB")`,#16
	vadd.u64	`&Dhi("$A2xB")`,`&Dhi("$A2xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A2xB")`,#16
	vzip.16		`&Dlo("$A2xB")`,`&Dhi("$A2xB")`

	vadd.u64	`&Dlo("$A3xB")`,`&Dlo("$A3xB")`,$temp
	vst1.32		{`&Dlo("$A2xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A3xB")`,#16
	vadd.u64	`&Dhi("$A3xB")`,`&Dhi("$A3xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A3xB")`,#16
	vzip.16		`&Dlo("$A3xB")`,`&Dhi("$A3xB")`

	vadd.u64	`&Dlo("$A4xB")`,`&Dlo("$A4xB")`,$temp
	vst1.32		{`&Dlo("$A3xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A4xB")`,#16
	vadd.u64	`&Dhi("$A4xB")`,`&Dhi("$A4xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A4xB")`,#16
	vzip.16		`&Dlo("$A4xB")`,`&Dhi("$A4xB")`

	vadd.u64	`&Dlo("$A5xB")`,`&Dlo("$A5xB")`,$temp
	vst1.32		{`&Dlo("$A4xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A5xB")`,#16
	vadd.u64	`&Dhi("$A5xB")`,`&Dhi("$A5xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A5xB")`,#16
	vzip.16		`&Dlo("$A5xB")`,`&Dhi("$A5xB")`

	vadd.u64	`&Dlo("$A6xB")`,`&Dlo("$A6xB")`,$temp
	vst1.32		{`&Dlo("$A5xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A6xB")`,#16
	vadd.u64	`&Dhi("$A6xB")`,`&Dhi("$A6xB")`,$temp
	vld1.64		{$A0xB}, [$tinptr, :128]!
	vshr.u64	$temp,`&Dhi("$A6xB")`,#16
	vzip.16		`&Dlo("$A6xB")`,`&Dhi("$A6xB")`

	vadd.u64	`&Dlo("$A7xB")`,`&Dlo("$A7xB")`,$temp
	vst1.32		{`&Dlo("$A6xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A7xB")`,#16
	vadd.u64	`&Dhi("$A7xB")`,`&Dhi("$A7xB")`,$temp
	vld1.64		{$A1xB-$A2xB},	[$tinptr, :256]!
	vshr.u64	$temp,`&Dhi("$A7xB")`,#16
	vzip.16		`&Dlo("$A7xB")`,`&Dhi("$A7xB")`
	subs		$inner,$inner,#8
	vst1.32		{`&Dlo("$A7xB")`[0]}, [$toutptr, :32]!

	bne	.LNEON_tail

	vst1.32	{${temp}[0]}, [$toutptr, :32]		@ top-most bit
	sub	$nptr,$nptr,$num,lsl#2			@ rewind $nptr
	subs	$aptr,sp,#0				@ clear carry flag
	add	$bptr,sp,$num,lsl#2

.LNEON_sub:
	ldmia	$aptr!, {r4-r7}
	ldmia	$nptr!, {r8-r11}
	sbcs	r8, r4,r8
	sbcs	r9, r5,r9
	sbcs	r10,r6,r10
	sbcs	r11,r7,r11
	teq	$aptr,$bptr				@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_sub

	ldr	r10, [$aptr]				@ load top-most bit
	veor	q0,q0,q0
	sub	r11,$bptr,sp				@ this is num*4
	veor	q1,q1,q1
	mov	$aptr,sp
	sub	$rptr,$rptr,r11				@ rewind $rptr
	mov	$nptr,$bptr				@ second 3/4th of frame
	sbcs	r10,r10,#0				@ result is carry flag

.LNEON_copy_n_zap:
	ldmia	$aptr!, {r4-r7}
	ldmia	$rptr,  {r8-r11}
	movcc	r8, r4
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	movcc	r11,r7
	ldmia	$aptr, {r4-r7}
	stmia	$rptr!, {r8-r11}
	sub	$aptr,$aptr,#16
	ldmia	$rptr, {r8-r11}
	movcc	r8, r4
	vst1.64	{q0-q1}, [$aptr,:256]!			@ wipe
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	movcc	r11,r7
	teq	$aptr,$bptr				@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_copy_n_zap

	sub	sp,ip,#96
	vldmia	sp!,{d8-d15}
	ldmia	sp!,{r4-r11}
	bx	lr
.size	bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
#endif
___
}
$code.=<<___;
.asciz	"Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
#if __ARM_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
#endif
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
print $code;
close STDOUT;