# Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# ECP_NISTZ256 module for ARMv4.
#
# Original ECP_NISTZ256 submission targeting x86_64 is detailed in
# http://eprint.iacr.org/2013/816. In the process of adaptation
# original .c module was made 32-bit savvy in order to make this
# implementation possible.
#
#			with/without -DECP_NISTZ256_ASM
# Cortex-A15		+100-316%
# Snapdragon S4		+66-187%
#
# Ranges denote minimum and maximum improvement coefficients depending
# on benchmark. Lower coefficients are for ECDSA sign, server-side
# operation. Keep in mind that +200% means 3x improvement.
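#
# For instance (reading the table above, not a new measurement):
# "+100%" means the assembly path delivers 1+100/100 = 2x the
# throughput of the plain C build on that benchmark, and "+316%"
# means 4.16x.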
$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}
#if defined(__thumb2__)

########################################################################
# Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
open TABLE,"<ecp_nistz256_table.c"		or
open TABLE,"<${dir}../ecp_nistz256_table.c"	or
die "failed to open ecp_nistz256_table.c:",$!;

s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
# See ecp_nistz256_table.c for an explanation of why it's 64*16*37.
# 64*16*37-1 is because $#arr returns last valid index of @arr, not
# the number of elements.
die "insane number of elements" if ($#arr != 64*16*37-1);
.globl	ecp_nistz256_precomputed
.type	ecp_nistz256_precomputed,%object
ecp_nistz256_precomputed:

########################################################################
# this conversion smashes P256_POINT_AFFINE by individual bytes with
# 64 byte interval, similar to
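#	1111222233334444
#	1234123412341234
#
# i.e. bytes of each input word are spread out at 64-byte intervals
# (interleave sketch), so that ecp_nistz256_gather_w7 can later pick
# one byte per 64-byte lane with a fixed, index-independent access
# pattern.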
@tbl = splice(@arr,0,64*16);
for($i=0;$i<64;$i++) {
	my @line;
	for($j=0;$j<64;$j++) {
		push @line,($tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
	}
	$code.=join(',',map { sprintf "0x%02x",$_} @line);
.size	ecp_nistz256_precomputed,.-ecp_nistz256_precomputed

.LRR:	@ 2^512 mod P precomputed for NIST P256 polynomial
.long	0x00000003, 0x00000000, 0xffffffff, 0xfffffffb
.long	0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004
.Lone:
.long	1,0,0,0,0,0,0,0
.asciz	"ECP_NISTZ256 for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
########################################################################
# common register layout, note that $t2 is link register, so that if
# internal subroutine uses $t2, then it has to offload lr...

($r_ptr,$a_ptr,$b_ptr,$ff,$a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7,$t1,$t2)=
		map("r$_",(0..12,14));
($t0,$t3)=($ff,$a_ptr);

@ void	ecp_nistz256_to_mont(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_to_mont
.type	ecp_nistz256_to_mont,%function
ecp_nistz256_to_mont:
	adr	$b_ptr,.LRR
	b	.Lecp_nistz256_mul_mont
.size	ecp_nistz256_to_mont,.-ecp_nistz256_to_mont

@ void	ecp_nistz256_from_mont(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_from_mont
.type	ecp_nistz256_from_mont,%function
ecp_nistz256_from_mont:
	adr	$b_ptr,.Lone
	b	.Lecp_nistz256_mul_mont
.size	ecp_nistz256_from_mont,.-ecp_nistz256_from_mont

@ void	ecp_nistz256_mul_by_2(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_mul_by_2
.type	ecp_nistz256_mul_by_2,%function
ecp_nistz256_mul_by_2:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_mul_by_2
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2

.type	__ecp_nistz256_mul_by_2,%function
__ecp_nistz256_mul_by_2:
	adds	$a0,$a0,$a0		@ a[0:7]+=a[0:7], i.e. add with itself
.size	__ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2
@ void	ecp_nistz256_add(BN_ULONG r0[8],const BN_ULONG r1[8],
@					const BN_ULONG r2[8]);
.globl	ecp_nistz256_add
.type	ecp_nistz256_add,%function
ecp_nistz256_add:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_add
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_add,.-ecp_nistz256_add

.type	__ecp_nistz256_add,%function
__ecp_nistz256_add:
	str	lr,[sp,#-4]!		@ push lr
	ldr	lr,[sp],#4		@ pop lr

	@ if a+b >= modulus, subtract modulus.
	@
	@ But since comparison implies subtraction, we subtract
	@ modulus and then add it back if subtraction borrowed.

	@ Note that because mod has special form, i.e. consists of
	@ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	@ using value of borrow as a whole or extracting single bit.
	@ Follow $ff register...
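	@
	@ Worked illustration (commentary only): after the trial
	@ subtraction $ff is 0 (no borrow) or -1 (borrow). Adding $ff
	@ to the three low words supplies the 0xffffffff words of the
	@ P-256 modulus, #0 covers its zero words, $ff,lsr#31 yields
	@ the lone 0x00000001 word, and $ff again the top word; with
	@ $ff = 0 the whole addition is a no-op.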
	adds	$a0,$a0,$ff		@ add synthesized modulus
	adcs	$a6,$a6,$ff,lsr#31
.size	__ecp_nistz256_add,.-__ecp_nistz256_add

@ void	ecp_nistz256_mul_by_3(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_mul_by_3
.type	ecp_nistz256_mul_by_3,%function
ecp_nistz256_mul_by_3:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_mul_by_3
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3

.type	__ecp_nistz256_mul_by_3,%function
__ecp_nistz256_mul_by_3:
	str	lr,[sp,#-4]!		@ push lr
	@ As multiplication by 3 is performed as 2*n+n, below are inline
	@ copies of __ecp_nistz256_mul_by_2 and __ecp_nistz256_add, see
	@ corresponding subroutines for details.

	adds	$a0,$a0,$a0		@ a[0:7]+=a[0:7]
	subs	$a0,$a0,#-1		@ .Lreduce_by_sub but without stores
	adds	$a0,$a0,$ff		@ add synthesized modulus
	ldr	$b_ptr,[$a_ptr,#0]
	adcs	$a6,$a6,$ff,lsr#31
	adds	$a0,$a0,$b_ptr		@ 2*a[0:7]+=a[0:7]
	ldr	$b_ptr,[$a_ptr,#16]
	ldr	lr,[sp],#4		@ pop lr
.size	__ecp_nistz256_mul_by_3,.-__ecp_nistz256_mul_by_3
@ void	ecp_nistz256_div_by_2(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_div_by_2
.type	ecp_nistz256_div_by_2,%function
ecp_nistz256_div_by_2:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_div_by_2
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2

.type	__ecp_nistz256_div_by_2,%function
__ecp_nistz256_div_by_2:
	@ ret = (a is odd ? a+mod : a) >> 1

	mov	$ff,$a0,lsl#31		@ place least significant bit to most
					@ significant position, now arithmetic
					@ right shift by 31 will produce -1 or
					@ 0, while logical right shift 1 or 0,
					@ this is how modulus is conditionally
					@ synthesized in this case...
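	@
	@ Worked illustration (commentary only): if a[0] is odd,
	@ $ff = a[0]<<31 = 0x80000000, so $ff,asr#31 = 0xffffffff and
	@ $ff,lsr#31 = 1 -- exactly the 0xffffffff and 0x00000001
	@ words of the P-256 modulus; if a[0] is even both shifts
	@ give 0 and the addition below leaves a unchanged.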
	adds	$a0,$a0,$ff,asr#31
	adcs	$a1,$a1,$ff,asr#31
	adcs	$a2,$a2,$ff,asr#31
	mov	$a0,$a0,lsr#1		@ a[0:7]>>=1, we can start early
					@ because it doesn't affect flags
	orr	$a0,$a0,$a1,lsl#31
	adcs	$a6,$a6,$ff,lsr#31
	adcs	$a7,$a7,$ff,asr#31
	adc	$b_ptr,$b_ptr,#0	@ top-most carry bit from addition
	orr	$a1,$a1,$a2,lsl#31
	orr	$a2,$a2,$a3,lsl#31
	orr	$a3,$a3,$a4,lsl#31
	orr	$a4,$a4,$a5,lsl#31
	orr	$a5,$a5,$a6,lsl#31
	orr	$a6,$a6,$a7,lsl#31
	orr	$a7,$a7,$b_ptr,lsl#31	@ don't forget the top-most carry bit
.size	__ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2

@ void	ecp_nistz256_sub(BN_ULONG r0[8],const BN_ULONG r1[8],
@					const BN_ULONG r2[8]);
.globl	ecp_nistz256_sub
.type	ecp_nistz256_sub,%function
ecp_nistz256_sub:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_sub
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_sub,.-ecp_nistz256_sub

.type	__ecp_nistz256_sub,%function
__ecp_nistz256_sub:
	str	lr,[sp,#-4]!		@ push lr
	sbc	$ff,$ff,$ff		@ broadcast borrow bit
	ldr	lr,[sp],#4		@ pop lr

	@ if a-b borrows, add modulus.
	@
	@ Note that because mod has special form, i.e. consists of
	@ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	@ broadcasting borrow bit to a register, $ff, and using it as
	@ a whole or extracting single bit.

	adds	$a0,$a0,$ff		@ add synthesized modulus
	adcs	$a6,$a6,$ff,lsr#31
.size	__ecp_nistz256_sub,.-__ecp_nistz256_sub

@ void	ecp_nistz256_neg(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_neg
.type	ecp_nistz256_neg,%function
ecp_nistz256_neg:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_neg
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_neg,.-ecp_nistz256_neg

.type	__ecp_nistz256_neg,%function
__ecp_nistz256_neg:
.size	__ecp_nistz256_neg,.-__ecp_nistz256_neg
my @acc=map("r$_",(3..11));
my ($t0,$t1,$bj,$t2,$t3)=map("r$_",(0,1,2,12,14));

@ void	ecp_nistz256_sqr_mont(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_sqr_mont
.type	ecp_nistz256_sqr_mont,%function
ecp_nistz256_sqr_mont:
	mov	$b_ptr,$a_ptr
	b	.Lecp_nistz256_mul_mont
.size	ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont

@ void	ecp_nistz256_mul_mont(BN_ULONG r0[8],const BN_ULONG r1[8],
@					     const BN_ULONG r2[8]);
.globl	ecp_nistz256_mul_mont
.type	ecp_nistz256_mul_mont,%function
ecp_nistz256_mul_mont:
.Lecp_nistz256_mul_mont:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_mul_mont
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont

.type	__ecp_nistz256_mul_mont,%function
__ecp_nistz256_mul_mont:
	stmdb	sp!,{r0-r2,lr}		@ make a copy of arguments too

	ldr	$bj,[$b_ptr,#0]		@ b[0]
	ldmia	$a_ptr,{@acc[1]-@acc[8]}

	umull	@acc[0],$t3,@acc[1],$bj	@ r[0]=a[0]*b[0]
	stmdb	sp!,{@acc[1]-@acc[8]}	@ copy a[0-7] to stack, so
					@ that it can be addressed
					@ without spending register
					@ on its own pointer
	umull	@acc[1],$t0,@acc[2],$bj	@ r[1]=a[1]*b[0]
	umull	@acc[2],$t1,@acc[3],$bj
	adds	@acc[1],@acc[1],$t3	@ accumulate high part of mult
	umull	@acc[3],$t2,@acc[4],$bj
	adcs	@acc[2],@acc[2],$t0
	umull	@acc[4],$t3,@acc[5],$bj
	adcs	@acc[3],@acc[3],$t1
	umull	@acc[5],$t0,@acc[6],$bj
	adcs	@acc[4],@acc[4],$t2
	umull	@acc[6],$t1,@acc[7],$bj
	adcs	@acc[5],@acc[5],$t3
	umull	@acc[7],$t2,@acc[8],$bj
	adcs	@acc[6],@acc[6],$t0
	adcs	@acc[7],@acc[7],$t1
	eor	$t3,$t3,$t3		@ first overflow bit is zero
for(my $i=1;$i<8;$i++) {
# Reduction iteration is normally performed by accumulating
# result of multiplication of modulus by "magic" digit [and
# omitting least significant word, which is guaranteed to
# be 0], but thanks to special form of modulus and "magic"
# digit being equal to least significant word, it can be
# performed with additions and subtractions alone. Indeed:
#
#        ffff.0001.0000.0000.0000.ffff.ffff.ffff
# *                                         abcd
# + xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
#
# Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
# rewrite above as:
#
# xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
# + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000
# - abcd.0000.0000.0000.0000.0000.0000.abcd
#
# or marking redundant operations:
#
# xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.----
# + abcd.0000.abcd.0000.0000.abcd.----.----.----
# - abcd.----.----.----.----.----.----.----
	@ multiplication-less reduction $i
	adds	@acc[3],@acc[3],@acc[0]	@ r[3]+=r[0]
	ldr	$bj,[sp,#40]		@ restore b_ptr
	adcs	@acc[4],@acc[4],#0	@ r[4]+=0
	adcs	@acc[5],@acc[5],#0	@ r[5]+=0
	adcs	@acc[6],@acc[6],@acc[0]	@ r[6]+=r[0]
	ldr	$t1,[sp,#0]		@ load a[0]
	adcs	@acc[7],@acc[7],#0	@ r[7]+=0
	ldr	$bj,[$bj,#4*$i]		@ load b[i]
	adcs	@acc[8],@acc[8],@acc[0]	@ r[8]+=r[0]
	adc	$t3,$t3,#0		@ overflow bit
	subs	@acc[7],@acc[7],@acc[0]	@ r[7]-=r[0]
	ldr	$t2,[sp,#4]		@ a[1]
	sbcs	@acc[8],@acc[8],#0	@ r[8]-=0
	umlal	@acc[1],$t0,$t1,$bj	@ "r[0]"+=a[0]*b[i]
	sbc	@acc[0],$t3,#0		@ overflow bit, keep in mind
					@ that net result is
					@ addition of a value which
					@ makes underflow impossible
	ldr	$t3,[sp,#8]		@ a[2]
	umlal	@acc[2],$t1,$t2,$bj	@ "r[1]"+=a[1]*b[i]
	str	@acc[0],[sp,#36]	@ temporarily offload overflow
	ldr	$t4,[sp,#12]		@ a[3], $t4 is alias @acc[0]
	umlal	@acc[3],$t2,$t3,$bj	@ "r[2]"+=a[2]*b[i]
	adds	@acc[2],@acc[2],$t0	@ accumulate high part of mult
	ldr	$t0,[sp,#16]		@ a[4]
	umlal	@acc[4],$t3,$t4,$bj	@ "r[3]"+=a[3]*b[i]
	adcs	@acc[3],@acc[3],$t1
	ldr	$t1,[sp,#20]		@ a[5]
	umlal	@acc[5],$t4,$t0,$bj	@ "r[4]"+=a[4]*b[i]
	adcs	@acc[4],@acc[4],$t2
	ldr	$t2,[sp,#24]		@ a[6]
	umlal	@acc[6],$t0,$t1,$bj	@ "r[5]"+=a[5]*b[i]
	adcs	@acc[5],@acc[5],$t3
	ldr	$t3,[sp,#28]		@ a[7]
	umlal	@acc[7],$t1,$t2,$bj	@ "r[6]"+=a[6]*b[i]
	adcs	@acc[6],@acc[6],$t4
	ldr	@acc[0],[sp,#36]	@ restore overflow bit
	umlal	@acc[8],$t2,$t3,$bj	@ "r[7]"+=a[7]*b[i]
	adcs	@acc[7],@acc[7],$t0
	adcs	@acc[8],@acc[8],$t1
	adcs	@acc[0],@acc[0],$t2
	adc	$t3,$t3,#0		@ new overflow bit
push(@acc,shift(@acc));			# rotate registers, so that
					# "r[i]" becomes r[i]

	@ last multiplication-less reduction
	adds	@acc[3],@acc[3],@acc[0]
	ldr	$r_ptr,[sp,#32]		@ restore r_ptr
	adcs	@acc[4],@acc[4],#0
	adcs	@acc[5],@acc[5],#0
	adcs	@acc[6],@acc[6],@acc[0]
	adcs	@acc[7],@acc[7],#0
	adcs	@acc[8],@acc[8],@acc[0]
	subs	@acc[7],@acc[7],@acc[0]
	sbcs	@acc[8],@acc[8],#0
	sbc	@acc[0],$t3,#0		@ overflow bit

	@ Final step is "if result > mod, subtract mod", but we do it
	@ "other way around", namely subtract modulus from result
	@ and if it borrowed, add modulus back.
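	@
	@ Illustration (commentary only): subtracting the modulus
	@ word-wise is r[0]-0xffffffff = r[0]+1 with carry, likewise
	@ for r[1] and r[2], then r[3..5]-0, r[6]-1 and
	@ r[7]-0xffffffff, which is why each adds/adcs below carries
	@ its sbcs equivalent in the comment column.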
	adds	@acc[1],@acc[1],#1	@ subs	@acc[1],@acc[1],#-1
	adcs	@acc[2],@acc[2],#0	@ sbcs	@acc[2],@acc[2],#-1
	adcs	@acc[3],@acc[3],#0	@ sbcs	@acc[3],@acc[3],#-1
	sbcs	@acc[4],@acc[4],#0
	sbcs	@acc[5],@acc[5],#0
	sbcs	@acc[6],@acc[6],#0
	sbcs	@acc[7],@acc[7],#1
	adcs	@acc[8],@acc[8],#0	@ sbcs	@acc[8],@acc[8],#-1
	ldr	lr,[sp,#44]		@ restore lr
	sbc	@acc[0],@acc[0],#0	@ broadcast borrow bit

	@ Note that because mod has special form, i.e. consists of
	@ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	@ broadcasting borrow bit to a register, @acc[0], and using it as
	@ a whole or extracting single bit.

	adds	@acc[1],@acc[1],@acc[0]	@ add modulus or zero
	adcs	@acc[2],@acc[2],@acc[0]
	str	@acc[1],[$r_ptr,#0]
	adcs	@acc[3],@acc[3],@acc[0]
	str	@acc[2],[$r_ptr,#4]
	adcs	@acc[4],@acc[4],#0
	str	@acc[3],[$r_ptr,#8]
	adcs	@acc[5],@acc[5],#0
	str	@acc[4],[$r_ptr,#12]
	adcs	@acc[6],@acc[6],#0
	str	@acc[5],[$r_ptr,#16]
	adcs	@acc[7],@acc[7],@acc[0],lsr#31
	str	@acc[6],[$r_ptr,#20]
	adc	@acc[8],@acc[8],@acc[0]
	str	@acc[7],[$r_ptr,#24]
	str	@acc[8],[$r_ptr,#28]
.size	__ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
my ($out,$inp,$index,$mask)=map("r$_",(0..3));

@ void	ecp_nistz256_scatter_w5(void *r0,const P256_POINT *r1,
@					 int r2);
.globl	ecp_nistz256_scatter_w5
.type	ecp_nistz256_scatter_w5,%function
ecp_nistz256_scatter_w5:
	add	$out,$out,$index,lsl#2

	ldmia	$inp!,{r4-r11}		@ X
	str	r4,[$out,#64*0-4]
	str	r5,[$out,#64*1-4]
	str	r6,[$out,#64*2-4]
	str	r7,[$out,#64*3-4]
	str	r8,[$out,#64*4-4]
	str	r9,[$out,#64*5-4]
	str	r10,[$out,#64*6-4]
	str	r11,[$out,#64*7-4]

	ldmia	$inp!,{r4-r11}		@ Y
	str	r4,[$out,#64*0-4]
	str	r5,[$out,#64*1-4]
	str	r6,[$out,#64*2-4]
	str	r7,[$out,#64*3-4]
	str	r8,[$out,#64*4-4]
	str	r9,[$out,#64*5-4]
	str	r10,[$out,#64*6-4]
	str	r11,[$out,#64*7-4]

	ldmia	$inp,{r4-r11}		@ Z
	str	r4,[$out,#64*0-4]
	str	r5,[$out,#64*1-4]
	str	r6,[$out,#64*2-4]
	str	r7,[$out,#64*3-4]
	str	r8,[$out,#64*4-4]
	str	r9,[$out,#64*5-4]
	str	r10,[$out,#64*6-4]
	str	r11,[$out,#64*7-4]

#if __ARM_ARCH__>=5 || defined(__thumb__)
	bx	lr
#else
	mov	pc,lr
#endif
.size	ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
@ void	ecp_nistz256_gather_w5(P256_POINT *r0,const void *r1,
@				       int r2);
.globl	ecp_nistz256_gather_w5
.type	ecp_nistz256_gather_w5,%function
ecp_nistz256_gather_w5:
	subne	$index,$index,#1
	add	$inp,$inp,$index,lsl#2
	stmia	$out!,{r4-r11}		@ X
	stmia	$out!,{r4-r11}		@ Y
	stmia	$out,{r4-r11}		@ Z

#if __ARM_ARCH__>=5 || defined(__thumb__)
	bx	lr
#else
	mov	pc,lr
#endif
.size	ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5

@ void	ecp_nistz256_scatter_w7(void *r0,const P256_POINT_AFFINE *r1,
@					 int r2);
.globl	ecp_nistz256_scatter_w7
.type	ecp_nistz256_scatter_w7,%function
ecp_nistz256_scatter_w7:
	subs	$index,$index,#1
	strb	$mask,[$out,#64*0]
	mov	$mask,$mask,lsr#8
	strb	$mask,[$out,#64*1]
	mov	$mask,$mask,lsr#8
	strb	$mask,[$out,#64*2]
	mov	$mask,$mask,lsr#8
	strb	$mask,[$out,#64*3]

#if __ARM_ARCH__>=5 || defined(__thumb__)
	bx	lr
#else
	mov	pc,lr
#endif
.size	ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7

@ void	ecp_nistz256_gather_w7(P256_POINT_AFFINE *r0,const void *r1,
@				       int r2);
.globl	ecp_nistz256_gather_w7
.type	ecp_nistz256_gather_w7,%function
ecp_nistz256_gather_w7:
	subne	$index,$index,#1
	subs	$index,$index,#1

#if __ARM_ARCH__>=5 || defined(__thumb__)
	bx	lr
#else
	mov	pc,lr
#endif
.size	ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
# In comparison to the integer-only equivalent of the subroutine below:
#
# As not all time is spent in multiplication, overall impact is deemed
# too low to care about.

my ($A0,$A1,$A2,$A3,$Bi,$zero,$temp)=map("d$_",(0..7));
my @AxB=map("q$_",(8..15));

my ($rptr,$aptr,$bptr,$toutptr)=map("r$_",(0..3));
.globl	ecp_nistz256_mul_mont_neon
.type	ecp_nistz256_mul_mont_neon,%function
ecp_nistz256_mul_mont_neon:
	vstmdb	sp!,{q4-q5}		@ ABI specification says so

	vld1.32	{${Bi}[0]},[$bptr,:32]!
	veor	$zero,$zero,$zero
	vld1.32	{$A0-$A3}, [$aptr]	@ can't specify :32 :-(
	mov	sp,$toutptr		@ alloca
	vmov.i64	$mask,#0xffff

	vmull.u32	@AxB[0],$Bi,${A0}[0]
	vmull.u32	@AxB[1],$Bi,${A0}[1]
	vmull.u32	@AxB[2],$Bi,${A1}[0]
	vmull.u32	@AxB[3],$Bi,${A1}[1]
	vshr.u64	$temp,@AxB[0]#lo,#16
	vmull.u32	@AxB[4],$Bi,${A2}[0]
	vadd.u64	@AxB[0]#hi,@AxB[0]#hi,$temp
	vmull.u32	@AxB[5],$Bi,${A2}[1]
	vshr.u64	$temp,@AxB[0]#hi,#16	@ upper 32 bits of a[0]*b[0]
	vmull.u32	@AxB[6],$Bi,${A3}[0]
	vand.u64	@AxB[0],@AxB[0],$mask	@ lower 32 bits of a[0]*b[0]
	vmull.u32	@AxB[7],$Bi,${A3}[1]
for($i=1;$i<8;$i++) {
	vld1.32	{${Bi}[0]},[$bptr,:32]!
	veor	$zero,$zero,$zero
	vadd.u64	@AxB[1]#lo,@AxB[1]#lo,$temp	@ reduction
	vshl.u64	$mult,@AxB[0],#32
	vadd.u64	@AxB[3],@AxB[3],@AxB[0]
	vsub.u64	$mult,$mult,@AxB[0]
	vadd.u64	@AxB[6],@AxB[6],@AxB[0]
	vadd.u64	@AxB[7],@AxB[7],$mult
push(@AxB,shift(@AxB));
	vmlal.u32	@AxB[0],$Bi,${A0}[0]
	vmlal.u32	@AxB[1],$Bi,${A0}[1]
	vmlal.u32	@AxB[2],$Bi,${A1}[0]
	vmlal.u32	@AxB[3],$Bi,${A1}[1]
	vshr.u64	$temp,@AxB[0]#lo,#16
	vmlal.u32	@AxB[4],$Bi,${A2}[0]
	vadd.u64	@AxB[0]#hi,@AxB[0]#hi,$temp
	vmlal.u32	@AxB[5],$Bi,${A2}[1]
	vshr.u64	$temp,@AxB[0]#hi,#16	@ upper 33 bits of a[0]*b[i]+t[0]
	vmlal.u32	@AxB[6],$Bi,${A3}[0]
	vand.u64	@AxB[0],@AxB[0],$mask	@ lower 32 bits of a[0]*b[i]
	vmull.u32	@AxB[7],$Bi,${A3}[1]
	vadd.u64	@AxB[1]#lo,@AxB[1]#lo,$temp	@ last reduction
	vshl.u64	$mult,@AxB[0],#32
	vadd.u64	@AxB[3],@AxB[3],@AxB[0]
	vsub.u64	$mult,$mult,@AxB[0]
	vadd.u64	@AxB[6],@AxB[6],@AxB[0]
	vadd.u64	@AxB[7],@AxB[7],$mult

	vshr.u64	$temp,@AxB[1]#lo,#16	@ convert
	vadd.u64	@AxB[1]#hi,@AxB[1]#hi,$temp
	vshr.u64	$temp,@AxB[1]#hi,#16
	vzip.16	@AxB[1]#lo,@AxB[1]#hi

	vadd.u64	@AxB[$_]#lo,@AxB[$_]#lo,$temp
	vst1.32	{@AxB[$_-1]#lo[0]},[$toutptr,:32]!
	vshr.u64	$temp,@AxB[$_]#lo,#16
	vadd.u64	@AxB[$_]#hi,@AxB[$_]#hi,$temp
	vshr.u64	$temp,@AxB[$_]#hi,#16
	vzip.16	@AxB[$_]#lo,@AxB[$_]#hi

	vst1.32	{@AxB[7]#lo[0]},[$toutptr,:32]!
	vst1.32	{$temp},[$toutptr]	@ upper 33 bits

	ldr	r9,[sp,#32]		@ top-most bit
	adcs	r7,r7,r9,lsr#31
.size	ecp_nistz256_mul_mont_neon,.-ecp_nistz256_mul_mont_neon
########################################################################
# Below $aN assignment matches order in which 256-bit result appears in
# register bank at return from __ecp_nistz256_mul_mont, so that we can
# skip over reloading it from memory. This means that below functions
# use custom calling sequence accepting 256-bit input in registers,
# output pointer in r0, $r_ptr, and optional pointer in r2, $b_ptr.
#
# See their "normal" counterparts for insights on calculations.
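#
# For example (this is what point_double below actually does), a
# sequence like
#
#	bl	__ecp_nistz256_mul_mont		@ result left in $a0-$a7
#	add	$b_ptr,sp,#$tmp0
#	bl	__ecp_nistz256_sub_from		@ consumes it in registers
#
# saves an eight-word store/reload between the two calls.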
my ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7,
    $t0,$t1,$t2,$t3)=map("r$_",(11,3..10,12,14,1));
.type	__ecp_nistz256_sub_from,%function
__ecp_nistz256_sub_from:
	str	lr,[sp,#-4]!		@ push lr

	ldr	$t3,[$b_ptr,#12]
	ldr	$t0,[$b_ptr,#16]
	ldr	$t1,[$b_ptr,#20]
	ldr	$t2,[$b_ptr,#24]
	ldr	$t3,[$b_ptr,#28]
	sbc	$ff,$ff,$ff		@ broadcast borrow bit
	ldr	lr,[sp],#4		@ pop lr

	adds	$a0,$a0,$ff		@ add synthesized modulus
	str	$a3,[$r_ptr,#12]
	str	$a4,[$r_ptr,#16]
	adcs	$a6,$a6,$ff,lsr#31
	str	$a5,[$r_ptr,#20]
	str	$a6,[$r_ptr,#24]
	str	$a7,[$r_ptr,#28]
.size	__ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from

.type	__ecp_nistz256_sub_morf,%function
__ecp_nistz256_sub_morf:
	str	lr,[sp,#-4]!		@ push lr

	ldr	$t3,[$b_ptr,#12]
	ldr	$t0,[$b_ptr,#16]
	ldr	$t1,[$b_ptr,#20]
	ldr	$t2,[$b_ptr,#24]
	ldr	$t3,[$b_ptr,#28]
	sbc	$ff,$ff,$ff		@ broadcast borrow bit
	ldr	lr,[sp],#4		@ pop lr

	adds	$a0,$a0,$ff		@ add synthesized modulus
	str	$a3,[$r_ptr,#12]
	str	$a4,[$r_ptr,#16]
	adcs	$a6,$a6,$ff,lsr#31
	str	$a5,[$r_ptr,#20]
	str	$a6,[$r_ptr,#24]
	str	$a7,[$r_ptr,#28]
.size	__ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
.type	__ecp_nistz256_add_self,%function
__ecp_nistz256_add_self:
	adds	$a0,$a0,$a0		@ a[0:7]+=a[0:7]

	@ if a+b >= modulus, subtract modulus.
	@
	@ But since comparison implies subtraction, we subtract
	@ modulus and then add it back if subtraction borrowed.

	@ Note that because mod has special form, i.e. consists of
	@ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	@ using value of borrow as a whole or extracting single bit.
	@ Follow $ff register...

	adds	$a0,$a0,$ff		@ add synthesized modulus
	str	$a3,[$r_ptr,#12]
	str	$a4,[$r_ptr,#16]
	adcs	$a6,$a6,$ff,lsr#31
	str	$a5,[$r_ptr,#20]
	str	$a6,[$r_ptr,#24]
	str	$a7,[$r_ptr,#28]
.size	__ecp_nistz256_add_self,.-__ecp_nistz256_add_self
########################################################################
# following subroutines are "literal" implementation of those found in
# ecp_nistz256.c
#
########################################################################
# void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);

my ($S,$M,$Zsqr,$in_x,$tmp0)=map(32*$_,(0..4));
# above map() describes stack layout with 5 temporary
# 256-bit vectors on top. Then note that we push
# starting from r0, which means that we have copy of
# input arguments just below these temporary vectors.
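#
# Concretely (reading offsets off the map() above): S sits at sp+0,
# M at sp+32, Zsqr at sp+64, in_x at sp+96 and tmp0 at sp+128; the
# r0-r3 saved by the stmdb below start at sp+32*5, which is why the
# result pointer is reloaded with ldr from [sp,#32*5].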
.globl	ecp_nistz256_point_double
.type	ecp_nistz256_point_double,%function
ecp_nistz256_point_double:
	stmdb	sp!,{r0-r12,lr}		@ push from r0, unusual, but intentional

.Lpoint_double_shortcut:
	ldmia	$a_ptr!,{r4-r11}	@ copy in_x
	bl	__ecp_nistz256_mul_by_2	@ p256_mul_by_2(S, in_y);

	add	$b_ptr,$a_ptr,#32
	add	$a_ptr,$a_ptr,#32
	add	$r_ptr,sp,#$Zsqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Zsqr, in_z);

	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(S, S);

	ldr	$b_ptr,[sp,#32*5+4]
	add	$a_ptr,$b_ptr,#32
	add	$b_ptr,$b_ptr,#64
	add	$r_ptr,sp,#$tmp0
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(tmp0, in_z, in_y);

	ldr	$r_ptr,[sp,#32*5]
	add	$r_ptr,$r_ptr,#64
	bl	__ecp_nistz256_add_self	@ p256_mul_by_2(res_z, tmp0);

	add	$a_ptr,sp,#$in_x
	add	$b_ptr,sp,#$Zsqr
	bl	__ecp_nistz256_add	@ p256_add(M, in_x, Zsqr);

	add	$a_ptr,sp,#$in_x
	add	$b_ptr,sp,#$Zsqr
	add	$r_ptr,sp,#$Zsqr
	bl	__ecp_nistz256_sub	@ p256_sub(Zsqr, in_x, Zsqr);

	add	$r_ptr,sp,#$tmp0
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(tmp0, S);

	add	$a_ptr,sp,#$Zsqr
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(M, M, Zsqr);

	ldr	$r_ptr,[sp,#32*5]
	add	$a_ptr,sp,#$tmp0
	add	$r_ptr,$r_ptr,#32
	bl	__ecp_nistz256_div_by_2	@ p256_div_by_2(res_y, tmp0);

	bl	__ecp_nistz256_mul_by_3	@ p256_mul_by_3(M, M);

	add	$a_ptr,sp,#$in_x
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S, S, in_x);

	add	$r_ptr,sp,#$tmp0
	bl	__ecp_nistz256_add_self	@ p256_mul_by_2(tmp0, S);

	ldr	$r_ptr,[sp,#32*5]
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(res_x, M);

	add	$b_ptr,sp,#$tmp0
	bl	__ecp_nistz256_sub_from	@ p256_sub(res_x, res_x, tmp0);

	bl	__ecp_nistz256_sub_morf	@ p256_sub(S, S, res_x);

	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S, S, M);

	ldr	$r_ptr,[sp,#32*5]
	add	$b_ptr,$r_ptr,#32
	add	$r_ptr,$r_ptr,#32
	bl	__ecp_nistz256_sub_from	@ p256_sub(res_y, S, res_y);

	add	sp,sp,#32*5+16		@ +16 means "also skip over saved r0-r3"
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_point_double,.-ecp_nistz256_point_double
########################################################################
# void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
#			      const P256_POINT *in2);

my ($res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y,$in2_z,
    $H,$Hsqr,$R,$Rsqr,$Hcub,
    $U1,$U2,$S1,$S2)=map(32*$_,(0..17));
my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
# above map() describes stack layout with 18 temporary
# 256-bit vectors on top. Then note that we push
# starting from r0, which means that we have copy of
# input arguments just below these temporary vectors.
# We use three of them for ~in1infty, ~in2infty and
# result of check for zero.
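#
# Concretely (offsets as used by the code below): ~in1infty is kept at
# [sp,#32*18+4], ~in2infty at [sp,#32*18+8] and the ~is_equal(S1,S2)
# word at [sp,#32*18+12]; the r0-r12,lr pushed on entry start at
# [sp,#32*18+16], which is where the result pointer is reloaded from.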
.globl	ecp_nistz256_point_add
.type	ecp_nistz256_point_add,%function
ecp_nistz256_point_add:
	stmdb	sp!,{r0-r12,lr}		@ push from r0, unusual, but intentional

	ldmia	$b_ptr!,{r4-r11}	@ copy in2_x
	ldmia	$b_ptr!,{r4-r11}	@ copy in2_y
	ldmia	$b_ptr,{r4-r11}		@ copy in2_z
	str	r12,[sp,#32*18+8]	@ ~in2infty

	ldmia	$a_ptr!,{r4-r11}	@ copy in1_x
	ldmia	$a_ptr!,{r4-r11}	@ copy in1_y
	ldmia	$a_ptr,{r4-r11}		@ copy in1_z
	str	r12,[sp,#32*18+4]	@ ~in1infty

	add	$a_ptr,sp,#$in2_z
	add	$b_ptr,sp,#$in2_z
	add	$r_ptr,sp,#$Z2sqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Z2sqr, in2_z);

	add	$a_ptr,sp,#$in1_z
	add	$b_ptr,sp,#$in1_z
	add	$r_ptr,sp,#$Z1sqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Z1sqr, in1_z);

	add	$a_ptr,sp,#$in2_z
	add	$b_ptr,sp,#$Z2sqr
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S1, Z2sqr, in2_z);

	add	$a_ptr,sp,#$in1_z
	add	$b_ptr,sp,#$Z1sqr
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S2, Z1sqr, in1_z);

	add	$a_ptr,sp,#$in1_y
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S1, S1, in1_y);

	add	$a_ptr,sp,#$in2_y
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S2, S2, in2_y);

	bl	__ecp_nistz256_sub_from	@ p256_sub(R, S2, S1);

	orr	$a0,$a0,$a1		@ see if result is zero
	add	$a_ptr,sp,#$in1_x
	add	$b_ptr,sp,#$Z2sqr
	str	$a0,[sp,#32*18+12]
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(U1, in1_x, Z2sqr);

	add	$a_ptr,sp,#$in2_x
	add	$b_ptr,sp,#$Z1sqr
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(U2, in2_x, Z1sqr);

	bl	__ecp_nistz256_sub_from	@ p256_sub(H, U2, U1);

	orr	$a0,$a0,$a1		@ see if result is zero
	orr	$a0,$a0,$a4		@ ~is_equal(U1,U2)

	ldr	$t0,[sp,#32*18+4]	@ ~in1infty
	ldr	$t1,[sp,#32*18+8]	@ ~in2infty
	ldr	$t2,[sp,#32*18+12]	@ ~is_equal(S1,S2)
	mvn	$t0,$t0			@ -1/0 -> 0/-1
	mvn	$t1,$t1			@ -1/0 -> 0/-1
	orrs	$a0,$a0,$t2		@ set flags

	@ if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2))

	ldr	$a_ptr,[sp,#32*18+20]
	add	sp,sp,#32*(18-5)+16	@ difference in frame sizes
	b	.Lpoint_double_shortcut

	add	$r_ptr,sp,#$Rsqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Rsqr, R);

	add	$b_ptr,sp,#$in1_z
	add	$r_ptr,sp,#$res_z
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(res_z, H, in1_z);

	add	$r_ptr,sp,#$Hsqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Hsqr, H);

	add	$a_ptr,sp,#$in2_z
	add	$b_ptr,sp,#$res_z
	add	$r_ptr,sp,#$res_z
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(res_z, res_z, in2_z);

	add	$b_ptr,sp,#$Hsqr
	add	$r_ptr,sp,#$Hcub
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(Hcub, Hsqr, H);

	add	$a_ptr,sp,#$Hsqr
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(U2, U1, Hsqr);

	add	$r_ptr,sp,#$Hsqr
	bl	__ecp_nistz256_add_self	@ p256_mul_by_2(Hsqr, U2);

	add	$b_ptr,sp,#$Rsqr
	add	$r_ptr,sp,#$res_x
	bl	__ecp_nistz256_sub_morf	@ p256_sub(res_x, Rsqr, Hsqr);

	add	$b_ptr,sp,#$Hcub
	bl	__ecp_nistz256_sub_from	@ p256_sub(res_x, res_x, Hcub);

	add	$r_ptr,sp,#$res_y
	bl	__ecp_nistz256_sub_morf	@ p256_sub(res_y, U2, res_x);

	add	$a_ptr,sp,#$Hcub
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S2, S1, Hcub);

	add	$b_ptr,sp,#$res_y
	add	$r_ptr,sp,#$res_y
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(res_y, res_y, R);

	bl	__ecp_nistz256_sub_from	@ p256_sub(res_y, res_y, S2);
	ldr	r11,[sp,#32*18+4]	@ ~in1infty
	ldr	r12,[sp,#32*18+8]	@ ~in2infty
	and	r10,r11,r12		@ ~in1infty & ~in2infty
	mvn	r11,r11			@ in1infty
	and	r11,r11,r12		@ in1infty & ~in2infty
	mvn	r12,r12			@ in2infty
	ldr	$r_ptr,[sp,#32*18+16]
for($i=0;$i<96;$i+=8) {			# conditional moves
	ldmia	r1!,{r4-r5}		@ res_x
	ldmia	r2!,{r6-r7}		@ in2_x
	ldmia	r3!,{r8-r9}		@ in1_x
	and	r4,r4,r10		@ ~in1infty & ~in2infty
	and	r6,r6,r11		@ in1infty & ~in2infty
	and	r8,r8,r12		@ in2infty
	stmia	$r_ptr!,{r4-r5}

	add	sp,sp,#32*18+16+16	@ +16 means "also skip over saved r0-r3"
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_point_add,.-ecp_nistz256_point_add
########################################################################
# void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
#				     const P256_POINT_AFFINE *in2);

my ($res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y,
    $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..14));
my $Z1sqr = $S2;
# above map() describes stack layout with 15 temporary
# 256-bit vectors on top. Then note that we push
# starting from r0, which means that we have copy of
# input arguments just below these temporary vectors.
# We use two of them for ~in1infty, ~in2infty.

my @ONE_mont=(1,0,0,-1,-1,-1,-2,0);
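# @ONE_mont is 1 in Montgomery form, i.e. 2^256 mod P. Sanity check
# (worked out here, not new code): 2^256 - P = 2^224 - 2^192 - 2^96 + 1,
# which in little-endian 32-bit words is exactly
# (1, 0, 0, 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0),
# matching the signed values above.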
.globl	ecp_nistz256_point_add_affine
.type	ecp_nistz256_point_add_affine,%function
ecp_nistz256_point_add_affine:
	stmdb	sp!,{r0-r12,lr}		@ push from r0, unusual, but intentional

	ldmia	$a_ptr!,{r4-r11}	@ copy in1_x
	ldmia	$a_ptr!,{r4-r11}	@ copy in1_y
	ldmia	$a_ptr,{r4-r11}		@ copy in1_z
	str	r12,[sp,#32*15+4]	@ ~in1infty

	ldmia	$b_ptr!,{r4-r11}	@ copy in2_x
	ldmia	$b_ptr!,{r4-r11}	@ copy in2_y
	str	r12,[sp,#32*15+8]	@ ~in2infty

	add	$a_ptr,sp,#$in1_z
	add	$b_ptr,sp,#$in1_z
	add	$r_ptr,sp,#$Z1sqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Z1sqr, in1_z);

	add	$a_ptr,sp,#$Z1sqr
	add	$b_ptr,sp,#$in2_x
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(U2, Z1sqr, in2_x);

	add	$b_ptr,sp,#$in1_x
	bl	__ecp_nistz256_sub_from	@ p256_sub(H, U2, in1_x);

	add	$a_ptr,sp,#$Z1sqr
	add	$b_ptr,sp,#$in1_z
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S2, Z1sqr, in1_z);

	add	$b_ptr,sp,#$in1_z
	add	$r_ptr,sp,#$res_z
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(res_z, H, in1_z);

	add	$a_ptr,sp,#$in2_y
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S2, S2, in2_y);

	add	$b_ptr,sp,#$in1_y
	bl	__ecp_nistz256_sub_from	@ p256_sub(R, S2, in1_y);

	add	$r_ptr,sp,#$Hsqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Hsqr, H);

	add	$r_ptr,sp,#$Rsqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Rsqr, R);

	add	$b_ptr,sp,#$Hsqr
	add	$r_ptr,sp,#$Hcub
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(Hcub, Hsqr, H);

	add	$a_ptr,sp,#$Hsqr
	add	$b_ptr,sp,#$in1_x
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(U2, in1_x, Hsqr);

	add	$r_ptr,sp,#$Hsqr
	bl	__ecp_nistz256_add_self	@ p256_mul_by_2(Hsqr, U2);

	add	$b_ptr,sp,#$Rsqr
	add	$r_ptr,sp,#$res_x
	bl	__ecp_nistz256_sub_morf	@ p256_sub(res_x, Rsqr, Hsqr);

	add	$b_ptr,sp,#$Hcub
	bl	__ecp_nistz256_sub_from	@ p256_sub(res_x, res_x, Hcub);

	add	$r_ptr,sp,#$res_y
	bl	__ecp_nistz256_sub_morf	@ p256_sub(res_y, U2, res_x);

	add	$a_ptr,sp,#$Hcub
	add	$b_ptr,sp,#$in1_y
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S2, in1_y, Hcub);

	add	$b_ptr,sp,#$res_y
	add	$r_ptr,sp,#$res_y
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(res_y, res_y, R);

	bl	__ecp_nistz256_sub_from	@ p256_sub(res_y, res_y, S2);
	ldr	r11,[sp,#32*15+4]	@ ~in1infty
	ldr	r12,[sp,#32*15+8]	@ ~in2infty
	and	r10,r11,r12		@ ~in1infty & ~in2infty
	mvn	r11,r11			@ in1infty
	and	r11,r11,r12		@ in1infty & ~in2infty
	mvn	r12,r12			@ in2infty
	ldr	$r_ptr,[sp,#32*15]
for($i=0;$i<64;$i+=8) {			# conditional moves
	ldmia	r1!,{r4-r5}		@ res_x
	ldmia	r2!,{r6-r7}		@ in2_x
	ldmia	r3!,{r8-r9}		@ in1_x
	and	r4,r4,r10		@ ~in1infty & ~in2infty
	and	r6,r6,r11		@ in1infty & ~in2infty
	and	r8,r8,r12		@ in2infty
	stmia	$r_ptr!,{r4-r5}

	ldmia	r1!,{r4-r5}		@ res_z
	ldmia	r3!,{r8-r9}		@ in1_z
	and	r6,r11,#@ONE_mont[$j]
	and	r7,r11,#@ONE_mont[$j+1]
	stmia	$r_ptr!,{r4-r5}

	add	sp,sp,#32*15+16		@ +16 means "also skip over saved r0-r3"
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;

	print $_,"\n";
}
close STDOUT or die "error closing STDOUT: $!";	# enforce flush