# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# Montgomery multiplication routine for x86_64. While it gives a
# modest 9% improvement for rsa4096 sign on Opteron, rsa512 sign runs
# more than twice (>2x) as fast. The most common case, rsa1024 sign,
# is improved by a respectable 50%. It remains to be seen whether loop
# unrolling and a dedicated squaring routine can provide further
# improvement...
# Add dedicated squaring procedure. Performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
# Unroll and modulo-schedule the inner loops in such a manner that
# they "fall through" for an input length of 8, which is critical for
# 1024-bit RSA *sign*. The average performance improvement over the
# *initial* 2005 version of this module is ~0%/30%/40%/45% for
# 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
# Optimize reduction in the squaring procedure and improve 1024+-bit
# RSA sign performance by 10-16% on Intel Sandy Bridge and later
# (virtually the same on non-Intel processors).
# Add MULX/ADOX/ADCX code path.
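#
# For reference: with R = 2^(64*num) and n0 = -np^-1 mod 2^64, the
# routines below compute rp[] = ap[]*bp[]*R^-1 mod np[] (Montgomery
# multiplication).  The following big-integer sketch of that operation
# is illustrative only and is never called by this generator; it
# assumes the core Math::BigInt module, and the helper name
# ref_mont_mul is made up here.
sub ref_mont_mul {
	my ($a, $b, $n, $num) = @_;		# Math::BigInt values, limb count
	require Math::BigInt;
	my $R  = Math::BigInt->new(1) << (64*$num);	# R = 2^(64*num)
	my $ni = $n->copy->bmodinv($R);			# n^-1 mod R
	my $t  = $a * $b;				# t = a*b
	my $m  = ($t * ($R - $ni)) % $R;		# m = t*(-n^-1) mod R
	$t = ($t + $m * $n) / $R;			# REDC: division by R is exact
	$t -= $n if $t >= $n;				# final conditional subtraction
	return $t;					# == a*b*R^-1 mod n
}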
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
open OUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	`ml64 2>&1` =~ /Version ([0-9]+)\./) {
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num);
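
# The code below carries out the operation one 64-bit word of bp[] at
# a time: for each i it picks m1 = (tp[0]+ap[0]*bp[i])*n0 mod 2^64 so
# that the lowest limb of the running sum cancels (the "discarded"
# additions), shifts the remainder down by one limb and keeps the
# extra top word as the "upmost overflow bit".  A limb-level sketch,
# illustrative only and assuming Math::BigInt for the 64x64-bit
# products (the helper name ref_mont_mul_words is made up here):
sub ref_mont_mul_words {
	my ($a, $b, $n, $n0, $num) = @_;	# refs to 64-bit limb arrays, n0 = -n[0]^-1 mod 2^64
	require Math::BigInt;
	my $W = Math::BigInt->new(1) << 64;
	my @t = map { Math::BigInt->new(0) } 0 .. $num;		# num+1 running limbs
	for my $i (0 .. $num - 1) {
		my $m0 = Math::BigInt->new($b->[$i]);
		my $m1 = ($t[0] + Math::BigInt->new($a->[0]) * $m0)
			 * Math::BigInt->new($n0) % $W;		# "tp[0]"*n0
		my $c = Math::BigInt->new(0);
		for my $j (0 .. $num - 1) {
			my $v = $t[$j] + Math::BigInt->new($a->[$j]) * $m0
				       + Math::BigInt->new($n->[$j]) * $m1 + $c;
			$c = $v / $W;				# carry into next limb
			$t[$j - 1] = $v % $W if $j > 0;		# low limb of j==0 is 0, discarded
		}
		my $v = $t[$num] + $c;
		$t[$num - 1] = $v % $W;
		$t[$num]     = $v / $W;				# upmost overflow bit
	}
	return \@t;	# caller still subtracts n once if the result is >= n
}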
.extern	OPENSSL_ia32cap_P
.type	bn_mul_mont,\@function,6
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+2))
	and	\$-1024,%rsp		# minimize TLB usage
	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
	mov	$bp,%r12		# reassign $bp
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mulq	$m0			# ap[0]*bp[0]
	imulq	$lo0,$m1		# "tp[0]"*n0
	add	%rax,$lo0		# discarded
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	mov	($ap),%rax		# ap[0]
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
	mov	($bp,$i,8),$m0		# m0=bp[i]
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
	imulq	$lo0,$m1		# tp[0]*n0
	add	%rax,$lo0		# discarded
	mov	8(%rsp),$lo0		# tp[1]
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
	mov	($ap),%rax		# ap[0]
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	add	$lo0,$hi1		# pull upmost overflow bit
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
	xor	$i,$i			# i=0 and clear CF!
	mov	(%rsp),%rax		# tp[0]
	lea	(%rsp),$ap		# borrow ap for tp
.Lsub:	sbb	($np,$i,8),%rax
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
	mov	8($ap,$i,8),%rax	# tp[i+1]
	dec	$j			# doesn't affect CF!
	sbb	\$0,%rax		# handle upmost overflow bit
.Lcopy:					# copy or in-place refresh
	xor	$np,$ap			# conditional select:
	and	%rax,$ap		#  ((ap ^ np) & %rax) ^ np
	xor	$np,$ap			#  ap = borrow?tp:rp
	mov	$i,(%rsp,$i,8)		# zap temporary vector
	mov	$ap,($rp,$i,8)		# rp[i]=tp[i]
	mov	8(%rsp,$num,8),%rsi	# restore %rsp
.size	bn_mul_mont,.-bn_mul_mont
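
# The .Lcopy loop above selects between tp[] and rp[] without a
# branch: with %rax = all ones when the final subtraction borrowed
# and 0 otherwise, ((tp ^ rp) & mask) ^ rp yields tp when the mask is
# set and rp when it is clear.  The same trick on plain 64-bit words,
# as a minimal sketch (ref_ct_select is a made-up name):
sub ref_ct_select {
	my ($mask, $tp_word, $rp_word) = @_;	# $mask is 0 or all ones
	return (($tp_word ^ $rp_word) & $mask) ^ $rp_word;
}
# e.g. ref_ct_select(~0, 7, 9) == 7, while ref_ct_select(0, 7, 9) == 9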
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
.type	bn_mul4x_mont,\@function,6
$code.=<<___ if ($addx);
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+4))
	and	\$-1024,%rsp		# minimize TLB usage
	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
	mov	%rdx,%r12		# reassign $bp
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mulq	$m0			# ap[0]*bp[0]
	imulq	$A[0],$m1		# "tp[0]"*n0
	add	%rax,$A[0]		# discarded
	mulq	$m0			# ap[j]*bp[0]
	mov	-16($np,$j,8),%rax
	mov	-8($ap,$j,8),%rax
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	mov	-8($np,$j,8),%rax
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	mov	-16($ap,$j,8),%rax
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	mov	-16($np,$j,8),%rax
	mov	-8($ap,$j,8),%rax
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	mov	-8($np,$j,8),%rax
	mov	($ap),%rax		# ap[0]
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit
	mov	($bp,$i,8),$m0		# m0=bp[i]
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
	imulq	$A[0],$m1		# tp[0]*n0
	add	%rax,$A[0]		# "$N[0]", discarded
	mulq	$m0			# ap[j]*bp[i]
	add	8(%rsp),$A[1]		# +tp[1]
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	$N[1],(%rsp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	-16($np,$j,8),%rax
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	mov	-8($ap,$j,8),%rax
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	-8($np,$j,8),%rax
	add	-8(%rsp,$j,8),$A[1]
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	add	(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	add	8(%rsp,$j,8),$A[1]
	mov	-16($ap,$j,8),%rax
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	-16($np,$j,8),%rax
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	mov	-8($ap,$j,8),%rax
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	-8($np,$j,8),%rax
	add	-8(%rsp,$j,8),$A[1]
	mov	($ap),%rax		# ap[0]
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	add	(%rsp,$num,8),$N[0]	# pull upmost overflow bit
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit
my @ri=("%rax","%rdx",$m0,$m1);
	mov	16(%rsp,$num,8),$rp	# restore $rp
	mov	0(%rsp),@ri[0]		# tp[0]
	mov	8(%rsp),@ri[1]		# tp[1]
	shr	\$2,$num		# num/=4
	lea	(%rsp),$ap		# borrow ap for tp
	xor	$i,$i			# i=0 and clear CF!
	mov	16($ap),@ri[2]		# tp[2]
	mov	24($ap),@ri[3]		# tp[3]
	lea	-1($num),$j		# j=num/4-1
	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	16($np,$i,8),@ri[2]
	mov	32($ap,$i,8),@ri[0]	# tp[i+1]
	mov	40($ap,$i,8),@ri[1]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	32($np,$i,8),@ri[0]
	mov	48($ap,$i,8),@ri[2]
	mov	56($ap,$i,8),@ri[3]
	sbb	40($np,$i,8),@ri[1]
	dec	$j			# doesn't affect CF!
	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	32($ap,$i,8),@ri[0]	# load overflow bit
	sbb	16($np,$i,8),@ri[2]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	\$0,@ri[0]		# handle upmost overflow bit
	punpcklqdq %xmm0,%xmm0		# extend mask to 128 bits
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
.Lcopy4x:				# copy or in-place refresh
	movdqu	(%rsp,$i),%xmm2
	movdqu	16(%rsp,$i),%xmm4
	movdqu	($rp,$i),%xmm1
	movdqu	16($rp,$i),%xmm3
	pxor	%xmm1,%xmm2		# conditional select
	movdqu	%xmm2,($rp,$i)
	movdqu	%xmm4,16($rp,$i)
	movdqa	%xmm5,(%rsp,$i)		# zap temporary vectors
	movdqa	%xmm5,16(%rsp,$i)
	mov	8(%rsp,$num,8),%rsi	# restore %rsp
.size	bn_mul4x_mont,.-bn_mul4x_mont
######################################################################
# void bn_sqr8x_mont(
my $rptr="%rdi";	# BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# not used
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0,
my $num ="%r9";		# int num); has to be divisible by 8
my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
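
# In terms of the ref_mont_mul sketch near the top of this file, the
# squaring path computes rp = ap^2 * R^-1 mod np for num divisible by
# 8; a dedicated squaring kernel wins only by sharing the symmetric
# cross products.  Minimal illustration (ref_mont_sqr is a made-up
# name and, like the other sketches, is never called here):
sub ref_mont_sqr {
	my ($a, $n, $num) = @_;			# Math::BigInt values, limb count
	return ref_mont_mul($a, $a, $n, $num);	# a*a*R^-1 mod n, R = 2^(64*num)
}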
$code.=<<___ if ($addx);
.extern	bn_sqrx8x_internal		# see x86_64-mont5 module
.extern	bn_sqr8x_internal		# see x86_64-mont5 module
.type	bn_sqr8x_mont,\@function,6
	shl	\$3,${num}d		# convert $num to bytes
	shl	\$3+2,%r10		# 4*$num
	##############################################################
	# Ensure that the stack frame doesn't alias with $aptr modulo
	# 4096. This is done to allow the memory disambiguation logic
	# to do its job.
	lea	-64(%rsp,$num,4),%r11
	sub	%r11,%rsp		# align with $aptr
	lea	-64(%rsp,$num,4),%rsp	# alloca(frame+4*$num)
	lea	4096-64(,$num,4),%r10	# 4096-frame-4*$num
	lea	-64(%rsp,$num,4),%rsp	# alloca(frame+4*$num)
	lea	64(%rsp,$num,2),%r11	# copy of modulus
	mov	%rax, 40(%rsp)		# save original %rsp
	movq	%r11, %xmm2		# save pointer to modulus copy
	mov	OPENSSL_ia32cap_P+8(%rip),%eax
	movq	8*0($nptr),%xmm0
	movq	8*1($nptr),%xmm1
	movq	8*2($nptr),%xmm3
	movq	8*3($nptr),%xmm4
	movdqa	%xmm0,16*0(%r11)
	movdqa	%xmm1,16*1(%r11)
	movdqa	%xmm3,16*2(%r11)
	movdqa	%xmm4,16*3(%r11)
	movq	$rptr,%xmm1		# save $rptr
	movq	%r10, %xmm3		# -$num
$code.=<<___ if ($addx);
	call	bn_sqrx8x_internal	# see x86_64-mont5 module
	lea	64(%rsp,$num,2),%rdx
	mov	40(%rsp),%rsi		# restore %rsp
	call	bn_sqr8x_internal	# see x86_64-mont5 module
	lea	64(%rsp,$num,2),%rdx
	mov	40(%rsp),%rsi		# restore %rsp
	movdqa	%xmm0,16*0(%rax)	# wipe t
	movdqa	%xmm0,16*1(%rax)
	movdqa	%xmm0,16*2(%rax)
	movdqa	%xmm0,16*3(%rax)
	movdqa	%xmm0,16*0(%rdx)	# wipe n
	movdqa	%xmm0,16*1(%rdx)
	movdqa	%xmm0,16*2(%rdx)
	movdqa	%xmm0,16*3(%rdx)
.size	bn_sqr8x_mont,.-bn_sqr8x_mont
my $bp="%rdx";	# original value
.type	bn_mulx4x_mont,\@function,6
	shl	\$3,${num}d		# convert $num to bytes
	sub	$num,%r10		# -$num
	lea	-72(%rsp,%r10),%rsp	# alloca(frame+$num+8)
	##############################################################
	# +8	off-loaded &b[i]
	mov	$num,0(%rsp)		# save $num
	mov	%r10,16(%rsp)		# end of b[num]
	mov	$n0, 24(%rsp)		# save *n0
	mov	$rp, 32(%rsp)		# save $rp
	mov	%rax,40(%rsp)		# save original %rsp
	mov	$num,48(%rsp)		# inner counter
my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
    ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
	mov	($bp),%rdx		# b[0], $bp==%rdx actually
	lea	64+32(%rsp),$tptr
	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
	mulx	1*8($aptr),%r11,%r14	# a[1]*b[0]
	mov	$bptr,8(%rsp)		# off-load &b[i]
	mulx	2*8($aptr),%r12,%r13	# ...
	mov	$mi,$bptr		# borrow $bptr
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0
	mulx	3*8($aptr),%rax,%r14
	adcx	$zero,%r14		# cf=0
	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,$bptr		# discarded
	mulx	1*8($nptr),%rax,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00	# mulx	2*8($nptr),%rax,%r12
	mov	48(%rsp),$bptr		# counter value
	mulx	3*8($nptr),%rax,%r15
	adox	$zero,%r15		# of=0
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
	mulx	2*8($aptr),%r12,%rax	# ...
	mulx	3*8($aptr),%r13,%r14
	adcx	$zero,%r14		# cf=0
	mulx	0*8($nptr),%rax,%r15
	mulx	1*8($nptr),%rax,%r15
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	mov	%r11,-4*8($tptr)
	mulx	3*8($nptr),%rax,%r15
	mov	%r12,-3*8($tptr)
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)
	dec	$bptr			# of=0, pass cf
	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)
	mov	($bptr),%rdx		# b[i]
	lea	8($bptr),$bptr		# b++
	sub	$num,$aptr		# rewind $aptr
	mov	%r15,($tptr)		# save top-most carry
	lea	64+4*8(%rsp),$tptr
	sub	$num,$nptr		# rewind $nptr
	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
	xor	%ebp,%ebp		# xor	$zero,$zero	# cf=0, of=0
	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
	adox	-4*8($tptr),$mi
	mulx	2*8($aptr),%r15,%r13	# ...
	adox	-3*8($tptr),%r11
	mov	$bptr,8(%rsp)		# off-load &b[i]
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	%ebp,%ebp		# xor	$zero,$zero	# cf=0, of=0
	mulx	3*8($aptr),%rax,%r14
	adox	-2*8($tptr),%r12
	adox	-1*8($tptr),%r13
	lea	4*8($aptr),$aptr
	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	mulx	1*8($nptr),%rax,%r11
	mulx	2*8($nptr),%rax,%r12
	mov	%r10,-4*8($tptr)
	mulx	3*8($nptr),%rax,%r15
	mov	%r11,-3*8($tptr)
	lea	4*8($nptr),$nptr
	adox	$zero,%r15		# of=0
	mov	48(%rsp),$bptr		# counter value
	mov	%r12,-2*8($tptr)
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
	adcx	0*8($tptr),%r10
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	1*8($tptr),%r11
	mulx	3*8($aptr),%r13,%r14
	adcx	2*8($tptr),%r12
	adcx	3*8($tptr),%r13
	adox	$zero,%r14		# of=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	adcx	$zero,%r14		# cf=0
	mulx	0*8($nptr),%rax,%r15
	mulx	1*8($nptr),%rax,%r15
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	mulx	3*8($nptr),%rax,%r15
	mov	%r11,-4*8($tptr)
	mov	%r12,-3*8($tptr)
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)
	dec	$bptr			# of=0, pass cf
	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	sub	0*8($tptr),$zero	# pull top-most carry
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)
	sub	%r14,$mi		# compare top-most words
	mov	32(%rsp),$rptr		# restore rp
	mov	0*8($nptr,$num),%r8
	mov	1*8($nptr,$num),%r9
	jmp	.Lmulx4x_sub_entry
	mov	0*8($nptr,$num),%r8
	mov	1*8($nptr,$num),%r9
	mov	2*8($nptr,$num),%r10
	mov	3*8($nptr,$num),%r11
	neg	%rdx			# mov %rdx,%cf
	movdqa	%xmm0,($tptr)
	movdqa	%xmm0,16($tptr)
	lea	4*8($tptr),$tptr
	sbb	%rdx,%rdx		# mov %cf,%rdx
	lea	4*8($rptr),$rptr
	mov	40(%rsp),%rsi		# restore %rsp
.size	bn_mulx4x_mont,.-bn_mulx4x_mont
.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip
	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData
	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail
	mov	152($context),%rax	# pull context->Rsp
	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail
	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15
	jmp	.Lcommon_seh_tail
.size	mul_handler,.-mul_handler
.type	sqr_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip
	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData
	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<.Lsqr_body
	jb	.Lcommon_seh_tail
	mov	152($context),%rax	# pull context->Rsp
	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lsqr_epilogue
	jae	.Lcommon_seh_tail
	mov	40(%rax),%rax		# pull saved stack pointer
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi
	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)/8, quadword count for rep movsq
	.long	0xa548f3fc		# cld; rep movsq
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)
	mov	\$1,%eax		# ExceptionContinueSearch
.size	sqr_handler,.-sqr_handler
	.rva	.LSEH_begin_bn_mul_mont
	.rva	.LSEH_end_bn_mul_mont
	.rva	.LSEH_info_bn_mul_mont
	.rva	.LSEH_begin_bn_mul4x_mont
	.rva	.LSEH_end_bn_mul4x_mont
	.rva	.LSEH_info_bn_mul4x_mont
	.rva	.LSEH_begin_bn_sqr8x_mont
	.rva	.LSEH_end_bn_sqr8x_mont
	.rva	.LSEH_info_bn_sqr8x_mont
$code.=<<___ if ($addx);
	.rva	.LSEH_begin_bn_mulx4x_mont
	.rva	.LSEH_end_bn_mulx4x_mont
	.rva	.LSEH_info_bn_mulx4x_mont
.LSEH_info_bn_mul_mont:
	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
.LSEH_info_bn_mul4x_mont:
	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
.LSEH_info_bn_sqr8x_mont:
	.rva	.Lsqr8x_body,.Lsqr8x_epilogue	# HandlerData[]
$code.=<<___ if ($addx);
.LSEH_info_bn_mulx4x_mont:
	.rva	.Lmulx4x_body,.Lmulx4x_epilogue	# HandlerData[]