1 /* ieee754-df.S double-precision floating point support for ARM
3 Copyright (C) 2003, 2004, 2005, 2007, 2008, 2009, 2012
4 Free Software Foundation, Inc.
5 Contributed by Nicolas Pitre (nico@cam.org)
7 This file is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
12 This file is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
29 * The goal of this code is to be as fast as possible. This is
30 * not meant to be easy to understand for the casual reader.
31 * For slightly simpler code please see the single precision version
34 * Only the default rounding mode is intended for best performance.
35 * Exceptions aren't supported yet, but that can be added quite easily
36 * if necessary without impacting performance.
56 ARM_FUNC_ALIAS aeabi_dneg negdf2
59 eor xh, xh, #0x80000000
67 #ifdef L_arm_addsubdf3
69 ARM_FUNC_START aeabi_drsub
71 eor xh, xh, #0x80000000 @ flip sign bit of first arg
75 ARM_FUNC_ALIAS aeabi_dsub subdf3
77 eor yh, yh, #0x80000000 @ flip sign bit of second arg
78 #if defined(__INTERWORKING_STUBS__)
79 b 1f @ Skip Thumb-code prologue
83 ARM_FUNC_ALIAS aeabi_dadd adddf3
85 1: do_push {r4, r5, lr}
87 @ Look for zeroes, equal values, INF, or NAN.
88 shift1 lsl, r4, xh, #1
89 shift1 lsl, r5, yh, #1
94 COND(orr,s,ne) ip, r4, xl
95 COND(orr,s,ne) ip, r5, yl
96 COND(mvn,s,ne) ip, r4, asr #21
97 COND(mvn,s,ne) ip, r5, asr #21
100 @ Compute exponent difference. Make largest exponent in r4,
101 @ corresponding arg in xh-xl, and positive exponent difference in r5.
102 shift1 lsr, r4, r4, #21
103 rsbs r5, r4, r5, lsr #21
115 @ If exponent difference is too large, return largest argument
116 @ already in xh-xl. We need up to 54 bits to handle proper rounding
122 @ Convert mantissa to signed integer.
126 orr xh, ip, xh, lsr #12
128 #if defined(__thumb2__)
130 sbc xh, xh, xh, lsl #1
138 orr yh, ip, yh, lsr #12
140 #if defined(__thumb2__)
142 sbc yh, yh, yh, lsl #1
148 @ If exponent == difference, one or both args were denormalized.
149 @ Since this is not a common case, rescale them off line.
154 @ Compensate for the exponent overlapping the mantissa MSB added later
157 @ Shift yh-yl right per r5, add to xh-xl, keep leftover bits into ip.
160 shift1 lsl, ip, yl, lr
161 shiftop adds xl xl yl lsr r5 yl
163 shiftop adds xl xl yh lsl lr yl
164 shiftop adcs xh xh yh asr r5 yh
169 shift1 lsl,ip, yh, lr
171 orrcs ip, ip, #2 @ 2 not 1, to allow lsr #1 later
172 shiftop adds xl xl yh asr r5 yh
173 adcs xh, xh, yh, asr #31
175 @ We now have a result in xh-xl-ip.
176 @ Keep absolute value in xh-xl-ip, sign in r5 (the n bit was set above)
177 and r5, xh, #0x80000000
179 #if defined(__thumb2__)
190 @ Determine how to normalize the result.
197 @ Result needs to be shifted right.
203 @ Make sure we did not bust our exponent.
208 @ Our result is now properly aligned into xh-xl, remaining bits in ip.
209 @ Round with MSB of ip. If halfway between two numbers, round towards
211 @ Pack final result together.
215 COND(mov,s,eq) ip, xl, lsr #1
217 adc xh, xh, r4, lsl #20
221 @ Result must be shifted left and exponent adjusted.
230 @ No rounding necessary since ip will always be 0 at this point.
242 movhs r2, r2, lsr #16
252 sublo r3, r3, r2, lsr #1
253 sub r3, r3, r2, lsr #3
268 @ determine how to shift the value.
274 @ shift value left 21 to 31 bits, or actually right 11 to 1 bits
275 @ since a register switch happened above.
278 shift1 lsl, xl, xh, ip
279 shift1 lsr, xh, xh, r2
282 @ actually shift value left 1 to 20 bits, which might also represent
283 @ 32 to 52 bits if counting the register switch that happened earlier.
287 shift1 lsl, xh, xh, r2
288 #if defined(__thumb2__)
294 orrle xh, xh, xl, lsr ip
298 @ adjust exponent accordingly.
301 addge xh, xh, r4, lsl #20
305 @ Exponent too small, denormalize result.
306 @ Find out proper shift value.
313 @ shift result right of 1 to 20 bits, sign is in r5.
316 shift1 lsr, xl, xl, r4
317 shiftop orr xl xl xh lsl r2 yh
318 shiftop orr xh r5 xh lsr r4 yh
321 @ shift result right of 21 to 31 bits, or left 11 to 1 bits after
322 @ a register switch from xh to xl.
325 shift1 lsr, xl, xl, r2
326 shiftop orr xl xl xh lsl r4 yh
330 @ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
332 2: shift1 lsr, xl, xh, r4
336 @ Adjust exponents for denormalized arguments.
337 @ Note that r4 must not remain equal to 0.
340 eor yh, yh, #0x00100000
342 eoreq xh, xh, #0x00100000
351 COND(mvn,s,ne) ip, r5, asr #21
359 @ Result is x + 0.0 = x or 0.0 + y = y.
368 @ Result is x - x = 0.
374 @ Result is x + x = 2x.
380 orrcs xh, xh, #0x80000000
382 2: adds r4, r4, #(2 << 21)
384 addcc xh, xh, #(1 << 20)
386 and r5, xh, #0x80000000
388 @ Overflow: return INF.
390 orr xh, r5, #0x7f000000
391 orr xh, xh, #0x00f00000
395 @ At least one of x or y is INF/NAN.
396 @ if xh-xl != INF/NAN: return yh-yl (which is INF/NAN)
397 @ if yh-yl != INF/NAN: return xh-xl (which is INF/NAN)
398 @ if either is NAN: return NAN
399 @ if opposite sign: return NAN
400 @ otherwise return xh-xl (which is INF or -INF)
406 COND(mvn,s,eq) ip, r5, asr #21
410 orrs r4, xl, xh, lsl #12
412 COND(orr,s,eq) r5, yl, yh, lsl #12
414 orrne xh, xh, #0x00080000 @ quiet NAN
422 ARM_FUNC_START floatunsidf
423 ARM_FUNC_ALIAS aeabi_ui2d floatunsidf
430 mov r4, #0x400 @ initial exponent
431 add r4, r4, #(52-1 - 1)
432 mov r5, #0 @ sign bit is 0
442 ARM_FUNC_START floatsidf
443 ARM_FUNC_ALIAS aeabi_i2d floatsidf
450 mov r4, #0x400 @ initial exponent
451 add r4, r4, #(52-1 - 1)
452 ands r5, r0, #0x80000000 @ sign bit in r5
454 rsbmi r0, r0, #0 @ absolute value
464 ARM_FUNC_START extendsfdf2
465 ARM_FUNC_ALIAS aeabi_f2d extendsfdf2
467 movs r2, r0, lsl #1 @ toss sign bit
468 mov xh, r2, asr #3 @ stretch exponent
469 mov xh, xh, rrx @ retrieve sign bit
470 mov xl, r2, lsl #28 @ retrieve remaining bits
472 COND(and,s,ne) r3, r2, #0xff000000 @ isolate exponent
473 teqne r3, #0xff000000 @ if not 0, check if INF or NAN
474 eorne xh, xh, #0x38000000 @ fixup exponent otherwise.
475 RETc(ne) @ and return it.
477 teq r2, #0 @ if actually 0
479 teqne r3, #0xff000000 @ or INF or NAN
480 RETc(eq) @ we are done already.
482 @ value was denormalized. We can normalize it now.
484 mov r4, #0x380 @ setup corresponding exponent
485 and r5, xh, #0x80000000 @ move sign bit in r5
486 bic xh, xh, #0x80000000
492 ARM_FUNC_START floatundidf
493 ARM_FUNC_ALIAS aeabi_ul2d floatundidf
504 ARM_FUNC_START floatdidf
505 ARM_FUNC_ALIAS aeabi_l2d floatdidf
513 ands r5, ah, #0x80000000 @ sign bit in r5
515 #if defined(__thumb2__)
517 sbc ah, ah, ah, lsl #1
523 mov r4, #0x400 @ initial exponent
524 add r4, r4, #(52-1 - 1)
526 @ If FP word order does not match integer word order, swap the words.
536 @ The value is too big. Scale it down a bit...
544 add r2, r2, ip, lsr #3
547 shift1 lsl, ip, xl, r3
548 shift1 lsr, xl, xl, r2
549 shiftop orr xl xl xh lsl r3 lr
550 shift1 lsr, xh, xh, r2
559 #endif /* L_addsubdf3 */
561 #ifdef L_arm_muldivdf3
563 ARM_FUNC_START muldf3
564 ARM_FUNC_ALIAS aeabi_dmul muldf3
565 do_push {r4, r5, r6, lr}
567 @ Mask out exponents, trap any zero/denormal/INF/NAN.
570 ands r4, ip, xh, lsr #20
572 COND(and,s,ne) r5, ip, yh, lsr #20
577 @ Add exponents together
580 @ Determine final sign.
583 @ Convert mantissa to unsigned integer.
584 @ If power of two, branch to a separate path.
585 bic xh, xh, ip, lsl #21
586 bic yh, yh, ip, lsl #21
587 orrs r5, xl, xh, lsl #12
589 COND(orr,s,ne) r5, yl, yh, lsl #12
590 orr xh, xh, #0x00100000
591 orr yh, yh, #0x00100000
596 @ Put sign bit in r6, which will be restored in yl later.
597 and r6, r6, #0x80000000
599 @ Well, no way to make it shorter without the umull instruction.
600 stmfd sp!, {r6, r7, r8, r9, sl, fp}
605 bic xl, xl, r7, lsl #16
606 bic yl, yl, r8, lsl #16
607 bic xh, xh, r9, lsl #16
608 bic yh, yh, sl, lsl #16
612 adds ip, ip, fp, lsl #16
613 adc lr, lr, fp, lsr #16
615 adds ip, ip, fp, lsl #16
616 adc lr, lr, fp, lsr #16
619 adds lr, lr, fp, lsl #16
620 adc r5, r5, fp, lsr #16
622 adds lr, lr, fp, lsl #16
623 adc r5, r5, fp, lsr #16
625 adds lr, lr, fp, lsl #16
626 adc r5, r5, fp, lsr #16
628 adds lr, lr, fp, lsl #16
629 adc r5, r5, fp, lsr #16
632 adds r5, r5, fp, lsl #16
633 adc r6, r6, fp, lsr #16
635 adds r5, r5, fp, lsl #16
636 adc r6, r6, fp, lsr #16
652 ldmfd sp!, {yl, r7, r8, r9, sl, fp}
656 @ Here is the actual multiplication.
660 and yl, r6, #0x80000000
667 @ The LSBs in ip are only significant for the final rounding.
673 @ Adjust result upon the MSB position.
675 cmp r6, #(1 << (20-11))
682 @ Shift to final position, add sign to result.
683 orr xh, yl, r6, lsl #11
684 orr xh, xh, r5, lsr #21
686 orr xl, xl, lr, lsr #21
689 @ Check exponent range for under/overflow.
690 subs ip, r4, #(254 - 1)
695 @ Round the result, merge final exponent.
698 COND(mov,s,eq) lr, xl, lsr #1
700 adc xh, xh, r4, lsl #20
703 @ Multiplication by 0x1p*: let's shortcut a lot of code.
705 and r6, r6, #0x80000000
709 subs r4, r4, ip, lsr #1
711 COND(rsb,s,gt) r5, r4, ip
712 orrgt xh, xh, r4, lsl #20
713 RETLDM "r4, r5, r6" gt
715 @ Under/overflow: fix things up for the code below.
716 orr xh, xh, #0x00100000
724 @ Check if denormalized result is possible, otherwise return signed 0.
728 bicle xh, xh, #0x7fffffff
729 RETLDM "r4, r5, r6" le
731 @ Find out proper shift value.
738 @ shift result right of 1 to 20 bits, preserve sign bit, round, etc.
741 shift1 lsl, r3, xl, r5
742 shift1 lsr, xl, xl, r4
743 shiftop orr xl xl xh lsl r5 r2
744 and r2, xh, #0x80000000
745 bic xh, xh, #0x80000000
746 adds xl, xl, r3, lsr #31
747 shiftop adc xh r2 xh lsr r4 r6
748 orrs lr, lr, r3, lsl #1
750 biceq xl, xl, r3, lsr #31
753 @ shift result right of 21 to 31 bits, or left 11 to 1 bits after
754 @ a register switch from xh to xl. Then round.
757 shift1 lsl, r3, xl, r4
758 shift1 lsr, xl, xl, r5
759 shiftop orr xl xl xh lsl r4 r2
760 bic xh, xh, #0x7fffffff
761 adds xl, xl, r3, lsr #31
763 orrs lr, lr, r3, lsl #1
765 biceq xl, xl, r3, lsr #31
768 @ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
769 @ from xh to xl. Leftover bits are in r3-r6-lr for rounding.
771 shiftop orr lr lr xl lsl r5 r2
772 shift1 lsr, r3, xl, r4
773 shiftop orr r3 r3 xh lsl r5 r2
774 shift1 lsr, xl, xh, r4
775 bic xh, xh, #0x7fffffff
776 shiftop bic xl xl xh lsr r4 r2
777 add xl, xl, r3, lsr #31
778 orrs lr, lr, r3, lsl #1
780 biceq xl, xl, r3, lsr #31
783 @ One or both arguments are denormalized.
784 @ Scale them leftwards and preserve sign bit.
788 and r6, xh, #0x80000000
789 1: movs xl, xl, lsl #1
799 2: and r6, yh, #0x80000000
800 3: movs yl, yl, lsl #1
810 @ Isolate the INF and NAN cases away
812 and r5, ip, yh, lsr #20
817 @ Here, one or more arguments are either denormalized or zero.
818 orrs r6, xl, xh, lsl #1
820 COND(orr,s,ne) r6, yl, yh, lsl #1
823 @ Result is 0, but determine sign anyway.
826 and xh, xh, #0x80000000
830 1: @ One or both args are INF or NAN.
831 orrs r6, xl, xh, lsl #1
835 COND(orr,s,ne) r6, yl, yh, lsl #1
836 beq LSYM(Lml_n) @ 0 * INF or INF * 0 -> NAN
839 orrs r6, xl, xh, lsl #12
840 bne LSYM(Lml_n) @ NAN * <anything> -> NAN
843 orrs r6, yl, yh, lsl #12
847 bne LSYM(Lml_n) @ <anything> * NAN -> NAN
849 @ Result is INF, but we need to determine its sign.
853 @ Overflow: return INF (sign already in xh).
855 and xh, xh, #0x80000000
856 orr xh, xh, #0x7f000000
857 orr xh, xh, #0x00f00000
861 @ Return a quiet NAN.
863 orr xh, xh, #0x7f000000
864 orr xh, xh, #0x00f80000
870 ARM_FUNC_START divdf3
871 ARM_FUNC_ALIAS aeabi_ddiv divdf3
873 do_push {r4, r5, r6, lr}
875 @ Mask out exponents, trap any zero/denormal/INF/NAN.
878 ands r4, ip, xh, lsr #20
880 COND(and,s,ne) r5, ip, yh, lsr #20
885 @ Subtract divisor exponent from dividend's.
888 @ Preserve final sign into lr.
891 @ Convert mantissa to unsigned integer.
892 @ Dividend -> r5-r6, divisor -> yh-yl.
893 orrs r5, yl, yh, lsl #12
898 orr yh, r5, yh, lsr #4
899 orr yh, yh, yl, lsr #24
901 orr r5, r5, xh, lsr #4
902 orr r5, r5, xl, lsr #24
905 @ Initialize xh with final sign bit.
906 and xh, lr, #0x80000000
908 @ Ensure result will land to known bit position.
909 @ Apply exponent bias accordingly.
913 adc r4, r4, #(255 - 2)
919 @ Perform first subtraction to align result to a nibble.
927 @ The actual division loop.
941 orrcs xl, xl, ip, lsr #1
949 orrcs xl, xl, ip, lsr #2
957 orrcs xl, xl, ip, lsr #3
962 orr r5, r5, r6, lsr #28
965 orr yh, yh, yl, lsr #29
970 @ We are done with a word of the result.
971 @ Loop again for the low word if this pass was for the high word.
979 @ Be sure result starts in the high word.
985 @ Check exponent range for under/overflow.
986 subs ip, r4, #(254 - 1)
991 @ Round the result, merge final exponent.
994 COND(sub,s,eq) ip, r6, yl
995 COND(mov,s,eq) ip, xl, lsr #1
997 adc xh, xh, r4, lsl #20
1000 @ Division by 0x1p*: shortcut a lot of code.
1002 and lr, lr, #0x80000000
1003 orr xh, lr, xh, lsr #12
1004 adds r4, r4, ip, lsr #1
1006 COND(rsb,s,gt) r5, r4, ip
1007 orrgt xh, xh, r4, lsl #20
1008 RETLDM "r4, r5, r6" gt
1010 orr xh, xh, #0x00100000
1015 @ Result might need to be denormalized: put remainder bits
1016 @ in lr for rounding considerations.
1021 @ One or both arguments is either INF, NAN or zero.
1023 and r5, ip, yh, lsr #20
1027 beq LSYM(Lml_n) @ INF/NAN / INF/NAN -> NAN
1030 orrs r4, xl, xh, lsl #12
1031 bne LSYM(Lml_n) @ NAN / <anything> -> NAN
1033 bne LSYM(Lml_i) @ INF / <anything> -> INF
1036 b LSYM(Lml_n) @ INF / (INF or NAN) -> NAN
1039 orrs r5, yl, yh, lsl #12
1040 beq LSYM(Lml_z) @ <anything> / INF -> 0
1043 b LSYM(Lml_n) @ <anything> / NAN -> NAN
1044 2: @ If both are nonzero, we need to normalize and resume above.
1045 orrs r6, xl, xh, lsl #1
1047 COND(orr,s,ne) r6, yl, yh, lsl #1
1049 @ One or both arguments are 0.
1050 orrs r4, xl, xh, lsl #1
1051 bne LSYM(Lml_i) @ <non_zero> / 0 -> INF
1052 orrs r5, yl, yh, lsl #1
1053 bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
1054 b LSYM(Lml_n) @ 0 / 0 -> NAN
1059 #endif /* L_muldivdf3 */
1063 @ Note: only r0 (return value) and ip are clobbered here.
1065 ARM_FUNC_START gtdf2
1066 ARM_FUNC_ALIAS gedf2 gtdf2
1070 ARM_FUNC_START ltdf2
1071 ARM_FUNC_ALIAS ledf2 ltdf2
1075 ARM_FUNC_START cmpdf2
1076 ARM_FUNC_ALIAS nedf2 cmpdf2
1077 ARM_FUNC_ALIAS eqdf2 cmpdf2
1078 mov ip, #1 @ how should we specify unordered here?
1080 1: str ip, [sp, #-4]!
1082 @ Trap any INF/NAN first.
1084 mvns ip, ip, asr #21
1087 COND(mvn,s,ne) ip, ip, asr #21
1090 @ Test for equality.
1091 @ Note that 0.0 is equal to -0.0.
1093 orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0
1095 COND(orr,s,eq) ip, yl, yh, lsl #1 @ and y == 0.0 or -0.0
1096 teqne xh, yh @ or xh == yh
1098 teqeq xl, yl @ and xl == yl
1099 moveq r0, #0 @ then equal.
1108 @ Compare values if same sign
1116 movcs r0, yh, asr #31
1117 mvncc r0, yh, asr #31
1122 3: mov ip, xh, lsl #1
1123 mvns ip, ip, asr #21
1125 orrs ip, xl, xh, lsl #12
1127 4: mov ip, yh, lsl #1
1128 mvns ip, ip, asr #21
1130 orrs ip, yl, yh, lsl #12
1131 beq 2b @ y is not NAN
1132 5: ldr r0, [sp], #4 @ unordered return code
1143 ARM_FUNC_START aeabi_cdrcmple
1153 ARM_FUNC_START aeabi_cdcmpeq
1154 ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
1156 @ The status-returning routines are required to preserve all
1157 @ registers except ip, lr, and cpsr.
1160 @ Set the Z flag correctly, and the C flag unconditionally.
1162 @ Clear the C flag if the return value was -1, indicating
1163 @ that the first operand was smaller than the second.
1168 FUNC_END aeabi_cdcmple
1169 FUNC_END aeabi_cdcmpeq
1170 FUNC_END aeabi_cdrcmple
1172 ARM_FUNC_START aeabi_dcmpeq
1175 ARM_CALL aeabi_cdcmple
1177 moveq r0, #1 @ Equal to.
1178 movne r0, #0 @ Less than, greater than, or unordered.
1181 FUNC_END aeabi_dcmpeq
1183 ARM_FUNC_START aeabi_dcmplt
1186 ARM_CALL aeabi_cdcmple
1188 movcc r0, #1 @ Less than.
1189 movcs r0, #0 @ Equal to, greater than, or unordered.
1192 FUNC_END aeabi_dcmplt
1194 ARM_FUNC_START aeabi_dcmple
1197 ARM_CALL aeabi_cdcmple
1199 movls r0, #1 @ Less than or equal to.
1200 movhi r0, #0 @ Greater than or unordered.
1203 FUNC_END aeabi_dcmple
1205 ARM_FUNC_START aeabi_dcmpge
1208 ARM_CALL aeabi_cdrcmple
1210 movls r0, #1 @ Operand 2 is less than or equal to operand 1.
1211 movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
1214 FUNC_END aeabi_dcmpge
1216 ARM_FUNC_START aeabi_dcmpgt
1219 ARM_CALL aeabi_cdrcmple
1221 movcc r0, #1 @ Operand 2 is less than operand 1.
1222 movcs r0, #0 @ Operand 2 is greater than or equal to operand 1,
1223 @ or they are unordered.
1226 FUNC_END aeabi_dcmpgt
1228 #endif /* L_cmpdf2 */
1230 #ifdef L_arm_unorddf2
1232 ARM_FUNC_START unorddf2
1233 ARM_FUNC_ALIAS aeabi_dcmpun unorddf2
1236 mvns ip, ip, asr #21
1238 orrs ip, xl, xh, lsl #12
1240 1: mov ip, yh, lsl #1
1241 mvns ip, ip, asr #21
1243 orrs ip, yl, yh, lsl #12
1245 2: mov r0, #0 @ arguments are ordered.
1248 3: mov r0, #1 @ arguments are unordered.
1251 FUNC_END aeabi_dcmpun
1254 #endif /* L_unorddf2 */
1256 #ifdef L_arm_fixdfsi
1258 ARM_FUNC_START fixdfsi
1259 ARM_FUNC_ALIAS aeabi_d2iz fixdfsi
1261 @ check exponent range.
1263 adds r2, r2, #(1 << 21)
1264 bcs 2f @ value is INF or NAN
1265 bpl 1f @ value is too small
1266 mov r3, #(0xfffffc00 + 31)
1267 subs r2, r3, r2, asr #21
1268 bls 3f @ value is too large
1272 orr r3, r3, #0x80000000
1273 orr r3, r3, xl, lsr #21
1274 tst xh, #0x80000000 @ the sign bit
1275 shift1 lsr, r0, r3, r2
1283 2: orrs xl, xl, xh, lsl #12
1285 3: ands r0, xh, #0x80000000 @ the sign bit
1287 moveq r0, #0x7fffffff @ maximum signed positive si
1290 4: mov r0, #0 @ How should we convert NAN?
1296 #endif /* L_fixdfsi */
1298 #ifdef L_arm_fixunsdfsi
1300 ARM_FUNC_START fixunsdfsi
1301 ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi
1303 @ check exponent range.
1305 bcs 1f @ value is negative
1306 adds r2, r2, #(1 << 21)
1307 bcs 2f @ value is INF or NAN
1308 bpl 1f @ value is too small
1309 mov r3, #(0xfffffc00 + 31)
1310 subs r2, r3, r2, asr #21
1311 bmi 3f @ value is too large
1315 orr r3, r3, #0x80000000
1316 orr r3, r3, xl, lsr #21
1317 shift1 lsr, r0, r3, r2
1323 2: orrs xl, xl, xh, lsl #12
1324 bne 4f @ value is NAN.
1325 3: mov r0, #0xffffffff @ maximum unsigned si
1328 4: mov r0, #0 @ How should we convert NAN?
1331 FUNC_END aeabi_d2uiz
1334 #endif /* L_fixunsdfsi */
1336 #ifdef L_arm_truncdfsf2
1338 ARM_FUNC_START truncdfsf2
1339 ARM_FUNC_ALIAS aeabi_d2f truncdfsf2
1341 @ check exponent range.
1343 subs r3, r2, #((1023 - 127) << 21)
1345 COND(sub,s,cs) ip, r3, #(1 << 21)
1346 COND(rsb,s,cs) ip, ip, #(254 << 21)
1347 bls 2f @ value is out of range
1349 1: @ shift and round mantissa
1350 and ip, xh, #0x80000000
1352 orr xl, ip, xl, lsr #29
1354 adc r0, xl, r3, lsl #2
1359 2: @ either overflow or underflow
1363 @ check if denormalized value is possible
1364 adds r2, r3, #(23 << 21)
1366 andlt r0, xh, #0x80000000 @ too small, return signed 0.
1369 @ denormalize value so we can resume with the code above afterwards.
1370 orr xh, xh, #0x00100000
1374 #if defined(__thumb2__)
1379 shift1 lsr, xl, xl, r2
1381 orrne xl, xl, #1 @ fold r3 for rounding considerations.
1384 shiftop orr xl xl r3 lsl ip ip
1385 shift1 lsr, r3, r3, r2
1390 mvns r3, r2, asr #21
1391 bne 5f @ simple overflow
1392 orrs r3, xl, xh, lsl #12
1394 movne r0, #0x7f000000
1395 orrne r0, r0, #0x00c00000
1396 RETc(ne) @ return NAN
1398 5: @ return INF with sign
1399 and r0, xh, #0x80000000
1400 orr r0, r0, #0x7f000000
1401 orr r0, r0, #0x00800000
1407 #endif /* L_truncdfsf2 */