1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
5 /* Copyright 1994, 1997, 1998, 2003 Free Software Foundation, Inc.
7 This file is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 2, or (at your option) any
12 In addition to the permissions in the GNU General Public License, the
13 Free Software Foundation gives you unlimited permission to link the
14 compiled version of this file with other programs, and to distribute
15 those programs without any restriction coming from the use of this
16 file. (The General Public License restrictions do apply in other
17 respects; for example, they cover modification of the file, and
18 distribution when not linked into another program.)
20 This file is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
25 You should have received a copy of the GNU General Public License
26 along with this program; see the file COPYING. If not, write to
27 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
29 /* As a special exception, if you link this library with other files,
30 some of which are compiled with GCC, to produce an executable,
31 this library does not by itself cause the resulting executable
32 to be covered by the GNU General Public License.
33 This exception does not however invalidate any other reasons why
34 the executable file might be covered by the GNU General Public License. */
36 /* This implements IEEE 754 format arithmetic, but does not provide a
37 mechanism for setting the rounding mode, or for generating or handling
40 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
41 Wilson, all of Cygnus Support. */
47 #include "sim-basics.h"
51 #include "sim-assert.h"
/* Debug helper: walks X from bit MSBIT downward, emitting each bit via
   the PRINT callback.  NOTE(review): this listing is an elided excerpt;
   the full signature and loop body are missing between visible lines.  */
55    If digits is -1, then print all digits. */
58 print_bits (unsigned64 x,
61 	    sim_fpu_print_func print,
64   unsigned64 bit = LSBIT64 (msbit);
/* Count down the requested digit budget; -1 (never decremented to 0
   this way) means "print all digits" per the comment above.  */
77       if (digits > 0) digits--;
84 /* Quick and dirty conversion between a host double and host 64bit int */
92 /* A packed IEEE floating point number.
94    Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
95    32 and 64 bit numbers.  This number is interpreted as:
97    Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
98    (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
100    Denormalized (0 == BIASEDEXP && FRAC != 0):
101    (sign ? "-" : "+") 0.<FRAC> x 2 ^ (- EXPBIAS)
103    Zero (0 == BIASEDEXP && FRAC == 0):
104    (sign ? "-" : "+") 0.0
106    Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
107    (sign ? "-" : "+") "infinity"
109    SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
112    QuietNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC > QUIET_NAN):
/* Field widths: IEEE single is 1+8+23 bits, IEEE double is 1+11+52.
   Most macros below key off a local `is_double' flag in scope at the
   point of use.  */
117 #define NR_EXPBITS (is_double ? 11 : 8)
118 #define NR_FRACBITS (is_double ? 52 : 23)
119 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
121 #define EXPMAX32 (255)
/* NOTE(review): "EXMPAX64" looks like a misspelling of EXPMAX64, but it
   is used consistently (see EXPMAX below), so renaming would also touch
   elided code -- confirm against the full file before fixing.  */
122 #define EXMPAX64 (2047)
123 #define EXPMAX ((unsigned) (is_double ? EXMPAX64 : EXPMAX32))
125 #define EXPBIAS32 (127)
126 #define EXPBIAS64 (1023)
127 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
/* Most-significant fraction bit: set => quiet NaN (unless
   SIM_QUIET_NAN_NEGATED inverts the convention, see pack/unpack).  */
129 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
133 /* An unpacked floating point number.
135    When unpacked, the fraction of both a 32 and 64 bit floating point
136    number is stored using the same format:
138    64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
139    32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
141 #define NR_PAD32 (30)
143 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
144 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
146 #define NR_GUARDS32 (7 + NR_PAD32)
147 #define NR_GUARDS64 (8 + NR_PAD64)
148 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
149 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
/* Rounding helpers: MSB of the guard field, the LSB above the pad,
   and the "all ones below the guard MSB" round-to-nearest increment.  */
151 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
152 #define GUARDLSB LSBIT64 (NR_PAD)
153 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
155 #define NR_FRAC_GUARD (60)
156 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
157 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
158 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
161 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
163 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
165 #define NORMAL_EXPMAX32 (EXPBIAS32)
166 #define NORMAL_EXPMAX64 (EXPBIAS64)
167 #define NORMAL_EXPMAX (EXPBIAS)
170 /* Integer constants */
172 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
173 #define MAX_UINT32 LSMASK64 (31, 0)
174 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
176 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
177 #define MAX_UINT64 LSMASK64 (63, 0)
178 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
/* Width-dispatched variants keyed off a local `is_64bit' flag.  */
180 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
181 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
182 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
183 #define NR_INTBITS (is_64bit ? 64 : 32)
185 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer */
186 STATIC_INLINE_SIM_FPU (unsigned64)
187 pack_fpu (const sim_fpu *src,
/* Dispatch on the unpacked class to construct sign/exponent/fraction.
   (Elided excerpt: surrounding switch scaffolding is not visible.)  */
198     case sim_fpu_class_qnan:
201       /* force fraction to correct class */
202       fraction = src->fraction;
203       fraction >>= NR_GUARDS;
/* Some targets invert the quiet-NaN convention: MSB clear = quiet.  */
204 #ifdef SIM_QUIET_NAN_NEGATED
205       fraction |= QUIET_NAN - 1;
207       fraction |= QUIET_NAN;
210     case sim_fpu_class_snan:
213       /* force fraction to correct class */
214       fraction = src->fraction;
215       fraction >>= NR_GUARDS;
216       fraction &= ~QUIET_NAN;
218     case sim_fpu_class_infinity:
223     case sim_fpu_class_zero:
228     case sim_fpu_class_number:
229     case sim_fpu_class_denorm:
/* Invariant for unpacked normal numbers: 1.0 <= fraction < 2.0.  */
230       ASSERT (src->fraction >= IMPLICIT_1);
231       ASSERT (src->fraction < IMPLICIT_2);
232       if (src->normal_exp < NORMAL_EXPMIN)
234 	  /* This number's exponent is too low to fit into the bits
235 	     available in the number.  We'll denormalize the number by
236 	     storing zero in the exponent and shift the fraction to
237 	     the right to make up for it. */
238 	  int nr_shift = NORMAL_EXPMIN - src->normal_exp;
239 	  if (nr_shift > NR_FRACBITS)
241 	      /* underflow, just make the number zero */
250 	      /* Shift by the value */
251 	      fraction = src->fraction;
252 	      fraction >>= NR_GUARDS;
253 	      fraction >>= nr_shift;
256       else if (src->normal_exp > NORMAL_EXPMAX)
/* In-range normal number: rebias exponent, then round the fraction.  */
265 	  exp = (src->normal_exp + EXPBIAS);
267 	  fraction = src->fraction;
268 	  /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
270 	  /* Round to nearest: If the guard bits are the all zero, but
271 	     the first, then we're half way between two numbers,
272 	     choose the one which makes the lsb of the answer 0. */
273 	  if ((fraction & GUARDMASK) == GUARDMSB)
275 	      if ((fraction & (GUARDMSB << 1)))
276 		fraction += (GUARDMSB << 1);
280 	      /* Add a one to the guards to force round to nearest */
281 	      fraction += GUARDROUND;
283 	  if ((fraction & IMPLICIT_2)) /* rounding resulted in carry */
288 	  fraction >>= NR_GUARDS;
289 	  /* When exp == EXPMAX (overflow from carry) fraction must
290 	     have been made zero */
291 	  ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
/* Assemble the final bit pattern: <sign><exp><frac>.  */
298   packed = ((sign ? SIGNBIT : 0)
299 	    | (exp << NR_FRACBITS)
300 	    | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
302   /* trace operation */
309       printf ("pack_fpu: ");
310       printf ("-> %c%0lX.%06lX\n",
311 	      LSMASKED32 (packed, 31, 31) ? '8' : '0',
312 	      (long) LSEXTRACTED32 (packed, 30, 23),
313 	      (long) LSEXTRACTED32 (packed, 23 - 1, 0));
321 /* Unpack a 32/64 bit integer into a sim_fpu structure */
322 STATIC_INLINE_SIM_FPU (void)
323 unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
/* Split the packed word into its three IEEE fields.  */
325   unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
326   unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
327   int sign = (packed & SIGNBIT) != 0;
331       /* Hmm. Looks like 0 */
334 	  /* tastes like zero */
335 	  dst->class = sim_fpu_class_zero;
341 	  /* Zero exponent with non zero fraction - it's denormalized,
342 	     so there isn't a leading implicit one - we'll shift it so
/* Normalize the denormal: shift the fraction up until the implicit-1
   position is occupied, adjusting normal_exp accordingly.  */
344 	  dst->normal_exp = exp - EXPBIAS + 1;
345 	  dst->class = sim_fpu_class_denorm;
347 	  fraction <<= NR_GUARDS;
348 	  while (fraction < IMPLICIT_1)
353 	  dst->fraction = fraction;
356   else if (exp == EXPMAX)
361 	  /* Attached to a zero fraction - means infinity */
362 	  dst->class = sim_fpu_class_infinity;
364 	  /* dst->normal_exp = EXPBIAS; */
365 	  /* dst->fraction = 0; */
371 	  /* Non zero fraction, means NaN */
373 	  dst->fraction = (fraction << NR_GUARDS);
/* Quiet/signaling is encoded in the fraction MSB; some targets use
   the negated convention.  */
374 #ifdef SIM_QUIET_NAN_NEGATED
375 	  qnan = (fraction & QUIET_NAN) == 0;
377 	  qnan = fraction >= QUIET_NAN;
380 	    dst->class = sim_fpu_class_qnan;
382 	    dst->class = sim_fpu_class_snan;
387       /* Nothing strange about this number */
388       dst->class = sim_fpu_class_number;
390       dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
391       dst->normal_exp = exp - EXPBIAS;
394   /* trace operation */
401       printf ("unpack_fpu: %c%02lX.%06lX ->\n",
402 	      LSMASKED32 (packed, 31, 31) ? '8' : '0',
403 	      (long) LSEXTRACTED32 (packed, 30, 23),
404 	      (long) LSEXTRACTED32 (packed, 23 - 1, 0));
/* Sanity check: repacking the unpacked value must reproduce the
   original bit pattern exactly (round trip is lossless).  */
411 	  val.i = pack_fpu (dst, 1);
414 	  ASSERT (val.i == packed);
418 	  unsigned32 val = pack_fpu (dst, 0);
419 	  unsigned32 org = packed;
426 /* Convert a floating point into an integer */
427 STATIC_INLINE_SIM_FPU (int)
/* fpu2i: convert *S to a signed 32/64-bit integer in *I, returning a
   sim_fpu_status mask.  (Elided excerpt: signature line not visible.)  */
436   if (sim_fpu_is_zero (s))
441   if (sim_fpu_is_snan (s))
443       *i = MIN_INT;		/* FIXME */
444       return sim_fpu_status_invalid_cvi;
446   if (sim_fpu_is_qnan (s))
448       *i = MIN_INT;		/* FIXME */
449       return sim_fpu_status_invalid_cvi;
451   /* map infinity onto MAX_INT... */
452   if (sim_fpu_is_infinity (s))
454       *i = s->sign ? MIN_INT : MAX_INT;
455       return sim_fpu_status_invalid_cvi;
457   /* it is a number, but a small one */
458   if (s->normal_exp < 0)
461       return sim_fpu_status_inexact;
463   /* Is the floating point MIN_INT or just close? */
464   if (s->sign && s->normal_exp == (NR_INTBITS - 1))
467       ASSERT (s->fraction >= IMPLICIT_1);
468       if (s->fraction == IMPLICIT_1)
469 	return 0;		/* exact */
470       if (is_64bit)		/* can't round */
471 	return sim_fpu_status_invalid_cvi;	/* must be overflow */
472       /* For a 32bit with MAX_INT, rounding is possible */
475 	case sim_fpu_round_default:
477 	case sim_fpu_round_zero:
478 	  if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
479 	    return sim_fpu_status_invalid_cvi;
481 	    return sim_fpu_status_inexact;
483 	case sim_fpu_round_near:
485 	  if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
486 	    return sim_fpu_status_invalid_cvi;
/* NOTE(review): "!FRAC32MASK" is a logical NOT, which evaluates to 0,
   making this test always false.  Almost certainly bitwise "~FRAC32MASK"
   was intended (to inspect the low-order fraction bits) -- confirm
   against the full source before fixing.  */
487 	  else if ((s->fraction & !FRAC32MASK) >= (~FRAC32MASK >> 1))
488 	    return sim_fpu_status_invalid_cvi;
490 	    return sim_fpu_status_inexact;
492 	case sim_fpu_round_up:
493 	  if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
494 	    return sim_fpu_status_inexact;
496 	    return sim_fpu_status_invalid_cvi;
497 	case sim_fpu_round_down:
498 	  return sim_fpu_status_invalid_cvi;
501   /* Would right shifting result in the FRAC being shifted into
502      (through) the integer's sign bit? */
503   if (s->normal_exp > (NR_INTBITS - 2))
505       *i = s->sign ? MIN_INT : MAX_INT;
506       return sim_fpu_status_invalid_cvi;
508   /* normal number shift it into place */
510   shift = (s->normal_exp - (NR_FRAC_GUARD));
/* Any discarded low-order bits make the conversion inexact.  */
518       if (tmp & ((SIGNED64 (1) << shift) - 1))
519 	status |= sim_fpu_status_inexact;
522   *i = s->sign ? (-tmp) : (tmp);
526 /* convert an integer into a floating point */
527 STATIC_INLINE_SIM_FPU (int)
528 i2fpu (sim_fpu *f, signed64 i, int is_64bit)
533       f->class = sim_fpu_class_zero;
539       f->class = sim_fpu_class_number;
/* Place the magnitude at the NR_FRAC_GUARD binary point, then
   normalize below.  */
541       f->normal_exp = NR_FRAC_GUARD;
545 	  /* Special case for minint, since there is no corresponding
546 	     +ve integer representation for it */
549 	      f->fraction = IMPLICIT_1;
550 	      f->normal_exp = NR_INTBITS - 1;
/* Normalize: shift right (keeping a sticky bit in the LSB) while too
   large, shift left while too small.  */
558 	      if (f->fraction >= IMPLICIT_2)
562 		      f->fraction = (f->fraction >> 1) | (f->fraction & 1);
565 		  while (f->fraction >= IMPLICIT_2);
567 	      else if (f->fraction < IMPLICIT_1)
574 		  while (f->fraction < IMPLICIT_1);
578   /* trace operation */
581       printf ("i2fpu: 0x%08lX ->\n", (long) i);
/* Sanity check: converting back with round-to-zero must reproduce I
   when it fits the 32-bit range.  */
588       fpu2i (&val, f, is_64bit, sim_fpu_round_zero);
589       if (i >= MIN_INT32 && i <= MAX_INT32)
599 /* Convert a floating point into an unsigned integer */
600 STATIC_INLINE_SIM_FPU (int)
601 fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
/* Unsigned conversion always works with the full (double) unpacked
   layout.  */
603   const int is_double = 1;
606   if (sim_fpu_is_zero (s))
611   if (sim_fpu_is_nan (s))
616   /* it is a negative number */
622   /* get reasonable MAX_USI_INT... */
623   if (sim_fpu_is_infinity (s))
628   /* it is a number, but a small one */
629   if (s->normal_exp < 0)
/* Too large to represent: result clamped (body elided here).  */
635   if (s->normal_exp > (NR_INTBITS - 1))
/* Drop the pad bits, then shift the fraction into integer position.  */
641   tmp = (s->fraction & ~PADMASK);
642   shift = (s->normal_exp - (NR_FRACBITS + NR_GUARDS));
656 /* Convert an unsigned integer into a floating point */
657 STATIC_INLINE_SIM_FPU (int)
658 u2fpu (sim_fpu *f, unsigned64 u, int is_64bit)
662       f->class = sim_fpu_class_zero;
668       f->class = sim_fpu_class_number;
670       f->normal_exp = NR_FRAC_GUARD;
/* Normalize a non-zero value up to the implicit-1 position.  */
673       while (f->fraction < IMPLICIT_1)
683 /* register <-> sim_fpu */
/* Unpack a 32-bit register image into *F.  */
685 INLINE_SIM_FPU (void)
686 sim_fpu_32to (sim_fpu *f, unsigned32 s)
688   unpack_fpu (f, s, 0);
/* Unpack a 64-bit value supplied as two 32-bit halves (H:high, L:low).  */
692 INLINE_SIM_FPU (void)
693 sim_fpu_232to (sim_fpu *f, unsigned32 h, unsigned32 l)
697   unpack_fpu (f, s, 1);
/* Unpack a 64-bit register image into *F.  */
701 INLINE_SIM_FPU (void)
702 sim_fpu_64to (sim_fpu *f, unsigned64 s)
704   unpack_fpu (f, s, 1);
/* Pack *F into a 32-bit register image.  */
708 INLINE_SIM_FPU (void)
709 sim_fpu_to32 (unsigned32 *s,
712   *s = pack_fpu (f, 0);
/* Pack *F into two 32-bit halves.  */
716 INLINE_SIM_FPU (void)
717 sim_fpu_to232 (unsigned32 *h, unsigned32 *l,
720   unsigned64 s = pack_fpu (f, 1);
/* Pack *F into a 64-bit register image.  */
726 INLINE_SIM_FPU (void)
727 sim_fpu_to64 (unsigned64 *u,
730   *u = pack_fpu (f, 1);
734 INLINE_SIM_FPU (void)
/* Build a normal number directly from a raw fraction of PRECISION bits
   and an unbiased exponent, realigning the fraction to the internal
   NR_FRAC_GUARD position and or-ing in the implicit leading 1.  */
735 sim_fpu_fractionto (sim_fpu *f,
741   int shift = (NR_FRAC_GUARD - precision);
742   f->class = sim_fpu_class_number;
744   f->normal_exp = normal_exp;
745   /* shift the fraction to where sim-fpu expects it */
747     f->fraction = (fraction << shift);
749     f->fraction = (fraction >> -shift);
750   f->fraction |= IMPLICIT_1;
754 INLINE_SIM_FPU (unsigned64)
/* Extract D's fraction as a raw PRECISION-bit value, dropping the
   implicit leading 1 (inverse of sim_fpu_fractionto).  */
755 sim_fpu_tofraction (const sim_fpu *d,
758   /* we have NR_FRAC_GUARD bits, we want only PRECISION bits */
759   int shift = (NR_FRAC_GUARD - precision);
760   unsigned64 fraction = (d->fraction & ~IMPLICIT_1);
762     return fraction >> shift;
764     return fraction << -shift;
770 STATIC_INLINE_SIM_FPU (int)
/* Handle exponent overflow of a normal number: depending on the
   rounding mode, either saturate to infinity or to the largest
   finite magnitude.  Returns overflow|inexact status.  */
771 do_normal_overflow (sim_fpu *f,
777     case sim_fpu_round_default:
779     case sim_fpu_round_near:
780       f->class = sim_fpu_class_infinity;
782     case sim_fpu_round_up:
784 	f->class = sim_fpu_class_infinity;
786     case sim_fpu_round_down:
788 	f->class = sim_fpu_class_infinity;
790     case sim_fpu_round_zero:
/* Saturation value: max exponent, all fraction bits set above the
   guard field.  */
793   f->normal_exp = NORMAL_EXPMAX;
794   f->fraction = LSMASK64 (NR_FRAC_GUARD, NR_GUARDS);
795   return (sim_fpu_status_overflow | sim_fpu_status_inexact);
798 STATIC_INLINE_SIM_FPU (int)
/* Handle exponent underflow of a normal number: depending on the
   rounding mode, flush to zero or to the smallest representable
   magnitude.  Returns inexact|underflow status.  */
799 do_normal_underflow (sim_fpu *f,
805     case sim_fpu_round_default:
807     case sim_fpu_round_near:
808       f->class = sim_fpu_class_zero;
810     case sim_fpu_round_up:
812 	f->class = sim_fpu_class_zero;
814     case sim_fpu_round_down:
816 	f->class = sim_fpu_class_zero;
818     case sim_fpu_round_zero:
819       f->class = sim_fpu_class_zero;
/* Smallest-magnitude fallback value when not flushed to zero.  */
822   f->normal_exp = NORMAL_EXPMIN - NR_FRACBITS;
823   f->fraction = IMPLICIT_1;
824   return (sim_fpu_status_inexact | sim_fpu_status_underflow);
829 /* Round a number using NR_GUARDS.
830    Will return the rounded number or F->FRACTION == 0 when underflow */
832 STATIC_INLINE_SIM_FPU (int)
833 do_normal_round (sim_fpu *f,
/* Bit positions derived from the guard-field width: mask of all guard
   bits, the guard MSB (the "half" bit), and the fraction LSB just
   above it.  */
837   unsigned64 guardmask = LSMASK64 (nr_guards - 1, 0);
838   unsigned64 guardmsb = LSBIT64 (nr_guards - 1);
839   unsigned64 fraclsb = guardmsb << 1;
840   if ((f->fraction & guardmask))
842       int status = sim_fpu_status_inexact;
845 	case sim_fpu_round_default:
847 	case sim_fpu_round_near:
/* Round-to-nearest-even: round up when above half, or exactly half
   with an odd fraction LSB.  */
848 	  if ((f->fraction & guardmsb))
850 	      if ((f->fraction & fraclsb))
852 		  status |= sim_fpu_status_rounded;
854 	      else if ((f->fraction & (guardmask >> 1)))
856 		  status |= sim_fpu_status_rounded;
860 	case sim_fpu_round_up:
862 	    status |= sim_fpu_status_rounded;
864 	case sim_fpu_round_down:
866 	    status |= sim_fpu_status_rounded;
868 	case sim_fpu_round_zero:
871       f->fraction &= ~guardmask;
872       /* round if needed, handle resulting overflow */
873       if ((status & sim_fpu_status_rounded))
875 	  f->fraction += fraclsb;
/* Rounding may carry out of the fraction; caller-visible handling of
   the IMPLICIT_2 carry is elided here.  */
876 	  if ((f->fraction & IMPLICIT_2))
889 STATIC_INLINE_SIM_FPU (int)
/* Round *F to the 32- or 64-bit target format (per IS_DOUBLE), honoring
   the rounding mode and denormal-handling policy.  Returns a
   sim_fpu_status mask.  */
890 do_round (sim_fpu *f,
893 	  sim_fpu_denorm denorm)
/* NaNs, zeros and infinities need no rounding; a signaling NaN is
   quietened and reported.  */
897     case sim_fpu_class_qnan:
898     case sim_fpu_class_zero:
899     case sim_fpu_class_infinity:
902     case sim_fpu_class_snan:
903       /* Quieten a SignalingNaN */
904       f->class = sim_fpu_class_qnan;
905       return sim_fpu_status_invalid_snan;
907     case sim_fpu_class_number:
908     case sim_fpu_class_denorm:
911       ASSERT (f->fraction < IMPLICIT_2);
912       ASSERT (f->fraction >= IMPLICIT_1);
913       if (f->normal_exp < NORMAL_EXPMIN)
915 	  /* This number's exponent is too low to fit into the bits
916 	     available in the number.  Round off any bits that will be
917 	     discarded as a result of denormalization.  Edge case is
918 	     the implicit bit shifted to GUARD0 and then rounded
920 	  int shift = NORMAL_EXPMIN - f->normal_exp;
921 	  if (shift + NR_GUARDS <= NR_FRAC_GUARD + 1
922 	      && !(denorm & sim_fpu_denorm_zero))
/* Round at the denormalized precision (guards plus the extra shift).  */
924 	      status = do_normal_round (f, shift + NR_GUARDS, round);
925 	      if (f->fraction == 0) /* rounding underflowed */
927 		  status |= do_normal_underflow (f, is_double, round);
929 	      else if (f->normal_exp < NORMAL_EXPMIN) /* still underflow? */
931 		  status |= sim_fpu_status_denorm;
932 		  /* Any loss of precision when denormalizing is
933 		     underflow. Some processors check for underflow
934 		     before rounding, some after! */
935 		  if (status & sim_fpu_status_inexact)
936 		    status |= sim_fpu_status_underflow;
937 		  /* Flag that resultant value has been denormalized */
938 		  f->class = sim_fpu_class_denorm;
940 	      else if ((denorm & sim_fpu_denorm_underflow_inexact))
942 		  if ((status & sim_fpu_status_inexact))
943 		    status |= sim_fpu_status_underflow;
/* Flush-to-zero policy, or shift too large: straight underflow.  */
948 	      status = do_normal_underflow (f, is_double, round);
951       else if (f->normal_exp > NORMAL_EXPMAX)
954 	  status = do_normal_overflow (f, is_double, round);
/* In-range value: round normally; rounding itself may underflow to
   zero or carry into overflow.  */
958 	  status = do_normal_round (f, NR_GUARDS, round);
959 	  if (f->fraction == 0)
960 	    /* f->class = sim_fpu_class_zero; */
961 	    status |= do_normal_underflow (f, is_double, round);
962 	  else if (f->normal_exp > NORMAL_EXPMAX)
963 	    /* oops! rounding caused overflow */
964 	    status |= do_normal_overflow (f, is_double, round);
966       ASSERT ((f->class == sim_fpu_class_number
967 	       || f->class == sim_fpu_class_denorm)
968 	      <= (f->fraction < IMPLICIT_2 && f->fraction >= IMPLICIT_1));
/* Public single-precision rounding entry point: thin wrapper over
   do_round with is_double == 0.  */
976 sim_fpu_round_32 (sim_fpu *f,
978 		  sim_fpu_denorm denorm)
980   return do_round (f, 0, round, denorm);
/* Public double-precision rounding entry point (is_double == 1).  */
984 sim_fpu_round_64 (sim_fpu *f,
986 		  sim_fpu_denorm denorm)
988   return do_round (f, 1, round, denorm);
/* F = L + R.  Handles NaN/infinity/zero special cases up front, then
   aligns exponents, adds fractions and renormalizes.  Returns a
   sim_fpu_status mask.  */
996 sim_fpu_add (sim_fpu *f,
1000   if (sim_fpu_is_snan (l))
1003       f->class = sim_fpu_class_qnan;
1004       return sim_fpu_status_invalid_snan;
1006   if (sim_fpu_is_snan (r))
1009       f->class = sim_fpu_class_qnan;
1010       return sim_fpu_status_invalid_snan;
1012   if (sim_fpu_is_qnan (l))
1017   if (sim_fpu_is_qnan (r))
/* inf + (-inf) is invalid; inf + anything-else is inf.  */
1022   if (sim_fpu_is_infinity (l))
1024       if (sim_fpu_is_infinity (r)
1025 	  && l->sign != r->sign)
1028 	  return sim_fpu_status_invalid_isi;
1033   if (sim_fpu_is_infinity (r))
1038   if (sim_fpu_is_zero (l))
1040       if (sim_fpu_is_zero (r))
/* (+0) + (+0) = +0, (-0) + (-0) = -0, mixed signs give +0.  */
1043 	  f->sign = l->sign & r->sign;
1049   if (sim_fpu_is_zero (r))
1056     int shift = l->normal_exp - r->normal_exp;
1057     unsigned64 lfraction;
1058     unsigned64 rfraction;
1059     /* use exp of larger */
1060     if (shift >= NR_FRAC_GUARD)
1062 	/* left has much bigger magnitude */
1064 	return sim_fpu_status_inexact;
1066     if (shift <= - NR_FRAC_GUARD)
1068 	/* right has much bigger magnitude */
1070 	return sim_fpu_status_inexact;
1072     lfraction = l->fraction;
1073     rfraction = r->fraction;
/* Align the smaller operand to the larger's exponent, keeping a
   sticky LSB for any bits shifted out.  */
1076 	f->normal_exp = l->normal_exp;
1077 	if (rfraction & LSMASK64 (shift - 1, 0))
1079 	    status |= sim_fpu_status_inexact;
1080 	    rfraction |= LSBIT64 (shift); /* stick LSBit */
1082 	rfraction >>= shift;
1086 	f->normal_exp = r->normal_exp;
1087 	if (lfraction & LSMASK64 (- shift - 1, 0))
1089 	    status |= sim_fpu_status_inexact;
1090 	    lfraction |= LSBIT64 (- shift); /* stick LSBit */
1092 	lfraction >>= -shift;
1096 	f->normal_exp = r->normal_exp;
1099     /* perform the addition */
/* Negative operands are negated so the add is a plain two's-complement
   sum; the sign of the result is recovered from the sum's sign.  */
1101       lfraction = - lfraction;
1103       rfraction = - rfraction;
1104     f->fraction = lfraction + rfraction;
1107     if (f->fraction == 0)
1114     f->class = sim_fpu_class_number;
1115     if ((signed64) f->fraction >= 0)
1120 	f->fraction = - f->fraction;
/* Renormalize: shift down on carry into IMPLICIT_2 (sticky LSB), or
   up while below IMPLICIT_1.  */
1124     if ((f->fraction & IMPLICIT_2))
1126 	f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1129     else if (f->fraction < IMPLICIT_1)
1136 	while (f->fraction < IMPLICIT_1);
1138     ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1144 INLINE_SIM_FPU (int)
/* F = L - R.  Mirrors sim_fpu_add with the roles of R's sign inverted:
   special cases first, then align, subtract and renormalize.  Returns a
   sim_fpu_status mask.  */
1145 sim_fpu_sub (sim_fpu *f,
1149   if (sim_fpu_is_snan (l))
1152       f->class = sim_fpu_class_qnan;
1153       return sim_fpu_status_invalid_snan;
1155   if (sim_fpu_is_snan (r))
1158       f->class = sim_fpu_class_qnan;
1159       return sim_fpu_status_invalid_snan;
1161   if (sim_fpu_is_qnan (l))
1166   if (sim_fpu_is_qnan (r))
/* inf - inf (same signs) is invalid.  */
1171   if (sim_fpu_is_infinity (l))
1173       if (sim_fpu_is_infinity (r)
1174 	  && l->sign == r->sign)
1177 	  return sim_fpu_status_invalid_isi;
1182   if (sim_fpu_is_infinity (r))
1188   if (sim_fpu_is_zero (l))
1190       if (sim_fpu_is_zero (r))
/* 0 - 0: result sign is negative only for (-0) - (+0).  */
1193 	  f->sign = l->sign & !r->sign;
1202   if (sim_fpu_is_zero (r))
1209     int shift = l->normal_exp - r->normal_exp;
1210     unsigned64 lfraction;
1211     unsigned64 rfraction;
1212     /* use exp of larger */
1213     if (shift >= NR_FRAC_GUARD)
1215 	/* left has much bigger magnitude */
1217 	return sim_fpu_status_inexact;
1219     if (shift <= - NR_FRAC_GUARD)
1221 	/* right has much bigger magnitude */
1224 	return sim_fpu_status_inexact;
1226     lfraction = l->fraction;
1227     rfraction = r->fraction;
/* Align the smaller operand, keeping a sticky LSB for shifted-out
   bits (same scheme as sim_fpu_add).  */
1230 	f->normal_exp = l->normal_exp;
1231 	if (rfraction & LSMASK64 (shift - 1, 0))
1233 	    status |= sim_fpu_status_inexact;
1234 	    rfraction |= LSBIT64 (shift); /* stick LSBit */
1236 	rfraction >>= shift;
1240 	f->normal_exp = r->normal_exp;
1241 	if (lfraction & LSMASK64 (- shift - 1, 0))
1243 	    status |= sim_fpu_status_inexact;
1244 	    lfraction |= LSBIT64 (- shift); /* stick LSBit */
1246 	lfraction >>= -shift;
1250 	f->normal_exp = r->normal_exp;
1253     /* perform the subtraction */
1255       lfraction = - lfraction;
1257       rfraction = - rfraction;
1258     f->fraction = lfraction + rfraction;
1261     if (f->fraction == 0)
1268     f->class = sim_fpu_class_number;
1269     if ((signed64) f->fraction >= 0)
1274 	f->fraction = - f->fraction;
/* Renormalize exactly as in sim_fpu_add.  */
1278     if ((f->fraction & IMPLICIT_2))
1280 	f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1283     else if (f->fraction < IMPLICIT_1)
1290 	while (f->fraction < IMPLICIT_1);
1292     ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1298 INLINE_SIM_FPU (int)
/* F = L * R.  NaN and inf*0 special cases first, then a 64x64 -> 128
   bit fraction multiply built from four 32x32 partial products.
   Returns a sim_fpu_status mask.  */
1299 sim_fpu_mul (sim_fpu *f,
1303   if (sim_fpu_is_snan (l))
1306       f->class = sim_fpu_class_qnan;
1307       return sim_fpu_status_invalid_snan;
1309   if (sim_fpu_is_snan (r))
1312       f->class = sim_fpu_class_qnan;
1313       return sim_fpu_status_invalid_snan;
1315   if (sim_fpu_is_qnan (l))
1320   if (sim_fpu_is_qnan (r))
/* infinity * zero is invalid; infinity * anything-else keeps the
   xor'd sign.  */
1325   if (sim_fpu_is_infinity (l))
1327       if (sim_fpu_is_zero (r))
1330 	  return sim_fpu_status_invalid_imz;
1333       f->sign = l->sign ^ r->sign;
1336   if (sim_fpu_is_infinity (r))
1338       if (sim_fpu_is_zero (l))
1341 	  return sim_fpu_status_invalid_imz;
1344       f->sign = l->sign ^ r->sign;
1347   if (sim_fpu_is_zero (l) || sim_fpu_is_zero (r))
1350       f->sign = l->sign ^ r->sign;
1353   /* Calculate the mantissa by multiplying both 64bit numbers to get a
/* Schoolbook 128-bit product: split each fraction into 32-bit halves
   and combine the four partial products with explicit carry tracking.  */
1358     unsigned64 nl = l->fraction & 0xffffffff;
1359     unsigned64 nh = l->fraction >> 32;
1360     unsigned64 ml = r->fraction & 0xffffffff;
1361     unsigned64 mh = r->fraction >>32;
1362     unsigned64 pp_ll = ml * nl;
1363     unsigned64 pp_hl = mh * nl;
1364     unsigned64 pp_lh = ml * nh;
1365     unsigned64 pp_hh = mh * nh;
1366     unsigned64 res2 = 0;
1367     unsigned64 res0 = 0;
1368     unsigned64 ps_hh__ = pp_hl + pp_lh;
/* Carry out of the middle-partial sum feeds bit 32 of the high word.  */
1369     if (ps_hh__ < pp_hl)
1370       res2 += UNSIGNED64 (0x100000000);
1371     pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
1372     res0 = pp_ll + pp_hl;
1375     res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1379     f->normal_exp = l->normal_exp + r->normal_exp;
1380     f->sign = l->sign ^ r->sign;
1381     f->class = sim_fpu_class_number;
1383     /* Input is bounded by [1,2)   ;   [2^60,2^61)
1384        Output is bounded by [1,4)  ;   [2^120,2^122) */
1386     /* Adjust the exponent according to where the decimal point ended
1387        up in the high 64 bit word.  In the source the decimal point
1388        was at NR_FRAC_GUARD. */
1389     f->normal_exp += NR_FRAC_GUARD + 64 - (NR_FRAC_GUARD * 2);
1391     /* The high word is bounded according to the above.  Consequently
1392        it has never overflowed into IMPLICIT_2. */
1393     ASSERT (high < LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64));
1394     ASSERT (high >= LSBIT64 ((NR_FRAC_GUARD * 2) - 64));
1395     ASSERT (LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64) < IMPLICIT_1);
/* Normalize the 128-bit product left, folding the low word's top bit
   into the high word each step.  */
1402 	if (low & LSBIT64 (63))
1406       while (high < IMPLICIT_1);
1408     ASSERT (high >= IMPLICIT_1 && high < IMPLICIT_2);
/* Any remaining low-word bits make the result inexact (sticky bit).  */
1411 	f->fraction = (high | 1); /* sticky */
1412 	return sim_fpu_status_inexact;
1423 INLINE_SIM_FPU (int)
/* F = L / R.  Special cases (NaN, inf/inf, 0/0, x/0) first, then a
   bit-at-a-time long division of the fractions with NR_SPARE extra
   precision bits.  Returns a sim_fpu_status mask.  */
1424 sim_fpu_div (sim_fpu *f,
1428   if (sim_fpu_is_snan (l))
1431       f->class = sim_fpu_class_qnan;
1432       return sim_fpu_status_invalid_snan;
1434   if (sim_fpu_is_snan (r))
1437       f->class = sim_fpu_class_qnan;
1438       return sim_fpu_status_invalid_snan;
1440   if (sim_fpu_is_qnan (l))
1443       f->class = sim_fpu_class_qnan;
1446   if (sim_fpu_is_qnan (r))
1449       f->class = sim_fpu_class_qnan;
/* inf / inf is invalid; inf / finite is signed infinity.  */
1452   if (sim_fpu_is_infinity (l))
1454       if (sim_fpu_is_infinity (r))
1457 	  return sim_fpu_status_invalid_idi;
1462 	  f->sign = l->sign ^ r->sign;
/* 0 / 0 is invalid; 0 / non-zero is signed zero.  */
1466   if (sim_fpu_is_zero (l))
1468       if (sim_fpu_is_zero (r))
1471 	  return sim_fpu_status_invalid_zdz;
1476 	  f->sign = l->sign ^ r->sign;
1480   if (sim_fpu_is_infinity (r))
1483       f->sign = l->sign ^ r->sign;
/* Finite / 0: signed infinity with divide-by-zero status.  */
1486   if (sim_fpu_is_zero (r))
1488       f->class = sim_fpu_class_infinity;
1489       f->sign = l->sign ^ r->sign;
1490       return sim_fpu_status_invalid_div0;
1493   /* Calculate the mantissa by multiplying both 64bit numbers to get a
1496     /* quotient =  ( ( numerator / denominator)
1497                   x 2^(numerator exponent -  denominator exponent)
1499     unsigned64 numerator;
1500     unsigned64 denominator;
1501     unsigned64 quotient;
1504     f->class = sim_fpu_class_number;
1505     f->sign = l->sign ^ r->sign;
1506     f->normal_exp = l->normal_exp - r->normal_exp;
1508     numerator = l->fraction;
1509     denominator = r->fraction;
1511     /* Fraction will be less than 1.0 */
1512     if (numerator < denominator)
1517     ASSERT (numerator >= denominator);
1519     /* Gain extra precision, already used one spare bit */
1520     numerator <<= NR_SPARE;
1521     denominator <<= NR_SPARE;
1523     /* Does divide one bit at a time.  Optimize??? */
/* Restoring division: for each quotient bit, subtract when the
   numerator still covers the denominator.  */
1525       bit = (IMPLICIT_1 << NR_SPARE);
1528 	  if (numerator >= denominator)
1531 	      numerator -= denominator;
1537     /* discard (but save) the extra bits */
1538     if ((quotient & LSMASK64 (NR_SPARE -1, 0)))
1539       quotient = (quotient >> NR_SPARE) | 1;
1541       quotient = (quotient >> NR_SPARE);
1543     f->fraction = quotient;
1544     ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
/* A non-zero remainder makes the quotient inexact (sticky bit).  */
1547 	f->fraction |= 1; /* stick remaining bits */
1548 	return sim_fpu_status_inexact;
1556 INLINE_SIM_FPU (int)
/* F = max (L, R).  NaN handling first; then compare by sign, then by
   exponent and fraction.  Returns a sim_fpu_status mask.  */
1557 sim_fpu_max (sim_fpu *f,
1561   if (sim_fpu_is_snan (l))
1564       f->class = sim_fpu_class_qnan;
1565       return sim_fpu_status_invalid_snan;
1567   if (sim_fpu_is_snan (r))
1570       f->class = sim_fpu_class_qnan;
1571       return sim_fpu_status_invalid_snan;
1573   if (sim_fpu_is_qnan (l))
1578   if (sim_fpu_is_qnan (r))
1583   if (sim_fpu_is_infinity (l))
/* Same-signed infinities are reported invalid (isi) here.  */
1585       if (sim_fpu_is_infinity (r)
1586 	  && l->sign == r->sign)
1589 	  return sim_fpu_status_invalid_isi;
1592 	*f = *r; /* -inf < anything */
1594 	*f = *l; /* +inf > anything */
1597   if (sim_fpu_is_infinity (r))
1600 	*f = *l; /* anything > -inf */
1602 	*f = *r; /* anything < +inf */
/* Differing signs decide immediately: any +ve beats any -ve.  */
1605   if (l->sign > r->sign)
1607       *f = *r; /* -ve < +ve */
1610   if (l->sign < r->sign)
1612       *f = *l; /* +ve > -ve */
1615   ASSERT (l->sign == r->sign);
/* Same sign: larger magnitude wins for +ve, loses for -ve.  */
1616   if (l->normal_exp > r->normal_exp
1617       || (l->normal_exp == r->normal_exp &&
1618 	  l->fraction > r->fraction))
1622 	*f = *r; /* -ve < -ve */
1624 	*f = *l; /* +ve > +ve */
1631 	*f = *l; /* -ve > -ve */
1633 	*f = *r; /* +ve < +ve */
1639 INLINE_SIM_FPU (int)
/* F = min (L, R).  Mirror image of sim_fpu_max.  Returns a
   sim_fpu_status mask.  */
1640 sim_fpu_min (sim_fpu *f,
1644   if (sim_fpu_is_snan (l))
1647       f->class = sim_fpu_class_qnan;
1648       return sim_fpu_status_invalid_snan;
1650   if (sim_fpu_is_snan (r))
1653       f->class = sim_fpu_class_qnan;
1654       return sim_fpu_status_invalid_snan;
1656   if (sim_fpu_is_qnan (l))
1661   if (sim_fpu_is_qnan (r))
1666   if (sim_fpu_is_infinity (l))
1668       if (sim_fpu_is_infinity (r)
1669 	  && l->sign == r->sign)
1672 	  return sim_fpu_status_invalid_isi;
1675 	*f = *l; /* -inf < anything */
1677 	*f = *r; /* +inf > anything */
1680   if (sim_fpu_is_infinity (r))
1683 	*f = *r; /* anything > -inf */
1685 	*f = *l; /* anything < +inf */
/* Differing signs decide immediately: any -ve beats any +ve.  */
1688   if (l->sign > r->sign)
1690       *f = *l; /* -ve < +ve */
1693   if (l->sign < r->sign)
1695       *f = *r; /* +ve > -ve */
1698   ASSERT (l->sign == r->sign);
/* Same sign: larger magnitude loses for +ve, wins for -ve.  */
1699   if (l->normal_exp > r->normal_exp
1700       || (l->normal_exp == r->normal_exp &&
1701 	  l->fraction > r->fraction))
1705 	*f = *l; /* -ve < -ve */
1707 	*f = *r; /* +ve > +ve */
1714 	*f = *r; /* -ve > -ve */
1716 	*f = *l; /* +ve < +ve */
1722 INLINE_SIM_FPU (int)
/* F = -R.  Signaling NaNs are quietened and reported; the sign-flip
   itself is in elided lines below.  */
1723 sim_fpu_neg (sim_fpu *f,
1726   if (sim_fpu_is_snan (r))
1729       f->class = sim_fpu_class_qnan;
1730       return sim_fpu_status_invalid_snan;
1732   if (sim_fpu_is_qnan (r))
1743 INLINE_SIM_FPU (int)
/* F = |R|.  Signaling NaNs are quietened and reported; the sign-clear
   itself is in elided lines below.  */
1744 sim_fpu_abs (sim_fpu *f,
1747   if (sim_fpu_is_snan (r))
1750       f->class = sim_fpu_class_qnan;
1751       return sim_fpu_status_invalid_snan;
1753   if (sim_fpu_is_qnan (r))
1764 INLINE_SIM_FPU (int)
/* F = 1 / R, implemented directly via the division routine.  */
1765 sim_fpu_inv (sim_fpu *f,
1768   return sim_fpu_div (f, &sim_fpu_one, r);
1772 INLINE_SIM_FPU (int)
1773 sim_fpu_sqrt (sim_fpu *f,
1776 if (sim_fpu_is_snan (r))
1779 return sim_fpu_status_invalid_snan;
1781 if (sim_fpu_is_qnan (r))
1786 if (sim_fpu_is_zero (r))
1788 f->class = sim_fpu_class_zero;
1793 if (sim_fpu_is_infinity (r))
1798 return sim_fpu_status_invalid_sqrt;
1802 f->class = sim_fpu_class_infinity;
1811 return sim_fpu_status_invalid_sqrt;
1814 /* @(#)e_sqrt.c 5.1 93/09/24 */
1816 * ====================================================
1817 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1819 * Developed at SunPro, a Sun Microsystems, Inc. business.
1820 * Permission to use, copy, modify, and distribute this
1821 * software is freely granted, provided that this notice
1823 * ====================================================
1826 /* __ieee754_sqrt(x)
1827 * Return correctly rounded sqrt.
1828 * ------------------------------------------
1829 * | Use the hardware sqrt if you have one |
1830 * ------------------------------------------
1832 * Bit by bit method using integer arithmetic. (Slow, but portable)
1834 * Scale x to y in [1,4) with even powers of 2:
1835 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1836 * sqrt(x) = 2^k * sqrt(y)
1839 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1840 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1842 - y = ((m even) ? x : 2.x)
1844 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1846 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1848 * 2. Bit by bit computation
1849 * Let q = sqrt(y) truncated to i bit after binary point (q = 1),
1852 * s = 2*q , and y = 2 * ( y - q ). (1)
1855 * To compute q from q , one checks whether
1859 * (q + 2 ) <= y. (2)
1862 * If (2) is false, then q = q ; otherwise q = q + 2 .
1865 * With some algebric manipulation, it is not difficult to see
1866 * that (2) is equivalent to
1871 * The advantage of (3) is that s and y can be computed by
1873 * the following recurrence formula:
1876 * s = s , y = y ; (4)
1885 * s = s + 2 , y = y - s - 2 (5)
1890 - NOTE: y = 2 (y - s - 2 )
1893 * One may easily use induction to prove (4) and (5).
1894 * Note. Since the left hand side of (3) contain only i+2 bits,
1895 * it does not necessary to do a full (53-bit) comparison
1898 * After generating the 53 bits result, we compute one more bit.
1899 * Together with the remainder, we can decide whether the
1900 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
1901 * (it will never equal to 1/2ulp).
1902 * The rounding mode can be detected by checking whether
1903 * huge + tiny is equal to huge, and whether huge - tiny is
1904 * equal to huge for some floating point number "huge" and "tiny".
1907 * sqrt(+-0) = +-0 ... exact
1909 * sqrt(-ve) = NaN ... with invalid signal
1910 * sqrt(NaN) = NaN ... with invalid signal for signaling NaN
1912 * Other methods : see the appended file at the end of the program below.
1917 /* generate sqrt(x) bit by bit */
/* NOTE(review): fragment of the bit-by-bit square-root core; the
   enclosing function header, the NaN/zero/negative special cases, and
   the main extraction loop are elided from this listing.  What the
   visible lines establish: the result is classified as an ordinary
   number, the exponent is halved, and when the original exponent is odd
   the fraction is (per the retained comment) doubled so that y lands in
   [IMPLICIT_1, IMPLICIT_4); the loop then derives root bits q with
   trial value t = s + b; finally any leftover remainder bits are ORed
   into the fraction as a sticky bit and the routine reports inexact.
   Anything beyond this — in particular the loop body — must be checked
   against the full source.  */
1923 f->class = sim_fpu_class_number;
1926 f->normal_exp = (r->normal_exp >> 1); /* exp = [exp/2] */
1928 /* odd exp, double x to make it even */
1929 ASSERT (y >= IMPLICIT_1 && y < IMPLICIT_4);
1930 if ((r->normal_exp & 1))
1934 ASSERT (y >= IMPLICIT_1 && y < (IMPLICIT_2 << 1));
1936 /* Let loop determine first value of s (either 1 or 2) */
1943 unsigned64 t = s + b;
1954 ASSERT (q >= IMPLICIT_1 && q < IMPLICIT_2);
1958 f->fraction |= 1; /* stick remaining bits */
1959 return sim_fpu_status_inexact;
1967 /* int/long <-> sim_fpu */
/* NOTE(review): the bodies of these conversion wrappers are largely
   elided from this listing; the comments below state only what the
   visible lines establish.  */
/* Convert a signed 32-bit integer to F (the value parameter line and
   the body are elided here).  */
1969 INLINE_SIM_FPU (int)
1970 sim_fpu_i32to (sim_fpu *f,
1972 sim_fpu_round round)
/* Convert an unsigned 32-bit integer to F (value parameter and body
   elided).  */
1978 INLINE_SIM_FPU (int)
1979 sim_fpu_u32to (sim_fpu *f,
1981 sim_fpu_round round)
/* Convert a signed 64-bit integer to F (value parameter and body
   elided).  */
1987 INLINE_SIM_FPU (int)
1988 sim_fpu_i64to (sim_fpu *f,
1990 sim_fpu_round round)
/* Convert an unsigned 64-bit integer to F (value parameter and body
   elided).  */
1996 INLINE_SIM_FPU (int)
1997 sim_fpu_u64to (sim_fpu *f,
1999 sim_fpu_round round)
/* Convert F to a 32-bit signed integer.  Visibly goes through the
   64-bit helper fpu2i with is_64bit = 0; the narrowing of i64 into *I
   and the status return are elided.  */
2006 INLINE_SIM_FPU (int)
2007 sim_fpu_to32i (signed32 *i,
2009 sim_fpu_round round)
2012 int status = fpu2i (&i64, f, 0, round);
/* Convert F to a 32-bit unsigned integer via fpu2u (is_64bit = 0).
   Note fpu2u visibly takes no rounding-mode argument, unlike fpu2i.  */
2017 INLINE_SIM_FPU (int)
2018 sim_fpu_to32u (unsigned32 *u,
2020 sim_fpu_round round)
2023 int status = fpu2u (&u64, f, 0);
/* Convert F to a 64-bit signed integer: a direct tail call into fpu2i
   with is_64bit = 1.  */
2028 INLINE_SIM_FPU (int)
2029 sim_fpu_to64i (signed64 *i,
2031 sim_fpu_round round)
2033 return fpu2i (i, f, 1, round);
/* Convert F to a 64-bit unsigned integer: a direct tail call into
   fpu2u with is_64bit = 1 (ROUND is accepted but visibly unused).  */
2037 INLINE_SIM_FPU (int)
2038 sim_fpu_to64u (unsigned64 *u,
2040 sim_fpu_round round)
2042 return fpu2u (u, f, 1);
2047 /* sim_fpu -> host format */
/* Convert F to a host float (body elided in this listing).  */
2050 INLINE_SIM_FPU (float)
2051 sim_fpu_2f (const sim_fpu *f)
/* Convert S to a host double.  Visible behaviour: a signalling NaN is
   first turned into a quiet NaN (copy forced to class qnan) before
   being packed with pack_fpu (..., 1) — NOTE(review): presumably so
   the host value never carries a signalling NaN; confirm against the
   full source.  Ordinary values are packed directly.  */
2058 INLINE_SIM_FPU (double)
2059 sim_fpu_2d (const sim_fpu *s)
2062 if (sim_fpu_is_snan (s))
2066 n.class = sim_fpu_class_qnan;
2067 val.i = pack_fpu (&n, 1);
2071 val.i = pack_fpu (s, 1);
/* Host float -> F.  Visibly unpacks with is_double = 1, so the float
   argument is presumably widened to a host double first — the widening
   lines are elided; verify against the full source.  */
2078 INLINE_SIM_FPU (void)
2079 sim_fpu_f2 (sim_fpu *f,
2084 unpack_fpu (f, val.i, 1);
/* Host double -> F, via unpack_fpu on the raw 64-bit image.  */
2089 INLINE_SIM_FPU (void)
2090 sim_fpu_d2 (sim_fpu *f,
2095 unpack_fpu (f, val.i, 1);
/* NOTE(review): in all the class predicates below, only the switch
   case labels survive in this listing — the switch statements, return
   values and default branches are elided.  The convention implied by
   the visible cases: each predicate answers nonzero when D's class
   matches the listed case(s).  */
/* Nonzero when D is a NaN, quiet or signalling.  */
2101 INLINE_SIM_FPU (int)
2102 sim_fpu_is_nan (const sim_fpu *d)
2106 case sim_fpu_class_qnan:
2107 case sim_fpu_class_snan:
/* Nonzero when D is a quiet NaN.  */
2114 INLINE_SIM_FPU (int)
2115 sim_fpu_is_qnan (const sim_fpu *d)
2119 case sim_fpu_class_qnan:
/* Nonzero when D is a signalling NaN.  */
2126 INLINE_SIM_FPU (int)
2127 sim_fpu_is_snan (const sim_fpu *d)
2131 case sim_fpu_class_snan:
/* Nonzero when D is (positive or negative) zero.  */
2138 INLINE_SIM_FPU (int)
2139 sim_fpu_is_zero (const sim_fpu *d)
2143 case sim_fpu_class_zero:
/* Nonzero when D is an infinity.  */
2150 INLINE_SIM_FPU (int)
2151 sim_fpu_is_infinity (const sim_fpu *d)
2155 case sim_fpu_class_infinity:
/* Nonzero when D is a finite nonzero value (normal or denormal).  */
2162 INLINE_SIM_FPU (int)
2163 sim_fpu_is_number (const sim_fpu *d)
2167 case sim_fpu_class_denorm:
2168 case sim_fpu_class_number:
/* Nonzero when D is a denormal.  */
2175 INLINE_SIM_FPU (int)
2176 sim_fpu_is_denorm (const sim_fpu *d)
2180 case sim_fpu_class_denorm:
/* Field accessors.  */
/* D's sign bit (body elided in this listing).  */
2188 INLINE_SIM_FPU (int)
2189 sim_fpu_sign (const sim_fpu *d)
/* D's unbiased exponent.  */
2195 INLINE_SIM_FPU (int)
2196 sim_fpu_exp (const sim_fpu *d)
2198 return d->normal_exp;
/* D's fraction field (body elided in this listing).  */
2202 INLINE_SIM_FPU (unsigned64)
2203 sim_fpu_fraction (const sim_fpu *d)
/* D's guard bits: the fraction bits below the representable precision,
   shifted down past the pad bits.  NOTE(review): only one of the
   single/double-precision paths is visible here; the IS_DOUBLE test
   and the other mask are elided.  */
2209 INLINE_SIM_FPU (unsigned64)
2210 sim_fpu_guard (const sim_fpu *d, int is_double)
2213 unsigned64 guardmask = LSMASK64 (NR_GUARDS - 1, 0);
2214 rv = (d->fraction & guardmask) >> NR_PAD;
/* Map D's class and sign onto the SIM_FPU_IS_* enumeration.  The sign
   tests separating the N*/P* pairs are elided in this listing.  */
2219 INLINE_SIM_FPU (int)
2220 sim_fpu_is (const sim_fpu *d)
2224 case sim_fpu_class_qnan:
2225 return SIM_FPU_IS_QNAN;
2226 case sim_fpu_class_snan:
2227 return SIM_FPU_IS_SNAN;
2228 case sim_fpu_class_infinity:
2230 return SIM_FPU_IS_NINF;
2232 return SIM_FPU_IS_PINF;
2233 case sim_fpu_class_number:
2235 return SIM_FPU_IS_NNUMBER;
2237 return SIM_FPU_IS_PNUMBER;
2238 case sim_fpu_class_denorm:
2240 return SIM_FPU_IS_NDENORM;
2242 return SIM_FPU_IS_PDENORM;
2243 case sim_fpu_class_zero:
2245 return SIM_FPU_IS_NZERO;
2247 return SIM_FPU_IS_PZERO;
/* Compare L and R by subtracting and classifying the difference: the
   returned SIM_FPU_IS_* code encodes sign/zero/NaN-ness of L - R.  */
2254 INLINE_SIM_FPU (int)
2255 sim_fpu_cmp (const sim_fpu *l, const sim_fpu *r)
2258 sim_fpu_sub (&res, l, r);
2259 return sim_fpu_is (&res);
/* NOTE(review): the six is_<op> wrappers below call the three-valued
   comparison operators and — per the visible out-parameter — return
   just the boolean flag, discarding the status; the declarations of
   the flag variable and the return statements are elided in this
   listing.  */
2262 INLINE_SIM_FPU (int)
2263 sim_fpu_is_lt (const sim_fpu *l, const sim_fpu *r)
2266 sim_fpu_lt (&status, l, r);
2270 INLINE_SIM_FPU (int)
2271 sim_fpu_is_le (const sim_fpu *l, const sim_fpu *r)
2274 sim_fpu_le (&is, l, r);
2278 INLINE_SIM_FPU (int)
2279 sim_fpu_is_eq (const sim_fpu *l, const sim_fpu *r)
2282 sim_fpu_eq (&is, l, r);
2286 INLINE_SIM_FPU (int)
2287 sim_fpu_is_ne (const sim_fpu *l, const sim_fpu *r)
2290 sim_fpu_ne (&is, l, r);
2294 INLINE_SIM_FPU (int)
2295 sim_fpu_is_ge (const sim_fpu *l, const sim_fpu *r)
2298 sim_fpu_ge (&is, l, r)
2302 INLINE_SIM_FPU (int)
2303 sim_fpu_is_gt (const sim_fpu *l, const sim_fpu *r)
2306 sim_fpu_gt (&is, l, r);
2311 /* Compare operators */
/* NOTE(review): lt/le/eq/ne all follow the same visible pattern: when
   neither operand is a NaN, both are packed into host doubles
   (pack_fpu with is_double = 1) and compared with the host's native
   operator; a signalling NaN operand yields status
   sim_fpu_status_invalid_snan, and otherwise (a quiet NaN) yields
   sim_fpu_status_invalid_qnan.  The assignments of *IS in the NaN
   branches and the success-path returns are elided in this listing.  */
2313 INLINE_SIM_FPU (int)
2314 sim_fpu_lt (int *is,
2318 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2322 lval.i = pack_fpu (l, 1);
2323 rval.i = pack_fpu (r, 1);
2324 (*is) = (lval.d < rval.d);
2327 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2330 return sim_fpu_status_invalid_snan;
2335 return sim_fpu_status_invalid_qnan;
2339 INLINE_SIM_FPU (int)
2340 sim_fpu_le (int *is,
2344 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2348 lval.i = pack_fpu (l, 1);
2349 rval.i = pack_fpu (r, 1);
2350 *is = (lval.d <= rval.d);
2353 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2356 return sim_fpu_status_invalid_snan;
2361 return sim_fpu_status_invalid_qnan;
2365 INLINE_SIM_FPU (int)
2366 sim_fpu_eq (int *is,
2370 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2374 lval.i = pack_fpu (l, 1);
2375 rval.i = pack_fpu (r, 1);
2376 (*is) = (lval.d == rval.d);
2379 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2382 return sim_fpu_status_invalid_snan;
2387 return sim_fpu_status_invalid_qnan;
2391 INLINE_SIM_FPU (int)
2392 sim_fpu_ne (int *is,
2396 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2400 lval.i = pack_fpu (l, 1);
2401 rval.i = pack_fpu (r, 1);
2402 (*is) = (lval.d != rval.d);
2405 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2408 return sim_fpu_status_invalid_snan;
2413 return sim_fpu_status_invalid_qnan;
/* ge/gt are implemented by swapping the operands of le/lt.  */
2417 INLINE_SIM_FPU (int)
2418 sim_fpu_ge (int *is,
2422 return sim_fpu_le (is, r, l);
2425 INLINE_SIM_FPU (int)
2426 sim_fpu_gt (int *is,
2430 return sim_fpu_lt (is, r, l);
2434 /* A number of useful constants */
2436 #if EXTERN_SIM_FPU_P
/* Initializers for zero and qnan are elided in this listing.  */
2437 const sim_fpu sim_fpu_zero = {
2440 const sim_fpu sim_fpu_qnan = {
/* +1.0: implicit leading one, exponent 0.  */
2443 const sim_fpu sim_fpu_one = {
2444 sim_fpu_class_number, 0, IMPLICIT_1, 0
/* +2.0: same fraction, exponent 1.  */
2446 const sim_fpu sim_fpu_two = {
2447 sim_fpu_class_number, 0, IMPLICIT_1, 1
/* Largest finite single/double values: all-ones fraction down to the
   respective guard bits, at the maximum normal exponent.  */
2449 const sim_fpu sim_fpu_max32 = {
2450 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS32), NORMAL_EXPMAX32
2452 const sim_fpu sim_fpu_max64 = {
2453 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS64), NORMAL_EXPMAX64
/* Print F via PRINT/ARG, delegating to the bounded-digits variant with
   digits = -1 (print everything).  */
2460 INLINE_SIM_FPU (void)
2461 sim_fpu_print_fpu (const sim_fpu *f,
2462 sim_fpu_print_func *print,
2465 sim_fpu_printn_fpu (f, print, -1, arg);
/* Print F: sign first, then a per-class representation — NaNs print
   their payload bits tagged *QuietNaN / *SignalNaN; zero and infinity
   branches are elided in this listing; numbers and denorms print the
   fraction bits and a power-of-two exponent, asserting the fraction is
   normalized in [IMPLICIT_1, IMPLICIT_2).  */
2468 INLINE_SIM_FPU (void)
2469 sim_fpu_printn_fpu (const sim_fpu *f,
2470 sim_fpu_print_func *print,
2474 print (arg, "%s", f->sign ? "-" : "+");
2477 case sim_fpu_class_qnan:
2479 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2480 print (arg, "*QuietNaN");
2482 case sim_fpu_class_snan:
2484 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2485 print (arg, "*SignalNaN");
2487 case sim_fpu_class_zero:
2490 case sim_fpu_class_infinity:
2493 case sim_fpu_class_number:
2494 case sim_fpu_class_denorm:
2496 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2497 print (arg, "*2^%+d", f->normal_exp);
2498 ASSERT (f->fraction >= IMPLICIT_1);
2499 ASSERT (f->fraction < IMPLICIT_2);
2504 INLINE_SIM_FPU (void)
2505 sim_fpu_print_status (int status,
2506 sim_fpu_print_func *print,
2513 switch ((sim_fpu_status) (status & i))
2515 case sim_fpu_status_denorm:
2516 print (arg, "%sD", prefix);
2518 case sim_fpu_status_invalid_snan:
2519 print (arg, "%sSNaN", prefix);
2521 case sim_fpu_status_invalid_qnan:
2522 print (arg, "%sQNaN", prefix);
2524 case sim_fpu_status_invalid_isi:
2525 print (arg, "%sISI", prefix);
2527 case sim_fpu_status_invalid_idi:
2528 print (arg, "%sIDI", prefix);
2530 case sim_fpu_status_invalid_zdz:
2531 print (arg, "%sZDZ", prefix);
2533 case sim_fpu_status_invalid_imz:
2534 print (arg, "%sIMZ", prefix);
2536 case sim_fpu_status_invalid_cvi:
2537 print (arg, "%sCVI", prefix);
2539 case sim_fpu_status_invalid_cmp:
2540 print (arg, "%sCMP", prefix);
2542 case sim_fpu_status_invalid_sqrt:
2543 print (arg, "%sSQRT", prefix);
2546 case sim_fpu_status_inexact:
2547 print (arg, "%sX", prefix);
2550 case sim_fpu_status_overflow:
2551 print (arg, "%sO", prefix);
2554 case sim_fpu_status_underflow:
2555 print (arg, "%sU", prefix);
2558 case sim_fpu_status_invalid_div0:
2559 print (arg, "%s/", prefix);
2562 case sim_fpu_status_rounded:
2563 print (arg, "%sR", prefix);