1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
5 /* Copyright 1994-2014 Free Software Foundation, Inc.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 /* As a special exception, if you link this library with other files,
21 some of which are compiled with GCC, to produce an executable,
22 this library does not by itself cause the resulting executable
23 to be covered by the GNU General Public License.
24 This exception does not however invalidate any other reasons why
25 the executable file might be covered by the GNU General Public License. */
27 /* This implements IEEE 754 format arithmetic, but does not provide a
28 mechanism for setting the rounding mode, or for generating or handling
31 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
32 Wilson, all of Cygnus Support. */
38 #include "sim-basics.h"
42 #include "sim-assert.h"
46 If digits is -1, then print all digits. */
49 print_bits (unsigned64 x,
52 sim_fpu_print_func print,
55 unsigned64 bit = LSBIT64 (msbit);
68 if (digits > 0) digits--;
75 /* Quick and dirty conversion between a host double and host 64bit int */
83 /* A packed IEEE floating point number.
85 Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
86 32 and 64 bit numbers. This number is interpreted as:
88 Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
89 (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
91 Denormalized (0 == BIASEDEXP && FRAC != 0):
92 (sign ? "-" : "+") 0.<FRAC> x 2 ^ (- EXPBIAS)
94 Zero (0 == BIASEDEXP && FRAC == 0):
95 (sign ? "-" : "+") 0.0
97 Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
98 (sign ? "-" : "+") "infinity"
100 SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
103 QuietNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC >= QUIET_NAN):
108 #define NR_EXPBITS (is_double ? 11 : 8)
109 #define NR_FRACBITS (is_double ? 52 : 23)
110 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
/* All-ones biased exponent: marks infinities and NaNs.
   255 for single precision, 2047 for double precision.  */
#define EXPMAX32 (255)
#define EXPMAX64 (2047)
/* EXMPAX64 is a historical misspelling of EXPMAX64 (compare EXPBIAS64,
   NORMAL_EXPMAX64); kept as an alias so existing references compile.  */
#define EXMPAX64 EXPMAX64
#define EXPMAX ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))
116 #define EXPBIAS32 (127)
117 #define EXPBIAS64 (1023)
118 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
120 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
124 /* An unpacked floating point number.
126 When unpacked, the fraction of both a 32 and 64 bit floating point
127 number is stored using the same format:
129 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
130 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
132 #define NR_PAD32 (30)
134 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
135 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
137 #define NR_GUARDS32 (7 + NR_PAD32)
138 #define NR_GUARDS64 (8 + NR_PAD64)
139 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
140 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
142 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
143 #define GUARDLSB LSBIT64 (NR_PAD)
144 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
146 #define NR_FRAC_GUARD (60)
147 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
148 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
149 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
152 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
154 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
156 #define NORMAL_EXPMAX32 (EXPBIAS32)
157 #define NORMAL_EXPMAX64 (EXPBIAS64)
158 #define NORMAL_EXPMAX (EXPBIAS)
161 /* Integer constants */
163 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
164 #define MAX_UINT32 LSMASK64 (31, 0)
165 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
167 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
168 #define MAX_UINT64 LSMASK64 (63, 0)
169 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
171 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
172 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
173 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
174 #define NR_INTBITS (is_64bit ? 64 : 32)
176 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer */
/* NOTE(review): the extraction of this file dropped many lines (the
   embedded original line numbers jump); only the surviving fragment of
   pack_fpu is annotated below.  */
177 STATIC_INLINE_SIM_FPU (unsigned64)
178 pack_fpu (const sim_fpu *src,
/* Dispatch on the unpacked class to build the sign/exp/fraction fields.  */
189 case sim_fpu_class_qnan:
192 /* force fraction to correct class */
193 fraction = src->fraction;
194 fraction >>= NR_GUARDS;
/* SIM_QUIET_NAN_NEGATED targets invert the quiet-NaN bit sense.  */
195 #ifdef SIM_QUIET_NAN_NEGATED
196 fraction |= QUIET_NAN - 1;
198 fraction |= QUIET_NAN;
201 case sim_fpu_class_snan:
204 /* force fraction to correct class */
205 fraction = src->fraction;
206 fraction >>= NR_GUARDS;
207 #ifdef SIM_QUIET_NAN_NEGATED
208 fraction |= QUIET_NAN;
210 fraction &= ~QUIET_NAN;
213 case sim_fpu_class_infinity:
218 case sim_fpu_class_zero:
223 case sim_fpu_class_number:
224 case sim_fpu_class_denorm:
/* Unpacked normal numbers always carry the implicit leading 1 bit.  */
225 ASSERT (src->fraction >= IMPLICIT_1);
226 ASSERT (src->fraction < IMPLICIT_2);
227 if (src->normal_exp < NORMAL_EXPMIN)
229 /* This number's exponent is too low to fit into the bits
230 available in the number.  We'll denormalize the number by
231 storing zero in the exponent and shift the fraction to
232 the right to make up for it. */
233 int nr_shift = NORMAL_EXPMIN - src->normal_exp;
234 if (nr_shift > NR_FRACBITS)
236 /* underflow, just make the number zero */
245 /* Shift by the value */
246 fraction = src->fraction;
247 fraction >>= NR_GUARDS;
248 fraction >>= nr_shift;
251 else if (src->normal_exp > NORMAL_EXPMAX)
260 exp = (src->normal_exp + EXPBIAS);
262 fraction = src->fraction;
263 /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
265 /* Round to nearest: If the guard bits are the all zero, but
266 the first, then we're half way between two numbers,
267 choose the one which makes the lsb of the answer 0. */
268 if ((fraction & GUARDMASK) == GUARDMSB)
270 if ((fraction & (GUARDMSB << 1)))
271 fraction += (GUARDMSB << 1);
275 /* Add a one to the guards to force round to nearest */
276 fraction += GUARDROUND;
278 if ((fraction & IMPLICIT_2)) /* rounding resulted in carry */
283 fraction >>= NR_GUARDS;
284 /* When exp == EXPMAX (overflow from carry) fraction must
285 have been made zero */
286 ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
/* Assemble <SIGN><BIASEDEXP><FRAC> into the packed word.  */
293 packed = ((sign ? SIGNBIT : 0)
294 | (exp << NR_FRACBITS)
295 | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
297 /* trace operation */
304 printf ("pack_fpu: ");
305 printf ("-> %c%0lX.%06lX\n",
306 LSMASKED32 (packed, 31, 31) ? '8' : '0',
307 (long) LSEXTRACTED32 (packed, 30, 23),
308 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
316 /* Unpack a 32/64 bit integer into a sim_fpu structure */
/* NOTE(review): many original lines were lost in extraction (the embedded
   line numbers jump); only the surviving fragment is annotated.  */
317 STATIC_INLINE_SIM_FPU (void)
318 unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
/* Split the packed word into its three IEEE fields.  */
320 unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
321 unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
322 int sign = (packed & SIGNBIT) != 0;
326 /* Hmm. Looks like 0 */
329 /* tastes like zero */
330 dst->class = sim_fpu_class_zero;
336 /* Zero exponent with non zero fraction - it's denormalized,
337 so there isn't a leading implicit one - we'll shift it so
/* Denormals are re-normalized on unpack: shift the fraction left until
   the implicit bit is set; normal_exp records the compensation.  */
339 dst->normal_exp = exp - EXPBIAS + 1;
340 dst->class = sim_fpu_class_denorm;
342 fraction <<= NR_GUARDS;
343 while (fraction < IMPLICIT_1)
348 dst->fraction = fraction;
351 else if (exp == EXPMAX)
356 /* Attached to a zero fraction - means infinity */
357 dst->class = sim_fpu_class_infinity;
359 /* dst->normal_exp = EXPBIAS; */
360 /* dst->fraction = 0; */
366 /* Non zero fraction, means NaN */
368 dst->fraction = (fraction << NR_GUARDS);
/* Quiet vs signaling: by default fraction >= QUIET_NAN (MSB set) means
   quiet; SIM_QUIET_NAN_NEGATED targets use the opposite sense.  */
369 #ifdef SIM_QUIET_NAN_NEGATED
370 qnan = (fraction & QUIET_NAN) == 0;
372 qnan = fraction >= QUIET_NAN;
375 dst->class = sim_fpu_class_qnan;
377 dst->class = sim_fpu_class_snan;
382 /* Nothing strange about this number */
383 dst->class = sim_fpu_class_number;
385 dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
386 dst->normal_exp = exp - EXPBIAS;
389 /* trace operation */
396 printf ("unpack_fpu: %c%02lX.%06lX ->\n",
397 LSMASKED32 (packed, 31, 31) ? '8' : '0',
398 (long) LSEXTRACTED32 (packed, 30, 23),
399 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
/* Debug-build sanity check: repacking the result must reproduce the
   original packed input bit-for-bit.  */
406 val.i = pack_fpu (dst, 1);
409 ASSERT (val.i == packed);
413 unsigned32 val = pack_fpu (dst, 0);
414 unsigned32 org = packed;
421 /* Convert a floating point into an integer */
422 STATIC_INLINE_SIM_FPU (int)
431 if (sim_fpu_is_zero (s))
436 if (sim_fpu_is_snan (s))
438 *i = MIN_INT; /* FIXME */
439 return sim_fpu_status_invalid_cvi;
441 if (sim_fpu_is_qnan (s))
443 *i = MIN_INT; /* FIXME */
444 return sim_fpu_status_invalid_cvi;
446 /* map infinity onto MAX_INT... */
447 if (sim_fpu_is_infinity (s))
449 *i = s->sign ? MIN_INT : MAX_INT;
450 return sim_fpu_status_invalid_cvi;
452 /* it is a number, but a small one */
453 if (s->normal_exp < 0)
456 return sim_fpu_status_inexact;
458 /* Is the floating point MIN_INT or just close? */
459 if (s->sign && s->normal_exp == (NR_INTBITS - 1))
462 ASSERT (s->fraction >= IMPLICIT_1);
463 if (s->fraction == IMPLICIT_1)
464 return 0; /* exact */
465 if (is_64bit) /* can't round */
466 return sim_fpu_status_invalid_cvi; /* must be overflow */
467 /* For a 32bit with MAX_INT, rounding is possible */
470 case sim_fpu_round_default:
472 case sim_fpu_round_zero:
473 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
474 return sim_fpu_status_invalid_cvi;
476 return sim_fpu_status_inexact;
478 case sim_fpu_round_near:
480 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
481 return sim_fpu_status_invalid_cvi;
482 else if ((s->fraction & !FRAC32MASK) >= (~FRAC32MASK >> 1))
483 return sim_fpu_status_invalid_cvi;
485 return sim_fpu_status_inexact;
487 case sim_fpu_round_up:
488 if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
489 return sim_fpu_status_inexact;
491 return sim_fpu_status_invalid_cvi;
492 case sim_fpu_round_down:
493 return sim_fpu_status_invalid_cvi;
496 /* Would right shifting result in the FRAC being shifted into
497 (through) the integer's sign bit? */
498 if (s->normal_exp > (NR_INTBITS - 2))
500 *i = s->sign ? MIN_INT : MAX_INT;
501 return sim_fpu_status_invalid_cvi;
503 /* normal number shift it into place */
505 shift = (s->normal_exp - (NR_FRAC_GUARD));
513 if (tmp & ((SIGNED64 (1) << shift) - 1))
514 status |= sim_fpu_status_inexact;
517 *i = s->sign ? (-tmp) : (tmp);
521 /* convert an integer into a floating point */
522 STATIC_INLINE_SIM_FPU (int)
523 i2fpu (sim_fpu *f, signed64 i, int is_64bit)
528 f->class = sim_fpu_class_zero;
534 f->class = sim_fpu_class_number;
536 f->normal_exp = NR_FRAC_GUARD;
540 /* Special case for minint, since there is no corresponding
541 +ve integer representation for it */
544 f->fraction = IMPLICIT_1;
545 f->normal_exp = NR_INTBITS - 1;
553 if (f->fraction >= IMPLICIT_2)
557 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
560 while (f->fraction >= IMPLICIT_2);
562 else if (f->fraction < IMPLICIT_1)
569 while (f->fraction < IMPLICIT_1);
573 /* trace operation */
576 printf ("i2fpu: 0x%08lX ->\n", (long) i);
583 fpu2i (&val, f, is_64bit, sim_fpu_round_zero);
584 if (i >= MIN_INT32 && i <= MAX_INT32)
594 /* Convert a floating point into an integer */
595 STATIC_INLINE_SIM_FPU (int)
596 fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
598 const int is_double = 1;
601 if (sim_fpu_is_zero (s))
606 if (sim_fpu_is_nan (s))
611 /* it is a negative number */
617 /* get reasonable MAX_USI_INT... */
618 if (sim_fpu_is_infinity (s))
623 /* it is a number, but a small one */
624 if (s->normal_exp < 0)
630 if (s->normal_exp > (NR_INTBITS - 1))
636 tmp = (s->fraction & ~PADMASK);
637 shift = (s->normal_exp - (NR_FRACBITS + NR_GUARDS));
651 /* Convert an unsigned integer into a floating point */
652 STATIC_INLINE_SIM_FPU (int)
653 u2fpu (sim_fpu *f, unsigned64 u, int is_64bit)
657 f->class = sim_fpu_class_zero;
663 f->class = sim_fpu_class_number;
665 f->normal_exp = NR_FRAC_GUARD;
668 while (f->fraction < IMPLICIT_1)
678 /* register <-> sim_fpu */
680 INLINE_SIM_FPU (void)
681 sim_fpu_32to (sim_fpu *f, unsigned32 s)
683 unpack_fpu (f, s, 0);
687 INLINE_SIM_FPU (void)
688 sim_fpu_232to (sim_fpu *f, unsigned32 h, unsigned32 l)
692 unpack_fpu (f, s, 1);
696 INLINE_SIM_FPU (void)
697 sim_fpu_64to (sim_fpu *f, unsigned64 s)
699 unpack_fpu (f, s, 1);
703 INLINE_SIM_FPU (void)
704 sim_fpu_to32 (unsigned32 *s,
707 *s = pack_fpu (f, 0);
711 INLINE_SIM_FPU (void)
712 sim_fpu_to232 (unsigned32 *h, unsigned32 *l,
715 unsigned64 s = pack_fpu (f, 1);
721 INLINE_SIM_FPU (void)
722 sim_fpu_to64 (unsigned64 *u,
725 *u = pack_fpu (f, 1);
729 INLINE_SIM_FPU (void)
730 sim_fpu_fractionto (sim_fpu *f,
736 int shift = (NR_FRAC_GUARD - precision);
737 f->class = sim_fpu_class_number;
739 f->normal_exp = normal_exp;
740 /* shift the fraction to where sim-fpu expects it */
742 f->fraction = (fraction << shift);
744 f->fraction = (fraction >> -shift);
745 f->fraction |= IMPLICIT_1;
749 INLINE_SIM_FPU (unsigned64)
750 sim_fpu_tofraction (const sim_fpu *d,
753 /* we have NR_FRAC_GUARD bits, we want only PRECISION bits */
754 int shift = (NR_FRAC_GUARD - precision);
755 unsigned64 fraction = (d->fraction & ~IMPLICIT_1);
757 return fraction >> shift;
759 return fraction << -shift;
765 STATIC_INLINE_SIM_FPU (int)
766 do_normal_overflow (sim_fpu *f,
772 case sim_fpu_round_default:
774 case sim_fpu_round_near:
775 f->class = sim_fpu_class_infinity;
777 case sim_fpu_round_up:
779 f->class = sim_fpu_class_infinity;
781 case sim_fpu_round_down:
783 f->class = sim_fpu_class_infinity;
785 case sim_fpu_round_zero:
788 f->normal_exp = NORMAL_EXPMAX;
789 f->fraction = LSMASK64 (NR_FRAC_GUARD, NR_GUARDS);
790 return (sim_fpu_status_overflow | sim_fpu_status_inexact);
793 STATIC_INLINE_SIM_FPU (int)
794 do_normal_underflow (sim_fpu *f,
800 case sim_fpu_round_default:
802 case sim_fpu_round_near:
803 f->class = sim_fpu_class_zero;
805 case sim_fpu_round_up:
807 f->class = sim_fpu_class_zero;
809 case sim_fpu_round_down:
811 f->class = sim_fpu_class_zero;
813 case sim_fpu_round_zero:
814 f->class = sim_fpu_class_zero;
817 f->normal_exp = NORMAL_EXPMIN - NR_FRACBITS;
818 f->fraction = IMPLICIT_1;
819 return (sim_fpu_status_inexact | sim_fpu_status_underflow);
824 /* Round a number using NR_GUARDS.
825 Will return the rounded number or F->FRACTION == 0 when underflow */
827 STATIC_INLINE_SIM_FPU (int)
828 do_normal_round (sim_fpu *f,
832 unsigned64 guardmask = LSMASK64 (nr_guards - 1, 0);
833 unsigned64 guardmsb = LSBIT64 (nr_guards - 1);
834 unsigned64 fraclsb = guardmsb << 1;
835 if ((f->fraction & guardmask))
837 int status = sim_fpu_status_inexact;
840 case sim_fpu_round_default:
842 case sim_fpu_round_near:
843 if ((f->fraction & guardmsb))
845 if ((f->fraction & fraclsb))
847 status |= sim_fpu_status_rounded;
849 else if ((f->fraction & (guardmask >> 1)))
851 status |= sim_fpu_status_rounded;
855 case sim_fpu_round_up:
857 status |= sim_fpu_status_rounded;
859 case sim_fpu_round_down:
861 status |= sim_fpu_status_rounded;
863 case sim_fpu_round_zero:
866 f->fraction &= ~guardmask;
867 /* round if needed, handle resulting overflow */
868 if ((status & sim_fpu_status_rounded))
870 f->fraction += fraclsb;
871 if ((f->fraction & IMPLICIT_2))
884 STATIC_INLINE_SIM_FPU (int)
885 do_round (sim_fpu *f,
888 sim_fpu_denorm denorm)
892 case sim_fpu_class_qnan:
893 case sim_fpu_class_zero:
894 case sim_fpu_class_infinity:
897 case sim_fpu_class_snan:
898 /* Quieten a SignalingNaN */
899 f->class = sim_fpu_class_qnan;
900 return sim_fpu_status_invalid_snan;
902 case sim_fpu_class_number:
903 case sim_fpu_class_denorm:
906 ASSERT (f->fraction < IMPLICIT_2);
907 ASSERT (f->fraction >= IMPLICIT_1);
908 if (f->normal_exp < NORMAL_EXPMIN)
910 /* This number's exponent is too low to fit into the bits
911 available in the number. Round off any bits that will be
912 discarded as a result of denormalization. Edge case is
913 the implicit bit shifted to GUARD0 and then rounded
915 int shift = NORMAL_EXPMIN - f->normal_exp;
916 if (shift + NR_GUARDS <= NR_FRAC_GUARD + 1
917 && !(denorm & sim_fpu_denorm_zero))
919 status = do_normal_round (f, shift + NR_GUARDS, round);
920 if (f->fraction == 0) /* rounding underflowed */
922 status |= do_normal_underflow (f, is_double, round);
924 else if (f->normal_exp < NORMAL_EXPMIN) /* still underflow? */
926 status |= sim_fpu_status_denorm;
927 /* Any loss of precision when denormalizing is
928 underflow. Some processors check for underflow
929 before rounding, some after! */
930 if (status & sim_fpu_status_inexact)
931 status |= sim_fpu_status_underflow;
932 /* Flag that resultant value has been denormalized */
933 f->class = sim_fpu_class_denorm;
935 else if ((denorm & sim_fpu_denorm_underflow_inexact))
937 if ((status & sim_fpu_status_inexact))
938 status |= sim_fpu_status_underflow;
943 status = do_normal_underflow (f, is_double, round);
946 else if (f->normal_exp > NORMAL_EXPMAX)
949 status = do_normal_overflow (f, is_double, round);
953 status = do_normal_round (f, NR_GUARDS, round);
954 if (f->fraction == 0)
955 /* f->class = sim_fpu_class_zero; */
956 status |= do_normal_underflow (f, is_double, round);
957 else if (f->normal_exp > NORMAL_EXPMAX)
958 /* oops! rounding caused overflow */
959 status |= do_normal_overflow (f, is_double, round);
961 ASSERT ((f->class == sim_fpu_class_number
962 || f->class == sim_fpu_class_denorm)
963 <= (f->fraction < IMPLICIT_2 && f->fraction >= IMPLICIT_1));
971 sim_fpu_round_32 (sim_fpu *f,
973 sim_fpu_denorm denorm)
975 return do_round (f, 0, round, denorm);
979 sim_fpu_round_64 (sim_fpu *f,
981 sim_fpu_denorm denorm)
983 return do_round (f, 1, round, denorm);
991 sim_fpu_add (sim_fpu *f,
995 if (sim_fpu_is_snan (l))
998 f->class = sim_fpu_class_qnan;
999 return sim_fpu_status_invalid_snan;
1001 if (sim_fpu_is_snan (r))
1004 f->class = sim_fpu_class_qnan;
1005 return sim_fpu_status_invalid_snan;
1007 if (sim_fpu_is_qnan (l))
1012 if (sim_fpu_is_qnan (r))
1017 if (sim_fpu_is_infinity (l))
1019 if (sim_fpu_is_infinity (r)
1020 && l->sign != r->sign)
1023 return sim_fpu_status_invalid_isi;
1028 if (sim_fpu_is_infinity (r))
1033 if (sim_fpu_is_zero (l))
1035 if (sim_fpu_is_zero (r))
1038 f->sign = l->sign & r->sign;
1044 if (sim_fpu_is_zero (r))
1051 int shift = l->normal_exp - r->normal_exp;
1052 unsigned64 lfraction;
1053 unsigned64 rfraction;
1054 /* use exp of larger */
1055 if (shift >= NR_FRAC_GUARD)
1057 /* left has much bigger magnitute */
1059 return sim_fpu_status_inexact;
1061 if (shift <= - NR_FRAC_GUARD)
1063 /* right has much bigger magnitute */
1065 return sim_fpu_status_inexact;
1067 lfraction = l->fraction;
1068 rfraction = r->fraction;
1071 f->normal_exp = l->normal_exp;
1072 if (rfraction & LSMASK64 (shift - 1, 0))
1074 status |= sim_fpu_status_inexact;
1075 rfraction |= LSBIT64 (shift); /* stick LSBit */
1077 rfraction >>= shift;
1081 f->normal_exp = r->normal_exp;
1082 if (lfraction & LSMASK64 (- shift - 1, 0))
1084 status |= sim_fpu_status_inexact;
1085 lfraction |= LSBIT64 (- shift); /* stick LSBit */
1087 lfraction >>= -shift;
1091 f->normal_exp = r->normal_exp;
1094 /* perform the addition */
1096 lfraction = - lfraction;
1098 rfraction = - rfraction;
1099 f->fraction = lfraction + rfraction;
1102 if (f->fraction == 0)
1109 f->class = sim_fpu_class_number;
1110 if ((signed64) f->fraction >= 0)
1115 f->fraction = - f->fraction;
1119 if ((f->fraction & IMPLICIT_2))
1121 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1124 else if (f->fraction < IMPLICIT_1)
1131 while (f->fraction < IMPLICIT_1);
1133 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1139 INLINE_SIM_FPU (int)
1140 sim_fpu_sub (sim_fpu *f,
1144 if (sim_fpu_is_snan (l))
1147 f->class = sim_fpu_class_qnan;
1148 return sim_fpu_status_invalid_snan;
1150 if (sim_fpu_is_snan (r))
1153 f->class = sim_fpu_class_qnan;
1154 return sim_fpu_status_invalid_snan;
1156 if (sim_fpu_is_qnan (l))
1161 if (sim_fpu_is_qnan (r))
1166 if (sim_fpu_is_infinity (l))
1168 if (sim_fpu_is_infinity (r)
1169 && l->sign == r->sign)
1172 return sim_fpu_status_invalid_isi;
1177 if (sim_fpu_is_infinity (r))
1183 if (sim_fpu_is_zero (l))
1185 if (sim_fpu_is_zero (r))
1188 f->sign = l->sign & !r->sign;
1197 if (sim_fpu_is_zero (r))
1204 int shift = l->normal_exp - r->normal_exp;
1205 unsigned64 lfraction;
1206 unsigned64 rfraction;
1207 /* use exp of larger */
1208 if (shift >= NR_FRAC_GUARD)
1210 /* left has much bigger magnitute */
1212 return sim_fpu_status_inexact;
1214 if (shift <= - NR_FRAC_GUARD)
1216 /* right has much bigger magnitute */
1219 return sim_fpu_status_inexact;
1221 lfraction = l->fraction;
1222 rfraction = r->fraction;
1225 f->normal_exp = l->normal_exp;
1226 if (rfraction & LSMASK64 (shift - 1, 0))
1228 status |= sim_fpu_status_inexact;
1229 rfraction |= LSBIT64 (shift); /* stick LSBit */
1231 rfraction >>= shift;
1235 f->normal_exp = r->normal_exp;
1236 if (lfraction & LSMASK64 (- shift - 1, 0))
1238 status |= sim_fpu_status_inexact;
1239 lfraction |= LSBIT64 (- shift); /* stick LSBit */
1241 lfraction >>= -shift;
1245 f->normal_exp = r->normal_exp;
1248 /* perform the subtraction */
1250 lfraction = - lfraction;
1252 rfraction = - rfraction;
1253 f->fraction = lfraction + rfraction;
1256 if (f->fraction == 0)
1263 f->class = sim_fpu_class_number;
1264 if ((signed64) f->fraction >= 0)
1269 f->fraction = - f->fraction;
1273 if ((f->fraction & IMPLICIT_2))
1275 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1278 else if (f->fraction < IMPLICIT_1)
1285 while (f->fraction < IMPLICIT_1);
1287 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1293 INLINE_SIM_FPU (int)
1294 sim_fpu_mul (sim_fpu *f,
1298 if (sim_fpu_is_snan (l))
1301 f->class = sim_fpu_class_qnan;
1302 return sim_fpu_status_invalid_snan;
1304 if (sim_fpu_is_snan (r))
1307 f->class = sim_fpu_class_qnan;
1308 return sim_fpu_status_invalid_snan;
1310 if (sim_fpu_is_qnan (l))
1315 if (sim_fpu_is_qnan (r))
1320 if (sim_fpu_is_infinity (l))
1322 if (sim_fpu_is_zero (r))
1325 return sim_fpu_status_invalid_imz;
1328 f->sign = l->sign ^ r->sign;
1331 if (sim_fpu_is_infinity (r))
1333 if (sim_fpu_is_zero (l))
1336 return sim_fpu_status_invalid_imz;
1339 f->sign = l->sign ^ r->sign;
1342 if (sim_fpu_is_zero (l) || sim_fpu_is_zero (r))
1345 f->sign = l->sign ^ r->sign;
1348 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1353 unsigned64 nl = l->fraction & 0xffffffff;
1354 unsigned64 nh = l->fraction >> 32;
1355 unsigned64 ml = r->fraction & 0xffffffff;
1356 unsigned64 mh = r->fraction >>32;
1357 unsigned64 pp_ll = ml * nl;
1358 unsigned64 pp_hl = mh * nl;
1359 unsigned64 pp_lh = ml * nh;
1360 unsigned64 pp_hh = mh * nh;
1361 unsigned64 res2 = 0;
1362 unsigned64 res0 = 0;
1363 unsigned64 ps_hh__ = pp_hl + pp_lh;
1364 if (ps_hh__ < pp_hl)
1365 res2 += UNSIGNED64 (0x100000000);
1366 pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
1367 res0 = pp_ll + pp_hl;
1370 res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1374 f->normal_exp = l->normal_exp + r->normal_exp;
1375 f->sign = l->sign ^ r->sign;
1376 f->class = sim_fpu_class_number;
1378 /* Input is bounded by [1,2) ; [2^60,2^61)
1379 Output is bounded by [1,4) ; [2^120,2^122) */
1381 /* Adjust the exponent according to where the decimal point ended
1382 up in the high 64 bit word. In the source the decimal point
1383 was at NR_FRAC_GUARD. */
1384 f->normal_exp += NR_FRAC_GUARD + 64 - (NR_FRAC_GUARD * 2);
1386 /* The high word is bounded according to the above. Consequently
1387 it has never overflowed into IMPLICIT_2. */
1388 ASSERT (high < LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64));
1389 ASSERT (high >= LSBIT64 ((NR_FRAC_GUARD * 2) - 64));
1390 ASSERT (LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64) < IMPLICIT_1);
1397 if (low & LSBIT64 (63))
1401 while (high < IMPLICIT_1);
1403 ASSERT (high >= IMPLICIT_1 && high < IMPLICIT_2);
1406 f->fraction = (high | 1); /* sticky */
1407 return sim_fpu_status_inexact;
1418 INLINE_SIM_FPU (int)
1419 sim_fpu_div (sim_fpu *f,
1423 if (sim_fpu_is_snan (l))
1426 f->class = sim_fpu_class_qnan;
1427 return sim_fpu_status_invalid_snan;
1429 if (sim_fpu_is_snan (r))
1432 f->class = sim_fpu_class_qnan;
1433 return sim_fpu_status_invalid_snan;
1435 if (sim_fpu_is_qnan (l))
1438 f->class = sim_fpu_class_qnan;
1441 if (sim_fpu_is_qnan (r))
1444 f->class = sim_fpu_class_qnan;
1447 if (sim_fpu_is_infinity (l))
1449 if (sim_fpu_is_infinity (r))
1452 return sim_fpu_status_invalid_idi;
1457 f->sign = l->sign ^ r->sign;
1461 if (sim_fpu_is_zero (l))
1463 if (sim_fpu_is_zero (r))
1466 return sim_fpu_status_invalid_zdz;
1471 f->sign = l->sign ^ r->sign;
1475 if (sim_fpu_is_infinity (r))
1478 f->sign = l->sign ^ r->sign;
1481 if (sim_fpu_is_zero (r))
1483 f->class = sim_fpu_class_infinity;
1484 f->sign = l->sign ^ r->sign;
1485 return sim_fpu_status_invalid_div0;
1488 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1491 /* quotient = ( ( numerator / denominator)
1492 x 2^(numerator exponent - denominator exponent)
1494 unsigned64 numerator;
1495 unsigned64 denominator;
1496 unsigned64 quotient;
1499 f->class = sim_fpu_class_number;
1500 f->sign = l->sign ^ r->sign;
1501 f->normal_exp = l->normal_exp - r->normal_exp;
1503 numerator = l->fraction;
1504 denominator = r->fraction;
1506 /* Fraction will be less than 1.0 */
1507 if (numerator < denominator)
1512 ASSERT (numerator >= denominator);
1514 /* Gain extra precision, already used one spare bit */
1515 numerator <<= NR_SPARE;
1516 denominator <<= NR_SPARE;
1518 /* Does divide one bit at a time. Optimize??? */
1520 bit = (IMPLICIT_1 << NR_SPARE);
1523 if (numerator >= denominator)
1526 numerator -= denominator;
1532 /* discard (but save) the extra bits */
1533 if ((quotient & LSMASK64 (NR_SPARE -1, 0)))
1534 quotient = (quotient >> NR_SPARE) | 1;
1536 quotient = (quotient >> NR_SPARE);
1538 f->fraction = quotient;
1539 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1542 f->fraction |= 1; /* stick remaining bits */
1543 return sim_fpu_status_inexact;
1551 INLINE_SIM_FPU (int)
1552 sim_fpu_max (sim_fpu *f,
1556 if (sim_fpu_is_snan (l))
1559 f->class = sim_fpu_class_qnan;
1560 return sim_fpu_status_invalid_snan;
1562 if (sim_fpu_is_snan (r))
1565 f->class = sim_fpu_class_qnan;
1566 return sim_fpu_status_invalid_snan;
1568 if (sim_fpu_is_qnan (l))
1573 if (sim_fpu_is_qnan (r))
1578 if (sim_fpu_is_infinity (l))
1580 if (sim_fpu_is_infinity (r)
1581 && l->sign == r->sign)
1584 return sim_fpu_status_invalid_isi;
1587 *f = *r; /* -inf < anything */
1589 *f = *l; /* +inf > anthing */
1592 if (sim_fpu_is_infinity (r))
1595 *f = *l; /* anything > -inf */
1597 *f = *r; /* anthing < +inf */
1600 if (l->sign > r->sign)
1602 *f = *r; /* -ve < +ve */
1605 if (l->sign < r->sign)
1607 *f = *l; /* +ve > -ve */
1610 ASSERT (l->sign == r->sign);
1611 if (l->normal_exp > r->normal_exp
1612 || (l->normal_exp == r->normal_exp &&
1613 l->fraction > r->fraction))
1617 *f = *r; /* -ve < -ve */
1619 *f = *l; /* +ve > +ve */
1626 *f = *l; /* -ve > -ve */
1628 *f = *r; /* +ve < +ve */
1634 INLINE_SIM_FPU (int)
1635 sim_fpu_min (sim_fpu *f,
1639 if (sim_fpu_is_snan (l))
1642 f->class = sim_fpu_class_qnan;
1643 return sim_fpu_status_invalid_snan;
1645 if (sim_fpu_is_snan (r))
1648 f->class = sim_fpu_class_qnan;
1649 return sim_fpu_status_invalid_snan;
1651 if (sim_fpu_is_qnan (l))
1656 if (sim_fpu_is_qnan (r))
1661 if (sim_fpu_is_infinity (l))
1663 if (sim_fpu_is_infinity (r)
1664 && l->sign == r->sign)
1667 return sim_fpu_status_invalid_isi;
1670 *f = *l; /* -inf < anything */
1672 *f = *r; /* +inf > anthing */
1675 if (sim_fpu_is_infinity (r))
1678 *f = *r; /* anything > -inf */
1680 *f = *l; /* anything < +inf */
1683 if (l->sign > r->sign)
1685 *f = *l; /* -ve < +ve */
1688 if (l->sign < r->sign)
1690 *f = *r; /* +ve > -ve */
1693 ASSERT (l->sign == r->sign);
1694 if (l->normal_exp > r->normal_exp
1695 || (l->normal_exp == r->normal_exp &&
1696 l->fraction > r->fraction))
1700 *f = *l; /* -ve < -ve */
1702 *f = *r; /* +ve > +ve */
1709 *f = *r; /* -ve > -ve */
1711 *f = *l; /* +ve < +ve */
1717 INLINE_SIM_FPU (int)
1718 sim_fpu_neg (sim_fpu *f,
1721 if (sim_fpu_is_snan (r))
1724 f->class = sim_fpu_class_qnan;
1725 return sim_fpu_status_invalid_snan;
1727 if (sim_fpu_is_qnan (r))
1738 INLINE_SIM_FPU (int)
1739 sim_fpu_abs (sim_fpu *f,
1744 if (sim_fpu_is_snan (r))
1746 f->class = sim_fpu_class_qnan;
1747 return sim_fpu_status_invalid_snan;
1753 INLINE_SIM_FPU (int)
1754 sim_fpu_inv (sim_fpu *f,
1757 return sim_fpu_div (f, &sim_fpu_one, r);
1761 INLINE_SIM_FPU (int)
1762 sim_fpu_sqrt (sim_fpu *f,
1765 if (sim_fpu_is_snan (r))
1768 return sim_fpu_status_invalid_snan;
1770 if (sim_fpu_is_qnan (r))
1775 if (sim_fpu_is_zero (r))
1777 f->class = sim_fpu_class_zero;
1782 if (sim_fpu_is_infinity (r))
1787 return sim_fpu_status_invalid_sqrt;
1791 f->class = sim_fpu_class_infinity;
1800 return sim_fpu_status_invalid_sqrt;
1803 /* @(#)e_sqrt.c 5.1 93/09/24 */
1805 * ====================================================
1806 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1808 * Developed at SunPro, a Sun Microsystems, Inc. business.
1809 * Permission to use, copy, modify, and distribute this
1810 * software is freely granted, provided that this notice
1812 * ====================================================
1815 /* __ieee754_sqrt(x)
1816 * Return correctly rounded sqrt.
1817 * ------------------------------------------
1818 * | Use the hardware sqrt if you have one |
1819 * ------------------------------------------
1821 * Bit by bit method using integer arithmetic. (Slow, but portable)
1823 * Scale x to y in [1,4) with even powers of 2:
1824 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1825 * sqrt(x) = 2^k * sqrt(y)
1828 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1829 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1831 - y = ((m even) ? x : 2.x)
1833 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1835 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1837 * 2. Bit by bit computation
1838 * Let q = sqrt(y) truncated to i bit after binary point (q = 1),
1841 * s = 2*q , and y = 2 * ( y - q ). (1)
1844 * To compute q from q , one checks whether
1848 * (q + 2 ) <= y. (2)
1851 * If (2) is false, then q = q ; otherwise q = q + 2 .
1854 * With some algebric manipulation, it is not difficult to see
1855 * that (2) is equivalent to
1860 * The advantage of (3) is that s and y can be computed by
1862 * the following recurrence formula:
1865 * s = s , y = y ; (4)
1874 * s = s + 2 , y = y - s - 2 (5)
1879 - NOTE: y = 2 (y - s - 2 )
1882 * One may easily use induction to prove (4) and (5).
1883 * Note. Since the left hand side of (3) contain only i+2 bits,
1884 * it does not necessary to do a full (53-bit) comparison
1887 * After generating the 53 bits result, we compute one more bit.
1888 * Together with the remainder, we can decide whether the
1889 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
1890 * (it will never equal to 1/2ulp).
1891 * The rounding mode can be detected by checking whether
1892 * huge + tiny is equal to huge, and whether huge - tiny is
1893 * equal to huge for some floating point number "huge" and "tiny".
1896 * sqrt(+-0) = +-0 ... exact
1898 * sqrt(-ve) = NaN ... with invalid signal
1899 * sqrt(NaN) = NaN ... with invalid signal for signaling NaN
1901 * Other methods : see the appended file at the end of the program below.
1906 /* generate sqrt(x) bit by bit */
/* NOTE(review): only fragments of the sqrt routine are visible in this
   excerpt — the signature and most of the body are elided.  What is
   visible matches the fdlibm bit-by-bit method described in the large
   comment above: halve the exponent, pre-double the fraction when the
   exponent is odd, then compute the root one bit per iteration.  */
1912 f->class = sim_fpu_class_number;
/* Result exponent is half the argument's exponent (see "exp = [exp/2]"
   in the algorithm notes).  */
1915 f->normal_exp = (r->normal_exp >> 1); /* exp = [exp/2] */
1917 /* odd exp, double x to make it even */
/* y (the working fraction) must start in [1, 4) — i.e. at most one
   doubling away from a normalized fraction.  */
1918 ASSERT (y >= IMPLICIT_1 && y < IMPLICIT_4);
1919 if ((r->normal_exp & 1))
1923 ASSERT (y >= IMPLICIT_1 && y < (IMPLICIT_2 << 1));
1925 /* Let loop determine first value of s (either 1 or 2) */
/* t = s + b is the trial term used to decide the next root bit —
   formula (3)/(5) in the commentary above.  */
1932 unsigned64 t = s + b;
/* The generated root q must itself be a normalized fraction.  */
1943 ASSERT (q >= IMPLICIT_1 && q < IMPLICIT_2);
/* A non-zero remainder means the result is inexact; fold it into the
   sticky bit so the rounding step can observe it.  */
1947 f->fraction |= 1; /* stick remaining bits */
1948 return sim_fpu_status_inexact;
1956 /* int/long <-> sim_fpu */
/* Convert a 32-bit signed integer to sim_fpu format in *F, returning a
   status mask.  NOTE(review): the integer parameter line and the body
   are elided in this excerpt — confirm against the full source.  */
1958 INLINE_SIM_FPU (int)
1959 sim_fpu_i32to (sim_fpu *f,
1961 sim_fpu_round round)
/* Convert a 32-bit unsigned integer to sim_fpu format in *F, returning
   a status mask.  NOTE(review): the integer parameter line and the
   body are elided in this excerpt.  */
1967 INLINE_SIM_FPU (int)
1968 sim_fpu_u32to (sim_fpu *f,
1970 sim_fpu_round round)
/* Convert a 64-bit signed integer to sim_fpu format in *F, returning a
   status mask.  NOTE(review): the integer parameter line and the body
   are elided in this excerpt.  */
1976 INLINE_SIM_FPU (int)
1977 sim_fpu_i64to (sim_fpu *f,
1979 sim_fpu_round round)
/* Convert a 64-bit unsigned integer to sim_fpu format in *F, returning
   a status mask.  NOTE(review): the integer parameter line and the
   body are elided in this excerpt.  */
1985 INLINE_SIM_FPU (int)
1986 sim_fpu_u64to (sim_fpu *f,
1988 sim_fpu_round round)
/* Convert *F to a 32-bit signed integer in *I, returning a status
   mask.  Delegates to fpu2i with is_64bit == 0; the intermediate
   64-bit value is presumably narrowed into *I on the elided lines —
   confirm against the full source.  */
1995 INLINE_SIM_FPU (int)
1996 sim_fpu_to32i (signed32 *i,
1998 sim_fpu_round round)
2001 int status = fpu2i (&i64, f, 0, round);
/* Convert *F to a 32-bit unsigned integer in *U, returning a status
   mask.  Delegates to fpu2u with is_64bit == 0.  Note that unlike the
   signed path, fpu2u is called without the ROUND argument — the
   unsigned conversion helper evidently does not take one.  */
2006 INLINE_SIM_FPU (int)
2007 sim_fpu_to32u (unsigned32 *u,
2009 sim_fpu_round round)
2012 int status = fpu2u (&u64, f, 0);
/* Convert *F to a 64-bit signed integer in *I, returning a status
   mask.  Thin wrapper over fpu2i with is_64bit == 1.  */
2017 INLINE_SIM_FPU (int)
2018 sim_fpu_to64i (signed64 *i,
2020 sim_fpu_round round)
2022 return fpu2i (i, f, 1, round);
/* Convert *F to a 64-bit unsigned integer in *U, returning a status
   mask.  Thin wrapper over fpu2u with is_64bit == 1; as with the
   32-bit unsigned variant, ROUND is not passed through.  */
2026 INLINE_SIM_FPU (int)
2027 sim_fpu_to64u (unsigned64 *u,
2029 sim_fpu_round round)
2031 return fpu2u (u, f, 1);
2036 /* sim_fpu -> host format */
/* Convert *F to a host float.  NOTE(review): the body is entirely
   elided in this excerpt — only the signature is visible.  */
2039 INLINE_SIM_FPU (float)
2040 sim_fpu_2f (const sim_fpu *f)
/* Convert *S to a host double by packing it into the 64-bit IEEE
   image.  A signalling NaN is first turned into a quiet-NaN copy
   before packing — a host load of an sNaN image could otherwise
   trap or silently quieten it.  NOTE(review): the declaration of the
   local copy N, the val.d return, and the braces are elided here.  */
2047 INLINE_SIM_FPU (double)
2048 sim_fpu_2d (const sim_fpu *s)
2051 if (sim_fpu_is_snan (s))
2055 n.class = sim_fpu_class_qnan;
2056 val.i = pack_fpu (&n, 1);
2060 val.i = pack_fpu (s, 1);
/* Convert a host float into sim_fpu format in *F.  The elided lines
   presumably widen the float into the union's double member, which is
   why unpack_fpu is called with is_64bit == 1 — confirm against the
   full source.  */
2067 INLINE_SIM_FPU (void)
2068 sim_fpu_f2 (sim_fpu *f,
2073 unpack_fpu (f, val.i, 1);
/* Convert a host double into sim_fpu format in *F by reinterpreting
   its 64-bit image through the union and unpacking it.  */
2078 INLINE_SIM_FPU (void)
2079 sim_fpu_d2 (sim_fpu *f,
2084 unpack_fpu (f, val.i, 1);
/* Predicate: non-zero iff *D is a NaN of either flavour (quiet or
   signalling).  NOTE(review): the switch scaffolding and return
   statements are elided in this excerpt.  */
2090 INLINE_SIM_FPU (int)
2091 sim_fpu_is_nan (const sim_fpu *d)
2095 case sim_fpu_class_qnan:
2096 case sim_fpu_class_snan:
/* Predicate: non-zero iff *D is a quiet NaN.  */
2103 INLINE_SIM_FPU (int)
2104 sim_fpu_is_qnan (const sim_fpu *d)
2108 case sim_fpu_class_qnan:
/* Predicate: non-zero iff *D is a signalling NaN.  */
2115 INLINE_SIM_FPU (int)
2116 sim_fpu_is_snan (const sim_fpu *d)
2120 case sim_fpu_class_snan:
/* Predicate: non-zero iff *D is zero (either sign).  */
2127 INLINE_SIM_FPU (int)
2128 sim_fpu_is_zero (const sim_fpu *d)
2132 case sim_fpu_class_zero:
/* Predicate: non-zero iff *D is an infinity (either sign).  */
2139 INLINE_SIM_FPU (int)
2140 sim_fpu_is_infinity (const sim_fpu *d)
2144 case sim_fpu_class_infinity:
/* Predicate: non-zero iff *D is a finite non-zero value — a normal
   number or a denormal both count.  */
2151 INLINE_SIM_FPU (int)
2152 sim_fpu_is_number (const sim_fpu *d)
2156 case sim_fpu_class_denorm:
2157 case sim_fpu_class_number:
/* Predicate: non-zero iff *D is a denormal (subnormal) number.  */
2164 INLINE_SIM_FPU (int)
2165 sim_fpu_is_denorm (const sim_fpu *d)
2169 case sim_fpu_class_denorm:
/* Accessor: return the sign of *D.  NOTE(review): the body is elided
   in this excerpt — presumably returns d->sign.  */
2177 INLINE_SIM_FPU (int)
2178 sim_fpu_sign (const sim_fpu *d)
/* Accessor: return the unbiased (normalized) exponent of *D.  */
2184 INLINE_SIM_FPU (int)
2185 sim_fpu_exp (const sim_fpu *d)
2187 return d->normal_exp;
/* Accessor: return the fraction of *D.  NOTE(review): the body is
   elided in this excerpt — presumably returns d->fraction.  */
2191 INLINE_SIM_FPU (unsigned64)
2192 sim_fpu_fraction (const sim_fpu *d)
/* Accessor: return the guard bits of *D's fraction — the low-order
   bits below the represented precision, shifted down past the pad
   bits.  NOTE(review): the IS_DOUBLE-dependent path is elided here;
   the visible lines show only the (apparent) double-precision mask.  */
2198 INLINE_SIM_FPU (unsigned64)
2199 sim_fpu_guard (const sim_fpu *d, int is_double)
2202 unsigned64 guardmask = LSMASK64 (NR_GUARDS - 1, 0);
2203 rv = (d->fraction & guardmask) >> NR_PAD;
/* Classify *D into one of the SIM_FPU_IS_* codes, combining the
   value's class with its sign (negative/positive variants for
   infinity, number, denorm and zero).  NOTE(review): the switch
   scaffolding and the sign tests between each return pair are elided
   in this excerpt.  */
2208 INLINE_SIM_FPU (int)
2209 sim_fpu_is (const sim_fpu *d)
2213 case sim_fpu_class_qnan:
2214 return SIM_FPU_IS_QNAN;
2215 case sim_fpu_class_snan:
2216 return SIM_FPU_IS_SNAN;
2217 case sim_fpu_class_infinity:
2219 return SIM_FPU_IS_NINF;
2221 return SIM_FPU_IS_PINF;
2222 case sim_fpu_class_number:
2224 return SIM_FPU_IS_NNUMBER;
2226 return SIM_FPU_IS_PNUMBER;
2227 case sim_fpu_class_denorm:
2229 return SIM_FPU_IS_NDENORM;
2231 return SIM_FPU_IS_PDENORM;
2232 case sim_fpu_class_zero:
2234 return SIM_FPU_IS_NZERO;
2236 return SIM_FPU_IS_PZERO;
/* Compare L with R by computing L - R and classifying the difference;
   the caller inspects the returned SIM_FPU_IS_* code (e.g. a negative
   number means L < R).  */
2243 INLINE_SIM_FPU (int)
2244 sim_fpu_cmp (const sim_fpu *l, const sim_fpu *r)
2247 sim_fpu_sub (&res, l, r);
2248 return sim_fpu_is (&res);
/* Boolean wrapper around sim_fpu_lt: the comparison result, with any
   status discarded.  NOTE(review): the declaration of the local flag
   and the return are elided in this excerpt.  */
2251 INLINE_SIM_FPU (int)
2252 sim_fpu_is_lt (const sim_fpu *l, const sim_fpu *r)
2255 sim_fpu_lt (&status, l, r);
/* Boolean wrapper around sim_fpu_le, status discarded.  */
2259 INLINE_SIM_FPU (int)
2260 sim_fpu_is_le (const sim_fpu *l, const sim_fpu *r)
2263 sim_fpu_le (&is, l, r);
/* Boolean wrapper around sim_fpu_eq, status discarded.  */
2267 INLINE_SIM_FPU (int)
2268 sim_fpu_is_eq (const sim_fpu *l, const sim_fpu *r)
2271 sim_fpu_eq (&is, l, r);
/* Boolean wrapper around sim_fpu_ne, status discarded.  */
2275 INLINE_SIM_FPU (int)
2276 sim_fpu_is_ne (const sim_fpu *l, const sim_fpu *r)
2279 sim_fpu_ne (&is, l, r);
/* Boolean wrapper around sim_fpu_ge, status discarded.  */
2283 INLINE_SIM_FPU (int)
2284 sim_fpu_is_ge (const sim_fpu *l, const sim_fpu *r)
2287 sim_fpu_ge (&is, l, r);
/* Boolean wrapper around sim_fpu_gt, status discarded.  */
2291 INLINE_SIM_FPU (int)
2292 sim_fpu_is_gt (const sim_fpu *l, const sim_fpu *r)
2295 sim_fpu_gt (&is, l, r);
2300 /* Compare operators */
/* Set *IS to (L < R) and return a status mask.  When neither operand
   is a NaN, both are packed to their 64-bit IEEE images and compared
   using the host's double comparison.  A signalling-NaN operand
   yields sim_fpu_status_invalid_snan, a quiet NaN
   sim_fpu_status_invalid_qnan.  NOTE(review): the union declarations,
   the *IS value set on the NaN paths, and the success return are
   elided in this excerpt.  */
2302 INLINE_SIM_FPU (int)
2303 sim_fpu_lt (int *is,
2307 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2311 lval.i = pack_fpu (l, 1);
2312 rval.i = pack_fpu (r, 1);
2313 (*is) = (lval.d < rval.d);
2316 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2319 return sim_fpu_status_invalid_snan;
2324 return sim_fpu_status_invalid_qnan;
/* Set *IS to (L <= R) and return a status mask; same NaN handling as
   sim_fpu_lt above — sNaN operands report invalid_snan, qNaN operands
   invalid_qnan.  NOTE(review): several scaffolding lines are elided
   in this excerpt.  */
2328 INLINE_SIM_FPU (int)
2329 sim_fpu_le (int *is,
2333 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2337 lval.i = pack_fpu (l, 1);
2338 rval.i = pack_fpu (r, 1);
2339 *is = (lval.d <= rval.d);
2342 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2345 return sim_fpu_status_invalid_snan;
2350 return sim_fpu_status_invalid_qnan;
/* Set *IS to (L == R) and return a status mask; NaN handling as in
   sim_fpu_lt.  NOTE(review): several scaffolding lines are elided in
   this excerpt.  */
2354 INLINE_SIM_FPU (int)
2355 sim_fpu_eq (int *is,
2359 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2363 lval.i = pack_fpu (l, 1);
2364 rval.i = pack_fpu (r, 1);
2365 (*is) = (lval.d == rval.d);
2368 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2371 return sim_fpu_status_invalid_snan;
2376 return sim_fpu_status_invalid_qnan;
/* Set *IS to (L != R) and return a status mask; NaN handling as in
   sim_fpu_lt.  NOTE(review): the *IS value chosen on the NaN paths is
   elided — do not assume it matches the other comparisons without
   checking the full source.  */
2380 INLINE_SIM_FPU (int)
2381 sim_fpu_ne (int *is,
2385 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2389 lval.i = pack_fpu (l, 1);
2390 rval.i = pack_fpu (r, 1);
2391 (*is) = (lval.d != rval.d);
2394 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2397 return sim_fpu_status_invalid_snan;
2402 return sim_fpu_status_invalid_qnan;
/* (L >= R) is implemented as (R <= L): delegate with the operands
   swapped so the NaN handling lives in one place.  */
2406 INLINE_SIM_FPU (int)
2407 sim_fpu_ge (int *is,
2411 return sim_fpu_le (is, r, l);
/* (L > R) is implemented as (R < L): delegate with the operands
   swapped.  */
2414 INLINE_SIM_FPU (int)
2415 sim_fpu_gt (int *is,
2419 return sim_fpu_lt (is, r, l);
2423 /* A number of useful constants */
/* Canonical constants, emitted once when this file is built as the
   out-of-line (extern) copy of the library.  Comparing the "one" and
   "two" initializers (same IMPLICIT_1 fraction, final field 0 vs 1)
   shows the last initializer is the normalized exponent, so each
   value is fraction * 2^exp.  max32/max64 use an all-ones fraction
   with the largest normal exponent of the respective format.  */
2425 #if EXTERN_SIM_FPU_P
2426 const sim_fpu sim_fpu_zero = {
2427 sim_fpu_class_zero, 0, 0, 0
2429 const sim_fpu sim_fpu_qnan = {
2430 sim_fpu_class_qnan, 0, 0, 0
2432 const sim_fpu sim_fpu_one = {
2433 sim_fpu_class_number, 0, IMPLICIT_1, 0
2435 const sim_fpu sim_fpu_two = {
2436 sim_fpu_class_number, 0, IMPLICIT_1, 1
2438 const sim_fpu sim_fpu_max32 = {
2439 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS32), NORMAL_EXPMAX32
2441 const sim_fpu sim_fpu_max64 = {
2442 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS64), NORMAL_EXPMAX64
/* Print *F via the caller-supplied PRINT callback, showing all
   fraction digits (-1 = unlimited, per print_bits).  Convenience
   wrapper around sim_fpu_printn_fpu.  */
2449 INLINE_SIM_FPU (void)
2450 sim_fpu_print_fpu (const sim_fpu *f,
2451 sim_fpu_print_func *print,
2454 sim_fpu_printn_fpu (f, print, -1, arg);
/* Print *F via the PRINT callback, limiting the fraction to DIGITS
   binary digits (DIGITS < 0 prints them all): sign first, then a
   class-specific rendering — NaNs and numbers show their fraction
   bits, numbers additionally a "*2^exp" suffix.  NOTE(review): the
   switch scaffolding, the zero/infinity output lines and the DIGITS
   parameter line are elided in this excerpt.  */
2457 INLINE_SIM_FPU (void)
2458 sim_fpu_printn_fpu (const sim_fpu *f,
2459 sim_fpu_print_func *print,
2463 print (arg, "%s", f->sign ? "-" : "+");
2466 case sim_fpu_class_qnan:
2468 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2469 print (arg, "*QuietNaN");
2471 case sim_fpu_class_snan:
2473 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2474 print (arg, "*SignalNaN");
2476 case sim_fpu_class_zero:
2479 case sim_fpu_class_infinity:
2482 case sim_fpu_class_number:
2483 case sim_fpu_class_denorm:
2485 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2486 print (arg, "*2^%+d", f->normal_exp);
/* Sanity check: anything printed as a number must be normalized.  */
2487 ASSERT (f->fraction >= IMPLICIT_1);
2488 ASSERT (f->fraction < IMPLICIT_2);
2493 INLINE_SIM_FPU (void)
2494 sim_fpu_print_status (int status,
2495 sim_fpu_print_func *print,
2499 const char *prefix = "";
2502 switch ((sim_fpu_status) (status & i))
2504 case sim_fpu_status_denorm:
2505 print (arg, "%sD", prefix);
2507 case sim_fpu_status_invalid_snan:
2508 print (arg, "%sSNaN", prefix);
2510 case sim_fpu_status_invalid_qnan:
2511 print (arg, "%sQNaN", prefix);
2513 case sim_fpu_status_invalid_isi:
2514 print (arg, "%sISI", prefix);
2516 case sim_fpu_status_invalid_idi:
2517 print (arg, "%sIDI", prefix);
2519 case sim_fpu_status_invalid_zdz:
2520 print (arg, "%sZDZ", prefix);
2522 case sim_fpu_status_invalid_imz:
2523 print (arg, "%sIMZ", prefix);
2525 case sim_fpu_status_invalid_cvi:
2526 print (arg, "%sCVI", prefix);
2528 case sim_fpu_status_invalid_cmp:
2529 print (arg, "%sCMP", prefix);
2531 case sim_fpu_status_invalid_sqrt:
2532 print (arg, "%sSQRT", prefix);
2534 case sim_fpu_status_inexact:
2535 print (arg, "%sX", prefix);
2537 case sim_fpu_status_overflow:
2538 print (arg, "%sO", prefix);
2540 case sim_fpu_status_underflow:
2541 print (arg, "%sU", prefix);
2543 case sim_fpu_status_invalid_div0:
2544 print (arg, "%s/", prefix);
2546 case sim_fpu_status_rounded:
2547 print (arg, "%sR", prefix);