1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
5 /* Copyright (C) 1994,1997-1998 Free Software Foundation, Inc.
7 This file is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 2, or (at your option) any
12 In addition to the permissions in the GNU General Public License, the
13 Free Software Foundation gives you unlimited permission to link the
14 compiled version of this file with other programs, and to distribute
15 those programs without any restriction coming from the use of this
16 file. (The General Public License restrictions do apply in other
17 respects; for example, they cover modification of the file, and
18 distribution when not linked into another program.)
20 This file is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
25 You should have received a copy of the GNU General Public License
26 along with this program; see the file COPYING. If not, write to
27 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
29 /* As a special exception, if you link this library with other files,
30 some of which are compiled with GCC, to produce an executable,
31 this library does not by itself cause the resulting executable
32 to be covered by the GNU General Public License.
33 This exception does not however invalidate any other reasons why
34 the executable file might be covered by the GNU General Public License. */
36 /* This implements IEEE 754 format arithmetic, but does not provide a
37 mechanism for setting the rounding mode, or for generating or handling
40 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
41 Wilson, all of Cygnus Support. */
47 #include "sim-basics.h"
51 #include "sim-assert.h"
55 If digits is -1, then print all digits. */
/* NOTE(review): fragmentary view - the return type, remaining parameters
   and the loop body of print_bits are not visible here.  What is visible:
   it starts from bit MSBIT of X and emits via PRINT, counting down DIGITS.
   Confirm details against the complete source.  */
58 print_bits (unsigned64 x,
61 sim_fpu_print_func print,
64 unsigned64 bit = LSBIT64 (msbit);
/* Only consume the digit budget when one was given (-1 == unlimited).  */
77 if (digits > 0) digits--;
84 /* Quick and dirty conversion between a host double and host 64bit int */
92 /* A packed IEEE floating point number.
94 Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
95 32 and 64 bit numbers. This number is interpreted as:
97 Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
98 (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
100 Denormalized (0 == BIASEDEXP && FRAC != 0):
101 (sign ? "-" : "+") 0.<FRAC> x 2 ^ (- EXPBIAS)
103 Zero (0 == BIASEDEXP && FRAC == 0):
104 (sign ? "-" : "+") 0.0
106 Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
107 (sign ? "-" : "+") "infinity"
109 SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
112 QuietNaN (BIASEDEXP == EXPMAX && FRAC >= QUIET_NAN):
/* Field widths and special exponent values of the packed IEEE format.
   IS_DOUBLE is a local in each user selecting 64- vs 32-bit layout.  */
117 #define NR_EXPBITS (is_double ? 11 : 8)
118 #define NR_FRACBITS (is_double ? 52 : 23)
119 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
121 #define EXPMAX32 (255)
/* FIX: was misspelled EXMPAX64.  The macro is referenced only by EXPMAX
   immediately below, so renaming both together is self-contained.  */
122 #define EXPMAX64 (2047)
123 #define EXPMAX ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))
125 #define EXPBIAS32 (127)
126 #define EXPBIAS64 (1023)
127 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
/* Fraction MSB set => quiet NaN; see unpack_fpu's `fraction >= QUIET_NAN'.  */
129 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
133 /* An unpacked floating point number.
135 When unpacked, the fraction of both a 32 and 64 bit floating point
136 number is stored using the same format:
138 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
139 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
/* Layout of the unpacked 64-bit fraction: implicit bit at NR_FRAC_GUARD,
   then the packed fraction bits, then guard bits, then zero padding.
   NOTE(review): NR_PAD64 is defined on a line elided from this view.  */
141 #define NR_PAD32 (30)
143 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
144 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
146 #define NR_GUARDS32 (7 + NR_PAD32)
147 #define NR_GUARDS64 (8 + NR_PAD64)
148 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
/* GUARDMASK: all bits below the packed fraction; GUARDMSB is the rounding
   ("half") bit; GUARDROUND is everything below the rounding bit.  */
149 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
151 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
152 #define GUARDLSB LSBIT64 (NR_PAD)
153 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
/* Position of the implicit leading 1 in the unpacked fraction.  */
155 #define NR_FRAC_GUARD (60)
156 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
157 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
158 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
/* Mask selecting just the 32-bit-precision part of an unpacked fraction.  */
161 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
163 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
165 #define NORMAL_EXPMAX32 (EXPBIAS32)
166 #define NORMAL_EXPMAX64 (EXPBIAS64)
167 #define NORMAL_EXPMAX (EXPBIAS)
170 /* Integer constants */
/* MIN_INT32 relies on LSMASK64 (63, 31) sign-extending through the upper
   32 bits, giving 0xFFFFFFFF80000000 == (signed64) INT32_MIN.  */
172 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
173 #define MAX_UINT32 LSMASK64 (31, 0)
174 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
176 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
177 #define MAX_UINT64 LSMASK64 (63, 0)
178 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
/* IS_64BIT is a local in each user selecting the integer width.  */
180 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
181 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
182 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
183 #define NR_INTBITS (is_64bit ? 64 : 32)
185 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer */
/* NOTE(review): fragmentary view - the switch statement, case breaks and
   several closing braces are on elided lines.  Dispatches on SRC->CLASS,
   building SIGN/EXP/FRACTION and assembling the packed word at the end.  */
186 STATIC_INLINE_SIM_FPU (unsigned64)
187 pack_fpu (const sim_fpu *src,
198 case sim_fpu_class_qnan:
201 /* force fraction to correct class */
202 fraction = src->fraction;
203 fraction >>= NR_GUARDS;
/* Setting the fraction MSB marks the NaN as quiet.  */
204 fraction |= QUIET_NAN;
206 case sim_fpu_class_snan:
209 /* force fraction to correct class */
210 fraction = src->fraction;
211 fraction >>= NR_GUARDS;
/* Clearing the fraction MSB marks the NaN as signaling.  */
212 fraction &= ~QUIET_NAN;
214 case sim_fpu_class_infinity:
219 case sim_fpu_class_zero:
224 case sim_fpu_class_number:
225 case sim_fpu_class_denorm:
226 ASSERT (src->fraction >= IMPLICIT_1);
227 ASSERT (src->fraction < IMPLICIT_2);
228 if (src->normal_exp < NORMAL_EXPMIN)
230 /* This number's exponent is too low to fit into the bits
231 available in the number We'll denormalize the number by
232 storing zero in the exponent and shift the fraction to
233 the right to make up for it. */
234 int nr_shift = NORMAL_EXPMIN - src->normal_exp;
235 if (nr_shift > NR_FRACBITS)
237 /* underflow, just make the number zero */
246 /* Shift by the value */
247 fraction = src->fraction;
248 fraction >>= NR_GUARDS;
249 fraction >>= nr_shift;
252 else if (src->normal_exp > NORMAL_EXPMAX)
261 exp = (src->normal_exp + EXPBIAS);
263 fraction = src->fraction;
264 /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
266 /* Round to nearest: If the guard bits are the all zero, but
267 the first, then we're half way between two numbers,
268 choose the one which makes the lsb of the answer 0. */
269 if ((fraction & GUARDMASK) == GUARDMSB)
271 if ((fraction & (GUARDMSB << 1)))
272 fraction += (GUARDMSB << 1);
276 /* Add a one to the guards to force round to nearest */
277 fraction += GUARDROUND;
279 if ((fraction & IMPLICIT_2)) /* rounding resulted in carry */
284 fraction >>= NR_GUARDS;
285 /* When exp == EXPMAX (overflow from carry) fraction must
286 have been made zero */
/* `A <= B' on booleans encodes the implication A implies B.  */
287 ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
294 packed = ((sign ? SIGNBIT : 0)
295 | (exp << NR_FRACBITS)
296 | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
298 /* trace operation */
305 printf ("pack_fpu: ");
306 printf ("-> %c%0lX.%06lX\n",
307 LSMASKED32 (packed, 31, 31) ? '8' : '0',
308 (long) LSEXTRACTED32 (packed, 30, 23),
309 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
317 /* Unpack a 32/64 bit integer into a sim_fpu structure */
/* NOTE(review): fragmentary view - if/else structure and braces are partly
   elided.  Splits PACKED into sign/exp/fraction, classifies (zero, denorm,
   infinity, NaN, normal) and normalizes the fraction so the implicit bit
   sits at IMPLICIT_1.  The tail re-packs as a self-check when tracing.  */
318 STATIC_INLINE_SIM_FPU (void)
319 unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
321 unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
322 unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
323 int sign = (packed & SIGNBIT) != 0;
327 /* Hmm. Looks like 0 */
330 /* tastes like zero */
331 dst->class = sim_fpu_class_zero;
337 /* Zero exponent with non zero fraction - it's denormalized,
338 so there isn't a leading implicit one - we'll shift it so
340 dst->normal_exp = exp - EXPBIAS + 1;
341 dst->class = sim_fpu_class_denorm;
343 fraction <<= NR_GUARDS;
/* Normalize: shift until the leading 1 reaches the IMPLICIT_1 position.  */
344 while (fraction < IMPLICIT_1)
349 dst->fraction = fraction;
352 else if (exp == EXPMAX)
357 /* Attached to a zero fraction - means infinity */
358 dst->class = sim_fpu_class_infinity;
360 /* dst->normal_exp = EXPBIAS; */
361 /* dst->fraction = 0; */
365 /* Non zero fraction, means NaN */
367 dst->fraction = (fraction << NR_GUARDS);
/* Fraction MSB set (>= QUIET_NAN) distinguishes quiet from signaling.  */
368 if (fraction >= QUIET_NAN)
369 dst->class = sim_fpu_class_qnan;
371 dst->class = sim_fpu_class_snan;
376 /* Nothing strange about this number */
377 dst->class = sim_fpu_class_number;
379 dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
380 dst->normal_exp = exp - EXPBIAS;
383 /* trace operation */
390 printf ("unpack_fpu: %c%02lX.%06lX ->\n",
391 LSMASKED32 (packed, 31, 31) ? '8' : '0',
392 (long) LSEXTRACTED32 (packed, 30, 23),
393 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
/* Sanity check: packing the unpacked value must reproduce PACKED.  */
400 val.i = pack_fpu (dst, 1);
403 ASSERT (val.i == packed);
407 unsigned32 val = pack_fpu (dst, 0);
408 unsigned32 org = packed;
415 /* Convert a floating point into an integer */
/* NOTE(review): fragmentary view - the signature line (destination *I,
   source S, IS_64BIT, ROUND) and several braces are elided.  Handles the
   special classes first, then the MIN_INT edge case, then shifts the
   fraction into integer position, accumulating inexact status.  */
416 STATIC_INLINE_SIM_FPU (int)
425 if (sim_fpu_is_zero (s))
430 if (sim_fpu_is_snan (s))
432 *i = MIN_INT; /* FIXME */
433 return sim_fpu_status_invalid_cvi;
435 if (sim_fpu_is_qnan (s))
437 *i = MIN_INT; /* FIXME */
438 return sim_fpu_status_invalid_cvi;
440 /* map infinity onto MAX_INT... */
441 if (sim_fpu_is_infinity (s))
443 *i = s->sign ? MIN_INT : MAX_INT;
444 return sim_fpu_status_invalid_cvi;
446 /* it is a number, but a small one */
447 if (s->normal_exp < 0)
450 return sim_fpu_status_inexact;
452 /* Is the floating point MIN_INT or just close? */
453 if (s->sign && s->normal_exp == (NR_INTBITS - 1))
456 ASSERT (s->fraction >= IMPLICIT_1);
457 if (s->fraction == IMPLICIT_1)
458 return 0; /* exact */
459 if (is_64bit) /* can't round */
460 return sim_fpu_status_invalid_cvi; /* must be overflow */
461 /* For a 32bit with MAX_INT, rounding is possible */
464 case sim_fpu_round_default:
466 case sim_fpu_round_zero:
467 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
468 return sim_fpu_status_invalid_cvi;
470 return sim_fpu_status_inexact;
472 case sim_fpu_round_near:
474 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
475 return sim_fpu_status_invalid_cvi;
/* FIX: was `s->fraction & !FRAC32MASK'.  Logical NOT of a nonzero mask is
   0, so this overflow test could never trigger; bitwise ~FRAC32MASK is
   intended - it selects the guard bits below the 32-bit fraction, which
   are then compared against the half-way point.  */
476 else if ((s->fraction & ~FRAC32MASK) >= (~FRAC32MASK >> 1))
477 return sim_fpu_status_invalid_cvi;
479 return sim_fpu_status_inexact;
481 case sim_fpu_round_up:
482 if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
483 return sim_fpu_status_inexact;
485 return sim_fpu_status_invalid_cvi;
486 case sim_fpu_round_down:
487 return sim_fpu_status_invalid_cvi;
490 /* Would right shifting result in the FRAC being shifted into
491 (through) the integer's sign bit? */
492 if (s->normal_exp > (NR_INTBITS - 2))
494 *i = s->sign ? MIN_INT : MAX_INT;
495 return sim_fpu_status_invalid_cvi;
497 /* normal number shift it into place */
499 shift = (s->normal_exp - (NR_FRAC_GUARD));
/* Any bits shifted out represent a lost fractional part => inexact.  */
507 if (tmp & ((SIGNED64 (1) << shift) - 1))
508 status |= sim_fpu_status_inexact;
511 *i = s->sign ? (-tmp) : (tmp);
515 /* convert an integer into a floating point */
/* NOTE(review): fragmentary view - sign handling and the normalization
   loop bodies are partly elided.  Places |I| in the fraction with the
   binary point at NR_FRAC_GUARD, then normalizes into [IMPLICIT_1,
   IMPLICIT_2).  MIN_INT is special-cased since -MIN_INT overflows.  */
516 STATIC_INLINE_SIM_FPU (int)
517 i2fpu (sim_fpu *f, signed64 i, int is_64bit)
522 f->class = sim_fpu_class_zero;
528 f->class = sim_fpu_class_number;
530 f->normal_exp = NR_FRAC_GUARD;
534 /* Special case for minint, since there is no corresponding
535 +ve integer representation for it */
538 f->fraction = IMPLICIT_1;
539 f->normal_exp = NR_INTBITS - 1;
547 if (f->fraction >= IMPLICIT_2)
/* Shift right, OR-ing the shifted-out bit back in as a sticky bit.  */
551 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
554 while (f->fraction >= IMPLICIT_2);
556 else if (f->fraction < IMPLICIT_1)
563 while (f->fraction < IMPLICIT_1);
567 /* trace operation */
570 printf ("i2fpu: 0x%08lX ->\n", (long) i);
/* Sanity check: converting back must reproduce I for in-range values.  */
577 fpu2i (&val, f, is_64bit, sim_fpu_round_zero);
578 if (i >= MIN_INT32 && i <= MAX_INT32)
588 /* Convert a floating point into an unsigned integer */
/* NOTE(review): fragmentary view - the branch bodies for NaN, negative,
   infinity, too-small and too-large cases are elided; presumably they
   clamp *U and return a status - confirm against the complete source.  */
589 STATIC_INLINE_SIM_FPU (int)
590 fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
592 const int is_double = 1;
595 if (sim_fpu_is_zero (s))
600 if (sim_fpu_is_nan (s))
605 /* it is a negative number */
611 /* get reasonable MAX_USI_INT... */
612 if (sim_fpu_is_infinity (s))
617 /* it is a number, but a small one */
618 if (s->normal_exp < 0)
624 if (s->normal_exp > (NR_INTBITS - 1))
/* Drop the pad bits, then shift the fraction into integer position.  */
630 tmp = (s->fraction & ~PADMASK);
631 shift = (s->normal_exp - (NR_FRACBITS + NR_GUARDS));
645 /* Convert an unsigned integer into a floating point */
/* NOTE(review): fragmentary view - zero test and loop body elided.  Like
   i2fpu but with no sign handling: seed the fraction with U at exponent
   NR_FRAC_GUARD and normalize upward.  */
646 STATIC_INLINE_SIM_FPU (int)
647 u2fpu (sim_fpu *f, unsigned64 u, int is_64bit)
651 f->class = sim_fpu_class_zero;
657 f->class = sim_fpu_class_number;
659 f->normal_exp = NR_FRAC_GUARD;
662 while (f->fraction < IMPLICIT_1)
672 /* register <-> sim_fpu */
/* Unpack a 32-bit register image into F.  */
674 INLINE_SIM_FPU (void)
675 sim_fpu_32to (sim_fpu *f, unsigned32 s)
677 unpack_fpu (f, s, 0);
/* Unpack a 64-bit value supplied as two 32-bit halves (H = high word,
   L = low word; the combining of H/L into S is on an elided line).  */
681 INLINE_SIM_FPU (void)
682 sim_fpu_232to (sim_fpu *f, unsigned32 h, unsigned32 l)
686 unpack_fpu (f, s, 1);
/* Unpack a 64-bit register image into F.  */
690 INLINE_SIM_FPU (void)
691 sim_fpu_64to (sim_fpu *f, unsigned64 s)
693 unpack_fpu (f, s, 1);
/* Pack F into a 32-bit register image.  */
697 INLINE_SIM_FPU (void)
698 sim_fpu_to32 (unsigned32 *s,
701 *s = pack_fpu (f, 0);
/* Pack F into two 32-bit halves (the split of S is on elided lines).  */
705 INLINE_SIM_FPU (void)
706 sim_fpu_to232 (unsigned32 *h, unsigned32 *l,
709 unsigned64 s = pack_fpu (f, 1);
/* Pack F into a 64-bit register image.  */
715 INLINE_SIM_FPU (void)
716 sim_fpu_to64 (unsigned64 *u,
719 *u = pack_fpu (f, 1);
/* Build a normal number directly from a raw fraction of PRECISION bits
   and a NORMAL_EXP (sign handling is on an elided line).  The fraction is
   repositioned so its top bit lands just below IMPLICIT_1.  */
723 INLINE_SIM_FPU (void)
724 sim_fpu_fractionto (sim_fpu *f,
730 int shift = (NR_FRAC_GUARD - precision);
731 f->class = sim_fpu_class_number;
733 f->normal_exp = normal_exp;
734 /* shift the fraction to where sim-fpu expects it */
736 f->fraction = (fraction << shift);
/* Negative shift means PRECISION > NR_FRAC_GUARD: shift down instead.  */
738 f->fraction = (fraction >> -shift);
739 f->fraction |= IMPLICIT_1;
/* Inverse of the above: extract PRECISION fraction bits from D,
   stripping the implicit leading 1.  */
743 INLINE_SIM_FPU (unsigned64)
744 sim_fpu_tofraction (const sim_fpu *d,
747 /* we have NR_FRAC_GUARD bits, we want only PRECISION bits */
748 int shift = (NR_FRAC_GUARD - precision);
749 unsigned64 fraction = (d->fraction & ~IMPLICIT_1);
751 return fraction >> shift;
753 return fraction << -shift;
/* Handle exponent overflow of a normal number according to ROUND:
   round-to-nearest always gives infinity; up/down give infinity only for
   the matching sign (the sign tests are on elided lines); round-to-zero
   falls through to the largest finite value below.  */
759 STATIC_INLINE_SIM_FPU (int)
760 do_normal_overflow (sim_fpu *f,
766 case sim_fpu_round_default:
768 case sim_fpu_round_near:
769 f->class = sim_fpu_class_infinity;
771 case sim_fpu_round_up:
773 f->class = sim_fpu_class_infinity;
775 case sim_fpu_round_down:
777 f->class = sim_fpu_class_infinity;
779 case sim_fpu_round_zero:
/* Saturate at the largest representable finite number.  */
782 f->normal_exp = NORMAL_EXPMAX;
783 f->fraction = LSMASK64 (NR_FRAC_GUARD, NR_GUARDS);
784 return (sim_fpu_status_overflow | sim_fpu_status_inexact);
/* Handle exponent underflow of a normal number according to ROUND:
   nearest/zero flush to zero; up/down keep the smallest denormal for the
   matching sign (the sign tests are on elided lines).  */
787 STATIC_INLINE_SIM_FPU (int)
788 do_normal_underflow (sim_fpu *f,
794 case sim_fpu_round_default:
796 case sim_fpu_round_near:
797 f->class = sim_fpu_class_zero;
799 case sim_fpu_round_up:
801 f->class = sim_fpu_class_zero;
803 case sim_fpu_round_down:
805 f->class = sim_fpu_class_zero;
807 case sim_fpu_round_zero:
808 f->class = sim_fpu_class_zero;
/* Fall-through value: the smallest representable denormal.  */
811 f->normal_exp = NORMAL_EXPMIN - NR_FRACBITS;
812 f->fraction = IMPLICIT_1;
813 return (sim_fpu_status_inexact | sim_fpu_status_underflow);
818 /* Round a number using NR_GUARDS.
819 Will return the rounded number or F->FRACTION == 0 when underflow */
/* NOTE(review): fragmentary view - the sign-dependent branches of the
   up/down cases and the IMPLICIT_2 overflow handling are elided.  */
821 STATIC_INLINE_SIM_FPU (int)
822 do_normal_round (sim_fpu *f,
826 unsigned64 guardmask = LSMASK64 (nr_guards - 1, 0);
827 unsigned64 guardmsb = LSBIT64 (nr_guards - 1);
828 unsigned64 fraclsb = guardmsb << 1;
/* Only round at all when some guard bits are set (result inexact).  */
829 if ((f->fraction & guardmask))
831 int status = sim_fpu_status_inexact;
834 case sim_fpu_round_default:
836 case sim_fpu_round_near:
837 if ((f->fraction & guardmsb))
/* Exactly half way: round to even (bump only when lsb is 1).  */
839 if ((f->fraction & fraclsb))
841 status |= sim_fpu_status_rounded;
843 else if ((f->fraction & (guardmask >> 1)))
845 status |= sim_fpu_status_rounded;
849 case sim_fpu_round_up:
851 status |= sim_fpu_status_rounded;
853 case sim_fpu_round_down:
855 status |= sim_fpu_status_rounded;
857 case sim_fpu_round_zero:
860 f->fraction &= ~guardmask;
861 /* round if needed, handle resulting overflow */
862 if ((status & sim_fpu_status_rounded))
864 f->fraction += fraclsb;
865 if ((f->fraction & IMPLICIT_2))
/* Round F to 32- or 64-bit precision (IS_DOUBLE), quietening SNaNs and
   handling denormalization per DENORM.  Central rounding entry point.
   NOTE(review): fragmentary view - switch framing and braces elided.  */
878 STATIC_INLINE_SIM_FPU (int)
879 do_round (sim_fpu *f,
882 sim_fpu_denorm denorm)
886 case sim_fpu_class_qnan:
887 case sim_fpu_class_zero:
888 case sim_fpu_class_infinity:
891 case sim_fpu_class_snan:
892 /* Quieten a SignalingNaN */
893 f->class = sim_fpu_class_qnan;
894 return sim_fpu_status_invalid_snan;
896 case sim_fpu_class_number:
897 case sim_fpu_class_denorm:
900 ASSERT (f->fraction < IMPLICIT_2);
901 ASSERT (f->fraction >= IMPLICIT_1);
902 if (f->normal_exp < NORMAL_EXPMIN)
904 /* This number's exponent is too low to fit into the bits
905 available in the number. Round off any bits that will be
906 discarded as a result of denormalization. Edge case is
907 the implicit bit shifted to GUARD0 and then rounded
909 int shift = NORMAL_EXPMIN - f->normal_exp;
910 if (shift + NR_GUARDS <= NR_FRAC_GUARD + 1
911 && !(denorm & sim_fpu_denorm_zero))
913 status = do_normal_round (f, shift + NR_GUARDS, round);
914 if (f->fraction == 0) /* rounding underflowed */
916 status |= do_normal_underflow (f, is_double, round);
918 else if (f->normal_exp < NORMAL_EXPMIN) /* still underflow? */
920 status |= sim_fpu_status_denorm;
921 /* Any loss of precision when denormalizing is
922 underflow. Some processors check for underflow
923 before rounding, some after! */
924 if (status & sim_fpu_status_inexact)
925 status |= sim_fpu_status_underflow;
926 /* Flag that resultant value has been denormalized */
927 f->class = sim_fpu_class_denorm;
929 else if ((denorm & sim_fpu_denorm_underflow_inexact))
931 if ((status & sim_fpu_status_inexact))
932 status |= sim_fpu_status_underflow;
937 status = do_normal_underflow (f, is_double, round);
940 else if (f->normal_exp > NORMAL_EXPMAX)
943 status = do_normal_overflow (f, is_double, round);
947 status = do_normal_round (f, NR_GUARDS, round);
948 if (f->fraction == 0)
949 /* f->class = sim_fpu_class_zero; */
950 status |= do_normal_underflow (f, is_double, round);
951 else if (f->normal_exp > NORMAL_EXPMAX)
952 /* oops! rounding caused overflow */
953 status |= do_normal_overflow (f, is_double, round);
/* `A <= B' on booleans encodes the implication A implies B.  */
955 ASSERT ((f->class == sim_fpu_class_number
956 || f->class == sim_fpu_class_denorm)
957 <= (f->fraction < IMPLICIT_2 && f->fraction >= IMPLICIT_1));
/* Public wrappers selecting single/double precision.  */
965 sim_fpu_round_32 (sim_fpu *f,
967 sim_fpu_denorm denorm)
969 return do_round (f, 0, round, denorm);
973 sim_fpu_round_64 (sim_fpu *f,
975 sim_fpu_denorm denorm)
977 return do_round (f, 1, round, denorm);
/* F = L + R.  NaN/infinity/zero special cases first, then aligns the two
   fractions by exponent, adds (via two's complement negation for negative
   operands) and renormalizes.  NOTE(review): fragmentary view - braces,
   some assignments and return statements are elided.  */
985 sim_fpu_add (sim_fpu *f,
989 if (sim_fpu_is_snan (l))
992 f->class = sim_fpu_class_qnan;
993 return sim_fpu_status_invalid_snan;
995 if (sim_fpu_is_snan (r))
998 f->class = sim_fpu_class_qnan;
999 return sim_fpu_status_invalid_snan;
1001 if (sim_fpu_is_qnan (l))
1006 if (sim_fpu_is_qnan (r))
1011 if (sim_fpu_is_infinity (l))
/* inf + -inf is invalid.  */
1013 if (sim_fpu_is_infinity (r)
1014 && l->sign != r->sign)
1017 return sim_fpu_status_invalid_isi;
1022 if (sim_fpu_is_infinity (r))
1027 if (sim_fpu_is_zero (l))
1029 if (sim_fpu_is_zero (r))
/* 0 + 0: result is -0 only when both operands are -0.  */
1032 f->sign = l->sign & r->sign;
1038 if (sim_fpu_is_zero (r))
1045 int shift = l->normal_exp - r->normal_exp;
1046 unsigned64 lfraction;
1047 unsigned64 rfraction;
1048 /* use exp of larger */
1049 if (shift >= NR_FRAC_GUARD)
1051 /* left has much bigger magnitude */
1053 return sim_fpu_status_inexact;
1055 if (shift <= - NR_FRAC_GUARD)
1057 /* right has much bigger magnitude */
1059 return sim_fpu_status_inexact;
1061 lfraction = l->fraction;
1062 rfraction = r->fraction;
1065 f->normal_exp = l->normal_exp;
1066 if (rfraction & LSMASK64 (shift - 1, 0))
1068 status |= sim_fpu_status_inexact;
1069 rfraction |= LSBIT64 (shift); /* stick LSBit */
1071 rfraction >>= shift;
1075 f->normal_exp = r->normal_exp;
1076 if (lfraction & LSMASK64 (- shift - 1, 0))
1078 status |= sim_fpu_status_inexact;
1079 lfraction |= LSBIT64 (- shift); /* stick LSBit */
1081 lfraction >>= -shift;
1085 f->normal_exp = r->normal_exp;
1088 /* perform the addition */
1090 lfraction = - lfraction;
1092 rfraction = - rfraction;
1093 f->fraction = lfraction + rfraction;
1096 if (f->fraction == 0)
1103 f->class = sim_fpu_class_number;
1104 if ((signed64) f->fraction >= 0)
1109 f->fraction = - f->fraction;
/* Renormalize; shifted-out bit is kept as a sticky bit.  */
1113 if ((f->fraction & IMPLICIT_2))
1115 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1118 else if (f->fraction < IMPLICIT_1)
1125 while (f->fraction < IMPLICIT_1);
1127 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
/* F = L - R.  Mirrors sim_fpu_add with the sign of R logically flipped.
   NOTE(review): fragmentary view - braces, some assignments and return
   statements are elided.  */
1133 INLINE_SIM_FPU (int)
1134 sim_fpu_sub (sim_fpu *f,
1138 if (sim_fpu_is_snan (l))
1141 f->class = sim_fpu_class_qnan;
1142 return sim_fpu_status_invalid_snan;
1144 if (sim_fpu_is_snan (r))
1147 f->class = sim_fpu_class_qnan;
1148 return sim_fpu_status_invalid_snan;
1150 if (sim_fpu_is_qnan (l))
1155 if (sim_fpu_is_qnan (r))
1160 if (sim_fpu_is_infinity (l))
/* inf - inf (same sign) is invalid.  */
1162 if (sim_fpu_is_infinity (r)
1163 && l->sign == r->sign)
1166 return sim_fpu_status_invalid_isi;
1171 if (sim_fpu_is_infinity (r))
1177 if (sim_fpu_is_zero (l))
1179 if (sim_fpu_is_zero (r))
/* 0 - 0: result is -0 only for (-0) - (+0).  */
1182 f->sign = l->sign & !r->sign;
1191 if (sim_fpu_is_zero (r))
1198 int shift = l->normal_exp - r->normal_exp;
1199 unsigned64 lfraction;
1200 unsigned64 rfraction;
1201 /* use exp of larger */
1202 if (shift >= NR_FRAC_GUARD)
1204 /* left has much bigger magnitude */
1206 return sim_fpu_status_inexact;
1208 if (shift <= - NR_FRAC_GUARD)
1210 /* right has much bigger magnitude */
1213 return sim_fpu_status_inexact;
1215 lfraction = l->fraction;
1216 rfraction = r->fraction;
1219 f->normal_exp = l->normal_exp;
1220 if (rfraction & LSMASK64 (shift - 1, 0))
1222 status |= sim_fpu_status_inexact;
1223 rfraction |= LSBIT64 (shift); /* stick LSBit */
1225 rfraction >>= shift;
1229 f->normal_exp = r->normal_exp;
1230 if (lfraction & LSMASK64 (- shift - 1, 0))
1232 status |= sim_fpu_status_inexact;
1233 lfraction |= LSBIT64 (- shift); /* stick LSBit */
1235 lfraction >>= -shift;
1239 f->normal_exp = r->normal_exp;
1242 /* perform the subtraction */
1244 lfraction = - lfraction;
1246 rfraction = - rfraction;
1247 f->fraction = lfraction + rfraction;
1250 if (f->fraction == 0)
1257 f->class = sim_fpu_class_number;
1258 if ((signed64) f->fraction >= 0)
1263 f->fraction = - f->fraction;
/* Renormalize; shifted-out bit is kept as a sticky bit.  */
1267 if ((f->fraction & IMPLICIT_2))
1269 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1272 else if (f->fraction < IMPLICIT_1)
1279 while (f->fraction < IMPLICIT_1);
1281 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
/* F = L * R.  Special cases first (inf * 0 is invalid), then a 64x64 ->
   128 bit schoolbook multiply of the fractions in 32-bit limbs.
   NOTE(review): fragmentary view - braces, HIGH/LOW extraction and parts
   of the renormalization loop are elided.  */
1287 INLINE_SIM_FPU (int)
1288 sim_fpu_mul (sim_fpu *f,
1292 if (sim_fpu_is_snan (l))
1295 f->class = sim_fpu_class_qnan;
1296 return sim_fpu_status_invalid_snan;
1298 if (sim_fpu_is_snan (r))
1301 f->class = sim_fpu_class_qnan;
1302 return sim_fpu_status_invalid_snan;
1304 if (sim_fpu_is_qnan (l))
1309 if (sim_fpu_is_qnan (r))
1314 if (sim_fpu_is_infinity (l))
1316 if (sim_fpu_is_zero (r))
1319 return sim_fpu_status_invalid_imz;
1322 f->sign = l->sign ^ r->sign;
1325 if (sim_fpu_is_infinity (r))
1327 if (sim_fpu_is_zero (l))
1330 return sim_fpu_status_invalid_imz;
1333 f->sign = l->sign ^ r->sign;
1336 if (sim_fpu_is_zero (l) || sim_fpu_is_zero (r))
1339 f->sign = l->sign ^ r->sign;
1342 /* Calculate the mantissa by multiplying both 64bit numbers to get a
/* Split each fraction into 32-bit halves and form the four partial
   products; RES2:RES0 accumulates the 128-bit result.  */
1347 unsigned64 nl = l->fraction & 0xffffffff;
1348 unsigned64 nh = l->fraction >> 32;
1349 unsigned64 ml = r->fraction & 0xffffffff;
1350 unsigned64 mh = r->fraction >>32;
1351 unsigned64 pp_ll = ml * nl;
1352 unsigned64 pp_hl = mh * nl;
1353 unsigned64 pp_lh = ml * nh;
1354 unsigned64 pp_hh = mh * nh;
1355 unsigned64 res2 = 0;
1356 unsigned64 res0 = 0;
1357 unsigned64 ps_hh__ = pp_hl + pp_lh;
/* Carry out of the middle-partial-product sum.  */
1358 if (ps_hh__ < pp_hl)
1359 res2 += UNSIGNED64 (0x100000000);
1360 pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
1361 res0 = pp_ll + pp_hl;
1364 res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1368 f->normal_exp = l->normal_exp + r->normal_exp;
1369 f->sign = l->sign ^ r->sign;
1370 f->class = sim_fpu_class_number;
1372 /* Input is bounded by [1,2) ; [2^60,2^61)
1373 Output is bounded by [1,4) ; [2^120,2^122) */
1375 /* Adjust the exponent according to where the decimal point ended
1376 up in the high 64 bit word. In the source the decimal point
1377 was at NR_FRAC_GUARD. */
1378 f->normal_exp += NR_FRAC_GUARD + 64 - (NR_FRAC_GUARD * 2);
1380 /* The high word is bounded according to the above. Consequently
1381 it has never overflowed into IMPLICIT_2. */
1382 ASSERT (high < LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64));
1383 ASSERT (high >= LSBIT64 ((NR_FRAC_GUARD * 2) - 64));
1384 ASSERT (LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64) < IMPLICIT_1);
/* Normalize HIGH upward, pulling bits in from LOW.  */
1391 if (low & LSBIT64 (63))
1395 while (high < IMPLICIT_1);
1397 ASSERT (high >= IMPLICIT_1 && high < IMPLICIT_2);
1400 f->fraction = (high | 1); /* sticky */
1401 return sim_fpu_status_inexact;
/* F = L / R.  Special cases (inf/inf, 0/0, x/0) first, then a bit-at-a-
   time long division of the fractions with NR_SPARE extra precision bits.
   NOTE(review): fragmentary view - braces, the division loop framing and
   some returns are elided.  */
1412 INLINE_SIM_FPU (int)
1413 sim_fpu_div (sim_fpu *f,
1417 if (sim_fpu_is_snan (l))
1420 f->class = sim_fpu_class_qnan;
1421 return sim_fpu_status_invalid_snan;
1423 if (sim_fpu_is_snan (r))
1426 f->class = sim_fpu_class_qnan;
1427 return sim_fpu_status_invalid_snan;
1429 if (sim_fpu_is_qnan (l))
1432 f->class = sim_fpu_class_qnan;
1435 if (sim_fpu_is_qnan (r))
1438 f->class = sim_fpu_class_qnan;
1441 if (sim_fpu_is_infinity (l))
1443 if (sim_fpu_is_infinity (r))
1446 return sim_fpu_status_invalid_idi;
1451 f->sign = l->sign ^ r->sign;
1455 if (sim_fpu_is_zero (l))
1457 if (sim_fpu_is_zero (r))
1460 return sim_fpu_status_invalid_zdz;
1465 f->sign = l->sign ^ r->sign;
1469 if (sim_fpu_is_infinity (r))
1472 f->sign = l->sign ^ r->sign;
/* Finite / zero: signed infinity plus a divide-by-zero flag.  */
1475 if (sim_fpu_is_zero (r))
1477 f->class = sim_fpu_class_infinity;
1478 f->sign = l->sign ^ r->sign;
1479 return sim_fpu_status_invalid_div0;
1482 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1485 /* quotient = ( ( numerator / denominator)
1486 x 2^(numerator exponent - denominator exponent)
1488 unsigned64 numerator;
1489 unsigned64 denominator;
1490 unsigned64 quotient;
1493 f->class = sim_fpu_class_number;
1494 f->sign = l->sign ^ r->sign;
1495 f->normal_exp = l->normal_exp - r->normal_exp;
1497 numerator = l->fraction;
1498 denominator = r->fraction;
1500 /* Fraction will be less than 1.0 */
1501 if (numerator < denominator)
1506 ASSERT (numerator >= denominator);
1508 /* Gain extra precision, already used one spare bit */
1509 numerator <<= NR_SPARE;
1510 denominator <<= NR_SPARE;
1512 /* Does divide one bit at a time. Optimize??? */
1514 bit = (IMPLICIT_1 << NR_SPARE);
1517 if (numerator >= denominator)
1520 numerator -= denominator;
1526 /* discard (but save) the extra bits */
/* Keep a sticky bit if any of the discarded spare bits were set.  */
1527 if ((quotient & LSMASK64 (NR_SPARE -1, 0)))
1528 quotient = (quotient >> NR_SPARE) | 1;
1530 quotient = (quotient >> NR_SPARE);
1532 f->fraction = quotient;
1533 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1536 f->fraction |= 1; /* stick remaining bits */
1537 return sim_fpu_status_inexact;
/* F = max (L, R).  NaNs first, then infinities, then sign comparison,
   then exponent/fraction comparison for same-signed finite values.
   NOTE(review): fragmentary view - braces and some returns are elided.  */
1545 INLINE_SIM_FPU (int)
1546 sim_fpu_max (sim_fpu *f,
1550 if (sim_fpu_is_snan (l))
1553 f->class = sim_fpu_class_qnan;
1554 return sim_fpu_status_invalid_snan;
1556 if (sim_fpu_is_snan (r))
1559 f->class = sim_fpu_class_qnan;
1560 return sim_fpu_status_invalid_snan;
1562 if (sim_fpu_is_qnan (l))
1567 if (sim_fpu_is_qnan (r))
1572 if (sim_fpu_is_infinity (l))
1574 if (sim_fpu_is_infinity (r)
1575 && l->sign == r->sign)
1578 return sim_fpu_status_invalid_isi;
1581 *f = *r; /* -inf < anything */
1583 *f = *l; /* +inf > anything */
1586 if (sim_fpu_is_infinity (r))
1589 *f = *l; /* anything > -inf */
1591 *f = *r; /* anything < +inf */
/* SIGN is 1 for negative: a greater sign means a lesser value.  */
1594 if (l->sign > r->sign)
1596 *f = *r; /* -ve < +ve */
1599 if (l->sign < r->sign)
1601 *f = *l; /* +ve > -ve */
1604 ASSERT (l->sign == r->sign);
/* Same sign: compare magnitudes; for negatives the larger magnitude is
   the smaller value.  */
1605 if (l->normal_exp > r->normal_exp
1606 || (l->normal_exp == r->normal_exp &&
1607 l->fraction > r->fraction))
1611 *f = *r; /* -ve < -ve */
1613 *f = *l; /* +ve > +ve */
1620 *f = *l; /* -ve > -ve */
1622 *f = *r; /* +ve < +ve */
/* F = min (L, R).  Mirror image of sim_fpu_max.
   NOTE(review): fragmentary view - braces and some returns are elided.  */
1628 INLINE_SIM_FPU (int)
1629 sim_fpu_min (sim_fpu *f,
1633 if (sim_fpu_is_snan (l))
1636 f->class = sim_fpu_class_qnan;
1637 return sim_fpu_status_invalid_snan;
1639 if (sim_fpu_is_snan (r))
1642 f->class = sim_fpu_class_qnan;
1643 return sim_fpu_status_invalid_snan;
1645 if (sim_fpu_is_qnan (l))
1650 if (sim_fpu_is_qnan (r))
1655 if (sim_fpu_is_infinity (l))
1657 if (sim_fpu_is_infinity (r)
1658 && l->sign == r->sign)
1661 return sim_fpu_status_invalid_isi;
1664 *f = *l; /* -inf < anything */
1666 *f = *r; /* +inf > anything */
1669 if (sim_fpu_is_infinity (r))
1672 *f = *r; /* anything > -inf */
1674 *f = *l; /* anything < +inf */
1677 if (l->sign > r->sign)
1679 *f = *l; /* -ve < +ve */
1682 if (l->sign < r->sign)
1684 *f = *r; /* +ve > -ve */
1687 ASSERT (l->sign == r->sign);
1688 if (l->normal_exp > r->normal_exp
1689 || (l->normal_exp == r->normal_exp &&
1690 l->fraction > r->fraction))
1694 *f = *l; /* -ve < -ve */
1696 *f = *r; /* +ve > +ve */
1703 *f = *r; /* -ve > -ve */
1705 *f = *l; /* +ve < +ve */
/* F = -R.  SNaN quietens with invalid status; the sign-flip for other
   classes is on elided lines.  */
1711 INLINE_SIM_FPU (int)
1712 sim_fpu_neg (sim_fpu *f,
1715 if (sim_fpu_is_snan (r))
1718 f->class = sim_fpu_class_qnan;
1719 return sim_fpu_status_invalid_snan;
1721 if (sim_fpu_is_qnan (r))
/* F = |R|.  SNaN quietens with invalid status; the sign-clear for other
   classes is on elided lines.  */
1732 INLINE_SIM_FPU (int)
1733 sim_fpu_abs (sim_fpu *f,
1736 if (sim_fpu_is_snan (r))
1739 f->class = sim_fpu_class_qnan;
1740 return sim_fpu_status_invalid_snan;
1742 if (sim_fpu_is_qnan (r))
/* F = 1 / R, implemented directly as a division by the constant one.  */
1753 INLINE_SIM_FPU (int)
1754 sim_fpu_inv (sim_fpu *f,
1757 return sim_fpu_div (f, &sim_fpu_one, r);
/* F = sqrt (R): special-case header.  sqrt(+-0) = +-0, sqrt(+inf) = +inf,
   sqrt of a negative (including -inf) is invalid.
   NOTE(review): fragmentary view - the sign tests guarding the invalid
   cases are on elided lines.  */
1761 INLINE_SIM_FPU (int)
1762 sim_fpu_sqrt (sim_fpu *f,
1765 if (sim_fpu_is_snan (r))
1768 return sim_fpu_status_invalid_snan;
1770 if (sim_fpu_is_qnan (r))
1775 if (sim_fpu_is_zero (r))
1777 f->class = sim_fpu_class_zero;
1782 if (sim_fpu_is_infinity (r))
1787 return sim_fpu_status_invalid_sqrt;
1791 f->class = sim_fpu_class_infinity;
1800 return sim_fpu_status_invalid_sqrt;
1803 /* @(#)e_sqrt.c 5.1 93/09/24 */
1805 * ====================================================
1806 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1808 * Developed at SunPro, a Sun Microsystems, Inc. business.
1809 * Permission to use, copy, modify, and distribute this
1810 * software is freely granted, provided that this notice
1812 * ====================================================
1815 /* __ieee754_sqrt(x)
1816 * Return correctly rounded sqrt.
1817 * ------------------------------------------
1818 * | Use the hardware sqrt if you have one |
1819 * ------------------------------------------
1821 * Bit by bit method using integer arithmetic. (Slow, but portable)
1823 * Scale x to y in [1,4) with even powers of 2:
1824 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1825 * sqrt(x) = 2^k * sqrt(y)
1828 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1829 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1831 - y = ((m even) ? x : 2.x)
1833 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1835 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1837 * 2. Bit by bit computation
1838 * Let q = sqrt(y) truncated to i bit after binary point (q = 1),
1841 * s = 2*q , and y = 2 * ( y - q ). (1)
1844 * To compute q from q , one checks whether
1848 * (q + 2 ) <= y. (2)
1851 * If (2) is false, then q = q ; otherwise q = q + 2 .
1854 * With some algebraic manipulation, it is not difficult to see
1855 * that (2) is equivalent to
1860 * The advantage of (3) is that s and y can be computed by
1862 * the following recurrence formula:
1865 * s = s , y = y ; (4)
1874 * s = s + 2 , y = y - s - 2 (5)
1879 - NOTE: y = 2 (y - s - 2 )
1882 * One may easily use induction to prove (4) and (5).
1883 * Note. Since the left hand side of (3) contain only i+2 bits,
1884 * it is not necessary to do a full (53-bit) comparison
1887 * After generating the 53 bits result, we compute one more bit.
1888 * Together with the remainder, we can decide whether the
1889 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
1890 * (it will never equal to 1/2ulp).
1891 * The rounding mode can be detected by checking whether
1892 * huge + tiny is equal to huge, and whether huge - tiny is
1893 * equal to huge for some floating point number "huge" and "tiny".
1896 * sqrt(+-0) = +-0 ... exact
1898 * sqrt(-ve) = NaN ... with invalid signal
1899 * sqrt(NaN) = NaN ... with invalid signal for signaling NaN
1901 * Other methods : see the appended file at the end of the program below.
1906 /* generate sqrt(x) bit by bit */
1912 f->class = sim_fpu_class_number;
1915 f->normal_exp = (r->normal_exp >> 1); /* exp = [exp/2] */
1917 /* odd exp, double x to make it even */
1918 ASSERT (y >= IMPLICIT_1 && y < IMPLICIT_4);
1919 if ((r->normal_exp & 1))
1923 ASSERT (y >= IMPLICIT_1 && y < (IMPLICIT_2 << 1));
1925 /* Let loop determine first value of s (either 1 or 2) */
1932 unsigned64 t = s + b;
1943 ASSERT (q >= IMPLICIT_1 && q < IMPLICIT_2);
1947 f->fraction |= 1; /* stick remaining bits */
1948 return sim_fpu_status_inexact;
1956 /* int/long <-> sim_fpu */
1958 INLINE_SIM_FPU (int)
1959 sim_fpu_i32to (sim_fpu *f,
1961 sim_fpu_round round)
1967 INLINE_SIM_FPU (int)
1968 sim_fpu_u32to (sim_fpu *f,
1970 sim_fpu_round round)
1976 INLINE_SIM_FPU (int)
1977 sim_fpu_i64to (sim_fpu *f,
1979 sim_fpu_round round)
1985 INLINE_SIM_FPU (int)
1986 sim_fpu_u64to (sim_fpu *f,
1988 sim_fpu_round round)
1995 INLINE_SIM_FPU (int)
1996 sim_fpu_to32i (signed32 *i,
1998 sim_fpu_round round)
2001 int status = fpu2i (&i64, f, 0, round);
2006 INLINE_SIM_FPU (int)
2007 sim_fpu_to32u (unsigned32 *u,
2009 sim_fpu_round round)
2012 int status = fpu2u (&u64, f, 0);
2017 INLINE_SIM_FPU (int)
2018 sim_fpu_to64i (signed64 *i,
2020 sim_fpu_round round)
2022 return fpu2i (i, f, 1, round);
2026 INLINE_SIM_FPU (int)
2027 sim_fpu_to64u (unsigned64 *u,
2029 sim_fpu_round round)
2031 return fpu2u (u, f, 1);
2036 /* sim_fpu -> host format */
2039 INLINE_SIM_FPU (float)
2040 sim_fpu_2f (const sim_fpu *f)
2047 INLINE_SIM_FPU (double)
2048 sim_fpu_2d (const sim_fpu *s)
2051 if (sim_fpu_is_snan (s))
2055 n.class = sim_fpu_class_qnan;
2056 val.i = pack_fpu (&n, 1);
2060 val.i = pack_fpu (s, 1);
2067 INLINE_SIM_FPU (void)
2068 sim_fpu_f2 (sim_fpu *f,
2073 unpack_fpu (f, val.i, 1);
2078 INLINE_SIM_FPU (void)
2079 sim_fpu_d2 (sim_fpu *f,
2084 unpack_fpu (f, val.i, 1);
2090 INLINE_SIM_FPU (int)
2091 sim_fpu_is_nan (const sim_fpu *d)
2095 case sim_fpu_class_qnan:
2096 case sim_fpu_class_snan:
2103 INLINE_SIM_FPU (int)
2104 sim_fpu_is_qnan (const sim_fpu *d)
2108 case sim_fpu_class_qnan:
2115 INLINE_SIM_FPU (int)
2116 sim_fpu_is_snan (const sim_fpu *d)
2120 case sim_fpu_class_snan:
2127 INLINE_SIM_FPU (int)
2128 sim_fpu_is_zero (const sim_fpu *d)
2132 case sim_fpu_class_zero:
2139 INLINE_SIM_FPU (int)
2140 sim_fpu_is_infinity (const sim_fpu *d)
2144 case sim_fpu_class_infinity:
2151 INLINE_SIM_FPU (int)
2152 sim_fpu_is_number (const sim_fpu *d)
2156 case sim_fpu_class_denorm:
2157 case sim_fpu_class_number:
2164 INLINE_SIM_FPU (int)
2165 sim_fpu_is_denorm (const sim_fpu *d)
2169 case sim_fpu_class_denorm:
2177 INLINE_SIM_FPU (int)
2178 sim_fpu_sign (const sim_fpu *d)
2184 INLINE_SIM_FPU (int)
2185 sim_fpu_exp (const sim_fpu *d)
2187 return d->normal_exp;
2192 INLINE_SIM_FPU (int)
2193 sim_fpu_is (const sim_fpu *d)
2197 case sim_fpu_class_qnan:
2198 return SIM_FPU_IS_QNAN;
2199 case sim_fpu_class_snan:
2200 return SIM_FPU_IS_SNAN;
2201 case sim_fpu_class_infinity:
2203 return SIM_FPU_IS_NINF;
2205 return SIM_FPU_IS_PINF;
2206 case sim_fpu_class_number:
2208 return SIM_FPU_IS_NNUMBER;
2210 return SIM_FPU_IS_PNUMBER;
2211 case sim_fpu_class_denorm:
2213 return SIM_FPU_IS_NDENORM;
2215 return SIM_FPU_IS_PDENORM;
2216 case sim_fpu_class_zero:
2218 return SIM_FPU_IS_NZERO;
2220 return SIM_FPU_IS_PZERO;
2227 INLINE_SIM_FPU (int)
2228 sim_fpu_cmp (const sim_fpu *l, const sim_fpu *r)
2231 sim_fpu_sub (&res, l, r);
2232 return sim_fpu_is (&res);
2235 INLINE_SIM_FPU (int)
2236 sim_fpu_is_lt (const sim_fpu *l, const sim_fpu *r)
2239 sim_fpu_lt (&status, l, r);
2243 INLINE_SIM_FPU (int)
2244 sim_fpu_is_le (const sim_fpu *l, const sim_fpu *r)
2247 sim_fpu_le (&is, l, r);
2251 INLINE_SIM_FPU (int)
2252 sim_fpu_is_eq (const sim_fpu *l, const sim_fpu *r)
2255 sim_fpu_eq (&is, l, r);
2259 INLINE_SIM_FPU (int)
2260 sim_fpu_is_ne (const sim_fpu *l, const sim_fpu *r)
2263 sim_fpu_ne (&is, l, r);
2267 INLINE_SIM_FPU (int)
2268 sim_fpu_is_ge (const sim_fpu *l, const sim_fpu *r)
2271 sim_fpu_ge (&is, l, r);
2275 INLINE_SIM_FPU (int)
2276 sim_fpu_is_gt (const sim_fpu *l, const sim_fpu *r)
2279 sim_fpu_gt (&is, l, r);
2284 /* Compare operators */
2286 INLINE_SIM_FPU (int)
2287 sim_fpu_lt (int *is,
2291 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2295 lval.i = pack_fpu (l, 1);
2296 rval.i = pack_fpu (r, 1);
2297 (*is) = (lval.d < rval.d);
2300 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2303 return sim_fpu_status_invalid_snan;
2308 return sim_fpu_status_invalid_qnan;
2312 INLINE_SIM_FPU (int)
2313 sim_fpu_le (int *is,
2317 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2321 lval.i = pack_fpu (l, 1);
2322 rval.i = pack_fpu (r, 1);
2323 *is = (lval.d <= rval.d);
2326 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2329 return sim_fpu_status_invalid_snan;
2334 return sim_fpu_status_invalid_qnan;
2338 INLINE_SIM_FPU (int)
2339 sim_fpu_eq (int *is,
2343 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2347 lval.i = pack_fpu (l, 1);
2348 rval.i = pack_fpu (r, 1);
2349 (*is) = (lval.d == rval.d);
2352 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2355 return sim_fpu_status_invalid_snan;
2360 return sim_fpu_status_invalid_qnan;
2364 INLINE_SIM_FPU (int)
2365 sim_fpu_ne (int *is,
2369 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2373 lval.i = pack_fpu (l, 1);
2374 rval.i = pack_fpu (r, 1);
2375 (*is) = (lval.d != rval.d);
2378 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2381 return sim_fpu_status_invalid_snan;
2386 return sim_fpu_status_invalid_qnan;
2390 INLINE_SIM_FPU (int)
2391 sim_fpu_ge (int *is,
2395 return sim_fpu_le (is, r, l);
2398 INLINE_SIM_FPU (int)
2399 sim_fpu_gt (int *is,
2403 return sim_fpu_lt (is, r, l);
2407 /* A number of useful constants */
#if EXTERN_SIM_FPU_P
/* Handy constant values.  Initializer order is
   { class, sign, fraction, normal_exp }.  */
const sim_fpu sim_fpu_zero = {
  sim_fpu_class_zero, 0, 0, 0
};
const sim_fpu sim_fpu_qnan = {
  sim_fpu_class_qnan, 0, 0, 0
};
const sim_fpu sim_fpu_one = {
  sim_fpu_class_number, 0, IMPLICIT_1, 0
};
const sim_fpu sim_fpu_two = {
  sim_fpu_class_number, 0, IMPLICIT_1, 1
};
/* Largest finite single/double values: all fraction bits set at the
   maximum normal exponent.  */
const sim_fpu sim_fpu_max32 = {
  sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS32), NORMAL_EXPMAX32
};
const sim_fpu sim_fpu_max64 = {
  sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS64), NORMAL_EXPMAX64
};
#endif
2433 INLINE_SIM_FPU (void)
2434 sim_fpu_print_fpu (const sim_fpu *f,
2435 sim_fpu_print_func *print,
2438 sim_fpu_printn_fpu (f, print, -1, arg);
2441 INLINE_SIM_FPU (void)
2442 sim_fpu_printn_fpu (const sim_fpu *f,
2443 sim_fpu_print_func *print,
2447 print (arg, "%s", f->sign ? "-" : "+");
2450 case sim_fpu_class_qnan:
2452 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2453 print (arg, "*QuietNaN");
2455 case sim_fpu_class_snan:
2457 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2458 print (arg, "*SignalNaN");
2460 case sim_fpu_class_zero:
2463 case sim_fpu_class_infinity:
2466 case sim_fpu_class_number:
2467 case sim_fpu_class_denorm:
2469 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2470 print (arg, "*2^%+d", f->normal_exp);
2471 ASSERT (f->fraction >= IMPLICIT_1);
2472 ASSERT (f->fraction < IMPLICIT_2);
2477 INLINE_SIM_FPU (void)
2478 sim_fpu_print_status (int status,
2479 sim_fpu_print_func *print,
2486 switch ((sim_fpu_status) (status & i))
2488 case sim_fpu_status_denorm:
2489 print (arg, "%sD", prefix);
2491 case sim_fpu_status_invalid_snan:
2492 print (arg, "%sSNaN", prefix);
2494 case sim_fpu_status_invalid_qnan:
2495 print (arg, "%sQNaN", prefix);
2497 case sim_fpu_status_invalid_isi:
2498 print (arg, "%sISI", prefix);
2500 case sim_fpu_status_invalid_idi:
2501 print (arg, "%sIDI", prefix);
2503 case sim_fpu_status_invalid_zdz:
2504 print (arg, "%sZDZ", prefix);
2506 case sim_fpu_status_invalid_imz:
2507 print (arg, "%sIMZ", prefix);
2509 case sim_fpu_status_invalid_cvi:
2510 print (arg, "%sCVI", prefix);
2512 case sim_fpu_status_invalid_cmp:
2513 print (arg, "%sCMP", prefix);
2515 case sim_fpu_status_invalid_sqrt:
2516 print (arg, "%sSQRT", prefix);
2519 case sim_fpu_status_inexact:
2520 print (arg, "%sX", prefix);
2523 case sim_fpu_status_overflow:
2524 print (arg, "%sO", prefix);
2527 case sim_fpu_status_underflow:
2528 print (arg, "%sU", prefix);
2531 case sim_fpu_status_invalid_div0:
2532 print (arg, "%s/", prefix);
2535 case sim_fpu_status_rounded:
2536 print (arg, "%sR", prefix);