1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
5 /* Copyright (C) 1994,1997-1998 Free Software Foundation, Inc.
7 This file is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 2, or (at your option) any
12 In addition to the permissions in the GNU General Public License, the
13 Free Software Foundation gives you unlimited permission to link the
14 compiled version of this file with other programs, and to distribute
15 those programs without any restriction coming from the use of this
16 file. (The General Public License restrictions do apply in other
17 respects; for example, they cover modification of the file, and
18 distribution when not linked into another program.)
20 This file is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
25 You should have received a copy of the GNU General Public License
26 along with this program; see the file COPYING. If not, write to
27 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
29 /* As a special exception, if you link this library with other files,
30 some of which are compiled with GCC, to produce an executable,
31 this library does not by itself cause the resulting executable
32 to be covered by the GNU General Public License.
33 This exception does not however invalidate any other reasons why
34 the executable file might be covered by the GNU General Public License. */
36 /* This implements IEEE 754 format arithmetic, but does not provide a
37 mechanism for setting the rounding mode, or for generating or handling
40 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
41 Wilson, all of Cygnus Support. */
47 #include "sim-basics.h"
51 #include "sim-assert.h"
/* print_bits: debug helper that walks X downward from bit MSBIT, emitting
   each bit through the caller-supplied PRINT callback.
   NOTE(review): this extract is incomplete -- the signature's remaining
   parameters and most of the loop body are missing from this chunk, so the
   code below is left byte-identical.  */
55 If digits is -1, then print all digits. */
58 print_bits (unsigned64 x,
61 sim_fpu_print_func print,
64 unsigned64 bit = LSBIT64 (msbit);
/* Count down remaining digits; a positive DIGITS limits how many are
   printed (per the header comment above, -1 means print them all).  */
77 if (digits > 0) digits--;
84 /* Quick and dirty conversion between a host double and host 64bit int */
92 /* A packed IEEE floating point number.
94 Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
95 32 and 64 bit numbers. This number is interpreted as:
97 Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
98 (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
100 Denormalized (0 == BIASEDEXP && FRAC != 0):
101 (sign ? "-" : "+") 0.<FRAC> x 2 ^ (- EXPBIAS)
103 Zero (0 == BIASEDEXP && FRAC == 0):
104 (sign ? "-" : "+") 0.0
106 Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
107 (sign ? "-" : "+") "infinity"
109 SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
112 QuietNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC > QUIET_NAN):
/* These macros expect a local `is_double' (or `is_64bit') flag in the
   enclosing function; they select between IEEE single and double layout.  */
117 #define NR_EXPBITS (is_double ? 11 : 8)
118 #define NR_FRACBITS (is_double ? 52 : 23)
119 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
121 #define EXPMAX32 (255)
122 #define EXMPAX64 (2047)
/* NOTE(review): "EXMPAX64" above is a misspelling of EXPMAX64.  It is used
   consistently (only in the EXPMAX definition below) so behaviour is
   correct, but both occurrences should be renamed in a single change.  */
123 #define EXPMAX ((unsigned) (is_double ? EXMPAX64 : EXPMAX32))
125 #define EXPBIAS32 (127)
126 #define EXPBIAS64 (1023)
127 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
/* QUIET_NAN is the most significant fraction bit; set => quiet NaN.  */
129 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
133 /* An unpacked floating point number.
135 When unpacked, the fraction of both a 32 and 64 bit floating point
136 number is stored using the same format:
138 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
139 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
141 #define NR_PAD32 (30)
143 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
144 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
146 #define NR_GUARDS32 (7 + NR_PAD32)
147 #define NR_GUARDS64 (8 + NR_PAD64)
148 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
149 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
151 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
152 #define GUARDLSB LSBIT64 (NR_PAD)
153 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
/* The unpacked fraction's binary point sits at bit NR_FRAC_GUARD; a
   normalized unpacked value therefore lies in [IMPLICIT_1, IMPLICIT_2).  */
155 #define NR_FRAC_GUARD (60)
156 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
157 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
158 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
161 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
163 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
165 #define NORMAL_EXPMAX32 (EXPBIAS32)
166 #define NORMAL_EXPMAX64 (EXPBIAS64)
167 #define NORMAL_EXPMAX (EXPBIAS)
170 /* Integer constants */
172 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
173 #define MAX_UINT32 LSMASK64 (31, 0)
174 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
176 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
177 #define MAX_UINT64 LSMASK64 (63, 0)
178 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
180 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
181 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
182 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
183 #define NR_INTBITS (is_64bit ? 64 : 32)
185 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer */
/* NOTE(review): interior lines of this function (switch header, braces,
   locals, break statements) are missing from this extract; the visible
   code is left byte-identical.  */
186 STATIC_INLINE_SIM_FPU (unsigned64)
187 pack_fpu (const sim_fpu *src,
/* QNaN: shift the fraction down past the guard bits and force the
   quiet bit on.  */
198 case sim_fpu_class_qnan:
201 /* force fraction to correct class */
202 fraction = src->fraction;
203 fraction >>= NR_GUARDS;
204 fraction |= QUIET_NAN;
/* SNaN: as above but force the quiet bit off.  */
206 case sim_fpu_class_snan:
209 /* force fraction to correct class */
210 fraction = src->fraction;
211 fraction >>= NR_GUARDS;
212 fraction &= ~QUIET_NAN;
214 case sim_fpu_class_infinity:
219 case sim_fpu_class_zero:
224 case sim_fpu_class_number:
225 case sim_fpu_class_denorm:
226 ASSERT (src->fraction >= IMPLICIT_1);
227 ASSERT (src->fraction < IMPLICIT_2);
228 if (src->normal_exp < NORMAL_EXPMIN)
230 /* This number's exponent is too low to fit into the bits
231 available in the number We'll denormalize the number by
232 storing zero in the exponent and shift the fraction to
233 the right to make up for it. */
234 int nr_shift = NORMAL_EXPMIN - src->normal_exp;
235 if (nr_shift > NR_FRACBITS)
237 /* underflow, just make the number zero */
246 /* Shift by the value */
247 fraction = src->fraction;
248 fraction >>= NR_GUARDS;
249 fraction >>= nr_shift;
252 else if (src->normal_exp > NORMAL_EXPMAX)
/* Bias the exponent for the packed encoding.  */
261 exp = (src->normal_exp + EXPBIAS);
263 fraction = src->fraction;
264 /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
266 /* Round to nearest: If the guard bits are the all zero, but
267 the first, then we're half way between two numbers,
268 choose the one which makes the lsb of the answer 0. */
269 if ((fraction & GUARDMASK) == GUARDMSB)
271 if ((fraction & (GUARDMSB << 1)))
272 fraction += (GUARDMSB << 1);
276 /* Add a one to the guards to force round to nearest */
277 fraction += GUARDROUND;
279 if ((fraction & IMPLICIT_2)) /* rounding resulted in carry */
284 fraction >>= NR_GUARDS;
285 /* When exp == EXPMAX (overflow from carry) fraction must
286 have been made zero */
287 ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
/* Assemble <sign><biased exp><fraction> into the packed word.  */
294 packed = ((sign ? SIGNBIT : 0)
295 | (exp << NR_FRACBITS)
296 | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
298 /* trace operation */
305 printf ("pack_fpu: ");
306 printf ("-> %c%0lX.%06lX\n",
307 LSMASKED32 (packed, 31, 31) ? '8' : '0',
308 (long) LSEXTRACTED32 (packed, 30, 23),
309 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
317 /* Unpack a 32/64 bit integer into a sim_fpu structure */
/* NOTE(review): extract is incomplete -- braces, the `is_double' local
   usage and several branches are missing; visible code left unchanged.  */
318 STATIC_INLINE_SIM_FPU (void)
319 unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
/* Split the packed word into its three fields.  */
321 unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
322 unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
323 int sign = (packed & SIGNBIT) != 0;
327 /* Hmm. Looks like 0 */
330 /* tastes like zero */
331 dst->class = sim_fpu_class_zero;
337 /* Zero exponent with non zero fraction - it's denormalized,
338 so there isn't a leading implicit one - we'll shift it so
340 dst->normal_exp = exp - EXPBIAS + 1;
341 dst->class = sim_fpu_class_denorm;
343 fraction <<= NR_GUARDS;
/* Normalize the subnormal fraction up to the implicit-1 position
   (the loop body adjusting normal_exp is missing from this extract).  */
344 while (fraction < IMPLICIT_1)
349 dst->fraction = fraction;
352 else if (exp == EXPMAX)
357 /* Attached to a zero fraction - means infinity */
358 dst->class = sim_fpu_class_infinity;
360 /* dst->normal_exp = EXPBIAS; */
361 /* dst->fraction = 0; */
365 /* Non zero fraction, means NaN */
/* Top fraction bit set => quiet NaN, else signalling.  */
367 dst->fraction = (fraction << NR_GUARDS);
368 if (fraction >= QUIET_NAN)
369 dst->class = sim_fpu_class_qnan;
371 dst->class = sim_fpu_class_snan;
376 /* Nothing strange about this number */
377 dst->class = sim_fpu_class_number;
379 dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
380 dst->normal_exp = exp - EXPBIAS;
383 /* trace operation */
390 printf ("unpack_fpu: %c%02lX.%06lX ->\n",
391 LSMASKED32 (packed, 31, 31) ? '8' : '0',
392 (long) LSEXTRACTED32 (packed, 30, 23),
393 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
/* Debug-only round trip: repacking the unpacked value must reproduce
   the original bits exactly.  */
400 val.i = pack_fpu (dst, 1);
403 ASSERT (val.i == packed);
407 unsigned32 val = pack_fpu (dst, 0);
408 unsigned32 org = packed;
415 /* Convert a floating point into an integer */
/* NOTE(review): this is fpu2i (signature line missing from the extract);
   returns a sim_fpu_status bit-set and stores the result through *i.  */
416 STATIC_INLINE_SIM_FPU (int)
425 if (sim_fpu_is_zero (s))
430 if (sim_fpu_is_snan (s))
432 *i = MIN_INT; /* FIXME */
433 return sim_fpu_status_invalid_cvi;
435 if (sim_fpu_is_qnan (s))
437 *i = MIN_INT; /* FIXME */
438 return sim_fpu_status_invalid_cvi;
440 /* map infinity onto MAX_INT... */
441 if (sim_fpu_is_infinity (s))
443 *i = s->sign ? MIN_INT : MAX_INT;
444 return sim_fpu_status_invalid_cvi;
446 /* it is a number, but a small one */
447 if (s->normal_exp < 0)
450 return sim_fpu_status_inexact;
452 /* Is the floating point MIN_INT or just close? */
453 if (s->sign && s->normal_exp == (NR_INTBITS - 1))
456 ASSERT (s->fraction >= IMPLICIT_1);
457 if (s->fraction == IMPLICIT_1)
458 return 0; /* exact */
459 if (is_64bit) /* can't round */
460 return sim_fpu_status_invalid_cvi; /* must be overflow */
461 /* For a 32bit with MAX_INT, rounding is possible */
464 case sim_fpu_round_default:
466 case sim_fpu_round_zero:
467 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
468 return sim_fpu_status_invalid_cvi;
470 return sim_fpu_status_inexact;
472 case sim_fpu_round_near:
474 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
475 return sim_fpu_status_invalid_cvi;
/* FIXME(review): `!FRAC32MASK' is logical NOT and evaluates to 0, so the
   left side is always 0 -- the parallel `~FRAC32MASK >> 1' on the same
   line strongly suggests bitwise `~FRAC32MASK' was intended.  Left as-is
   because too much of the enclosing switch is missing to rewrite safely.  */
476 else if ((s->fraction & !FRAC32MASK) >= (~FRAC32MASK >> 1))
477 return sim_fpu_status_invalid_cvi;
479 return sim_fpu_status_inexact;
481 case sim_fpu_round_up:
482 if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
483 return sim_fpu_status_inexact;
485 return sim_fpu_status_invalid_cvi;
486 case sim_fpu_round_down:
487 return sim_fpu_status_invalid_cvi;
490 /* Would right shifting result in the FRAC being shifted into
491 (through) the integer's sign bit? */
492 if (s->normal_exp > (NR_INTBITS - 2))
494 *i = s->sign ? MIN_INT : MAX_INT;
495 return sim_fpu_status_invalid_cvi;
497 /* normal number shift it into place */
499 shift = (s->normal_exp - (NR_FRAC_GUARD));
/* Any bits shifted out below the integer LSB make the result inexact.  */
507 if (tmp & ((SIGNED64 (1) << shift) - 1))
508 status |= sim_fpu_status_inexact;
511 *i = s->sign ? (-tmp) : (tmp);
515 /* convert an integer into a floating point */
516 STATIC_INLINE_SIM_FPU (int)
517 i2fpu (sim_fpu *f, signed64 i, int is_64bit)
522 f->class = sim_fpu_class_zero;
528 f->class = sim_fpu_class_number;
/* Place the binary point at NR_FRAC_GUARD; normalization below fixes
   the exponent/fraction pair up.  */
530 f->normal_exp = NR_FRAC_GUARD;
534 /* Special case for minint, since there is no corresponding
535 +ve integer representation for it */
538 f->fraction = IMPLICIT_1;
539 f->normal_exp = NR_INTBITS - 1;
/* Normalize down, OR-ing shifted-out bits into the sticky LSB.  */
547 if (f->fraction >= IMPLICIT_2)
551 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
554 while (f->fraction >= IMPLICIT_2);
556 else if (f->fraction < IMPLICIT_1)
563 while (f->fraction < IMPLICIT_1);
567 /* trace operation */
570 printf ("i2fpu: 0x%08lX ->\n", (long) i);
/* Debug-only round trip through fpu2i to validate the conversion.  */
577 fpu2i (&val, f, is_64bit, sim_fpu_round_zero);
578 if (i >= MIN_INT32 && i <= MAX_INT32)
588 /* Convert a floating point into an unsigned integer */
/* NOTE(review): unlike fpu2i this takes no rounding mode and forces
   is_double = 1; most branch bodies are missing from this extract.  */
589 STATIC_INLINE_SIM_FPU (int)
590 fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
592 const int is_double = 1;
595 if (sim_fpu_is_zero (s))
600 if (sim_fpu_is_nan (s))
605 /* it is a negative number */
611 /* get reasonable MAX_USI_INT... */
612 if (sim_fpu_is_infinity (s))
617 /* it is a number, but a small one */
618 if (s->normal_exp < 0)
624 if (s->normal_exp > (NR_INTBITS - 1))
/* Strip the single-precision pad bits before shifting into place.  */
630 tmp = (s->fraction & ~PADMASK);
631 shift = (s->normal_exp - (NR_FRACBITS + NR_GUARDS));
645 /* Convert an unsigned integer into a floating point */
/* NOTE(review): fragment only -- the u == 0 test, fraction assignment and
   the normalization loop body are missing from this extract.  */
646 STATIC_INLINE_SIM_FPU (int)
647 u2fpu (sim_fpu *f, unsigned64 u, int is_64bit)
651 f->class = sim_fpu_class_zero;
657 f->class = sim_fpu_class_number;
659 f->normal_exp = NR_FRAC_GUARD;
662 while (f->fraction < IMPLICIT_1)
672 /* register <-> sim_fpu */
/* Thin wrappers mapping raw 32/64-bit register images to/from the
   unpacked sim_fpu form via unpack_fpu/pack_fpu.  The third argument is
   the is_double flag: 0 = single precision, 1 = double.  Function braces
   and some locals are missing from this extract.  */
674 INLINE_SIM_FPU (void)
675 sim_fpu_32to (sim_fpu *f, unsigned32 s)
677 unpack_fpu (f, s, 0);
/* 232to: combine two 32-bit halves (H = high, L = low -- presumably;
   the line assembling `s' is missing, so confirm against the full file)
   into one 64-bit image, then unpack as a double.  */
681 INLINE_SIM_FPU (void)
682 sim_fpu_232to (sim_fpu *f, unsigned32 h, unsigned32 l)
686 unpack_fpu (f, s, 1);
690 INLINE_SIM_FPU (void)
691 sim_fpu_64to (sim_fpu *f, unsigned64 s)
693 unpack_fpu (f, s, 1);
697 INLINE_SIM_FPU (void)
698 sim_fpu_to32 (unsigned32 *s,
701 *s = pack_fpu (f, 0);
/* to232: pack as a double then split into two 32-bit halves (the
   splitting lines are missing from this extract).  */
705 INLINE_SIM_FPU (void)
706 sim_fpu_to232 (unsigned32 *h, unsigned32 *l,
709 unsigned64 s = pack_fpu (f, 1);
715 INLINE_SIM_FPU (void)
716 sim_fpu_to64 (unsigned64 *u,
719 *u = pack_fpu (f, 1);
/* Build a sim_fpu from a raw fraction of PRECISION bits plus a normal
   exponent; the fraction is repositioned so its MSB lands at the
   implicit-1 bit expected by the unpacked format.  */
723 INLINE_SIM_FPU (void)
724 sim_fpu_fractionto (sim_fpu *f,
730 int shift = (NR_FRAC_GUARD - precision);
731 f->class = sim_fpu_class_number;
733 f->normal_exp = normal_exp;
734 /* shift the fraction to where sim-fpu expects it */
/* Left-shift when the caller gave fewer than NR_FRAC_GUARD bits,
   right-shift otherwise (the if/else lines are missing here).  */
736 f->fraction = (fraction << shift);
738 f->fraction = (fraction >> -shift);
739 f->fraction |= IMPLICIT_1;
/* Inverse of sim_fpu_fractionto: extract PRECISION fraction bits from D,
   with the implicit leading 1 removed.  */
743 INLINE_SIM_FPU (unsigned64)
744 sim_fpu_tofraction (const sim_fpu *d,
747 /* we have NR_FRAC_GUARD bits, we want only PRECISION bits */
748 int shift = (NR_FRAC_GUARD - precision);
749 unsigned64 fraction = (d->fraction & ~IMPLICIT_1);
751 return fraction >> shift;
753 return fraction << -shift;
/* Overflow handling: depending on the rounding mode, an overflowed value
   becomes either infinity (the cases below) or the largest finite
   magnitude (the fall-through setting NORMAL_EXPMAX / all-ones fraction).
   The sign-dependent conditions inside the up/down cases are missing
   from this extract.  */
759 STATIC_INLINE_SIM_FPU (int)
760 do_normal_overflow (sim_fpu *f,
766 case sim_fpu_round_default:
768 case sim_fpu_round_near:
769 f->class = sim_fpu_class_infinity;
771 case sim_fpu_round_up:
773 f->class = sim_fpu_class_infinity;
775 case sim_fpu_round_down:
777 f->class = sim_fpu_class_infinity;
779 case sim_fpu_round_zero:
/* Saturate to the largest representable finite value.  */
782 f->normal_exp = NORMAL_EXPMAX;
783 f->fraction = LSMASK64 (NR_FRAC_GUARD, NR_GUARDS);
784 return (sim_fpu_status_overflow | sim_fpu_status_inexact);
/* Underflow handling: depending on the rounding mode the result becomes
   zero (cases below) or the smallest denormal (the fall-through setting
   NORMAL_EXPMIN - NR_FRACBITS / IMPLICIT_1).  Sign-dependent conditions
   inside the up/down cases are missing from this extract.  */
787 STATIC_INLINE_SIM_FPU (int)
788 do_normal_underflow (sim_fpu *f,
794 case sim_fpu_round_default:
796 case sim_fpu_round_near:
797 f->class = sim_fpu_class_zero;
799 case sim_fpu_round_up:
801 f->class = sim_fpu_class_zero;
803 case sim_fpu_round_down:
805 f->class = sim_fpu_class_zero;
807 case sim_fpu_round_zero:
808 f->class = sim_fpu_class_zero;
/* Smallest representable magnitude.  */
811 f->normal_exp = NORMAL_EXPMIN - NR_FRACBITS;
812 f->fraction = IMPLICIT_1;
813 return (sim_fpu_status_inexact | sim_fpu_status_underflow);
818 /* Round a number using NR_GUARDS.
819 Will return the rounded number or F->FRACTION == 0 when underflow */
821 STATIC_INLINE_SIM_FPU (int)
822 do_normal_round (sim_fpu *f,
/* guardmask covers the NR_GUARDS low bits to be discarded; guardmsb is
   the highest of them (the "half" bit); fraclsb is the result's LSB.  */
826 unsigned64 guardmask = LSMASK64 (nr_guards - 1, 0);
827 unsigned64 guardmsb = LSBIT64 (nr_guards - 1);
828 unsigned64 fraclsb = guardmsb << 1;
829 if ((f->fraction & guardmask))
831 int status = sim_fpu_status_inexact;
834 case sim_fpu_round_default:
836 case sim_fpu_round_near:
/* Round-to-nearest-even: bump when guard MSB set and either the
   result LSB is set (ties-to-even) ...  */
837 if ((f->fraction & guardmsb))
839 if ((f->fraction & fraclsb))
841 status |= sim_fpu_status_rounded;
/* ... or any lower guard bit is set (strictly above half).  */
843 else if ((f->fraction & (guardmask >> 1)))
845 status |= sim_fpu_status_rounded;
849 case sim_fpu_round_up:
851 status |= sim_fpu_status_rounded;
853 case sim_fpu_round_down:
855 status |= sim_fpu_status_rounded;
857 case sim_fpu_round_zero:
860 f->fraction &= ~guardmask;
861 /* round if needed, handle resulting overflow */
862 if ((status & sim_fpu_status_rounded))
864 f->fraction += fraclsb;
/* Carry past the implicit-1: renormalize (lines missing here).  */
865 if ((f->fraction & IMPLICIT_2))
/* Round F to single (is_double == 0) or double (is_double == 1)
   precision per ROUND, honouring the DENORM treatment flags; returns
   accumulated sim_fpu_status bits.  Several braces and the switch header
   are missing from this extract.  */
878 STATIC_INLINE_SIM_FPU (int)
879 do_round (sim_fpu *f,
882 sim_fpu_denorm denorm)
886 case sim_fpu_class_qnan:
887 case sim_fpu_class_zero:
888 case sim_fpu_class_infinity:
891 case sim_fpu_class_snan:
892 /* Quieten a SignalingNaN */
893 f->class = sim_fpu_class_qnan;
894 return sim_fpu_status_invalid_snan;
896 case sim_fpu_class_number:
897 case sim_fpu_class_denorm:
900 ASSERT (f->fraction < IMPLICIT_2);
901 ASSERT (f->fraction >= IMPLICIT_1);
902 if (f->normal_exp < NORMAL_EXPMIN)
904 /* This number's exponent is too low to fit into the bits
905 available in the number. Round off any bits that will be
906 discarded as a result of denormalization. Edge case is
907 the implicit bit shifted to GUARD0 and then rounded
909 int shift = NORMAL_EXPMIN - f->normal_exp;
910 if (shift + NR_GUARDS <= NR_FRAC_GUARD + 1
911 && !(denorm & sim_fpu_denorm_zero))
913 status = do_normal_round (f, shift + NR_GUARDS, round);
914 if (f->fraction == 0) /* rounding underflowed */
916 status |= do_normal_underflow (f, is_double, round);
918 else if (f->normal_exp < NORMAL_EXPMIN) /* still underflow? */
920 status |= sim_fpu_status_denorm;
921 /* Any loss of precision when denormalizing is
922 underflow. Some processors check for underflow
923 before rounding, some after! */
924 if (status & sim_fpu_status_inexact)
925 status |= sim_fpu_status_underflow;
926 /* Flag that resultant value has been denormalized */
927 f->class = sim_fpu_class_denorm;
929 else if ((denorm & sim_fpu_denorm_underflow_inexact))
931 if ((status & sim_fpu_status_inexact))
932 status |= sim_fpu_status_underflow;
/* Denormals flushed to zero (sim_fpu_denorm_zero set).  */
937 status = do_normal_underflow (f, is_double, round);
940 else if (f->normal_exp > NORMAL_EXPMAX)
943 status = do_normal_overflow (f, is_double, round);
/* The common in-range case.  */
947 status = do_normal_round (f, NR_GUARDS, round);
948 if (f->fraction == 0)
949 /* f->class = sim_fpu_class_zero; */
950 status |= do_normal_underflow (f, is_double, round);
951 else if (f->normal_exp > NORMAL_EXPMAX)
952 /* oops! rounding caused overflow */
953 status |= do_normal_overflow (f, is_double, round);
/* Post-condition: a (de)normal result is properly normalized (the
   `<=' idiom expresses implication between the two booleans).  */
955 ASSERT ((f->class == sim_fpu_class_number
956 || f->class == sim_fpu_class_denorm)
957 <= (f->fraction < IMPLICIT_2 && f->fraction >= IMPLICIT_1));
/* Public rounding entry points: delegate to do_round with is_double set
   to 0 (single) or 1 (double).  Return-type/brace lines are missing from
   this extract.  */
965 sim_fpu_round_32 (sim_fpu *f,
967 sim_fpu_denorm denorm)
969 return do_round (f, 0, round, denorm);
973 sim_fpu_round_64 (sim_fpu *f,
975 sim_fpu_denorm denorm)
977 return do_round (f, 1, round, denorm);
/* IEEE addition: F = L + R.  NaN and infinity special cases first, then
   align exponents (keeping sticky bits), add signed fractions, and
   renormalize.  Returns sim_fpu_status bits.  Many interior lines
   (braces, else arms, assignments) are missing from this extract.  */
985 sim_fpu_add (sim_fpu *f,
989 if (sim_fpu_is_snan (l))
992 f->class = sim_fpu_class_qnan;
993 return sim_fpu_status_invalid_snan;
995 if (sim_fpu_is_snan (r))
998 f->class = sim_fpu_class_qnan;
999 return sim_fpu_status_invalid_snan;
1001 if (sim_fpu_is_qnan (l))
1006 if (sim_fpu_is_qnan (r))
/* inf + -inf is invalid; inf + anything else is inf.  */
1011 if (sim_fpu_is_infinity (l))
1013 if (sim_fpu_is_infinity (r)
1014 && l->sign != r->sign)
1017 return sim_fpu_status_invalid_isi;
1022 if (sim_fpu_is_infinity (r))
1027 if (sim_fpu_is_zero (l))
1029 if (sim_fpu_is_zero (r))
/* 0 + 0: result is -0 only when both operands are -0.  */
1032 f->sign = l->sign & r->sign;
1038 if (sim_fpu_is_zero (r))
1045 int shift = l->normal_exp - r->normal_exp;
1046 unsigned64 lfraction;
1047 unsigned64 rfraction;
1048 /* use exp of larger */
1049 if (shift >= NR_FRAC_GUARD)
1051 /* left has much bigger magnitude */
1053 return sim_fpu_status_inexact;
1055 if (shift <= - NR_FRAC_GUARD)
1057 /* right has much bigger magnitude */
1059 return sim_fpu_status_inexact;
1061 lfraction = l->fraction;
1062 rfraction = r->fraction;
/* Align the smaller operand, folding shifted-out bits into a sticky
   LSB so rounding stays correct.  */
1065 f->normal_exp = l->normal_exp;
1066 if (rfraction & LSMASK64 (shift - 1, 0))
1068 status |= sim_fpu_status_inexact;
1069 rfraction |= LSBIT64 (shift); /* stick LSBit */
1071 rfraction >>= shift;
1075 f->normal_exp = r->normal_exp;
1076 if (lfraction & LSMASK64 (- shift - 1, 0))
1078 status |= sim_fpu_status_inexact;
1079 lfraction |= LSBIT64 (- shift); /* stick LSBit */
1081 lfraction >>= -shift;
1085 f->normal_exp = r->normal_exp;
1088 /* perform the addition */
/* Negative operands are two's-complement negated first.  */
1090 lfraction = - lfraction;
1092 rfraction = - rfraction;
1093 f->fraction = lfraction + rfraction;
1096 if (f->fraction == 0)
1103 f->class = sim_fpu_class_number;
1104 if ((signed64) f->fraction >= 0)
1109 f->fraction = - f->fraction;
/* Renormalize: shift down on carry (keeping a sticky bit), or shift
   up until the implicit-1 is back in place.  */
1113 if ((f->fraction & IMPLICIT_2))
1115 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1118 else if (f->fraction < IMPLICIT_1)
1125 while (f->fraction < IMPLICIT_1);
1127 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
/* IEEE subtraction: F = L - R.  Mirrors sim_fpu_add with the sign logic
   inverted for R.  Many interior lines are missing from this extract.  */
1133 INLINE_SIM_FPU (int)
1134 sim_fpu_sub (sim_fpu *f,
1138 if (sim_fpu_is_snan (l))
1141 f->class = sim_fpu_class_qnan;
1142 return sim_fpu_status_invalid_snan;
1144 if (sim_fpu_is_snan (r))
1147 f->class = sim_fpu_class_qnan;
1148 return sim_fpu_status_invalid_snan;
1150 if (sim_fpu_is_qnan (l))
1155 if (sim_fpu_is_qnan (r))
/* inf - inf (same signs) is invalid.  */
1160 if (sim_fpu_is_infinity (l))
1162 if (sim_fpu_is_infinity (r)
1163 && l->sign == r->sign)
1166 return sim_fpu_status_invalid_isi;
1171 if (sim_fpu_is_infinity (r))
1177 if (sim_fpu_is_zero (l))
1179 if (sim_fpu_is_zero (r))
/* 0 - 0: result is -0 only for (-0) - (+0).  */
1182 f->sign = l->sign & !r->sign;
1191 if (sim_fpu_is_zero (r))
1198 int shift = l->normal_exp - r->normal_exp;
1199 unsigned64 lfraction;
1200 unsigned64 rfraction;
1201 /* use exp of larger */
1202 if (shift >= NR_FRAC_GUARD)
1204 /* left has much bigger magnitude */
1206 return sim_fpu_status_inexact;
1208 if (shift <= - NR_FRAC_GUARD)
1210 /* right has much bigger magnitude */
1213 return sim_fpu_status_inexact;
1215 lfraction = l->fraction;
1216 rfraction = r->fraction;
/* Align the smaller operand with sticky-bit preservation.  */
1219 f->normal_exp = l->normal_exp;
1220 if (rfraction & LSMASK64 (shift - 1, 0))
1222 status |= sim_fpu_status_inexact;
1223 rfraction |= LSBIT64 (shift); /* stick LSBit */
1225 rfraction >>= shift;
1229 f->normal_exp = r->normal_exp;
1230 if (lfraction & LSMASK64 (- shift - 1, 0))
1232 status |= sim_fpu_status_inexact;
1233 lfraction |= LSBIT64 (- shift); /* stick LSBit */
1235 lfraction >>= -shift;
1239 f->normal_exp = r->normal_exp;
1242 /* perform the subtraction */
1244 lfraction = - lfraction;
1246 rfraction = - rfraction;
1247 f->fraction = lfraction + rfraction;
1250 if (f->fraction == 0)
1257 f->class = sim_fpu_class_number;
1258 if ((signed64) f->fraction >= 0)
1263 f->fraction = - f->fraction;
/* Renormalize as in sim_fpu_add.  */
1267 if ((f->fraction & IMPLICIT_2))
1269 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1272 else if (f->fraction < IMPLICIT_1)
1279 while (f->fraction < IMPLICIT_1);
1281 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
/* IEEE multiplication: F = L * R.  After the special cases, the 61-bit
   fractions are multiplied via four 32x32 partial products into a 128-bit
   (high/low) result.  Interior brace/assignment lines are missing from
   this extract.  */
1287 INLINE_SIM_FPU (int)
1288 sim_fpu_mul (sim_fpu *f,
1292 if (sim_fpu_is_snan (l))
1295 f->class = sim_fpu_class_qnan;
1296 return sim_fpu_status_invalid_snan;
1298 if (sim_fpu_is_snan (r))
1301 f->class = sim_fpu_class_qnan;
1302 return sim_fpu_status_invalid_snan;
1304 if (sim_fpu_is_qnan (l))
1309 if (sim_fpu_is_qnan (r))
/* inf * 0 is invalid; inf * finite keeps the XOR-ed sign.  */
1314 if (sim_fpu_is_infinity (l))
1316 if (sim_fpu_is_zero (r))
1319 return sim_fpu_status_invalid_imz;
1322 f->sign = l->sign ^ r->sign;
1325 if (sim_fpu_is_infinity (r))
1327 if (sim_fpu_is_zero (l))
1330 return sim_fpu_status_invalid_imz;
1333 f->sign = l->sign ^ r->sign;
1336 if (sim_fpu_is_zero (l) || sim_fpu_is_zero (r))
1339 f->sign = l->sign ^ r->sign;
1342 /* Calculate the mantissa by multiplying both 64bit numbers to get a
/* Schoolbook 32x32 -> 64 partial products: n = l, m = r.  */
1347 unsigned64 nl = l->fraction & 0xffffffff;
1348 unsigned64 nh = l->fraction >> 32;
1349 unsigned64 ml = r->fraction & 0xffffffff;
1350 unsigned64 mh = r->fraction >>32;
1351 unsigned64 pp_ll = ml * nl;
1352 unsigned64 pp_hl = mh * nl;
1353 unsigned64 pp_lh = ml * nh;
1354 unsigned64 pp_hh = mh * nh;
1355 unsigned64 res2 = 0;
1356 unsigned64 res0 = 0;
1357 unsigned64 ps_hh__ = pp_hl + pp_lh;
/* Carry out of the middle partial-product sum goes into the high word.  */
1358 if (ps_hh__ < pp_hl)
1359 res2 += UNSIGNED64 (0x100000000);
1360 pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
1361 res0 = pp_ll + pp_hl;
1364 res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1368 f->normal_exp = l->normal_exp + r->normal_exp;
1369 f->sign = l->sign ^ r->sign;
1370 f->class = sim_fpu_class_number;
1372 /* Input is bounded by [1,2) ; [2^60,2^61)
1373 Output is bounded by [1,4) ; [2^120,2^122) */
1375 /* Adjust the exponent according to where the decimal point ended
1376 up in the high 64 bit word. In the source the decimal point
1377 was at NR_FRAC_GUARD. */
1378 f->normal_exp += NR_FRAC_GUARD + 64 - (NR_FRAC_GUARD * 2);
1380 /* The high word is bounded according to the above. Consequently
1381 it has never overflowed into IMPLICIT_2. */
1382 ASSERT (high < LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64));
1383 ASSERT (high >= LSBIT64 ((NR_FRAC_GUARD * 2) - 64));
1384 ASSERT (LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64) < IMPLICIT_1);
/* Normalize high:low left; bits shifted in from `low' are tracked so
   lost precision can set the sticky bit below.  */
1391 if (low & LSBIT64 (63))
1395 while (high < IMPLICIT_1);
1397 ASSERT (high >= IMPLICIT_1 && high < IMPLICIT_2);
/* Any remaining low bits make the result inexact; OR in a sticky 1.  */
1400 f->fraction = (high | 1); /* sticky */
1401 return sim_fpu_status_inexact;
/* IEEE division: F = L / R.  Special cases first, then a bit-at-a-time
   long division of the fractions with NR_SPARE extra precision bits.
   Interior brace/loop lines are missing from this extract.  */
1412 INLINE_SIM_FPU (int)
1413 sim_fpu_div (sim_fpu *f,
1417 if (sim_fpu_is_snan (l))
1420 f->class = sim_fpu_class_qnan;
1421 return sim_fpu_status_invalid_snan;
1423 if (sim_fpu_is_snan (r))
1426 f->class = sim_fpu_class_qnan;
1427 return sim_fpu_status_invalid_snan;
1429 if (sim_fpu_is_qnan (l))
1432 f->class = sim_fpu_class_qnan;
1435 if (sim_fpu_is_qnan (r))
1438 f->class = sim_fpu_class_qnan;
/* inf / inf is invalid; inf / finite is signed infinity.  */
1441 if (sim_fpu_is_infinity (l))
1443 if (sim_fpu_is_infinity (r))
1446 return sim_fpu_status_invalid_idi;
1451 f->sign = l->sign ^ r->sign;
/* 0 / 0 is invalid; 0 / nonzero is signed zero.  */
1455 if (sim_fpu_is_zero (l))
1457 if (sim_fpu_is_zero (r))
1460 return sim_fpu_status_invalid_zdz;
1465 f->sign = l->sign ^ r->sign;
1469 if (sim_fpu_is_infinity (r))
1472 f->sign = l->sign ^ r->sign;
/* finite / 0 signals divide-by-zero and yields signed infinity.  */
1475 if (sim_fpu_is_zero (r))
1477 f->class = sim_fpu_class_infinity;
1478 f->sign = l->sign ^ r->sign;
1479 return sim_fpu_status_invalid_div0;
1482 /* Calculate the mantissa by dividing both 64bit numbers to get a
1485 /* quotient = ( ( numerator / denominator)
1486 x 2^(numerator exponent - denominator exponent)
1488 unsigned64 numerator;
1489 unsigned64 denominator;
1490 unsigned64 quotient;
1493 f->class = sim_fpu_class_number;
1494 f->sign = l->sign ^ r->sign;
1495 f->normal_exp = l->normal_exp - r->normal_exp;
1497 numerator = l->fraction;
1498 denominator = r->fraction;
1500 /* Fraction will be less than 1.0 */
/* Pre-scale so the quotient lands in [1,2) (the doubling/exponent
   adjustment lines are missing from this extract).  */
1501 if (numerator < denominator)
1506 ASSERT (numerator >= denominator);
1508 /* Gain extra precision, already used one spare bit */
1509 numerator <<= NR_SPARE;
1510 denominator <<= NR_SPARE;
1512 /* Does divide one bit at a time. Optimize??? */
1514 bit = (IMPLICIT_1 << NR_SPARE);
1517 if (numerator >= denominator)
1520 numerator -= denominator;
1526 /* discard (but save) the extra bits */
/* Fold discarded spare bits into a sticky LSB.  */
1527 if ((quotient & LSMASK64 (NR_SPARE -1, 0)))
1528 quotient = (quotient >> NR_SPARE) | 1;
1530 quotient = (quotient >> NR_SPARE);
1532 f->fraction = quotient;
1533 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
/* Non-zero remainder => inexact; record it in the sticky bit.  */
1536 f->fraction |= 1; /* stick remaining bits */
1537 return sim_fpu_status_inexact;
/* F = max (L, R) with IEEE NaN propagation.  Ordering is decided by
   sign first, then exponent, then fraction.  Interior brace/else lines
   are missing from this extract.  */
1545 INLINE_SIM_FPU (int)
1546 sim_fpu_max (sim_fpu *f,
1550 if (sim_fpu_is_snan (l))
1553 f->class = sim_fpu_class_qnan;
1554 return sim_fpu_status_invalid_snan;
1556 if (sim_fpu_is_snan (r))
1559 f->class = sim_fpu_class_qnan;
1560 return sim_fpu_status_invalid_snan;
1562 if (sim_fpu_is_qnan (l))
1567 if (sim_fpu_is_qnan (r))
1572 if (sim_fpu_is_infinity (l))
1574 if (sim_fpu_is_infinity (r)
1575 && l->sign == r->sign)
1578 return sim_fpu_status_invalid_isi;
1581 *f = *r; /* -inf < anything */
1583 *f = *l; /* +inf > anything */
1586 if (sim_fpu_is_infinity (r))
1589 *f = *l; /* anything > -inf */
1591 *f = *r; /* anything < +inf */
/* sign == 1 means negative, so larger sign => smaller value.  */
1594 if (l->sign > r->sign)
1596 *f = *r; /* -ve < +ve */
1599 if (l->sign < r->sign)
1601 *f = *l; /* +ve > -ve */
1604 ASSERT (l->sign == r->sign);
/* Same sign: compare magnitudes; for negatives the larger magnitude
   is the smaller value.  */
1605 if (l->normal_exp > r->normal_exp
1606 || (l->normal_exp == r->normal_exp &&
1607 l->fraction > r->fraction))
1611 *f = *r; /* -ve < -ve */
1613 *f = *l; /* +ve > +ve */
1620 *f = *l; /* -ve > -ve */
1622 *f = *r; /* +ve < +ve */
/* F = min (L, R); mirror image of sim_fpu_max.  Interior brace/else
   lines are missing from this extract.  */
1628 INLINE_SIM_FPU (int)
1629 sim_fpu_min (sim_fpu *f,
1633 if (sim_fpu_is_snan (l))
1636 f->class = sim_fpu_class_qnan;
1637 return sim_fpu_status_invalid_snan;
1639 if (sim_fpu_is_snan (r))
1642 f->class = sim_fpu_class_qnan;
1643 return sim_fpu_status_invalid_snan;
1645 if (sim_fpu_is_qnan (l))
1650 if (sim_fpu_is_qnan (r))
1655 if (sim_fpu_is_infinity (l))
1657 if (sim_fpu_is_infinity (r)
1658 && l->sign == r->sign)
1661 return sim_fpu_status_invalid_isi;
1664 *f = *l; /* -inf < anything */
1666 *f = *r; /* +inf > anything */
1669 if (sim_fpu_is_infinity (r))
1672 *f = *r; /* anything > -inf */
1674 *f = *l; /* anything < +inf */
/* sign == 1 means negative, so larger sign => smaller value.  */
1677 if (l->sign > r->sign)
1679 *f = *l; /* -ve < +ve */
1682 if (l->sign < r->sign)
1684 *f = *r; /* +ve > -ve */
1687 ASSERT (l->sign == r->sign);
1688 if (l->normal_exp > r->normal_exp
1689 || (l->normal_exp == r->normal_exp &&
1690 l->fraction > r->fraction))
1694 *f = *l; /* -ve < -ve */
1696 *f = *r; /* +ve > +ve */
1703 *f = *r; /* -ve > -ve */
1705 *f = *l; /* +ve < +ve */
/* F = -R (sign flip); NaN inputs propagate, SNaN is quietened and
   signalled.  The sign-flipping lines are missing from this extract.  */
1711 INLINE_SIM_FPU (int)
1712 sim_fpu_neg (sim_fpu *f,
1715 if (sim_fpu_is_snan (r))
1718 f->class = sim_fpu_class_qnan;
1719 return sim_fpu_status_invalid_snan;
1721 if (sim_fpu_is_qnan (r))
/* F = |R|; NaN inputs propagate, SNaN is quietened and signalled.
   The sign-clearing lines are missing from this extract.  */
1732 INLINE_SIM_FPU (int)
1733 sim_fpu_abs (sim_fpu *f,
1736 if (sim_fpu_is_snan (r))
1739 f->class = sim_fpu_class_qnan;
1740 return sim_fpu_status_invalid_snan;
1742 if (sim_fpu_is_qnan (r))
/* F = 1 / R -- implemented as an exponent negation only, so it is an
   approximation valid for powers of two (the fraction is not inverted
   here; remaining lines are missing from this extract).  */
1753 INLINE_SIM_FPU (int)
1754 sim_fpu_inv (sim_fpu *f,
1757 if (sim_fpu_is_snan (r))
1760 f->class = sim_fpu_class_qnan;
1761 return sim_fpu_status_invalid_snan;
1763 if (sim_fpu_is_qnan (r))
1766 f->class = sim_fpu_class_qnan;
1769 if (sim_fpu_is_infinity (r))
/* 1/0 signals divide-by-zero and yields infinity.  */
1775 if (sim_fpu_is_zero (r))
1777 f->class = sim_fpu_class_infinity;
1779 return sim_fpu_status_invalid_div0;
1782 f->normal_exp = - r->normal_exp;
1787 INLINE_SIM_FPU (int)
1788 sim_fpu_sqrt (sim_fpu *f,
1791 if (sim_fpu_is_snan (r))
1794 return sim_fpu_status_invalid_snan;
1796 if (sim_fpu_is_qnan (r))
1801 if (sim_fpu_is_zero (r))
1803 f->class = sim_fpu_class_zero;
1808 if (sim_fpu_is_infinity (r))
1813 return sim_fpu_status_invalid_sqrt;
1817 f->class = sim_fpu_class_infinity;
1826 return sim_fpu_status_invalid_sqrt;
1829 /* @(#)e_sqrt.c 5.1 93/09/24 */
1831 * ====================================================
1832 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1834 * Developed at SunPro, a Sun Microsystems, Inc. business.
1835 * Permission to use, copy, modify, and distribute this
1836 * software is freely granted, provided that this notice
1838 * ====================================================
1841 /* __ieee754_sqrt(x)
1842 * Return correctly rounded sqrt.
1843 * ------------------------------------------
1844 * | Use the hardware sqrt if you have one |
1845 * ------------------------------------------
1847 * Bit by bit method using integer arithmetic. (Slow, but portable)
1849 * Scale x to y in [1,4) with even powers of 2:
1850 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1851 * sqrt(x) = 2^k * sqrt(y)
1854 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1855 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1857 - y = ((m even) ? x : 2.x)
1859 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1861 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1863 * 2. Bit by bit computation
1864 * Let q = sqrt(y) truncated to i bit after binary point (q = 1),
1867 * s = 2*q , and y = 2 * ( y - q ). (1)
1870 * To compute q from q , one checks whether
1874 * (q + 2 ) <= y. (2)
1877 * If (2) is false, then q = q ; otherwise q = q + 2 .
1880 * With some algebric manipulation, it is not difficult to see
1881 * that (2) is equivalent to
1886 * The advantage of (3) is that s and y can be computed by
1888 * the following recurrence formula:
1891 * s = s , y = y ; (4)
1900 * s = s + 2 , y = y - s - 2 (5)
1905 - NOTE: y = 2 (y - s - 2 )
1908 * One may easily use induction to prove (4) and (5).
1909 * Note. Since the left hand side of (3) contain only i+2 bits,
1910 * it does not necessary to do a full (53-bit) comparison
1913 * After generating the 53 bits result, we compute one more bit.
1914 * Together with the remainder, we can decide whether the
1915 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
1916 * (it will never be equal to 1/2 ulp).
1917 * The rounding mode can be detected by checking whether
1918 * huge + tiny is equal to huge, and whether huge - tiny is
1919 * equal to huge for some floating point number "huge" and "tiny".
1922 * sqrt(+-0) = +-0 ... exact
1924 * sqrt(-ve) = NaN ... with invalid signal
1925 * sqrt(NaN) = NaN ... with invalid signal for signaling NaN
1927 * Other methods : see the appended file at the end of the program below.
1932 /* generate sqrt(x) bit by bit */
/* Body fragment of the bit-by-bit square root (function header and
   many interior lines are elided in this excerpt).  The result is a
   normal number whose exponent is half the operand's.  */
1938 f->class = sim_fpu_class_number;
1941 f->normal_exp = (r->normal_exp >> 1); /* exp = [exp/2] */
1943 /* odd exp, double x to make it even */
1944 ASSERT (y >= IMPLICIT_1 && y < IMPLICIT_4);
1945 if ((r->normal_exp & 1))
1949 ASSERT (y >= IMPLICIT_1 && y < (IMPLICIT_2 << 1));
1951 /* Let loop determine first value of s (either 1 or 2) */
1958 unsigned64 t = s + b;
/* q is the root accumulated so far; it must be a normalized
   fraction in [IMPLICIT_1, IMPLICIT_2).  */
1969 ASSERT (q >= IMPLICIT_1 && q < IMPLICIT_2);
/* A non-zero remainder means the true root was truncated: fold it
   into the sticky bit and report the operation as inexact.  */
1973 f->fraction |= 1; /* stick remaining bits */
1974 return sim_fpu_status_inexact;
1982 /* int/long <-> sim_fpu */
/* Convert a signed 32-bit integer into *F using rounding mode ROUND.
   Parameter list and body are elided in this excerpt.  */
1984 INLINE_SIM_FPU (int)
1985 sim_fpu_i32to (sim_fpu *f,
1987 sim_fpu_round round)
/* Convert an unsigned 32-bit integer into *F using rounding mode
   ROUND.  Parameter list and body are elided in this excerpt.  */
1993 INLINE_SIM_FPU (int)
1994 sim_fpu_u32to (sim_fpu *f,
1996 sim_fpu_round round)
/* Convert a signed 64-bit integer into *F using rounding mode ROUND.
   Parameter list and body are elided in this excerpt.  */
2002 INLINE_SIM_FPU (int)
2003 sim_fpu_i64to (sim_fpu *f,
2005 sim_fpu_round round)
/* Convert an unsigned 64-bit integer into *F using rounding mode
   ROUND.  Parameter list and body are elided in this excerpt.  */
2011 INLINE_SIM_FPU (int)
2012 sim_fpu_u64to (sim_fpu *f,
2014 sim_fpu_round round)
/* Convert *F to a signed 32-bit integer via fpu2i (is_64bit == 0,
   hence the 32-bit path), honouring ROUND.  The copy from the
   temporary i64 into *I is elided in this excerpt.  */
2021 INLINE_SIM_FPU (int)
2022 sim_fpu_to32i (signed32 *i,
2024 sim_fpu_round round)
2027 int status = fpu2i (&i64, f, 0, round);
/* Convert *F to an unsigned 32-bit integer via fpu2u (is_64bit == 0).
   NOTE(review): ROUND is not passed to fpu2u in the visible call --
   confirm whether unsigned conversion intentionally ignores the
   rounding mode.  Remainder of the body is elided in this excerpt.  */
2032 INLINE_SIM_FPU (int)
2033 sim_fpu_to32u (unsigned32 *u,
2035 sim_fpu_round round)
2038 int status = fpu2u (&u64, f, 0);
/* Convert *F to a signed 64-bit integer via fpu2i (is_64bit == 1),
   honouring ROUND.  */
2043 INLINE_SIM_FPU (int)
2044 sim_fpu_to64i (signed64 *i,
2046 sim_fpu_round round)
2048 return fpu2i (i, f, 1, round);
/* Convert *F to an unsigned 64-bit integer via fpu2u (is_64bit == 1).
   NOTE(review): as with sim_fpu_to32u, ROUND is not forwarded.  */
2052 INLINE_SIM_FPU (int)
2053 sim_fpu_to64u (unsigned64 *u,
2055 sim_fpu_round round)
2057 return fpu2u (u, f, 1);
2062 /* sim_fpu -> host format */
/* Convert F to the host `float' format.  Body elided in this
   excerpt.  */
2065 INLINE_SIM_FPU (float)
2066 sim_fpu_2f (const sim_fpu *f)
/* Convert S to the host `double' format.  A signalling NaN is first
   downgraded to a quiet NaN copy before packing, so the host FPU is
   never handed an sNaN; otherwise S is packed directly (64-bit
   format).  Declarations and the return are elided in this
   excerpt.  */
2073 INLINE_SIM_FPU (double)
2074 sim_fpu_2d (const sim_fpu *s)
2077 if (sim_fpu_is_snan (s))
2081 n.class = sim_fpu_class_qnan;
2082 val.i = pack_fpu (&n, 1);
2086 val.i = pack_fpu (s, 1);
/* Convert a host `float' into *F.  The value goes through the union
   as a double, hence the 64-bit unpack.  Parameter list and the
   union assignment are elided in this excerpt.  */
2093 INLINE_SIM_FPU (void)
2094 sim_fpu_f2 (sim_fpu *f,
2099 unpack_fpu (f, val.i, 1);
/* Convert a host `double' into *F (64-bit unpack).  Parameter list
   and the union assignment are elided in this excerpt.  */
2104 INLINE_SIM_FPU (void)
2105 sim_fpu_d2 (sim_fpu *f,
2110 unpack_fpu (f, val.i, 1);
/* Predicate: non-zero iff D is a NaN, quiet or signalling.  Switch
   scaffolding and returns are elided in this excerpt.  */
2116 INLINE_SIM_FPU (int)
2117 sim_fpu_is_nan (const sim_fpu *d)
2121 case sim_fpu_class_qnan:
2122 case sim_fpu_class_snan:
/* Predicate: non-zero iff D is a quiet NaN.  Switch scaffolding and
   returns are elided in this excerpt.  */
2129 INLINE_SIM_FPU (int)
2130 sim_fpu_is_qnan (const sim_fpu *d)
2134 case sim_fpu_class_qnan:
/* Predicate: non-zero iff D is a signalling NaN.  Switch scaffolding
   and returns are elided in this excerpt.  */
2141 INLINE_SIM_FPU (int)
2142 sim_fpu_is_snan (const sim_fpu *d)
2146 case sim_fpu_class_snan:
/* Predicate: non-zero iff D is zero (either sign).  Switch
   scaffolding and returns are elided in this excerpt.  */
2153 INLINE_SIM_FPU (int)
2154 sim_fpu_is_zero (const sim_fpu *d)
2158 case sim_fpu_class_zero:
/* Predicate: non-zero iff D is an infinity (either sign).  Switch
   scaffolding and returns are elided in this excerpt.  */
2165 INLINE_SIM_FPU (int)
2166 sim_fpu_is_infinity (const sim_fpu *d)
2170 case sim_fpu_class_infinity:
/* Predicate: non-zero iff D is a finite non-zero number -- note that
   denormals count as numbers here.  Switch scaffolding and returns
   are elided in this excerpt.  */
2177 INLINE_SIM_FPU (int)
2178 sim_fpu_is_number (const sim_fpu *d)
2182 case sim_fpu_class_denorm:
2183 case sim_fpu_class_number:
/* Predicate: non-zero iff D is a denormal.  Switch scaffolding and
   returns are elided in this excerpt.  */
2190 INLINE_SIM_FPU (int)
2191 sim_fpu_is_denorm (const sim_fpu *d)
2195 case sim_fpu_class_denorm:
/* Accessor: the sign of D (presumably returns d->sign -- body elided
   in this excerpt).  */
2203 INLINE_SIM_FPU (int)
2204 sim_fpu_sign (const sim_fpu *d)
/* Accessor: the unbiased (normalized) exponent of D.  */
2210 INLINE_SIM_FPU (int)
2211 sim_fpu_exp (const sim_fpu *d)
2213 return d->normal_exp;
/* Classify D into one of the SIM_FPU_IS_* categories, splitting each
   signed class into its negative/positive variant.  The sign tests
   selecting between the N/P returns are elided in this excerpt.  */
2218 INLINE_SIM_FPU (int)
2219 sim_fpu_is (const sim_fpu *d)
2223 case sim_fpu_class_qnan:
2224 return SIM_FPU_IS_QNAN;
2225 case sim_fpu_class_snan:
2226 return SIM_FPU_IS_SNAN;
2227 case sim_fpu_class_infinity:
2229 return SIM_FPU_IS_NINF;
2231 return SIM_FPU_IS_PINF;
2232 case sim_fpu_class_number:
2234 return SIM_FPU_IS_NNUMBER;
2236 return SIM_FPU_IS_PNUMBER;
2237 case sim_fpu_class_denorm:
2239 return SIM_FPU_IS_NDENORM;
2241 return SIM_FPU_IS_PDENORM;
2242 case sim_fpu_class_zero:
2244 return SIM_FPU_IS_NZERO;
2246 return SIM_FPU_IS_PZERO;
/* Compare L and R by computing L - R and classifying the difference
   with sim_fpu_is; the caller inspects the SIM_FPU_IS_* result for
   sign/zero.  */
2253 INLINE_SIM_FPU (int)
2254 sim_fpu_cmp (const sim_fpu *l, const sim_fpu *r)
2257 sim_fpu_sub (&res, l, r);
2258 return sim_fpu_is (&res);
/* Boolean wrapper around sim_fpu_lt: non-zero iff L < R, discarding
   the status code.  The return of the out-parameter is elided in
   this excerpt.  */
2261 INLINE_SIM_FPU (int)
2262 sim_fpu_is_lt (const sim_fpu *l, const sim_fpu *r)
2265 sim_fpu_lt (&status, l, r);
/* Boolean wrapper around sim_fpu_le: non-zero iff L <= R, discarding
   the status code.  */
2269 INLINE_SIM_FPU (int)
2270 sim_fpu_is_le (const sim_fpu *l, const sim_fpu *r)
2273 sim_fpu_le (&is, l, r);
/* Boolean wrapper around sim_fpu_eq: non-zero iff L == R, discarding
   the status code.  */
2277 INLINE_SIM_FPU (int)
2278 sim_fpu_is_eq (const sim_fpu *l, const sim_fpu *r)
2281 sim_fpu_eq (&is, l, r);
/* Boolean wrapper around sim_fpu_ne: non-zero iff L != R, discarding
   the status code.  */
2285 INLINE_SIM_FPU (int)
2286 sim_fpu_is_ne (const sim_fpu *l, const sim_fpu *r)
2289 sim_fpu_ne (&is, l, r)
/* Boolean wrapper around sim_fpu_ge: non-zero iff L >= R, discarding
   the status code.  */
2293 INLINE_SIM_FPU (int)
2294 sim_fpu_is_ge (const sim_fpu *l, const sim_fpu *r)
2297 sim_fpu_ge (&is, l, r);
/* Boolean wrapper around sim_fpu_gt: non-zero iff L > R, discarding
   the status code.  */
2301 INLINE_SIM_FPU (int)
2302 sim_fpu_is_gt (const sim_fpu *l, const sim_fpu *r)
2305 sim_fpu_gt (&is, l, r);
2310 /* Compare operators */
/* Set *IS to (L < R).  Non-NaN operands are packed to 64-bit IEEE
   images and compared using the host's double `<'.  A NaN operand
   instead returns an invalid-operation status: SNaN flavour if
   either side signals, QNaN flavour otherwise.  Braces, remaining
   parameters and the success return are elided in this excerpt.  */
2312 INLINE_SIM_FPU (int)
2313 sim_fpu_lt (int *is,
2317 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2321 lval.i = pack_fpu (l, 1);
2322 rval.i = pack_fpu (r, 1);
2323 (*is) = (lval.d < rval.d);
2326 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2329 return sim_fpu_status_invalid_snan;
2334 return sim_fpu_status_invalid_qnan;
/* Set *IS to (L <= R); same pack-and-host-compare scheme and NaN
   handling as sim_fpu_lt.  Braces, remaining parameters and the
   success return are elided in this excerpt.  */
2338 INLINE_SIM_FPU (int)
2339 sim_fpu_le (int *is,
2343 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2347 lval.i = pack_fpu (l, 1);
2348 rval.i = pack_fpu (r, 1);
2349 *is = (lval.d <= rval.d);
2352 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2355 return sim_fpu_status_invalid_snan;
2360 return sim_fpu_status_invalid_qnan;
/* Set *IS to (L == R); same pack-and-host-compare scheme and NaN
   handling as sim_fpu_lt.  Braces, remaining parameters and the
   success return are elided in this excerpt.  */
2364 INLINE_SIM_FPU (int)
2365 sim_fpu_eq (int *is,
2369 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2373 lval.i = pack_fpu (l, 1);
2374 rval.i = pack_fpu (r, 1);
2375 (*is) = (lval.d == rval.d);
2378 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2381 return sim_fpu_status_invalid_snan;
2386 return sim_fpu_status_invalid_qnan;
/* Set *IS to (L != R); same pack-and-host-compare scheme and NaN
   handling as sim_fpu_lt.  Braces, remaining parameters and the
   success return are elided in this excerpt.  */
2390 INLINE_SIM_FPU (int)
2391 sim_fpu_ne (int *is,
2395 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2399 lval.i = pack_fpu (l, 1);
2400 rval.i = pack_fpu (r, 1);
2401 (*is) = (lval.d != rval.d);
2404 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2407 return sim_fpu_status_invalid_snan;
2412 return sim_fpu_status_invalid_qnan;
/* L >= R is implemented as R <= L by swapping the operands.  */
2416 INLINE_SIM_FPU (int)
2417 sim_fpu_ge (int *is,
2421 return sim_fpu_le (is, r, l);
/* L > R is implemented as R < L by swapping the operands.  */
2424 INLINE_SIM_FPU (int)
2425 sim_fpu_gt (int *is,
2429 return sim_fpu_lt (is, r, l);
2433 /* A number of useful constants */
/* Predefined constant values.  Visible initializers follow the field
   order {class, sign, fraction, normal_exp} -- confirm against the
   sim_fpu declaration; some initializers and the closing braces are
   elided in this excerpt.  max32/max64 are the largest finite
   single/double values: an all-ones fraction at the maximum
   exponent.  */
2435 #if EXTERN_SIM_FPU_P
2436 const sim_fpu sim_fpu_zero = {
2439 const sim_fpu sim_fpu_qnan = {
2442 const sim_fpu sim_fpu_one = {
2443 sim_fpu_class_number, 0, IMPLICIT_1, 0
2445 const sim_fpu sim_fpu_two = {
2446 sim_fpu_class_number, 0, IMPLICIT_1, 1
2448 const sim_fpu sim_fpu_max32 = {
2449 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS32), NORMAL_EXPMAX32
2451 const sim_fpu sim_fpu_max64 = {
2452 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS64), NORMAL_EXPMAX64
/* Print F via the PRINT callback with all fraction digits
   (digits == -1 delegates the "all" behaviour to
   sim_fpu_printn_fpu).  */
2459 INLINE_SIM_FPU (void)
2460 sim_fpu_print_fpu (const sim_fpu *f,
2461 sim_fpu_print_func *print,
2464 sim_fpu_printn_fpu (f, print, -1, arg);
/* Print F in human-readable form through the PRINT callback: a sign
   character, then (class-dependent) the fraction bits below the
   implicit-1 position and a suffix -- "*QuietNaN", "*SignalNaN", or
   "*2^<exp>" for numbers/denorms.  Case bodies for zero and infinity,
   plus braces/breaks, are elided in this excerpt.  */
2467 INLINE_SIM_FPU (void)
2468 sim_fpu_printn_fpu (const sim_fpu *f,
2469 sim_fpu_print_func *print,
2473 print (arg, "%s", f->sign ? "-" : "+");
2476 case sim_fpu_class_qnan:
2478 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2479 print (arg, "*QuietNaN");
2481 case sim_fpu_class_snan:
2483 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2484 print (arg, "*SignalNaN");
2486 case sim_fpu_class_zero:
2489 case sim_fpu_class_infinity:
2492 case sim_fpu_class_number:
2493 case sim_fpu_class_denorm:
2495 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2496 print (arg, "*2^%+d", f->normal_exp);
/* A printable number/denorm fraction must be normalized.  */
2497 ASSERT (f->fraction >= IMPLICIT_1);
2498 ASSERT (f->fraction < IMPLICIT_2);
2503 INLINE_SIM_FPU (void)
2504 sim_fpu_print_status (int status,
2505 sim_fpu_print_func *print,
2512 switch ((sim_fpu_status) (status & i))
2514 case sim_fpu_status_denorm:
2515 print (arg, "%sD", prefix);
2517 case sim_fpu_status_invalid_snan:
2518 print (arg, "%sSNaN", prefix);
2520 case sim_fpu_status_invalid_qnan:
2521 print (arg, "%sQNaN", prefix);
2523 case sim_fpu_status_invalid_isi:
2524 print (arg, "%sISI", prefix);
2526 case sim_fpu_status_invalid_idi:
2527 print (arg, "%sIDI", prefix);
2529 case sim_fpu_status_invalid_zdz:
2530 print (arg, "%sZDZ", prefix);
2532 case sim_fpu_status_invalid_imz:
2533 print (arg, "%sIMZ", prefix);
2535 case sim_fpu_status_invalid_cvi:
2536 print (arg, "%sCVI", prefix);
2538 case sim_fpu_status_invalid_cmp:
2539 print (arg, "%sCMP", prefix);
2541 case sim_fpu_status_invalid_sqrt:
2542 print (arg, "%sSQRT", prefix);
2545 case sim_fpu_status_inexact:
2546 print (arg, "%sX", prefix);
2549 case sim_fpu_status_overflow:
2550 print (arg, "%sO", prefix);
2553 case sim_fpu_status_underflow:
2554 print (arg, "%sU", prefix);
2557 case sim_fpu_status_invalid_div0:
2558 print (arg, "%s/", prefix);
2561 case sim_fpu_status_rounded:
2562 print (arg, "%sR", prefix);