1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 92, 93, 94, 95, 96, 97, 98, 1999, 2000
4 Free Software Foundation, Inc.
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
22 GNU CC is distributed in the hope that it will be useful,
23 but WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 GNU General Public License for more details.
27 You should have received a copy of the GNU General Public License
28 along with GNU CC; see the file COPYING. If not, write to
29 the Free Software Foundation, 59 Temple Place - Suite 330,
30 Boston, MA 02111-1307, USA. */
32 /* It is incorrect to include config.h here, because this file is being
33 compiled for the target, and hence definitions concerning only the host
42 /* Don't use `fancy_abort' here even if config.h says to use it. */
49 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
50 #if defined (L_divdi3) || defined (L_moddi3)
62 w.s.high = -uu.s.high - ((UWtype) w.s.low > 0);
68 /* Unless shift functions are defined with full ANSI prototypes,
69 parameter b will be promoted to int if word_type is smaller than an int. */
72 __lshrdi3 (DWtype u, word_type b)
83 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
87 w.s.low = (UWtype) uu.s.high >> -bm;
91 UWtype carries = (UWtype) uu.s.high << bm;
93 w.s.high = (UWtype) uu.s.high >> b;
94 w.s.low = ((UWtype) uu.s.low >> b) | carries;
103 __ashldi3 (DWtype u, word_type b)
114 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
118 w.s.high = (UWtype) uu.s.low << -bm;
122 UWtype carries = (UWtype) uu.s.low >> bm;
124 w.s.low = (UWtype) uu.s.low << b;
125 w.s.high = ((UWtype) uu.s.high << b) | carries;
134 __ashrdi3 (DWtype u, word_type b)
145 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
148 /* w.s.high = 1..1 or 0..0 */
149 w.s.high = uu.s.high >> (sizeof (Wtype) * BITS_PER_UNIT - 1);
150 w.s.low = uu.s.high >> -bm;
154 UWtype carries = (UWtype) uu.s.high << bm;
156 w.s.high = uu.s.high >> b;
157 w.s.low = ((UWtype) uu.s.low >> b) | carries;
169 UWtype word, count, add;
173 word = uu.s.low, add = 0;
174 else if (uu.s.high != 0)
175 word = uu.s.high, add = BITS_PER_UNIT * sizeof (Wtype);
179 count_trailing_zeros (count, word);
180 return count + add + 1;
186 __muldi3 (DWtype u, DWtype v)
194 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
195 w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
196 + (UWtype) uu.s.high * (UWtype) vv.s.low);
203 #if defined (sdiv_qrnnd)
205 __udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
212 if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
214 /* dividend, divisor, and quotient are nonnegative */
215 sdiv_qrnnd (q, r, a1, a0, d);
219 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
220 sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
221 /* Divide (c1*2^32 + c0) by d */
222 sdiv_qrnnd (q, r, c1, c0, d);
223 /* Add 2^31 to quotient */
224 q += (UWtype) 1 << (W_TYPE_SIZE - 1);
229 b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
230 c1 = a1 >> 1; /* A/2 */
231 c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);
233 if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
235 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
237 r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
254 else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
257 c0 = ~c0; /* logical NOT */
259 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
261 q = ~q; /* (A/2)/b1 */
264 r = 2*r + (a0 & 1); /* A/(2*b1) */
282 else /* Implies c1 = b1 */
283 { /* Hence a1 = d - 1 = 2*b1 - 1 */
301 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
303 __udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
304 UWtype a1 __attribute__ ((__unused__)),
305 UWtype a0 __attribute__ ((__unused__)),
306 UWtype d __attribute__ ((__unused__)))
313 #if (defined (L_udivdi3) || defined (L_divdi3) || \
314 defined (L_umoddi3) || defined (L_moddi3))
319 const UQItype __clz_tab[] =
321 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
322 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
323 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
324 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
325 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
326 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
327 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
328 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
334 #if (defined (L_udivdi3) || defined (L_divdi3) || \
335 defined (L_umoddi3) || defined (L_moddi3))
339 __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
344 UWtype d0, d1, n0, n1, n2;
356 #if !UDIV_NEEDS_NORMALIZATION
363 udiv_qrnnd (q0, n0, n1, n0, d0);
366 /* Remainder in n0. */
373 d0 = 1 / d0; /* Divide intentionally by zero. */
375 udiv_qrnnd (q1, n1, 0, n1, d0);
376 udiv_qrnnd (q0, n0, n1, n0, d0);
378 /* Remainder in n0. */
389 #else /* UDIV_NEEDS_NORMALIZATION */
397 count_leading_zeros (bm, d0);
401 /* Normalize, i.e. make the most significant bit of the
405 n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
409 udiv_qrnnd (q0, n0, n1, n0, d0);
412 /* Remainder in n0 >> bm. */
419 d0 = 1 / d0; /* Divide intentionally by zero. */
421 count_leading_zeros (bm, d0);
425 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
426 conclude (the most significant bit of n1 is set) /\ (the
427 leading quotient digit q1 = 1).
429 This special case is necessary, not an optimization.
430 (Shifts counts of W_TYPE_SIZE are undefined.) */
439 b = W_TYPE_SIZE - bm;
443 n1 = (n1 << bm) | (n0 >> b);
446 udiv_qrnnd (q1, n1, n2, n1, d0);
451 udiv_qrnnd (q0, n0, n1, n0, d0);
453 /* Remainder in n0 >> bm. */
463 #endif /* UDIV_NEEDS_NORMALIZATION */
474 /* Remainder in n1n0. */
486 count_leading_zeros (bm, d1);
489 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
490 conclude (the most significant bit of n1 is set) /\ (the
491 quotient digit q0 = 0 or 1).
493 This special case is necessary, not an optimization. */
495 /* The condition on the next line takes advantage of that
496 n1 >= d1 (true due to program flow). */
497 if (n1 > d1 || n0 >= d0)
500 sub_ddmmss (n1, n0, n1, n0, d1, d0);
519 b = W_TYPE_SIZE - bm;
521 d1 = (d1 << bm) | (d0 >> b);
524 n1 = (n1 << bm) | (n0 >> b);
527 udiv_qrnnd (q0, n1, n2, n1, d1);
528 umul_ppmm (m1, m0, q0, d0);
530 if (m1 > n1 || (m1 == n1 && m0 > n0))
533 sub_ddmmss (m1, m0, m1, m0, d1, d0);
538 /* Remainder in (n1n0 - m1m0) >> bm. */
541 sub_ddmmss (n1, n0, n1, n0, m1, m0);
542 rr.s.low = (n1 << b) | (n0 >> bm);
543 rr.s.high = n1 >> bm;
558 __divdi3 (DWtype u, DWtype v)
569 uu.ll = __negdi2 (uu.ll);
572 vv.ll = __negdi2 (vv.ll);
574 w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
584 __moddi3 (DWtype u, DWtype v)
595 uu.ll = __negdi2 (uu.ll);
597 vv.ll = __negdi2 (vv.ll);
599 (void) __udivmoddi4 (uu.ll, vv.ll, &w);
609 __umoddi3 (UDWtype u, UDWtype v)
613 (void) __udivmoddi4 (u, v, &w);
621 __udivdi3 (UDWtype n, UDWtype d)
623 return __udivmoddi4 (n, d, (UDWtype *) 0);
629 __cmpdi2 (DWtype a, DWtype b)
633 au.ll = a, bu.ll = b;
635 if (au.s.high < bu.s.high)
637 else if (au.s.high > bu.s.high)
639 if ((UWtype) au.s.low < (UWtype) bu.s.low)
641 else if ((UWtype) au.s.low > (UWtype) bu.s.low)
649 __ucmpdi2 (DWtype a, DWtype b)
653 au.ll = a, bu.ll = b;
655 if ((UWtype) au.s.high < (UWtype) bu.s.high)
657 else if ((UWtype) au.s.high > (UWtype) bu.s.high)
659 if ((UWtype) au.s.low < (UWtype) bu.s.low)
661 else if ((UWtype) au.s.low > (UWtype) bu.s.low)
667 #if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
668 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
669 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
672 __fixunstfDI (TFtype a)
680 /* Compute high word of result, as a flonum. */
681 b = (a / HIGH_WORD_COEFF);
682 /* Convert that to fixed (but not to DWtype!),
683 and shift it into the high word. */
686 /* Remove high part from the TFtype, leaving the low part as flonum. */
688 /* Convert that to fixed (but not to DWtype!) and add it in.
689 Sometimes A comes out negative. This is significant, since
690 A has more bits than a long int does. */
699 #if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
704 return - __fixunstfDI (-a);
705 return __fixunstfDI (a);
709 #if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
710 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
711 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
714 __fixunsxfDI (XFtype a)
722 /* Compute high word of result, as a flonum. */
723 b = (a / HIGH_WORD_COEFF);
724 /* Convert that to fixed (but not to DWtype!),
725 and shift it into the high word. */
728 /* Remove high part from the XFtype, leaving the low part as flonum. */
730 /* Convert that to fixed (but not to DWtype!) and add it in.
731 Sometimes A comes out negative. This is significant, since
732 A has more bits than a long int does. */
741 #if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
746 return - __fixunsxfDI (-a);
747 return __fixunsxfDI (a);
752 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
753 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
756 __fixunsdfDI (DFtype a)
764 /* Compute high word of result, as a flonum. */
765 b = (a / HIGH_WORD_COEFF);
766 /* Convert that to fixed (but not to DWtype!),
767 and shift it into the high word. */
770 /* Remove high part from the DFtype, leaving the low part as flonum. */
772 /* Convert that to fixed (but not to DWtype!) and add it in.
773 Sometimes A comes out negative. This is significant, since
774 A has more bits than a long int does. */
788 return - __fixunsdfDI (-a);
789 return __fixunsdfDI (a);
794 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
795 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
798 __fixunssfDI (SFtype original_a)
800 /* Convert the SFtype to a DFtype, because that is surely not going
801 to lose any bits. Some day someone else can write a faster version
802 that avoids converting to DFtype, and verify it really works right. */
803 DFtype a = original_a;
810 /* Compute high word of result, as a flonum. */
811 b = (a / HIGH_WORD_COEFF);
812 /* Convert that to fixed (but not to DWtype!),
813 and shift it into the high word. */
816 /* Remove high part from the DFtype, leaving the low part as flonum. */
818 /* Convert that to fixed (but not to DWtype!) and add it in.
819 Sometimes A comes out negative. This is significant, since
820 A has more bits than a long int does. */
834 return - __fixunssfDI (-a);
835 return __fixunssfDI (a);
839 #if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
840 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
841 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
842 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
845 __floatdixf (DWtype u)
849 d = (Wtype) (u >> WORD_SIZE);
850 d *= HIGH_HALFWORD_COEFF;
851 d *= HIGH_HALFWORD_COEFF;
852 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
858 #if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
859 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
860 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
861 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
864 __floatditf (DWtype u)
868 d = (Wtype) (u >> WORD_SIZE);
869 d *= HIGH_HALFWORD_COEFF;
870 d *= HIGH_HALFWORD_COEFF;
871 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
878 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
879 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
880 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
883 __floatdidf (DWtype u)
887 d = (Wtype) (u >> WORD_SIZE);
888 d *= HIGH_HALFWORD_COEFF;
889 d *= HIGH_HALFWORD_COEFF;
890 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
897 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
898 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
899 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
900 #define DI_SIZE (sizeof (DWtype) * BITS_PER_UNIT)
902 /* Define codes for all the float formats that we know of. Note
903 that this is copied from real.h. */
905 #define UNKNOWN_FLOAT_FORMAT 0
906 #define IEEE_FLOAT_FORMAT 1
907 #define VAX_FLOAT_FORMAT 2
908 #define IBM_FLOAT_FORMAT 3
910 /* Default to IEEE float if not specified. Nearly all machines use it. */
911 #ifndef HOST_FLOAT_FORMAT
912 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
915 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
920 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
925 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
931 __floatdisf (DWtype u)
933 /* Do the calculation in DFmode
934 so that we don't lose any of the precision of the high word
935 while multiplying it. */
938 /* Protect against double-rounding error.
939 Represent any low-order bits, that might be truncated in DFmode,
940 by a bit that won't be lost. The bit can go in anywhere below the
941 rounding position of the SFmode. A fixed mask and bit position
942 handles all usual configurations. It doesn't handle the case
943 of 128-bit DImode, however. */
944 if (DF_SIZE < DI_SIZE
945 && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
947 #define REP_BIT ((UDWtype) 1 << (DI_SIZE - DF_SIZE))
948 if (! (- ((DWtype) 1 << DF_SIZE) < u
949 && u < ((DWtype) 1 << DF_SIZE)))
951 if ((UDWtype) u & (REP_BIT - 1))
955 f = (Wtype) (u >> WORD_SIZE);
956 f *= HIGH_HALFWORD_COEFF;
957 f *= HIGH_HALFWORD_COEFF;
958 f += (UWtype) (u & (HIGH_WORD_COEFF - 1));
964 #if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
965 /* Reenable the normal types, in case limits.h needs them. */
978 __fixunsxfSI (XFtype a)
980 if (a >= - (DFtype) LONG_MIN)
981 return (Wtype) (a + LONG_MIN) - LONG_MIN;
987 /* Reenable the normal types, in case limits.h needs them. */
1000 __fixunsdfSI (DFtype a)
1002 if (a >= - (DFtype) LONG_MIN)
1003 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1009 /* Reenable the normal types, in case limits.h needs them. */
1022 __fixunssfSI (SFtype a)
1024 if (a >= - (SFtype) LONG_MIN)
1025 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1030 /* From here on down, the routines use normal data types. */
1032 #define SItype bogus_type
1033 #define USItype bogus_type
1034 #define DItype bogus_type
1035 #define UDItype bogus_type
1036 #define SFtype bogus_type
1037 #define DFtype bogus_type
1055 /* Like bcmp except the sign is meaningful.
1056 Result is negative if S1 is less than S2,
1057 positive if S1 is greater, 0 if S1 and S2 are equal. */
1060 __gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
1064 unsigned char c1 = *s1++, c2 = *s2++;
1081 #if defined(__svr4__) || defined(__alliant__)
1085 /* The Alliant needs the added underscore. */
1086 asm (".globl __builtin_saveregs");
1087 asm ("__builtin_saveregs:");
1088 asm (".globl ___builtin_saveregs");
1089 asm ("___builtin_saveregs:");
1091 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1092 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1093 area and also for a new va_list
1095 /* Save all argument registers in the arg reg save area. The
1096 arg reg save area must have the following layout (according
1108 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1109 asm (" fst.q %f12,16(%sp)");
1111 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1112 asm (" st.l %r17,36(%sp)");
1113 asm (" st.l %r18,40(%sp)");
1114 asm (" st.l %r19,44(%sp)");
1115 asm (" st.l %r20,48(%sp)");
1116 asm (" st.l %r21,52(%sp)");
1117 asm (" st.l %r22,56(%sp)");
1118 asm (" st.l %r23,60(%sp)");
1119 asm (" st.l %r24,64(%sp)");
1120 asm (" st.l %r25,68(%sp)");
1121 asm (" st.l %r26,72(%sp)");
1122 asm (" st.l %r27,76(%sp)");
1124 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1125 va_list structure. Put it into
1126 r16 so that it will be returned
1129 /* Initialize all fields of the new va_list structure. This
1130 structure looks like:
1133 unsigned long ireg_used;
1134 unsigned long freg_used;
1140 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1141 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1142 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1143 asm (" bri %r1"); /* delayed return */
1144 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1146 #else /* not __svr4__ */
1147 #if defined(__PARAGON__)
1149 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1150 * and we stand a better chance of hooking into libraries
1151 * compiled by PGI. [andyp@ssd.intel.com]
1155 asm (".globl __builtin_saveregs");
1156 asm ("__builtin_saveregs:");
1157 asm (".globl ___builtin_saveregs");
1158 asm ("___builtin_saveregs:");
1160 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1161 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1162 area and also for a new va_list
1164 /* Save all argument registers in the arg reg save area. The
1165 arg reg save area must have the following layout (according
1177 asm (" fst.q f8, 0(sp)");
1178 asm (" fst.q f12,16(sp)");
1179 asm (" st.l r16,32(sp)");
1180 asm (" st.l r17,36(sp)");
1181 asm (" st.l r18,40(sp)");
1182 asm (" st.l r19,44(sp)");
1183 asm (" st.l r20,48(sp)");
1184 asm (" st.l r21,52(sp)");
1185 asm (" st.l r22,56(sp)");
1186 asm (" st.l r23,60(sp)");
1187 asm (" st.l r24,64(sp)");
1188 asm (" st.l r25,68(sp)");
1189 asm (" st.l r26,72(sp)");
1190 asm (" st.l r27,76(sp)");
1192 asm (" adds 80,sp,r16"); /* compute the address of the new
1193 va_list structure. Put it into
1194 r16 so that it will be returned
1197 /* Initialize all fields of the new va_list structure. This
1198 structure looks like:
1201 unsigned long ireg_used;
1202 unsigned long freg_used;
1208 asm (" st.l r0, 0(r16)"); /* nfixed */
1209 asm (" st.l r0, 4(r16)"); /* nfloating */
1210 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1211 asm (" bri r1"); /* delayed return */
1212 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1213 #else /* not __PARAGON__ */
1217 asm (".globl ___builtin_saveregs");
1218 asm ("___builtin_saveregs:");
1219 asm (" mov sp,r30");
1220 asm (" andnot 0x0f,sp,sp");
1221 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1223 /* Fill in the __va_struct. */
1224 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1225 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1226 asm (" st.l r18, 8(sp)");
1227 asm (" st.l r19,12(sp)");
1228 asm (" st.l r20,16(sp)");
1229 asm (" st.l r21,20(sp)");
1230 asm (" st.l r22,24(sp)");
1231 asm (" st.l r23,28(sp)");
1232 asm (" st.l r24,32(sp)");
1233 asm (" st.l r25,36(sp)");
1234 asm (" st.l r26,40(sp)");
1235 asm (" st.l r27,44(sp)");
1237 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1238 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1240 /* Fill in the __va_ctl. */
1241 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1242 asm (" st.l r28,84(sp)"); /* pointer to more args */
1243 asm (" st.l r0, 88(sp)"); /* nfixed */
1244 asm (" st.l r0, 92(sp)"); /* nfloating */
1246 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1248 asm (" mov r30,sp");
1249 /* recover stack and pass address to start
1251 #endif /* not __PARAGON__ */
1252 #endif /* not __svr4__ */
1253 #else /* not __i860__ */
1255 asm (".global __builtin_saveregs");
1256 asm ("__builtin_saveregs:");
1257 asm (".global ___builtin_saveregs");
1258 asm ("___builtin_saveregs:");
1259 #ifdef NEED_PROC_COMMAND
1262 asm ("st %i0,[%fp+68]");
1263 asm ("st %i1,[%fp+72]");
1264 asm ("st %i2,[%fp+76]");
1265 asm ("st %i3,[%fp+80]");
1266 asm ("st %i4,[%fp+84]");
1268 asm ("st %i5,[%fp+88]");
1269 #ifdef NEED_TYPE_COMMAND
1270 asm (".type __builtin_saveregs,#function");
1271 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1273 #else /* not __sparc__ */
1274 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1278 asm (" .set nomips16");
1280 asm (" .ent __builtin_saveregs");
1281 asm (" .globl __builtin_saveregs");
1282 asm ("__builtin_saveregs:");
1283 asm (" sw $4,0($30)");
1284 asm (" sw $5,4($30)");
1285 asm (" sw $6,8($30)");
1286 asm (" sw $7,12($30)");
1288 asm (" .end __builtin_saveregs");
1289 #else /* not __mips__, etc. */
1291 void * __attribute__ ((__noreturn__))
1292 __builtin_saveregs (void)
1297 #endif /* not __mips__ */
1298 #endif /* not __sparc__ */
1299 #endif /* not __i860__ */
1303 #ifndef inhibit_libc
1305 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1307 /* This is used by the `assert' macro. */
1309 __eprintf (const char *string, const char *expression,
1310 unsigned int line, const char *filename)
1312 fprintf (stderr, string, expression, line, filename);
1322 /* Structure emitted by -a */
1326 const char *filename;
1330 const unsigned long *addresses;
1332 /* Older GCC's did not emit these fields. */
1334 const char **functions;
1335 const long *line_nums;
1336 const char **filenames;
1340 #ifdef BLOCK_PROFILER_CODE
1343 #ifndef inhibit_libc
1345 /* Simple minded basic block profiling output dumper for
1346 systems that don't provide tcov support. At present,
1347 it requires atexit and stdio. */
1349 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1353 #include "gbl-ctors.h"
1354 #include "gcov-io.h"
1356 #ifdef TARGET_HAS_F_SETLKW
1361 static struct bb *bb_head;
1363 static int num_digits (long value, int base) __attribute__ ((const));
1365 /* Return the number of digits needed to print a value */
1366 /* __inline__ */ static int num_digits (long value, int base)
1368 int minus = (value < 0 && base != 16);
1369 unsigned long v = (minus) ? -value : value;
1383 __bb_exit_func (void)
1385 FILE *da_file, *file;
1392 i = strlen (bb_head->filename) - 3;
1394 if (!strcmp (bb_head->filename+i, ".da"))
1396 /* Must be -fprofile-arcs not -a.
1397 Dump data in a form that gcov expects. */
1401 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1405 /* Make sure the output file exists -
1406 but don't clobber existing data. */
1407 if ((da_file = fopen (ptr->filename, "a")) != 0)
1410 /* Need to re-open in order to be able to write from the start. */
1411 da_file = fopen (ptr->filename, "r+b");
1412 /* Some old systems might not allow the 'b' mode modifier.
1413 Therefore, try to open without it. This can lead to a race
1414 condition so that when you delete and re-create the file, the
1415 file might be opened in text mode, but then, you shouldn't
1416 delete the file in the first place. */
1418 da_file = fopen (ptr->filename, "r+");
1421 fprintf (stderr, "arc profiling: Can't open output file %s.\n",
1426 /* After a fork, another process might try to read and/or write
1427 the same file simultaneously. So if we can, lock the file to
1428 avoid race conditions. */
1429 #if defined (TARGET_HAS_F_SETLKW)
1431 struct flock s_flock;
1433 s_flock.l_type = F_WRLCK;
1434 s_flock.l_whence = SEEK_SET;
1435 s_flock.l_start = 0;
1437 s_flock.l_pid = getpid ();
1439 while (fcntl (fileno (da_file), F_SETLKW, &s_flock)
1444 /* If the file is not empty, and the number of counts in it is the
1445 same, then merge them in. */
1446 firstchar = fgetc (da_file);
1447 if (firstchar == EOF)
1449 if (ferror (da_file))
1451 fprintf (stderr, "arc profiling: Can't read output file ");
1452 perror (ptr->filename);
1459 if (ungetc (firstchar, da_file) == EOF)
1461 if (__read_long (&n_counts, da_file, 8) != 0)
1463 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1468 if (n_counts == ptr->ncounts)
1472 for (i = 0; i < n_counts; i++)
1476 if (__read_long (&v, da_file, 8) != 0)
1478 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1482 ptr->counts[i] += v;
1490 /* ??? Should first write a header to the file. Preferably, a 4 byte
1491 magic number, 4 bytes containing the time the program was
1492 compiled, 4 bytes containing the last modification time of the
1493 source file, and 4 bytes indicating the compiler options used.
1495 That way we can easily verify that the proper source/executable/
1496 data file combination is being used from gcov. */
1498 if (__write_long (ptr->ncounts, da_file, 8) != 0)
1501 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1507 long *count_ptr = ptr->counts;
1509 for (j = ptr->ncounts; j > 0; j--)
1511 if (__write_long (*count_ptr, da_file, 8) != 0)
1519 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1523 if (fclose (da_file) == EOF)
1524 fprintf (stderr, "arc profiling: Error closing output file %s.\n",
1531 /* Must be basic block profiling. Emit a human readable output file. */
1533 file = fopen ("bb.out", "a");
1542 /* This is somewhat type incorrect, but it avoids worrying about
1543 exactly where time.h is included from. It should be ok unless
1544 a void * differs from other pointer formats, or if sizeof (long)
1545 is < sizeof (time_t). It would be nice if we could assume the
1546 use of rationale standards here. */
1548 time ((void *) &time_value);
1549 fprintf (file, "Basic block profiling finished on %s\n", ctime ((void *) &time_value));
1551 /* We check the length field explicitly in order to allow compatibility
1552 with older GCC's which did not provide it. */
1554 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1557 int func_p = (ptr->nwords >= (long) sizeof (struct bb)
1558 && ptr->nwords <= 1000
1560 int line_p = (func_p && ptr->line_nums);
1561 int file_p = (func_p && ptr->filenames);
1562 int addr_p = (ptr->addresses != 0);
1563 long ncounts = ptr->ncounts;
1569 int blk_len = num_digits (ncounts, 10);
1574 fprintf (file, "File %s, %ld basic blocks \n\n",
1575 ptr->filename, ncounts);
1577 /* Get max values for each field. */
1578 for (i = 0; i < ncounts; i++)
1583 if (cnt_max < ptr->counts[i])
1584 cnt_max = ptr->counts[i];
1586 if (addr_p && (unsigned long) addr_max < ptr->addresses[i])
1587 addr_max = ptr->addresses[i];
1589 if (line_p && line_max < ptr->line_nums[i])
1590 line_max = ptr->line_nums[i];
1594 p = (ptr->functions[i]) ? (ptr->functions[i]) : "<none>";
1602 p = (ptr->filenames[i]) ? (ptr->filenames[i]) : "<none>";
1609 addr_len = num_digits (addr_max, 16);
1610 cnt_len = num_digits (cnt_max, 10);
1611 line_len = num_digits (line_max, 10);
1613 /* Now print out the basic block information. */
1614 for (i = 0; i < ncounts; i++)
1617 " Block #%*d: executed %*ld time(s)",
1619 cnt_len, ptr->counts[i]);
1622 fprintf (file, " address= 0x%.*lx", addr_len,
1626 fprintf (file, " function= %-*s", func_len,
1627 (ptr->functions[i]) ? ptr->functions[i] : "<none>");
1630 fprintf (file, " line= %*ld", line_len, ptr->line_nums[i]);
1633 fprintf (file, " file= %s",
1634 (ptr->filenames[i]) ? ptr->filenames[i] : "<none>");
1636 fprintf (file, "\n");
1639 fprintf (file, "\n");
1643 fprintf (file, "\n\n");
1649 __bb_init_func (struct bb *blocks)
1651 /* User is supposed to check whether the first word is non-0,
1652 but just in case.... */
1654 if (blocks->zero_word)
1657 /* Initialize destructor. */
1659 atexit (__bb_exit_func);
1661 /* Set up linked list. */
1662 blocks->zero_word = 1;
1663 blocks->next = bb_head;
1667 /* Called before fork or exec - write out profile information gathered so
1668 far and reset it to zero. This avoids duplication or loss of the
1669 profile information gathered so far. */
1671 __bb_fork_func (void)
1676 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1679 for (i = ptr->ncounts - 1; i >= 0; i--)
1684 #ifndef MACHINE_STATE_SAVE
1685 #define MACHINE_STATE_SAVE(ID)
1687 #ifndef MACHINE_STATE_RESTORE
1688 #define MACHINE_STATE_RESTORE(ID)
1691 /* Number of buckets in hashtable of basic block addresses. */
1693 #define BB_BUCKETS 311
1695 /* Maximum length of string in file bb.in. */
1697 #define BBINBUFSIZE 500
1701 struct bb_edge *next;
1702 unsigned long src_addr;
1703 unsigned long dst_addr;
1704 unsigned long count;
1709 TRACE_KEEP = 0, TRACE_ON = 1, TRACE_OFF = 2
1714 struct bb_func *next;
1717 enum bb_func_mode mode;
1720 /* This is the connection to the outside world.
1721 The BLOCK_PROFILER macro must set __bb.blocks
1722 and __bb.blockno. */
1725 unsigned long blockno;
1729 /* Vars to store addrs of source and destination basic blocks
1732 static unsigned long bb_src = 0;
1733 static unsigned long bb_dst = 0;
1735 static FILE *bb_tracefile = (FILE *) 0;
1736 static struct bb_edge **bb_hashbuckets = (struct bb_edge **) 0;
1737 static struct bb_func *bb_func_head = (struct bb_func *) 0;
1738 static unsigned long bb_callcount = 0;
1739 static int bb_mode = 0;
1741 static unsigned long *bb_stack = (unsigned long *) 0;
1742 static size_t bb_stacksize = 0;
1744 static int reported = 0;
1747 Always : Print execution frequencies of basic blocks
1749 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1750 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1751 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1752 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1757 /*#include <sys/types.h>*/
1758 #include <sys/stat.h>
1759 /*#include <malloc.h>*/
1761 /* Commands executed by gopen. */
1763 #define GOPENDECOMPRESS "gzip -cd "
1764 #define GOPENCOMPRESS "gzip -c >"
1766 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1767 If it does not compile, simply replace gopen by fopen and delete
1768 '.gz' from any first parameter to gopen. */
1771 gopen (char *fn, char *mode)
1779 if (mode[0] != 'r' && mode[0] != 'w')
1782 p = fn + strlen (fn)-1;
1783 use_gzip = ((p[-1] == '.' && (p[0] == 'Z' || p[0] == 'z'))
1784 || (p[-2] == '.' && p[-1] == 'g' && p[0] == 'z'));
1791 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1792 + sizeof (GOPENDECOMPRESS));
1793 strcpy (s, GOPENDECOMPRESS);
1794 strcpy (s + (sizeof (GOPENDECOMPRESS)-1), fn);
1795 f = popen (s, mode);
1803 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1804 + sizeof (GOPENCOMPRESS));
1805 strcpy (s, GOPENCOMPRESS);
1806 strcpy (s + (sizeof (GOPENCOMPRESS)-1), fn);
1807 if (!(f = popen (s, mode)))
1808 f = fopen (s, mode);
1815 return fopen (fn, mode);
1825 if (!fstat (fileno (f), &buf) && S_ISFIFO (buf.st_mode))
1833 #endif /* HAVE_POPEN */
1835 /* Called once per program. */
/* Program-exit hook for the basic-block tracer: close the trace file,
   report functions listed in `bb.in' that were never executed, dump the
   jump-edge counts from bb_hashbuckets to "bb.out", then free all
   profiler memory.  NOTE(review): excerpted listing -- some source
   lines are elided.  */
1838 __bb_exit_trace_func (void)
1840 FILE *file = fopen ("bb.out", "a");
/* gclose handles popen-ed (compressed) trace streams; fclose is the
   non-HAVE_POPEN path.  */
1853 gclose (bb_tracefile);
1855 fclose (bb_tracefile);
1856 #endif /* HAVE_POPEN */
1859 /* Check functions in `bb.in'. */
1864 const struct bb_func *p;
1865 int printed_something = 0;
1869 /* This is somewhat type incorrect. */
1870 time ((void *) &time_value);
/* For each requested function, scan every registered block list for a
   matching (filename, funcname) pair; report the ones never seen.  */
1872 for (p = bb_func_head; p != (struct bb_func *) 0; p = p->next)
1874 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1876 if (!ptr->filename || (p->filename != (char *) 0 && strcmp (p->filename, ptr->filename)))
1878 for (blk = 0; blk < ptr->ncounts; blk++)
1880 if (!strcmp (p->funcname, ptr->functions[blk]))
/* Print the report header lazily, only once something is reported.  */
1885 if (!printed_something)
1887 fprintf (file, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value))&#59;
1888 printed_something = 1;
1891 fprintf (file, "\tFunction %s", p->funcname);
1893 fprintf (file, " of file %s", p->filename);
1894 fprintf (file, "\n" );
1899 if (printed_something)
1900 fprintf (file, "\n");
1906 if (!bb_hashbuckets)
1910 fprintf (stderr, "Profiler: out of memory\n");
/* First pass over the edge hash table: find the widest address and
   count so the printed columns line up.  */
1920 unsigned long addr_max = 0;
1921 unsigned long cnt_max = 0;
1925 /* This is somewhat type incorrect, but it avoids worrying about
1926 exactly where time.h is included from. It should be ok unless
1927 a void * differs from other pointer formats, or if sizeof (long)
1928 is < sizeof (time_t). It would be nice if we could assume the
1929 use of rationale standards here. */
1931 time ((void *) &time_value);
1932 fprintf (file, "Basic block jump tracing");
/* Bits 2 and 3 of bb_mode record whether calls/returns were traced.  */
1934 switch (bb_mode & 12)
1937 fprintf (file, " (with call)");
1941 /* Print nothing. */
1945 fprintf (file, " (with call & ret)");
1949 fprintf (file, " (with ret)");
1953 fprintf (file, " finished on %s\n", ctime ((void *) &time_value));
1955 for (i = 0; i < BB_BUCKETS; i++)
1957 struct bb_edge *bucket = bb_hashbuckets[i];
1958 for ( ; bucket; bucket = bucket->next )
1960 if (addr_max < bucket->src_addr)
1961 addr_max = bucket->src_addr;
1962 if (addr_max < bucket->dst_addr)
1963 addr_max = bucket->dst_addr;
1964 if (cnt_max < bucket->count)
1965 cnt_max = bucket->count;
1968 addr_len = num_digits (addr_max, 16);
1969 cnt_len = num_digits (cnt_max, 10);
/* Second pass: emit one line per recorded src->dst jump edge.  */
1971 for ( i = 0; i < BB_BUCKETS; i++)
1973 struct bb_edge *bucket = bb_hashbuckets[i];
1974 for ( ; bucket; bucket = bucket->next )
1977 "Jump from block 0x%.*lx to block 0x%.*lx executed %*lu time(s)\n",
1978 addr_len, bucket->src_addr,
1979 addr_len, bucket->dst_addr,
1980 cnt_len, bucket->count);
1984 fprintf (file, "\n");
1992 /* Free allocated memory. */
1997 struct bb_func *old = f;
2000 if (old->funcname) free (old->funcname);
2001 if (old->filename) free (old->filename);
2012 for (i = 0; i < BB_BUCKETS; i++)
2014 struct bb_edge *old, *bucket = bb_hashbuckets[i];
2019 bucket = bucket->next;
2023 free (bb_hashbuckets);
2026 for (b = bb_head; b; b = b->next)
2027 if (b->flags) free (b->flags);
2030 /* Called once per program. */
/* Once-per-program initialization of the tracer: parse `bb.in' for mode
   keywords and per-function trace requests, open the trace output file,
   and allocate the edge hash table and call stack.  NOTE(review):
   excerpted listing -- some source lines are elided.  */
2033 __bb_init_prg (void)
2036 char buf[BBINBUFSIZE];
2039 enum bb_func_mode m;
2042 /* Initialize destructor. */
2043 atexit (__bb_exit_func);
2045 if (!(file = fopen ("bb.in", "r")))
2048 while(fgets (buf, BBINBUFSIZE, file) != 0)
/* Reserved keywords select global tracing modes; anything else names a
   function (optionally "file:function") to trace.  */
2064 if (!strcmp (p, "__bb_trace__"))
2066 else if (!strcmp (p, "__bb_jumps__"))
2068 else if (!strcmp (p, "__bb_hidecall__"))
2070 else if (!strcmp (p, "__bb_showret__"))
2074 struct bb_func *f = (struct bb_func *) malloc (sizeof (struct bb_func));
2078 f->next = bb_func_head;
/* "file:function" form: split at the colon into filename + funcname.  */
2079 if ((pos = strchr (p, ':')))
2081 if (!(f->funcname = (char *) malloc (strlen (pos+1)+1)))
2083 strcpy (f->funcname, pos+1);
2085 if ((f->filename = (char *) malloc (l+1)))
2087 strncpy (f->filename, p, l);
/* strncpy does not terminate; add the NUL explicitly.  */
2088 f->filename[l] = '\0';
2091 f->filename = (char *) 0;
/* Bare function name: no filename filter.  */
2095 if (!(f->funcname = (char *) malloc (strlen (p)+1)))
2097 strcpy (f->funcname, p);
2098 f->filename = (char *) 0;
/* Open the trace output, compressed via gopen when popen exists.  */
2110 bb_tracefile = gopen ("bbtrace.gz", "w");
2115 bb_tracefile = fopen ("bbtrace", "w");
2117 #endif /* HAVE_POPEN */
2121 bb_hashbuckets = (struct bb_edge **)
2122 malloc (BB_BUCKETS * sizeof (struct bb_edge *));
2124 /* Use a loop here rather than calling bzero to avoid having to
2125 conditionalize its existance. */
2126 for (i = 0; i < BB_BUCKETS; i++)
2127 bb_hashbuckets[i] = 0;
2133 bb_stack = (unsigned long *) malloc (bb_stacksize * sizeof (*bb_stack));
2136 /* Initialize destructor. */
2137 atexit (__bb_exit_trace_func);
2140 /* Called upon entering a basic block. */
/* Record entry into a basic block: bump its counter, optionally write
   the address to the trace file, and count the (bb_src -> bb_dst) jump
   edge in the hash table, using move-to-front within each bucket chain.
   NOTE(review): excerpted listing -- some source lines are elided.  */
2143 __bb_trace_func (void)
2145 struct bb_edge *bucket;
2147 MACHINE_STATE_SAVE("1")
/* Skip blocks explicitly disabled via bb.in (TRACE_OFF flag).  */
2149 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2152 bb_dst = __bb.blocks->addresses[__bb.blockno];
2153 __bb.blocks->counts[__bb.blockno]++;
2157 fwrite (&bb_dst, sizeof (unsigned long), 1, bb_tracefile);
2162 struct bb_edge **startbucket, **oldnext;
/* Hash the edge (src, dst) into a bucket chain.  */
2164 oldnext = startbucket
2165 = & bb_hashbuckets[ (((int) bb_src*8) ^ (int) bb_dst) % BB_BUCKETS ];
2166 bucket = *startbucket;
2168 for (bucket = *startbucket; bucket;
2169 oldnext = &(bucket->next), bucket = *oldnext)
2171 if (bucket->src_addr == bb_src
2172 && bucket->dst_addr == bb_dst)
/* Found: splice the node out and reinsert at the chain head
   (move-to-front heuristic for hot edges).  */
2175 *oldnext = bucket->next;
2176 bucket->next = *startbucket;
2177 *startbucket = bucket;
/* Not found: allocate a fresh edge node at the chain head.  */
2182 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2188 fprintf (stderr, "Profiler: out of memory\n");
2195 bucket->src_addr = bb_src;
2196 bucket->dst_addr = bb_dst;
2197 bucket->next = *startbucket;
2198 *startbucket = bucket;
2209 MACHINE_STATE_RESTORE("1")
2213 /* Called when returning from a function and `__bb_showret__' is set. */
/* Like __bb_trace_func, but records the reverse (return) edge
   (bb_dst -> bb_src); called when `__bb_showret__' tracing is enabled.
   NOTE(review): excerpted listing -- some source lines are elided.  */
2216 __bb_trace_func_ret (void)
2218 struct bb_edge *bucket;
2220 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2225 struct eh_edge **startbucket, **oldnext;
2227 oldnext = startbucket
/* Same hash as the forward path with src/dst swapped.  */
2228 = & bb_hashbuckets[ (((int) bb_dst * 8) ^ (int) bb_src) % BB_BUCKETS ];
2229 bucket = *startbucket;
2231 for (bucket = *startbucket; bucket;
2232 oldnext = &(bucket->next), bucket = *oldnext)
2234 if (bucket->src_addr == bb_dst
2235 && bucket->dst_addr == bb_src)
/* Found: move the node to the front of its chain.  */
2238 *oldnext = bucket->next;
2239 bucket->next = *startbucket;
2240 *startbucket = bucket;
/* Not found: allocate a new edge node.  */
2245 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2251 fprintf (stderr, "Profiler: out of memory\n");
2258 bucket->src_addr = bb_dst;
2259 bucket->dst_addr = bb_src;
2260 bucket->next = *startbucket;
2261 *startbucket = bucket;
2274 /* Called upon entering the first function of a file. */
/* Register a translation unit's block structure BLOCKS on entry to its
   first function: link it into bb_head, allocate a per-block flags
   array, and OR in the trace mode of every matching bb.in request.
   NOTE(review): excerpted listing -- some source lines are elided.  */
2277 __bb_init_file (struct bb *blocks)
2280 const struct bb_func *p;
2281 long blk, ncounts = blocks->ncounts;
2282 const char **functions = blocks->functions;
2284 /* Set up linked list. */
/* zero_word doubles as the "already initialized" marker.  */
2285 blocks->zero_word = 1;
2286 blocks->next = bb_head;
2291 || !(blocks->flags = (char *) malloc (sizeof (char) * blocks->ncounts)))
2294 for (blk = 0; blk < ncounts; blk++)
2295 blocks->flags[blk] = 0;
/* Apply each bb.in entry whose funcname (and optional filename)
   matches this unit's function table.  */
2297 for (blk = 0; blk < ncounts; blk++)
2299 for (p = bb_func_head; p; p = p->next)
2301 if (!strcmp (p->funcname, functions[blk])
2302 && (!p->filename || !strcmp (p->filename, blocks->filename)))
2304 blocks->flags[blk] |= p->mode;
2311 /* Called when exiting from a function. */
/* Function-exit hook: when call/return tracing is active (bb_mode bits
   2-3) pop the saved source block from bb_stack and record the return
   edge.  NOTE(review): excerpted listing -- some source lines are
   elided.  */
2314 __bb_trace_ret (void)
2317 MACHINE_STATE_SAVE("2")
2321 if ((bb_mode & 12) && bb_stacksize > bb_callcount)
2323 bb_src = bb_stack[bb_callcount];
2325 __bb_trace_func_ret ();
2331 MACHINE_STATE_RESTORE("2")
2335 /* Called when entering a function. */
/* Function-entry hook: lazily initialize per-file block data, grow the
   call stack when the call depth exceeds its current size, and push the
   caller's block address.  NOTE(review): excerpted listing -- some
   source lines are elided.  */
2338 __bb_init_trace_func (struct bb *blocks, unsigned long blockno)
2340 static int trace_init = 0;
2342 MACHINE_STATE_SAVE("3")
/* zero_word == 0 means this translation unit is not yet registered.  */
2344 if (!blocks->zero_word)
2351 __bb_init_file (blocks);
2361 if (bb_callcount >= bb_stacksize)
2363 size_t newsize = bb_callcount + 100;
/* NOTE(review): realloc size here is NEWSIZE bytes, not
   newsize * sizeof (*bb_stack) as in the initial allocation --
   looks inconsistent; confirm against the full source.  */
2365 bb_stack = (unsigned long *) realloc (bb_stack, newsize);
2370 fprintf (stderr, "Profiler: out of memory\n");
2374 goto stack_overflow;
2376 bb_stacksize = newsize;
2378 bb_stack[bb_callcount] = bb_src;
/* TRACE_ON set via bb.in also (re)enables tracing for this block.  */
2389 else if (blocks->flags && (blocks->flags[blockno] & TRACE_ON))
2395 bb_stack[bb_callcount] = bb_src;
2398 MACHINE_STATE_RESTORE("3")
2401 #endif /* not inhibit_libc */
2402 #endif /* not BLOCK_PROFILER_CODE */
/* Table of single-bit masks: __shtab[n] == 1 << n for n in 0..31.  */
2406 unsigned int __shtab[] = {
2407 0x00000001, 0x00000002, 0x00000004, 0x00000008,
2408 0x00000010, 0x00000020, 0x00000040, 0x00000080,
2409 0x00000100, 0x00000200, 0x00000400, 0x00000800,
2410 0x00001000, 0x00002000, 0x00004000, 0x00008000,
2411 0x00010000, 0x00020000, 0x00040000, 0x00080000,
2412 0x00100000, 0x00200000, 0x00400000, 0x00800000,
2413 0x01000000, 0x02000000, 0x04000000, 0x08000000,
2414 0x10000000, 0x20000000, 0x40000000, 0x80000000
2418 #ifdef L_clear_cache
2419 /* Clear part of an instruction cache. */
2421 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
/* Flush the instruction cache for [BEG, END).  Uses the target's
   CLEAR_INSN_CACHE macro when defined; otherwise, on targets that
   describe their i-cache geometry (INSN_CACHE_SIZE etc.), evicts lines
   by executing return instructions planted in a static array that
   aliases the affected cache lines.  NOTE(review): excerpted listing --
   some source lines are elided.  */
2424 __clear_cache (char *beg __attribute__((__unused__)),
2425 char *end __attribute__((__unused__)))
2427 #ifdef CLEAR_INSN_CACHE
2428 CLEAR_INSN_CACHE (beg, end);
2430 #ifdef INSN_CACHE_SIZE
2431 static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
2432 static int initialized;
2436 typedef (*function_ptr) (void);
2438 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2439 /* It's cheaper to clear the whole cache.
2440 Put in a series of jump instructions so that calling the beginning
2441 of the cache will clear the whole thing. */
/* Round the array base up to a cache-line boundary.  */
2445 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2446 & -INSN_CACHE_LINE_WIDTH);
2447 int end_ptr = ptr + INSN_CACHE_SIZE;
/* Fill one cache-size region with jumps, each hopping to the next
   line, terminated by a return.  */
2449 while (ptr < end_ptr)
2451 *(INSTRUCTION_TYPE *)ptr
2452 = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
2453 ptr += INSN_CACHE_LINE_WIDTH;
2455 *(INSTRUCTION_TYPE *) (ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;
2460 /* Call the beginning of the sequence. */
2461 (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2462 & -INSN_CACHE_LINE_WIDTH))
2465 #else /* Cache is large. */
/* Large cache: plant a return instruction at the start of every line
   of the array once, then call only the lines that alias [BEG, END).  */
2469 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2470 & -INSN_CACHE_LINE_WIDTH);
2472 while (ptr < (int) array + sizeof array)
2474 *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
2475 ptr += INSN_CACHE_LINE_WIDTH;
2481 /* Find the location in array that occupies the same cache line as BEG. */
2483 offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
2484 start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
2485 & -INSN_CACHE_PLANE_SIZE)
2488 /* Compute the cache alignment of the place to stop clearing. */
2489 #if 0 /* This is not needed for gcc's purposes. */
2490 /* If the block to clear is bigger than a cache plane,
2491 we clear the entire cache, and OFFSET is already correct. */
2492 if (end < beg + INSN_CACHE_PLANE_SIZE)
2494 offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
2495 & -INSN_CACHE_LINE_WIDTH)
2496 & (INSN_CACHE_PLANE_SIZE - 1));
2498 #if INSN_CACHE_DEPTH > 1
/* Set-associative cache: call the aliasing line in every plane.  */
2499 end_addr = (start_addr & -INSN_CACHE_PLANE_SIZE) + offset;
2500 if (end_addr <= start_addr)
2501 end_addr += INSN_CACHE_PLANE_SIZE;
2503 for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)
2505 int addr = start_addr + plane * INSN_CACHE_PLANE_SIZE;
2506 int stop = end_addr + plane * INSN_CACHE_PLANE_SIZE;
2508 while (addr != stop)
2510 /* Call the return instruction at ADDR. */
2511 ((function_ptr) addr) ();
2513 addr += INSN_CACHE_LINE_WIDTH;
2516 #else /* just one plane */
2519 /* Call the return instruction at START_ADDR. */
2520 ((function_ptr) start_addr) ();
2522 start_addr += INSN_CACHE_LINE_WIDTH;
2524 while ((start_addr % INSN_CACHE_SIZE) != offset);
2525 #endif /* just one plane */
2526 #endif /* Cache is large */
2527 #endif /* Cache exists */
2528 #endif /* CLEAR_INSN_CACHE */
2531 #endif /* L_clear_cache */
2535 /* Jump to a trampoline, loading the static chain address. */
2537 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
2550 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
/* Windows-only emulation of POSIX mprotect in terms of VirtualProtect;
   used so trampoline pages can be made executable.  NOTE(review):
   excerpted listing -- the protection-flag mapping lines are elided.  */
2554 mprotect (char *addr, int len, int prot)
2571 if (VirtualProtect (addr, len, np, &op))
2577 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2579 #ifdef TRANSFER_FROM_TRAMPOLINE
2580 TRANSFER_FROM_TRAMPOLINE
2583 #if defined (NeXT) && defined (__MACH__)
2585 /* Make stack executable so we can call trampolines on stack.
2586 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2590 #include <mach/mach.h>
/* NeXT/Mach variant: mark TRAMPOLINE_SIZE bytes at ADDR executable with
   vm_protect, then flush the i-cache over that range so a trampoline
   written there can be called.  NOTE(review): excerpted listing -- some
   source lines are elided.  */
2594 __enable_execute_stack (char *addr)
2597 char *eaddr = addr + TRAMPOLINE_SIZE;
2598 vm_address_t a = (vm_address_t) addr;
2600 /* turn on execute access on stack */
2601 r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
2602 if (r != KERN_SUCCESS)
2604 mach_error("vm_protect VM_PROT_ALL", r);
2608 /* We inline the i-cache invalidation for speed */
2610 #ifdef CLEAR_INSN_CACHE
2611 CLEAR_INSN_CACHE (addr, eaddr);
2613 __clear_cache ((int) addr, (int) eaddr);
2617 #endif /* defined (NeXT) && defined (__MACH__) */
2621 /* Make stack executable so we can call trampolines on stack.
2622 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2624 #include <sys/mman.h>
2625 #include <sys/vmparam.h>
2626 #include <machine/machparam.h>
/* Convex variant: extend the executable stack region downward with
   mremap whenever the current frame is below the lowest page mapped so
   far.  NOTE(review): excerpted listing -- some source lines are
   elided; FP is presumably a local whose address marks the current
   stack position -- confirm against the full source.  */
2629 __enable_execute_stack (void)
2632 static unsigned lowest = USRSTACK;
/* Round the current stack address down to a page boundary (NBPG).  */
2633 unsigned current = (unsigned) &fp & -NBPG;
2635 if (lowest > current)
2637 unsigned len = lowest - current;
2638 mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);
2642 /* Clear instruction cache in case an old trampoline is in it. */
2645 #endif /* __convex__ */
2649 /* Modified from the convex -code above. */
2651 #include <sys/param.h>
2653 #include <sys/m88kbcs.h>
/* sysV88 variant (modeled on the convex code): use memctl/MCT_TEXT to
   make the stack pages below the previous low-water mark executable.
   NOTE(review): excerpted listing -- some source lines are elided.  */
2656 __enable_execute_stack (void)
2659 static unsigned long lowest = USRSTACK;
/* Page-align (NBPC) the address of a local to locate the stack.  */
2660 unsigned long current = (unsigned long) &save_errno & -NBPC;
2662 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2663 address is seen as 'negative'. That is the case with the stack. */
2666 if (lowest > current)
2668 unsigned len=lowest-current;
2669 memctl(current,len,MCT_TEXT);
/* Already below the mark: refresh just the current page.  */
2673 memctl(current,NBPC,MCT_TEXT);
2677 #endif /* __sysV88__ */
2681 #include <sys/signal.h>
2684 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2685 so define it here, because we need it in __clear_insn_cache below */
2686 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2687 hence we enable this stuff only if MCT_TEXT is #define'd. */
2702 /* Clear instruction cache so we can call trampolines on stack.
2703 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
/* sysV68 variant: flush the whole instruction cache via memctl
   (MCT_TEXT clears it entirely, so any address/length works).
   NOTE(review): excerpted listing -- some source lines are elided.  */
2706 __clear_insn_cache (void)
2711 /* Preserve errno, because users would be surprised to have
2712 errno changing without explicitly calling any system-call. */
2715 /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
2716 No need to use an address derived from _start or %sp, as 0 works also. */
2717 memctl(0, 4096, MCT_TEXT);
2722 #endif /* __sysV68__ */
2726 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2728 #include <sys/mman.h>
2729 #include <sys/types.h>
2730 #include <sys/param.h>
2731 #include <sys/vmmac.h>
2733 /* Modified from the convex -code above.
2734 mremap promises to clear the i-cache. */
/* Pyramid variant: mprotect the page containing the current frame as
   read/write/execute (mremap on this platform is documented above to
   also clear the i-cache).  NOTE(review): excerpted listing -- some
   source lines are elided.  */
2737 __enable_execute_stack (void)
2740 if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
2741 PROT_READ|PROT_WRITE|PROT_EXEC))
2743 perror ("mprotect in __enable_execute_stack");
2748 #endif /* __pyr__ */
2750 #if defined (sony_news) && defined (SYSTYPE_BSD)
2753 #include <sys/types.h>
2754 #include <sys/param.h>
2755 #include <syscall.h>
2756 #include <machine/sysnews.h>
2758 /* cacheflush function for NEWS-OS 4.2.
2759 This function is called from trampoline-initialize code
2760 defined in config/mips/mips.h. */
/* NEWS-OS 4.2 cacheflush, invoked by the MIPS trampoline setup: flush
   SIZE bytes at BEG via the sysnews syscall; FLAG is accepted but the
   call always flushes the B-cache.  */
2763 cacheflush (char *beg, int size, int flag)
2765 if (syscall (SYS_sysnews, NEWS_CACHEFLUSH, beg, size, FLUSH_BCACHE))
2767 perror ("cache_flush");
2773 #endif /* sony_news */
2774 #endif /* L_trampoline */
2779 #include "gbl-ctors.h"
2780 /* Some systems use __main in a way incompatible with its use in gcc, in these
2781 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2782 give the same symbol without quotes for an alternative entry point. You
2783 must define both, or neither. */
2785 #define NAME__MAIN "__main"
2786 #define SYMBOL__MAIN __main
2789 #ifdef INIT_SECTION_ASM_OP
2790 #undef HAS_INIT_SECTION
2791 #define HAS_INIT_SECTION
2794 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2796 /* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
2797 code to run constructors. In that case, we need to handle EH here, too. */
2799 #ifdef EH_FRAME_SECTION
2801 extern unsigned char __EH_FRAME_BEGIN__[];
2804 /* Run all the global destructors on exit from the program. */
/* Run all global destructors at program exit: either the target's
   DO_GLOBAL_DTORS_BODY, or a walk of __DTOR_LIST__ (skipping its count
   word at index 0).  Also deregisters EH frame info once when that is
   handled here.  NOTE(review): excerpted listing -- some source lines
   are elided.  */
2807 __do_global_dtors (void)
2809 #ifdef DO_GLOBAL_DTORS_BODY
2810 DO_GLOBAL_DTORS_BODY;
2812 static func_ptr *p = __DTOR_LIST__ + 1;
2819 #if defined (EH_FRAME_SECTION) && !defined (HAS_INIT_SECTION)
/* Guard so the EH frame info is deregistered only once.  */
2821 static int completed = 0;
2825 __deregister_frame_info (__EH_FRAME_BEGIN__);
2832 #ifndef HAS_INIT_SECTION
2833 /* Run all the global constructors on entry to the program. */
/* Run all global constructors at program start: register this module's
   EH frame info first (when built with EH_FRAME_SECTION), execute the
   target's DO_GLOBAL_CTORS_BODY, then queue __do_global_dtors to run at
   exit.  */
2836 __do_global_ctors (void)
2838 #ifdef EH_FRAME_SECTION
2840 static struct object object;
2841 __register_frame_info (__EH_FRAME_BEGIN__, &object);
2844 DO_GLOBAL_CTORS_BODY;
2845 atexit (__do_global_dtors);
2847 #endif /* no HAS_INIT_SECTION */
2849 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
2850 /* Subroutine called automatically by `main'.
2851 Compiling a global function named `main'
2852 produces an automatic call to this function at the beginning.
2854 For many systems, this routine calls __do_global_ctors.
2855 For systems which support a .init section we use the .init section
2856 to run __do_global_ctors, so we need not do anything here. */
2861 /* Support recursive calls to `main': run initializers just once. */
2862 static int initialized;
2866 __do_global_ctors ();
2869 #endif /* no HAS_INIT_SECTION or INVOKE__main */
2871 #endif /* L__main */
2872 #endif /* __CYGWIN__ */
2876 #include "gbl-ctors.h"
2878 /* Provide default definitions for the lists of constructors and
2879 destructors, so that we don't get linker errors. These symbols are
2880 intentionally bss symbols, so that gld and/or collect will provide
2881 the right values. */
2883 /* We declare the lists here with two elements each,
2884 so that they are valid empty lists if no other definition is loaded.
2886 If we are using the old "set" extensions to have the gnu linker
2887 collect ctors and dtors, then we __CTOR_LIST__ and __DTOR_LIST__
2888 must be in the bss/common section.
2890 Long term no port should use those extensions. But many still do. */
2891 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
2892 #if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
/* Default two-element ctor/dtor lists so programs link even when no
   other definition is provided; zero-initialized (or bss, in the second
   branch) so the linker/collect2 can supply real values.  */
2893 func_ptr __CTOR_LIST__[2] = {0, 0};
2894 func_ptr __DTOR_LIST__[2] = {0, 0};
2896 func_ptr __CTOR_LIST__[2];
2897 func_ptr __DTOR_LIST__[2];
2899 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
2900 #endif /* L_ctors */
2904 #include "gbl-ctors.h"
/* Fallback atexit for systems lacking one: keep registered functions in
   a heap-grown array (32 slots at a time); exit() runs them in reverse
   order.  NOTE(review): excerpted listing -- some source lines are
   elided.  */
2912 static func_ptr *atexit_chain = 0;
2913 static long atexit_chain_length = 0;
2914 static volatile long last_atexit_chain_slot = -1;
2917 atexit (func_ptr func)
/* Grow the array when the next slot would run past the end.  */
2919 if (++last_atexit_chain_slot == atexit_chain_length)
2921 atexit_chain_length += 32;
2923 atexit_chain = (func_ptr *) realloc (atexit_chain, atexit_chain_length
2924 * sizeof (func_ptr));
2926 atexit_chain = (func_ptr *) malloc (atexit_chain_length
2927 * sizeof (func_ptr));
/* Allocation failed: roll the bookkeeping back before reporting.  */
2930 atexit_chain_length = 0;
2931 last_atexit_chain_slot = -1;
2936 atexit_chain[last_atexit_chain_slot] = func;
/* Companion exit(): run the registered atexit handlers newest-first,
   clearing each slot after the call, then free the chain.
   NOTE(review): the function's signature line is elided from this
   excerpt -- confirm against the full source.  */
2940 extern void _cleanup (void);
2941 extern void _exit (int) __attribute__ ((__noreturn__));
2948 for ( ; last_atexit_chain_slot-- >= 0; )
2950 (*atexit_chain[last_atexit_chain_slot + 1]) ();
2951 atexit_chain[last_atexit_chain_slot + 1] = 0;
2953 free (atexit_chain);
2966 /* Simple; we just need a wrapper for ON_EXIT. */
/* Trivial atexit for systems that provide ON_EXIT instead.  */
2968 atexit (func_ptr func)
2970 return ON_EXIT (func);
2973 #endif /* ON_EXIT */
2974 #endif /* NEED_ATEXIT */
2982 /* Shared exception handling support routines. */
/* Default terminate handler plus the indirection used by the EH
   runtime: __terminate jumps through the replaceable __terminate_func
   pointer, which defaults to __default_terminate.  */
2985 __default_terminate (void)
2990 void (*__terminate_func)(void) __attribute__ ((__noreturn__)) =
2991 __default_terminate;
2993 void __attribute__((__noreturn__))
2996 (*__terminate_func)();
/* Debug-oriented type matcher: two exception types "match" when their
   name strings compare equal.  NOTE(review): excerpted listing -- the
   return lines are elided.  */
3000 __throw_type_match (void *catch_type, void *throw_type, void *obj)
3003 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
3004 catch_type, throw_type);
3006 if (strcmp ((const char *)catch_type, (const char *)throw_type) == 0)
3017 /* Include definitions of EH context and table layout */
3019 #include "eh-common.h"
3020 #ifndef inhibit_libc
3024 /* Allocate and return a new EH context structure. */
/* Allocate and zero a fresh per-thread EH context; the dynamic handler
   chain is seeded from the trailing top_elt storage.  NOTE(review):
   excerpted listing -- some source lines are elided.  */
3028 new_eh_context (void)
3030 struct eh_full_context {
3031 struct eh_context c;
3033 } *ehfc = (struct eh_full_context *) malloc (sizeof *ehfc);
3038 memset (ehfc, 0, sizeof *ehfc);
3040 ehfc->c.dynamic_handler_chain = (void **) ehfc->top_elt;
3042 /* This should optimize out entirely. This should always be true,
3043 but just in case it ever isn't, don't allow bogus code to be
/* Sanity check: the embedded eh_context must sit at offset 0 so the
   outer pointer can be used interchangeably.  */
3046 if ((void*)(&ehfc->c) != (void*)ehfc)
/* Thread-local storage key for the EH context, and its destructor
   callback, which forwards to the gthreads key-destructor hook.  */
3052 static __gthread_key_t eh_context_key;
3054 /* Destructor for struct eh_context. */
3056 eh_context_free (void *ptr)
3058 __gthread_key_dtor (eh_context_key, ptr);
3064 /* Pointer to function to return EH context. */
/* EH-context accessors.  get_eh_context starts out pointing at the
   lazy initializer and is later switched to the static or
   thread-specific variant; the __get_* entry points simply indirect
   through it.  */
3066 static struct eh_context *eh_context_initialize (void);
3067 static struct eh_context *eh_context_static (void);
3069 static struct eh_context *eh_context_specific (void);
3072 static struct eh_context *(*get_eh_context) (void) = &eh_context_initialize;
3074 /* Routine to get EH context.
3075 This one will simply call the function pointer. */
3078 __get_eh_context (void)
3080 return (void *) (*get_eh_context) ();
3083 /* Get and set the language specific info pointer. */
3086 __get_eh_info (void)
3088 struct eh_context *eh = (*get_eh_context) ();
3092 #ifdef DWARF2_UNWIND_INFO
/* One-time setup helpers: fill the DWARF register-size table, and (with
   threads) try to create the TLS key for per-thread EH contexts,
   falling back to the single static context if key creation fails.  */
3093 static int dwarf_reg_size_table_initialized = 0;
3094 static char dwarf_reg_size_table[DWARF_FRAME_REGISTERS];
3097 init_reg_size_table (void)
3099 __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table);
3100 dwarf_reg_size_table_initialized = 1;
3106 eh_threads_initialize (void)
3108 /* Try to create the key. If it fails, revert to static method,
3109 otherwise start using thread specific EH contexts. */
3110 if (__gthread_key_create (&eh_context_key, &eh_context_free) == 0)
3111 get_eh_context = &eh_context_specific;
3113 get_eh_context = &eh_context_static;
3115 #endif /* no __GTHREADS */
3117 /* Initialize EH context.
3118 This will be called only once, since we change GET_EH_CONTEXT
3119 pointer to another routine. */
/* First call through get_eh_context: perform one-time initialization
   (thread key and, with DWARF2, the register-size table), repoint
   get_eh_context at the real accessor, and tail-call it.  Defensive
   against dummy libc thread stubs that report success without doing
   anything.  NOTE(review): excerpted listing -- some source lines are
   elided.  */
3121 static struct eh_context *
3122 eh_context_initialize (void)
3126 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
3127 /* Make sure that get_eh_context does not point to us anymore.
3128 Some systems have dummy thread routines in their libc that
3129 return a success (Solaris 2.6 for example). */
3130 if (__gthread_once (&once, eh_threads_initialize) != 0
3131 || get_eh_context == &eh_context_initialize)
3133 /* Use static version of EH context. */
3134 get_eh_context = &eh_context_static;
3136 #ifdef DWARF2_UNWIND_INFO
3138 static __gthread_once_t once_regsizes = __GTHREAD_ONCE_INIT;
3139 if (__gthread_once (&once_regsizes, init_reg_size_table) != 0
3140 || ! dwarf_reg_size_table_initialized)
3141 init_reg_size_table ();
3145 #else /* no __GTHREADS */
3147 /* Use static version of EH context. */
3148 get_eh_context = &eh_context_static;
3150 #ifdef DWARF2_UNWIND_INFO
3151 init_reg_size_table ();
3154 #endif /* no __GTHREADS */
/* Dispatch through whichever accessor was just installed.  */
3156 return (*get_eh_context) ();
3159 /* Return a static EH context. */
/* Single shared EH context for non-threaded (or fallback) use; zeroed
   once and its handler chain pointed at a static two-word top element.
   NOTE(review): excerpted listing -- some source lines are elided.  */
3161 static struct eh_context *
3162 eh_context_static (void)
3164 static struct eh_context eh;
3165 static int initialized;
3166 static void *top_elt[2];
3171 memset (&eh, 0, sizeof eh);
3172 eh.dynamic_handler_chain = top_elt;
3178 /* Return a thread specific EH context. */
/* Per-thread EH context: fetch from thread-specific storage, allocating
   and storing a new context on first use in each thread.  NOTE(review):
   excerpted listing -- some source lines are elided.  */
3180 static struct eh_context *
3181 eh_context_specific (void)
3183 struct eh_context *eh;
3184 eh = (struct eh_context *) __gthread_getspecific (eh_context_key);
3187 eh = new_eh_context ();
3188 if (__gthread_setspecific (eh_context_key, (void *) eh) != 0)
3194 #endif /* __GTHREADS */
3196 /* Support routines for alloc/free during exception handling */
3198 /* __eh_alloc and __eh_free attempt allocation using malloc, but fall back to
3199 the small arena in the eh_context. This is needed because throwing an
3200 out-of-memory exception would fail otherwise. The emergency space is
3201 allocated in blocks of size EH_ALLOC_ALIGN, the
3202 minimum allocation being two blocks. A bitmask indicates which blocks
3203 have been allocated. To indicate the size of an allocation, the bit for
3204 the final block is not set. Hence each allocation is a run of 1s followed
/* EH-safe allocator: per the file's description above, tries malloc
   first and falls back to the small emergency arena in the EH context
   (blocks of EH_ALLOC_ALIGN, minimum two; a run of 1-bits with the
   final block's bit clear marks each allocation in alloc_mask).
   NOTE(review): excerpted listing -- some source lines are elided.  */
3207 __eh_alloc (size_t size)
3216 struct eh_context *eh = __get_eh_context ();
3217 unsigned blocks = (size + EH_ALLOC_ALIGN - 1) / EH_ALLOC_ALIGN;
/* real_mask ORs each allocation's bits with its neighbor so runs of
   adjacent allocations appear fully occupied.  */
3218 unsigned real_mask = eh->alloc_mask | (eh->alloc_mask << 1);
3222 if (blocks > EH_ALLOC_SIZE / EH_ALLOC_ALIGN)
/* Enforce the two-block minimum allocation.  */
3224 blocks += blocks == 1;
3225 our_mask = (1 << blocks) - 1;
/* Scan from the top of the arena for a free run of BLOCKS blocks.  */
3227 for (ix = EH_ALLOC_SIZE / EH_ALLOC_ALIGN - blocks; ix; ix--)
3228 if (! ((real_mask >> ix) & our_mask))
3230 /* found some space */
3231 p = &eh->alloc_buffer[ix * EH_ALLOC_ALIGN];
/* Record the run; shifting our_mask right leaves the final
   block's bit clear to mark the allocation's end.  */
3232 eh->alloc_mask |= (our_mask >> 1) << ix;
3240 /* Free the memory for an cp_eh_info and associated exception, given
3241 a pointer to the cp_eh_info. */
/* Release memory obtained from __eh_alloc: pointers inside the EH
   context's emergency arena have their mask bits cleared; anything else
   presumably came from malloc.  NOTE(review): the function's signature
   line is elided from this excerpt -- confirm against the full
   source.  */
3245 struct eh_context *eh = __get_eh_context ();
3247 ptrdiff_t diff = (char *)p - &eh->alloc_buffer[0];
3248 if (diff >= 0 && diff < EH_ALLOC_SIZE)
3250 unsigned mask = eh->alloc_mask;
3251 unsigned bit = 1 << (diff / EH_ALLOC_ALIGN);
3259 eh->alloc_mask = mask;
3265 /* Support routines for setjmp/longjmp exception handling. */
3267 /* Calls to __sjthrow are generated by the compiler when an exception
3268 is raised when using the setjmp/longjmp exception handling codegen
3271 #ifdef DONT_USE_BUILTIN_SETJMP
3272 extern void longjmp (void *, int);
3275 /* Routine to get the head of the current thread's dynamic handler chain
3276 use for exception handling. */
/* Return the address of the current thread's dynamic handler chain
   head, used by the setjmp/longjmp EH codegen.  */
3279 __get_dynamic_handler_chain (void)
3281 struct eh_context *eh = (*get_eh_context) ();
3282 return &eh->dynamic_handler_chain;
3285 /* This is used to throw an exception when the setjmp/longjmp codegen
3286 method is used for exception handling.
3288 We call __terminate if there are no handlers left. Otherwise we run the
3289 cleanup actions off the dynamic cleanup stack, and pop the top of the
3290 dynamic handler chain, and use longjmp to transfer back to the associated
/* Throw for setjmp/longjmp EH (per the comment above): run the dynamic
   cleanup stack of the top handler, __terminate if no handler or no
   active exception remains, otherwise pop the handler chain and longjmp
   into the handler's jmpbuf.  NOTE(review): the function's signature
   line is elided from this excerpt -- confirm against the full
   source.  */
3296 struct eh_context *eh = (*get_eh_context) ();
3297 void ***dhc = &eh->dynamic_handler_chain;
3299 void (*func)(void *, int);
3301 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3302 void ***cleanup = (void***)&(*dhc)[1];
3304 /* If there are any cleanups in the chain, run them now. */
3308 void **buf = (void**)store;
/* Anchor a setjmp here so cleanups that throw return to this loop.  */
3313 #ifdef DONT_USE_BUILTIN_SETJMP
3314 if (! setjmp (&buf[2]))
3316 if (! __builtin_setjmp (&buf[2]))
/* Each cleanup record holds [next, func, arg].  */
3322 func = (void(*)(void*, int))cleanup[0][1];
3323 arg = (void*)cleanup[0][2];
3325 /* Update this before running the cleanup. */
3326 cleanup[0] = (void **)cleanup[0][0];
3339 /* We must call terminate if we try and rethrow an exception, when
3340 there is no exception currently active and when there are no
3342 if (! eh->info || (*dhc)[0] == 0)
3345 /* Find the jmpbuf associated with the top element of the dynamic
3346 handler chain. The jumpbuf starts two words into the buffer. */
3347 jmpbuf = &(*dhc)[2];
3349 /* Then we pop the top element off the dynamic handler chain. */
3350 *dhc = (void**)(*dhc)[0];
3352 /* And then we jump to the handler. */
3354 #ifdef DONT_USE_BUILTIN_SETJMP
3355 longjmp (jmpbuf, 1);
3357 __builtin_longjmp (jmpbuf, 1);
3361 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3362 handler, then pop the handler off the dynamic handler stack, and
3363 then throw. This is used to skip the first handler, and transfer
3364 control to the next handler in the dynamic handler stack. */
/* Run the top handler's dynamic cleanups, pop that handler, and (per
   the comment above) rethrow to the next handler -- used to skip the
   first handler on the dynamic chain.  NOTE(review): excerpted listing
   -- some source lines are elided.  */
3367 __sjpopnthrow (void)
3369 struct eh_context *eh = (*get_eh_context) ();
3370 void ***dhc = &eh->dynamic_handler_chain;
3371 void (*func)(void *, int);
3373 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3374 void ***cleanup = (void***)&(*dhc)[1];
3376 /* If there are any cleanups in the chain, run them now. */
3380 void **buf = (void**)store;
/* Anchor a setjmp so a cleanup that throws re-enters this loop.  */
3385 #ifdef DONT_USE_BUILTIN_SETJMP
3386 if (! setjmp (&buf[2]))
3388 if (! __builtin_setjmp (&buf[2]))
/* Each cleanup record holds [next, func, arg].  */
3394 func = (void(*)(void*, int))cleanup[0][1];
3395 arg = (void*)cleanup[0][2];
3397 /* Update this before running the cleanup. */
3398 cleanup[0] = (void **)cleanup[0][0];
3411 /* Then we pop the top element off the dynamic handler chain. */
3412 *dhc = (void**)(*dhc)[0];
3417 /* Support code for all exception region-based exception handling. */
/* Runtime type check for region-based EH: ask the current exception's
   match_function whether RTIME matches; a missing matcher is an
   internal error.  NOTE(review): excerpted listing -- some source lines
   are elided.  */
3420 __eh_rtime_match (void *rtime)
3423 __eh_matcher matcher;
3426 info = *(__get_eh_info ());
3427 matcher = ((__eh_info *)info)->match_function;
3430 #ifndef inhibit_libc
3431 fprintf (stderr, "Internal Compiler Bug: No runtime type matcher.");
3435 ret = (*matcher) (info, rtime, (void *)0);
3436 return (ret != NULL);
3439 /* This value identifies the place from which an exception is being
3442 #ifdef EH_TABLE_LOOKUP
3448 #ifdef DWARF2_UNWIND_INFO
3450 /* Return the table version of an exception descriptor */
/* Accessors for the version and source-language fields of an exception
   descriptor's table header.  */
3453 __get_eh_table_version (exception_descriptor *table)
3455 return table->lang.version;
3458 /* Return the originating table language of an exception descriptor */
3461 __get_eh_table_language (exception_descriptor *table)
3463 return table->lang.language;
3466 /* This routine takes a PC and a pointer to the exception region TABLE for
3467 its translation unit, and returns the address of the exception handler
3468 associated with the closest exception table handler entry associated
3469 with that PC, or 0 if there are no table entries the PC fits in.
3471 In the advent of a tie, we have to give the last entry, as it represents
/* Old-format table lookup (contract described in the comment above):
   linear scan for the innermost region containing PC; on a tie the
   later entry wins.  NOTE(review): excerpted listing -- some source
   lines are elided.  */
3475 old_find_exception_handler (void *pc, old_exception_table *table)
3482 /* We can't do a binary search because the table isn't guaranteed
3483 to be sorted from function to function. */
/* A start_region of -1 terminates the table.  */
3484 for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
3486 if (table[pos].start_region <= pc && table[pos].end_region > pc)
3488 /* This can apply. Make sure it is at least as small as
3489 the previous best. */
3490 if (best == -1 || (table[pos].end_region <= table[best].end_region
3491 && table[pos].start_region >= table[best].start_region))
3494 /* But it is sorted by starting PC within a function. */
3495 else if (best >= 0 && table[pos].start_region > pc)
3499 return table[best].exception_handler;
3505 /* find_exception_handler finds the correct handler, if there is one, to
3506 handle an exception.
3507 returns a pointer to the handler which controlled should be transferred
3508 to, or NULL if there is nothing left.
3510 PC - pc where the exception originates. If this is a rethrow,
3511 then this starts out as a pointer to the exception table
3512 entry we wish to rethrow out of.
3513 TABLE - exception table for the current module.
3514 EH_INFO - eh info pointer for this exception.
3515 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3516 CLEANUP - returned flag indicating whether this is a cleanup handler.
/* New-format handler lookup (parameters documented in the comment
   above): the table is sorted innermost-out, so the first region
   containing PC that passes the type match wins; entries without
   match_info are cleanup handlers.  NOTE(review): excerpted listing --
   some source lines are elided.  */
3519 find_exception_handler (void *pc, exception_descriptor *table,
3520 __eh_info *eh_info, int rethrow, int *cleanup)
3523 void *retval = NULL;
3528 /* The new model assumed the table is sorted inner-most out so the
3529 first region we find which matches is the correct one */
3531 exception_table *tab = &(table->table[0]);
3533 /* Subtract 1 from the PC to avoid hitting the next region */
3536 /* pc is actually the region table entry to rethrow out of */
3537 pos = ((exception_table *) pc) - tab;
3538 pc = ((exception_table *) pc)->end_region - 1;
3540 /* The label is always on the LAST handler entry for a region,
3541 so we know the next entry is a different region, even if the
3542 addresses are the same. Make sure its not end of table tho. */
3543 if (tab[pos].start_region != (void *) -1)
3549 /* We can't do a binary search because the table is in inner-most
3550 to outermost address ranges within functions */
3551 for ( ; tab[pos].start_region != (void *) -1; pos++)
3553 if (tab[pos].start_region <= pc && tab[pos].end_region > pc)
3555 if (tab[pos].match_info)
3557 __eh_matcher matcher = eh_info->match_function;
3558 /* match info but no matcher is NOT a match */
3561 void *ret = (*matcher)((void *) eh_info,
3562 tab[pos].match_info, table);
3566 retval = tab[pos].exception_handler;
/* No match_info: this is a cleanup region.  */
3575 retval = tab[pos].exception_handler;
3582 #endif /* DWARF2_UNWIND_INFO */
3583 #endif /* EH_TABLE_LOOKUP */
3585 #ifdef DWARF2_UNWIND_INFO
3586 /* Support code for exception handling using static unwind information. */
/* NOTE(review): elided listing -- the comment below is cut off mid-sentence
   in the original numbering; the typedef it documents is intact.  */
3590 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3591 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3592 avoid a warning about casting between int and pointer of different
3595 typedef int ptr_type __attribute__ ((mode (pointer)));
3597 #ifdef INCOMING_REGNO
3598 /* Is the saved value for register REG in frame UDATA stored in a register
3599 window in the previous frame? */
3601 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3602 to use the macro here. One wonders, though, that perhaps TARGET_FLAT
3603 compiled functions won't work with the frame-unwind stuff here.
3604 Perhaps the entirety of in_reg_window should be conditional on having
3605 seen a DW_CFA_GNU_window_save? */
/* Dummy definition so INCOMING_REGNO (which may reference target_flags)
   can be expanded in this target-runtime file.  */
3606 #define target_flags 0
/* NOTE(review): elided listing -- the return type line and several braces
   of both variants are missing from view.  */
/* in_reg_window: true when register REG's saved value lives in a register
   window of the previous frame (SPARC-style).  The second, stub variant
   below is the non-INCOMING_REGNO fallback.  */
3609 in_reg_window (int reg, frame_state *udata)
3611 if (udata->saved[reg] == REG_SAVED_REG)
3612 return INCOMING_REGNO (reg) == reg;
3613 if (udata->saved[reg] != REG_SAVED_OFFSET)
/* Window saves sit above (below, if the stack grows up) the CFA, hence
   the sign test on the offset.  */
3616 #ifdef STACK_GROWS_DOWNWARD
3617 return udata->reg_or_offset[reg] > 0;
3619 return udata->reg_or_offset[reg] < 0;
3624 in_reg_window (int reg __attribute__ ((__unused__)),
3625 frame_state *udata __attribute__ ((__unused__)))
3629 #endif /* INCOMING_REGNO */
3631 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3632 frame called by UDATA or 0. */
/* NOTE(review): elided listing -- the return-type line, the body of the
   register-window branch, and the failure path are missing from view.  */
3635 get_reg_addr (unsigned reg, frame_state *udata, frame_state *sub_udata)
/* Chase REG_SAVED_REG indirections: the value was saved into another
   register, so follow that register instead.  */
3637 while (udata->saved[reg] == REG_SAVED_REG)
3639 reg = udata->reg_or_offset[reg];
3640 if (in_reg_window (reg, udata))
/* REG_SAVED_OFFSET: the slot is at a fixed offset from the frame's CFA. */
3646 if (udata->saved[reg] == REG_SAVED_OFFSET)
3647 return (word_type *)(udata->cfa + udata->reg_or_offset[reg]);
3652 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3653 frame called by UDATA or 0. */
/* The double cast through ptr_type handles ABIs where void* is narrower
   than a word (see the ptr_type typedef above).  */
3655 static inline void *
3656 get_reg (unsigned reg, frame_state *udata, frame_state *sub_udata)
3658 return (void *)(ptr_type) *get_reg_addr (reg, udata, sub_udata);
3661 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
/* NOTE(review): return-type line elided from this listing.  */
3664 put_reg (unsigned reg, void *val, frame_state *udata)
3666 *get_reg_addr (reg, udata, NULL) = (word_type)(ptr_type) val;
3669 /* Copy the saved value for register REG from frame UDATA to frame
3670 TARGET_UDATA. Unlike the previous two functions, this can handle
3671 registers that are not one word large. */
3674 copy_reg (unsigned reg, frame_state *udata, frame_state *target_udata)
3676 word_type *preg = get_reg_addr (reg, udata, NULL);
3677 word_type *ptreg = get_reg_addr (reg, target_udata, NULL);
/* dwarf_reg_size_table gives the true size of each DWARF register, so
   multi-word registers are copied whole.  */
3679 memcpy (ptreg, preg, dwarf_reg_size_table [reg]);
3682 /* Retrieve the return address for frame UDATA. */
/* __builtin_extract_return_addr undoes any target-specific encoding of
   the stored return address (e.g. masking/offsetting).  */
3684 static inline void *
3685 get_return_addr (frame_state *udata, frame_state *sub_udata)
3687 return __builtin_extract_return_addr
3688 (get_reg (udata->retaddr_column, udata, sub_udata));
3691 /* Overwrite the return address for frame UDATA with VAL. */
/* NOTE(review): return-type line elided from this listing.
   __builtin_frob_return_addr is the inverse of extract_return_addr:
   it re-applies the target's stored-return-address encoding.  */
3694 put_return_addr (void *val, frame_state *udata)
3696 val = __builtin_frob_return_addr (val);
3697 put_reg (udata->retaddr_column, val, udata);
3700 /* Given the current frame UDATA and its return address PC, return the
3701 information about the calling frame in CALLER_UDATA. */
/* NOTE(review): elided listing -- the return-type line, a failure check
   after __frame_state_for, and an else keyword are missing from view.  */
3704 next_stack_level (void *pc, frame_state *udata, frame_state *caller_udata)
/* Look up the caller's unwind state from the static frame tables.  */
3706 caller_udata = __frame_state_for (pc, caller_udata);
3710 /* Now go back to our caller's stack frame. If our caller's CFA register
3711 was saved in our stack frame, restore it; otherwise, assume the CFA
3712 register is SP and restore it to our CFA value. */
3713 if (udata->saved[caller_udata->cfa_reg])
3714 caller_udata->cfa = get_reg (caller_udata->cfa_reg, udata, 0);
3716 caller_udata->cfa = udata->cfa;
/* Some ABIs store the CFA indirectly; dereference with base_offset.  */
3717 if (caller_udata->indirect)
3718 caller_udata->cfa = * (void **) ((unsigned char *)caller_udata->cfa
3719 + caller_udata->base_offset);
3720 caller_udata->cfa += caller_udata->cfa_offset;
3722 return caller_udata;
3725 /* Hook to call before __terminate if only cleanup handlers remain. */
/* Deliberately empty: exists solely as a breakpoint target so a debugger
   can observe the throw state before cleanups run (see throw_helper).  */
3727 __unwinding_cleanup (void)
/* NOTE(review): elided listing -- a substantial number of lines of this
   function (braces, the frame-search failure handling, the save/restore
   of the cleanup search state, the no-unwind early exit) are missing
   from view.  Comments below are limited to what the visible lines show.  */
3731 /* throw_helper performs some of the common grunt work for a throw. This
3732 routine is called by throw and rethrows. This is pretty much split
3733 out from the old __throw routine. An addition has been added which allows
3734 for a dummy call to a routine __unwinding_cleanup() when there are nothing
3735 but cleanups remaining. This allows a debugger to examine the state
3736 at which the throw was executed, before any cleanups, rather than
3737 at the terminate point after the stack has been unwound.
3739 EH is the current eh_context structure.
3740 PC is the address of the call to __throw.
3741 MY_UDATA is the unwind information for __throw.
3742 OFFSET_P is where we return the SP adjustment offset. */
3745 throw_helper (struct eh_context *eh, void *pc, frame_state *my_udata,
3748 frame_state ustruct2, *udata = &ustruct2;
3749 frame_state ustruct;
3750 frame_state *sub_udata = &ustruct;
3751 void *saved_pc = pc;
3753 void *handler_p = 0;
3755 frame_state saved_ustruct;
3758 int only_cleanup = 0;
3760 int saved_state = 0;
3762 __eh_info *eh_info = (__eh_info *)eh->info;
/* Phase 1: walk up the stack looking for a handler for this exception.  */
3764 /* Do we find a handler based on a re-throw PC? */
3765 if (eh->table_index != (void *) 0)
3768 memcpy (udata, my_udata, sizeof (*udata));
3770 handler = (void *) 0;
3773 frame_state *p = udata;
3774 udata = next_stack_level (pc, udata, sub_udata);
3777 /* If we couldn't find the next frame, we lose. */
/* A frame without an eh_ptr has no exception table; keep unwinding.  */
3781 if (udata->eh_ptr == NULL)
/* Distinguish old- and new-model tables by the runtime id field.  */
3784 new_eh_model = (((exception_descriptor *)(udata->eh_ptr))->
3785 runtime_id_field == NEW_EH_RUNTIME);
3790 handler = find_exception_handler (eh->table_index, udata->eh_ptr,
3791 eh_info, 1, &cleanup);
3792 eh->table_index = (void *)0;
3796 handler = find_exception_handler (pc, udata->eh_ptr, eh_info,
3799 handler = old_find_exception_handler (pc, udata->eh_ptr);
3801 /* If we found one, we can stop searching, if its not a cleanup.
3802 for cleanups, we save the state, and keep looking. This allows
3803 us to call a debug hook if there are nothing but cleanups left. */
3810 saved_ustruct = *udata;
3811 handler_p = handler;
3824 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
3825 hitting the beginning of the next region. */
3826 pc = get_return_addr (udata, sub_udata) - 1;
/* Only cleanups were found: restore the saved cleanup state and give
   the debugger its hook before unwinding.  */
3831 udata = &saved_ustruct;
3832 handler = handler_p;
3835 __unwinding_cleanup ();
3838 /* If we haven't found a handler by now, this is an unhandled
3843 eh->handler_label = handler;
3845 args_size = udata->args_size;
3848 /* We found a handler in the throw context, no need to unwind. */
/* Phase 2: unwind.  Merge every intermediate frame's saved registers
   into __throw's own save slots so the eh_return lands in the handler
   frame with the right register contents.  */
3854 /* Unwind all the frames between this one and the handler by copying
3855 their saved register values into our register save slots. */
3857 /* Remember the PC where we found the handler. */
3858 void *handler_pc = pc;
3860 /* Start from the throw context again. */
3862 memcpy (udata, my_udata, sizeof (*udata));
3864 while (pc != handler_pc)
3866 frame_state *p = udata;
3867 udata = next_stack_level (pc, udata, sub_udata);
3870 for (i = 0; i < DWARF_FRAME_REGISTERS; ++i)
3871 if (i != udata->retaddr_column && udata->saved[i])
3873 /* If you modify the saved value of the return address
3874 register on the SPARC, you modify the return address for
3875 your caller's frame. Don't do that here, as it will
3876 confuse get_return_addr. */
3877 if (in_reg_window (i, udata)
3878 && udata->saved[udata->retaddr_column] == REG_SAVED_REG
3879 && udata->reg_or_offset[udata->retaddr_column] == i)
3881 copy_reg (i, udata, my_udata);
3884 pc = get_return_addr (udata, sub_udata) - 1;
3887 /* But we do need to update the saved return address register from
3888 the last frame we unwind, or the handler frame will have the wrong
3890 if (udata->saved[udata->retaddr_column] == REG_SAVED_REG)
3892 i = udata->reg_or_offset[udata->retaddr_column];
3893 if (in_reg_window (i, udata))
3894 copy_reg (i, udata, my_udata);
3897 /* udata now refers to the frame called by the handler frame. */
3899 /* We adjust SP by the difference between __throw's CFA and the CFA for
3900 the frame called by the handler frame, because those CFAs correspond
3901 to the SP values at the two call sites. We need to further adjust by
3902 the args_size of the handler frame itself to get the handler frame's
3903 SP from before the args were pushed for that call. */
3904 #ifdef STACK_GROWS_DOWNWARD
3905 *offset_p = udata->cfa - my_udata->cfa + args_size;
3907 *offset_p = my_udata->cfa - udata->cfa - args_size;
/* NOTE(review): elided listing -- the function signature, the rethrow
   sanity check, the `label:` definition, and the closing lines are
   missing from view.  */
3914 /* We first search for an exception handler, and if we don't find
3915 it, we call __terminate on the current stack frame so that we may
3916 use the debugger to walk the stack and understand why no handler
3919 If we find one, then we unwind the frames down to the one that
3920 has the handler and transfer control into the handler. */
3922 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
3927 struct eh_context *eh = (*get_eh_context) ();
3931 /* XXX maybe make my_ustruct static so we don't have to look it up for
3933 frame_state my_ustruct, *my_udata = &my_ustruct;
3935 /* This is required for C++ semantics. We must call terminate if we
3936 try and rethrow an exception, when there is no exception currently
3941 /* Start at our stack frame. */
/* &&label is a GNU computed-label address used to locate __throw's own
   unwind state.  */
3943 my_udata = __frame_state_for (&&label, my_udata);
3947 /* We need to get the value from the CFA register. */
3948 my_udata->cfa = __builtin_dwarf_cfa ();
3950 /* Do any necessary initialization to access arbitrary stack frames.
3951 On the SPARC, this means flushing the register windows. */
3952 __builtin_unwind_init ();
3954 /* Now reset pc to the right throw point. */
3955 pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3957 handler = throw_helper (eh, pc, my_udata, &offset);
/* Transfer control into the handler frame; does not return here.  */
3961 __builtin_eh_return ((void *)eh, offset, handler);
3963 /* Epilogue: restore the handler frame's register values and return
/* NOTE(review): elided listing -- braces, the rethrow sanity check, and
   the `label:` definition are missing from view.  Structure parallels
   __throw above, with the addition of stashing the rethrow table index.  */
3967 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
3970 __rethrow (void *index)
3972 struct eh_context *eh = (*get_eh_context) ();
3976 /* XXX maybe make my_ustruct static so we don't have to look it up for
3978 frame_state my_ustruct, *my_udata = &my_ustruct;
3980 /* This is required for C++ semantics. We must call terminate if we
3981 try and rethrow an exception, when there is no exception currently
3986 /* This is the table index we want to rethrow from. The value of
3987 the END_REGION label is used for the PC of the throw, and the
3988 search begins with the next table entry. */
3989 eh->table_index = index;
3991 /* Start at our stack frame. */
3993 my_udata = __frame_state_for (&&label, my_udata);
3997 /* We need to get the value from the CFA register. */
3998 my_udata->cfa = __builtin_dwarf_cfa ();
4000 /* Do any necessary initialization to access arbitrary stack frames.
4001 On the SPARC, this means flushing the register windows. */
4002 __builtin_unwind_init ();
4004 /* Now reset pc to the right throw point. */
4005 pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
4007 handler = throw_helper (eh, pc, my_udata, &offset);
/* Transfer control into the handler frame; does not return here.  */
4011 __builtin_eh_return ((void *)eh, offset, handler);
4013 /* Epilogue: restore the handler frame's register values and return
4016 #endif /* DWARF2_UNWIND_INFO */
4018 #ifdef IA64_UNWIND_INFO
/* NOTE(review): elided listing -- the return type, `best` declaration and
   initialization, and most braces are missing from view.  */
4021 /* Return handler to which we want to transfer control, NULL if we don't
4022 intend to handle this exception here. */
4024 __ia64_personality_v1 (void *pc, old_exception_table *table)
/* Linear scan for the smallest (inner-most) region covering PC; entries
   are sorted by starting PC within a function, and start_region == -1
   marks the end of the table.  */
4031 for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
4033 if (table[pos].start_region <= pc && table[pos].end_region > pc)
4035 /* This can apply. Make sure it is at least as small as
4036 the previous best. */
4037 if (best == -1 || (table[pos].end_region <= table[best].end_region
4038 && table[pos].start_region >= table[best].start_region))
4041 /* It is sorted by starting PC within a function. */
4042 else if (best >= 0 && table[pos].start_region > pc)
4046 return table[best].exception_handler;
/* NOTE(review): elided listing -- the return type, several declarations
   (bsp, frame_count, eh_table, pc_base), loop braces, and failure paths
   are missing from view.  Comments are limited to what is visible.  */
/* ia64_throw_helper: IA-64 analogue of throw_helper.  Searches up the
   call stack for a handler, then replays saved register state into the
   throw frame so __throw's eh_return lands in the handler.  */
4052 ia64_throw_helper (ia64_frame_state *throw_frame, ia64_frame_state *caller,
4053 void *throw_bsp, void *throw_sp)
4055 void *throw_pc = __builtin_return_address (0);
4056 unwind_info_ptr *info;
4057 void *pc, *handler = NULL;
4062 __builtin_ia64_flushrs (); /* Make the local register stacks available. */
4064 /* Start at our stack frame, get our state. */
4065 __build_ia64_frame_state (throw_pc, throw_frame, throw_bsp, throw_sp,
4068 /* Now we have to find the proper frame for pc, and see if there
4069 is a handler for it. if not, we keep going back frames until
4070 we do find one. Otherwise we call uncaught (). */
4073 memcpy (caller, throw_frame, sizeof (*caller));
4076 void *(*personality) ();
4080 /* We only care about the RP right now, so we don't need to keep
4081 any other information about a call frame right now. */
/* RP is the return pointer; PFS holds the previous function state used
   to compute the caller's backing-store pointer.  */
4082 pc = __get_real_reg_value (&caller->rp) - 1;
4083 bsp = __calc_caller_bsp ((long)__get_real_reg_value (&caller->pfs),
4085 info = __build_ia64_frame_state (pc, caller, bsp, caller->my_psp,
4088 /* If we couldn't find the next frame, we lose. */
4092 personality = __get_personality (info);
4093 /* TODO Haven't figured out how to actually load the personality address
4094 yet, so just always default to the one we expect for now. */
4095 if (personality != 0)
4096 personality = __ia64_personality_v1;
4097 eh_table = __get_except_table (info);
4098 /* If there is no personality routine, we'll keep unwinding. */
4100 /* Pass a segment relative PC address to the personality routine,
4101 because the unwind_info section uses segrel relocs. */
4102 handler = personality (pc - pc_base, eh_table);
4108 /* Handler is a segment relative address, so we must adjust it here. */
4109 handler += (long) pc_base;
4111 /* If we found a handler, we need to unwind the stack to that point.
4112 We do this by copying saved values from previous frames into the
4113 save slot for the throw_frame saved slots. when __throw returns,
4114 it'll pickup the correct values. */
4116 /* Start with where __throw saved things, and copy each saved register
4117 of each previous frame until we get to the one before we're
4118 throwing back to. */
4119 memcpy (caller, throw_frame, sizeof (*caller));
4120 for ( ; frame_count > 0; frame_count--)
4122 pc = __get_real_reg_value (&caller->rp) - 1;
4123 bsp = __calc_caller_bsp ((long)__get_real_reg_value (&caller->pfs),
4125 __build_ia64_frame_state (pc, caller, bsp, caller->my_psp, &pc_base);
4126 /* Any regs that were saved can be put in the throw frame now. */
4127 /* We don't want to copy any saved register from the
4128 target destination, but we do want to load up its frame. */
4129 if (frame_count > 1)
4130 __copy_saved_reg_state (throw_frame, caller);
4133 /* Set return address of the throw frame to the handler. */
4134 __set_real_reg_value (&throw_frame->rp, handler);
4136 /* TODO, do we need to do anything to make the values we wrote 'stick'? */
4137 /* DO we need to go through the whole loadrs sequence? */
/* NOTE(review): elided listing -- the enclosing function's signature line
   (the IA-64 __throw) and several declarations/checks are missing from
   view; the lines below are its visible body.  */
/* r12 is the IA-64 stack pointer; bind it so we can pass the current SP
   to ia64_throw_helper.  */
4144 register void *stack_pointer __asm__("r12");
4145 struct eh_context *eh = (*get_eh_context) ();
4146 ia64_frame_state my_frame;
4147 ia64_frame_state originator; /* For the context handler is in. */
4148 void *bsp, *tmp_bsp;
4151 /* This is required for C++ semantics. We must call terminate if we
4152 try and rethrow an exception, when there is no exception currently
4157 __builtin_unwind_init ();
4159 /* We have to call another routine to actually process the frame
4160 information, which will force all of __throw's local registers into
4163 /* Get the value of ar.bsp while we're here. */
4165 bsp = __builtin_ia64_bsp ();
4166 ia64_throw_helper (&my_frame, &originator, bsp, stack_pointer);
4168 /* Now we have to fudge the bsp by the amount in our (__throw)
4169 frame marker, since the return is going to adjust it by that much. */
4171 tmp_bsp = __calc_caller_bsp ((long)__get_real_reg_value (&my_frame.pfs),
4173 offset = (char *)my_frame.my_bsp - (char *)tmp_bsp;
4174 tmp_bsp = (char *)originator.my_bsp + offset;
/* Transfer control into the handler frame; RP was already set by
   ia64_throw_helper, so this does not return here.  */
4176 __builtin_eh_return (tmp_bsp, offset, originator.my_sp);
4178 /* The return address was already set by throw_helper. */
4181 #endif /* IA64_UNWIND_INFO */