1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 92, 93, 94, 95, 96, 97, 98, 1999, 2000
4 Free Software Foundation, Inc.
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option) any later version.
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combined executable.)
22 GNU CC is distributed in the hope that it will be useful,
23 but WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 GNU General Public License for more details.
27 You should have received a copy of the GNU General Public License
28 along with GNU CC; see the file COPYING. If not, write to
29 the Free Software Foundation, 59 Temple Place - Suite 330,
30 Boston, MA 02111-1307, USA. */
32 /* It is incorrect to include config.h here, because this file is being
33 compiled for the target, and hence definitions concerning only the host
42 /* Don't use `fancy_abort' here even if config.h says to use it. */
49 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
50 #if defined (L_divdi3) || defined (L_moddi3)
62 w.s.high = -uu.s.high - ((UWtype) w.s.low > 0);
70 __addvsi3 (SItype a, SItype b)
76 if (b >= 0 ? w < a : w > a)
85 __addvdi3 (DItype a, DItype b)
91 if (b >= 0 ? w < a : w > a)
100 __subvsi3 (SItype a, SItype b)
103 return __addvsi3 (a, (-b));
109 if (b >= 0 ? w > a : w < a)
119 __subvdi3 (DItype a, DItype b)
128 if (b >= 0 ? w > a : w < a)
138 __mulvsi3 (SItype a, SItype b)
144 if (((u >= 0) == (v >= 0)) ? w < 0 : w > 0)
159 if (a >= 0 ? w > 0 : w < 0)
174 if (a >= 0 ? w > 0 : w < 0)
223 __mulvdi3 (DItype u, DItype v)
229 if (((u >= 0) == (v >= 0)) ? w < 0 : w > 0)
237 /* Unless shift functions are defined with full ANSI prototypes,
238 parameter b will be promoted to int if word_type is smaller than an int. */
241 __lshrdi3 (DWtype u, word_type b)
252 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
256 w.s.low = (UWtype) uu.s.high >> -bm;
260 UWtype carries = (UWtype) uu.s.high << bm;
262 w.s.high = (UWtype) uu.s.high >> b;
263 w.s.low = ((UWtype) uu.s.low >> b) | carries;
272 __ashldi3 (DWtype u, word_type b)
283 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
287 w.s.high = (UWtype) uu.s.low << -bm;
291 UWtype carries = (UWtype) uu.s.low >> bm;
293 w.s.low = (UWtype) uu.s.low << b;
294 w.s.high = ((UWtype) uu.s.high << b) | carries;
303 __ashrdi3 (DWtype u, word_type b)
314 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
317 /* w.s.high = 1..1 or 0..0 */
318 w.s.high = uu.s.high >> (sizeof (Wtype) * BITS_PER_UNIT - 1);
319 w.s.low = uu.s.high >> -bm;
323 UWtype carries = (UWtype) uu.s.high << bm;
325 w.s.high = uu.s.high >> b;
326 w.s.low = ((UWtype) uu.s.low >> b) | carries;
338 UWtype word, count, add;
342 word = uu.s.low, add = 0;
343 else if (uu.s.high != 0)
344 word = uu.s.high, add = BITS_PER_UNIT * sizeof (Wtype);
348 count_trailing_zeros (count, word);
349 return count + add + 1;
355 __muldi3 (DWtype u, DWtype v)
363 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
364 w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
365 + (UWtype) uu.s.high * (UWtype) vv.s.low);
372 #if defined (sdiv_qrnnd)
374 __udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
381 if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
383 /* dividend, divisor, and quotient are nonnegative */
384 sdiv_qrnnd (q, r, a1, a0, d);
388 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
389 sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
390 /* Divide (c1*2^32 + c0) by d */
391 sdiv_qrnnd (q, r, c1, c0, d);
392 /* Add 2^31 to quotient */
393 q += (UWtype) 1 << (W_TYPE_SIZE - 1);
398 b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
399 c1 = a1 >> 1; /* A/2 */
400 c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);
402 if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
404 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
406 r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
423 else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
426 c0 = ~c0; /* logical NOT */
428 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
430 q = ~q; /* (A/2)/b1 */
433 r = 2*r + (a0 & 1); /* A/(2*b1) */
451 else /* Implies c1 = b1 */
452 { /* Hence a1 = d - 1 = 2*b1 - 1 */
470 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
472 __udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
473 UWtype a1 __attribute__ ((__unused__)),
474 UWtype a0 __attribute__ ((__unused__)),
475 UWtype d __attribute__ ((__unused__)))
482 #if (defined (L_udivdi3) || defined (L_divdi3) || \
483 defined (L_umoddi3) || defined (L_moddi3))
488 const UQItype __clz_tab[] =
490 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
491 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
492 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
493 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
494 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
495 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
496 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
497 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
503 #if (defined (L_udivdi3) || defined (L_divdi3) || \
504 defined (L_umoddi3) || defined (L_moddi3))
508 __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
513 UWtype d0, d1, n0, n1, n2;
525 #if !UDIV_NEEDS_NORMALIZATION
532 udiv_qrnnd (q0, n0, n1, n0, d0);
535 /* Remainder in n0. */
542 d0 = 1 / d0; /* Divide intentionally by zero. */
544 udiv_qrnnd (q1, n1, 0, n1, d0);
545 udiv_qrnnd (q0, n0, n1, n0, d0);
547 /* Remainder in n0. */
558 #else /* UDIV_NEEDS_NORMALIZATION */
566 count_leading_zeros (bm, d0);
570 /* Normalize, i.e. make the most significant bit of the
574 n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
578 udiv_qrnnd (q0, n0, n1, n0, d0);
581 /* Remainder in n0 >> bm. */
588 d0 = 1 / d0; /* Divide intentionally by zero. */
590 count_leading_zeros (bm, d0);
594 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
595 conclude (the most significant bit of n1 is set) /\ (the
596 leading quotient digit q1 = 1).
598 This special case is necessary, not an optimization.
599 (Shifts counts of W_TYPE_SIZE are undefined.) */
608 b = W_TYPE_SIZE - bm;
612 n1 = (n1 << bm) | (n0 >> b);
615 udiv_qrnnd (q1, n1, n2, n1, d0);
620 udiv_qrnnd (q0, n0, n1, n0, d0);
622 /* Remainder in n0 >> bm. */
632 #endif /* UDIV_NEEDS_NORMALIZATION */
643 /* Remainder in n1n0. */
655 count_leading_zeros (bm, d1);
658 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
659 conclude (the most significant bit of n1 is set) /\ (the
660 quotient digit q0 = 0 or 1).
662 This special case is necessary, not an optimization. */
664 /* The condition on the next line takes advantage of that
665 n1 >= d1 (true due to program flow). */
666 if (n1 > d1 || n0 >= d0)
669 sub_ddmmss (n1, n0, n1, n0, d1, d0);
688 b = W_TYPE_SIZE - bm;
690 d1 = (d1 << bm) | (d0 >> b);
693 n1 = (n1 << bm) | (n0 >> b);
696 udiv_qrnnd (q0, n1, n2, n1, d1);
697 umul_ppmm (m1, m0, q0, d0);
699 if (m1 > n1 || (m1 == n1 && m0 > n0))
702 sub_ddmmss (m1, m0, m1, m0, d1, d0);
707 /* Remainder in (n1n0 - m1m0) >> bm. */
710 sub_ddmmss (n1, n0, n1, n0, m1, m0);
711 rr.s.low = (n1 << b) | (n0 >> bm);
712 rr.s.high = n1 >> bm;
727 __divdi3 (DWtype u, DWtype v)
738 uu.ll = __negdi2 (uu.ll);
741 vv.ll = __negdi2 (vv.ll);
743 w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
753 __moddi3 (DWtype u, DWtype v)
764 uu.ll = __negdi2 (uu.ll);
766 vv.ll = __negdi2 (vv.ll);
768 (void) __udivmoddi4 (uu.ll, vv.ll, &w);
778 __umoddi3 (UDWtype u, UDWtype v)
782 (void) __udivmoddi4 (u, v, &w);
790 __udivdi3 (UDWtype n, UDWtype d)
792 return __udivmoddi4 (n, d, (UDWtype *) 0);
798 __cmpdi2 (DWtype a, DWtype b)
802 au.ll = a, bu.ll = b;
804 if (au.s.high < bu.s.high)
806 else if (au.s.high > bu.s.high)
808 if ((UWtype) au.s.low < (UWtype) bu.s.low)
810 else if ((UWtype) au.s.low > (UWtype) bu.s.low)
818 __ucmpdi2 (DWtype a, DWtype b)
822 au.ll = a, bu.ll = b;
824 if ((UWtype) au.s.high < (UWtype) bu.s.high)
826 else if ((UWtype) au.s.high > (UWtype) bu.s.high)
828 if ((UWtype) au.s.low < (UWtype) bu.s.low)
830 else if ((UWtype) au.s.low > (UWtype) bu.s.low)
836 #if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
837 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
838 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
841 __fixunstfDI (TFtype a)
849 /* Compute high word of result, as a flonum. */
850 b = (a / HIGH_WORD_COEFF);
851 /* Convert that to fixed (but not to DWtype!),
852 and shift it into the high word. */
855 /* Remove high part from the TFtype, leaving the low part as flonum. */
857 /* Convert that to fixed (but not to DWtype!) and add it in.
858 Sometimes A comes out negative. This is significant, since
859 A has more bits than a long int does. */
868 #if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
873 return - __fixunstfDI (-a);
874 return __fixunstfDI (a);
878 #if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
879 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
880 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
883 __fixunsxfDI (XFtype a)
891 /* Compute high word of result, as a flonum. */
892 b = (a / HIGH_WORD_COEFF);
893 /* Convert that to fixed (but not to DWtype!),
894 and shift it into the high word. */
897 /* Remove high part from the XFtype, leaving the low part as flonum. */
899 /* Convert that to fixed (but not to DWtype!) and add it in.
900 Sometimes A comes out negative. This is significant, since
901 A has more bits than a long int does. */
910 #if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
915 return - __fixunsxfDI (-a);
916 return __fixunsxfDI (a);
921 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
922 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
925 __fixunsdfDI (DFtype a)
933 /* Compute high word of result, as a flonum. */
934 b = (a / HIGH_WORD_COEFF);
935 /* Convert that to fixed (but not to DWtype!),
936 and shift it into the high word. */
939 /* Remove high part from the DFtype, leaving the low part as flonum. */
941 /* Convert that to fixed (but not to DWtype!) and add it in.
942 Sometimes A comes out negative. This is significant, since
943 A has more bits than a long int does. */
957 return - __fixunsdfDI (-a);
958 return __fixunsdfDI (a);
963 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
964 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
967 __fixunssfDI (SFtype original_a)
969 /* Convert the SFtype to a DFtype, because that is surely not going
970 to lose any bits. Some day someone else can write a faster version
971 that avoids converting to DFtype, and verify it really works right. */
972 DFtype a = original_a;
979 /* Compute high word of result, as a flonum. */
980 b = (a / HIGH_WORD_COEFF);
981 /* Convert that to fixed (but not to DWtype!),
982 and shift it into the high word. */
985 /* Remove high part from the DFtype, leaving the low part as flonum. */
987 /* Convert that to fixed (but not to DWtype!) and add it in.
988 Sometimes A comes out negative. This is significant, since
989 A has more bits than a long int does. */
1000 __fixsfdi (SFtype a)
1003 return - __fixunssfDI (-a);
1004 return __fixunssfDI (a);
1008 #if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
1009 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1010 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1011 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1014 __floatdixf (DWtype u)
1018 d = (Wtype) (u >> WORD_SIZE);
1019 d *= HIGH_HALFWORD_COEFF;
1020 d *= HIGH_HALFWORD_COEFF;
1021 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
1027 #if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
1028 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1029 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1030 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1033 __floatditf (DWtype u)
1037 d = (Wtype) (u >> WORD_SIZE);
1038 d *= HIGH_HALFWORD_COEFF;
1039 d *= HIGH_HALFWORD_COEFF;
1040 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
1047 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1048 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1049 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1052 __floatdidf (DWtype u)
1056 d = (Wtype) (u >> WORD_SIZE);
1057 d *= HIGH_HALFWORD_COEFF;
1058 d *= HIGH_HALFWORD_COEFF;
1059 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
1066 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1067 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1068 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1069 #define DI_SIZE (sizeof (DWtype) * BITS_PER_UNIT)
1071 /* Define codes for all the float formats that we know of. Note
1072 that this is copied from real.h. */
1074 #define UNKNOWN_FLOAT_FORMAT 0
1075 #define IEEE_FLOAT_FORMAT 1
1076 #define VAX_FLOAT_FORMAT 2
1077 #define IBM_FLOAT_FORMAT 3
1079 /* Default to IEEE float if not specified. Nearly all machines use it. */
1080 #ifndef HOST_FLOAT_FORMAT
1081 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
1084 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1089 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
1094 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
1100 __floatdisf (DWtype u)
1102 /* Do the calculation in DFmode
1103 so that we don't lose any of the precision of the high word
1104 while multiplying it. */
1107 /* Protect against double-rounding error.
1108 Represent any low-order bits, that might be truncated in DFmode,
1109 by a bit that won't be lost. The bit can go in anywhere below the
1110 rounding position of the SFmode. A fixed mask and bit position
1111 handles all usual configurations. It doesn't handle the case
1112 of 128-bit DImode, however. */
1113 if (DF_SIZE < DI_SIZE
1114 && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
1116 #define REP_BIT ((UDWtype) 1 << (DI_SIZE - DF_SIZE))
1117 if (! (- ((DWtype) 1 << DF_SIZE) < u
1118 && u < ((DWtype) 1 << DF_SIZE)))
1120 if ((UDWtype) u & (REP_BIT - 1))
1124 f = (Wtype) (u >> WORD_SIZE);
1125 f *= HIGH_HALFWORD_COEFF;
1126 f *= HIGH_HALFWORD_COEFF;
1127 f += (UWtype) (u & (HIGH_WORD_COEFF - 1));
1133 #if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
1134 /* Reenable the normal types, in case limits.h needs them. */
1147 __fixunsxfSI (XFtype a)
1149 if (a >= - (DFtype) LONG_MIN)
1150 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1156 /* Reenable the normal types, in case limits.h needs them. */
1169 __fixunsdfSI (DFtype a)
1171 if (a >= - (DFtype) LONG_MIN)
1172 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1178 /* Reenable the normal types, in case limits.h needs them. */
1191 __fixunssfSI (SFtype a)
1193 if (a >= - (SFtype) LONG_MIN)
1194 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1199 /* From here on down, the routines use normal data types. */
1201 #define SItype bogus_type
1202 #define USItype bogus_type
1203 #define DItype bogus_type
1204 #define UDItype bogus_type
1205 #define SFtype bogus_type
1206 #define DFtype bogus_type
1224 /* Like bcmp except the sign is meaningful.
1225 Result is negative if S1 is less than S2,
1226 positive if S1 is greater, 0 if S1 and S2 are equal. */
1229 __gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
1233 unsigned char c1 = *s1++, c2 = *s2++;
1250 #if defined(__svr4__) || defined(__alliant__)
1254 /* The Alliant needs the added underscore. */
1255 asm (".globl __builtin_saveregs");
1256 asm ("__builtin_saveregs:");
1257 asm (".globl ___builtin_saveregs");
1258 asm ("___builtin_saveregs:");
1260 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1261 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1262 area and also for a new va_list
1264 /* Save all argument registers in the arg reg save area. The
1265 arg reg save area must have the following layout (according
1277 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1278 asm (" fst.q %f12,16(%sp)");
1280 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1281 asm (" st.l %r17,36(%sp)");
1282 asm (" st.l %r18,40(%sp)");
1283 asm (" st.l %r19,44(%sp)");
1284 asm (" st.l %r20,48(%sp)");
1285 asm (" st.l %r21,52(%sp)");
1286 asm (" st.l %r22,56(%sp)");
1287 asm (" st.l %r23,60(%sp)");
1288 asm (" st.l %r24,64(%sp)");
1289 asm (" st.l %r25,68(%sp)");
1290 asm (" st.l %r26,72(%sp)");
1291 asm (" st.l %r27,76(%sp)");
1293 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1294 va_list structure. Put in into
1295 r16 so that it will be returned
1298 /* Initialize all fields of the new va_list structure. This
1299 structure looks like:
1302 unsigned long ireg_used;
1303 unsigned long freg_used;
1309 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1310 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1311 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1312 asm (" bri %r1"); /* delayed return */
1313 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1315 #else /* not __svr4__ */
1316 #if defined(__PARAGON__)
1318 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1319 * and we stand a better chance of hooking into libraries
1320 * compiled by PGI. [andyp@ssd.intel.com]
1324 asm (".globl __builtin_saveregs");
1325 asm ("__builtin_saveregs:");
1326 asm (".globl ___builtin_saveregs");
1327 asm ("___builtin_saveregs:");
1329 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1330 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1331 area and also for a new va_list
1333 /* Save all argument registers in the arg reg save area. The
1334 arg reg save area must have the following layout (according
1346 asm (" fst.q f8, 0(sp)");
1347 asm (" fst.q f12,16(sp)");
1348 asm (" st.l r16,32(sp)");
1349 asm (" st.l r17,36(sp)");
1350 asm (" st.l r18,40(sp)");
1351 asm (" st.l r19,44(sp)");
1352 asm (" st.l r20,48(sp)");
1353 asm (" st.l r21,52(sp)");
1354 asm (" st.l r22,56(sp)");
1355 asm (" st.l r23,60(sp)");
1356 asm (" st.l r24,64(sp)");
1357 asm (" st.l r25,68(sp)");
1358 asm (" st.l r26,72(sp)");
1359 asm (" st.l r27,76(sp)");
1361 asm (" adds 80,sp,r16"); /* compute the address of the new
1362 va_list structure. Put in into
1363 r16 so that it will be returned
1366 /* Initialize all fields of the new va_list structure. This
1367 structure looks like:
1370 unsigned long ireg_used;
1371 unsigned long freg_used;
1377 asm (" st.l r0, 0(r16)"); /* nfixed */
1378 asm (" st.l r0, 4(r16)"); /* nfloating */
1379 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1380 asm (" bri r1"); /* delayed return */
1381 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1382 #else /* not __PARAGON__ */
1386 asm (".globl ___builtin_saveregs");
1387 asm ("___builtin_saveregs:");
1388 asm (" mov sp,r30");
1389 asm (" andnot 0x0f,sp,sp");
1390 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1392 /* Fill in the __va_struct. */
1393 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1394 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1395 asm (" st.l r18, 8(sp)");
1396 asm (" st.l r19,12(sp)");
1397 asm (" st.l r20,16(sp)");
1398 asm (" st.l r21,20(sp)");
1399 asm (" st.l r22,24(sp)");
1400 asm (" st.l r23,28(sp)");
1401 asm (" st.l r24,32(sp)");
1402 asm (" st.l r25,36(sp)");
1403 asm (" st.l r26,40(sp)");
1404 asm (" st.l r27,44(sp)");
1406 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1407 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1409 /* Fill in the __va_ctl. */
1410 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1411 asm (" st.l r28,84(sp)"); /* pointer to more args */
1412 asm (" st.l r0, 88(sp)"); /* nfixed */
1413 asm (" st.l r0, 92(sp)"); /* nfloating */
1415 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1417 asm (" mov r30,sp");
1418 /* recover stack and pass address to start
1420 #endif /* not __PARAGON__ */
1421 #endif /* not __svr4__ */
1422 #else /* not __i860__ */
1424 asm (".global __builtin_saveregs");
1425 asm ("__builtin_saveregs:");
1426 asm (".global ___builtin_saveregs");
1427 asm ("___builtin_saveregs:");
1428 #ifdef NEED_PROC_COMMAND
1431 asm ("st %i0,[%fp+68]");
1432 asm ("st %i1,[%fp+72]");
1433 asm ("st %i2,[%fp+76]");
1434 asm ("st %i3,[%fp+80]");
1435 asm ("st %i4,[%fp+84]");
1437 asm ("st %i5,[%fp+88]");
1438 #ifdef NEED_TYPE_COMMAND
1439 asm (".type __builtin_saveregs,#function");
1440 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1442 #else /* not __sparc__ */
1443 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1447 asm (" .set nomips16");
1449 asm (" .ent __builtin_saveregs");
1450 asm (" .globl __builtin_saveregs");
1451 asm ("__builtin_saveregs:");
1452 asm (" sw $4,0($30)");
1453 asm (" sw $5,4($30)");
1454 asm (" sw $6,8($30)");
1455 asm (" sw $7,12($30)");
1457 asm (" .end __builtin_saveregs");
1458 #else /* not __mips__, etc. */
1460 void * __attribute__ ((__noreturn__))
1461 __builtin_saveregs (void)
1466 #endif /* not __mips__ */
1467 #endif /* not __sparc__ */
1468 #endif /* not __i860__ */
1472 #ifndef inhibit_libc
1474 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1476 /* This is used by the `assert' macro. */
1478 __eprintf (const char *string, const char *expression,
1479 unsigned int line, const char *filename)
1481 fprintf (stderr, string, expression, line, filename);
1491 /* Structure emitted by -a */
1495 const char *filename;
1499 const unsigned long *addresses;
1501 /* Older GCC's did not emit these fields. */
1503 const char **functions;
1504 const long *line_nums;
1505 const char **filenames;
1509 #ifdef BLOCK_PROFILER_CODE
1512 #ifndef inhibit_libc
1514 /* Simple minded basic block profiling output dumper for
1515 systems that don't provide tcov support. At present,
1516 it requires atexit and stdio. */
1518 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1522 #include "gbl-ctors.h"
1523 #include "gcov-io.h"
1525 #ifdef TARGET_HAS_F_SETLKW
1530 static struct bb *bb_head;
1532 static int num_digits (long value, int base) __attribute__ ((const));
1534 /* Return the number of digits needed to print a value */
1535 /* __inline__ */ static int num_digits (long value, int base)
1537 int minus = (value < 0 && base != 16);
1538 unsigned long v = (minus) ? -value : value;
1552 __bb_exit_func (void)
1554 FILE *da_file, *file;
1561 i = strlen (bb_head->filename) - 3;
1563 if (!strcmp (bb_head->filename+i, ".da"))
1565 /* Must be -fprofile-arcs not -a.
1566 Dump data in a form that gcov expects. */
1570 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1574 /* Make sure the output file exists -
1575 but don't clobber existing data. */
1576 if ((da_file = fopen (ptr->filename, "a")) != 0)
1579 /* Need to re-open in order to be able to write from the start. */
1580 da_file = fopen (ptr->filename, "r+b");
1581 /* Some old systems might not allow the 'b' mode modifier.
1582 Therefore, try to open without it. This can lead to a race
1583 condition so that when you delete and re-create the file, the
1584 file might be opened in text mode, but then, you shouldn't
1585 delete the file in the first place. */
1587 da_file = fopen (ptr->filename, "r+");
1590 fprintf (stderr, "arc profiling: Can't open output file %s.\n",
1595 /* After a fork, another process might try to read and/or write
1596 the same file simultaneously. So if we can, lock the file to
1597 avoid race conditions. */
1598 #if defined (TARGET_HAS_F_SETLKW)
1600 struct flock s_flock;
1602 s_flock.l_type = F_WRLCK;
1603 s_flock.l_whence = SEEK_SET;
1604 s_flock.l_start = 0;
1606 s_flock.l_pid = getpid ();
1608 while (fcntl (fileno (da_file), F_SETLKW, &s_flock)
1613 /* If the file is not empty, and the number of counts in it is the
1614 same, then merge them in. */
1615 firstchar = fgetc (da_file);
1616 if (firstchar == EOF)
1618 if (ferror (da_file))
1620 fprintf (stderr, "arc profiling: Can't read output file ");
1621 perror (ptr->filename);
1628 if (ungetc (firstchar, da_file) == EOF)
1630 if (__read_long (&n_counts, da_file, 8) != 0)
1632 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1637 if (n_counts == ptr->ncounts)
1641 for (i = 0; i < n_counts; i++)
1645 if (__read_long (&v, da_file, 8) != 0)
1647 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1651 ptr->counts[i] += v;
1659 /* ??? Should first write a header to the file. Preferably, a 4 byte
1660 magic number, 4 bytes containing the time the program was
1661 compiled, 4 bytes containing the last modification time of the
1662 source file, and 4 bytes indicating the compiler options used.
1664 That way we can easily verify that the proper source/executable/
1665 data file combination is being used from gcov. */
1667 if (__write_long (ptr->ncounts, da_file, 8) != 0)
1670 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1676 long *count_ptr = ptr->counts;
1678 for (j = ptr->ncounts; j > 0; j--)
1680 if (__write_long (*count_ptr, da_file, 8) != 0)
1688 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1692 if (fclose (da_file) == EOF)
1693 fprintf (stderr, "arc profiling: Error closing output file %s.\n",
1700 /* Must be basic block profiling. Emit a human readable output file. */
1702 file = fopen ("bb.out", "a");
1711 /* This is somewhat type incorrect, but it avoids worrying about
1712 exactly where time.h is included from. It should be ok unless
1713 a void * differs from other pointer formats, or if sizeof (long)
1714 is < sizeof (time_t). It would be nice if we could assume the
1715 use of rational standards here. */
1717 time ((void *) &time_value);
1718 fprintf (file, "Basic block profiling finished on %s\n", ctime ((void *) &time_value));
1720 /* We check the length field explicitly in order to allow compatibility
1721 with older GCC's which did not provide it. */
1723 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1726 int func_p = (ptr->nwords >= (long) sizeof (struct bb)
1727 && ptr->nwords <= 1000
1729 int line_p = (func_p && ptr->line_nums);
1730 int file_p = (func_p && ptr->filenames);
1731 int addr_p = (ptr->addresses != 0);
1732 long ncounts = ptr->ncounts;
1738 int blk_len = num_digits (ncounts, 10);
1743 fprintf (file, "File %s, %ld basic blocks \n\n",
1744 ptr->filename, ncounts);
1746 /* Get max values for each field. */
1747 for (i = 0; i < ncounts; i++)
1752 if (cnt_max < ptr->counts[i])
1753 cnt_max = ptr->counts[i];
1755 if (addr_p && (unsigned long) addr_max < ptr->addresses[i])
1756 addr_max = ptr->addresses[i];
1758 if (line_p && line_max < ptr->line_nums[i])
1759 line_max = ptr->line_nums[i];
1763 p = (ptr->functions[i]) ? (ptr->functions[i]) : "<none>";
1771 p = (ptr->filenames[i]) ? (ptr->filenames[i]) : "<none>";
1778 addr_len = num_digits (addr_max, 16);
1779 cnt_len = num_digits (cnt_max, 10);
1780 line_len = num_digits (line_max, 10);
1782 /* Now print out the basic block information. */
1783 for (i = 0; i < ncounts; i++)
1786 " Block #%*d: executed %*ld time(s)",
1788 cnt_len, ptr->counts[i]);
1791 fprintf (file, " address= 0x%.*lx", addr_len,
1795 fprintf (file, " function= %-*s", func_len,
1796 (ptr->functions[i]) ? ptr->functions[i] : "<none>");
1799 fprintf (file, " line= %*ld", line_len, ptr->line_nums[i]);
1802 fprintf (file, " file= %s",
1803 (ptr->filenames[i]) ? ptr->filenames[i] : "<none>");
1805 fprintf (file, "\n");
1808 fprintf (file, "\n");
1812 fprintf (file, "\n\n");
1818 __bb_init_func (struct bb *blocks)
1820 /* User is supposed to check whether the first word is non-0,
1821 but just in case.... */
1823 if (blocks->zero_word)
1826 /* Initialize destructor. */
1828 atexit (__bb_exit_func);
1830 /* Set up linked list. */
1831 blocks->zero_word = 1;
1832 blocks->next = bb_head;
1836 /* Called before fork or exec - write out profile information gathered so
1837 far and reset it to zero. This avoids duplication or loss of the
1838 profile information gathered so far. */
1840 __bb_fork_func (void)
1845 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1848 for (i = ptr->ncounts - 1; i >= 0; i--)
1853 #ifndef MACHINE_STATE_SAVE
1854 #define MACHINE_STATE_SAVE(ID)
1856 #ifndef MACHINE_STATE_RESTORE
1857 #define MACHINE_STATE_RESTORE(ID)
1860 /* Number of buckets in hashtable of basic block addresses. */
1862 #define BB_BUCKETS 311
1864 /* Maximum length of string in file bb.in. */
1866 #define BBINBUFSIZE 500
1870 struct bb_edge *next;
1871 unsigned long src_addr;
1872 unsigned long dst_addr;
1873 unsigned long count;
1878 TRACE_KEEP = 0, TRACE_ON = 1, TRACE_OFF = 2
1883 struct bb_func *next;
1886 enum bb_func_mode mode;
1889 /* This is the connection to the outside world.
1890 The BLOCK_PROFILER macro must set __bb.blocks
1891 and __bb.blockno. */
1894 unsigned long blockno;
1898 /* Vars to store addrs of source and destination basic blocks
1901 static unsigned long bb_src = 0;
1902 static unsigned long bb_dst = 0;
1904 static FILE *bb_tracefile = (FILE *) 0;
1905 static struct bb_edge **bb_hashbuckets = (struct bb_edge **) 0;
1906 static struct bb_func *bb_func_head = (struct bb_func *) 0;
1907 static unsigned long bb_callcount = 0;
1908 static int bb_mode = 0;
1910 static unsigned long *bb_stack = (unsigned long *) 0;
1911 static size_t bb_stacksize = 0;
1913 static int reported = 0;
1916 Always : Print execution frequencies of basic blocks
1918 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1919 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1920 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1921 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1926 /*#include <sys/types.h>*/
1927 #include <sys/stat.h>
1928 /*#include <malloc.h>*/
1930 /* Commands executed by gopen. */
1932 #define GOPENDECOMPRESS "gzip -cd "
1933 #define GOPENCOMPRESS "gzip -c >"
1935 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1936 If it does not compile, simply replace gopen by fopen and delete
1937 '.gz' from any first parameter to gopen. */
1940 gopen (char *fn, char *mode)
1948 if (mode[0] != 'r' && mode[0] != 'w')
1951 p = fn + strlen (fn)-1;
1952 use_gzip = ((p[-1] == '.' && (p[0] == 'Z' || p[0] == 'z'))
1953 || (p[-2] == '.' && p[-1] == 'g' && p[0] == 'z'));
1960 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1961 + sizeof (GOPENDECOMPRESS));
1962 strcpy (s, GOPENDECOMPRESS);
1963 strcpy (s + (sizeof (GOPENDECOMPRESS)-1), fn);
1964 f = popen (s, mode);
1972 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1973 + sizeof (GOPENCOMPRESS));
1974 strcpy (s, GOPENCOMPRESS);
1975 strcpy (s + (sizeof (GOPENCOMPRESS)-1), fn);
1976 if (!(f = popen (s, mode)))
1977 f = fopen (s, mode);
1984 return fopen (fn, mode);
1994 if (!fstat (fileno (f), &buf) && S_ISFIFO (buf.st_mode))
2002 #endif /* HAVE_POPEN */
2004 /* Called once per program. */
/* __bb_exit_trace_func: at exit, close the trace file, report functions
   listed in `bb.in' that never executed, then dump jump frequencies per
   hash bucket to `bb.out'.  Fragmented excerpt; leading numbers are
   original line numbers from the dump.  */
2007 __bb_exit_trace_func (void)
2009 FILE *file = fopen ("bb.out", "a");
/* Close the trace stream with gclose (pipe-aware) when popen support
   was compiled in, plain fclose otherwise.  */
2022 gclose (bb_tracefile);
2024 fclose (bb_tracefile);
2025 #endif /* HAVE_POPEN */
2028 /* Check functions in `bb.in'. */
2033 const struct bb_func *p;
2034 int printed_something = 0;
2038 /* This is somewhat type incorrect. */
2039 time ((void *) &time_value);
/* For every requested function, scan every registered bb file for a
   matching function name (optionally qualified by filename).  */
2041 for (p = bb_func_head; p != (struct bb_func *) 0; p = p->next)
2043 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
2045 if (!ptr->filename || (p->filename != (char *) 0 && strcmp (p->filename, ptr->filename)))
2047 for (blk = 0; blk < ptr->ncounts; blk++)
2049 if (!strcmp (p->funcname, ptr->functions[blk]))
/* Report entries that were never found/executed.  */
2054 if (!printed_something)
2056 fprintf (file, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value));
2057 printed_something = 1;
2060 fprintf (file, "\tFunction %s", p->funcname);
2062 fprintf (file, " of file %s", p->filename);
2063 fprintf (file, "\n" );
2068 if (printed_something)
2069 fprintf (file, "\n");
2075 if (!bb_hashbuckets)
2079 fprintf (stderr, "Profiler: out of memory\n");
/* First pass over the buckets: find the widest address and count so the
   report columns line up.  */
2089 unsigned long addr_max = 0;
2090 unsigned long cnt_max = 0;
2094 /* This is somewhat type incorrect, but it avoids worrying about
2095 exactly where time.h is included from. It should be ok unless
2096 a void * differs from other pointer formats, or if sizeof (long)
2097 is < sizeof (time_t). It would be nice if we could assume the
2098 use of rational standards here. */
2100 time ((void *) &time_value);
2101 fprintf (file, "Basic block jump tracing");
/* bb_mode & 12 selects which of the call/ret tracing bits were on.  */
2103 switch (bb_mode & 12)
2106 fprintf (file, " (with call)");
2110 /* Print nothing. */
2114 fprintf (file, " (with call & ret)");
2118 fprintf (file, " (with ret)");
2122 fprintf (file, " finished on %s\n", ctime ((void *) &time_value));
2124 for (i = 0; i < BB_BUCKETS; i++)
2126 struct bb_edge *bucket = bb_hashbuckets[i];
2127 for ( ; bucket; bucket = bucket->next )
2129 if (addr_max < bucket->src_addr)
2130 addr_max = bucket->src_addr;
2131 if (addr_max < bucket->dst_addr)
2132 addr_max = bucket->dst_addr;
2133 if (cnt_max < bucket->count)
2134 cnt_max = bucket->count;
2137 addr_len = num_digits (addr_max, 16);
2138 cnt_len = num_digits (cnt_max, 10);
/* Second pass: print one line per recorded src->dst edge.  */
2140 for ( i = 0; i < BB_BUCKETS; i++)
2142 struct bb_edge *bucket = bb_hashbuckets[i];
2143 for ( ; bucket; bucket = bucket->next )
2146 "Jump from block 0x%.*lx to block 0x%.*lx executed %*lu time(s)\n",
2147 addr_len, bucket->src_addr,
2148 addr_len, bucket->dst_addr,
2149 cnt_len, bucket->count);
2153 fprintf (file, "\n");
2161 /* Free allocated memory. */
2166 struct bb_func *old = f;
2169 if (old->funcname) free (old->funcname);
2170 if (old->filename) free (old->filename);
2181 for (i = 0; i < BB_BUCKETS; i++)
2183 struct bb_edge *old, *bucket = bb_hashbuckets[i];
2188 bucket = bucket->next;
2192 free (bb_hashbuckets);
2195 for (b = bb_head; b; b = b->next)
2196 if (b->flags) free (b->flags);
2199 /* Called once per program. */
/* __bb_init_prg: one-time setup -- parse `bb.in' into bb_mode flags and
   a list of bb_func records, open the trace output, allocate the edge
   hash table and the call stack, and register the exit dumpers.
   Fragmented excerpt; leading numbers are original line numbers.  */
2202 __bb_init_prg (void)
2205 char buf[BBINBUFSIZE];
2208 enum bb_func_mode m;
2211 /* Initialize destructor. */
2212 atexit (__bb_exit_func);
2214 if (!(file = fopen ("bb.in", "r")))
/* One directive or function pattern per line of bb.in.  */
2217 while(fgets (buf, BBINBUFSIZE, file) != 0)
2233 if (!strcmp (p, "__bb_trace__"))
2235 else if (!strcmp (p, "__bb_jumps__"))
2237 else if (!strcmp (p, "__bb_hidecall__"))
2239 else if (!strcmp (p, "__bb_showret__"))
/* Otherwise the line names a function, optionally "file:func".  */
2243 struct bb_func *f = (struct bb_func *) malloc (sizeof (struct bb_func));
2247 f->next = bb_func_head;
2248 if ((pos = strchr (p, ':')))
2250 if (!(f->funcname = (char *) malloc (strlen (pos+1)+1)))
2252 strcpy (f->funcname, pos+1);
2254 if ((f->filename = (char *) malloc (l+1)))
2256 strncpy (f->filename, p, l);
2257 f->filename[l] = '\0';
2260 f->filename = (char *) 0;
2264 if (!(f->funcname = (char *) malloc (strlen (p)+1)))
2266 strcpy (f->funcname, p);
2267 f->filename = (char *) 0;
/* Trace file: gzip-compressed when popen is available.  */
2279 bb_tracefile = gopen ("bbtrace.gz", "w");
2284 bb_tracefile = fopen ("bbtrace", "w");
2286 #endif /* HAVE_POPEN */
2290 bb_hashbuckets = (struct bb_edge **)
2291 malloc (BB_BUCKETS * sizeof (struct bb_edge *));
2293 /* Use a loop here rather than calling bzero to avoid having to
2294 conditionalize its existence. */
2295 for (i = 0; i < BB_BUCKETS; i++)
2296 bb_hashbuckets[i] = 0;
2302 bb_stack = (unsigned long *) malloc (bb_stacksize * sizeof (*bb_stack));
2305 /* Initialize destructor. */
2306 atexit (__bb_exit_trace_func);
2309 /* Called upon entering a basic block. */
/* __bb_trace_func: record the edge bb_src -> current block.  Either
   appends the destination address to the raw trace file or counts the
   edge in the hash table, moving a hit bucket to the front of its
   chain.  Fragmented excerpt; leading numbers are original lines.  */
2312 __bb_trace_func (void)
2314 struct bb_edge *bucket;
2316 MACHINE_STATE_SAVE("1")
/* Bail out when not inside a traced call or this block is masked off.  */
2318 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2321 bb_dst = __bb.blocks->addresses[__bb.blockno];
2322 __bb.blocks->counts[__bb.blockno]++;
2326 fwrite (&bb_dst, sizeof (unsigned long), 1, bb_tracefile);
2331 struct bb_edge **startbucket, **oldnext;
/* Hash the (src, dst) pair into a bucket chain.  */
2333 oldnext = startbucket
2334 = & bb_hashbuckets[ (((int) bb_src*8) ^ (int) bb_dst) % BB_BUCKETS ];
2335 bucket = *startbucket;
2337 for (bucket = *startbucket; bucket;
2338 oldnext = &(bucket->next), bucket = *oldnext)
2340 if (bucket->src_addr == bb_src
2341 && bucket->dst_addr == bb_dst)
/* Found: unlink and splice to the chain head (move-to-front).  */
2344 *oldnext = bucket->next;
2345 bucket->next = *startbucket;
2346 *startbucket = bucket;
/* Not found: allocate a fresh edge record at the chain head.  */
2351 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2357 fprintf (stderr, "Profiler: out of memory\n");
2364 bucket->src_addr = bb_src;
2365 bucket->dst_addr = bb_dst;
2366 bucket->next = *startbucket;
2367 *startbucket = bucket;
2378 MACHINE_STATE_RESTORE("1")
2382 /* Called when returning from a function and `__bb_showret__' is set. */
/* __bb_trace_func_ret: like __bb_trace_func, but records the reverse
   edge bb_dst -> bb_src (note swapped roles throughout).  Fragmented
   excerpt; leading numbers are original line numbers.  */
2385 __bb_trace_func_ret (void)
2387 struct bb_edge *bucket;
2389 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2394 struct bb_edge **startbucket, **oldnext;
2396 oldnext = startbucket
2397 = & bb_hashbuckets[ (((int) bb_dst * 8) ^ (int) bb_src) % BB_BUCKETS ];
2398 bucket = *startbucket;
2400 for (bucket = *startbucket; bucket;
2401 oldnext = &(bucket->next), bucket = *oldnext)
2403 if (bucket->src_addr == bb_dst
2404 && bucket->dst_addr == bb_src)
/* Hit: move the bucket to the front of its chain.  */
2407 *oldnext = bucket->next;
2408 bucket->next = *startbucket;
2409 *startbucket = bucket;
/* Miss: create a new edge record.  */
2414 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2420 fprintf (stderr, "Profiler: out of memory\n");
2427 bucket->src_addr = bb_dst;
2428 bucket->dst_addr = bb_src;
2429 bucket->next = *startbucket;
2430 *startbucket = bucket;
2443 /* Called upon entering the first function of a file. */
/* __bb_init_file: link BLOCKS into the global bb list and compute a
   per-block flags byte from the bb_func patterns read from bb.in.
   Fragmented excerpt; leading numbers are original line numbers.  */
2446 __bb_init_file (struct bb *blocks)
2449 const struct bb_func *p;
2450 long blk, ncounts = blocks->ncounts;
2451 const char **functions = blocks->functions;
2453 /* Set up linked list. */
2454 blocks->zero_word = 1;
2455 blocks->next = bb_head;
2460 || !(blocks->flags = (char *) malloc (sizeof (char) * blocks->ncounts)))
2463 for (blk = 0; blk < ncounts; blk++)
2464 blocks->flags[blk] = 0;
/* OR in the mode of every pattern whose function (and optional file)
   name matches this block.  */
2466 for (blk = 0; blk < ncounts; blk++)
2468 for (p = bb_func_head; p; p = p->next)
2470 if (!strcmp (p->funcname, functions[blk])
2471 && (!p->filename || !strcmp (p->filename, blocks->filename)))
2473 blocks->flags[blk] |= p->mode;
2480 /* Called when exiting from a function. */
/* __bb_trace_ret: pop the caller's source address off bb_stack and, if
   return-edge tracing is enabled (bb_mode & 12), record the return
   edge.  Fragmented excerpt; leading numbers are original lines.  */
2483 __bb_trace_ret (void)
2486 MACHINE_STATE_SAVE("2")
2490 if ((bb_mode & 12) && bb_stacksize > bb_callcount)
2492 bb_src = bb_stack[bb_callcount];
2494 __bb_trace_func_ret ();
2500 MACHINE_STATE_RESTORE("2")
2504 /* Called when entering a function. */
/* __bb_init_trace_func: per-function entry hook.  Registers the file on
   first sight, pushes bb_src on bb_stack (growing it on demand), and
   honors per-block TRACE_ON flags.  Fragmented excerpt; leading numbers
   are original line numbers.  */
2507 __bb_init_trace_func (struct bb *blocks, unsigned long blockno)
2509 static int trace_init = 0;
2511 MACHINE_STATE_SAVE("3")
/* zero_word doubles as the "already initialized" marker for BLOCKS.  */
2513 if (!blocks->zero_word)
2520 __bb_init_file (blocks);
2530 if (bb_callcount >= bb_stacksize)
2532 size_t newsize = bb_callcount + 100;
/* NOTE(review): realloc size is NEWSIZE elements but no * sizeof --
   looks like a unit mismatch with the malloc above; confirm against the
   unabridged source.  */
2534 bb_stack = (unsigned long *) realloc (bb_stack, newsize);
2539 fprintf (stderr, "Profiler: out of memory\n");
2543 goto stack_overflow;
2545 bb_stacksize = newsize;
2547 bb_stack[bb_callcount] = bb_src;
2558 else if (blocks->flags && (blocks->flags[blockno] & TRACE_ON))
2564 bb_stack[bb_callcount] = bb_src;
2567 MACHINE_STATE_RESTORE("3")
2570 #endif /* not inhibit_libc */
2571 #endif /* not BLOCK_PROFILER_CODE */
/* __shtab: single-bit mask table; entry N is 1U << N for N in 0..31.  */
2575 unsigned int __shtab[] = {
2576 0x00000001, 0x00000002, 0x00000004, 0x00000008,
2577 0x00000010, 0x00000020, 0x00000040, 0x00000080,
2578 0x00000100, 0x00000200, 0x00000400, 0x00000800,
2579 0x00001000, 0x00002000, 0x00004000, 0x00008000,
2580 0x00010000, 0x00020000, 0x00040000, 0x00080000,
2581 0x00100000, 0x00200000, 0x00400000, 0x00800000,
2582 0x01000000, 0x02000000, 0x04000000, 0x08000000,
2583 0x10000000, 0x20000000, 0x40000000, 0x80000000
2587 #ifdef L_clear_cache
2588 /* Clear part of an instruction cache. */
2590 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
/* __clear_cache: flush the insn cache covering [BEG, END).  Prefers a
   target-provided CLEAR_INSN_CACHE macro; otherwise evicts lines by
   executing jump/return instructions planted in a static ARRAY that
   aliases the cache.  Fragmented excerpt; leading numbers are original
   line numbers.  */
2593 __clear_cache (char *beg __attribute__((__unused__)),
2594 char *end __attribute__((__unused__)))
2596 #ifdef CLEAR_INSN_CACHE
2597 CLEAR_INSN_CACHE (beg, end);
2599 #ifdef INSN_CACHE_SIZE
2600 static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
2601 static int initialized;
2605 typedef (*function_ptr) (void);
2607 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2608 /* It's cheaper to clear the whole cache.
2609 Put in a series of jump instructions so that calling the beginning
2610 of the cache will clear the whole thing. */
/* Align the scratch array to a cache line, then fill one cache-size
   span with "jump one line ahead" insns ending in a return.  */
2614 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2615 & -INSN_CACHE_LINE_WIDTH);
2616 int end_ptr = ptr + INSN_CACHE_SIZE;
2618 while (ptr < end_ptr)
2620 *(INSTRUCTION_TYPE *)ptr
2621 = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
2622 ptr += INSN_CACHE_LINE_WIDTH;
2624 *(INSTRUCTION_TYPE *) (ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;
2629 /* Call the beginning of the sequence. */
2630 (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2631 & -INSN_CACHE_LINE_WIDTH))
2634 #else /* Cache is large. */
/* Large cache: plant a RETURN at the start of every line once, then
   call only the lines that alias the requested range.  */
2638 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2639 & -INSN_CACHE_LINE_WIDTH);
2641 while (ptr < (int) array + sizeof array)
2643 *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
2644 ptr += INSN_CACHE_LINE_WIDTH;
2650 /* Find the location in array that occupies the same cache line as BEG. */
2652 offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
2653 start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
2654 & -INSN_CACHE_PLANE_SIZE)
2657 /* Compute the cache alignment of the place to stop clearing. */
2658 #if 0 /* This is not needed for gcc's purposes. */
2659 /* If the block to clear is bigger than a cache plane,
2660 we clear the entire cache, and OFFSET is already correct. */
2661 if (end < beg + INSN_CACHE_PLANE_SIZE)
2663 offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
2664 & -INSN_CACHE_LINE_WIDTH)
2665 & (INSN_CACHE_PLANE_SIZE - 1));
2667 #if INSN_CACHE_DEPTH > 1
2668 end_addr = (start_addr & -INSN_CACHE_PLANE_SIZE) + offset;
2669 if (end_addr <= start_addr)
2670 end_addr += INSN_CACHE_PLANE_SIZE;
/* Set-associative cache: walk every plane (way) over the line range.  */
2672 for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)
2674 int addr = start_addr + plane * INSN_CACHE_PLANE_SIZE;
2675 int stop = end_addr + plane * INSN_CACHE_PLANE_SIZE;
2677 while (addr != stop)
2679 /* Call the return instruction at ADDR. */
2680 ((function_ptr) addr) ();
2682 addr += INSN_CACHE_LINE_WIDTH;
2685 #else /* just one plane */
2688 /* Call the return instruction at START_ADDR. */
2689 ((function_ptr) start_addr) ();
2691 start_addr += INSN_CACHE_LINE_WIDTH;
2693 while ((start_addr % INSN_CACHE_SIZE) != offset);
2694 #endif /* just one plane */
2695 #endif /* Cache is large */
2696 #endif /* Cache exists */
2697 #endif /* CLEAR_INSN_CACHE */
2700 #endif /* L_clear_cache */
2704 /* Jump to a trampoline, loading the static chain address. */
2706 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
/* Win32 shim: emulate POSIX mprotect on top of VirtualProtect so the
   generic trampoline code below can run unchanged.  */
2719 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2723 mprotect (char *addr, int len, int prot)
2740 if (VirtualProtect (addr, len, np, &op))
2746 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2748 #ifdef TRANSFER_FROM_TRAMPOLINE
2749 TRANSFER_FROM_TRAMPOLINE
2752 #if defined (NeXT) && defined (__MACH__)
2754 /* Make stack executable so we can call trampolines on stack.
2755 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2759 #include <mach/mach.h>
/* NeXT/Mach variant: vm_protect the trampoline bytes to VM_PROT_ALL,
   then invalidate the i-cache over the same range.  */
2763 __enable_execute_stack (char *addr)
2766 char *eaddr = addr + TRAMPOLINE_SIZE;
2767 vm_address_t a = (vm_address_t) addr;
2769 /* turn on execute access on stack */
2770 r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
2771 if (r != KERN_SUCCESS)
2773 mach_error("vm_protect VM_PROT_ALL", r);
2777 /* We inline the i-cache invalidation for speed */
2779 #ifdef CLEAR_INSN_CACHE
2780 CLEAR_INSN_CACHE (addr, eaddr);
2782 __clear_cache ((int) addr, (int) eaddr);
2786 #endif /* defined (NeXT) && defined (__MACH__) */
2790 /* Make stack executable so we can call trampolines on stack.
2791 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2793 #include <sys/mman.h>
2794 #include <sys/vmparam.h>
2795 #include <machine/machparam.h>
/* Convex variant: extend the executable region downward from the
   lowest stack page made executable so far to the current frame.  */
2798 __enable_execute_stack (void)
2801 static unsigned lowest = USRSTACK;
2802 unsigned current = (unsigned) &fp & -NBPG;
2804 if (lowest > current)
2806 unsigned len = lowest - current;
/* mremap here also clears the i-cache (see the pyr comment below).  */
2807 mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);
2811 /* Clear instruction cache in case an old trampoline is in it. */
2814 #endif /* __convex__ */
2818 /* Modified from the convex code above. */
2820 #include <sys/param.h>
2822 #include <sys/m88kbcs.h>
/* sysV88 (m88k) variant: use memctl(MCT_TEXT) to make the stack pages
   executable / flush the i-cache.  */
2825 __enable_execute_stack (void)
2828 static unsigned long lowest = USRSTACK;
2829 unsigned long current = (unsigned long) &save_errno & -NBPC;
2831 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2832 address is seen as 'negative'. That is the case with the stack. */
2835 if (lowest > current)
2837 unsigned len=lowest-current;
2838 memctl(current,len,MCT_TEXT);
2842 memctl(current,NBPC,MCT_TEXT);
2846 #endif /* __sysV88__ */
2850 #include <sys/signal.h>
2853 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2854 so define it here, because we need it in __clear_insn_cache below */
2855 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2856 hence we enable this stuff only if MCT_TEXT is #define'd. */
2871 /* Clear instruction cache so we can call trampolines on stack.
2872 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
/* sysV68 variant: one memctl(MCT_TEXT) call flushes the whole insn
   cache; errno is saved/restored around it (see comment below).  */
2875 __clear_insn_cache (void)
2880 /* Preserve errno, because users would be surprised to have
2881 errno changing without explicitly calling any system-call. */
2884 /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
2885 No need to use an address derived from _start or %sp, as 0 works also. */
2886 memctl(0, 4096, MCT_TEXT);
2891 #endif /* __sysV68__ */
2895 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2897 #include <sys/mman.h>
2898 #include <sys/types.h>
2899 #include <sys/param.h>
2900 #include <sys/vmmac.h>
2902 /* Modified from the convex code above.
2903 mremap promises to clear the i-cache. */
/* Pyramid variant: mprotect the page containing the current frame to
   RWX; report (but do not abort on) failure.  */
2906 __enable_execute_stack (void)
2909 if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
2910 PROT_READ|PROT_WRITE|PROT_EXEC))
2912 perror ("mprotect in __enable_execute_stack");
2917 #endif /* __pyr__ */
2919 #if defined (sony_news) && defined (SYSTYPE_BSD)
2922 #include <sys/types.h>
2923 #include <sys/param.h>
2924 #include <syscall.h>
2925 #include <machine/sysnews.h>
2927 /* cacheflush function for NEWS-OS 4.2.
2928 This function is called from trampoline-initialize code
2929 defined in config/mips/mips.h. */
/* Thin wrapper over the NEWS_CACHEFLUSH syscall; FLAG appears unused
   here (FLUSH_BCACHE is passed unconditionally).  */
2932 cacheflush (char *beg, int size, int flag)
2934 if (syscall (SYS_sysnews, NEWS_CACHEFLUSH, beg, size, FLUSH_BCACHE))
2936 perror ("cache_flush");
2942 #endif /* sony_news */
2943 #endif /* L_trampoline */
2948 #include "gbl-ctors.h"
2949 /* Some systems use __main in a way incompatible with its use in gcc, in these
2950 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2951 give the same symbol without quotes for an alternative entry point. You
2952 must define both, or neither. */
2954 #define NAME__MAIN "__main"
2955 #define SYMBOL__MAIN __main
2958 #ifdef INIT_SECTION_ASM_OP
2959 #undef HAS_INIT_SECTION
2960 #define HAS_INIT_SECTION
2963 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2965 /* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
2966 code to run constructors. In that case, we need to handle EH here, too. */
2968 #ifdef EH_FRAME_SECTION
2970 extern unsigned char __EH_FRAME_BEGIN__[];
2973 /* Run all the global destructors on exit from the program. */
/* __do_global_dtors: walk __DTOR_LIST__ (or the target's
   DO_GLOBAL_DTORS_BODY), then deregister this module's EH frame info
   exactly once (guarded by COMPLETED).  */
2976 __do_global_dtors (void)
2978 #ifdef DO_GLOBAL_DTORS_BODY
2979 DO_GLOBAL_DTORS_BODY;
/* First slot of __DTOR_LIST__ is a count/sentinel; start at [1].  */
2981 static func_ptr *p = __DTOR_LIST__ + 1;
2988 #if defined (EH_FRAME_SECTION) && !defined (HAS_INIT_SECTION)
2990 static int completed = 0;
2994 __deregister_frame_info (__EH_FRAME_BEGIN__);
3001 #ifndef HAS_INIT_SECTION
3002 /* Run all the global constructors on entry to the program. */
/* __do_global_ctors: register EH frame info (when configured), run the
   target's constructor body, and arrange for destructors at exit.  */
3005 __do_global_ctors (void)
3007 #ifdef EH_FRAME_SECTION
3009 static struct object object;
3010 __register_frame_info (__EH_FRAME_BEGIN__, &object);
3013 DO_GLOBAL_CTORS_BODY;
3014 atexit (__do_global_dtors);
3016 #endif /* no HAS_INIT_SECTION */
3018 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
3019 /* Subroutine called automatically by `main'.
3020 Compiling a global function named `main'
3021 produces an automatic call to this function at the beginning.
3023 For many systems, this routine calls __do_global_ctors.
3024 For systems which support a .init section we use the .init section
3025 to run __do_global_ctors, so we need not do anything here. */
3030 /* Support recursive calls to `main': run initializers just once. */
3031 static int initialized;
3035 __do_global_ctors ();
3038 #endif /* no HAS_INIT_SECTION or INVOKE__main */
3040 #endif /* L__main */
3041 #endif /* __CYGWIN__ */
3041 #endif /* __CYGWIN__ */
3045 #include "gbl-ctors.h"
3047 /* Provide default definitions for the lists of constructors and
3048 destructors, so that we don't get linker errors. These symbols are
3049 intentionally bss symbols, so that gld and/or collect will provide
3050 the right values. */
3052 /* We declare the lists here with two elements each,
3053 so that they are valid empty lists if no other definition is loaded.
3055 If we are using the old "set" extensions to have the gnu linker
3056 collect ctors and dtors, then __CTOR_LIST__ and __DTOR_LIST__
3057 must be in the bss/common section.
3059 Long term no port should use those extensions. But many still do. */
3060 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
3061 #if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
/* Explicitly zero-initialized: valid empty lists if no other definition
   is linked in.  */
3062 func_ptr __CTOR_LIST__[2] = {0, 0};
3063 func_ptr __DTOR_LIST__[2] = {0, 0};
/* Tentative (bss/common) definitions so gld "set" collection can merge
   the real lists over them.  */
3065 func_ptr __CTOR_LIST__[2];
3066 func_ptr __DTOR_LIST__[2];
3068 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
3069 #endif /* L_ctors */
3073 #include "gbl-ctors.h"
/* atexit replacement for systems lacking one: keeps registered handlers
   in a heap-grown array, run in reverse order by exit() below.  */
3081 static func_ptr *atexit_chain = 0;
3082 static long atexit_chain_length = 0;
3083 static volatile long last_atexit_chain_slot = -1;
3086 atexit (func_ptr func)
3088 if (++last_atexit_chain_slot == atexit_chain_length)
/* Grow the chain in fixed increments of 32 slots.  */
3090 atexit_chain_length += 32;
3092 atexit_chain = (func_ptr *) realloc (atexit_chain, atexit_chain_length
3093 * sizeof (func_ptr));
3095 atexit_chain = (func_ptr *) malloc (atexit_chain_length
3096 * sizeof (func_ptr));
/* Allocation failed: roll back the bookkeeping and fail the call.  */
3099 atexit_chain_length = 0;
3100 last_atexit_chain_slot = -1;
3105 atexit_chain[last_atexit_chain_slot] = func;
3109 extern void _cleanup (void);
3110 extern void _exit (int) __attribute__ ((__noreturn__));
/* exit tail: invoke registered atexit handlers in LIFO order (clearing
   each slot after the call), then release the chain.  */
3117 for ( ; last_atexit_chain_slot-- >= 0; )
3119 (*atexit_chain[last_atexit_chain_slot + 1]) ();
3120 atexit_chain[last_atexit_chain_slot + 1] = 0;
3122 free (atexit_chain);
3135 /* Simple; we just need a wrapper for ON_EXIT. */
/* Alternative atexit for systems providing ON_EXIT natively.  */
3137 atexit (func_ptr func)
3139 return ON_EXIT (func);
3142 #endif /* ON_EXIT */
3143 #endif /* NEED_ATEXIT */
3151 /* Shared exception handling support routines. */
/* Default terminate handler; __terminate_func lets language runtimes
   install their own (e.g. C++ std::terminate).  */
3154 __default_terminate (void)
3159 void (*__terminate_func)(void) __attribute__ ((__noreturn__)) =
3160 __default_terminate;
3162 void __attribute__((__noreturn__))
3165 (*__terminate_func)();
/* Trivial type matcher: types "match" when their names compare equal.  */
3169 __throw_type_match (void *catch_type, void *throw_type, void *obj)
3172 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
3173 catch_type, throw_type);
3175 if (strcmp ((const char *)catch_type, (const char *)throw_type) == 0)
3186 /* Include definitions of EH context and table layout */
3188 #include "eh-common.h"
3189 #ifndef inhibit_libc
3193 /* Allocate and return a new EH context structure. */
/* Heap-allocates an eh_full_context (eh_context plus its trailing
   storage), zeroes it, and wires up the dynamic handler chain.  */
3197 new_eh_context (void)
3199 struct eh_full_context {
3200 struct eh_context c;
3202 } *ehfc = (struct eh_full_context *) malloc (sizeof *ehfc);
3207 memset (ehfc, 0, sizeof *ehfc);
3209 ehfc->c.dynamic_handler_chain = (void **) ehfc->top_elt;
3211 /* This should optimize out entirely. This should always be true,
3212 but just in case it ever isn't, don't allow bogus code to be
/* Sanity check: the eh_context must be the first member, so the two
   pointers coincide.  */
3215 if ((void*)(&ehfc->c) != (void*)ehfc)
/* Thread-local storage key holding the per-thread EH context.  */
3221 static __gthread_key_t eh_context_key;
3223 /* Destructor for struct eh_context. */
3225 eh_context_free (void *ptr)
3227 __gthread_key_dtor (eh_context_key, ptr);
3233 /* Pointer to function to return EH context. */
/* Dispatch: starts at the one-shot initializer, which then rebinds
   get_eh_context to the static or thread-specific accessor.  */
3235 static struct eh_context *eh_context_initialize (void);
3236 static struct eh_context *eh_context_static (void);
3238 static struct eh_context *eh_context_specific (void);
3241 static struct eh_context *(*get_eh_context) (void) = &eh_context_initialize;
3243 /* Routine to get EH context.
3244 This one will simply call the function pointer. */
3247 __get_eh_context (void)
3249 return (void *) (*get_eh_context) ();
3252 /* Get and set the language specific info pointer. */
3255 __get_eh_info (void)
3257 struct eh_context *eh = (*get_eh_context) ();
3261 #ifdef DWARF2_UNWIND_INFO
/* One-shot initialization of the DWARF register-size table used by the
   unwinder.  */
3262 static int dwarf_reg_size_table_initialized = 0;
3263 static char dwarf_reg_size_table[DWARF_FRAME_REGISTERS];
3266 init_reg_size_table (void)
3268 __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table);
3269 dwarf_reg_size_table_initialized = 1;
3275 eh_threads_initialize (void)
3277 /* Try to create the key. If it fails, revert to static method,
3278 otherwise start using thread specific EH contexts. */
3279 if (__gthread_key_create (&eh_context_key, &eh_context_free) == 0)
3280 get_eh_context = &eh_context_specific;
3282 get_eh_context = &eh_context_static;
3284 #endif /* no __GTHREADS */
3286 /* Initialize EH context.
3287 This will be called only once, since we change GET_EH_CONTEXT
3288 pointer to another routine. */
3290 static struct eh_context *
3291 eh_context_initialize (void)
3295 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
3296 /* Make sure that get_eh_context does not point to us anymore.
3297 Some systems have dummy thread routines in their libc that
3298 return a success (Solaris 2.6 for example). */
3299 if (__gthread_once (&once, eh_threads_initialize) != 0
3300 || get_eh_context == &eh_context_initialize)
3302 /* Use static version of EH context. */
3303 get_eh_context = &eh_context_static;
3305 #ifdef DWARF2_UNWIND_INFO
/* Initialize the reg-size table too, also guarding against a stub
   __gthread_once.  */
3307 static __gthread_once_t once_regsizes = __GTHREAD_ONCE_INIT;
3308 if (__gthread_once (&once_regsizes, init_reg_size_table) != 0
3309 || ! dwarf_reg_size_table_initialized)
3310 init_reg_size_table ();
3314 #else /* no __GTHREADS */
3316 /* Use static version of EH context. */
3317 get_eh_context = &eh_context_static;
3319 #ifdef DWARF2_UNWIND_INFO
3320 init_reg_size_table ();
3323 #endif /* no __GTHREADS */
/* Tail-dispatch through the (now rebound) accessor.  */
3325 return (*get_eh_context) ();
3328 /* Return a static EH context. */
/* Single-threaded fallback: one process-wide eh_context, lazily zeroed
   on first use.  */
3330 static struct eh_context *
3331 eh_context_static (void)
3333 static struct eh_context eh;
3334 static int initialized;
3335 static void *top_elt[2];
3340 memset (&eh, 0, sizeof eh);
3341 eh.dynamic_handler_chain = top_elt;
3347 /* Return a thread specific EH context. */
/* Thread-aware accessor: fetch this thread's context from TLS,
   creating and storing a fresh one on first use.  */
3349 static struct eh_context *
3350 eh_context_specific (void)
3352 struct eh_context *eh;
3353 eh = (struct eh_context *) __gthread_getspecific (eh_context_key);
3356 eh = new_eh_context ();
3357 if (__gthread_setspecific (eh_context_key, (void *) eh) != 0)
3363 #endif /* __GTHREADS */
3365 /* Support routines for alloc/free during exception handling */
3367 /* __eh_alloc and __eh_free attempt allocation using malloc, but fall back to
3368 the small arena in the eh_context. This is needed because throwing an
3369 out-of-memory exception would fail otherwise. The emergency space is
3370 allocated in blocks of size EH_ALLOC_ALIGN, the
3371 minimum allocation being two blocks. A bitmask indicates which blocks
3372 have been allocated. To indicate the size of an allocation, the bit for
3373 the final block is not set. Hence each allocation is a run of 1s followed
3376 __eh_alloc (size_t size)
3385 struct eh_context *eh = __get_eh_context ();
/* Convert the byte size into EH_ALLOC_ALIGN-sized blocks.  */
3386 unsigned blocks = (size + EH_ALLOC_ALIGN - 1) / EH_ALLOC_ALIGN;
/* real_mask covers each run plus its terminating (clear) bit.  */
3387 unsigned real_mask = eh->alloc_mask | (eh->alloc_mask << 1);
3391 if (blocks > EH_ALLOC_SIZE / EH_ALLOC_ALIGN)
/* Minimum allocation is two blocks (see the comment above).  */
3393 blocks += blocks == 1;
3394 our_mask = (1 << blocks) - 1;
/* Scan downward for a gap of BLOCKS free blocks in the arena.  */
3396 for (ix = EH_ALLOC_SIZE / EH_ALLOC_ALIGN - blocks; ix; ix--)
3397 if (! ((real_mask >> ix) & our_mask))
3399 /* found some space */
3400 p = &eh->alloc_buffer[ix * EH_ALLOC_ALIGN];
/* Mark the run allocated, leaving the final block's bit clear.  */
3401 eh->alloc_mask |= (our_mask >> 1) << ix;
3409 /* Free the memory for a cp_eh_info and associated exception, given
3410 a pointer to the cp_eh_info. */
/* If P lies inside the emergency arena, clear its run of mask bits;
   otherwise it came from malloc (freed in the elided branch).  */
3414 struct eh_context *eh = __get_eh_context ();
3416 ptrdiff_t diff = (char *)p - &eh->alloc_buffer[0];
3417 if (diff >= 0 && diff < EH_ALLOC_SIZE)
3419 unsigned mask = eh->alloc_mask;
3420 unsigned bit = 1 << (diff / EH_ALLOC_ALIGN);
3428 eh->alloc_mask = mask;
3434 /* Support routines for setjmp/longjmp exception handling. */
3436 /* Calls to __sjthrow are generated by the compiler when an exception
3437 is raised when using the setjmp/longjmp exception handling codegen
3440 #ifdef DONT_USE_BUILTIN_SETJMP
3441 extern void longjmp (void *, int);
3444 /* Routine to get the head of the current thread's dynamic handler chain
3445 use for exception handling. */
3448 __get_dynamic_handler_chain (void)
3450 struct eh_context *eh = (*get_eh_context) ();
3451 return &eh->dynamic_handler_chain;
3454 /* This is used to throw an exception when the setjmp/longjmp codegen
3455 method is used for exception handling.
3457 We call __terminate if there are no handlers left. Otherwise we run the
3458 cleanup actions off the dynamic cleanup stack, and pop the top of the
3459 dynamic handler chain, and use longjmp to transfer back to the associated
3465 struct eh_context *eh = (*get_eh_context) ();
3466 void ***dhc = &eh->dynamic_handler_chain;
3468 void (*func)(void *, int);
3470 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3471 void ***cleanup = (void***)&(*dhc)[1];
3473 /* If there are any cleanups in the chain, run them now. */
3477 void **buf = (void**)store;
/* setjmp here so a cleanup that itself throws re-enters safely.  */
3482 #ifdef DONT_USE_BUILTIN_SETJMP
3483 if (! setjmp (&buf[2]))
3485 if (! __builtin_setjmp (&buf[2]))
/* Cleanup record layout: [0]=next, [1]=function, [2]=argument.  */
3491 func = (void(*)(void*, int))cleanup[0][1];
3492 arg = (void*)cleanup[0][2];
3494 /* Update this before running the cleanup. */
3495 cleanup[0] = (void **)cleanup[0][0];
3508 /* We must call terminate if we try and rethrow an exception, when
3509 there is no exception currently active and when there are no
3511 if (! eh->info || (*dhc)[0] == 0)
3514 /* Find the jmpbuf associated with the top element of the dynamic
3515 handler chain. The jumpbuf starts two words into the buffer. */
3516 jmpbuf = &(*dhc)[2];
3518 /* Then we pop the top element off the dynamic handler chain. */
3519 *dhc = (void**)(*dhc)[0];
3521 /* And then we jump to the handler. */
3523 #ifdef DONT_USE_BUILTIN_SETJMP
3524 longjmp (jmpbuf, 1);
3526 __builtin_longjmp (jmpbuf, 1);
3530 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3531 handler, then pop the handler off the dynamic handler stack, and
3532 then throw. This is used to skip the first handler, and transfer
3533 control to the next handler in the dynamic handler stack. */
3536 __sjpopnthrow (void)
3538 struct eh_context *eh = (*get_eh_context) ();
3539 void ***dhc = &eh->dynamic_handler_chain;
3540 void (*func)(void *, int);
3542 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3543 void ***cleanup = (void***)&(*dhc)[1];
3545 /* If there are any cleanups in the chain, run them now. */
3549 void **buf = (void**)store;
/* Same guarded-cleanup pattern as __sjthrow above.  */
3554 #ifdef DONT_USE_BUILTIN_SETJMP
3555 if (! setjmp (&buf[2]))
3557 if (! __builtin_setjmp (&buf[2]))
/* Cleanup record layout: [0]=next, [1]=function, [2]=argument.  */
3563 func = (void(*)(void*, int))cleanup[0][1];
3564 arg = (void*)cleanup[0][2];
3566 /* Update this before running the cleanup. */
3567 cleanup[0] = (void **)cleanup[0][0];
3580 /* Then we pop the top element off the dynamic handler chain. */
3581 *dhc = (void**)(*dhc)[0];
3586 /* Support code for all exception region-based exception handling. */
/* __eh_rtime_match: ask the language-supplied match function whether
   the in-flight exception matches RTIME; nonzero result means match.  */
3589 __eh_rtime_match (void *rtime)
3592 __eh_matcher matcher;
3595 info = *(__get_eh_info ());
3596 matcher = ((__eh_info *)info)->match_function;
/* A missing matcher is a compiler bug; report it when stdio exists.  */
3599 #ifndef inhibit_libc
3600 fprintf (stderr, "Internal Compiler Bug: No runtime type matcher.");
3604 ret = (*matcher) (info, rtime, (void *)0);
3605 return (ret != NULL);
3608 /* This value identifies the place from which an exception is being
3611 #ifdef EH_TABLE_LOOKUP
3617 #ifdef DWARF2_UNWIND_INFO
3619 /* Return the table version of an exception descriptor */
3622 __get_eh_table_version (exception_descriptor *table)
3624 return table->lang.version;
3627 /* Return the originating table language of an exception descriptor */
3630 __get_eh_table_language (exception_descriptor *table)
3632 return table->lang.language;
3635 /* This routine takes a PC and a pointer to the exception region TABLE for
3636 its translation unit, and returns the address of the exception handler
3637 associated with the closest exception table handler entry associated
3638 with that PC, or 0 if there are no table entries the PC fits in.
3640 In the event of a tie, we have to give the last entry, as it represents
3644 old_find_exception_handler (void *pc, old_exception_table *table)
3651 /* We can't do a binary search because the table isn't guaranteed
3652 to be sorted from function to function. */
/* Table is terminated by a start_region of (void *) -1.  */
3653 for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
3655 if (table[pos].start_region <= pc && table[pos].end_region > pc)
3657 /* This can apply. Make sure it is at least as small as
3658 the previous best. */
3659 if (best == -1 || (table[pos].end_region <= table[best].end_region
3660 && table[pos].start_region >= table[best].start_region))
3663 /* But it is sorted by starting PC within a function. */
3664 else if (best >= 0 && table[pos].start_region > pc)
3668 return table[best].exception_handler;
3674 /* find_exception_handler finds the correct handler, if there is one, to
3675 handle an exception.
3676 returns a pointer to the handler to which control should be transferred,
3677 or NULL if there is nothing left.
3679 PC - pc where the exception originates. If this is a rethrow,
3680 then this starts out as a pointer to the exception table
3681 entry we wish to rethrow out of.
3682 TABLE - exception table for the current module.
3683 EH_INFO - eh info pointer for this exception.
3684 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3685 CLEANUP - returned flag indicating whether this is a cleanup handler.
3688 find_exception_handler (void *pc, exception_descriptor *table,
3689 __eh_info *eh_info, int rethrow, int *cleanup)
3692 void *retval = NULL;
3697 /* The new model assumed the table is sorted inner-most out so the
3698 first region we find which matches is the correct one */
3700 exception_table *tab = &(table->table[0]);
3702 /* Subtract 1 from the PC to avoid hitting the next region */
3705 /* pc is actually the region table entry to rethrow out of */
3706 pos = ((exception_table *) pc) - tab;
3707 pc = ((exception_table *) pc)->end_region - 1;
3709 /* The label is always on the LAST handler entry for a region,
3710 so we know the next entry is a different region, even if the
3711 addresses are the same. Make sure it's not the end of the table, though. */
3712 if (tab[pos].start_region != (void *) -1)
3718 /* We can't do a binary search because the table is in inner-most
3719 to outermost address ranges within functions */
3720 for ( ; tab[pos].start_region != (void *) -1; pos++)
3722 if (tab[pos].start_region <= pc && tab[pos].end_region > pc)
3724 if (tab[pos].match_info)
3726 __eh_matcher matcher = eh_info->match_function;
3727 /* match info but no matcher is NOT a match */
3730 void *ret = (*matcher)((void *) eh_info,
3731 tab[pos].match_info, table);
3735 retval = tab[pos].exception_handler;
/* Entry with no match_info: a cleanup handler; always applies.  */
3744 retval = tab[pos].exception_handler;
3751 #endif /* DWARF2_UNWIND_INFO */
3752 #endif /* EH_TABLE_LOOKUP */
3754 #ifdef DWARF2_UNWIND_INFO
3755 /* Support code for exception handling using static unwind information. */
3759 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3760 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3761 avoid a warning about casting between int and pointer of different
   sizes (the closing of this comment is elided in this listing).  */
   /* An integer type exactly as wide as a pointer, via GCC's mode attribute.  */
3764 typedef int ptr_type __attribute__ ((mode (pointer)));
3766 #ifdef INCOMING_REGNO
3767 /* Is the saved value for register REG in frame UDATA stored in a register
3768 window in the previous frame? */
3770 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3771 to use the macro here. One wonders, though, that perhaps TARGET_FLAT
3772 compiled functions won't work with the frame-unwind stuff here.
3773 Perhaps the entirety of in_reg_window should be conditional on having
3774 seen a DW_CFA_GNU_window_save? */
3775 #define target_flags 0
3778 in_reg_window (int reg, frame_state *udata)
   /* A register saved in itself (REG_SAVED_REG with the incoming regno
      equal to reg) is in a register window.  */
3780 if (udata->saved[reg] == REG_SAVED_REG)
3781 return INCOMING_REGNO (reg) == reg;
3782 if (udata->saved[reg] != REG_SAVED_OFFSET)
   /* REG_SAVED_OFFSET case: the sign of the offset relative to the CFA
      distinguishes a register-window save area from an ordinary stack
      slot, depending on stack growth direction.  */
3785 #ifdef STACK_GROWS_DOWNWARD
3786 return udata->reg_or_offset[reg] > 0;
3788 return udata->reg_or_offset[reg] < 0;
   /* Fallback variant when INCOMING_REGNO is not defined: no register
      windows on this target (elided body presumably returns 0 —
      TODO confirm).  */
3793 in_reg_window (int reg __attribute__ ((__unused__)),
3794 frame_state *udata __attribute__ ((__unused__)))
3798 #endif /* INCOMING_REGNO */
3800 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3801 frame called by UDATA or 0. */
3804 get_reg_addr (unsigned reg, frame_state *udata, frame_state *sub_udata)
   /* Follow chains of register-to-register saves until we reach a
      concrete save location.  */
3806 while (udata->saved[reg] == REG_SAVED_REG)
3808 reg = udata->reg_or_offset[reg];
3809 if (in_reg_window (reg, udata))
   /* Saved at an offset from the frame's CFA.  (The failure path for an
      unsaved register is elided in this listing.)  */
3815 if (udata->saved[reg] == REG_SAVED_OFFSET)
3816 return (word_type *)(udata->cfa + udata->reg_or_offset[reg]);
3821 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3822 frame called by UDATA or 0. */
3824 static inline void *
3825 get_reg (unsigned reg, frame_state *udata, frame_state *sub_udata)
   /* Load a word from the save slot and narrow/widen it through ptr_type
      so the cast is valid on ABIs where void* is smaller than a word.  */
3827 return (void *)(ptr_type) *get_reg_addr (reg, udata, sub_udata);
3830 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
3832 put_reg (unsigned reg, void *val, frame_state *udata)
   /* Double cast mirrors get_reg: pointer -> ptr_type -> word_type.  */
3835 *get_reg_addr (reg, udata, NULL) = (word_type)(ptr_type) val;
3838 /* Copy the saved value for register REG from frame UDATA to frame
3839 TARGET_UDATA. Unlike the previous two functions, this can handle
3840 registers that are not one word large. */
3843 copy_reg (unsigned reg, frame_state *udata, frame_state *target_udata)
3845 word_type *preg = get_reg_addr (reg, udata, NULL);
3846 word_type *ptreg = get_reg_addr (reg, target_udata, NULL);
   /* dwarf_reg_size_table gives the per-register size, so multi-word
      registers are copied in full.  */
3848 memcpy (ptreg, preg, dwarf_reg_size_table [reg]);
3851 /* Retrieve the return address for frame UDATA.  SUB_UDATA is the frame
   called by UDATA, forwarded to get_reg. */
3853 static inline void *
3854 get_return_addr (frame_state *udata, frame_state *sub_udata)
   /* Undo any target-specific encoding of the stored return address.  */
3856 return __builtin_extract_return_addr
3857 (get_reg (udata->retaddr_column, udata, sub_udata));
3860 /* Overwrite the return address for frame UDATA with VAL. */
3863 put_return_addr (void *val, frame_state *udata)
   /* Re-apply the target-specific return-address encoding before storing.  */
3865 val = __builtin_frob_return_addr (val);
3866 put_reg (udata->retaddr_column, val, udata);
3869 /* Given the current frame UDATA and its return address PC, return the
3870 information about the calling frame in CALLER_UDATA.
   NOTE(review): the early-exit for a failed __frame_state_for lookup is
   elided in this listing. */
3873 next_stack_level (void *pc, frame_state *udata, frame_state *caller_udata)
   /* Look up the caller's unwind state from the frame tables by PC.  */
3875 caller_udata = __frame_state_for (pc, caller_udata);
3879 /* Now go back to our caller's stack frame. If our caller's CFA register
3880 was saved in our stack frame, restore it; otherwise, assume the CFA
3881 register is SP and restore it to our CFA value. */
3882 if (udata->saved[caller_udata->cfa_reg])
3883 caller_udata->cfa = get_reg (caller_udata->cfa_reg, udata, 0);
3885 caller_udata->cfa = udata->cfa;
   /* Indirect CFA: dereference through base_offset before applying the
      frame's CFA offset.  */
3886 if (caller_udata->indirect)
3887 caller_udata->cfa = * (void **) ((unsigned char *)caller_udata->cfa
3888 + caller_udata->base_offset);
3889 caller_udata->cfa += caller_udata->cfa_offset;
3891 return caller_udata;
3894 /* Hook to call before __terminate if only cleanup handlers remain.
   A debugger can set a breakpoint here to inspect the state at the throw
   point before the stack is unwound (see throw_helper's header comment).
   The (empty) body is elided in this listing. */
3896 __unwinding_cleanup (void)
3900 /* throw_helper performs some of the common grunt work for a throw. This
3901 routine is called by throw and rethrows. This is pretty much split
3902 out from the old __throw routine. An addition has been added which allows
3903 for a dummy call to a routine __unwinding_cleanup() when there is nothing
3904 but cleanups remaining. This allows a debugger to examine the state
3905 at which the throw was executed, before any cleanups, rather than
3906 at the terminate point after the stack has been unwound.
3908 EH is the current eh_context structure.
3909 PC is the address of the call to __throw.
3910 MY_UDATA is the unwind information for __throw.
3911 OFFSET_P is where we return the SP adjustment offset.
   Returns the handler label to transfer control to (see callers __throw
   and __rethrow).  NOTE(review): many lines of this function are elided
   in this listing; comments describe only the visible statements. */
3914 throw_helper (struct eh_context *eh, void *pc, frame_state *my_udata,
   /* Two frame_state buffers are alternated while walking up the stack.  */
3917 frame_state ustruct2, *udata = &ustruct2;
3918 frame_state ustruct;
3919 frame_state *sub_udata = &ustruct;
3920 void *saved_pc = pc;
3922 void *handler_p = 0;
3924 frame_state saved_ustruct;
3927 int only_cleanup = 0;
3929 int saved_state = 0;
3931 __eh_info *eh_info = (__eh_info *)eh->info;
3933 /* Do we find a handler based on a re-throw PC? */
3934 if (eh->table_index != (void *) 0)
   /* Phase 1: search outward from the throw frame for a handler.  */
3937 memcpy (udata, my_udata, sizeof (*udata));
3939 handler = (void *) 0;
3942 frame_state *p = udata;
3943 udata = next_stack_level (pc, udata, sub_udata);
3946 /* If we couldn't find the next frame, we lose. */
3950 if (udata->eh_ptr == NULL)
   /* Distinguish new-model tables by their runtime id field.  */
3953 new_eh_model = (((exception_descriptor *)(udata->eh_ptr))->
3954 runtime_id_field == NEW_EH_RUNTIME);
   /* Rethrow: the table_index recorded by __rethrow is consumed here.  */
3959 handler = find_exception_handler (eh->table_index, udata->eh_ptr,
3960 eh_info, 1, &cleanup);
3961 eh->table_index = (void *)0;
3965 handler = find_exception_handler (pc, udata->eh_ptr, eh_info,
3968 handler = old_find_exception_handler (pc, udata->eh_ptr);
3970 /* If we found one, we can stop searching, if it's not a cleanup.
3971 For cleanups, we save the state, and keep looking. This allows
3972 us to call a debug hook if there is nothing but cleanups left. */
3979 saved_ustruct = *udata;
3980 handler_p = handler;
3993 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
3994 hitting the beginning of the next region. */
3995 pc = get_return_addr (udata, sub_udata) - 1;
   /* Restore the saved cleanup-handler state; nothing but cleanups were
      found, so let the debugger hook fire first.  */
4000 udata = &saved_ustruct;
4001 handler = handler_p;
4004 __unwinding_cleanup ();
4007 /* If we haven't found a handler by now, this is an unhandled
   exception (the __terminate path is elided in this listing).  */
4012 eh->handler_label = handler;
4014 args_size = udata->args_size;
4017 /* We found a handler in the throw context, no need to unwind. */
4023 /* Unwind all the frames between this one and the handler by copying
4024 their saved register values into our register save slots. */
4026 /* Remember the PC where we found the handler. */
4027 void *handler_pc = pc;
4029 /* Start from the throw context again. */
4031 memcpy (udata, my_udata, sizeof (*udata));
   /* Phase 2: re-walk the same frames, accumulating saved registers.  */
4033 while (pc != handler_pc)
4035 frame_state *p = udata;
4036 udata = next_stack_level (pc, udata, sub_udata);
4039 for (i = 0; i < DWARF_FRAME_REGISTERS; ++i)
4040 if (i != udata->retaddr_column && udata->saved[i])
4042 /* If you modify the saved value of the return address
4043 register on the SPARC, you modify the return address for
4044 your caller's frame. Don't do that here, as it will
4045 confuse get_return_addr. */
4046 if (in_reg_window (i, udata)
4047 && udata->saved[udata->retaddr_column] == REG_SAVED_REG
4048 && udata->reg_or_offset[udata->retaddr_column] == i)
4050 copy_reg (i, udata, my_udata);
4053 pc = get_return_addr (udata, sub_udata) - 1;
4056 /* But we do need to update the saved return address register from
4057 the last frame we unwind, or the handler frame will have the wrong
   return address (comment continuation elided in this listing). */
4059 if (udata->saved[udata->retaddr_column] == REG_SAVED_REG)
4061 i = udata->reg_or_offset[udata->retaddr_column];
4062 if (in_reg_window (i, udata))
4063 copy_reg (i, udata, my_udata);
4066 /* udata now refers to the frame called by the handler frame. */
4068 /* We adjust SP by the difference between __throw's CFA and the CFA for
4069 the frame called by the handler frame, because those CFAs correspond
4070 to the SP values at the two call sites. We need to further adjust by
4071 the args_size of the handler frame itself to get the handler frame's
4072 SP from before the args were pushed for that call. */
4073 #ifdef STACK_GROWS_DOWNWARD
4074 *offset_p = udata->cfa - my_udata->cfa + args_size;
4076 *offset_p = my_udata->cfa - udata->cfa - args_size;
4083 /* We first search for an exception handler, and if we don't find
4084 it, we call __terminate on the current stack frame so that we may
4085 use the debugger to walk the stack and understand why no handler
   was found (continuation elided in this listing).
4088 If we find one, then we unwind the frames down to the one that
4089 has the handler and transfer control into the handler. */
4091 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
   /* NOTE(review): the function's opening lines (signature, locals) are
      elided in this listing; what follows is __throw's visible body.  */
4096 struct eh_context *eh = (*get_eh_context) ();
4100 /* XXX maybe make my_ustruct static so we don't have to look it up for
   every throw (continuation elided). */
4102 frame_state my_ustruct, *my_udata = &my_ustruct;
4104 /* This is required for C++ semantics. We must call terminate if we
4105 try and rethrow an exception, when there is no exception currently
   being handled (the check itself is elided in this listing). */
4110 /* Start at our stack frame. */
4112 my_udata = __frame_state_for (&&label, my_udata);
4116 /* We need to get the value from the CFA register. */
4117 my_udata->cfa = __builtin_dwarf_cfa ();
4119 /* Do any necessary initialization to access arbitrary stack frames.
4120 On the SPARC, this means flushing the register windows. */
4121 __builtin_unwind_init ();
4123 /* Now reset pc to the right throw point. */
4124 pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
   /* Find the handler and the SP adjustment, then transfer control.  */
4126 handler = throw_helper (eh, pc, my_udata, &offset);
4130 __builtin_eh_return ((void *)eh, offset, handler);
4132 /* Epilogue: restore the handler frame's register values and return
   to the handler (continuation elided in this listing). */
4136 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
   /* Rethrow out of the exception-table entry INDEX.  Mirrors __throw,
      except that eh->table_index is set so throw_helper resumes the
      handler search from that table entry (see find_exception_handler's
      RETHROW documentation). */
4139 __rethrow (void *index)
4141 struct eh_context *eh = (*get_eh_context) ();
4145 /* XXX maybe make my_ustruct static so we don't have to look it up for
   every throw (continuation elided). */
4147 frame_state my_ustruct, *my_udata = &my_ustruct;
4149 /* This is required for C++ semantics. We must call terminate if we
4150 try and rethrow an exception, when there is no exception currently
   being handled (the check itself is elided in this listing). */
4155 /* This is the table index we want to rethrow from. The value of
4156 the END_REGION label is used for the PC of the throw, and the
4157 search begins with the next table entry. */
4158 eh->table_index = index;
4160 /* Start at our stack frame. */
4162 my_udata = __frame_state_for (&&label, my_udata);
4166 /* We need to get the value from the CFA register. */
4167 my_udata->cfa = __builtin_dwarf_cfa ();
4169 /* Do any necessary initialization to access arbitrary stack frames.
4170 On the SPARC, this means flushing the register windows. */
4171 __builtin_unwind_init ();
4173 /* Now reset pc to the right throw point. */
4174 pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
4176 handler = throw_helper (eh, pc, my_udata, &offset);
4180 __builtin_eh_return ((void *)eh, offset, handler);
4182 /* Epilogue: restore the handler frame's register values and return
   to the handler (continuation elided in this listing). */
4185 #endif /* DWARF2_UNWIND_INFO */
4187 #ifdef IA64_UNWIND_INFO
4190 /* Return handler to which we want to transfer control, NULL if we don't
4191 intend to handle this exception here.
   Linear scan of the old-style exception table for the innermost
   (smallest) region containing PC.  A start_region of (void *) -1 is the
   end-of-table sentinel.  NOTE(review): locals and the not-found return
   path are elided in this listing. */
4193 __ia64_personality_v1 (void *pc, old_exception_table *table)
4200 for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
4202 if (table[pos].start_region <= pc && table[pos].end_region > pc)
4204 /* This can apply. Make sure it is at least as small as
4205 the previous best. */
4206 if (best == -1 || (table[pos].end_region <= table[best].end_region
4207 && table[pos].start_region >= table[best].start_region))
4210 /* It is sorted by starting PC within a function. */
4211 else if (best >= 0 && table[pos].start_region > pc)
4215 return table[best].exception_handler;
   /* IA64 analogue of throw_helper: walk up the register-stack/backing
      store frames looking for a handler, then unwind into THROW_FRAME.
      THROW_BSP/THROW_SP are __throw's ar.bsp and sp values.
      NOTE(review): many lines (locals, loop headers, branch structure)
      are elided in this listing; comments describe only visible code. */
4221 ia64_throw_helper (ia64_frame_state *throw_frame, ia64_frame_state *caller,
4222 void *throw_bsp, void *throw_sp)
4224 void *throw_pc = __builtin_return_address (0);
4225 unwind_info_ptr *info;
4226 void *pc, *handler = NULL;
4231 __builtin_ia64_flushrs (); /* Make the local register stacks available. */
4233 /* Start at our stack frame, get our state. */
4234 __build_ia64_frame_state (throw_pc, throw_frame, throw_bsp, throw_sp,
4237 /* Now we have to find the proper frame for pc, and see if there
4238 is a handler for it. If not, we keep going back frames until
4239 we do find one. Otherwise we call uncaught (). */
4242 memcpy (caller, throw_frame, sizeof (*caller));
4245 void *(*personality) ();
4249 /* We only care about the RP right now, so we don't need to keep
4250 any other information about a call frame right now. */
   /* Step to the caller: PC from the saved RP, BSP recomputed from PFS.  */
4251 pc = __get_real_reg_value (&caller->rp) - 1;
4252 bsp = __calc_caller_bsp ((long)__get_real_reg_value (&caller->pfs),
4254 info = __build_ia64_frame_state (pc, caller, bsp, caller->my_psp,
4257 /* If we couldn't find the next frame, we lose. */
4261 personality = __get_personality (info);
4262 /* TODO Haven't figured out how to actually load the personality address
4263 yet, so just always default to the one we expect for now. */
4264 if (personality != 0)
4265 personality = __ia64_personality_v1;
4266 eh_table = __get_except_table (info);
4267 /* If there is no personality routine, we'll keep unwinding. */
4269 /* Pass a segment relative PC address to the personality routine,
4270 because the unwind_info section uses segrel relocs. */
4271 handler = personality (pc - pc_base, eh_table);
4277 /* Handler is a segment relative address, so we must adjust it here. */
4278 handler += (long) pc_base;
4280 /* If we found a handler, we need to unwind the stack to that point.
4281 We do this by copying saved values from previous frames into the
4282 save slot for the throw_frame saved slots. When __throw returns,
4283 it'll pick up the correct values. */
4285 /* Start with where __throw saved things, and copy each saved register
4286 of each previous frame until we get to the one before we're
4287 throwing back to. */
4288 memcpy (caller, throw_frame, sizeof (*caller));
4289 for ( ; frame_count > 0; frame_count--)
4291 pc = __get_real_reg_value (&caller->rp) - 1;
4292 bsp = __calc_caller_bsp ((long)__get_real_reg_value (&caller->pfs),
4294 __build_ia64_frame_state (pc, caller, bsp, caller->my_psp, &pc_base);
4295 /* Any regs that were saved can be put in the throw frame now. */
4296 /* We don't want to copy any saved register from the
4297 target destination, but we do want to load up its frame. */
4298 if (frame_count > 1)
4299 __copy_saved_reg_state (throw_frame, caller);
4302 /* Set return address of the throw frame to the handler. */
4303 __set_real_reg_value (&throw_frame->rp, handler);
4305 /* TODO, do we need to do anything to make the values we wrote 'stick'? */
4306 /* Do we need to go through the whole loadrs sequence? */
   /* NOTE(review): the opening of this function (its signature and some
      locals) is elided in this listing; this is the IA64 __throw body.  */
   /* Bind the stack pointer to r12 so we can hand it to the helper.  */
4313 register void *stack_pointer __asm__("r12");
4314 struct eh_context *eh = (*get_eh_context) ();
4315 ia64_frame_state my_frame;
4316 ia64_frame_state originator; /* For the context handler is in. */
4317 void *bsp, *tmp_bsp;
4320 /* This is required for C++ semantics. We must call terminate if we
4321 try and rethrow an exception, when there is no exception currently
   being handled (the check itself is elided in this listing). */
4326 __builtin_unwind_init ();
4328 /* We have to call another routine to actually process the frame
4329 information, which will force all of __throw's local registers into
   backing store (continuation elided in this listing). */
4332 /* Get the value of ar.bsp while we're here. */
4334 bsp = __builtin_ia64_bsp ();
4335 ia64_throw_helper (&my_frame, &originator, bsp, stack_pointer);
4337 /* Now we have to fudge the bsp by the amount in our (__throw)
4338 frame marker, since the return is going to adjust it by that much. */
4340 tmp_bsp = __calc_caller_bsp ((long)__get_real_reg_value (&my_frame.pfs),
4342 offset = (char *)my_frame.my_bsp - (char *)tmp_bsp;
4343 tmp_bsp = (char *)originator.my_bsp + offset;
4345 __builtin_eh_return (tmp_bsp, offset, originator.my_sp);
4347 /* The return address was already set by throw_helper. */
4350 #endif /* IA64_UNWIND_INFO */