1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 92, 93, 94, 95, 96, 97, 98, 1999, 2000
4 Free Software Foundation, Inc.
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
22 GNU CC is distributed in the hope that it will be useful,
23 but WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 GNU General Public License for more details.
27 You should have received a copy of the GNU General Public License
28 along with GNU CC; see the file COPYING. If not, write to
29 the Free Software Foundation, 59 Temple Place - Suite 330,
30 Boston, MA 02111-1307, USA. */
32 /* It is incorrect to include config.h here, because this file is being
33 compiled for the target, and hence definitions concerning only the host
42 /* Don't use `fancy_abort' here even if config.h says to use it. */
49 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
50 #if defined (L_divdi3) || defined (L_moddi3)
56 __addvsi3 (SItype a, SItype b)
62 if (b >= 0 ? w < a : w > a)
70 __addvdi3 (DItype a, DItype b)
76 if (b >= 0 ? w < a : w > a)
85 __subvsi3 (SItype a, SItype b)
88 return __addvsi3 (a, (-b));
94 if (b >= 0 ? w > a : w < a)
104 __subvdi3 (DItype a, DItype b)
113 if (b >= 0 ? w > a : w < a)
123 __mulvsi3 (SItype a, SItype b)
129 if ((a >= 0 && b >= 0) ? w < 0
130 : (a >= 0 || b >= 0) ? w > 0 : w < 0)
145 if (a >= 0 ? w > 0 : w < 0)
160 if (a >= 0 ? w > 0 : w < 0)
209 __mulvdi3 (DItype u, DItype v)
215 if ((u >= 0 && v >= 0) ? w < 0
216 : (u >= 0 || v >= 0) ? w > 0 : w < 0)
232 w.s.high = -uu.s.high - ((UWtype) w.s.low > 0);
238 /* Unless shift functions are defined whith full ANSI prototypes,
239 parameter b will be promoted to int if word_type is smaller than an int. */
242 __lshrdi3 (DWtype u, word_type b)
253 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
257 w.s.low = (UWtype) uu.s.high >> -bm;
261 UWtype carries = (UWtype) uu.s.high << bm;
263 w.s.high = (UWtype) uu.s.high >> b;
264 w.s.low = ((UWtype) uu.s.low >> b) | carries;
273 __ashldi3 (DWtype u, word_type b)
284 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
288 w.s.high = (UWtype) uu.s.low << -bm;
292 UWtype carries = (UWtype) uu.s.low >> bm;
294 w.s.low = (UWtype) uu.s.low << b;
295 w.s.high = ((UWtype) uu.s.high << b) | carries;
304 __ashrdi3 (DWtype u, word_type b)
315 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
318 /* w.s.high = 1..1 or 0..0 */
319 w.s.high = uu.s.high >> (sizeof (Wtype) * BITS_PER_UNIT - 1);
320 w.s.low = uu.s.high >> -bm;
324 UWtype carries = (UWtype) uu.s.high << bm;
326 w.s.high = uu.s.high >> b;
327 w.s.low = ((UWtype) uu.s.low >> b) | carries;
339 UWtype word, count, add;
343 word = uu.s.low, add = 0;
344 else if (uu.s.high != 0)
345 word = uu.s.high, add = BITS_PER_UNIT * sizeof (Wtype);
349 count_trailing_zeros (count, word);
350 return count + add + 1;
356 __muldi3 (DWtype u, DWtype v)
364 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
365 w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
366 + (UWtype) uu.s.high * (UWtype) vv.s.low);
373 #if defined (sdiv_qrnnd)
375 __udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
382 if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
384 /* dividend, divisor, and quotient are nonnegative */
385 sdiv_qrnnd (q, r, a1, a0, d);
389 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
390 sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
391 /* Divide (c1*2^32 + c0) by d */
392 sdiv_qrnnd (q, r, c1, c0, d);
393 /* Add 2^31 to quotient */
394 q += (UWtype) 1 << (W_TYPE_SIZE - 1);
399 b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
400 c1 = a1 >> 1; /* A/2 */
401 c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);
403 if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
405 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
407 r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
424 else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
427 c0 = ~c0; /* logical NOT */
429 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
431 q = ~q; /* (A/2)/b1 */
434 r = 2*r + (a0 & 1); /* A/(2*b1) */
452 else /* Implies c1 = b1 */
453 { /* Hence a1 = d - 1 = 2*b1 - 1 */
471 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
473 __udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
474 UWtype a1 __attribute__ ((__unused__)),
475 UWtype a0 __attribute__ ((__unused__)),
476 UWtype d __attribute__ ((__unused__)))
483 #if (defined (L_udivdi3) || defined (L_divdi3) || \
484 defined (L_umoddi3) || defined (L_moddi3))
489 const UQItype __clz_tab[] =
491 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
492 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
493 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
494 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
495 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
496 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
497 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
498 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
504 #if (defined (L_udivdi3) || defined (L_divdi3) || \
505 defined (L_umoddi3) || defined (L_moddi3))
509 __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
514 UWtype d0, d1, n0, n1, n2;
526 #if !UDIV_NEEDS_NORMALIZATION
533 udiv_qrnnd (q0, n0, n1, n0, d0);
536 /* Remainder in n0. */
543 d0 = 1 / d0; /* Divide intentionally by zero. */
545 udiv_qrnnd (q1, n1, 0, n1, d0);
546 udiv_qrnnd (q0, n0, n1, n0, d0);
548 /* Remainder in n0. */
559 #else /* UDIV_NEEDS_NORMALIZATION */
567 count_leading_zeros (bm, d0);
571 /* Normalize, i.e. make the most significant bit of the
575 n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
579 udiv_qrnnd (q0, n0, n1, n0, d0);
582 /* Remainder in n0 >> bm. */
589 d0 = 1 / d0; /* Divide intentionally by zero. */
591 count_leading_zeros (bm, d0);
595 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
596 conclude (the most significant bit of n1 is set) /\ (the
597 leading quotient digit q1 = 1).
599 This special case is necessary, not an optimization.
600 (Shifts counts of W_TYPE_SIZE are undefined.) */
609 b = W_TYPE_SIZE - bm;
613 n1 = (n1 << bm) | (n0 >> b);
616 udiv_qrnnd (q1, n1, n2, n1, d0);
621 udiv_qrnnd (q0, n0, n1, n0, d0);
623 /* Remainder in n0 >> bm. */
633 #endif /* UDIV_NEEDS_NORMALIZATION */
644 /* Remainder in n1n0. */
656 count_leading_zeros (bm, d1);
659 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
660 conclude (the most significant bit of n1 is set) /\ (the
661 quotient digit q0 = 0 or 1).
663 This special case is necessary, not an optimization. */
665 /* The condition on the next line takes advantage of that
666 n1 >= d1 (true due to program flow). */
667 if (n1 > d1 || n0 >= d0)
670 sub_ddmmss (n1, n0, n1, n0, d1, d0);
689 b = W_TYPE_SIZE - bm;
691 d1 = (d1 << bm) | (d0 >> b);
694 n1 = (n1 << bm) | (n0 >> b);
697 udiv_qrnnd (q0, n1, n2, n1, d1);
698 umul_ppmm (m1, m0, q0, d0);
700 if (m1 > n1 || (m1 == n1 && m0 > n0))
703 sub_ddmmss (m1, m0, m1, m0, d1, d0);
708 /* Remainder in (n1n0 - m1m0) >> bm. */
711 sub_ddmmss (n1, n0, n1, n0, m1, m0);
712 rr.s.low = (n1 << b) | (n0 >> bm);
713 rr.s.high = n1 >> bm;
728 __divdi3 (DWtype u, DWtype v)
739 uu.ll = __negdi2 (uu.ll);
742 vv.ll = __negdi2 (vv.ll);
744 w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
754 __moddi3 (DWtype u, DWtype v)
765 uu.ll = __negdi2 (uu.ll);
767 vv.ll = __negdi2 (vv.ll);
769 (void) __udivmoddi4 (uu.ll, vv.ll, &w);
779 __umoddi3 (UDWtype u, UDWtype v)
783 (void) __udivmoddi4 (u, v, &w);
791 __udivdi3 (UDWtype n, UDWtype d)
793 return __udivmoddi4 (n, d, (UDWtype *) 0);
799 __cmpdi2 (DWtype a, DWtype b)
803 au.ll = a, bu.ll = b;
805 if (au.s.high < bu.s.high)
807 else if (au.s.high > bu.s.high)
809 if ((UWtype) au.s.low < (UWtype) bu.s.low)
811 else if ((UWtype) au.s.low > (UWtype) bu.s.low)
819 __ucmpdi2 (DWtype a, DWtype b)
823 au.ll = a, bu.ll = b;
825 if ((UWtype) au.s.high < (UWtype) bu.s.high)
827 else if ((UWtype) au.s.high > (UWtype) bu.s.high)
829 if ((UWtype) au.s.low < (UWtype) bu.s.low)
831 else if ((UWtype) au.s.low > (UWtype) bu.s.low)
837 #if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
838 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
839 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
842 __fixunstfDI (TFtype a)
850 /* Compute high word of result, as a flonum. */
851 b = (a / HIGH_WORD_COEFF);
852 /* Convert that to fixed (but not to DWtype!),
853 and shift it into the high word. */
856 /* Remove high part from the TFtype, leaving the low part as flonum. */
858 /* Convert that to fixed (but not to DWtype!) and add it in.
859 Sometimes A comes out negative. This is significant, since
860 A has more bits than a long int does. */
869 #if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
874 return - __fixunstfDI (-a);
875 return __fixunstfDI (a);
879 #if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
880 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
881 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
884 __fixunsxfDI (XFtype a)
892 /* Compute high word of result, as a flonum. */
893 b = (a / HIGH_WORD_COEFF);
894 /* Convert that to fixed (but not to DWtype!),
895 and shift it into the high word. */
898 /* Remove high part from the XFtype, leaving the low part as flonum. */
900 /* Convert that to fixed (but not to DWtype!) and add it in.
901 Sometimes A comes out negative. This is significant, since
902 A has more bits than a long int does. */
911 #if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
916 return - __fixunsxfDI (-a);
917 return __fixunsxfDI (a);
922 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
923 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
926 __fixunsdfDI (DFtype a)
934 /* Compute high word of result, as a flonum. */
935 b = (a / HIGH_WORD_COEFF);
936 /* Convert that to fixed (but not to DWtype!),
937 and shift it into the high word. */
940 /* Remove high part from the DFtype, leaving the low part as flonum. */
942 /* Convert that to fixed (but not to DWtype!) and add it in.
943 Sometimes A comes out negative. This is significant, since
944 A has more bits than a long int does. */
958 return - __fixunsdfDI (-a);
959 return __fixunsdfDI (a);
964 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
965 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
968 __fixunssfDI (SFtype original_a)
970 /* Convert the SFtype to a DFtype, because that is surely not going
971 to lose any bits. Some day someone else can write a faster version
972 that avoids converting to DFtype, and verify it really works right. */
973 DFtype a = original_a;
980 /* Compute high word of result, as a flonum. */
981 b = (a / HIGH_WORD_COEFF);
982 /* Convert that to fixed (but not to DWtype!),
983 and shift it into the high word. */
986 /* Remove high part from the DFtype, leaving the low part as flonum. */
988 /* Convert that to fixed (but not to DWtype!) and add it in.
989 Sometimes A comes out negative. This is significant, since
990 A has more bits than a long int does. */
1001 __fixsfdi (SFtype a)
1004 return - __fixunssfDI (-a);
1005 return __fixunssfDI (a);
1009 #if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
1010 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1011 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1012 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1015 __floatdixf (DWtype u)
1019 d = (Wtype) (u >> WORD_SIZE);
1020 d *= HIGH_HALFWORD_COEFF;
1021 d *= HIGH_HALFWORD_COEFF;
1022 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
1028 #if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
1029 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1030 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1031 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1034 __floatditf (DWtype u)
1038 d = (Wtype) (u >> WORD_SIZE);
1039 d *= HIGH_HALFWORD_COEFF;
1040 d *= HIGH_HALFWORD_COEFF;
1041 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
1048 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1049 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1050 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1053 __floatdidf (DWtype u)
1057 d = (Wtype) (u >> WORD_SIZE);
1058 d *= HIGH_HALFWORD_COEFF;
1059 d *= HIGH_HALFWORD_COEFF;
1060 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
1067 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1068 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1069 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1070 #define DI_SIZE (sizeof (DWtype) * BITS_PER_UNIT)
1072 /* Define codes for all the float formats that we know of. Note
1073 that this is copied from real.h. */
1075 #define UNKNOWN_FLOAT_FORMAT 0
1076 #define IEEE_FLOAT_FORMAT 1
1077 #define VAX_FLOAT_FORMAT 2
1078 #define IBM_FLOAT_FORMAT 3
1080 /* Default to IEEE float if not specified. Nearly all machines use it. */
1081 #ifndef HOST_FLOAT_FORMAT
1082 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
1085 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1090 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
1095 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
1101 __floatdisf (DWtype u)
1103 /* Do the calculation in DFmode
1104 so that we don't lose any of the precision of the high word
1105 while multiplying it. */
1108 /* Protect against double-rounding error.
1109 Represent any low-order bits, that might be truncated in DFmode,
1110 by a bit that won't be lost. The bit can go in anywhere below the
1111 rounding position of the SFmode. A fixed mask and bit position
1112 handles all usual configurations. It doesn't handle the case
1113 of 128-bit DImode, however. */
1114 if (DF_SIZE < DI_SIZE
1115 && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
1117 #define REP_BIT ((UDWtype) 1 << (DI_SIZE - DF_SIZE))
1118 if (! (- ((DWtype) 1 << DF_SIZE) < u
1119 && u < ((DWtype) 1 << DF_SIZE)))
1121 if ((UDWtype) u & (REP_BIT - 1))
1125 f = (Wtype) (u >> WORD_SIZE);
1126 f *= HIGH_HALFWORD_COEFF;
1127 f *= HIGH_HALFWORD_COEFF;
1128 f += (UWtype) (u & (HIGH_WORD_COEFF - 1));
1134 #if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
1135 /* Reenable the normal types, in case limits.h needs them. */
1148 __fixunsxfSI (XFtype a)
1150 if (a >= - (DFtype) LONG_MIN)
1151 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1157 /* Reenable the normal types, in case limits.h needs them. */
1170 __fixunsdfSI (DFtype a)
1172 if (a >= - (DFtype) LONG_MIN)
1173 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1179 /* Reenable the normal types, in case limits.h needs them. */
1192 __fixunssfSI (SFtype a)
1194 if (a >= - (SFtype) LONG_MIN)
1195 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1200 /* From here on down, the routines use normal data types. */
1202 #define SItype bogus_type
1203 #define USItype bogus_type
1204 #define DItype bogus_type
1205 #define UDItype bogus_type
1206 #define SFtype bogus_type
1207 #define DFtype bogus_type
1225 /* Like bcmp except the sign is meaningful.
1226 Result is negative if S1 is less than S2,
1227 positive if S1 is greater, 0 if S1 and S2 are equal. */
1230 __gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
1234 unsigned char c1 = *s1++, c2 = *s2++;
1251 #if defined(__svr4__) || defined(__alliant__)
1255 /* The Alliant needs the added underscore. */
1256 asm (".globl __builtin_saveregs");
1257 asm ("__builtin_saveregs:");
1258 asm (".globl ___builtin_saveregs");
1259 asm ("___builtin_saveregs:");
1261 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1262 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1263 area and also for a new va_list
1265 /* Save all argument registers in the arg reg save area. The
1266 arg reg save area must have the following layout (according
1278 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1279 asm (" fst.q %f12,16(%sp)");
1281 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1282 asm (" st.l %r17,36(%sp)");
1283 asm (" st.l %r18,40(%sp)");
1284 asm (" st.l %r19,44(%sp)");
1285 asm (" st.l %r20,48(%sp)");
1286 asm (" st.l %r21,52(%sp)");
1287 asm (" st.l %r22,56(%sp)");
1288 asm (" st.l %r23,60(%sp)");
1289 asm (" st.l %r24,64(%sp)");
1290 asm (" st.l %r25,68(%sp)");
1291 asm (" st.l %r26,72(%sp)");
1292 asm (" st.l %r27,76(%sp)");
1294 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1295 va_list structure. Put in into
1296 r16 so that it will be returned
1299 /* Initialize all fields of the new va_list structure. This
1300 structure looks like:
1303 unsigned long ireg_used;
1304 unsigned long freg_used;
1310 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1311 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1312 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1313 asm (" bri %r1"); /* delayed return */
1314 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1316 #else /* not __svr4__ */
1317 #if defined(__PARAGON__)
1319 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1320 * and we stand a better chance of hooking into libraries
1321 * compiled by PGI. [andyp@ssd.intel.com]
1325 asm (".globl __builtin_saveregs");
1326 asm ("__builtin_saveregs:");
1327 asm (".globl ___builtin_saveregs");
1328 asm ("___builtin_saveregs:");
1330 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1331 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1332 area and also for a new va_list
1334 /* Save all argument registers in the arg reg save area. The
1335 arg reg save area must have the following layout (according
1347 asm (" fst.q f8, 0(sp)");
1348 asm (" fst.q f12,16(sp)");
1349 asm (" st.l r16,32(sp)");
1350 asm (" st.l r17,36(sp)");
1351 asm (" st.l r18,40(sp)");
1352 asm (" st.l r19,44(sp)");
1353 asm (" st.l r20,48(sp)");
1354 asm (" st.l r21,52(sp)");
1355 asm (" st.l r22,56(sp)");
1356 asm (" st.l r23,60(sp)");
1357 asm (" st.l r24,64(sp)");
1358 asm (" st.l r25,68(sp)");
1359 asm (" st.l r26,72(sp)");
1360 asm (" st.l r27,76(sp)");
1362 asm (" adds 80,sp,r16"); /* compute the address of the new
1363 va_list structure. Put in into
1364 r16 so that it will be returned
1367 /* Initialize all fields of the new va_list structure. This
1368 structure looks like:
1371 unsigned long ireg_used;
1372 unsigned long freg_used;
1378 asm (" st.l r0, 0(r16)"); /* nfixed */
1379 asm (" st.l r0, 4(r16)"); /* nfloating */
1380 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1381 asm (" bri r1"); /* delayed return */
1382 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1383 #else /* not __PARAGON__ */
1387 asm (".globl ___builtin_saveregs");
1388 asm ("___builtin_saveregs:");
1389 asm (" mov sp,r30");
1390 asm (" andnot 0x0f,sp,sp");
1391 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1393 /* Fill in the __va_struct. */
1394 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1395 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1396 asm (" st.l r18, 8(sp)");
1397 asm (" st.l r19,12(sp)");
1398 asm (" st.l r20,16(sp)");
1399 asm (" st.l r21,20(sp)");
1400 asm (" st.l r22,24(sp)");
1401 asm (" st.l r23,28(sp)");
1402 asm (" st.l r24,32(sp)");
1403 asm (" st.l r25,36(sp)");
1404 asm (" st.l r26,40(sp)");
1405 asm (" st.l r27,44(sp)");
1407 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1408 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1410 /* Fill in the __va_ctl. */
1411 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1412 asm (" st.l r28,84(sp)"); /* pointer to more args */
1413 asm (" st.l r0, 88(sp)"); /* nfixed */
1414 asm (" st.l r0, 92(sp)"); /* nfloating */
1416 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1418 asm (" mov r30,sp");
1419 /* recover stack and pass address to start
1421 #endif /* not __PARAGON__ */
1422 #endif /* not __svr4__ */
1423 #else /* not __i860__ */
1425 asm (".global __builtin_saveregs");
1426 asm ("__builtin_saveregs:");
1427 asm (".global ___builtin_saveregs");
1428 asm ("___builtin_saveregs:");
1429 #ifdef NEED_PROC_COMMAND
1432 asm ("st %i0,[%fp+68]");
1433 asm ("st %i1,[%fp+72]");
1434 asm ("st %i2,[%fp+76]");
1435 asm ("st %i3,[%fp+80]");
1436 asm ("st %i4,[%fp+84]");
1438 asm ("st %i5,[%fp+88]");
1439 #ifdef NEED_TYPE_COMMAND
1440 asm (".type __builtin_saveregs,#function");
1441 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1443 #else /* not __sparc__ */
1444 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1448 asm (" .set nomips16");
1450 asm (" .ent __builtin_saveregs");
1451 asm (" .globl __builtin_saveregs");
1452 asm ("__builtin_saveregs:");
1453 asm (" sw $4,0($30)");
1454 asm (" sw $5,4($30)");
1455 asm (" sw $6,8($30)");
1456 asm (" sw $7,12($30)");
1458 asm (" .end __builtin_saveregs");
1459 #else /* not __mips__, etc. */
1461 void * __attribute__ ((__noreturn__))
1462 __builtin_saveregs (void)
1467 #endif /* not __mips__ */
1468 #endif /* not __sparc__ */
1469 #endif /* not __i860__ */
1473 #ifndef inhibit_libc
1475 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1477 /* This is used by the `assert' macro. */
1479 __eprintf (const char *string, const char *expression,
1480 unsigned int line, const char *filename)
1482 fprintf (stderr, string, expression, line, filename);
1492 /* Structure emitted by -a */
1496 const char *filename;
1500 const unsigned long *addresses;
1502 /* Older GCC's did not emit these fields. */
1504 const char **functions;
1505 const long *line_nums;
1506 const char **filenames;
1510 #ifdef BLOCK_PROFILER_CODE
1513 #ifndef inhibit_libc
1515 /* Simple minded basic block profiling output dumper for
1516 systems that don't provide tcov support. At present,
1517 it requires atexit and stdio. */
1519 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1523 #include "gbl-ctors.h"
1524 #include "gcov-io.h"
1526 #ifdef TARGET_HAS_F_SETLKW
1531 static struct bb *bb_head;
1533 static int num_digits (long value, int base) __attribute__ ((const));
1535 /* Return the number of digits needed to print a value */
1536 /* __inline__ */ static int num_digits (long value, int base)
1538 int minus = (value < 0 && base != 16);
1539 unsigned long v = (minus) ? -value : value;
1553 __bb_exit_func (void)
1555 FILE *da_file, *file;
1562 i = strlen (bb_head->filename) - 3;
1564 if (!strcmp (bb_head->filename+i, ".da"))
1566 /* Must be -fprofile-arcs not -a.
1567 Dump data in a form that gcov expects. */
1571 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1575 /* Make sure the output file exists -
1576 but don't clobber exiting data. */
1577 if ((da_file = fopen (ptr->filename, "a")) != 0)
1580 /* Need to re-open in order to be able to write from the start. */
1581 da_file = fopen (ptr->filename, "r+b");
1582 /* Some old systems might not allow the 'b' mode modifier.
1583 Therefore, try to open without it. This can lead to a race
1584 condition so that when you delete and re-create the file, the
1585 file might be opened in text mode, but then, you shouldn't
1586 delete the file in the first place. */
1588 da_file = fopen (ptr->filename, "r+");
1591 fprintf (stderr, "arc profiling: Can't open output file %s.\n",
1596 /* After a fork, another process might try to read and/or write
1597 the same file simultanously. So if we can, lock the file to
1598 avoid race conditions. */
1599 #if defined (TARGET_HAS_F_SETLKW)
1601 struct flock s_flock;
1603 s_flock.l_type = F_WRLCK;
1604 s_flock.l_whence = SEEK_SET;
1605 s_flock.l_start = 0;
1607 s_flock.l_pid = getpid ();
1609 while (fcntl (fileno (da_file), F_SETLKW, &s_flock)
1614 /* If the file is not empty, and the number of counts in it is the
1615 same, then merge them in. */
1616 firstchar = fgetc (da_file);
1617 if (firstchar == EOF)
1619 if (ferror (da_file))
1621 fprintf (stderr, "arc profiling: Can't read output file ");
1622 perror (ptr->filename);
1629 if (ungetc (firstchar, da_file) == EOF)
1631 if (__read_long (&n_counts, da_file, 8) != 0)
1633 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1638 if (n_counts == ptr->ncounts)
1642 for (i = 0; i < n_counts; i++)
1646 if (__read_long (&v, da_file, 8) != 0)
1648 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1652 ptr->counts[i] += v;
1660 /* ??? Should first write a header to the file. Preferably, a 4 byte
1661 magic number, 4 bytes containing the time the program was
1662 compiled, 4 bytes containing the last modification time of the
1663 source file, and 4 bytes indicating the compiler options used.
1665 That way we can easily verify that the proper source/executable/
1666 data file combination is being used from gcov. */
1668 if (__write_long (ptr->ncounts, da_file, 8) != 0)
1671 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1677 long *count_ptr = ptr->counts;
1679 for (j = ptr->ncounts; j > 0; j--)
1681 if (__write_long (*count_ptr, da_file, 8) != 0)
1689 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1693 if (fclose (da_file) == EOF)
1694 fprintf (stderr, "arc profiling: Error closing output file %s.\n",
1701 /* Must be basic block profiling. Emit a human readable output file. */
1703 file = fopen ("bb.out", "a");
1712 /* This is somewhat type incorrect, but it avoids worrying about
1713 exactly where time.h is included from. It should be ok unless
1714 a void * differs from other pointer formats, or if sizeof (long)
1715 is < sizeof (time_t). It would be nice if we could assume the
1716 use of rationale standards here. */
1718 time ((void *) &time_value);
1719 fprintf (file, "Basic block profiling finished on %s\n", ctime ((void *) &time_value));
1721 /* We check the length field explicitly in order to allow compatibility
1722 with older GCC's which did not provide it. */
1724 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1727 int func_p = (ptr->nwords >= (long) sizeof (struct bb)
1728 && ptr->nwords <= 1000
1730 int line_p = (func_p && ptr->line_nums);
1731 int file_p = (func_p && ptr->filenames);
1732 int addr_p = (ptr->addresses != 0);
1733 long ncounts = ptr->ncounts;
1739 int blk_len = num_digits (ncounts, 10);
1744 fprintf (file, "File %s, %ld basic blocks \n\n",
1745 ptr->filename, ncounts);
1747 /* Get max values for each field. */
1748 for (i = 0; i < ncounts; i++)
1753 if (cnt_max < ptr->counts[i])
1754 cnt_max = ptr->counts[i];
1756 if (addr_p && (unsigned long) addr_max < ptr->addresses[i])
1757 addr_max = ptr->addresses[i];
1759 if (line_p && line_max < ptr->line_nums[i])
1760 line_max = ptr->line_nums[i];
1764 p = (ptr->functions[i]) ? (ptr->functions[i]) : "<none>";
1772 p = (ptr->filenames[i]) ? (ptr->filenames[i]) : "<none>";
1779 addr_len = num_digits (addr_max, 16);
1780 cnt_len = num_digits (cnt_max, 10);
1781 line_len = num_digits (line_max, 10);
1783 /* Now print out the basic block information. */
1784 for (i = 0; i < ncounts; i++)
1787 " Block #%*d: executed %*ld time(s)",
1789 cnt_len, ptr->counts[i]);
1792 fprintf (file, " address= 0x%.*lx", addr_len,
1796 fprintf (file, " function= %-*s", func_len,
1797 (ptr->functions[i]) ? ptr->functions[i] : "<none>");
1800 fprintf (file, " line= %*ld", line_len, ptr->line_nums[i]);
1803 fprintf (file, " file= %s",
1804 (ptr->filenames[i]) ? ptr->filenames[i] : "<none>");
1806 fprintf (file, "\n");
1809 fprintf (file, "\n");
1813 fprintf (file, "\n\n");
1819 __bb_init_func (struct bb *blocks)
1821 /* User is supposed to check whether the first word is non-0,
1822 but just in case.... */
1824 if (blocks->zero_word)
1827 /* Initialize destructor. */
1829 atexit (__bb_exit_func);
1831 /* Set up linked list. */
1832 blocks->zero_word = 1;
1833 blocks->next = bb_head;
1837 /* Called before fork or exec - write out profile information gathered so
1838 far and reset it to zero. This avoids duplication or loss of the
1839 profile information gathered so far. */
1841 __bb_fork_func (void)
1846 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1849 for (i = ptr->ncounts - 1; i >= 0; i--)
1854 #ifndef MACHINE_STATE_SAVE
1855 #define MACHINE_STATE_SAVE(ID)
1857 #ifndef MACHINE_STATE_RESTORE
1858 #define MACHINE_STATE_RESTORE(ID)
1861 /* Number of buckets in hashtable of basic block addresses. */
1863 #define BB_BUCKETS 311
1865 /* Maximum length of string in file bb.in. */
1867 #define BBINBUFSIZE 500
1871 struct bb_edge *next;
1872 unsigned long src_addr;
1873 unsigned long dst_addr;
1874 unsigned long count;
1879 TRACE_KEEP = 0, TRACE_ON = 1, TRACE_OFF = 2
1884 struct bb_func *next;
1887 enum bb_func_mode mode;
1890 /* This is the connection to the outside world.
1891 The BLOCK_PROFILER macro must set __bb.blocks
1892 and __bb.blockno. */
1895 unsigned long blockno;
1899 /* Vars to store addrs of source and destination basic blocks
1902 static unsigned long bb_src = 0;
1903 static unsigned long bb_dst = 0;
1905 static FILE *bb_tracefile = (FILE *) 0;
1906 static struct bb_edge **bb_hashbuckets = (struct bb_edge **) 0;
1907 static struct bb_func *bb_func_head = (struct bb_func *) 0;
1908 static unsigned long bb_callcount = 0;
1909 static int bb_mode = 0;
1911 static unsigned long *bb_stack = (unsigned long *) 0;
1912 static size_t bb_stacksize = 0;
1914 static int reported = 0;
1917 Always : Print execution frequencies of basic blocks
1919 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1920 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1921 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1922 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1927 /*#include <sys/types.h>*/
1928 #include <sys/stat.h>
1929 /*#include <malloc.h>*/
1931 /* Commands executed by gopen. */
1933 #define GOPENDECOMPRESS "gzip -cd "
1934 #define GOPENCOMPRESS "gzip -c >"
1936 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1937 If it does not compile, simply replace gopen by fopen and delete
1938 '.gz' from any first parameter to gopen. */
1941 gopen (char *fn, char *mode)
1949 if (mode[0] != 'r' && mode[0] != 'w')
1952 p = fn + strlen (fn)-1;
1953 use_gzip = ((p[-1] == '.' && (p[0] == 'Z' || p[0] == 'z'))
1954 || (p[-2] == '.' && p[-1] == 'g' && p[0] == 'z'));
1961 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1962 + sizeof (GOPENDECOMPRESS));
1963 strcpy (s, GOPENDECOMPRESS);
1964 strcpy (s + (sizeof (GOPENDECOMPRESS)-1), fn);
1965 f = popen (s, mode);
1973 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1974 + sizeof (GOPENCOMPRESS));
1975 strcpy (s, GOPENCOMPRESS);
1976 strcpy (s + (sizeof (GOPENCOMPRESS)-1), fn);
1977 if (!(f = popen (s, mode)))
1978 f = fopen (s, mode);
1985 return fopen (fn, mode);
1995 if (!fstat (fileno (f), &buf) && S_ISFIFO (buf.st_mode))
2003 #endif /* HAVE_POPEN */
2005 /* Called once per program. */
/* __bb_exit_trace_func: run once at program exit.  Closes the basic
   block trace file, reports `bb.in' functions that never executed,
   dumps the jump-frequency table to bb.out, and frees profiler memory.
   NOTE(review): many lines are elided from this listing; comments
   cover only the visible code.  */
2008 __bb_exit_trace_func (void)
2010 FILE *file = fopen ("bb.out", "a");
/* Close the trace stream — gclose when it may be a gzip pipe,
   fclose otherwise.  */
2023 gclose (bb_tracefile);
2025 fclose (bb_tracefile);
2026 #endif /* HAVE_POPEN */
2029 /* Check functions in `bb.in'. */
2034 const struct bb_func *p;
2035 int printed_something = 0;
2039 /* This is somewhat type incorrect. */
2040 time ((void *) &time_value);
/* Scan every requested function against every registered bb struct;
   report the ones that were never executed.  */
2042 for (p = bb_func_head; p != (struct bb_func *) 0; p = p->next)
2044 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
2046 if (!ptr->filename || (p->filename != (char *) 0 && strcmp (p->filename, ptr->filename)))
2048 for (blk = 0; blk < ptr->ncounts; blk++)
2050 if (!strcmp (p->funcname, ptr->functions[blk]))
2055 if (!printed_something)
2057 fprintf (file, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value));
2058 printed_something = 1;
2061 fprintf (file, "\tFunction %s", p->funcname);
2063 fprintf (file, " of file %s", p->filename);
2064 fprintf (file, "\n" );
2069 if (printed_something)
2070 fprintf (file, "\n");
2076 if (!bb_hashbuckets)
2080 fprintf (stderr, "Profiler: out of memory\n");
/* Jump-frequency dump: first find the widest address and count so the
   report columns line up.  */
2090 unsigned long addr_max = 0;
2091 unsigned long cnt_max = 0;
2095 /* This is somewhat type incorrect, but it avoids worrying about
2096 exactly where time.h is included from. It should be ok unless
2097 a void * differs from other pointer formats, or if sizeof (long)
2098 is < sizeof (time_t). It would be nice if we could assume the
2099 use of rationale standards here. */
2101 time ((void *) &time_value);
2102 fprintf (file, "Basic block jump tracing");
/* bb_mode bits 4 and 8 select call/ret handling (see the mode table
   near the top of this section).  */
2104 switch (bb_mode & 12)
2107 fprintf (file, " (with call)");
2111 /* Print nothing. */
2115 fprintf (file, " (with call & ret)");
2119 fprintf (file, " (with ret)");
2123 fprintf (file, " finished on %s\n", ctime ((void *) &time_value));
/* Pass 1 over the hash table: compute column widths.  */
2125 for (i = 0; i < BB_BUCKETS; i++)
2127 struct bb_edge *bucket = bb_hashbuckets[i];
2128 for ( ; bucket; bucket = bucket->next )
2130 if (addr_max < bucket->src_addr)
2131 addr_max = bucket->src_addr;
2132 if (addr_max < bucket->dst_addr)
2133 addr_max = bucket->dst_addr;
2134 if (cnt_max < bucket->count)
2135 cnt_max = bucket->count;
2138 addr_len = num_digits (addr_max, 16);
2139 cnt_len = num_digits (cnt_max, 10);
/* Pass 2: print one line per recorded src->dst edge.  */
2141 for ( i = 0; i < BB_BUCKETS; i++)
2143 struct bb_edge *bucket = bb_hashbuckets[i];
2144 for ( ; bucket; bucket = bucket->next )
2147 "Jump from block 0x%.*lx to block 0x%.*lx executed %*lu time(s)\n",
2148 addr_len, bucket->src_addr,
2149 addr_len, bucket->dst_addr,
2150 cnt_len, bucket->count);
2154 fprintf (file, "\n");
2162 /* Free allocated memory. */
2167 struct bb_func *old = f;
2170 if (old->funcname) free (old->funcname);
2171 if (old->filename) free (old->filename);
2182 for (i = 0; i < BB_BUCKETS; i++)
2184 struct bb_edge *old, *bucket = bb_hashbuckets[i];
2189 bucket = bucket->next;
2193 free (bb_hashbuckets);
2196 for (b = bb_head; b; b = b->next)
2197 if (b->flags) free (b->flags);
2200 /* Called once per program. */
/* __bb_init_prg: run once per program.  Parses `bb.in' to configure
   profiling mode and the per-function include list, opens the trace
   file, allocates the edge hash table and call stack, and registers
   the exit handlers.  NOTE(review): lines are elided from this
   listing; comments cover only the visible code.  */
2203 __bb_init_prg (void)
2206 char buf[BBINBUFSIZE];
2209 enum bb_func_mode m;
2212 /* Initialize destructor. */
2213 atexit (__bb_exit_func);
2215 if (!(file = fopen ("bb.in", "r")))
2218 while(fgets (buf, BBINBUFSIZE, file) != 0)
/* Special directives toggle bits of bb_mode; anything else names a
   function (optionally "file:func") to profile.  */
2234 if (!strcmp (p, "__bb_trace__"))
2236 else if (!strcmp (p, "__bb_jumps__"))
2238 else if (!strcmp (p, "__bb_hidecall__"))
2240 else if (!strcmp (p, "__bb_showret__"))
2244 struct bb_func *f = (struct bb_func *) malloc (sizeof (struct bb_func));
2248 f->next = bb_func_head;
/* "file:function" form — split at the colon.  */
2249 if ((pos = strchr (p, ':')))
2251 if (!(f->funcname = (char *) malloc (strlen (pos+1)+1)))
2253 strcpy (f->funcname, pos+1);
2255 if ((f->filename = (char *) malloc (l+1)))
2257 strncpy (f->filename, p, l);
2258 f->filename[l] = '\0';
2261 f->filename = (char *) 0;
/* Bare function name — no filename restriction.  */
2265 if (!(f->funcname = (char *) malloc (strlen (p)+1)))
2267 strcpy (f->funcname, p);
2268 f->filename = (char *) 0;
/* Open the block trace output, gzip-compressed when popen exists.  */
2280 bb_tracefile = gopen ("bbtrace.gz", "w");
2285 bb_tracefile = fopen ("bbtrace", "w");
2287 #endif /* HAVE_POPEN */
2291 bb_hashbuckets = (struct bb_edge **)
2292 malloc (BB_BUCKETS * sizeof (struct bb_edge *));
2294 /* Use a loop here rather than calling bzero to avoid having to
2295 conditionalize its existance. */
2296 for (i = 0; i < BB_BUCKETS; i++)
2297 bb_hashbuckets[i] = 0;
2303 bb_stack = (unsigned long *) malloc (bb_stacksize * sizeof (*bb_stack));
2306 /* Initialize destructor. */
2307 atexit (__bb_exit_trace_func);
2310 /* Called upon entering a basic block. */
/* __bb_trace_func: called on entry to every basic block.  Records the
   block in the trace file and/or counts the (bb_src, bb_dst) edge in
   the hash table, using move-to-front within each bucket chain.
   NOTE(review): elided lines; comments cover only visible code.  */
2313 __bb_trace_func (void)
2315 struct bb_edge *bucket;
2317 MACHINE_STATE_SAVE("1")
/* Skip blocks whose TRACE_OFF flag is set, or when no traced function
   is active.  */
2319 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2322 bb_dst = __bb.blocks->addresses[__bb.blockno];
2323 __bb.blocks->counts[__bb.blockno]++;
2327 fwrite (&bb_dst, sizeof (unsigned long), 1, bb_tracefile);
2332 struct bb_edge **startbucket, **oldnext;
/* Hash the (src, dst) pair into a bucket chain.  */
2334 oldnext = startbucket
2335 = & bb_hashbuckets[ (((int) bb_src*8) ^ (int) bb_dst) % BB_BUCKETS ];
2336 bucket = *startbucket;
2338 for (bucket = *startbucket; bucket;
2339 oldnext = &(bucket->next), bucket = *oldnext)
2341 if (bucket->src_addr == bb_src
2342 && bucket->dst_addr == bb_dst)
/* Found: splice the hit to the front of the chain (MRU order).  */
2345 *oldnext = bucket->next;
2346 bucket->next = *startbucket;
2347 *startbucket = bucket;
/* Not found: allocate a fresh edge record and push it on the chain.  */
2352 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2358 fprintf (stderr, "Profiler: out of memory\n");
2365 bucket->src_addr = bb_src;
2366 bucket->dst_addr = bb_dst;
2367 bucket->next = *startbucket;
2368 *startbucket = bucket;
2379 MACHINE_STATE_RESTORE("1")
2383 /* Called when returning from a function and `__bb_showret__' is set. */
/* __bb_trace_func_ret: like __bb_trace_func but records the reverse
   (dst -> src) edge, used when `__bb_showret__' is set.  Same
   move-to-front hash chain scheme.  NOTE(review): elided lines.  */
2386 __bb_trace_func_ret (void)
2388 struct bb_edge *bucket;
2390 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2395 struct bb_edge **startbucket, **oldnext;
/* Note the hash and the stored fields swap src and dst relative to
   __bb_trace_func.  */
2397 oldnext = startbucket
2398 = & bb_hashbuckets[ (((int) bb_dst * 8) ^ (int) bb_src) % BB_BUCKETS ];
2399 bucket = *startbucket;
2401 for (bucket = *startbucket; bucket;
2402 oldnext = &(bucket->next), bucket = *oldnext)
2404 if (bucket->src_addr == bb_dst
2405 && bucket->dst_addr == bb_src)
2408 *oldnext = bucket->next;
2409 bucket->next = *startbucket;
2410 *startbucket = bucket;
2415 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2421 fprintf (stderr, "Profiler: out of memory\n");
2428 bucket->src_addr = bb_dst;
2429 bucket->dst_addr = bb_src;
2430 bucket->next = *startbucket;
2431 *startbucket = bucket;
2444 /* Called upon entering the first function of a file. */
/* __bb_init_file: called on entry to the first traced function of a
   translation unit.  Links BLOCKS into the global bb_head list and
   builds its per-block flags array from the bb_func_head request list.
   NOTE(review): elided lines; comments cover only visible code.  */
2447 __bb_init_file (struct bb *blocks)
2450 const struct bb_func *p;
2451 long blk, ncounts = blocks->ncounts;
2452 const char **functions = blocks->functions;
2454 /* Set up linked list. */
2455 blocks->zero_word = 1;
2456 blocks->next = bb_head;
2461 || !(blocks->flags = (char *) malloc (sizeof (char) * blocks->ncounts)))
2464 for (blk = 0; blk < ncounts; blk++)
2465 blocks->flags[blk] = 0;
/* Mark each block whose function matches an entry from bb.in (with an
   optional filename restriction).  */
2467 for (blk = 0; blk < ncounts; blk++)
2469 for (p = bb_func_head; p; p = p->next)
2471 if (!strcmp (p->funcname, functions[blk])
2472 && (!p->filename || !strcmp (p->filename, blocks->filename)))
2474 blocks->flags[blk] |= p->mode;
2481 /* Called when exiting from a function. */
/* __bb_trace_ret: called when returning from a function.  Pops the
   caller's block address off bb_stack and, when return tracing is
   enabled (bb_mode bits 4|8), records the return edge.
   NOTE(review): elided lines; comments cover only visible code.  */
2484 __bb_trace_ret (void)
2487 MACHINE_STATE_SAVE("2")
2491 if ((bb_mode & 12) && bb_stacksize > bb_callcount)
2493 bb_src = bb_stack[bb_callcount];
2495 __bb_trace_func_ret ();
2501 MACHINE_STATE_RESTORE("2")
2505 /* Called when entering a function. */
/* __bb_init_trace_func: called on entry to a function.  Performs
   one-time file registration, grows the call stack when needed, and
   pushes bb_src so __bb_trace_ret can restore it.
   NOTE(review): elided lines; comments cover only visible code.  */
2508 __bb_init_trace_func (struct bb *blocks, unsigned long blockno)
2510 static int trace_init = 0;
2512 MACHINE_STATE_SAVE("3")
/* zero_word doubles as an "already registered" flag (set by
   __bb_init_file).  */
2514 if (!blocks->zero_word)
2521 __bb_init_file (blocks);
2531 if (bb_callcount >= bb_stacksize)
2533 size_t newsize = bb_callcount + 100;
/* NOTE(review): the visible realloc passes NEWSIZE bytes, not
   newsize * sizeof (*bb_stack) as the initial malloc does — looks
   like an under-allocation, but the fix cannot be verified from this
   elided listing; confirm against the full source.  */
2535 bb_stack = (unsigned long *) realloc (bb_stack, newsize);
2540 fprintf (stderr, "Profiler: out of memory\n");
2544 goto stack_overflow;
2546 bb_stacksize = newsize;
2548 bb_stack[bb_callcount] = bb_src;
2559 else if (blocks->flags && (blocks->flags[blockno] & TRACE_ON))
2565 bb_stack[bb_callcount] = bb_src;
2568 MACHINE_STATE_RESTORE("3")
2571 #endif /* not inhibit_libc */
2572 #endif /* not BLOCK_PROFILER_CODE */
/* __shtab: table of single-bit masks, __shtab[n] == 1U << n for
   n in [0, 31].  Used by targets without a variable shift
   instruction.  (Closing brace of the initializer is on an elided
   line.)  */
2576 unsigned int __shtab[] = {
2577 0x00000001, 0x00000002, 0x00000004, 0x00000008,
2578 0x00000010, 0x00000020, 0x00000040, 0x00000080,
2579 0x00000100, 0x00000200, 0x00000400, 0x00000800,
2580 0x00001000, 0x00002000, 0x00004000, 0x00008000,
2581 0x00010000, 0x00020000, 0x00040000, 0x00080000,
2582 0x00100000, 0x00200000, 0x00400000, 0x00800000,
2583 0x01000000, 0x02000000, 0x04000000, 0x08000000,
2584 0x10000000, 0x20000000, 0x40000000, 0x80000000
2588 #ifdef L_clear_cache
2589 /* Clear part of an instruction cache. */
2591 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
/* __clear_cache: flush the instruction cache for [BEG, END).  Uses the
   target's CLEAR_INSN_CACHE macro when provided; otherwise, on targets
   describing their cache geometry via INSN_CACHE_*, it evicts lines by
   executing a static array of return/jump instructions that alias the
   affected cache lines.  NOTE(review): this listing elides many lines;
   comments cover only visible code.  */
2594 __clear_cache (char *beg __attribute__((__unused__)),
2595 char *end __attribute__((__unused__)))
2597 #ifdef CLEAR_INSN_CACHE
2598 CLEAR_INSN_CACHE (beg, end);
2600 #ifdef INSN_CACHE_SIZE
2601 static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
2602 static int initialized;
2606 typedef (*function_ptr) (void);
2608 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2609 /* It's cheaper to clear the whole cache.
2610 Put in a series of jump instructions so that calling the beginning
2611 of the cache will clear the whole thing. */
/* One-time setup: align into ARRAY and fill it with jump-ahead
   instructions terminated by a return.  */
2615 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2616 & -INSN_CACHE_LINE_WIDTH);
2617 int end_ptr = ptr + INSN_CACHE_SIZE;
2619 while (ptr < end_ptr)
2621 *(INSTRUCTION_TYPE *)ptr
2622 = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
2623 ptr += INSN_CACHE_LINE_WIDTH;
2625 *(INSTRUCTION_TYPE *) (ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;
2630 /* Call the beginning of the sequence. */
2631 (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2632 & -INSN_CACHE_LINE_WIDTH))
2635 #else /* Cache is large. */
/* Large-cache variant: fill ARRAY with return instructions and call
   only the lines that alias [BEG, END).  */
2639 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2640 & -INSN_CACHE_LINE_WIDTH);
2642 while (ptr < (int) array + sizeof array)
2644 *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
2645 ptr += INSN_CACHE_LINE_WIDTH;
2651 /* Find the location in array that occupies the same cache line as BEG. */
2653 offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
2654 start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
2655 & -INSN_CACHE_PLANE_SIZE)
2658 /* Compute the cache alignment of the place to stop clearing. */
2659 #if 0 /* This is not needed for gcc's purposes. */
2660 /* If the block to clear is bigger than a cache plane,
2661 we clear the entire cache, and OFFSET is already correct. */
2662 if (end < beg + INSN_CACHE_PLANE_SIZE)
2664 offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
2665 & -INSN_CACHE_LINE_WIDTH)
2666 & (INSN_CACHE_PLANE_SIZE - 1));
2668 #if INSN_CACHE_DEPTH > 1
/* Multi-way cache: touch the aliasing line in every plane.  */
2669 end_addr = (start_addr & -INSN_CACHE_PLANE_SIZE) + offset;
2670 if (end_addr <= start_addr)
2671 end_addr += INSN_CACHE_PLANE_SIZE;
2673 for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)
2675 int addr = start_addr + plane * INSN_CACHE_PLANE_SIZE;
2676 int stop = end_addr + plane * INSN_CACHE_PLANE_SIZE;
2678 while (addr != stop)
2680 /* Call the return instruction at ADDR. */
2681 ((function_ptr) addr) ();
2683 addr += INSN_CACHE_LINE_WIDTH;
2686 #else /* just one plane */
2689 /* Call the return instruction at START_ADDR. */
2690 ((function_ptr) start_addr) ();
2692 start_addr += INSN_CACHE_LINE_WIDTH;
2694 while ((start_addr % INSN_CACHE_SIZE) != offset);
2695 #endif /* just one plane */
2696 #endif /* Cache is large */
2697 #endif /* Cache exists */
2698 #endif /* CLEAR_INSN_CACHE */
/* Trampoline support: per-platform shims that make stack memory
   executable and/or flush the instruction cache so trampolines built
   on the stack can be called.  Each #if section targets one OS.
   NOTE(review): heavily elided listing — comments cover only visible
   code; function return types and bodies are partly missing.  */
2705 /* Jump to a trampoline, loading the static chain address. */
2707 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
2720 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
/* WinNT: emulate POSIX mprotect on top of VirtualProtect.  The
   prot->np mapping happens on elided lines.  */
2724 mprotect (char *addr, int len, int prot)
2741 if (VirtualProtect (addr, len, np, &op))
2747 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2749 #ifdef TRANSFER_FROM_TRAMPOLINE
2750 TRANSFER_FROM_TRAMPOLINE
2753 #if defined (NeXT) && defined (__MACH__)
2755 /* Make stack executable so we can call trampolines on stack.
2756 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2760 #include <mach/mach.h>
/* NeXT/Mach: vm_protect the trampoline region, then invalidate the
   i-cache over it.  */
2764 __enable_execute_stack (char *addr)
2767 char *eaddr = addr + TRAMPOLINE_SIZE;
2768 vm_address_t a = (vm_address_t) addr;
2770 /* turn on execute access on stack */
2771 r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
2772 if (r != KERN_SUCCESS)
2774 mach_error("vm_protect VM_PROT_ALL", r);
2778 /* We inline the i-cache invalidation for speed */
2780 #ifdef CLEAR_INSN_CACHE
2781 CLEAR_INSN_CACHE (addr, eaddr);
2783 __clear_cache ((int) addr, (int) eaddr);
2787 #endif /* defined (NeXT) && defined (__MACH__) */
2791 /* Make stack executable so we can call trampolines on stack.
2792 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2794 #include <sys/mman.h>
2795 #include <sys/vmparam.h>
2796 #include <machine/machparam.h>
/* Convex: remap every page between the lowest page seen so far and the
   current frame pointer with rwx permissions.  */
2799 __enable_execute_stack (void)
2802 static unsigned lowest = USRSTACK;
2803 unsigned current = (unsigned) &fp & -NBPG;
2805 if (lowest > current)
2807 unsigned len = lowest - current;
2808 mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);
2812 /* Clear instruction cache in case an old trampoline is in it. */
2815 #endif /* __convex__ */
2819 /* Modified from the convex -code above. */
2821 #include <sys/param.h>
2823 #include <sys/m88kbcs.h>
/* sysV88: same grow-down scheme, but uses memctl(MCT_TEXT) to make the
   pages executable/cache-coherent.  */
2826 __enable_execute_stack (void)
2829 static unsigned long lowest = USRSTACK;
2830 unsigned long current = (unsigned long) &save_errno & -NBPC;
2832 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2833 address is seen as 'negative'. That is the case with the stack. */
2836 if (lowest > current)
2838 unsigned len=lowest-current;
2839 memctl(current,len,MCT_TEXT);
2843 memctl(current,NBPC,MCT_TEXT);
2847 #endif /* __sysV88__ */
2851 #include <sys/signal.h>
2854 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2855 so define it here, because we need it in __clear_insn_cache below */
2856 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2857 hence we enable this stuff only if MCT_TEXT is #define'd. */
2872 /* Clear instruction cache so we can call trampolines on stack.
2873 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
/* sysV68: full i-cache flush via memctl; errno is preserved around the
   call (on elided lines).  */
2876 __clear_insn_cache (void)
2881 /* Preserve errno, because users would be surprised to have
2882 errno changing without explicitly calling any system-call. */
2885 /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
2886 No need to use an address derived from _start or %sp, as 0 works also. */
2887 memctl(0, 4096, MCT_TEXT);
2892 #endif /* __sysV68__ */
2896 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2898 #include <sys/mman.h>
2899 #include <sys/types.h>
2900 #include <sys/param.h>
2901 #include <sys/vmmac.h>
2903 /* Modified from the convex -code above.
2904 mremap promises to clear the i-cache. */
/* Pyramid: mprotect the page containing the current frame rwx.  */
2907 __enable_execute_stack (void)
2910 if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
2911 PROT_READ|PROT_WRITE|PROT_EXEC))
2913 perror ("mprotect in __enable_execute_stack");
2918 #endif /* __pyr__ */
2920 #if defined (sony_news) && defined (SYSTYPE_BSD)
2923 #include <sys/types.h>
2924 #include <sys/param.h>
2925 #include <syscall.h>
2926 #include <machine/sysnews.h>
2928 /* cacheflush function for NEWS-OS 4.2.
2929 This function is called from trampoline-initialize code
2930 defined in config/mips/mips.h. */
2933 cacheflush (char *beg, int size, int flag)
2935 if (syscall (SYS_sysnews, NEWS_CACHEFLUSH, beg, size, FLUSH_BCACHE))
2937 perror ("cache_flush");
2943 #endif /* sony_news */
2944 #endif /* L_trampoline */
2949 #include "gbl-ctors.h"
2950 /* Some systems use __main in a way incompatible with its use in gcc, in these
2951 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2952 give the same symbol without quotes for an alternative entry point. You
2953 must define both, or neither. */
2955 #define NAME__MAIN "__main"
2956 #define SYMBOL__MAIN __main
2959 #ifdef INIT_SECTION_ASM_OP
2960 #undef HAS_INIT_SECTION
2961 #define HAS_INIT_SECTION
2964 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2966 /* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
2967 code to run constructors. In that case, we need to handle EH here, too. */
2969 #ifdef EH_FRAME_SECTION
2971 extern unsigned char __EH_FRAME_BEGIN__[];
2974 /* Run all the global destructors on exit from the program. */
/* __do_global_dtors: run all global destructors at program exit —
   either via the target's DO_GLOBAL_DTORS_BODY or by walking
   __DTOR_LIST__ (skipping its leading count/sentinel word).  Also
   deregisters the EH frame info exactly once when that is handled
   here.  NOTE(review): elided lines; comments cover visible code.  */
2977 __do_global_dtors (void)
2979 #ifdef DO_GLOBAL_DTORS_BODY
2980 DO_GLOBAL_DTORS_BODY;
2982 static func_ptr *p = __DTOR_LIST__ + 1;
2989 #if defined (EH_FRAME_SECTION) && !defined (HAS_INIT_SECTION)
2991 static int completed = 0;
2995 __deregister_frame_info (__EH_FRAME_BEGIN__);
3002 #ifndef HAS_INIT_SECTION
3003 /* Run all the global constructors on entry to the program. */
/* __do_global_ctors: run all global constructors at program start via
   the target's DO_GLOBAL_CTORS_BODY, first registering the EH frame
   info when an EH_FRAME_SECTION exists, and arranging for
   __do_global_dtors to run at exit.  */
3006 __do_global_ctors (void)
3008 #ifdef EH_FRAME_SECTION
3010 static struct object object;
3011 __register_frame_info (__EH_FRAME_BEGIN__, &object);
3014 DO_GLOBAL_CTORS_BODY;
3015 atexit (__do_global_dtors);
3017 #endif /* no HAS_INIT_SECTION */
3019 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
3020 /* Subroutine called automatically by `main'.
3021 Compiling a global function named `main'
3022 produces an automatic call to this function at the beginning.
3024 For many systems, this routine calls __do_global_ctors.
3025 For systems which support a .init section we use the .init section
3026 to run __do_global_ctors, so we need not do anything here. */
3031 /* Support recursive calls to `main': run initializers just once. */
3032 static int initialized;
3036 __do_global_ctors ();
3039 #endif /* no HAS_INIT_SECTION or INVOKE__main */
3041 #endif /* L__main */
3042 #endif /* __CYGWIN__ */
3046 #include "gbl-ctors.h"
3048 /* Provide default definitions for the lists of constructors and
3049 destructors, so that we don't get linker errors. These symbols are
3050 intentionally bss symbols, so that gld and/or collect will provide
3051 the right values. */
3053 /* We declare the lists here with two elements each,
3054 so that they are valid empty lists if no other definition is loaded.
3056 If we are using the old "set" extensions to have the gnu linker
3057 collect ctors and dtors, then we __CTOR_LIST__ and __DTOR_LIST__
3058 must be in the bss/common section.
3060 Long term no port should use those extensions. But many still do. */
/* Default (empty) constructor/destructor lists so links succeed when
   no other definition is provided; gld/collect2 override these.  With
   ASM_OUTPUT_CONSTRUCTOR or collect2 they are zero-initialized data,
   otherwise bss/common so the linker "set" extensions can merge.  */
3061 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
3062 #if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
3063 func_ptr __CTOR_LIST__[2] = {0, 0};
3064 func_ptr __DTOR_LIST__[2] = {0, 0};
3066 func_ptr __CTOR_LIST__[2];
3067 func_ptr __DTOR_LIST__[2];
3069 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
3070 #endif /* L_ctors */
3074 #include "gbl-ctors.h"
/* Fallback atexit for systems without one: keeps registered handlers
   in a heap-grown array (atexit_chain), run in reverse order by the
   exit replacement below.  When the system provides ON_EXIT, atexit
   is just a wrapper around it.  NOTE(review): elided lines; the
   surrounding exit() definition is only partially visible.  */
3082 static func_ptr *atexit_chain = 0;
3083 static long atexit_chain_length = 0;
3084 static volatile long last_atexit_chain_slot = -1;
3087 atexit (func_ptr func)
/* Grow the chain in steps of 32 slots when full.  */
3089 if (++last_atexit_chain_slot == atexit_chain_length)
3091 atexit_chain_length += 32;
3093 atexit_chain = (func_ptr *) realloc (atexit_chain, atexit_chain_length
3094 * sizeof (func_ptr));
3096 atexit_chain = (func_ptr *) malloc (atexit_chain_length
3097 * sizeof (func_ptr));
/* Allocation failed: reset state (error return is on elided lines).  */
3100 atexit_chain_length = 0;
3101 last_atexit_chain_slot = -1;
3106 atexit_chain[last_atexit_chain_slot] = func;
3110 extern void _cleanup (void);
3111 extern void _exit (int) __attribute__ ((__noreturn__));
/* Body of the exit replacement (header elided): invoke handlers in
   LIFO order, clearing each slot, then free the chain.  */
3118 for ( ; last_atexit_chain_slot-- >= 0; )
3120 (*atexit_chain[last_atexit_chain_slot + 1]) ();
3121 atexit_chain[last_atexit_chain_slot + 1] = 0;
3123 free (atexit_chain);
3136 /* Simple; we just need a wrapper for ON_EXIT. */
3138 atexit (func_ptr func)
3140 return ON_EXIT (func);
3143 #endif /* ON_EXIT */
3144 #endif /* NEED_ATEXIT */
3152 /* Shared exception handling support routines. */
/* Shared EH termination support: __terminate_func is an overridable
   hook (defaulting to __default_terminate, which presumably aborts on
   elided lines — TODO confirm); __terminate dispatches through it.
   __throw_type_match compares type names as strings.  */
3155 __default_terminate (void)
3160 void (*__terminate_func)(void) __attribute__ ((__noreturn__)) =
3161 __default_terminate;
3163 void __attribute__((__noreturn__))
3166 (*__terminate_func)();
/* String-compare based runtime type match used by older EH code.  */
3170 __throw_type_match (void *catch_type, void *throw_type, void *obj)
3173 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
3174 catch_type, throw_type);
3176 if (strcmp ((const char *)catch_type, (const char *)throw_type) == 0)
3187 /* Include definitions of EH context and table layout */
3189 #include "eh-common.h"
3190 #ifndef inhibit_libc
3194 /* Allocate and return a new EH context structure. */
/* Per-thread EH context machinery.  get_eh_context starts out pointing
   at eh_context_initialize, which on first call decides between a
   static context (single-threaded) and thread-specific contexts via
   the gthread key.  NOTE(review): elided lines throughout; comments
   cover only visible code.  */
2198 /* (see original comment above) Allocate and return a new EH context
   structure, zeroed, with the dynamic handler chain pointing at its
   own trailing element.  */
3198 new_eh_context (void)
3200 struct eh_full_context {
3201 struct eh_context c;
3203 } *ehfc = (struct eh_full_context *) malloc (sizeof *ehfc);
3208 memset (ehfc, 0, sizeof *ehfc);
3210 ehfc->c.dynamic_handler_chain = (void **) ehfc->top_elt;
3212 /* This should optimize out entirely. This should always be true,
3213 but just in case it ever isn't, don't allow bogus code to be
3216 if ((void*)(&ehfc->c) != (void*)ehfc)
3222 static __gthread_key_t eh_context_key;
3224 /* Destructor for struct eh_context. */
3226 eh_context_free (void *ptr)
3228 __gthread_key_dtor (eh_context_key, ptr);
3234 /* Pointer to function to return EH context. */
3236 static struct eh_context *eh_context_initialize (void);
3237 static struct eh_context *eh_context_static (void);
3239 static struct eh_context *eh_context_specific (void);
3242 static struct eh_context *(*get_eh_context) (void) = &eh_context_initialize;
3244 /* Routine to get EH context.
3245 This one will simply call the function pointer. */
3248 __get_eh_context (void)
3250 return (void *) (*get_eh_context) ();
3253 /* Get and set the language specific info pointer. */
3256 __get_eh_info (void)
3258 struct eh_context *eh = (*get_eh_context) ();
3262 #ifdef DWARF2_UNWIND_INFO
3263 static int dwarf_reg_size_table_initialized = 0;
3264 static char dwarf_reg_size_table[DWARF_FRAME_REGISTERS];
/* Lazily fill the DWARF register-size table used by the unwinder.  */
3267 init_reg_size_table (void)
3269 __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table);
3270 dwarf_reg_size_table_initialized = 1;
3276 eh_threads_initialize (void)
3278 /* Try to create the key. If it fails, revert to static method,
3279 otherwise start using thread specific EH contexts. */
3280 if (__gthread_key_create (&eh_context_key, &eh_context_free) == 0)
3281 get_eh_context = &eh_context_specific;
3283 get_eh_context = &eh_context_static;
3285 #endif /* no __GTHREADS */
3287 /* Initialize EH context.
3288 This will be called only once, since we change GET_EH_CONTEXT
3289 pointer to another routine. */
3291 static struct eh_context *
3292 eh_context_initialize (void)
3296 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
3297 /* Make sure that get_eh_context does not point to us anymore.
3298 Some systems have dummy thread routines in their libc that
3299 return a success (Solaris 2.6 for example). */
3300 if (__gthread_once (&once, eh_threads_initialize) != 0
3301 || get_eh_context == &eh_context_initialize)
3303 /* Use static version of EH context. */
3304 get_eh_context = &eh_context_static;
3306 #ifdef DWARF2_UNWIND_INFO
3308 static __gthread_once_t once_regsizes = __GTHREAD_ONCE_INIT;
3309 if (__gthread_once (&once_regsizes, init_reg_size_table) != 0
3310 || ! dwarf_reg_size_table_initialized)
3311 init_reg_size_table ();
3315 #else /* no __GTHREADS */
3317 /* Use static version of EH context. */
3318 get_eh_context = &eh_context_static;
3320 #ifdef DWARF2_UNWIND_INFO
3321 init_reg_size_table ();
3324 #endif /* no __GTHREADS */
/* Re-dispatch through whichever implementation was just selected.  */
3326 return (*get_eh_context) ();
3329 /* Return a static EH context. */
3331 static struct eh_context *
3332 eh_context_static (void)
3334 static struct eh_context eh;
3335 static int initialized;
3336 static void *top_elt[2];
3341 memset (&eh, 0, sizeof eh);
3342 eh.dynamic_handler_chain = top_elt;
3348 /* Return a thread specific EH context. */
3350 static struct eh_context *
3351 eh_context_specific (void)
3353 struct eh_context *eh;
/* Allocate the context lazily on first use in each thread.  */
3354 eh = (struct eh_context *) __gthread_getspecific (eh_context_key);
3357 eh = new_eh_context ();
3358 if (__gthread_setspecific (eh_context_key, (void *) eh) != 0)
3364 #endif /* __GTHREADS */
3366 /* Support routines for alloc/free during exception handling */
3368 /* __eh_alloc and __eh_free attempt allocation using malloc, but fall back to
3369 the small arena in the eh_context. This is needed because throwing an
3370 out-of-memory exception would fail otherwise. The emergency space is
3371 allocated in blocks of size EH_ALLOC_ALIGN, the
3372 minimum allocation being two blocks. A bitmask indicates which blocks
3373 have been allocated. To indicate the size of an allocation, the bit for
3374 the final block is not set. Hence each allocation is a run of 1s followed
/* __eh_alloc: allocate SIZE bytes for EH data, falling back to the
   per-context emergency arena when malloc fails (so an out-of-memory
   exception can still be thrown).  The arena is managed as a bitmask
   of EH_ALLOC_ALIGN-sized blocks; an allocation is a run of set bits
   with the final block's bit clear (see the section comment above).
   NOTE(review): elided lines; comments cover only visible code.  */
3377 __eh_alloc (size_t size)
3386 struct eh_context *eh = __get_eh_context ();
3387 unsigned blocks = (size + EH_ALLOC_ALIGN - 1) / EH_ALLOC_ALIGN;
/* real_mask marks blocks unavailable: allocated, or adjacent to an
   allocation end marker.  */
3388 unsigned real_mask = eh->alloc_mask | (eh->alloc_mask << 1);
3392 if (blocks > EH_ALLOC_SIZE / EH_ALLOC_ALIGN)
3394 blocks += blocks == 1;
3395 our_mask = (1 << blocks) - 1;
/* First-fit scan from the top of the arena.  */
3397 for (ix = EH_ALLOC_SIZE / EH_ALLOC_ALIGN - blocks; ix; ix--)
3398 if (! ((real_mask >> ix) & our_mask))
3400 /* found some space */
3401 p = &eh->alloc_buffer[ix * EH_ALLOC_ALIGN];
3402 eh->alloc_mask |= (our_mask >> 1) << ix;
3410 /* Free the memory for an cp_eh_info and associated exception, given
3411 a pointer to the cp_eh_info. */
/* Body of __eh_free (header elided): if P lies inside the emergency
   arena, clear its bits; otherwise it came from malloc.  */
3415 struct eh_context *eh = __get_eh_context ();
3417 ptrdiff_t diff = (char *)p - &eh->alloc_buffer[0];
3418 if (diff >= 0 && diff < EH_ALLOC_SIZE)
3420 unsigned mask = eh->alloc_mask;
3421 unsigned bit = 1 << (diff / EH_ALLOC_ALIGN);
3429 eh->alloc_mask = mask;
3435 /* Support routines for setjmp/longjmp exception handling. */
3437 /* Calls to __sjthrow are generated by the compiler when an exception
3438 is raised when using the setjmp/longjmp exception handling codegen
3441 #ifdef DONT_USE_BUILTIN_SETJMP
3442 extern void longjmp (void *, int);
3445 /* Routine to get the head of the current thread's dynamic handler chain
3446 use for exception handling. */
/* setjmp/longjmp exception handling: the dynamic handler chain lives
   in the EH context; each element holds [next, cleanup-chain, jmpbuf].
   NOTE(review): elided lines; comments cover only visible code.  */
3449 __get_dynamic_handler_chain (void)
3451 struct eh_context *eh = (*get_eh_context) ();
3452 return &eh->dynamic_handler_chain;
3455 /* This is used to throw an exception when the setjmp/longjmp codegen
3456 method is used for exception handling.
3458 We call __terminate if there are no handlers left. Otherwise we run the
3459 cleanup actions off the dynamic cleanup stack, and pop the top of the
3460 dynamic handler chain, and use longjmp to transfer back to the associated
/* Body of __sjthrow (header elided).  */
3466 struct eh_context *eh = (*get_eh_context) ();
3467 void ***dhc = &eh->dynamic_handler_chain;
3469 void (*func)(void *, int);
3471 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3472 void ***cleanup = (void***)&(*dhc)[1];
3474 /* If there are any cleanups in the chain, run them now. */
3478 void **buf = (void**)store;
/* Each cleanup runs under its own setjmp so a longjmp out of it
   returns here.  */
3483 #ifdef DONT_USE_BUILTIN_SETJMP
3484 if (! setjmp (&buf[2]))
3486 if (! __builtin_setjmp (&buf[2]))
3492 func = (void(*)(void*, int))cleanup[0][1];
3493 arg = (void*)cleanup[0][2];
3495 /* Update this before running the cleanup. */
3496 cleanup[0] = (void **)cleanup[0][0];
3509 /* We must call terminate if we try and rethrow an exception, when
3510 there is no exception currently active and when there are no
3512 if (! eh->info || (*dhc)[0] == 0)
3515 /* Find the jmpbuf associated with the top element of the dynamic
3516 handler chain. The jumpbuf starts two words into the buffer. */
3517 jmpbuf = &(*dhc)[2];
3519 /* Then we pop the top element off the dynamic handler chain. */
3520 *dhc = (void**)(*dhc)[0];
3522 /* And then we jump to the handler. */
3524 #ifdef DONT_USE_BUILTIN_SETJMP
3525 longjmp (jmpbuf, 1);
3527 __builtin_longjmp (jmpbuf, 1);
3531 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3532 handler, then pop the handler off the dynamic handler stack, and
3533 then throw. This is used to skip the first handler, and transfer
3534 control to the next handler in the dynamic handler stack. */
3537 __sjpopnthrow (void)
3539 struct eh_context *eh = (*get_eh_context) ();
3540 void ***dhc = &eh->dynamic_handler_chain;
3541 void (*func)(void *, int);
3543 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3544 void ***cleanup = (void***)&(*dhc)[1];
3546 /* If there are any cleanups in the chain, run them now. */
3550 void **buf = (void**)store;
3555 #ifdef DONT_USE_BUILTIN_SETJMP
3556 if (! setjmp (&buf[2]))
3558 if (! __builtin_setjmp (&buf[2]))
3564 func = (void(*)(void*, int))cleanup[0][1];
3565 arg = (void*)cleanup[0][2];
3567 /* Update this before running the cleanup. */
3568 cleanup[0] = (void **)cleanup[0][0];
3581 /* Then we pop the top element off the dynamic handler chain. */
3582 *dhc = (void**)(*dhc)[0];
3587 /* Support code for all exception region-based exception handling. */
/* __eh_rtime_match: run the current exception's language-specific
   match function against runtime type info RTIME; nonzero means the
   handler matches.  A missing matcher is reported as an internal
   compiler bug.  NOTE(review): elided lines.  */
3590 __eh_rtime_match (void *rtime)
3593 __eh_matcher matcher;
3596 info = *(__get_eh_info ());
3597 matcher = ((__eh_info *)info)->match_function;
3600 #ifndef inhibit_libc
3601 fprintf (stderr, "Internal Compiler Bug: No runtime type matcher.");
3605 ret = (*matcher) (info, rtime, (void *)0);
3606 return (ret != NULL);
3609 /* This value identifies the place from which an exception is being
3612 #ifdef EH_TABLE_LOOKUP
3618 #ifdef DWARF2_UNWIND_INFO
3620 /* Return the table version of an exception descriptor */
/* Accessors for the language/version fields of an exception
   descriptor's header.  */
3623 __get_eh_table_version (exception_descriptor *table)
3625 return table->lang.version;
3628 /* Return the originating table language of an exception descriptor */
3631 __get_eh_table_language (exception_descriptor *table)
3633 return table->lang.language;
3636 /* This routine takes a PC and a pointer to the exception region TABLE for
3637 its translation unit, and returns the address of the exception handler
3638 associated with the closest exception table handler entry associated
3639 with that PC, or 0 if there are no table entries the PC fits in.
3641 In the advent of a tie, we have to give the last entry, as it represents
/* old_find_exception_handler: linear scan of the old-format exception
   TABLE for the innermost region containing PC (ties resolved toward
   the last, i.e. outermost-listed, entry per the comment above).
   Returns the handler address, or falls through when no entry covers
   PC.  NOTE(review): elided lines.  */
3645 old_find_exception_handler (void *pc, old_exception_table *table)
3652 /* We can't do a binary search because the table isn't guaranteed
3653 to be sorted from function to function. */
3654 for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
3656 if (table[pos].start_region <= pc && table[pos].end_region > pc)
3658 /* This can apply. Make sure it is at least as small as
3659 the previous best. */
3660 if (best == -1 || (table[pos].end_region <= table[best].end_region
3661 && table[pos].start_region >= table[best].start_region))
3664 /* But it is sorted by starting PC within a function. */
3665 else if (best >= 0 && table[pos].start_region > pc)
3669 return table[best].exception_handler;
3675 /* find_exception_handler finds the correct handler, if there is one, to
3676 handle an exception.
3677 returns a pointer to the handler which controlled should be transferred
3678 to, or NULL if there is nothing left.
3680 PC - pc where the exception originates. If this is a rethrow,
3681 then this starts out as a pointer to the exception table
3682 entry we wish to rethrow out of.
3683 TABLE - exception table for the current module.
3684 EH_INFO - eh info pointer for this exception.
3685 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3686 CLEANUP - returned flag indicating whether this is a cleanup handler.
/* find_exception_handler: new-format lookup (see the long comment
   above for parameter semantics).  The table is sorted innermost-out,
   so the first region containing PC wins.  For a rethrow, PC arrives
   as a pointer to the table entry to rethrow from, and the scan
   resumes past that region.  NOTE(review): elided lines — the cleanup
   flag handling is not fully visible.  */
3689 find_exception_handler (void *pc, exception_descriptor *table,
3690 __eh_info *eh_info, int rethrow, int *cleanup)
3693 void *retval = NULL;
3698 /* The new model assumed the table is sorted inner-most out so the
3699 first region we find which matches is the correct one */
3701 exception_table *tab = &(table->table[0]);
3703 /* Subtract 1 from the PC to avoid hitting the next region */
3706 /* pc is actually the region table entry to rethrow out of */
3707 pos = ((exception_table *) pc) - tab;
3708 pc = ((exception_table *) pc)->end_region - 1;
3710 /* The label is always on the LAST handler entry for a region,
3711 so we know the next entry is a different region, even if the
3712 addresses are the same. Make sure its not end of table tho. */
3713 if (tab[pos].start_region != (void *) -1)
3719 /* We can't do a binary search because the table is in inner-most
3720 to outermost address ranges within functions */
3721 for ( ; tab[pos].start_region != (void *) -1; pos++)
3723 if (tab[pos].start_region <= pc && tab[pos].end_region > pc)
/* Entries with match_info are typed handlers; consult the
   exception's matcher.  Entries without are cleanups.  */
3725 if (tab[pos].match_info)
3727 __eh_matcher matcher = eh_info->match_function;
3728 /* match info but no matcher is NOT a match */
3731 void *ret = (*matcher)((void *) eh_info,
3732 tab[pos].match_info, table);
3736 retval = tab[pos].exception_handler;
3745 retval = tab[pos].exception_handler;
3752 #endif /* DWARF2_UNWIND_INFO */
3753 #endif /* EH_TABLE_LOOKUP */
3755 #ifdef DWARF2_UNWIND_INFO
3756 /* Support code for exception handling using static unwind information. */
3760 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3761 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3762 avoid a warning about casting between int and pointer of different
3765 typedef int ptr_type __attribute__ ((mode (pointer)));
3767 #ifdef INCOMING_REGNO
3768 /* Is the saved value for register REG in frame UDATA stored in a register
3769 window in the previous frame? */
3771 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3772 to use the macro here. One wonders, though, that perhaps TARGET_FLAT
3773 compiled functions won't work with the frame-unwind stuff here.
3774 Perhaps the entireity of in_reg_window should be conditional on having
3775 seen a DW_CFA_GNU_window_save? */
3776 #define target_flags 0
3779 in_reg_window (int reg, frame_state *udata)
3781 if (udata->saved[reg] == REG_SAVED_REG)
3782 return INCOMING_REGNO (reg) == reg;
3783 if (udata->saved[reg] != REG_SAVED_OFFSET)
3786 #ifdef STACK_GROWS_DOWNWARD
3787 return udata->reg_or_offset[reg] > 0;
3789 return udata->reg_or_offset[reg] < 0;
3794 in_reg_window (int reg __attribute__ ((__unused__)),
3795 frame_state *udata __attribute__ ((__unused__)))
3799 #endif /* INCOMING_REGNO */
3801 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3802 frame called by UDATA or 0. */
3805 get_reg_addr (unsigned reg, frame_state *udata, frame_state *sub_udata)
3807 while (udata->saved[reg] == REG_SAVED_REG)
3809 reg = udata->reg_or_offset[reg];
3810 if (in_reg_window (reg, udata))
3816 if (udata->saved[reg] == REG_SAVED_OFFSET)
3817 return (word_type *)(udata->cfa + udata->reg_or_offset[reg]);
3822 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3823 frame called by UDATA or 0. */
3825 static inline void *
3826 get_reg (unsigned reg, frame_state *udata, frame_state *sub_udata)
3828 return (void *)(ptr_type) *get_reg_addr (reg, udata, sub_udata);
3831 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
3834 put_reg (unsigned reg, void *val, frame_state *udata)
3836 *get_reg_addr (reg, udata, NULL) = (word_type)(ptr_type) val;
3839 /* Copy the saved value for register REG from frame UDATA to frame
3840 TARGET_UDATA. Unlike the previous two functions, this can handle
3841 registers that are not one word large. */
3844 copy_reg (unsigned reg, frame_state *udata, frame_state *target_udata)
3846 word_type *preg = get_reg_addr (reg, udata, NULL);
3847 word_type *ptreg = get_reg_addr (reg, target_udata, NULL);
3849 memcpy (ptreg, preg, dwarf_reg_size_table [reg]);
3852 /* Retrieve the return address for frame UDATA. */
3854 static inline void *
3855 get_return_addr (frame_state *udata, frame_state *sub_udata)
3857 return __builtin_extract_return_addr
3858 (get_reg (udata->retaddr_column, udata, sub_udata));
3861 /* Overwrite the return address for frame UDATA with VAL. */
3864 put_return_addr (void *val, frame_state *udata)
3866 val = __builtin_frob_return_addr (val);
3867 put_reg (udata->retaddr_column, val, udata);
3870 /* Given the current frame UDATA and its return address PC, return the
3871 information about the calling frame in CALLER_UDATA. */
3874 next_stack_level (void *pc, frame_state *udata, frame_state *caller_udata)
3876 caller_udata = __frame_state_for (pc, caller_udata);
3880 /* Now go back to our caller's stack frame. If our caller's CFA register
3881 was saved in our stack frame, restore it; otherwise, assume the CFA
3882 register is SP and restore it to our CFA value. */
3883 if (udata->saved[caller_udata->cfa_reg])
3884 caller_udata->cfa = get_reg (caller_udata->cfa_reg, udata, 0);
3886 caller_udata->cfa = udata->cfa;
3887 if (caller_udata->indirect)
3888 caller_udata->cfa = * (void **) ((unsigned char *)caller_udata->cfa
3889 + caller_udata->base_offset);
3890 caller_udata->cfa += caller_udata->cfa_offset;
3892 return caller_udata;
3895 /* Hook to call before __terminate if only cleanup handlers remain. */
3897 __unwinding_cleanup (void)
3901 /* throw_helper performs some of the common grunt work for a throw. This
3902 routine is called by throw and rethrows. This is pretty much split
3903 out from the old __throw routine. An addition has been added which allows
3904 for a dummy call to a routine __unwinding_cleanup() when there are nothing
3905 but cleanups remaining. This allows a debugger to examine the state
3906 at which the throw was executed, before any cleanups, rather than
3907 at the terminate point after the stack has been unwound.
3909 EH is the current eh_context structure.
3910 PC is the address of the call to __throw.
3911 MY_UDATA is the unwind information for __throw.
3912 OFFSET_P is where we return the SP adjustment offset. */
3915 throw_helper (struct eh_context *eh, void *pc, frame_state *my_udata,
3918 frame_state ustruct2, *udata = &ustruct2;
3919 frame_state ustruct;
3920 frame_state *sub_udata = &ustruct;
3921 void *saved_pc = pc;
3923 void *handler_p = 0;
3925 frame_state saved_ustruct;
3928 int only_cleanup = 0;
3930 int saved_state = 0;
3932 __eh_info *eh_info = (__eh_info *)eh->info;
3934 /* Do we find a handler based on a re-throw PC? */
3935 if (eh->table_index != (void *) 0)
3938 memcpy (udata, my_udata, sizeof (*udata));
3940 handler = (void *) 0;
3943 frame_state *p = udata;
3944 udata = next_stack_level (pc, udata, sub_udata);
3947 /* If we couldn't find the next frame, we lose. */
3951 if (udata->eh_ptr == NULL)
3954 new_eh_model = (((exception_descriptor *)(udata->eh_ptr))->
3955 runtime_id_field == NEW_EH_RUNTIME);
3960 handler = find_exception_handler (eh->table_index, udata->eh_ptr,
3961 eh_info, 1, &cleanup);
3962 eh->table_index = (void *)0;
3966 handler = find_exception_handler (pc, udata->eh_ptr, eh_info,
3969 handler = old_find_exception_handler (pc, udata->eh_ptr);
3971 /* If we found one, we can stop searching, if its not a cleanup.
3972 for cleanups, we save the state, and keep looking. This allows
3973 us to call a debug hook if there are nothing but cleanups left. */
3980 saved_ustruct = *udata;
3981 handler_p = handler;
3994 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
3995 hitting the beginning of the next region. */
3996 pc = get_return_addr (udata, sub_udata) - 1;
4001 udata = &saved_ustruct;
4002 handler = handler_p;
4005 __unwinding_cleanup ();
4008 /* If we haven't found a handler by now, this is an unhandled
4013 eh->handler_label = handler;
4015 args_size = udata->args_size;
4018 /* We found a handler in the throw context, no need to unwind. */
4024 /* Unwind all the frames between this one and the handler by copying
4025 their saved register values into our register save slots. */
4027 /* Remember the PC where we found the handler. */
4028 void *handler_pc = pc;
4030 /* Start from the throw context again. */
4032 memcpy (udata, my_udata, sizeof (*udata));
4034 while (pc != handler_pc)
4036 frame_state *p = udata;
4037 udata = next_stack_level (pc, udata, sub_udata);
4040 for (i = 0; i < DWARF_FRAME_REGISTERS; ++i)
4041 if (i != udata->retaddr_column && udata->saved[i])
4043 /* If you modify the saved value of the return address
4044 register on the SPARC, you modify the return address for
4045 your caller's frame. Don't do that here, as it will
4046 confuse get_return_addr. */
4047 if (in_reg_window (i, udata)
4048 && udata->saved[udata->retaddr_column] == REG_SAVED_REG
4049 && udata->reg_or_offset[udata->retaddr_column] == i)
4051 copy_reg (i, udata, my_udata);
4054 pc = get_return_addr (udata, sub_udata) - 1;
4057 /* But we do need to update the saved return address register from
4058 the last frame we unwind, or the handler frame will have the wrong
4060 if (udata->saved[udata->retaddr_column] == REG_SAVED_REG)
4062 i = udata->reg_or_offset[udata->retaddr_column];
4063 if (in_reg_window (i, udata))
4064 copy_reg (i, udata, my_udata);
4067 /* udata now refers to the frame called by the handler frame. */
4069 /* We adjust SP by the difference between __throw's CFA and the CFA for
4070 the frame called by the handler frame, because those CFAs correspond
4071 to the SP values at the two call sites. We need to further adjust by
4072 the args_size of the handler frame itself to get the handler frame's
4073 SP from before the args were pushed for that call. */
4074 #ifdef STACK_GROWS_DOWNWARD
4075 *offset_p = udata->cfa - my_udata->cfa + args_size;
4077 *offset_p = my_udata->cfa - udata->cfa - args_size;
4084 /* We first search for an exception handler, and if we don't find
4085 it, we call __terminate on the current stack frame so that we may
4086 use the debugger to walk the stack and understand why no handler
4089 If we find one, then we unwind the frames down to the one that
4090 has the handler and transfer control into the handler. */
4092 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
4097 struct eh_context *eh = (*get_eh_context) ();
4101 /* XXX maybe make my_ustruct static so we don't have to look it up for
4103 frame_state my_ustruct, *my_udata = &my_ustruct;
4105 /* This is required for C++ semantics. We must call terminate if we
4106 try and rethrow an exception, when there is no exception currently
4111 /* Start at our stack frame. */
4113 my_udata = __frame_state_for (&&label, my_udata);
4117 /* We need to get the value from the CFA register. */
4118 my_udata->cfa = __builtin_dwarf_cfa ();
4120 /* Do any necessary initialization to access arbitrary stack frames.
4121 On the SPARC, this means flushing the register windows. */
4122 __builtin_unwind_init ();
4124 /* Now reset pc to the right throw point. */
4125 pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
4127 handler = throw_helper (eh, pc, my_udata, &offset);
4131 __builtin_eh_return ((void *)eh, offset, handler);
4133 /* Epilogue: restore the handler frame's register values and return
4137 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
4140 __rethrow (void *index)
4142 struct eh_context *eh = (*get_eh_context) ();
4146 /* XXX maybe make my_ustruct static so we don't have to look it up for
4148 frame_state my_ustruct, *my_udata = &my_ustruct;
4150 /* This is required for C++ semantics. We must call terminate if we
4151 try and rethrow an exception, when there is no exception currently
4156 /* This is the table index we want to rethrow from. The value of
4157 the END_REGION label is used for the PC of the throw, and the
4158 search begins with the next table entry. */
4159 eh->table_index = index;
4161 /* Start at our stack frame. */
4163 my_udata = __frame_state_for (&&label, my_udata);
4167 /* We need to get the value from the CFA register. */
4168 my_udata->cfa = __builtin_dwarf_cfa ();
4170 /* Do any necessary initialization to access arbitrary stack frames.
4171 On the SPARC, this means flushing the register windows. */
4172 __builtin_unwind_init ();
4174 /* Now reset pc to the right throw point. */
4175 pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
4177 handler = throw_helper (eh, pc, my_udata, &offset);
4181 __builtin_eh_return ((void *)eh, offset, handler);
4183 /* Epilogue: restore the handler frame's register values and return
4186 #endif /* DWARF2_UNWIND_INFO */
4188 #ifdef IA64_UNWIND_INFO
4191 /* Return handler to which we want to transfer control, NULL if we don't
4192 intend to handle this exception here. */
4194 __ia64_personality_v1 (void *pc, old_exception_table *table)
4201 for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
4203 if (table[pos].start_region <= pc && table[pos].end_region > pc)
4205 /* This can apply. Make sure it is at least as small as
4206 the previous best. */
4207 if (best == -1 || (table[pos].end_region <= table[best].end_region
4208 && table[pos].start_region >= table[best].start_region))
4211 /* It is sorted by starting PC within a function. */
4212 else if (best >= 0 && table[pos].start_region > pc)
4216 return table[best].exception_handler;
4222 ia64_throw_helper (ia64_frame_state *throw_frame, ia64_frame_state *caller,
4223 void *throw_bsp, void *throw_sp)
4225 void *throw_pc = __builtin_return_address (0);
4226 unwind_info_ptr *info;
4227 void *pc, *handler = NULL;
4232 __builtin_ia64_flushrs (); /* Make the local register stacks available. */
4234 /* Start at our stack frame, get our state. */
4235 __build_ia64_frame_state (throw_pc, throw_frame, throw_bsp, throw_sp,
4238 /* Now we have to find the proper frame for pc, and see if there
4239 is a handler for it. if not, we keep going back frames until
4240 we do find one. Otherwise we call uncaught (). */
4243 memcpy (caller, throw_frame, sizeof (*caller));
4246 void *(*personality) ();
4250 /* We only care about the RP right now, so we dont need to keep
4251 any other information about a call frame right now. */
4252 pc = __get_real_reg_value (&caller->rp) - 1;
4253 bsp = __calc_caller_bsp ((long)__get_real_reg_value (&caller->pfs),
4255 info = __build_ia64_frame_state (pc, caller, bsp, caller->my_psp,
4258 /* If we couldn't find the next frame, we lose. */
4262 personality = __get_personality (info);
4263 /* TODO Haven't figured out how to actually load the personality address
4264 yet, so just always default to the one we expect for now. */
4265 if (personality != 0)
4266 personality = __ia64_personality_v1;
4267 eh_table = __get_except_table (info);
4268 /* If there is no personality routine, we'll keep unwinding. */
4270 /* Pass a segment relative PC address to the personality routine,
4271 because the unwind_info section uses segrel relocs. */
4272 handler = personality (pc - pc_base, eh_table);
4278 /* Handler is a segment relative address, so we must adjust it here. */
4279 handler += (long) pc_base;
4281 /* If we found a handler, we need to unwind the stack to that point.
4282 We do this by copying saved values from previous frames into the
4283 save slot for the throw_frame saved slots. when __throw returns,
4284 it'll pickup the correct values. */
4286 /* Start with where __throw saved things, and copy each saved register
4287 of each previous frame until we get to the one before we're
4288 throwing back to. */
4289 memcpy (caller, throw_frame, sizeof (*caller));
4290 for ( ; frame_count > 0; frame_count--)
4292 pc = __get_real_reg_value (&caller->rp) - 1;
4293 bsp = __calc_caller_bsp ((long)__get_real_reg_value (&caller->pfs),
4295 __build_ia64_frame_state (pc, caller, bsp, caller->my_psp, &pc_base);
4296 /* Any regs that were saved can be put in the throw frame now. */
4297 /* We don't want to copy any saved register from the
4298 target destination, but we do want to load up it's frame. */
4299 if (frame_count > 1)
4300 __copy_saved_reg_state (throw_frame, caller);
4303 /* Set return address of the throw frame to the handler. */
4304 __set_real_reg_value (&throw_frame->rp, handler);
4306 /* TODO, do we need to do anything to make the values we wrote 'stick'? */
4307 /* DO we need to go through the whole loadrs seqeunce? */
4314 register void *stack_pointer __asm__("r12");
4315 struct eh_context *eh = (*get_eh_context) ();
4316 ia64_frame_state my_frame;
4317 ia64_frame_state originator; /* For the context handler is in. */
4318 void *bsp, *tmp_bsp;
4321 /* This is required for C++ semantics. We must call terminate if we
4322 try and rethrow an exception, when there is no exception currently
4327 __builtin_unwind_init ();
4329 /* We have to call another routine to actually process the frame
4330 information, which will force all of __throw's local registers into
4333 /* Get the value of ar.bsp while we're here. */
4335 bsp = __builtin_ia64_bsp ();
4336 ia64_throw_helper (&my_frame, &originator, bsp, stack_pointer);
4338 /* Now we have to fudge the bsp by the amount in our (__throw)
4339 frame marker, since the return is going to adjust it by that much. */
4341 tmp_bsp = __calc_caller_bsp ((long)__get_real_reg_value (&my_frame.pfs),
4343 offset = (char *)my_frame.my_bsp - (char *)tmp_bsp;
4344 tmp_bsp = (char *)originator.my_bsp + offset;
4346 __builtin_eh_return (tmp_bsp, offset, originator.my_sp);
4348 /* The return address was already set by throw_helper. */
4351 #endif /* IA64_UNWIND_INFO */