1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 92, 93, 94, 95, 96, 1997 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
22 /* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License. */
29 /* It is incorrect to include config.h here, because this file is being
30 compiled for the target, and hence definitions concerning only the host
40 /* Don't use `fancy_abort' here even if config.h says to use it. */
45 #if (SUPPORTS_WEAK == 1) && (defined (ASM_OUTPUT_DEF) || defined (ASM_OUTPUT_WEAK_ALIAS))
49 /* In a cross-compilation situation, default to inhibiting compilation
50 of routines that use libc. */
56 /* Permit the tm.h file to select the endianness to use just for this
57 file. This is used when the endianness is determined when the
60 #ifndef LIBGCC2_WORDS_BIG_ENDIAN
61 #define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
64 /* In the first part of this file, we are interfacing to calls generated
65 by the compiler itself. These calls pass values into these routines
66 which have very specific modes (rather than very specific types), and
67 these compiler-generated calls also expect any return values to have
68 very specific modes (rather than very specific types). Thus, we need
69 to avoid using regular C language type names in this part of the file
70 because the sizes for those types can be configured to be anything.
71 Instead we use the following special type names. */
73 typedef unsigned int UQItype __attribute__ ((mode (QI)));
74 typedef int SItype __attribute__ ((mode (SI)));
75 typedef unsigned int USItype __attribute__ ((mode (SI)));
76 typedef int DItype __attribute__ ((mode (DI)));
77 typedef unsigned int UDItype __attribute__ ((mode (DI)));
79 typedef float SFtype __attribute__ ((mode (SF)));
80 typedef float DFtype __attribute__ ((mode (DF)));
82 #if LONG_DOUBLE_TYPE_SIZE == 96
83 typedef float XFtype __attribute__ ((mode (XF)));
85 #if LONG_DOUBLE_TYPE_SIZE == 128
86 typedef float TFtype __attribute__ ((mode (TF)));
89 typedef int word_type __attribute__ ((mode (__word__)));
91 /* Make sure that we don't accidentally use any normal C language built-in
92 type names in the first part of this file. Instead we want to use *only*
93 the type names defined above. The following macro definitions ensure
94 that if we *do* accidentally use some normal C language built-in type name,
95 we will get a syntax error. */
97 #define char bogus_type
98 #define short bogus_type
99 #define int bogus_type
100 #define long bogus_type
101 #define unsigned bogus_type
102 #define float bogus_type
103 #define double bogus_type
105 #define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT)
107 /* DIstructs are pairs of SItype values in the order determined by
108 LIBGCC2_WORDS_BIG_ENDIAN. */
110 #if LIBGCC2_WORDS_BIG_ENDIAN
111 struct DIstruct {SItype high, low;};
113 struct DIstruct {SItype low, high;};
116 /* We need this union to unpack/pack DImode values, since we don't have
117 any arithmetic yet. Incoming DImode parameters are stored into the
118 `ll' field, and the unpacked result is read from the struct `s'. */
126 #if (defined (L_udivmoddi4) || defined (L_muldi3) || defined (L_udiv_w_sdiv)\
127 || defined (L_divdi3) || defined (L_udivdi3) \
128 || defined (L_moddi3) || defined (L_umoddi3))
130 #include "longlong.h"
132 #endif /* udiv or mul */
134 extern DItype __fixunssfdi (SFtype a);
135 extern DItype __fixunsdfdi (DFtype a);
136 #if LONG_DOUBLE_TYPE_SIZE == 96
137 extern DItype __fixunsxfdi (XFtype a);
139 #if LONG_DOUBLE_TYPE_SIZE == 128
140 extern DItype __fixunstfdi (TFtype a);
143 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
144 #if defined (L_divdi3) || defined (L_moddi3)
156 w.s.high = -uu.s.high - ((USItype) w.s.low > 0);
162 /* Unless shift functions are defined with full ANSI prototypes,
163 parameter b will be promoted to int if word_type is smaller than an int. */
/* Logical (zero-fill) right shift of the DImode value U by B bits,
   built from two SItype word operations.  NOTE(review): interior
   lines are elided in this view (the embedded line numbers jump), so
   the local declarations, the b == 0 fast path and the return are not
   visible.  */
166 __lshrdi3 (DItype u, word_type b)
177 bm = (sizeof (SItype) * BITS_PER_UNIT) - b;  /* bm <= 0 when b spans a whole word */
181 w.s.low = (USItype)uu.s.high >> -bm;  /* shift >= one word: high word feeds the low result */
185 USItype carries = (USItype)uu.s.high << bm;  /* bits crossing the word boundary */
186 w.s.high = (USItype)uu.s.high >> b;
187 w.s.low = ((USItype)uu.s.low >> b) | carries;
/* Arithmetic/logical left shift of the DImode value U by B bits
   (identical results either way for left shifts).  NOTE(review):
   interior lines are elided in this view; declarations, the b == 0
   fast path and the return are not visible.  */
196 __ashldi3 (DItype u, word_type b)
207 bm = (sizeof (SItype) * BITS_PER_UNIT) - b;  /* bm <= 0 when b spans a whole word */
211 w.s.high = (USItype)uu.s.low << -bm;  /* shift >= one word: low word feeds the high result */
215 USItype carries = (USItype)uu.s.low >> bm;  /* bits crossing the word boundary */
216 w.s.low = (USItype)uu.s.low << b;
217 w.s.high = ((USItype)uu.s.high << b) | carries;
/* Arithmetic (sign-propagating) right shift of the DImode value U by
   B bits.  NOTE(review): interior lines are elided in this view;
   declarations, the b == 0 fast path and the return are not
   visible.  */
226 __ashrdi3 (DItype u, word_type b)
237 bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
240 /* w.s.high = 1..1 or 0..0: replicate the sign bit across the whole word.  */
241 w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1);
242 w.s.low = uu.s.high >> -bm;  /* shift >= one word: signed shift of the high word */
246 USItype carries = (USItype)uu.s.high << bm;
247 w.s.high = uu.s.high >> b;  /* signed shift keeps the sign */
248 w.s.low = ((USItype)uu.s.low >> b) | carries;
262 w.s.low = ffs (uu.s.low);
265 w.s.low = ffs (uu.s.high);
268 w.s.low += BITS_PER_UNIT * sizeof (SItype);
/* DImode multiply: low x low via __umulsidi3 gives the full 64-bit
   partial product; the cross terms (low*high + high*low) only affect
   the high word, and the high*high term overflows entirely and is
   dropped.  NOTE(review): interior lines (declarations, unpacking of
   u/v into uu/vv, return) are elided in this view.  */
277 __muldi3 (DItype u, DItype v)
285 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
286 w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
287 + (USItype) uu.s.high * (USItype) vv.s.low);
294 #if defined (sdiv_qrnnd)
/* Divide the two-word unsigned number a1:a0 by d using only a signed
   divide primitive (sdiv_qrnnd); the remainder is stored through RP
   and the quotient returned.  The cases split on whether the operands
   fit a signed divide directly, or need pre-biasing/halving first.
   NOTE(review): interior lines are elided in this view (embedded line
   numbers jump) -- declarations, several case bodies and the returns
   are not visible.  */
296 __udiv_w_sdiv (USItype *rp, USItype a1, USItype a0, USItype d)
303 if (a1 < d - a1 - (a0 >> (SI_TYPE_SIZE - 1)))
305 /* dividend, divisor, and quotient are nonnegative */
306 sdiv_qrnnd (q, r, a1, a0, d);
310 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
311 sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (SI_TYPE_SIZE - 1));
312 /* Divide (c1*2^32 + c0) by d */
313 sdiv_qrnnd (q, r, c1, c0, d);
314 /* Add 2^31 to quotient */
315 q += (USItype) 1 << (SI_TYPE_SIZE - 1);
320 b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
321 c1 = a1 >> 1; /* A/2 */
322 c0 = (a1 << (SI_TYPE_SIZE - 1)) + (a0 >> 1);
324 if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
326 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
328 r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
345 else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
348 c0 = ~c0; /* logical NOT */
350 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
352 q = ~q; /* (A/2)/b1 */
355 r = 2*r + (a0 & 1); /* A/(2*b1) */
373 else /* Implies c1 = b1 */
374 { /* Hence a1 = d - 1 = 2*b1 - 1 */
392 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
394 __udiv_w_sdiv (USItype *rp, USItype a1, USItype a0, USItype d)
399 #if (defined (L_udivdi3) || defined (L_divdi3) || \
400 defined (L_umoddi3) || defined (L_moddi3))
405 static const UQItype __clz_tab[] =
407 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
408 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
409 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
410 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
411 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
412 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
413 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
414 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
417 #if (defined (L_udivdi3) || defined (L_divdi3) || \
418 defined (L_umoddi3) || defined (L_moddi3))
/* The DImode division workhorse: divide N by D, return the quotient,
   and when RP is non-null store the remainder through RP.  Built on
   the longlong.h primitives udiv_qrnnd / count_leading_zeros /
   sub_ddmmss / umul_ppmm, with separate paths for one-word and
   two-word divisors, and for targets whose udiv_qrnnd does or does
   not need a normalized divisor.  NOTE(review): many interior lines
   are elided in this view (embedded line numbers jump), so the
   branch structure connecting the visible statements is not shown.  */
422 __udivmoddi4 (UDItype n, UDItype d, UDItype *rp)
427 USItype d0, d1, n0, n1, n2;
439 #if !UDIV_NEEDS_NORMALIZATION
446 udiv_qrnnd (q0, n0, n1, n0, d0);
449 /* Remainder in n0. */
456 d0 = 1 / d0; /* Divide intentionally by zero. */
458 udiv_qrnnd (q1, n1, 0, n1, d0);
459 udiv_qrnnd (q0, n0, n1, n0, d0);
461 /* Remainder in n0. */
472 #else /* UDIV_NEEDS_NORMALIZATION */
480 count_leading_zeros (bm, d0);
484 /* Normalize, i.e. make the most significant bit of the
488 n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm));
492 udiv_qrnnd (q0, n0, n1, n0, d0);
495 /* Remainder in n0 >> bm. */
502 d0 = 1 / d0; /* Divide intentionally by zero. */
504 count_leading_zeros (bm, d0);
508 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
509 conclude (the most significant bit of n1 is set) /\ (the
510 leading quotient digit q1 = 1).
512 This special case is necessary, not an optimization.
513 (Shift counts of SI_TYPE_SIZE are undefined.) */
522 b = SI_TYPE_SIZE - bm;
526 n1 = (n1 << bm) | (n0 >> b);
529 udiv_qrnnd (q1, n1, n2, n1, d0);
534 udiv_qrnnd (q0, n0, n1, n0, d0);
536 /* Remainder in n0 >> bm. */
546 #endif /* UDIV_NEEDS_NORMALIZATION */
557 /* Remainder in n1n0. */
569 count_leading_zeros (bm, d1);
572 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
573 conclude (the most significant bit of n1 is set) /\ (the
574 quotient digit q0 = 0 or 1).
576 This special case is necessary, not an optimization. */
578 /* The condition on the next line takes advantage of that
579 n1 >= d1 (true due to program flow). */
580 if (n1 > d1 || n0 >= d0)
583 sub_ddmmss (n1, n0, n1, n0, d1, d0);
602 b = SI_TYPE_SIZE - bm;
604 d1 = (d1 << bm) | (d0 >> b);
607 n1 = (n1 << bm) | (n0 >> b);
610 udiv_qrnnd (q0, n1, n2, n1, d1);
611 umul_ppmm (m1, m0, q0, d0);
613 if (m1 > n1 || (m1 == n1 && m0 > n0))
616 sub_ddmmss (m1, m0, m1, m0, d1, d0);
621 /* Remainder in (n1n0 - m1m0) >> bm. */
624 sub_ddmmss (n1, n0, n1, n0, m1, m0);
625 rr.s.low = (n1 << b) | (n0 >> bm);
626 rr.s.high = n1 >> bm;
/* Signed DImode division: negate negative operands, divide the
   magnitudes with __udivmoddi4, and (in elided code) negate the
   result when the operand signs differ.  NOTE(review): declarations,
   sign tests and the return are not visible in this view.  */
640 UDItype __udivmoddi4 ();
643 __divdi3 (DItype u, DItype v)
654 uu.ll = __negdi2 (uu.ll);
657 vv.ll = __negdi2 (vv.ll);
659 w = __udivmoddi4 (uu.ll, vv.ll, (UDItype *) 0);
/* Signed DImode modulus: divide the magnitudes, take the remainder
   through the pointer argument of __udivmoddi4 (quotient discarded).
   NOTE(review): declarations, sign handling of the result and the
   return are not visible in this view.  */
668 UDItype __udivmoddi4 ();
670 __moddi3 (DItype u, DItype v)
681 uu.ll = __negdi2 (uu.ll);
683 vv.ll = __negdi2 (vv.ll);
685 (void) __udivmoddi4 (uu.ll, vv.ll, &w);
/* Unsigned DImode modulus: thin wrapper that asks __udivmoddi4 for
   the remainder only.  NOTE(review): the declaration of w and the
   return are not visible in this view.  */
694 UDItype __udivmoddi4 ();
696 __umoddi3 (UDItype u, UDItype v)
700 (void) __udivmoddi4 (u, v, &w);
/* Unsigned DImode division: thin wrapper over __udivmoddi4 with no
   remainder requested.  */
707 UDItype __udivmoddi4 ();
709 __udivdi3 (UDItype n, UDItype d)
711 return __udivmoddi4 (n, d, (UDItype *) 0);
/* Three-way signed comparison of two DImode values: high words are
   compared signed, then low words unsigned.  NOTE(review): the return
   statements are elided in this view -- by libgcc convention the
   result is 0 (a < b), 1 (a == b) or 2 (a > b); confirm against the
   full source.  */
717 __cmpdi2 (DItype a, DItype b)
721 au.ll = a, bu.ll = b;
723 if (au.s.high < bu.s.high)
725 else if (au.s.high > bu.s.high)
727 if ((USItype) au.s.low < (USItype) bu.s.low)
729 else if ((USItype) au.s.low > (USItype) bu.s.low)
/* Three-way unsigned comparison of two DImode values: both word
   comparisons are done unsigned.  NOTE(review): the return statements
   are elided in this view -- by libgcc convention the result is 0
   (a < b), 1 (a == b) or 2 (a > b); confirm against the full
   source.  */
737 __ucmpdi2 (DItype a, DItype b)
741 au.ll = a, bu.ll = b;
743 if ((USItype) au.s.high < (USItype) bu.s.high)
745 else if ((USItype) au.s.high > (USItype) bu.s.high)
747 if ((USItype) au.s.low < (USItype) bu.s.low)
749 else if ((USItype) au.s.low > (USItype) bu.s.low)
755 #if defined(L_fixunstfdi) && (LONG_DOUBLE_TYPE_SIZE == 128)
756 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
757 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
/* Convert the TFtype (128-bit long double) value A to an unsigned
   DImode integer, one SItype word at a time: divide by
   HIGH_WORD_COEFF (2^WORD_SIZE) for the high word, then convert the
   residue for the low word.  NOTE(review): declarations and several
   statements are elided in this view.  */
760 __fixunstfdi (TFtype a)
768 /* Compute high word of result, as a flonum. */
769 b = (a / HIGH_WORD_COEFF);
770 /* Convert that to fixed (but not to DItype!),
771 and shift it into the high word. */
774 /* Remove high part from the TFtype, leaving the low part as flonum. */
776 /* Convert that to fixed (but not to DItype!) and add it in.
777 Sometimes A comes out negative. This is significant, since
778 A has more bits than a long int does. */
780 v -= (USItype) (- a);
787 #if defined(L_fixtfdi) && (LONG_DOUBLE_TYPE_SIZE == 128)
792 return - __fixunstfdi (-a);
793 return __fixunstfdi (a);
797 #if defined(L_fixunsxfdi) && (LONG_DOUBLE_TYPE_SIZE == 96)
798 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
799 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
/* Convert the XFtype (96-bit extended) value A to an unsigned DImode
   integer, one SItype word at a time (same scheme as __fixunstfdi).
   NOTE(review): declarations and several statements are elided in
   this view.  */
802 __fixunsxfdi (XFtype a)
810 /* Compute high word of result, as a flonum. */
811 b = (a / HIGH_WORD_COEFF);
812 /* Convert that to fixed (but not to DItype!),
813 and shift it into the high word. */
816 /* Remove high part from the XFtype, leaving the low part as flonum. */
818 /* Convert that to fixed (but not to DItype!) and add it in.
819 Sometimes A comes out negative. This is significant, since
820 A has more bits than a long int does. */
822 v -= (USItype) (- a);
829 #if defined(L_fixxfdi) && (LONG_DOUBLE_TYPE_SIZE == 96)
834 return - __fixunsxfdi (-a);
835 return __fixunsxfdi (a);
840 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
841 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
/* Convert the DFtype (double) value A to an unsigned DImode integer,
   one SItype word at a time: divide by HIGH_WORD_COEFF (2^WORD_SIZE)
   for the high word, then convert the residue for the low word.
   NOTE(review): declarations and several statements are elided in
   this view.  */
844 __fixunsdfdi (DFtype a)
852 /* Compute high word of result, as a flonum. */
853 b = (a / HIGH_WORD_COEFF);
854 /* Convert that to fixed (but not to DItype!),
855 and shift it into the high word. */
858 /* Remove high part from the DFtype, leaving the low part as flonum. */
860 /* Convert that to fixed (but not to DItype!) and add it in.
861 Sometimes A comes out negative. This is significant, since
862 A has more bits than a long int does. */
864 v -= (USItype) (- a);
876 return - __fixunsdfdi (-a);
877 return __fixunsdfdi (a);
882 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
883 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
/* Convert the SFtype (float) value to an unsigned DImode integer by
   first widening to DFtype (lossless for SF values), then using the
   same word-splitting scheme as __fixunsdfdi.  NOTE(review):
   declarations and several statements are elided in this view.  */
886 __fixunssfdi (SFtype original_a)
888 /* Convert the SFtype to a DFtype, because that is surely not going
889 to lose any bits. Some day someone else can write a faster version
890 that avoids converting to DFtype, and verify it really works right. */
891 DFtype a = original_a;
898 /* Compute high word of result, as a flonum. */
899 b = (a / HIGH_WORD_COEFF);
900 /* Convert that to fixed (but not to DItype!),
901 and shift it into the high word. */
904 /* Remove high part from the DFtype, leaving the low part as flonum. */
906 /* Convert that to fixed (but not to DItype!) and add it in.
907 Sometimes A comes out negative. This is significant, since
908 A has more bits than a long int does. */
910 v -= (USItype) (- a);
922 return - __fixunssfdi (-a);
923 return __fixunssfdi (a);
927 #if defined(L_floatdixf) && (LONG_DOUBLE_TYPE_SIZE == 96)
928 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
929 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
930 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
/* Convert the DImode integer U to XFtype.  The high word is scaled by
   two HIGH_HALFWORD_COEFF multiplies (their product equals
   HIGH_WORD_COEFF = 2^WORD_SIZE), then the low word is added in.
   NOTE(review): `negate' and `d' are set up in code elided from this
   view (presumably |u| is taken first -- confirm).  */
933 __floatdixf (DItype u)
941 d = (USItype) (u >> WORD_SIZE);
942 d *= HIGH_HALFWORD_COEFF;
943 d *= HIGH_HALFWORD_COEFF;
944 d += (USItype) (u & (HIGH_WORD_COEFF - 1));
946 return (negate ? -d : d);
950 #if defined(L_floatditf) && (LONG_DOUBLE_TYPE_SIZE == 128)
951 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
952 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
953 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
/* Convert the DImode integer U to TFtype; same two-halfword-multiply
   scaling scheme as __floatdixf.  NOTE(review): `negate' and `d' are
   set up in code elided from this view.  */
956 __floatditf (DItype u)
964 d = (USItype) (u >> WORD_SIZE);
965 d *= HIGH_HALFWORD_COEFF;
966 d *= HIGH_HALFWORD_COEFF;
967 d += (USItype) (u & (HIGH_WORD_COEFF - 1));
969 return (negate ? -d : d);
974 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
975 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
976 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
/* Convert the DImode integer U to DFtype.  Two multiplies by
   HIGH_HALFWORD_COEFF together scale by HIGH_WORD_COEFF
   (2^WORD_SIZE); the low word is then added in.  NOTE(review):
   `negate' and `d' are set up in code elided from this view.  */
979 __floatdidf (DItype u)
987 d = (USItype) (u >> WORD_SIZE);
988 d *= HIGH_HALFWORD_COEFF;
989 d *= HIGH_HALFWORD_COEFF;
990 d += (USItype) (u & (HIGH_WORD_COEFF - 1));
992 return (negate ? -d : d);
997 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
998 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
999 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
1000 #define DI_SIZE (sizeof (DItype) * BITS_PER_UNIT)
1002 /* Define codes for all the float formats that we know of. Note
1003 that this is copied from real.h. */
1005 #define UNKNOWN_FLOAT_FORMAT 0
1006 #define IEEE_FLOAT_FORMAT 1
1007 #define VAX_FLOAT_FORMAT 2
1008 #define IBM_FLOAT_FORMAT 3
1010 /* Default to IEEE float if not specified. Nearly all machines use it. */
1011 #ifndef HOST_FLOAT_FORMAT
1012 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
1015 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1020 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
1025 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
/* Convert the DImode integer U to SFtype.  The work is done in a
   DFmode accumulator `f', with a sticky "representative bit" folded
   in first to avoid double rounding (DF then SF) producing a result
   different from direct DI->SF rounding.  NOTE(review): declarations,
   the sticky-bit OR into u, and the negate setup are elided from this
   view.  */
1031 __floatdisf (DItype u)
1033 /* Do the calculation in DFmode
1034 so that we don't lose any of the precision of the high word
1035 while multiplying it. */
1042 /* Protect against double-rounding error.
1043 Represent any low-order bits, that might be truncated in DFmode,
1044 by a bit that won't be lost. The bit can go in anywhere below the
1045 rounding position of the SFmode. A fixed mask and bit position
1046 handles all usual configurations. It doesn't handle the case
1047 of 128-bit DImode, however. */
1048 if (DF_SIZE < DI_SIZE
1049 && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
1051 #define REP_BIT ((USItype) 1 << (DI_SIZE - DF_SIZE))
1052 if (u >= ((UDItype) 1 << DF_SIZE))
1054 if ((USItype) u & (REP_BIT - 1))
1058 f = (USItype) (u >> WORD_SIZE);
1059 f *= HIGH_HALFWORD_COEFF;
1060 f *= HIGH_HALFWORD_COEFF;
1061 f += (USItype) (u & (HIGH_WORD_COEFF - 1));
1063 return (SFtype) (negate ? -f : f);
1067 #if defined(L_fixunsxfsi) && LONG_DOUBLE_TYPE_SIZE == 96
1068 /* Reenable the normal types, in case limits.h needs them. */
/* Convert XFtype A to an unsigned SImode value.  Values >= 2^31 are
   handled by biasing with LONG_MIN (from limits.h) so the signed
   conversion stays in range, then un-biasing in integer arithmetic.
   NOTE(review): the fall-through path for small values is elided from
   this view.  */
1081 __fixunsxfsi (XFtype a)
1083 if (a >= - (DFtype) LONG_MIN)
1084 return (SItype) (a + LONG_MIN) - LONG_MIN;
1090 /* Reenable the normal types, in case limits.h needs them. */
/* Convert DFtype A to an unsigned SImode value, biasing with LONG_MIN
   for values >= 2^31 (same scheme as __fixunsxfsi).  NOTE(review):
   the fall-through path for small values is elided from this view.  */
1103 __fixunsdfsi (DFtype a)
1105 if (a >= - (DFtype) LONG_MIN)
1106 return (SItype) (a + LONG_MIN) - LONG_MIN;
1112 /* Reenable the normal types, in case limits.h needs them. */
/* Convert SFtype A to an unsigned SImode value, biasing with LONG_MIN
   for values >= 2^31 (the comparison is done in SFmode here).
   NOTE(review): the fall-through path for small values is elided from
   this view.  */
1125 __fixunssfsi (SFtype a)
1127 if (a >= - (SFtype) LONG_MIN)
1128 return (SItype) (a + LONG_MIN) - LONG_MIN;
1133 /* From here on down, the routines use normal data types. */
1135 #define SItype bogus_type
1136 #define USItype bogus_type
1137 #define DItype bogus_type
1138 #define UDItype bogus_type
1139 #define SFtype bogus_type
1140 #define DFtype bogus_type
1152 /* Like bcmp except the sign is meaningful.
1153 Result is negative if S1 is less than S2,
1154 positive if S1 is greater, 0 if S1 and S2 are equal. */
/* Signed memory compare (see the comment above): walks both buffers
   byte by byte.  NOTE(review): the loop structure, the unequal-byte
   return and the final equal return are elided from this view.  */
1157 __gcc_bcmp (unsigned char *s1, unsigned char *s2, size_t size)
1161 unsigned char c1 = *s1++, c2 = *s2++;
1178 #if defined(__svr4__) || defined(__alliant__)
1182 /* The Alliant needs the added underscore. */
1183 asm (".globl __builtin_saveregs");
1184 asm ("__builtin_saveregs:");
1185 asm (".globl ___builtin_saveregs");
1186 asm ("___builtin_saveregs:");
1188 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1189 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1190 area and also for a new va_list
1192 /* Save all argument registers in the arg reg save area. The
1193 arg reg save area must have the following layout (according
1205 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1206 asm (" fst.q %f12,16(%sp)");
1208 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1209 asm (" st.l %r17,36(%sp)");
1210 asm (" st.l %r18,40(%sp)");
1211 asm (" st.l %r19,44(%sp)");
1212 asm (" st.l %r20,48(%sp)");
1213 asm (" st.l %r21,52(%sp)");
1214 asm (" st.l %r22,56(%sp)");
1215 asm (" st.l %r23,60(%sp)");
1216 asm (" st.l %r24,64(%sp)");
1217 asm (" st.l %r25,68(%sp)");
1218 asm (" st.l %r26,72(%sp)");
1219 asm (" st.l %r27,76(%sp)");
1221 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1222 va_list structure. Put it into
1223 r16 so that it will be returned
1226 /* Initialize all fields of the new va_list structure. This
1227 structure looks like:
1230 unsigned long ireg_used;
1231 unsigned long freg_used;
1237 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1238 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1239 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1240 asm (" bri %r1"); /* delayed return */
1241 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1243 #else /* not __svr4__ */
1244 #if defined(__PARAGON__)
1246 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1247 * and we stand a better chance of hooking into libraries
1248 * compiled by PGI. [andyp@ssd.intel.com]
1252 asm (".globl __builtin_saveregs");
1253 asm ("__builtin_saveregs:");
1254 asm (".globl ___builtin_saveregs");
1255 asm ("___builtin_saveregs:");
1257 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1258 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1259 area and also for a new va_list
1261 /* Save all argument registers in the arg reg save area. The
1262 arg reg save area must have the following layout (according
1274 asm (" fst.q f8, 0(sp)");
1275 asm (" fst.q f12,16(sp)");
1276 asm (" st.l r16,32(sp)");
1277 asm (" st.l r17,36(sp)");
1278 asm (" st.l r18,40(sp)");
1279 asm (" st.l r19,44(sp)");
1280 asm (" st.l r20,48(sp)");
1281 asm (" st.l r21,52(sp)");
1282 asm (" st.l r22,56(sp)");
1283 asm (" st.l r23,60(sp)");
1284 asm (" st.l r24,64(sp)");
1285 asm (" st.l r25,68(sp)");
1286 asm (" st.l r26,72(sp)");
1287 asm (" st.l r27,76(sp)");
1289 asm (" adds 80,sp,r16"); /* compute the address of the new
1290 va_list structure. Put it into
1291 r16 so that it will be returned
1294 /* Initialize all fields of the new va_list structure. This
1295 structure looks like:
1298 unsigned long ireg_used;
1299 unsigned long freg_used;
1305 asm (" st.l r0, 0(r16)"); /* nfixed */
1306 asm (" st.l r0, 4(r16)"); /* nfloating */
1307 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1308 asm (" bri r1"); /* delayed return */
1309 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1310 #else /* not __PARAGON__ */
1314 asm (".globl ___builtin_saveregs");
1315 asm ("___builtin_saveregs:");
1316 asm (" mov sp,r30");
1317 asm (" andnot 0x0f,sp,sp");
1318 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1320 /* Fill in the __va_struct. */
1321 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1322 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1323 asm (" st.l r18, 8(sp)");
1324 asm (" st.l r19,12(sp)");
1325 asm (" st.l r20,16(sp)");
1326 asm (" st.l r21,20(sp)");
1327 asm (" st.l r22,24(sp)");
1328 asm (" st.l r23,28(sp)");
1329 asm (" st.l r24,32(sp)");
1330 asm (" st.l r25,36(sp)");
1331 asm (" st.l r26,40(sp)");
1332 asm (" st.l r27,44(sp)");
1334 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1335 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1337 /* Fill in the __va_ctl. */
1338 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1339 asm (" st.l r28,84(sp)"); /* pointer to more args */
1340 asm (" st.l r0, 88(sp)"); /* nfixed */
1341 asm (" st.l r0, 92(sp)"); /* nfloating */
1343 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1345 asm (" mov r30,sp");
1346 /* recover stack and pass address to start
1348 #endif /* not __PARAGON__ */
1349 #endif /* not __svr4__ */
1350 #else /* not __i860__ */
1352 asm (".global __builtin_saveregs");
1353 asm ("__builtin_saveregs:");
1354 asm (".global ___builtin_saveregs");
1355 asm ("___builtin_saveregs:");
1356 #ifdef NEED_PROC_COMMAND
1359 asm ("st %i0,[%fp+68]");
1360 asm ("st %i1,[%fp+72]");
1361 asm ("st %i2,[%fp+76]");
1362 asm ("st %i3,[%fp+80]");
1363 asm ("st %i4,[%fp+84]");
1365 asm ("st %i5,[%fp+88]");
1366 #ifdef NEED_TYPE_COMMAND
1367 asm (".type __builtin_saveregs,#function");
1368 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1370 #else /* not __sparc__ */
1371 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1374 asm (" .ent __builtin_saveregs");
1375 asm (" .globl __builtin_saveregs");
1376 asm ("__builtin_saveregs:");
1377 asm (" sw $4,0($30)");
1378 asm (" sw $5,4($30)");
1379 asm (" sw $6,8($30)");
1380 asm (" sw $7,12($30)");
1382 asm (" .end __builtin_saveregs");
1383 #else /* not __mips__, etc. */
1386 __builtin_saveregs ()
1391 #endif /* not __mips__ */
1392 #endif /* not __sparc__ */
1393 #endif /* not __i860__ */
1397 #ifndef inhibit_libc
1399 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1401 /* This is used by the `assert' macro. */
/* Support routine for the `assert' macro: STRING is a printf format
   expecting the failed expression text, line number and file name.
   NOTE(review): the fflush/abort that presumably follow are elided
   from this view.  */
1403 __eprintf (const char *string, const char *expression,
1404 int line, const char *filename)
1406 fprintf (stderr, string, expression, line, filename);
1416 /* Structure emitted by -a */
1420 const char *filename;
1424 const unsigned long *addresses;
1426 /* Older GCC's did not emit these fields. */
1428 const char **functions;
1429 const long *line_nums;
1430 const char **filenames;
1434 #ifdef BLOCK_PROFILER_CODE
1437 #ifndef inhibit_libc
1439 /* Simple minded basic block profiling output dumper for
1440 systems that don't provide tcov support. At present,
1441 it requires atexit and stdio. */
1443 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1447 #include "gbl-ctors.h"
1448 #include "gcov-io.h"
1450 static struct bb *bb_head;
/* Return the number of digits needed to print VALUE in the given
   BASE.  A leading minus sign is counted for negative values except
   in base 16 (hex values are printed as-is).  NOTE(review): the
   digit-counting loop and return are elided from this view.  */
1453 /* __inline__ */ static int num_digits (long value, int base)
1455 int minus = (value < 0 && base != 16);
1456 unsigned long v = (minus) ? -value : value;
1470 __bb_exit_func (void)
1472 FILE *da_file, *file;
1479 i = strlen (bb_head->filename) - 3;
1481 if (!strcmp (bb_head->filename+i, ".da"))
1483 /* Must be -fprofile-arcs not -a.
1484 Dump data in a form that gcov expects. */
1488 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1490 /* If the file exists, and the number of counts in it is the same,
1491 then merge them in. */
1493 if ((da_file = fopen (ptr->filename, "r")) != 0)
1501 if (__read_long (&n_counts, da_file, 8) != 0)
1503 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1508 if (n_counts == ptr->ncounts)
1512 for (i = 0; i < n_counts; i++)
1519 if (__read_long (&v, da_file, 8) != 0)
1521 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1525 ptr->counts[i] += v;
1529 if (fclose (da_file) == EOF)
1530 fprintf (stderr, "arc profiling: Error closing output file %s.\n",
1533 if ((da_file = fopen (ptr->filename, "w")) < 0)
1535 fprintf (stderr, "arc profiling: Can't open output file %s.\n",
1540 /* ??? Should first write a header to the file. Preferably, a 4 byte
1541 magic number, 4 bytes containing the time the program was
1542 compiled, 4 bytes containing the last modification time of the
1543 source file, and 4 bytes indicating the compiler options used.
1545 That way we can easily verify that the proper source/executable/
1546 data file combination is being used from gcov. */
1548 if (__write_long (ptr->ncounts, da_file, 8) != 0)
1551 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1557 long *count_ptr = ptr->counts;
1559 for (j = ptr->ncounts; j > 0; j--)
1561 if (__write_long (*count_ptr, da_file, 8) != 0)
1569 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1573 if (fclose (da_file) == EOF)
1574 fprintf (stderr, "arc profiling: Error closing output file %s.\n",
1581 /* Must be basic block profiling. Emit a human readable output file. */
1583 file = fopen ("bb.out", "a");
1592 /* This is somewhat type incorrect, but it avoids worrying about
1593 exactly where time.h is included from. It should be ok unless
1594 a void * differs from other pointer formats, or if sizeof (long)
1595 is < sizeof (time_t). It would be nice if we could assume the
1596 use of rationale standards here. */
1598 time ((void *) &time_value);
1599 fprintf (file, "Basic block profiling finished on %s\n", ctime ((void *) &time_value));
1601 /* We check the length field explicitly in order to allow compatibility
1602 with older GCC's which did not provide it. */
1604 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1607 int func_p = (ptr->nwords >= sizeof (struct bb)
1608 && ptr->nwords <= 1000
1610 int line_p = (func_p && ptr->line_nums);
1611 int file_p = (func_p && ptr->filenames);
1612 int addr_p = (ptr->addresses != 0);
1613 long ncounts = ptr->ncounts;
1619 int blk_len = num_digits (ncounts, 10);
1624 fprintf (file, "File %s, %ld basic blocks \n\n",
1625 ptr->filename, ncounts);
1627 /* Get max values for each field. */
1628 for (i = 0; i < ncounts; i++)
1633 if (cnt_max < ptr->counts[i])
1634 cnt_max = ptr->counts[i];
1636 if (addr_p && addr_max < ptr->addresses[i])
1637 addr_max = ptr->addresses[i];
1639 if (line_p && line_max < ptr->line_nums[i])
1640 line_max = ptr->line_nums[i];
1644 p = (ptr->functions[i]) ? (ptr->functions[i]) : "<none>";
1652 p = (ptr->filenames[i]) ? (ptr->filenames[i]) : "<none>";
1659 addr_len = num_digits (addr_max, 16);
1660 cnt_len = num_digits (cnt_max, 10);
1661 line_len = num_digits (line_max, 10);
1663 /* Now print out the basic block information. */
1664 for (i = 0; i < ncounts; i++)
1667 " Block #%*d: executed %*ld time(s)",
1669 cnt_len, ptr->counts[i]);
1672 fprintf (file, " address= 0x%.*lx", addr_len,
1676 fprintf (file, " function= %-*s", func_len,
1677 (ptr->functions[i]) ? ptr->functions[i] : "<none>");
1680 fprintf (file, " line= %*ld", line_len, ptr->line_nums[i]);
1683 fprintf (file, " file= %s",
1684 (ptr->filenames[i]) ? ptr->filenames[i] : "<none>");
1686 fprintf (file, "\n");
1689 fprintf (file, "\n");
1693 fprintf (file, "\n\n");
/* Called once per object module at startup: register this module's
   basic-block data in the global bb_head list and arrange for
   __bb_exit_func to run at program exit.  zero_word doubles as the
   "already registered" flag.  NOTE(review): the early return when
   zero_word is set and the bb_head assignment tail are elided from
   this view.  */
1699 __bb_init_func (struct bb *blocks)
1701 /* User is supposed to check whether the first word is non-0,
1702 but just in case.... */
1704 if (blocks->zero_word)
1708 /* Initialize destructor. */
1710 ON_EXIT (__bb_exit_func, 0);
1713 /* Set up linked list. */
1714 blocks->zero_word = 1;
1715 blocks->next = bb_head;
1719 #ifndef MACHINE_STATE_SAVE
1720 #define MACHINE_STATE_SAVE(ID)
1722 #ifndef MACHINE_STATE_RESTORE
1723 #define MACHINE_STATE_RESTORE(ID)
1728 /* Number of buckets in hashtable of basic block addresses. */
1730 #define BB_BUCKETS 311
1732 /* Maximum length of string in file bb.in. */
1734 #define BBINBUFSIZE 500
1736 /* BBINBUFSIZE-1 with double quotes. We could use #BBINBUFSIZE or
1737 "BBINBUFSIZE" but want to avoid trouble with preprocessors. */
1739 #define BBINBUFSIZESTR "499"
1743 struct bb_edge *next;
1744 unsigned long src_addr;
1745 unsigned long dst_addr;
1746 unsigned long count;
1751 TRACE_KEEP = 0, TRACE_ON = 1, TRACE_OFF = 2
1756 struct bb_func *next;
1759 enum bb_func_mode mode;
1762 /* This is the connection to the outside world.
1763 The BLOCK_PROFILER macro must set __bb.blocks
1764 and __bb.blockno. */
1767 unsigned long blockno;
1771 /* Vars to store addrs of source and destination basic blocks
1774 static unsigned long bb_src = 0;
1775 static unsigned long bb_dst = 0;
1777 static FILE *bb_tracefile = (FILE *) 0;
1778 static struct bb_edge **bb_hashbuckets = (struct bb_edge **) 0;
1779 static struct bb_func *bb_func_head = (struct bb_func *) 0;
1780 static unsigned long bb_callcount = 0;
1781 static int bb_mode = 0;
1783 static unsigned long *bb_stack = (unsigned long *) 0;
1784 static size_t bb_stacksize = 0;
1786 static int reported = 0;
1789 Always : Print execution frequencies of basic blocks
1791 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1792 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1793 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1794 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1799 /*#include <sys/types.h>*/
1800 #include <sys/stat.h>
1801 /*#include <malloc.h>*/
1803 /* Commands executed by gopen. */
1805 #define GOPENDECOMPRESS "gzip -cd "
1806 #define GOPENCOMPRESS "gzip -c >"
1808 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1809 If it does not compile, simply replace gopen by fopen and delete
1810 '.gz' from any first parameter to gopen. */
/* Like fopen, but pipes through gzip when FN ends in .z/.Z/.gz; MODE
   must be "r" (decompress) or "w" (compress).  The gzip command line
   is built into a malloc'd buffer `s' and handed to popen.
   NOTE(review): interior lines (declarations, the use_gzip branch
   structure, malloc NULL checks) are elided from this view.  Also
   NOTE(review): the popen-failure fallback passes the command string
   `s' -- not `fn' -- to fopen; this matches the upstream source but
   looks suspicious; confirm intent before changing.  */
1813 gopen (char *fn, char *mode)
1821 if (mode[0] != 'r' && mode[0] != 'w')
1824 p = fn + strlen (fn)-1;
1825 use_gzip = ((p[-1] == '.' && (p[0] == 'Z' || p[0] == 'z'))
1826 || (p[-2] == '.' && p[-1] == 'g' && p[0] == 'z'));
1833 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1834 + sizeof (GOPENDECOMPRESS));
1835 strcpy (s, GOPENDECOMPRESS);
1836 strcpy (s + (sizeof (GOPENDECOMPRESS)-1), fn);
1837 f = popen (s, mode);
1845 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1846 + sizeof (GOPENCOMPRESS));
1847 strcpy (s, GOPENCOMPRESS);
1848 strcpy (s + (sizeof (GOPENCOMPRESS)-1), fn);
1849 if (!(f = popen (s, mode)))
1850 f = fopen (s, mode);
1857 return fopen (fn, mode);
1867 if (!fstat (fileno (f), &buf) && S_ISFIFO (buf.st_mode))
1875 #endif /* HAVE_POPEN */
1877 /* Called once per program. */
/* Appends the jump-frequency report to `bb.out', lists `bb.in' functions
   that were never executed, then frees all profiler data structures.  */
1880 __bb_exit_trace_func ()
1882 FILE *file = fopen ("bb.out", "a");
/* Close the trace stream; gclose handles the gzip-pipe case.  */
1896 gclose (bb_tracefile);
1898 fclose (bb_tracefile);
1899 #endif /* HAVE_POPEN */
1902 /* Check functions in `bb.in'. */
1907 const struct bb_func *p;
1908 int printed_something = 0;
1912 /* This is somewhat type incorrect. */
1913 time ((void *) &time_value);
/* Report every requested function that matched no executed block.  */
1915 for (p = bb_func_head; p != (struct bb_func *) 0; p = p->next)
1917 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1919 if (!ptr->filename || p->filename != (char *) 0 && strcmp (p->filename, ptr->filename))
1921 for (blk = 0; blk < ptr->ncounts; blk++)
1923 if (!strcmp (p->funcname, ptr->functions[blk]))
1928 if (!printed_something)
1930 fprintf (file, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value));
1931 printed_something = 1;
1934 fprintf (file, "\tFunction %s", p->funcname);
1936 fprintf (file, " of file %s", p->filename);
1937 fprintf (file, "\n" );
1942 if (printed_something)
1943 fprintf (file, "\n");
1949 if (!bb_hashbuckets)
1953 fprintf (stderr, "Profiler: out of memory\n");
/* First pass over the hash table: find the widest address and count so
   the report columns line up.  */
1963 unsigned long addr_max = 0;
1964 unsigned long cnt_max = 0;
1968 /* This is somewhat type incorrect, but it avoids worrying about
1969 exactly where time.h is included from. It should be ok unless
1970 a void * differs from other pointer formats, or if sizeof (long)
1971 is < sizeof (time_t). It would be nice if we could assume the
1972 use of rationale standards here. */
1974 time ((void *) &time_value);
1975 fprintf (file, "Basic block jump tracing");
1977 switch (bb_mode & 12)
1980 fprintf (file, " (with call)");
1984 /* Print nothing. */
1988 fprintf (file, " (with call & ret)");
1992 fprintf (file, " (with ret)");
1996 fprintf (file, " finished on %s\n", ctime ((void *) &time_value));
1998 for (i = 0; i < BB_BUCKETS; i++)
2000 struct bb_edge *bucket = bb_hashbuckets[i];
2001 for ( ; bucket; bucket = bucket->next )
2003 if (addr_max < bucket->src_addr)
2004 addr_max = bucket->src_addr;
2005 if (addr_max < bucket->dst_addr)
2006 addr_max = bucket->dst_addr;
2007 if (cnt_max < bucket->count)
2008 cnt_max = bucket->count;
2011 addr_len = num_digits (addr_max, 16);
2012 cnt_len = num_digits (cnt_max, 10);
/* Second pass: emit one line per recorded edge.
   NOTE(review): bucket->count is unsigned long but is printed with
   %*d (int) — should be %*lu; misprints on LP64 targets.  */
2014 for ( i = 0; i < BB_BUCKETS; i++)
2016 struct bb_edge *bucket = bb_hashbuckets[i];
2017 for ( ; bucket; bucket = bucket->next )
2019 fprintf (file, "Jump from block 0x%.*lx to "
2020 "block 0x%.*lx executed %*d time(s)\n",
2021 addr_len, bucket->src_addr,
2022 addr_len, bucket->dst_addr,
2023 cnt_len, bucket->count);
2027 fprintf (file, "\n");
2035 /* Free allocated memory. */
2040 struct bb_func *old = f;
2043 if (old->funcname) free (old->funcname);
2044 if (old->filename) free (old->filename);
2055 for (i = 0; i < BB_BUCKETS; i++)
2057 struct bb_edge *old, *bucket = bb_hashbuckets[i];
2062 bucket = bucket->next;
2066 free (bb_hashbuckets);
2069 for (b = bb_head; b; b = b->next)
2070 if (b->flags) free (b->flags);
2073 /* Called once per program. */
/* Parses `bb.in' to configure bb_mode and the per-function trace list,
   then opens the trace file and allocates the edge hash table and the
   simulated call stack.  Registered to run before main's profiling.  */
2080 char buf[BBINBUFSIZE];
2083 enum bb_func_mode m;
2086 /* Initialize destructor. */
2087 ON_EXIT (__bb_exit_func, 0);
2090 if (!(file = fopen ("bb.in", "r")))
/* BBINBUFSIZESTR bounds the %s conversion so BUF cannot overflow.  */
2093 while(fscanf (file, " %" BBINBUFSIZESTR "s ", buf) != EOF)
/* Magic tokens toggle the bb_mode bits; anything else names a function
   (optionally "file:func") whose tracing mode is being set.  */
2105 if (!strcmp (p, "__bb_trace__"))
2107 else if (!strcmp (p, "__bb_jumps__"))
2109 else if (!strcmp (p, "__bb_hidecall__"))
2111 else if (!strcmp (p, "__bb_showret__"))
2115 struct bb_func *f = (struct bb_func *) malloc (sizeof (struct bb_func));
2119 f->next = bb_func_head;
2120 if (pos = strchr (p, ':'))
2122 if (!(f->funcname = (char *) malloc (strlen (pos+1)+1)))
2124 strcpy (f->funcname, pos+1);
2126 if ((f->filename = (char *) malloc (l+1)))
2128 strncpy (f->filename, p, l);
2129 f->filename[l] = '\0';
2132 f->filename = (char *) 0;
2136 if (!(f->funcname = (char *) malloc (strlen (p)+1)))
2138 strcpy (f->funcname, p);
2139 f->filename = (char *) 0;
2151 bb_tracefile = gopen ("bbtrace.gz", "w");
2156 bb_tracefile = fopen ("bbtrace", "w");
2158 #endif /* HAVE_POPEN */
2162 bb_hashbuckets = (struct bb_edge **)
2163 malloc (BB_BUCKETS * sizeof (struct bb_edge *));
/* NOTE(review): this clears only BB_BUCKETS *bytes*, not
   BB_BUCKETS * sizeof (struct bb_edge *) — most of the bucket array
   is left uninitialized.  Later GCC releases fixed this.  */
2165 bzero ((char *) bb_hashbuckets, BB_BUCKETS);
2171 bb_stack = (unsigned long *) malloc (bb_stacksize * sizeof (*bb_stack));
2175 /* Initialize destructor. */
2176 ON_EXIT (__bb_exit_trace_func, 0);
2181 /* Called upon entering a basic block. */
/* Records the edge (bb_src -> bb_dst), bumps its execution count in the
   hash table, and optionally appends bb_dst to the raw trace file.  */
2186 struct bb_edge *bucket;
2188 MACHINE_STATE_SAVE("1")
2190 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2193 bb_dst = __bb.blocks->addresses[__bb.blockno];
2194 __bb.blocks->counts[__bb.blockno]++;
2198 fwrite (&bb_dst, sizeof (unsigned long), 1, bb_tracefile);
2203 struct bb_edge **startbucket, **oldnext;
/* Hash on (src, dst); int-truncating casts are tolerable for hashing.  */
2205 oldnext = startbucket
2206 = & bb_hashbuckets[ (((int) bb_src*8) ^ (int) bb_dst) % BB_BUCKETS ];
2207 bucket = *startbucket;
2209 for (bucket = *startbucket; bucket;
2210 oldnext = &(bucket->next), bucket = *oldnext)
2212 if (bucket->src_addr == bb_src
2213 && bucket->dst_addr == bb_dst)
/* Move-to-front: hoist the hit to the head of its chain so hot edges
   are found quickly on subsequent lookups.  */
2216 *oldnext = bucket->next;
2217 bucket->next = *startbucket;
2218 *startbucket = bucket;
/* Edge not seen before: allocate a node and link it at the head.  */
2223 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2229 fprintf (stderr, "Profiler: out of memory\n");
2236 bucket->src_addr = bb_src;
2237 bucket->dst_addr = bb_dst;
2238 bucket->next = *startbucket;
2239 *startbucket = bucket;
2250 MACHINE_STATE_RESTORE("1")
2254 /* Called when returning from a function and `__bb_showret__' is set. */
/* Same bookkeeping as __bb_trace_func, but records the synthetic
   return edge (bb_dst -> bb_src).  */
2257 __bb_trace_func_ret ()
2259 struct bb_edge *bucket;
2261 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2266 struct bb_edge **startbucket, **oldnext;
2268 oldnext = startbucket
2269 = & bb_hashbuckets[ (((int) bb_dst * 8) ^ (int) bb_src) % BB_BUCKETS ];
2270 bucket = *startbucket;
2272 for (bucket = *startbucket; bucket;
2273 oldnext = &(bucket->next), bucket = *oldnext)
2275 if (bucket->src_addr == bb_dst
2276 && bucket->dst_addr == bb_src)
2279 *oldnext = bucket->next;
2280 bucket->next = *startbucket;
2281 *startbucket = bucket;
2286 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2292 fprintf (stderr, "Profiler: out of memory\n");
2299 bucket->src_addr = bb_dst;
2300 bucket->dst_addr = bb_src;
2301 bucket->next = *startbucket;
2302 *startbucket = bucket;
2315 /* Called upon entering the first function of a file. */
/* Links BLOCKS into the global bb_head list and computes per-block trace
   flags from the function list read out of `bb.in'.  */
2318 __bb_init_file (struct bb *blocks)
2321 const struct bb_func *p;
2322 long blk, ncounts = blocks->ncounts;
2323 const char **functions = blocks->functions;
2325 /* Set up linked list. */
2326 blocks->zero_word = 1;
2327 blocks->next = bb_head;
2332 || !(blocks->flags = (char *) malloc (sizeof (char) * blocks->ncounts)))
2335 for (blk = 0; blk < ncounts; blk++)
2336 blocks->flags[blk] = 0;
2338 for (blk = 0; blk < ncounts; blk++)
2340 for (p = bb_func_head; p; p = p->next)
2342 if (!strcmp (p->funcname, functions[blk])
2343 && (!p->filename || !strcmp (p->filename, blocks->filename)))
2345 blocks->flags[blk] |= p->mode;
2352 /* Called when exiting from a function. */
/* Pops the simulated call stack and, when return tracing is enabled,
   records the return edge via __bb_trace_func_ret.  */
2358 MACHINE_STATE_SAVE("2")
2362 if ((bb_mode & 12) && bb_stacksize > bb_callcount)
2364 bb_src = bb_stack[bb_callcount];
2366 __bb_trace_func_ret ();
2372 MACHINE_STATE_RESTORE("2")
2376 /* Called when entering a function. */
/* Lazily initializes per-file data, pushes bb_src on the simulated call
   stack (growing it as needed), and turns tracing on for flagged
   functions.  */
2379 __bb_init_trace_func (struct bb *blocks, unsigned long blockno)
2381 static int trace_init = 0;
2383 MACHINE_STATE_SAVE("3")
2385 if (!blocks->zero_word)
2392 __bb_init_file (blocks);
2402 if (bb_callcount >= bb_stacksize)
2404 size_t newsize = bb_callcount + 100;
/* NOTE(review): NEWSIZE is an element count but is passed to realloc
   as a *byte* count — `newsize * sizeof (*bb_stack)' looks intended
   (compare the malloc at original line 2171).  Also, on realloc
   failure the old bb_stack pointer is overwritten (leak), though the
   code below does bail out via stack_overflow.  */
2406 bb_stack = (unsigned long *) realloc (bb_stack, newsize);
2411 fprintf (stderr, "Profiler: out of memory\n");
2415 goto stack_overflow;
2417 bb_stacksize = newsize;
2419 bb_stack[bb_callcount] = bb_src;
2430 else if (blocks->flags && (blocks->flags[blockno] & TRACE_ON))
2436 bb_stack[bb_callcount] = bb_src;
2439 MACHINE_STATE_RESTORE("3")
2442 #endif /* not inhibit_libc */
2443 #endif /* not BLOCK_PROFILER_CODE */
/* Shift table: __shtab[n] == 1 << n, for targets whose variable shift
   is expensive or absent.  */
2447 unsigned int __shtab[] = {
2448 0x00000001, 0x00000002, 0x00000004, 0x00000008,
2449 0x00000010, 0x00000020, 0x00000040, 0x00000080,
2450 0x00000100, 0x00000200, 0x00000400, 0x00000800,
2451 0x00001000, 0x00002000, 0x00004000, 0x00008000,
2452 0x00010000, 0x00020000, 0x00040000, 0x00080000,
2453 0x00100000, 0x00200000, 0x00400000, 0x00800000,
2454 0x01000000, 0x02000000, 0x04000000, 0x08000000,
2455 0x10000000, 0x20000000, 0x40000000, 0x80000000
2459 #ifdef L_clear_cache
2460 /* Clear part of an instruction cache. */
2462 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
/* Invalidate the i-cache lines covering [BEG, END).  Prefers the
   target's CLEAR_INSN_CACHE macro; otherwise flushes by executing
   return/jump instructions written into a static array that aliases
   the same cache lines.
   NOTE(review): the fallback casts pointers through `int', so it
   assumes sizeof (int) == sizeof (void *) — true of the targets this
   era of the file supported.  */
2465 __clear_cache (char *beg, char *end)
2467 #ifdef CLEAR_INSN_CACHE
2468 CLEAR_INSN_CACHE (beg, end);
2470 #ifdef INSN_CACHE_SIZE
2471 static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
2472 static int initialized;
2476 typedef (*function_ptr) ();
2478 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2479 /* It's cheaper to clear the whole cache.
2480 Put in a series of jump instructions so that calling the beginning
2481 of the cache will clear the whole thing. */
2485 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2486 & -INSN_CACHE_LINE_WIDTH);
2487 int end_ptr = ptr + INSN_CACHE_SIZE;
2489 while (ptr < end_ptr)
2491 *(INSTRUCTION_TYPE *)ptr
2492 = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
2493 ptr += INSN_CACHE_LINE_WIDTH;
2495 *(INSTRUCTION_TYPE *) (ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;
2500 /* Call the beginning of the sequence. */
2501 (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2502 & -INSN_CACHE_LINE_WIDTH))
2505 #else /* Cache is large. */
/* Large cache: fill the array with return instructions once, then call
   only the lines that alias [BEG, END).  */
2509 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2510 & -INSN_CACHE_LINE_WIDTH);
2512 while (ptr < (int) array + sizeof array)
2514 *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
2515 ptr += INSN_CACHE_LINE_WIDTH;
2521 /* Find the location in array that occupies the same cache line as BEG. */
2523 offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
2524 start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
2525 & -INSN_CACHE_PLANE_SIZE)
2528 /* Compute the cache alignment of the place to stop clearing. */
2529 #if 0 /* This is not needed for gcc's purposes. */
2530 /* If the block to clear is bigger than a cache plane,
2531 we clear the entire cache, and OFFSET is already correct. */
2532 if (end < beg + INSN_CACHE_PLANE_SIZE)
2534 offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
2535 & -INSN_CACHE_LINE_WIDTH)
2536 & (INSN_CACHE_PLANE_SIZE - 1));
2538 #if INSN_CACHE_DEPTH > 1
/* Set-associative cache: touch the aliasing line in every plane.  */
2539 end_addr = (start_addr & -INSN_CACHE_PLANE_SIZE) + offset;
2540 if (end_addr <= start_addr)
2541 end_addr += INSN_CACHE_PLANE_SIZE;
2543 for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)
2545 int addr = start_addr + plane * INSN_CACHE_PLANE_SIZE;
2546 int stop = end_addr + plane * INSN_CACHE_PLANE_SIZE;
2548 while (addr != stop)
2550 /* Call the return instruction at ADDR. */
2551 ((function_ptr) addr) ();
2553 addr += INSN_CACHE_LINE_WIDTH;
2556 #else /* just one plane */
2559 /* Call the return instruction at START_ADDR. */
2560 ((function_ptr) start_addr) ();
2562 start_addr += INSN_CACHE_LINE_WIDTH;
2564 while ((start_addr % INSN_CACHE_SIZE) != offset);
2565 #endif /* just one plane */
2566 #endif /* Cache is large */
2567 #endif /* Cache exists */
2568 #endif /* CLEAR_INSN_CACHE */
2571 #endif /* L_clear_cache */
2575 /* Jump to a trampoline, loading the static chain address. */
2577 #if defined(WINNT) && ! defined(__CYGWIN32__)
/* Win32 shim: emulate POSIX mprotect on top of VirtualProtect so the
   generic trampoline code below can mark stack pages executable.  */
2589 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2593 mprotect (char *addr, int len, int prot)
2610 if (VirtualProtect (addr, len, np, &op))
2618 #ifdef TRANSFER_FROM_TRAMPOLINE
2619 TRANSFER_FROM_TRAMPOLINE
2622 #if defined (NeXT) && defined (__MACH__)
2624 /* Make stack executable so we can call trampolines on stack.
2625 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2629 #include <mach/mach.h>
/* NeXT/Mach variant: vm_protect the trampoline's pages, then flush
   the instruction cache over the same range.  */
2633 __enable_execute_stack (char *addr)
2636 char *eaddr = addr + TRAMPOLINE_SIZE;
2637 vm_address_t a = (vm_address_t) addr;
2639 /* turn on execute access on stack */
2640 r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
2641 if (r != KERN_SUCCESS)
2643 mach_error("vm_protect VM_PROT_ALL", r);
2647 /* We inline the i-cache invalidation for speed */
2649 #ifdef CLEAR_INSN_CACHE
2650 CLEAR_INSN_CACHE (addr, eaddr);
2652 __clear_cache ((int) addr, (int) eaddr);
2656 #endif /* defined (NeXT) && defined (__MACH__) */
2660 /* Make stack executable so we can call trampolines on stack.
2661 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2663 #include <sys/mman.h>
2664 #include <sys/vmparam.h>
2665 #include <machine/machparam.h>
/* Convex variant: remap the pages between the lowest stack address seen
   so far and the current frame with read/write/execute access.  */
2668 __enable_execute_stack ()
2671 static unsigned lowest = USRSTACK;
2672 unsigned current = (unsigned) &fp & -NBPG;
2674 if (lowest > current)
2676 unsigned len = lowest - current;
2677 mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);
2681 /* Clear instruction cache in case an old trampoline is in it. */
2684 #endif /* __convex__ */
2688 /* Modified from the convex -code above. */
2690 #include <sys/param.h>
2692 #include <sys/m88kbcs.h>
/* sysV88 (m88k) variant: memctl with MCT_TEXT marks the stack pages
   executable and flushes the i-cache.  */
2695 __enable_execute_stack ()
2698 static unsigned long lowest = USRSTACK;
2699 unsigned long current = (unsigned long) &save_errno & -NBPC;
2701 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2702 address is seen as 'negative'. That is the case with the stack. */
2705 if (lowest > current)
2707 unsigned len=lowest-current;
2708 memctl(current,len,MCT_TEXT);
2712 memctl(current,NBPC,MCT_TEXT);
2716 #endif /* __sysV88__ */
2720 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2722 #include <sys/mman.h>
2723 #include <sys/types.h>
2724 #include <sys/param.h>
2725 #include <sys/vmmac.h>
2727 /* Modified from the convex -code above.
2728 mremap promises to clear the i-cache. */
/* Pyramid variant: mprotect the single page containing the frame.  */
2731 __enable_execute_stack ()
2734 if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
2735 PROT_READ|PROT_WRITE|PROT_EXEC))
2737 perror ("mprotect in __enable_execute_stack");
2742 #endif /* __pyr__ */
2744 #if defined (sony_news) && defined (SYSTYPE_BSD)
2747 #include <sys/types.h>
2748 #include <sys/param.h>
2749 #include <syscall.h>
2750 #include <machine/sysnews.h>
2752 /* cacheflush function for NEWS-OS 4.2.
2753 This function is called from trampoline-initialize code
2754 defined in config/mips/mips.h. */
2757 cacheflush (char *beg, int size, int flag)
2759 if (syscall (SYS_sysnews, NEWS_CACHEFLUSH, beg, size, FLUSH_BCACHE))
2761 perror ("cache_flush");
2767 #endif /* sony_news */
2768 #endif /* L_trampoline */
2772 #include "gbl-ctors.h"
2773 /* Some systems use __main in a way incompatible with its use in gcc, in these
2774 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2775 give the same symbol without quotes for an alternative entry point. You
2776 must define both, or neither. */
2778 #define NAME__MAIN "__main"
2779 #define SYMBOL__MAIN __main
2782 #ifdef INIT_SECTION_ASM_OP
2783 #undef HAS_INIT_SECTION
2784 #define HAS_INIT_SECTION
2787 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2788 /* Run all the global destructors on exit from the program. */
/* Walks __DTOR_LIST__ (skipping the leading count/sentinel word) unless
   the target supplies its own DO_GLOBAL_DTORS_BODY.  */
2791 __do_global_dtors ()
2793 #ifdef DO_GLOBAL_DTORS_BODY
2794 DO_GLOBAL_DTORS_BODY;
2796 static func_ptr *p = __DTOR_LIST__ + 1;
2806 #ifndef HAS_INIT_SECTION
2807 /* Run all the global constructors on entry to the program. */
2810 #define ON_EXIT(a, b)
2812 /* Make sure the exit routine is pulled in to define the globals as
2813 bss symbols, just in case the linker does not automatically pull
2814 bss definitions from the library. */
2816 extern int _exit_dummy_decl;
2817 int *_exit_dummy_ref = &_exit_dummy_decl;
2818 #endif /* ON_EXIT */
/* Run the constructors, then arrange for the destructors at exit.  */
2821 __do_global_ctors ()
2823 DO_GLOBAL_CTORS_BODY;
2824 ON_EXIT (__do_global_dtors, 0);
2826 #endif /* no HAS_INIT_SECTION */
2828 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
2829 /* Subroutine called automatically by `main'.
2830 Compiling a global function named `main'
2831 produces an automatic call to this function at the beginning.
2833 For many systems, this routine calls __do_global_ctors.
2834 For systems which support a .init section we use the .init section
2835 to run __do_global_ctors, so we need not do anything here. */
2840 /* Support recursive calls to `main': run initializers just once. */
2841 static int initialized;
2845 __do_global_ctors ();
2848 #endif /* no HAS_INIT_SECTION or INVOKE__main */
2850 #endif /* L__main */
2854 #include "gbl-ctors.h"
2856 /* Provide default definitions for the lists of constructors and
2857 destructors, so that we don't get linker errors. These symbols are
2858 intentionally bss symbols, so that gld and/or collect will provide
2859 the right values. */
2861 /* We declare the lists here with two elements each,
2862 so that they are valid empty lists if no other definition is loaded. */
2863 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
2864 #if defined(__NeXT__) || defined(_AIX)
2865 /* After 2.3, try this definition on all systems. */
2866 func_ptr __CTOR_LIST__[2] = {0, 0};
2867 func_ptr __DTOR_LIST__[2] = {0, 0};
2869 func_ptr __CTOR_LIST__[2];
2870 func_ptr __DTOR_LIST__[2];
2872 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
2873 #endif /* L_ctors */
2877 #include "gbl-ctors.h"
2883 int _exit_dummy_decl = 0; /* prevent compiler & linker warnings */
/* Fallback atexit: a growable array of handlers run in reverse order by
   the exit replacement below.  NOTE(review): `volatile' on the slot
   index does not make this thread safe.  */
2891 static func_ptr *atexit_chain = 0;
2892 static long atexit_chain_length = 0;
2893 static volatile long last_atexit_chain_slot = -1;
/* Register FUNC to be called at normal program termination.  Grows the
   chain in steps of 32 slots; on allocation failure the registration is
   undone and an error is returned.  */
2895 int atexit (func_ptr func)
2897 if (++last_atexit_chain_slot == atexit_chain_length)
2899 atexit_chain_length += 32;
/* NOTE(review): realloc overwrites atexit_chain directly — on failure
   the previous array leaks, although the code below resets the
   bookkeeping so no dangling use follows.  */
2901 atexit_chain = (func_ptr *) realloc (atexit_chain, atexit_chain_length
2902 * sizeof (func_ptr));
2904 atexit_chain = (func_ptr *) malloc (atexit_chain_length
2905 * sizeof (func_ptr));
2908 atexit_chain_length = 0;
2909 last_atexit_chain_slot = -1;
2914 atexit_chain[last_atexit_chain_slot] = func;
2917 #endif /* NEED_ATEXIT */
2919 /* If we have no known way of registering our own __do_global_dtors
2920 routine so that it will be invoked at program exit time, then we
2921 have to define our own exit routine which will get this to happen. */
2923 extern void __do_global_dtors ();
2924 extern void __bb_exit_func ();
2925 extern void _cleanup ();
2926 extern void _exit () __attribute__ ((noreturn));
2931 #if !defined (INIT_SECTION_ASM_OP) || !defined (OBJECT_FORMAT_ELF)
/* Run registered handlers most-recent-first, clearing each slot after
   its call, then release the chain.  */
2935 for ( ; last_atexit_chain_slot-- >= 0; )
2937 (*atexit_chain[last_atexit_chain_slot + 1]) ();
2938 atexit_chain[last_atexit_chain_slot + 1] = 0;
2940 free (atexit_chain);
2943 #else /* No NEED_ATEXIT */
2944 __do_global_dtors ();
2945 #endif /* No NEED_ATEXIT */
2947 #ifndef inhibit_libc
2959 int _exit_dummy_decl = 0; /* prevent compiler & linker warnings */
2966 /* Shared exception handling support routines. */
2968 /* Language-specific information about the active exception(s). If there
2969 are no active exceptions, it is set to 0. */
/* Default terminate handler; __terminate_func may be replaced by the
   language runtime (e.g. C++ set_terminate).  */
2973 __default_terminate ()
2978 void (*__terminate_func)() = __default_terminate;
2983 (*__terminate_func)();
/* Crude RTTI match used by the EH runtime: type names compared as
   strings; a null catch type acts as a catch-all (fragment).  */
2987 __throw_type_match (void *catch_type, void *throw_type, void *obj)
2990 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
2991 catch_type, throw_type);
2993 if (strcmp ((const char *)catch_type, (const char *)throw_type) == 0)
3003 /* Support routines for setjmp/longjmp exception handling. */
3005 /* Calls to __sjthrow are generated by the compiler when an exception
3006 is raised when using the setjmp/longjmp exception handling codegen
3009 extern void longjmp (void *, int);
/* Sentinel marking the bottom of the dynamic handler chain.  */
3011 static void *top_elt[2];
3012 void **__dynamic_handler_chain = top_elt;
3014 /* Routine to get the head of the current thread's dynamic handler chain
3015 use for exception handling.
3017 TODO: make thread safe. */
3020 __get_dynamic_handler_chain ()
3022 return &__dynamic_handler_chain;
3025 /* This is used to throw an exception when the setjmp/longjmp codegen
3026 method is used for exception handling.
3028 We call __terminate if there are no handlers left (we know this
3029 when the dynamic handler chain is top_elt). Otherwise we run the
3030 cleanup actions off the dynamic cleanup stack, and pop the top of
3031 the dynamic handler chain, and use longjmp to transfer back to the
3032 associated handler. */
3037 void ***dhc = __get_dynamic_handler_chain ();
3039 void (*func)(void *, int);
3043 /* The cleanup chain is one word into the buffer. Get the cleanup
3045 cleanup = (void***)&(*dhc)[1];
3047 /* If there are any cleanups in the chain, run them now. */
3051 void **buf = (void**)store;
/* Protect each cleanup with its own setjmp so a cleanup that itself
   throws resumes here rather than unwinding past us.  */
3056 #ifdef DONT_USE_BUILTIN_SETJMP
3057 if (! setjmp (&buf[2]))
3059 if (! __builtin_setjmp (&buf[2]))
/* Cleanup record layout: [0]=next, [1]=function, [2]=argument.  */
3065 func = (void(*)(void*, int))cleanup[0][1];
3066 arg = (void*)cleanup[0][2];
3068 /* Update this before running the cleanup. */
3069 cleanup[0] = (void **)cleanup[0][0];
3082 /* We must call terminate if we try and rethrow an exception, when
3083 there is no exception currently active and when there are no
3085 if (! __eh_info || (*dhc) == top_elt)
3088 /* Find the jmpbuf associated with the top element of the dynamic
3089 handler chain. The jumpbuf starts two words into the buffer. */
3090 jmpbuf = &(*dhc)[2];
3092 /* Then we pop the top element off the dynamic handler chain. */
3093 *dhc = (void**)(*dhc)[0];
3095 /* And then we jump to the handler. */
3097 #ifdef DONT_USE_BUILTIN_SETJMP
3098 longjmp (jmpbuf, 1);
3100 __builtin_longjmp (jmpbuf, 1);
3104 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3105 handler, then pop the handler off the dynamic handler stack, and
3106 then throw. This is used to skip the first handler, and transfer
3107 control to the next handler in the dynamic handler stack. */
3112 void ***dhc = __get_dynamic_handler_chain ();
3114 void (*func)(void *, int);
3118 /* The cleanup chain is one word into the buffer. Get the cleanup
3120 cleanup = (void***)&(*dhc)[1];
3122 /* If there are any cleanups in the chain, run them now. */
3126 void **buf = (void**)store;
3131 #ifdef DONT_USE_BUILTIN_SETJMP
3132 if (! setjmp (&buf[2]))
3134 if (! __builtin_setjmp (&buf[2]))
3140 func = (void(*)(void*, int))cleanup[0][1];
3141 arg = (void*)cleanup[0][2];
3143 /* Update this before running the cleanup. */
3144 cleanup[0] = (void **)cleanup[0][0];
3157 /* Then we pop the top element off the dynamic handler chain. */
3158 *dhc = (void**)(*dhc)[0];
3163 /* Support code for all exception region-based exception handling. */
3165 /* This value identifies the place from which an exception is being
3168 #ifdef EH_TABLE_LOOKUP
/* One exception region entry: [start, end) PC range and its handler.  */
3174 typedef struct exception_table {
3177 void *exception_handler;
3180 /* This routine takes a PC and a pointer to the exception region TABLE for
3181 its translation unit, and returns the address of the exception handler
3182 associated with the closest exception table handler entry associated
3183 with that PC, or 0 if there are no table entries the PC fits in.
3185 In the advent of a tie, we have to give the last entry, as it represents
3189 find_exception_handler (void *pc, exception_table *table)
3196 /* We can't do a binary search because the table isn't guaranteed
3197 to be sorted from function to function. */
3198 for (pos = 0; table[pos].exception_handler != (void *) -1; ++pos)
3200 if (table[pos].start <= pc && table[pos].end > pc)
3202 /* This can apply. Make sure it is at least as small as
3203 the previous best. */
3204 if (best == -1 || (table[pos].end <= table[best].end
3205 && table[pos].start >= table[best].start))
3208 /* But it is sorted by starting PC within a function. */
3209 else if (best >= 0 && table[pos].start > pc)
3213 return table[best].exception_handler;
3218 #endif /* EH_TABLE_LOOKUP */
3220 #ifndef DWARF2_UNWIND_INFO
3221 /* Support code for exception handling using inline unwinders or
3222 __unwind_function. */
3226 #ifndef EH_TABLE_LOOKUP
/* Registered-table list node: caches the overall [start, end) span of
   a translation unit's table so lookup can skip whole tables.  */
3227 typedef struct exception_table_node {
3228 exception_table *table;
3231 struct exception_table_node *next;
3232 } exception_table_node;
3234 static struct exception_table_node *exception_table_list;
/* Return the handler for PC, scanning every registered table whose
   cached span contains PC; 0 if none match.  */
3237 __find_first_exception_table_match (void *pc)
3239 register exception_table_node *tnp;
3241 for (tnp = exception_table_list; tnp != 0; tnp = tnp->next)
3243 if (tnp->start <= pc && tnp->end >= pc)
3244 return find_exception_handler (pc, tnp->table);
/* Register TABLE (terminated by a start of (void *) -1) and compute its
   overall PC span.  Called from each translation unit's initializer.
   NOTE(review): the malloc result is used unchecked.  */
3251 __register_exceptions (exception_table *table)
3253 exception_table_node *node;
3254 exception_table *range = table + 1;
3256 if (range->start == (void *) -1)
3259 node = (exception_table_node *) malloc (sizeof (exception_table_node));
3260 node->table = table;
3262 /* This loop can be optimized away either if the table
3263 is sorted, or if we pass in extra parameters. */
3264 node->start = range->start;
3265 node->end = range->end;
3266 for (range++ ; range->start != (void *) (-1); range++)
3268 if (range->start < node->start)
3269 node->start = range->start;
3270 if (range->end > node->end)
3271 node->end = range->end;
3274 node->next = exception_table_list;
3275 exception_table_list = node;
3277 #endif /* !EH_TABLE_LOOKUP */
3279 /* Throw stub routine.
3281 This is work in progress, but not completed yet. */
3289 /* See expand_builtin_throw for details. */
3291 void **__eh_pcnthrow () {
3292 static void *buf[2] = {
/* i386 hand-written unwinder: pops the current and previous frames and
   jumps to PTR.  Frame layout here is a guess (see comment below).  */
3301 __unwind_function(void *ptr)
3303 asm("movl 8(%esp),%ecx");
3304 /* Undo current frame */
3305 asm("movl %ebp,%esp");
3307 /* like ret, but stay here */
3308 asm("addl $4,%esp");
3310 /* Now, undo previous frame. */
3311 /* This is a test routine, as we have to dynamically probe to find out
3312 what to pop for certain, this is just a guess. */
3313 asm("leal -16(%ebp),%esp");
3317 asm("movl %ebp,%esp");
3320 asm("movl %ecx,0(%esp)");
3323 #elif #machine(rs6000) && !defined _ARCH_PPC
/* POWER (pre-PPC mnemonics) unwinder.  */
3324 __unwind_function(void *ptr)
3333 /* use 31 as a scratch register to restore the link register. */
3334 asm("l 31, 8(1);mtlr 31 # l lr,8(1)");
3337 asm("mtctr 3;bctr # b 3");
3339 #elif (#machine(rs6000) || #machine(powerpc)) && defined _ARCH_PPC
/* PowerPC unwinder: restore r31 and the link register, then branch to
   PTR via the count register.  */
3340 __unwind_function(void *ptr)
3344 asm("lwz 31,-4(1)");
3349 /* use 31 as a scratch register to restore the link register. */
3350 asm("lwz 31, 8(1);mtlr 31 # l lr,8(1)");
3351 asm("lwz 31,-4(1)");
3353 asm("mtctr 3;bctr # b 3");
/* Generic variant: rewrite our own return address to a local label so
   two real `return's unwind two frames.  */
3356 __unwind_function(void *ptr)
3358 __label__ return_again;
3360 /* Replace our frame's return address with the label below.
3361 During execution, we will first return here instead of to
3362 caller, then second return takes caller's frame off the stack.
3363 Two returns matches two actual calls, so is less likely to
3364 confuse debuggers. `16' corresponds to RETURN_ADDRESS_OFFSET. */
3365 __asm ("movl %0,16(fp)" : : "p" (&& return_again));
3372 __unwind_function(void *ptr)
3376 #endif /* powerpc */
3378 #else /* DWARF2_UNWIND_INFO */
3379 /* Support code for exception handling using static unwind information. */
3383 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3384 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3385 avoid a warning about casting between int and pointer of different
3388 typedef int ptr_type __attribute__ ((mode (pointer)));
3390 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3391 frame called by UDATA or 0. */
3394 get_reg (unsigned reg, frame_state *udata, frame_state *sub_udata)
3396 if (udata->saved[reg] == REG_SAVED_OFFSET)
3397 return (void *)(ptr_type)
3398 *(word_type *)(udata->cfa + udata->reg_or_offset[reg])
3399 else if (udata->saved[reg] == REG_SAVED_REG && sub_udata)
/* REG was saved into another register of the callee's frame: chase it
   one level down.  */
3400 return get_reg (udata->reg_or_offset[reg], sub_udata, 0);
3405 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
3408 put_reg (unsigned reg, void *val, frame_state *udata)
3410 if (udata->saved[reg] == REG_SAVED_OFFSET)
3411 *(word_type *)(udata->cfa + udata->reg_or_offset[reg])
3412 = (word_type)(ptr_type) val;
3417 /* Copy the saved value for register REG from frame UDATA to frame
3418 TARGET_UDATA. Unlike the previous two functions, this can handle
3419 registers that are not one word large. */
3422 copy_reg (unsigned reg, frame_state *udata, frame_state *target_udata)
3424 if (udata->saved[reg] == REG_SAVED_OFFSET
3425 && target_udata->saved[reg] == REG_SAVED_OFFSET)
3426 memcpy (target_udata->cfa + target_udata->reg_or_offset[reg],
3427 udata->cfa + udata->reg_or_offset[reg],
3428 __builtin_dwarf_reg_size (reg));
3433 /* Retrieve the return address for frame UDATA, where SUB_UDATA is a
3434 frame called by UDATA or 0. */
3436 static inline void *
3437 get_return_addr (frame_state *udata, frame_state *sub_udata)
3439 return __builtin_extract_return_addr
3440 (get_reg (udata->retaddr_column, udata, sub_udata));
3443 /* Overwrite the return address for frame UDATA with VAL. */
3446 put_return_addr (void *val, frame_state *udata)
3448 val = __builtin_frob_return_addr (val);
3449 put_reg (udata->retaddr_column, val, udata);
3452 /* Given the current frame UDATA and its return address PC, return the
3453 information about the calling frame in CALLER_UDATA. */
3456 next_stack_level (void *pc, frame_state *udata, frame_state *caller_udata)
3458 caller_udata = __frame_state_for (pc, caller_udata);
3462 /* Now go back to our caller's stack frame. If our caller's CFA register
3463 was saved in our stack frame, restore it; otherwise, assume the CFA
3464 register is SP and restore it to our CFA value. */
3465 if (udata->saved[caller_udata->cfa_reg])
3466 caller_udata->cfa = get_reg (caller_udata->cfa_reg, udata, 0);
3468 caller_udata->cfa = udata->cfa;
3469 caller_udata->cfa += caller_udata->cfa_offset;
3471 return caller_udata;
3474 #ifdef INCOMING_REGNO
3475 /* Is the saved value for register REG in frame UDATA stored in a register
3476 window in the previous frame? */
3479 in_reg_window (int reg, frame_state *udata)
3481 if (udata->saved[reg] != REG_SAVED_OFFSET)
/* Register-window saves sit on the caller's side of the CFA, so their
   offsets have the opposite sign from ordinary frame saves.  */
3484 #ifdef STACK_GROWS_DOWNWARD
3485 return udata->reg_or_offset[reg] > 0;
3487 return udata->reg_or_offset[reg] < 0;
3490 #endif /* INCOMING_REGNO */
/* NOTE(review): elided listing of the DWARF2 `__throw' routine -- original
   line numbers are embedded at the start of each line, and the numbering
   gaps show that the function signature (~lines 3500-3502), most braces,
   `else' keywords, and several statements are missing from this excerpt.
   The phase comments added below are keyed to the visible code only.  */
3492 /* We first search for an exception handler, and if we don't find
3493 it, we call __terminate on the current stack frame so that we may
3494 use the debugger to walk the stack and understand why no handler
3497 If we find one, then we unwind the frames down to the one that
3498 has the handler and transfer control into the handler. */
/* --- Locals: udata/sub_udata alternate as current/next frame state while
   walking the stack; my_udata snapshots the throw point's own frame.  */
3503 void *pc, *handler, *retaddr, *__eh_pc;
3504 frame_state ustruct, ustruct2;
3505 frame_state *udata = &ustruct;
3506 frame_state *sub_udata = &ustruct2;
3507 frame_state my_ustruct, *my_udata = &my_ustruct;
3510 /* This is required for C++ semantics. We must call terminate if we
3511 try and rethrow an exception, when there is no exception currently
3516 /* Start at our stack frame. */
3518 udata = __frame_state_for (&&label, udata);
3522 /* We need to get the value from the CFA register. At this point in
3523 compiling __throw we don't know whether or not we will use the frame
3524 pointer register for the CFA, so we check our unwind info. */
3525 if (udata->cfa_reg == __builtin_dwarf_fp_regnum ())
3526 udata->cfa = __builtin_fp ();
/* The `else' pairing line 3528 with the test above is elided (gap
   3526 -> 3528): the SP branch presumably handles the non-FP CFA case.  */
3528 udata->cfa = __builtin_sp ();
3529 udata->cfa += udata->cfa_offset;
/* Snapshot the throw frame's state; my_udata is the target into which
   unwound register values are copied later.  */
3531 memcpy (my_udata, udata, sizeof (*udata));
3533 /* Do any necessary initialization to access arbitrary stack frames.
3534 On the SPARC, this means flushing the register windows. */
3535 __builtin_unwind_init ();
3537 /* Now reset pc to the right throw point. */
3538 __eh_pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
/* --- Phase 1: search outward for a handler.  The loop head enclosing
   these lines (~3540-3543) is elided from the listing.  */
3544 frame_state *p = udata;
3545 udata = next_stack_level (pc, udata, sub_udata);
3548 /* If we couldn't find the next frame, we lose. */
3552 handler = find_exception_handler (pc, udata->eh_ptr);
3554 /* If we found one, we can stop searching. */
3557 args_size = udata->args_size;
3561 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
3562 hitting the beginning of the next region. */
3563 pc = get_return_addr (udata, sub_udata) - 1;
3566 /* If we haven't found a handler by now, this is an unhandled
3572 /* We found a handler in the throw context, no need to unwind. */
3579 /* Unwind all the frames between this one and the handler by copying
3580 their saved register values into our register save slots. */
3582 /* Remember the PC where we found the handler. */
3583 void *handler_pc = pc;
3585 /* Start from the throw context again. */
3587 memcpy (udata, my_udata, sizeof (*udata));
/* --- Phase 2: walk the frames again, folding each frame's saved
   registers into my_udata so the handler frame's state is materialized.  */
3589 while (pc != handler_pc)
3591 frame_state *p = udata;
3592 udata = next_stack_level (pc, udata, sub_udata);
3595 for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i)
3596 if (i != udata->retaddr_column && udata->saved[i])
3598 #ifdef INCOMING_REGNO
3599 /* If you modify the saved value of the return address
3600 register on the SPARC, you modify the return address for
3601 your caller's frame. Don't do that here, as it will
3602 confuse get_return_addr. */
3603 if (in_reg_window (i, udata)
3604 && udata->saved[udata->retaddr_column] == REG_SAVED_REG
3605 && udata->reg_or_offset[udata->retaddr_column] == i)
/* The `continue;' (or equivalent skip) guarded by the test above is
   presumably at the elided line 3606/3607 -- TODO confirm.  */
3608 copy_reg (i, udata, my_udata);
3611 pc = get_return_addr (udata, sub_udata) - 1;
3614 #ifdef INCOMING_REGNO
3615 /* But we do need to update the saved return address register from
3616 the last frame we unwind, or the handler frame will have the wrong
3618 if (udata->saved[udata->retaddr_column] == REG_SAVED_REG)
3620 i = udata->reg_or_offset[udata->retaddr_column];
3621 if (in_reg_window (i, udata))
3622 copy_reg (i, udata, my_udata);
3626 /* udata now refers to the frame called by the handler frame. */
/* --- Phase 3: build the landing stub and transfer control.  */
3628 /* Emit the stub to adjust sp and jump to the handler. */
3629 retaddr = __builtin_eh_stub ();
3631 /* And then set our return address to point to the stub. */
3632 if (my_udata->saved[my_udata->retaddr_column] == REG_SAVED_OFFSET)
3633 put_return_addr (retaddr, my_udata);
/* Line 3635 is presumably the `else' arm (return address lives in a
   register rather than a stack slot); the `else' itself is elided.  */
3635 __builtin_set_return_addr_reg (retaddr);
3637 /* Set up the registers we use to communicate with the stub.
3638 We check STACK_GROWS_DOWNWARD so the stub can use adjust_stack. */
3639 __builtin_set_eh_regs (handler,
3640 #ifdef STACK_GROWS_DOWNWARD
/* Stack adjustment for the stub: handler-frame CFA minus throw-frame CFA,
   sign flipped by stack growth direction (the #else at ~3642 is elided).  */
3641 udata->cfa - my_udata->cfa
3643 my_udata->cfa - udata->cfa
3648 /* Epilogue: restore the handler frame's register values and return
3651 #endif /* !DWARF2_UNWIND_INFO */
3656 #ifndef inhibit_libc
3657 /* This gets us __GNU_LIBRARY__. */
3658 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
3661 #ifdef __GNU_LIBRARY__
3662 /* Avoid forcing the library's meaning of `write' on the user program
3663 by using the "internal" name (for use within the library) */
3664 #define write(fd, buf, n) __write((fd), (buf), (n))
3666 #endif /* inhibit_libc */
3668 #define MESSAGE "pure virtual method called\n"
3673 #ifndef inhibit_libc
3674 write (2, MESSAGE, sizeof (MESSAGE) - 1);