1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 92, 93, 94, 95, 96, 97, 98, 1999, 2000
4 Free Software Foundation, Inc.
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 GNU CC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GNU CC; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
23 /* As a special exception, if you link this library with other files,
24 some of which are compiled with GCC, to produce an executable,
25 this library does not by itself cause the resulting executable
26 to be covered by the GNU General Public License.
27 This exception does not however invalidate any other reasons why
28 the executable file might be covered by the GNU General Public License. */
30 /* It is incorrect to include config.h here, because this file is being
31 compiled for the target, and hence definitions concerning only the host
40 /* Don't use `fancy_abort' here even if config.h says to use it. */
45 /* In a cross-compilation situation, default to inhibiting compilation
46 of routines that use libc. */
48 #if defined(CROSS_COMPILE) && !defined(inhibit_libc)
/* NOTE(review): lossy dump -- original lines 56-66 are absent.  Fragment of
   the DImode negate helper (L_negdi2 section of libgcc2); the visible line
   is presumably the high-word borrow step of a two-word negate -- confirm
   against the full source. */
54 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
55 #if defined (L_divdi3) || defined (L_moddi3)
67 w.s.high = -uu.s.high - ((UWtype) w.s.low > 0);
/* NOTE(review): fragment of __lshrdi3 (logical right shift of a DWtype by b
   bits).  Original lines 75-76, 78-87, 89-91, 93-95, 97 and the closing of
   the function are missing from this dump; the visible lines are the
   word-boundary cases (bm <= 0 vs. bm > 0) -- do not assume contiguity. */
73 /* Unless shift functions are defined with full ANSI prototypes,
74 parameter b will be promoted to int if word_type is smaller than an int. */
77 __lshrdi3 (DWtype u, word_type b)
88 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
92 w.s.low = (UWtype) uu.s.high >> -bm;
96 UWtype carries = (UWtype) uu.s.high << bm;
98 w.s.high = (UWtype) uu.s.high >> b;
99 w.s.low = ((UWtype) uu.s.low >> b) | carries;
/* NOTE(review): fragment of __ashldi3 (left shift of a DWtype by b bits);
   most body lines (109-118, 120-122, 124-126, 128) are missing.  Mirror
   image of __lshrdi3: carries move from the low word into the high word. */
108 __ashldi3 (DWtype u, word_type b)
119 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
123 w.s.high = (UWtype) uu.s.low << -bm;
127 UWtype carries = (UWtype) uu.s.low >> bm;
129 w.s.low = (UWtype) uu.s.low << b;
130 w.s.high = ((UWtype) uu.s.high << b) | carries;
/* NOTE(review): fragment of __ashrdi3 (arithmetic right shift of a DWtype).
   Unlike __lshrdi3, the high word is shifted as signed so the sign bit is
   replicated (line 154 produces all-ones or all-zeros).  Lines 140-149,
   151-152, 156-158, 160 and the return are missing from this dump. */
139 __ashrdi3 (DWtype u, word_type b)
150 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
153 /* w.s.high = 1..1 or 0..0 */
154 w.s.high = uu.s.high >> (sizeof (Wtype) * BITS_PER_UNIT - 1);
155 w.s.low = uu.s.high >> -bm;
159 UWtype carries = (UWtype) uu.s.high << bm;
161 w.s.high = uu.s.high >> b;
162 w.s.low = ((UWtype) uu.s.low >> b) | carries;
/* NOTE(review): presumably the body of __ffsdi2 (find-first-set on a
   DWtype): try the low word first, else use the high word's ffs and add
   the low word's bit width.  The function header and surrounding control
   flow are absent from this dump -- confirm against the full source. */
176 w.s.low = ffs (uu.s.low);
179 w.s.low = ffs (uu.s.high);
182 w.s.low += BITS_PER_UNIT * sizeof (Wtype);
/* NOTE(review): fragment of __muldi3 (DWtype * DWtype).  Schoolbook
   double-word multiply: full low*low product via __umulsidi3, then the two
   cross products folded into the high word (their high halves overflow
   past 2^(2*W) and are dropped).  Declarations/return lines are missing. */
191 __muldi3 (DWtype u, DWtype v)
199 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
200 w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
201 + (UWtype) uu.s.high * (UWtype) vv.s.low);
/* NOTE(review): fragment of __udiv_w_sdiv -- unsigned 2W/1W division built
   on a *signed* sdiv_qrnnd primitive.  The visible comments document the
   case analysis (dividend/divisor fitting in the signed range, divisor
   halving, complement tricks).  Many lines between the visible ones are
   missing from this dump; treat the W_TYPE_SIZE arithmetic as authoritative
   only after checking the full source. */
208 #if defined (sdiv_qrnnd)
210 __udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
217 if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
219 /* dividend, divisor, and quotient are nonnegative */
220 sdiv_qrnnd (q, r, a1, a0, d);
224 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
225 sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
226 /* Divide (c1*2^32 + c0) by d */
227 sdiv_qrnnd (q, r, c1, c0, d);
228 /* Add 2^31 to quotient */
229 q += (UWtype) 1 << (W_TYPE_SIZE - 1);
234 b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
235 c1 = a1 >> 1; /* A/2 */
236 c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);
238 if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
240 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
242 r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
259 else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
262 c0 = ~c0; /* logical NOT */
264 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
266 q = ~q; /* (A/2)/b1 */
269 r = 2*r + (a0 & 1); /* A/(2*b1) */
287 else /* Implies c1 = b1 */
288 { /* Hence a1 = d - 1 = 2*b1 - 1 */
/* NOTE(review): stub variant of __udiv_w_sdiv used when no sdiv_qrnnd
   primitive exists; all parameters are marked unused.  The return type,
   body, and #else line are missing from this dump. */
306 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
308 __udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
309 UWtype a1 __attribute__ ((__unused__)),
310 UWtype a0 __attribute__ ((__unused__)),
311 UWtype d __attribute__ ((__unused__)))
/* NOTE(review): 256-entry lookup table used by count_leading_zeros:
   __clz_tab[x] = bit position of the highest set bit of x, plus one
   (0 for x == 0).  The closing brace/semicolon of the initializer is
   missing from this dump. */
318 #if (defined (L_udivdi3) || defined (L_divdi3) || \
319 defined (L_umoddi3) || defined (L_moddi3))
324 static const UQItype __clz_tab[] =
326 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
327 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
328 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
329 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
330 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
331 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
332 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
333 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
/* NOTE(review): fragment of __udivmoddi4 -- the workhorse unsigned
   double-word divide: computes n / d and, when rp is non-null, stores the
   remainder through rp.  Two compile-time variants are visible
   (UDIV_NEEDS_NORMALIZATION selects whether d0 must be pre-shifted so its
   top bit is set before udiv_qrnnd).  Note the deliberate `1 / d0` on
   lines 375/421: division by zero is raised intentionally when the
   divisor is 0.  Large runs of lines (case dispatch on d1/n1, variable
   declarations, the quotient assembly and return) are missing from this
   dump -- the visible lines are NOT contiguous code. */
336 #if (defined (L_udivdi3) || defined (L_divdi3) || \
337 defined (L_umoddi3) || defined (L_moddi3))
341 __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
346 UWtype d0, d1, n0, n1, n2;
358 #if !UDIV_NEEDS_NORMALIZATION
365 udiv_qrnnd (q0, n0, n1, n0, d0);
368 /* Remainder in n0. */
375 d0 = 1 / d0; /* Divide intentionally by zero. */
377 udiv_qrnnd (q1, n1, 0, n1, d0);
378 udiv_qrnnd (q0, n0, n1, n0, d0);
380 /* Remainder in n0. */
391 #else /* UDIV_NEEDS_NORMALIZATION */
399 count_leading_zeros (bm, d0);
403 /* Normalize, i.e. make the most significant bit of the
407 n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
411 udiv_qrnnd (q0, n0, n1, n0, d0);
414 /* Remainder in n0 >> bm. */
421 d0 = 1 / d0; /* Divide intentionally by zero. */
423 count_leading_zeros (bm, d0);
427 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
428 conclude (the most significant bit of n1 is set) /\ (the
429 leading quotient digit q1 = 1).
431 This special case is necessary, not an optimization.
432 (Shift counts of W_TYPE_SIZE are undefined.) */
441 b = W_TYPE_SIZE - bm;
445 n1 = (n1 << bm) | (n0 >> b);
448 udiv_qrnnd (q1, n1, n2, n1, d0);
453 udiv_qrnnd (q0, n0, n1, n0, d0);
455 /* Remainder in n0 >> bm. */
465 #endif /* UDIV_NEEDS_NORMALIZATION */
476 /* Remainder in n1n0. */
488 count_leading_zeros (bm, d1);
491 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
492 conclude (the most significant bit of n1 is set) /\ (the
493 quotient digit q0 = 0 or 1).
495 This special case is necessary, not an optimization. */
497 /* The condition on the next line takes advantage of that
498 n1 >= d1 (true due to program flow). */
499 if (n1 > d1 || n0 >= d0)
502 sub_ddmmss (n1, n0, n1, n0, d1, d0);
521 b = W_TYPE_SIZE - bm;
523 d1 = (d1 << bm) | (d0 >> b);
526 n1 = (n1 << bm) | (n0 >> b);
529 udiv_qrnnd (q0, n1, n2, n1, d1);
530 umul_ppmm (m1, m0, q0, d0);
532 if (m1 > n1 || (m1 == n1 && m0 > n0))
535 sub_ddmmss (m1, m0, m1, m0, d1, d0);
540 /* Remainder in (n1n0 - m1m0) >> bm. */
543 sub_ddmmss (n1, n0, n1, n0, m1, m0);
544 rr.s.low = (n1 << b) | (n0 >> bm);
545 rr.s.high = n1 >> bm;
/* NOTE(review): fragment of __divdi3 -- signed double-word divide
   implemented by negating negative operands (via __negdi2) and calling the
   unsigned __udivmoddi4 with rp == 0 (quotient only).  The sign-tracking
   lines and the final negate/return are missing from this dump. */
560 __divdi3 (DWtype u, DWtype v)
571 uu.ll = __negdi2 (uu.ll);
574 vv.ll = __negdi2 (vv.ll);
576 w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
/* NOTE(review): fragment of __moddi3 -- signed double-word modulo.  Same
   sign-normalization strategy as __divdi3, but here the quotient is
   discarded and the remainder is taken through &w.  The remainder
   sign-fixup and return lines are missing from this dump. */
586 __moddi3 (DWtype u, DWtype v)
597 uu.ll = __negdi2 (uu.ll);
599 vv.ll = __negdi2 (vv.ll);
601 (void) __udivmoddi4 (uu.ll, vv.ll, &w);
/* NOTE(review): __umoddi3 -- unsigned double-word modulo: discard the
   quotient, return the remainder written through &w.  Declaration of w
   and the return line are missing from this dump. */
611 __umoddi3 (UDWtype u, UDWtype v)
615 (void) __udivmoddi4 (u, v, &w);
/* NOTE(review): __udivdi3 -- unsigned double-word divide; thin wrapper
   that requests no remainder (rp == 0).  Braces are missing from this
   dump but the whole logic is the single visible return. */
623 __udivdi3 (UDWtype n, UDWtype d)
625 return __udivmoddi4 (n, d, (UDWtype *) 0);
/* NOTE(review): fragment of __cmpdi2 -- three-way signed DWtype compare.
   High words are compared signed, low words unsigned.  The return
   statements (presumably 0/1/2 per the libgcc convention) are missing
   from this dump -- confirm against the full source. */
631 __cmpdi2 (DWtype a, DWtype b)
635 au.ll = a, bu.ll = b;
637 if (au.s.high < bu.s.high)
639 else if (au.s.high > bu.s.high)
641 if ((UWtype) au.s.low < (UWtype) bu.s.low)
643 else if ((UWtype) au.s.low > (UWtype) bu.s.low)
/* NOTE(review): fragment of __ucmpdi2 -- like __cmpdi2 but both words are
   compared unsigned.  Return statements are missing from this dump. */
651 __ucmpdi2 (DWtype a, DWtype b)
655 au.ll = a, bu.ll = b;
657 if ((UWtype) au.s.high < (UWtype) bu.s.high)
659 else if ((UWtype) au.s.high > (UWtype) bu.s.high)
661 if ((UWtype) au.s.low < (UWtype) bu.s.low)
663 else if ((UWtype) au.s.low > (UWtype) bu.s.low)
/* NOTE(review): fragment of __fixunstfDI -- convert a 128-bit long double
   (TFtype) to an unsigned DWtype by splitting at WORD_SIZE: divide out the
   high word, convert, shift back, subtract, convert the low part.  The
   actual conversion/assembly statements between the visible comments are
   missing from this dump. */
669 #if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
670 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
671 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
674 __fixunstfDI (TFtype a)
682 /* Compute high word of result, as a flonum. */
683 b = (a / HIGH_WORD_COEFF);
684 /* Convert that to fixed (but not to DWtype!),
685 and shift it into the high word. */
688 /* Remove high part from the TFtype, leaving the low part as flonum. */
690 /* Convert that to fixed (but not to DWtype!) and add it in.
691 Sometimes A comes out negative. This is significant, since
692 A has more bits than a long int does. */
/* NOTE(review): __fixtfdi -- signed TFmode-to-DImode conversion via the
   unsigned helper: negate, convert, negate back for negative inputs.
   The function header and the `if (a < 0)` guard are missing from this
   dump. */
701 #if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
706 return - __fixunstfDI (-a);
707 return __fixunstfDI (a);
/* NOTE(review): fragment of __fixunsxfDI -- 96-bit extended double
   (XFtype) to unsigned DWtype; same split-at-WORD_SIZE technique as
   __fixunstfDI.  Conversion statements between the comments are missing
   from this dump. */
711 #if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
712 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
713 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
716 __fixunsxfDI (XFtype a)
724 /* Compute high word of result, as a flonum. */
725 b = (a / HIGH_WORD_COEFF);
726 /* Convert that to fixed (but not to DWtype!),
727 and shift it into the high word. */
730 /* Remove high part from the XFtype, leaving the low part as flonum. */
732 /* Convert that to fixed (but not to DWtype!) and add it in.
733 Sometimes A comes out negative. This is significant, since
734 A has more bits than a long int does. */
/* NOTE(review): __fixxfdi -- signed XFmode-to-DImode conversion via the
   unsigned helper.  Header and sign guard are missing from this dump. */
743 #if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
748 return - __fixunsxfDI (-a);
749 return __fixunsxfDI (a);
/* NOTE(review): fragment of __fixunsdfDI -- double (DFtype) to unsigned
   DWtype; same split-at-WORD_SIZE technique as the TF/XF variants above.
   The enclosing #if and the conversion statements are missing from this
   dump. */
754 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
755 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
758 __fixunsdfDI (DFtype a)
766 /* Compute high word of result, as a flonum. */
767 b = (a / HIGH_WORD_COEFF);
768 /* Convert that to fixed (but not to DWtype!),
769 and shift it into the high word. */
772 /* Remove high part from the DFtype, leaving the low part as flonum. */
774 /* Convert that to fixed (but not to DWtype!) and add it in.
775 Sometimes A comes out negative. This is significant, since
776 A has more bits than a long int does. */
/* NOTE(review): body fragment of __fixdfdi -- signed DFmode-to-DImode via
   the unsigned helper; the header and sign guard are missing from this
   dump. */
790 return - __fixunsdfDI (-a);
791 return __fixunsdfDI (a);
/* NOTE(review): fragment of __fixunssfDI -- float (SFtype) to unsigned
   DWtype.  Widens to DFtype first (line 805) so no mantissa bits are lost,
   then uses the same split-at-WORD_SIZE technique.  Conversion statements
   are missing from this dump. */
796 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
797 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
800 __fixunssfDI (SFtype original_a)
802 /* Convert the SFtype to a DFtype, because that is surely not going
803 to lose any bits. Some day someone else can write a faster version
804 that avoids converting to DFtype, and verify it really works right. */
805 DFtype a = original_a;
812 /* Compute high word of result, as a flonum. */
813 b = (a / HIGH_WORD_COEFF);
814 /* Convert that to fixed (but not to DWtype!),
815 and shift it into the high word. */
818 /* Remove high part from the DFtype, leaving the low part as flonum. */
820 /* Convert that to fixed (but not to DWtype!) and add it in.
821 Sometimes A comes out negative. This is significant, since
822 A has more bits than a long int does. */
/* NOTE(review): body fragment of __fixsfdi -- signed SFmode-to-DImode via
   the unsigned helper; header and sign guard are missing from this dump. */
836 return - __fixunssfDI (-a);
837 return __fixunssfDI (a);
/* NOTE(review): fragment of __floatdixf -- DImode to 96-bit extended
   double.  The high word is scaled up by two half-word multiplies (instead
   of one full-word multiply, which could overflow intermediate integer
   arithmetic), then the unsigned low word is added.  Declarations and the
   return are missing from this dump. */
841 #if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
842 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
843 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
844 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
847 __floatdixf (DWtype u)
851 d = (Wtype) (u >> WORD_SIZE);
852 d *= HIGH_HALFWORD_COEFF;
853 d *= HIGH_HALFWORD_COEFF;
854 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
/* NOTE(review): fragment of __floatditf -- DImode to 128-bit long double;
   identical two-halfword scaling scheme as __floatdixf.  Declarations and
   return are missing from this dump. */
860 #if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
861 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
862 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
863 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
866 __floatditf (DWtype u)
870 d = (Wtype) (u >> WORD_SIZE);
871 d *= HIGH_HALFWORD_COEFF;
872 d *= HIGH_HALFWORD_COEFF;
873 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
/* NOTE(review): fragment of __floatdidf -- DImode to double; same
   two-halfword scaling scheme.  The enclosing #if, declarations, and
   return are missing from this dump. */
880 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
881 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
882 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
885 __floatdidf (DWtype u)
889 d = (Wtype) (u >> WORD_SIZE);
890 d *= HIGH_HALFWORD_COEFF;
891 d *= HIGH_HALFWORD_COEFF;
892 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
/* NOTE(review): fragment of __floatdisf -- DImode to float.  Computes in
   DFmode to keep high-word precision, and guards against double rounding:
   when DImode has more bits than DFmode can hold, low-order bits that
   would be truncated are collapsed into a single sticky REP_BIT below the
   SFmode rounding position (lines 946-953).  The float-format codes are a
   local copy from real.h.  Several lines (the #endif for the format
   checks, the sticky-bit OR, declarations, return) are missing from this
   dump. */
899 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
900 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
901 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
902 #define DI_SIZE (sizeof (DWtype) * BITS_PER_UNIT)
904 /* Define codes for all the float formats that we know of. Note
905 that this is copied from real.h. */
907 #define UNKNOWN_FLOAT_FORMAT 0
908 #define IEEE_FLOAT_FORMAT 1
909 #define VAX_FLOAT_FORMAT 2
910 #define IBM_FLOAT_FORMAT 3
912 /* Default to IEEE float if not specified. Nearly all machines use it. */
913 #ifndef HOST_FLOAT_FORMAT
914 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
917 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
922 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
927 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
933 __floatdisf (DWtype u)
935 /* Do the calculation in DFmode
936 so that we don't lose any of the precision of the high word
937 while multiplying it. */
940 /* Protect against double-rounding error.
941 Represent any low-order bits, that might be truncated in DFmode,
942 by a bit that won't be lost. The bit can go in anywhere below the
943 rounding position of the SFmode. A fixed mask and bit position
944 handles all usual configurations. It doesn't handle the case
945 of 128-bit DImode, however. */
946 if (DF_SIZE < DI_SIZE
947 && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
949 #define REP_BIT ((UDWtype) 1 << (DI_SIZE - DF_SIZE))
950 if (! (- ((DWtype) 1 << DF_SIZE) < u
951 && u < ((DWtype) 1 << DF_SIZE)))
953 if ((UDWtype) u & (REP_BIT - 1))
957 f = (Wtype) (u >> WORD_SIZE);
958 f *= HIGH_HALFWORD_COEFF;
959 f *= HIGH_HALFWORD_COEFF;
960 f += (UWtype) (u & (HIGH_WORD_COEFF - 1));
/* NOTE(review): fragment of __fixunsxfSI -- XFmode to unsigned SImode.
   Values >= -LONG_MIN (i.e. >= 2^31 on 32-bit) are handled by biasing with
   LONG_MIN before the signed conversion so the result stays in range.
   The fall-through path for small values is missing from this dump. */
966 #if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
967 /* Reenable the normal types, in case limits.h needs them. */
980 __fixunsxfSI (XFtype a)
982 if (a >= - (DFtype) LONG_MIN)
983 return (Wtype) (a + LONG_MIN) - LONG_MIN;
/* NOTE(review): fragment of __fixunsdfSI -- DFmode to unsigned SImode;
   same LONG_MIN biasing trick as __fixunsxfSI.  The fall-through path is
   missing from this dump. */
989 /* Reenable the normal types, in case limits.h needs them. */
1002 __fixunsdfSI (DFtype a)
1004 if (a >= - (DFtype) LONG_MIN)
1005 return (Wtype) (a + LONG_MIN) - LONG_MIN;
/* NOTE(review): fragment of __fixunssfSI -- SFmode to unsigned SImode;
   same LONG_MIN biasing trick, comparing against -(SFtype) LONG_MIN.
   The fall-through path is missing from this dump. */
1011 /* Reenable the normal types, in case limits.h needs them. */
1024 __fixunssfSI (SFtype a)
1026 if (a >= - (SFtype) LONG_MIN)
1027 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1032 /* From here on down, the routines use normal data types. */
1034 #define SItype bogus_type
1035 #define USItype bogus_type
1036 #define DItype bogus_type
1037 #define UDItype bogus_type
1038 #define SFtype bogus_type
1039 #define DFtype bogus_type
/* NOTE(review): fragment of __gcc_bcmp -- signed byte-wise memory compare.
   The loop header, the c1 != c2 return, and the final `return 0` are
   missing from this dump. */
1057 /* Like bcmp except the sign is meaningful.
1058 Result is negative if S1 is less than S2,
1059 positive if S1 is greater, 0 if S1 and S2 are equal. */
1062 __gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
1066 unsigned char c1 = *s1++, c2 = *s2++;
/* NOTE(review): hand-written __builtin_saveregs implementations, selected
   per target by nested #ifdefs: i860 SVR4/Alliant, i860 Paragon, plain
   i860, SPARC, MIPS, and a trapping fallback.  Each i860 variant rounds
   the stack to 16 bytes, allocates 96 bytes, spills argument registers
   f8-f15 and r16-r27 into a va save area, builds a va_list, and returns
   its address in r16.  The SPARC variant stores %i0-%i5 into the caller's
   stack frame slots; the MIPS variant stores $4-$7 at 0..12($30).
   Several lines of each variant (function type/attributes, #endif pairs,
   trailing `asm` lines, the MIPS .set directives) are missing from this
   lossy dump -- do not treat the visible asm sequences as complete. */
1083 #if defined(__svr4__) || defined(__alliant__)
1087 /* The Alliant needs the added underscore. */
1088 asm (".globl __builtin_saveregs");
1089 asm ("__builtin_saveregs:");
1090 asm (".globl ___builtin_saveregs");
1091 asm ("___builtin_saveregs:");
1093 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1094 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1095 area and also for a new va_list
1097 /* Save all argument registers in the arg reg save area. The
1098 arg reg save area must have the following layout (according
1110 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1111 asm (" fst.q %f12,16(%sp)");
1113 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1114 asm (" st.l %r17,36(%sp)");
1115 asm (" st.l %r18,40(%sp)");
1116 asm (" st.l %r19,44(%sp)");
1117 asm (" st.l %r20,48(%sp)");
1118 asm (" st.l %r21,52(%sp)");
1119 asm (" st.l %r22,56(%sp)");
1120 asm (" st.l %r23,60(%sp)");
1121 asm (" st.l %r24,64(%sp)");
1122 asm (" st.l %r25,68(%sp)");
1123 asm (" st.l %r26,72(%sp)");
1124 asm (" st.l %r27,76(%sp)");
1126 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1127 va_list structure. Put it into
1128 r16 so that it will be returned
1131 /* Initialize all fields of the new va_list structure. This
1132 structure looks like:
1135 unsigned long ireg_used;
1136 unsigned long freg_used;
1142 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1143 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1144 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1145 asm (" bri %r1"); /* delayed return */
1146 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1148 #else /* not __svr4__ */
1149 #if defined(__PARAGON__)
1151 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1152 * and we stand a better chance of hooking into libraries
1153 * compiled by PGI. [andyp@ssd.intel.com]
1157 asm (".globl __builtin_saveregs");
1158 asm ("__builtin_saveregs:");
1159 asm (".globl ___builtin_saveregs");
1160 asm ("___builtin_saveregs:");
1162 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1163 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1164 area and also for a new va_list
1166 /* Save all argument registers in the arg reg save area. The
1167 arg reg save area must have the following layout (according
1179 asm (" fst.q f8, 0(sp)");
1180 asm (" fst.q f12,16(sp)");
1181 asm (" st.l r16,32(sp)");
1182 asm (" st.l r17,36(sp)");
1183 asm (" st.l r18,40(sp)");
1184 asm (" st.l r19,44(sp)");
1185 asm (" st.l r20,48(sp)");
1186 asm (" st.l r21,52(sp)");
1187 asm (" st.l r22,56(sp)");
1188 asm (" st.l r23,60(sp)");
1189 asm (" st.l r24,64(sp)");
1190 asm (" st.l r25,68(sp)");
1191 asm (" st.l r26,72(sp)");
1192 asm (" st.l r27,76(sp)");
1194 asm (" adds 80,sp,r16"); /* compute the address of the new
1195 va_list structure. Put it into
1196 r16 so that it will be returned
1199 /* Initialize all fields of the new va_list structure. This
1200 structure looks like:
1203 unsigned long ireg_used;
1204 unsigned long freg_used;
1210 asm (" st.l r0, 0(r16)"); /* nfixed */
1211 asm (" st.l r0, 4(r16)"); /* nfloating */
1212 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1213 asm (" bri r1"); /* delayed return */
1214 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1215 #else /* not __PARAGON__ */
1219 asm (".globl ___builtin_saveregs");
1220 asm ("___builtin_saveregs:");
1221 asm (" mov sp,r30");
1222 asm (" andnot 0x0f,sp,sp");
1223 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1225 /* Fill in the __va_struct. */
1226 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1227 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1228 asm (" st.l r18, 8(sp)");
1229 asm (" st.l r19,12(sp)");
1230 asm (" st.l r20,16(sp)");
1231 asm (" st.l r21,20(sp)");
1232 asm (" st.l r22,24(sp)");
1233 asm (" st.l r23,28(sp)");
1234 asm (" st.l r24,32(sp)");
1235 asm (" st.l r25,36(sp)");
1236 asm (" st.l r26,40(sp)");
1237 asm (" st.l r27,44(sp)");
1239 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1240 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1242 /* Fill in the __va_ctl. */
1243 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1244 asm (" st.l r28,84(sp)"); /* pointer to more args */
1245 asm (" st.l r0, 88(sp)"); /* nfixed */
1246 asm (" st.l r0, 92(sp)"); /* nfloating */
1248 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1250 asm (" mov r30,sp");
1251 /* recover stack and pass address to start
1253 #endif /* not __PARAGON__ */
1254 #endif /* not __svr4__ */
1255 #else /* not __i860__ */
1257 asm (".global __builtin_saveregs");
1258 asm ("__builtin_saveregs:");
1259 asm (".global ___builtin_saveregs");
1260 asm ("___builtin_saveregs:");
1261 #ifdef NEED_PROC_COMMAND
1264 asm ("st %i0,[%fp+68]");
1265 asm ("st %i1,[%fp+72]");
1266 asm ("st %i2,[%fp+76]");
1267 asm ("st %i3,[%fp+80]");
1268 asm ("st %i4,[%fp+84]");
1270 asm ("st %i5,[%fp+88]");
1271 #ifdef NEED_TYPE_COMMAND
1272 asm (".type __builtin_saveregs,#function");
1273 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1275 #else /* not __sparc__ */
1276 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1280 asm (" .set nomips16");
1282 asm (" .ent __builtin_saveregs");
1283 asm (" .globl __builtin_saveregs");
1284 asm ("__builtin_saveregs:");
1285 asm (" sw $4,0($30)");
1286 asm (" sw $5,4($30)");
1287 asm (" sw $6,8($30)");
1288 asm (" sw $7,12($30)");
1290 asm (" .end __builtin_saveregs");
1291 #else /* not __mips__, etc. */
1293 void * __attribute__ ((__noreturn__))
1294 __builtin_saveregs (void)
1299 #endif /* not __mips__ */
1300 #endif /* not __sparc__ */
1301 #endif /* not __i860__ */
/* NOTE(review): __eprintf -- the helper behind the `assert' macro: prints
   the formatted failure message to stderr.  The fflush/abort tail and the
   function's return type line are missing from this dump.  `string' is a
   compiler-supplied format, so the non-literal-format use is intentional
   here. */
1305 #ifndef inhibit_libc
1307 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1309 /* This is used by the `assert' macro. */
1311 __eprintf (const char *string, const char *expression,
1312 unsigned int line, const char *filename)
1314 fprintf (stderr, string, expression, line, filename);
/* NOTE(review): fragment of `struct bb', the per-translation-unit record
   emitted by -a basic-block profiling.  The struct header, zero_word/next/
   counts/ncounts fields, and closing brace are missing from this dump;
   only some pointer members are visible.  The functions/line_nums/
   filenames trio is optional (older GCCs omitted them -- see the length
   check in __bb_exit_func below). */
1324 /* Structure emitted by -a */
1328 const char *filename;
1332 const unsigned long *addresses;
1334 /* Older GCC's did not emit these fields. */
1336 const char **functions;
1337 const long *line_nums;
1338 const char **filenames;
1342 #ifdef BLOCK_PROFILER_CODE
1345 #ifndef inhibit_libc
1347 /* Simple minded basic block profiling output dumper for
1348 systems that don't provide tcov support. At present,
1349 it requires atexit and stdio. */
1351 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1355 #include "gbl-ctors.h"
1356 #include "gcov-io.h"
1358 #ifdef TARGET_HAS_F_SETLKW
/* NOTE(review): num_digits -- width (in characters) needed to print
   `value' in the given base; a leading minus sign is counted only for
   non-hex bases.  The digit-counting loop and return are missing from
   this dump.  bb_head anchors the linked list of registered struct bb
   records. */
1363 static struct bb *bb_head;
1365 static int num_digits (long value, int base) __attribute__ ((const));
1367 /* Return the number of digits needed to print a value */
1368 /* __inline__ */ static int num_digits (long value, int base)
1370 int minus = (value < 0 && base != 16);
1371 unsigned long v = (minus) ? -value : value;
/* NOTE(review): fragment of __bb_exit_func -- the atexit handler for both
   profiling modes.  If the first registered filename ends in ".da" this is
   -fprofile-arcs data: for each struct bb it opens/creates the .da file,
   locks it with F_SETLKW when available to survive concurrent forks, merges
   any existing counts of matching length, and rewrites the counts with
   __write_long.  Otherwise it is -a basic-block mode: append a human-
   readable report to "bb.out" with per-block counts, addresses, function
   names, line numbers and filenames (fields guarded by the nwords length
   check for compatibility with older GCCs).  Roughly half the original
   lines (error cleanup paths, fseek/rewind calls, several closing braces)
   are missing from this dump -- the visible lines are not contiguous. */
1385 __bb_exit_func (void)
1387 FILE *da_file, *file;
1394 i = strlen (bb_head->filename) - 3;
1396 if (!strcmp (bb_head->filename+i, ".da"))
1398 /* Must be -fprofile-arcs not -a.
1399 Dump data in a form that gcov expects. */
1403 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1407 /* Make sure the output file exists -
1408 but don't clobber existing data. */
1409 if ((da_file = fopen (ptr->filename, "a")) != 0)
1412 /* Need to re-open in order to be able to write from the start. */
1413 da_file = fopen (ptr->filename, "r+b");
1414 /* Some old systems might not allow the 'b' mode modifier.
1415 Therefore, try to open without it. This can lead to a race
1416 condition so that when you delete and re-create the file, the
1417 file might be opened in text mode, but then, you shouldn't
1418 delete the file in the first place. */
1420 da_file = fopen (ptr->filename, "r+");
1423 fprintf (stderr, "arc profiling: Can't open output file %s.\n",
1428 /* After a fork, another process might try to read and/or write
1429 the same file simultaneously. So if we can, lock the file to
1430 avoid race conditions. */
1431 #if defined (TARGET_HAS_F_SETLKW)
1433 struct flock s_flock;
1435 s_flock.l_type = F_WRLCK;
1436 s_flock.l_whence = SEEK_SET;
1437 s_flock.l_start = 0;
1439 s_flock.l_pid = getpid ();
1441 while (fcntl (fileno (da_file), F_SETLKW, &s_flock)
1446 /* If the file is not empty, and the number of counts in it is the
1447 same, then merge them in. */
1448 firstchar = fgetc (da_file);
1449 if (firstchar == EOF)
1451 if (ferror (da_file))
1453 fprintf (stderr, "arc profiling: Can't read output file ");
1454 perror (ptr->filename);
1461 if (ungetc (firstchar, da_file) == EOF)
1463 if (__read_long (&n_counts, da_file, 8) != 0)
1465 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1470 if (n_counts == ptr->ncounts)
1474 for (i = 0; i < n_counts; i++)
1478 if (__read_long (&v, da_file, 8) != 0)
1480 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1484 ptr->counts[i] += v;
1492 /* ??? Should first write a header to the file. Preferably, a 4 byte
1493 magic number, 4 bytes containing the time the program was
1494 compiled, 4 bytes containing the last modification time of the
1495 source file, and 4 bytes indicating the compiler options used.
1497 That way we can easily verify that the proper source/executable/
1498 data file combination is being used from gcov. */
1500 if (__write_long (ptr->ncounts, da_file, 8) != 0)
1503 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1509 long *count_ptr = ptr->counts;
1511 for (j = ptr->ncounts; j > 0; j--)
1513 if (__write_long (*count_ptr, da_file, 8) != 0)
1521 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1525 if (fclose (da_file) == EOF)
1526 fprintf (stderr, "arc profiling: Error closing output file %s.\n",
1533 /* Must be basic block profiling. Emit a human readable output file. */
1535 file = fopen ("bb.out", "a");
1544 /* This is somewhat type incorrect, but it avoids worrying about
1545 exactly where time.h is included from. It should be ok unless
1546 a void * differs from other pointer formats, or if sizeof (long)
1547 is < sizeof (time_t). It would be nice if we could assume the
1548 use of rationale standards here. */
1550 time ((void *) &time_value);
1551 fprintf (file, "Basic block profiling finished on %s\n", ctime ((void *) &time_value));
1553 /* We check the length field explicitly in order to allow compatibility
1554 with older GCC's which did not provide it. */
1556 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1559 int func_p = (ptr->nwords >= (long) sizeof (struct bb)
1560 && ptr->nwords <= 1000
1562 int line_p = (func_p && ptr->line_nums);
1563 int file_p = (func_p && ptr->filenames);
1564 int addr_p = (ptr->addresses != 0);
1565 long ncounts = ptr->ncounts;
1571 int blk_len = num_digits (ncounts, 10);
1576 fprintf (file, "File %s, %ld basic blocks \n\n",
1577 ptr->filename, ncounts);
1579 /* Get max values for each field. */
1580 for (i = 0; i < ncounts; i++)
1585 if (cnt_max < ptr->counts[i])
1586 cnt_max = ptr->counts[i];
1588 if (addr_p && (unsigned long) addr_max < ptr->addresses[i])
1589 addr_max = ptr->addresses[i];
1591 if (line_p && line_max < ptr->line_nums[i])
1592 line_max = ptr->line_nums[i];
1596 p = (ptr->functions[i]) ? (ptr->functions[i]) : "<none>";
1604 p = (ptr->filenames[i]) ? (ptr->filenames[i]) : "<none>";
1611 addr_len = num_digits (addr_max, 16);
1612 cnt_len = num_digits (cnt_max, 10);
1613 line_len = num_digits (line_max, 10);
1615 /* Now print out the basic block information. */
1616 for (i = 0; i < ncounts; i++)
1619 " Block #%*d: executed %*ld time(s)",
1621 cnt_len, ptr->counts[i]);
1624 fprintf (file, " address= 0x%.*lx", addr_len,
1628 fprintf (file, " function= %-*s", func_len,
1629 (ptr->functions[i]) ? ptr->functions[i] : "<none>");
1632 fprintf (file, " line= %*ld", line_len, ptr->line_nums[i]);
1635 fprintf (file, " file= %s",
1636 (ptr->filenames[i]) ? ptr->filenames[i] : "<none>");
1638 fprintf (file, "\n");
1641 fprintf (file, "\n");
1645 fprintf (file, "\n\n");
/* NOTE(review): __bb_init_func -- called once per object file to register
   its struct bb.  zero_word doubles as an already-registered flag; the
   first registration also installs __bb_exit_func via atexit.  The early
   return after the zero_word check and the `bb_head = blocks;` line are
   missing from this dump. */
1651 __bb_init_func (struct bb *blocks)
1653 /* User is supposed to check whether the first word is non-0,
1654 but just in case.... */
1656 if (blocks->zero_word)
1659 /* Initialize destructor. */
1661 atexit (__bb_exit_func);
1663 /* Set up linked list. */
1664 blocks->zero_word = 1;
1665 blocks->next = bb_head;
/* NOTE(review): fragment of __bb_fork_func -- flush-and-reset before
   fork/exec so parent and child don't double-count.  The call that dumps
   the data and the loop body that zeroes ptr->counts[i] are missing from
   this dump. */
1669 /* Called before fork or exec - write out profile information gathered so
1670 far and reset it to zero. This avoids duplication or loss of the
1671 profile information gathered so far. */
1673 __bb_fork_func (void)
1678 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1681 for (i = ptr->ncounts - 1; i >= 0; i--)
1686 #ifndef MACHINE_STATE_SAVE
1687 #define MACHINE_STATE_SAVE(ID)
1689 #ifndef MACHINE_STATE_RESTORE
1690 #define MACHINE_STATE_RESTORE(ID)
1693 /* Number of buckets in hashtable of basic block addresses. */
1695 #define BB_BUCKETS 311
1697 /* Maximum length of string in file bb.in. */
1699 #define BBINBUFSIZE 500
/* NOTE(review): state for the basic-block *tracing* profiler.  Visible
   here: field fragments of `struct bb_edge' (a hashed src->dst jump
   counter) and `struct bb_func' (a per-function trace on/off rule parsed
   from bb.in), the __bb communication struct fed by the BLOCK_PROFILER
   macro, and the file-scope globals (trace file, hash buckets, call-depth
   stack, mode bits documented at lines 1749-1754).  The struct headers,
   several fields, and closing braces are missing from this dump. */
1703 struct bb_edge *next;
1704 unsigned long src_addr;
1705 unsigned long dst_addr;
1706 unsigned long count;
1711 TRACE_KEEP = 0, TRACE_ON = 1, TRACE_OFF = 2
1716 struct bb_func *next;
1719 enum bb_func_mode mode;
1722 /* This is the connection to the outside world.
1723 The BLOCK_PROFILER macro must set __bb.blocks
1724 and __bb.blockno. */
1727 unsigned long blockno;
1731 /* Vars to store addrs of source and destination basic blocks
1734 static unsigned long bb_src = 0;
1735 static unsigned long bb_dst = 0;
1737 static FILE *bb_tracefile = (FILE *) 0;
1738 static struct bb_edge **bb_hashbuckets = (struct bb_edge **) 0;
1739 static struct bb_func *bb_func_head = (struct bb_func *) 0;
1740 static unsigned long bb_callcount = 0;
1741 static int bb_mode = 0;
1743 static unsigned long *bb_stack = (unsigned long *) 0;
1744 static size_t bb_stacksize = 0;
1746 static int reported = 0;
1749 Always : Print execution frequencies of basic blocks
1751 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1752 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1753 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1754 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1759 /*#include <sys/types.h>*/
1760 #include <sys/stat.h>
1761 /*#include <malloc.h>*/
/* NOTE(review): gopen/gclose fragments -- fopen/fclose lookalikes that
   transparently pipe through gzip when the filename ends in .z/.Z/.gz
   (popen of "gzip -cd " for reads, "gzip -c >" for writes).  Known
   hazards visible even in this fragment: the malloc results at lines
   1793/1805 are used unchecked, and p[-1]/p[-2] are read without first
   checking strlen(fn) >= 3.  gclose's visible line uses fstat/S_ISFIFO,
   presumably to decide pclose vs. fclose -- confirm against the full
   source.  Many body lines (declarations, the HAVE_POPEN guard, free(s),
   returns) are missing from this dump. */
1763 /* Commands executed by gopen. */
1765 #define GOPENDECOMPRESS "gzip -cd "
1766 #define GOPENCOMPRESS "gzip -c >"
1768 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1769 If it does not compile, simply replace gopen by fopen and delete
1770 '.gz' from any first parameter to gopen. */
1773 gopen (char *fn, char *mode)
1781 if (mode[0] != 'r' && mode[0] != 'w')
1784 p = fn + strlen (fn)-1;
1785 use_gzip = ((p[-1] == '.' && (p[0] == 'Z' || p[0] == 'z'))
1786 || (p[-2] == '.' && p[-1] == 'g' && p[0] == 'z'));
1793 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1794 + sizeof (GOPENDECOMPRESS));
1795 strcpy (s, GOPENDECOMPRESS);
1796 strcpy (s + (sizeof (GOPENDECOMPRESS)-1), fn);
1797 f = popen (s, mode);
1805 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1806 + sizeof (GOPENCOMPRESS));
1807 strcpy (s, GOPENCOMPRESS);
1808 strcpy (s + (sizeof (GOPENCOMPRESS)-1), fn);
1809 if (!(f = popen (s, mode)))
1810 f = fopen (s, mode);
1817 return fopen (fn, mode);
1827 if (!fstat (fileno (f), &buf) && S_ISFIFO (buf.st_mode))
1835 #endif /* HAVE_POPEN */
1837 /* Called once per program. */
1840 __bb_exit_trace_func (void)
1842 FILE *file = fopen ("bb.out", "a");
1855 gclose (bb_tracefile);
1857 fclose (bb_tracefile);
1858 #endif /* HAVE_POPEN */
1861 /* Check functions in `bb.in'. */
1866 const struct bb_func *p;
1867 int printed_something = 0;
1871 /* This is somewhat type incorrect. */
1872 time ((void *) &time_value);
1874 for (p = bb_func_head; p != (struct bb_func *) 0; p = p->next)
1876 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1878 if (!ptr->filename || (p->filename != (char *) 0 && strcmp (p->filename, ptr->filename)))
1880 for (blk = 0; blk < ptr->ncounts; blk++)
1882 if (!strcmp (p->funcname, ptr->functions[blk]))
1887 if (!printed_something)
1889 fprintf (file, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value));
1890 printed_something = 1;
1893 fprintf (file, "\tFunction %s", p->funcname);
1895 fprintf (file, " of file %s", p->filename);
1896 fprintf (file, "\n" );
1901 if (printed_something)
1902 fprintf (file, "\n");
1908 if (!bb_hashbuckets)
1912 fprintf (stderr, "Profiler: out of memory\n");
1922 unsigned long addr_max = 0;
1923 unsigned long cnt_max = 0;
1927 /* This is somewhat type incorrect, but it avoids worrying about
1928 exactly where time.h is included from. It should be ok unless
1929 a void * differs from other pointer formats, or if sizeof (long)
1930 is < sizeof (time_t). It would be nice if we could assume the
1931 use of rationalized standards here. */
1933 time ((void *) &time_value);
1934 fprintf (file, "Basic block jump tracing");
1936 switch (bb_mode & 12)
1939 fprintf (file, " (with call)");
1943 /* Print nothing. */
1947 fprintf (file, " (with call & ret)");
1951 fprintf (file, " (with ret)");
1955 fprintf (file, " finished on %s\n", ctime ((void *) &time_value));
1957 for (i = 0; i < BB_BUCKETS; i++)
1959 struct bb_edge *bucket = bb_hashbuckets[i];
1960 for ( ; bucket; bucket = bucket->next )
1962 if (addr_max < bucket->src_addr)
1963 addr_max = bucket->src_addr;
1964 if (addr_max < bucket->dst_addr)
1965 addr_max = bucket->dst_addr;
1966 if (cnt_max < bucket->count)
1967 cnt_max = bucket->count;
1970 addr_len = num_digits (addr_max, 16);
1971 cnt_len = num_digits (cnt_max, 10);
1973 for ( i = 0; i < BB_BUCKETS; i++)
1975 struct bb_edge *bucket = bb_hashbuckets[i];
1976 for ( ; bucket; bucket = bucket->next )
1979 "Jump from block 0x%.*lx to block 0x%.*lx executed %*lu time(s)\n",
1980 addr_len, bucket->src_addr,
1981 addr_len, bucket->dst_addr,
1982 cnt_len, bucket->count);
1986 fprintf (file, "\n");
1994 /* Free allocated memory. */
1999 struct bb_func *old = f;
2002 if (old->funcname) free (old->funcname);
2003 if (old->filename) free (old->filename);
2014 for (i = 0; i < BB_BUCKETS; i++)
2016 struct bb_edge *old, *bucket = bb_hashbuckets[i];
2021 bucket = bucket->next;
2025 free (bb_hashbuckets);
2028 for (b = bb_head; b; b = b->next)
2029 if (b->flags) free (b->flags);
2032 /* Called once per program. */
2035 __bb_init_prg (void)
2038 char buf[BBINBUFSIZE];
2041 enum bb_func_mode m;
2044 /* Initialize destructor. */
2045 atexit (__bb_exit_func);
2047 if (!(file = fopen ("bb.in", "r")))
2050 while(fgets (buf, BBINBUFSIZE, file) != 0)
2066 if (!strcmp (p, "__bb_trace__"))
2068 else if (!strcmp (p, "__bb_jumps__"))
2070 else if (!strcmp (p, "__bb_hidecall__"))
2072 else if (!strcmp (p, "__bb_showret__"))
2076 struct bb_func *f = (struct bb_func *) malloc (sizeof (struct bb_func));
2080 f->next = bb_func_head;
2081 if ((pos = strchr (p, ':')))
2083 if (!(f->funcname = (char *) malloc (strlen (pos+1)+1)))
2085 strcpy (f->funcname, pos+1);
2087 if ((f->filename = (char *) malloc (l+1)))
2089 strncpy (f->filename, p, l);
2090 f->filename[l] = '\0';
2093 f->filename = (char *) 0;
2097 if (!(f->funcname = (char *) malloc (strlen (p)+1)))
2099 strcpy (f->funcname, p);
2100 f->filename = (char *) 0;
2112 bb_tracefile = gopen ("bbtrace.gz", "w");
2117 bb_tracefile = fopen ("bbtrace", "w");
2119 #endif /* HAVE_POPEN */
2123 bb_hashbuckets = (struct bb_edge **)
2124 malloc (BB_BUCKETS * sizeof (struct bb_edge *));
2126 /* Use a loop here rather than calling bzero to avoid having to
2127 conditionalize its existence. */
2128 for (i = 0; i < BB_BUCKETS; i++)
2129 bb_hashbuckets[i] = 0;
2135 bb_stack = (unsigned long *) malloc (bb_stacksize * sizeof (*bb_stack));
2138 /* Initialize destructor. */
2139 atexit (__bb_exit_trace_func);
2142 /* Called upon entering a basic block. */
2145 __bb_trace_func (void)
2147 struct bb_edge *bucket;
2149 MACHINE_STATE_SAVE("1")
2151 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2154 bb_dst = __bb.blocks->addresses[__bb.blockno];
2155 __bb.blocks->counts[__bb.blockno]++;
2159 fwrite (&bb_dst, sizeof (unsigned long), 1, bb_tracefile);
2164 struct bb_edge **startbucket, **oldnext;
2166 oldnext = startbucket
2167 = & bb_hashbuckets[ (((int) bb_src*8) ^ (int) bb_dst) % BB_BUCKETS ];
2168 bucket = *startbucket;
2170 for (bucket = *startbucket; bucket;
2171 oldnext = &(bucket->next), bucket = *oldnext)
2173 if (bucket->src_addr == bb_src
2174 && bucket->dst_addr == bb_dst)
2177 *oldnext = bucket->next;
2178 bucket->next = *startbucket;
2179 *startbucket = bucket;
2184 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2190 fprintf (stderr, "Profiler: out of memory\n");
2197 bucket->src_addr = bb_src;
2198 bucket->dst_addr = bb_dst;
2199 bucket->next = *startbucket;
2200 *startbucket = bucket;
2211 MACHINE_STATE_RESTORE("1")
2215 /* Called when returning from a function and `__bb_showret__' is set. */
2218 __bb_trace_func_ret (void)
2220 struct bb_edge *bucket;
2222 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2227 struct bb_edge **startbucket, **oldnext;
2229 oldnext = startbucket
2230 = & bb_hashbuckets[ (((int) bb_dst * 8) ^ (int) bb_src) % BB_BUCKETS ];
2231 bucket = *startbucket;
2233 for (bucket = *startbucket; bucket;
2234 oldnext = &(bucket->next), bucket = *oldnext)
2236 if (bucket->src_addr == bb_dst
2237 && bucket->dst_addr == bb_src)
2240 *oldnext = bucket->next;
2241 bucket->next = *startbucket;
2242 *startbucket = bucket;
2247 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2253 fprintf (stderr, "Profiler: out of memory\n");
2260 bucket->src_addr = bb_dst;
2261 bucket->dst_addr = bb_src;
2262 bucket->next = *startbucket;
2263 *startbucket = bucket;
2276 /* Called upon entering the first function of a file. */
2279 __bb_init_file (struct bb *blocks)
2282 const struct bb_func *p;
2283 long blk, ncounts = blocks->ncounts;
2284 const char **functions = blocks->functions;
2286 /* Set up linked list. */
2287 blocks->zero_word = 1;
2288 blocks->next = bb_head;
2293 || !(blocks->flags = (char *) malloc (sizeof (char) * blocks->ncounts)))
2296 for (blk = 0; blk < ncounts; blk++)
2297 blocks->flags[blk] = 0;
2299 for (blk = 0; blk < ncounts; blk++)
2301 for (p = bb_func_head; p; p = p->next)
2303 if (!strcmp (p->funcname, functions[blk])
2304 && (!p->filename || !strcmp (p->filename, blocks->filename)))
2306 blocks->flags[blk] |= p->mode;
2313 /* Called when exiting from a function. */
2316 __bb_trace_ret (void)
2319 MACHINE_STATE_SAVE("2")
2323 if ((bb_mode & 12) && bb_stacksize > bb_callcount)
2325 bb_src = bb_stack[bb_callcount];
2327 __bb_trace_func_ret ();
2333 MACHINE_STATE_RESTORE("2")
2337 /* Called when entering a function. */
2340 __bb_init_trace_func (struct bb *blocks, unsigned long blockno)
2342 static int trace_init = 0;
2344 MACHINE_STATE_SAVE("3")
2346 if (!blocks->zero_word)
2353 __bb_init_file (blocks);
2363 if (bb_callcount >= bb_stacksize)
2365 size_t newsize = bb_callcount + 100;
2367 bb_stack = (unsigned long *) realloc (bb_stack, newsize);
2372 fprintf (stderr, "Profiler: out of memory\n");
2376 goto stack_overflow;
2378 bb_stacksize = newsize;
2380 bb_stack[bb_callcount] = bb_src;
2391 else if (blocks->flags && (blocks->flags[blockno] & TRACE_ON))
2397 bb_stack[bb_callcount] = bb_src;
2400 MACHINE_STATE_RESTORE("3")
2403 #endif /* not inhibit_libc */
2404 #endif /* not BLOCK_PROFILER_CODE */
2408 unsigned int __shtab[] = {
2409 0x00000001, 0x00000002, 0x00000004, 0x00000008,
2410 0x00000010, 0x00000020, 0x00000040, 0x00000080,
2411 0x00000100, 0x00000200, 0x00000400, 0x00000800,
2412 0x00001000, 0x00002000, 0x00004000, 0x00008000,
2413 0x00010000, 0x00020000, 0x00040000, 0x00080000,
2414 0x00100000, 0x00200000, 0x00400000, 0x00800000,
2415 0x01000000, 0x02000000, 0x04000000, 0x08000000,
2416 0x10000000, 0x20000000, 0x40000000, 0x80000000
2420 #ifdef L_clear_cache
2421 /* Clear part of an instruction cache. */
2423 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
2426 __clear_cache (char *beg __attribute__((__unused__)),
2427 char *end __attribute__((__unused__)))
2429 #ifdef CLEAR_INSN_CACHE
2430 CLEAR_INSN_CACHE (beg, end);
2432 #ifdef INSN_CACHE_SIZE
2433 static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
2434 static int initialized;
2438 typedef (*function_ptr) (void);
2440 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2441 /* It's cheaper to clear the whole cache.
2442 Put in a series of jump instructions so that calling the beginning
2443 of the cache will clear the whole thing. */
2447 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2448 & -INSN_CACHE_LINE_WIDTH);
2449 int end_ptr = ptr + INSN_CACHE_SIZE;
2451 while (ptr < end_ptr)
2453 *(INSTRUCTION_TYPE *)ptr
2454 = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
2455 ptr += INSN_CACHE_LINE_WIDTH;
2457 *(INSTRUCTION_TYPE *) (ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;
2462 /* Call the beginning of the sequence. */
2463 (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2464 & -INSN_CACHE_LINE_WIDTH))
2467 #else /* Cache is large. */
2471 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2472 & -INSN_CACHE_LINE_WIDTH);
2474 while (ptr < (int) array + sizeof array)
2476 *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
2477 ptr += INSN_CACHE_LINE_WIDTH;
2483 /* Find the location in array that occupies the same cache line as BEG. */
2485 offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
2486 start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
2487 & -INSN_CACHE_PLANE_SIZE)
2490 /* Compute the cache alignment of the place to stop clearing. */
2491 #if 0 /* This is not needed for gcc's purposes. */
2492 /* If the block to clear is bigger than a cache plane,
2493 we clear the entire cache, and OFFSET is already correct. */
2494 if (end < beg + INSN_CACHE_PLANE_SIZE)
2496 offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
2497 & -INSN_CACHE_LINE_WIDTH)
2498 & (INSN_CACHE_PLANE_SIZE - 1));
2500 #if INSN_CACHE_DEPTH > 1
2501 end_addr = (start_addr & -INSN_CACHE_PLANE_SIZE) + offset;
2502 if (end_addr <= start_addr)
2503 end_addr += INSN_CACHE_PLANE_SIZE;
2505 for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)
2507 int addr = start_addr + plane * INSN_CACHE_PLANE_SIZE;
2508 int stop = end_addr + plane * INSN_CACHE_PLANE_SIZE;
2510 while (addr != stop)
2512 /* Call the return instruction at ADDR. */
2513 ((function_ptr) addr) ();
2515 addr += INSN_CACHE_LINE_WIDTH;
2518 #else /* just one plane */
2521 /* Call the return instruction at START_ADDR. */
2522 ((function_ptr) start_addr) ();
2524 start_addr += INSN_CACHE_LINE_WIDTH;
2526 while ((start_addr % INSN_CACHE_SIZE) != offset);
2527 #endif /* just one plane */
2528 #endif /* Cache is large */
2529 #endif /* Cache exists */
2530 #endif /* CLEAR_INSN_CACHE */
2533 #endif /* L_clear_cache */
2537 /* Jump to a trampoline, loading the static chain address. */
2539 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
2552 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2556 mprotect (char *addr, int len, int prot)
2573 if (VirtualProtect (addr, len, np, &op))
2579 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2581 #ifdef TRANSFER_FROM_TRAMPOLINE
2582 TRANSFER_FROM_TRAMPOLINE
2585 #if defined (NeXT) && defined (__MACH__)
2587 /* Make stack executable so we can call trampolines on stack.
2588 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2592 #include <mach/mach.h>
2596 __enable_execute_stack (char *addr)
2599 char *eaddr = addr + TRAMPOLINE_SIZE;
2600 vm_address_t a = (vm_address_t) addr;
2602 /* turn on execute access on stack */
2603 r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
2604 if (r != KERN_SUCCESS)
2606 mach_error("vm_protect VM_PROT_ALL", r);
2610 /* We inline the i-cache invalidation for speed */
2612 #ifdef CLEAR_INSN_CACHE
2613 CLEAR_INSN_CACHE (addr, eaddr);
2615 __clear_cache ((int) addr, (int) eaddr);
2619 #endif /* defined (NeXT) && defined (__MACH__) */
2623 /* Make stack executable so we can call trampolines on stack.
2624 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2626 #include <sys/mman.h>
2627 #include <sys/vmparam.h>
2628 #include <machine/machparam.h>
2631 __enable_execute_stack (void)
2634 static unsigned lowest = USRSTACK;
2635 unsigned current = (unsigned) &fp & -NBPG;
2637 if (lowest > current)
2639 unsigned len = lowest - current;
2640 mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);
2644 /* Clear instruction cache in case an old trampoline is in it. */
2647 #endif /* __convex__ */
2651 /* Modified from the Convex code above. */
2653 #include <sys/param.h>
2655 #include <sys/m88kbcs.h>
2658 __enable_execute_stack (void)
2661 static unsigned long lowest = USRSTACK;
2662 unsigned long current = (unsigned long) &save_errno & -NBPC;
2664 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2665 address is seen as 'negative'. That is the case with the stack. */
2668 if (lowest > current)
2670 unsigned len=lowest-current;
2671 memctl(current,len,MCT_TEXT);
2675 memctl(current,NBPC,MCT_TEXT);
2679 #endif /* __sysV88__ */
2683 #include <sys/signal.h>
2686 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2687 so define it here, because we need it in __clear_insn_cache below */
2688 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2689 hence we enable this stuff only if MCT_TEXT is #define'd. */
2704 /* Clear instruction cache so we can call trampolines on stack.
2705 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
2708 __clear_insn_cache (void)
2713 /* Preserve errno, because users would be surprised to have
2714 errno changing without explicitly calling any system-call. */
2717 /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
2718 No need to use an address derived from _start or %sp, as 0 works also. */
2719 memctl(0, 4096, MCT_TEXT);
2724 #endif /* __sysV68__ */
2728 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2730 #include <sys/mman.h>
2731 #include <sys/types.h>
2732 #include <sys/param.h>
2733 #include <sys/vmmac.h>
2735 /* Modified from the Convex code above.
2736 mremap promises to clear the i-cache. */
2739 __enable_execute_stack (void)
2742 if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
2743 PROT_READ|PROT_WRITE|PROT_EXEC))
2745 perror ("mprotect in __enable_execute_stack");
2750 #endif /* __pyr__ */
2752 #if defined (sony_news) && defined (SYSTYPE_BSD)
2755 #include <sys/types.h>
2756 #include <sys/param.h>
2757 #include <syscall.h>
2758 #include <machine/sysnews.h>
2760 /* cacheflush function for NEWS-OS 4.2.
2761 This function is called from trampoline-initialize code
2762 defined in config/mips/mips.h. */
2765 cacheflush (char *beg, int size, int flag)
2767 if (syscall (SYS_sysnews, NEWS_CACHEFLUSH, beg, size, FLUSH_BCACHE))
2769 perror ("cache_flush");
2775 #endif /* sony_news */
2776 #endif /* L_trampoline */
2781 #include "gbl-ctors.h"
2782 /* Some systems use __main in a way incompatible with its use in gcc, in these
2783 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2784 give the same symbol without quotes for an alternative entry point. You
2785 must define both, or neither. */
2787 #define NAME__MAIN "__main"
2788 #define SYMBOL__MAIN __main
2791 #ifdef INIT_SECTION_ASM_OP
2792 #undef HAS_INIT_SECTION
2793 #define HAS_INIT_SECTION
2796 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2798 /* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
2799 code to run constructors. In that case, we need to handle EH here, too. */
2801 #ifdef EH_FRAME_SECTION
2803 extern unsigned char __EH_FRAME_BEGIN__[];
2806 /* Run all the global destructors on exit from the program. */
2809 __do_global_dtors (void)
2811 #ifdef DO_GLOBAL_DTORS_BODY
2812 DO_GLOBAL_DTORS_BODY;
2814 static func_ptr *p = __DTOR_LIST__ + 1;
2821 #if defined (EH_FRAME_SECTION) && !defined (HAS_INIT_SECTION)
2823 static int completed = 0;
2827 __deregister_frame_info (__EH_FRAME_BEGIN__);
2834 #ifndef HAS_INIT_SECTION
2835 /* Run all the global constructors on entry to the program. */
2838 __do_global_ctors (void)
2840 #ifdef EH_FRAME_SECTION
2842 static struct object object;
2843 __register_frame_info (__EH_FRAME_BEGIN__, &object);
2846 DO_GLOBAL_CTORS_BODY;
2847 atexit (__do_global_dtors);
2849 #endif /* no HAS_INIT_SECTION */
2851 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
2852 /* Subroutine called automatically by `main'.
2853 Compiling a global function named `main'
2854 produces an automatic call to this function at the beginning.
2856 For many systems, this routine calls __do_global_ctors.
2857 For systems which support a .init section we use the .init section
2858 to run __do_global_ctors, so we need not do anything here. */
2863 /* Support recursive calls to `main': run initializers just once. */
2864 static int initialized;
2868 __do_global_ctors ();
2871 #endif /* no HAS_INIT_SECTION or INVOKE__main */
2873 #endif /* L__main */
2874 #endif /* __CYGWIN__ */
2878 #include "gbl-ctors.h"
2880 /* Provide default definitions for the lists of constructors and
2881 destructors, so that we don't get linker errors. These symbols are
2882 intentionally bss symbols, so that gld and/or collect will provide
2883 the right values. */
2885 /* We declare the lists here with two elements each,
2886 so that they are valid empty lists if no other definition is loaded.
2888 If we are using the old "set" extensions to have the gnu linker
2889 collect ctors and dtors, then __CTOR_LIST__ and __DTOR_LIST__
2890 must be in the bss/common section.
2892 Long term no port should use those extensions. But many still do. */
2893 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
2894 #if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
2895 func_ptr __CTOR_LIST__[2] = {0, 0};
2896 func_ptr __DTOR_LIST__[2] = {0, 0};
2898 func_ptr __CTOR_LIST__[2];
2899 func_ptr __DTOR_LIST__[2];
2901 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
2902 #endif /* L_ctors */
2906 #include "gbl-ctors.h"
2914 static func_ptr *atexit_chain = 0;
2915 static long atexit_chain_length = 0;
2916 static volatile long last_atexit_chain_slot = -1;
2919 atexit (func_ptr func)
2921 if (++last_atexit_chain_slot == atexit_chain_length)
2923 atexit_chain_length += 32;
2925 atexit_chain = (func_ptr *) realloc (atexit_chain, atexit_chain_length
2926 * sizeof (func_ptr));
2928 atexit_chain = (func_ptr *) malloc (atexit_chain_length
2929 * sizeof (func_ptr));
2932 atexit_chain_length = 0;
2933 last_atexit_chain_slot = -1;
2938 atexit_chain[last_atexit_chain_slot] = func;
2942 extern void _cleanup (void);
2943 extern void _exit (int) __attribute__ ((__noreturn__));
2950 for ( ; last_atexit_chain_slot-- >= 0; )
2952 (*atexit_chain[last_atexit_chain_slot + 1]) ();
2953 atexit_chain[last_atexit_chain_slot + 1] = 0;
2955 free (atexit_chain);
2968 /* Simple; we just need a wrapper for ON_EXIT. */
2970 atexit (func_ptr func)
2972 return ON_EXIT (func);
2975 #endif /* ON_EXIT */
2976 #endif /* NEED_ATEXIT */
2984 /* Shared exception handling support routines. */
2987 __default_terminate (void)
2992 void (*__terminate_func)(void) __attribute__ ((__noreturn__)) =
2993 __default_terminate;
2998 (*__terminate_func)();
3002 __throw_type_match (void *catch_type, void *throw_type, void *obj)
3005 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
3006 catch_type, throw_type);
3008 if (strcmp ((const char *)catch_type, (const char *)throw_type) == 0)
3019 /* Include definitions of EH context and table layout */
3021 #include "eh-common.h"
3022 #ifndef inhibit_libc
3026 /* Allocate and return a new EH context structure. */
3030 new_eh_context (void)
3032 struct eh_full_context {
3033 struct eh_context c;
3035 } *ehfc = (struct eh_full_context *) malloc (sizeof *ehfc);
3040 memset (ehfc, 0, sizeof *ehfc);
3042 ehfc->c.dynamic_handler_chain = (void **) ehfc->top_elt;
3044 /* This should optimize out entirely. This should always be true,
3045 but just in case it ever isn't, don't allow bogus code to be
3048 if ((void*)(&ehfc->c) != (void*)ehfc)
3054 static __gthread_key_t eh_context_key;
3056 /* Destructor for struct eh_context. */
3058 eh_context_free (void *ptr)
3060 __gthread_key_dtor (eh_context_key, ptr);
3066 /* Pointer to function to return EH context. */
3068 static struct eh_context *eh_context_initialize (void);
3069 static struct eh_context *eh_context_static (void);
3071 static struct eh_context *eh_context_specific (void);
3074 static struct eh_context *(*get_eh_context) (void) = &eh_context_initialize;
3076 /* Routine to get EH context.
3077 This one will simply call the function pointer. */
3080 __get_eh_context (void)
3082 return (void *) (*get_eh_context) ();
3085 /* Get and set the language specific info pointer. */
3088 __get_eh_info (void)
3090 struct eh_context *eh = (*get_eh_context) ();
3094 #ifdef DWARF2_UNWIND_INFO
3095 static int dwarf_reg_size_table_initialized = 0;
3096 static char dwarf_reg_size_table[DWARF_FRAME_REGISTERS];
3099 init_reg_size_table (void)
3101 __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table);
3102 dwarf_reg_size_table_initialized = 1;
3108 eh_threads_initialize (void)
3110 /* Try to create the key. If it fails, revert to static method,
3111 otherwise start using thread specific EH contexts. */
3112 if (__gthread_key_create (&eh_context_key, &eh_context_free) == 0)
3113 get_eh_context = &eh_context_specific;
3115 get_eh_context = &eh_context_static;
3117 #endif /* no __GTHREADS */
3119 /* Initialize EH context.
3120 This will be called only once, since we change GET_EH_CONTEXT
3121 pointer to another routine. */
3123 static struct eh_context *
3124 eh_context_initialize (void)
3128 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
3129 /* Make sure that get_eh_context does not point to us anymore.
3130 Some systems have dummy thread routines in their libc that
3131 return a success (Solaris 2.6 for example). */
3132 if (__gthread_once (&once, eh_threads_initialize) != 0
3133 || get_eh_context == &eh_context_initialize)
3135 /* Use static version of EH context. */
3136 get_eh_context = &eh_context_static;
3138 #ifdef DWARF2_UNWIND_INFO
3140 static __gthread_once_t once_regsizes = __GTHREAD_ONCE_INIT;
3141 if (__gthread_once (&once_regsizes, init_reg_size_table) != 0
3142 || ! dwarf_reg_size_table_initialized)
3143 init_reg_size_table ();
3147 #else /* no __GTHREADS */
3149 /* Use static version of EH context. */
3150 get_eh_context = &eh_context_static;
3152 #ifdef DWARF2_UNWIND_INFO
3153 init_reg_size_table ();
3156 #endif /* no __GTHREADS */
3158 return (*get_eh_context) ();
3161 /* Return a static EH context. */
3163 static struct eh_context *
3164 eh_context_static (void)
3166 static struct eh_context eh;
3167 static int initialized;
3168 static void *top_elt[2];
3173 memset (&eh, 0, sizeof eh);
3174 eh.dynamic_handler_chain = top_elt;
3180 /* Return a thread specific EH context. */
3182 static struct eh_context *
3183 eh_context_specific (void)
3185 struct eh_context *eh;
3186 eh = (struct eh_context *) __gthread_getspecific (eh_context_key);
3189 eh = new_eh_context ();
3190 if (__gthread_setspecific (eh_context_key, (void *) eh) != 0)
3198 /* Support routines for setjmp/longjmp exception handling. */
3200 /* Calls to __sjthrow are generated by the compiler when an exception
3201 is raised when using the setjmp/longjmp exception handling codegen
3204 #ifdef DONT_USE_BUILTIN_SETJMP
3205 extern void longjmp (void *, int);
3208 /* Routine to get the head of the current thread's dynamic handler chain
3209 use for exception handling. */
3212 __get_dynamic_handler_chain (void)
3214 struct eh_context *eh = (*get_eh_context) ();
3215 return &eh->dynamic_handler_chain;
3218 /* This is used to throw an exception when the setjmp/longjmp codegen
3219 method is used for exception handling.
3221 We call __terminate if there are no handlers left. Otherwise we run the
3222 cleanup actions off the dynamic cleanup stack, and pop the top of the
3223 dynamic handler chain, and use longjmp to transfer back to the associated
3229 struct eh_context *eh = (*get_eh_context) ();
3230 void ***dhc = &eh->dynamic_handler_chain;
3232 void (*func)(void *, int);
3234 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3235 void ***cleanup = (void***)&(*dhc)[1];
3237 /* If there are any cleanups in the chain, run them now. */
3241 void **buf = (void**)store;
3246 #ifdef DONT_USE_BUILTIN_SETJMP
3247 if (! setjmp (&buf[2]))
3249 if (! __builtin_setjmp (&buf[2]))
3255 func = (void(*)(void*, int))cleanup[0][1];
3256 arg = (void*)cleanup[0][2];
3258 /* Update this before running the cleanup. */
3259 cleanup[0] = (void **)cleanup[0][0];
3272 /* We must call terminate if we try and rethrow an exception, when
3273 there is no exception currently active and when there are no
3275 if (! eh->info || (*dhc)[0] == 0)
3278 /* Find the jmpbuf associated with the top element of the dynamic
3279 handler chain. The jumpbuf starts two words into the buffer. */
3280 jmpbuf = &(*dhc)[2];
3282 /* Then we pop the top element off the dynamic handler chain. */
3283 *dhc = (void**)(*dhc)[0];
3285 /* And then we jump to the handler. */
3287 #ifdef DONT_USE_BUILTIN_SETJMP
3288 longjmp (jmpbuf, 1);
3290 __builtin_longjmp (jmpbuf, 1);
3294 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3295 handler, then pop the handler off the dynamic handler stack, and
3296 then throw. This is used to skip the first handler, and transfer
3297 control to the next handler in the dynamic handler stack. */
3300 __sjpopnthrow (void)
3302 struct eh_context *eh = (*get_eh_context) ();
3303 void ***dhc = &eh->dynamic_handler_chain;
3304 void (*func)(void *, int);
3306 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3307 void ***cleanup = (void***)&(*dhc)[1];
3309 /* If there are any cleanups in the chain, run them now. */
3313 void **buf = (void**)store;
3318 #ifdef DONT_USE_BUILTIN_SETJMP
3319 if (! setjmp (&buf[2]))
3321 if (! __builtin_setjmp (&buf[2]))
3327 func = (void(*)(void*, int))cleanup[0][1];
3328 arg = (void*)cleanup[0][2];
3330 /* Update this before running the cleanup. */
3331 cleanup[0] = (void **)cleanup[0][0];
3344 /* Then we pop the top element off the dynamic handler chain. */
3345 *dhc = (void**)(*dhc)[0];
3350 /* Support code for all exception region-based exception handling. */
3353 __eh_rtime_match (void *rtime)
3356 __eh_matcher matcher;
3359 info = *(__get_eh_info ());
3360 matcher = ((__eh_info *)info)->match_function;
3363 #ifndef inhibit_libc
3364 fprintf (stderr, "Internal Compiler Bug: No runtime type matcher.");
3368 ret = (*matcher) (info, rtime, (void *)0);
3369 return (ret != NULL);
3372 /* This value identifies the place from which an exception is being
3375 #ifdef EH_TABLE_LOOKUP
3381 #ifdef DWARF2_UNWIND_INFO
3383 /* Return the table version of an exception descriptor */
3386 __get_eh_table_version (exception_descriptor *table)
3388 return table->lang.version;
3391 /* Return the originating table language of an exception descriptor */
3394 __get_eh_table_language (exception_descriptor *table)
3396 return table->lang.language;
3399 /* This routine takes a PC and a pointer to the exception region TABLE for
3400 its translation unit, and returns the address of the exception handler
3401 associated with the closest exception table handler entry associated
3402 with that PC, or 0 if there are no table entries the PC fits in.
3404 In the event of a tie, we have to give the last entry, as it represents
3408 old_find_exception_handler (void *pc, old_exception_table *table)
3415 /* We can't do a binary search because the table isn't guaranteed
3416 to be sorted from function to function. */
3417 for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
3419 if (table[pos].start_region <= pc && table[pos].end_region > pc)
3421 /* This can apply. Make sure it is at least as small as
3422 the previous best. */
3423 if (best == -1 || (table[pos].end_region <= table[best].end_region
3424 && table[pos].start_region >= table[best].start_region))
3427 /* But it is sorted by starting PC within a function. */
3428 else if (best >= 0 && table[pos].start_region > pc)
3432 return table[best].exception_handler;
3438 /* find_exception_handler finds the correct handler, if there is one, to
3439 handle an exception.
3440 returns a pointer to the handler to which control should be transferred,
3441 or NULL if there is nothing left.
3443 PC - pc where the exception originates. If this is a rethrow,
3444 then this starts out as a pointer to the exception table
3445 entry we wish to rethrow out of.
3446 TABLE - exception table for the current module.
3447 EH_INFO - eh info pointer for this exception.
3448 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3449 CLEANUP - returned flag indicating whether this is a cleanup handler.
3452 find_exception_handler (void *pc, exception_descriptor *table,
3453 __eh_info *eh_info, int rethrow, int *cleanup)
3456 void *retval = NULL;
3461 /* The new model assumed the table is sorted inner-most out so the
3462 first region we find which matches is the correct one */
3464 exception_table *tab = &(table->table[0]);
3466 /* Subtract 1 from the PC to avoid hitting the next region */
3469 /* pc is actually the region table entry to rethrow out of */
3470 pos = ((exception_table *) pc) - tab;
3471 pc = ((exception_table *) pc)->end_region - 1;
3473 /* The label is always on the LAST handler entry for a region,
3474 so we know the next entry is a different region, even if the
3475 addresses are the same. Make sure it's not the end of the table, though. */
3476 if (tab[pos].start_region != (void *) -1)
3482 /* We can't do a binary search because the table is in inner-most
3483 to outermost address ranges within functions */
3484 for ( ; tab[pos].start_region != (void *) -1; pos++)
3486 if (tab[pos].start_region <= pc && tab[pos].end_region > pc)
3488 if (tab[pos].match_info)
3490 __eh_matcher matcher = eh_info->match_function;
3491 /* match info but no matcher is NOT a match */
3494 void *ret = (*matcher)((void *) eh_info,
3495 tab[pos].match_info, table);
3499 retval = tab[pos].exception_handler;
3508 retval = tab[pos].exception_handler;
3515 #endif /* DWARF2_UNWIND_INFO */
3516 #endif /* EH_TABLE_LOOKUP */
3518 #ifdef DWARF2_UNWIND_INFO
3519 /* Support code for exception handling using static unwind information. */
3523 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3524 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3525 avoid a warning about casting between int and pointer of different
3528 typedef int ptr_type __attribute__ ((mode (pointer)));
3530 #ifdef INCOMING_REGNO
3531 /* Is the saved value for register REG in frame UDATA stored in a register
3532 window in the previous frame? */
3534 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3535 to use the macro here. One wonders, though, that perhaps TARGET_FLAT
3536 compiled functions won't work with the frame-unwind stuff here.
3537 Perhaps the entirety of in_reg_window should be conditional on having
3538 seen a DW_CFA_GNU_window_save? */
3539 #define target_flags 0
3542 in_reg_window (int reg, frame_state *udata)
3544 if (udata->saved[reg] == REG_SAVED_REG)
3545 return INCOMING_REGNO (reg) == reg;
3546 if (udata->saved[reg] != REG_SAVED_OFFSET)
3549 #ifdef STACK_GROWS_DOWNWARD
3550 return udata->reg_or_offset[reg] > 0;
3552 return udata->reg_or_offset[reg] < 0;
3557 in_reg_window (int reg __attribute__ ((__unused__)),
3558 frame_state *udata __attribute__ ((__unused__)))
3562 #endif /* INCOMING_REGNO */
3564 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3565 frame called by UDATA or 0. */
3568 get_reg_addr (unsigned reg, frame_state *udata, frame_state *sub_udata)
3570 while (udata->saved[reg] == REG_SAVED_REG)
3572 reg = udata->reg_or_offset[reg];
3573 if (in_reg_window (reg, udata))
3579 if (udata->saved[reg] == REG_SAVED_OFFSET)
3580 return (word_type *)(udata->cfa + udata->reg_or_offset[reg]);
3585 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3586    frame called by UDATA or 0. */
3588 static inline void *
3589 get_reg (unsigned reg, frame_state *udata, frame_state *sub_udata)
/* Read the word-sized save slot and convert it to a pointer via
   ptr_type, the double cast avoiding int/pointer size warnings on
   ABIs such as Irix 6 n32 (see the typedef comment above).  */
3591   return (void *)(ptr_type) *get_reg_addr (reg, udata, sub_udata);
3594 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
/* NOTE(review): the `static void` line and braces are elided in this
   excerpt (numbering jumps 3594->3597).  */
3597 put_reg (unsigned reg, void *val, frame_state *udata)
/* Inverse of get_reg: store VAL into the save slot, word-sized, using
   the same ptr_type double cast.  */
3599   *get_reg_addr (reg, udata, NULL) = (word_type)(ptr_type) val;
3602 /* Copy the saved value for register REG from frame UDATA to frame
3603    TARGET_UDATA. Unlike the previous two functions, this can handle
3604    registers that are not one word large. */
3607 copy_reg (unsigned reg, frame_state *udata, frame_state *target_udata)
3609   word_type *preg = get_reg_addr (reg, udata, NULL);
3610   word_type *ptreg = get_reg_addr (reg, target_udata, NULL);
/* dwarf_reg_size_table gives the byte size of each DWARF register,
   so multi-word registers (e.g. FP registers) are copied whole.  */
3612   memcpy (ptreg, preg, dwarf_reg_size_table [reg]);
3615 /* Retrieve the return address for frame UDATA. */
3617 static inline void *
3618 get_return_addr (frame_state *udata, frame_state *sub_udata)
/* The raw saved value in the return-address column may be encoded
   (e.g. signature bits on some targets); __builtin_extract_return_addr
   converts it to an actual code address.  */
3620   return __builtin_extract_return_addr
3621     (get_reg (udata->retaddr_column, udata, sub_udata));
3624 /* Overwrite the return address for frame UDATA with VAL. */
3627 put_return_addr (void *val, frame_state *udata)
/* __builtin_frob_return_addr is the inverse of extract_return_addr:
   it re-encodes a code address into the form the target stores.  */
3629   val = __builtin_frob_return_addr (val);
3630   put_reg (udata->retaddr_column, val, udata);
3633 /* Given the current frame UDATA and its return address PC, return the
3634    information about the calling frame in CALLER_UDATA. */
/* NOTE(review): numbering jumps (3639->3643, 3647->3649) indicate the
   failure check after __frame_state_for and the `else` keyword are
   elided from this excerpt; visible code kept verbatim.  */
3637 next_stack_level (void *pc, frame_state *udata, frame_state *caller_udata)
/* Look up the caller's unwind state from the PC; presumably returns 0
   on failure in the elided lines -- confirm against the full source.  */
3639   caller_udata = __frame_state_for (pc, caller_udata);
3643   /* Now go back to our caller's stack frame. If our caller's CFA register
3644      was saved in our stack frame, restore it; otherwise, assume the CFA
3645      register is SP and restore it to our CFA value. */
3646   if (udata->saved[caller_udata->cfa_reg])
3647     caller_udata->cfa = get_reg (caller_udata->cfa_reg, udata, 0);
3649     caller_udata->cfa = udata->cfa;
/* The CFA of the caller is its saved base plus the CFA offset recorded
   in its unwind info.  */
3650   caller_udata->cfa += caller_udata->cfa_offset;
3652   return caller_udata;
3655 /* Hook to call before __terminate if only cleanup handlers remain. */
/* Deliberately (near-)empty: exists so a debugger can set a breakpoint
   here and inspect the throw state before cleanups run (see the
   throw_helper commentary below).  Return type/body elided in this
   excerpt.  */
3657 __unwinding_cleanup (void)
3661 /* throw_helper performs some of the common grunt work for a throw. This
3662    routine is called by throw and rethrows. This is pretty much split
3663    out from the old __throw routine. An addition has been added which allows
3664    for a dummy call to a routine __unwinding_cleanup() when there are nothing
3665    but cleanups remaining. This allows a debugger to examine the state
3666    at which the throw was executed, before any cleanups, rather than
3667    at the terminate point after the stack has been unwound.
3669    EH is the current eh_context structure.
3670    PC is the address of the call to __throw.
3671    MY_UDATA is the unwind information for __throw.
3672    OFFSET_P is where we return the SP adjustment offset. */
/* NOTE(review): this excerpt elides many lines (internal numbering
   jumps throughout, e.g. 3695->3698, 3741->3754, 3762->3765); loop
   braces, several declarations (handler, args_size, i, cleanup,
   new_eh_model), the __terminate path, and various conditionals are
   not visible.  All visible code is kept verbatim.  */
3675 throw_helper (struct eh_context *eh, void *pc, frame_state *my_udata,
3678   frame_state ustruct2, *udata = &ustruct2;
3679   frame_state ustruct;
3680   frame_state *sub_udata = &ustruct;
3681   void *saved_pc = pc;
/* State remembered for the first (innermost) cleanup-only handler, so
   the search can continue looking for a real handler past it.  */
3683   void *handler_p = 0;
3685   frame_state saved_ustruct;
3688   int only_cleanup = 0;
3690   int saved_state = 0;
3692   __eh_info *eh_info = (__eh_info *)eh->info;
3694   /* Do we find a handler based on a re-throw PC? */
3695   if (eh->table_index != (void *) 0)
/* Phase 1: walk up the stack looking for a handler, starting from the
   unwind state of __throw itself.  */
3698   memcpy (udata, my_udata, sizeof (*udata));
3700   handler = (void *) 0;
3703       frame_state *p = udata;
3704       udata = next_stack_level (pc, udata, sub_udata);
3707       /* If we couldn't find the next frame, we lose. */
3711       if (udata->eh_ptr == NULL)
/* Distinguish the new-model EH tables (tagged exception_descriptor)
   from the old-model ones by the runtime id field.  */
3714         new_eh_model = (((exception_descriptor *)(udata->eh_ptr))->
3715                         runtime_id_field == NEW_EH_RUNTIME);
/* Re-throw: search from the saved table index once, then clear it.  */
3720         handler = find_exception_handler (eh->table_index, udata->eh_ptr,
3721                                           eh_info, 1, &cleanup);
3722         eh->table_index = (void *)0;
3726         handler = find_exception_handler (pc, udata->eh_ptr, eh_info,
3729         handler = old_find_exception_handler (pc, udata->eh_ptr);
3731       /* If we found one, we can stop searching, if its not a cleanup.
3732          for cleanups, we save the state, and keep looking. This allows
3733          us to call a debug hook if there are nothing but cleanups left. */
3740               saved_ustruct = *udata;
3741               handler_p = handler;
3754       /* Otherwise, we continue searching. We subtract 1 from PC to avoid
3755          hitting the beginning of the next region. */
3756       pc = get_return_addr (udata, sub_udata) - 1;
/* Only cleanups were found: restore the first cleanup's state and let
   the debugger hook fire before unwinding.  */
3761       udata = &saved_ustruct;
3762       handler = handler_p;
3765       __unwinding_cleanup ();
3768   /* If we haven't found a handler by now, this is an unhandled
3773   eh->handler_label = handler;
3775   args_size = udata->args_size;
3778   /* We found a handler in the throw context, no need to unwind. */
/* Phase 2: unwind.  */
3784   /* Unwind all the frames between this one and the handler by copying
3785      their saved register values into our register save slots. */
3787   /* Remember the PC where we found the handler. */
3788   void *handler_pc = pc;
3790   /* Start from the throw context again. */
3792   memcpy (udata, my_udata, sizeof (*udata));
3794   while (pc != handler_pc)
3796       frame_state *p = udata;
3797       udata = next_stack_level (pc, udata, sub_udata);
3800       for (i = 0; i < DWARF_FRAME_REGISTERS; ++i)
3801         if (i != udata->retaddr_column && udata->saved[i])
3803             /* If you modify the saved value of the return address
3804                register on the SPARC, you modify the return address for
3805                your caller's frame. Don't do that here, as it will
3806                confuse get_return_addr. */
3807             if (in_reg_window (i, udata)
3808                 && udata->saved[udata->retaddr_column] == REG_SAVED_REG
3809                 && udata->reg_or_offset[udata->retaddr_column] == i)
3811             copy_reg (i, udata, my_udata);
3814       pc = get_return_addr (udata, sub_udata) - 1;
3817   /* But we do need to update the saved return address register from
3818      the last frame we unwind, or the handler frame will have the wrong
3820   if (udata->saved[udata->retaddr_column] == REG_SAVED_REG)
3822       i = udata->reg_or_offset[udata->retaddr_column];
3823       if (in_reg_window (i, udata))
3824         copy_reg (i, udata, my_udata);
3827   /* udata now refers to the frame called by the handler frame. */
3829   /* We adjust SP by the difference between __throw's CFA and the CFA for
3830      the frame called by the handler frame, because those CFAs correspond
3831      to the SP values at the two call sites. We need to further adjust by
3832      the args_size of the handler frame itself to get the handler frame's
3833      SP from before the args were pushed for that call. */
3834 #ifdef STACK_GROWS_DOWNWARD
3835   *offset_p = udata->cfa - my_udata->cfa + args_size;
3837   *offset_p = my_udata->cfa - udata->cfa - args_size;
3844 /* We first search for an exception handler, and if we don't find
3845    it, we call __terminate on the current stack frame so that we may
3846    use the debugger to walk the stack and understand why no handler
3849    If we find one, then we unwind the frames down to the one that
3850    has the handler and transfer control into the handler. */
3852 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
/* NOTE(review): the function header line (`void __throw (void)`), its
   braces, the local declarations of pc/handler/offset, the `label:`
   used with &&label, and the terminate check are elided in this
   excerpt (numbering jumps 3852->3857, 3866->3871, etc.); visible
   code kept verbatim.  */
3857   struct eh_context *eh = (*get_eh_context) ();
3861   /* XXX maybe make my_ustruct static so we don't have to look it up for
3863   frame_state my_ustruct, *my_udata = &my_ustruct;
3865   /* This is required for C++ semantics. We must call terminate if we
3866      try and rethrow an exception, when there is no exception currently
3871   /* Start at our stack frame. */
/* &&label is a GNU C label-address; it gives a PC inside __throw so
   __frame_state_for can compute __throw's own unwind state.  */
3873   my_udata = __frame_state_for (&&label, my_udata);
3877   /* We need to get the value from the CFA register. */
3878   my_udata->cfa = __builtin_dwarf_cfa ();
3880   /* Do any necessary initialization to access arbitrary stack frames.
3881      On the SPARC, this means flushing the register windows. */
3882   __builtin_unwind_init ();
3884   /* Now reset pc to the right throw point. */
/* -1 so PC falls inside the call site's EH region, not at the start
   of the following one.  */
3885   pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3887   handler = throw_helper (eh, pc, my_udata, &offset);
/* Transfer control: adjust SP by OFFSET and jump to HANDLER with the
   eh_context pointer in the EH return data register.  Does not return.  */
3891   __builtin_eh_return ((void *)eh, offset, handler);
3893   /* Epilogue: restore the handler frame's register values and return
3897 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
/* Re-throw entry point: identical in shape to __throw above, except it
   primes eh->table_index so throw_helper resumes the handler search
   from the region after INDEX rather than from the throw PC.
   NOTE(review): as with __throw, the function header line, braces,
   local declarations, `label:` and terminate check are elided in this
   excerpt; visible code kept verbatim.  */
3900 __rethrow (void *index)
3902   struct eh_context *eh = (*get_eh_context) ();
3906   /* XXX maybe make my_ustruct static so we don't have to look it up for
3908   frame_state my_ustruct, *my_udata = &my_ustruct;
3910   /* This is required for C++ semantics. We must call terminate if we
3911      try and rethrow an exception, when there is no exception currently
3916   /* This is the table index we want to rethrow from. The value of
3917      the END_REGION label is used for the PC of the throw, and the
3918      search begins with the next table entry. */
3919   eh->table_index = index;
3921   /* Start at our stack frame. */
3923   my_udata = __frame_state_for (&&label, my_udata);
3927   /* We need to get the value from the CFA register. */
3928   my_udata->cfa = __builtin_dwarf_cfa ();
3930   /* Do any necessary initialization to access arbitrary stack frames.
3931      On the SPARC, this means flushing the register windows. */
3932   __builtin_unwind_init ();
3934   /* Now reset pc to the right throw point. */
3935   pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3937   handler = throw_helper (eh, pc, my_udata, &offset);
/* Adjust SP and jump into the handler; does not return.  */
3941   __builtin_eh_return ((void *)eh, offset, handler);
3943   /* Epilogue: restore the handler frame's register values and return
3946 #endif /* DWARF2_UNWIND_INFO */
3951 #ifndef inhibit_libc
3952 /* This gets us __GNU_LIBRARY__. */
3953 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
3956 #ifdef __GNU_LIBRARY__
3957 /* Avoid forcing the library's meaning of `write' on the user program
3958 by using the "internal" name (for use within the library) */
3959 #define write(fd, buf, n) __write((fd), (buf), (n))
3961 #endif /* inhibit_libc */
3963 #define MESSAGE "pure virtual method called\n"
3966 __pure_virtual (void)
3968 #ifndef inhibit_libc
3969 write (2, MESSAGE, sizeof (MESSAGE) - 1);