1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 92, 93, 94, 95, 96, 97, 98, 1999, 2000
4 Free Software Foundation, Inc.
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 GNU CC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GNU CC; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
23 /* As a special exception, if you link this library with other files,
24 some of which are compiled with GCC, to produce an executable,
25 this library does not by itself cause the resulting executable
26 to be covered by the GNU General Public License.
27 This exception does not however invalidate any other reasons why
28 the executable file might be covered by the GNU General Public License. */
30 /* It is incorrect to include config.h here, because this file is being
31 compiled for the target, and hence definitions concerning only the host
40 /* Don't use `fancy_abort' here even if config.h says to use it. */
45 /* In a cross-compilation situation, default to inhibiting compilation
46 of routines that use libc. */
48 #if defined(CROSS_COMPILE) && !defined(inhibit_libc)
54 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
55 #if defined (L_divdi3) || defined (L_moddi3)
67 w.s.high = -uu.s.high - ((UWtype) w.s.low > 0);
73 /* Unless shift functions are defined with full ANSI prototypes,
74 parameter b will be promoted to int if word_type is smaller than an int. */
77 __lshrdi3 (DWtype u, word_type b)
88 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
92 w.s.low = (UWtype)uu.s.high >> -bm;
96 UWtype carries = (UWtype)uu.s.high << bm;
97 w.s.high = (UWtype)uu.s.high >> b;
98 w.s.low = ((UWtype)uu.s.low >> b) | carries;
107 __ashldi3 (DWtype u, word_type b)
118 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
122 w.s.high = (UWtype)uu.s.low << -bm;
126 UWtype carries = (UWtype)uu.s.low >> bm;
127 w.s.low = (UWtype)uu.s.low << b;
128 w.s.high = ((UWtype)uu.s.high << b) | carries;
137 __ashrdi3 (DWtype u, word_type b)
148 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
151 /* w.s.high = 1..1 or 0..0 */
152 w.s.high = uu.s.high >> (sizeof (Wtype) * BITS_PER_UNIT - 1);
153 w.s.low = uu.s.high >> -bm;
157 UWtype carries = (UWtype)uu.s.high << bm;
158 w.s.high = uu.s.high >> b;
159 w.s.low = ((UWtype)uu.s.low >> b) | carries;
173 w.s.low = ffs (uu.s.low);
176 w.s.low = ffs (uu.s.high);
179 w.s.low += BITS_PER_UNIT * sizeof (Wtype);
188 __muldi3 (DWtype u, DWtype v)
196 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
197 w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
198 + (UWtype) uu.s.high * (UWtype) vv.s.low);
205 #if defined (sdiv_qrnnd)
207 __udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
214 if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
216 /* dividend, divisor, and quotient are nonnegative */
217 sdiv_qrnnd (q, r, a1, a0, d);
221 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
222 sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
223 /* Divide (c1*2^32 + c0) by d */
224 sdiv_qrnnd (q, r, c1, c0, d);
225 /* Add 2^31 to quotient */
226 q += (UWtype) 1 << (W_TYPE_SIZE - 1);
231 b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
232 c1 = a1 >> 1; /* A/2 */
233 c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);
235 if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
237 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
239 r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
256 else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
259 c0 = ~c0; /* logical NOT */
261 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
263 q = ~q; /* (A/2)/b1 */
266 r = 2*r + (a0 & 1); /* A/(2*b1) */
284 else /* Implies c1 = b1 */
285 { /* Hence a1 = d - 1 = 2*b1 - 1 */
303 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
305 __udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
306 UWtype a1 __attribute__ ((__unused__)),
307 UWtype a0 __attribute__ ((__unused__)),
308 UWtype d __attribute__ ((__unused__)))
315 #if (defined (L_udivdi3) || defined (L_divdi3) || \
316 defined (L_umoddi3) || defined (L_moddi3))
321 static const UQItype __clz_tab[] =
323 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
324 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
325 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
326 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
327 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
328 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
329 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
330 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
333 #if (defined (L_udivdi3) || defined (L_divdi3) || \
334 defined (L_umoddi3) || defined (L_moddi3))
338 __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
343 UWtype d0, d1, n0, n1, n2;
355 #if !UDIV_NEEDS_NORMALIZATION
362 udiv_qrnnd (q0, n0, n1, n0, d0);
365 /* Remainder in n0. */
372 d0 = 1 / d0; /* Divide intentionally by zero. */
374 udiv_qrnnd (q1, n1, 0, n1, d0);
375 udiv_qrnnd (q0, n0, n1, n0, d0);
377 /* Remainder in n0. */
388 #else /* UDIV_NEEDS_NORMALIZATION */
396 count_leading_zeros (bm, d0);
400 /* Normalize, i.e. make the most significant bit of the
404 n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
408 udiv_qrnnd (q0, n0, n1, n0, d0);
411 /* Remainder in n0 >> bm. */
418 d0 = 1 / d0; /* Divide intentionally by zero. */
420 count_leading_zeros (bm, d0);
424 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
425 conclude (the most significant bit of n1 is set) /\ (the
426 leading quotient digit q1 = 1).
428 This special case is necessary, not an optimization.
429 (Shifts counts of W_TYPE_SIZE are undefined.) */
438 b = W_TYPE_SIZE - bm;
442 n1 = (n1 << bm) | (n0 >> b);
445 udiv_qrnnd (q1, n1, n2, n1, d0);
450 udiv_qrnnd (q0, n0, n1, n0, d0);
452 /* Remainder in n0 >> bm. */
462 #endif /* UDIV_NEEDS_NORMALIZATION */
473 /* Remainder in n1n0. */
485 count_leading_zeros (bm, d1);
488 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
489 conclude (the most significant bit of n1 is set) /\ (the
490 quotient digit q0 = 0 or 1).
492 This special case is necessary, not an optimization. */
494 /* The condition on the next line takes advantage of that
495 n1 >= d1 (true due to program flow). */
496 if (n1 > d1 || n0 >= d0)
499 sub_ddmmss (n1, n0, n1, n0, d1, d0);
518 b = W_TYPE_SIZE - bm;
520 d1 = (d1 << bm) | (d0 >> b);
523 n1 = (n1 << bm) | (n0 >> b);
526 udiv_qrnnd (q0, n1, n2, n1, d1);
527 umul_ppmm (m1, m0, q0, d0);
529 if (m1 > n1 || (m1 == n1 && m0 > n0))
532 sub_ddmmss (m1, m0, m1, m0, d1, d0);
537 /* Remainder in (n1n0 - m1m0) >> bm. */
540 sub_ddmmss (n1, n0, n1, n0, m1, m0);
541 rr.s.low = (n1 << b) | (n0 >> bm);
542 rr.s.high = n1 >> bm;
557 __divdi3 (DWtype u, DWtype v)
568 uu.ll = __negdi2 (uu.ll);
571 vv.ll = __negdi2 (vv.ll);
573 w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
583 __moddi3 (DWtype u, DWtype v)
594 uu.ll = __negdi2 (uu.ll);
596 vv.ll = __negdi2 (vv.ll);
598 (void) __udivmoddi4 (uu.ll, vv.ll, &w);
608 __umoddi3 (UDWtype u, UDWtype v)
612 (void) __udivmoddi4 (u, v, &w);
620 __udivdi3 (UDWtype n, UDWtype d)
622 return __udivmoddi4 (n, d, (UDWtype *) 0);
628 __cmpdi2 (DWtype a, DWtype b)
632 au.ll = a, bu.ll = b;
634 if (au.s.high < bu.s.high)
636 else if (au.s.high > bu.s.high)
638 if ((UWtype) au.s.low < (UWtype) bu.s.low)
640 else if ((UWtype) au.s.low > (UWtype) bu.s.low)
648 __ucmpdi2 (DWtype a, DWtype b)
652 au.ll = a, bu.ll = b;
654 if ((UWtype) au.s.high < (UWtype) bu.s.high)
656 else if ((UWtype) au.s.high > (UWtype) bu.s.high)
658 if ((UWtype) au.s.low < (UWtype) bu.s.low)
660 else if ((UWtype) au.s.low > (UWtype) bu.s.low)
666 #if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
667 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
668 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
671 __fixunstfdi (TFtype a)
679 /* Compute high word of result, as a flonum. */
680 b = (a / HIGH_WORD_COEFF);
681 /* Convert that to fixed (but not to DWtype!),
682 and shift it into the high word. */
685 /* Remove high part from the TFtype, leaving the low part as flonum. */
687 /* Convert that to fixed (but not to DWtype!) and add it in.
688 Sometimes A comes out negative. This is significant, since
689 A has more bits than a long int does. */
698 #if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
703 return - __fixunstfdi (-a);
704 return __fixunstfdi (a);
708 #if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
709 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
710 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
713 __fixunsxfdi (XFtype a)
721 /* Compute high word of result, as a flonum. */
722 b = (a / HIGH_WORD_COEFF);
723 /* Convert that to fixed (but not to DWtype!),
724 and shift it into the high word. */
727 /* Remove high part from the XFtype, leaving the low part as flonum. */
729 /* Convert that to fixed (but not to DWtype!) and add it in.
730 Sometimes A comes out negative. This is significant, since
731 A has more bits than a long int does. */
740 #if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
745 return - __fixunsxfdi (-a);
746 return __fixunsxfdi (a);
751 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
752 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
755 __fixunsdfdi (DFtype a)
763 /* Compute high word of result, as a flonum. */
764 b = (a / HIGH_WORD_COEFF);
765 /* Convert that to fixed (but not to DWtype!),
766 and shift it into the high word. */
769 /* Remove high part from the DFtype, leaving the low part as flonum. */
771 /* Convert that to fixed (but not to DWtype!) and add it in.
772 Sometimes A comes out negative. This is significant, since
773 A has more bits than a long int does. */
787 return - __fixunsdfdi (-a);
788 return __fixunsdfdi (a);
793 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
794 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
797 __fixunssfdi (SFtype original_a)
799 /* Convert the SFtype to a DFtype, because that is surely not going
800 to lose any bits. Some day someone else can write a faster version
801 that avoids converting to DFtype, and verify it really works right. */
802 DFtype a = original_a;
809 /* Compute high word of result, as a flonum. */
810 b = (a / HIGH_WORD_COEFF);
811 /* Convert that to fixed (but not to DWtype!),
812 and shift it into the high word. */
815 /* Remove high part from the DFtype, leaving the low part as flonum. */
817 /* Convert that to fixed (but not to DWtype!) and add it in.
818 Sometimes A comes out negative. This is significant, since
819 A has more bits than a long int does. */
833 return - __fixunssfdi (-a);
834 return __fixunssfdi (a);
838 #if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
839 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
840 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
841 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
844 __floatdixf (DWtype u)
848 d = (Wtype) (u >> WORD_SIZE);
849 d *= HIGH_HALFWORD_COEFF;
850 d *= HIGH_HALFWORD_COEFF;
851 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
857 #if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
858 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
859 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
860 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
863 __floatditf (DWtype u)
867 d = (Wtype) (u >> WORD_SIZE);
868 d *= HIGH_HALFWORD_COEFF;
869 d *= HIGH_HALFWORD_COEFF;
870 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
877 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
878 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
879 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
882 __floatdidf (DWtype u)
886 d = (Wtype) (u >> WORD_SIZE);
887 d *= HIGH_HALFWORD_COEFF;
888 d *= HIGH_HALFWORD_COEFF;
889 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
896 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
897 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
898 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
899 #define DI_SIZE (sizeof (DWtype) * BITS_PER_UNIT)
901 /* Define codes for all the float formats that we know of. Note
902 that this is copied from real.h. */
904 #define UNKNOWN_FLOAT_FORMAT 0
905 #define IEEE_FLOAT_FORMAT 1
906 #define VAX_FLOAT_FORMAT 2
907 #define IBM_FLOAT_FORMAT 3
909 /* Default to IEEE float if not specified. Nearly all machines use it. */
910 #ifndef HOST_FLOAT_FORMAT
911 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
914 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
919 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
924 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
930 __floatdisf (DWtype u)
932 /* Do the calculation in DFmode
933 so that we don't lose any of the precision of the high word
934 while multiplying it. */
937 /* Protect against double-rounding error.
938 Represent any low-order bits, that might be truncated in DFmode,
939 by a bit that won't be lost. The bit can go in anywhere below the
940 rounding position of the SFmode. A fixed mask and bit position
941 handles all usual configurations. It doesn't handle the case
942 of 128-bit DImode, however. */
943 if (DF_SIZE < DI_SIZE
944 && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
946 #define REP_BIT ((UWtype) 1 << (DI_SIZE - DF_SIZE))
947 if (! (- ((DWtype) 1 << DF_SIZE) < u
948 && u < ((DWtype) 1 << DF_SIZE)))
950 if ((UWtype) u & (REP_BIT - 1))
954 f = (Wtype) (u >> WORD_SIZE);
955 f *= HIGH_HALFWORD_COEFF;
956 f *= HIGH_HALFWORD_COEFF;
957 f += (UWtype) (u & (HIGH_WORD_COEFF - 1));
963 #if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
964 /* Reenable the normal types, in case limits.h needs them. */
977 __fixunsxfsi (XFtype a)
979 if (a >= - (DFtype) LONG_MIN)
980 return (Wtype) (a + LONG_MIN) - LONG_MIN;
986 /* Reenable the normal types, in case limits.h needs them. */
999 __fixunsdfsi (DFtype a)
1001 if (a >= - (DFtype) LONG_MIN)
1002 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1008 /* Reenable the normal types, in case limits.h needs them. */
1021 __fixunssfsi (SFtype a)
1023 if (a >= - (SFtype) LONG_MIN)
1024 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1029 /* From here on down, the routines use normal data types. */
1031 #define SItype bogus_type
1032 #define USItype bogus_type
1033 #define DItype bogus_type
1034 #define UDItype bogus_type
1035 #define SFtype bogus_type
1036 #define DFtype bogus_type
1054 /* Like bcmp except the sign is meaningful.
1055 Result is negative if S1 is less than S2,
1056 positive if S1 is greater, 0 if S1 and S2 are equal. */
1059 __gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
1063 unsigned char c1 = *s1++, c2 = *s2++;
1080 #if defined(__svr4__) || defined(__alliant__)
1084 /* The Alliant needs the added underscore. */
1085 asm (".globl __builtin_saveregs");
1086 asm ("__builtin_saveregs:");
1087 asm (".globl ___builtin_saveregs");
1088 asm ("___builtin_saveregs:");
1090 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1091 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1092 area and also for a new va_list
1094 /* Save all argument registers in the arg reg save area. The
1095 arg reg save area must have the following layout (according
1107 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1108 asm (" fst.q %f12,16(%sp)");
1110 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1111 asm (" st.l %r17,36(%sp)");
1112 asm (" st.l %r18,40(%sp)");
1113 asm (" st.l %r19,44(%sp)");
1114 asm (" st.l %r20,48(%sp)");
1115 asm (" st.l %r21,52(%sp)");
1116 asm (" st.l %r22,56(%sp)");
1117 asm (" st.l %r23,60(%sp)");
1118 asm (" st.l %r24,64(%sp)");
1119 asm (" st.l %r25,68(%sp)");
1120 asm (" st.l %r26,72(%sp)");
1121 asm (" st.l %r27,76(%sp)");
1123 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1124 va_list structure. Put it into
1125 r16 so that it will be returned
1128 /* Initialize all fields of the new va_list structure. This
1129 structure looks like:
1132 unsigned long ireg_used;
1133 unsigned long freg_used;
1139 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1140 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1141 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1142 asm (" bri %r1"); /* delayed return */
1143 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1145 #else /* not __svr4__ */
1146 #if defined(__PARAGON__)
1148 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1149 * and we stand a better chance of hooking into libraries
1150 * compiled by PGI. [andyp@ssd.intel.com]
1154 asm (".globl __builtin_saveregs");
1155 asm ("__builtin_saveregs:");
1156 asm (".globl ___builtin_saveregs");
1157 asm ("___builtin_saveregs:");
1159 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1160 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1161 area and also for a new va_list
1163 /* Save all argument registers in the arg reg save area. The
1164 arg reg save area must have the following layout (according
1176 asm (" fst.q f8, 0(sp)");
1177 asm (" fst.q f12,16(sp)");
1178 asm (" st.l r16,32(sp)");
1179 asm (" st.l r17,36(sp)");
1180 asm (" st.l r18,40(sp)");
1181 asm (" st.l r19,44(sp)");
1182 asm (" st.l r20,48(sp)");
1183 asm (" st.l r21,52(sp)");
1184 asm (" st.l r22,56(sp)");
1185 asm (" st.l r23,60(sp)");
1186 asm (" st.l r24,64(sp)");
1187 asm (" st.l r25,68(sp)");
1188 asm (" st.l r26,72(sp)");
1189 asm (" st.l r27,76(sp)");
1191 asm (" adds 80,sp,r16"); /* compute the address of the new
1192 va_list structure. Put it into
1193 r16 so that it will be returned
1196 /* Initialize all fields of the new va_list structure. This
1197 structure looks like:
1200 unsigned long ireg_used;
1201 unsigned long freg_used;
1207 asm (" st.l r0, 0(r16)"); /* nfixed */
1208 asm (" st.l r0, 4(r16)"); /* nfloating */
1209 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1210 asm (" bri r1"); /* delayed return */
1211 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1212 #else /* not __PARAGON__ */
1216 asm (".globl ___builtin_saveregs");
1217 asm ("___builtin_saveregs:");
1218 asm (" mov sp,r30");
1219 asm (" andnot 0x0f,sp,sp");
1220 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1222 /* Fill in the __va_struct. */
1223 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1224 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1225 asm (" st.l r18, 8(sp)");
1226 asm (" st.l r19,12(sp)");
1227 asm (" st.l r20,16(sp)");
1228 asm (" st.l r21,20(sp)");
1229 asm (" st.l r22,24(sp)");
1230 asm (" st.l r23,28(sp)");
1231 asm (" st.l r24,32(sp)");
1232 asm (" st.l r25,36(sp)");
1233 asm (" st.l r26,40(sp)");
1234 asm (" st.l r27,44(sp)");
1236 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1237 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1239 /* Fill in the __va_ctl. */
1240 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1241 asm (" st.l r28,84(sp)"); /* pointer to more args */
1242 asm (" st.l r0, 88(sp)"); /* nfixed */
1243 asm (" st.l r0, 92(sp)"); /* nfloating */
1245 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1247 asm (" mov r30,sp");
1248 /* recover stack and pass address to start
1250 #endif /* not __PARAGON__ */
1251 #endif /* not __svr4__ */
1252 #else /* not __i860__ */
1254 asm (".global __builtin_saveregs");
1255 asm ("__builtin_saveregs:");
1256 asm (".global ___builtin_saveregs");
1257 asm ("___builtin_saveregs:");
1258 #ifdef NEED_PROC_COMMAND
1261 asm ("st %i0,[%fp+68]");
1262 asm ("st %i1,[%fp+72]");
1263 asm ("st %i2,[%fp+76]");
1264 asm ("st %i3,[%fp+80]");
1265 asm ("st %i4,[%fp+84]");
1267 asm ("st %i5,[%fp+88]");
1268 #ifdef NEED_TYPE_COMMAND
1269 asm (".type __builtin_saveregs,#function");
1270 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1272 #else /* not __sparc__ */
1273 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1277 asm (" .set nomips16");
1279 asm (" .ent __builtin_saveregs");
1280 asm (" .globl __builtin_saveregs");
1281 asm ("__builtin_saveregs:");
1282 asm (" sw $4,0($30)");
1283 asm (" sw $5,4($30)");
1284 asm (" sw $6,8($30)");
1285 asm (" sw $7,12($30)");
1287 asm (" .end __builtin_saveregs");
1288 #else /* not __mips__, etc. */
1290 void * __attribute__ ((__noreturn__))
1291 __builtin_saveregs (void)
1296 #endif /* not __mips__ */
1297 #endif /* not __sparc__ */
1298 #endif /* not __i860__ */
1302 #ifndef inhibit_libc
1304 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1306 /* This is used by the `assert' macro. */
1308 __eprintf (const char *string, const char *expression,
1309 unsigned int line, const char *filename)
1311 fprintf (stderr, string, expression, line, filename);
1321 /* Structure emitted by -a */
1325 const char *filename;
1329 const unsigned long *addresses;
1331 /* Older GCC's did not emit these fields. */
1333 const char **functions;
1334 const long *line_nums;
1335 const char **filenames;
1339 #ifdef BLOCK_PROFILER_CODE
1342 #ifndef inhibit_libc
1344 /* Simple minded basic block profiling output dumper for
1345 systems that don't provide tcov support. At present,
1346 it requires atexit and stdio. */
1348 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1352 #include "gbl-ctors.h"
1353 #include "gcov-io.h"
1355 #ifdef TARGET_HAS_F_SETLKW
1360 static struct bb *bb_head;
1362 static int num_digits (long value, int base) __attribute__ ((const));
1364 /* Return the number of digits needed to print a value */
1365 /* __inline__ */ static int num_digits (long value, int base)
1367 int minus = (value < 0 && base != 16);
1368 unsigned long v = (minus) ? -value : value;
1382 __bb_exit_func (void)
1384 FILE *da_file, *file;
1391 i = strlen (bb_head->filename) - 3;
1393 if (!strcmp (bb_head->filename+i, ".da"))
1395 /* Must be -fprofile-arcs not -a.
1396 Dump data in a form that gcov expects. */
1400 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1404 /* Make sure the output file exists -
1405 but don't clobber existing data. */
1406 if ((da_file = fopen (ptr->filename, "a")) != 0)
1409 /* Need to re-open in order to be able to write from the start. */
1410 da_file = fopen (ptr->filename, "r+b");
1411 /* Some old systems might not allow the 'b' mode modifier.
1412 Therefore, try to open without it. This can lead to a race
1413 condition so that when you delete and re-create the file, the
1414 file might be opened in text mode, but then, you shouldn't
1415 delete the file in the first place. */
1417 da_file = fopen (ptr->filename, "r+");
1420 fprintf (stderr, "arc profiling: Can't open output file %s.\n",
1425 /* After a fork, another process might try to read and/or write
1426 the same file simultaneously. So if we can, lock the file to
1427 avoid race conditions. */
1428 #if defined (TARGET_HAS_F_SETLKW)
1430 struct flock s_flock;
1432 s_flock.l_type = F_WRLCK;
1433 s_flock.l_whence = SEEK_SET;
1434 s_flock.l_start = 0;
1436 s_flock.l_pid = getpid ();
1438 while (fcntl (fileno (da_file), F_SETLKW, &s_flock)
1443 /* If the file is not empty, and the number of counts in it is the
1444 same, then merge them in. */
1445 firstchar = fgetc (da_file);
1446 if (firstchar == EOF)
1448 if (ferror (da_file))
1450 fprintf (stderr, "arc profiling: Can't read output file ");
1451 perror (ptr->filename);
1458 if (ungetc (firstchar, da_file) == EOF)
1460 if (__read_long (&n_counts, da_file, 8) != 0)
1462 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1467 if (n_counts == ptr->ncounts)
1471 for (i = 0; i < n_counts; i++)
1475 if (__read_long (&v, da_file, 8) != 0)
1477 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1481 ptr->counts[i] += v;
1489 /* ??? Should first write a header to the file. Preferably, a 4 byte
1490 magic number, 4 bytes containing the time the program was
1491 compiled, 4 bytes containing the last modification time of the
1492 source file, and 4 bytes indicating the compiler options used.
1494 That way we can easily verify that the proper source/executable/
1495 data file combination is being used from gcov. */
1497 if (__write_long (ptr->ncounts, da_file, 8) != 0)
1500 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1506 long *count_ptr = ptr->counts;
1508 for (j = ptr->ncounts; j > 0; j--)
1510 if (__write_long (*count_ptr, da_file, 8) != 0)
1518 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1522 if (fclose (da_file) == EOF)
1523 fprintf (stderr, "arc profiling: Error closing output file %s.\n",
1530 /* Must be basic block profiling. Emit a human readable output file. */
1532 file = fopen ("bb.out", "a");
1541 /* This is somewhat type incorrect, but it avoids worrying about
1542 exactly where time.h is included from. It should be ok unless
1543 a void * differs from other pointer formats, or if sizeof (long)
1544 is < sizeof (time_t). It would be nice if we could assume the
1545 use of rational standards here. */
1547 time ((void *) &time_value);
1548 fprintf (file, "Basic block profiling finished on %s\n", ctime ((void *) &time_value));
1550 /* We check the length field explicitly in order to allow compatibility
1551 with older GCC's which did not provide it. */
1553 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1556 int func_p = (ptr->nwords >= (long) sizeof (struct bb)
1557 && ptr->nwords <= 1000
1559 int line_p = (func_p && ptr->line_nums);
1560 int file_p = (func_p && ptr->filenames);
1561 int addr_p = (ptr->addresses != 0);
1562 long ncounts = ptr->ncounts;
1568 int blk_len = num_digits (ncounts, 10);
1573 fprintf (file, "File %s, %ld basic blocks \n\n",
1574 ptr->filename, ncounts);
1576 /* Get max values for each field. */
1577 for (i = 0; i < ncounts; i++)
1582 if (cnt_max < ptr->counts[i])
1583 cnt_max = ptr->counts[i];
1585 if (addr_p && (unsigned long) addr_max < ptr->addresses[i])
1586 addr_max = ptr->addresses[i];
1588 if (line_p && line_max < ptr->line_nums[i])
1589 line_max = ptr->line_nums[i];
1593 p = (ptr->functions[i]) ? (ptr->functions[i]) : "<none>";
1601 p = (ptr->filenames[i]) ? (ptr->filenames[i]) : "<none>";
1608 addr_len = num_digits (addr_max, 16);
1609 cnt_len = num_digits (cnt_max, 10);
1610 line_len = num_digits (line_max, 10);
1612 /* Now print out the basic block information. */
1613 for (i = 0; i < ncounts; i++)
1616 " Block #%*d: executed %*ld time(s)",
1618 cnt_len, ptr->counts[i]);
1621 fprintf (file, " address= 0x%.*lx", addr_len,
1625 fprintf (file, " function= %-*s", func_len,
1626 (ptr->functions[i]) ? ptr->functions[i] : "<none>");
1629 fprintf (file, " line= %*ld", line_len, ptr->line_nums[i]);
1632 fprintf (file, " file= %s",
1633 (ptr->filenames[i]) ? ptr->filenames[i] : "<none>");
1635 fprintf (file, "\n");
1638 fprintf (file, "\n");
1642 fprintf (file, "\n\n");
1648 __bb_init_func (struct bb *blocks)
1650 /* User is supposed to check whether the first word is non-0,
1651 but just in case.... */
1653 if (blocks->zero_word)
1656 /* Initialize destructor. */
1658 atexit (__bb_exit_func);
1660 /* Set up linked list. */
1661 blocks->zero_word = 1;
1662 blocks->next = bb_head;
1666 /* Called before fork or exec - write out profile information gathered so
1667 far and reset it to zero. This avoids duplication or loss of the
1668 profile information gathered so far. */
1670 __bb_fork_func (void)
1675 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1678 for (i = ptr->ncounts - 1; i >= 0; i--)
1683 #ifndef MACHINE_STATE_SAVE
1684 #define MACHINE_STATE_SAVE(ID)
1686 #ifndef MACHINE_STATE_RESTORE
1687 #define MACHINE_STATE_RESTORE(ID)
1690 /* Number of buckets in hashtable of basic block addresses. */
1692 #define BB_BUCKETS 311
1694 /* Maximum length of string in file bb.in. */
1696 #define BBINBUFSIZE 500
1700 struct bb_edge *next;
1701 unsigned long src_addr;
1702 unsigned long dst_addr;
1703 unsigned long count;
1708 TRACE_KEEP = 0, TRACE_ON = 1, TRACE_OFF = 2
1713 struct bb_func *next;
1716 enum bb_func_mode mode;
1719 /* This is the connection to the outside world.
1720 The BLOCK_PROFILER macro must set __bb.blocks
1721 and __bb.blockno. */
1724 unsigned long blockno;
1728 /* Vars to store addrs of source and destination basic blocks
1731 static unsigned long bb_src = 0;
1732 static unsigned long bb_dst = 0;
1734 static FILE *bb_tracefile = (FILE *) 0;
1735 static struct bb_edge **bb_hashbuckets = (struct bb_edge **) 0;
1736 static struct bb_func *bb_func_head = (struct bb_func *) 0;
1737 static unsigned long bb_callcount = 0;
1738 static int bb_mode = 0;
1740 static unsigned long *bb_stack = (unsigned long *) 0;
1741 static size_t bb_stacksize = 0;
1743 static int reported = 0;
1746 Always : Print execution frequencies of basic blocks
1748 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1749 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1750 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1751 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1756 /*#include <sys/types.h>*/
1757 #include <sys/stat.h>
1758 /*#include <malloc.h>*/
1760 /* Commands executed by gopen. */
1762 #define GOPENDECOMPRESS "gzip -cd "
1763 #define GOPENCOMPRESS "gzip -c >"
1765 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1766 If it does not compile, simply replace gopen by fopen and delete
1767 '.gz' from any first parameter to gopen. */
1770 gopen (char *fn, char *mode)
1778 if (mode[0] != 'r' && mode[0] != 'w')
1781 p = fn + strlen (fn)-1;
1782 use_gzip = ((p[-1] == '.' && (p[0] == 'Z' || p[0] == 'z'))
1783 || (p[-2] == '.' && p[-1] == 'g' && p[0] == 'z'));
1790 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1791 + sizeof (GOPENDECOMPRESS));
1792 strcpy (s, GOPENDECOMPRESS);
1793 strcpy (s + (sizeof (GOPENDECOMPRESS)-1), fn);
1794 f = popen (s, mode);
1802 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1803 + sizeof (GOPENCOMPRESS));
1804 strcpy (s, GOPENCOMPRESS);
1805 strcpy (s + (sizeof (GOPENCOMPRESS)-1), fn);
1806 if (!(f = popen (s, mode)))
1807 f = fopen (s, mode);
1814 return fopen (fn, mode);
1824 if (!fstat (fileno (f), &buf) && S_ISFIFO (buf.st_mode))
1832 #endif /* HAVE_POPEN */
1834 /* Called once per program. */
1837 __bb_exit_trace_func (void)
1839 FILE *file = fopen ("bb.out", "a");
1852 gclose (bb_tracefile);
1854 fclose (bb_tracefile);
1855 #endif /* HAVE_POPEN */
1858 /* Check functions in `bb.in'. */
1863 const struct bb_func *p;
1864 int printed_something = 0;
1868 /* This is somewhat type incorrect. */
1869 time ((void *) &time_value);
1871 for (p = bb_func_head; p != (struct bb_func *) 0; p = p->next)
1873 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1875 if (!ptr->filename || (p->filename != (char *) 0 && strcmp (p->filename, ptr->filename)))
1877 for (blk = 0; blk < ptr->ncounts; blk++)
1879 if (!strcmp (p->funcname, ptr->functions[blk]))
1884 if (!printed_something)
1886 fprintf (file, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value));
1887 printed_something = 1;
1890 fprintf (file, "\tFunction %s", p->funcname);
1892 fprintf (file, " of file %s", p->filename);
1893 fprintf (file, "\n" );
1898 if (printed_something)
1899 fprintf (file, "\n");
1905 if (!bb_hashbuckets)
1909 fprintf (stderr, "Profiler: out of memory\n");
1919 unsigned long addr_max = 0;
1920 unsigned long cnt_max = 0;
1924 /* This is somewhat type incorrect, but it avoids worrying about
1925 exactly where time.h is included from. It should be ok unless
1926 a void * differs from other pointer formats, or if sizeof (long)
1927 is < sizeof (time_t). It would be nice if we could assume the
1928 use of rationale standards here. */
1930 time ((void *) &time_value);
1931 fprintf (file, "Basic block jump tracing");
1933 switch (bb_mode & 12)
1936 fprintf (file, " (with call)");
1940 /* Print nothing. */
1944 fprintf (file, " (with call & ret)");
1948 fprintf (file, " (with ret)");
1952 fprintf (file, " finished on %s\n", ctime ((void *) &time_value));
1954 for (i = 0; i < BB_BUCKETS; i++)
1956 struct bb_edge *bucket = bb_hashbuckets[i];
1957 for ( ; bucket; bucket = bucket->next )
1959 if (addr_max < bucket->src_addr)
1960 addr_max = bucket->src_addr;
1961 if (addr_max < bucket->dst_addr)
1962 addr_max = bucket->dst_addr;
1963 if (cnt_max < bucket->count)
1964 cnt_max = bucket->count;
1967 addr_len = num_digits (addr_max, 16);
1968 cnt_len = num_digits (cnt_max, 10);
1970 for ( i = 0; i < BB_BUCKETS; i++)
1972 struct bb_edge *bucket = bb_hashbuckets[i];
1973 for ( ; bucket; bucket = bucket->next )
1976 "Jump from block 0x%.*lx to block 0x%.*lx executed %*lu time(s)\n",
1977 addr_len, bucket->src_addr,
1978 addr_len, bucket->dst_addr,
1979 cnt_len, bucket->count);
1983 fprintf (file, "\n");
1991 /* Free allocated memory. */
1996 struct bb_func *old = f;
1999 if (old->funcname) free (old->funcname);
2000 if (old->filename) free (old->filename);
2011 for (i = 0; i < BB_BUCKETS; i++)
2013 struct bb_edge *old, *bucket = bb_hashbuckets[i];
2018 bucket = bucket->next;
2022 free (bb_hashbuckets);
2025 for (b = bb_head; b; b = b->next)
2026 if (b->flags) free (b->flags);
2029 /* Called once per program. */
2032 __bb_init_prg (void)
2035 char buf[BBINBUFSIZE];
2038 enum bb_func_mode m;
2041 /* Initialize destructor. */
2042 atexit (__bb_exit_func);
2044 if (!(file = fopen ("bb.in", "r")))
2047 while(fgets (buf, BBINBUFSIZE, file) != 0)
2063 if (!strcmp (p, "__bb_trace__"))
2065 else if (!strcmp (p, "__bb_jumps__"))
2067 else if (!strcmp (p, "__bb_hidecall__"))
2069 else if (!strcmp (p, "__bb_showret__"))
2073 struct bb_func *f = (struct bb_func *) malloc (sizeof (struct bb_func));
2077 f->next = bb_func_head;
2078 if ((pos = strchr (p, ':')))
2080 if (!(f->funcname = (char *) malloc (strlen (pos+1)+1)))
2082 strcpy (f->funcname, pos+1);
2084 if ((f->filename = (char *) malloc (l+1)))
2086 strncpy (f->filename, p, l);
2087 f->filename[l] = '\0';
2090 f->filename = (char *) 0;
2094 if (!(f->funcname = (char *) malloc (strlen (p)+1)))
2096 strcpy (f->funcname, p);
2097 f->filename = (char *) 0;
2109 bb_tracefile = gopen ("bbtrace.gz", "w");
2114 bb_tracefile = fopen ("bbtrace", "w");
2116 #endif /* HAVE_POPEN */
2120 bb_hashbuckets = (struct bb_edge **)
2121 malloc (BB_BUCKETS * sizeof (struct bb_edge *));
2123 /* Use a loop here rather than calling bzero to avoid having to
2124 conditionalize its existance. */
2125 for (i = 0; i < BB_BUCKETS; i++)
2126 bb_hashbuckets[i] = 0;
2132 bb_stack = (unsigned long *) malloc (bb_stacksize * sizeof (*bb_stack));
2135 /* Initialize destructor. */
2136 atexit (__bb_exit_trace_func);
2139 /* Called upon entering a basic block. */
/* NOTE(review): this extract is elided -- the embedded original line
   numbers jump (2139, 2142, 2144, ...), so braces and some statements
   are missing from this view.  Comments describe only visible lines.  */
2142 __bb_trace_func (void)
2144 struct bb_edge *bucket;
/* Target-specific macro: save machine state around the profiling code.  */
2146 MACHINE_STATE_SAVE("1")
/* Do nothing when not inside a traced call chain, or when tracing is
   switched off for this block via the per-block flags array.  */
2148 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2151 bb_dst = __bb.blocks->addresses[__bb.blockno];
2152 __bb.blocks->counts[__bb.blockno]++;
/* Trace-file mode: append the destination block address to bb_tracefile.  */
2156 fwrite (&bb_dst, sizeof (unsigned long), 1, bb_tracefile);
2161 struct bb_edge **startbucket, **oldnext;
/* Hash the (bb_src, bb_dst) edge into the bb_hashbuckets table.  */
2163 oldnext = startbucket
2164 = & bb_hashbuckets[ (((int) bb_src*8) ^ (int) bb_dst) % BB_BUCKETS ];
2165 bucket = *startbucket;
2167 for (bucket = *startbucket; bucket;
2168 oldnext = &(bucket->next), bucket = *oldnext)
2170 if (bucket->src_addr == bb_src
2171 && bucket->dst_addr == bb_dst)
/* Edge found: unlink it and move it to the front of its chain so hot
   edges stay cheap to find (visible count-update line is elided).  */
2174 *oldnext = bucket->next;
2175 bucket->next = *startbucket;
2176 *startbucket = bucket;
/* Edge not found: allocate a fresh record (error path prints and,
   presumably, disables hashing -- elided here)...  */
2181 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2187 fprintf (stderr, "Profiler: out of memory\n");
/* ...and insert it at the head of the chain.  */
2194 bucket->src_addr = bb_src;
2195 bucket->dst_addr = bb_dst;
2196 bucket->next = *startbucket;
2197 *startbucket = bucket;
2208 MACHINE_STATE_RESTORE("1")
2212 /* Called when returning from a function and `__bb_showret__' is set. */
/* NOTE(review): elided extract -- braces and some statements missing.
   This is the return-edge twin of __bb_trace_func: it records the edge
   with src/dst roles swapped (bb_dst -> bb_src).  */
2215 __bb_trace_func_ret (void)
2217 struct bb_edge *bucket;
2219 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2224 struct bb_edge **startbucket, **oldnext;
/* Hash the reversed (bb_dst, bb_src) edge.  */
2226 oldnext = startbucket
2227 = & bb_hashbuckets[ (((int) bb_dst * 8) ^ (int) bb_src) % BB_BUCKETS ];
2228 bucket = *startbucket;
2230 for (bucket = *startbucket; bucket;
2231 oldnext = &(bucket->next), bucket = *oldnext)
2233 if (bucket->src_addr == bb_dst
2234 && bucket->dst_addr == bb_src)
/* Found: move-to-front within the hash chain.  */
2237 *oldnext = bucket->next;
2238 bucket->next = *startbucket;
2239 *startbucket = bucket;
/* Not found: allocate and insert a new edge record at the chain head.  */
2244 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2250 fprintf (stderr, "Profiler: out of memory\n");
2257 bucket->src_addr = bb_dst;
2258 bucket->dst_addr = bb_src;
2259 bucket->next = *startbucket;
2260 *startbucket = bucket;
2273 /* Called upon entering the first function of a file. */
/* NOTE(review): elided extract.  Registers BLOCKS on the global bb_head
   list and computes per-block trace flags from the bb_func_head list
   that was parsed from `bb.in'.  */
2276 __bb_init_file (struct bb *blocks)
2279 const struct bb_func *p;
2280 long blk, ncounts = blocks->ncounts;
2281 const char **functions = blocks->functions;
2283 /* Set up linked list. */
/* zero_word doubles as the "already initialized" marker tested by
   __bb_init_trace_func.  */
2284 blocks->zero_word = 1;
2285 blocks->next = bb_head;
/* Allocate one flag byte per basic block (early-return condition on
   allocation failure is partly elided).  */
2290 || !(blocks->flags = (char *) malloc (sizeof (char) * blocks->ncounts)))
2293 for (blk = 0; blk < ncounts; blk++)
2294 blocks->flags[blk] = 0;
/* OR in the mode of every bb.in entry whose function name matches, and
   whose filename matches or is absent (absent = wildcard).  */
2296 for (blk = 0; blk < ncounts; blk++)
2298 for (p = bb_func_head; p; p = p->next)
2300 if (!strcmp (p->funcname, functions[blk])
2301 && (!p->filename || !strcmp (p->filename, blocks->filename)))
2303 blocks->flags[blk] |= p->mode;
2310 /* Called when exiting from a function. */
/* NOTE(review): elided extract.  Pops one level off the profiler call
   stack and, when call/return edge tracing is enabled (bb_mode bits
   2 or 3, i.e. mask 12), emits the return edge.  */
2313 __bb_trace_ret (void)
2316 MACHINE_STATE_SAVE("2")
/* Guard against underflow/overflow of the shadow call stack: only use
   bb_stack when the current depth fits inside it.  */
2320 if ((bb_mode & 12) && bb_stacksize > bb_callcount)
2322 bb_src = bb_stack[bb_callcount];
2324 __bb_trace_func_ret ();
2330 MACHINE_STATE_RESTORE("2")
2334 /* Called when entering a function. */
/* NOTE(review): heavily elided extract -- several conditional arms and
   the `stack_overflow:' label are not visible.  Lazily initializes the
   per-file bb data, then pushes bb_src on the shadow call stack.  */
2337 __bb_init_trace_func (struct bb *blocks, unsigned long blockno)
2339 static int trace_init = 0;
2341 MACHINE_STATE_SAVE("3")
/* First entry into this object file: hook its blocks into the list.  */
2343 if (!blocks->zero_word)
2350 __bb_init_file (blocks);
/* Grow the shadow call stack when the call depth exceeds it.  */
2360 if (bb_callcount >= bb_stacksize)
2362 size_t newsize = bb_callcount + 100;
/* NOTE(review): this realloc size lacks the `* sizeof (*bb_stack)'
   multiplier that the initial malloc (original line 2132) uses --
   looks like an undersized buffer; confirm against the full source.
   Also `p = realloc (p, ...)' loses the old pointer on failure.  */
2364 bb_stack = (unsigned long *) realloc (bb_stack, newsize);
2369 fprintf (stderr, "Profiler: out of memory\n");
2373 goto stack_overflow;
2375 bb_stacksize = newsize;
2377 bb_stack[bb_callcount] = bb_src;
/* Alternate path: only functions explicitly marked TRACE_ON in bb.in
   start a traced region.  */
2388 else if (blocks->flags && (blocks->flags[blockno] & TRACE_ON))
2394 bb_stack[bb_callcount] = bb_src;
2397 MACHINE_STATE_RESTORE("3")
2400 #endif /* not inhibit_libc */
2401 #endif /* not BLOCK_PROFILER_CODE */
/* Table of single-bit masks: __shtab[n] == 1U << n for n = 0..31.
   Used by targets that implement shifts via table lookup.
   NOTE(review): the initializer's closing `};' is elided from this
   extract.  */
2405 unsigned int __shtab[] = {
2406 0x00000001, 0x00000002, 0x00000004, 0x00000008,
2407 0x00000010, 0x00000020, 0x00000040, 0x00000080,
2408 0x00000100, 0x00000200, 0x00000400, 0x00000800,
2409 0x00001000, 0x00002000, 0x00004000, 0x00008000,
2410 0x00010000, 0x00020000, 0x00040000, 0x00080000,
2411 0x00100000, 0x00200000, 0x00400000, 0x00800000,
2412 0x01000000, 0x02000000, 0x04000000, 0x08000000,
2413 0x10000000, 0x20000000, 0x40000000, 0x80000000
2417 #ifdef L_clear_cache
2418 /* Clear part of an instruction cache. */
2420 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
2423 __clear_cache (char *beg __attribute__((__unused__)),
2424 char *end __attribute__((__unused__)))
2426 #ifdef CLEAR_INSN_CACHE
2427 CLEAR_INSN_CACHE (beg, end);
2429 #ifdef INSN_CACHE_SIZE
2430 static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
2431 static int initialized;
2435 typedef (*function_ptr) (void);
2437 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2438 /* It's cheaper to clear the whole cache.
2439 Put in a series of jump instructions so that calling the beginning
2440 of the cache will clear the whole thing. */
2444 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2445 & -INSN_CACHE_LINE_WIDTH);
2446 int end_ptr = ptr + INSN_CACHE_SIZE;
2448 while (ptr < end_ptr)
2450 *(INSTRUCTION_TYPE *)ptr
2451 = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
2452 ptr += INSN_CACHE_LINE_WIDTH;
2454 *(INSTRUCTION_TYPE *) (ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;
2459 /* Call the beginning of the sequence. */
2460 (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2461 & -INSN_CACHE_LINE_WIDTH))
2464 #else /* Cache is large. */
2468 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2469 & -INSN_CACHE_LINE_WIDTH);
2471 while (ptr < (int) array + sizeof array)
2473 *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
2474 ptr += INSN_CACHE_LINE_WIDTH;
2480 /* Find the location in array that occupies the same cache line as BEG. */
2482 offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
2483 start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
2484 & -INSN_CACHE_PLANE_SIZE)
2487 /* Compute the cache alignment of the place to stop clearing. */
2488 #if 0 /* This is not needed for gcc's purposes. */
2489 /* If the block to clear is bigger than a cache plane,
2490 we clear the entire cache, and OFFSET is already correct. */
2491 if (end < beg + INSN_CACHE_PLANE_SIZE)
2493 offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
2494 & -INSN_CACHE_LINE_WIDTH)
2495 & (INSN_CACHE_PLANE_SIZE - 1));
2497 #if INSN_CACHE_DEPTH > 1
2498 end_addr = (start_addr & -INSN_CACHE_PLANE_SIZE) + offset;
2499 if (end_addr <= start_addr)
2500 end_addr += INSN_CACHE_PLANE_SIZE;
2502 for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)
2504 int addr = start_addr + plane * INSN_CACHE_PLANE_SIZE;
2505 int stop = end_addr + plane * INSN_CACHE_PLANE_SIZE;
2507 while (addr != stop)
2509 /* Call the return instruction at ADDR. */
2510 ((function_ptr) addr) ();
2512 addr += INSN_CACHE_LINE_WIDTH;
2515 #else /* just one plane */
2518 /* Call the return instruction at START_ADDR. */
2519 ((function_ptr) start_addr) ();
2521 start_addr += INSN_CACHE_LINE_WIDTH;
2523 while ((start_addr % INSN_CACHE_SIZE) != offset);
2524 #endif /* just one plane */
2525 #endif /* Cache is large */
2526 #endif /* Cache exists */
2527 #endif /* CLEAR_INSN_CACHE */
2530 #endif /* L_clear_cache */
2534 /* Jump to a trampoline, loading the static chain address. */
2536 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
2549 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2553 mprotect (char *addr, int len, int prot)
2570 if (VirtualProtect (addr, len, np, &op))
2576 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2578 #ifdef TRANSFER_FROM_TRAMPOLINE
2579 TRANSFER_FROM_TRAMPOLINE
2582 #if defined (NeXT) && defined (__MACH__)
2584 /* Make stack executable so we can call trampolines on stack.
2585 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2589 #include <mach/mach.h>
2593 __enable_execute_stack (char *addr)
2596 char *eaddr = addr + TRAMPOLINE_SIZE;
2597 vm_address_t a = (vm_address_t) addr;
2599 /* turn on execute access on stack */
2600 r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
2601 if (r != KERN_SUCCESS)
2603 mach_error("vm_protect VM_PROT_ALL", r);
2607 /* We inline the i-cache invalidation for speed */
2609 #ifdef CLEAR_INSN_CACHE
2610 CLEAR_INSN_CACHE (addr, eaddr);
2612 __clear_cache ((int) addr, (int) eaddr);
2616 #endif /* defined (NeXT) && defined (__MACH__) */
2620 /* Make stack executable so we can call trampolines on stack.
2621 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2623 #include <sys/mman.h>
2624 #include <sys/vmparam.h>
2625 #include <machine/machparam.h>
2628 __enable_execute_stack (void)
2631 static unsigned lowest = USRSTACK;
2632 unsigned current = (unsigned) &fp & -NBPG;
2634 if (lowest > current)
2636 unsigned len = lowest - current;
2637 mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);
2641 /* Clear instruction cache in case an old trampoline is in it. */
2644 #endif /* __convex__ */
2648 /* Modified from the convex -code above. */
2650 #include <sys/param.h>
2652 #include <sys/m88kbcs.h>
2655 __enable_execute_stack (void)
2658 static unsigned long lowest = USRSTACK;
2659 unsigned long current = (unsigned long) &save_errno & -NBPC;
2661 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2662 address is seen as 'negative'. That is the case with the stack. */
2665 if (lowest > current)
2667 unsigned len=lowest-current;
2668 memctl(current,len,MCT_TEXT);
2672 memctl(current,NBPC,MCT_TEXT);
2676 #endif /* __sysV88__ */
2680 #include <sys/signal.h>
2683 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2684 so define it here, because we need it in __clear_insn_cache below */
2685 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2686 hence we enable this stuff only if MCT_TEXT is #define'd. */
2701 /* Clear instruction cache so we can call trampolines on stack.
2702 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
2705 __clear_insn_cache (void)
2710 /* Preserve errno, because users would be surprised to have
2711 errno changing without explicitly calling any system-call. */
2714 /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
2715 No need to use an address derived from _start or %sp, as 0 works also. */
2716 memctl(0, 4096, MCT_TEXT);
2721 #endif /* __sysV68__ */
2725 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2727 #include <sys/mman.h>
2728 #include <sys/types.h>
2729 #include <sys/param.h>
2730 #include <sys/vmmac.h>
2732 /* Modified from the convex -code above.
2733 mremap promises to clear the i-cache. */
2736 __enable_execute_stack (void)
2739 if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
2740 PROT_READ|PROT_WRITE|PROT_EXEC))
2742 perror ("mprotect in __enable_execute_stack");
2747 #endif /* __pyr__ */
2749 #if defined (sony_news) && defined (SYSTYPE_BSD)
2752 #include <sys/types.h>
2753 #include <sys/param.h>
2754 #include <syscall.h>
2755 #include <machine/sysnews.h>
2757 /* cacheflush function for NEWS-OS 4.2.
2758 This function is called from trampoline-initialize code
2759 defined in config/mips/mips.h. */
2762 cacheflush (char *beg, int size, int flag)
2764 if (syscall (SYS_sysnews, NEWS_CACHEFLUSH, beg, size, FLUSH_BCACHE))
2766 perror ("cache_flush");
2772 #endif /* sony_news */
2773 #endif /* L_trampoline */
2778 #include "gbl-ctors.h"
2779 /* Some systems use __main in a way incompatible with its use in gcc, in these
2780 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2781 give the same symbol without quotes for an alternative entry point. You
2782 must define both, or neither. */
2784 #define NAME__MAIN "__main"
2785 #define SYMBOL__MAIN __main
2788 #ifdef INIT_SECTION_ASM_OP
2789 #undef HAS_INIT_SECTION
2790 #define HAS_INIT_SECTION
2793 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2795 /* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
2796 code to run constructors. In that case, we need to handle EH here, too. */
2798 #ifdef EH_FRAME_SECTION
2800 extern unsigned char __EH_FRAME_BEGIN__[];
2803 /* Run all the global destructors on exit from the program. */
/* NOTE(review): elided extract -- the loop walking the __DTOR_LIST__
   entries is not visible here.  Either the target supplies the whole
   body via DO_GLOBAL_DTORS_BODY, or the generic list walk runs.  */
2806 __do_global_dtors (void)
2808 #ifdef DO_GLOBAL_DTORS_BODY
2809 DO_GLOBAL_DTORS_BODY;
/* Generic path: skip the count word at __DTOR_LIST__[0], then call
   each function pointer in turn (call loop elided).  */
2811 static func_ptr *p = __DTOR_LIST__ + 1;
/* When EH frame data was registered by __do_global_ctors and there is
   no .init section, deregister it exactly once on the way out.  */
2818 #if defined (EH_FRAME_SECTION) && !defined (HAS_INIT_SECTION)
2820 static int completed = 0;
2824 __deregister_frame_info (__EH_FRAME_BEGIN__);
2831 #ifndef HAS_INIT_SECTION
2832 /* Run all the global constructors on entry to the program. */
/* NOTE(review): elided extract.  Registers this module's EH frame info
   (when EH_FRAME_SECTION), runs the target-defined constructor body,
   and arranges for destructors to run at exit.  */
2835 __do_global_ctors (void)
2837 #ifdef EH_FRAME_SECTION
/* Static `object' provides the storage __register_frame_info needs for
   the lifetime of the program.  */
2839 static struct object object;
2840 __register_frame_info (__EH_FRAME_BEGIN__, &object);
2843 DO_GLOBAL_CTORS_BODY;
/* Mirror-image teardown: queue __do_global_dtors via atexit.  */
2844 atexit (__do_global_dtors);
2846 #endif /* no HAS_INIT_SECTION */
2848 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
2849 /* Subroutine called automatically by `main'.
2850 Compiling a global function named `main'
2851 produces an automatic call to this function at the beginning.
2853 For many systems, this routine calls __do_global_ctors.
2854 For systems which support a .init section we use the .init section
2855 to run __do_global_ctors, so we need not do anything here. */
2860 /* Support recursive calls to `main': run initializers just once. */
2861 static int initialized;
2865 __do_global_ctors ();
2868 #endif /* no HAS_INIT_SECTION or INVOKE__main */
2870 #endif /* L__main */
2871 #endif /* __CYGWIN__ */
2875 #include "gbl-ctors.h"
2877 /* Provide default definitions for the lists of constructors and
2878 destructors, so that we don't get linker errors. These symbols are
2879 intentionally bss symbols, so that gld and/or collect will provide
2880 the right values. */
2882 /* We declare the lists here with two elements each,
2883 so that they are valid empty lists if no other definition is loaded.
2885 If we are using the old "set" extensions to have the gnu linker
2886 collect ctors and dtors, then we __CTOR_LIST__ and __DTOR_LIST__
2887 must be in the bss/common section.
2889 Long term no port should use those extensions. But many still do. */
2890 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
2891 #if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
2892 func_ptr __CTOR_LIST__[2] = {0, 0};
2893 func_ptr __DTOR_LIST__[2] = {0, 0};
2895 func_ptr __CTOR_LIST__[2];
2896 func_ptr __DTOR_LIST__[2];
2898 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
2899 #endif /* L_ctors */
2903 #include "gbl-ctors.h"
/* Fallback atexit implementation (NEED_ATEXIT): handlers are kept in a
   growable array and run LIFO by the matching exit() (elided from this
   extract).  NOTE(review): extract is elided -- braces and the failure
   branch structure are not fully visible.  */
2911 static func_ptr *atexit_chain = 0;
2912 static long atexit_chain_length = 0;
2913 static volatile long last_atexit_chain_slot = -1;
2916 atexit (func_ptr func)
/* Grow the chain in batches of 32 slots when the next slot would run
   off the end.  */
2918 if (++last_atexit_chain_slot == atexit_chain_length)
2920 atexit_chain_length += 32;
/* NOTE(review): `p = realloc (p, ...)' overwrites the only copy of the
   old pointer -- on failure the previous chain leaks.  The reset of
   length/slot below suggests the failure path deliberately starts
   over; confirm against the full source.  */
2922 atexit_chain = (func_ptr *) realloc (atexit_chain, atexit_chain_length
2923 * sizeof (func_ptr));
2925 atexit_chain = (func_ptr *) malloc (atexit_chain_length
2926 * sizeof (func_ptr));
2929 atexit_chain_length = 0;
2930 last_atexit_chain_slot = -1;
/* Record the handler in the slot claimed above.  */
2935 atexit_chain[last_atexit_chain_slot] = func;
2939 extern void _cleanup (void);
2940 extern void _exit (int) __attribute__ ((__noreturn__));
2947 for ( ; last_atexit_chain_slot-- >= 0; )
2949 (*atexit_chain[last_atexit_chain_slot + 1]) ();
2950 atexit_chain[last_atexit_chain_slot + 1] = 0;
2952 free (atexit_chain);
2965 /* Simple; we just need a wrapper for ON_EXIT. */
2967 atexit (func_ptr func)
2969 return ON_EXIT (func);
2972 #endif /* ON_EXIT */
2973 #endif /* NEED_ATEXIT */
2981 /* Shared exception handling support routines. */
2984 __default_terminate (void)
2989 void (*__terminate_func)(void) __attribute__ ((__noreturn__)) =
2990 __default_terminate;
2995 (*__terminate_func)();
/* Default runtime type matcher for exceptions: the catch and throw
   types are represented as strings and match iff they compare equal.
   OBJ is passed through unexamined in the visible lines.
   NOTE(review): the printf below is presumably debug output guarded by
   an elided #if -- confirm against the full source.  */
2999 __throw_type_match (void *catch_type, void *throw_type, void *obj)
3002 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
3003 catch_type, throw_type);
3005 if (strcmp ((const char *)catch_type, (const char *)throw_type) == 0)
3016 /* Include definitions of EH context and table layout */
3018 #include "eh-common.h"
3019 #ifndef inhibit_libc
3023 /* Allocate and return a new EH context structure. */
/* NOTE(review): elided extract -- the top_elt member declaration and
   the malloc-failure check are not visible here.  */
3027 new_eh_context (void)
/* The context is allocated together with its initial dynamic-handler
   chain element (top_elt) in one malloc.  */
3029 struct eh_full_context {
3030 struct eh_context c;
3032 } *ehfc = (struct eh_full_context *) malloc (sizeof *ehfc);
3037 memset (ehfc, 0, sizeof *ehfc);
3039 ehfc->c.dynamic_handler_chain = (void **) ehfc->top_elt;
3041 /* This should optimize out entirely. This should always be true,
3042 but just in case it ever isn't, don't allow bogus code to be
/* Sanity check: freeing &ehfc->c must be equivalent to freeing ehfc,
   i.e. `c' must be the first member.  */
3045 if ((void*)(&ehfc->c) != (void*)ehfc)
3051 static __gthread_key_t eh_context_key;
3053 /* Destructor for struct eh_context. */
/* Registered with __gthread_key_create; invoked per thread at exit
   with that thread's context pointer.  */
3055 eh_context_free (void *ptr)
3057 __gthread_key_dtor (eh_context_key, ptr);
3063 /* Pointer to function to return EH context. */
3065 static struct eh_context *eh_context_initialize (void);
3066 static struct eh_context *eh_context_static (void);
3068 static struct eh_context *eh_context_specific (void);
3071 static struct eh_context *(*get_eh_context) (void) = &eh_context_initialize;
3073 /* Routine to get EH context.
3074 This one will simply call the function pointer. */
/* Public entry point: dispatches through get_eh_context, which starts
   as eh_context_initialize and is rebound on first use.  */
3077 __get_eh_context (void)
3079 return (void *) (*get_eh_context) ();
3082 /* Get and set the language specific info pointer. */
/* NOTE(review): elided extract -- the return statement (presumably
   &eh->info) is not visible.  */
3085 __get_eh_info (void)
3087 struct eh_context *eh = (*get_eh_context) ();
3091 #ifdef DWARF2_UNWIND_INFO
/* Per-register save sizes for the DWARF2 unwinder (used by copy_reg).
   Filled in once by the compiler builtin below.  */
3092 static int dwarf_reg_size_table_initialized = 0;
3093 static char dwarf_reg_size_table[DWARF_FRAME_REGISTERS];
3096 init_reg_size_table (void)
3098 __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table);
3099 dwarf_reg_size_table_initialized = 1;
/* One-time thread-support probe (run under __gthread_once).  Picks the
   context accessor: thread-specific storage when a TLS key can be
   created, otherwise the single static context.  */
3105 eh_threads_initialize (void)
3107 /* Try to create the key. If it fails, revert to static method,
3108 otherwise start using thread specific EH contexts. */
3109 if (__gthread_key_create (&eh_context_key, &eh_context_free) == 0)
3110 get_eh_context = &eh_context_specific;
3112 get_eh_context = &eh_context_static;
3114 #endif /* no __GTHREADS */
3116 /* Initialize EH context.
3117 This will be called only once, since we change GET_EH_CONTEXT
3118 pointer to another routine. */
/* NOTE(review): elided extract -- #ifdef __GTHREADS and some closing
   braces/#endifs are not visible; the two halves below are the
   threaded and non-threaded variants.  */
3120 static struct eh_context *
3121 eh_context_initialize (void)
3125 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
3126 /* Make sure that get_eh_context does not point to us anymore.
3127 Some systems have dummy thread routines in their libc that
3128 return a success (Solaris 2.6 for example). */
/* If the once-routine failed OR silently did nothing (pointer still
   aimed at us), fall back to the static context.  */
3129 if (__gthread_once (&once, eh_threads_initialize) != 0
3130 || get_eh_context == &eh_context_initialize)
3132 /* Use static version of EH context. */
3133 get_eh_context = &eh_context_static;
3135 #ifdef DWARF2_UNWIND_INFO
/* Same defensive pattern for the register-size table: run it directly
   if __gthread_once was a no-op.  */
3137 static __gthread_once_t once_regsizes = __GTHREAD_ONCE_INIT;
3138 if (__gthread_once (&once_regsizes, init_reg_size_table) != 0
3139 || ! dwarf_reg_size_table_initialized)
3140 init_reg_size_table ();
3144 #else /* no __GTHREADS */
3146 /* Use static version of EH context. */
3147 get_eh_context = &eh_context_static;
3149 #ifdef DWARF2_UNWIND_INFO
3150 init_reg_size_table ();
3153 #endif /* no __GTHREADS */
/* Re-dispatch through whichever accessor was just installed.  */
3155 return (*get_eh_context) ();
3158 /* Return a static EH context. */
/* Single-threaded (or fallback) accessor: one process-wide context,
   zeroed on first use, with a statically allocated two-word element
   heading the dynamic handler chain.
   NOTE(review): the `if (! initialized)' guard and return are elided
   from this extract.  */
3160 static struct eh_context *
3161 eh_context_static (void)
3163 static struct eh_context eh;
3164 static int initialized;
3165 static void *top_elt[2];
3170 memset (&eh, 0, sizeof eh);
3171 eh.dynamic_handler_chain = top_elt;
3177 /* Return a thread specific EH context. */
/* Threaded accessor: fetch this thread's context from TLS, lazily
   creating and registering one on first use.  NOTE(review): the error
   handling after __gthread_setspecific and the return are elided.  */
3179 static struct eh_context *
3180 eh_context_specific (void)
3182 struct eh_context *eh;
3183 eh = (struct eh_context *) __gthread_getspecific (eh_context_key);
3186 eh = new_eh_context ();
3187 if (__gthread_setspecific (eh_context_key, (void *) eh) != 0)
3195 /* Support routines for setjmp/longjmp exception handling. */
3197 /* Calls to __sjthrow are generated by the compiler when an exception
3198 is raised when using the setjmp/longjmp exception handling codegen
3201 #ifdef DONT_USE_BUILTIN_SETJMP
3202 extern void longjmp (void *, int);
3205 /* Routine to get the head of the current thread's dynamic handler chain
3206 use for exception handling. */
/* Returns the ADDRESS of the chain head so the setjmp/longjmp EH code
   can both read and update it.  */
3209 __get_dynamic_handler_chain (void)
3211 struct eh_context *eh = (*get_eh_context) ();
3212 return &eh->dynamic_handler_chain;
3215 /* This is used to throw an exception when the setjmp/longjmp codegen
3216 method is used for exception handling.
3218 We call __terminate if there are no handlers left. Otherwise we run the
3219 cleanup actions off the dynamic cleanup stack, and pop the top of the
3220 dynamic handler chain, and use longjmp to transfer back to the associated
3226 struct eh_context *eh = (*get_eh_context) ();
3227 void ***dhc = &eh->dynamic_handler_chain;
3229 void (*func)(void *, int);
3231 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3232 void ***cleanup = (void***)&(*dhc)[1];
3234 /* If there are any cleanups in the chain, run them now. */
3238 void **buf = (void**)store;
3243 #ifdef DONT_USE_BUILTIN_SETJMP
3244 if (! setjmp (&buf[2]))
3246 if (! __builtin_setjmp (&buf[2]))
3252 func = (void(*)(void*, int))cleanup[0][1];
3253 arg = (void*)cleanup[0][2];
3255 /* Update this before running the cleanup. */
3256 cleanup[0] = (void **)cleanup[0][0];
3269 /* We must call terminate if we try and rethrow an exception, when
3270 there is no exception currently active and when there are no
3272 if (! eh->info || (*dhc)[0] == 0)
3275 /* Find the jmpbuf associated with the top element of the dynamic
3276 handler chain. The jumpbuf starts two words into the buffer. */
3277 jmpbuf = &(*dhc)[2];
3279 /* Then we pop the top element off the dynamic handler chain. */
3280 *dhc = (void**)(*dhc)[0];
3282 /* And then we jump to the handler. */
3284 #ifdef DONT_USE_BUILTIN_SETJMP
3285 longjmp (jmpbuf, 1);
3287 __builtin_longjmp (jmpbuf, 1);
3291 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3292 handler, then pop the handler off the dynamic handler stack, and
3293 then throw. This is used to skip the first handler, and transfer
3294 control to the next handler in the dynamic handler stack. */
3297 __sjpopnthrow (void)
3299 struct eh_context *eh = (*get_eh_context) ();
3300 void ***dhc = &eh->dynamic_handler_chain;
3301 void (*func)(void *, int);
3303 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3304 void ***cleanup = (void***)&(*dhc)[1];
3306 /* If there are any cleanups in the chain, run them now. */
3310 void **buf = (void**)store;
3315 #ifdef DONT_USE_BUILTIN_SETJMP
3316 if (! setjmp (&buf[2]))
3318 if (! __builtin_setjmp (&buf[2]))
3324 func = (void(*)(void*, int))cleanup[0][1];
3325 arg = (void*)cleanup[0][2];
3327 /* Update this before running the cleanup. */
3328 cleanup[0] = (void **)cleanup[0][0];
3341 /* Then we pop the top element off the dynamic handler chain. */
3342 *dhc = (void**)(*dhc)[0];
3347 /* Support code for all exception region-based exception handling. */
/* Ask the language-specific matcher whether the current exception
   matches runtime type info RTIME; returns nonzero on a match.
   NOTE(review): elided extract -- declarations of `info'/`ret' and the
   no-matcher return path are not visible.  */
3350 __eh_rtime_match (void *rtime)
3353 __eh_matcher matcher;
3356 info = *(__get_eh_info ());
3357 matcher = ((__eh_info *)info)->match_function;
/* A missing matcher is a compiler bug; report it when libc is usable.  */
3360 #ifndef inhibit_libc
3361 fprintf (stderr, "Internal Compiler Bug: No runtime type matcher.");
3365 ret = (*matcher) (info, rtime, (void *)0);
3366 return (ret != NULL);
3369 /* This value identifies the place from which an exception is being
3372 #ifdef EH_TABLE_LOOKUP
3378 #ifdef DWARF2_UNWIND_INFO
3380 /* Return the table version of an exception descriptor */
3383 __get_eh_table_version (exception_descriptor *table)
3385 return table->lang.version;
3388 /* Return the originating table language of an exception descriptor */
3391 __get_eh_table_language (exception_descriptor *table)
3393 return table->lang.language;
3396 /* This routine takes a PC and a pointer to the exception region TABLE for
3397 its translation unit, and returns the address of the exception handler
3398 associated with the closest exception table handler entry associated
3399 with that PC, or 0 if there are no table entries the PC fits in.
3401 In the advent of a tie, we have to give the last entry, as it represents
/* NOTE(review): elided extract -- declarations of `pos'/`best', the
   `best = pos' assignments, and the no-match return are not visible.
   Linear scan keyed on the (void *) -1 end-of-table sentinel.  */
3405 old_find_exception_handler (void *pc, old_exception_table *table)
3412 /* We can't do a binary search because the table isn't guaranteed
3413 to be sorted from function to function. */
3414 for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
/* Half-open containment test: start <= pc < end.  */
3416 if (table[pos].start_region <= pc && table[pos].end_region > pc)
3418 /* This can apply. Make sure it is at least as small as
3419 the previous best. */
/* Prefer the most deeply nested (smallest enclosing) region.  */
3420 if (best == -1 || (table[pos].end_region <= table[best].end_region
3421 && table[pos].start_region >= table[best].start_region))
3424 /* But it is sorted by starting PC within a function. */
/* Once entries start past PC within this function, no better match
   can follow -- presumably an early exit; elided here.  */
3425 else if (best >= 0 && table[pos].start_region > pc)
3429 return table[best].exception_handler;
3435 /* find_exception_handler finds the correct handler, if there is one, to
3436 handle an exception.
3437 returns a pointer to the handler which controlled should be transferred
3438 to, or NULL if there is nothing left.
3440 PC - pc where the exception originates. If this is a rethrow,
3441 then this starts out as a pointer to the exception table
3442 entry we wish to rethrow out of.
3443 TABLE - exception table for the current module.
3444 EH_INFO - eh info pointer for this exception.
3445 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3446 CLEANUP - returned flag indicating whether this is a cleanup handler.
/* NOTE(review): heavily elided extract -- the old-table dispatch,
   `pos' declaration, cleanup-flag updates, break/return statements and
   most braces are not visible.  Comments cover visible lines only.  */
3449 find_exception_handler (void *pc, exception_descriptor *table,
3450 __eh_info *eh_info, int rethrow, int *cleanup)
3453 void *retval = NULL;
3458 /* The new model assumed the table is sorted inner-most out so the
3459 first region we find which matches is the correct one */
3461 exception_table *tab = &(table->table[0]);
3463 /* Subtract 1 from the PC to avoid hitting the next region */
3466 /* pc is actually the region table entry to rethrow out of */
/* Rethrow: recover the table index from the entry pointer, then
   resume the scan using that region's last covered address.  */
3467 pos = ((exception_table *) pc) - tab;
3468 pc = ((exception_table *) pc)->end_region - 1;
3470 /* The label is always on the LAST handler entry for a region,
3471 so we know the next entry is a different region, even if the
3472 addresses are the same. Make sure its not end of table tho. */
3473 if (tab[pos].start_region != (void *) -1)
3479 /* We can't do a binary search because the table is in inner-most
3480 to outermost address ranges within functions */
3481 for ( ; tab[pos].start_region != (void *) -1; pos++)
3483 if (tab[pos].start_region <= pc && tab[pos].end_region > pc)
/* Entry with match_info: defer to the language matcher; match info
   without a matcher is defined NOT to match.  */
3485 if (tab[pos].match_info)
3487 __eh_matcher matcher = eh_info->match_function;
3488 /* match info but no matcher is NOT a match */
3491 void *ret = (*matcher)((void *) eh_info,
3492 tab[pos].match_info, table);
3496 retval = tab[pos].exception_handler;
/* Entry without match_info: a cleanup handler -- always applies.  */
3505 retval = tab[pos].exception_handler;
3512 #endif /* DWARF2_UNWIND_INFO */
3513 #endif /* EH_TABLE_LOOKUP */
3515 #ifdef DWARF2_UNWIND_INFO
3516 /* Support code for exception handling using static unwind information. */
3520 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3521 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3522 avoid a warning about casting between int and pointer of different
3525 typedef int ptr_type __attribute__ ((mode (pointer)));
3527 #ifdef INCOMING_REGNO
3528 /* Is the saved value for register REG in frame UDATA stored in a register
3529 window in the previous frame? */
3531 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3532 to use the macro here. One wonders, though, that perhaps TARGET_FLAT
3533 compiled functions won't work with the frame-unwind stuff here.
3534 Perhaps the entireity of in_reg_window should be conditional on having
3535 seen a DW_CFA_GNU_window_save? */
3536 #define target_flags 0
/* NOTE(review): elided extract -- `return 0;' fallthroughs, #else and
   some braces are not visible.  Window-register heuristic: a register
   saved in itself, or saved at an offset on the side of the CFA where
   the caller's window lives.  */
3539 in_reg_window (int reg, frame_state *udata)
3541 if (udata->saved[reg] == REG_SAVED_REG)
3542 return INCOMING_REGNO (reg) == reg;
3543 if (udata->saved[reg] != REG_SAVED_OFFSET)
3546 #ifdef STACK_GROWS_DOWNWARD
3547 return udata->reg_or_offset[reg] > 0;
3549 return udata->reg_or_offset[reg] < 0;
/* Fallback for targets without INCOMING_REGNO: never a register
   window (body, presumably `return 0;', elided).  */
3554 in_reg_window (int reg __attribute__ ((__unused__)),
3555 frame_state *udata __attribute__ ((__unused__)))
3559 #endif /* INCOMING_REGNO */
3561 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3562 frame called by UDATA or 0. */
/* NOTE(review): elided extract -- the register-window branch body and
   the final abort/fallthrough are not visible.  */
3565 get_reg_addr (unsigned reg, frame_state *udata, frame_state *sub_udata)
/* Chase REG_SAVED_REG indirections: the value lives in another
   register's save slot.  */
3567 while (udata->saved[reg] == REG_SAVED_REG)
3569 reg = udata->reg_or_offset[reg];
/* Window registers are found in the called frame SUB_UDATA instead
   (branch body elided).  */
3570 if (in_reg_window (reg, udata))
/* Normal case: the slot is at a fixed offset from the frame's CFA.  */
3576 if (udata->saved[reg] == REG_SAVED_OFFSET)
3577 return (word_type *)(udata->cfa + udata->reg_or_offset[reg]);
3582 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3583 frame called by UDATA or 0. */
/* Load one word from the save slot; the double cast through ptr_type
   handles ABIs where void * is narrower than word_type.  */
3585 static inline void *
3586 get_reg (unsigned reg, frame_state *udata, frame_state *sub_udata)
3588 return (void *)(ptr_type) *get_reg_addr (reg, udata, sub_udata);
3591 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
/* NOTE(review): storage class/return type and braces are not visible in
   this extract.  Only word-sized registers can be stored this way; see
   copy_reg below for larger ones.  */
3594 put_reg (unsigned reg, void *val, frame_state *udata)
3596 *get_reg_addr (reg, udata, NULL) = (word_type)(ptr_type) val;
3599 /* Copy the saved value for register REG from frame UDATA to frame
3600 TARGET_UDATA. Unlike the previous two functions, this can handle
3601 registers that are not one word large. */
3604 copy_reg (unsigned reg, frame_state *udata, frame_state *target_udata)
3606 word_type *preg = get_reg_addr (reg, udata, NULL);
3607 word_type *ptreg = get_reg_addr (reg, target_udata, NULL);
/* dwarf_reg_size_table gives the register's true byte width, so this
   handles multi-word (e.g. FP) registers correctly.  */
3609 memcpy (ptreg, preg, dwarf_reg_size_table [reg]);
3612 /* Retrieve the return address for frame UDATA. */
3614 static inline void *
3615 get_return_addr (frame_state *udata, frame_state *sub_udata)
/* Undo any target encoding (e.g. signing/masking) applied when the
   return address was stored, via the builtin.  */
3617 return __builtin_extract_return_addr
3618 (get_reg (udata->retaddr_column, udata, sub_udata));
3621 /* Overwrite the return address for frame UDATA with VAL. */
/* NOTE(review): storage class/return type and braces are missing from
   this extract.  */
3624 put_return_addr (void *val, frame_state *udata)
/* Re-apply the target's return-address encoding before storing.  */
3626 val = __builtin_frob_return_addr (val);
3627 put_reg (udata->retaddr_column, val, udata);
3630 /* Given the current frame UDATA and its return address PC, return the
3631 information about the calling frame in CALLER_UDATA. */
/* NOTE(review): the null-check after __frame_state_for and the `else`
   keyword before the second cfa assignment are not visible in this
   extract; presumably __frame_state_for returning 0 propagates 0 here.  */
3634 next_stack_level (void *pc, frame_state *udata, frame_state *caller_udata)
3636 caller_udata = __frame_state_for (pc, caller_udata)
3640 /* Now go back to our caller's stack frame. If our caller's CFA register
3641 was saved in our stack frame, restore it; otherwise, assume the CFA
3642 register is SP and restore it to our CFA value. */
3643 if (udata->saved[caller_udata->cfa_reg])
3644 caller_udata->cfa = get_reg (caller_udata->cfa_reg, udata, 0);
3646 caller_udata->cfa = udata->cfa;
3647 caller_udata->cfa += caller_udata->cfa_offset;
3649 return caller_udata;
3652 /* Hook to call before __terminate if only cleanup handlers remain. */
/* NOTE(review): the body is not visible here; from the throw_helper
   comment below it is presumably an empty function that exists solely
   as a debugger breakpoint target.  */
3654 __unwinding_cleanup (void)
3658 /* throw_helper performs some of the common grunt work for a throw. This
3659 routine is called by throw and rethrows. This is pretty much split
3660 out from the old __throw routine. An addition has been added which allows
3661 for a dummy call to a routine __unwinding_cleanup() when there are nothing
3662 but cleanups remaining. This allows a debugger to examine the state
3663 at which the throw was executed, before any cleanups, rather than
3664 at the terminate point after the stack has been unwound.
3666 EH is the current eh_context structure.
3667 PC is the address of the call to __throw.
3668 MY_UDATA is the unwind information for __throw.
3669 OFFSET_P is where we return the SP adjustment offset. */
/* NOTE(review): this extract is heavily gapped -- the return type,
   braces, several declarations (i, cleanup, args_size, new_eh_model),
   the cleanup-state save/restore details, and the handler-found exits
   of the search loop are missing.  Comments below describe only what
   the visible lines establish.  */
3672 throw_helper (struct eh_context *eh, void *pc, frame_state *my_udata,
3675 frame_state ustruct2, *udata = &ustruct2;
3676 frame_state ustruct;
3677 frame_state *sub_udata = &ustruct;
3678 void *saved_pc = pc;
3680 void *handler_p = 0;
3682 frame_state saved_ustruct;
3685 int only_cleanup = 0;
3687 int saved_state = 0;
3689 __eh_info *eh_info = (__eh_info *)eh->info;
3691 /* Do we find a handler based on a re-throw PC? */
3692 if (eh->table_index != (void *) 0)
/* Phase 1: search outward from __throw's own frame for a handler.  */
3695 memcpy (udata, my_udata, sizeof (*udata));
3697 handler = (void *) 0;
3700 frame_state *p = udata;
3701 udata = next_stack_level (pc, udata, sub_udata);
3704 /* If we couldn't find the next frame, we lose. */
3708 if (udata->eh_ptr == NULL)
/* Distinguish new-model EH tables by their runtime_id_field magic.  */
3711 new_eh_model = (((exception_descriptor *)(udata->eh_ptr))->
3712 runtime_id_field == NEW_EH_RUNTIME);
3717 handler = find_exception_handler (eh->table_index, udata->eh_ptr,
3718 eh_info, 1, &cleanup);
3719 eh->table_index = (void *)0;
3723 handler = find_exception_handler (pc, udata->eh_ptr, eh_info,
3726 handler = old_find_exception_handler (pc, udata->eh_ptr);
3728 /* If we found one, we can stop searching, if its not a cleanup.
3729 for cleanups, we save the state, and keep looking. This allows
3730 us to call a debug hook if there are nothing but cleanups left. */
3737 saved_ustruct = *udata;
3738 handler_p = handler;
3751 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
3752 hitting the beginning of the next region. */
3753 pc = get_return_addr (udata, sub_udata) - 1;
/* Only cleanups were found: restore the saved cleanup state and give
   the debugger its hook before unwinding.  */
3758 udata = &saved_ustruct;
3759 handler = handler_p;
3762 __unwinding_cleanup ();
3765 /* If we haven't found a handler by now, this is an unhandled
3770 eh->handler_label = handler;
3772 args_size = udata->args_size;
3775 /* We found a handler in the throw context, no need to unwind. */
3781 /* Unwind all the frames between this one and the handler by copying
3782 their saved register values into our register save slots. */
3784 /* Remember the PC where we found the handler. */
3785 void *handler_pc = pc;
3787 /* Start from the throw context again. */
3789 memcpy (udata, my_udata, sizeof (*udata));
/* Phase 2: walk the frames again, merging each frame's saved registers
   into __throw's own save area so the eh_return lands in the handler's
   context.  */
3791 while (pc != handler_pc)
3793 frame_state *p = udata;
3794 udata = next_stack_level (pc, udata, sub_udata);
3797 for (i = 0; i < DWARF_FRAME_REGISTERS; ++i)
3798 if (i != udata->retaddr_column && udata->saved[i])
3800 /* If you modify the saved value of the return address
3801 register on the SPARC, you modify the return address for
3802 your caller's frame. Don't do that here, as it will
3803 confuse get_return_addr. */
3804 if (in_reg_window (i, udata)
3805 && udata->saved[udata->retaddr_column] == REG_SAVED_REG
3806 && udata->reg_or_offset[udata->retaddr_column] == i)
3808 copy_reg (i, udata, my_udata);
3811 pc = get_return_addr (udata, sub_udata) - 1;
3814 /* But we do need to update the saved return address register from
3815 the last frame we unwind, or the handler frame will have the wrong
3817 if (udata->saved[udata->retaddr_column] == REG_SAVED_REG)
3819 i = udata->reg_or_offset[udata->retaddr_column];
3820 if (in_reg_window (i, udata))
3821 copy_reg (i, udata, my_udata);
3824 /* udata now refers to the frame called by the handler frame. */
3826 /* We adjust SP by the difference between __throw's CFA and the CFA for
3827 the frame called by the handler frame, because those CFAs correspond
3828 to the SP values at the two call sites. We need to further adjust by
3829 the args_size of the handler frame itself to get the handler frame's
3830 SP from before the args were pushed for that call. */
3831 #ifdef STACK_GROWS_DOWNWARD
3832 *offset_p = udata->cfa - my_udata->cfa + args_size;
3834 *offset_p = my_udata->cfa - udata->cfa - args_size;
3841 /* We first search for an exception handler, and if we don't find
3842 it, we call __terminate on the current stack frame so that we may
3843 use the debugger to walk the stack and understand why no handler
3846 If we find one, then we unwind the frames down to the one that
3847 has the handler and transfer control into the handler. */
3849 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
/* NOTE(review): the function header line, the `label:` used by the
   address-of-label below, declarations of pc/handler/offset, and the
   rethrow-without-exception terminate check are missing from this
   extract.  */
3854 struct eh_context *eh = (*get_eh_context) ();
3858 /* XXX maybe make my_ustruct static so we don't have to look it up for
3860 frame_state my_ustruct, *my_udata = &my_ustruct;
3862 /* This is required for C++ semantics. We must call terminate if we
3863 try and rethrow an exception, when there is no exception currently
3868 /* Start at our stack frame. */
3870 my_udata = __frame_state_for (&&label, my_udata);
3874 /* We need to get the value from the CFA register. */
3875 my_udata->cfa = __builtin_dwarf_cfa ();
3877 /* Do any necessary initialization to access arbitrary stack frames.
3878 On the SPARC, this means flushing the register windows. */
3879 __builtin_unwind_init ();
3881 /* Now reset pc to the right throw point. */
3882 pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3884 handler = throw_helper (eh, pc, my_udata, &offset);
/* Jump into the handler: restores the merged register state and
   adjusts SP by OFFSET.  Does not return.  */
3888 __builtin_eh_return ((void *)eh, offset, handler);
3890 /* Epilogue: restore the handler frame's register values and return
3894 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
/* Rethrow from the region identified by table INDEX.  Structured like
   __throw above; NOTE(review): this extract likewise omits the braces,
   the `label:`, local declarations, and the no-current-exception
   terminate check.  */
3897 __rethrow (void *index)
3899 struct eh_context *eh = (*get_eh_context) ();
3903 /* XXX maybe make my_ustruct static so we don't have to look it up for
3905 frame_state my_ustruct, *my_udata = &my_ustruct;
3907 /* This is required for C++ semantics. We must call terminate if we
3908 try and rethrow an exception, when there is no exception currently
3913 /* This is the table index we want to rethrow from. The value of
3914 the END_REGION label is used for the PC of the throw, and the
3915 search begins with the next table entry. */
3916 eh->table_index = index;
3918 /* Start at our stack frame. */
3920 my_udata = __frame_state_for (&&label, my_udata);
3924 /* We need to get the value from the CFA register. */
3925 my_udata->cfa = __builtin_dwarf_cfa ();
3927 /* Do any necessary initialization to access arbitrary stack frames.
3928 On the SPARC, this means flushing the register windows. */
3929 __builtin_unwind_init ();
3931 /* Now reset pc to the right throw point. */
3932 pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3934 handler = throw_helper (eh, pc, my_udata, &offset);
/* Transfer control to the located handler; does not return.  */
3938 __builtin_eh_return ((void *)eh, offset, handler);
3940 /* Epilogue: restore the handler frame's register values and return
3943 #endif /* DWARF2_UNWIND_INFO */
3948 #ifndef inhibit_libc
3949 /* This gets us __GNU_LIBRARY__. */
3950 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
3953 #ifdef __GNU_LIBRARY__
3954 /* Avoid forcing the library's meaning of `write' on the user program
3955 by using the "internal" name (for use within the library) */
3956 #define write(fd, buf, n) __write((fd), (buf), (n))
3958 #endif /* inhibit_libc */
3960 #define MESSAGE "pure virtual method called\n"
3963 __pure_virtual (void)
3965 #ifndef inhibit_libc
3966 write (2, MESSAGE, sizeof (MESSAGE) - 1);