1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
/* NOTE(review): this copy of the file appears to have lines elided
   (matching #endif lines and some comment tails are missing throughout).
   The comments below describe the intended configuration defaults;
   confirm exact text against the upstream dwarf2cfi.c.  */
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
/* Fallback: targets without a usable incoming-return-address rtx must
   never reach code that asks for one.  */
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
58 /* The size of the target's pointer type. */
60 #define PTR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
63 /* Maximum size (in bytes) of an artificially generated label. */
64 #define MAX_ARTIFICIAL_LABEL_BYTES 30
66 /* The size of addresses as they appear in the Dwarf 2 data.
67 Some architectures use word addresses to refer to code locations,
68 but Dwarf 2 info always uses byte addresses. On such machines,
69 Dwarf 2 addresses need to be larger than the architecture's
71 #ifndef DWARF2_ADDR_SIZE
72 #define DWARF2_ADDR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
75 /* The size in bytes of a DWARF field indicating an offset or length
76 relative to a debug info section, specified to be 4 bytes in the
77 DWARF-2 specification. The SGI/MIPS ABI defines it to be the same
80 #ifndef DWARF_OFFSET_SIZE
81 #define DWARF_OFFSET_SIZE 4
84 /* According to the (draft) DWARF 3 specification, the initial length
85 should either be 4 or 12 bytes. When it's 12 bytes, the first 4
86 bytes are 0xffffffff, followed by the length stored in the next 8
89 However, the SGI/MIPS ABI uses an initial length which is equal to
90 DWARF_OFFSET_SIZE. It is defined (elsewhere) accordingly. */
92 #ifndef DWARF_INITIAL_LENGTH_SIZE
93 #define DWARF_INITIAL_LENGTH_SIZE (DWARF_OFFSET_SIZE == 4 ? 4 : 12)
96 /* Round SIZE up to the nearest BOUNDARY. */
97 #define DWARF_ROUND(SIZE,BOUNDARY) \
98 ((((SIZE) + (BOUNDARY) - 1) / (BOUNDARY)) * (BOUNDARY))
100 /* Offsets recorded in opcodes are a multiple of this alignment factor.
    Negative when the stack grows downward so that factored offsets stay
    positive in the common case.  */
101 #ifndef DWARF_CIE_DATA_ALIGNMENT
102 #ifdef STACK_GROWS_DOWNWARD
103 #define DWARF_CIE_DATA_ALIGNMENT (-((int) UNITS_PER_WORD))
105 #define DWARF_CIE_DATA_ALIGNMENT ((int) UNITS_PER_WORD)
109 /* CIE identifier. */
110 #if HOST_BITS_PER_WIDE_INT >= 64
111 #define DWARF_CIE_ID \
112 (unsigned HOST_WIDE_INT) (DWARF_OFFSET_SIZE == 4 ? DW_CIE_ID : DW64_CIE_ID)
114 #define DWARF_CIE_ID DW_CIE_ID
117 /* The DWARF 2 CFA column which tracks the return address. Normally this
118 is the column for PC, or the first column after all of the hard
120 #ifndef DWARF_FRAME_RETURN_COLUMN
122 #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (PC_REGNUM)
124 #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGISTERS
128 /* The mapping from gcc register number to DWARF 2 CFA column number. By
129 default, we just provide columns for all registers. */
130 #ifndef DWARF_FRAME_REGNUM
131 #define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG)
141 /* A vector of call frame insns for the CIE. */
/* NOTE(review): the vector declaration the comment above described
   appears elided in this copy; the counter below numbers generated
   "LCFI" labels (see dwarf2out_cfi_label).  */
144 static GTY(()) unsigned long dwarf2out_cfi_label_num;
146 /* The insn after which a new CFI note should be emitted. */
149 /* When non-null, add_cfi will add the CFI to this vector. */
150 static cfi_vec *add_cfi_vec;
152 /* True if remember_state should be emitted before following CFI directive. */
153 static bool emit_cfa_remember;
155 /* True if any CFI directives were emitted at the current insn. */
156 static bool any_cfis_emitted;
/* Forward declarations for the epilogue save/restore entry points.  */
159 static void dwarf2out_cfi_begin_epilogue (rtx insn);
160 static void dwarf2out_frame_debug_restore_state (void);
163 /* Hook used by __throw. */
/* Return, as a CONST_INT rtx, the DWARF CFA column of the stack
   pointer after mapping through the .eh_frame output translation
   (DWARF2_FRAME_REG_OUT with FOR_EH = 1).  */
166 expand_builtin_dwarf_sp_column (void)
168 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
169 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
172 /* MEM is a memory reference for the register size table, each element of
173 which has mode MODE. Initialize column C as a return address column. */
166 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
178 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
/* The return address is pointer-sized, so record GET_MODE_SIZE (Pmode)
   as the saved size for column C.  */
179 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
180 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
183 /* Generate code to initialize the register size table. */
/* ADDRESS is a tree for the base of the table; for each hard register
   whose DWARF column fits in the table, emit a store of the size of
   the mode in which that register is saved.  Used by the unwinder via
   __builtin_init_dwarf_reg_size_table.  */
186 expand_builtin_init_dwarf_reg_sizes (tree address)
189 enum machine_mode mode = TYPE_MODE (char_type_node);
190 rtx addr = expand_normal (address);
191 rtx mem = gen_rtx_MEM (BLKmode, addr);
192 bool wrote_return_column = false;
194 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
196 int rnum = DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), 1);
/* Skip registers whose output column lies outside the table.  */
198 if (rnum < DWARF_FRAME_REGISTERS)
200 HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
201 enum machine_mode save_mode = reg_raw_mode[i];
/* Partially call-clobbered registers are saved in the mode that
   covers only the preserved part.  */
204 if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
205 save_mode = choose_hard_reg_mode (i, 1, true);
206 if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN)
208 if (save_mode == VOIDmode)
210 wrote_return_column = true;
212 size = GET_MODE_SIZE (save_mode);
216 emit_move_insn (adjust_address (mem, mode, offset),
217 gen_int_mode (size, mode));
/* Make sure the return-address column always has an entry, even if
   no hard register mapped onto it above.  */
221 if (!wrote_return_column)
222 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
224 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
225 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
/* Let the target record any extra, target-specific sizes.  */
228 targetm.init_dwarf_reg_sizes_extra (address);
231 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
233 static inline HOST_WIDE_INT
234 div_data_align (HOST_WIDE_INT off)
236 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
/* OFF must be an exact multiple of the data alignment factor;
   anything else indicates a factoring bug in the caller.  */
237 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
241 /* Return true if we need a signed version of a given opcode
242 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
245 need_data_align_sf_opcode (HOST_WIDE_INT off)
/* The unsigned opcodes encode OFF / DWARF_CIE_DATA_ALIGNMENT as a
   ULEB128, so the signed form is needed exactly when that quotient
   would be negative.  */
247 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
250 /* Return a pointer to a newly allocated Call Frame Instruction. */
252 static inline dw_cfi_ref
254 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
/* Zero both operands so callers may set only the ones they need.  */
257 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
258 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
263 /* Generate a new label for the CFI info to refer to. */
/* Returns a freshly xstrdup'd "LCFI<n>" internal label; the caller
   owns the string.  */
266 dwarf2out_cfi_label (void)
268 int num = dwarf2out_cfi_label_num++;
271 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
273 return xstrdup (label);
276 /* Add CFI either to the current insn stream or to a vector, or both. */
279 add_cfi (dw_cfi_ref cfi)
/* If a state save is pending, emit the DW_CFA_remember_state first so
   it precedes CFI in the output stream.  The recursive call is safe:
   emit_cfa_remember is cleared before recursing.  */
281 if (emit_cfa_remember)
283 dw_cfi_ref cfi_remember;
285 /* Emit the state save. */
286 emit_cfa_remember = false;
287 cfi_remember = new_cfi ();
288 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
289 add_cfi (cfi_remember);
292 any_cfis_emitted = true;
/* Attach the CFI as a NOTE_INSN_CFI after the current insn, and/or
   push it onto the collection vector if one is active.  */
293 if (cfi_insn != NULL)
295 cfi_insn = emit_note_after (NOTE_INSN_CFI, cfi_insn);
296 NOTE_CFI (cfi_insn) = cfi;
298 if (add_cfi_vec != NULL)
299 VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
302 /* This function fills in aa dw_cfa_location structure from a dwarf location
303 descriptor sequence. */
/* Walks LOC interpreting DW_OP_reg*, DW_OP_breg*, DW_OP_regx/bregx and
   DW_OP_plus_uconst into CFA->reg / CFA->base_offset / CFA->offset.
   NOTE(review): the switch's case labels are elided in this copy;
   the groupings below are inferred from the operand accesses.  */
306 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
308 struct dw_loc_descr_struct *ptr;
310 cfa->base_offset = 0;
314 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
316 enum dwarf_location_atom op = ptr->dw_loc_opc;
/* DW_OP_reg0 .. DW_OP_reg31: register number encoded in the opcode.  */
352 cfa->reg = op - DW_OP_reg0;
/* DW_OP_regx: register number in the first operand.  */
355 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
/* DW_OP_breg0 .. DW_OP_breg31: base register plus operand offset.  */
389 cfa->reg = op - DW_OP_breg0;
390 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
/* DW_OP_bregx: register and offset both in operands.  */
393 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
394 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
399 case DW_OP_plus_uconst:
400 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
408 /* Find the previous value for the CFA, iteratively. CFI is the opcode
409 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
410 one level of remember/restore state processing. */
413 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
415 switch (cfi->dw_cfi_opc)
/* Offset-only redefinition: CFA register is unchanged.  */
417 case DW_CFA_def_cfa_offset:
418 case DW_CFA_def_cfa_offset_sf:
419 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
/* Register-only redefinition: offset is unchanged.  */
421 case DW_CFA_def_cfa_register:
422 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
/* Full redefinition: both register and offset.  */
425 case DW_CFA_def_cfa_sf:
426 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
427 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
/* Expression form: decode the location description back into *LOC.  */
429 case DW_CFA_def_cfa_expression:
430 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
/* Only one level of remember/restore nesting is supported here.  */
433 case DW_CFA_remember_state:
434 gcc_assert (!remember->in_use);
436 remember->in_use = 1;
438 case DW_CFA_restore_state:
439 gcc_assert (remember->in_use);
441 remember->in_use = 0;
449 /* The current rule for calculating the DWARF2 canonical frame address. */
450 static dw_cfa_location cfa;
452 /* A copy of the CFA, for comparison purposes. */
453 static dw_cfa_location old_cfa;
455 /* The register used for saving registers to the stack, and its offset
457 static dw_cfa_location cfa_store;
459 /* The current save location around an epilogue. */
460 static dw_cfa_location cfa_remember;
462 /* Like cfa_remember, but a copy of old_cfa. */
463 static dw_cfa_location old_cfa_remember;
465 /* The running total of the size of arguments pushed onto the stack. */
466 static HOST_WIDE_INT args_size;
468 /* The last args_size we actually output. */
/* Used by dwarf2out_args_size to suppress redundant DW_CFA_GNU_args_size.  */
469 static HOST_WIDE_INT old_args_size;
471 /* Determine if two dw_cfa_location structures define the same data. */
474 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
/* base_offset only matters for indirect locations.  */
476 return (loc1->reg == loc2->reg
477 && loc1->offset == loc2->offset
478 && loc1->indirect == loc2->indirect
479 && (loc1->indirect == 0
480 || loc1->base_offset == loc2->base_offset));
483 /* This routine does the actual work. The CFA is now calculated from
484 the dw_cfa_location structure. */
/* Emits the cheapest DW_CFA_def_cfa* instruction that transforms
   old_cfa into *LOC_P, then records *LOC_P as the new old_cfa.  */
487 def_cfa_1 (dw_cfa_location *loc_p)
/* Keep cfa_store's offset in sync when it tracks the same register.  */
495 if (cfa_store.reg == loc.reg && loc.indirect == 0)
496 cfa_store.offset = loc.offset;
498 loc.reg = DWARF_FRAME_REGNUM (loc.reg);
500 /* If nothing changed, no need to issue any call frame instructions. */
501 if (cfa_equal_p (&loc, &old_cfa))
506 if (loc.reg == old_cfa.reg && !loc.indirect && !old_cfa.indirect)
508 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
509 the CFA register did not change but the offset did. The data
510 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
511 in the assembler via the .cfi_def_cfa_offset directive. */
513 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
515 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
516 cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
519 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
520 else if (loc.offset == old_cfa.offset
521 && old_cfa.reg != INVALID_REGNUM
523 && !old_cfa.indirect)
525 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
526 indicating the CFA register has changed to <register> but the
527 offset has not changed. */
528 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
529 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
533 else if (loc.indirect == 0)
535 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
536 indicating the CFA register has changed to <register> with
537 the specified offset. The data factoring for DW_CFA_def_cfa_sf
538 happens in output_cfi, or in the assembler via the .cfi_def_cfa
541 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
543 cfi->dw_cfi_opc = DW_CFA_def_cfa;
544 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
545 cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
549 /* Construct a DW_CFA_def_cfa_expression instruction to
550 calculate the CFA using a full location expression since no
551 register-offset pair is available. */
552 struct dw_loc_descr_struct *loc_list;
554 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
555 loc_list = build_cfa_loc (&loc, 0);
556 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
563 /* Add the CFI for saving a register. REG is the CFA column number.
564 If SREG is -1, the register is saved at OFFSET from the CFA;
565 otherwise it is saved in SREG. */
568 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
570 dw_fde_ref fde = cfun ? cfun->fde : NULL;
571 dw_cfi_ref cfi = new_cfi ();
573 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
575 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
577 && fde->stack_realign
578 && sreg == INVALID_REGNUM
580 cfi->dw_cfi_opc = DW_CFA_expression;
581 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
582 cfi->dw_cfi_oprnd2.dw_cfi_loc
583 = build_cfa_aligned_loc (&cfa, offset, fde->stack_realignment);
/* Saved at an offset from the CFA: pick the narrowest opcode that can
   encode REG and the (data-aligned) offset.  */
585 else if (sreg == INVALID_REGNUM)
587 if (need_data_align_sf_opcode (offset))
588 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
/* DW_CFA_offset packs the register into the low 6 opcode bits, so
   columns >= 64 need the extended form.  */
589 else if (reg & ~0x3f)
590 cfi->dw_cfi_opc = DW_CFA_offset_extended;
592 cfi->dw_cfi_opc = DW_CFA_offset;
593 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
/* Saved in itself: the register still holds its entry value.  */
595 else if (sreg == reg)
596 cfi->dw_cfi_opc = DW_CFA_same_value;
/* Otherwise saved in another register.  */
599 cfi->dw_cfi_opc = DW_CFA_register;
600 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
606 /* Record the initial position of the return address. RTL is
607 INCOMING_RETURN_ADDR_RTX. */
610 initial_return_save (rtx rtl)
612 unsigned int reg = INVALID_REGNUM;
613 HOST_WIDE_INT offset = 0;
615 switch (GET_CODE (rtl))
618 /* RA is in a register. */
619 reg = DWARF_FRAME_REGNUM (REGNO (rtl));
623 /* RA is on the stack. */
/* Decode the MEM address: plain SP, SP+const, or SP-const.  */
625 switch (GET_CODE (rtl))
628 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
633 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
634 offset = INTVAL (XEXP (rtl, 1));
638 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
639 offset = -INTVAL (XEXP (rtl, 1));
649 /* The return address is at some offset from any value we can
650 actually load. For instance, on the SPARC it is in %i7+8. Just
651 ignore the offset for now; it doesn't matter for unwinding frames. */
652 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
653 initial_return_save (XEXP (rtl, 0));
/* Record the save unless the RA already lives in its own column.  */
660 if (reg != DWARF_FRAME_RETURN_COLUMN)
661 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa.offset);
664 /* Given a SET, calculate the amount of stack adjustment it
/* ... contains.  CUR_ARGS_SIZE and CUR_OFFSET give the running context
   so "reset SP from another register" can be converted into the
   equivalent pop of all pushed arguments.  */
668 stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
669 HOST_WIDE_INT cur_offset)
671 const_rtx src = SET_SRC (pattern);
672 const_rtx dest = SET_DEST (pattern);
673 HOST_WIDE_INT offset = 0;
676 if (dest == stack_pointer_rtx)
678 code = GET_CODE (src);
680 /* Assume (set (reg sp) (reg whatever)) sets args_size
682 if (code == REG && src != stack_pointer_rtx)
684 offset = -cur_args_size;
685 #ifndef STACK_GROWS_DOWNWARD
688 return offset - cur_offset;
/* Only SP = SP +/- const is a recognizable explicit adjustment.  */
691 if (! (code == PLUS || code == MINUS)
692 || XEXP (src, 0) != stack_pointer_rtx
693 || !CONST_INT_P (XEXP (src, 1)))
696 /* (set (reg sp) (plus (reg sp) (const_int))) */
697 offset = INTVAL (XEXP (src, 1));
/* Otherwise look for push-style addressing on the destination MEM.  */
703 if (MEM_P (src) && !MEM_P (dest))
707 /* (set (mem (pre_dec (reg sp))) (foo)) */
708 src = XEXP (dest, 0);
709 code = GET_CODE (src);
/* PRE_MODIFY: adjustment amount is the constant in (plus sp const).  */
715 if (XEXP (src, 0) == stack_pointer_rtx)
717 rtx val = XEXP (XEXP (src, 1), 1);
718 /* We handle only adjustments by constant amount. */
719 gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
720 && CONST_INT_P (val));
721 offset = -INTVAL (val);
/* PRE_DEC-style push: stack shrinks by the size of the stored mode.  */
728 if (XEXP (src, 0) == stack_pointer_rtx)
730 offset = GET_MODE_SIZE (GET_MODE (dest));
/* PRE_INC-style: stack grows by the size of the stored mode.  */
737 if (XEXP (src, 0) == stack_pointer_rtx)
739 offset = -GET_MODE_SIZE (GET_MODE (dest));
754 /* Precomputed args_size for CODE_LABELs and BARRIERs preceeding them,
755 indexed by INSN_UID. */
/* -1 means "not yet computed"; allocated lazily by
   compute_barrier_args_size.  */
757 static HOST_WIDE_INT *barrier_args_size;
759 /* Helper function for compute_barrier_args_size. Handle one insn. */
/* Returns the updated args_size after INSN; jump targets whose
   args_size becomes known are pushed onto *NEXT for later scanning.  */
762 compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
763 VEC (rtx, heap) **next)
765 HOST_WIDE_INT offset = 0;
768 if (! RTX_FRAME_RELATED_P (insn))
770 if (prologue_epilogue_contains (insn))
772 else if (GET_CODE (PATTERN (insn)) == SET)
773 offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
774 else if (GET_CODE (PATTERN (insn)) == PARALLEL
775 || GET_CODE (PATTERN (insn)) == SEQUENCE)
777 /* There may be stack adjustments inside compound insns. Search
779 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
780 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
781 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
782 cur_args_size, offset);
/* Frame-related insns: honor REG_FRAME_RELATED_EXPR when present.  */
787 rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
791 expr = XEXP (expr, 0);
792 if (GET_CODE (expr) == PARALLEL
793 || GET_CODE (expr) == SEQUENCE)
794 for (i = 1; i < XVECLEN (expr, 0); i++)
796 rtx elem = XVECEXP (expr, 0, i);
798 if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
799 offset += stack_adjust_offset (elem, cur_args_size, offset);
804 #ifndef STACK_GROWS_DOWNWARD
808 cur_args_size += offset;
/* args_size can never go negative; clamp below (elided here).  */
809 if (cur_args_size < 0)
/* Propagate the computed size to the jump target if unseen.  */
814 rtx dest = JUMP_LABEL (insn);
818 if (barrier_args_size [INSN_UID (dest)] < 0)
820 barrier_args_size [INSN_UID (dest)] = cur_args_size;
821 VEC_safe_push (rtx, heap, *next, dest);
826 return cur_args_size;
829 /* Walk the whole function and compute args_size on BARRIERs. */
/* Worklist-based forward propagation: starts at the function entry
   with args_size 0 and flows sizes across jumps and fallthroughs
   until all reachable labels/barriers are annotated.  */
832 compute_barrier_args_size (void)
834 int max_uid = get_max_uid (), i;
836 VEC (rtx, heap) *worklist, *next, *tmp;
838 barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
839 for (i = 0; i < max_uid; i++)
840 barrier_args_size[i] = -1;
842 worklist = VEC_alloc (rtx, heap, 20);
843 next = VEC_alloc (rtx, heap, 20);
845 barrier_args_size[INSN_UID (insn)] = 0;
846 VEC_quick_push (rtx, worklist, insn);
849 while (!VEC_empty (rtx, worklist))
851 rtx prev, body, first_insn;
852 HOST_WIDE_INT cur_args_size;
854 first_insn = insn = VEC_pop (rtx, worklist);
855 cur_args_size = barrier_args_size[INSN_UID (insn)];
/* A BARRIER immediately before the label shares its args_size.  */
856 prev = prev_nonnote_insn (insn);
857 if (prev && BARRIER_P (prev))
858 barrier_args_size[INSN_UID (prev)] = cur_args_size;
860 for (; insn; insn = NEXT_INSN (insn))
862 if (INSN_DELETED_P (insn) || NOTE_P (insn))
864 if (BARRIER_P (insn))
869 if (insn == first_insn)
871 else if (barrier_args_size[INSN_UID (insn)] < 0)
873 barrier_args_size[INSN_UID (insn)] = cur_args_size;
878 /* The insns starting with this label have been
879 already scanned or are in the worklist. */
884 body = PATTERN (insn);
885 if (GET_CODE (body) == SEQUENCE)
/* Delay-slot SEQUENCE: annulled-branch slots from the target apply
   only along the taken edge, so track a separate dest_args_size.  */
887 HOST_WIDE_INT dest_args_size = cur_args_size;
888 for (i = 1; i < XVECLEN (body, 0); i++)
889 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
890 && INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
892 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
893 dest_args_size, &next);
896 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
897 cur_args_size, &next);
899 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
900 compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
901 dest_args_size, &next);
904 = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
905 cur_args_size, &next);
909 = compute_barrier_args_size_1 (insn, cur_args_size, &next);
913 if (VEC_empty (rtx, next))
916 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
920 VEC_truncate (rtx, next, 0);
923 VEC_free (rtx, heap, worklist);
924 VEC_free (rtx, heap, next);
927 /* Add a CFI to update the running total of the size of arguments
928 pushed onto the stack. */
931 dwarf2out_args_size (HOST_WIDE_INT size)
/* Suppress a redundant directive if the size is already current.  */
935 if (size == old_args_size)
938 old_args_size = size;
941 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
942 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
946 /* Record a stack adjustment of OFFSET bytes. */
949 dwarf2out_stack_adjust (HOST_WIDE_INT offset)
/* Keep both the CFA rule and the register-save base in step with SP.  */
951 if (cfa.reg == STACK_POINTER_REGNUM)
952 cfa.offset += offset;
954 if (cfa_store.reg == STACK_POINTER_REGNUM)
955 cfa_store.offset += offset;
957 if (ACCUMULATE_OUTGOING_ARGS)
960 #ifndef STACK_GROWS_DOWNWARD
/* For async unwind tables the unwinder must know args_size at every
   insn, so emit the update eagerly.  */
969 if (flag_asynchronous_unwind_tables)
970 dwarf2out_args_size (args_size);
973 /* Check INSN to see if it looks like a push or a stack adjustment, and
974 make a note of it if it does. EH uses this information to find out
975 how much extra space it needs to pop off the stack. */
978 dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
980 HOST_WIDE_INT offset;
983 /* Don't handle epilogues at all. Certainly it would be wrong to do so
984 with this function. Proper support would require all frame-related
985 insns to be marked, and to be able to handle saving state around
986 epilogues textually in the middle of the function. */
987 if (prologue_epilogue_contains (insn))
990 /* If INSN is an instruction from target of an annulled branch, the
991 effects are for the target only and so current argument size
992 shouldn't change at all. */
994 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
995 && INSN_FROM_TARGET_P (insn))
998 /* If only calls can throw, and we have a frame pointer,
999 save up adjustments until we see the CALL_INSN. */
1000 if (!flag_asynchronous_unwind_tables && cfa.reg != STACK_POINTER_REGNUM)
1002 if (CALL_P (insn) && !after_p)
1004 /* Extract the size of the args from the CALL rtx itself. */
1005 insn = PATTERN (insn);
1006 if (GET_CODE (insn) == PARALLEL)
1007 insn = XVECEXP (insn, 0, 0);
1008 if (GET_CODE (insn) == SET)
1009 insn = SET_SRC (insn);
1010 gcc_assert (GET_CODE (insn) == CALL);
1011 dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
/* Otherwise decode the insn itself into a stack delta.  */
1016 if (CALL_P (insn) && !after_p)
1018 if (!flag_asynchronous_unwind_tables)
1019 dwarf2out_args_size (args_size);
1022 else if (BARRIER_P (insn))
1024 /* Don't call compute_barrier_args_size () if the only
1025 BARRIER is at the end of function. */
1026 if (barrier_args_size == NULL && next_nonnote_insn (insn))
1027 compute_barrier_args_size ();
1028 if (barrier_args_size == NULL)
/* After a barrier, resynchronize to the precomputed args_size for
   the code that follows.  */
1032 offset = barrier_args_size[INSN_UID (insn)];
1037 offset -= args_size;
1038 #ifndef STACK_GROWS_DOWNWARD
1042 else if (GET_CODE (PATTERN (insn)) == SET)
1043 offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
1044 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1045 || GET_CODE (PATTERN (insn)) == SEQUENCE)
1047 /* There may be stack adjustments inside compound insns. Search
1049 for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
1050 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1051 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
1060 dwarf2out_stack_adjust (offset);
1063 /* We delay emitting a register save until either (a) we reach the end
1064 of the prologue or (b) the register is clobbered. This clusters
1065 register saves so that there are fewer pc advances. */
1067 struct GTY(()) queued_reg_save {
1068 struct queued_reg_save *next;
/* Offset from the CFA at which REG is saved (used when saved_reg is
   null).  */
1070 HOST_WIDE_INT cfa_offset;
/* Singly-linked list of pending saves, flushed in
   dwarf2out_flush_queued_reg_saves.  */
1074 static GTY(()) struct queued_reg_save *queued_reg_saves;
1076 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
1077 typedef struct GTY(()) reg_saved_in_data {
1080 } reg_saved_in_data;
1082 DEF_VEC_O (reg_saved_in_data);
1083 DEF_VEC_ALLOC_O (reg_saved_in_data, gc);
1085 /* A set of registers saved in other registers. This is implemented as
1086 a flat array because it normally contains zero or 1 entry, depending
1087 on the target. IA-64 is the big spender here, using a maximum of
1089 static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;
1091 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1094 compare_reg_or_pc (rtx x, rtx y)
/* Two REGs match by register number; otherwise equivalence falls back
   to rtx identity (PC_RTX is a shared singleton).  */
1096 if (REG_P (x) && REG_P (y))
1097 return REGNO (x) == REGNO (y);
1101 /* Record SRC as being saved in DEST. DEST may be null to delete an
1102 existing entry. SRC may be a register or PC_RTX. */
1105 record_reg_saved_in_reg (rtx dest, rtx src)
1107 reg_saved_in_data *elt;
/* Update an existing entry for SRC in place; a null DEST removes it.
   Order is irrelevant, so removal may swap in the last element.  */
1110 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
1111 if (compare_reg_or_pc (elt->orig_reg, src))
1114 VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
1116 elt->saved_in_reg = dest;
/* No existing entry: append a new one.  */
1123 elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
1124 elt->orig_reg = src;
1125 elt->saved_in_reg = dest;
1128 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1129 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1132 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1134 struct queued_reg_save *q;
1136 /* Duplicates waste space, but it's also necessary to remove them
1137 for correctness, since the queue gets output in reverse
1139 for (q = queued_reg_saves; q != NULL; q = q->next)
1140 if (REGNO (q->reg) == REGNO (reg))
/* No matching entry: allocate a new node at the head of the list.  */
1145 q = ggc_alloc_queued_reg_save ();
1146 q->next = queued_reg_saves;
1147 queued_reg_saves = q;
1151 q->cfa_offset = offset;
1152 q->saved_reg = sreg;
1155 /* Output all the entries in QUEUED_REG_SAVES. */
1158 dwarf2out_flush_queued_reg_saves (void)
1160 struct queued_reg_save *q;
1162 for (q = queued_reg_saves; q; q = q->next)
1164 unsigned int reg, sreg;
/* Also note any register-in-register save for later lookups.  */
1166 record_reg_saved_in_reg (q->saved_reg, q->reg);
1168 reg = DWARF_FRAME_REGNUM (REGNO (q->reg));
1170 sreg = DWARF_FRAME_REGNUM (REGNO (q->saved_reg));
1172 sreg = INVALID_REGNUM;
1173 reg_save (reg, sreg, q->cfa_offset);
/* The queue has been emitted; reset it.  */
1176 queued_reg_saves = NULL;
1179 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1180 location for? Or, does it clobber a register which we've previously
1181 said that some other register is saved in, and for which we now
1182 have a new location for? */
1185 clobbers_queued_reg_save (const_rtx insn)
1187 struct queued_reg_save *q;
1189 for (q = queued_reg_saves; q; q = q->next)
1192 reg_saved_in_data *rir;
/* Case 1: the queued register itself is modified by INSN.  */
1194 if (modified_in_p (q->reg, insn))
/* Case 2: the register currently recorded as holding q->reg's saved
   value is modified, invalidating that recorded location.  */
1197 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1198 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1199 && modified_in_p (rir->saved_in_reg, insn))
1206 /* What register, if any, is currently saved in REG? */
/* Checks the pending queue first, then the committed
   regs_saved_in_regs table; returns NULL_RTX (elided here) when REG
   holds no saved register.  */
1209 reg_saved_in (rtx reg)
1211 unsigned int regn = REGNO (reg);
1212 struct queued_reg_save *q;
1213 reg_saved_in_data *rir;
1216 for (q = queued_reg_saves; q; q = q->next)
1217 if (q->saved_reg && regn == REGNO (q->saved_reg))
1220 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1221 if (regn == REGNO (rir->saved_in_reg))
1222 return rir->orig_reg;
1228 /* A temporary register holding an integral value used in adjusting SP
1229 or setting up the store_reg. The "offset" field holds the integer
1230 value, not an offset. */
1231 static dw_cfa_location cfa_temp;
1233 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
/* PAT may be (plus reg const), a bare reg, or a MEM form describing an
   indirect CFA.  The global `cfa' is rebuilt from scratch.  */
1236 dwarf2out_frame_debug_def_cfa (rtx pat)
1238 memset (&cfa, 0, sizeof (cfa));
1240 switch (GET_CODE (pat))
/* (plus reg const): register + offset.  */
1243 cfa.reg = REGNO (XEXP (pat, 0));
1244 cfa.offset = INTVAL (XEXP (pat, 1));
/* Bare register, offset zero.  */
1248 cfa.reg = REGNO (pat);
/* MEM: indirect CFA; the inner address may itself be reg+const.  */
1253 pat = XEXP (pat, 0);
1254 if (GET_CODE (pat) == PLUS)
1256 cfa.base_offset = INTVAL (XEXP (pat, 1));
1257 pat = XEXP (pat, 0);
1259 cfa.reg = REGNO (pat);
1263 /* Recurse and define an expression. */
1270 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
/* PAT is a SET whose source is either the CFA register plus a constant
   (adjusting the offset) or a plain register copy (moving the CFA).  */
1273 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1277 gcc_assert (GET_CODE (pat) == SET);
1278 dest = XEXP (pat, 0);
1279 src = XEXP (pat, 1);
1281 switch (GET_CODE (src))
/* SP/FP adjustment: source must be based on the current CFA reg.  */
1284 gcc_assert (REGNO (XEXP (src, 0)) == cfa.reg);
1285 cfa.offset -= INTVAL (XEXP (src, 1));
/* The CFA now lives in DEST.  */
1295 cfa.reg = REGNO (dest);
1296 gcc_assert (cfa.indirect == 0);
1301 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
/* SET stores register SRC into a MEM whose address is CFA-relative;
   emit the corresponding register-save CFI(s).  */
1304 dwarf2out_frame_debug_cfa_offset (rtx set)
1306 HOST_WIDE_INT offset;
1307 rtx src, addr, span;
1308 unsigned int sregno;
1310 src = XEXP (set, 1);
1311 addr = XEXP (set, 0);
1312 gcc_assert (MEM_P (addr));
1313 addr = XEXP (addr, 0);
1315 /* As documented, only consider extremely simple addresses. */
1316 switch (GET_CODE (addr))
/* Bare CFA register: saved at offset 0 from the CFA.  */
1319 gcc_assert (REGNO (addr) == cfa.reg);
1320 offset = -cfa.offset;
/* (plus cfa-reg const): constant offset from the CFA.  */
1323 gcc_assert (REGNO (XEXP (addr, 0)) == cfa.reg);
1324 offset = INTVAL (XEXP (addr, 1)) - cfa.offset;
/* PC_RTX source means the return address is being saved.  */
1333 sregno = DWARF_FRAME_RETURN_COLUMN;
1337 span = targetm.dwarf_register_span (src);
1338 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1341 /* ??? We'd like to use queue_reg_save, but we need to come up with
1342 a different flushing heuristic for epilogues. */
1344 reg_save (sregno, INVALID_REGNUM, offset);
1347 /* We have a PARALLEL describing where the contents of SRC live.
1348 Queue register saves for each piece of the PARALLEL. */
1351 HOST_WIDE_INT span_offset = offset;
1353 gcc_assert (GET_CODE (span) == PARALLEL);
1355 limit = XVECLEN (span, 0);
1356 for (par_index = 0; par_index < limit; par_index++)
1358 rtx elem = XVECEXP (span, 0, par_index);
/* NOTE(review): upstream takes REGNO (elem) here, not REGNO (src);
   the src form shown may be an artifact of elided lines — confirm.  */
1360 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1361 reg_save (sregno, INVALID_REGNUM, span_offset);
1362 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1367 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
/* SET copies register SRC (or PC_RTX for the return address) into
   register DEST; emit a DW_CFA_register-style save.  */
1370 dwarf2out_frame_debug_cfa_register (rtx set)
1373 unsigned sregno, dregno;
1375 src = XEXP (set, 1);
1376 dest = XEXP (set, 0);
/* PC_RTX source: the return-address column is being saved.  */
1379 sregno = DWARF_FRAME_RETURN_COLUMN;
1382 record_reg_saved_in_reg (dest, src);
1383 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1386 dregno = DWARF_FRAME_REGNUM (REGNO (dest));
1388 /* ??? We'd like to use queue_reg_save, but we need to come up with
1389 a different flushing heuristic for epilogues. */
1390 reg_save (sregno, dregno, 0);
1393 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
/* SET stores register SRC into MEM DEST; emit DW_CFA_expression with a
   full location expression for the destination address.  */
1396 dwarf2out_frame_debug_cfa_expression (rtx set)
1398 rtx src, dest, span;
1399 dw_cfi_ref cfi = new_cfi ();
1401 dest = SET_DEST (set);
1402 src = SET_SRC (set);
1404 gcc_assert (REG_P (src));
1405 gcc_assert (MEM_P (dest));
1407 span = targetm.dwarf_register_span (src);
1410 cfi->dw_cfi_opc = DW_CFA_expression;
1411 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = DWARF_FRAME_REGNUM (REGNO (src));
/* Describe the save slot's address as a DWARF location expression.  */
1412 cfi->dw_cfi_oprnd2.dw_cfi_loc
1413 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1414 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1416 /* ??? We'd like to use queue_reg_save, were the interface different,
1417 and, as above, we could manage flushing for epilogues. */
1421 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1424 dwarf2out_frame_debug_cfa_restore (rtx reg)
1426 dw_cfi_ref cfi = new_cfi ();
1427 unsigned int regno = DWARF_FRAME_REGNUM (REGNO (reg));
/* DW_CFA_restore packs the column into the low 6 opcode bits, so
   columns >= 64 need the extended form.  */
1429 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1430 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1435 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1436 ??? Perhaps we should note in the CIE where windows are saved (instead of
1437 assuming 0(cfa)) and what registers are in the window. */
1440 dwarf2out_frame_debug_cfa_window_save (void)
1442 dw_cfi_ref cfi = new_cfi ();
/* SPARC register-window save: a single opcode with no operands.  */
1444 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1448 /* Record call frame debugging information for an expression EXPR,
1449 which either sets SP or FP (adjusting how we calculate the frame
1450 address) or saves a register to the stack or another register.
1451 LABEL indicates the address of EXPR.
1453 This function encodes a state machine mapping rtxes to actions on
1454 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1455 users need not read the source code.
1457 The High-Level Picture
1459 Changes in the register we use to calculate the CFA: Currently we
1460 assume that if you copy the CFA register into another register, we
1461 should take the other one as the new CFA register; this seems to
1462 work pretty well. If it's wrong for some target, it's simple
1463 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1465 Changes in the register we use for saving registers to the stack:
1466 This is usually SP, but not always. Again, we deduce that if you
1467 copy SP into another register (and SP is not the CFA register),
1468 then the new register is the one we will be using for register
1469 saves. This also seems to work.
1471 Register saves: There's not much guesswork about this one; if
1472 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1473 register save, and the register used to calculate the destination
1474 had better be the one we think we're using for this purpose.
1475 It's also assumed that a copy from a call-saved register to another
1476 register is saving that register if RTX_FRAME_RELATED_P is set on
1477 that instruction. If the copy is from a call-saved register to
1478 the *same* register, that means that the register is now the same
1479 value as in the caller.
1481 Except: If the register being saved is the CFA register, and the
1482 offset is nonzero, we are saving the CFA, so we assume we have to
1483 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1484 the intent is to save the value of SP from the previous frame.
1486 In addition, if a register has previously been saved to a different
1489 Invariants / Summaries of Rules
1491 cfa current rule for calculating the CFA. It usually
1492 consists of a register and an offset.
1493 cfa_store register used by prologue code to save things to the stack
1494 cfa_store.offset is the offset from the value of
1495 cfa_store.reg to the actual CFA
1496 cfa_temp register holding an integral value. cfa_temp.offset
1497 stores the value, which will be used to adjust the
1498 stack pointer. cfa_temp is also used like cfa_store,
1499 to track stores to the stack via fp or a temp reg.
1501 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1502 with cfa.reg as the first operand changes the cfa.reg and its
1503 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1506 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1507 expression yielding a constant. This sets cfa_temp.reg
1508 and cfa_temp.offset.
1510 Rule 5: Create a new register cfa_store used to save items to the
1513 Rules 10-14: Save a register to the stack. Define offset as the
1514 difference of the original location and cfa_store's
1515 location (or cfa_temp's location if cfa_temp is used).
1517 Rules 16-20: If AND operation happens on sp in prologue, we assume
1518 stack is realigned. We will use a group of DW_OP_XXX
1519 expressions to represent the location of the stored
1520 register instead of CFA+offset.
1524 "{a,b}" indicates a choice of a xor b.
1525 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1528 (set <reg1> <reg2>:cfa.reg)
1529 effects: cfa.reg = <reg1>
1530 cfa.offset unchanged
1531 cfa_temp.reg = <reg1>
1532 cfa_temp.offset = cfa.offset
1535 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1536 {<const_int>,<reg>:cfa_temp.reg}))
1537 effects: cfa.reg = sp if fp used
1538 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1539 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1540 if cfa_store.reg==sp
1543 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1544 effects: cfa.reg = fp
1545 cfa_offset += +/- <const_int>
1548 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1549 constraints: <reg1> != fp
1551 effects: cfa.reg = <reg1>
1552 cfa_temp.reg = <reg1>
1553 cfa_temp.offset = cfa.offset
1556 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1557 constraints: <reg1> != fp
1559 effects: cfa_store.reg = <reg1>
1560 cfa_store.offset = cfa.offset - cfa_temp.offset
1563 (set <reg> <const_int>)
1564 effects: cfa_temp.reg = <reg>
1565 cfa_temp.offset = <const_int>
1568 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1569 effects: cfa_temp.reg = <reg1>
1570 cfa_temp.offset |= <const_int>
1573 (set <reg> (high <exp>))
1577 (set <reg> (lo_sum <exp> <const_int>))
1578 effects: cfa_temp.reg = <reg>
1579 cfa_temp.offset = <const_int>
1582 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1583 effects: cfa_store.offset -= <const_int>
1584 cfa.offset = cfa_store.offset if cfa.reg == sp
1586 cfa.base_offset = -cfa_store.offset
1589 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1590 effects: cfa_store.offset += -/+ mode_size(mem)
1591 cfa.offset = cfa_store.offset if cfa.reg == sp
1593 cfa.base_offset = -cfa_store.offset
1596 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1599 effects: cfa.reg = <reg1>
1600 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1603 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1604 effects: cfa.reg = <reg1>
1605 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1608 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1609 effects: cfa.reg = <reg1>
1610 cfa.base_offset = -cfa_temp.offset
1611 cfa_temp.offset -= mode_size(mem)
1614 (set <reg> {unspec, unspec_volatile})
1615 effects: target-dependent
1618 (set sp (and: sp <const_int>))
1619 constraints: cfa_store.reg == sp
1620 effects: cfun->fde.stack_realign = 1
1621 cfa_store.offset = 0
1622 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1625 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1626 effects: cfa_store.offset += -/+ mode_size(mem)
1629 (set (mem ({pre_inc, pre_dec} sp)) fp)
1630 constraints: fde->stack_realign == 1
1631 effects: cfa_store.offset = 0
1632 cfa.reg != HARD_FRAME_POINTER_REGNUM
1635 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1636 constraints: fde->stack_realign == 1
1638 && cfa.indirect == 0
1639 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1640 effects: Use DW_CFA_def_cfa_expression to define cfa
1641 cfa.reg == fde->drap_reg */
1644 dwarf2out_frame_debug_expr (rtx expr)
1646 rtx src, dest, span;
1647 HOST_WIDE_INT offset;
1650 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1651 the PARALLEL independently. The first element is always processed if
1652 it is a SET. This is for backward compatibility. Other elements
1653 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1654 flag is set in them. */
1655 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1658 int limit = XVECLEN (expr, 0);
1661 /* PARALLELs have strict read-modify-write semantics, so we
1662 ought to evaluate every rvalue before changing any lvalue.
1663 It's cumbersome to do that in general, but there's an
1664 easy approximation that is enough for all current users:
1665 handle register saves before register assignments. */
/* First pass: recurse into the MEM-destination (register save)
   elements only.  */
1666 if (GET_CODE (expr) == PARALLEL)
1667 for (par_index = 0; par_index < limit; par_index++)
1669 elem = XVECEXP (expr, 0, par_index);
1670 if (GET_CODE (elem) == SET
1671 && MEM_P (SET_DEST (elem))
1672 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1673 dwarf2out_frame_debug_expr (elem);
/* Second pass: everything else.  For a SEQUENCE, MEM destinations were
   not handled above, so they are accepted here as well.  */
1676 for (par_index = 0; par_index < limit; par_index++)
1678 elem = XVECEXP (expr, 0, par_index);
1679 if (GET_CODE (elem) == SET
1680 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1681 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1682 dwarf2out_frame_debug_expr (elem);
1683 else if (GET_CODE (elem) == SET
1685 && !RTX_FRAME_RELATED_P (elem))
1687 /* Stack adjustment combining might combine some post-prologue
1688 stack adjustment into a prologue stack adjustment. */
1689 HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);
1692 dwarf2out_stack_adjust (offset);
/* From here on EXPR must be a single SET.  */
1698 gcc_assert (GET_CODE (expr) == SET);
1700 src = SET_SRC (expr);
1701 dest = SET_DEST (expr);
/* If SRC was previously saved in another register, track the save
   through that register instead.  */
1705 rtx rsi = reg_saved_in (src);
/* Dispatch on the destination: REG cases adjust the CFA / cfa_store /
   cfa_temp state machine, MEM cases record register saves.  The case
   labels themselves fall on lines elided from this extract.  */
1712 switch (GET_CODE (dest))
1715 switch (GET_CODE (src))
1717 /* Setting FP from SP. */
1719 if (cfa.reg == (unsigned) REGNO (src))
1722 /* Update the CFA rule wrt SP or FP. Make sure src is
1723 relative to the current CFA register.
1725 We used to require that dest be either SP or FP, but the
1726 ARM copies SP to a temporary register, and from there to
1727 FP. So we just rely on the backends to only set
1728 RTX_FRAME_RELATED_P on appropriate insns. */
1729 cfa.reg = REGNO (dest);
1730 cfa_temp.reg = cfa.reg;
1731 cfa_temp.offset = cfa.offset;
1735 /* Saving a register in a register. */
1736 gcc_assert (!fixed_regs [REGNO (dest)]
1737 /* For the SPARC and its register window. */
1738 || (DWARF_FRAME_REGNUM (REGNO (src))
1739 == DWARF_FRAME_RETURN_COLUMN));
1741 /* After stack is aligned, we can only save SP in FP
1742 if drap register is used. In this case, we have
1743 to restore stack pointer with the CFA value and we
1744 don't generate this DWARF information. */
1746 && fde->stack_realign
1747 && REGNO (src) == STACK_POINTER_REGNUM)
1748 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1749 && fde->drap_reg != INVALID_REGNUM
1750 && cfa.reg != REGNO (src));
1752 queue_reg_save (src, dest, 0);
/* PLUS/MINUS/LO_SUM handling: Rules 2 through 5 from the block
   comment above.  */
1759 if (dest == stack_pointer_rtx)
/* Rule 2: adjusting SP by a constant or by cfa_temp's value.  */
1763 switch (GET_CODE (XEXP (src, 1)))
1766 offset = INTVAL (XEXP (src, 1));
1769 gcc_assert ((unsigned) REGNO (XEXP (src, 1))
1771 offset = cfa_temp.offset;
1777 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1779 /* Restoring SP from FP in the epilogue. */
1780 gcc_assert (cfa.reg == (unsigned) HARD_FRAME_POINTER_REGNUM);
1781 cfa.reg = STACK_POINTER_REGNUM;
1783 else if (GET_CODE (src) == LO_SUM)
1784 /* Assume we've set the source reg of the LO_SUM from sp. */
1787 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
/* A PLUS/LO_SUM grows the offset, a MINUS shrinks it; the sign flip
   for MINUS is on an elided line.  */
1789 if (GET_CODE (src) != MINUS)
1791 if (cfa.reg == STACK_POINTER_REGNUM)
1792 cfa.offset += offset;
1793 if (cfa_store.reg == STACK_POINTER_REGNUM)
1794 cfa_store.offset += offset;
1796 else if (dest == hard_frame_pointer_rtx)
/* Rule 3: setting or adjusting the frame pointer.  */
1799 /* Either setting the FP from an offset of the SP,
1800 or adjusting the FP */
1801 gcc_assert (frame_pointer_needed);
1803 gcc_assert (REG_P (XEXP (src, 0))
1804 && (unsigned) REGNO (XEXP (src, 0)) == cfa.reg
1805 && CONST_INT_P (XEXP (src, 1)));
1806 offset = INTVAL (XEXP (src, 1));
1807 if (GET_CODE (src) != MINUS)
1809 cfa.offset += offset;
1810 cfa.reg = HARD_FRAME_POINTER_REGNUM;
1814 gcc_assert (GET_CODE (src) != MINUS);
/* Rule 4: <reg1> = cfa.reg + const sets a temporary CFA register.  */
1817 if (REG_P (XEXP (src, 0))
1818 && REGNO (XEXP (src, 0)) == cfa.reg
1819 && CONST_INT_P (XEXP (src, 1)))
1821 /* Setting a temporary CFA register that will be copied
1822 into the FP later on. */
1823 offset = - INTVAL (XEXP (src, 1));
1824 cfa.offset += offset;
1825 cfa.reg = REGNO (dest);
1826 /* Or used to save regs to the stack. */
1827 cfa_temp.reg = cfa.reg;
1828 cfa_temp.offset = cfa.offset;
/* Rule 5: <reg1> = cfa_temp.reg + sp introduces a new cfa_store.  */
1832 else if (REG_P (XEXP (src, 0))
1833 && REGNO (XEXP (src, 0)) == cfa_temp.reg
1834 && XEXP (src, 1) == stack_pointer_rtx)
1836 /* Setting a scratch register that we will use instead
1837 of SP for saving registers to the stack. */
1838 gcc_assert (cfa.reg == STACK_POINTER_REGNUM);
1839 cfa_store.reg = REGNO (dest);
1840 cfa_store.offset = cfa.offset - cfa_temp.offset;
/* LO_SUM with a constant loads that constant into cfa_temp (Rule 9).  */
1844 else if (GET_CODE (src) == LO_SUM
1845 && CONST_INT_P (XEXP (src, 1)))
1847 cfa_temp.reg = REGNO (dest);
1848 cfa_temp.offset = INTVAL (XEXP (src, 1));
/* Rule 6: loading a constant into a register tracks it in cfa_temp.  */
1857 cfa_temp.reg = REGNO (dest);
1858 cfa_temp.offset = INTVAL (src);
/* Rule 7: IOR of cfa_temp.reg with a constant.  */
1863 gcc_assert (REG_P (XEXP (src, 0))
1864 && (unsigned) REGNO (XEXP (src, 0)) == cfa_temp.reg
1865 && CONST_INT_P (XEXP (src, 1)));
1867 if ((unsigned) REGNO (dest) != cfa_temp.reg)
1868 cfa_temp.reg = REGNO (dest);
1869 cfa_temp.offset |= INTVAL (XEXP (src, 1));
1872 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1873 which will fill in all of the bits. */
1880 case UNSPEC_VOLATILE:
1881 /* All unspecs should be represented by REG_CFA_* notes. */
/* Rule 16: AND of SP marks a stack-realignment prologue.  */
1887 /* If this AND operation happens on stack pointer in prologue,
1888 we assume the stack is realigned and we extract the
1890 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1892 /* We interpret reg_save differently with stack_realign set.
1893 Thus we must flush whatever we have queued first. */
1894 dwarf2out_flush_queued_reg_saves ();
1896 gcc_assert (cfa_store.reg == REGNO (XEXP (src, 0)));
1897 fde->stack_realign = 1;
1898 fde->stack_realignment = INTVAL (XEXP (src, 1));
1899 cfa_store.offset = 0;
/* Remember the DRAP register when the CFA lives somewhere other
   than SP or FP.  */
1901 if (cfa.reg != STACK_POINTER_REGNUM
1902 && cfa.reg != HARD_FRAME_POINTER_REGNUM)
1903 fde->drap_reg = cfa.reg;
/* MEM destination: Rules 10 through 14 -- a register save.  */
1916 /* Saving a register to the stack. Make sure dest is relative to the
1918 switch (GET_CODE (XEXP (dest, 0)))
/* Rule 10: {pre,post}_modify addressing.  */
1924 /* We can't handle variable size modifications. */
1925 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1927 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1929 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1930 && cfa_store.reg == STACK_POINTER_REGNUM);
1932 cfa_store.offset += offset;
1933 if (cfa.reg == STACK_POINTER_REGNUM)
1934 cfa.offset = cfa_store.offset;
/* POST_MODIFY bumps SP after the store, so the save lands relative
   to the pre-modification cfa_store offset.  */
1936 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1937 offset -= cfa_store.offset;
1939 offset = -cfa_store.offset;
/* Rule 11: {pre_inc,pre_dec,post_dec} addressing.  */
1946 offset = GET_MODE_SIZE (GET_MODE (dest));
1947 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1950 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1951 == STACK_POINTER_REGNUM)
1952 && cfa_store.reg == STACK_POINTER_REGNUM);
1954 cfa_store.offset += offset;
1956 /* Rule 18: If stack is aligned, we will use FP as a
1957 reference to represent the address of the stored
1960 && fde->stack_realign
1961 && src == hard_frame_pointer_rtx)
1963 gcc_assert (cfa.reg != HARD_FRAME_POINTER_REGNUM);
1964 cfa_store.offset = 0;
1967 if (cfa.reg == STACK_POINTER_REGNUM)
1968 cfa.offset = cfa_store.offset;
1970 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1971 offset += -cfa_store.offset;
1973 offset = -cfa_store.offset;
/* Rule 12: store at base register plus/minus a constant.  */
1977 /* With an offset. */
1984 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1985 && REG_P (XEXP (XEXP (dest, 0), 0)));
1986 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1987 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1990 regno = REGNO (XEXP (XEXP (dest, 0), 0));
/* Make OFFSET relative to the CFA, whichever of cfa / cfa_store /
   cfa_temp the base register matches.  */
1992 if (cfa.reg == (unsigned) regno)
1993 offset -= cfa.offset;
1994 else if (cfa_store.reg == (unsigned) regno)
1995 offset -= cfa_store.offset;
1998 gcc_assert (cfa_temp.reg == (unsigned) regno);
1999 offset -= cfa_temp.offset;
/* Rule 13: store at a bare base register.  */
2005 /* Without an offset. */
2008 int regno = REGNO (XEXP (dest, 0));
2010 if (cfa.reg == (unsigned) regno)
2011 offset = -cfa.offset;
2012 else if (cfa_store.reg == (unsigned) regno)
2013 offset = -cfa_store.offset;
2016 gcc_assert (cfa_temp.reg == (unsigned) regno);
2017 offset = -cfa_temp.offset;
/* Rule 14: post_inc through cfa_temp.  */
2024 gcc_assert (cfa_temp.reg
2025 == (unsigned) REGNO (XEXP (XEXP (dest, 0), 0)));
2026 offset = -cfa_temp.offset;
2027 cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
2035 /* If the source operand of this MEM operation is not a
2036 register, basically the source is return address. Here
2037 we only care how much stack grew and we don't save it. */
2041 if (REGNO (src) != STACK_POINTER_REGNUM
2042 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
2043 && (unsigned) REGNO (src) == cfa.reg)
2045 /* We're storing the current CFA reg into the stack. */
2047 if (cfa.offset == 0)
2050 /* If stack is aligned, putting CFA reg into stack means
2051 we can no longer use reg + offset to represent CFA.
2052 Here we use DW_CFA_def_cfa_expression instead. The
2053 result of this expression equals to the original CFA
2056 && fde->stack_realign
2057 && cfa.indirect == 0
2058 && cfa.reg != HARD_FRAME_POINTER_REGNUM)
2060 dw_cfa_location cfa_exp;
2062 gcc_assert (fde->drap_reg == cfa.reg);
/* CFA = *(FP + offset): an indirect (expression-based) CFA.  */
2064 cfa_exp.indirect = 1;
2065 cfa_exp.reg = HARD_FRAME_POINTER_REGNUM;
2066 cfa_exp.base_offset = offset;
2069 fde->drap_reg_saved = 1;
2071 def_cfa_1 (&cfa_exp);
2075 /* If the source register is exactly the CFA, assume
2076 we're saving SP like any other register; this happens
2079 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
2084 /* Otherwise, we'll need to look in the stack to
2085 calculate the CFA. */
2086 rtx x = XEXP (dest, 0);
2090 gcc_assert (REG_P (x));
2092 cfa.reg = REGNO (x);
2093 cfa.base_offset = offset;
/* Ordinary register save: queue it, expanding a multi-register
   span into one save per piece.  */
2102 span = targetm.dwarf_register_span (src);
2105 queue_reg_save (src, NULL_RTX, offset);
2108 /* We have a PARALLEL describing where the contents of SRC
2109 live. Queue register saves for each piece of the
2113 HOST_WIDE_INT span_offset = offset;
2115 gcc_assert (GET_CODE (span) == PARALLEL);
2117 limit = XVECLEN (span, 0);
2118 for (par_index = 0; par_index < limit; par_index++)
2120 rtx elem = XVECEXP (span, 0, par_index);
2122 queue_reg_save (elem, NULL_RTX, span_offset);
2123 span_offset += GET_MODE_SIZE (GET_MODE (elem));
2134 /* Record call frame debugging information for INSN, which either
2135 sets SP or FP (adjusting how we calculate the frame address) or saves a
2136 register to the stack. If INSN is NULL_RTX, initialize our state.
2138 If AFTER_P is false, we're being called before the insn is emitted,
2139 otherwise after. Call instructions get invoked twice. */
2142 dwarf2out_frame_debug (rtx insn, bool after_p)
2145 bool handled_one = false;
2146 bool need_flush = false;
/* Jumps/calls and anything that clobbers a queued save invalidate the
   pending register-save queue.  */
2148 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2149 dwarf2out_flush_queued_reg_saves ();
2151 if (!RTX_FRAME_RELATED_P (insn))
2153 /* ??? This should be done unconditionally since stack adjustments
2154 matter if the stack pointer is not the CFA register anymore but
2155 is still used to save registers. */
2156 if (!ACCUMULATE_OUTGOING_ARGS)
2157 dwarf2out_notice_stack_adjust (insn, after_p);
2161 any_cfis_emitted = false;
/* First give each REG_CFA_* note attached to the insn a chance to
   describe the frame effect explicitly; REG_FRAME_RELATED_EXPR instead
   substitutes the pattern to analyze.  */
2163 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2164 switch (REG_NOTE_KIND (note))
2166 case REG_FRAME_RELATED_EXPR:
2167 insn = XEXP (note, 0);
2170 case REG_CFA_DEF_CFA:
2171 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2175 case REG_CFA_ADJUST_CFA:
/* A note with a PARALLEL payload is represented by its first
   element.  */
2180 if (GET_CODE (n) == PARALLEL)
2181 n = XVECEXP (n, 0, 0);
2183 dwarf2out_frame_debug_adjust_cfa (n);
2187 case REG_CFA_OFFSET:
2190 n = single_set (insn);
2191 dwarf2out_frame_debug_cfa_offset (n);
2195 case REG_CFA_REGISTER:
2200 if (GET_CODE (n) == PARALLEL)
2201 n = XVECEXP (n, 0, 0);
2203 dwarf2out_frame_debug_cfa_register (n);
2207 case REG_CFA_EXPRESSION:
2210 n = single_set (insn);
2211 dwarf2out_frame_debug_cfa_expression (n);
2215 case REG_CFA_RESTORE:
2220 if (GET_CODE (n) == PARALLEL)
2221 n = XVECEXP (n, 0, 0);
2224 dwarf2out_frame_debug_cfa_restore (n);
2228 case REG_CFA_SET_VDRAP:
2232 dw_fde_ref fde = cfun->fde;
/* The virtual DRAP register may only be established once per FDE.  */
2235 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2237 fde->vdrap_reg = REGNO (n);
2243 case REG_CFA_WINDOW_SAVE:
2244 dwarf2out_frame_debug_cfa_window_save ();
2248 case REG_CFA_FLUSH_QUEUE:
2249 /* The actual flush happens below. */
2260 /* Minimize the number of advances by emitting the entire queue
2261 once anything is emitted. */
2262 need_flush |= any_cfis_emitted;
/* No explicit note handled the insn: fall back to pattern analysis.  */
2266 insn = PATTERN (insn);
2268 dwarf2out_frame_debug_expr (insn);
2270 /* Check again. A parallel can save and update the same register.
2271 We could probably check just once, here, but this is safer than
2272 removing the check at the start of the function. */
2273 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2278 dwarf2out_flush_queued_reg_saves ();
2281 /* Examine CFI and return true if a cfi label and set_loc is needed
2282 beforehand. Even when generating CFI assembler instructions, we
2283 still have to add the cfi to the list so that lookup_cfa_1 works
2284 later on. When -g2 and above we even need to force emitting of
2285 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2286 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2287 and so don't use convert_cfa_to_fb_loc_list. */
2290 cfi_label_required_p (dw_cfi_ref cfi)
/* Without .cfi_* directives every CFI must be tied to a label.  */
2292 if (!dwarf2out_do_cfi_asm ())
/* With DWARF2 (not 3+) and non-terse debug info, CFA-defining opcodes
   need labels so convert_cfa_to_fb_loc_list can build location lists.  */
2295 if (dwarf_version == 2
2296 && debug_info_level > DINFO_LEVEL_TERSE
2297 && (write_symbols == DWARF2_DEBUG
2298 || write_symbols == VMS_AND_DWARF2_DEBUG))
2300 switch (cfi->dw_cfi_opc)
2302 case DW_CFA_def_cfa_offset:
2303 case DW_CFA_def_cfa_offset_sf:
2304 case DW_CFA_def_cfa_register:
2305 case DW_CFA_def_cfa:
2306 case DW_CFA_def_cfa_sf:
2307 case DW_CFA_def_cfa_expression:
2308 case DW_CFA_restore_state:
2317 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2318 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2321 add_cfis_to_fde (void)
2323 dw_fde_ref fde = cfun->fde;
2325 /* We always start with a function_begin label. */
2328 for (insn = get_insns (); insn; insn = next)
2330 next = NEXT_INSN (insn);
2332 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2334 /* Don't attempt to advance_loc4 between labels
2335 in different sections. */
2339 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
/* Coalesce a run of consecutive CFI notes under one label:
   a label is needed if any member of the run requires one.  */
2341 bool required = cfi_label_required_p (NOTE_CFI (insn));
2342 while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2344 required |= cfi_label_required_p (NOTE_CFI (next));
2345 next = NEXT_INSN (next);
2349 int num = dwarf2out_cfi_label_num;
2350 const char *label = dwarf2out_cfi_label ();
2354 /* Set the location counter to the new label. */
/* The first label (and the first after a section switch) needs an
   absolute DW_CFA_set_loc; later ones can use advance_loc4.  */
2356 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2357 : DW_CFA_advance_loc4);
2358 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2359 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);
/* Materialize the label in the insn stream so final() emits it.  */
2361 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2362 NOTE_LABEL_NUMBER (tmp) = num;
/* Append every CFI of the coalesced run to the FDE.  */
2367 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
2368 insn = NEXT_INSN (insn);
2370 while (insn != next);
2376 /* Scan the function and create the initial set of CFI notes. */
2379 create_cfi_notes (void)
2383 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
/* CFI notes are inserted after the previous insn, i.e. just before
   INSN.  */
2387 cfi_insn = PREV_INSN (insn);
2389 if (BARRIER_P (insn))
2391 dwarf2out_frame_debug (insn, false);
2397 switch (NOTE_KIND (insn))
2399 case NOTE_INSN_PROLOGUE_END:
2400 dwarf2out_flush_queued_reg_saves ();
2403 case NOTE_INSN_EPILOGUE_BEG:
2404 #if defined(HAVE_epilogue)
2405 dwarf2out_cfi_begin_epilogue (insn);
2409 case NOTE_INSN_CFA_RESTORE_STATE:
2411 dwarf2out_frame_debug_restore_state ();
2417 if (!NONDEBUG_INSN_P (insn))
2420 pat = PATTERN (insn);
/* Treat an inline asm as a single opaque insn.  */
2421 if (asm_noperands (pat) >= 0)
2423 dwarf2out_frame_debug (insn, false);
/* For a delay-slot SEQUENCE, process the slot insns individually
   (starting at index 1; index 0 is the branch itself).  */
2427 if (GET_CODE (pat) == SEQUENCE)
2429 int i, n = XVECLEN (pat, 0);
2430 for (i = 1; i < n; ++i)
2431 dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
2435 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2436 dwarf2out_frame_debug (insn, false);
2438 /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
2439 Putting the note after the VEC should be ok. */
2440 if (!tablejump_p (insn, NULL, &cfi_insn))
/* Second (after_p) invocation for the insn -- see
   dwarf2out_frame_debug's header comment.  */
2443 dwarf2out_frame_debug (insn, true);
2449 /* Determine if we need to save and restore CFI information around this
2450 epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
2451 we do need to save/restore, then emit the save now, and insert a
2452 NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
2455 dwarf2out_cfi_begin_epilogue (rtx insn)
2457 bool saw_frp = false;
2460 /* Scan forward to the return insn, noticing if there are possible
2461 frame related insns. */
2462 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
2467 /* Look for both regular and sibcalls to end the block. */
2468 if (returnjump_p (i))
2470 if (CALL_P (i) && SIBLING_CALL_P (i))
/* A delay-slot SEQUENCE can hide the return/sibcall at element 0,
   and frame-related insns in the slots.  */
2473 if (GET_CODE (PATTERN (i)) == SEQUENCE)
2476 rtx seq = PATTERN (i);
2478 if (returnjump_p (XVECEXP (seq, 0, 0)))
2480 if (CALL_P (XVECEXP (seq, 0, 0))
2481 && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
2484 for (idx = 0; idx < XVECLEN (seq, 0); idx++)
2485 if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
2489 if (RTX_FRAME_RELATED_P (i))
2493 /* If the port doesn't emit epilogue unwind info, we don't need a
2494 save/restore pair. */
2498 /* Otherwise, search forward to see if the return insn was the last
2499 basic block of the function. If so, we don't need save/restore. */
2500 gcc_assert (i != NULL);
2501 i = next_real_insn (i);
2505 /* Insert the restore before that next real insn in the stream, and before
2506 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2507 properly nested. This should be after any label or alignment. This
2508 will be pushed into the CFI stream by the function below. */
2511 rtx p = PREV_INSN (i);
2514 if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
2518 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);
/* Arrange for DW_CFA_remember_state to be emitted, and mirror the
   save in our own tracking so restore_state can undo it.  */
2520 emit_cfa_remember = true;
2522 /* And emulate the state save. */
2523 gcc_assert (!cfa_remember.in_use);
2525 old_cfa_remember = old_cfa;
2526 cfa_remember.in_use = 1;
2529 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
2533 dwarf2out_frame_debug_restore_state (void)
2535 dw_cfi_ref cfi = new_cfi ();
2537 cfi->dw_cfi_opc = DW_CFA_restore_state;
/* Undo the bookkeeping done by dwarf2out_cfi_begin_epilogue: a
   remember must be outstanding, and the saved CFA comes back.  */
2540 gcc_assert (cfa_remember.in_use);
2542 old_cfa = old_cfa_remember;
2543 cfa_remember.in_use = 0;
2547 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2548 state at each location within the function. These notes will be
2549 emitted during pass_final. */
2552 execute_dwarf2_frame (void)
2554 /* The first time we're called, compute the incoming frame state. */
2555 if (cie_cfi_vec == NULL)
2557 dw_cfa_location loc;
/* Route add_cfi output into the CIE vector while building the
   initial (function-entry) state shared by all FDEs.  */
2559 add_cfi_vec = &cie_cfi_vec;
2561 memset(&old_cfa, 0, sizeof (old_cfa));
2562 old_cfa.reg = INVALID_REGNUM;
2564 /* On entry, the Canonical Frame Address is at SP. */
2565 memset(&loc, 0, sizeof (loc));
2566 loc.reg = STACK_POINTER_REGNUM;
2567 loc.offset = INCOMING_FRAME_SP_OFFSET;
/* Record where the return address lives only if some dwarf2-based
   unwinding (debug or EH) will consume it.  */
2570 if (targetm.debug_unwind_info () == UI_DWARF2
2571 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2572 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2577 /* Set up state for generating call frame debug info. */
2578 gcc_checking_assert (queued_reg_saves == NULL);
2579 gcc_checking_assert (regs_saved_in_regs == NULL);
2581 memset (&cfa, 0, sizeof(cfa));
2582 cfa.reg = STACK_POINTER_REGNUM;
2583 cfa.offset = INCOMING_FRAME_SP_OFFSET;
2588 memset (&cfa_temp, 0, sizeof(cfa_temp));
2589 cfa_temp.reg = INVALID_REGNUM;
2591 dwarf2out_alloc_current_fde ();
/* Main work: walk the insns and attach NOTE_INSN_CFI notes.  */
2594 create_cfi_notes ();
2597 /* Reset all function-specific information, particularly for GC. */
2598 XDELETEVEC (barrier_args_size);
2599 barrier_args_size = NULL;
2600 regs_saved_in_regs = NULL;
2601 queued_reg_saves = NULL;
2607 /* Save the result of dwarf2out_do_frame across PCH.
2608 This variable is tri-state, with 0 unset, >0 true, <0 false.
Computed lazily by dwarf2out_do_cfi_asm below. */
2609 static GTY(()) signed char saved_do_cfi_asm = 0;
2611 /* Decide whether we want to emit frame unwind information for the current
2612 translation unit. */
2615 dwarf2out_do_frame (void)
2617 /* We want to emit correct CFA location expressions or lists, so we
2618 have to return true if we're going to output debug info, even if
2619 we're not going to output frame or unwind info. */
2620 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
/* If .cfi_* assembly output was already decided on, frame info is
   definitely wanted.  */
2623 if (saved_do_cfi_asm > 0)
2626 if (targetm.debug_unwind_info () == UI_DWARF2)
/* Exception-handling unwind tables via dwarf2 also require it.  */
2629 if ((flag_unwind_tables || flag_exceptions)
2630 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2636 /* Decide whether to emit frame unwind via assembler directives. */
2639 dwarf2out_do_cfi_asm (void)
2643 #ifdef MIPS_DEBUGGING_INFO
/* The answer is cached across calls (and PCH) in saved_do_cfi_asm.  */
2647 if (saved_do_cfi_asm != 0)
2648 return saved_do_cfi_asm > 0;
2650 /* Assume failure for a moment. */
2651 saved_do_cfi_asm = -1;
2653 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
2655 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
2658 /* Make sure the personality encoding is one the assembler can support.
2659 In particular, aligned addresses can't be handled. */
/* Check both the personality (code=2) and LSDA (code=0) encodings:
   only absolute (0) or pc-relative encodings are acceptable.  */
2660 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
2661 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
2663 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
2664 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
2667 /* If we can't get the assembler to emit only .debug_frame, and we don't need
2668 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
2669 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
2670 && !flag_unwind_tables && !flag_exceptions
2671 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
/* All checks passed: remember the positive answer.  */
2675 saved_do_cfi_asm = 1;
/* Pass gate: run the CFI-note pass only when dwarf2 frame info will
   actually be emitted for this translation unit.  */
2680 gate_dwarf2_frame (void)
2682 #ifndef HAVE_prologue
2683 /* Targets which still implement the prologue in assembler text
2684 cannot use the generic dwarf2 unwinding. */
2688 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
2689 from the optimized shrink-wrapping annotations that we will compute.
2690 For now, only produce the CFI notes for dwarf2. */
2691 return dwarf2out_do_frame ();
/* RTL pass descriptor tying gate_dwarf2_frame and execute_dwarf2_frame
   into the pass manager; timed under TV_FINAL.  */
2694 struct rtl_opt_pass pass_dwarf2_frame =
2698 "dwarf2", /* name */
2699 gate_dwarf2_frame, /* gate */
2700 execute_dwarf2_frame, /* execute */
2703 0, /* static_pass_number */
2704 TV_FINAL, /* tv_id */
2705 0, /* properties_required */
2706 0, /* properties_provided */
2707 0, /* properties_destroyed */
2708 0, /* todo_flags_start */
2709 0 /* todo_flags_finish */
2713 #include "gt-dwarf2cfi.h"