1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
/* NOTE(review): the matching #endif is absent from this excerpt. */
/* Fallback for targets that do not define how the return address is
   found on function entry: any use of it is a bug (gcc_unreachable). */
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
/* NOTE(review): the matching #endif is absent from this excerpt. */
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
61 /* A collected description of an entire row of the abstract CFI table. */
62 typedef struct GTY(()) dw_cfi_row_struct
/* NOTE(review): member declarations for the CFA (and its DW_CFA_cfa_expression
   form) and for reg_save are missing from this excerpt; only args_size is
   visible below. */
64 /* The expression that computes the CFA, expressed in two different ways.
65 The CFA member for the simple cases, and the full CFI expression for
66 the complex cases. The latter will be a DW_CFA_cfa_expression. */
70 /* The expressions for any register column that is saved. */
73 /* The value of any DW_CFA_GNU_args_size. */
74 HOST_WIDE_INT args_size;
78 /* A vector of call frame insns for the CIE. */
81 /* The state of the first row of the FDE table, which includes the
82 state provided by the CIE. */
83 static GTY(()) dw_cfi_row *cie_cfi_row;
/* Counter used to generate unique LCFI labels; see dwarf2out_cfi_label. */
85 static GTY(()) unsigned long dwarf2out_cfi_label_num;
87 /* The insn after which a new CFI note should be emitted. */
88 static rtx add_cfi_insn;
90 /* When non-null, add_cfi will add the CFI to this vector. */
91 static cfi_vec *add_cfi_vec;
93 /* True if remember_state should be emitted before following CFI directive. */
94 static bool emit_cfa_remember;
96 /* True if any CFI directives were emitted at the current insn. */
97 static bool any_cfis_emitted;
99 /* Short-hand for commonly used register numbers. */
100 static unsigned dw_stack_pointer_regnum;
101 static unsigned dw_frame_pointer_regnum;
/* Forward declarations for the epilogue remember/restore-state machinery. */
104 static void dwarf2out_cfi_begin_epilogue (rtx insn);
105 static void dwarf2out_frame_debug_restore_state (void);
108 /* Hook used by __throw. */
/* Return an rtx holding the DWARF column number of the stack pointer,
   encoded for EH output (DWARF2_FRAME_REG_OUT's second argument of 1
   selects the for_eh mapping). */
111 expand_builtin_dwarf_sp_column (void)
113 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
114 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
117 /* MEM is a memory reference for the register size table, each element of
118 which has mode MODE. Initialize column C as a return address column. */
/* The return column is given the size of a pointer (Pmode), since the
   return address is an address rather than a data register. */
121 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
123 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
124 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
125 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
128 /* Generate code to initialize the register size table. */
/* ADDRESS is a tree for the base of the table; for each hard register
   that maps to a DWARF column, emit a store of that register's save-mode
   size into the corresponding table slot. */
131 expand_builtin_init_dwarf_reg_sizes (tree address)
134 enum machine_mode mode = TYPE_MODE (char_type_node);
135 rtx addr = expand_normal (address);
136 rtx mem = gen_rtx_MEM (BLKmode, addr);
137 bool wrote_return_column = false;
139 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
141 unsigned int dnum = DWARF_FRAME_REGNUM (i);
142 unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
/* Skip registers with no assigned DWARF column. */
144 if (rnum < DWARF_FRAME_REGISTERS)
146 HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
147 enum machine_mode save_mode = reg_raw_mode[i];
/* Partially call-clobbered registers are saved in the mode that
   survives the call, not their raw mode. */
150 if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
151 save_mode = choose_hard_reg_mode (i, 1, true);
152 if (dnum == DWARF_FRAME_RETURN_COLUMN)
154 if (save_mode == VOIDmode)
156 wrote_return_column = true;
158 size = GET_MODE_SIZE (save_mode);
162 emit_move_insn (adjust_address (mem, mode, offset),
163 gen_int_mode (size, mode));
/* If no register claimed the return column, initialize it explicitly. */
167 if (!wrote_return_column)
168 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
170 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
171 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
/* NOTE(review): the matching #endif is absent from this excerpt. */
/* Let the target add any extra, target-specific entries. */
174 targetm.init_dwarf_reg_sizes_extra (address);
177 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
/* Used when emitting data-aligned offsets; the assert guarantees the
   factored value reproduces OFF exactly. */
179 static inline HOST_WIDE_INT
180 div_data_align (HOST_WIDE_INT off)
182 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
183 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
187 /* Return true if we need a signed version of a given opcode
188 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
/* The factored offset is negative exactly when OFF and the CIE data
   alignment have opposite signs, hence this two-sided test. */
191 need_data_align_sf_opcode (HOST_WIDE_INT off)
193 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
196 /* Return a pointer to a newly allocated Call Frame Instruction. */
/* Both operands are zero-initialized; callers fill in dw_cfi_opc and
   whichever operand fields the opcode uses. */
198 static inline dw_cfi_ref
201 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
203 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
204 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
209 /* Return a newly allocated CFI row, with no defined data. */
/* The CFA register starts as INVALID_REGNUM, i.e. "no CFA defined yet". */
214 dw_cfi_row *row = ggc_alloc_cleared_dw_cfi_row ();
216 row->cfa.reg = INVALID_REGNUM;
221 /* Return a copy of an existing CFI row. */
/* The reg_save vector is deep-copied so the new row can diverge from SRC. */
224 copy_cfi_row (dw_cfi_row *src)
226 dw_cfi_row *dst = ggc_alloc_dw_cfi_row ();
229 dst->reg_save = VEC_copy (dw_cfi_ref, gc, src->reg_save);
234 /* Free an allocated CFI row. */
/* Releases only the reg_save vector; individual CFIs are GC-managed. */
237 free_cfi_row (dw_cfi_row *row)
241 VEC_free (dw_cfi_ref, gc, row->reg_save);
246 /* Generate a new label for the CFI info to refer to. */
/* Returns a malloc'd copy of an "LCFI" internal label; the counter
   dwarf2out_cfi_label_num guarantees uniqueness. Caller owns the string. */
249 dwarf2out_cfi_label (void)
251 int num = dwarf2out_cfi_label_num++;
254 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
256 return xstrdup (label);
259 /* Add CFI either to the current insn stream or to a vector, or both. */
/* If a DW_CFA_remember_state is pending, emit it first (recursively via
   add_cfi, with the flag cleared to avoid infinite recursion). */
262 add_cfi (dw_cfi_ref cfi)
264 if (emit_cfa_remember)
266 dw_cfi_ref cfi_remember;
268 /* Emit the state save. */
269 emit_cfa_remember = false;
270 cfi_remember = new_cfi ();
271 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
272 add_cfi (cfi_remember);
275 any_cfis_emitted = true;
/* Attach the CFI as a NOTE_INSN_CFI after add_cfi_insn, advancing the
   insertion point. */
277 if (add_cfi_insn != NULL)
279 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
280 NOTE_CFI (add_cfi_insn) = cfi;
/* Also collect into the current vector (e.g. the CIE's), if any. */
283 if (add_cfi_vec != NULL)
284 VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
/* Emit a DW_CFA_GNU_args_size instruction recording SIZE bytes of
   pushed arguments. */
288 add_cfi_args_size (HOST_WIDE_INT size)
290 dw_cfi_ref cfi = new_cfi ();
292 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
293 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
/* Emit a restore instruction for column REG. The compact DW_CFA_restore
   form only encodes registers 0-63; larger numbers need the extended form. */
299 add_cfi_restore (unsigned reg)
301 dw_cfi_ref cfi = new_cfi ();
303 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
304 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
309 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
310 that the register column is no longer saved. */
/* The vector is grown (zero-filled) on demand so COLUMN is always valid. */
313 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
315 if (VEC_length (dw_cfi_ref, row->reg_save) <= column)
316 VEC_safe_grow_cleared (dw_cfi_ref, gc, row->reg_save, column + 1);
317 VEC_replace (dw_cfi_ref, row->reg_save, column, cfi);
320 /* This function fills in a dw_cfa_location structure from a dwarf location
321 descriptor sequence. */
/* Walks LOC interpreting DW_OP_reg*, DW_OP_breg*, DW_OP_plus_uconst, etc.,
   to recover a (register, offset) form of the CFA.
   NOTE(review): many case labels and statements of the switch are absent
   from this excerpt; the visible assignments are representative only. */
324 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
326 struct dw_loc_descr_struct *ptr;
328 cfa->base_offset = 0;
332 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
334 enum dwarf_location_atom op = ptr->dw_loc_opc;
/* DW_OP_reg0..reg31 encode the register in the opcode itself. */
370 cfa->reg = op - DW_OP_reg0;
/* DW_OP_regx carries the register number as an operand. */
373 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
/* DW_OP_breg0..breg31: register plus signed offset operand. */
407 cfa->reg = op - DW_OP_breg0;
408 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
/* DW_OP_bregx: register and offset both as operands. */
411 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
412 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
417 case DW_OP_plus_uconst:
418 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
426 /* Find the previous value for the CFA, iteratively. CFI is the opcode
427 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
428 one level of remember/restore state processing. */
431 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
433 switch (cfi->dw_cfi_opc)
435 case DW_CFA_def_cfa_offset:
436 case DW_CFA_def_cfa_offset_sf:
437 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
439 case DW_CFA_def_cfa_register:
440 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
/* DW_CFA_def_cfa / _sf set both register and offset at once. */
443 case DW_CFA_def_cfa_sf:
444 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
445 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
447 case DW_CFA_def_cfa_expression:
448 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
/* Only one level of remember/restore is supported, enforced by the
   in_use asserts. */
451 case DW_CFA_remember_state:
452 gcc_assert (!remember->in_use);
454 remember->in_use = 1;
456 case DW_CFA_restore_state:
457 gcc_assert (remember->in_use);
459 remember->in_use = 0;
467 /* The current, i.e. most recently generated, row of the CFI table. */
468 static dw_cfi_row *cur_row;
470 /* The row state from a preceding DW_CFA_remember_state. */
471 static dw_cfi_row *remember_row;
473 /* The register used for saving registers to the stack, and its offset
475 static dw_cfa_location cfa_store;
477 /* A temporary register holding an integral value used in adjusting SP
478 or setting up the store_reg. The "offset" field holds the integer
479 value, not an offset. */
480 static dw_cfa_location cfa_temp;
482 /* The (really) current value for DW_CFA_GNU_args_size. We delay actually
483 emitting this data, i.e. updating CUR_ROW, without async unwind. */
484 static HOST_WIDE_INT args_size;
486 /* Determine if two dw_cfa_location structures define the same data. */
/* base_offset only matters for indirect locations, hence the short-circuit. */
489 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
491 return (loc1->reg == loc2->reg
492 && loc1->offset == loc2->offset
493 && loc1->indirect == loc2->indirect
494 && (loc1->indirect == 0
495 || loc1->base_offset == loc2->base_offset));
498 /* Determine if two CFI operands are identical. */
/* T tells us which union member of A and B is active. */
501 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
505 case dw_cfi_oprnd_unused:
507 case dw_cfi_oprnd_reg_num:
508 return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
509 case dw_cfi_oprnd_offset:
510 return a->dw_cfi_offset == b->dw_cfi_offset;
/* Addresses compare by pointer first (cheap), then by content. */
511 case dw_cfi_oprnd_addr:
512 return (a->dw_cfi_addr == b->dw_cfi_addr
513 || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
514 case dw_cfi_oprnd_loc:
515 return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
520 /* Determine if two CFI entries are identical. */
/* Null-tolerant: two nulls are equal, one null is not. */
523 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
525 enum dwarf_call_frame_info opc;
527 /* Make things easier for our callers, including missing operands. */
530 if (a == NULL || b == NULL)
533 /* Obviously, the opcodes must match. */
535 if (opc != b->dw_cfi_opc)
538 /* Compare the two operands, re-using the type of the operands as
539 already exposed elsewhere. */
540 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
541 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
542 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
543 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
546 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
547 what opcode to emit. Returns the CFI opcode to effect the change, or
548 NULL if NEW_CFA == OLD_CFA. */
/* Chooses the most compact of: def_cfa_offset (register unchanged),
   def_cfa_register (offset unchanged), def_cfa (both changed), or a full
   def_cfa_expression when the CFA is indirect. */
551 def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
555 /* If nothing changed, no need to issue any call frame instructions. */
556 if (cfa_equal_p (old_cfa, new_cfa))
561 if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
563 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
564 the CFA register did not change but the offset did. The data
565 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
566 in the assembler via the .cfi_def_cfa_offset directive. */
567 if (new_cfa->offset < 0)
568 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
570 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
571 cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
574 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
575 else if (new_cfa->offset == old_cfa->offset
576 && old_cfa->reg != INVALID_REGNUM
577 && !new_cfa->indirect
578 && !old_cfa->indirect)
580 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
581 indicating the CFA register has changed to <register> but the
582 offset has not changed. */
583 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
584 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
/* NOTE(review): the matching #endif for MIPS_DEBUGGING_INFO is absent
   from this excerpt. */
588 else if (new_cfa->indirect == 0)
590 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
591 indicating the CFA register has changed to <register> with
592 the specified offset. The data factoring for DW_CFA_def_cfa_sf
593 happens in output_cfi, or in the assembler via the .cfi_def_cfa
595 if (new_cfa->offset < 0)
596 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
598 cfi->dw_cfi_opc = DW_CFA_def_cfa;
599 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
600 cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
604 /* Construct a DW_CFA_def_cfa_expression instruction to
605 calculate the CFA using a full location expression since no
606 register-offset pair is available. */
607 struct dw_loc_descr_struct *loc_list;
609 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
610 loc_list = build_cfa_loc (new_cfa, 0);
611 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
617 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
620 def_cfa_1 (dw_cfa_location *new_cfa)
/* Keep cfa_store's offset in sync when the store register is the CFA. */
624 if (cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
625 cfa_store.offset = new_cfa->offset;
627 cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
630 cur_row->cfa = *new_cfa;
/* Remember the expression form so later rows can compare against it. */
631 if (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression)
632 cur_row->cfa_cfi = cfi;
638 /* Add the CFI for saving a register. REG is the CFA column number.
639 If SREG is -1, the register is saved at OFFSET from the CFA;
640 otherwise it is saved in SREG. */
643 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
645 dw_fde_ref fde = cfun ? cfun->fde : NULL;
646 dw_cfi_ref cfi = new_cfi ();
648 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
650 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
652 && fde->stack_realign
653 && sreg == INVALID_REGNUM
655 cfi->dw_cfi_opc = DW_CFA_expression;
656 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
657 cfi->dw_cfi_oprnd2.dw_cfi_loc
658 = build_cfa_aligned_loc (&cur_row->cfa, offset,
659 fde->stack_realignment);
/* Plain save at OFFSET from the CFA; pick the most compact offset opcode. */
661 else if (sreg == INVALID_REGNUM)
663 if (need_data_align_sf_opcode (offset))
664 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
665 else if (reg & ~0x3f)
666 cfi->dw_cfi_opc = DW_CFA_offset_extended;
668 cfi->dw_cfi_opc = DW_CFA_offset;
669 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
671 else if (sreg == reg)
673 /* While we could emit something like DW_CFA_same_value or
674 DW_CFA_restore, we never expect to see something like that
675 in a prologue. This is more likely to be a bug. A backend
676 can always bypass this by using REG_CFA_RESTORE directly. */
/* Saved into another register. */
681 cfi->dw_cfi_opc = DW_CFA_register;
682 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
/* Record the save in the current row so later rows can diff against it. */
686 update_row_reg_save (cur_row, reg, cfi);
689 /* Given a SET, calculate the amount of stack adjustment it
/* PATTERN is a SET rtx; CUR_ARGS_SIZE and CUR_OFFSET are the running
   totals so far. Returns the signed adjustment this SET contributes.
   NOTE(review): several case labels/early returns of this function are
   absent from this excerpt. */
693 stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
694 HOST_WIDE_INT cur_offset)
696 const_rtx src = SET_SRC (pattern);
697 const_rtx dest = SET_DEST (pattern);
698 HOST_WIDE_INT offset = 0;
701 if (dest == stack_pointer_rtx)
703 code = GET_CODE (src);
705 /* Assume (set (reg sp) (reg whatever)) sets args_size
707 if (code == REG && src != stack_pointer_rtx)
709 offset = -cur_args_size;
710 #ifndef STACK_GROWS_DOWNWARD
/* NOTE(review): negation for upward-growing stacks and the #endif are
   absent from this excerpt. */
713 return offset - cur_offset;
716 if (! (code == PLUS || code == MINUS)
717 || XEXP (src, 0) != stack_pointer_rtx
718 || !CONST_INT_P (XEXP (src, 1)))
721 /* (set (reg sp) (plus (reg sp) (const_int))) */
722 offset = INTVAL (XEXP (src, 1));
728 if (MEM_P (src) && !MEM_P (dest))
732 /* (set (mem (pre_dec (reg sp))) (foo)) */
733 src = XEXP (dest, 0);
734 code = GET_CODE (src);
/* PRE_MODIFY / POST_MODIFY: the second operand must be sp + const. */
740 if (XEXP (src, 0) == stack_pointer_rtx)
742 rtx val = XEXP (XEXP (src, 1), 1);
743 /* We handle only adjustments by constant amount. */
744 gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
745 && CONST_INT_P (val));
746 offset = -INTVAL (val);
/* PRE_DEC/POST_DEC style push: grows by the pushed mode's size. */
753 if (XEXP (src, 0) == stack_pointer_rtx)
755 offset = GET_MODE_SIZE (GET_MODE (dest));
/* PRE_INC/POST_INC style pop: shrinks by the mode's size. */
762 if (XEXP (src, 0) == stack_pointer_rtx)
764 offset = -GET_MODE_SIZE (GET_MODE (dest));
779 /* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
780 indexed by INSN_UID. */
/* Entries of -1 mean "not yet computed"; see compute_barrier_args_size. */
782 static HOST_WIDE_INT *barrier_args_size;
784 /* Helper function for compute_barrier_args_size. Handle one insn. */
/* Returns the args_size after INSN. Jump targets whose args_size becomes
   known are recorded in barrier_args_size and pushed onto *NEXT for the
   caller's worklist. */
787 compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
788 VEC (rtx, heap) **next)
790 HOST_WIDE_INT offset = 0;
/* Frame-related insns are handled via their REG_FRAME_RELATED_EXPR
   notes below; others are scanned directly. */
793 if (! RTX_FRAME_RELATED_P (insn))
795 if (prologue_epilogue_contains (insn))
797 else if (GET_CODE (PATTERN (insn)) == SET)
798 offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
799 else if (GET_CODE (PATTERN (insn)) == PARALLEL
800 || GET_CODE (PATTERN (insn)) == SEQUENCE)
802 /* There may be stack adjustments inside compound insns. Search
804 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
805 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
806 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
807 cur_args_size, offset);
/* Frame-related insn: trust the attached expression instead. */
812 rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
816 expr = XEXP (expr, 0);
817 if (GET_CODE (expr) == PARALLEL
818 || GET_CODE (expr) == SEQUENCE)
819 for (i = 1; i < XVECLEN (expr, 0); i++)
821 rtx elem = XVECEXP (expr, 0, i);
823 if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
824 offset += stack_adjust_offset (elem, cur_args_size, offset);
829 #ifndef STACK_GROWS_DOWNWARD
/* NOTE(review): sign flip for upward-growing stacks and the #endif are
   absent from this excerpt. */
833 cur_args_size += offset;
834 if (cur_args_size < 0)
/* Propagate the computed size to this jump's target label. */
839 rtx dest = JUMP_LABEL (insn);
843 if (barrier_args_size [INSN_UID (dest)] < 0)
845 barrier_args_size [INSN_UID (dest)] = cur_args_size;
846 VEC_safe_push (rtx, heap, *next, dest);
851 return cur_args_size;
854 /* Walk the whole function and compute args_size on BARRIERs. */
/* Worklist-based forward propagation: starts from the function entry with
   args_size 0 and flows sizes through fallthrough, jumps and delay slots
   until a fixed point. Results land in barrier_args_size[]. */
857 compute_barrier_args_size (void)
859 int max_uid = get_max_uid (), i;
861 VEC (rtx, heap) *worklist, *next, *tmp;
863 barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
864 for (i = 0; i < max_uid; i++)
865 barrier_args_size[i] = -1;
867 worklist = VEC_alloc (rtx, heap, 20);
868 next = VEC_alloc (rtx, heap, 20);
/* Seed the worklist with the function's first insn at args_size 0. */
870 barrier_args_size[INSN_UID (insn)] = 0;
871 VEC_quick_push (rtx, worklist, insn);
874 while (!VEC_empty (rtx, worklist))
876 rtx prev, body, first_insn;
877 HOST_WIDE_INT cur_args_size;
879 first_insn = insn = VEC_pop (rtx, worklist);
880 cur_args_size = barrier_args_size[INSN_UID (insn)];
/* A barrier just before the label carries the same size. */
881 prev = prev_nonnote_insn (insn);
882 if (prev && BARRIER_P (prev))
883 barrier_args_size[INSN_UID (prev)] = cur_args_size;
885 for (; insn; insn = NEXT_INSN (insn))
887 if (INSN_DELETED_P (insn) || NOTE_P (insn))
889 if (BARRIER_P (insn))
/* A label: either we've already scanned past it, or record and stop. */
894 if (insn == first_insn)
896 else if (barrier_args_size[INSN_UID (insn)] < 0)
898 barrier_args_size[INSN_UID (insn)] = cur_args_size;
903 /* The insns starting with this label have been
904 already scanned or are in the worklist. */
909 body = PATTERN (insn);
910 if (GET_CODE (body) == SEQUENCE)
/* Delay-slot sequence: annulled-branch slots execute only on the
   taken path, so they contribute to the destination's size only. */
912 HOST_WIDE_INT dest_args_size = cur_args_size;
913 for (i = 1; i < XVECLEN (body, 0); i++)
914 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
915 && INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
917 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
918 dest_args_size, &next);
921 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
922 cur_args_size, &next);
924 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
925 compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
926 dest_args_size, &next);
929 = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
930 cur_args_size, &next);
934 = compute_barrier_args_size_1 (insn, cur_args_size, &next);
/* Swap the worklists for the next round. */
938 if (VEC_empty (rtx, next))
941 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
945 VEC_truncate (rtx, next, 0);
948 VEC_free (rtx, heap, worklist);
949 VEC_free (rtx, heap, next);
952 /* Add a CFI to update the running total of the size of arguments
953 pushed onto the stack. */
/* No-op when SIZE matches the row's current args_size, avoiding
   redundant DW_CFA_GNU_args_size instructions. */
956 dwarf2out_args_size (HOST_WIDE_INT size)
958 if (size == cur_row->args_size)
961 cur_row->args_size = size;
962 add_cfi_args_size (size);
965 /* Record a stack adjustment of OFFSET bytes. */
/* Shifts the CFA and cfa_store offsets when they are sp-based, then, with
   ACCUMULATE_OUTGOING_ARGS or async unwind tables, updates args_size. */
968 dwarf2out_stack_adjust (HOST_WIDE_INT offset)
970 dw_cfa_location loc = cur_row->cfa;
972 if (loc.reg == dw_stack_pointer_regnum)
973 loc.offset += offset;
975 if (cfa_store.reg == dw_stack_pointer_regnum)
976 cfa_store.offset += offset;
978 /* ??? The assumption seems to be that if A_O_A, the only CFA adjustments
979 involving the stack pointer are inside the prologue and marked as
980 RTX_FRAME_RELATED_P. That said, should we not verify this assumption
981 by *asserting* A_O_A at this point? Why else would we have a change
982 to the stack pointer? */
983 if (ACCUMULATE_OUTGOING_ARGS)
986 #ifndef STACK_GROWS_DOWNWARD
/* NOTE(review): sign flip for upward-growing stacks, the #endif, and the
   args_size accumulation are absent from this excerpt. */
995 if (flag_asynchronous_unwind_tables)
996 dwarf2out_args_size (args_size);
999 /* Check INSN to see if it looks like a push or a stack adjustment, and
1000 make a note of it if it does. EH uses this information to find out
1001 how much extra space it needs to pop off the stack. */
/* AFTER_P says whether we are being called after INSN was emitted.
   NOTE(review): several early-return statements of this function are
   absent from this excerpt. */
1004 dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
1006 HOST_WIDE_INT offset;
1009 /* Don't handle epilogues at all. Certainly it would be wrong to do so
1010 with this function. Proper support would require all frame-related
1011 insns to be marked, and to be able to handle saving state around
1012 epilogues textually in the middle of the function. */
1013 if (prologue_epilogue_contains (insn))
1016 /* If INSN is an instruction from target of an annulled branch, the
1017 effects are for the target only and so current argument size
1018 shouldn't change at all. */
1020 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
1021 && INSN_FROM_TARGET_P (insn))
1024 /* If only calls can throw, and we have a frame pointer,
1025 save up adjustments until we see the CALL_INSN. */
1026 if (!flag_asynchronous_unwind_tables
1027 && cur_row->cfa.reg != dw_stack_pointer_regnum)
1029 if (CALL_P (insn) && !after_p)
1031 /* Extract the size of the args from the CALL rtx itself. */
1032 insn = PATTERN (insn);
1033 if (GET_CODE (insn) == PARALLEL)
1034 insn = XVECEXP (insn, 0, 0);
1035 if (GET_CODE (insn) == SET)
1036 insn = SET_SRC (insn);
1037 gcc_assert (GET_CODE (insn) == CALL);
1038 dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
/* With synchronous unwind, flush the accumulated size at call sites. */
1043 if (CALL_P (insn) && !after_p)
1045 if (!flag_asynchronous_unwind_tables)
1046 dwarf2out_args_size (args_size);
1049 else if (BARRIER_P (insn))
1051 /* Don't call compute_barrier_args_size () if the only
1052 BARRIER is at the end of function. */
1053 if (barrier_args_size == NULL && next_nonnote_insn (insn))
1054 compute_barrier_args_size ();
1055 if (barrier_args_size == NULL)
1059 offset = barrier_args_size[INSN_UID (insn)];
/* Convert the absolute precomputed size into a delta from here. */
1064 offset -= args_size;
1065 #ifndef STACK_GROWS_DOWNWARD
/* NOTE(review): sign flip for upward-growing stacks and the #endif are
   absent from this excerpt. */
1069 else if (GET_CODE (PATTERN (insn)) == SET)
1070 offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
1071 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1072 || GET_CODE (PATTERN (insn)) == SEQUENCE)
1074 /* There may be stack adjustments inside compound insns. Search
1076 for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
1077 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1078 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
/* Finally record the net adjustment. */
1087 dwarf2out_stack_adjust (offset);
1090 /* We delay emitting a register save until either (a) we reach the end
1091 of the prologue or (b) the register is clobbered. This clusters
1092 register saves so that there are fewer pc advances. */
/* NOTE(review): the struct queued_reg_save declaration's reg/saved_reg
   members are absent from this excerpt; cfa_offset is visible below. */
1097 HOST_WIDE_INT cfa_offset;
1100 DEF_VEC_O (queued_reg_save);
1101 DEF_VEC_ALLOC_O (queued_reg_save, heap);
/* Pending register saves, flushed by dwarf2out_flush_queued_reg_saves. */
1103 static VEC(queued_reg_save, heap) *queued_reg_saves;
1105 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
1106 typedef struct GTY(()) reg_saved_in_data {
1109 } reg_saved_in_data;
1111 DEF_VEC_O (reg_saved_in_data);
1112 DEF_VEC_ALLOC_O (reg_saved_in_data, gc);
1114 /* A set of registers saved in other registers. This is implemented as
1115 a flat array because it normally contains zero or 1 entry, depending
1116 on the target. IA-64 is the big spender here, using a maximum of
1118 static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;
/* The register-save used by the CIE for the return column, if any. */
1120 static GTY(()) reg_saved_in_data *cie_return_save;
1122 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
1123 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
1124 used in places where rtl is prohibited. */
/* REG must be a hard REG rtx; returns its DWARF frame column. */
1126 static inline unsigned
1127 dwf_regno (const_rtx reg)
1129 return DWARF_FRAME_REGNUM (REGNO (reg));
1132 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
/* Two REGs compare by register number; otherwise equality is by
   identity (PC_RTX is a unique shared rtx). */
1135 compare_reg_or_pc (rtx x, rtx y)
1137 if (REG_P (x) && REG_P (y))
1138 return REGNO (x) == REGNO (y);
1142 /* Record SRC as being saved in DEST. DEST may be null to delete an
1143 existing entry. SRC may be a register or PC_RTX. */
1146 record_reg_saved_in_reg (rtx dest, rtx src)
1148 reg_saved_in_data *elt;
1151 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
1152 if (compare_reg_or_pc (elt->orig_reg, src))
1155 VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
1157 elt->saved_in_reg = dest;
1164 elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
1165 elt->orig_reg = src;
1166 elt->saved_in_reg = dest;
1169 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1170 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1173 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1178 /* Duplicates waste space, but it's also necessary to remove them
1179 for correctness, since the queue gets output in reverse order. */
1180 FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
1181 if (compare_reg_or_pc (q->reg, reg))
/* Either reuse the duplicate slot or push a fresh one, then fill it in. */
1184 q = VEC_safe_push (queued_reg_save, heap, queued_reg_saves, NULL);
1188 q->saved_reg = sreg;
1189 q->cfa_offset = offset;
1192 /* Output all the entries in QUEUED_REG_SAVES. */
/* Emits a reg_save CFI for each queued entry (translating pc_rtx to the
   return column) and records register-in-register saves, then empties
   the queue. */
1195 dwarf2out_flush_queued_reg_saves (void)
1200 FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
1202 unsigned int reg, sreg;
1204 record_reg_saved_in_reg (q->saved_reg, q->reg);
1206 if (q->reg == pc_rtx)
1207 reg = DWARF_FRAME_RETURN_COLUMN;
1209 reg = dwf_regno (q->reg);
/* A null saved_reg means "saved at cfa_offset", not "saved in a reg". */
1211 sreg = dwf_regno (q->saved_reg);
1213 sreg = INVALID_REGNUM;
1214 reg_save (reg, sreg, q->cfa_offset);
1217 VEC_truncate (queued_reg_save, queued_reg_saves, 0);
1220 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1221 location for? Or, does it clobber a register which we've previously
1222 said that some other register is saved in, and for which we now
1223 have a new location for? */
/* Used to decide when the queued saves must be flushed before INSN. */
1226 clobbers_queued_reg_save (const_rtx insn)
1231 FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, iq, q)
1234 reg_saved_in_data *rir;
/* Direct clobber of a queued register. */
1236 if (modified_in_p (q->reg, insn))
/* Clobber of a register that currently holds a queued register's value. */
1239 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, ir, rir)
1240 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1241 && modified_in_p (rir->saved_in_reg, insn))
1248 /* What register, if any, is currently saved in REG? */
/* Checks the pending queue first, then the committed
   regs_saved_in_regs table; returns the original register rtx or
   (presumably, in elided code) NULL_RTX when REG holds nothing. */
1251 reg_saved_in (rtx reg)
1253 unsigned int regn = REGNO (reg);
1255 reg_saved_in_data *rir;
1258 FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
1259 if (q->saved_reg && regn == REGNO (q->saved_reg))
1262 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1263 if (regn == REGNO (rir->saved_in_reg))
1264 return rir->orig_reg;
1269 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
/* PAT is the note's value: a PLUS (reg+offset), a bare REG, or a MEM
   (possibly of reg+offset) defining an indirect CFA. */
1272 dwarf2out_frame_debug_def_cfa (rtx pat)
1274 dw_cfa_location loc;
1276 memset (&loc, 0, sizeof (loc));
1278 switch (GET_CODE (pat))
/* (plus (reg) (const_int)): simple register + offset CFA. */
1281 loc.reg = dwf_regno (XEXP (pat, 0));
1282 loc.offset = INTVAL (XEXP (pat, 1));
/* Bare (reg): CFA is the register with zero offset. */
1286 loc.reg = dwf_regno (pat);
/* (mem ...): indirect CFA, optionally with a base offset. */
1291 pat = XEXP (pat, 0);
1292 if (GET_CODE (pat) == PLUS)
1294 loc.base_offset = INTVAL (XEXP (pat, 1));
1295 pat = XEXP (pat, 0);
1297 loc.reg = dwf_regno (pat);
1301 /* Recurse and define an expression. */
1308 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
/* PAT must be a SET whose source adjusts the current CFA register;
   the destination becomes the new CFA register at the adjusted offset. */
1311 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1313 dw_cfa_location loc = cur_row->cfa;
1316 gcc_assert (GET_CODE (pat) == SET);
1317 dest = XEXP (pat, 0);
1318 src = XEXP (pat, 1);
1320 switch (GET_CODE (src))
/* (plus (reg cfa) (const_int)): the source must use the current CFA reg. */
1323 gcc_assert (dwf_regno (XEXP (src, 0)) == loc.reg);
1324 loc.offset -= INTVAL (XEXP (src, 1));
/* The destination register takes over as the CFA register. */
1334 loc.reg = dwf_regno (dest);
1335 gcc_assert (loc.indirect == 0);
1340 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
/* SET stores a register (or pc) into memory at a CFA-relative address;
   record the save at that offset. Multi-register spans from the target
   hook are split into one save per piece. */
1343 dwarf2out_frame_debug_cfa_offset (rtx set)
1345 HOST_WIDE_INT offset;
1346 rtx src, addr, span;
1347 unsigned int sregno;
1349 src = XEXP (set, 1);
1350 addr = XEXP (set, 0);
1351 gcc_assert (MEM_P (addr));
1352 addr = XEXP (addr, 0);
1354 /* As documented, only consider extremely simple addresses. */
1355 switch (GET_CODE (addr))
/* Bare (reg cfa): offset 0 from the CFA. */
1358 gcc_assert (dwf_regno (addr) == cur_row->cfa.reg);
1359 offset = -cur_row->cfa.offset;
/* (plus (reg cfa) (const_int)). */
1362 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_row->cfa.reg);
1363 offset = INTVAL (XEXP (addr, 1)) - cur_row->cfa.offset;
/* Saving pc means saving the return address column. */
1372 sregno = DWARF_FRAME_RETURN_COLUMN;
1376 span = targetm.dwarf_register_span (src);
1377 sregno = dwf_regno (src);
1380 /* ??? We'd like to use queue_reg_save, but we need to come up with
1381 a different flushing heuristic for epilogues. */
1383 reg_save (sregno, INVALID_REGNUM, offset);
1386 /* We have a PARALLEL describing where the contents of SRC live.
1387 Queue register saves for each piece of the PARALLEL. */
1390 HOST_WIDE_INT span_offset = offset;
1392 gcc_assert (GET_CODE (span) == PARALLEL);
1394 limit = XVECLEN (span, 0);
1395 for (par_index = 0; par_index < limit; par_index++)
1397 rtx elem = XVECEXP (span, 0, par_index);
/* NOTE(review): `sregno = dwf_regno (src)` here looks like it should use
   ELEM rather than SRC, but lines are missing from this excerpt —
   verify against the full source before changing. */
1399 sregno = dwf_regno (src);
1400 reg_save (sregno, INVALID_REGNUM, span_offset);
1401 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1406 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
/* SET copies SRC (a register or pc) into register DEST; record that SRC
   is now saved in DEST. */
1409 dwarf2out_frame_debug_cfa_register (rtx set)
1412 unsigned sregno, dregno;
1414 src = XEXP (set, 1);
1415 dest = XEXP (set, 0);
1417 record_reg_saved_in_reg (dest, src);
/* pc maps to the return address column. */
1419 sregno = DWARF_FRAME_RETURN_COLUMN;
1421 sregno = dwf_regno (src);
1423 dregno = dwf_regno (dest);
1425 /* ??? We'd like to use queue_reg_save, but we need to come up with
1426 a different flushing heuristic for epilogues. */
1427 reg_save (sregno, dregno, 0);
1430 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
/* SET stores register SRC into memory DEST described by an arbitrary
   address; emit a DW_CFA_expression whose location is built from DEST's
   address by mem_loc_descriptor. */
1433 dwarf2out_frame_debug_cfa_expression (rtx set)
1435 rtx src, dest, span;
1436 dw_cfi_ref cfi = new_cfi ();
1439 dest = SET_DEST (set);
1440 src = SET_SRC (set);
1442 gcc_assert (REG_P (src));
1443 gcc_assert (MEM_P (dest));
1445 span = targetm.dwarf_register_span (src);
1448 regno = dwf_regno (src);
1450 cfi->dw_cfi_opc = DW_CFA_expression;
1451 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1452 cfi->dw_cfi_oprnd2.dw_cfi_loc
1453 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1454 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1456 /* ??? We'd like to use queue_reg_save, were the interface different,
1457 and, as above, we could manage flushing for epilogues. */
1459 update_row_reg_save (cur_row, regno, cfi);
1462 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
/* Emits a restore for REG's column and clears its entry in the current row. */
1465 dwarf2out_frame_debug_cfa_restore (rtx reg)
1467 unsigned int regno = dwf_regno (reg);
1469 add_cfi_restore (regno);
1470 update_row_reg_save (cur_row, regno, NULL);
1473 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1474 ??? Perhaps we should note in the CIE where windows are saved (instead of
1475 assuming 0(cfa)) and what registers are in the window. */
1478 dwarf2out_frame_debug_cfa_window_save (void)
  /* Emit the GNU extension opcode used for SPARC-style register windows;
     it takes no operands.  */
1480 dw_cfi_ref cfi = new_cfi ();
1482 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1486 /* Record call frame debugging information for an expression EXPR,
1487 which either sets SP or FP (adjusting how we calculate the frame
1488 address) or saves a register to the stack or another register.
1489 LABEL indicates the address of EXPR.
1491 This function encodes a state machine mapping rtxes to actions on
1492 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1493 users need not read the source code.
1495 The High-Level Picture
1497 Changes in the register we use to calculate the CFA: Currently we
1498 assume that if you copy the CFA register into another register, we
1499 should take the other one as the new CFA register; this seems to
1500 work pretty well. If it's wrong for some target, it's simple
1501 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1503 Changes in the register we use for saving registers to the stack:
1504 This is usually SP, but not always. Again, we deduce that if you
1505 copy SP into another register (and SP is not the CFA register),
1506 then the new register is the one we will be using for register
1507 saves. This also seems to work.
1509 Register saves: There's not much guesswork about this one; if
1510 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1511 register save, and the register used to calculate the destination
1512 had better be the one we think we're using for this purpose.
1513 It's also assumed that a copy from a call-saved register to another
1514 register is saving that register if RTX_FRAME_RELATED_P is set on
1515 that instruction. If the copy is from a call-saved register to
1516 the *same* register, that means that the register is now the same
1517 value as in the caller.
1519 Except: If the register being saved is the CFA register, and the
1520 offset is nonzero, we are saving the CFA, so we assume we have to
1521 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1522 the intent is to save the value of SP from the previous frame.
1524 In addition, if a register has previously been saved to a different
1527 Invariants / Summaries of Rules
1529 cfa current rule for calculating the CFA. It usually
1530 consists of a register and an offset. This is
1531 actually stored in cur_row->cfa, but abbreviated
1532 for the purposes of this documentation.
1533 cfa_store register used by prologue code to save things to the stack
1534 cfa_store.offset is the offset from the value of
1535 cfa_store.reg to the actual CFA
1536 cfa_temp register holding an integral value. cfa_temp.offset
1537 stores the value, which will be used to adjust the
1538 stack pointer. cfa_temp is also used like cfa_store,
1539 to track stores to the stack via fp or a temp reg.
1541 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1542 with cfa.reg as the first operand changes the cfa.reg and its
1543 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1546 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1547 expression yielding a constant. This sets cfa_temp.reg
1548 and cfa_temp.offset.
1550 Rule 5: Create a new register cfa_store used to save items to the
1553 Rules 10-14: Save a register to the stack. Define offset as the
1554 difference of the original location and cfa_store's
1555 location (or cfa_temp's location if cfa_temp is used).
1557 Rules 16-20: If AND operation happens on sp in prologue, we assume
1558 stack is realigned. We will use a group of DW_OP_XXX
1559 expressions to represent the location of the stored
1560 register instead of CFA+offset.
1564 "{a,b}" indicates a choice of a xor b.
1565 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1568 (set <reg1> <reg2>:cfa.reg)
1569 effects: cfa.reg = <reg1>
1570 cfa.offset unchanged
1571 cfa_temp.reg = <reg1>
1572 cfa_temp.offset = cfa.offset
1575 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1576 {<const_int>,<reg>:cfa_temp.reg}))
1577 effects: cfa.reg = sp if fp used
1578 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1579 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1580 if cfa_store.reg==sp
1583 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1584 effects: cfa.reg = fp
1585 cfa_offset += +/- <const_int>
1588 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1589 constraints: <reg1> != fp
1591 effects: cfa.reg = <reg1>
1592 cfa_temp.reg = <reg1>
1593 cfa_temp.offset = cfa.offset
1596 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1597 constraints: <reg1> != fp
1599 effects: cfa_store.reg = <reg1>
1600 cfa_store.offset = cfa.offset - cfa_temp.offset
1603 (set <reg> <const_int>)
1604 effects: cfa_temp.reg = <reg>
1605 cfa_temp.offset = <const_int>
1608 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1609 effects: cfa_temp.reg = <reg1>
1610 cfa_temp.offset |= <const_int>
1613 (set <reg> (high <exp>))
1617 (set <reg> (lo_sum <exp> <const_int>))
1618 effects: cfa_temp.reg = <reg>
1619 cfa_temp.offset = <const_int>
1622 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1623 effects: cfa_store.offset -= <const_int>
1624 cfa.offset = cfa_store.offset if cfa.reg == sp
1626 cfa.base_offset = -cfa_store.offset
1629 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1630 effects: cfa_store.offset += -/+ mode_size(mem)
1631 cfa.offset = cfa_store.offset if cfa.reg == sp
1633 cfa.base_offset = -cfa_store.offset
1636 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1639 effects: cfa.reg = <reg1>
1640 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1643 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1644 effects: cfa.reg = <reg1>
1645 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1648 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1649 effects: cfa.reg = <reg1>
1650 cfa.base_offset = -cfa_temp.offset
1651 cfa_temp.offset -= mode_size(mem)
1654 (set <reg> {unspec, unspec_volatile})
1655 effects: target-dependent
1658 (set sp (and: sp <const_int>))
1659 constraints: cfa_store.reg == sp
1660 effects: cfun->fde.stack_realign = 1
1661 cfa_store.offset = 0
1662 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1665 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1666 effects: cfa_store.offset += -/+ mode_size(mem)
1669 (set (mem ({pre_inc, pre_dec} sp)) fp)
1670 constraints: fde->stack_realign == 1
1671 effects: cfa_store.offset = 0
1672 cfa.reg != HARD_FRAME_POINTER_REGNUM
1675 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1676 constraints: fde->stack_realign == 1
1678 && cfa.indirect == 0
1679 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1680 effects: Use DW_CFA_def_cfa_expression to define cfa
1681 cfa.reg == fde->drap_reg */
  /* Interpret one frame-related SET (or PARALLEL/SEQUENCE of SETs) EXPR and
     update the abstract state (cfa, cfa_store, cfa_temp) according to the
     rules documented in the large comment preceding this function.  */
1684 dwarf2out_frame_debug_expr (rtx expr)
1686 dw_cfa_location cfa = cur_row->cfa;
1687 rtx src, dest, span;
1688 HOST_WIDE_INT offset;
1691 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1692 the PARALLEL independently. The first element is always processed if
1693 it is a SET. This is for backward compatibility. Other elements
1694 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1695 flag is set in them. */
1696 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1699 int limit = XVECLEN (expr, 0);
1702 /* PARALLELs have strict read-modify-write semantics, so we
1703 ought to evaluate every rvalue before changing any lvalue.
1704 It's cumbersome to do that in general, but there's an
1705 easy approximation that is enough for all current users:
1706 handle register saves before register assignments. */
1707 if (GET_CODE (expr) == PARALLEL)
1708 for (par_index = 0; par_index < limit; par_index++)
1710 elem = XVECEXP (expr, 0, par_index)
1711 if (GET_CODE (elem) == SET
1712 && MEM_P (SET_DEST (elem))
1713 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1714 dwarf2out_frame_debug_expr (elem);
  /* Second pass: the register assignments (non-MEM destinations).  */
1717 for (par_index = 0; par_index < limit; par_index++)
1719 elem = XVECEXP (expr, 0, par_index);
1720 if (GET_CODE (elem) == SET
1721 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1722 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1723 dwarf2out_frame_debug_expr (elem);
1724 else if (GET_CODE (elem) == SET
1726 && !RTX_FRAME_RELATED_P (elem))
1728 /* Stack adjustment combining might combine some post-prologue
1729 stack adjustment into a prologue stack adjustment. */
1730 HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);
1733 dwarf2out_stack_adjust (offset);
  /* From here on we are handling a single SET.  */
1739 gcc_assert (GET_CODE (expr) == SET);
1741 src = SET_SRC (expr);
1742 dest = SET_DEST (expr);
1746 rtx rsi = reg_saved_in (src);
1753 switch (GET_CODE (dest))
1756 switch (GET_CODE (src))
1758 /* Setting FP from SP. */
1760 if (cfa.reg == dwf_regno (src))
1763 /* Update the CFA rule wrt SP or FP. Make sure src is
1764 relative to the current CFA register.
1766 We used to require that dest be either SP or FP, but the
1767 ARM copies SP to a temporary register, and from there to
1768 FP. So we just rely on the backends to only set
1769 RTX_FRAME_RELATED_P on appropriate insns. */
1770 cfa.reg = dwf_regno (dest);
1771 cfa_temp.reg = cfa.reg;
1772 cfa_temp.offset = cfa.offset;
1776 /* Saving a register in a register. */
1777 gcc_assert (!fixed_regs [REGNO (dest)]
1778 /* For the SPARC and its register window. */
1779 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1781 /* After stack is aligned, we can only save SP in FP
1782 if drap register is used. In this case, we have
1783 to restore stack pointer with the CFA value and we
1784 don't generate this DWARF information. */
1786 && fde->stack_realign
1787 && REGNO (src) == STACK_POINTER_REGNUM)
1788 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1789 && fde->drap_reg != INVALID_REGNUM
1790 && cfa.reg != dwf_regno (src));
1792 queue_reg_save (src, dest, 0);
  /* Rule 2: adjusting SP by a constant or by cfa_temp.  */
1799 if (dest == stack_pointer_rtx)
1803 switch (GET_CODE (XEXP (src, 1)))
1806 offset = INTVAL (XEXP (src, 1));
1809 gcc_assert (dwf_regno (XEXP (src, 1)) == cfa_temp.reg);
1810 offset = cfa_temp.offset;
1816 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1818 /* Restoring SP from FP in the epilogue. */
1819 gcc_assert (cfa.reg == dw_frame_pointer_regnum);
1820 cfa.reg = dw_stack_pointer_regnum;
1822 else if (GET_CODE (src) == LO_SUM)
1823 /* Assume we've set the source reg of the LO_SUM from sp. */
1826 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
  /* A PLUS adds, a MINUS subtracts; normalize the sign of OFFSET.  */
1828 if (GET_CODE (src) != MINUS)
1830 if (cfa.reg == dw_stack_pointer_regnum)
1831 cfa.offset += offset;
1832 if (cfa_store.reg == dw_stack_pointer_regnum)
1833 cfa_store.offset += offset;
1835 else if (dest == hard_frame_pointer_rtx)
1838 /* Either setting the FP from an offset of the SP,
1839 or adjusting the FP */
1840 gcc_assert (frame_pointer_needed);
1842 gcc_assert (REG_P (XEXP (src, 0))
1843 && dwf_regno (XEXP (src, 0)) == cfa.reg
1844 && CONST_INT_P (XEXP (src, 1)));
1845 offset = INTVAL (XEXP (src, 1));
1846 if (GET_CODE (src) != MINUS)
1848 cfa.offset += offset;
1849 cfa.reg = dw_frame_pointer_regnum;
1853 gcc_assert (GET_CODE (src) != MINUS);
1856 if (REG_P (XEXP (src, 0))
1857 && dwf_regno (XEXP (src, 0)) == cfa.reg
1858 && CONST_INT_P (XEXP (src, 1)))
1860 /* Setting a temporary CFA register that will be copied
1861 into the FP later on. */
1862 offset = - INTVAL (XEXP (src, 1));
1863 cfa.offset += offset;
1864 cfa.reg = dwf_regno (dest);
1865 /* Or used to save regs to the stack. */
1866 cfa_temp.reg = cfa.reg;
1867 cfa_temp.offset = cfa.offset;
1871 else if (REG_P (XEXP (src, 0))
1872 && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
1873 && XEXP (src, 1) == stack_pointer_rtx)
1875 /* Setting a scratch register that we will use instead
1876 of SP for saving registers to the stack. */
1877 gcc_assert (cfa.reg == dw_stack_pointer_regnum);
1878 cfa_store.reg = dwf_regno (dest);
1879 cfa_store.offset = cfa.offset - cfa_temp.offset;
1883 else if (GET_CODE (src) == LO_SUM
1884 && CONST_INT_P (XEXP (src, 1)))
1886 cfa_temp.reg = dwf_regno (dest);
1887 cfa_temp.offset = INTVAL (XEXP (src, 1));
  /* Rule 6: loading a constant into a register tracks it in cfa_temp.  */
1896 cfa_temp.reg = dwf_regno (dest);
1897 cfa_temp.offset = INTVAL (src);
  /* Rule 7: IOR of cfa_temp with a constant.  */
1902 gcc_assert (REG_P (XEXP (src, 0))
1903 && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
1904 && CONST_INT_P (XEXP (src, 1)));
1906 cfa_temp.reg = dwf_regno (dest);
1907 cfa_temp.offset |= INTVAL (XEXP (src, 1));
1910 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1911 which will fill in all of the bits. */
1918 case UNSPEC_VOLATILE:
1919 /* All unspecs should be represented by REG_CFA_* notes. */
1925 /* If this AND operation happens on stack pointer in prologue,
1926 we assume the stack is realigned and we extract the
1928 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1930 /* We interpret reg_save differently with stack_realign set.
1931 Thus we must flush whatever we have queued first. */
1932 dwarf2out_flush_queued_reg_saves ();
1934 gcc_assert (cfa_store.reg == dwf_regno (XEXP (src, 0)));
1935 fde->stack_realign = 1;
1936 fde->stack_realignment = INTVAL (XEXP (src, 1));
1937 cfa_store.offset = 0;
1939 if (cfa.reg != dw_stack_pointer_regnum
1940 && cfa.reg != dw_frame_pointer_regnum)
1941 fde->drap_reg = cfa.reg;
1954 /* Saving a register to the stack. Make sure dest is relative to the
1956 switch (GET_CODE (XEXP (dest, 0)))
1962 /* We can't handle variable size modifications. */
1963 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1965 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1967 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1968 && cfa_store.reg == dw_stack_pointer_regnum);
1970 cfa_store.offset += offset;
1971 if (cfa.reg == dw_stack_pointer_regnum)
1972 cfa.offset = cfa_store.offset;
1974 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1975 offset -= cfa_store.offset;
1977 offset = -cfa_store.offset;
  /* Rule 11: push/pop addressing; OFFSET is the MEM's size.  */
1984 offset = GET_MODE_SIZE (GET_MODE (dest));
1985 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1988 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1989 == STACK_POINTER_REGNUM)
1990 && cfa_store.reg == dw_stack_pointer_regnum);
1992 cfa_store.offset += offset;
1994 /* Rule 18: If stack is aligned, we will use FP as a
1995 reference to represent the address of the stored
1998 && fde->stack_realign
1999 && src == hard_frame_pointer_rtx)
2001 gcc_assert (cfa.reg != dw_frame_pointer_regnum);
2002 cfa_store.offset = 0;
2005 if (cfa.reg == dw_stack_pointer_regnum)
2006 cfa.offset = cfa_store.offset;
2008 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
2009 offset += -cfa_store.offset;
2011 offset = -cfa_store.offset;
2015 /* With an offset. */
2022 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
2023 && REG_P (XEXP (XEXP (dest, 0), 0)));
2024 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
2025 if (GET_CODE (XEXP (dest, 0)) == MINUS)
2028 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
  /* Translate OFFSET so it is relative to the CFA.  */
2030 if (cfa.reg == regno)
2031 offset -= cfa.offset;
2032 else if (cfa_store.reg == regno)
2033 offset -= cfa_store.offset;
2036 gcc_assert (cfa_temp.reg == regno);
2037 offset -= cfa_temp.offset;
2043 /* Without an offset. */
2046 unsigned int regno = dwf_regno (XEXP (dest, 0));
2048 if (cfa.reg == regno)
2049 offset = -cfa.offset;
2050 else if (cfa_store.reg == regno)
2051 offset = -cfa_store.offset;
2054 gcc_assert (cfa_temp.reg == regno);
2055 offset = -cfa_temp.offset;
  /* Rule 14: post_inc through cfa_temp.  */
2062 gcc_assert (cfa_temp.reg == dwf_regno (XEXP (XEXP (dest, 0), 0)));
2063 offset = -cfa_temp.offset;
2064 cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
2072 /* If the source operand of this MEM operation is a memory,
2073 we only care how much stack grew. */
2078 && REGNO (src) != STACK_POINTER_REGNUM
2079 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
2080 && dwf_regno (src) == cfa.reg)
2082 /* We're storing the current CFA reg into the stack. */
2084 if (cfa.offset == 0)
2087 /* If stack is aligned, putting CFA reg into stack means
2088 we can no longer use reg + offset to represent CFA.
2089 Here we use DW_CFA_def_cfa_expression instead. The
2090 result of this expression equals to the original CFA
2093 && fde->stack_realign
2094 && cfa.indirect == 0
2095 && cfa.reg != dw_frame_pointer_regnum)
2097 dw_cfa_location cfa_exp;
2099 gcc_assert (fde->drap_reg == cfa.reg);
2101 cfa_exp.indirect = 1;
2102 cfa_exp.reg = dw_frame_pointer_regnum;
2103 cfa_exp.base_offset = offset;
2106 fde->drap_reg_saved = 1;
2108 def_cfa_1 (&cfa_exp);
2112 /* If the source register is exactly the CFA, assume
2113 we're saving SP like any other register; this happens
2116 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
2121 /* Otherwise, we'll need to look in the stack to
2122 calculate the CFA. */
2123 rtx x = XEXP (dest, 0);
2127 gcc_assert (REG_P (x));
2129 cfa.reg = dwf_regno (x);
2130 cfa.base_offset = offset;
  /* The normal case: queue a plain register save at OFFSET, splitting a
     multi-register span into per-piece saves when the target asks for it.  */
2141 span = targetm.dwarf_register_span (src);
2143 queue_reg_save (src, NULL_RTX, offset);
2146 /* We have a PARALLEL describing where the contents of SRC live.
2147 Queue register saves for each piece of the PARALLEL. */
2150 HOST_WIDE_INT span_offset = offset;
2152 gcc_assert (GET_CODE (span) == PARALLEL);
2154 limit = XVECLEN (span, 0);
2155 for (par_index = 0; par_index < limit; par_index++)
2157 rtx elem = XVECEXP (span, 0, par_index);
2158 queue_reg_save (elem, NULL_RTX, span_offset);
2159 span_offset += GET_MODE_SIZE (GET_MODE (elem));
2169 /* Record call frame debugging information for INSN, which either
2170 sets SP or FP (adjusting how we calculate the frame address) or saves a
2171 register to the stack. If INSN is NULL_RTX, initialize our state.
2173 If AFTER_P is false, we're being called before the insn is emitted,
2174 otherwise after. Call instructions get invoked twice. */
2177 dwarf2out_frame_debug (rtx insn, bool after_p)
2180 bool handled_one = false;
2181 bool need_flush = false;
  /* Anything that might clobber a queued register save must force the
     queue out first so the saves are not attributed past the clobber.  */
2183 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2184 dwarf2out_flush_queued_reg_saves ();
2186 if (!RTX_FRAME_RELATED_P (insn))
2188 /* ??? This should be done unconditionally since stack adjustments
2189 matter if the stack pointer is not the CFA register anymore but
2190 is still used to save registers. */
2191 if (!ACCUMULATE_OUTGOING_ARGS)
2192 dwarf2out_notice_stack_adjust (insn, after_p);
2196 any_cfis_emitted = false;
  /* Walk the REG_NOTES; explicit REG_CFA_* notes take precedence over
     pattern-based interpretation via dwarf2out_frame_debug_expr.  */
2198 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2199 switch (REG_NOTE_KIND (note))
2201 case REG_FRAME_RELATED_EXPR:
2202 insn = XEXP (note, 0);
2205 case REG_CFA_DEF_CFA:
2206 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2210 case REG_CFA_ADJUST_CFA:
2215 if (GET_CODE (n) == PARALLEL)
2216 n = XVECEXP (n, 0, 0);
2218 dwarf2out_frame_debug_adjust_cfa (n);
2222 case REG_CFA_OFFSET:
2225 n = single_set (insn);
2226 dwarf2out_frame_debug_cfa_offset (n);
2230 case REG_CFA_REGISTER:
2235 if (GET_CODE (n) == PARALLEL)
2236 n = XVECEXP (n, 0, 0);
2238 dwarf2out_frame_debug_cfa_register (n);
2242 case REG_CFA_EXPRESSION:
2245 n = single_set (insn);
2246 dwarf2out_frame_debug_cfa_expression (n);
2250 case REG_CFA_RESTORE:
2255 if (GET_CODE (n) == PARALLEL)
2256 n = XVECEXP (n, 0, 0);
2259 dwarf2out_frame_debug_cfa_restore (n);
2263 case REG_CFA_SET_VDRAP:
2267 dw_fde_ref fde = cfun->fde;
2270 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2272 fde->vdrap_reg = dwf_regno (n);
2278 case REG_CFA_WINDOW_SAVE:
2279 dwarf2out_frame_debug_cfa_window_save ();
2283 case REG_CFA_FLUSH_QUEUE:
2284 /* The actual flush happens below. */
2295 /* Minimize the number of advances by emitting the entire queue
2296 once anything is emitted. */
2297 need_flush |= any_cfis_emitted;
  /* No REG_CFA_* note handled the insn: interpret its pattern.  */
2301 insn = PATTERN (insn);
2303 dwarf2out_frame_debug_expr (insn);
2305 /* Check again. A parallel can save and update the same register.
2306 We could probably check just once, here, but this is safer than
2307 removing the check at the start of the function. */
2308 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2313 dwarf2out_flush_queued_reg_saves ();
2316 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2319 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2321 size_t i, n_old, n_new, n_max;
  /* First reconcile the CFA rule itself, then args_size, then each
     register column.  */
2324 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2325 add_cfi (new_row->cfa_cfi);
2328 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2333 if (old_row->args_size != new_row->args_size)
2334 add_cfi_args_size (new_row->args_size);
2336 n_old = VEC_length (dw_cfi_ref, old_row->reg_save);
2337 n_new = VEC_length (dw_cfi_ref, new_row->reg_save);
2338 n_max = MAX (n_old, n_new);
2340 for (i = 0; i < n_max; ++i)
2342 dw_cfi_ref r_old = NULL, r_new = NULL;
2345 r_old = VEC_index (dw_cfi_ref, old_row->reg_save, i);
2347 r_new = VEC_index (dw_cfi_ref, new_row->reg_save, i);
  /* A rule present in OLD but absent in NEW means the register reverts
     to its CIE state; emit a restore.  Differing rules re-emit NEW's.  */
2351 else if (r_new == NULL)
2352 add_cfi_restore (i);
2353 else if (!cfi_equal_p (r_old, r_new))
2358 /* Examine CFI and return true if a cfi label and set_loc is needed
2359 beforehand. Even when generating CFI assembler instructions, we
2360 still have to add the cfi to the list so that lookup_cfa_1 works
2361 later on. When -g2 and above we even need to force emitting of
2362 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2363 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2364 and so don't use convert_cfa_to_fb_loc_list. */
2367 cfi_label_required_p (dw_cfi_ref cfi)
  /* Without .cfi_* asm directives every CFI needs a label.  */
2369 if (!dwarf2out_do_cfi_asm ())
2372 if (dwarf_version == 2
2373 && debug_info_level > DINFO_LEVEL_TERSE
2374 && (write_symbols == DWARF2_DEBUG
2375 || write_symbols == VMS_AND_DWARF2_DEBUG))
  /* Only opcodes that change the CFA rule (or restore state, which may
     change it implicitly) matter to convert_cfa_to_fb_loc_list.  */
2377 switch (cfi->dw_cfi_opc)
2379 case DW_CFA_def_cfa_offset:
2380 case DW_CFA_def_cfa_offset_sf:
2381 case DW_CFA_def_cfa_register:
2382 case DW_CFA_def_cfa:
2383 case DW_CFA_def_cfa_sf:
2384 case DW_CFA_def_cfa_expression:
2385 case DW_CFA_restore_state:
2394 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2395 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2398 add_cfis_to_fde (void)
2400 dw_fde_ref fde = cfun->fde;
2402 /* We always start with a function_begin label. */
2405 for (insn = get_insns (); insn; insn = next)
2407 next = NEXT_INSN (insn);
2409 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
  /* Remember where the FDE splits so the output code can start a fresh
     advance_loc sequence for the second section.  */
2411 fde->dw_fde_switch_cfi_index
2412 = VEC_length (dw_cfi_ref, fde->dw_fde_cfi);
2413 /* Don't attempt to advance_loc4 between labels
2414 in different sections. */
2418 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
  /* Batch a run of consecutive CFI notes under a single label: one
     label is required if any note in the run requires one.  */
2420 bool required = cfi_label_required_p (NOTE_CFI (insn));
2421 while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2423 required |= cfi_label_required_p (NOTE_CFI (next));
2424 next = NEXT_INSN (next);
2428 int num = dwarf2out_cfi_label_num;
2429 const char *label = dwarf2out_cfi_label ();
2433 /* Set the location counter to the new label. */
2435 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2436 : DW_CFA_advance_loc4);
2437 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2438 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);
2440 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2441 NOTE_LABEL_NUMBER (tmp) = num;
  /* Push every CFI of the batched run onto the FDE's vector.  */
2446 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
2447 insn = NEXT_INSN (insn);
2449 while (insn != next);
2455 /* Scan the function and create the initial set of CFI notes. */
2458 create_cfi_notes (void)
2462 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
  /* New CFI notes are inserted after ADD_CFI_INSN; by default that is
     just before the insn being processed.  */
2466 add_cfi_insn = PREV_INSN (insn);
2468 if (BARRIER_P (insn))
2470 dwarf2out_frame_debug (insn, false);
2476 switch (NOTE_KIND (insn))
2478 case NOTE_INSN_PROLOGUE_END:
2479 dwarf2out_flush_queued_reg_saves ();
2482 case NOTE_INSN_EPILOGUE_BEG:
2483 #if defined(HAVE_epilogue)
2484 dwarf2out_cfi_begin_epilogue (insn);
2488 case NOTE_INSN_CFA_RESTORE_STATE:
2489 add_cfi_insn = insn;
2490 dwarf2out_frame_debug_restore_state ();
2493 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
2494 /* In dwarf2out_switch_text_section, we'll begin a new FDE
2495 for the portion of the function in the alternate text
2496 section. The row state at the very beginning of that
2497 new FDE will be exactly the row state from the CIE.
2498 Emit whatever CFIs are necessary to make CUR_ROW current. */
2499 add_cfi_insn = insn;
2500 change_cfi_row (cie_cfi_row, cur_row);
2506 if (!NONDEBUG_INSN_P (insn))
2509 pat = PATTERN (insn);
2510 if (asm_noperands (pat) >= 0)
2512 dwarf2out_frame_debug (insn, false);
  /* For delay-slot SEQUENCEs, process the slot insns too (element 0 is
     the branch itself).  */
2516 if (GET_CODE (pat) == SEQUENCE)
2518 int i, n = XVECLEN (pat, 0);
2519 for (i = 1; i < n; ++i)
2520 dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
2524 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL)
2525 dwarf2out_frame_debug (insn, false);
2527 /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
2528 Putting the note after the VEC should be ok. */
2529 if (!tablejump_p (insn, NULL, &add_cfi_insn))
2530 add_cfi_insn = insn;
2532 dwarf2out_frame_debug (insn, true);
2535 add_cfi_insn = NULL;
2538 /* Determine if we need to save and restore CFI information around this
2539 epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
2540 we do need to save/restore, then emit the save now, and insert a
2541 NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
2544 dwarf2out_cfi_begin_epilogue (rtx insn)
2546 bool saw_frp = false;
2549 /* Scan forward to the return insn, noticing if there are possible
2550 frame related insns. */
2551 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
2556 /* Look for both regular and sibcalls to end the block. */
2557 if (returnjump_p (i))
2559 if (CALL_P (i) && SIBLING_CALL_P (i))
2562 if (GET_CODE (PATTERN (i)) == SEQUENCE)
  /* The return/sibcall may be hidden in a delay-slot SEQUENCE.  */
2565 rtx seq = PATTERN (i);
2567 if (returnjump_p (XVECEXP (seq, 0, 0)))
2569 if (CALL_P (XVECEXP (seq, 0, 0))
2570 && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
2573 for (idx = 0; idx < XVECLEN (seq, 0); idx++)
2574 if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
2578 if (RTX_FRAME_RELATED_P (i))
2582 /* If the port doesn't emit epilogue unwind info, we don't need a
2583 save/restore pair. */
2587 /* Otherwise, search forward to see if the return insn was the last
2588 basic block of the function. If so, we don't need save/restore. */
2589 gcc_assert (i != NULL);
2590 i = next_real_insn (i);
2594 /* Insert the restore before that next real insn in the stream, and before
2595 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2596 properly nested. This should be after any label or alignment. This
2597 will be pushed into the CFI stream by the function below. */
2600 rtx p = PREV_INSN (i);
2603 if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
2607 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);
  /* Emit DW_CFA_remember_state with the next CFI, and snapshot the
     current row so restore_state can reinstate it.  */
2609 emit_cfa_remember = true;
2611 /* And emulate the state save. */
2612 gcc_assert (remember_row == NULL);
2613 remember_row = copy_cfi_row (cur_row);
2616 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
2620 dwarf2out_frame_debug_restore_state (void)
2622 dw_cfi_ref cfi = new_cfi ();
  /* Emit DW_CFA_restore_state and swap the remembered row back in,
     discarding the row built up during the epilogue.  */
2624 cfi->dw_cfi_opc = DW_CFA_restore_state;
2627 gcc_assert (remember_row != NULL);
2628 free_cfi_row (cur_row);
2629 cur_row = remember_row;
2630 remember_row = NULL;
2633 /* Record the initial position of the return address. RTL is
2634 INCOMING_RETURN_ADDR_RTX. */
2637 initial_return_save (rtx rtl)
2639 unsigned int reg = INVALID_REGNUM;
2640 HOST_WIDE_INT offset = 0;
2642 switch (GET_CODE (rtl))
2645 /* RA is in a register. */
2646 reg = dwf_regno (rtl);
2650 /* RA is on the stack. */
2651 rtl = XEXP (rtl, 0);
  /* Decode the address: plain SP, SP+const, or SP-const.  */
2652 switch (GET_CODE (rtl))
2655 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2660 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2661 offset = INTVAL (XEXP (rtl, 1));
2665 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2666 offset = -INTVAL (XEXP (rtl, 1));
2676 /* The return address is at some offset from any value we can
2677 actually load. For instance, on the SPARC it is in %i7+8. Just
2678 ignore the offset for now; it doesn't matter for unwinding frames. */
2679 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2680 initial_return_save (XEXP (rtl, 0));
  /* If the RA is not already in its DWARF return column, record where it
     actually lives; OFFSET is made CFA-relative here.  */
2687 if (reg != DWARF_FRAME_RETURN_COLUMN)
2689 if (reg != INVALID_REGNUM)
2690 record_reg_saved_in_reg (rtl, pc_rtx);
2691 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
2695 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2696 state at each location within the function. These notes will be
2697 emitted during pass_final. */
2700 execute_dwarf2_frame (void)
2702 /* The first time we're called, compute the incoming frame state. */
2703 if (cie_cfi_vec == NULL)
2705 dw_cfa_location loc;
2707 dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
2708 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
2710 add_cfi_vec = &cie_cfi_vec;
2711 cie_cfi_row = cur_row = new_cfi_row ();
2713 /* On entry, the Canonical Frame Address is at SP. */
2714 memset(&loc, 0, sizeof (loc));
2715 loc.reg = dw_stack_pointer_regnum;
2716 loc.offset = INCOMING_FRAME_SP_OFFSET;
2719 if (targetm.debug_unwind_info () == UI_DWARF2
2720 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2722 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2724 /* For a few targets, we have the return address incoming into a
2725 register, but choose a different return column. This will result
2726 in a DW_CFA_register for the return, and an entry in
2727 regs_saved_in_regs to match. If the target later stores that
2728 return address register to the stack, we want to be able to emit
2729 the DW_CFA_offset against the return column, not the intermediate
2730 save register. Save the contents of regs_saved_in_regs so that
2731 we can re-initialize it at the start of each function. */
2732 switch (VEC_length (reg_saved_in_data, regs_saved_in_regs))
2737 cie_return_save = ggc_alloc_reg_saved_in_data ();
2738 *cie_return_save = *VEC_index (reg_saved_in_data,
2739 regs_saved_in_regs, 0);
2740 regs_saved_in_regs = NULL;
2750 /* Set up state for generating call frame debug info. */
2751 gcc_checking_assert (queued_reg_saves == NULL);
2752 gcc_checking_assert (regs_saved_in_regs == NULL);
  /* Per-function state starts as a copy of the CIE row.  */
2754 cur_row = copy_cfi_row (cie_cfi_row);
2755 if (cie_return_save)
2756 VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, cie_return_save);
2758 cfa_store = cur_row->cfa;
2761 memset (&cfa_temp, 0, sizeof(cfa_temp));
2762 cfa_temp.reg = INVALID_REGNUM;
2764 dwarf2out_alloc_current_fde ();
2767 create_cfi_notes ();
2770 /* Reset all function-specific information, particularly for GC. */
2771 XDELETEVEC (barrier_args_size);
2772 barrier_args_size = NULL;
2773 regs_saved_in_regs = NULL;
2774 VEC_free (queued_reg_save, heap, queued_reg_saves);
2776 free_cfi_row (cur_row);
2782 /* Convert a DWARF call frame info. operation to its string name */
2785 dwarf_cfi_name (unsigned int cfi_opc)
  /* Simple opcode -> name table used for debug/asm commentary output.  */
2789 case DW_CFA_advance_loc:
2790 return "DW_CFA_advance_loc";
2792 return "DW_CFA_offset";
2793 case DW_CFA_restore:
2794 return "DW_CFA_restore";
2796 return "DW_CFA_nop";
2797 case DW_CFA_set_loc:
2798 return "DW_CFA_set_loc";
2799 case DW_CFA_advance_loc1:
2800 return "DW_CFA_advance_loc1";
2801 case DW_CFA_advance_loc2:
2802 return "DW_CFA_advance_loc2";
2803 case DW_CFA_advance_loc4:
2804 return "DW_CFA_advance_loc4";
2805 case DW_CFA_offset_extended:
2806 return "DW_CFA_offset_extended";
2807 case DW_CFA_restore_extended:
2808 return "DW_CFA_restore_extended";
2809 case DW_CFA_undefined:
2810 return "DW_CFA_undefined";
2811 case DW_CFA_same_value:
2812 return "DW_CFA_same_value";
2813 case DW_CFA_register:
2814 return "DW_CFA_register";
2815 case DW_CFA_remember_state:
2816 return "DW_CFA_remember_state";
2817 case DW_CFA_restore_state:
2818 return "DW_CFA_restore_state";
2819 case DW_CFA_def_cfa:
2820 return "DW_CFA_def_cfa";
2821 case DW_CFA_def_cfa_register:
2822 return "DW_CFA_def_cfa_register";
2823 case DW_CFA_def_cfa_offset:
2824 return "DW_CFA_def_cfa_offset";
2827 case DW_CFA_def_cfa_expression:
2828 return "DW_CFA_def_cfa_expression";
2829 case DW_CFA_expression:
2830 return "DW_CFA_expression";
2831 case DW_CFA_offset_extended_sf:
2832 return "DW_CFA_offset_extended_sf";
2833 case DW_CFA_def_cfa_sf:
2834 return "DW_CFA_def_cfa_sf";
2835 case DW_CFA_def_cfa_offset_sf:
2836 return "DW_CFA_def_cfa_offset_sf";
2838 /* SGI/MIPS specific */
2839 case DW_CFA_MIPS_advance_loc8:
2840 return "DW_CFA_MIPS_advance_loc8";
2842 /* GNU extensions */
2843 case DW_CFA_GNU_window_save:
2844 return "DW_CFA_GNU_window_save";
2845 case DW_CFA_GNU_args_size:
2846 return "DW_CFA_GNU_args_size";
2847 case DW_CFA_GNU_negative_offset_extended:
2848 return "DW_CFA_GNU_negative_offset_extended";
  /* Fallback for unrecognized opcodes.  */
2851 return "DW_CFA_<unknown>";
2855 /* This routine will generate the correct assembly data for a location
2856 description based on a cfi entry with a complex address. */
/* CFI is the DW_CFA_expression / DW_CFA_def_cfa_expression entry to
   emit; FOR_EH is nonzero when emitting for the EH frame (affects
   register-number mapping via DWARF2_FRAME_REG_OUT).  */
2859 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
2861 dw_loc_descr_ref loc;
/* DW_CFA_expression carries a target register operand before the
   expression; DW_CFA_def_cfa_expression carries only the expression.  */
2864 if (cfi->dw_cfi_opc == DW_CFA_expression)
2867 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2868 dw2_asm_output_data (1, r, NULL);
2869 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2872 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2874 /* Output the size of the block. */
2875 size = size_of_locs (loc);
2876 dw2_asm_output_data_uleb128 (size, NULL);
2878 /* Now output the operations themselves. */
2879 output_loc_sequence (loc, for_eh);
2882 /* Similar, but used for .cfi_escape. */
/* Same structure as output_cfa_loc, but emits raw comma-separated bytes
   suitable as operands of a .cfi_escape directive (always EH numbering,
   hence the hard-coded 1 below).  */
2885 output_cfa_loc_raw (dw_cfi_ref cfi)
2887 dw_loc_descr_ref loc;
2890 if (cfi->dw_cfi_opc == DW_CFA_expression)
2893 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2894 fprintf (asm_out_file, "%#x,", r);
2895 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2898 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2900 /* Output the size of the block. */
2901 size = size_of_locs (loc);
2902 dw2_asm_output_data_uleb128_raw (size);
2903 fputc (',', asm_out_file);
2905 /* Now output the operations themselves. */
2906 output_loc_sequence_raw (loc);
2909 /* Output a Call Frame Information opcode and its operand(s). */
/* NOTE(review): emits CFI as binary data into the current section.  FDE
   supplies/records the current label for advance_loc deltas; FOR_EH
   selects EH vs. debug register numbering.  Several `break;`s, braces
   and a few operand-setup lines are elided in this gapped listing.  */
2912 output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
/* The three "primary" opcodes encode an operand in their low 6 bits.  */
2917 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
2918 dw2_asm_output_data (1, (cfi->dw_cfi_opc
2919 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
2920 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
2921 ((unsigned HOST_WIDE_INT)
2922 cfi->dw_cfi_oprnd1.dw_cfi_offset));
2923 else if (cfi->dw_cfi_opc == DW_CFA_offset)
2925 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2926 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
2927 "DW_CFA_offset, column %#lx", r);
/* div_data_align presumably scales by the CIE data alignment factor --
   confirm against its definition elsewhere in this file.  */
2928 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2929 dw2_asm_output_data_uleb128 (off, NULL);
2931 else if (cfi->dw_cfi_opc == DW_CFA_restore)
2933 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2934 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
2935 "DW_CFA_restore, column %#lx", r);
/* Everything else: one opcode byte followed by operand-specific data.  */
2939 dw2_asm_output_data (1, cfi->dw_cfi_opc,
2940 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
2942 switch (cfi->dw_cfi_opc)
2944 case DW_CFA_set_loc:
/* Encoded address when emitting EH (branch guard elided in listing).  */
2946 dw2_asm_output_encoded_addr_rtx (
2947 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
2948 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
2951 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
2952 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
2953 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
/* The advance_locN variants emit a label delta of 1/2/4/8 bytes and
   then advance the FDE's notion of the current label.  */
2956 case DW_CFA_advance_loc1:
2957 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2958 fde->dw_fde_current_label, NULL);
2959 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2962 case DW_CFA_advance_loc2:
2963 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2964 fde->dw_fde_current_label, NULL);
2965 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2968 case DW_CFA_advance_loc4:
2969 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2970 fde->dw_fde_current_label, NULL);
2971 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2974 case DW_CFA_MIPS_advance_loc8:
2975 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2976 fde->dw_fde_current_label, NULL);
2977 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2980 case DW_CFA_offset_extended:
2981 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2982 dw2_asm_output_data_uleb128 (r, NULL);
2983 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2984 dw2_asm_output_data_uleb128 (off, NULL);
/* def_cfa's offset is NOT data-alignment-scaled, per the DWARF spec.  */
2987 case DW_CFA_def_cfa:
2988 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2989 dw2_asm_output_data_uleb128 (r, NULL);
2990 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
/* _sf variants take signed, data-alignment-factored offsets (SLEB128).  */
2993 case DW_CFA_offset_extended_sf:
2994 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2995 dw2_asm_output_data_uleb128 (r, NULL);
2996 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2997 dw2_asm_output_data_sleb128 (off, NULL);
3000 case DW_CFA_def_cfa_sf:
3001 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3002 dw2_asm_output_data_uleb128 (r, NULL);
3003 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3004 dw2_asm_output_data_sleb128 (off, NULL);
/* These four share the "single register operand" encoding.  */
3007 case DW_CFA_restore_extended:
3008 case DW_CFA_undefined:
3009 case DW_CFA_same_value:
3010 case DW_CFA_def_cfa_register:
3011 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3012 dw2_asm_output_data_uleb128 (r, NULL);
3015 case DW_CFA_register:
3016 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3017 dw2_asm_output_data_uleb128 (r, NULL);
3018 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
3019 dw2_asm_output_data_uleb128 (r, NULL);
3022 case DW_CFA_def_cfa_offset:
3023 case DW_CFA_GNU_args_size:
3024 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
3027 case DW_CFA_def_cfa_offset_sf:
3028 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3029 dw2_asm_output_data_sleb128 (off, NULL);
/* No operands for window_save.  */
3032 case DW_CFA_GNU_window_save:
3035 case DW_CFA_def_cfa_expression:
3036 case DW_CFA_expression:
3037 output_cfa_loc (cfi, for_eh);
3040 case DW_CFA_GNU_negative_offset_extended:
3041 /* Obsoleted by DW_CFA_offset_extended_sf. */
3050 /* Similar, but do it via assembler directives instead. */
/* NOTE(review): F is either asm_out_file (emit real .cfi_* directives)
   or a dump file (emit human-readable stand-ins).  Register numbers are
   always mapped with the EH flag set (second argument 1).  Some braces,
   `break;`s and `return`s are elided in this gapped listing.  */
3053 output_cfi_directive (FILE *f, dw_cfi_ref cfi)
3055 unsigned long r, r2;
3057 switch (cfi->dw_cfi_opc)
3059 case DW_CFA_advance_loc:
3060 case DW_CFA_advance_loc1:
3061 case DW_CFA_advance_loc2:
3062 case DW_CFA_advance_loc4:
3063 case DW_CFA_MIPS_advance_loc8:
3064 case DW_CFA_set_loc:
3065 /* Should only be created in a code path not followed when emitting
3066 via directives. The assembler is going to take care of this for
3067 us. But this routines is also used for debugging dumps, so
3069 gcc_assert (f != asm_out_file);
3070 fprintf (f, "\t.cfi_advance_loc\n");
3074 case DW_CFA_offset:
3074 is elided here; the listing resumes with the extended forms.  */
3074 case DW_CFA_offset_extended:
3075 case DW_CFA_offset_extended_sf:
3076 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3077 fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
3078 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3081 case DW_CFA_restore:
3082 case DW_CFA_restore_extended:
3083 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3084 fprintf (f, "\t.cfi_restore %lu\n", r);
3087 case DW_CFA_undefined:
3088 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3089 fprintf (f, "\t.cfi_undefined %lu\n", r);
3092 case DW_CFA_same_value:
3093 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3094 fprintf (f, "\t.cfi_same_value %lu\n", r);
3097 case DW_CFA_def_cfa:
3098 case DW_CFA_def_cfa_sf:
3099 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3100 fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
3101 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3104 case DW_CFA_def_cfa_register:
3105 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3106 fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
3109 case DW_CFA_register:
3110 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3111 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
3112 fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
3115 case DW_CFA_def_cfa_offset:
3116 case DW_CFA_def_cfa_offset_sf:
3117 fprintf (f, "\t.cfi_def_cfa_offset "
3118 HOST_WIDE_INT_PRINT_DEC"\n",
3119 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3122 case DW_CFA_remember_state:
3123 fprintf (f, "\t.cfi_remember_state\n");
3125 case DW_CFA_restore_state:
3126 fprintf (f, "\t.cfi_restore_state\n");
/* No .cfi directive exists for args_size: escape the raw opcode when
   emitting real assembly, or print a comment in dumps.  */
3129 case DW_CFA_GNU_args_size:
3130 if (f == asm_out_file)
3132 fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
3133 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3135 fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
3136 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
3141 fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
3142 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3146 case DW_CFA_GNU_window_save:
3147 fprintf (f, "\t.cfi_window_save\n");
/* Expressions: dumps get a placeholder; real assembly falls through
   to the .cfi_escape path below (fallthrough structure elided).  */
3150 case DW_CFA_def_cfa_expression:
3151 if (f != asm_out_file)
3153 fprintf (f, "\t.cfi_def_cfa_expression ...\n");
3157 case DW_CFA_expression:
3158 if (f != asm_out_file)
3160 fprintf (f, "\t.cfi_cfa_expression ...\n");
3163 fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3164 output_cfa_loc_raw (cfi);
/* Emit a single CFI entry for the current function.  When the assembler
   supports .cfi_* directives, route through output_cfi_directive; the
   non-directive path (if any) is elided in this gapped listing.  */
3174 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3176 if (dwarf2out_do_cfi_asm ())
3177 output_cfi_directive (asm_out_file, cfi);
3181 /* Save the result of dwarf2out_do_cfi_asm across PCH.
3182 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3183 static GTY(()) signed char saved_do_cfi_asm = 0;
3185 /* Decide whether we want to emit frame unwind information for the current
3186 translation unit. */
/* NOTE(review): the return type and the individual `return` statements
   after each condition are elided in this gapped listing.  */
3189 dwarf2out_do_frame (void)
3191 /* We want to emit correct CFA location expressions or lists, so we
3192 have to return true if we're going to output debug info, even if
3193 we're not going to output frame or unwind info. */
3194 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
/* If we already committed to .cfi directives, frame info is implied.  */
3197 if (saved_do_cfi_asm > 0)
3200 if (targetm.debug_unwind_info () == UI_DWARF2)
3203 if ((flag_unwind_tables || flag_exceptions)
3204 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3210 /* Decide whether to emit frame unwind via assembler directives. */
/* NOTE(review): result is cached in saved_do_cfi_asm (tri-state) so the
   checks run once per compilation; the early `return`s guarded by each
   condition are elided in this gapped listing.  */
3213 dwarf2out_do_cfi_asm (void)
/* Presumably directives are disabled outright under MIPS debugging --
   the #ifdef body is not visible here; confirm in the full source.  */
3217 #ifdef MIPS_DEBUGGING_INFO
3221 if (saved_do_cfi_asm != 0)
3222 return saved_do_cfi_asm > 0;
3224 /* Assume failure for a moment. */
3225 saved_do_cfi_asm = -1;
3227 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3229 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3232 /* Make sure the personality encoding is one the assembler can support.
3233 In particular, aligned addresses can't be handled. */
/* 0x70 masks the DW_EH_PE application bits; only absolute or pc-relative
   encodings are acceptable to the assembler.  */
3234 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3235 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3237 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3238 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3241 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3242 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3243 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3244 && !flag_unwind_tables && !flag_exceptions
3245 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
/* All checks passed: commit to emitting .cfi_* directives.  */
3249 saved_do_cfi_asm = 1;
/* Gate function for the dwarf2 frame pass: run it only when dwarf2
   frame info is wanted (return type and the #ifndef body's `return`
   are elided in this gapped listing).  */
3254 gate_dwarf2_frame (void)
3256 #ifndef HAVE_prologue
3257 /* Targets which still implement the prologue in assembler text
3258 cannot use the generic dwarf2 unwinding. */
3262 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3263 from the optimized shrink-wrapping annotations that we will compute.
3264 For now, only produce the CFI notes for dwarf2. */
3265 return dwarf2out_do_frame ();
/* Pass descriptor registering the dwarf2 CFI computation as an RTL pass
   (several designator lines, e.g. the pass type and sub/next fields,
   are elided in this gapped listing).  */
3268 struct rtl_opt_pass pass_dwarf2_frame =
3272 "dwarf2", /* name */
3273 gate_dwarf2_frame, /* gate */
3274 execute_dwarf2_frame, /* execute */
3277 0, /* static_pass_number */
3278 TV_FINAL, /* tv_id */
3279 0, /* properties_required */
3280 0, /* properties_provided */
3281 0, /* properties_destroyed */
3282 0, /* todo_flags_start */
3283 0 /* todo_flags_finish */
3287 #include "gt-dwarf2cfi.h"