1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
version.

13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.

18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
/* NOTE(review): the matching #endif for the #if above is not visible in
   this extract (the embedded numbering skips lines 52-53).  */
/* Targets using dwarf2 CFI must provide INCOMING_RETURN_ADDR_RTX; the
   fallback below traps at runtime if it is ever evaluated.  */
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
/* NOTE(review): the matching #endif for this #ifndef is elided as well.  */
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the opening brace and the member declarations for the CFA
   expression(s) and the saved-register vector are elided, as is the
   closing "} dw_cfi_row;".  Only the comments and two members survive.  */
61 /* A collected description of an entire row of the abstract CFI table. */
62 typedef struct GTY(()) dw_cfi_row_struct
64 /* The expression that computes the CFA, expressed in two different ways.
65 The CFA member for the simple cases, and the full CFI expression for
66 the complex cases. The latter will be a DW_CFA_cfa_expression. */
70 /* The expressions for any register column that is saved. */
73 /* The value of any DW_CFA_GNU_args_size. */
74 HOST_WIDE_INT args_size;
/* Pointer shorthand used throughout this file for rows of the table.  */
77 typedef dw_cfi_row *dw_cfi_row_ref;
/* File-scope state shared by the CFI generation machinery below.
   NOTE(review): the declaration following the "vector of call frame
   insns" comment (presumably the CIE cfi_vec itself) is elided from this
   extract.  */
79 /* A vector of call frame insns for the CIE. */
82 /* The state of the first row of the FDE table, which includes the
83 state provided by the CIE. */
84 static GTY(()) dw_cfi_row_ref cie_cfi_row;
/* Counter used by dwarf2out_cfi_label to number the LCFI labels.  */
86 static GTY(()) unsigned long dwarf2out_cfi_label_num;
88 /* The insn after which a new CFI note should be emitted. */
89 static rtx add_cfi_insn;
91 /* When non-null, add_cfi will add the CFI to this vector. */
92 static cfi_vec *add_cfi_vec;
94 /* True if remember_state should be emitted before following CFI directive. */
95 static bool emit_cfa_remember;
97 /* True if any CFI directives were emitted at the current insn. */
98 static bool any_cfis_emitted;
100 /* Short-hand for commonly used register numbers. */
101 static unsigned dw_stack_pointer_regnum;
102 static unsigned dw_frame_pointer_regnum;
/* Forward declarations for the epilogue begin/restore pair.  */
105 static void dwarf2out_cfi_begin_epilogue (rtx insn);
106 static void dwarf2out_frame_debug_restore_state (void);
109 /* Hook used by __throw. */
112 expand_builtin_dwarf_sp_column (void)
114 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
115 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
118 /* MEM is a memory reference for the register size table, each element of
119 which has mode MODE. Initialize column C as a return address column. */
122 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
124 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
125 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
126 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the function's return-type line, braces, the declarations
   of the loop index and of the "size" rtx, several "if" bodies, and the
   #endif matching the #ifdef below are all elided.  The surviving lines
   write one size entry per hard register into the BLKmode table at
   ADDRESS, then let the target add its own entries.  */
129 /* Generate code to initialize the register size table. */
132 expand_builtin_init_dwarf_reg_sizes (tree address)
135 enum machine_mode mode = TYPE_MODE (char_type_node);
136 rtx addr = expand_normal (address);
137 rtx mem = gen_rtx_MEM (BLKmode, addr);
138 bool wrote_return_column = false;
140 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
142 unsigned int dnum = DWARF_FRAME_REGNUM (i);
143 unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
145 if (rnum < DWARF_FRAME_REGISTERS)
147 HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
148 enum machine_mode save_mode = reg_raw_mode[i];
151 if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
152 save_mode = choose_hard_reg_mode (i, 1, true);
153 if (dnum == DWARF_FRAME_RETURN_COLUMN)
155 if (save_mode == VOIDmode)
157 wrote_return_column = true;
159 size = GET_MODE_SIZE (save_mode);
163 emit_move_insn (adjust_address (mem, mode, offset),
164 gen_int_mode (size, mode))
168 if (!wrote_return_column)
169 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
171 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
172 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
/* Let the target record sizes for any additional registers.  */
175 targetm.init_dwarf_reg_sizes_extra (address);
178 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
180 static inline HOST_WIDE_INT
181 div_data_align (HOST_WIDE_INT off)
183 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
184 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
188 /* Return true if we need a signed version of a given opcode
189 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
192 need_data_align_sf_opcode (HOST_WIDE_INT off)
194 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
197 /* Return a pointer to a newly allocated Call Frame Instruction. */
199 static inline dw_cfi_ref
202 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
204 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
205 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
210 /* Return a newly allocated CFI row, with no defined data. */
212 static dw_cfi_row_ref
215 dw_cfi_row_ref row = ggc_alloc_cleared_dw_cfi_row ();
217 row->cfa.reg = INVALID_REGNUM;
222 /* Return a copy of an existing CFI row. */
224 static dw_cfi_row_ref
225 copy_cfi_row (dw_cfi_row_ref src)
227 dw_cfi_row_ref dst = ggc_alloc_dw_cfi_row ();
230 dst->reg_save = VEC_copy (dw_cfi_ref, gc, src->reg_save);
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, a presumable NULL check, and
   the trailing statements (likely ggc_free of the row itself) are elided
   -- confirm against the full source.  Only the vector release of the
   saved-register entries survives.  */
235 /* Free an allocated CFI row. */
238 free_cfi_row (dw_cfi_row_ref row)
242 VEC_free (dw_cfi_ref, gc, row->reg_save);
247 /* Generate a new label for the CFI info to refer to. */
250 dwarf2out_cfi_label (void)
252 int num = dwarf2out_cfi_label_num++;
255 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
257 return xstrdup (label);
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line and braces are elided.  The visible
   logic: first flush a pending DW_CFA_remember_state (recursively via
   add_cfi), then attach CFI to the insn stream as a NOTE_INSN_CFI and/or
   push it onto *add_cfi_vec.  */
260 /* Add CFI either to the current insn stream or to a vector, or both. */
263 add_cfi (dw_cfi_ref cfi)
265 if (emit_cfa_remember)
267 dw_cfi_ref cfi_remember;
269 /* Emit the state save. */
270 emit_cfa_remember = false;
271 cfi_remember = new_cfi ();
272 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
/* Recursive call: emit_cfa_remember is now false, so this terminates.  */
273 add_cfi (cfi_remember);
276 any_cfis_emitted = true;
278 if (add_cfi_insn != NULL)
280 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
281 NOTE_CFI (add_cfi_insn) = cfi;
284 if (add_cfi_vec != NULL)
285 VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
/* NOTE(review): this extract is incomplete (large gaps in the embedded
   numbering): the braces, most case labels of the switch over the
   location opcodes (DW_OP_reg0..31, DW_OP_regx, DW_OP_breg0..31,
   DW_OP_bregx -- inferred from the surviving assignments), breaks, and
   the default/assert paths are elided.  */
288 /* This function fills in a dw_cfa_location structure from a dwarf location
289 descriptor sequence. */
292 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
294 struct dw_loc_descr_struct *ptr;
296 cfa->base_offset = 0;
300 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
302 enum dwarf_location_atom op = ptr->dw_loc_opc;
/* DW_OP_reg<n>: register number encoded in the opcode itself.  */
338 cfa->reg = op - DW_OP_reg0;
/* DW_OP_regx: register number in the first operand.  */
341 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
/* DW_OP_breg<n>: base register plus signed offset operand.  */
375 cfa->reg = op - DW_OP_breg0;
376 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
/* DW_OP_bregx: register in operand 1, offset in operand 2.  */
379 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
380 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
385 case DW_OP_plus_uconst:
386 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, break statements, the
   remember/restore state copies (between the asserts and the in_use
   flag updates), and the default case are elided.  */
394 /* Find the previous value for the CFA, iteratively. CFI is the opcode
395 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
396 one level of remember/restore state processing. */
399 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
401 switch (cfi->dw_cfi_opc)
403 case DW_CFA_def_cfa_offset:
404 case DW_CFA_def_cfa_offset_sf:
405 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
407 case DW_CFA_def_cfa_register:
408 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
411 case DW_CFA_def_cfa_sf:
412 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
413 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
415 case DW_CFA_def_cfa_expression:
416 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
/* Only one level of remember/restore is supported here.  */
419 case DW_CFA_remember_state:
420 gcc_assert (!remember->in_use);
422 remember->in_use = 1;
424 case DW_CFA_restore_state:
425 gcc_assert (remember->in_use);
427 remember->in_use = 0;
435 /* The current, i.e. most recently generated, row of the CFI table. */
436 static dw_cfi_row_ref cur_row;
438 /* The row state from a preceding DW_CFA_remember_state. */
439 static dw_cfi_row_ref remember_row;
/* NOTE(review): the tail of the next comment (embedded line 442) is
   elided from this extract.  */
441 /* The register used for saving registers to the stack, and its offset
443 static dw_cfa_location cfa_store;
445 /* A temporary register holding an integral value used in adjusting SP
446 or setting up the store_reg. The "offset" field holds the integer
447 value, not an offset. */
448 static dw_cfa_location cfa_temp;
450 /* The (really) current value for DW_CFA_GNU_args_size. We delay actually
451 emitting this data, i.e. updating CUR_ROW, without async unwind. */
452 static HOST_WIDE_INT args_size;
454 /* Determine if two dw_cfa_location structures define the same data. */
457 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
459 return (loc1->reg == loc2->reg
460 && loc1->offset == loc2->offset
461 && loc1->indirect == loc2->indirect
462 && (loc1->indirect == 0
463 || loc1->base_offset == loc2->base_offset));
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the "dw_cfi_ref cfi"
   declaration, the early return on no change, the need_data_align_sf
   conditionals selecting the _sf opcodes, the #endif matching the
   MIPS_DEBUGGING_INFO guard, and the trailing add_cfi call are elided.
   The visible logic picks the smallest CFA-definition opcode that
   captures the change: offset-only, register-only, register+offset, or
   a full expression.  */
466 /* This routine does the actual work. The CFA is now calculated from
467 the dw_cfa_location structure. */
470 def_cfa_1 (dw_cfa_location *loc_p)
473 dw_cfa_location loc = *loc_p;
/* Keep cfa_store in sync when it tracks the same register as the CFA.  */
475 if (cfa_store.reg == loc.reg && loc.indirect == 0)
476 cfa_store.offset = loc.offset;
478 /* If nothing changed, no need to issue any call frame instructions. */
479 if (cfa_equal_p (&loc, &cur_row->cfa))
484 if (loc.reg == cur_row->cfa.reg && !loc.indirect && !cur_row->cfa.indirect)
486 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
487 the CFA register did not change but the offset did. The data
488 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
489 in the assembler via the .cfi_def_cfa_offset directive. */
491 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
493 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
494 cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
497 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
498 else if (loc.offset == cur_row->cfa.offset
499 && cur_row->cfa.reg != INVALID_REGNUM
501 && !cur_row->cfa.indirect)
503 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
504 indicating the CFA register has changed to <register> but the
505 offset has not changed. */
506 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
507 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
511 else if (loc.indirect == 0)
513 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
514 indicating the CFA register has changed to <register> with
515 the specified offset. The data factoring for DW_CFA_def_cfa_sf
516 happens in output_cfi, or in the assembler via the .cfi_def_cfa
519 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
521 cfi->dw_cfi_opc = DW_CFA_def_cfa;
522 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
523 cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
527 /* Construct a DW_CFA_def_cfa_expression instruction to
528 calculate the CFA using a full location expression since no
529 register-offset pair is available. */
530 struct dw_loc_descr_struct *loc_list;
532 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
533 loc_list = build_cfa_loc (&loc, 0);
534 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
/* Remember the expression form so it can be re-emitted later.  */
536 cur_row->cfa_cfi = cfi;
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the "fde != NULL" part of
   the stack-realign condition, the DW_CFA_offset/extended selection's
   offset division (presumably div_data_align -- confirm against full
   source), and the trailing add_cfi/row update are elided.  */
543 /* Add the CFI for saving a register. REG is the CFA column number.
544 If SREG is -1, the register is saved at OFFSET from the CFA;
545 otherwise it is saved in SREG. */
548 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
550 dw_fde_ref fde = cfun ? cfun->fde : NULL;
551 dw_cfi_ref cfi = new_cfi ();
553 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
555 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
557 && fde->stack_realign
558 && sreg == INVALID_REGNUM
560 cfi->dw_cfi_opc = DW_CFA_expression;
561 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
562 cfi->dw_cfi_oprnd2.dw_cfi_loc
563 = build_cfa_aligned_loc (&cur_row->cfa, offset,
564 fde->stack_realignment);
/* Saved at OFFSET from the CFA: pick the smallest offset opcode.  */
566 else if (sreg == INVALID_REGNUM)
568 if (need_data_align_sf_opcode (offset))
569 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
570 else if (reg & ~0x3f)
571 cfi->dw_cfi_opc = DW_CFA_offset_extended;
573 cfi->dw_cfi_opc = DW_CFA_offset;
574 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
/* Saved in itself: the register still holds its entry value.  */
576 else if (sreg == reg)
577 cfi->dw_cfi_opc = DW_CFA_same_value;
/* Otherwise saved in another register SREG.  */
580 cfi->dw_cfi_opc = DW_CFA_register;
581 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the rtx_code declaration,
   the STACK_GROWS_DOWNWARD sign flip and its #endif, the PLUS/MINUS
   sign handling and returns, the switch labels for the auto-modify
   addressing modes (PRE/POST_MODIFY, PRE/POST_DEC, PRE/POST_INC --
   inferred from the surviving bodies), and the final return are all
   elided.  The function computes the stack delta implied by one SET.  */
587 /* Given a SET, calculate the amount of stack adjustment it
591 stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
592 HOST_WIDE_INT cur_offset)
594 const_rtx src = SET_SRC (pattern);
595 const_rtx dest = SET_DEST (pattern);
596 HOST_WIDE_INT offset = 0;
599 if (dest == stack_pointer_rtx)
601 code = GET_CODE (src);
603 /* Assume (set (reg sp) (reg whatever)) sets args_size
605 if (code == REG && src != stack_pointer_rtx)
607 offset = -cur_args_size;
608 #ifndef STACK_GROWS_DOWNWARD
611 return offset - cur_offset;
614 if (! (code == PLUS || code == MINUS)
615 || XEXP (src, 0) != stack_pointer_rtx
616 || !CONST_INT_P (XEXP (src, 1)))
619 /* (set (reg sp) (plus (reg sp) (const_int))) */
620 offset = INTVAL (XEXP (src, 1));
/* Pushes look like stores through an auto-modified sp address.  */
626 if (MEM_P (src) && !MEM_P (dest))
630 /* (set (mem (pre_dec (reg sp))) (foo)) */
631 src = XEXP (dest, 0);
632 code = GET_CODE (src);
/* {PRE,POST}_MODIFY with an explicit constant adjustment.  */
638 if (XEXP (src, 0) == stack_pointer_rtx)
640 rtx val = XEXP (XEXP (src, 1), 1);
641 /* We handle only adjustments by constant amount. */
642 gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
643 && CONST_INT_P (val));
644 offset = -INTVAL (val);
/* {PRE,POST}_DEC: adjustment is the size of the stored mode.  */
651 if (XEXP (src, 0) == stack_pointer_rtx)
653 offset = GET_MODE_SIZE (GET_MODE (dest));
/* {PRE,POST}_INC: same, with the opposite sign.  */
660 if (XEXP (src, 0) == stack_pointer_rtx)
662 offset = -GET_MODE_SIZE (GET_MODE (dest));
677 /* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
678 indexed by INSN_UID. */
680 static HOST_WIDE_INT *barrier_args_size;
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the loop-index declaration,
   returns in the frame-related/prologue cases, the STACK_GROWS_DOWNWARD
   sign handling and its #endif, the clamp of a negative cur_args_size,
   and the JUMP_P guard around the JUMP_LABEL propagation are elided.  */
682 /* Helper function for compute_barrier_args_size. Handle one insn. */
685 compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
686 VEC (rtx, heap) **next)
688 HOST_WIDE_INT offset = 0;
691 if (! RTX_FRAME_RELATED_P (insn))
693 if (prologue_epilogue_contains (insn))
695 else if (GET_CODE (PATTERN (insn)) == SET)
696 offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
697 else if (GET_CODE (PATTERN (insn)) == PARALLEL
698 || GET_CODE (PATTERN (insn)) == SEQUENCE)
700 /* There may be stack adjustments inside compound insns. Search
702 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
703 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
704 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
705 cur_args_size, offset);
/* Frame-related insn: honor a REG_FRAME_RELATED_EXPR override.  */
710 rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
714 expr = XEXP (expr, 0);
715 if (GET_CODE (expr) == PARALLEL
716 || GET_CODE (expr) == SEQUENCE)
717 for (i = 1; i < XVECLEN (expr, 0); i++)
719 rtx elem = XVECEXP (expr, 0, i);
721 if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
722 offset += stack_adjust_offset (elem, cur_args_size, offset);
727 #ifndef STACK_GROWS_DOWNWARD
731 cur_args_size += offset;
732 if (cur_args_size < 0)
/* Propagate the computed size to the jump target if not yet visited.  */
737 rtx dest = JUMP_LABEL (insn);
741 if (barrier_args_size [INSN_UID (dest)] < 0)
743 barrier_args_size [INSN_UID (dest)] = cur_args_size;
744 VEC_safe_push (rtx, heap, *next, dest);
749 return cur_args_size;
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the "rtx insn" declaration
   and the get_insns () initialization, the CODE_LABEL handling inside
   the inner walk, the "continue"/"break" statements, and the
   worklist/next swap lines are elided.  This is a worklist walk seeding
   args_size at function entry and propagating it to every BARRIER.  */
752 /* Walk the whole function and compute args_size on BARRIERs. */
755 compute_barrier_args_size (void)
757 int max_uid = get_max_uid (), i;
759 VEC (rtx, heap) *worklist, *next, *tmp;
/* -1 marks "not yet computed" for every uid.  */
761 barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
762 for (i = 0; i < max_uid; i++)
763 barrier_args_size[i] = -1;
765 worklist = VEC_alloc (rtx, heap, 20);
766 next = VEC_alloc (rtx, heap, 20);
768 barrier_args_size[INSN_UID (insn)] = 0;
769 VEC_quick_push (rtx, worklist, insn);
772 while (!VEC_empty (rtx, worklist))
774 rtx prev, body, first_insn;
775 HOST_WIDE_INT cur_args_size;
777 first_insn = insn = VEC_pop (rtx, worklist);
778 cur_args_size = barrier_args_size[INSN_UID (insn)];
779 prev = prev_nonnote_insn (insn);
780 if (prev && BARRIER_P (prev))
781 barrier_args_size[INSN_UID (prev)] = cur_args_size;
783 for (; insn; insn = NEXT_INSN (insn))
785 if (INSN_DELETED_P (insn) || NOTE_P (insn))
787 if (BARRIER_P (insn))
792 if (insn == first_insn)
794 else if (barrier_args_size[INSN_UID (insn)] < 0)
796 barrier_args_size[INSN_UID (insn)] = cur_args_size;
801 /* The insns starting with this label have been
802 already scanned or are in the worklist. */
807 body = PATTERN (insn);
808 if (GET_CODE (body) == SEQUENCE)
810 HOST_WIDE_INT dest_args_size = cur_args_size;
811 for (i = 1; i < XVECLEN (body, 0); i++)
/* Annulled-branch delay slots only execute on the taken path, so
   their effect goes into dest_args_size, not cur_args_size.  */
812 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
813 && INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
815 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
816 dest_args_size, &next);
819 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
820 cur_args_size, &next);
822 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
823 compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
824 dest_args_size, &next);
827 = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
828 cur_args_size, &next);
832 = compute_barrier_args_size_1 (insn, cur_args_size, &next);
836 if (VEC_empty (rtx, next))
839 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
843 VEC_truncate (rtx, next, 0);
846 VEC_free (rtx, heap, worklist);
847 VEC_free (rtx, heap, next);
850 /* Add a CFI to update the running total of the size of arguments
851 pushed onto the stack. */
854 dwarf2out_args_size (HOST_WIDE_INT size)
858 if (size == cur_row->args_size)
861 cur_row->args_size = size;
864 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
865 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the ACCUMULATE_OUTGOING_ARGS
   early-return body, the STACK_GROWS_DOWNWARD sign handling with its
   #endif, the args_size update and its negative clamp are elided.  */
869 /* Record a stack adjustment of OFFSET bytes. */
872 dwarf2out_stack_adjust (HOST_WIDE_INT offset)
874 dw_cfa_location loc = cur_row->cfa;
/* A CFA based on sp moves with the adjustment.  */
876 if (loc.reg == dw_stack_pointer_regnum)
877 loc.offset += offset;
879 if (cfa_store.reg == dw_stack_pointer_regnum)
880 cfa_store.offset += offset;
882 /* ??? The assumption seems to be that if A_O_A, the only CFA adjustments
883 involving the stack pointer are inside the prologue and marked as
884 RTX_FRAME_RELATED_P. That said, should we not verify this assumption
885 by *asserting* A_O_A at this point? Why else would we have a change
886 to the stack pointer? */
887 if (ACCUMULATE_OUTGOING_ARGS)
890 #ifndef STACK_GROWS_DOWNWARD
/* Only async unwind needs args_size tracked at every adjustment.  */
899 if (flag_asynchronous_unwind_tables)
900 dwarf2out_args_size (args_size);
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the loop-index declaration,
   several "return" statements, the final_sequence NULL check, the
   barrier offset sign handling and #endif, and the default (single push)
   case are elided.  */
903 /* Check INSN to see if it looks like a push or a stack adjustment, and
904 make a note of it if it does. EH uses this information to find out
905 how much extra space it needs to pop off the stack. */
908 dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
910 HOST_WIDE_INT offset;
913 /* Don't handle epilogues at all. Certainly it would be wrong to do so
914 with this function. Proper support would require all frame-related
915 insns to be marked, and to be able to handle saving state around
916 epilogues textually in the middle of the function. */
917 if (prologue_epilogue_contains (insn))
920 /* If INSN is an instruction from target of an annulled branch, the
921 effects are for the target only and so current argument size
922 shouldn't change at all. */
924 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
925 && INSN_FROM_TARGET_P (insn)
928 /* If only calls can throw, and we have a frame pointer,
929 save up adjustments until we see the CALL_INSN. */
930 if (!flag_asynchronous_unwind_tables
931 && cur_row->cfa.reg != dw_stack_pointer_regnum
933 if (CALL_P (insn) && !after_p)
935 /* Extract the size of the args from the CALL rtx itself. */
936 insn = PATTERN (insn);
937 if (GET_CODE (insn) == PARALLEL)
938 insn = XVECEXP (insn, 0, 0);
939 if (GET_CODE (insn) == SET)
940 insn = SET_SRC (insn);
941 gcc_assert (GET_CODE (insn) == CALL);
942 dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
947 if (CALL_P (insn) && !after_p)
949 if (!flag_asynchronous_unwind_tables)
950 dwarf2out_args_size (args_size);
953 else if (BARRIER_P (insn))
955 /* Don't call compute_barrier_args_size () if the only
956 BARRIER is at the end of function. */
957 if (barrier_args_size == NULL && next_nonnote_insn (insn))
958 compute_barrier_args_size ();
959 if (barrier_args_size == NULL)
/* Use the precomputed args_size at this barrier.  */
963 offset = barrier_args_size[INSN_UID (insn)];
969 #ifndef STACK_GROWS_DOWNWARD
973 else if (GET_CODE (PATTERN (insn)) == SET)
974 offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
975 else if (GET_CODE (PATTERN (insn)) == PARALLEL
976 || GET_CODE (PATTERN (insn)) == SEQUENCE)
978 /* There may be stack adjustments inside compound insns. Search
980 for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
981 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
982 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
991 dwarf2out_stack_adjust (offset);
994 /* We delay emitting a register save until either (a) we reach the end
995 of the prologue or (b) the register is clobbered. This clusters
996 register saves so that there are fewer pc advances. */
/* NOTE(review): the "reg"/"saved_reg" rtx members of this struct, the
   closing brace, and the members of reg_saved_in_data below are elided
   from this extract (gaps in the embedded numbering).  */
998 struct GTY(()) queued_reg_save {
999 struct queued_reg_save *next;
1001 HOST_WIDE_INT cfa_offset;
/* Head of the singly-linked list of pending register saves.  */
1005 static GTY(()) struct queued_reg_save *queued_reg_saves;
1007 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
1008 typedef struct GTY(()) reg_saved_in_data {
1011 } reg_saved_in_data;
1013 DEF_VEC_O (reg_saved_in_data);
1014 DEF_VEC_ALLOC_O (reg_saved_in_data, gc);
1016 /* A set of registers saved in other registers. This is implemented as
1017 a flat array because it normally contains zero or 1 entry, depending
1018 on the target. IA-64 is the big spender here, using a maximum of
1020 static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;
/* The return-address save recorded while building the CIE, if any.  */
1022 static GTY(()) reg_saved_in_data *cie_return_save;
1024 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
1025 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
1026 used in places where rtl is prohibited. */
1028 static inline unsigned
1029 dwf_regno (const_rtx reg)
1031 return DWARF_FRAME_REGNUM (REGNO (reg));
1034 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1037 compare_reg_or_pc (rtx x, rtx y)
1039 if (REG_P (x) && REG_P (y))
1040 return REGNO (x) == REGNO (y);
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the index declaration, the
   "goto found"/early-return control flow around the existing-entry
   update, and the guard that skips appending when DEST is NULL are
   elided.  */
1044 /* Record SRC as being saved in DEST. DEST may be null to delete an
1045 existing entry. SRC may be a register or PC_RTX. */
1048 record_reg_saved_in_reg (rtx dest, rtx src)
1050 reg_saved_in_data *elt;
1053 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
1054 if (compare_reg_or_pc (elt->orig_reg, src))
/* Existing entry: either delete it (DEST == NULL) ...  */
1057 VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
/* ... or redirect it to the new destination.  */
1059 elt->saved_in_reg = dest;
/* No entry yet: append a new one.  */
1066 elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
1067 elt->orig_reg = src;
1068 elt->saved_in_reg = dest;
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the break out of the
   duplicate-search loop, the else-branch allocating a fresh node, and
   the "q->reg = reg;" assignment are elided.  */
1071 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1072 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1075 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1077 struct queued_reg_save *q;
1079 /* Duplicates waste space, but it's also necessary to remove them
1080 for correctness, since the queue gets output in reverse order. */
1081 for (q = queued_reg_saves; q != NULL; q = q->next)
1082 if (compare_reg_or_pc (q->reg, reg))
/* Not found: allocate and link a new queue node at the head.  */
1087 q = ggc_alloc_queued_reg_save ();
1088 q->next = queued_reg_saves;
1089 queued_reg_saves = q;
1093 q->cfa_offset = offset;
1094 q->saved_reg = sreg;
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, and the conditionals
   selecting between the pc/regular and saved-in-reg/saved-at-offset
   cases are elided.  */
1097 /* Output all the entries in QUEUED_REG_SAVES. */
1100 dwarf2out_flush_queued_reg_saves (void)
1102 struct queued_reg_save *q;
1104 for (q = queued_reg_saves; q; q = q->next)
1106 unsigned int reg, sreg;
/* Keep the saved-in-register map in sync with what we emit.  */
1108 record_reg_saved_in_reg (q->saved_reg, q->reg);
/* PC_RTX stands for the return-address column.  */
1110 if (q->reg == pc_rtx)
1111 reg = DWARF_FRAME_RETURN_COLUMN;
1113 reg = dwf_regno (q->reg);
1115 sreg = dwf_regno (q->saved_reg);
1117 sreg = INVALID_REGNUM;
1118 reg_save (reg, sreg, q->cfa_offset);
/* The queue has been flushed; start empty for the next cluster.  */
1121 queued_reg_saves = NULL;
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the index declaration, the
   "return true" bodies, and the final "return false" are elided.  */
1124 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1125 location for? Or, does it clobber a register which we've previously
1126 said that some other register is saved in, and for which we now
1127 have a new location for? */
1130 clobbers_queued_reg_save (const_rtx insn)
1132 struct queued_reg_save *q;
1134 for (q = queued_reg_saves; q; q = q->next)
1137 reg_saved_in_data *rir;
/* The queued register itself is overwritten by INSN.  */
1139 if (modified_in_p (q->reg, insn))
/* Or a register currently holding a queued register's save.  */
1142 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1143 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1144 && modified_in_p (rir->saved_in_reg, insn))
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the index declaration, the
   "return q->reg;" for a queued hit, and the final NULL return are
   elided.  */
1151 /* What register, if any, is currently saved in REG? */
1154 reg_saved_in (rtx reg)
1156 unsigned int regn = REGNO (reg);
1157 struct queued_reg_save *q;
1158 reg_saved_in_data *rir;
/* Pending (queued) saves take precedence over recorded ones.  */
1161 for (q = queued_reg_saves; q; q = q->next)
1162 if (q->saved_reg && regn == REGNO (q->saved_reg))
1165 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1166 if (regn == REGNO (rir->saved_in_reg))
1167 return rir->orig_reg;
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the switch case labels
   (PLUS, REG, MEM -- inferred from the surviving bodies), breaks, the
   default case, and the trailing def_cfa_1 call are elided.  */
1172 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1175 dwarf2out_frame_debug_def_cfa (rtx pat)
1177 dw_cfa_location loc;
1179 memset (&loc, 0, sizeof (loc));
1181 switch (GET_CODE (pat))
/* (plus (reg) (const_int)): register + offset form.  */
1184 loc.reg = dwf_regno (XEXP (pat, 0));
1185 loc.offset = INTVAL (XEXP (pat, 1));
/* Bare (reg): register with zero offset.  */
1189 loc.reg = dwf_regno (pat);
/* (mem ...): an indirect CFA; peel the address.  */
1194 pat = XEXP (pat, 0);
1195 if (GET_CODE (pat) == PLUS)
1197 loc.base_offset = INTVAL (XEXP (pat, 1));
1198 pat = XEXP (pat, 0);
1200 loc.reg = dwf_regno (pat);
1204 /* Recurse and define an expression. */
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the "rtx src, dest;"
   declaration, the switch case labels (PLUS and presumably REG),
   breaks/default, and the trailing def_cfa_1 call are elided.  */
1211 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1214 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1216 dw_cfa_location loc = cur_row->cfa;
1219 gcc_assert (GET_CODE (pat) == SET);
1220 dest = XEXP (pat, 0);
1221 src = XEXP (pat, 1);
1223 switch (GET_CODE (src))
/* (plus (reg:cfa) (const_int)): move the CFA offset.  */
1226 gcc_assert (dwf_regno (XEXP (src, 0)) == loc.reg);
1227 loc.offset -= INTVAL (XEXP (src, 1));
/* The destination register becomes the new CFA register.  */
1237 loc.reg = dwf_regno (dest);
1238 gcc_assert (loc.indirect == 0);
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the switch case labels
   (REG and PLUS -- inferred from the bodies), the gcc_unreachable
   default, the pc_rtx/regular source dispatch, and the per-element
   extraction inside the PARALLEL loop are elided.  */
1243 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1246 dwarf2out_frame_debug_cfa_offset (rtx set)
1248 HOST_WIDE_INT offset;
1249 rtx src, addr, span;
1250 unsigned int sregno;
1252 src = XEXP (set, 1);
1253 addr = XEXP (set, 0);
1254 gcc_assert (MEM_P (addr));
1255 addr = XEXP (addr, 0);
1257 /* As documented, only consider extremely simple addresses. */
1258 switch (GET_CODE (addr))
/* Bare (reg:cfa): offset 0 from the CFA base register.  */
1261 gcc_assert (dwf_regno (addr) == cur_row->cfa.reg);
1262 offset = -cur_row->cfa.offset;
/* (plus (reg:cfa) (const_int)): explicit displacement.  */
1265 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_row->cfa.reg);
1266 offset = INTVAL (XEXP (addr, 1)) - cur_row->cfa.offset;
/* Source is PC_RTX: this is a return-address save.  */
1275 sregno = DWARF_FRAME_RETURN_COLUMN;
1279 span = targetm.dwarf_register_span (src);
1280 sregno = dwf_regno (src);
1283 /* ??? We'd like to use queue_reg_save, but we need to come up with
1284 a different flushing heuristic for epilogues. */
1286 reg_save (sregno, INVALID_REGNUM, offset);
1289 /* We have a PARALLEL describing where the contents of SRC live.
1290 Queue register saves for each piece of the PARALLEL. */
1293 HOST_WIDE_INT span_offset = offset;
1295 gcc_assert (GET_CODE (span) == PARALLEL);
1297 limit = XVECLEN (span, 0);
1298 for (par_index = 0; par_index < limit; par_index++)
1300 rtx elem = XVECEXP (span, 0, par_index);
1302 sregno = dwf_regno (src);
1303 reg_save (sregno, INVALID_REGNUM, span_offset);
1304 span_offset += GET_MODE_SIZE (GET_MODE (elem));
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the "rtx src, dest;"
   declaration, and the pc_rtx test guarding the return-column case are
   elided.  */
1309 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1312 dwarf2out_frame_debug_cfa_register (rtx set)
1315 unsigned sregno, dregno;
1317 src = XEXP (set, 1);
1318 dest = XEXP (set, 0);
/* Track the copy so later clobbers of DEST are noticed.  */
1320 record_reg_saved_in_reg (dest, src);
1322 sregno = DWARF_FRAME_RETURN_COLUMN;
1324 sregno = dwf_regno (src);
1326 dregno = dwf_regno (dest);
1328 /* ??? We'd like to use queue_reg_save, but we need to come up with
1329 a different flushing heuristic for epilogues. */
1330 reg_save (sregno, dregno, 0);
/* NOTE(review): this extract is incomplete (gaps in the embedded
   numbering): the return-type line, braces, the handling attached to
   the dwarf_register_span result, and the trailing add_cfi call are
   elided.  */
1333 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1336 dwarf2out_frame_debug_cfa_expression (rtx set)
1338 rtx src, dest, span;
1339 dw_cfi_ref cfi = new_cfi ();
1341 dest = SET_DEST (set);
1342 src = SET_SRC (set);
1344 gcc_assert (REG_P (src));
1345 gcc_assert (MEM_P (dest));
1347 span = targetm.dwarf_register_span (src);
/* Describe the save location with a full DWARF expression.  */
1350 cfi->dw_cfi_opc = DW_CFA_expression;
1351 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (src);
1352 cfi->dw_cfi_oprnd2.dw_cfi_loc
1353 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1354 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1356 /* ??? We'd like to use queue_reg_save, were the interface different,
1357 and, as above, we could manage flushing for epilogues. */
1361 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1364 dwarf2out_frame_debug_cfa_restore (rtx reg)
1366 dw_cfi_ref cfi = new_cfi ();
1367 unsigned int regno = dwf_regno (reg);
1369 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1370 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1375 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1376 ??? Perhaps we should note in the CIE where windows are saved (instead of
1377 assuming 0(cfa)) and what registers are in the window. */
1380 dwarf2out_frame_debug_cfa_window_save (void)
1382 dw_cfi_ref cfi = new_cfi ();
1384 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1388 /* Record call frame debugging information for an expression EXPR,
1389 which either sets SP or FP (adjusting how we calculate the frame
1390 address) or saves a register to the stack or another register.
1391 LABEL indicates the address of EXPR.
1393 This function encodes a state machine mapping rtxes to actions on
1394 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1395 users need not read the source code.
1397 The High-Level Picture
1399 Changes in the register we use to calculate the CFA: Currently we
1400 assume that if you copy the CFA register into another register, we
1401 should take the other one as the new CFA register; this seems to
1402 work pretty well. If it's wrong for some target, it's simple
1403 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1405 Changes in the register we use for saving registers to the stack:
1406 This is usually SP, but not always. Again, we deduce that if you
1407 copy SP into another register (and SP is not the CFA register),
1408 then the new register is the one we will be using for register
1409 saves. This also seems to work.
1411 Register saves: There's not much guesswork about this one; if
1412 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1413 register save, and the register used to calculate the destination
1414 had better be the one we think we're using for this purpose.
1415 It's also assumed that a copy from a call-saved register to another
1416 register is saving that register if RTX_FRAME_RELATED_P is set on
1417 that instruction. If the copy is from a call-saved register to
1418 the *same* register, that means that the register is now the same
1419 value as in the caller.
1421 Except: If the register being saved is the CFA register, and the
1422 offset is nonzero, we are saving the CFA, so we assume we have to
1423 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1424 the intent is to save the value of SP from the previous frame.
1426 In addition, if a register has previously been saved to a different
1429 Invariants / Summaries of Rules
1431 cfa current rule for calculating the CFA. It usually
1432 consists of a register and an offset. This is
1433 actually stored in cur_row->cfa, but abbreviated
1434 for the purposes of this documentation.
1435 cfa_store register used by prologue code to save things to the stack
1436 cfa_store.offset is the offset from the value of
1437 cfa_store.reg to the actual CFA
1438 cfa_temp register holding an integral value. cfa_temp.offset
1439 stores the value, which will be used to adjust the
1440 stack pointer. cfa_temp is also used like cfa_store,
1441 to track stores to the stack via fp or a temp reg.
1443 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1444 with cfa.reg as the first operand changes the cfa.reg and its
1445 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1448 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1449 expression yielding a constant. This sets cfa_temp.reg
1450 and cfa_temp.offset.
1452 Rule 5: Create a new register cfa_store used to save items to the
1455 Rules 10-14: Save a register to the stack. Define offset as the
1456 difference of the original location and cfa_store's
1457 location (or cfa_temp's location if cfa_temp is used).
1459 Rules 16-20: If AND operation happens on sp in prologue, we assume
1460 stack is realigned. We will use a group of DW_OP_XXX
1461 expressions to represent the location of the stored
1462 register instead of CFA+offset.
1466 "{a,b}" indicates a choice of a xor b.
1467 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1470 (set <reg1> <reg2>:cfa.reg)
1471 effects: cfa.reg = <reg1>
1472 cfa.offset unchanged
1473 cfa_temp.reg = <reg1>
1474 cfa_temp.offset = cfa.offset
1477 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1478 {<const_int>,<reg>:cfa_temp.reg}))
1479 effects: cfa.reg = sp if fp used
1480 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1481 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1482 if cfa_store.reg==sp
1485 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1486 effects: cfa.reg = fp
1487 cfa_offset += +/- <const_int>
1490 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1491 constraints: <reg1> != fp
1493 effects: cfa.reg = <reg1>
1494 cfa_temp.reg = <reg1>
1495 cfa_temp.offset = cfa.offset
1498 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1499 constraints: <reg1> != fp
1501 effects: cfa_store.reg = <reg1>
1502 cfa_store.offset = cfa.offset - cfa_temp.offset
1505 (set <reg> <const_int>)
1506 effects: cfa_temp.reg = <reg>
1507 cfa_temp.offset = <const_int>
1510 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1511 effects: cfa_temp.reg = <reg1>
1512 cfa_temp.offset |= <const_int>
1515 (set <reg> (high <exp>))
1519 (set <reg> (lo_sum <exp> <const_int>))
1520 effects: cfa_temp.reg = <reg>
1521 cfa_temp.offset = <const_int>
1524 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1525 effects: cfa_store.offset -= <const_int>
1526 cfa.offset = cfa_store.offset if cfa.reg == sp
1528 cfa.base_offset = -cfa_store.offset
1531 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1532 effects: cfa_store.offset += -/+ mode_size(mem)
1533 cfa.offset = cfa_store.offset if cfa.reg == sp
1535 cfa.base_offset = -cfa_store.offset
1538 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1541 effects: cfa.reg = <reg1>
1542 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1545 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1546 effects: cfa.reg = <reg1>
1547 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1550 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1551 effects: cfa.reg = <reg1>
1552 cfa.base_offset = -cfa_temp.offset
1553 cfa_temp.offset -= mode_size(mem)
1556 (set <reg> {unspec, unspec_volatile})
1557 effects: target-dependent
1560 (set sp (and: sp <const_int>))
1561 constraints: cfa_store.reg == sp
1562 effects: cfun->fde.stack_realign = 1
1563 cfa_store.offset = 0
1564 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1567 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1568 effects: cfa_store.offset += -/+ mode_size(mem)
1571 (set (mem ({pre_inc, pre_dec} sp)) fp)
1572 constraints: fde->stack_realign == 1
1573 effects: cfa_store.offset = 0
1574 cfa.reg != HARD_FRAME_POINTER_REGNUM
1577 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1578 constraints: fde->stack_realign == 1
1580 && cfa.indirect == 0
1581 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1582 effects: Use DW_CFA_def_cfa_expression to define cfa
1583 cfa.reg == fde->drap_reg */
/* State machine over one frame-related rtx: interpret a SET (or each
   member of a PARALLEL/SEQUENCE) and update the CFA bookkeeping
   (cfa, cfa_store, cfa_temp) and the queued register saves.  */
1586 dwarf2out_frame_debug_expr (rtx expr)
1588 dw_cfa_location cfa = cur_row->cfa;
1589 rtx src, dest, span;
1590 HOST_WIDE_INT offset;
1593 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1594 the PARALLEL independently. The first element is always processed if
1595 it is a SET. This is for backward compatibility. Other elements
1596 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1597 flag is set in them. */
1598 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1601 int limit = XVECLEN (expr, 0);
1604 /* PARALLELs have strict read-modify-write semantics, so we
1605 ought to evaluate every rvalue before changing any lvalue.
1606 It's cumbersome to do that in general, but there's an
1607 easy approximation that is enough for all current users:
1608 handle register saves before register assignments. */
1609 if (GET_CODE (expr) == PARALLEL)
1610 for (par_index = 0; par_index < limit; par_index++)
/* First pass: MEM destinations only (register saves).  */
1612 elem = XVECEXP (expr, 0, par_index);
1613 if (GET_CODE (elem) == SET
1614 && MEM_P (SET_DEST (elem))
1615 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1616 dwarf2out_frame_debug_expr (elem);
/* Second pass: the remaining (register-assignment) SETs.  */
1619 for (par_index = 0; par_index < limit; par_index++)
1621 elem = XVECEXP (expr, 0, par_index);
1622 if (GET_CODE (elem) == SET
1623 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1624 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1625 dwarf2out_frame_debug_expr (elem);
1626 else if (GET_CODE (elem) == SET
1628 && !RTX_FRAME_RELATED_P (elem))
1630 /* Stack adjustment combining might combine some post-prologue
1631 stack adjustment into a prologue stack adjustment. */
1632 HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);
1635 dwarf2out_stack_adjust (offset);
/* From here on we analyze exactly one SET.  */
1641 gcc_assert (GET_CODE (expr) == SET);
1643 src = SET_SRC (expr);
1644 dest = SET_DEST (expr);
/* NOTE(review): how RSI is used is not visible in this excerpt;
   presumably SRC is redirected to the register it was previously
   saved in -- confirm against the full source.  */
1648 rtx rsi = reg_saved_in (src);
/* A REG destination updates the CFA/cfa_store/cfa_temp rules;
   a MEM destination records a register save.  */
1655 switch (GET_CODE (dest))
1658 switch (GET_CODE (src))
1660 /* Setting FP from SP. */
1662 if (cfa.reg == dwf_regno (src))
1665 /* Update the CFA rule wrt SP or FP. Make sure src is
1666 relative to the current CFA register.
1668 We used to require that dest be either SP or FP, but the
1669 ARM copies SP to a temporary register, and from there to
1670 FP. So we just rely on the backends to only set
1671 RTX_FRAME_RELATED_P on appropriate insns. */
1672 cfa.reg = dwf_regno (dest);
1673 cfa_temp.reg = cfa.reg;
1674 cfa_temp.offset = cfa.offset;
1678 /* Saving a register in a register. */
1679 gcc_assert (!fixed_regs [REGNO (dest)]
1680 /* For the SPARC and its register window. */
1681 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1683 /* After stack is aligned, we can only save SP in FP
1684 if drap register is used. In this case, we have
1685 to restore stack pointer with the CFA value and we
1686 don't generate this DWARF information. */
1688 && fde->stack_realign
1689 && REGNO (src) == STACK_POINTER_REGNUM)
1690 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1691 && fde->drap_reg != INVALID_REGNUM
1692 && cfa.reg != dwf_regno (src));
1694 queue_reg_save (src, dest, 0);
1701 if (dest == stack_pointer_rtx)
/* Adjusting SP: the adjustment is either a literal constant or
   the value previously tracked in cfa_temp.  */
1705 switch (GET_CODE (XEXP (src, 1)))
1708 offset = INTVAL (XEXP (src, 1));
1711 gcc_assert (dwf_regno (XEXP (src, 1)) == cfa_temp.reg);
1712 offset = cfa_temp.offset;
1718 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1720 /* Restoring SP from FP in the epilogue. */
1721 gcc_assert (cfa.reg == dw_frame_pointer_regnum)
1722 cfa.reg = dw_stack_pointer_regnum;
1724 else if (GET_CODE (src) == LO_SUM)
1725 /* Assume we've set the source reg of the LO_SUM from sp. */
1728 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1730 if (GET_CODE (src) != MINUS)
1732 if (cfa.reg == dw_stack_pointer_regnum)
1733 cfa.offset += offset;
1734 if (cfa_store.reg == dw_stack_pointer_regnum)
1735 cfa_store.offset += offset;
1737 else if (dest == hard_frame_pointer_rtx)
1740 /* Either setting the FP from an offset of the SP,
1741 or adjusting the FP */
1742 gcc_assert (frame_pointer_needed);
1744 gcc_assert (REG_P (XEXP (src, 0))
1745 && dwf_regno (XEXP (src, 0)) == cfa.reg
1746 && CONST_INT_P (XEXP (src, 1)));
1747 offset = INTVAL (XEXP (src, 1));
1748 if (GET_CODE (src) != MINUS)
1750 cfa.offset += offset;
1751 cfa.reg = dw_frame_pointer_regnum;
1755 gcc_assert (GET_CODE (src) != MINUS);
1758 if (REG_P (XEXP (src, 0))
1759 && dwf_regno (XEXP (src, 0)) == cfa.reg
1760 && CONST_INT_P (XEXP (src, 1)))
1762 /* Setting a temporary CFA register that will be copied
1763 into the FP later on. */
1764 offset = - INTVAL (XEXP (src, 1));
1765 cfa.offset += offset;
1766 cfa.reg = dwf_regno (dest);
1767 /* Or used to save regs to the stack. */
1768 cfa_temp.reg = cfa.reg;
1769 cfa_temp.offset = cfa.offset;
1773 else if (REG_P (XEXP (src, 0))
1774 && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
1775 && XEXP (src, 1) == stack_pointer_rtx)
1777 /* Setting a scratch register that we will use instead
1778 of SP for saving registers to the stack. */
1779 gcc_assert (cfa.reg == dw_stack_pointer_regnum);
1780 cfa_store.reg = dwf_regno (dest);
1781 cfa_store.offset = cfa.offset - cfa_temp.offset;
1785 else if (GET_CODE (src) == LO_SUM
1786 && CONST_INT_P (XEXP (src, 1)))
1788 cfa_temp.reg = dwf_regno (dest);
1789 cfa_temp.offset = INTVAL (XEXP (src, 1));
/* Loading a constant into a register: remember it in cfa_temp so a
   later SP adjustment by this register can be decoded.  */
1798 cfa_temp.reg = dwf_regno (dest);
1799 cfa_temp.offset = INTVAL (src);
1804 gcc_assert (REG_P (XEXP (src, 0))
1805 && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
1806 && CONST_INT_P (XEXP (src, 1)));
1808 cfa_temp.reg = dwf_regno (dest);
1809 cfa_temp.offset |= INTVAL (XEXP (src, 1));
1812 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1813 which will fill in all of the bits. */
1820 case UNSPEC_VOLATILE:
1821 /* All unspecs should be represented by REG_CFA_* notes. */
1827 /* If this AND operation happens on stack pointer in prologue,
1828 we assume the stack is realigned and we extract the
1830 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1832 /* We interpret reg_save differently with stack_realign set.
1833 Thus we must flush whatever we have queued first. */
1834 dwarf2out_flush_queued_reg_saves ();
1836 gcc_assert (cfa_store.reg == dwf_regno (XEXP (src, 0)));
1837 fde->stack_realign = 1;
1838 fde->stack_realignment = INTVAL (XEXP (src, 1));
1839 cfa_store.offset = 0;
1841 if (cfa.reg != dw_stack_pointer_regnum
1842 && cfa.reg != dw_frame_pointer_regnum)
1843 fde->drap_reg = cfa.reg;
1856 /* Saving a register to the stack. Make sure dest is relative to the
1858 switch (GET_CODE (XEXP (dest, 0)))
1864 /* We can't handle variable size modifications. */
1865 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1867 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1869 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1870 && cfa_store.reg == dw_stack_pointer_regnum);
1872 cfa_store.offset += offset;
1873 if (cfa.reg == dw_stack_pointer_regnum)
1874 cfa.offset = cfa_store.offset;
1876 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1877 offset -= cfa_store.offset;
1879 offset = -cfa_store.offset;
/* Auto-inc/dec addressing: the store size is the access size.  */
1886 offset = GET_MODE_SIZE (GET_MODE (dest));
1887 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1890 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1891 == STACK_POINTER_REGNUM)
1892 && cfa_store.reg == dw_stack_pointer_regnum);
1894 cfa_store.offset += offset;
1896 /* Rule 18: If stack is aligned, we will use FP as a
1897 reference to represent the address of the stored
1900 && fde->stack_realign
1901 && src == hard_frame_pointer_rtx)
1903 gcc_assert (cfa.reg != dw_frame_pointer_regnum);
1904 cfa_store.offset = 0;
1907 if (cfa.reg == dw_stack_pointer_regnum)
1908 cfa.offset = cfa_store.offset;
1910 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1911 offset += -cfa_store.offset;
1913 offset = -cfa_store.offset;
1917 /* With an offset. */
1924 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1925 && REG_P (XEXP (XEXP (dest, 0), 0)));
1926 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1927 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1930 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
/* Translate the store address into a CFA-relative offset, using
   whichever of cfa/cfa_store/cfa_temp the base register matches.  */
1932 if (cfa.reg == regno)
1933 offset -= cfa.offset;
1934 else if (cfa_store.reg == regno)
1935 offset -= cfa_store.offset;
1938 gcc_assert (cfa_temp.reg == regno);
1939 offset -= cfa_temp.offset;
1945 /* Without an offset. */
1948 unsigned int regno = dwf_regno (XEXP (dest, 0));
1950 if (cfa.reg == regno)
1951 offset = -cfa.offset;
1952 else if (cfa_store.reg == regno)
1953 offset = -cfa_store.offset;
1956 gcc_assert (cfa_temp.reg == regno);
1957 offset = -cfa_temp.offset;
1964 gcc_assert (cfa_temp.reg == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1965 offset = -cfa_temp.offset;
1966 cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1974 /* If the source operand of this MEM operation is a memory,
1975 we only care how much stack grew. */
1980 && REGNO (src) != STACK_POINTER_REGNUM
1981 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1982 && dwf_regno (src) == cfa.reg)
1984 /* We're storing the current CFA reg into the stack. */
1986 if (cfa.offset == 0)
1989 /* If stack is aligned, putting CFA reg into stack means
1990 we can no longer use reg + offset to represent CFA.
1991 Here we use DW_CFA_def_cfa_expression instead. The
1992 result of this expression equals to the original CFA
1995 && fde->stack_realign
1996 && cfa.indirect == 0
1997 && cfa.reg != dw_frame_pointer_regnum)
1999 dw_cfa_location cfa_exp;
2001 gcc_assert (fde->drap_reg == cfa.reg);
2003 cfa_exp.indirect = 1;
2004 cfa_exp.reg = dw_frame_pointer_regnum;
2005 cfa_exp.base_offset = offset;
2008 fde->drap_reg_saved = 1;
2010 def_cfa_1 (&cfa_exp);
2014 /* If the source register is exactly the CFA, assume
2015 we're saving SP like any other register; this happens
2018 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
2023 /* Otherwise, we'll need to look in the stack to
2024 calculate the CFA. */
2025 rtx x = XEXP (dest, 0);
2029 gcc_assert (REG_P (x));
2031 cfa.reg = dwf_regno (x);
2032 cfa.base_offset = offset;
/* An ordinary register save; SPAN lets the target describe registers
   whose contents live in several pieces.  */
2043 span = targetm.dwarf_register_span (src);
2045 queue_reg_save (src, NULL_RTX, offset);
2048 /* We have a PARALLEL describing where the contents of SRC live.
2049 Queue register saves for each piece of the PARALLEL. */
2052 HOST_WIDE_INT span_offset = offset;
2054 gcc_assert (GET_CODE (span) == PARALLEL);
2056 limit = XVECLEN (span, 0);
2057 for (par_index = 0; par_index < limit; par_index++)
2059 rtx elem = XVECEXP (span, 0, par_index);
2060 queue_reg_save (elem, NULL_RTX, span_offset);
2061 span_offset += GET_MODE_SIZE (GET_MODE (elem));
2071 /* Record call frame debugging information for INSN, which either
2072 sets SP or FP (adjusting how we calculate the frame address) or saves a
2073 register to the stack. If INSN is NULL_RTX, initialize our state.
2075 If AFTER_P is false, we're being called before the insn is emitted,
2076 otherwise after. Call instructions get invoked twice. */
/* Top-level per-insn driver: flush queued saves when needed, then either
   notice plain stack adjustments or interpret the frame-related insn.  */
2079 dwarf2out_frame_debug (rtx insn, bool after_p)
2082 bool handled_one = false;
2083 bool need_flush = false;
2085 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2086 dwarf2out_flush_queued_reg_saves ();
2088 if (!RTX_FRAME_RELATED_P (insn))
2090 /* ??? This should be done unconditionally since stack adjustments
2091 matter if the stack pointer is not the CFA register anymore but
2092 is still used to save registers. */
2093 if (!ACCUMULATE_OUTGOING_ARGS)
2094 dwarf2out_notice_stack_adjust (insn, after_p);
2098 any_cfis_emitted = false;
/* First honor any REG_CFA_* notes attached to the insn; these encode
   the CFI effect explicitly, without pattern analysis.  */
2100 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2101 switch (REG_NOTE_KIND (note))
2103 case REG_FRAME_RELATED_EXPR:
/* The note supplies a replacement pattern to analyze below.  */
2104 insn = XEXP (note, 0);
2107 case REG_CFA_DEF_CFA:
2108 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2112 case REG_CFA_ADJUST_CFA:
2117 if (GET_CODE (n) == PARALLEL)
2118 n = XVECEXP (n, 0, 0);
2120 dwarf2out_frame_debug_adjust_cfa (n);
2124 case REG_CFA_OFFSET:
2127 n = single_set (insn);
2128 dwarf2out_frame_debug_cfa_offset (n);
2132 case REG_CFA_REGISTER:
2137 if (GET_CODE (n) == PARALLEL)
2138 n = XVECEXP (n, 0, 0);
2140 dwarf2out_frame_debug_cfa_register (n);
2144 case REG_CFA_EXPRESSION:
2147 n = single_set (insn);
2148 dwarf2out_frame_debug_cfa_expression (n);
2152 case REG_CFA_RESTORE:
2157 if (GET_CODE (n) == PARALLEL)
2158 n = XVECEXP (n, 0, 0);
2161 dwarf2out_frame_debug_cfa_restore (n);
2165 case REG_CFA_SET_VDRAP:
2169 dw_fde_ref fde = cfun->fde;
2172 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2174 fde->vdrap_reg = dwf_regno (n);
2180 case REG_CFA_WINDOW_SAVE:
2181 dwarf2out_frame_debug_cfa_window_save ();
2185 case REG_CFA_FLUSH_QUEUE:
2186 /* The actual flush happens below. */
2197 /* Minimize the number of advances by emitting the entire queue
2198 once anything is emitted. */
2199 need_flush |= any_cfis_emitted;
/* No note handled the insn; fall back to analyzing the pattern.  */
2203 insn = PATTERN (insn);
2205 dwarf2out_frame_debug_expr (insn);
2207 /* Check again. A parallel can save and update the same register.
2208 We could probably check just once, here, but this is safer than
2209 removing the check at the start of the function. */
2210 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2215 dwarf2out_flush_queued_reg_saves ();
2218 /* Examine CFI and return true if a cfi label and set_loc is needed
2219 beforehand. Even when generating CFI assembler instructions, we
2220 still have to add the cfi to the list so that lookup_cfa_1 works
2221 later on. When -g2 and above we even need to force emitting of
2222 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2223 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2224 and so don't use convert_cfa_to_fb_loc_list. */
/* Return whether CFI needs a label/set_loc emitted before it.  */
2227 cfi_label_required_p (dw_cfi_ref cfi)
2229 if (!dwarf2out_do_cfi_asm ())
2232 if (dwarf_version == 2
2233 && debug_info_level > DINFO_LEVEL_TERSE
2234 && (write_symbols == DWARF2_DEBUG
2235 || write_symbols == VMS_AND_DWARF2_DEBUG))
/* With DWARF2-only debug info, CFA-changing opcodes need a label so
   the CFA-to-frame-base conversion can locate row boundaries.  */
2237 switch (cfi->dw_cfi_opc)
2239 case DW_CFA_def_cfa_offset:
2240 case DW_CFA_def_cfa_offset_sf:
2241 case DW_CFA_def_cfa_register:
2242 case DW_CFA_def_cfa:
2243 case DW_CFA_def_cfa_sf:
2244 case DW_CFA_def_cfa_expression:
2245 case DW_CFA_restore_state:
2254 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2255 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
/* Walk the insn stream for NOTE_INSN_CFI notes and push their CFIs onto
   the current function's FDE, inserting set_loc/advance_loc labels
   where cfi_label_required_p says one is needed.  */
2258 add_cfis_to_fde (void)
2260 dw_fde_ref fde = cfun->fde;
2262 /* We always start with a function_begin label. */
2265 for (insn = get_insns (); insn; insn = next)
2267 next = NEXT_INSN (insn);
2269 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2271 /* Don't attempt to advance_loc4 between labels
2272 in different sections. */
2276 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
/* Batch consecutive CFI notes: a single label covers them all.  */
2278 bool required = cfi_label_required_p (NOTE_CFI (insn));
2279 while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2281 required |= cfi_label_required_p (NOTE_CFI (next));
2282 next = NEXT_INSN (next);
2286 int num = dwarf2out_cfi_label_num;
2287 const char *label = dwarf2out_cfi_label ();
2291 /* Set the location counter to the new label. */
2293 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2294 : DW_CFA_advance_loc4);
2295 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2296 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);
/* Materialize the label in the insn stream for the assembler.  */
2298 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2299 NOTE_LABEL_NUMBER (tmp) = num;
2304 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
2305 insn = NEXT_INSN (insn);
2307 while (insn != next);
2313 /* Scan the function and create the initial set of CFI notes. */
/* Scan every insn of the function once, driving dwarf2out_frame_debug to
   create the initial set of NOTE_INSN_CFI notes.  ADD_CFI_INSN tracks
   where newly created CFI notes should be inserted.  */
2316 create_cfi_notes (void)
2320 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
2324 add_cfi_insn = PREV_INSN (insn);
2326 if (BARRIER_P (insn))
2328 dwarf2out_frame_debug (insn, false);
2334 switch (NOTE_KIND (insn))
2336 case NOTE_INSN_PROLOGUE_END:
2337 dwarf2out_flush_queued_reg_saves ();
2340 case NOTE_INSN_EPILOGUE_BEG:
2341 #if defined(HAVE_epilogue)
2342 dwarf2out_cfi_begin_epilogue (insn);
2346 case NOTE_INSN_CFA_RESTORE_STATE:
2347 add_cfi_insn = insn;
2348 dwarf2out_frame_debug_restore_state ();
2354 if (!NONDEBUG_INSN_P (insn))
2357 pat = PATTERN (insn);
2358 if (asm_noperands (pat) >= 0)
2360 dwarf2out_frame_debug (insn, false);
2364 if (GET_CODE (pat) == SEQUENCE)
/* Delay-slot SEQUENCE: process the slot insns individually
   (element 0 is the branch itself).  */
2366 int i, n = XVECLEN (pat, 0);
2367 for (i = 1; i < n; ++i)
2368 dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
2372 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2373 dwarf2out_frame_debug (insn, false);
2375 /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
2376 Putting the note after the VEC should be ok. */
2377 if (!tablejump_p (insn, NULL, &add_cfi_insn))
2378 add_cfi_insn = insn;
/* Second call with AFTER_P set: effects visible after the insn.  */
2380 dwarf2out_frame_debug (insn, true);
2383 add_cfi_insn = NULL;
2386 /* Determine if we need to save and restore CFI information around this
2387 epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
2388 we do need to save/restore, then emit the save now, and insert a
2389 NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
/* Called at NOTE_INSN_EPILOGUE_BEG: decide whether this epilogue needs a
   DW_CFA_remember_state/restore_state pair, and if so emit the remember
   now and plant a NOTE_INSN_CFA_RESTORE_STATE after the return.  */
2392 dwarf2out_cfi_begin_epilogue (rtx insn)
2394 bool saw_frp = false;
2397 /* Scan forward to the return insn, noticing if there are possible
2398 frame related insns. */
2399 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
2404 /* Look for both regular and sibcalls to end the block. */
2405 if (returnjump_p (i))
2407 if (CALL_P (i) && SIBLING_CALL_P (i))
2410 if (GET_CODE (PATTERN (i)) == SEQUENCE)
/* Delay-slot SEQUENCE: check the branch in slot 0, and all
   elements for frame-related effects.  */
2413 rtx seq = PATTERN (i);
2415 if (returnjump_p (XVECEXP (seq, 0, 0)))
2417 if (CALL_P (XVECEXP (seq, 0, 0))
2418 && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
2421 for (idx = 0; idx < XVECLEN (seq, 0); idx++)
2422 if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
2426 if (RTX_FRAME_RELATED_P (i))
2430 /* If the port doesn't emit epilogue unwind info, we don't need a
2431 save/restore pair. */
2435 /* Otherwise, search forward to see if the return insn was the last
2436 basic block of the function. If so, we don't need save/restore. */
2437 gcc_assert (i != NULL);
2438 i = next_real_insn (i);
2442 /* Insert the restore before that next real insn in the stream, and before
2443 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2444 properly nested. This should be after any label or alignment. This
2445 will be pushed into the CFI stream by the function below. */
2448 rtx p = PREV_INSN (i);
2451 if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
2455 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);
/* The DW_CFA_remember_state itself is emitted lazily via this flag.  */
2457 emit_cfa_remember = true;
2459 /* And emulate the state save. */
2460 gcc_assert (remember_row == NULL);
2461 remember_row = copy_cfi_row (cur_row);
2464 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
/* Emit DW_CFA_restore_state and roll the abstract CFI row back to the
   state remembered by dwarf2out_cfi_begin_epilogue.  */
2468 dwarf2out_frame_debug_restore_state (void)
2470 dw_cfi_ref cfi = new_cfi ();
2472 cfi->dw_cfi_opc = DW_CFA_restore_state;
2475 gcc_assert (remember_row != NULL);
/* Discard the current row and adopt the remembered one.  */
2476 free_cfi_row (cur_row);
2477 cur_row = remember_row;
2478 remember_row = NULL;
2481 /* Record the initial position of the return address. RTL is
2482 INCOMING_RETURN_ADDR_RTX. */
/* Record where INCOMING_RETURN_ADDR_RTX (RTL) places the return address
   on function entry: a register, a stack slot relative to SP, or a
   register plus an ignorable constant offset.  */
2485 initial_return_save (rtx rtl)
2487 unsigned int reg = INVALID_REGNUM;
2488 HOST_WIDE_INT offset = 0;
2490 switch (GET_CODE (rtl))
2493 /* RA is in a register. */
2494 reg = dwf_regno (rtl);
2498 /* RA is on the stack. */
2499 rtl = XEXP (rtl, 0);
2500 switch (GET_CODE (rtl))
2503 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2508 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2509 offset = INTVAL (XEXP (rtl, 1));
2513 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2514 offset = -INTVAL (XEXP (rtl, 1));
2524 /* The return address is at some offset from any value we can
2525 actually load. For instance, on the SPARC it is in %i7+8. Just
2526 ignore the offset for now; it doesn't matter for unwinding frames. */
2527 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2528 initial_return_save (XEXP (rtl, 0));
2535 if (reg != DWARF_FRAME_RETURN_COLUMN)
2537 if (reg != INVALID_REGNUM)
2538 record_reg_saved_in_reg (rtl, pc_rtx);
/* Offsets in reg_save are relative to the CFA.  */
2539 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
2543 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2544 state at each location within the function. These notes will be
2545 emitted during pass_final. */
/* Pass entry point: lazily build the CIE's initial CFI row, then set up
   per-function state, run create_cfi_notes, and tear the state down.  */
2548 execute_dwarf2_frame (void)
2550 /* The first time we're called, compute the incoming frame state. */
2551 if (cie_cfi_vec == NULL)
2553 dw_cfa_location loc;
2555 dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
2556 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
2558 add_cfi_vec = &cie_cfi_vec;
2559 cie_cfi_row = cur_row = new_cfi_row ();
2561 /* On entry, the Canonical Frame Address is at SP. */
2562 memset(&loc, 0, sizeof (loc));
2563 loc.reg = dw_stack_pointer_regnum;
2564 loc.offset = INCOMING_FRAME_SP_OFFSET;
2567 if (targetm.debug_unwind_info () == UI_DWARF2
2568 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2570 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2572 /* For a few targets, we have the return address incoming into a
2573 register, but choose a different return column. This will result
2574 in a DW_CFA_register for the return, and an entry in
2575 regs_saved_in_regs to match. If the target later stores that
2576 return address register to the stack, we want to be able to emit
2577 the DW_CFA_offset against the return column, not the intermediate
2578 save register. Save the contents of regs_saved_in_regs so that
2579 we can re-initialize it at the start of each function. */
2580 switch (VEC_length (reg_saved_in_data, regs_saved_in_regs))
2585 cie_return_save = ggc_alloc_reg_saved_in_data ();
2586 *cie_return_save = *VEC_index (reg_saved_in_data,
2587 regs_saved_in_regs, 0);
2588 regs_saved_in_regs = NULL;
2598 /* Set up state for generating call frame debug info. */
2599 gcc_checking_assert (queued_reg_saves == NULL);
2600 gcc_checking_assert (regs_saved_in_regs == NULL);
/* Start each function from a copy of the CIE row.  */
2602 cur_row = copy_cfi_row (cie_cfi_row);
2603 if (cie_return_save)
2604 VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, cie_return_save);
2606 cfa_store = cur_row->cfa;
/* cfa_temp starts empty; INVALID_REGNUM means "no value tracked".  */
2609 memset (&cfa_temp, 0, sizeof(cfa_temp));
2610 cfa_temp.reg = INVALID_REGNUM;
2612 dwarf2out_alloc_current_fde ();
2615 create_cfi_notes ();
2618 /* Reset all function-specific information, particularly for GC. */
2619 XDELETEVEC (barrier_args_size);
2620 barrier_args_size = NULL;
2621 regs_saved_in_regs = NULL;
2622 queued_reg_saves = NULL;
2624 free_cfi_row (cur_row);
2630 /* Convert a DWARF call frame info. operation to its string name */
/* Map a DW_CFA_* opcode to its printable name, for assembler comments
   and debug dumps.  Unknown opcodes yield "DW_CFA_<unknown>".  */
2633 dwarf_cfi_name (unsigned int cfi_opc)
2637 case DW_CFA_advance_loc:
2638 return "DW_CFA_advance_loc";
2640 return "DW_CFA_offset";
2641 case DW_CFA_restore:
2642 return "DW_CFA_restore";
2644 return "DW_CFA_nop";
2645 case DW_CFA_set_loc:
2646 return "DW_CFA_set_loc";
2647 case DW_CFA_advance_loc1:
2648 return "DW_CFA_advance_loc1";
2649 case DW_CFA_advance_loc2:
2650 return "DW_CFA_advance_loc2";
2651 case DW_CFA_advance_loc4:
2652 return "DW_CFA_advance_loc4";
2653 case DW_CFA_offset_extended:
2654 return "DW_CFA_offset_extended";
2655 case DW_CFA_restore_extended:
2656 return "DW_CFA_restore_extended";
2657 case DW_CFA_undefined:
2658 return "DW_CFA_undefined";
2659 case DW_CFA_same_value:
2660 return "DW_CFA_same_value";
2661 case DW_CFA_register:
2662 return "DW_CFA_register";
2663 case DW_CFA_remember_state:
2664 return "DW_CFA_remember_state";
2665 case DW_CFA_restore_state:
2666 return "DW_CFA_restore_state";
2667 case DW_CFA_def_cfa:
2668 return "DW_CFA_def_cfa";
2669 case DW_CFA_def_cfa_register:
2670 return "DW_CFA_def_cfa_register";
2671 case DW_CFA_def_cfa_offset:
2672 return "DW_CFA_def_cfa_offset";
2675 case DW_CFA_def_cfa_expression:
2676 return "DW_CFA_def_cfa_expression";
2677 case DW_CFA_expression:
2678 return "DW_CFA_expression";
2679 case DW_CFA_offset_extended_sf:
2680 return "DW_CFA_offset_extended_sf";
2681 case DW_CFA_def_cfa_sf:
2682 return "DW_CFA_def_cfa_sf";
2683 case DW_CFA_def_cfa_offset_sf:
2684 return "DW_CFA_def_cfa_offset_sf";
2686 /* SGI/MIPS specific */
2687 case DW_CFA_MIPS_advance_loc8:
2688 return "DW_CFA_MIPS_advance_loc8";
2690 /* GNU extensions */
2691 case DW_CFA_GNU_window_save:
2692 return "DW_CFA_GNU_window_save";
2693 case DW_CFA_GNU_args_size:
2694 return "DW_CFA_GNU_args_size";
2695 case DW_CFA_GNU_negative_offset_extended:
2696 return "DW_CFA_GNU_negative_offset_extended";
2699 return "DW_CFA_<unknown>";
2703 /* This routine will generate the correct assembly data for a location
2704 description based on a cfi entry with a complex address. */
/* Emit the location-expression operand of a DW_CFA_expression or
   DW_CFA_def_cfa_expression CFI: register (expression form only),
   uleb128 block size, then the DWARF operations.  */
2707 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
2709 dw_loc_descr_ref loc;
2712 if (cfi->dw_cfi_opc == DW_CFA_expression)
/* DW_CFA_expression carries the target register first.  */
2715 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2716 dw2_asm_output_data (1, r, NULL);
2717 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2720 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2722 /* Output the size of the block. */
2723 size = size_of_locs (loc);
2724 dw2_asm_output_data_uleb128 (size, NULL);
2726 /* Now output the operations themselves. */
2727 output_loc_sequence (loc, for_eh);
2730 /* Similar, but used for .cfi_escape. */
/* Like output_cfa_loc, but emits raw comma-separated bytes for use
   inside a .cfi_escape directive.  */
2733 output_cfa_loc_raw (dw_cfi_ref cfi)
2735 dw_loc_descr_ref loc;
2738 if (cfi->dw_cfi_opc == DW_CFA_expression)
/* DW_CFA_expression carries the target register first.  */
2741 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2742 fprintf (asm_out_file, "%#x,", r);
2743 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2746 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2748 /* Output the size of the block. */
2749 size = size_of_locs (loc);
2750 dw2_asm_output_data_uleb128_raw (size);
2751 fputc (',', asm_out_file);
2753 /* Now output the operations themselves. */
2754 output_loc_sequence_raw (loc);
2757 /* Output a Call Frame Information opcode and its operand(s). */
2760 output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
2765 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
2766 dw2_asm_output_data (1, (cfi->dw_cfi_opc
2767 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
2768 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
2769 ((unsigned HOST_WIDE_INT)
2770 cfi->dw_cfi_oprnd1.dw_cfi_offset));
2771 else if (cfi->dw_cfi_opc == DW_CFA_offset)
2773 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2774 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
2775 "DW_CFA_offset, column %#lx", r);
2776 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2777 dw2_asm_output_data_uleb128 (off, NULL);
2779 else if (cfi->dw_cfi_opc == DW_CFA_restore)
2781 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2782 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
2783 "DW_CFA_restore, column %#lx", r);
2787 dw2_asm_output_data (1, cfi->dw_cfi_opc,
2788 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
2790 switch (cfi->dw_cfi_opc)
2792 case DW_CFA_set_loc:
2794 dw2_asm_output_encoded_addr_rtx (
2795 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
2796 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
2799 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
2800 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
2801 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2804 case DW_CFA_advance_loc1:
2805 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2806 fde->dw_fde_current_label, NULL);
2807 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2810 case DW_CFA_advance_loc2:
2811 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2812 fde->dw_fde_current_label, NULL);
2813 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2816 case DW_CFA_advance_loc4:
2817 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2818 fde->dw_fde_current_label, NULL);
2819 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2822 case DW_CFA_MIPS_advance_loc8:
2823 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2824 fde->dw_fde_current_label, NULL);
2825 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2828 case DW_CFA_offset_extended:
2829 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2830 dw2_asm_output_data_uleb128 (r, NULL);
2831 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2832 dw2_asm_output_data_uleb128 (off, NULL);
2835 case DW_CFA_def_cfa:
2836 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2837 dw2_asm_output_data_uleb128 (r, NULL);
2838 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
2841 case DW_CFA_offset_extended_sf:
2842 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2843 dw2_asm_output_data_uleb128 (r, NULL);
2844 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2845 dw2_asm_output_data_sleb128 (off, NULL);
2848 case DW_CFA_def_cfa_sf:
2849 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2850 dw2_asm_output_data_uleb128 (r, NULL);
2851 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2852 dw2_asm_output_data_sleb128 (off, NULL);
2855 case DW_CFA_restore_extended:
2856 case DW_CFA_undefined:
2857 case DW_CFA_same_value:
2858 case DW_CFA_def_cfa_register:
2859 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2860 dw2_asm_output_data_uleb128 (r, NULL);
2863 case DW_CFA_register:
2864 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2865 dw2_asm_output_data_uleb128 (r, NULL);
2866 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
2867 dw2_asm_output_data_uleb128 (r, NULL);
2870 case DW_CFA_def_cfa_offset:
2871 case DW_CFA_GNU_args_size:
2872 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
2875 case DW_CFA_def_cfa_offset_sf:
2876 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
2877 dw2_asm_output_data_sleb128 (off, NULL);
2880 case DW_CFA_GNU_window_save:
2883 case DW_CFA_def_cfa_expression:
2884 case DW_CFA_expression:
2885 output_cfa_loc (cfi, for_eh);
2888 case DW_CFA_GNU_negative_offset_extended:
2889 /* Obsoleted by DW_CFA_offset_extended_sf. */
2898 /* Similar, but do it via assembler directives instead. */
2901 output_cfi_directive (FILE *f, dw_cfi_ref cfi)
2903 unsigned long r, r2;
2905 switch (cfi->dw_cfi_opc)
2907 case DW_CFA_advance_loc:
2908 case DW_CFA_advance_loc1:
2909 case DW_CFA_advance_loc2:
2910 case DW_CFA_advance_loc4:
2911 case DW_CFA_MIPS_advance_loc8:
2912 case DW_CFA_set_loc:
2913 /* Should only be created in a code path not followed when emitting
2914 via directives. The assembler is going to take care of this for
2915 us. But this routines is also used for debugging dumps, so
2917 gcc_assert (f != asm_out_file);
2918 fprintf (f, "\t.cfi_advance_loc\n");
2922 case DW_CFA_offset_extended:
2923 case DW_CFA_offset_extended_sf:
2924 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2925 fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
2926 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
2929 case DW_CFA_restore:
2930 case DW_CFA_restore_extended:
2931 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2932 fprintf (f, "\t.cfi_restore %lu\n", r);
2935 case DW_CFA_undefined:
2936 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2937 fprintf (f, "\t.cfi_undefined %lu\n", r);
2940 case DW_CFA_same_value:
2941 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2942 fprintf (f, "\t.cfi_same_value %lu\n", r);
2945 case DW_CFA_def_cfa:
2946 case DW_CFA_def_cfa_sf:
2947 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2948 fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
2949 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
2952 case DW_CFA_def_cfa_register:
2953 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2954 fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
2957 case DW_CFA_register:
2958 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2959 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
2960 fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
2963 case DW_CFA_def_cfa_offset:
2964 case DW_CFA_def_cfa_offset_sf:
2965 fprintf (f, "\t.cfi_def_cfa_offset "
2966 HOST_WIDE_INT_PRINT_DEC"\n",
2967 cfi->dw_cfi_oprnd1.dw_cfi_offset);
2970 case DW_CFA_remember_state:
2971 fprintf (f, "\t.cfi_remember_state\n");
2973 case DW_CFA_restore_state:
2974 fprintf (f, "\t.cfi_restore_state\n");
2977 case DW_CFA_GNU_args_size:
2978 if (f == asm_out_file)
2980 fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
2981 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
2983 fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
2984 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
2989 fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
2990 cfi->dw_cfi_oprnd1.dw_cfi_offset);
2994 case DW_CFA_GNU_window_save:
2995 fprintf (f, "\t.cfi_window_save\n");
2998 case DW_CFA_def_cfa_expression:
2999 if (f != asm_out_file)
3001 fprintf (f, "\t.cfi_def_cfa_expression ...\n");
3005 case DW_CFA_expression:
3006 if (f != asm_out_file)
3008 fprintf (f, "\t.cfi_cfa_expression ...\n");
3011 fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3012 output_cfa_loc_raw (cfi);
3022 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3024 if (dwarf2out_do_cfi_asm ())
3025 output_cfi_directive (asm_out_file, cfi);
3028 /* Output CFIs from VEC, up to index UPTO, to bring current FDE to the
3029 same state as after executing CFIs in CFI chain. DO_CFI_ASM is
3030 true if .cfi_* directives shall be emitted, false otherwise. If it
3031 is false, FDE and FOR_EH are the other arguments to pass to
3035 output_cfis (cfi_vec vec, int upto, bool do_cfi_asm,
3036 dw_fde_ref fde, bool for_eh)
3039 struct dw_cfi_struct cfi_buf;
3041 dw_cfi_ref cfi_args_size = NULL, cfi_cfa = NULL, cfi_cfa_offset = NULL;
3042 VEC(dw_cfi_ref, heap) *regs = VEC_alloc (dw_cfi_ref, heap, 32);
3043 unsigned int len, idx;
3045 for (ix = 0; ix < upto + 1; ix++)
3047 dw_cfi_ref cfi = ix < upto ? VEC_index (dw_cfi_ref, vec, ix) : NULL;
3048 switch (cfi ? cfi->dw_cfi_opc : DW_CFA_nop)
3050 case DW_CFA_advance_loc:
3051 case DW_CFA_advance_loc1:
3052 case DW_CFA_advance_loc2:
3053 case DW_CFA_advance_loc4:
3054 case DW_CFA_MIPS_advance_loc8:
3055 case DW_CFA_set_loc:
3056 /* All advances should be ignored. */
3058 case DW_CFA_remember_state:
3060 dw_cfi_ref args_size = cfi_args_size;
3062 /* Skip everything between .cfi_remember_state and
3063 .cfi_restore_state. */
3068 for (; ix < upto; ix++)
3070 cfi2 = VEC_index (dw_cfi_ref, vec, ix);
3071 if (cfi2->dw_cfi_opc == DW_CFA_restore_state)
3073 else if (cfi2->dw_cfi_opc == DW_CFA_GNU_args_size)
3076 gcc_assert (cfi2->dw_cfi_opc != DW_CFA_remember_state);
3079 cfi_args_size = args_size;
3082 case DW_CFA_GNU_args_size:
3083 cfi_args_size = cfi;
3085 case DW_CFA_GNU_window_save:
3088 case DW_CFA_offset_extended:
3089 case DW_CFA_offset_extended_sf:
3090 case DW_CFA_restore:
3091 case DW_CFA_restore_extended:
3092 case DW_CFA_undefined:
3093 case DW_CFA_same_value:
3094 case DW_CFA_register:
3095 case DW_CFA_val_offset:
3096 case DW_CFA_val_offset_sf:
3097 case DW_CFA_expression:
3098 case DW_CFA_val_expression:
3099 case DW_CFA_GNU_negative_offset_extended:
3100 if (VEC_length (dw_cfi_ref, regs)
3101 <= cfi->dw_cfi_oprnd1.dw_cfi_reg_num)
3102 VEC_safe_grow_cleared (dw_cfi_ref, heap, regs,
3103 cfi->dw_cfi_oprnd1.dw_cfi_reg_num + 1);
3104 VEC_replace (dw_cfi_ref, regs, cfi->dw_cfi_oprnd1.dw_cfi_reg_num,
3107 case DW_CFA_def_cfa:
3108 case DW_CFA_def_cfa_sf:
3109 case DW_CFA_def_cfa_expression:
3111 cfi_cfa_offset = cfi;
3113 case DW_CFA_def_cfa_register:
3116 case DW_CFA_def_cfa_offset:
3117 case DW_CFA_def_cfa_offset_sf:
3118 cfi_cfa_offset = cfi;
3121 gcc_assert (cfi == NULL);
3123 len = VEC_length (dw_cfi_ref, regs);
3124 for (idx = 0; idx < len; idx++)
3126 cfi2 = VEC_replace (dw_cfi_ref, regs, idx, NULL);
3128 && cfi2->dw_cfi_opc != DW_CFA_restore
3129 && cfi2->dw_cfi_opc != DW_CFA_restore_extended)
3132 output_cfi_directive (asm_out_file, cfi2);
3134 output_cfi (cfi2, fde, for_eh);
3137 if (cfi_cfa && cfi_cfa_offset && cfi_cfa_offset != cfi_cfa)
3139 gcc_assert (cfi_cfa->dw_cfi_opc != DW_CFA_def_cfa_expression);
3141 switch (cfi_cfa_offset->dw_cfi_opc)
3143 case DW_CFA_def_cfa_offset:
3144 cfi_buf.dw_cfi_opc = DW_CFA_def_cfa;
3145 cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd1;
3147 case DW_CFA_def_cfa_offset_sf:
3148 cfi_buf.dw_cfi_opc = DW_CFA_def_cfa_sf;
3149 cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd1;
3151 case DW_CFA_def_cfa:
3152 case DW_CFA_def_cfa_sf:
3153 cfi_buf.dw_cfi_opc = cfi_cfa_offset->dw_cfi_opc;
3154 cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd2;
3161 else if (cfi_cfa_offset)
3162 cfi_cfa = cfi_cfa_offset;
3166 output_cfi_directive (asm_out_file, cfi_cfa);
3168 output_cfi (cfi_cfa, fde, for_eh);
3171 cfi_cfa_offset = NULL;
3173 && cfi_args_size->dw_cfi_oprnd1.dw_cfi_offset)
3176 output_cfi_directive (asm_out_file, cfi_args_size);
3178 output_cfi (cfi_args_size, fde, for_eh);
3180 cfi_args_size = NULL;
3183 VEC_free (dw_cfi_ref, heap, regs);
3186 else if (do_cfi_asm)
3187 output_cfi_directive (asm_out_file, cfi);
3189 output_cfi (cfi, fde, for_eh);
3198 /* Save the result of dwarf2out_do_frame across PCH.
3199 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3200 static GTY(()) signed char saved_do_cfi_asm = 0;
3202 /* Decide whether we want to emit frame unwind information for the current
3203 translation unit. */
3206 dwarf2out_do_frame (void)
3208 /* We want to emit correct CFA location expressions or lists, so we
3209 have to return true if we're going to output debug info, even if
3210 we're not going to output frame or unwind info. */
3211 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3214 if (saved_do_cfi_asm > 0)
3217 if (targetm.debug_unwind_info () == UI_DWARF2)
3220 if ((flag_unwind_tables || flag_exceptions)
3221 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3227 /* Decide whether to emit frame unwind via assembler directives. */
3230 dwarf2out_do_cfi_asm (void)
3234 #ifdef MIPS_DEBUGGING_INFO
3238 if (saved_do_cfi_asm != 0)
3239 return saved_do_cfi_asm > 0;
3241 /* Assume failure for a moment. */
3242 saved_do_cfi_asm = -1;
3244 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3246 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3249 /* Make sure the personality encoding is one the assembler can support.
3250 In particular, aligned addresses can't be handled. */
3251 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3252 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3254 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3255 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3258 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3259 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3260 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3261 && !flag_unwind_tables && !flag_exceptions
3262 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
3266 saved_do_cfi_asm = 1;
3271 gate_dwarf2_frame (void)
3273 #ifndef HAVE_prologue
3274 /* Targets which still implement the prologue in assembler text
3275 cannot use the generic dwarf2 unwinding. */
3279 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3280 from the optimized shrink-wrapping annotations that we will compute.
3281 For now, only produce the CFI notes for dwarf2. */
3282 return dwarf2out_do_frame ();
3285 struct rtl_opt_pass pass_dwarf2_frame =
3289 "dwarf2", /* name */
3290 gate_dwarf2_frame, /* gate */
3291 execute_dwarf2_frame, /* execute */
3294 0, /* static_pass_number */
3295 TV_FINAL, /* tv_id */
3296 0, /* properties_required */
3297 0, /* properties_provided */
3298 0, /* properties_destroyed */
3299 0, /* todo_flags_start */
3300 0 /* todo_flags_finish */
3304 #include "gt-dwarf2cfi.h"