1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
61 /* A vector of call frame insns for the CIE. */
64 static GTY(()) unsigned long dwarf2out_cfi_label_num;
66 /* The insn after which a new CFI note should be emitted. */
69 /* When non-null, add_cfi will add the CFI to this vector. */
70 static cfi_vec *add_cfi_vec;
72 /* True if remember_state should be emitted before following CFI directive. */
73 static bool emit_cfa_remember;
75 /* True if any CFI directives were emitted at the current insn. */
76 static bool any_cfis_emitted;
79 static void dwarf2out_cfi_begin_epilogue (rtx insn);
80 static void dwarf2out_frame_debug_restore_state (void);
83 /* Hook used by __throw. */
86 expand_builtin_dwarf_sp_column (void)
88 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
89 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
92 /* MEM is a memory reference for the register size table, each element of
93 which has mode MODE. Initialize column C as a return address column. */
96 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
98 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
99 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
100 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
103 /* Generate code to initialize the register size table. */
106 expand_builtin_init_dwarf_reg_sizes (tree address)
109 enum machine_mode mode = TYPE_MODE (char_type_node);
110 rtx addr = expand_normal (address);
111 rtx mem = gen_rtx_MEM (BLKmode, addr);
112 bool wrote_return_column = false;
114 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
116 int rnum = DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), 1);
118 if (rnum < DWARF_FRAME_REGISTERS)
120 HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
121 enum machine_mode save_mode = reg_raw_mode[i];
124 if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
125 save_mode = choose_hard_reg_mode (i, 1, true);
126 if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN)
128 if (save_mode == VOIDmode)
130 wrote_return_column = true;
132 size = GET_MODE_SIZE (save_mode);
136 emit_move_insn (adjust_address (mem, mode, offset),
137 gen_int_mode (size, mode));
141 if (!wrote_return_column)
142 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
144 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
145 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
148 targetm.init_dwarf_reg_sizes_extra (address);
151 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
153 static inline HOST_WIDE_INT
154 div_data_align (HOST_WIDE_INT off)
156 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
157 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
161 /* Return true if we need a signed version of a given opcode
162 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
165 need_data_align_sf_opcode (HOST_WIDE_INT off)
167 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
170 /* Return a pointer to a newly allocated Call Frame Instruction. */
172 static inline dw_cfi_ref
175 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
177 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
178 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
183 /* Generate a new label for the CFI info to refer to. */
186 dwarf2out_cfi_label (void)
188 int num = dwarf2out_cfi_label_num++;
191 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
193 return xstrdup (label);
196 /* Add CFI either to the current insn stream or to a vector, or both. */
199 add_cfi (dw_cfi_ref cfi)
201 if (emit_cfa_remember)
203 dw_cfi_ref cfi_remember;
205 /* Emit the state save. */
206 emit_cfa_remember = false;
207 cfi_remember = new_cfi ();
208 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
209 add_cfi (cfi_remember);
212 any_cfis_emitted = true;
213 if (cfi_insn != NULL)
215 cfi_insn = emit_note_after (NOTE_INSN_CFI, cfi_insn);
216 NOTE_CFI (cfi_insn) = cfi;
218 if (add_cfi_vec != NULL)
219 VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
222 /* This function fills in a dw_cfa_location structure from a dwarf location
223 descriptor sequence. */
/* NOTE(review): the embedded original line numbers jump (224-225, 237-271,
   273-274, ...) — the body of the switch over location atoms is only
   partially visible in this extraction.  Do not restyle or rebuild this
   function without the complete file.  */
226 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
228 struct dw_loc_descr_struct *ptr;
230 cfa->base_offset = 0;
/* Walk the location-expression list, updating *CFA per opcode.  */
234 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
236 enum dwarf_location_atom op = ptr->dw_loc_opc;
/* DW_OP_reg0..reg31: register number encoded in the opcode itself.  */
272 cfa->reg = op - DW_OP_reg0;
/* DW_OP_regx: register number in the first operand.  */
275 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
/* DW_OP_breg0..breg31: register from opcode, offset from operand 1.  */
309 cfa->reg = op - DW_OP_breg0;
310 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
/* DW_OP_bregx: register from operand 1, offset from operand 2.  */
313 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
314 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
319 case DW_OP_plus_uconst:
320 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
328 /* Find the previous value for the CFA, iteratively. CFI is the opcode
329 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
330 one level of remember/restore state processing. */
/* NOTE(review): interior lines (break statements, braces, additional cases
   and the default case) are missing from this extraction; only the case
   bodies below are visible.  */
333 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
335 switch (cfi->dw_cfi_opc)
/* Offset-only updates: the CFA register is unchanged.  */
337 case DW_CFA_def_cfa_offset:
338 case DW_CFA_def_cfa_offset_sf:
339 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
/* Register-only update: the offset is unchanged.  */
341 case DW_CFA_def_cfa_register:
342 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
/* Full redefinition: both register and offset.  */
345 case DW_CFA_def_cfa_sf:
346 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
347 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
/* CFA given by a full location expression; decode it.  */
349 case DW_CFA_def_cfa_expression:
350 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
/* Single-level remember/restore stack, tracked via REMEMBER->in_use.  */
353 case DW_CFA_remember_state:
354 gcc_assert (!remember->in_use);
356 remember->in_use = 1;
358 case DW_CFA_restore_state:
359 gcc_assert (remember->in_use);
361 remember->in_use = 0;
369 /* The current rule for calculating the DWARF2 canonical frame address. */
370 static dw_cfa_location cfa;
372 /* A copy of the CFA, for comparison purposes. */
373 static dw_cfa_location old_cfa;
375 /* The register used for saving registers to the stack, and its offset
   from the CFA.  (Comment terminator restored; the tail of this comment
   was lost in extraction.)  */
377 static dw_cfa_location cfa_store;
379 /* The current save location around an epilogue. */
380 static dw_cfa_location cfa_remember;
382 /* Like cfa_remember, but a copy of old_cfa. */
383 static dw_cfa_location old_cfa_remember;
385 /* The running total of the size of arguments pushed onto the stack. */
386 static HOST_WIDE_INT args_size;
388 /* The last args_size we actually output. */
389 static HOST_WIDE_INT old_args_size;
391 /* Determine if two dw_cfa_location structures define the same data. */
394 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
396 return (loc1->reg == loc2->reg
397 && loc1->offset == loc2->offset
398 && loc1->indirect == loc2->indirect
399 && (loc1->indirect == 0
400 || loc1->base_offset == loc2->base_offset));
403 /* This routine does the actual work. The CFA is now calculated from
404 the dw_cfa_location structure. */
407 def_cfa_1 (dw_cfa_location *loc_p)
415 if (cfa_store.reg == loc.reg && loc.indirect == 0)
416 cfa_store.offset = loc.offset;
418 loc.reg = DWARF_FRAME_REGNUM (loc.reg);
420 /* If nothing changed, no need to issue any call frame instructions. */
421 if (cfa_equal_p (&loc, &old_cfa))
426 if (loc.reg == old_cfa.reg && !loc.indirect && !old_cfa.indirect)
428 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
429 the CFA register did not change but the offset did. The data
430 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
431 in the assembler via the .cfi_def_cfa_offset directive. */
433 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
435 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
436 cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
439 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
440 else if (loc.offset == old_cfa.offset
441 && old_cfa.reg != INVALID_REGNUM
443 && !old_cfa.indirect)
445 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
446 indicating the CFA register has changed to <register> but the
447 offset has not changed. */
448 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
449 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
453 else if (loc.indirect == 0)
455 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
456 indicating the CFA register has changed to <register> with
457 the specified offset. The data factoring for DW_CFA_def_cfa_sf
458 happens in output_cfi, or in the assembler via the .cfi_def_cfa
461 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
463 cfi->dw_cfi_opc = DW_CFA_def_cfa;
464 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
465 cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
469 /* Construct a DW_CFA_def_cfa_expression instruction to
470 calculate the CFA using a full location expression since no
471 register-offset pair is available. */
472 struct dw_loc_descr_struct *loc_list;
474 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
475 loc_list = build_cfa_loc (&loc, 0);
476 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
483 /* Add the CFI for saving a register. REG is the CFA column number.
484 If SREG is -1, the register is saved at OFFSET from the CFA;
485 otherwise it is saved in SREG. */
488 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
490 dw_fde_ref fde = cfun ? cfun->fde : NULL;
491 dw_cfi_ref cfi = new_cfi ();
493 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
495 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
497 && fde->stack_realign
498 && sreg == INVALID_REGNUM)
500 cfi->dw_cfi_opc = DW_CFA_expression;
501 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
502 cfi->dw_cfi_oprnd2.dw_cfi_loc
503 = build_cfa_aligned_loc (&cfa, offset, fde->stack_realignment);
505 else if (sreg == INVALID_REGNUM)
507 if (need_data_align_sf_opcode (offset))
508 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
509 else if (reg & ~0x3f)
510 cfi->dw_cfi_opc = DW_CFA_offset_extended;
512 cfi->dw_cfi_opc = DW_CFA_offset;
513 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
515 else if (sreg == reg)
516 cfi->dw_cfi_opc = DW_CFA_same_value;
519 cfi->dw_cfi_opc = DW_CFA_register;
520 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
526 /* Given a SET, calculate the amount of stack adjustment it
   contains.  (Comment terminator restored; the tail was lost in
   extraction.)
   NOTE(review): the embedded original line numbers jump throughout this
   function — the enclosing braces, the enum rtx_code declaration, and the
   switch over {PRE,POST}_{MODIFY,INC,DEC} auto-modify codes are missing
   from this extraction.  Consult the complete file before editing.  */
530 stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
531 HOST_WIDE_INT cur_offset)
533 const_rtx src = SET_SRC (pattern);
534 const_rtx dest = SET_DEST (pattern);
535 HOST_WIDE_INT offset = 0;
/* Case 1: an explicit assignment to the stack pointer.  */
538 if (dest == stack_pointer_rtx)
540 code = GET_CODE (src);
542 /* Assume (set (reg sp) (reg whatever)) sets args_size
   level to 0.  (Comment terminator restored.)  */
544 if (code == REG && src != stack_pointer_rtx)
546 offset = -cur_args_size;
547 #ifndef STACK_GROWS_DOWNWARD
550 return offset - cur_offset;
/* Only sp := sp +/- const_int is recognized beyond this point.  */
553 if (! (code == PLUS || code == MINUS)
554 || XEXP (src, 0) != stack_pointer_rtx
555 || !CONST_INT_P (XEXP (src, 1)))
558 /* (set (reg sp) (plus (reg sp) (const_int))) */
559 offset = INTVAL (XEXP (src, 1));
/* Case 2: a push/pop through an auto-modified sp address.  */
565 if (MEM_P (src) && !MEM_P (dest))
569 /* (set (mem (pre_dec (reg sp))) (foo)) */
570 src = XEXP (dest, 0);
571 code = GET_CODE (src);
/* {PRE,POST}_MODIFY: adjustment amount is the constant addend.  */
577 if (XEXP (src, 0) == stack_pointer_rtx)
579 rtx val = XEXP (XEXP (src, 1), 1);
580 /* We handle only adjustments by constant amount. */
581 gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
582 && CONST_INT_P (val));
583 offset = -INTVAL (val);
/* PRE_DEC/POST_DEC: sp moves down by the access size.  */
590 if (XEXP (src, 0) == stack_pointer_rtx)
592 offset = GET_MODE_SIZE (GET_MODE (dest));
/* PRE_INC/POST_INC: sp moves up by the access size.  */
599 if (XEXP (src, 0) == stack_pointer_rtx)
601 offset = -GET_MODE_SIZE (GET_MODE (dest));
616 /* Precomputed args_size for CODE_LABELs and BARRIERs preceeding them,
617 indexed by INSN_UID. */
619 static HOST_WIDE_INT *barrier_args_size;
621 /* Helper function for compute_barrier_args_size. Handle one insn. */
/* NOTE(review): interior lines are missing from this extraction (braces,
   early returns, the STACK_GROWS_DOWNWARD adjustment body, and the jump
   handling around line 676).  Consult the complete file before editing.  */
624 compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
625 VEC (rtx, heap) **next)
627 HOST_WIDE_INT offset = 0;
/* Non-frame-related insns: derive the adjustment from the pattern.  */
630 if (! RTX_FRAME_RELATED_P (insn))
632 if (prologue_epilogue_contains (insn))
634 else if (GET_CODE (PATTERN (insn)) == SET)
635 offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
636 else if (GET_CODE (PATTERN (insn)) == PARALLEL
637 || GET_CODE (PATTERN (insn)) == SEQUENCE)
639 /* There may be stack adjustments inside compound insns. Search
   all of them.  (Comment terminator restored.)  */
641 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
642 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
643 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
644 cur_args_size, offset);
/* Frame-related insns: prefer the REG_FRAME_RELATED_EXPR note.  */
649 rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
653 expr = XEXP (expr, 0);
654 if (GET_CODE (expr) == PARALLEL
655 || GET_CODE (expr) == SEQUENCE)
656 for (i = 1; i < XVECLEN (expr, 0); i++)
658 rtx elem = XVECEXP (expr, 0, i);
660 if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
661 offset += stack_adjust_offset (elem, cur_args_size, offset);
666 #ifndef STACK_GROWS_DOWNWARD
670 cur_args_size += offset;
671 if (cur_args_size < 0)
/* Propagate the computed size to the jump target and queue it.  */
676 rtx dest = JUMP_LABEL (insn);
680 if (barrier_args_size [INSN_UID (dest)] < 0)
682 barrier_args_size [INSN_UID (dest)] = cur_args_size;
683 VEC_safe_push (rtx, heap, *next, dest);
688 return cur_args_size;
691 /* Walk the whole function and compute args_size on BARRIERs. */
/* NOTE(review): this extraction is missing interior lines (the insn
   declaration, several continue/break statements, braces, and parts of
   the SEQUENCE handling).  The visible structure is a worklist-driven
   forward propagation of args_size, with -1 marking "not yet visited"
   in barrier_args_size.  Consult the complete file before editing.  */
694 compute_barrier_args_size (void)
696 int max_uid = get_max_uid (), i;
698 VEC (rtx, heap) *worklist, *next, *tmp;
700 barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
701 for (i = 0; i < max_uid; i++)
702 barrier_args_size[i] = -1;
704 worklist = VEC_alloc (rtx, heap, 20)
705 next = VEC_alloc (rtx, heap, 20);
707 barrier_args_size[INSN_UID (insn)] = 0;
708 VEC_quick_push (rtx, worklist, insn);
711 while (!VEC_empty (rtx, worklist))
713 rtx prev, body, first_insn;
714 HOST_WIDE_INT cur_args_size;
716 first_insn = insn = VEC_pop (rtx, worklist);
717 cur_args_size = barrier_args_size[INSN_UID (insn)];
718 prev = prev_nonnote_insn (insn);
/* A BARRIER immediately before a label shares the label's size.  */
719 if (prev && BARRIER_P (prev))
720 barrier_args_size[INSN_UID (prev)] = cur_args_size;
722 for (; insn; insn = NEXT_INSN (insn))
724 if (INSN_DELETED_P (insn) || NOTE_P (insn))
726 if (BARRIER_P (insn))
731 if (insn == first_insn)
733 else if (barrier_args_size[INSN_UID (insn)] < 0)
735 barrier_args_size[INSN_UID (insn)] = cur_args_size;
740 /* The insns starting with this label have been
741 already scanned or are in the worklist. */
746 body = PATTERN (insn);
/* Delay-slot SEQUENCE: annulled-branch slots use the branch target's
   args_size, others use the fall-through size.  */
747 if (GET_CODE (body) == SEQUENCE)
749 HOST_WIDE_INT dest_args_size = cur_args_size;
750 for (i = 1; i < XVECLEN (body, 0); i++)
751 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
752 && INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
754 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
755 dest_args_size, &next);
758 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
759 cur_args_size, &next);
761 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
762 compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
763 dest_args_size, &next);
766 = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
767 cur_args_size, &next);
771 = compute_barrier_args_size_1 (insn, cur_args_size, &next);
775 if (VEC_empty (rtx, next))
778 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
782 VEC_truncate (rtx, next, 0);
785 VEC_free (rtx, heap, worklist);
786 VEC_free (rtx, heap, next);
789 /* Add a CFI to update the running total of the size of arguments
790 pushed onto the stack. */
793 dwarf2out_args_size (HOST_WIDE_INT size)
797 if (size == old_args_size)
800 old_args_size = size;
803 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
804 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
808 /* Record a stack adjustment of OFFSET bytes. */
811 dwarf2out_stack_adjust (HOST_WIDE_INT offset)
813 if (cfa.reg == STACK_POINTER_REGNUM)
814 cfa.offset += offset;
816 if (cfa_store.reg == STACK_POINTER_REGNUM)
817 cfa_store.offset += offset;
819 if (ACCUMULATE_OUTGOING_ARGS)
822 #ifndef STACK_GROWS_DOWNWARD
831 if (flag_asynchronous_unwind_tables)
832 dwarf2out_args_size (args_size);
835 /* Check INSN to see if it looks like a push or a stack adjustment, and
836 make a note of it if it does. EH uses this information to find out
837 how much extra space it needs to pop off the stack. */
/* NOTE(review): interior lines are missing from this extraction (early
   returns, braces, the final_sequence guard's opening condition, and the
   STACK_GROWS_DOWNWARD adjustment body).  Consult the complete file
   before editing.  */
840 dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
842 HOST_WIDE_INT offset;
845 /* Don't handle epilogues at all. Certainly it would be wrong to do so
846 with this function. Proper support would require all frame-related
847 insns to be marked, and to be able to handle saving state around
848 epilogues textually in the middle of the function. */
849 if (prologue_epilogue_contains (insn))
852 /* If INSN is an instruction from target of an annulled branch, the
853 effects are for the target only and so current argument size
854 shouldn't change at all. */
856 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
857 && INSN_FROM_TARGET_P (insn))
860 /* If only calls can throw, and we have a frame pointer,
861 save up adjustments until we see the CALL_INSN. */
862 if (!flag_asynchronous_unwind_tables && cfa.reg != STACK_POINTER_REGNUM)
864 if (CALL_P (insn) && !after_p)
866 /* Extract the size of the args from the CALL rtx itself. */
867 insn = PATTERN (insn);
868 if (GET_CODE (insn) == PARALLEL)
869 insn = XVECEXP (insn, 0, 0);
870 if (GET_CODE (insn) == SET)
871 insn = SET_SRC (insn);
872 gcc_assert (GET_CODE (insn) == CALL);
873 dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
/* Synchronous tables: flush args_size at call sites only.  */
878 if (CALL_P (insn) && !after_p)
880 if (!flag_asynchronous_unwind_tables)
881 dwarf2out_args_size (args_size);
884 else if (BARRIER_P (insn))
886 /* Don't call compute_barrier_args_size () if the only
887 BARRIER is at the end of function. */
888 if (barrier_args_size == NULL && next_nonnote_insn (insn))
889 compute_barrier_args_size ();
890 if (barrier_args_size == NULL)
894 offset = barrier_args_size[INSN_UID (insn)];
900 #ifndef STACK_GROWS_DOWNWARD
/* Otherwise derive the adjustment directly from the insn pattern.  */
904 else if (GET_CODE (PATTERN (insn)) == SET)
905 offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
906 else if (GET_CODE (PATTERN (insn)) == PARALLEL
907 || GET_CODE (PATTERN (insn)) == SEQUENCE)
909 /* There may be stack adjustments inside compound insns. Search
   all of them.  (Comment terminator restored.)  */
911 for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
912 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
913 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
922 dwarf2out_stack_adjust (offset);
925 /* We delay emitting a register save until either (a) we reach the end
926 of the prologue or (b) the register is clobbered. This clusters
927 register saves so that there are fewer pc advances. */
929 struct GTY(()) queued_reg_save {
930 struct queued_reg_save *next;
932 HOST_WIDE_INT cfa_offset;
936 static GTY(()) struct queued_reg_save *queued_reg_saves;
938 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
939 typedef struct GTY(()) reg_saved_in_data {
944 DEF_VEC_O (reg_saved_in_data);
945 DEF_VEC_ALLOC_O (reg_saved_in_data, gc);
947 /* A set of registers saved in other registers. This is implemented as
948 a flat array because it normally contains zero or 1 entry, depending
949 on the target. IA-64 is the big spender here, using a maximum of
   5 entries.  (Comment terminator restored; the tail of this comment
   was lost in extraction.)  */
951 static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;
/* When the return column is saved in a register in the CIE prologue,
   its mapping is remembered here (NULL otherwise).  */
953 static GTY(()) reg_saved_in_data *cie_return_save;
955 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
958 compare_reg_or_pc (rtx x, rtx y)
960 if (REG_P (x) && REG_P (y))
961 return REGNO (x) == REGNO (y);
965 /* Record SRC as being saved in DEST. DEST may be null to delete an
966 existing entry. SRC may be a register or PC_RTX. */
/* NOTE(review): interior lines (braces, the index declaration, early
   returns, and the deletion path's return) are missing from this
   extraction.  Visible flow: find an existing entry for SRC; if DEST is
   null remove it, else update it; otherwise push a new entry.  */
969 record_reg_saved_in_reg (rtx dest, rtx src)
971 reg_saved_in_data *elt;
974 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
975 if (compare_reg_or_pc (elt->orig_reg, src))
/* DEST == NULL: drop the mapping (order is irrelevant).  */
978 VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
980 elt->saved_in_reg = dest;
/* No existing entry: append a fresh one.  */
987 elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
989 elt->saved_in_reg = dest;
992 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
993 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
/* NOTE(review): interior lines (braces, the loop's break, and the
   q->reg assignment around line 1013) are missing from this extraction.  */
996 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
998 struct queued_reg_save *q;
1000 /* Duplicates waste space, but it's also necessary to remove them
1001 for correctness, since the queue gets output in reverse order. */
1002 for (q = queued_reg_saves; q != NULL; q = q->next)
1003 if (compare_reg_or_pc (q->reg, reg))
/* No existing entry: allocate and link at the head of the queue.  */
1008 q = ggc_alloc_queued_reg_save ();
1009 q->next = queued_reg_saves;
1010 queued_reg_saves = q;
1014 q->cfa_offset = offset;
1015 q->saved_reg = sreg;
1018 /* Output all the entries in QUEUED_REG_SAVES. */
/* NOTE(review): interior lines (braces and the if/else structure around
   the sreg selection) are missing from this extraction.  */
1021 dwarf2out_flush_queued_reg_saves (void)
1023 struct queued_reg_save *q;
1025 for (q = queued_reg_saves; q; q = q->next)
1027 unsigned int reg, sreg;
1029 record_reg_saved_in_reg (q->saved_reg, q->reg);
/* PC_RTX stands for the return address column.  */
1031 if (q->reg == pc_rtx)
1032 reg = DWARF_FRAME_RETURN_COLUMN;
1034 reg = DWARF_FRAME_REGNUM (REGNO (q->reg));
/* A null saved_reg means "saved at cfa_offset", not in a register.  */
1036 sreg = DWARF_FRAME_REGNUM (REGNO (q->saved_reg));
1038 sreg = INVALID_REGNUM;
1039 reg_save (reg, sreg, q->cfa_offset);
1042 queued_reg_saves = NULL;
1045 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1046 location for? Or, does it clobber a register which we've previously
1047 said that some other register is saved in, and for which we now
1048 have a new location for? */
/* NOTE(review): interior lines (the return statements, braces, and the
   index declaration) are missing from this extraction.  Visible flow:
   answer true if INSN modifies a queued register, or modifies the
   register some queued register was previously recorded as saved in.  */
1051 clobbers_queued_reg_save (const_rtx insn)
1053 struct queued_reg_save *q;
1055 for (q = queued_reg_saves; q; q = q->next)
1058 reg_saved_in_data *rir;
1060 if (modified_in_p (q->reg, insn))
1063 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1064 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1065 && modified_in_p (rir->saved_in_reg, insn))
1072 /* What register, if any, is currently saved in REG? */
/* NOTE(review): interior lines (braces, the queued-save return around
   line 1084, the index declaration, and the final "not found" return)
   are missing from this extraction.  Checks the pending queue first,
   then the committed regs_saved_in_regs table.  */
1075 reg_saved_in (rtx reg)
1077 unsigned int regn = REGNO (reg);
1078 struct queued_reg_save *q;
1079 reg_saved_in_data *rir;
1082 for (q = queued_reg_saves; q; q = q->next)
1083 if (q->saved_reg && regn == REGNO (q->saved_reg))
1086 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1087 if (regn == REGNO (rir->saved_in_reg))
1088 return rir->orig_reg;
1094 /* A temporary register holding an integral value used in adjusting SP
1095 or setting up the store_reg. The "offset" field holds the integer
1096 value, not an offset. */
1097 static dw_cfa_location cfa_temp;
1099 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
/* NOTE(review): interior lines (case labels, break statements, braces,
   the indirect flag assignment, and the trailing def_cfa_1 call) are
   missing from this extraction.  Visible cases: PLUS (reg + const),
   bare REG, MEM (indirect, possibly reg + const inside), with a fallback
   that defines a full expression.  */
1102 dwarf2out_frame_debug_def_cfa (rtx pat)
1104 memset (&cfa, 0, sizeof (cfa));
1106 switch (GET_CODE (pat))
/* (plus reg const): CFA = reg + offset.  */
1109 cfa.reg = REGNO (XEXP (pat, 0));
1110 cfa.offset = INTVAL (XEXP (pat, 1));
/* Bare register: CFA = reg.  */
1114 cfa.reg = REGNO (pat);
/* MEM: an indirect CFA; peel the address.  */
1119 pat = XEXP (pat, 0);
1120 if (GET_CODE (pat) == PLUS)
1122 cfa.base_offset = INTVAL (XEXP (pat, 1));
1123 pat = XEXP (pat, 0);
1125 cfa.reg = REGNO (pat);
1129 /* Recurse and define an expression. */
1136 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
/* NOTE(review): interior lines (local rtx src/dest declarations, case
   labels, the MINUS handling around lines 1152-1160, and the trailing
   def_cfa_1 call) are missing from this extraction.  */
1139 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1143 gcc_assert (GET_CODE (pat) == SET);
1144 dest = XEXP (pat, 0);
1145 src = XEXP (pat, 1);
1147 switch (GET_CODE (src))
/* Source must be based on the current CFA register.  */
1150 gcc_assert (REGNO (XEXP (src, 0)) == cfa.reg);
1151 cfa.offset -= INTVAL (XEXP (src, 1));
/* The destination register becomes the new CFA register.  */
1161 cfa.reg = REGNO (dest);
1162 gcc_assert (cfa.indirect == 0);
1167 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
/* NOTE(review): interior lines (case labels for REG/PLUS, gcc_unreachable
   default, the pc_rtx test before line 1199, braces, and the loop-local
   declarations) are missing from this extraction.  */
1170 dwarf2out_frame_debug_cfa_offset (rtx set)
1172 HOST_WIDE_INT offset;
1173 rtx src, addr, span;
1174 unsigned int sregno;
1176 src = XEXP (set, 1);
1177 addr = XEXP (set, 0);
1178 gcc_assert (MEM_P (addr));
1179 addr = XEXP (addr, 0);
1181 /* As documented, only consider extremely simple addresses. */
1182 switch (GET_CODE (addr))
/* (reg): stored directly at the CFA.  */
1185 gcc_assert (REGNO (addr) == cfa.reg);
1186 offset = -cfa.offset;
/* (plus reg const): stored at CFA + const.  */
1189 gcc_assert (REGNO (XEXP (addr, 0)) == cfa.reg);
1190 offset = INTVAL (XEXP (addr, 1)) - cfa.offset;
/* pc_rtx source means the return address column.  */
1199 sregno = DWARF_FRAME_RETURN_COLUMN;
1203 span = targetm.dwarf_register_span (src);
1204 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1207 /* ??? We'd like to use queue_reg_save, but we need to come up with
1208 a different flushing heuristic for epilogues. */
1210 reg_save (sregno, INVALID_REGNUM, offset);
1213 /* We have a PARALLEL describing where the contents of SRC live.
1214 Queue register saves for each piece of the PARALLEL. */
1217 HOST_WIDE_INT span_offset = offset;
1219 gcc_assert (GET_CODE (span) == PARALLEL);
1221 limit = XVECLEN (span, 0);
1222 for (par_index = 0; par_index < limit; par_index++)
1224 rtx elem = XVECEXP (span, 0, par_index);
/* NOTE(review): REGNO (src) inside this per-ELEM loop looks wrong —
   each piece of the PARALLEL should presumably use REGNO (elem).
   Verify against the complete file before changing.  */
1226 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1227 reg_save (sregno, INVALID_REGNUM, span_offset);
1228 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1233 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1236 dwarf2out_frame_debug_cfa_register (rtx set)
1239 unsigned sregno, dregno;
1241 src = XEXP (set, 1);
1242 dest = XEXP (set, 0);
1244 record_reg_saved_in_reg (dest, src);
1246 sregno = DWARF_FRAME_RETURN_COLUMN;
1248 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1250 dregno = DWARF_FRAME_REGNUM (REGNO (dest));
1252 /* ??? We'd like to use queue_reg_save, but we need to come up with
1253 a different flushing heuristic for epilogues. */
1254 reg_save (sregno, dregno, 0);
1257 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1260 dwarf2out_frame_debug_cfa_expression (rtx set)
1262 rtx src, dest, span;
1263 dw_cfi_ref cfi = new_cfi ();
1265 dest = SET_DEST (set);
1266 src = SET_SRC (set);
1268 gcc_assert (REG_P (src));
1269 gcc_assert (MEM_P (dest));
1271 span = targetm.dwarf_register_span (src);
1274 cfi->dw_cfi_opc = DW_CFA_expression;
1275 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = DWARF_FRAME_REGNUM (REGNO (src));
1276 cfi->dw_cfi_oprnd2.dw_cfi_loc
1277 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1278 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1280 /* ??? We'd like to use queue_reg_save, were the interface different,
1281 and, as above, we could manage flushing for epilogues. */
1285 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1288 dwarf2out_frame_debug_cfa_restore (rtx reg)
1290 dw_cfi_ref cfi = new_cfi ();
1291 unsigned int regno = DWARF_FRAME_REGNUM (REGNO (reg));
1293 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1294 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1299 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1300 ??? Perhaps we should note in the CIE where windows are saved (instead of
1301 assuming 0(cfa)) and what registers are in the window. */
1304 dwarf2out_frame_debug_cfa_window_save (void)
1306 dw_cfi_ref cfi = new_cfi ();
1308 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1312 /* Record call frame debugging information for an expression EXPR,
1313 which either sets SP or FP (adjusting how we calculate the frame
1314 address) or saves a register to the stack or another register.
1315 LABEL indicates the address of EXPR.
1317 This function encodes a state machine mapping rtxes to actions on
1318 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1319 users need not read the source code.
1321 The High-Level Picture
1323 Changes in the register we use to calculate the CFA: Currently we
1324 assume that if you copy the CFA register into another register, we
1325 should take the other one as the new CFA register; this seems to
1326 work pretty well. If it's wrong for some target, it's simple
1327 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1329 Changes in the register we use for saving registers to the stack:
1330 This is usually SP, but not always. Again, we deduce that if you
1331 copy SP into another register (and SP is not the CFA register),
1332 then the new register is the one we will be using for register
1333 saves. This also seems to work.
1335 Register saves: There's not much guesswork about this one; if
1336 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1337 register save, and the register used to calculate the destination
1338 had better be the one we think we're using for this purpose.
1339 It's also assumed that a copy from a call-saved register to another
1340 register is saving that register if RTX_FRAME_RELATED_P is set on
1341 that instruction. If the copy is from a call-saved register to
1342 the *same* register, that means that the register is now the same
1343 value as in the caller.
1345 Except: If the register being saved is the CFA register, and the
1346 offset is nonzero, we are saving the CFA, so we assume we have to
1347 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1348 the intent is to save the value of SP from the previous frame.
1350 In addition, if a register has previously been saved to a different
1353 Invariants / Summaries of Rules
1355 cfa current rule for calculating the CFA. It usually
1356 consists of a register and an offset.
1357 cfa_store register used by prologue code to save things to the stack
1358 cfa_store.offset is the offset from the value of
1359 cfa_store.reg to the actual CFA
1360 cfa_temp register holding an integral value. cfa_temp.offset
1361 stores the value, which will be used to adjust the
1362 stack pointer. cfa_temp is also used like cfa_store,
1363 to track stores to the stack via fp or a temp reg.
1365 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1366 with cfa.reg as the first operand changes the cfa.reg and its
1367 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1370 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1371 expression yielding a constant. This sets cfa_temp.reg
1372 and cfa_temp.offset.
1374 Rule 5: Create a new register cfa_store used to save items to the
1377 Rules 10-14: Save a register to the stack. Define offset as the
1378 difference of the original location and cfa_store's
1379 location (or cfa_temp's location if cfa_temp is used).
1381 Rules 16-20: If AND operation happens on sp in prologue, we assume
1382 stack is realigned. We will use a group of DW_OP_XXX
1383 expressions to represent the location of the stored
1384 register instead of CFA+offset.
1388 "{a,b}" indicates a choice of a xor b.
1389 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1392 (set <reg1> <reg2>:cfa.reg)
1393 effects: cfa.reg = <reg1>
1394 cfa.offset unchanged
1395 cfa_temp.reg = <reg1>
1396 cfa_temp.offset = cfa.offset
1399 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1400 {<const_int>,<reg>:cfa_temp.reg}))
1401 effects: cfa.reg = sp if fp used
1402 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1403 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1404 if cfa_store.reg==sp
1407 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1408 effects: cfa.reg = fp
1409 cfa.offset += +/- <const_int>
1412 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1413 constraints: <reg1> != fp
1415 effects: cfa.reg = <reg1>
1416 cfa_temp.reg = <reg1>
1417 cfa_temp.offset = cfa.offset
1420 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1421 constraints: <reg1> != fp
1423 effects: cfa_store.reg = <reg1>
1424 cfa_store.offset = cfa.offset - cfa_temp.offset
1427 (set <reg> <const_int>)
1428 effects: cfa_temp.reg = <reg>
1429 cfa_temp.offset = <const_int>
1432 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1433 effects: cfa_temp.reg = <reg1>
1434 cfa_temp.offset |= <const_int>
1437 (set <reg> (high <exp>))
1441 (set <reg> (lo_sum <exp> <const_int>))
1442 effects: cfa_temp.reg = <reg>
1443 cfa_temp.offset = <const_int>
1446 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1447 effects: cfa_store.offset -= <const_int>
1448 cfa.offset = cfa_store.offset if cfa.reg == sp
1450 cfa.base_offset = -cfa_store.offset
1453 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1454 effects: cfa_store.offset += -/+ mode_size(mem)
1455 cfa.offset = cfa_store.offset if cfa.reg == sp
1457 cfa.base_offset = -cfa_store.offset
1460 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1463 effects: cfa.reg = <reg1>
1464 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1467 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1468 effects: cfa.reg = <reg1>
1469 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1472 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1473 effects: cfa.reg = <reg1>
1474 cfa.base_offset = -cfa_temp.offset
1475 cfa_temp.offset -= mode_size(mem)
1478 (set <reg> {unspec, unspec_volatile})
1479 effects: target-dependent
1482 (set sp (and: sp <const_int>))
1483 constraints: cfa_store.reg == sp
1484 effects: cfun->fde.stack_realign = 1
1485 cfa_store.offset = 0
1486 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1489 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1490 effects: cfa_store.offset += -/+ mode_size(mem)
1493 (set (mem ({pre_inc, pre_dec} sp)) fp)
1494 constraints: fde->stack_realign == 1
1495 effects: cfa_store.offset = 0
1496 cfa.reg != HARD_FRAME_POINTER_REGNUM
1499 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1500 constraints: fde->stack_realign == 1
1502 && cfa.indirect == 0
1503 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1504 effects: Use DW_CFA_def_cfa_expression to define cfa
1505 cfa.reg == fde->drap_reg */
/* Interpret one frame-related expression EXPR -- a SET, or a PARALLEL /
   SEQUENCE of SETs -- and update the running cfa / cfa_store / cfa_temp
   state machine according to the numbered rule table documented above.
   Emits CFI (via def_cfa_1) and queues register saves (queue_reg_save)
   as side effects.  */
1508 dwarf2out_frame_debug_expr (rtx expr)
1510 rtx src, dest, span;
1511 HOST_WIDE_INT offset;
1514 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1515 the PARALLEL independently. The first element is always processed if
1516 it is a SET. This is for backward compatibility. Other elements
1517 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1518 flag is set in them. */
1519 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1522 int limit = XVECLEN (expr, 0);
1525 /* PARALLELs have strict read-modify-write semantics, so we
1526 ought to evaluate every rvalue before changing any lvalue.
1527 It's cumbersome to do that in general, but there's an
1528 easy approximation that is enough for all current users:
1529 handle register saves before register assignments. */
1530 if (GET_CODE (expr) == PARALLEL)
/* First pass: recurse on the MEM-destination (register save) elements.  */
1531 for (par_index = 0; par_index < limit; par_index++)
1533 elem = XVECEXP (expr, 0, par_index);
1534 if (GET_CODE (elem) == SET
1535 && MEM_P (SET_DEST (elem))
1536 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1537 dwarf2out_frame_debug_expr (elem);
/* Second pass: the remaining (register assignment) elements.  */
1540 for (par_index = 0; par_index < limit; par_index++)
1542 elem = XVECEXP (expr, 0, par_index);
1543 if (GET_CODE (elem) == SET
1544 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1545 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1546 dwarf2out_frame_debug_expr (elem);
1547 else if (GET_CODE (elem) == SET
1549 && !RTX_FRAME_RELATED_P (elem))
1551 /* Stack adjustment combining might combine some post-prologue
1552 stack adjustment into a prologue stack adjustment. */
1553 HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);
1556 dwarf2out_stack_adjust (offset);
/* From here on EXPR must be a single SET.  */
1562 gcc_assert (GET_CODE (expr) == SET);
1564 src = SET_SRC (expr);
1565 dest = SET_DEST (expr);
1569 rtx rsi = reg_saved_in (src);
1576 switch (GET_CODE (dest))
1579 switch (GET_CODE (src))
1581 /* Setting FP from SP. */
1583 if (cfa.reg == (unsigned) REGNO (src))
1586 /* Update the CFA rule wrt SP or FP. Make sure src is
1587 relative to the current CFA register.
1589 We used to require that dest be either SP or FP, but the
1590 ARM copies SP to a temporary register, and from there to
1591 FP. So we just rely on the backends to only set
1592 RTX_FRAME_RELATED_P on appropriate insns. */
1593 cfa.reg = REGNO (dest);
1594 cfa_temp.reg = cfa.reg;
1595 cfa_temp.offset = cfa.offset;
1599 /* Saving a register in a register. */
1600 gcc_assert (!fixed_regs [REGNO (dest)]
1601 /* For the SPARC and its register window. */
1602 || (DWARF_FRAME_REGNUM (REGNO (src))
1603 == DWARF_FRAME_RETURN_COLUMN));
1605 /* After stack is aligned, we can only save SP in FP
1606 if drap register is used. In this case, we have
1607 to restore stack pointer with the CFA value and we
1608 don't generate this DWARF information. */
1610 && fde->stack_realign
1611 && REGNO (src) == STACK_POINTER_REGNUM)
1612 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1613 && fde->drap_reg != INVALID_REGNUM
1614 && cfa.reg != REGNO (src));
1616 queue_reg_save (src, dest, 0);
/* Adjusting SP -- Rule 2 in the table above.  */
1623 if (dest == stack_pointer_rtx)
1627 switch (GET_CODE (XEXP (src, 1)))
1630 offset = INTVAL (XEXP (src, 1));
1633 gcc_assert ((unsigned) REGNO (XEXP (src, 1))
1635 offset = cfa_temp.offset;
1641 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1643 /* Restoring SP from FP in the epilogue. */
1644 gcc_assert (cfa.reg == (unsigned) HARD_FRAME_POINTER_REGNUM);
1645 cfa.reg = STACK_POINTER_REGNUM;
1647 else if (GET_CODE (src) == LO_SUM)
1648 /* Assume we've set the source reg of the LO_SUM from sp. */
1651 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
/* For PLUS/LO_SUM the offset is applied as-is; MINUS is left negated
   by the (elided) sign flip above -- NOTE(review): confirm against the
   full source.  */
1653 if (GET_CODE (src) != MINUS)
1655 if (cfa.reg == STACK_POINTER_REGNUM)
1656 cfa.offset += offset;
1657 if (cfa_store.reg == STACK_POINTER_REGNUM)
1658 cfa_store.offset += offset;
/* Setting or adjusting FP -- Rule 3.  */
1660 else if (dest == hard_frame_pointer_rtx)
1663 /* Either setting the FP from an offset of the SP,
1664 or adjusting the FP */
1665 gcc_assert (frame_pointer_needed);
1667 gcc_assert (REG_P (XEXP (src, 0))
1668 && (unsigned) REGNO (XEXP (src, 0)) == cfa.reg
1669 && CONST_INT_P (XEXP (src, 1)));
1670 offset = INTVAL (XEXP (src, 1));
1671 if (GET_CODE (src) != MINUS)
1673 cfa.offset += offset;
1674 cfa.reg = HARD_FRAME_POINTER_REGNUM;
1678 gcc_assert (GET_CODE (src) != MINUS);
/* Temporary CFA register -- Rule 4.  */
1681 if (REG_P (XEXP (src, 0))
1682 && REGNO (XEXP (src, 0)) == cfa.reg
1683 && CONST_INT_P (XEXP (src, 1)))
1685 /* Setting a temporary CFA register that will be copied
1686 into the FP later on. */
1687 offset = - INTVAL (XEXP (src, 1));
1688 cfa.offset += offset;
1689 cfa.reg = REGNO (dest);
1690 /* Or used to save regs to the stack. */
1691 cfa_temp.reg = cfa.reg;
1692 cfa_temp.offset = cfa.offset;
/* New cfa_store register -- Rule 5.  */
1696 else if (REG_P (XEXP (src, 0))
1697 && REGNO (XEXP (src, 0)) == cfa_temp.reg
1698 && XEXP (src, 1) == stack_pointer_rtx)
1700 /* Setting a scratch register that we will use instead
1701 of SP for saving registers to the stack. */
1702 gcc_assert (cfa.reg == STACK_POINTER_REGNUM)
1703 cfa_store.reg = REGNO (dest);
1704 cfa_store.offset = cfa.offset - cfa_temp.offset;
/* LO_SUM with constant -- Rule 9.  */
1708 else if (GET_CODE (src) == LO_SUM
1709 && CONST_INT_P (XEXP (src, 1)))
1711 cfa_temp.reg = REGNO (dest);
1712 cfa_temp.offset = INTVAL (XEXP (src, 1));
/* Constant load into a register -- Rule 6.  */
1721 cfa_temp.reg = REGNO (dest);
1722 cfa_temp.offset = INTVAL (src);
/* IOR of cfa_temp with a constant -- Rule 7.  */
1727 gcc_assert (REG_P (XEXP (src, 0))
1728 && (unsigned) REGNO (XEXP (src, 0)) == cfa_temp.reg
1729 && CONST_INT_P (XEXP (src, 1)));
1731 if ((unsigned) REGNO (dest) != cfa_temp.reg)
1732 cfa_temp.reg = REGNO (dest);
1733 cfa_temp.offset |= INTVAL (XEXP (src, 1));
1736 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1737 which will fill in all of the bits. */
1744 case UNSPEC_VOLATILE:
1745 /* All unspecs should be represented by REG_CFA_* notes. */
/* AND of SP with a constant: stack realignment -- Rule 16.  */
1751 /* If this AND operation happens on stack pointer in prologue,
1752 we assume the stack is realigned and we extract the
1754 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1756 /* We interpret reg_save differently with stack_realign set.
1757 Thus we must flush whatever we have queued first. */
1758 dwarf2out_flush_queued_reg_saves ();
1760 gcc_assert (cfa_store.reg == REGNO (XEXP (src, 0)));
1761 fde->stack_realign = 1;
1762 fde->stack_realignment = INTVAL (XEXP (src, 1));
1763 cfa_store.offset = 0;
1765 if (cfa.reg != STACK_POINTER_REGNUM
1766 && cfa.reg != HARD_FRAME_POINTER_REGNUM)
1767 fde->drap_reg = cfa.reg;
/* MEM destination: saving a register to the stack -- Rules 10-14.  */
1780 /* Saving a register to the stack. Make sure dest is relative to the
1782 switch (GET_CODE (XEXP (dest, 0)))
/* {PRE,POST}_MODIFY -- Rule 11.  */
1788 /* We can't handle variable size modifications. */
1789 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1791 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1793 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1794 && cfa_store.reg == STACK_POINTER_REGNUM);
1796 cfa_store.offset += offset;
1797 if (cfa.reg == STACK_POINTER_REGNUM)
1798 cfa.offset = cfa_store.offset;
1800 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1801 offset -= cfa_store.offset;
1803 offset = -cfa_store.offset;
/* {PRE,POST}_{INC,DEC} of SP -- Rule 12.  */
1810 offset = GET_MODE_SIZE (GET_MODE (dest));
1811 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1814 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1815 == STACK_POINTER_REGNUM)
1816 && cfa_store.reg == STACK_POINTER_REGNUM);
1818 cfa_store.offset += offset;
1820 /* Rule 18: If stack is aligned, we will use FP as a
1821 reference to represent the address of the stored
1824 && fde->stack_realign
1825 && src == hard_frame_pointer_rtx)
1827 gcc_assert (cfa.reg != HARD_FRAME_POINTER_REGNUM);
1828 cfa_store.offset = 0;
1831 if (cfa.reg == STACK_POINTER_REGNUM)
1832 cfa.offset = cfa_store.offset;
1834 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1835 offset += -cfa_store.offset;
1837 offset = -cfa_store.offset;
/* Address with constant offset -- Rule 13.  */
1841 /* With an offset. */
1848 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1849 && REG_P (XEXP (XEXP (dest, 0), 0)));
1850 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1851 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1854 regno = REGNO (XEXP (XEXP (dest, 0), 0));
1856 if (cfa.reg == (unsigned) regno)
1857 offset -= cfa.offset;
1858 else if (cfa_store.reg == (unsigned) regno)
1859 offset -= cfa_store.offset;
1862 gcc_assert (cfa_temp.reg == (unsigned) regno);
1863 offset -= cfa_temp.offset;
/* Plain register address -- Rule 14.  */
1869 /* Without an offset. */
1872 int regno = REGNO (XEXP (dest, 0));
1874 if (cfa.reg == (unsigned) regno)
1875 offset = -cfa.offset;
1876 else if (cfa_store.reg == (unsigned) regno)
1877 offset = -cfa_store.offset;
1880 gcc_assert (cfa_temp.reg == (unsigned) regno);
1881 offset = -cfa_temp.offset;
/* POST_INC of cfa_temp -- Rule 15.  */
1888 gcc_assert (cfa_temp.reg
1889 == (unsigned) REGNO (XEXP (XEXP (dest, 0), 0)));
1890 offset = -cfa_temp.offset;
1891 cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1899 /* If the source operand of this MEM operation is a memory,
1900 we only care how much stack grew. */
1905 && REGNO (src) != STACK_POINTER_REGNUM
1906 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1907 && (unsigned) REGNO (src) == cfa.reg)
1909 /* We're storing the current CFA reg into the stack. */
1911 if (cfa.offset == 0)
1914 /* If stack is aligned, putting CFA reg into stack means
1915 we can no longer use reg + offset to represent CFA.
1916 Here we use DW_CFA_def_cfa_expression instead. The
1917 result of this expression equals to the original CFA
1920 && fde->stack_realign
1921 && cfa.indirect == 0
1922 && cfa.reg != HARD_FRAME_POINTER_REGNUM)
1924 dw_cfa_location cfa_exp;
1926 gcc_assert (fde->drap_reg == cfa.reg);
1928 cfa_exp.indirect = 1;
1929 cfa_exp.reg = HARD_FRAME_POINTER_REGNUM;
1930 cfa_exp.base_offset = offset;
1933 fde->drap_reg_saved = 1;
1935 def_cfa_1 (&cfa_exp);
1939 /* If the source register is exactly the CFA, assume
1940 we're saving SP like any other register; this happens
1943 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
1948 /* Otherwise, we'll need to look in the stack to
1949 calculate the CFA. */
1950 rtx x = XEXP (dest, 0);
1954 gcc_assert (REG_P (x));
1956 cfa.reg = REGNO (x);
1957 cfa.base_offset = offset;
/* Ordinary register save: let the target split SRC across multiple
   DWARF registers if it needs to (e.g. paired registers).  */
1968 span = targetm.dwarf_register_span (src);
1970 queue_reg_save (src, NULL_RTX, offset);
1973 /* We have a PARALLEL describing where the contents of SRC live.
1974 Queue register saves for each piece of the PARALLEL. */
1977 HOST_WIDE_INT span_offset = offset;
1979 gcc_assert (GET_CODE (span) == PARALLEL);
1981 limit = XVECLEN (span, 0);
1982 for (par_index = 0; par_index < limit; par_index++)
1984 rtx elem = XVECEXP (span, 0, par_index);
1985 queue_reg_save (elem, NULL_RTX, span_offset);
1986 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1996 /* Record call frame debugging information for INSN, which either
1997 sets SP or FP (adjusting how we calculate the frame address) or saves a
1998 register to the stack. If INSN is NULL_RTX, initialize our state.
2000 If AFTER_P is false, we're being called before the insn is emitted,
2001 otherwise after. Call instructions get invoked twice. */
/* See the comment above: record call frame debug info for INSN, first by
   honoring any explicit REG_CFA_* notes attached to it, and only falling
   back to interpreting the insn pattern itself.  */
2004 dwarf2out_frame_debug (rtx insn, bool after_p)
2007 bool handled_one = false;
2008 bool need_flush = false;
2010 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2011 dwarf2out_flush_queued_reg_saves ();
2013 if (!RTX_FRAME_RELATED_P (insn))
2015 /* ??? This should be done unconditionally since stack adjustments
2016 matter if the stack pointer is not the CFA register anymore but
2017 is still used to save registers. */
2018 if (!ACCUMULATE_OUTGOING_ARGS)
2019 dwarf2out_notice_stack_adjust (insn, after_p);
2023 any_cfis_emitted = false;
/* Dispatch on each REG_CFA_* note; these describe the frame effect
   explicitly and take precedence over the raw pattern.  */
2025 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2026 switch (REG_NOTE_KIND (note))
2028 case REG_FRAME_RELATED_EXPR:
2029 insn = XEXP (note, 0);
2032 case REG_CFA_DEF_CFA:
2033 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2037 case REG_CFA_ADJUST_CFA:
/* A PARALLEL note payload is represented by its first element.  */
2042 if (GET_CODE (n) == PARALLEL)
2043 n = XVECEXP (n, 0, 0);
2045 dwarf2out_frame_debug_adjust_cfa (n);
2049 case REG_CFA_OFFSET:
2052 n = single_set (insn);
2053 dwarf2out_frame_debug_cfa_offset (n);
2057 case REG_CFA_REGISTER:
2062 if (GET_CODE (n) == PARALLEL)
2063 n = XVECEXP (n, 0, 0);
2065 dwarf2out_frame_debug_cfa_register (n);
2069 case REG_CFA_EXPRESSION:
2072 n = single_set (insn);
2073 dwarf2out_frame_debug_cfa_expression (n);
2077 case REG_CFA_RESTORE:
2082 if (GET_CODE (n) == PARALLEL)
2083 n = XVECEXP (n, 0, 0);
2086 dwarf2out_frame_debug_cfa_restore (n);
2090 case REG_CFA_SET_VDRAP:
2094 dw_fde_ref fde = cfun->fde;
2097 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2099 fde->vdrap_reg = REGNO (n);
2105 case REG_CFA_WINDOW_SAVE:
2106 dwarf2out_frame_debug_cfa_window_save ();
2110 case REG_CFA_FLUSH_QUEUE:
2111 /* The actual flush happens below. */
2122 /* Minimize the number of advances by emitting the entire queue
2123 once anything is emitted. */
2124 need_flush |= any_cfis_emitted;
/* Fall back to interpreting the pattern -- presumably only when no
   note handled the insn (guard elided here); confirm in full source.  */
2128 insn = PATTERN (insn);
2130 dwarf2out_frame_debug_expr (insn);
2132 /* Check again. A parallel can save and update the same register.
2133 We could probably check just once, here, but this is safer than
2134 removing the check at the start of the function. */
2135 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2140 dwarf2out_flush_queued_reg_saves ();
2143 /* Examine CFI and return true if a cfi label and set_loc is needed
2144 beforehand. Even when generating CFI assembler instructions, we
2145 still have to add the cfi to the list so that lookup_cfa_1 works
2146 later on. When -g2 and above we even need to force emitting of
2147 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2148 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2149 and so don't use convert_cfa_to_fb_loc_list. */
/* See the comment above: decide whether CFI needs a label/set_loc emitted
   before it.  Only relevant when emitting CFI assembler directives; the
   DWARF2 + -g2 case additionally forces labels for the CFA-changing
   opcodes listed below (used by convert_cfa_to_fb_loc_list).  */
2152 cfi_label_required_p (dw_cfi_ref cfi)
2154 if (!dwarf2out_do_cfi_asm ())
2157 if (dwarf_version == 2
2158 && debug_info_level > DINFO_LEVEL_TERSE
2159 && (write_symbols == DWARF2_DEBUG
2160 || write_symbols == VMS_AND_DWARF2_DEBUG))
/* These opcodes change the CFA rule, so their position matters.  */
2162 switch (cfi->dw_cfi_opc)
2164 case DW_CFA_def_cfa_offset:
2165 case DW_CFA_def_cfa_offset_sf:
2166 case DW_CFA_def_cfa_register:
2167 case DW_CFA_def_cfa:
2168 case DW_CFA_def_cfa_sf:
2169 case DW_CFA_def_cfa_expression:
2170 case DW_CFA_restore_state:
2179 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2180 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
/* Walk the insn stream for NOTE_INSN_CFI notes, pushing each CFI into the
   current function's FDE.  Runs of consecutive CFI notes share a single
   label; a DW_CFA_set_loc (first label) or DW_CFA_advance_loc4 opcode is
   emitted ahead of the run when any CFI in it requires a label.  */
2183 add_cfis_to_fde (void)
2185 dw_fde_ref fde = cfun->fde;
2187 /* We always start with a function_begin label. */
2190 for (insn = get_insns (); insn; insn = next)
2192 next = NEXT_INSN (insn);
2194 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2196 /* Don't attempt to advance_loc4 between labels
2197 in different sections. */
2201 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
/* Coalesce the following run of CFI notes; one label covers them all.  */
2203 bool required = cfi_label_required_p (NOTE_CFI (insn));
2204 while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2206 required |= cfi_label_required_p (NOTE_CFI (next));
2207 next = NEXT_INSN (next);
2211 int num = dwarf2out_cfi_label_num;
2212 const char *label = dwarf2out_cfi_label ();
2216 /* Set the location counter to the new label. */
2218 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2219 : DW_CFA_advance_loc4);
2220 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2221 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);
/* Record the label number in a NOTE_INSN_CFI_LABEL so final () can
   emit the label at the right spot.  */
2223 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2224 NOTE_LABEL_NUMBER (tmp) = num;
2229 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
2230 insn = NEXT_INSN (insn);
2232 while (insn != next);
2238 /* Scan the function and create the initial set of CFI notes. */
/* Scan the whole function once, driving dwarf2out_frame_debug over every
   insn to create the initial set of NOTE_INSN_CFI notes.  cfi_insn tracks
   where new CFI notes should be attached (see add_cfi).  */
2241 create_cfi_notes (void)
2245 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
/* By default, CFI for this insn goes after its predecessor.  */
2249 cfi_insn = PREV_INSN (insn);
2251 if (BARRIER_P (insn))
2253 dwarf2out_frame_debug (insn, false);
2259 switch (NOTE_KIND (insn))
2261 case NOTE_INSN_PROLOGUE_END:
2262 dwarf2out_flush_queued_reg_saves ();
2265 case NOTE_INSN_EPILOGUE_BEG:
2266 #if defined(HAVE_epilogue)
2267 dwarf2out_cfi_begin_epilogue (insn);
2271 case NOTE_INSN_CFA_RESTORE_STATE:
2273 dwarf2out_frame_debug_restore_state ();
2279 if (!NONDEBUG_INSN_P (insn))
2282 pat = PATTERN (insn);
2283 if (asm_noperands (pat) >= 0)
2285 dwarf2out_frame_debug (insn, false);
/* A delay-slot SEQUENCE: process each slot insn individually.  */
2289 if (GET_CODE (pat) == SEQUENCE)
2291 int i, n = XVECLEN (pat, 0);
2292 for (i = 1; i < n; ++i)
2293 dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
2297 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2298 dwarf2out_frame_debug (insn, false);
2300 /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
2301 Putting the note after the VEC should be ok. */
2302 if (!tablejump_p (insn, NULL, &cfi_insn))
/* Second call with after_p == true -- calls are processed twice.  */
2305 dwarf2out_frame_debug (insn, true);
2311 /* Determine if we need to save and restore CFI information around this
2312 epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
2313 we do need to save/restore, then emit the save now, and insert a
2314 NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
/* See the comment above: decide whether this epilogue needs a
   remember/restore_state pair around it, emit the remember, and plant a
   NOTE_INSN_CFA_RESTORE_STATE where the restore belongs.  */
2317 dwarf2out_cfi_begin_epilogue (rtx insn)
2319 bool saw_frp = false;
2322 /* Scan forward to the return insn, noticing if there are possible
2323 frame related insns. */
2324 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
2329 /* Look for both regular and sibcalls to end the block. */
2330 if (returnjump_p (i))
2332 if (CALL_P (i) && SIBLING_CALL_P (i))
2335 if (GET_CODE (PATTERN (i)) == SEQUENCE)
/* Delay-slot SEQUENCE: inspect the branch and every slot insn.  */
2338 rtx seq = PATTERN (i);
2340 if (returnjump_p (XVECEXP (seq, 0, 0)))
2342 if (CALL_P (XVECEXP (seq, 0, 0))
2343 && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
2346 for (idx = 0; idx < XVECLEN (seq, 0); idx++)
2347 if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
2351 if (RTX_FRAME_RELATED_P (i))
2355 /* If the port doesn't emit epilogue unwind info, we don't need a
2356 save/restore pair. */
2360 /* Otherwise, search forward to see if the return insn was the last
2361 basic block of the function. If so, we don't need save/restore. */
2362 gcc_assert (i != NULL);
2363 i = next_real_insn (i);
2367 /* Insert the restore before that next real insn in the stream, and before
2368 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2369 properly nested. This should be after any label or alignment. This
2370 will be pushed into the CFI stream by the function below. */
2373 rtx p = PREV_INSN (i);
2376 if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
2380 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);
/* Ask add_cfi to emit DW_CFA_remember_state before the next CFI.  */
2382 emit_cfa_remember = true;
2384 /* And emulate the state save. */
2385 gcc_assert (!cfa_remember.in_use);
2387 old_cfa_remember = old_cfa;
2388 cfa_remember.in_use = 1;
2391 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
/* Counterpart of dwarf2out_cfi_begin_epilogue: emit DW_CFA_restore_state
   and roll the tracked CFA state back to the remembered snapshot.  */
2395 dwarf2out_frame_debug_restore_state (void)
2397 dw_cfi_ref cfi = new_cfi ();
2399 cfi->dw_cfi_opc = DW_CFA_restore_state;
/* The corresponding remember must have been recorded earlier.  */
2402 gcc_assert (cfa_remember.in_use);
2404 old_cfa = old_cfa_remember;
2405 cfa_remember.in_use = 0;
2408 /* Record the initial position of the return address. RTL is
2409 INCOMING_RETURN_ADDR_RTX. */
/* See the comment above: record where the return address lives on entry,
   given RTL = INCOMING_RETURN_ADDR_RTX (a REG, a MEM off SP, or a PLUS).  */
2412 initial_return_save (rtx rtl)
2414 unsigned int reg = INVALID_REGNUM;
2415 HOST_WIDE_INT offset = 0;
2417 switch (GET_CODE (rtl))
2420 /* RA is in a register. */
2421 reg = DWARF_FRAME_REGNUM (REGNO (rtl));
2425 /* RA is on the stack. */
2426 rtl = XEXP (rtl, 0);
2427 switch (GET_CODE (rtl))
2430 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2435 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2436 offset = INTVAL (XEXP (rtl, 1));
2440 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2441 offset = -INTVAL (XEXP (rtl, 1));
2451 /* The return address is at some offset from any value we can
2452 actually load. For instance, on the SPARC it is in %i7+8. Just
2453 ignore the offset for now; it doesn't matter for unwinding frames. */
2454 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2455 initial_return_save (XEXP (rtl, 0));
/* Record the save against the return column; note the offset is made
   CFA-relative by subtracting cfa.offset.  */
2462 if (reg != DWARF_FRAME_RETURN_COLUMN)
2464 if (reg != INVALID_REGNUM)
2465 record_reg_saved_in_reg (rtl, pc_rtx);
2466 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa.offset);
2470 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2471 state at each location within the function. These notes will be
2472 emitted during pass_final. */
/* Pass entry point: compute the CIE's incoming frame state once per
   compilation, initialize per-function CFA tracking state, create the
   NOTE_INSN_CFI notes, then reset the per-function globals for GC.  */
2475 execute_dwarf2_frame (void)
2477 /* The first time we're called, compute the incoming frame state. */
2478 if (cie_cfi_vec == NULL)
2480 dw_cfa_location loc;
/* CFIs generated during this setup are recorded into the CIE vector.  */
2482 add_cfi_vec = &cie_cfi_vec;
2484 memset(&old_cfa, 0, sizeof (old_cfa));
2485 old_cfa.reg = INVALID_REGNUM;
2487 /* On entry, the Canonical Frame Address is at SP. */
2488 memset(&loc, 0, sizeof (loc));
2489 loc.reg = STACK_POINTER_REGNUM;
2490 loc.offset = INCOMING_FRAME_SP_OFFSET;
2493 if (targetm.debug_unwind_info () == UI_DWARF2
2494 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2496 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2498 /* For a few targets, we have the return address incoming into a
2499 register, but choose a different return column. This will result
2500 in a DW_CFA_register for the return, and an entry in
2501 regs_saved_in_regs to match. If the target later stores that
2502 return address register to the stack, we want to be able to emit
2503 the DW_CFA_offset against the return column, not the intermediate
2504 save register. Save the contents of regs_saved_in_regs so that
2505 we can re-initialize it at the start of each function. */
2506 switch (VEC_length (reg_saved_in_data, regs_saved_in_regs))
2511 cie_return_save = ggc_alloc_reg_saved_in_data ();
2512 *cie_return_save = *VEC_index (reg_saved_in_data,
2513 regs_saved_in_regs, 0);
2514 regs_saved_in_regs = NULL;
2524 /* Set up state for generating call frame debug info. */
2525 gcc_checking_assert (queued_reg_saves == NULL);
2526 gcc_checking_assert (regs_saved_in_regs == NULL);
2528 memset (&cfa, 0, sizeof(cfa));
2529 cfa.reg = STACK_POINTER_REGNUM;
2530 cfa.offset = INCOMING_FRAME_SP_OFFSET;
2535 memset (&cfa_temp, 0, sizeof(cfa_temp));
2536 cfa_temp.reg = INVALID_REGNUM;
/* Re-seed regs_saved_in_regs with the CIE's return-address entry.  */
2538 if (cie_return_save)
2539 VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, cie_return_save);
2541 dwarf2out_alloc_current_fde ();
2544 create_cfi_notes ();
2547 /* Reset all function-specific information, particularly for GC. */
2548 XDELETEVEC (barrier_args_size);
2549 barrier_args_size = NULL;
2550 regs_saved_in_regs = NULL;
2551 queued_reg_saves = NULL;
2557 /* Save the result of dwarf2out_do_frame across PCH.
2558 This variable is tri-state, with 0 unset, >0 true, <0 false. */
/* Written by dwarf2out_do_cfi_asm, consulted by dwarf2out_do_frame.  */
2559 static GTY(()) signed char saved_do_cfi_asm = 0;
2561 /* Decide whether we want to emit frame unwind information for the current
2562 translation unit. */
/* See the comment above: true when frame unwind information (or CFA
   location info for debugging) should be emitted for this TU.  */
2565 dwarf2out_do_frame (void)
2567 /* We want to emit correct CFA location expressions or lists, so we
2568 have to return true if we're going to output debug info, even if
2569 we're not going to output frame or unwind info. */
2570 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
/* A cached positive dwarf2out_do_cfi_asm result also implies yes.  */
2573 if (saved_do_cfi_asm > 0)
2576 if (targetm.debug_unwind_info () == UI_DWARF2)
2579 if ((flag_unwind_tables || flag_exceptions)
2580 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2586 /* Decide whether to emit frame unwind via assembler directives. */
/* See the comment above: decide (once, caching in saved_do_cfi_asm)
   whether frame unwind info can be emitted with .cfi_* assembler
   directives rather than hand-built tables.  */
2589 dwarf2out_do_cfi_asm (void)
2593 #ifdef MIPS_DEBUGGING_INFO
/* Return the cached tri-state answer if already computed.  */
2597 if (saved_do_cfi_asm != 0)
2598 return saved_do_cfi_asm > 0;
2600 /* Assume failure for a moment. */
2601 saved_do_cfi_asm = -1;
2603 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
2605 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
2608 /* Make sure the personality encoding is one the assembler can support.
2609 In particular, aligned addresses can't be handled. */
2610 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
2611 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
2613 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
2614 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
2617 /* If we can't get the assembler to emit only .debug_frame, and we don't need
2618 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
2619 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
2620 && !flag_unwind_tables && !flag_exceptions
2621 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
/* All checks passed: cache success.  */
2625 saved_do_cfi_asm = 1;
/* Gate for the dwarf2 frame pass: run only when dwarf2 frame info is
   wanted and the target has an RTL prologue.  */
2630 gate_dwarf2_frame (void)
2632 #ifndef HAVE_prologue
2633 /* Targets which still implement the prologue in assembler text
2634 cannot use the generic dwarf2 unwinding. */
2638 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
2639 from the optimized shrink-wrapping annotations that we will compute.
2640 For now, only produce the CFI notes for dwarf2. */
2641 return dwarf2out_do_frame ();
/* RTL pass descriptor wiring gate_dwarf2_frame / execute_dwarf2_frame
   into the pass manager under the name "dwarf2".  */
2644 struct rtl_opt_pass pass_dwarf2_frame =
2648 "dwarf2", /* name */
2649 gate_dwarf2_frame, /* gate */
2650 execute_dwarf2_frame, /* execute */
2653 0, /* static_pass_number */
2654 TV_FINAL, /* tv_id */
2655 0, /* properties_required */
2656 0, /* properties_provided */
2657 0, /* properties_destroyed */
2658 0, /* todo_flags_start */
2659 0 /* todo_flags_finish */
2663 #include "gt-dwarf2cfi.h"