1 /* Output Dwarf2 format symbol table information from GCC.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
4 Contributed by Gary Funck (gary@intrepid.com).
5 Derived from DWARF 1 implementation of Ron Guilmette (rfg@monkeys.com).
6 Extensively modified by Jason Merrill (jason@cygnus.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 /* TODO: Emit .debug_line header even when there are no functions, since
25 the file numbers are used by .debug_info. Alternatively, leave
26 out locations for types and decls.
27 Avoid talking about ctors and op= for PODs.
28 Factor out common prologue sequences into multiple CIEs. */
30 /* The first part of this file deals with the DWARF 2 frame unwind
31 information, which is also used by the GCC efficient exception handling
32 mechanism. The second part, controlled only by an #ifdef
33 DWARF2_DEBUGGING_INFO, deals with the other DWARF 2 debugging information. */
36 /* DWARF2 Abbreviation Glossary:
38 CFA = Canonical Frame Address
39 a fixed address on the stack which identifies a call frame.
40 We define it to be the value of SP just before the call insn.
41 The CFA register and offset, which may change during the course
42 of the function, are used to calculate its value at runtime.
44 CFI = Call Frame Instruction
45 an instruction for the DWARF2 abstract machine
47 CIE = Common Information Entry
48 information common to one or more FDEs
50 DIE = Debugging Information Entry
52 FDE = Frame Description Entry
53 information describing the stack call frame, in particular,
54 how to restore registers
56 DW_CFA_... = DWARF2 CFA call frame instruction
57 DW_TAG_... = DWARF2 DIE tag */
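/* An illustrative example (not taken from the sources above): on a target
   where the call insn pushes the return address onto a downward-growing
   stack, the CFA is the value SP had just before the call, so immediately
   after the call it equals SP plus the size of the return address; once the
   prologue saves the old frame pointer and establishes a new one, that very
   same address becomes reachable as FP plus a small constant.  The CFI
   emitted below only records how to recompute this fixed address as the
   register/offset rule changes over the function.  */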
61 #include "coretypes.h"
68 #include "hard-reg-set.h"
70 #include "insn-config.h"
78 #include "dwarf2out.h"
79 #include "dwarf2asm.h"
85 #include "diagnostic.h"
88 #include "langhooks.h"
93 #ifdef DWARF2_DEBUGGING_INFO
94 static void dwarf2out_source_line (unsigned int, const char *, int);
97 #ifndef DWARF2_FRAME_INFO
98 # ifdef DWARF2_DEBUGGING_INFO
99 # define DWARF2_FRAME_INFO \
100 (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
102 # define DWARF2_FRAME_INFO 0
106 /* Map register numbers held in the call frame info that gcc has
107 collected using DWARF_FRAME_REGNUM to those that should be output in
108 .debug_frame and .eh_frame. */
109 #ifndef DWARF2_FRAME_REG_OUT
110 #define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) (REGNO)
113 /* Save the result of dwarf2out_do_frame across PCH. */
114 static GTY(()) bool saved_do_cfi_asm = 0;
116 /* Decide whether we want to emit frame unwind information for the current translation unit. */
120 dwarf2out_do_frame (void)
122 /* We want to emit correct CFA location expressions or lists, so we
123 have to return true if we're going to output debug info, even if
124 we're not going to output frame or unwind info. */
125 return (write_symbols == DWARF2_DEBUG
126 || write_symbols == VMS_AND_DWARF2_DEBUG
127 || DWARF2_FRAME_INFO || saved_do_cfi_asm
128 #ifdef DWARF2_UNWIND_INFO
129 || (DWARF2_UNWIND_INFO
130 && (flag_unwind_tables
131 || (flag_exceptions && ! USING_SJLJ_EXCEPTIONS)))
136 /* Decide whether to emit frame unwind via assembler directives. */
139 dwarf2out_do_cfi_asm (void)
143 #ifdef MIPS_DEBUGGING_INFO
146 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
148 if (saved_do_cfi_asm || !eh_personality_libfunc)
150 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
153 /* Make sure the personality encoding is one the assembler can support.
154 In particular, aligned addresses can't be handled. */
155 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
156 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
158 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
159 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
162 saved_do_cfi_asm = true;
166 /* The size of the target's pointer type. */
168 #define PTR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
171 /* Array of RTXes referenced by the debugging information, which therefore
172 must be kept around forever. */
173 static GTY(()) VEC(rtx,gc) *used_rtx_array;
175 /* A pointer to the base of a list of incomplete types which might be
176 completed at some later time. incomplete_types_list needs to be a
177 VEC(tree,gc) because we want to tell the garbage collector about it. */
179 static GTY(()) VEC(tree,gc) *incomplete_types;
181 /* A pointer to the base of a table of references to declaration
182 scopes. This table is a display which tracks the nesting
183 of declaration scopes at the current scope and containing
184 scopes. This table is used to find the proper place to
185 define type declaration DIE's. */
186 static GTY(()) VEC(tree,gc) *decl_scope_table;
188 /* Pointers to various DWARF2 sections. */
189 static GTY(()) section *debug_info_section;
190 static GTY(()) section *debug_abbrev_section;
191 static GTY(()) section *debug_aranges_section;
192 static GTY(()) section *debug_macinfo_section;
193 static GTY(()) section *debug_line_section;
194 static GTY(()) section *debug_loc_section;
195 static GTY(()) section *debug_pubnames_section;
196 static GTY(()) section *debug_pubtypes_section;
197 static GTY(()) section *debug_str_section;
198 static GTY(()) section *debug_ranges_section;
199 static GTY(()) section *debug_frame_section;
201 /* How to start an assembler comment. */
202 #ifndef ASM_COMMENT_START
203 #define ASM_COMMENT_START ";#"
206 typedef struct dw_cfi_struct *dw_cfi_ref;
207 typedef struct dw_fde_struct *dw_fde_ref;
208 typedef union dw_cfi_oprnd_struct *dw_cfi_oprnd_ref;
210 /* Call frames are described using a sequence of Call Frame
211 Information instructions. The register number, offset
212 and address fields are provided as possible operands;
213 their use is selected by the opcode field. */
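/* For example (descriptive, matching the code below): DW_CFA_def_cfa carries
   a register number in dw_cfi_oprnd1 and an offset in dw_cfi_oprnd2, while
   DW_CFA_set_loc carries a label address in dw_cfi_oprnd1; the opcode stored
   in dw_cfi_opc selects which interpretation applies (see lookup_cfa_1 and
   output_cfi below).  */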
215 enum dw_cfi_oprnd_type {
217 dw_cfi_oprnd_reg_num,
223 typedef union GTY(()) dw_cfi_oprnd_struct {
224 unsigned int GTY ((tag ("dw_cfi_oprnd_reg_num"))) dw_cfi_reg_num;
225 HOST_WIDE_INT GTY ((tag ("dw_cfi_oprnd_offset"))) dw_cfi_offset;
226 const char * GTY ((tag ("dw_cfi_oprnd_addr"))) dw_cfi_addr;
227 struct dw_loc_descr_struct * GTY ((tag ("dw_cfi_oprnd_loc"))) dw_cfi_loc;
231 typedef struct GTY(()) dw_cfi_struct {
232 dw_cfi_ref dw_cfi_next;
233 enum dwarf_call_frame_info dw_cfi_opc;
234 dw_cfi_oprnd GTY ((desc ("dw_cfi_oprnd1_desc (%1.dw_cfi_opc)")))
236 dw_cfi_oprnd GTY ((desc ("dw_cfi_oprnd2_desc (%1.dw_cfi_opc)")))
241 /* This is how we define the location of the CFA. We used to handle it
242 as REG + OFFSET all the time, but now it can be more complex.
243 It can now be either REG + CFA_OFFSET or *(REG + BASE_OFFSET) + CFA_OFFSET.
244 Instead of passing around REG and OFFSET, we pass a copy
245 of this structure. */
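/* Two illustrative encodings (hypothetical values): a frame whose CFA is
   "sp + 16" uses reg = sp, offset = 16, indirect = 0; a frame whose CFA must
   first be reloaded from a save slot, say "*(fp - 8) + 0", uses reg = fp,
   base_offset = -8, indirect = 1, with offset applied after the dereference.  */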
246 typedef struct GTY(()) cfa_loc {
247 HOST_WIDE_INT offset;
248 HOST_WIDE_INT base_offset;
250 BOOL_BITFIELD indirect : 1; /* 1 if CFA is accessed via a dereference. */
251 BOOL_BITFIELD in_use : 1; /* 1 if a saved cfa is stored here. */
254 /* All call frame descriptions (FDE's) in the GCC generated DWARF
255 refer to a single Common Information Entry (CIE), defined at
256 the beginning of the .debug_frame section. This use of a single
257 CIE obviates the need to keep track of multiple CIE's
258 in the DWARF generation routines below. */
260 typedef struct GTY(()) dw_fde_struct {
262 const char *dw_fde_begin;
263 const char *dw_fde_current_label;
264 const char *dw_fde_end;
265 const char *dw_fde_hot_section_label;
266 const char *dw_fde_hot_section_end_label;
267 const char *dw_fde_unlikely_section_label;
268 const char *dw_fde_unlikely_section_end_label;
269 bool dw_fde_switched_sections;
270 dw_cfi_ref dw_fde_cfi;
271 unsigned funcdef_number;
272 HOST_WIDE_INT stack_realignment;
273 /* Dynamic realign argument pointer register. */
274 unsigned int drap_reg;
275 /* Virtual dynamic realign argument pointer register. */
276 unsigned int vdrap_reg;
277 unsigned all_throwers_are_sibcalls : 1;
278 unsigned nothrow : 1;
279 unsigned uses_eh_lsda : 1;
280 /* Whether we did stack realign in this call frame. */
281 unsigned stack_realign : 1;
282 /* Whether dynamic realign argument pointer register has been saved. */
283 unsigned drap_reg_saved: 1;
287 /* Maximum size (in bytes) of an artificially generated label. */
288 #define MAX_ARTIFICIAL_LABEL_BYTES 30
290 /* The size of addresses as they appear in the Dwarf 2 data.
291 Some architectures use word addresses to refer to code locations,
292 but Dwarf 2 info always uses byte addresses. On such machines,
293 Dwarf 2 addresses need to be larger than the architecture's pointers. */
295 #ifndef DWARF2_ADDR_SIZE
296 #define DWARF2_ADDR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
299 /* The size in bytes of a DWARF field indicating an offset or length
300 relative to a debug info section, specified to be 4 bytes in the
301 DWARF-2 specification. The SGI/MIPS ABI defines it to be the same as PTR_SIZE. */
304 #ifndef DWARF_OFFSET_SIZE
305 #define DWARF_OFFSET_SIZE 4
308 /* According to the (draft) DWARF 3 specification, the initial length
309 should either be 4 or 12 bytes. When it's 12 bytes, the first 4
310 bytes are 0xffffffff, followed by the length stored in the next 8 bytes.
313 However, the SGI/MIPS ABI uses an initial length which is equal to
314 DWARF_OFFSET_SIZE. It is defined (elsewhere) accordingly. */
316 #ifndef DWARF_INITIAL_LENGTH_SIZE
317 #define DWARF_INITIAL_LENGTH_SIZE (DWARF_OFFSET_SIZE == 4 ? 4 : 12)
320 #define DWARF_VERSION 2
322 /* Round SIZE up to the nearest BOUNDARY. */
323 #define DWARF_ROUND(SIZE,BOUNDARY) \
324 ((((SIZE) + (BOUNDARY) - 1) / (BOUNDARY)) * (BOUNDARY))
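/* For example, DWARF_ROUND (5, 4) is ((5 + 3) / 4) * 4 == 8, while
   DWARF_ROUND (8, 4) stays 8; BOUNDARY is assumed to be positive.  */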
326 /* Offsets recorded in opcodes are a multiple of this alignment factor. */
327 #ifndef DWARF_CIE_DATA_ALIGNMENT
328 #ifdef STACK_GROWS_DOWNWARD
329 #define DWARF_CIE_DATA_ALIGNMENT (-((int) UNITS_PER_WORD))
331 #define DWARF_CIE_DATA_ALIGNMENT ((int) UNITS_PER_WORD)
335 /* CIE identifier. */
336 #if HOST_BITS_PER_WIDE_INT >= 64
337 #define DWARF_CIE_ID \
338 (unsigned HOST_WIDE_INT) (DWARF_OFFSET_SIZE == 4 ? DW_CIE_ID : DW64_CIE_ID)
340 #define DWARF_CIE_ID DW_CIE_ID
343 /* A pointer to the base of a table that contains frame description
344 information for each routine. */
345 static GTY((length ("fde_table_allocated"))) dw_fde_ref fde_table;
347 /* Number of elements currently allocated for fde_table. */
348 static GTY(()) unsigned fde_table_allocated;
350 /* Number of elements in fde_table currently in use. */
351 static GTY(()) unsigned fde_table_in_use;
353 /* Size (in elements) of increments by which we may expand the fde_table. */
355 #define FDE_TABLE_INCREMENT 256
357 /* Get the current fde_table entry we should use. */
359 static inline dw_fde_ref
362 return fde_table_in_use ? &fde_table[fde_table_in_use - 1] : NULL;
365 /* A list of call frame insns for the CIE. */
366 static GTY(()) dw_cfi_ref cie_cfi_head;
368 #if defined (DWARF2_DEBUGGING_INFO) || defined (DWARF2_UNWIND_INFO)
369 /* Some DWARF extensions (e.g., MIPS/SGI) implement a subprogram
370 attribute that accelerates the lookup of the FDE associated
371 with the subprogram. This variable holds the table index of the FDE
372 associated with the current function (body) definition. */
373 static unsigned current_funcdef_fde;
376 struct GTY(()) indirect_string_node {
378 unsigned int refcount;
379 enum dwarf_form form;
383 static GTY ((param_is (struct indirect_string_node))) htab_t debug_str_hash;
385 static GTY(()) int dw2_string_counter;
386 static GTY(()) unsigned long dwarf2out_cfi_label_num;
388 /* True if the compilation unit places functions in more than one section. */
389 static GTY(()) bool have_multiple_function_sections = false;
391 /* Whether the default text and cold text sections have been used at all. */
393 static GTY(()) bool text_section_used = false;
394 static GTY(()) bool cold_text_section_used = false;
396 /* The default cold text section. */
397 static GTY(()) section *cold_text_section;
399 #if defined (DWARF2_DEBUGGING_INFO) || defined (DWARF2_UNWIND_INFO)
401 /* Forward declarations for functions defined in this file. */
403 static char *stripattributes (const char *);
404 static const char *dwarf_cfi_name (unsigned);
405 static dw_cfi_ref new_cfi (void);
406 static void add_cfi (dw_cfi_ref *, dw_cfi_ref);
407 static void add_fde_cfi (const char *, dw_cfi_ref);
408 static void lookup_cfa_1 (dw_cfi_ref, dw_cfa_location *, dw_cfa_location *);
409 static void lookup_cfa (dw_cfa_location *);
410 static void reg_save (const char *, unsigned, unsigned, HOST_WIDE_INT);
411 #ifdef DWARF2_UNWIND_INFO
412 static void initial_return_save (rtx);
414 static HOST_WIDE_INT stack_adjust_offset (const_rtx, HOST_WIDE_INT,
416 static void output_cfi (dw_cfi_ref, dw_fde_ref, int);
417 static void output_cfi_directive (dw_cfi_ref);
418 static void output_call_frame_info (int);
419 static void dwarf2out_note_section_used (void);
420 static void dwarf2out_stack_adjust (rtx, bool);
421 static void dwarf2out_args_size_adjust (HOST_WIDE_INT, const char *);
422 static void flush_queued_reg_saves (void);
423 static bool clobbers_queued_reg_save (const_rtx);
424 static void dwarf2out_frame_debug_expr (rtx, const char *);
426 /* Support for complex CFA locations. */
427 static void output_cfa_loc (dw_cfi_ref);
428 static void output_cfa_loc_raw (dw_cfi_ref);
429 static void get_cfa_from_loc_descr (dw_cfa_location *,
430 struct dw_loc_descr_struct *);
431 static struct dw_loc_descr_struct *build_cfa_loc
432 (dw_cfa_location *, HOST_WIDE_INT);
433 static struct dw_loc_descr_struct *build_cfa_aligned_loc
434 (HOST_WIDE_INT, HOST_WIDE_INT);
435 static void def_cfa_1 (const char *, dw_cfa_location *);
437 /* How to start an assembler comment. */
438 #ifndef ASM_COMMENT_START
439 #define ASM_COMMENT_START ";#"
442 /* Data and reference forms for relocatable data. */
443 #define DW_FORM_data (DWARF_OFFSET_SIZE == 8 ? DW_FORM_data8 : DW_FORM_data4)
444 #define DW_FORM_ref (DWARF_OFFSET_SIZE == 8 ? DW_FORM_ref8 : DW_FORM_ref4)
446 #ifndef DEBUG_FRAME_SECTION
447 #define DEBUG_FRAME_SECTION ".debug_frame"
450 #ifndef FUNC_BEGIN_LABEL
451 #define FUNC_BEGIN_LABEL "LFB"
454 #ifndef FUNC_END_LABEL
455 #define FUNC_END_LABEL "LFE"
458 #ifndef FRAME_BEGIN_LABEL
459 #define FRAME_BEGIN_LABEL "Lframe"
461 #define CIE_AFTER_SIZE_LABEL "LSCIE"
462 #define CIE_END_LABEL "LECIE"
463 #define FDE_LABEL "LSFDE"
464 #define FDE_AFTER_SIZE_LABEL "LASFDE"
465 #define FDE_END_LABEL "LEFDE"
466 #define LINE_NUMBER_BEGIN_LABEL "LSLT"
467 #define LINE_NUMBER_END_LABEL "LELT"
468 #define LN_PROLOG_AS_LABEL "LASLTP"
469 #define LN_PROLOG_END_LABEL "LELTP"
470 #define DIE_LABEL_PREFIX "DW"
472 /* The DWARF 2 CFA column which tracks the return address. Normally this
473 is the column for PC, or the first column after all of the hard registers. */
475 #ifndef DWARF_FRAME_RETURN_COLUMN
477 #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (PC_REGNUM)
479 #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGISTERS
483 /* The mapping from gcc register number to DWARF 2 CFA column number. By
484 default, we just provide columns for all registers. */
485 #ifndef DWARF_FRAME_REGNUM
486 #define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG)
489 /* Hook used by __throw. */
492 expand_builtin_dwarf_sp_column (void)
494 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
495 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
498 /* Return a pointer to a copy of the section string name S with all
499 attributes stripped off, and an asterisk prepended (for assemble_name). */
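/* For example (hypothetical input): stripattributes (".debug_frame,\"\",@progbits")
   returns "*.debug_frame"; everything from the first comma onward is dropped,
   and the leading '*' makes assemble_name emit the name verbatim.  */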
502 stripattributes (const char *s)
504 char *stripped = XNEWVEC (char, strlen (s) + 2);
509 while (*s && *s != ',')
516 /* MEM is a memory reference for the register size table, each element of
517 which has mode MODE. Initialize column C as a return address column. */
520 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
522 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
523 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
524 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
527 /* Generate code to initialize the register size table. */
530 expand_builtin_init_dwarf_reg_sizes (tree address)
533 enum machine_mode mode = TYPE_MODE (char_type_node);
534 rtx addr = expand_normal (address);
535 rtx mem = gen_rtx_MEM (BLKmode, addr);
536 bool wrote_return_column = false;
538 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
540 int rnum = DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), 1);
542 if (rnum < DWARF_FRAME_REGISTERS)
544 HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
545 enum machine_mode save_mode = reg_raw_mode[i];
548 if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
549 save_mode = choose_hard_reg_mode (i, 1, true);
550 if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN)
552 if (save_mode == VOIDmode)
554 wrote_return_column = true;
556 size = GET_MODE_SIZE (save_mode);
560 emit_move_insn (adjust_address (mem, mode, offset),
561 gen_int_mode (size, mode));
565 if (!wrote_return_column)
566 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
568 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
569 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
572 targetm.init_dwarf_reg_sizes_extra (address);
575 /* Convert a DWARF call frame info operation to its string name. */
578 dwarf_cfi_name (unsigned int cfi_opc)
582 case DW_CFA_advance_loc:
583 return "DW_CFA_advance_loc";
585 return "DW_CFA_offset";
587 return "DW_CFA_restore";
591 return "DW_CFA_set_loc";
592 case DW_CFA_advance_loc1:
593 return "DW_CFA_advance_loc1";
594 case DW_CFA_advance_loc2:
595 return "DW_CFA_advance_loc2";
596 case DW_CFA_advance_loc4:
597 return "DW_CFA_advance_loc4";
598 case DW_CFA_offset_extended:
599 return "DW_CFA_offset_extended";
600 case DW_CFA_restore_extended:
601 return "DW_CFA_restore_extended";
602 case DW_CFA_undefined:
603 return "DW_CFA_undefined";
604 case DW_CFA_same_value:
605 return "DW_CFA_same_value";
606 case DW_CFA_register:
607 return "DW_CFA_register";
608 case DW_CFA_remember_state:
609 return "DW_CFA_remember_state";
610 case DW_CFA_restore_state:
611 return "DW_CFA_restore_state";
613 return "DW_CFA_def_cfa";
614 case DW_CFA_def_cfa_register:
615 return "DW_CFA_def_cfa_register";
616 case DW_CFA_def_cfa_offset:
617 return "DW_CFA_def_cfa_offset";
620 case DW_CFA_def_cfa_expression:
621 return "DW_CFA_def_cfa_expression";
622 case DW_CFA_expression:
623 return "DW_CFA_expression";
624 case DW_CFA_offset_extended_sf:
625 return "DW_CFA_offset_extended_sf";
626 case DW_CFA_def_cfa_sf:
627 return "DW_CFA_def_cfa_sf";
628 case DW_CFA_def_cfa_offset_sf:
629 return "DW_CFA_def_cfa_offset_sf";
631 /* SGI/MIPS specific */
632 case DW_CFA_MIPS_advance_loc8:
633 return "DW_CFA_MIPS_advance_loc8";
636 case DW_CFA_GNU_window_save:
637 return "DW_CFA_GNU_window_save";
638 case DW_CFA_GNU_args_size:
639 return "DW_CFA_GNU_args_size";
640 case DW_CFA_GNU_negative_offset_extended:
641 return "DW_CFA_GNU_negative_offset_extended";
644 return "DW_CFA_<unknown>";
648 /* Return a pointer to a newly allocated Call Frame Instruction. */
650 static inline dw_cfi_ref
653 dw_cfi_ref cfi = GGC_NEW (dw_cfi_node);
655 cfi->dw_cfi_next = NULL;
656 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
657 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
662 /* Add a Call Frame Instruction to list of instructions. */
665 add_cfi (dw_cfi_ref *list_head, dw_cfi_ref cfi)
668 dw_fde_ref fde = current_fde ();
670 /* When DRAP is used, the CFA is defined with an expression. Redefining
671 the CFA may lead to a different CFA value. */
672 /* ??? Of course, this heuristic fails when we're annotating epilogues,
673 because of course we'll always want to redefine the CFA back to the
674 stack pointer on the way out. Where should we move this check? */
675 if (0 && fde && fde->drap_reg != INVALID_REGNUM)
676 switch (cfi->dw_cfi_opc)
678 case DW_CFA_def_cfa_register:
679 case DW_CFA_def_cfa_offset:
680 case DW_CFA_def_cfa_offset_sf:
682 case DW_CFA_def_cfa_sf:
689 /* Find the end of the chain. */
690 for (p = list_head; (*p) != NULL; p = &(*p)->dw_cfi_next)
696 /* Generate a new label for the CFI info to refer to. FORCE is true
697 if a label needs to be output even when using .cfi_* directives. */
700 dwarf2out_cfi_label (bool force)
702 static char label[20];
704 if (!force && dwarf2out_do_cfi_asm ())
706 /* In this case, we will be emitting the asm directive instead of
707 the label, so just return a placeholder to keep the rest of the code happy. */
709 strcpy (label, "<do not output>");
713 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", dwarf2out_cfi_label_num++);
714 ASM_OUTPUT_LABEL (asm_out_file, label);
720 /* Add CFI to the current fde at the PC value indicated by LABEL if specified,
721 or to the CIE if LABEL is NULL. */
724 add_fde_cfi (const char *label, dw_cfi_ref cfi)
726 dw_cfi_ref *list_head = &cie_cfi_head;
728 if (dwarf2out_do_cfi_asm ())
732 dw_fde_ref fde = current_fde ();
734 gcc_assert (fde != NULL);
736 /* We still have to add the cfi to the list so that
737 lookup_cfa works later on. At -g2 and above we
738 even need to force emitting of CFI labels and
739 add to the list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list purposes. */
741 switch (cfi->dw_cfi_opc)
743 case DW_CFA_def_cfa_offset:
744 case DW_CFA_def_cfa_offset_sf:
745 case DW_CFA_def_cfa_register:
747 case DW_CFA_def_cfa_sf:
748 case DW_CFA_def_cfa_expression:
749 case DW_CFA_restore_state:
750 if (write_symbols != DWARF2_DEBUG
751 && write_symbols != VMS_AND_DWARF2_DEBUG)
753 if (debug_info_level <= DINFO_LEVEL_TERSE)
756 if (*label == 0 || strcmp (label, "<do not output>") == 0)
757 label = dwarf2out_cfi_label (true);
759 if (fde->dw_fde_current_label == NULL
760 || strcmp (label, fde->dw_fde_current_label) != 0)
764 label = xstrdup (label);
766 /* Set the location counter to the new label. */
768 /* It doesn't matter whether DW_CFA_set_loc
769 or DW_CFA_advance_loc4 is added here, those aren't
770 emitted into assembly, only looked up by
771 convert_cfa_to_fb_loc_list. */
772 xcfi->dw_cfi_opc = DW_CFA_set_loc;
773 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
774 add_cfi (&fde->dw_fde_cfi, xcfi);
775 fde->dw_fde_current_label = label;
782 output_cfi_directive (cfi);
784 list_head = &fde->dw_fde_cfi;
786 /* ??? If this is a CFI for the CIE, we don't emit. This
787 assumes that the standard CIE contents that the assembler
788 uses matches the standard CIE contents that the compiler
789 uses. This is probably a bad assumption. I'm not quite
790 sure how to address this for now. */
794 dw_fde_ref fde = current_fde ();
796 gcc_assert (fde != NULL);
799 label = dwarf2out_cfi_label (false);
801 if (fde->dw_fde_current_label == NULL
802 || strcmp (label, fde->dw_fde_current_label) != 0)
806 label = xstrdup (label);
808 /* Set the location counter to the new label. */
810 /* If we have a current label, advance from there, otherwise
811 set the location directly using set_loc. */
812 xcfi->dw_cfi_opc = fde->dw_fde_current_label
813 ? DW_CFA_advance_loc4
815 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
816 add_cfi (&fde->dw_fde_cfi, xcfi);
818 fde->dw_fde_current_label = label;
821 list_head = &fde->dw_fde_cfi;
824 add_cfi (list_head, cfi);
827 /* Subroutine of lookup_cfa. */
830 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
832 switch (cfi->dw_cfi_opc)
834 case DW_CFA_def_cfa_offset:
835 case DW_CFA_def_cfa_offset_sf:
836 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
838 case DW_CFA_def_cfa_register:
839 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
842 case DW_CFA_def_cfa_sf:
843 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
844 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
846 case DW_CFA_def_cfa_expression:
847 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
850 case DW_CFA_remember_state:
851 gcc_assert (!remember->in_use);
853 remember->in_use = 1;
855 case DW_CFA_restore_state:
856 gcc_assert (remember->in_use);
858 remember->in_use = 0;
866 /* Find the previous value for the CFA. */
869 lookup_cfa (dw_cfa_location *loc)
873 dw_cfa_location remember;
875 memset (loc, 0, sizeof (*loc));
876 loc->reg = INVALID_REGNUM;
879 for (cfi = cie_cfi_head; cfi; cfi = cfi->dw_cfi_next)
880 lookup_cfa_1 (cfi, loc, &remember);
882 fde = current_fde ();
884 for (cfi = fde->dw_fde_cfi; cfi; cfi = cfi->dw_cfi_next)
885 lookup_cfa_1 (cfi, loc, &remember);
888 /* The current rule for calculating the DWARF2 canonical frame address. */
889 static dw_cfa_location cfa;
891 /* The register used for saving registers to the stack, and its offset from the CFA. */
893 static dw_cfa_location cfa_store;
895 /* The current save location around an epilogue. */
896 static dw_cfa_location cfa_remember;
898 /* The running total of the size of arguments pushed onto the stack. */
899 static HOST_WIDE_INT args_size;
901 /* The last args_size we actually output. */
902 static HOST_WIDE_INT old_args_size;
904 /* Entry point to update the canonical frame address (CFA).
905 LABEL is passed to add_fde_cfi. The value of CFA is now to be
906 calculated from REG+OFFSET. */
909 dwarf2out_def_cfa (const char *label, unsigned int reg, HOST_WIDE_INT offset)
916 def_cfa_1 (label, &loc);
919 /* Determine if two dw_cfa_location structures define the same data. */
922 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
924 return (loc1->reg == loc2->reg
925 && loc1->offset == loc2->offset
926 && loc1->indirect == loc2->indirect
927 && (loc1->indirect == 0
928 || loc1->base_offset == loc2->base_offset));
931 /* This routine does the actual work. The CFA is now calculated from
932 the dw_cfa_location structure. */
935 def_cfa_1 (const char *label, dw_cfa_location *loc_p)
938 dw_cfa_location old_cfa, loc;
943 if (cfa_store.reg == loc.reg && loc.indirect == 0)
944 cfa_store.offset = loc.offset;
946 loc.reg = DWARF_FRAME_REGNUM (loc.reg);
947 lookup_cfa (&old_cfa);
949 /* If nothing changed, no need to issue any call frame instructions. */
950 if (cfa_equal_p (&loc, &old_cfa))
955 if (loc.reg == old_cfa.reg && !loc.indirect)
957 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
958 the CFA register did not change but the offset did. The data
959 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
960 in the assembler via the .cfi_def_cfa_offset directive. */
962 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
964 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
965 cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
968 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
969 else if (loc.offset == old_cfa.offset
970 && old_cfa.reg != INVALID_REGNUM
973 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
974 indicating the CFA register has changed to <register> but the
975 offset has not changed. */
976 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
977 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
981 else if (loc.indirect == 0)
983 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
984 indicating the CFA register has changed to <register> with
985 the specified offset. The data factoring for DW_CFA_def_cfa_sf
986 happens in output_cfi, or in the assembler via the .cfi_def_cfa
989 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
991 cfi->dw_cfi_opc = DW_CFA_def_cfa;
992 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
993 cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
997 /* Construct a DW_CFA_def_cfa_expression instruction to
998 calculate the CFA using a full location expression since no
999 register-offset pair is available. */
1000 struct dw_loc_descr_struct *loc_list;
1002 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
1003 loc_list = build_cfa_loc (&loc, 0);
1004 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
1007 add_fde_cfi (label, cfi);
1010 /* Add the CFI for saving a register. REG is the CFA column number.
1011 LABEL is passed to add_fde_cfi.
1012 If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
1013 otherwise it is saved in SREG. */
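/* A note on opcode selection (descriptive, based on the code below): a save
   to the stack uses the compact DW_CFA_offset form when the column number
   fits in 6 bits, and falls back to DW_CFA_offset_extended or the signed
   DW_CFA_offset_extended_sf variant otherwise; when the stack has been
   realigned the slot is described with a DW_CFA_expression instead; a save
   into another register uses DW_CFA_register, or DW_CFA_same_value when
   SREG equals REG.  */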
1016 reg_save (const char *label, unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
1018 dw_cfi_ref cfi = new_cfi ();
1019 dw_fde_ref fde = current_fde ();
1021 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
1023 /* When stack is aligned, store REG using DW_CFA_expression with
1026 && fde->stack_realign
1027 && sreg == INVALID_REGNUM)
1029 cfi->dw_cfi_opc = DW_CFA_expression;
1030 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = reg;
1031 cfi->dw_cfi_oprnd1.dw_cfi_loc
1032 = build_cfa_aligned_loc (offset, fde->stack_realignment);
1034 else if (sreg == INVALID_REGNUM)
1037 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
1038 else if (reg & ~0x3f)
1039 cfi->dw_cfi_opc = DW_CFA_offset_extended;
1041 cfi->dw_cfi_opc = DW_CFA_offset;
1042 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
1044 else if (sreg == reg)
1045 cfi->dw_cfi_opc = DW_CFA_same_value;
1048 cfi->dw_cfi_opc = DW_CFA_register;
1049 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
1052 add_fde_cfi (label, cfi);
1055 /* Add the CFI for saving a register window. LABEL is passed to reg_save.
1056 This CFI tells the unwinder that it needs to restore the window registers
1057 from the previous frame's window save area.
1059 ??? Perhaps we should note in the CIE where windows are saved (instead of
1060 assuming 0(cfa)) and what registers are in the window. */
1063 dwarf2out_window_save (const char *label)
1065 dw_cfi_ref cfi = new_cfi ();
1067 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1068 add_fde_cfi (label, cfi);
1071 /* Add a CFI to update the running total of the size of arguments
1072 pushed onto the stack. */
1075 dwarf2out_args_size (const char *label, HOST_WIDE_INT size)
1079 if (size == old_args_size)
1082 old_args_size = size;
1085 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
1086 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
1087 add_fde_cfi (label, cfi);
1090 /* Entry point for saving a register to the stack. REG is the GCC register
1091 number. LABEL and OFFSET are passed to reg_save. */
1094 dwarf2out_reg_save (const char *label, unsigned int reg, HOST_WIDE_INT offset)
1096 reg_save (label, DWARF_FRAME_REGNUM (reg), INVALID_REGNUM, offset);
1099 /* Entry point for saving the return address in the stack.
1100 LABEL and OFFSET are passed to reg_save. */
1103 dwarf2out_return_save (const char *label, HOST_WIDE_INT offset)
1105 reg_save (label, DWARF_FRAME_RETURN_COLUMN, INVALID_REGNUM, offset);
1108 /* Entry point for saving the return address in a register.
1109 LABEL and SREG are passed to reg_save. */
1112 dwarf2out_return_reg (const char *label, unsigned int sreg)
1114 reg_save (label, DWARF_FRAME_RETURN_COLUMN, DWARF_FRAME_REGNUM (sreg), 0);
1117 #ifdef DWARF2_UNWIND_INFO
1118 /* Record the initial position of the return address. RTL is
1119 INCOMING_RETURN_ADDR_RTX. */
1122 initial_return_save (rtx rtl)
1124 unsigned int reg = INVALID_REGNUM;
1125 HOST_WIDE_INT offset = 0;
1127 switch (GET_CODE (rtl))
1130 /* RA is in a register. */
1131 reg = DWARF_FRAME_REGNUM (REGNO (rtl));
1135 /* RA is on the stack. */
1136 rtl = XEXP (rtl, 0);
1137 switch (GET_CODE (rtl))
1140 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
1145 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
1146 offset = INTVAL (XEXP (rtl, 1));
1150 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
1151 offset = -INTVAL (XEXP (rtl, 1));
1161 /* The return address is at some offset from any value we can
1162 actually load. For instance, on the SPARC it is in %i7+8. Just
1163 ignore the offset for now; it doesn't matter for unwinding frames. */
1164 gcc_assert (GET_CODE (XEXP (rtl, 1)) == CONST_INT);
1165 initial_return_save (XEXP (rtl, 0));
1172 if (reg != DWARF_FRAME_RETURN_COLUMN)
1173 reg_save (NULL, DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa.offset);
1177 /* Given a SET, calculate the amount of stack adjustment it represents. */
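/* Illustrative summary (not exhaustive) of the forms recognized below:
   a direct adjustment such as (set (reg sp) (plus (reg sp) (const_int N))),
   a push through a (pre_dec sp), (pre_inc sp) or (pre_modify sp ...) memory
   destination, and (set (reg sp) (reg other)), which is treated as resetting
   the argument area.  The sign of the result depends on STACK_GROWS_DOWNWARD
   so that growth of the outgoing argument area is reported consistently.  */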
1180 static HOST_WIDE_INT
1181 stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
1182 HOST_WIDE_INT cur_offset)
1184 const_rtx src = SET_SRC (pattern);
1185 const_rtx dest = SET_DEST (pattern);
1186 HOST_WIDE_INT offset = 0;
1189 if (dest == stack_pointer_rtx)
1191 code = GET_CODE (src);
1193 /* Assume (set (reg sp) (reg whatever)) sets args_size to 0. */
1195 if (code == REG && src != stack_pointer_rtx)
1197 offset = -cur_args_size;
1198 #ifndef STACK_GROWS_DOWNWARD
1201 return offset - cur_offset;
1204 if (! (code == PLUS || code == MINUS)
1205 || XEXP (src, 0) != stack_pointer_rtx
1206 || GET_CODE (XEXP (src, 1)) != CONST_INT)
1209 /* (set (reg sp) (plus (reg sp) (const_int))) */
1210 offset = INTVAL (XEXP (src, 1));
1216 if (MEM_P (src) && !MEM_P (dest))
1220 /* (set (mem (pre_dec (reg sp))) (foo)) */
1221 src = XEXP (dest, 0);
1222 code = GET_CODE (src);
1228 if (XEXP (src, 0) == stack_pointer_rtx)
1230 rtx val = XEXP (XEXP (src, 1), 1);
1231 /* We handle only adjustments by constant amount. */
1232 gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
1233 && GET_CODE (val) == CONST_INT);
1234 offset = -INTVAL (val);
1241 if (XEXP (src, 0) == stack_pointer_rtx)
1243 offset = GET_MODE_SIZE (GET_MODE (dest));
1250 if (XEXP (src, 0) == stack_pointer_rtx)
1252 offset = -GET_MODE_SIZE (GET_MODE (dest));
1267 /* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
1268 indexed by INSN_UID. */
1270 static HOST_WIDE_INT *barrier_args_size;
1272 /* Helper function for compute_barrier_args_size. Handle one insn. */
1274 static HOST_WIDE_INT
1275 compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
1276 VEC (rtx, heap) **next)
1278 HOST_WIDE_INT offset = 0;
1281 if (! RTX_FRAME_RELATED_P (insn))
1283 if (prologue_epilogue_contains (insn))
1285 else if (GET_CODE (PATTERN (insn)) == SET)
1286 offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
1287 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1288 || GET_CODE (PATTERN (insn)) == SEQUENCE)
1290 /* There may be stack adjustments inside compound insns. Search for them. */
1292 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
1293 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1294 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
1295 cur_args_size, offset);
1300 rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
1304 expr = XEXP (expr, 0);
1305 if (GET_CODE (expr) == PARALLEL
1306 || GET_CODE (expr) == SEQUENCE)
1307 for (i = 1; i < XVECLEN (expr, 0); i++)
1309 rtx elem = XVECEXP (expr, 0, i);
1311 if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
1312 offset += stack_adjust_offset (elem, cur_args_size, offset);
1317 #ifndef STACK_GROWS_DOWNWARD
1321 cur_args_size += offset;
1322 if (cur_args_size < 0)
1327 rtx dest = JUMP_LABEL (insn);
1331 if (barrier_args_size [INSN_UID (dest)] < 0)
1333 barrier_args_size [INSN_UID (dest)] = cur_args_size;
1334 VEC_safe_push (rtx, heap, *next, dest);
1339 return cur_args_size;
1342 /* Walk the whole function and compute args_size on BARRIERs. */
1345 compute_barrier_args_size (void)
1347 int max_uid = get_max_uid (), i;
1349 VEC (rtx, heap) *worklist, *next, *tmp;
1351 barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
1352 for (i = 0; i < max_uid; i++)
1353 barrier_args_size[i] = -1;
1355 worklist = VEC_alloc (rtx, heap, 20);
1356 next = VEC_alloc (rtx, heap, 20);
1357 insn = get_insns ();
1358 barrier_args_size[INSN_UID (insn)] = 0;
1359 VEC_quick_push (rtx, worklist, insn);
1362 while (!VEC_empty (rtx, worklist))
1364 rtx prev, body, first_insn;
1365 HOST_WIDE_INT cur_args_size;
1367 first_insn = insn = VEC_pop (rtx, worklist);
1368 cur_args_size = barrier_args_size[INSN_UID (insn)];
1369 prev = prev_nonnote_insn (insn);
1370 if (prev && BARRIER_P (prev))
1371 barrier_args_size[INSN_UID (prev)] = cur_args_size;
1373 for (; insn; insn = NEXT_INSN (insn))
1375 if (INSN_DELETED_P (insn) || NOTE_P (insn))
1377 if (BARRIER_P (insn))
1382 if (insn == first_insn)
1384 else if (barrier_args_size[INSN_UID (insn)] < 0)
1386 barrier_args_size[INSN_UID (insn)] = cur_args_size;
1391 /* The insns starting with this label have already
1392 been scanned or are in the worklist. */
1397 body = PATTERN (insn);
1398 if (GET_CODE (body) == SEQUENCE)
1400 HOST_WIDE_INT dest_args_size = cur_args_size;
1401 for (i = 1; i < XVECLEN (body, 0); i++)
1402 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
1403 && INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
1405 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
1406 dest_args_size, &next);
1409 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
1410 cur_args_size, &next);
1412 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
1413 compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
1414 dest_args_size, &next);
1417 = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
1418 cur_args_size, &next);
1422 = compute_barrier_args_size_1 (insn, cur_args_size, &next);
1426 if (VEC_empty (rtx, next))
1429 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
1433 VEC_truncate (rtx, next, 0);
1436 VEC_free (rtx, heap, worklist);
1437 VEC_free (rtx, heap, next);
1441 /* Check INSN to see if it looks like a push or a stack adjustment, and
1442 make a note of it if it does. EH uses this information to find out how
1443 much extra space it needs to pop off the stack. */
1446 dwarf2out_stack_adjust (rtx insn, bool after_p)
1448 HOST_WIDE_INT offset;
1452 /* Don't handle epilogues at all. Certainly it would be wrong to do so
1453 with this function. Proper support would require all frame-related
1454 insns to be marked, and to be able to handle saving state around
1455 epilogues textually in the middle of the function. */
1456 if (prologue_epilogue_contains (insn))
1459 /* If INSN is an instruction from target of an annulled branch, the
1460 effects are for the target only and so current argument size
1461 shouldn't change at all. */
1463 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
1464 && INSN_FROM_TARGET_P (insn))
1467 /* If only calls can throw, and we have a frame pointer,
1468 save up adjustments until we see the CALL_INSN. */
1469 if (!flag_asynchronous_unwind_tables && cfa.reg != STACK_POINTER_REGNUM)
1471 if (CALL_P (insn) && !after_p)
1473 /* Extract the size of the args from the CALL rtx itself. */
1474 insn = PATTERN (insn);
1475 if (GET_CODE (insn) == PARALLEL)
1476 insn = XVECEXP (insn, 0, 0);
1477 if (GET_CODE (insn) == SET)
1478 insn = SET_SRC (insn);
1479 gcc_assert (GET_CODE (insn) == CALL);
1480 dwarf2out_args_size ("", INTVAL (XEXP (insn, 1)));
1485 if (CALL_P (insn) && !after_p)
1487 if (!flag_asynchronous_unwind_tables)
1488 dwarf2out_args_size ("", args_size);
1491 else if (BARRIER_P (insn))
1493 /* Don't call compute_barrier_args_size () if the only
1494 BARRIER is at the end of the function. */
1495 if (barrier_args_size == NULL && next_nonnote_insn (insn))
1496 compute_barrier_args_size ();
1497 if (barrier_args_size == NULL)
1501 offset = barrier_args_size[INSN_UID (insn)];
1506 offset -= args_size;
1507 #ifndef STACK_GROWS_DOWNWARD
1511 else if (GET_CODE (PATTERN (insn)) == SET)
1512 offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
1513 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1514 || GET_CODE (PATTERN (insn)) == SEQUENCE)
1516 /* There may be stack adjustments inside compound insns. Search for them. */
1518 for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
1519 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1520 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
1529 label = dwarf2out_cfi_label (false);
1530 dwarf2out_args_size_adjust (offset, label);
1533 /* Adjust args_size based on stack adjustment OFFSET. */
1536 dwarf2out_args_size_adjust (HOST_WIDE_INT offset, const char *label)
1538 if (cfa.reg == STACK_POINTER_REGNUM)
1539 cfa.offset += offset;
1541 if (cfa_store.reg == STACK_POINTER_REGNUM)
1542 cfa_store.offset += offset;
1544 #ifndef STACK_GROWS_DOWNWARD
1548 args_size += offset;
1552 def_cfa_1 (label, &cfa);
1553 if (flag_asynchronous_unwind_tables)
1554 dwarf2out_args_size (label, args_size);
1559 /* We delay emitting a register save until either (a) we reach the end
1560 of the prologue or (b) the register is clobbered. This clusters
1561 register saves so that there are fewer pc advances. */
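/* For example (illustrative): a prologue that pushes three call-saved
   registers back to back produces three queued entries; they are emitted
   together by flush_queued_reg_saves at the next flush point, so a single
   advance of the location suffices instead of one per push.  */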
1563 struct GTY(()) queued_reg_save {
1564 struct queued_reg_save *next;
1566 HOST_WIDE_INT cfa_offset;
1570 static GTY(()) struct queued_reg_save *queued_reg_saves;
1572 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
1573 struct GTY(()) reg_saved_in_data {
1578 /* A list of registers saved in other registers.
1579 The list intentionally has a small maximum capacity of 4; if your
1580 port needs more than that, you might consider implementing a
1581 more efficient data structure. */
1582 static GTY(()) struct reg_saved_in_data regs_saved_in_regs[4];
1583 static GTY(()) size_t num_regs_saved_in_regs;
1585 #if defined (DWARF2_DEBUGGING_INFO) || defined (DWARF2_UNWIND_INFO)
1586 static const char *last_reg_save_label;
1588 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1589 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1592 queue_reg_save (const char *label, rtx reg, rtx sreg, HOST_WIDE_INT offset)
1594 struct queued_reg_save *q;
1596 /* Duplicates waste space, but it's also necessary to remove them
1597 for correctness, since the queue gets output in reverse order. */
1599 for (q = queued_reg_saves; q != NULL; q = q->next)
1600 if (REGNO (q->reg) == REGNO (reg))
1605 q = GGC_NEW (struct queued_reg_save);
1606 q->next = queued_reg_saves;
1607 queued_reg_saves = q;
1611 q->cfa_offset = offset;
1612 q->saved_reg = sreg;
1614 last_reg_save_label = label;
1617 /* Output all the entries in QUEUED_REG_SAVES. */
1620 flush_queued_reg_saves (void)
1622 struct queued_reg_save *q;
1624 for (q = queued_reg_saves; q; q = q->next)
1627 unsigned int reg, sreg;
1629 for (i = 0; i < num_regs_saved_in_regs; i++)
1630 if (REGNO (regs_saved_in_regs[i].orig_reg) == REGNO (q->reg))
1632 if (q->saved_reg && i == num_regs_saved_in_regs)
1634 gcc_assert (i != ARRAY_SIZE (regs_saved_in_regs));
1635 num_regs_saved_in_regs++;
1637 if (i != num_regs_saved_in_regs)
1639 regs_saved_in_regs[i].orig_reg = q->reg;
1640 regs_saved_in_regs[i].saved_in_reg = q->saved_reg;
1643 reg = DWARF_FRAME_REGNUM (REGNO (q->reg));
1645 sreg = DWARF_FRAME_REGNUM (REGNO (q->saved_reg));
1647 sreg = INVALID_REGNUM;
1648 reg_save (last_reg_save_label, reg, sreg, q->cfa_offset);
1651 queued_reg_saves = NULL;
1652 last_reg_save_label = NULL;
1655 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1656 location for? Or, does it clobber a register which we've previously
1657 said that some other register is saved in, and for which we now
1658 have a new location for? */
1661 clobbers_queued_reg_save (const_rtx insn)
1663 struct queued_reg_save *q;
1665 for (q = queued_reg_saves; q; q = q->next)
1668 if (modified_in_p (q->reg, insn))
1670 for (i = 0; i < num_regs_saved_in_regs; i++)
1671 if (REGNO (q->reg) == REGNO (regs_saved_in_regs[i].orig_reg)
1672 && modified_in_p (regs_saved_in_regs[i].saved_in_reg, insn))
1679 /* Entry point for saving the first register into the second. */
1682 dwarf2out_reg_save_reg (const char *label, rtx reg, rtx sreg)
1685 unsigned int regno, sregno;
1687 for (i = 0; i < num_regs_saved_in_regs; i++)
1688 if (REGNO (regs_saved_in_regs[i].orig_reg) == REGNO (reg))
1690 if (i == num_regs_saved_in_regs)
1692 gcc_assert (i != ARRAY_SIZE (regs_saved_in_regs));
1693 num_regs_saved_in_regs++;
1695 regs_saved_in_regs[i].orig_reg = reg;
1696 regs_saved_in_regs[i].saved_in_reg = sreg;
1698 regno = DWARF_FRAME_REGNUM (REGNO (reg));
1699 sregno = DWARF_FRAME_REGNUM (REGNO (sreg));
1700 reg_save (label, regno, sregno, 0);
1703 /* What register, if any, is currently saved in REG? */
1706 reg_saved_in (rtx reg)
1708 unsigned int regn = REGNO (reg);
1710 struct queued_reg_save *q;
1712 for (q = queued_reg_saves; q; q = q->next)
1713 if (q->saved_reg && regn == REGNO (q->saved_reg))
1716 for (i = 0; i < num_regs_saved_in_regs; i++)
1717 if (regs_saved_in_regs[i].saved_in_reg
1718 && regn == REGNO (regs_saved_in_regs[i].saved_in_reg))
1719 return regs_saved_in_regs[i].orig_reg;
1725 /* A temporary register holding an integral value used in adjusting SP
1726 or setting up the store_reg. The "offset" field holds the integer
1727 value, not an offset. */
1728 static dw_cfa_location cfa_temp;
1730 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1733 dwarf2out_frame_debug_def_cfa (rtx pat, const char *label)
1735 memset (&cfa, 0, sizeof (cfa));
1737 switch (GET_CODE (pat))
1740 cfa.reg = REGNO (XEXP (pat, 0));
1741 cfa.offset = INTVAL (XEXP (pat, 1));
1745 cfa.reg = REGNO (pat);
1749 /* Recurse and define an expression. */
1753 def_cfa_1 (label, &cfa);
1756 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1759 dwarf2out_frame_debug_adjust_cfa (rtx pat, const char *label)
1763 gcc_assert (GET_CODE (pat) == SET);
1764 dest = XEXP (pat, 0);
1765 src = XEXP (pat, 1);
1767 switch (GET_CODE (src))
1770 gcc_assert (REGNO (XEXP (src, 0)) == cfa.reg);
1771 cfa.offset -= INTVAL (XEXP (src, 1));
1781 cfa.reg = REGNO (dest);
1782 gcc_assert (cfa.indirect == 0);
1784 def_cfa_1 (label, &cfa);
1787 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1790 dwarf2out_frame_debug_cfa_offset (rtx set, const char *label)
1792 HOST_WIDE_INT offset;
1793 rtx src, addr, span;
1795 src = XEXP (set, 1);
1796 addr = XEXP (set, 0);
1797 gcc_assert (MEM_P (addr));
1798 addr = XEXP (addr, 0);
1800 /* As documented, only consider extremely simple addresses. */
1801 switch (GET_CODE (addr))
1804 gcc_assert (REGNO (addr) == cfa.reg);
1805 offset = -cfa.offset;
1808 gcc_assert (REGNO (XEXP (addr, 0)) == cfa.reg);
1809 offset = INTVAL (XEXP (addr, 1)) - cfa.offset;
1815 span = targetm.dwarf_register_span (src);
1817 /* ??? We'd like to use queue_reg_save, but we need to come up with
1818 a different flushing heuristic for epilogues. */
1820 reg_save (label, DWARF_FRAME_REGNUM (REGNO (src)), INVALID_REGNUM, offset);
1823 /* We have a PARALLEL describing where the contents of SRC live.
1824 Queue register saves for each piece of the PARALLEL. */
1827 HOST_WIDE_INT span_offset = offset;
1829 gcc_assert (GET_CODE (span) == PARALLEL);
1831 limit = XVECLEN (span, 0);
1832 for (par_index = 0; par_index < limit; par_index++)
1834 rtx elem = XVECEXP (span, 0, par_index);
1836 reg_save (label, DWARF_FRAME_REGNUM (REGNO (elem)),
1837 INVALID_REGNUM, span_offset);
1838 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1843 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1846 dwarf2out_frame_debug_cfa_register (rtx set, const char *label)
1849 unsigned sregno, dregno;
1851 src = XEXP (set, 1);
1852 dest = XEXP (set, 0);
1855 sregno = DWARF_FRAME_RETURN_COLUMN;
1857 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1859 dregno = DWARF_FRAME_REGNUM (REGNO (dest));
1861 /* ??? We'd like to use queue_reg_save, but we need to come up with
1862 a different flushing heuristic for epilogues. */
1863 reg_save (label, sregno, dregno, 0);
1866 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1869 dwarf2out_frame_debug_cfa_restore (rtx reg, const char *label)
1871 dw_cfi_ref cfi = new_cfi ();
1872 unsigned int regno = DWARF_FRAME_REGNUM (REGNO (reg));
1874 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1875 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1877 add_fde_cfi (label, cfi);
1880 /* Record call frame debugging information for an expression EXPR,
1881 which either sets SP or FP (adjusting how we calculate the frame
1882 address) or saves a register to the stack or another register.
1883 LABEL indicates the address of EXPR.
1885 This function encodes a state machine mapping rtxes to actions on
1886 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1887 users need not read the source code.
1889 The High-Level Picture
1891 Changes in the register we use to calculate the CFA: Currently we
1892 assume that if you copy the CFA register into another register, we
1893 should take the other one as the new CFA register; this seems to
1894 work pretty well. If it's wrong for some target, it's simple
1895 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1897 Changes in the register we use for saving registers to the stack:
1898 This is usually SP, but not always. Again, we deduce that if you
1899 copy SP into another register (and SP is not the CFA register),
1900 then the new register is the one we will be using for register
1901 saves. This also seems to work.
1903 Register saves: There's not much guesswork about this one; if
1904 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1905 register save, and the register used to calculate the destination
1906 had better be the one we think we're using for this purpose.
1907 It's also assumed that a copy from a call-saved register to another
1908 register is saving that register if RTX_FRAME_RELATED_P is set on
1909 that instruction. If the copy is from a call-saved register to
1910 the *same* register, that means that the register is now the same
1911 value as in the caller.
1913 Except: If the register being saved is the CFA register, and the
1914 offset is nonzero, we are saving the CFA, so we assume we have to
1915 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1916 the intent is to save the value of SP from the previous frame.
1918 In addition, if a register has previously been saved to a different
1921 Invariants / Summaries of Rules
1923 cfa current rule for calculating the CFA. It usually
1924 consists of a register and an offset.
1925 cfa_store register used by prologue code to save things to the stack
1926 cfa_store.offset is the offset from the value of
1927 cfa_store.reg to the actual CFA
1928 cfa_temp register holding an integral value. cfa_temp.offset
1929 stores the value, which will be used to adjust the
1930 stack pointer. cfa_temp is also used like cfa_store,
1931 to track stores to the stack via fp or a temp reg.
1933 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1934 with cfa.reg as the first operand changes the cfa.reg and its
1935 cfa.offset. Rules 1 and 4 also set cfa_temp.reg and
1938 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1939 expression yielding a constant. This sets cfa_temp.reg
1940 and cfa_temp.offset.
1942 Rule 5: Create a new register cfa_store used to save items to the
1945 Rules 10-14: Save a register to the stack. Define offset as the
1946 difference of the original location and cfa_store's
1947 location (or cfa_temp's location if cfa_temp is used).
1949 Rules 16-20: If an AND operation happens on sp in the prologue, we assume
1950 the stack is realigned. We will use a group of DW_OP_XXX
1951 expressions to represent the location of the stored
1952 register instead of CFA+offset.
1956 "{a,b}" indicates a choice of a xor b.
1957 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1960 (set <reg1> <reg2>:cfa.reg)
1961 effects: cfa.reg = <reg1>
1962 cfa.offset unchanged
1963 cfa_temp.reg = <reg1>
1964 cfa_temp.offset = cfa.offset
1967 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1968 {<const_int>,<reg>:cfa_temp.reg}))
1969 effects: cfa.reg = sp if fp used
1970 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1971 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1972 if cfa_store.reg==sp
1975 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1976 effects: cfa.reg = fp
1977 cfa_offset += +/- <const_int>
1980 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1981 constraints: <reg1> != fp
1983 effects: cfa.reg = <reg1>
1984 cfa_temp.reg = <reg1>
1985 cfa_temp.offset = cfa.offset
1988 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1989 constraints: <reg1> != fp
1991 effects: cfa_store.reg = <reg1>
1992 cfa_store.offset = cfa.offset - cfa_temp.offset
1995 (set <reg> <const_int>)
1996 effects: cfa_temp.reg = <reg>
1997 cfa_temp.offset = <const_int>
2000 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
2001 effects: cfa_temp.reg = <reg1>
2002 cfa_temp.offset |= <const_int>
2005 (set <reg> (high <exp>))
2009 (set <reg> (lo_sum <exp> <const_int>))
2010 effects: cfa_temp.reg = <reg>
2011 cfa_temp.offset = <const_int>
2014 (set (mem (pre_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
2015 effects: cfa_store.offset -= <const_int>
2016 cfa.offset = cfa_store.offset if cfa.reg == sp
2018 cfa.base_offset = -cfa_store.offset
2021 (set (mem ({pre_inc,pre_dec} sp:cfa_store.reg)) <reg>)
2022 effects: cfa_store.offset += -/+ mode_size(mem)
2023 cfa.offset = cfa_store.offset if cfa.reg == sp
2025 cfa.base_offset = -cfa_store.offset
2028 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
2031 effects: cfa.reg = <reg1>
2032 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
2035 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
2036 effects: cfa.reg = <reg1>
2037 cfa.base_offset = -{cfa_store,cfa_temp}.offset
2040 (set (mem (postinc <reg1>:cfa_temp <const_int>)) <reg2>)
2041 effects: cfa.reg = <reg1>
2042 cfa.base_offset = -cfa_temp.offset
2043 cfa_temp.offset -= mode_size(mem)
2046 (set <reg> {unspec, unspec_volatile})
2047 effects: target-dependent
2050 (set sp (and: sp <const_int>))
2051 constraints: cfa_store.reg == sp
2052 effects: current_fde.stack_realign = 1
2053 cfa_store.offset = 0
2054 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
2057 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
2058 effects: cfa_store.offset += -/+ mode_size(mem)
2061 (set (mem ({pre_inc, pre_dec} sp)) fp)
2062 constraints: fde->stack_realign == 1
2063 effects: cfa_store.offset = 0
2064 cfa.reg != HARD_FRAME_POINTER_REGNUM
2067 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
2068 constraints: fde->stack_realign == 1
2070 && cfa.indirect == 0
2071 && cfa.reg != HARD_FRAME_POINTER_REGNUM
2072 effects: Use DW_CFA_def_cfa_expression to define cfa
2073 cfa.reg == fde->drap_reg
2076 (set reg fde->drap_reg)
2077 constraints: fde->vdrap_reg == INVALID_REGNUM
2078 effects: fde->vdrap_reg = reg.
2079 (set mem fde->drap_reg)
2080 constraints: fde->drap_reg_saved == 1
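   Worked example (illustrative only; the initial CFA offset is hypothetical,
   assuming a downward-growing stack whose call insn pushes the return
   address, so the CFA starts out as sp + 4):

     (set (mem (pre_dec (reg sp))) (reg fp))
	Rules 10-14: a register save; cfa_store.offset grows by the size of
	the store, the slot is recorded at its offset from the CFA, and,
	while cfa.reg is still sp, cfa.offset follows cfa_store.offset.
     (set (reg fp) (reg sp))
	Rules 1-4: the CFA register is copied, so cfa.reg becomes fp while
	cfa.offset is unchanged.
     (set (reg sp) (plus (reg sp) (const_int -16)))
	Rule 2: cfa.reg is no longer sp, so only cfa_store.offset is
	adjusted; the CFA itself still names the same address.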
2084 dwarf2out_frame_debug_expr (rtx expr, const char *label)
2086 rtx src, dest, span;
2087 HOST_WIDE_INT offset;
2090 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
2091 the PARALLEL independently. The first element is always processed if
2092 it is a SET. This is for backward compatibility. Other elements
2093 are processed only if they are SETs and the RTX_FRAME_RELATED_P
2094 flag is set in them. */
2095 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
2098 int limit = XVECLEN (expr, 0);
2101 /* PARALLELs have strict read-modify-write semantics, so we
2102 ought to evaluate every rvalue before changing any lvalue.
2103 It's cumbersome to do that in general, but there's an
2104 easy approximation that is enough for all current users:
2105 handle register saves before register assignments. */
2106 if (GET_CODE (expr) == PARALLEL)
2107 for (par_index = 0; par_index < limit; par_index++)
2109 elem = XVECEXP (expr, 0, par_index);
2110 if (GET_CODE (elem) == SET
2111 && MEM_P (SET_DEST (elem))
2112 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
2113 dwarf2out_frame_debug_expr (elem, label);
2116 for (par_index = 0; par_index < limit; par_index++)
2118 elem = XVECEXP (expr, 0, par_index);
2119 if (GET_CODE (elem) == SET
2120 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
2121 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
2122 dwarf2out_frame_debug_expr (elem, label);
2123 else if (GET_CODE (elem) == SET
2125 && !RTX_FRAME_RELATED_P (elem))
2127 /* Stack adjustment combining might combine some post-prologue
2128 stack adjustment into a prologue stack adjustment. */
2129 HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);
2132 dwarf2out_args_size_adjust (offset, label);
2138 gcc_assert (GET_CODE (expr) == SET);
2140 src = SET_SRC (expr);
2141 dest = SET_DEST (expr);
2145 rtx rsi = reg_saved_in (src);
2150 fde = current_fde ();
2152 if (GET_CODE (src) == REG
2154 && fde->drap_reg == REGNO (src)
2155 && (fde->drap_reg_saved
2156 || GET_CODE (dest) == REG))
2159 /* If we are saving the dynamic realign argument pointer to a
2160 register, the destination is the virtual dynamic realign
2161 argument pointer. It may be used to access arguments. */
2162 if (GET_CODE (dest) == REG)
2164 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2165 fde->vdrap_reg = REGNO (dest);
2170 switch (GET_CODE (dest))
2173 switch (GET_CODE (src))
2175 /* Setting FP from SP. */
2177 if (cfa.reg == (unsigned) REGNO (src))
2180 /* Update the CFA rule wrt SP or FP. Make sure src is
2181 relative to the current CFA register.
2183 We used to require that dest be either SP or FP, but the
2184 ARM copies SP to a temporary register, and from there to
2185 FP. So we just rely on the backends to only set
2186 RTX_FRAME_RELATED_P on appropriate insns. */
2187 cfa.reg = REGNO (dest);
2188 cfa_temp.reg = cfa.reg;
2189 cfa_temp.offset = cfa.offset;
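	  /* For instance, a prologue copy such as (set fp sp), or a copy
	     routed through a temporary register as on ARM, ends up here;
	     the def_cfa_1 call at the end of this function then emits the
	     DW_CFA_def_cfa / DW_CFA_def_cfa_register needed to describe
	     the new CFA register.  */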
2193 /* Saving a register in a register. */
2194 gcc_assert (!fixed_regs [REGNO (dest)]
2195 /* For the SPARC and its register window. */
2196 || (DWARF_FRAME_REGNUM (REGNO (src))
2197 == DWARF_FRAME_RETURN_COLUMN));
2199 /* After the stack is aligned, we can only save SP in FP
2200 if the drap register is used.  In this case, we have
2201 to restore the stack pointer with the CFA value and we
2202 don't generate this DWARF information.  */
2204 && fde->stack_realign
2205 && REGNO (src) == STACK_POINTER_REGNUM)
2206 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
2207 && fde->drap_reg != INVALID_REGNUM
2208 && cfa.reg != REGNO (src));
2210 queue_reg_save (label, src, dest, 0);
2217 if (dest == stack_pointer_rtx)
2221 switch (GET_CODE (XEXP (src, 1)))
2224 offset = INTVAL (XEXP (src, 1));
2227 gcc_assert ((unsigned) REGNO (XEXP (src, 1))
2229 offset = cfa_temp.offset;
2235 if (XEXP (src, 0) == hard_frame_pointer_rtx)
2237 /* Restoring SP from FP in the epilogue. */
2238 gcc_assert (cfa.reg == (unsigned) HARD_FRAME_POINTER_REGNUM);
2239 cfa.reg = STACK_POINTER_REGNUM;
2241 else if (GET_CODE (src) == LO_SUM)
2242 /* Assume we've set the source reg of the LO_SUM from sp. */
2245 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
2247 if (GET_CODE (src) != MINUS)
2249 if (cfa.reg == STACK_POINTER_REGNUM)
2250 cfa.offset += offset;
2251 if (cfa_store.reg == STACK_POINTER_REGNUM)
2252 cfa_store.offset += offset;
2254 else if (dest == hard_frame_pointer_rtx)
2257 /* Either setting the FP from an offset of the SP,
2258 or adjusting the FP.  */
2259 gcc_assert (frame_pointer_needed);
2261 gcc_assert (REG_P (XEXP (src, 0))
2262 && (unsigned) REGNO (XEXP (src, 0)) == cfa.reg
2263 && GET_CODE (XEXP (src, 1)) == CONST_INT);
2264 offset = INTVAL (XEXP (src, 1));
2265 if (GET_CODE (src) != MINUS)
2267 cfa.offset += offset;
2268 cfa.reg = HARD_FRAME_POINTER_REGNUM;
2272 gcc_assert (GET_CODE (src) != MINUS);
2275 if (REG_P (XEXP (src, 0))
2276 && REGNO (XEXP (src, 0)) == cfa.reg
2277 && GET_CODE (XEXP (src, 1)) == CONST_INT)
2279 /* Setting a temporary CFA register that will be copied
2280 into the FP later on. */
2281 offset = - INTVAL (XEXP (src, 1));
2282 cfa.offset += offset;
2283 cfa.reg = REGNO (dest);
2284 /* Or used to save regs to the stack. */
2285 cfa_temp.reg = cfa.reg;
2286 cfa_temp.offset = cfa.offset;
2290 else if (REG_P (XEXP (src, 0))
2291 && REGNO (XEXP (src, 0)) == cfa_temp.reg
2292 && XEXP (src, 1) == stack_pointer_rtx)
2294 /* Setting a scratch register that we will use instead
2295 of SP for saving registers to the stack. */
2296 gcc_assert (cfa.reg == STACK_POINTER_REGNUM);
2297 cfa_store.reg = REGNO (dest);
2298 cfa_store.offset = cfa.offset - cfa_temp.offset;
2302 else if (GET_CODE (src) == LO_SUM
2303 && GET_CODE (XEXP (src, 1)) == CONST_INT)
2305 cfa_temp.reg = REGNO (dest);
2306 cfa_temp.offset = INTVAL (XEXP (src, 1));
2315 cfa_temp.reg = REGNO (dest);
2316 cfa_temp.offset = INTVAL (src);
2321 gcc_assert (REG_P (XEXP (src, 0))
2322 && (unsigned) REGNO (XEXP (src, 0)) == cfa_temp.reg
2323 && GET_CODE (XEXP (src, 1)) == CONST_INT);
2325 if ((unsigned) REGNO (dest) != cfa_temp.reg)
2326 cfa_temp.reg = REGNO (dest);
2327 cfa_temp.offset |= INTVAL (XEXP (src, 1));
2330 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
2331 which will fill in all of the bits. */
2338 case UNSPEC_VOLATILE:
2339 gcc_assert (targetm.dwarf_handle_frame_unspec);
2340 targetm.dwarf_handle_frame_unspec (label, expr, XINT (src, 1));
2345 /* If this AND operation happens on the stack pointer in the prologue,
2346 we assume the stack is realigned and we extract the alignment.  */
2348 if (fde && XEXP (src, 0) == stack_pointer_rtx)
2350 gcc_assert (cfa_store.reg == REGNO (XEXP (src, 0)));
2351 fde->stack_realign = 1;
2352 fde->stack_realignment = INTVAL (XEXP (src, 1));
2353 cfa_store.offset = 0;
2355 if (cfa.reg != STACK_POINTER_REGNUM
2356 && cfa.reg != HARD_FRAME_POINTER_REGNUM)
2357 fde->drap_reg = cfa.reg;
2365 def_cfa_1 (label, &cfa);
2370 /* Saving a register to the stack.  Make sure dest is relative to the CFA register.  */
2372 switch (GET_CODE (XEXP (dest, 0)))
2377 /* We can't handle variable size modifications. */
2378 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
2380 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
2382 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
2383 && cfa_store.reg == STACK_POINTER_REGNUM);
2385 cfa_store.offset += offset;
2386 if (cfa.reg == STACK_POINTER_REGNUM)
2387 cfa.offset = cfa_store.offset;
2389 offset = -cfa_store.offset;
2395 offset = GET_MODE_SIZE (GET_MODE (dest));
2396 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
2399 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
2400 == STACK_POINTER_REGNUM)
2401 && cfa_store.reg == STACK_POINTER_REGNUM);
2403 cfa_store.offset += offset;
2405 /* Rule 18: If the stack is aligned, we will use FP as a
2406 reference to represent the address of the stored value.  */
2409 && fde->stack_realign
2410 && src == hard_frame_pointer_rtx)
2412 gcc_assert (cfa.reg != HARD_FRAME_POINTER_REGNUM);
2413 cfa_store.offset = 0;
2416 if (cfa.reg == STACK_POINTER_REGNUM)
2417 cfa.offset = cfa_store.offset;
2419 offset = -cfa_store.offset;
2423 /* With an offset. */
2430 gcc_assert (GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT
2431 && REG_P (XEXP (XEXP (dest, 0), 0)));
2432 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
2433 if (GET_CODE (XEXP (dest, 0)) == MINUS)
2436 regno = REGNO (XEXP (XEXP (dest, 0), 0));
2438 if (cfa_store.reg == (unsigned) regno)
2439 offset -= cfa_store.offset;
2442 gcc_assert (cfa_temp.reg == (unsigned) regno);
2443 offset -= cfa_temp.offset;
2449 /* Without an offset. */
2452 int regno = REGNO (XEXP (dest, 0));
2454 if (cfa_store.reg == (unsigned) regno)
2455 offset = -cfa_store.offset;
2458 gcc_assert (cfa_temp.reg == (unsigned) regno);
2459 offset = -cfa_temp.offset;
2466 gcc_assert (cfa_temp.reg
2467 == (unsigned) REGNO (XEXP (XEXP (dest, 0), 0)));
2468 offset = -cfa_temp.offset;
2469 cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
2477 /* If the source operand of this MEM operation is not a
2478 register, the source is basically the return address.  Here
2479 we only care how much the stack grew; we don't save it.  */
2483 if (REGNO (src) != STACK_POINTER_REGNUM
2484 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
2485 && (unsigned) REGNO (src) == cfa.reg)
2487 /* We're storing the current CFA reg into the stack. */
2489 if (cfa.offset == 0)
2492 /* If the stack is aligned, putting the CFA reg into the stack means
2493 we can no longer use reg + offset to represent the CFA.
2494 Here we use DW_CFA_def_cfa_expression instead.  The
2495 result of this expression equals the original CFA value.  */
2498 && fde->stack_realign
2499 && cfa.indirect == 0
2500 && cfa.reg != HARD_FRAME_POINTER_REGNUM)
2502 dw_cfa_location cfa_exp;
2504 gcc_assert (fde->drap_reg == cfa.reg);
2506 cfa_exp.indirect = 1;
2507 cfa_exp.reg = HARD_FRAME_POINTER_REGNUM;
2508 cfa_exp.base_offset = offset;
2511 fde->drap_reg_saved = 1;
2513 def_cfa_1 (label, &cfa_exp);
2517 /* If the source register is exactly the CFA, assume
2518 we're saving SP like any other register; this happens
2520 def_cfa_1 (label, &cfa);
2521 queue_reg_save (label, stack_pointer_rtx, NULL_RTX, offset);
2526 /* Otherwise, we'll need to look in the stack to
2527 calculate the CFA. */
2528 rtx x = XEXP (dest, 0);
2532 gcc_assert (REG_P (x));
2534 cfa.reg = REGNO (x);
2535 cfa.base_offset = offset;
2537 def_cfa_1 (label, &cfa);
2542 def_cfa_1 (label, &cfa);
2544 span = targetm.dwarf_register_span (src);
2547 queue_reg_save (label, src, NULL_RTX, offset);
2550 /* We have a PARALLEL describing where the contents of SRC
2551 live.  Queue register saves for each piece of the PARALLEL.  */
2555 HOST_WIDE_INT span_offset = offset;
2557 gcc_assert (GET_CODE (span) == PARALLEL);
2559 limit = XVECLEN (span, 0);
2560 for (par_index = 0; par_index < limit; par_index++)
2562 rtx elem = XVECEXP (span, 0, par_index);
2564 queue_reg_save (label, elem, NULL_RTX, span_offset);
2565 span_offset += GET_MODE_SIZE (GET_MODE (elem));
2576 /* Record call frame debugging information for INSN, which either
2577 sets SP or FP (adjusting how we calculate the frame address) or saves a
2578 register to the stack. If INSN is NULL_RTX, initialize our state.
2580 If AFTER_P is false, we're being called before the insn is emitted,
2581 otherwise after. Call instructions get invoked twice. */
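/* A rough sketch of the expected calling pattern, inferred from the
   interface above: the caller first invokes this with INSN == NULL_RTX
   once per function to reset the CFA, cfa_store/cfa_temp and the
   saved-register bookkeeping, and then invokes it for each insn as it is
   output (before or after emission according to AFTER_P, and twice for
   call insns), so the queued register saves and CFA updates track the
   instruction stream.  */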
2584 dwarf2out_frame_debug (rtx insn, bool after_p)
2588 bool handled_one = false;
2590 if (insn == NULL_RTX)
2594 /* Flush any queued register saves. */
2595 flush_queued_reg_saves ();
2597 /* Set up state for generating call frame debug info. */
2600 == (unsigned long)DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM));
2602 cfa.reg = STACK_POINTER_REGNUM;
2605 cfa_temp.offset = 0;
2607 for (i = 0; i < num_regs_saved_in_regs; i++)
2609 regs_saved_in_regs[i].orig_reg = NULL_RTX;
2610 regs_saved_in_regs[i].saved_in_reg = NULL_RTX;
2612 num_regs_saved_in_regs = 0;
2614 if (barrier_args_size)
2616 XDELETEVEC (barrier_args_size);
2617 barrier_args_size = NULL;
2622 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2623 flush_queued_reg_saves ();
2625 if (! RTX_FRAME_RELATED_P (insn))
2627 if (!ACCUMULATE_OUTGOING_ARGS)
2628 dwarf2out_stack_adjust (insn, after_p);
2632 label = dwarf2out_cfi_label (false);
2634 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2635 switch (REG_NOTE_KIND (note))
2637 case REG_FRAME_RELATED_EXPR:
2638 insn = XEXP (note, 0);
2641 case REG_CFA_DEF_CFA:
2642 dwarf2out_frame_debug_def_cfa (XEXP (note, 0), label);
2646 case REG_CFA_ADJUST_CFA:
2651 if (GET_CODE (n) == PARALLEL)
2652 n = XVECEXP (n, 0, 0);
2654 dwarf2out_frame_debug_adjust_cfa (n, label);
2658 case REG_CFA_OFFSET:
2661 n = single_set (insn);
2662 dwarf2out_frame_debug_cfa_offset (n, label);
2666 case REG_CFA_REGISTER:
2671 if (GET_CODE (n) == PARALLEL)
2672 n = XVECEXP (n, 0, 0);
2674 dwarf2out_frame_debug_cfa_register (n, label);
2678 case REG_CFA_RESTORE:
2683 if (GET_CODE (n) == PARALLEL)
2684 n = XVECEXP (n, 0, 0);
2687 dwarf2out_frame_debug_cfa_restore (n, label);
2697 insn = PATTERN (insn);
2699 dwarf2out_frame_debug_expr (insn, label);
2702 /* Determine if we need to save and restore CFI information around this
2703 epilogue.  If we do need to save/restore, then emit the save now, and
2704 insert a NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the
2705 stream.  */
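/* In CFI terms this pairs a DW_CFA_remember_state emitted at the start of
   the epilogue with a DW_CFA_restore_state emitted at the inserted note
   (see dwarf2out_frame_debug_restore_state below).  When emitting via
   assembler directives, the same pair would appear as, illustratively:

	.cfi_remember_state
	  ... epilogue CFI ...
	.cfi_restore_state
   */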
2708 dwarf2out_begin_epilogue (rtx insn)
2710 bool saw_frp = false;
2714 /* Scan forward to the return insn, noticing if there are possible
2715 frame related insns. */
2716 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
2721 /* Look for both regular and sibcalls to end the block. */
2722 if (returnjump_p (i))
2724 if (CALL_P (i) && SIBLING_CALL_P (i))
2727 if (RTX_FRAME_RELATED_P (i))
2731 /* If the port doesn't emit epilogue unwind info, we don't need a
2732 save/restore pair. */
2736 /* Otherwise, search forward to see if the return insn was in the last
2737 basic block of the function.  If so, we don't need save/restore.  */
2738 gcc_assert (i != NULL);
2739 i = next_real_insn (i);
2743 /* Insert the restore before that next real insn in the stream, and before
2744 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2745 properly nested. This should be after any label or alignment. This
2746 will be pushed into the CFI stream by the function below. */
2749 rtx p = PREV_INSN (i);
2752 if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
2756 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);
2758 /* Emit the state save. */
2760 cfi->dw_cfi_opc = DW_CFA_remember_state;
2761 add_fde_cfi (dwarf2out_cfi_label (false), cfi);
2763 /* And emulate the state save. */
2764 gcc_assert (!cfa_remember.in_use);
2766 cfa_remember.in_use = 1;
2769 /* A "subroutine" of dwarf2out_begin_epilogue. Emit the restore required. */
2772 dwarf2out_frame_debug_restore_state (void)
2774 dw_cfi_ref cfi = new_cfi ();
2775 const char *label = dwarf2out_cfi_label (false);
2777 cfi->dw_cfi_opc = DW_CFA_restore_state;
2778 add_fde_cfi (label, cfi);
2780 gcc_assert (cfa_remember.in_use);
2782 cfa_remember.in_use = 0;
2787 /* Describe for the GTY machinery what parts of dw_cfi_oprnd1 are used. */
2788 static enum dw_cfi_oprnd_type dw_cfi_oprnd1_desc
2789 (enum dwarf_call_frame_info cfi);
2791 static enum dw_cfi_oprnd_type
2792 dw_cfi_oprnd1_desc (enum dwarf_call_frame_info cfi)
2797 case DW_CFA_GNU_window_save:
2798 case DW_CFA_remember_state:
2799 case DW_CFA_restore_state:
2800 return dw_cfi_oprnd_unused;
2802 case DW_CFA_set_loc:
2803 case DW_CFA_advance_loc1:
2804 case DW_CFA_advance_loc2:
2805 case DW_CFA_advance_loc4:
2806 case DW_CFA_MIPS_advance_loc8:
2807 return dw_cfi_oprnd_addr;
2810 case DW_CFA_offset_extended:
2811 case DW_CFA_def_cfa:
2812 case DW_CFA_offset_extended_sf:
2813 case DW_CFA_def_cfa_sf:
2814 case DW_CFA_restore:
2815 case DW_CFA_restore_extended:
2816 case DW_CFA_undefined:
2817 case DW_CFA_same_value:
2818 case DW_CFA_def_cfa_register:
2819 case DW_CFA_register:
2820 return dw_cfi_oprnd_reg_num;
2822 case DW_CFA_def_cfa_offset:
2823 case DW_CFA_GNU_args_size:
2824 case DW_CFA_def_cfa_offset_sf:
2825 return dw_cfi_oprnd_offset;
2827 case DW_CFA_def_cfa_expression:
2828 case DW_CFA_expression:
2829 return dw_cfi_oprnd_loc;
2836 /* Describe for the GTY machinery what parts of dw_cfi_oprnd2 are used. */
2837 static enum dw_cfi_oprnd_type dw_cfi_oprnd2_desc
2838 (enum dwarf_call_frame_info cfi);
2840 static enum dw_cfi_oprnd_type
2841 dw_cfi_oprnd2_desc (enum dwarf_call_frame_info cfi)
2845 case DW_CFA_def_cfa:
2846 case DW_CFA_def_cfa_sf:
2848 case DW_CFA_offset_extended_sf:
2849 case DW_CFA_offset_extended:
2850 return dw_cfi_oprnd_offset;
2852 case DW_CFA_register:
2853 return dw_cfi_oprnd_reg_num;
2856 return dw_cfi_oprnd_unused;
2860 #if defined (DWARF2_DEBUGGING_INFO) || defined (DWARF2_UNWIND_INFO)
2862 /* Switch to eh_frame_section. If we don't have an eh_frame_section,
2863 switch to the data section instead, and write out a synthetic label to guide collect2.  */
2867 switch_to_eh_frame_section (void)
2871 #ifdef EH_FRAME_SECTION_NAME
2872 if (eh_frame_section == 0)
2876 if (EH_TABLES_CAN_BE_READ_ONLY)
2882 fde_encoding = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1,
2884 per_encoding = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,
2886 lsda_encoding = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,
2888 flags = ((! flag_pic
2889 || ((fde_encoding & 0x70) != DW_EH_PE_absptr
2890 && (fde_encoding & 0x70) != DW_EH_PE_aligned
2891 && (per_encoding & 0x70) != DW_EH_PE_absptr
2892 && (per_encoding & 0x70) != DW_EH_PE_aligned
2893 && (lsda_encoding & 0x70) != DW_EH_PE_absptr
2894 && (lsda_encoding & 0x70) != DW_EH_PE_aligned))
2895 ? 0 : SECTION_WRITE);
2898 flags = SECTION_WRITE;
2899 eh_frame_section = get_section (EH_FRAME_SECTION_NAME, flags, NULL);
2903 if (eh_frame_section)
2904 switch_to_section (eh_frame_section);
2907 /* We have no special eh_frame section. Put the information in
2908 the data section and emit special labels to guide collect2. */
2909 switch_to_section (data_section);
2910 label = get_file_function_name ("F");
2911 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (PTR_SIZE));
2912 targetm.asm_out.globalize_label (asm_out_file,
2913 IDENTIFIER_POINTER (label));
2914 ASM_OUTPUT_LABEL (asm_out_file, IDENTIFIER_POINTER (label));
2918 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
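/* Illustrative example (hypothetical target values): with
   DWARF_CIE_DATA_ALIGNMENT == -4, a register saved at CFA-8 has
   OFF == -8, and this returns the factored value 2 that is actually
   emitted in the CFI.  */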
2920 static HOST_WIDE_INT
2921 div_data_align (HOST_WIDE_INT off)
2923 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
2924 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
2928 /* Output a Call Frame Information opcode and its operand(s). */
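/* DW_CFA_advance_loc, DW_CFA_offset and DW_CFA_restore are the DWARF
   "primary" opcodes: their first operand lives in the low six bits of the
   opcode byte itself, which is why the three special cases below OR the
   operand into the opcode under a 0x3f mask.  */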
2931 output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
2936 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
2937 dw2_asm_output_data (1, (cfi->dw_cfi_opc
2938 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
2939 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
2940 ((unsigned HOST_WIDE_INT)
2941 cfi->dw_cfi_oprnd1.dw_cfi_offset));
2942 else if (cfi->dw_cfi_opc == DW_CFA_offset)
2944 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2945 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
2946 "DW_CFA_offset, column 0x%lx", r);
2947 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2948 dw2_asm_output_data_uleb128 (off, NULL);
2950 else if (cfi->dw_cfi_opc == DW_CFA_restore)
2952 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2953 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
2954 "DW_CFA_restore, column 0x%lx", r);
2958 dw2_asm_output_data (1, cfi->dw_cfi_opc,
2959 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
2961 switch (cfi->dw_cfi_opc)
2963 case DW_CFA_set_loc:
2965 dw2_asm_output_encoded_addr_rtx (
2966 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
2967 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
2970 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
2971 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
2972 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2975 case DW_CFA_advance_loc1:
2976 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2977 fde->dw_fde_current_label, NULL);
2978 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2981 case DW_CFA_advance_loc2:
2982 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2983 fde->dw_fde_current_label, NULL);
2984 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2987 case DW_CFA_advance_loc4:
2988 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2989 fde->dw_fde_current_label, NULL);
2990 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2993 case DW_CFA_MIPS_advance_loc8:
2994 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2995 fde->dw_fde_current_label, NULL);
2996 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2999 case DW_CFA_offset_extended:
3000 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3001 dw2_asm_output_data_uleb128 (r, NULL);
3002 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3003 dw2_asm_output_data_uleb128 (off, NULL);
3006 case DW_CFA_def_cfa:
3007 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3008 dw2_asm_output_data_uleb128 (r, NULL);
3009 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
3012 case DW_CFA_offset_extended_sf:
3013 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3014 dw2_asm_output_data_uleb128 (r, NULL);
3015 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3016 dw2_asm_output_data_sleb128 (off, NULL);
3019 case DW_CFA_def_cfa_sf:
3020 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3021 dw2_asm_output_data_uleb128 (r, NULL);
3022 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3023 dw2_asm_output_data_sleb128 (off, NULL);
3026 case DW_CFA_restore_extended:
3027 case DW_CFA_undefined:
3028 case DW_CFA_same_value:
3029 case DW_CFA_def_cfa_register:
3030 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3031 dw2_asm_output_data_uleb128 (r, NULL);
3034 case DW_CFA_register:
3035 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3036 dw2_asm_output_data_uleb128 (r, NULL);
3037 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
3038 dw2_asm_output_data_uleb128 (r, NULL);
3041 case DW_CFA_def_cfa_offset:
3042 case DW_CFA_GNU_args_size:
3043 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
3046 case DW_CFA_def_cfa_offset_sf:
3047 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3048 dw2_asm_output_data_sleb128 (off, NULL);
3051 case DW_CFA_GNU_window_save:
3054 case DW_CFA_def_cfa_expression:
3055 case DW_CFA_expression:
3056 output_cfa_loc (cfi);
3059 case DW_CFA_GNU_negative_offset_extended:
3060 /* Obsoleted by DW_CFA_offset_extended_sf. */
3069 /* Similar, but do it via assembler directives instead. */
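/* As an illustration (register numbers and offsets are target-specific
   and purely hypothetical here), the cases below produce directives such
   as:

	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	.cfi_def_cfa_register 6

   carrying the same information that output_cfi above encodes in binary
   form.  */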
3072 output_cfi_directive (dw_cfi_ref cfi)
3074 unsigned long r, r2;
3076 switch (cfi->dw_cfi_opc)
3078 case DW_CFA_advance_loc:
3079 case DW_CFA_advance_loc1:
3080 case DW_CFA_advance_loc2:
3081 case DW_CFA_advance_loc4:
3082 case DW_CFA_MIPS_advance_loc8:
3083 case DW_CFA_set_loc:
3084 /* Should only be created by add_fde_cfi in a code path not
3085 followed when emitting via directives. The assembler is
3086 going to take care of this for us. */
3090 case DW_CFA_offset_extended:
3091 case DW_CFA_offset_extended_sf:
3092 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3093 fprintf (asm_out_file, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
3094 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3097 case DW_CFA_restore:
3098 case DW_CFA_restore_extended:
3099 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3100 fprintf (asm_out_file, "\t.cfi_restore %lu\n", r);
3103 case DW_CFA_undefined:
3104 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3105 fprintf (asm_out_file, "\t.cfi_undefined %lu\n", r);
3108 case DW_CFA_same_value:
3109 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3110 fprintf (asm_out_file, "\t.cfi_same_value %lu\n", r);
3113 case DW_CFA_def_cfa:
3114 case DW_CFA_def_cfa_sf:
3115 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3116 fprintf (asm_out_file, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
3117 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3120 case DW_CFA_def_cfa_register:
3121 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3122 fprintf (asm_out_file, "\t.cfi_def_cfa_register %lu\n", r);
3125 case DW_CFA_register:
3126 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3127 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
3128 fprintf (asm_out_file, "\t.cfi_register %lu, %lu\n", r, r2);
3131 case DW_CFA_def_cfa_offset:
3132 case DW_CFA_def_cfa_offset_sf:
3133 fprintf (asm_out_file, "\t.cfi_def_cfa_offset "
3134 HOST_WIDE_INT_PRINT_DEC"\n",
3135 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3138 case DW_CFA_remember_state:
3139 fprintf (asm_out_file, "\t.cfi_remember_state\n");
3141 case DW_CFA_restore_state:
3142 fprintf (asm_out_file, "\t.cfi_restore_state\n");
3145 case DW_CFA_GNU_args_size:
3146 fprintf (asm_out_file, "\t.cfi_escape 0x%x,", DW_CFA_GNU_args_size);
3147 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3149 fprintf (asm_out_file, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
3150 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
3151 fputc ('\n', asm_out_file);
3154 case DW_CFA_GNU_window_save:
3155 fprintf (asm_out_file, "\t.cfi_window_save\n");
3158 case DW_CFA_def_cfa_expression:
3159 case DW_CFA_expression:
3160 fprintf (asm_out_file, "\t.cfi_escape 0x%x,", cfi->dw_cfi_opc);
3161 output_cfa_loc_raw (cfi);
3162 fputc ('\n', asm_out_file);
3170 /* Output the call frame information used to record how the frame
3171 pointer is calculated and the locations of the saved
3172 registers.  */
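/* Rough shape of what is emitted below, for either .debug_frame or
   .eh_frame (FOR_EH): one CIE (length, CIE id, version, augmentation
   string, alignment factors, return-address column, initial CFIs,
   padding), followed by one FDE per function (length, CIE pointer,
   initial location, address range, optional augmentation/LSDA data,
   the per-function CFIs, padding).  */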
3175 output_call_frame_info (int for_eh)
3180 char l1[20], l2[20], section_start_label[20];
3181 bool any_lsda_needed = false;
3182 char augmentation[6];
3183 int augmentation_size;
3184 int fde_encoding = DW_EH_PE_absptr;
3185 int per_encoding = DW_EH_PE_absptr;
3186 int lsda_encoding = DW_EH_PE_absptr;
3189 /* Don't emit a CIE if there won't be any FDEs. */
3190 if (fde_table_in_use == 0)
3193 /* Nothing to do if the assembler's doing it all. */
3194 if (dwarf2out_do_cfi_asm ())
3197 /* If we make FDEs linkonce, we may have to emit an empty label for
3198 an FDE that wouldn't otherwise be emitted. We want to avoid
3199 having an FDE kept around when the function it refers to is
3200 discarded. Example where this matters: a primary function
3201 template in C++ requires EH information, but an explicit
3202 specialization doesn't. */
3203 if (TARGET_USES_WEAK_UNWIND_INFO
3204 && ! flag_asynchronous_unwind_tables
3207 for (i = 0; i < fde_table_in_use; i++)
3208 if ((fde_table[i].nothrow || fde_table[i].all_throwers_are_sibcalls)
3209 && !fde_table[i].uses_eh_lsda
3210 && ! DECL_WEAK (fde_table[i].decl))
3211 targetm.asm_out.unwind_label (asm_out_file, fde_table[i].decl,
3212 for_eh, /* empty */ 1);
3214 /* If we don't have any functions we'll want to unwind out of, don't
3215 emit any EH unwind information. Note that if exceptions aren't
3216 enabled, we won't have collected nothrow information, and if we
3217 asked for asynchronous tables, we always want this info. */
3220 bool any_eh_needed = !flag_exceptions || flag_asynchronous_unwind_tables;
3222 for (i = 0; i < fde_table_in_use; i++)
3223 if (fde_table[i].uses_eh_lsda)
3224 any_eh_needed = any_lsda_needed = true;
3225 else if (TARGET_USES_WEAK_UNWIND_INFO && DECL_WEAK (fde_table[i].decl))
3226 any_eh_needed = true;
3227 else if (! fde_table[i].nothrow
3228 && ! fde_table[i].all_throwers_are_sibcalls)
3229 any_eh_needed = true;
3231 if (! any_eh_needed)
3235 /* We're going to be generating comments, so turn on app. */
3240 switch_to_eh_frame_section ();
3243 if (!debug_frame_section)
3244 debug_frame_section = get_section (DEBUG_FRAME_SECTION,
3245 SECTION_DEBUG, NULL);
3246 switch_to_section (debug_frame_section);
3249 ASM_GENERATE_INTERNAL_LABEL (section_start_label, FRAME_BEGIN_LABEL, for_eh);
3250 ASM_OUTPUT_LABEL (asm_out_file, section_start_label);
3252 /* Output the CIE. */
3253 ASM_GENERATE_INTERNAL_LABEL (l1, CIE_AFTER_SIZE_LABEL, for_eh);
3254 ASM_GENERATE_INTERNAL_LABEL (l2, CIE_END_LABEL, for_eh);
3255 if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4 && !for_eh)
3256 dw2_asm_output_data (4, 0xffffffff,
3257 "Initial length escape value indicating 64-bit DWARF extension");
3258 dw2_asm_output_delta (for_eh ? 4 : DWARF_OFFSET_SIZE, l2, l1,
3259 "Length of Common Information Entry");
3260 ASM_OUTPUT_LABEL (asm_out_file, l1);
3262 /* Now that the CIE pointer is PC-relative for EH,
3263 use 0 to identify the CIE. */
3264 dw2_asm_output_data ((for_eh ? 4 : DWARF_OFFSET_SIZE),
3265 (for_eh ? 0 : DWARF_CIE_ID),
3266 "CIE Identifier Tag");
3268 dw2_asm_output_data (1, DW_CIE_VERSION, "CIE Version");
3270 augmentation[0] = 0;
3271 augmentation_size = 0;
3277 z Indicates that a uleb128 is present to size the
3278 augmentation section.
3279 L Indicates the encoding (and thus presence) of
3280 an LSDA pointer in the FDE augmentation.
3281 R Indicates a non-default pointer encoding for FDE code pointers.
3283 P Indicates the presence of an encoding + language
3284 personality routine in the CIE augmentation. */
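  /* When the personality, LSDA and FDE-encoding augmentations are all
     present, the string built below is typically the familiar "zPLR"
     (assuming the usual order in which the letters are appended).  */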
3286 fde_encoding = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0);
3287 per_encoding = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2, /*global=*/1);
3288 lsda_encoding = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/0);
3290 p = augmentation + 1;
3291 if (eh_personality_libfunc)
3294 augmentation_size += 1 + size_of_encoded_value (per_encoding);
3295 assemble_external_libcall (eh_personality_libfunc);
3297 if (any_lsda_needed)
3300 augmentation_size += 1;
3302 if (fde_encoding != DW_EH_PE_absptr)
3305 augmentation_size += 1;
3307 if (p > augmentation + 1)
3309 augmentation[0] = 'z';
3313 /* Ug. Some platforms can't do unaligned dynamic relocations at all. */
3314 if (eh_personality_libfunc && per_encoding == DW_EH_PE_aligned)
3316 int offset = ( 4 /* Length */
3318 + 1 /* CIE version */
3319 + strlen (augmentation) + 1 /* Augmentation */
3320 + size_of_uleb128 (1) /* Code alignment */
3321 + size_of_sleb128 (DWARF_CIE_DATA_ALIGNMENT)
3323 + 1 /* Augmentation size */
3324 + 1 /* Personality encoding */ );
3325 int pad = -offset & (PTR_SIZE - 1);
3327 augmentation_size += pad;
3329 /* Augmentations should be small, so there's scarce need to
3330 iterate for a solution. Die if we exceed one uleb128 byte. */
3331 gcc_assert (size_of_uleb128 (augmentation_size) == 1);
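	 /* Worked example with hypothetical numbers: if PTR_SIZE == 4 and
	    the fields above sum to offset == 11, then pad == (-11 & 3) == 1,
	    so one padding byte brings the personality pointer to a 4-byte
	    boundary.  */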
3335 dw2_asm_output_nstring (augmentation, -1, "CIE Augmentation");
3336 dw2_asm_output_data_uleb128 (1, "CIE Code Alignment Factor");
3337 dw2_asm_output_data_sleb128 (DWARF_CIE_DATA_ALIGNMENT,
3338 "CIE Data Alignment Factor");
3340 return_reg = DWARF2_FRAME_REG_OUT (DWARF_FRAME_RETURN_COLUMN, for_eh);
3341 if (DW_CIE_VERSION == 1)
3342 dw2_asm_output_data (1, return_reg, "CIE RA Column");
3344 dw2_asm_output_data_uleb128 (return_reg, "CIE RA Column");
3346 if (augmentation[0])
3348 dw2_asm_output_data_uleb128 (augmentation_size, "Augmentation size");
3349 if (eh_personality_libfunc)
3351 dw2_asm_output_data (1, per_encoding, "Personality (%s)",
3352 eh_data_format_name (per_encoding));
3353 dw2_asm_output_encoded_addr_rtx (per_encoding,
3354 eh_personality_libfunc,
3358 if (any_lsda_needed)
3359 dw2_asm_output_data (1, lsda_encoding, "LSDA Encoding (%s)",
3360 eh_data_format_name (lsda_encoding));
3362 if (fde_encoding != DW_EH_PE_absptr)
3363 dw2_asm_output_data (1, fde_encoding, "FDE Encoding (%s)",
3364 eh_data_format_name (fde_encoding));
3367 for (cfi = cie_cfi_head; cfi != NULL; cfi = cfi->dw_cfi_next)
3368 output_cfi (cfi, NULL, for_eh);
3370 /* Pad the CIE out to an address sized boundary. */
3371 ASM_OUTPUT_ALIGN (asm_out_file,
3372 floor_log2 (for_eh ? PTR_SIZE : DWARF2_ADDR_SIZE));
3373 ASM_OUTPUT_LABEL (asm_out_file, l2);
3375 /* Loop through all of the FDEs. */
3376 for (i = 0; i < fde_table_in_use; i++)
3378 fde = &fde_table[i];
3380 /* Don't emit EH unwind info for leaf functions that don't need it. */
3381 if (for_eh && !flag_asynchronous_unwind_tables && flag_exceptions
3382 && (fde->nothrow || fde->all_throwers_are_sibcalls)
3383 && ! (TARGET_USES_WEAK_UNWIND_INFO && DECL_WEAK (fde_table[i].decl))
3384 && !fde->uses_eh_lsda)
3387 targetm.asm_out.unwind_label (asm_out_file, fde->decl, for_eh, /* empty */ 0);
3388 targetm.asm_out.internal_label (asm_out_file, FDE_LABEL, for_eh + i * 2);
3389 ASM_GENERATE_INTERNAL_LABEL (l1, FDE_AFTER_SIZE_LABEL, for_eh + i * 2);
3390 ASM_GENERATE_INTERNAL_LABEL (l2, FDE_END_LABEL, for_eh + i * 2);
3391 if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4 && !for_eh)
3392 dw2_asm_output_data (4, 0xffffffff,
3393 "Initial length escape value indicating 64-bit DWARF extension");
3394 dw2_asm_output_delta (for_eh ? 4 : DWARF_OFFSET_SIZE, l2, l1,
3396 ASM_OUTPUT_LABEL (asm_out_file, l1);
3399 dw2_asm_output_delta (4, l1, section_start_label, "FDE CIE offset");
3401 dw2_asm_output_offset (DWARF_OFFSET_SIZE, section_start_label,
3402 debug_frame_section, "FDE CIE offset");
3406 if (fde->dw_fde_switched_sections)
3408 rtx sym_ref2 = gen_rtx_SYMBOL_REF (Pmode,
3409 fde->dw_fde_unlikely_section_label);
3410 rtx sym_ref3= gen_rtx_SYMBOL_REF (Pmode,
3411 fde->dw_fde_hot_section_label);
3412 SYMBOL_REF_FLAGS (sym_ref2) |= SYMBOL_FLAG_LOCAL;
3413 SYMBOL_REF_FLAGS (sym_ref3) |= SYMBOL_FLAG_LOCAL;
3414 dw2_asm_output_encoded_addr_rtx (fde_encoding, sym_ref3, false,
3415 "FDE initial location");
3416 dw2_asm_output_delta (size_of_encoded_value (fde_encoding),
3417 fde->dw_fde_hot_section_end_label,
3418 fde->dw_fde_hot_section_label,
3419 "FDE address range");
3420 dw2_asm_output_encoded_addr_rtx (fde_encoding, sym_ref2, false,
3421 "FDE initial location");
3422 dw2_asm_output_delta (size_of_encoded_value (fde_encoding),
3423 fde->dw_fde_unlikely_section_end_label,
3424 fde->dw_fde_unlikely_section_label,
3425 "FDE address range");
3429 rtx sym_ref = gen_rtx_SYMBOL_REF (Pmode, fde->dw_fde_begin);
3430 SYMBOL_REF_FLAGS (sym_ref) |= SYMBOL_FLAG_LOCAL;
3431 dw2_asm_output_encoded_addr_rtx (fde_encoding,
3434 "FDE initial location");
3435 dw2_asm_output_delta (size_of_encoded_value (fde_encoding),
3436 fde->dw_fde_end, fde->dw_fde_begin,
3437 "FDE address range");
3442 if (fde->dw_fde_switched_sections)
3444 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
3445 fde->dw_fde_hot_section_label,
3446 "FDE initial location");
3447 dw2_asm_output_delta (DWARF2_ADDR_SIZE,
3448 fde->dw_fde_hot_section_end_label,
3449 fde->dw_fde_hot_section_label,
3450 "FDE address range");
3451 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
3452 fde->dw_fde_unlikely_section_label,
3453 "FDE initial location");
3454 dw2_asm_output_delta (DWARF2_ADDR_SIZE,
3455 fde->dw_fde_unlikely_section_end_label,
3456 fde->dw_fde_unlikely_section_label,
3457 "FDE address range");
3461 dw2_asm_output_addr (DWARF2_ADDR_SIZE, fde->dw_fde_begin,
3462 "FDE initial location");
3463 dw2_asm_output_delta (DWARF2_ADDR_SIZE,
3464 fde->dw_fde_end, fde->dw_fde_begin,
3465 "FDE address range");
3469 if (augmentation[0])
3471 if (any_lsda_needed)
3473 int size = size_of_encoded_value (lsda_encoding);
3475 if (lsda_encoding == DW_EH_PE_aligned)
3477 int offset = ( 4 /* Length */
3478 + 4 /* CIE offset */
3479 + 2 * size_of_encoded_value (fde_encoding)
3480 + 1 /* Augmentation size */ );
3481 int pad = -offset & (PTR_SIZE - 1);
3484 gcc_assert (size_of_uleb128 (size) == 1);
3487 dw2_asm_output_data_uleb128 (size, "Augmentation size");
3489 if (fde->uses_eh_lsda)
3491 ASM_GENERATE_INTERNAL_LABEL (l1, "LLSDA",
3492 fde->funcdef_number);
3493 dw2_asm_output_encoded_addr_rtx (
3494 lsda_encoding, gen_rtx_SYMBOL_REF (Pmode, l1),
3495 false, "Language Specific Data Area");
3499 if (lsda_encoding == DW_EH_PE_aligned)
3500 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (PTR_SIZE));
3502 (size_of_encoded_value (lsda_encoding), 0,
3503 "Language Specific Data Area (none)");
3507 dw2_asm_output_data_uleb128 (0, "Augmentation size");
3510 /* Loop through the Call Frame Instructions associated with this FDE.  */
3512 fde->dw_fde_current_label = fde->dw_fde_begin;
3513 for (cfi = fde->dw_fde_cfi; cfi != NULL; cfi = cfi->dw_cfi_next)
3514 output_cfi (cfi, fde, for_eh);
3516 /* Pad the FDE out to an address sized boundary. */
3517 ASM_OUTPUT_ALIGN (asm_out_file,
3518 floor_log2 ((for_eh ? PTR_SIZE : DWARF2_ADDR_SIZE)));
3519 ASM_OUTPUT_LABEL (asm_out_file, l2);
3522 if (for_eh && targetm.terminate_dw2_eh_frame_info)
3523 dw2_asm_output_data (4, 0, "End of Table");
3524 #ifdef MIPS_DEBUGGING_INFO
3525 /* Work around Irix 6 assembler bug whereby labels at the end of a section
3526 get a value of 0. Putting .align 0 after the label fixes it. */
3527 ASM_OUTPUT_ALIGN (asm_out_file, 0);
3530 /* Turn off app to make assembly quicker. */