const char *dw_fde_unlikely_section_end_label;
dw_cfi_ref dw_fde_cfi;
dw_cfi_ref dw_fde_switch_cfi; /* Last CFI before switching sections. */
- unsigned funcdef_number;
HOST_WIDE_INT stack_realignment;
+ unsigned funcdef_number;
/* Dynamic realign argument pointer register. */
unsigned int drap_reg;
/* Virtual dynamic realign argument pointer register. */
unsigned int vdrap_reg;
+ /* These 3 flags are copied from rtl_data in function.h. */
unsigned all_throwers_are_sibcalls : 1;
- unsigned nothrow : 1;
unsigned uses_eh_lsda : 1;
+ unsigned nothrow : 1;
/* Whether we did stack realign in this call frame. */
unsigned stack_realign : 1;
/* Whether dynamic realign argument pointer register has been saved. */
j += 2;
}
+/* Return true if frame description entry FDE is needed for EH. */
+
+static bool
+fde_needed_for_eh_p (dw_fde_ref fde)
+{
+ /* Asynchronous unwind tables are wanted for every function, throwing
+    or not. */
+ if (flag_asynchronous_unwind_tables)
+ return true;
+
+ /* NOTE(review): weak functions appear to need unwind info
+    unconditionally here -- presumably because the definition may be
+    replaced at link time, making the local nothrow info untrustworthy. */
+ if (TARGET_USES_WEAK_UNWIND_INFO && DECL_WEAK (fde->decl))
+ return true;
+
+ /* The function has an EH language-specific data area. */
+ if (fde->uses_eh_lsda)
+ return true;
+
+ /* If exceptions are enabled, we have collected nothrow info. */
+ if (flag_exceptions && (fde->all_throwers_are_sibcalls || fde->nothrow))
+ return false;
+
+ /* Without usable nothrow info, conservatively keep the FDE. */
+ return true;
+}
+
/* Output the call frame information used to record information
that relates to calculating the frame pointer, and records the
location of saved registers. */
if (dwarf2out_do_cfi_asm ())
return;
- /* If we make FDEs linkonce, we may have to emit an empty label for
- an FDE that wouldn't otherwise be emitted. We want to avoid
- having an FDE kept around when the function it refers to is
- discarded. Example where this matters: a primary function
- template in C++ requires EH information, but an explicit
- specialization doesn't. */
- if (TARGET_USES_WEAK_UNWIND_INFO
- && ! flag_asynchronous_unwind_tables
- && flag_exceptions
- && for_eh)
- for (i = 0; i < fde_table_in_use; i++)
- if ((fde_table[i].nothrow || fde_table[i].all_throwers_are_sibcalls)
- && !fde_table[i].uses_eh_lsda
- && ! DECL_WEAK (fde_table[i].decl))
- targetm.asm_out.unwind_label (asm_out_file, fde_table[i].decl,
- for_eh, /* empty */ 1);
-
- /* If we don't have any functions we'll want to unwind out of, don't
- emit any EH unwind information. Note that if exceptions aren't
- enabled, we won't have collected nothrow information, and if we
- asked for asynchronous tables, we always want this info. */
+ /* If we don't have any functions we'll want to unwind out of, don't emit
+ any EH unwind information. If we make FDEs linkonce, we may have to
+ emit an empty label for an FDE that wouldn't otherwise be emitted. We
+ want to avoid having an FDE kept around when the function it refers to
+ is discarded. Example where this matters: a primary function template
+ in C++ requires EH information, an explicit specialization doesn't. */
if (for_eh)
{
- bool any_eh_needed = !flag_exceptions || flag_asynchronous_unwind_tables;
+ bool any_eh_needed = false;
for (i = 0; i < fde_table_in_use; i++)
if (fde_table[i].uses_eh_lsda)
any_eh_needed = any_lsda_needed = true;
- else if (TARGET_USES_WEAK_UNWIND_INFO && DECL_WEAK (fde_table[i].decl))
- any_eh_needed = true;
- else if (! fde_table[i].nothrow
- && ! fde_table[i].all_throwers_are_sibcalls)
+ else if (fde_needed_for_eh_p (&fde_table[i]))
any_eh_needed = true;
+ else if (TARGET_USES_WEAK_UNWIND_INFO)
+ targetm.asm_out.unwind_label (asm_out_file, fde_table[i].decl, 1, 1);
- if (! any_eh_needed)
+ if (!any_eh_needed)
return;
}
fde = &fde_table[i];
/* Don't emit EH unwind info for leaf functions that don't need it. */
- if (for_eh && !flag_asynchronous_unwind_tables && flag_exceptions
- && (fde->nothrow || fde->all_throwers_are_sibcalls)
- && ! (TARGET_USES_WEAK_UNWIND_INFO && DECL_WEAK (fde_table[i].decl))
- && !fde->uses_eh_lsda)
+ if (for_eh && !fde_needed_for_eh_p (fde))
continue;
for (k = 0; k < (fde->dw_fde_switched_sections ? 2 : 1); k++)
fde->dw_fde_cfi = NULL;
fde->dw_fde_switch_cfi = NULL;
fde->funcdef_number = current_function_funcdef_no;
- fde->nothrow = crtl->nothrow;
- fde->uses_eh_lsda = crtl->uses_eh_lsda;
fde->all_throwers_are_sibcalls = crtl->all_throwers_are_sibcalls;
+ fde->uses_eh_lsda = crtl->uses_eh_lsda;
+ fde->nothrow = crtl->nothrow;
fde->drap_reg = INVALID_REGNUM;
fde->vdrap_reg = INVALID_REGNUM;
if (flag_reorder_blocks_and_partition)
current_unit_personality = personality;
/* We cannot keep a current personality per function as without CFI
- asm at the point where we emit the CFI data there is no current
+ asm, at the point where we emit the CFI data, there is no current
function anymore. */
- if (personality
- && current_unit_personality != personality)
- sorry ("Multiple EH personalities are supported only with assemblers "
- "supporting .cfi.personality directive.");
+ if (personality && current_unit_personality != personality)
+ sorry ("multiple EH personalities are supported only with assemblers "
+ "supporting .cfi_personality directive");
}
}
static void gen_block_die (tree, dw_die_ref, int);
static void decls_for_scope (tree, dw_die_ref, int);
static int is_redundant_typedef (const_tree);
+static bool is_naming_typedef_decl (const_tree);
static inline dw_die_ref get_context_die (tree);
static void gen_namespace_die (tree, dw_die_ref);
static void gen_decl_die (tree, tree, dw_die_ref);
static void retry_incomplete_types (void);
static void gen_type_die_for_member (tree, tree, dw_die_ref);
static void gen_generic_params_dies (tree);
+static void gen_tagged_type_die (tree, dw_die_ref, enum debug_info_usage);
+static void gen_type_die_with_usage (tree, dw_die_ref, enum debug_info_usage);
static void splice_child_die (dw_die_ref, dw_die_ref);
static int file_info_cmp (const void *, const void *);
static dw_loc_list_ref new_loc_list (dw_loc_descr_ref, const char *,
/* Print the information collected for a given DIE. */
-void
+DEBUG_FUNCTION void
debug_dwarf_die (dw_die_ref die)
{
print_die (die, stderr);
/* Print all DWARF information collected for the compilation unit.
This routine is a debugging aid only. */
-void
+DEBUG_FUNCTION void
debug_dwarf (void)
{
print_indent = 0;
return TYPE_ALIGN (type);
}
+/* Similarly, but return a double_int instead of UHWI. */
+
+static inline double_int
+double_int_type_size_in_bits (const_tree type)
+{
+ /* Erroneous type: fall back to the word size. */
+ if (TREE_CODE (type) == ERROR_MARK)
+ return uhwi_to_double_int (BITS_PER_WORD);
+ /* No size recorded (e.g. an incomplete type): report zero. */
+ else if (TYPE_SIZE (type) == NULL_TREE)
+ return double_int_zero;
+ /* Constant-sized type: use the exact size. */
+ else if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+ return tree_to_double_int (TYPE_SIZE (type));
+ /* Variable-sized type: fall back to the alignment, mirroring the
+    UHWI variant above. */
+ else
+ return uhwi_to_double_int (TYPE_ALIGN (type));
+}
+
/* Given a pointer to a tree node for a subrange type, return a pointer
to a DIE that describes the given type. */
/* Return the result of rounding T up to ALIGN. */
-static inline HOST_WIDE_INT
-round_up_to_align (HOST_WIDE_INT t, unsigned int align)
+static inline double_int
+round_up_to_align (double_int t, unsigned int align)
{
- /* We must be careful if T is negative because HOST_WIDE_INT can be
- either "above" or "below" unsigned int as per the C promotion
- rules, depending on the host, thus making the signedness of the
- direct multiplication and division unpredictable. */
- unsigned HOST_WIDE_INT u = (unsigned HOST_WIDE_INT) t;
-
- u += align - 1;
- u /= align;
- u *= align;
-
- return (HOST_WIDE_INT) u;
+ double_int alignd = uhwi_to_double_int (align);
+ t = double_int_add (t, alignd);
+ t = double_int_add (t, double_int_minus_one);
+ t = double_int_div (t, alignd, true, TRUNC_DIV_EXPR);
+ t = double_int_mul (t, alignd);
+ return t;
}
/* Given a pointer to a FIELD_DECL, compute and return the byte offset of the
static HOST_WIDE_INT
field_byte_offset (const_tree decl)
{
- HOST_WIDE_INT object_offset_in_bits;
- HOST_WIDE_INT bitpos_int;
+ double_int object_offset_in_bits;
+ double_int object_offset_in_bytes;
+ double_int bitpos_int;
if (TREE_CODE (decl) == ERROR_MARK)
return 0;
/* We cannot yet cope with fields whose positions are variable, so
for now, when we see such things, we simply return 0. Someday, we may
be able to handle such cases, but it will be damn difficult. */
- if (! host_integerp (bit_position (decl), 0))
+ if (TREE_CODE (bit_position (decl)) != INTEGER_CST)
return 0;
- bitpos_int = int_bit_position (decl);
+ bitpos_int = tree_to_double_int (bit_position (decl));
#ifdef PCC_BITFIELD_TYPE_MATTERS
if (PCC_BITFIELD_TYPE_MATTERS)
{
tree type;
tree field_size_tree;
- HOST_WIDE_INT deepest_bitpos;
- unsigned HOST_WIDE_INT field_size_in_bits;
+ double_int deepest_bitpos;
+ double_int field_size_in_bits;
unsigned int type_align_in_bits;
unsigned int decl_align_in_bits;
- unsigned HOST_WIDE_INT type_size_in_bits;
+ double_int type_size_in_bits;
type = field_type (decl);
- type_size_in_bits = simple_type_size_in_bits (type);
+ type_size_in_bits = double_int_type_size_in_bits (type);
type_align_in_bits = simple_type_align_in_bits (type);
field_size_tree = DECL_SIZE (decl);
field_size_tree = bitsize_zero_node;
/* If the size of the field is not constant, use the type size. */
- if (host_integerp (field_size_tree, 1))
- field_size_in_bits = tree_low_cst (field_size_tree, 1);
+ if (TREE_CODE (field_size_tree) == INTEGER_CST)
+ field_size_in_bits = tree_to_double_int (field_size_tree);
else
- field_size_in_bits = type_size_in_bits;
+ field_size_in_bits = type_size_in_bits;
decl_align_in_bits = simple_decl_align_in_bits (decl);
/* The GCC front-end doesn't make any attempt to keep track of the
- starting bit offset (relative to the start of the containing
- structure type) of the hypothetical "containing object" for a
- bit-field. Thus, when computing the byte offset value for the
- start of the "containing object" of a bit-field, we must deduce
- this information on our own. This can be rather tricky to do in
- some cases. For example, handling the following structure type
- definition when compiling for an i386/i486 target (which only
- aligns long long's to 32-bit boundaries) can be very tricky:
+ starting bit offset (relative to the start of the containing
+ structure type) of the hypothetical "containing object" for a
+ bit-field. Thus, when computing the byte offset value for the
+ start of the "containing object" of a bit-field, we must deduce
+ this information on our own. This can be rather tricky to do in
+ some cases. For example, handling the following structure type
+ definition when compiling for an i386/i486 target (which only
+ aligns long long's to 32-bit boundaries) can be very tricky:
struct S { int field1; long long field2:31; };
- Fortunately, there is a simple rule-of-thumb which can be used
- in such cases. When compiling for an i386/i486, GCC will
- allocate 8 bytes for the structure shown above. It decides to
- do this based upon one simple rule for bit-field allocation.
- GCC allocates each "containing object" for each bit-field at
- the first (i.e. lowest addressed) legitimate alignment boundary
- (based upon the required minimum alignment for the declared
- type of the field) which it can possibly use, subject to the
- condition that there is still enough available space remaining
- in the containing object (when allocated at the selected point)
- to fully accommodate all of the bits of the bit-field itself.
-
- This simple rule makes it obvious why GCC allocates 8 bytes for
- each object of the structure type shown above. When looking
- for a place to allocate the "containing object" for `field2',
- the compiler simply tries to allocate a 64-bit "containing
- object" at each successive 32-bit boundary (starting at zero)
- until it finds a place to allocate that 64- bit field such that
- at least 31 contiguous (and previously unallocated) bits remain
- within that selected 64 bit field. (As it turns out, for the
- example above, the compiler finds it is OK to allocate the
- "containing object" 64-bit field at bit-offset zero within the
- structure type.)
-
- Here we attempt to work backwards from the limited set of facts
- we're given, and we try to deduce from those facts, where GCC
- must have believed that the containing object started (within
- the structure type). The value we deduce is then used (by the
- callers of this routine) to generate DW_AT_location and
- DW_AT_bit_offset attributes for fields (both bit-fields and, in
- the case of DW_AT_location, regular fields as well). */
+ Fortunately, there is a simple rule-of-thumb which can be used
+ in such cases. When compiling for an i386/i486, GCC will
+ allocate 8 bytes for the structure shown above. It decides to
+ do this based upon one simple rule for bit-field allocation.
+ GCC allocates each "containing object" for each bit-field at
+ the first (i.e. lowest addressed) legitimate alignment boundary
+ (based upon the required minimum alignment for the declared
+ type of the field) which it can possibly use, subject to the
+ condition that there is still enough available space remaining
+ in the containing object (when allocated at the selected point)
+ to fully accommodate all of the bits of the bit-field itself.
+
+ This simple rule makes it obvious why GCC allocates 8 bytes for
+ each object of the structure type shown above. When looking
+ for a place to allocate the "containing object" for `field2',
+ the compiler simply tries to allocate a 64-bit "containing
+ object" at each successive 32-bit boundary (starting at zero)
+ until it finds a place to allocate that 64-bit field such that
+ at least 31 contiguous (and previously unallocated) bits remain
+ within that selected 64 bit field. (As it turns out, for the
+ example above, the compiler finds it is OK to allocate the
+ "containing object" 64-bit field at bit-offset zero within the
+ structure type.)
+
+ Here we attempt to work backwards from the limited set of facts
+ we're given, and we try to deduce from those facts, where GCC
+ must have believed that the containing object started (within
+ the structure type). The value we deduce is then used (by the
+ callers of this routine) to generate DW_AT_location and
+ DW_AT_bit_offset attributes for fields (both bit-fields and, in
+ the case of DW_AT_location, regular fields as well). */
/* Figure out the bit-distance from the start of the structure to
- the "deepest" bit of the bit-field. */
- deepest_bitpos = bitpos_int + field_size_in_bits;
+ the "deepest" bit of the bit-field. */
+ deepest_bitpos = double_int_add (bitpos_int, field_size_in_bits);
/* This is the tricky part. Use some fancy footwork to deduce
- where the lowest addressed bit of the containing object must
- be. */
- object_offset_in_bits = deepest_bitpos - type_size_in_bits;
+ where the lowest addressed bit of the containing object must
+ be. */
+ object_offset_in_bits
+ = double_int_add (deepest_bitpos, double_int_neg (type_size_in_bits));
/* Round up to type_align by default. This works best for
- bitfields. */
+ bitfields. */
object_offset_in_bits
- = round_up_to_align (object_offset_in_bits, type_align_in_bits);
+ = round_up_to_align (object_offset_in_bits, type_align_in_bits);
- if (object_offset_in_bits > bitpos_int)
- {
- object_offset_in_bits = deepest_bitpos - type_size_in_bits;
+ if (double_int_ucmp (object_offset_in_bits, bitpos_int) > 0)
+ {
+ object_offset_in_bits
+ = double_int_add (deepest_bitpos,
+ double_int_neg (type_size_in_bits));
- /* Round up to decl_align instead. */
- object_offset_in_bits
- = round_up_to_align (object_offset_in_bits, decl_align_in_bits);
- }
+ /* Round up to decl_align instead. */
+ object_offset_in_bits
+ = round_up_to_align (object_offset_in_bits, decl_align_in_bits);
+ }
}
else
#endif
object_offset_in_bits = bitpos_int;
- return object_offset_in_bits / BITS_PER_UNIT;
+ object_offset_in_bytes
+ = double_int_div (object_offset_in_bits,
+ uhwi_to_double_int (BITS_PER_UNIT), true,
+ TRUNC_DIV_EXPR);
+ return double_int_to_shwi (object_offset_in_bytes);
}
\f
/* The following routines define various Dwarf attributes and any data
return *tp;
else if (TREE_CODE (*tp) == VAR_DECL)
{
- struct varpool_node *node = varpool_node (*tp);
- if (!node->needed)
+ struct varpool_node *node = varpool_get_node (*tp);
+ if (!node || !node->needed)
return *tp;
}
else if (TREE_CODE (*tp) == FUNCTION_DECL
origin = ultimate_origin;
if (origin != NULL)
add_abstract_origin_attribute (parm_die, origin);
- else
+ else if (emit_name_p)
+ add_name_and_src_coords_attributes (parm_die, node);
+ if (origin == NULL
+ || (! DECL_ABSTRACT (node_or_origin)
+ && variably_modified_type_p (TREE_TYPE (node_or_origin),
+ decl_function_context
+ (node_or_origin))))
{
- tree type = TREE_TYPE (node);
- if (emit_name_p)
- add_name_and_src_coords_attributes (parm_die, node);
- if (decl_by_reference_p (node))
+ tree type = TREE_TYPE (node_or_origin);
+ if (decl_by_reference_p (node_or_origin))
add_type_attribute (parm_die, TREE_TYPE (type), 0, 0,
context_die);
else
add_type_attribute (parm_die, type,
- TREE_READONLY (node),
- TREE_THIS_VOLATILE (node),
+ TREE_READONLY (node_or_origin),
+ TREE_THIS_VOLATILE (node_or_origin),
context_die);
- if (DECL_ARTIFICIAL (node))
- add_AT_flag (parm_die, DW_AT_artificial, 1);
}
+ if (origin == NULL && DECL_ARTIFICIAL (node))
+ add_AT_flag (parm_die, DW_AT_artificial, 1);
if (node && node != origin)
equate_decl_number_to_die (node, parm_die);
{
/* Ask cgraph if the global variable really is to be emitted.
If yes, then we'll keep the DIE of ENTRY->TYPE. */
- struct varpool_node *node = varpool_node (entry->var_decl);
- if (node->needed)
+ struct varpool_node *node = varpool_get_node (entry->var_decl);
+ if (node && node->needed)
{
die->die_perennial_p = 1;
/* Keep the parent DIEs as well. */
dw_die_ref var_die;
dw_die_ref old_die = decl ? lookup_decl_die (decl) : NULL;
dw_die_ref origin_die;
- int declaration = (DECL_EXTERNAL (decl_or_origin)
- || class_or_namespace_scope_p (context_die));
+ bool declaration = (DECL_EXTERNAL (decl_or_origin)
+ || class_or_namespace_scope_p (context_die));
+ bool specialization_p = false;
ultimate_origin = decl_ultimate_origin (decl_or_origin);
if (decl || ultimate_origin)
{
/* This is a definition of a C++ class level static. */
add_AT_specification (var_die, old_die);
+ specialization_p = true;
if (DECL_NAME (decl))
{
expanded_location s = expand_location (DECL_SOURCE_LOCATION (decl));
}
}
else
+ add_name_and_src_coords_attributes (var_die, decl);
+
+ if ((origin == NULL && !specialization_p)
+ || (origin != NULL
+ && !DECL_ABSTRACT (decl_or_origin)
+ && variably_modified_type_p (TREE_TYPE (decl_or_origin),
+ decl_function_context
+ (decl_or_origin))))
{
- tree type = TREE_TYPE (decl);
+ tree type = TREE_TYPE (decl_or_origin);
- add_name_and_src_coords_attributes (var_die, decl);
- if (decl_by_reference_p (decl))
+ if (decl_by_reference_p (decl_or_origin))
add_type_attribute (var_die, TREE_TYPE (type), 0, 0, context_die);
else
- add_type_attribute (var_die, type, TREE_READONLY (decl),
- TREE_THIS_VOLATILE (decl), context_die);
+ add_type_attribute (var_die, type, TREE_READONLY (decl_or_origin),
+ TREE_THIS_VOLATILE (decl_or_origin), context_die);
+ }
+ if (origin == NULL && !specialization_p)
+ {
if (TREE_PUBLIC (decl))
add_AT_flag (var_die, DW_AT_external, 1);
equate_type_number_to_die (TREE_TYPE (decl), type_die);
}
else
- type = TREE_TYPE (decl);
+ {
+ type = TREE_TYPE (decl);
+
+ if (is_naming_typedef_decl (TYPE_NAME (type)))
+ /*
+ Here, we are in the case of decl being a typedef naming
+ an anonymous type, e.g.:
+ typedef struct {...} foo;
+ In that case TREE_TYPE (decl) is not a typedef variant
+ type and TYPE_NAME of the anonymous type is set to the
+ TYPE_DECL of the typedef. This construct is emitted by
+ the C++ FE.
+
+ TYPE is the anonymous struct named by the typedef
+ DECL. As we need the DW_AT_type attribute of the
+ DW_TAG_typedef to point to the DIE of TYPE, let's
+ generate that DIE right away. add_type_attribute
+ called below will then pick (via lookup_type_die) that
+ anonymous struct DIE. */
+ gen_tagged_type_die (type, context_die, DINFO_USAGE_DIR_USE);
+ }
add_type_attribute (type_die, type, TREE_READONLY (decl),
TREE_THIS_VOLATILE (decl), context_die);
+
+ if (is_naming_typedef_decl (decl))
+ /* We want that all subsequent calls to lookup_type_die with
+ TYPE in argument yield the DW_TAG_typedef we have just
+ created. */
+ equate_type_number_to_die (type, type_die);
}
if (DECL_ABSTRACT (decl))
add_pubtype (decl, type_die);
}
+/* Generate a DIE for a struct, class, enum or union type. TYPE is the
+   tagged type, CONTEXT_DIE the DIE in whose scope it would normally be
+   described, and USAGE says how the debug info for TYPE is being used. */
+
+static void
+gen_tagged_type_die (tree type,
+ dw_die_ref context_die,
+ enum debug_info_usage usage)
+{
+ int need_pop;
+
+ /* Only tagged types are handled here; anything else is a no-op. */
+ if (type == NULL_TREE
+ || !is_tagged_type (type))
+ return;
+
+ /* If this is a nested type whose containing class hasn't been written
+ out yet, writing it out will cover this one, too. This does not apply
+ to instantiations of member class templates; they need to be added to
+ the containing class as they are generated. FIXME: This hurts the
+ idea of combining type decls from multiple TUs, since we can't predict
+ what set of template instantiations we'll get. */
+ if (TYPE_CONTEXT (type)
+ && AGGREGATE_TYPE_P (TYPE_CONTEXT (type))
+ && ! TREE_ASM_WRITTEN (TYPE_CONTEXT (type)))
+ {
+ gen_type_die_with_usage (TYPE_CONTEXT (type), context_die, usage);
+
+ if (TREE_ASM_WRITTEN (type))
+ return;
+
+ /* If that failed, attach ourselves to the stub. */
+ push_decl_scope (TYPE_CONTEXT (type));
+ context_die = lookup_type_die (TYPE_CONTEXT (type));
+ need_pop = 1;
+ }
+ else if (TYPE_CONTEXT (type) != NULL_TREE
+ && (TREE_CODE (TYPE_CONTEXT (type)) == FUNCTION_DECL))
+ {
+ /* If this type is local to a function that hasn't been written
+ out yet, use a NULL context for now; it will be fixed up in
+ decls_for_scope. */
+ context_die = lookup_decl_die (TYPE_CONTEXT (type));
+ need_pop = 0;
+ }
+ else
+ {
+ context_die = declare_in_namespace (type, context_die);
+ need_pop = 0;
+ }
+
+ if (TREE_CODE (type) == ENUMERAL_TYPE)
+ {
+ /* This might have been written out by the call to
+ declare_in_namespace. */
+ if (!TREE_ASM_WRITTEN (type))
+ gen_enumeration_type_die (type, context_die);
+ }
+ else
+ gen_struct_or_union_type_die (type, context_die, usage);
+
+ /* Undo the scope pushed for a nested type above. */
+ if (need_pop)
+ pop_decl_scope ();
+
+ /* Don't set TREE_ASM_WRITTEN on an incomplete struct; we want to fix
+ it up if it is ever completed. gen_*_type_die will set it for us
+ when appropriate. */
+}
+
/* Generate a type description DIE. */
static void
gen_type_die_with_usage (tree type, dw_die_ref context_die,
enum debug_info_usage usage)
{
- int need_pop;
struct array_descr_info info;
if (type == NULL_TREE || type == error_mark_node)
/* If TYPE is a typedef type variant, let's generate debug info
for the parent typedef which TYPE is a type of. */
- if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
- && DECL_ORIGINAL_TYPE (TYPE_NAME (type)))
+ if (typedef_variant_p (type))
{
if (TREE_ASM_WRITTEN (type))
return;
context_die = get_context_die (DECL_CONTEXT (TYPE_NAME (type)));
TREE_ASM_WRITTEN (type) = 1;
+
+ gen_decl_die (TYPE_NAME (type), NULL, context_die);
+ return;
+ }
+
+ /* If type is an anonymous tagged type named by a typedef, let's
+ generate debug info for the typedef. */
+ if (is_naming_typedef_decl (TYPE_NAME (type)))
+ {
+ /* Use the DIE of the containing namespace as the parent DIE of
+ the type description DIE we want to generate. */
+ if (DECL_CONTEXT (TYPE_NAME (type))
+ && TREE_CODE (DECL_CONTEXT (TYPE_NAME (type))) == NAMESPACE_DECL)
+ context_die = get_context_die (DECL_CONTEXT (TYPE_NAME (type)));
+
gen_decl_die (TYPE_NAME (type), NULL, context_die);
return;
}
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
- /* If this is a nested type whose containing class hasn't been written
- out yet, writing it out will cover this one, too. This does not apply
- to instantiations of member class templates; they need to be added to
- the containing class as they are generated. FIXME: This hurts the
- idea of combining type decls from multiple TUs, since we can't predict
- what set of template instantiations we'll get. */
- if (TYPE_CONTEXT (type)
- && AGGREGATE_TYPE_P (TYPE_CONTEXT (type))
- && ! TREE_ASM_WRITTEN (TYPE_CONTEXT (type)))
- {
- gen_type_die_with_usage (TYPE_CONTEXT (type), context_die, usage);
-
- if (TREE_ASM_WRITTEN (type))
- return;
-
- /* If that failed, attach ourselves to the stub. */
- push_decl_scope (TYPE_CONTEXT (type));
- context_die = lookup_type_die (TYPE_CONTEXT (type));
- need_pop = 1;
- }
- else if (TYPE_CONTEXT (type) != NULL_TREE
- && (TREE_CODE (TYPE_CONTEXT (type)) == FUNCTION_DECL))
- {
- /* If this type is local to a function that hasn't been written
- out yet, use a NULL context for now; it will be fixed up in
- decls_for_scope. */
- context_die = lookup_decl_die (TYPE_CONTEXT (type));
- need_pop = 0;
- }
- else
- {
- context_die = declare_in_namespace (type, context_die);
- need_pop = 0;
- }
-
- if (TREE_CODE (type) == ENUMERAL_TYPE)
- {
- /* This might have been written out by the call to
- declare_in_namespace. */
- if (!TREE_ASM_WRITTEN (type))
- gen_enumeration_type_die (type, context_die);
- }
- else
- gen_struct_or_union_type_die (type, context_die, usage);
-
- if (need_pop)
- pop_decl_scope ();
-
- /* Don't set TREE_ASM_WRITTEN on an incomplete struct; we want to fix
- it up if it is ever completed. gen_*_type_die will set it for us
- when appropriate. */
+ gen_tagged_type_die (type, context_die, usage);
return;
case VOID_TYPE:
return 0;
}
+/* Return TRUE if DECL is a typedef that names a type for linkage
+ purposes. This kind of typedef is produced by the C++ FE for
+ constructs like:
+
+ typedef struct {...} foo;
+
+ In that case, there is no typedef variant type produced for foo.
+ Rather, the TREE_TYPE of the TYPE_DECL of foo is the anonymous
+ struct type. */
+
+static bool
+is_naming_typedef_decl (const_tree decl)
+{
+ if (decl == NULL_TREE
+ || TREE_CODE (decl) != TYPE_DECL
+ || !is_tagged_type (TREE_TYPE (decl))
+ || is_redundant_typedef (decl)
+ /* It looks like Ada produces TYPE_DECLs that are very similar
+ to C++ naming typedefs but that have different
+ semantics. Let's be specific to c++ for now. */
+ || !is_cxx ())
+ return FALSE;
+
+ /* A naming typedef has no DECL_ORIGINAL_TYPE (so it is not a typedef
+    variant), is itself the TYPE_NAME of the tagged type it declares,
+    and is distinct from that type's stub declaration. */
+ return (DECL_ORIGINAL_TYPE (decl) == NULL_TREE
+ && TYPE_NAME (TREE_TYPE (decl)) == decl
+ && (TYPE_STUB_DECL (TREE_TYPE (decl))
+ != TYPE_NAME (TREE_TYPE (decl))));
+}
+
/* Returns the DIE for a context. */
static inline dw_die_ref
add_child_die (origin->die_parent, die);
else if (die == comp_unit_die)
;
- else if (errorcount > 0 || sorrycount > 0)
+ else if (seen_error ())
/* It's OK to be confused by errors in the input. */
add_child_die (comp_unit_die, die);
else