/* C-compiler utilities for types and variables storage layout
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
This file is part of GCC.
#include "function.h"
#include "expr.h"
#include "output.h"
+#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"
+#include "cgraph.h"
+#include "tree-inline.h"
+#include "tree-dump.h"
+#include "gimple.h"
/* Data type for the expressions representing sizes of data types.
It is the first integer type laid out. */
/* ... and its original value in bytes, specified via -fpack-struct=<value>. */
unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;
-/* Nonzero if all REFERENCE_TYPEs are internal and hence should be
- allocated in Pmode, not ptr_mode. Set only by internal_reference_types
- called only by a front end. */
+/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
+   in their address space's address_mode, not pointer_mode.  Set only by
+   internal_reference_types, called only by a front end.  */
static int reference_types_internal = 0;
+static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
\f
/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
-static GTY(()) tree pending_sizes;
+static GTY(()) VEC(tree,gc) *pending_sizes;
-/* Show that REFERENCE_TYPES are internal and should be Pmode. Called only
- by front end. */
+/* Show that REFERENCE_TYPES are internal and should use address_mode.
+   Called only by a front end.  */
void
internal_reference_types (void)
{
  reference_types_internal = 1;
}
-/* Get a list of all the objects put on the pending sizes list. */
+/* Get a VEC of all the objects put on the pending sizes list. */
-tree
+VEC(tree,gc) *
get_pending_sizes (void)
{
- tree chain = pending_sizes;
+ VEC(tree,gc) *chain = pending_sizes;
pending_sizes = 0;
return chain;
expr = skip_simple_arithmetic (expr);
if (TREE_CODE (expr) == SAVE_EXPR)
- pending_sizes = tree_cons (NULL_TREE, expr, pending_sizes);
+ VEC_safe_push (tree, gc, pending_sizes, expr);
}
/* Put a chain of objects into the pending sizes list, which must be
empty. */
void
-put_pending_sizes (tree chain)
+put_pending_sizes (VEC(tree,gc) *chain)
{
gcc_assert (!pending_sizes);
pending_sizes = chain;
{
tree save;
+  /* A constant size needs no further processing.  */
+ if (TREE_CONSTANT (size))
+ return size;
+
+ /* If the size is self-referential, we can't make a SAVE_EXPR (see
+ save_expr for the rationale). But we can do something else. */
+ if (CONTAINS_PLACEHOLDER_P (size))
+ return self_referential_size (size);
+
/* If the language-processor is to take responsibility for variable-sized
items (e.g., languages which have elaboration procedures like Ada),
- just return SIZE unchanged. Likewise for self-referential sizes and
- constant sizes. */
- if (TREE_CONSTANT (size)
- || lang_hooks.decls.global_bindings_p () < 0
- || CONTAINS_PLACEHOLDER_P (size))
+ just return SIZE unchanged. */
+ if (lang_hooks.decls.global_bindings_p () < 0)
return size;
size = save_expr (size);
places. */
save = skip_simple_arithmetic (size);
- if (cfun && cfun->x_dont_save_pending_sizes_p)
+ if (cfun && cfun->dont_save_pending_sizes_p)
/* The front-end doesn't want us to keep a list of the expressions
that determine sizes for variable size objects. Trust it. */
return size;
return size;
}
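/* Illustrative example (editor's note, not part of the patch): for a
   C99 VLA such as

     void f (int n) { char a[n * 4]; }

   the front end computes DECL_SIZE (a) as a non-constant expression.
   variable_size wraps it in a SAVE_EXPR so that it is evaluated only
   once, and put_pending_size records that SAVE_EXPR in the
   pending_sizes VEC until the front end expands it at the point of
   declaration.  */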
-\f
-#ifndef MAX_FIXED_MODE_SIZE
-#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
-#endif
+/* An array of functions used for self-referential size computation. */
+static GTY(()) VEC (tree, gc) *size_functions;
+
+/* Similar to copy_tree_r but do not copy component references involving
+ PLACEHOLDER_EXPRs. These nodes are spotted in find_placeholder_in_expr
+ and substituted in substitute_in_expr. */
+
+static tree
+copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
+{
+ enum tree_code code = TREE_CODE (*tp);
+
+  /* Stop at types, decls and constants, like copy_tree_r.  */
+ if (TREE_CODE_CLASS (code) == tcc_type
+ || TREE_CODE_CLASS (code) == tcc_declaration
+ || TREE_CODE_CLASS (code) == tcc_constant)
+ {
+ *walk_subtrees = 0;
+ return NULL_TREE;
+ }
+
+ /* This is the pattern built in ada/make_aligning_type. */
+ else if (code == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
+ {
+ *walk_subtrees = 0;
+ return NULL_TREE;
+ }
+
+ /* Default case: the component reference. */
+ else if (code == COMPONENT_REF)
+ {
+ tree inner;
+ for (inner = TREE_OPERAND (*tp, 0);
+ REFERENCE_CLASS_P (inner);
+ inner = TREE_OPERAND (inner, 0))
+ ;
+
+ if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
+ {
+ *walk_subtrees = 0;
+ return NULL_TREE;
+ }
+ }
+
+  /* We're not supposed to have SAVE_EXPRs in self-referential size trees
+ because we wouldn't properly control when they are evaluated.
+ However, not creating superfluous SAVE_EXPRs requires accurate
+ tracking of readonly-ness all the way down to here, which we
+ cannot always guarantee in practice. So punt in this case. */
+ else if (code == SAVE_EXPR)
+ return error_mark_node;
+
+ return copy_tree_r (tp, walk_subtrees, data);
+}
+
+/* Given a SIZE expression that is self-referential, return an equivalent
+ expression to serve as the actual size expression for a type. */
+
+static tree
+self_referential_size (tree size)
+{
+ static unsigned HOST_WIDE_INT fnno = 0;
+ VEC (tree, heap) *self_refs = NULL;
+ tree param_type_list = NULL, param_decl_list = NULL, arg_list = NULL;
+ tree t, ref, return_type, fntype, fnname, fndecl;
+ unsigned int i;
+ char buf[128];
+
+ /* Do not factor out simple operations. */
+ t = skip_simple_arithmetic (size);
+ if (TREE_CODE (t) == CALL_EXPR)
+ return size;
+
+ /* Collect the list of self-references in the expression. */
+ find_placeholder_in_expr (size, &self_refs);
+ gcc_assert (VEC_length (tree, self_refs) > 0);
+
+ /* Obtain a private copy of the expression. */
+ t = size;
+ if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
+ return size;
+ size = t;
+
+ /* Build the parameter and argument lists in parallel; also
+ substitute the former for the latter in the expression. */
+ for (i = 0; VEC_iterate (tree, self_refs, i, ref); i++)
+ {
+ tree subst, param_name, param_type, param_decl;
+
+ if (DECL_P (ref))
+ {
+ /* We shouldn't have true variables here. */
+ gcc_assert (TREE_READONLY (ref));
+ subst = ref;
+ }
+ /* This is the pattern built in ada/make_aligning_type. */
+ else if (TREE_CODE (ref) == ADDR_EXPR)
+ subst = ref;
+ /* Default case: the component reference. */
+ else
+ subst = TREE_OPERAND (ref, 1);
+
+      sprintf (buf, "p%u", i);
+ param_name = get_identifier (buf);
+ param_type = TREE_TYPE (ref);
+ param_decl
+ = build_decl (input_location, PARM_DECL, param_name, param_type);
+ if (targetm.calls.promote_prototypes (NULL_TREE)
+ && INTEGRAL_TYPE_P (param_type)
+ && TYPE_PRECISION (param_type) < TYPE_PRECISION (integer_type_node))
+ DECL_ARG_TYPE (param_decl) = integer_type_node;
+ else
+ DECL_ARG_TYPE (param_decl) = param_type;
+ DECL_ARTIFICIAL (param_decl) = 1;
+ TREE_READONLY (param_decl) = 1;
+
+ size = substitute_in_expr (size, subst, param_decl);
+
+ param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
+ param_decl_list = chainon (param_decl, param_decl_list);
+ arg_list = tree_cons (NULL_TREE, ref, arg_list);
+ }
+
+ VEC_free (tree, heap, self_refs);
+
+ /* Append 'void' to indicate that the number of parameters is fixed. */
+ param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);
+
+ /* The 3 lists have been created in reverse order. */
+ param_type_list = nreverse (param_type_list);
+ param_decl_list = nreverse (param_decl_list);
+ arg_list = nreverse (arg_list);
+
+ /* Build the function type. */
+ return_type = TREE_TYPE (size);
+ fntype = build_function_type (return_type, param_type_list);
+
+ /* Build the function declaration. */
+ sprintf (buf, "SZ"HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
+ fnname = get_file_function_name (buf);
+ fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
+ for (t = param_decl_list; t; t = TREE_CHAIN (t))
+ DECL_CONTEXT (t) = fndecl;
+ DECL_ARGUMENTS (fndecl) = param_decl_list;
+ DECL_RESULT (fndecl)
+ = build_decl (input_location, RESULT_DECL, 0, return_type);
+ DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
+
+ /* The function has been created by the compiler and we don't
+ want to emit debug info for it. */
+ DECL_ARTIFICIAL (fndecl) = 1;
+ DECL_IGNORED_P (fndecl) = 1;
+
+ /* It is supposed to be "const" and never throw. */
+ TREE_READONLY (fndecl) = 1;
+ TREE_NOTHROW (fndecl) = 1;
+
+ /* We want it to be inlined when this is deemed profitable, as
+ well as discarded if every call has been integrated. */
+ DECL_DECLARED_INLINE_P (fndecl) = 1;
+
+  /* Its body is a single return statement.  */
+ DECL_INITIAL (fndecl) = make_node (BLOCK);
+ BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
+ t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
+ DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
+ TREE_STATIC (fndecl) = 1;
+
+ /* Put it onto the list of size functions. */
+ VEC_safe_push (tree, gc, size_functions, fndecl);
+
+ /* Replace the original expression with a call to the size function. */
+ return build_function_call_expr (UNKNOWN_LOCATION, fndecl, arg_list);
+}
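/* Illustrative sketch (editor's note, not part of the patch): for an
   Ada-like record whose size depends on its own discriminant D, the
   size expression has the form

     (PLACEHOLDER_EXPR <struct s>).d * 8

   and the code above builds, in essence, the artificial function

     static unsigned long SZ0 (int p0) { return p0 * 8; }

   then returns the call SZ0 ((PLACEHOLDER_EXPR <struct s>).d); the
   placeholder in the argument is substituted with a concrete object
   at each use site, e.g. via SUBSTITUTE_PLACEHOLDER_IN_EXPR.  */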
+
+/* Take, queue and compile all the size functions. It is essential that
+ the size functions be gimplified at the very end of the compilation
+ in order to guarantee transparent handling of self-referential sizes.
+ Otherwise the GENERIC inliner would not be able to inline them back
+ at each of their call sites, thus creating artificial non-constant
+ size expressions which would trigger nasty problems later on. */
+
+void
+finalize_size_functions (void)
+{
+ unsigned int i;
+ tree fndecl;
+
+  for (i = 0; VEC_iterate (tree, size_functions, i, fndecl); i++)
+ {
+ dump_function (TDI_original, fndecl);
+ gimplify_function_tree (fndecl);
+ dump_function (TDI_generic, fndecl);
+ cgraph_finalize_function (fndecl, false);
+ }
+
+ VEC_free (tree, gc, size_functions);
+}
+\f
/* Return the machine mode to use for a nonscalar of SIZE bits. The
- mode must be in class CLASS, and have exactly that many value bits;
+ mode must be in class MCLASS, and have exactly that many value bits;
it may have padding as well. If LIMIT is nonzero, modes of wider
than MAX_FIXED_MODE_SIZE will not be used. */
enum machine_mode
-mode_for_size (unsigned int size, enum mode_class class, int limit)
+mode_for_size (unsigned int size, enum mode_class mclass, int limit)
{
enum machine_mode mode;
return BLKmode;
/* Get the first mode which has this size, in the specified class. */
- for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
+ for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
if (GET_MODE_PRECISION (mode) == size)
return mode;
/* Similar, except passed a tree node. */
enum machine_mode
-mode_for_size_tree (const_tree size, enum mode_class class, int limit)
+mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
unsigned HOST_WIDE_INT uhwi;
unsigned int ui;
ui = uhwi;
if (uhwi != ui)
return BLKmode;
- return mode_for_size (ui, class, limit);
+ return mode_for_size (ui, mclass, limit);
}
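/* Worked example (editor's note, not part of the patch): on a typical
   32-bit target, mode_for_size (32, MODE_INT, 1) returns SImode, while
   mode_for_size (24, MODE_INT, 1) returns BLKmode because no integer
   mode has exactly 24 value bits.  smallest_mode_for_size (24,
   MODE_INT) below would instead round up and return SImode.  */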
/* Similar, but never return BLKmode; return the narrowest mode that
contains at least the requested number of value bits. */
enum machine_mode
-smallest_mode_for_size (unsigned int size, enum mode_class class)
+smallest_mode_for_size (unsigned int size, enum mode_class mclass)
{
enum machine_mode mode;
/* Get the first mode which has at least this size, in the
specified class. */
- for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
+ for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
if (GET_MODE_PRECISION (mode) >= size)
return mode;
tree type = TREE_TYPE (decl);
enum tree_code code = TREE_CODE (decl);
rtx rtl = NULL_RTX;
+ location_t loc = DECL_SOURCE_LOCATION (decl);
if (code == CONST_DECL)
return;
}
else if (DECL_SIZE_UNIT (decl) == 0)
DECL_SIZE_UNIT (decl)
- = fold_convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
- bitsize_unit_node));
+ = fold_convert_loc (loc, sizetype,
+ size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
+ bitsize_unit_node));
if (code != FIELD_DECL)
/* For non-fields, update the alignment from the type. */
{
enum machine_mode xmode
= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
+ unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
if (xmode != BLKmode
- && (known_align == 0
- || known_align >= GET_MODE_ALIGNMENT (xmode)))
+ && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
+ && (known_align == 0 || known_align >= xalign))
{
- DECL_ALIGN (decl) = MAX (GET_MODE_ALIGNMENT (xmode),
- DECL_ALIGN (decl));
+ DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
DECL_MODE (decl) = xmode;
DECL_BIT_FIELD (decl) = 0;
}
else
do_type_align (type, decl);
- /* If the field is of variable size, we can't misalign it since we
- have no way to make a temporary to align the result. But this
- isn't an issue if the decl is not addressable. Likewise if it
- is of unknown size.
-
- Note that do_type_align may set DECL_USER_ALIGN, so we need to
- check old_user_align instead. */
+ /* If the field is packed and not explicitly aligned, give it the
+ minimum alignment. Note that do_type_align may set
+ DECL_USER_ALIGN, so we need to check old_user_align instead. */
if (packed_p
- && !old_user_align
- && (DECL_NONADDRESSABLE_P (decl)
- || DECL_SIZE_UNIT (decl) == 0
- || TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST))
+ && !old_user_align)
DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);
if (! packed_p && ! DECL_USER_ALIGN (decl))
int size_as_int = TREE_INT_CST_LOW (size);
if (compare_tree_int (size, size_as_int) == 0)
- warning (0, "size of %q+D is %d bytes", decl, size_as_int);
+	    warning (OPT_Wlarger_than_eq, "size of %q+D is %d bytes",
+		     decl, size_as_int);
else
- warning (0, "size of %q+D is larger than %wd bytes",
+ warning (OPT_Wlarger_than_eq, "size of %q+D is larger than %wd bytes",
decl, larger_than_size);
}
}
layout_decl (decl, 0);
}
\f
-/* Hook for a front-end function that can modify the record layout as needed
- immediately before it is finalized. */
-
-static void (*lang_adjust_rli) (record_layout_info) = 0;
-
-void
-set_lang_adjust_rli (void (*f) (record_layout_info))
-{
- lang_adjust_rli = f;
-}
-
/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
QUAL_UNION_TYPE. Return a pointer to a struct record_layout_info which
is to be passed to all other layout functions for this record. It is the
record_layout_info
start_record_layout (tree t)
{
- record_layout_info rli = xmalloc (sizeof (struct record_layout_info_s));
+ record_layout_info rli = XNEW (struct record_layout_info_s);
rli->t = t;
rli->offset = size_zero_node;
rli->bitpos = bitsize_zero_node;
rli->prev_field = 0;
- rli->pending_statics = 0;
+ rli->pending_statics = NULL;
rli->packed_maybe_necessary = 0;
rli->remaining_in_alignment = 0;
/* Print debugging information about the information in RLI. */
-void
+DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
print_node_brief (stderr, "type", rli->t, 0);
if (rli->packed_maybe_necessary)
fprintf (stderr, "packed may be necessary\n");
- if (rli->pending_statics)
+ if (!VEC_empty (tree, rli->pending_statics))
{
fprintf (stderr, "pending statics:\n");
- debug_tree (rli->pending_statics);
+ debug_vec_tree (rli->pending_statics);
}
}
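/* Usage note (editor's note, not part of the patch): DEBUG_FUNCTION
   marks the function as used, so it is kept even without callers and
   stays callable from the debugger, e.g.

     (gdb) call debug_rli (rli)

   to inspect the layout state part-way through laying out a record.  */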
if (TREE_CODE (rli->t) == UNION_TYPE)
rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
- rli->offset = fold_build3 (COND_EXPR, sizetype,
+ rli->offset = fold_build3_loc (input_location, COND_EXPR, sizetype,
DECL_QUALIFIER (field),
DECL_SIZE_UNIT (field), rli->offset);
}
it *after* the record is laid out. */
if (TREE_CODE (field) == VAR_DECL)
{
- rli->pending_statics = tree_cons (NULL_TREE, field,
- rli->pending_statics);
+ VEC_safe_push (tree, gc, rli->pending_statics, field);
return;
}
if (STRICT_ALIGNMENT)
warning (OPT_Wattributes, "packed attribute causes "
"inefficient alignment for %q+D", field);
- else
+ /* Don't warn if DECL_PACKED was set by the type. */
+ else if (!TYPE_PACKED (rli->t))
warning (OPT_Wattributes, "packed attribute is "
"unnecessary for %q+D", field);
}
/* No, we need to skip space before this field.
Bump the cumulative size to multiple of field alignment. */
- warning (OPT_Wpadded, "padding struct to align %q+D", field);
+ if (DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
+ warning (OPT_Wpadded, "padding struct to align %q+D", field);
/* If the alignment is still within offset_align, just align
the bit position. */
&& TREE_CODE (field) == FIELD_DECL
&& type != error_mark_node
&& DECL_BIT_FIELD (field)
- && ! DECL_PACKED (field)
+ && (! DECL_PACKED (field)
+ /* Enter for these packed fields only to issue a warning. */
+ || TYPE_ALIGN (type) <= BITS_PER_UNIT)
&& maximum_field_alignment == 0
&& ! integer_zerop (DECL_SIZE (field))
&& host_integerp (DECL_SIZE (field), 1)
/* A bit field may not span more units of alignment of its type
than its type itself. Advance to next boundary if necessary. */
if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
- rli->bitpos = round_up (rli->bitpos, type_align);
+ {
+ if (DECL_PACKED (field))
+ {
+ if (warn_packed_bitfield_compat == 1)
+ inform
+ (input_location,
+ "Offset of packed bit-field %qD has changed in GCC 4.4",
+ field);
+ }
+ else
+ rli->bitpos = round_up_loc (input_location, rli->bitpos, type_align);
+ }
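/* Illustrative example (editor's note, not part of the patch): given

     struct foo { char a : 4; char b : 8; } __attribute__ ((packed));

   b does not fit in the remaining 4 bits of a's storage unit, so GCC
   4.3 and earlier rounded it up to the next unit; since GCC 4.4 the
   packed field stays at bit offset 4 and -Wpacked-bitfield-compat
   reports the changed offset.  */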
- TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
+ if (! DECL_PACKED (field))
+ TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
}
#endif
until we see a bitfield (and come by here again) we just skip
calculating it. */
if (DECL_SIZE (field) != NULL
- && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 0)
- && host_integerp (DECL_SIZE (field), 0))
+ && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
+ && host_integerp (DECL_SIZE (field), 1))
{
- HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);
- HOST_WIDE_INT typesize
+ unsigned HOST_WIDE_INT bitsize
+ = tree_low_cst (DECL_SIZE (field), 1);
+ unsigned HOST_WIDE_INT typesize
= tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);
if (typesize < bitsize)
if (maximum_field_alignment != 0)
type_align = MIN (type_align, maximum_field_alignment);
- rli->bitpos = round_up (rli->bitpos, type_align);
+ rli->bitpos = round_up_loc (input_location, rli->bitpos, type_align);
/* If we really aligned, don't allow subsequent bitfields
to undo that. */
= size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);
/* Round the size up to be a multiple of the required alignment. */
- TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
+ TYPE_SIZE (rli->t) = round_up_loc (input_location, unpadded_size,
+ TYPE_ALIGN (rli->t));
TYPE_SIZE_UNIT (rli->t)
- = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
+    = round_up_loc (input_location, unpadded_size_unit,
+		    TYPE_ALIGN_UNIT (rli->t));
if (TREE_CONSTANT (unpadded_size)
- && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0)
+ && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
+ && input_location != BUILTINS_LOCATION)
warning (OPT_Wpadded, "padding struct size to alignment boundary");
if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif
- unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
+  unpacked_size = round_up_loc (input_location, TYPE_SIZE (rli->t),
+				rli->unpacked_align);
if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
{
- TYPE_PACKED (rli->t) = 0;
-
if (TYPE_NAME (rli->t))
{
- const char *name;
+ tree name;
if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
- name = IDENTIFIER_POINTER (TYPE_NAME (rli->t));
+ name = TYPE_NAME (rli->t);
else
- name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (rli->t)));
+ name = DECL_NAME (TYPE_NAME (rli->t));
if (STRICT_ALIGNMENT)
warning (OPT_Wpacked, "packed attribute causes inefficient "
- "alignment for %qs", name);
+ "alignment for %qE", name);
else
warning (OPT_Wpacked,
- "packed attribute is unnecessary for %qs", name);
+ "packed attribute is unnecessary for %qE", name);
}
else
{
However, if possible, we use a mode that fits in a register
instead, in order to allow for better optimization down the
line. */
- TYPE_MODE (type) = BLKmode;
+ SET_TYPE_MODE (type, BLKmode);
if (! host_integerp (TYPE_SIZE (type), 1))
return;
if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
&& host_integerp (TYPE_SIZE (type), 1)
&& GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
- TYPE_MODE (type) = mode;
+ SET_TYPE_MODE (type, mode);
else
- TYPE_MODE (type) = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);
+ SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1));
/* If structure's known alignment is less than what the scalar
mode would need, and it matters, then stick with BLKmode. */
/* If this is the only reason this type is BLKmode, then
don't force containing types to be BLKmode. */
TYPE_NO_FORCE_BLK (type) = 1;
- TYPE_MODE (type) = BLKmode;
+ SET_TYPE_MODE (type, BLKmode);
}
}
if (TYPE_SIZE (type) != 0)
{
- TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
- TYPE_SIZE_UNIT (type) = round_up (TYPE_SIZE_UNIT (type),
+ TYPE_SIZE (type) = round_up_loc (input_location,
+ TYPE_SIZE (type), TYPE_ALIGN (type));
+ TYPE_SIZE_UNIT (type) = round_up_loc (input_location, TYPE_SIZE_UNIT (type),
TYPE_ALIGN_UNIT (type));
}
TYPE_SIZE_UNIT (variant) = size_unit;
TYPE_ALIGN (variant) = align;
TYPE_USER_ALIGN (variant) = user_align;
- TYPE_MODE (variant) = mode;
+ SET_TYPE_MODE (variant, mode);
}
}
}
/* Lay out any static members. This is done now because their type
may use the record's type. */
- while (rli->pending_statics)
- {
- layout_decl (TREE_VALUE (rli->pending_statics), 0);
- rli->pending_statics = TREE_CHAIN (rli->pending_statics);
- }
+ while (!VEC_empty (tree, rli->pending_statics))
+ layout_decl (VEC_pop (tree, rli->pending_statics), 0);
/* Clean up. */
if (free_p)
- free (rli);
+ {
+ VEC_free (tree, gc, rli->pending_statics);
+ free (rli);
+ }
}
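/* Illustrative example (editor's note, not part of the patch): for a
   C++ class such as

     struct S { static S instance; int i; };

   laying out the static data member would require S itself, so
   place_field queues it in rli->pending_statics and the loop above
   lays it out only once the enclosing record is complete.  */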
\f
#if 0 /* not yet, should get fixed properly later */
TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
#else
- TYPE_NAME (type) = build_decl (TYPE_DECL, get_identifier (name), type);
+ TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
+ TYPE_DECL, get_identifier (name), type);
#endif
TYPE_STUB_DECL (type) = TYPE_NAME (type);
layout_decl (TYPE_NAME (type), 0);
&& tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
TYPE_UNSIGNED (type) = 1;
- TYPE_MODE (type) = smallest_mode_for_size (TYPE_PRECISION (type),
- MODE_INT);
+ SET_TYPE_MODE (type,
+ smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
break;
case REAL_TYPE:
- TYPE_MODE (type) = mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0);
+ SET_TYPE_MODE (type,
+ mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
break;
case COMPLEX_TYPE:
TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
- TYPE_MODE (type)
- = mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
- (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
- ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
- 0);
+ SET_TYPE_MODE (type,
+ mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
+ (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
+ ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
+ 0));
TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
break;
else
mode = MIN_MODE_VECTOR_INT;
+ /* Do not check vector_mode_supported_p here. We'll do that
+ later in vector_type_mode. */
for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
if (GET_MODE_NUNITS (mode) == nunits
- && GET_MODE_INNER (mode) == innermode
- && targetm.vector_mode_supported_p (mode))
+ && GET_MODE_INNER (mode) == innermode)
break;
/* For integers, try mapping it to a same-sized scalar mode. */
mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
MODE_INT, 0);
- if (mode == VOIDmode || !have_regs_of_mode[mode])
- TYPE_MODE (type) = BLKmode;
+  if (mode == VOIDmode
+      || (GET_MODE_CLASS (mode) == MODE_INT
+	  && !have_regs_of_mode[mode]))
+ SET_TYPE_MODE (type, BLKmode);
else
- TYPE_MODE (type) = mode;
+ SET_TYPE_MODE (type, mode);
}
TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
/* This is an incomplete type and so doesn't have a size. */
TYPE_ALIGN (type) = 1;
TYPE_USER_ALIGN (type) = 0;
- TYPE_MODE (type) = VOIDmode;
+ SET_TYPE_MODE (type, VOIDmode);
break;
case OFFSET_TYPE:
TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
/* A pointer might be MODE_PARTIAL_INT,
but ptrdiff_t must be integral. */
- TYPE_MODE (type) = mode_for_size (POINTER_SIZE, MODE_INT, 0);
+ SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
+ TYPE_PRECISION (type) = POINTER_SIZE;
break;
case FUNCTION_TYPE:
/* It's hard to see what the mode and size of a function ought to
be, but we do know the alignment is FUNCTION_BOUNDARY, so
make it consistent with that. */
- TYPE_MODE (type) = mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0);
+ SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
break;
case POINTER_TYPE:
case REFERENCE_TYPE:
{
+ enum machine_mode mode = TYPE_MODE (type);
+ if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
+ {
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
+ mode = targetm.addr_space.address_mode (as);
+ }
- enum machine_mode mode = ((TREE_CODE (type) == REFERENCE_TYPE
- && reference_types_internal)
- ? Pmode : TYPE_MODE (type));
-
- int nbits = GET_MODE_BITSIZE (mode);
-
- TYPE_SIZE (type) = bitsize_int (nbits);
+ TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
TYPE_UNSIGNED (type) = 1;
- TYPE_PRECISION (type) = nbits;
+ TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
}
break;
{
tree ub = TYPE_MAX_VALUE (index);
tree lb = TYPE_MIN_VALUE (index);
+ tree element_size = TYPE_SIZE (element);
tree length;
- tree element_size;
+
+      /* Make sure that an array of zero-sized elements is zero-sized
+	 regardless of its extent.  */
+ if (integer_zerop (element_size))
+ length = size_zero_node;
/* The initial subtraction should happen in the original type so
that (possible) negative values are handled appropriately. */
- length = size_binop (PLUS_EXPR, size_one_node,
- fold_convert (sizetype,
- fold_build2 (MINUS_EXPR,
- TREE_TYPE (lb),
- ub, lb)));
-
- /* Special handling for arrays of bits (for Chill). */
- element_size = TYPE_SIZE (element);
- if (TYPE_PACKED (type) && INTEGRAL_TYPE_P (element)
- && (integer_zerop (TYPE_MAX_VALUE (element))
- || integer_onep (TYPE_MAX_VALUE (element)))
- && host_integerp (TYPE_MIN_VALUE (element), 1))
- {
- HOST_WIDE_INT maxvalue
- = tree_low_cst (TYPE_MAX_VALUE (element), 1);
- HOST_WIDE_INT minvalue
- = tree_low_cst (TYPE_MIN_VALUE (element), 1);
-
- if (maxvalue - minvalue == 1
- && (maxvalue == 1 || maxvalue == 0))
- element_size = integer_one_node;
- }
-
- /* If neither bound is a constant and sizetype is signed, make
- sure the size is never negative. We should really do this
- if *either* bound is non-constant, but this is the best
- compromise between C and Ada. */
- if (!TYPE_UNSIGNED (sizetype)
- && TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST
- && TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST)
- length = size_binop (MAX_EXPR, length, size_zero_node);
+ else
+ length
+ = size_binop (PLUS_EXPR, size_one_node,
+ fold_convert (sizetype,
+ fold_build2_loc (input_location,
+ MINUS_EXPR,
+ TREE_TYPE (lb),
+ ub, lb)));
TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
fold_convert (bitsizetype,
length));
- /* If we know the size of the element, calculate the total
- size directly, rather than do some division thing below.
- This optimization helps Fortran assumed-size arrays
- (where the size of the array is determined at runtime)
- substantially.
- Note that we can't do this in the case where the size of
- the elements is one bit since TYPE_SIZE_UNIT cannot be
- set correctly in that case. */
- if (TYPE_SIZE_UNIT (element) != 0 && ! integer_onep (element_size))
+ /* If we know the size of the element, calculate the total size
+ directly, rather than do some division thing below. This
+ optimization helps Fortran assumed-size arrays (where the
+ size of the array is determined at runtime) substantially. */
+ if (TYPE_SIZE_UNIT (element))
TYPE_SIZE_UNIT (type)
= size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
}
fall back on structural equality. */
SET_TYPE_STRUCTURAL_EQUALITY (type);
TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
- TYPE_MODE (type) = BLKmode;
+ SET_TYPE_MODE (type, BLKmode);
if (TYPE_SIZE (type) != 0
#ifdef MEMBER_TYPE_FORCES_BLK
&& ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
/* One-element arrays get the component type's mode. */
if (simple_cst_equal (TYPE_SIZE (type),
TYPE_SIZE (TREE_TYPE (type))))
- TYPE_MODE (type) = TYPE_MODE (TREE_TYPE (type));
+ SET_TYPE_MODE (type, TYPE_MODE (TREE_TYPE (type)));
else
- TYPE_MODE (type)
- = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);
+ SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type),
+ MODE_INT, 1));
if (TYPE_MODE (type) != BLKmode
&& STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
&& TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
{
TYPE_NO_FORCE_BLK (type) = 1;
- TYPE_MODE (type) = BLKmode;
+ SET_TYPE_MODE (type, BLKmode);
}
}
/* When the element size is constant, check that it is at least as
if (TREE_CODE (type) == QUAL_UNION_TYPE)
TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
- if (lang_adjust_rli)
- (*lang_adjust_rli) (rli);
-
/* Finish laying out the record. */
finish_record_layout (rli, /*free_p=*/true);
}
&& TREE_CODE (type) != QUAL_UNION_TYPE)
finalize_type_size (type);
- /* If an alias set has been set for this aggregate when it was incomplete,
- force it into alias set 0.
- This is too conservative, but we cannot call record_component_aliases
- here because some frontends still change the aggregates after
- layout_type. */
- if (AGGREGATE_TYPE_P (type) && TYPE_ALIAS_SET_KNOWN_P (type))
- TYPE_ALIAS_SET (type) = 0;
+  /* We should never see alias sets on incomplete aggregates.  And we
+     should not call layout_type on aggregates that are already laid
+     out.  */
+ if (AGGREGATE_TYPE_P (type))
+ gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
+}
+
+/* Vector types need to re-check the target flags each time we report
+ the machine mode. We need to do this because attribute target can
+ change the result of vector_mode_supported_p and have_regs_of_mode
+ on a per-function basis. Thus the TYPE_MODE of a VECTOR_TYPE can
+ change on a per-function basis. */
+/* ??? Possibly a better solution is to run through all the types
+ referenced by a function and re-compute the TYPE_MODE once, rather
+ than make the TYPE_MODE macro call a function. */
+
+enum machine_mode
+vector_type_mode (const_tree t)
+{
+ enum machine_mode mode;
+
+ gcc_assert (TREE_CODE (t) == VECTOR_TYPE);
+
+  /* Read the mode field directly: using the TYPE_MODE macro on a
+     VECTOR_TYPE would recurse back into this function.  */
+  mode = t->type.mode;
+ if (VECTOR_MODE_P (mode)
+ && (!targetm.vector_mode_supported_p (mode)
+ || !have_regs_of_mode[mode]))
+ {
+ enum machine_mode innermode = TREE_TYPE (t)->type.mode;
+
+ /* For integers, try mapping it to a same-sized scalar mode. */
+ if (GET_MODE_CLASS (innermode) == MODE_INT)
+ {
+ mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
+ * GET_MODE_BITSIZE (innermode), MODE_INT, 0);
+
+ if (mode != VOIDmode && have_regs_of_mode[mode])
+ return mode;
+ }
+
+ return BLKmode;
+ }
+
+ return mode;
}
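/* Illustrative example (editor's note, not part of the patch):
   assuming an x86 target with attribute target support,

     typedef int v8si __attribute__ ((vector_size (32)));
     __attribute__ ((target ("avx"))) v8si f (v8si x) { return x; }
     v8si g (v8si x) { return x; }

   the stored mode of v8si is its vector mode, but while compiling g
   (without AVX enabled) that mode is unsupported, so vector_type_mode
   falls back to a same-sized integer mode or to BLKmode.  */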
\f
/* Create and return a type for signed integers of PRECISION bits. */
if (unsignedp)
{
TYPE_UNSIGNED (type) = 1;
- TYPE_MODE (type) = mode_for_size (precision, MODE_UFRACT, 0);
+ SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
}
else
- TYPE_MODE (type) = mode_for_size (precision, MODE_FRACT, 0);
+ SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
layout_type (type);
return type;
if (unsignedp)
{
TYPE_UNSIGNED (type) = 1;
- TYPE_MODE (type) = mode_for_size (precision, MODE_UACCUM, 0);
+ SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
}
else
- TYPE_MODE (type) = mode_for_size (precision, MODE_ACCUM, 0);
+ SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
layout_type (type);
return type;
value to enable integer types to be created. */
void
-initialize_sizetypes (bool signed_p)
+initialize_sizetypes (void)
{
tree t = make_node (INTEGER_TYPE);
int precision = GET_MODE_BITSIZE (SImode);
- TYPE_MODE (t) = SImode;
+ SET_TYPE_MODE (t, SImode);
TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
- TYPE_USER_ALIGN (t) = 0;
TYPE_IS_SIZETYPE (t) = 1;
- TYPE_UNSIGNED (t) = !signed_p;
+ TYPE_UNSIGNED (t) = 1;
TYPE_SIZE (t) = build_int_cst (t, precision);
TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
TYPE_PRECISION (t) = precision;
- /* Set TYPE_MIN_VALUE and TYPE_MAX_VALUE. */
- set_min_and_max_values_for_integral_type (t, precision, !signed_p);
+ set_min_and_max_values_for_integral_type (t, precision, true);
sizetype = t;
bitsizetype = build_distinct_type_copy (t);
}
-/* Make sizetype a version of TYPE, and initialize *sizetype
- accordingly. We do this by overwriting the stub sizetype and
- bitsizetype nodes created by initialize_sizetypes. This makes sure
- that (a) anything stubby about them no longer exists, (b) any
- INTEGER_CSTs created with such a type, remain valid. */
+/* Make sizetype a version of TYPE, and initialize *sizetype accordingly.
+ We do this by overwriting the stub sizetype and bitsizetype nodes created
+ by initialize_sizetypes. This makes sure that (a) anything stubby about
+ them no longer exists and (b) any INTEGER_CSTs created with such a type,
+ remain valid. */
void
set_sizetype (tree type)
{
+ tree t, max;
int oprecision = TYPE_PRECISION (type);
/* The *bitsizetype types use a precision that avoids overflows when
calculating signed sizes / offsets in bits. However, when
cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit
precision. */
- int precision = MIN (MIN (oprecision + BITS_PER_UNIT_LOG + 1,
- MAX_FIXED_MODE_SIZE),
- 2 * HOST_BITS_PER_WIDE_INT);
- tree t;
+ int precision
+ = MIN (oprecision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
+ precision
+ = GET_MODE_PRECISION (smallest_mode_for_size (precision, MODE_INT));
+ if (precision > HOST_BITS_PER_WIDE_INT * 2)
+ precision = HOST_BITS_PER_WIDE_INT * 2;
- gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));
+ /* sizetype must be an unsigned type. */
+ gcc_assert (TYPE_UNSIGNED (type));
t = build_distinct_type_copy (type);
- /* We do want to use sizetype's cache, as we will be replacing that
- type. */
+ /* We want to use sizetype's cache, as we will be replacing that type. */
TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
/* Replace our original stub sizetype. */
memcpy (sizetype, t, tree_size (sizetype));
TYPE_MAIN_VARIANT (sizetype) = sizetype;
+ TYPE_CANONICAL (sizetype) = sizetype;
+
+ /* sizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
+ sign-extended in a way consistent with force_fit_type. */
+ max = TYPE_MAX_VALUE (sizetype);
+ TYPE_MAX_VALUE (sizetype)
+ = double_int_to_tree (sizetype, tree_to_double_int (max));
t = make_node (INTEGER_TYPE);
TYPE_NAME (t) = get_identifier ("bit_size_type");
- /* We do want to use bitsizetype's cache, as we will be replacing that
- type. */
+ /* We want to use bitsizetype's cache, as we will be replacing that type. */
TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
TYPE_PRECISION (t) = precision;
/* Replace our original stub bitsizetype. */
memcpy (bitsizetype, t, tree_size (bitsizetype));
TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;
+ TYPE_CANONICAL (bitsizetype) = bitsizetype;
- if (TYPE_UNSIGNED (type))
- {
- fixup_unsigned_type (bitsizetype);
- ssizetype = build_distinct_type_copy (make_signed_type (oprecision));
- TYPE_IS_SIZETYPE (ssizetype) = 1;
- sbitsizetype = build_distinct_type_copy (make_signed_type (precision));
- TYPE_IS_SIZETYPE (sbitsizetype) = 1;
- }
- else
- {
- fixup_signed_type (bitsizetype);
- ssizetype = sizetype;
- sbitsizetype = bitsizetype;
- }
-
- /* If SIZETYPE is unsigned, we need to fix TYPE_MAX_VALUE so that
- it is sign extended in a way consistent with force_fit_type. */
- if (TYPE_UNSIGNED (type))
- {
- tree orig_max, new_max;
-
- orig_max = TYPE_MAX_VALUE (sizetype);
+ fixup_unsigned_type (bitsizetype);
- /* Build a new node with the same values, but a different type.
- Sign extend it to ensure consistency. */
- new_max = build_int_cst_wide_type (sizetype,
- TREE_INT_CST_LOW (orig_max),
- TREE_INT_CST_HIGH (orig_max));
- TYPE_MAX_VALUE (sizetype) = new_max;
- }
+ /* Create the signed variants of *sizetype. */
+ ssizetype = make_signed_type (oprecision);
+ TYPE_IS_SIZETYPE (ssizetype) = 1;
+ sbitsizetype = make_signed_type (precision);
+ TYPE_IS_SIZETYPE (sbitsizetype) = 1;
}
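/* Worked example (editor's note, not part of the patch): on a 32-bit
   target where sizetype has 32 bits and MAX_FIXED_MODE_SIZE is 64,
   the initial precision is MIN (32 + BITS_PER_UNIT_LOG + 1, 64) = 36;
   rounding up to the smallest integer mode yields DImode, so
   bitsizetype ends up with 64 bits of precision, enough to express
   any byte size in bits without overflow.  */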
\f
/* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE