/* C-compiler utilities for types and variables storage layout
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
This file is part of GCC.
/* ... and its original value in bytes, specified via -fpack-struct=<value>. */
unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;
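[Annotation: for context, initial_max_fld_align records the -fpack-struct value in bytes. A minimal sketch of what that option changes, assuming a typical LP64 target where int needs 4-byte alignment; the struct and the numbers are illustrative, not part of the patch:

    struct s
    {
      char c;   /* offset 0 */
      int i;    /* offset 4 by default; offset 1 under -fpack-struct=1 */
    };

    /* sizeof (struct s) is 8 by default but 5 with -fpack-struct=1,
       which sets initial_max_fld_align to 1.  */
]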
-/* Nonzero if all REFERENCE_TYPEs are internal and hence should be
- allocated in Pmode, not ptr_mode. Set only by internal_reference_types
- called only by a front end. */
+/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
+   in the address spaces' address_mode, not pointer_mode.  Set only by
+   internal_reference_types, which is called only by a front end.  */
static int reference_types_internal = 0;
static tree self_referential_size (tree);
\f
/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
-static GTY(()) tree pending_sizes;
+static GTY(()) VEC(tree,gc) *pending_sizes;
-/* Show that REFERENCE_TYPES are internal and should be Pmode. Called only
- by front end. */
+/* Show that REFERENCE_TYPEs are internal and should use address_mode.
+   Called only by a front end.  */
void
internal_reference_types (void)
{
  reference_types_internal = 1;
}
-/* Get a list of all the objects put on the pending sizes list. */
+/* Get a VEC of all the objects put on the pending sizes list. */
-tree
+VEC(tree,gc) *
get_pending_sizes (void)
{
- tree chain = pending_sizes;
+ VEC(tree,gc) *chain = pending_sizes;
pending_sizes = 0;
return chain;
expr = skip_simple_arithmetic (expr);
if (TREE_CODE (expr) == SAVE_EXPR)
- pending_sizes = tree_cons (NULL_TREE, expr, pending_sizes);
+ VEC_safe_push (tree, gc, pending_sizes, expr);
}
-/* Put a chain of objects into the pending sizes list, which must be
-   empty.  */
+/* Put a VEC of objects into the pending sizes list, which must be
+   empty.  */
void
-put_pending_sizes (tree chain)
+put_pending_sizes (VEC(tree,gc) *chain)
{
gcc_assert (!pending_sizes);
pending_sizes = chain;
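[Annotation: since pending_sizes is now a VEC rather than a TREE_LIST, consumers walk it with the VEC API instead of TREE_CHAIN. A hedged sketch of the consumer side, using this era's VEC_iterate interface; the variable names are illustrative and the real callers live elsewhere in the middle end:

    VEC(tree,gc) *sizes = get_pending_sizes ();
    unsigned ix;
    tree expr;

    /* Expand each deferred SAVE_EXPR for its side effects.  */
    for (ix = 0; VEC_iterate (tree, sizes, ix, expr); ix++)
      expand_expr (expr, const0_rtx, VOIDmode, EXPAND_NORMAL);
]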
if (STRICT_ALIGNMENT)
warning (OPT_Wattributes, "packed attribute causes "
"inefficient alignment for %q+D", field);
- else
+ /* Don't warn if DECL_PACKED was set by the type. */
+ else if (!TYPE_PACKED (rli->t))
warning (OPT_Wattributes, "packed attribute is "
"unnecessary for %q+D", field);
}
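[Annotation: the new TYPE_PACKED (rli->t) test distinguishes a field the user packed explicitly from one that merely inherited DECL_PACKED from a packed type, where a per-field note would be noise. Illustrative inputs, assuming a target with !STRICT_ALIGNMENT so the "unnecessary" branch is reachable:

    /* Field-level attribute: still warned about when packing buys
       nothing for this field.  */
    struct a { int x __attribute__ ((packed)); };

    /* Type-level attribute: every field gets DECL_PACKED from the
       type, so with this patch no per-field warning is emitted.  */
    struct b { int x; } __attribute__ ((packed));
]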
/* No, we need to skip space before this field.
Bump the cumulative size to a multiple of the field alignment. */
- warning (OPT_Wpadded, "padding struct to align %q+D", field);
+ if (DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
+ warning (OPT_Wpadded, "padding struct to align %q+D", field);
/* If the alignment is still within offset_align, just align
the bit position. */
until we see a bitfield (and come by here again) we just skip
calculating it. */
if (DECL_SIZE (field) != NULL
- && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 0)
- && host_integerp (DECL_SIZE (field), 0))
+ && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
+ && host_integerp (DECL_SIZE (field), 1))
{
- HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);
- HOST_WIDE_INT typesize
+ unsigned HOST_WIDE_INT bitsize
+ = tree_low_cst (DECL_SIZE (field), 1);
+ unsigned HOST_WIDE_INT typesize
= tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);
if (typesize < bitsize)
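[Annotation: the switch to unsigned HOST_WIDE_INT and host_integerp (..., 1) is safe because sizes are non-negative, and it stops large sizes that only fit an unsigned HOST_WIDE_INT from being skipped. The typesize < bitsize case arises for bit-fields declared wider than their underlying type, which the C++ front end accepts; the excess bits become padding:

    /* Valid C++ (the C front end rejects it): bitsize is 16 while
       typesize is 8, so typesize < bitsize.  */
    struct s
    {
      char f : 16;
    };
]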
= round_up_loc (input_location, unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
if (TREE_CONSTANT (unpadded_size)
- && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0)
+ && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
+ && input_location != BUILTINS_LOCATION)
warning (OPT_Wpadded, "padding struct size to alignment boundary");
if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
unpacked_size = round_up_loc (input_location, TYPE_SIZE (rli->t), rli->unpacked_align);
if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
{
- TYPE_PACKED (rli->t) = 0;
-
if (TYPE_NAME (rli->t))
{
tree name;
/* A pointer might be MODE_PARTIAL_INT,
but ptrdiff_t must be integral. */
SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
+ TYPE_PRECISION (type) = POINTER_SIZE;
break;
case FUNCTION_TYPE:
case POINTER_TYPE:
case REFERENCE_TYPE:
{
- enum machine_mode mode = ((TREE_CODE (type) == REFERENCE_TYPE
- && reference_types_internal)
- ? Pmode : TYPE_MODE (type));
-
- int nbits = GET_MODE_BITSIZE (mode);
+ enum machine_mode mode = TYPE_MODE (type);
+ if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
+ {
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
+ mode = targetm.addr_space.address_mode (as);
+ }
- TYPE_SIZE (type) = bitsize_int (nbits);
+ TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
TYPE_UNSIGNED (type) = 1;
- TYPE_PRECISION (type) = nbits;
+ TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
}
break;
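[Annotation: with named address spaces, the mode of an internal REFERENCE_TYPE comes from the pointed-to type's address space rather than a single global Pmode. A hedged illustration based on the SPU port's __ea space, where the exact sizes depend on -mea32/-mea64:

    int *p;        /* generic space: 32 bits on SPU */
    __ea int *q;   /* __ea space: 64 bits under -mea64 */
]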
tree element_size = TYPE_SIZE (element);
tree length;
+ /* Make sure that an array of zero-sized elements is zero-sized
+ regardless of its extent. */
+ if (integer_zerop (element_size))
+ length = size_zero_node;
+
/* The initial subtraction should happen in the original type so
that (possible) negative values are handled appropriately. */
- length = size_binop (PLUS_EXPR, size_one_node,
- fold_convert (sizetype,
- fold_build2_loc (input_location,
- MINUS_EXPR,
- TREE_TYPE (lb),
- ub, lb)));
+ else
+ length
+ = size_binop (PLUS_EXPR, size_one_node,
+ fold_convert (sizetype,
+ fold_build2_loc (input_location,
+ MINUS_EXPR,
+ TREE_TYPE (lb),
+ ub, lb)));
TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
fold_convert (bitsizetype,
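[Annotation: the early length = size_zero_node case keeps the MULT_EXPR above from producing a nonzero size for arrays whose element type is itself zero-sized, which GNU C permits:

    /* GNU C extension: an empty struct has size 0.  */
    struct empty {};
    struct empty a[100];

    /* With this patch sizeof (a) is 0; the extent no longer
       participates in the size computation once the element size
       is zero.  */
]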
change the result of vector_mode_supported_p and have_regs_of_mode
on a per-function basis. Thus the TYPE_MODE of a VECTOR_TYPE can
change on a per-function basis. */
-/* ??? Possibly a better solution is to run through all the types
+/* ??? Possibly a better solution is to run through all the types
referenced by a function and re-compute the TYPE_MODE once, rather
than make the TYPE_MODE macro call a function. */
value to enable integer types to be created. */
void
-initialize_sizetypes (bool signed_p)
+initialize_sizetypes (void)
{
tree t = make_node (INTEGER_TYPE);
int precision = GET_MODE_BITSIZE (SImode);
SET_TYPE_MODE (t, SImode);
TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
- TYPE_USER_ALIGN (t) = 0;
TYPE_IS_SIZETYPE (t) = 1;
- TYPE_UNSIGNED (t) = !signed_p;
+ TYPE_UNSIGNED (t) = 1;
TYPE_SIZE (t) = build_int_cst (t, precision);
TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
TYPE_PRECISION (t) = precision;
- /* Set TYPE_MIN_VALUE and TYPE_MAX_VALUE. */
- set_min_and_max_values_for_integral_type (t, precision, !signed_p);
+ set_min_and_max_values_for_integral_type (t, precision, true);
sizetype = t;
bitsizetype = build_distinct_type_copy (t);
}
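[Annotation: callers no longer choose the stub's signedness; the stub is always unsigned and is later replaced wholesale by set_sizetype. A sketch of the intended sequence on the front-end side; the LP64 choice shown is an assumption for illustration:

    initialize_sizetypes ();   /* unsigned SImode stub */
    ...
    /* Later, once the target's size type is known, e.g. what a
       C-family front end derives from SIZE_TYPE on an LP64 target:  */
    set_sizetype (long_unsigned_type_node);
]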
-/* Make sizetype a version of TYPE, and initialize *sizetype
- accordingly. We do this by overwriting the stub sizetype and
- bitsizetype nodes created by initialize_sizetypes. This makes sure
- that (a) anything stubby about them no longer exists, (b) any
- INTEGER_CSTs created with such a type, remain valid. */
+/* Make sizetype a version of TYPE, and initialize *sizetype accordingly.
+   We do this by overwriting the stub sizetype and bitsizetype nodes created
+   by initialize_sizetypes.  This makes sure that (a) anything stubby about
+   them no longer exists and (b) any INTEGER_CSTs created with such a type
+   remain valid.  */
void
set_sizetype (tree type)
{
- tree t;
+ tree t, max;
int oprecision = TYPE_PRECISION (type);
/* The *bitsizetype types use a precision that avoids overflows when
calculating signed sizes / offsets in bits. However, when
if (precision > HOST_BITS_PER_WIDE_INT * 2)
precision = HOST_BITS_PER_WIDE_INT * 2;
- gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));
+ /* sizetype must be an unsigned type. */
+ gcc_assert (TYPE_UNSIGNED (type));
t = build_distinct_type_copy (type);
- /* We do want to use sizetype's cache, as we will be replacing that
- type. */
+ /* We want to use sizetype's cache, as we will be replacing that type. */
TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
TYPE_MAIN_VARIANT (sizetype) = sizetype;
TYPE_CANONICAL (sizetype) = sizetype;
+ /* sizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
+ sign-extended in a way consistent with force_fit_type. */
+ max = TYPE_MAX_VALUE (sizetype);
+ TYPE_MAX_VALUE (sizetype)
+ = build_int_cst_wide_type (sizetype,
+ TREE_INT_CST_LOW (max),
+ TREE_INT_CST_HIGH (max));
+
t = make_node (INTEGER_TYPE);
TYPE_NAME (t) = get_identifier ("bit_size_type");
- /* We do want to use bitsizetype's cache, as we will be replacing that
- type. */
+ /* We want to use bitsizetype's cache, as we will be replacing that type. */
TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
TYPE_PRECISION (t) = precision;
TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;
TYPE_CANONICAL (bitsizetype) = bitsizetype;
- if (TYPE_UNSIGNED (type))
- {
- fixup_unsigned_type (bitsizetype);
- ssizetype = make_signed_type (oprecision);
- TYPE_IS_SIZETYPE (ssizetype) = 1;
- sbitsizetype = make_signed_type (precision);
- TYPE_IS_SIZETYPE (sbitsizetype) = 1;
- }
- else
- {
- fixup_signed_type (bitsizetype);
- ssizetype = sizetype;
- sbitsizetype = bitsizetype;
- }
+ fixup_unsigned_type (bitsizetype);
- /* If SIZETYPE is unsigned, we need to fix TYPE_MAX_VALUE so that
- it is sign extended in a way consistent with force_fit_type. */
- if (TYPE_UNSIGNED (type))
- {
- tree orig_max, new_max;
-
- orig_max = TYPE_MAX_VALUE (sizetype);
-
- /* Build a new node with the same values, but a different type.
- Sign extend it to ensure consistency. */
- new_max = build_int_cst_wide_type (sizetype,
- TREE_INT_CST_LOW (orig_max),
- TREE_INT_CST_HIGH (orig_max));
- TYPE_MAX_VALUE (sizetype) = new_max;
- }
+ /* Create the signed variants of *sizetype. */
+ ssizetype = make_signed_type (oprecision);
+ TYPE_IS_SIZETYPE (ssizetype) = 1;
+ sbitsizetype = make_signed_type (precision);
+ TYPE_IS_SIZETYPE (sbitsizetype) = 1;
}
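[Annotation: a worked example of the TYPE_MAX_VALUE rebuild, assuming a 32-bit sizetype and a 64-bit HOST_WIDE_INT. fixup_unsigned_type-style initialization leaves the maximum zero-extended, while force_fit_type sign-extends constants of sizetypes, so the two representations of 2^32 - 1 would otherwise disagree:

    /* Zero-extended (before the fixup):
         TREE_INT_CST_LOW  = 0x00000000ffffffff, TREE_INT_CST_HIGH = 0
       Rebuilt via build_int_cst_wide_type, which sign-extends from
       bit 31 the way force_fit_type does:
         TREE_INT_CST_LOW  = 0xffffffffffffffff, TREE_INT_CST_HIGH = -1  */
]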
\f
/* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE