#include "expr.h"
#include "output.h"
#include "diagnostic-core.h"
-#include "toplev.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
/* If nonzero, this is an upper limit on alignment of structure fields.
The value is measured in bits. */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
-/* ... and its original value in bytes, specified via -fpack-struct=<value>. */
-unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;
/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
in the address spaces' address_mode, not pointer_mode. Set only by
#endif
extern void debug_rli (record_layout_info);
\f
-/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
-
-static GTY(()) VEC(tree,gc) *pending_sizes;
-
/* Show that REFERENCE_TYPES are internal and should use address_mode.
Called only by front end. */
reference_types_internal = 1;
}
-/* Get a VEC of all the objects put on the pending sizes list. */
-
-VEC(tree,gc) *
-get_pending_sizes (void)
-{
- VEC(tree,gc) *chain = pending_sizes;
-
- pending_sizes = 0;
- return chain;
-}
-
-/* Add EXPR to the pending sizes list. */
-
-void
-put_pending_size (tree expr)
-{
- /* Strip any simple arithmetic from EXPR to see if it has an underlying
- SAVE_EXPR. */
- expr = skip_simple_arithmetic (expr);
-
- if (TREE_CODE (expr) == SAVE_EXPR)
- VEC_safe_push (tree, gc, pending_sizes, expr);
-}
-
-/* Put a chain of objects into the pending sizes list, which must be
- empty. */
-
-void
-put_pending_sizes (VEC(tree,gc) *chain)
-{
- gcc_assert (!pending_sizes);
- pending_sizes = chain;
-}
-
/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
to serve as the actual size-expression for a type or decl. */
tree
variable_size (tree size)
{
-  tree save;
-
/* Obviously. */
if (TREE_CONSTANT (size))
return size;
if (CONTAINS_PLACEHOLDER_P (size))
return self_referential_size (size);
-  /* If the language-processor is to take responsibility for variable-sized
-    items (e.g., languages which have elaboration procedures like Ada),
-    just return SIZE unchanged. */
-  if (lang_hooks.decls.global_bindings_p () < 0)
+  /* If we are in the global binding level, we can't make a SAVE_EXPR
+    since it may end up being shared across functions, so it is up
+    to the front-end to deal with this case. */
+  if (lang_hooks.decls.global_bindings_p ())
return size;
-  size = save_expr (size);
-
-  /* If an array with a variable number of elements is declared, and
-    the elements require destruction, we will emit a cleanup for the
-    array. That cleanup is run both on normal exit from the block
-    and in the exception-handler for the block. Normally, when code
-    is used in both ordinary code and in an exception handler it is
-    `unsaved', i.e., all SAVE_EXPRs are recalculated. However, we do
-    not wish to do that here; the array-size is the same in both
-    places. */
-  save = skip_simple_arithmetic (size);
-
-  if (cfun && cfun->dont_save_pending_sizes_p)
-    /* The front-end doesn't want us to keep a list of the expressions
-    that determine sizes for variable size objects. Trust it. */
-    return size;
+  /* At function scope, wrap SIZE in a SAVE_EXPR so the computation is
+     evaluated at most once.  */
+  return save_expr (size);
+}
-  if (lang_hooks.decls.global_bindings_p ())
+/* An array of functions used for self-referential size computation. */
+static GTY(()) VEC (tree, gc) *size_functions;
+
+/* Look inside EXPR into simple arithmetic operations involving constants.
+  Return the outermost non-arithmetic or non-constant node. */
+
+static tree
+skip_simple_constant_arithmetic (tree expr)
+{
+  while (true)
{
-    if (TREE_CONSTANT (size))
-    error ("type size can%'t be explicitly evaluated");
+    if (UNARY_CLASS_P (expr))
+	expr = TREE_OPERAND (expr, 0);
+    else if (BINARY_CLASS_P (expr))
+	{
+	  /* Step into whichever operand of the binary operation is the
+	     non-constant one; stop if neither operand is constant.  */
+	  if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
+	    expr = TREE_OPERAND (expr, 0);
+	  else if (TREE_CONSTANT (TREE_OPERAND (expr, 0)))
+	    expr = TREE_OPERAND (expr, 1);
+	  else
+	    break;
+	}
else
-	error ("variable-size type declared outside of any function");
-
-    return size_one_node;
+	break;
}
-  put_pending_size (save);
-
-  return size;
+  return expr;
}
-/* An array of functions used for self-referential size computation. */
-static GTY(()) VEC (tree, gc) *size_functions;
-
/* Similar to copy_tree_r but do not copy component references involving
PLACEHOLDER_EXPRs. These nodes are spotted in find_placeholder_in_expr
and substituted in substitute_in_expr. */
else if (code == SAVE_EXPR)
return error_mark_node;
+ else if (code == STATEMENT_LIST)
+ gcc_unreachable ();
+
return copy_tree_r (tp, walk_subtrees, data);
}
VEC(tree,gc) *args = NULL;
/* Do not factor out simple operations. */
- t = skip_simple_arithmetic (size);
+ t = skip_simple_constant_arithmetic (size);
if (TREE_CODE (t) == CALL_EXPR)
return size;
VEC_safe_push (tree, gc, size_functions, fndecl);
/* Replace the original expression with a call to the size function. */
- return build_call_expr_loc_vec (input_location, fndecl, args);
+ return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
}
/* Take, queue and compile all the size functions. It is essential that
return mode;
}
+/* Find a mode that is suitable for representing a vector with
+  NUNITS elements of mode INNERMODE. Returns BLKmode if there
+  is no suitable mode. */
+
+enum machine_mode
+mode_for_vector (enum machine_mode innermode, unsigned nunits)
+{
+  enum machine_mode mode;
+
+  /* First, look for a supported vector type. */
+  if (SCALAR_FLOAT_MODE_P (innermode))
+    mode = MIN_MODE_VECTOR_FLOAT;
+  else if (SCALAR_FRACT_MODE_P (innermode))
+    mode = MIN_MODE_VECTOR_FRACT;
+  else if (SCALAR_UFRACT_MODE_P (innermode))
+    mode = MIN_MODE_VECTOR_UFRACT;
+  else if (SCALAR_ACCUM_MODE_P (innermode))
+    mode = MIN_MODE_VECTOR_ACCUM;
+  else if (SCALAR_UACCUM_MODE_P (innermode))
+    mode = MIN_MODE_VECTOR_UACCUM;
+  else
+    mode = MIN_MODE_VECTOR_INT;
+
+  /* Do not check vector_mode_supported_p here. We'll do that
+    later in vector_type_mode. */
+  for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
+    if (GET_MODE_NUNITS (mode) == nunits
+	&& GET_MODE_INNER (mode) == innermode)
+      break;
+
+  /* For integers, try mapping it to a same-sized scalar mode. */
+  if (mode == VOIDmode
+      && GET_MODE_CLASS (innermode) == MODE_INT)
+    mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
+			  MODE_INT, 0);
+
+  /* An integer fallback mode is only usable if some hard register can
+     hold it; otherwise give up and return BLKmode.  */
+  if (mode == VOIDmode
+      || (GET_MODE_CLASS (mode) == MODE_INT
+	  && !have_regs_of_mode[mode]))
+    return BLKmode;
+
+  return mode;
+}
+
/* Return the alignment of MODE. This will be bounded by 1 and
BIGGEST_ALIGNMENT. */
return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}
+/* Return the natural mode of an array, given that it is SIZE bytes in
+  total and has elements of type ELEM_TYPE. */
+
+static enum machine_mode
+mode_for_array (tree elem_type, tree size)
+{
+  tree elem_size;
+  unsigned HOST_WIDE_INT int_size, int_elem_size;
+  bool limit_p;
+
+  /* One-element arrays get the component type's mode. */
+  elem_size = TYPE_SIZE (elem_type);
+  if (simple_cst_equal (size, elem_size))
+    return TYPE_MODE (elem_type);
+
+  /* Default to a size-limited integer mode; lift the limit only when the
+     target says it supports an array mode with this element count.  */
+  limit_p = true;
+  if (host_integerp (size, 1) && host_integerp (elem_size, 1))
+    {
+      int_size = tree_low_cst (size, 1);
+      int_elem_size = tree_low_cst (elem_size, 1);
+      if (int_elem_size > 0
+	  && int_size % int_elem_size == 0
+	  && targetm.array_mode_supported_p (TYPE_MODE (elem_type),
+					     int_size / int_elem_size))
+	limit_p = false;
+    }
+  return mode_for_size_tree (size, MODE_INT, limit_p);
+}
\f
/* Subroutine of layout_decl: Force alignment required for the data type.
But if the decl itself wants greater alignment, don't override that. */
}
/* See if we can use an ordinary integer mode for a bit-field.
- Conditions are: a fixed size that is correct for another mode
- and occupying a complete byte or bytes on proper boundary. */
+ Conditions are: a fixed size that is correct for another mode,
+ occupying a complete byte or bytes on proper boundary,
+ and not volatile or not -fstrict-volatile-bitfields. */
if (TYPE_SIZE (type) != 0
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
+ && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
+ && !(TREE_THIS_VOLATILE (decl)
+ && flag_strict_volatile_bitfields > 0))
{
enum machine_mode xmode
= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
if (TREE_CODE (rli->t) == UNION_TYPE)
rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
- rli->offset = fold_build3_loc (input_location, COND_EXPR, sizetype,
- DECL_QUALIFIER (field),
+ rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
DECL_SIZE_UNIT (field), rli->offset);
}
if (warn_packed_bitfield_compat == 1)
inform
(input_location,
- "Offset of packed bit-field %qD has changed in GCC 4.4",
+ "offset of packed bit-field %qD has changed in GCC 4.4",
field);
}
else
- rli->bitpos = round_up_loc (input_location, rli->bitpos, type_align);
+ rli->bitpos = round_up (rli->bitpos, type_align);
}
if (! DECL_PACKED (field))
if (maximum_field_alignment != 0)
type_align = MIN (type_align, maximum_field_alignment);
- rli->bitpos = round_up_loc (input_location, rli->bitpos, type_align);
+ rli->bitpos = round_up (rli->bitpos, type_align);
/* If we really aligned, don't allow subsequent bitfields
to undo that. */
= size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);
/* Round the size up to be a multiple of the required alignment. */
- TYPE_SIZE (rli->t) = round_up_loc (input_location, unpadded_size,
- TYPE_ALIGN (rli->t));
+ TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
TYPE_SIZE_UNIT (rli->t)
- = round_up_loc (input_location, unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
+ = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
if (TREE_CONSTANT (unpadded_size)
&& simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif
- unpacked_size = round_up_loc (input_location, TYPE_SIZE (rli->t), rli->unpacked_align);
+ unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
{
if (TYPE_NAME (rli->t))
if (TYPE_SIZE (type) != 0)
{
- TYPE_SIZE (type) = round_up_loc (input_location,
- TYPE_SIZE (type), TYPE_ALIGN (type));
- TYPE_SIZE_UNIT (type) = round_up_loc (input_location, TYPE_SIZE_UNIT (type),
- TYPE_ALIGN_UNIT (type));
+ TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
+ TYPE_SIZE_UNIT (type)
+ = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
}
/* Evaluate nonconstant sizes only once, either now or as soon as safe. */
/* Find an appropriate mode for the vector type. */
if (TYPE_MODE (type) == VOIDmode)
- {
- enum machine_mode innermode = TYPE_MODE (innertype);
- enum machine_mode mode;
-
- /* First, look for a supported vector type. */
- if (SCALAR_FLOAT_MODE_P (innermode))
- mode = MIN_MODE_VECTOR_FLOAT;
- else if (SCALAR_FRACT_MODE_P (innermode))
- mode = MIN_MODE_VECTOR_FRACT;
- else if (SCALAR_UFRACT_MODE_P (innermode))
- mode = MIN_MODE_VECTOR_UFRACT;
- else if (SCALAR_ACCUM_MODE_P (innermode))
- mode = MIN_MODE_VECTOR_ACCUM;
- else if (SCALAR_UACCUM_MODE_P (innermode))
- mode = MIN_MODE_VECTOR_UACCUM;
- else
- mode = MIN_MODE_VECTOR_INT;
-
- /* Do not check vector_mode_supported_p here. We'll do that
- later in vector_type_mode. */
- for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
- if (GET_MODE_NUNITS (mode) == nunits
- && GET_MODE_INNER (mode) == innermode)
- break;
-
- /* For integers, try mapping it to a same-sized scalar mode. */
- if (mode == VOIDmode
- && GET_MODE_CLASS (innermode) == MODE_INT)
- mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
- MODE_INT, 0);
-
- if (mode == VOIDmode ||
- (GET_MODE_CLASS (mode) == MODE_INT
- && !have_regs_of_mode[mode]))
- SET_TYPE_MODE (type, BLKmode);
- else
- SET_TYPE_MODE (type, mode);
- }
+ SET_TYPE_MODE (type,
+ mode_for_vector (TYPE_MODE (innertype), nunits));
TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
TYPE_SIZE_UNIT (innertype),
- size_int (nunits), 0);
+ size_int (nunits));
TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
- bitsize_int (nunits), 0);
+ bitsize_int (nunits));
/* Always naturally align vectors. This prevents ABI changes
depending on whether or not native vector modes are supported. */
if (integer_zerop (element_size))
length = size_zero_node;
- /* The initial subtraction should happen in the original type so
+ /* The computation should happen in the original type so
that (possible) negative values are handled appropriately. */
else
length
- = size_binop (PLUS_EXPR, size_one_node,
- fold_convert (sizetype,
- fold_build2_loc (input_location,
- MINUS_EXPR,
- TREE_TYPE (lb),
- ub, lb)));
+ = fold_convert (sizetype,
+ fold_build2 (PLUS_EXPR, TREE_TYPE (lb),
+ build_int_cst (TREE_TYPE (lb), 1),
+ fold_build2 (MINUS_EXPR,
+ TREE_TYPE (lb),
+ ub, lb)));
TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
fold_convert (bitsizetype,
#else
TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
#endif
- if (!TYPE_SIZE (element))
- /* We don't know the size of the underlying element type, so
- our alignment calculations will be wrong, forcing us to
- fall back on structural equality. */
- SET_TYPE_STRUCTURAL_EQUALITY (type);
TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
SET_TYPE_MODE (type, BLKmode);
if (TYPE_SIZE (type) != 0
&& (TYPE_MODE (TREE_TYPE (type)) != BLKmode
|| TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
{
- /* One-element arrays get the component type's mode. */
- if (simple_cst_equal (TYPE_SIZE (type),
- TYPE_SIZE (TREE_TYPE (type))))
- SET_TYPE_MODE (type, TYPE_MODE (TREE_TYPE (type)));
- else
- SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type),
- MODE_INT, 1));
-
+ SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
+ TYPE_SIZE (type)));
if (TYPE_MODE (type) != BLKmode
&& STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
&& TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
gcc_assert (TREE_CODE (t) == VECTOR_TYPE);
- mode = t->type.mode;
+ mode = t->type_common.mode;
if (VECTOR_MODE_P (mode)
&& (!targetm.vector_mode_supported_p (mode)
|| !have_regs_of_mode[mode]))
{
- enum machine_mode innermode = TREE_TYPE (t)->type.mode;
+ enum machine_mode innermode = TREE_TYPE (t)->type_common.mode;
/* For integers, try mapping it to a same-sized scalar mode. */
if (GET_MODE_CLASS (innermode) == MODE_INT)
TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
TYPE_PRECISION (t) = precision;
- set_min_and_max_values_for_integral_type (t, precision, true);
+ set_min_and_max_values_for_integral_type (t, precision,
+ /*is_unsigned=*/true);
sizetype = t;
bitsizetype = build_distinct_type_copy (t);
/* We want to use sizetype's cache, as we will be replacing that type. */
TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
- TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
TYPE_UID (t) = TYPE_UID (sizetype);
TYPE_IS_SIZETYPE (t) = 1;