/* C-compiler utilities for types and variables storage layout
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
- Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011 Free Software Foundation, Inc.
This file is part of GCC.
#include "function.h"
#include "expr.h"
#include "output.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"
+#include "cgraph.h"
+#include "tree-inline.h"
+#include "tree-dump.h"
+#include "gimple.h"
/* Data type for the expressions representing sizes of data types.
It is the first integer type laid out. */
/* If nonzero, this is an upper limit on alignment of structure fields.
The value is measured in bits. */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
-/* ... and its original value in bytes, specified via -fpack-struct=<value>. */
-unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;
-/* Nonzero if all REFERENCE_TYPEs are internal and hence should be
- allocated in Pmode, not ptr_mode. Set only by internal_reference_types
- called only by a front end. */
+/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
+ in the address space's address_mode, not pointer_mode. Set only by
+ internal_reference_types called only by a front end. */
static int reference_types_internal = 0;
+static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#endif
extern void debug_rli (record_layout_info);
\f
-/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
-
-static GTY(()) tree pending_sizes;
-
-/* Show that REFERENCE_TYPES are internal and should be Pmode. Called only
- by front end. */
+/* Show that REFERENCE_TYPES are internal and should use address_mode.
+ Called only by front end. */
void
internal_reference_types (void)
reference_types_internal = 1;
}
-/* Get a list of all the objects put on the pending sizes list. */
+/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
+ to serve as the actual size-expression for a type or decl. */
tree
-get_pending_sizes (void)
+variable_size (tree size)
{
- tree chain = pending_sizes;
+ /* Constant sizes need no further processing. */
+ if (TREE_CONSTANT (size))
+ return size;
- pending_sizes = 0;
- return chain;
+ /* If the size is self-referential, we can't make a SAVE_EXPR (see
+ save_expr for the rationale). But we can do something else. */
+ if (CONTAINS_PLACEHOLDER_P (size))
+ return self_referential_size (size);
+
+ /* If we are in the global binding level, we can't make a SAVE_EXPR
+ since it may end up being shared across functions, so it is up
+ to the front-end to deal with this case. */
+ if (lang_hooks.decls.global_bindings_p ())
+ return size;
+
+ return save_expr (size);
}
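+
+/* For instance (illustrative): for a C99 variable-length array
+
+ void f (int n) { char buf[n]; }
+
+ the size 'n * BITS_PER_UNIT' of buf's type is not constant, contains
+ no PLACEHOLDER_EXPR and is not at global scope, so it is wrapped in a
+ SAVE_EXPR and therefore evaluated exactly once. */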
-/* Add EXPR to the pending sizes list. */
+/* An array of functions used for self-referential size computation. */
+static GTY(()) VEC (tree, gc) *size_functions;
-void
-put_pending_size (tree expr)
+/* Look through the simple arithmetic operations involving constants in
+ EXPR. Return the outermost non-arithmetic or non-constant node. */
+
+static tree
+skip_simple_constant_arithmetic (tree expr)
{
- /* Strip any simple arithmetic from EXPR to see if it has an underlying
- SAVE_EXPR. */
- expr = skip_simple_arithmetic (expr);
+ while (true)
+ {
+ if (UNARY_CLASS_P (expr))
+ expr = TREE_OPERAND (expr, 0);
+ else if (BINARY_CLASS_P (expr))
+ {
+ if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
+ expr = TREE_OPERAND (expr, 0);
+ else if (TREE_CONSTANT (TREE_OPERAND (expr, 0)))
+ expr = TREE_OPERAND (expr, 1);
+ else
+ break;
+ }
+ else
+ break;
+ }
- if (TREE_CODE (expr) == SAVE_EXPR)
- pending_sizes = tree_cons (NULL_TREE, expr, pending_sizes);
+ return expr;
}
-/* Put a chain of objects into the pending sizes list, which must be
- empty. */
+/* Similar to copy_tree_r but do not copy component references involving
+ PLACEHOLDER_EXPRs. These nodes are spotted in find_placeholder_in_expr
+ and substituted in substitute_in_expr. */
-void
-put_pending_sizes (tree chain)
+static tree
+copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
- gcc_assert (!pending_sizes);
- pending_sizes = chain;
+ enum tree_code code = TREE_CODE (*tp);
+
+ /* Stop at types, decls and constants, like copy_tree_r does. */
+ if (TREE_CODE_CLASS (code) == tcc_type
+ || TREE_CODE_CLASS (code) == tcc_declaration
+ || TREE_CODE_CLASS (code) == tcc_constant)
+ {
+ *walk_subtrees = 0;
+ return NULL_TREE;
+ }
+
+ /* This is the pattern built in ada/make_aligning_type. */
+ else if (code == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
+ {
+ *walk_subtrees = 0;
+ return NULL_TREE;
+ }
+
+ /* Default case: the component reference. */
+ else if (code == COMPONENT_REF)
+ {
+ tree inner;
+ for (inner = TREE_OPERAND (*tp, 0);
+ REFERENCE_CLASS_P (inner);
+ inner = TREE_OPERAND (inner, 0))
+ ;
+
+ if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
+ {
+ *walk_subtrees = 0;
+ return NULL_TREE;
+ }
+ }
+
+ /* We're not supposed to have them in self-referential size trees
+ because we wouldn't properly control when they are evaluated.
+ However, not creating superfluous SAVE_EXPRs requires accurate
+ tracking of readonly-ness all the way down to here, which we
+ cannot always guarantee in practice. So punt in this case. */
+ else if (code == SAVE_EXPR)
+ return error_mark_node;
+
+ else if (code == STATEMENT_LIST)
+ gcc_unreachable ();
+
+ return copy_tree_r (tp, walk_subtrees, data);
}
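+
+/* For instance (illustrative): walking '(<PLACEHOLDER_EXPR>.n + 3) * 4'
+ copies the arithmetic nodes but leaves the COMPONENT_REF on the
+ placeholder shared, so substitute_in_expr can still replace it in
+ self_referential_size below. */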
-/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
- to serve as the actual size-expression for a type or decl. */
+/* Given a SIZE expression that is self-referential, return an equivalent
+ expression to serve as the actual size expression for a type. */
-tree
-variable_size (tree size)
+static tree
+self_referential_size (tree size)
{
- tree save;
-
- /* If the language-processor is to take responsibility for variable-sized
- items (e.g., languages which have elaboration procedures like Ada),
- just return SIZE unchanged. Likewise for self-referential sizes and
- constant sizes. */
- if (TREE_CONSTANT (size)
- || lang_hooks.decls.global_bindings_p () < 0
- || CONTAINS_PLACEHOLDER_P (size))
+ static unsigned HOST_WIDE_INT fnno = 0;
+ VEC (tree, heap) *self_refs = NULL;
+ tree param_type_list = NULL, param_decl_list = NULL;
+ tree t, ref, return_type, fntype, fnname, fndecl;
+ unsigned int i;
+ char buf[128];
+ VEC(tree,gc) *args = NULL;
+
+ /* Do not factor out simple operations. */
+ t = skip_simple_constant_arithmetic (size);
+ if (TREE_CODE (t) == CALL_EXPR)
return size;
- size = save_expr (size);
-
- /* If an array with a variable number of elements is declared, and
- the elements require destruction, we will emit a cleanup for the
- array. That cleanup is run both on normal exit from the block
- and in the exception-handler for the block. Normally, when code
- is used in both ordinary code and in an exception handler it is
- `unsaved', i.e., all SAVE_EXPRs are recalculated. However, we do
- not wish to do that here; the array-size is the same in both
- places. */
- save = skip_simple_arithmetic (size);
-
- if (cfun && cfun->dont_save_pending_sizes_p)
- /* The front-end doesn't want us to keep a list of the expressions
- that determine sizes for variable size objects. Trust it. */
+ /* Collect the list of self-references in the expression. */
+ find_placeholder_in_expr (size, &self_refs);
+ gcc_assert (VEC_length (tree, self_refs) > 0);
+
+ /* Obtain a private copy of the expression. */
+ t = size;
+ if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
return size;
+ size = t;
- if (lang_hooks.decls.global_bindings_p ())
+ /* Build the parameter and argument lists in parallel; also
+ substitute the former for the latter in the expression. */
+ args = VEC_alloc (tree, gc, VEC_length (tree, self_refs));
+ FOR_EACH_VEC_ELT (tree, self_refs, i, ref)
{
- if (TREE_CONSTANT (size))
- error ("type size can%'t be explicitly evaluated");
+ tree subst, param_name, param_type, param_decl;
+
+ if (DECL_P (ref))
+ {
+ /* We shouldn't have true variables here. */
+ gcc_assert (TREE_READONLY (ref));
+ subst = ref;
+ }
+ /* This is the pattern built in ada/make_aligning_type. */
+ else if (TREE_CODE (ref) == ADDR_EXPR)
+ subst = ref;
+ /* Default case: the component reference. */
+ else
+ subst = TREE_OPERAND (ref, 1);
+
+ sprintf (buf, "p%d", i);
+ param_name = get_identifier (buf);
+ param_type = TREE_TYPE (ref);
+ param_decl
+ = build_decl (input_location, PARM_DECL, param_name, param_type);
+ if (targetm.calls.promote_prototypes (NULL_TREE)
+ && INTEGRAL_TYPE_P (param_type)
+ && TYPE_PRECISION (param_type) < TYPE_PRECISION (integer_type_node))
+ DECL_ARG_TYPE (param_decl) = integer_type_node;
else
- error ("variable-size type declared outside of any function");
+ DECL_ARG_TYPE (param_decl) = param_type;
+ DECL_ARTIFICIAL (param_decl) = 1;
+ TREE_READONLY (param_decl) = 1;
- return size_one_node;
+ size = substitute_in_expr (size, subst, param_decl);
+
+ param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
+ param_decl_list = chainon (param_decl, param_decl_list);
+ VEC_quick_push (tree, args, ref);
}
- put_pending_size (save);
+ VEC_free (tree, heap, self_refs);
+
+ /* Append 'void' to indicate that the number of parameters is fixed. */
+ param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);
+
+ /* The two parameter lists have been created in reverse order. */
+ param_type_list = nreverse (param_type_list);
+ param_decl_list = nreverse (param_decl_list);
+
+ /* Build the function type. */
+ return_type = TREE_TYPE (size);
+ fntype = build_function_type (return_type, param_type_list);
+
+ /* Build the function declaration. */
+ sprintf (buf, "SZ"HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
+ fnname = get_file_function_name (buf);
+ fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
+ for (t = param_decl_list; t; t = DECL_CHAIN (t))
+ DECL_CONTEXT (t) = fndecl;
+ DECL_ARGUMENTS (fndecl) = param_decl_list;
+ DECL_RESULT (fndecl)
+ = build_decl (input_location, RESULT_DECL, 0, return_type);
+ DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
+
+ /* The function has been created by the compiler and we don't
+ want to emit debug info for it. */
+ DECL_ARTIFICIAL (fndecl) = 1;
+ DECL_IGNORED_P (fndecl) = 1;
+
+ /* It is supposed to be "const" and never throw. */
+ TREE_READONLY (fndecl) = 1;
+ TREE_NOTHROW (fndecl) = 1;
+
+ /* We want it to be inlined when this is deemed profitable, as
+ well as discarded if every call has been integrated. */
+ DECL_DECLARED_INLINE_P (fndecl) = 1;
+
+ /* Its body consists of a single return statement. */
+ DECL_INITIAL (fndecl) = make_node (BLOCK);
+ BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
+ t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
+ DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
+ TREE_STATIC (fndecl) = 1;
+
+ /* Put it onto the list of size functions. */
+ VEC_safe_push (tree, gc, size_functions, fndecl);
+
+ /* Replace the original expression with a call to the size function. */
+ return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
+}
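+
+/* Conceptually (illustrative): if a record's size expression reads
+ '(<PLACEHOLDER_EXPR>.n + 3) * 4', the code above emits the equivalent of
+
+ static sizetype SZ0 (sizetype p0) { return (p0 + 3) * 4; }
+
+ and returns 'SZ0 (<PLACEHOLDER_EXPR>.n)', so that each evaluation
+ context supplies its own value for the self-referential part. */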
+
+/* Take, queue and compile all the size functions. It is essential that
+ the size functions be gimplified at the very end of the compilation
+ in order to guarantee transparent handling of self-referential sizes.
+ Otherwise the GENERIC inliner would not be able to inline them back
+ at each of their call sites, thus creating artificial non-constant
+ size expressions which would trigger nasty problems later on. */
+
+void
+finalize_size_functions (void)
+{
+ unsigned int i;
+ tree fndecl;
+
+ for (i = 0; VEC_iterate (tree, size_functions, i, fndecl); i++)
+ {
+ dump_function (TDI_original, fndecl);
+ gimplify_function_tree (fndecl);
+ dump_function (TDI_generic, fndecl);
+ cgraph_finalize_function (fndecl, false);
+ }
- return size;
+ VEC_free (tree, gc, size_functions);
}
\f
-#ifndef MAX_FIXED_MODE_SIZE
-#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
-#endif
-
/* Return the machine mode to use for a nonscalar of SIZE bits. The
mode must be in class MCLASS, and have exactly that many value bits;
it may have padding as well. If LIMIT is nonzero, modes of wider
return mode;
}
+/* Find a mode that is suitable for representing a vector with
+ NUNITS elements of mode INNERMODE. Returns BLKmode if there
+ is no suitable mode. */
+
+enum machine_mode
+mode_for_vector (enum machine_mode innermode, unsigned nunits)
+{
+ enum machine_mode mode;
+
+ /* First, look for a supported vector type. */
+ if (SCALAR_FLOAT_MODE_P (innermode))
+ mode = MIN_MODE_VECTOR_FLOAT;
+ else if (SCALAR_FRACT_MODE_P (innermode))
+ mode = MIN_MODE_VECTOR_FRACT;
+ else if (SCALAR_UFRACT_MODE_P (innermode))
+ mode = MIN_MODE_VECTOR_UFRACT;
+ else if (SCALAR_ACCUM_MODE_P (innermode))
+ mode = MIN_MODE_VECTOR_ACCUM;
+ else if (SCALAR_UACCUM_MODE_P (innermode))
+ mode = MIN_MODE_VECTOR_UACCUM;
+ else
+ mode = MIN_MODE_VECTOR_INT;
+
+ /* Do not check vector_mode_supported_p here. We'll do that
+ later in vector_type_mode. */
+ for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_NUNITS (mode) == nunits
+ && GET_MODE_INNER (mode) == innermode)
+ break;
+
+ /* For integers, try mapping it to a same-sized scalar mode. */
+ if (mode == VOIDmode
+ && GET_MODE_CLASS (innermode) == MODE_INT)
+ mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
+ MODE_INT, 0);
+
+ if (mode == VOIDmode
+ || (GET_MODE_CLASS (mode) == MODE_INT
+ && !have_regs_of_mode[mode]))
+ return BLKmode;
+
+ return mode;
+}
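+
+/* For example (illustrative): mode_for_vector (SImode, 4) returns
+ V4SImode when the target's mode list provides it; failing that, it
+ tries the same-sized scalar integer mode (TImode here), and returns
+ BLKmode when that mode has no registers either. */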
+
/* Return the alignment of MODE. This will be bounded by 1 and
BIGGEST_ALIGNMENT. */
return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}
+/* Return the natural mode of an array, given that it is SIZE bits in
+ total and has elements of type ELEM_TYPE. */
+
+static enum machine_mode
+mode_for_array (tree elem_type, tree size)
+{
+ tree elem_size;
+ unsigned HOST_WIDE_INT int_size, int_elem_size;
+ bool limit_p;
+
+ /* One-element arrays get the component type's mode. */
+ elem_size = TYPE_SIZE (elem_type);
+ if (simple_cst_equal (size, elem_size))
+ return TYPE_MODE (elem_type);
+
+ limit_p = true;
+ if (host_integerp (size, 1) && host_integerp (elem_size, 1))
+ {
+ int_size = tree_low_cst (size, 1);
+ int_elem_size = tree_low_cst (elem_size, 1);
+ if (int_elem_size > 0
+ && int_size % int_elem_size == 0
+ && targetm.array_mode_supported_p (TYPE_MODE (elem_type),
+ int_size / int_elem_size))
+ limit_p = false;
+ }
+ return mode_for_size_tree (size, MODE_INT, limit_p);
+}
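+
+/* For example (illustrative): 'int a[1]' simply gets SImode from its
+ element type, while for larger arrays the MAX_FIXED_MODE_SIZE cap on
+ the chosen integer mode is lifted only when the target's
+ array_mode_supported_p hook accepts the element mode and count. */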
\f
/* Subroutine of layout_decl: Force alignment required for the data type.
But if the decl itself wants greater alignment, don't override that. */
tree type = TREE_TYPE (decl);
enum tree_code code = TREE_CODE (decl);
rtx rtl = NULL_RTX;
+ location_t loc = DECL_SOURCE_LOCATION (decl);
if (code == CONST_DECL)
return;
}
else if (DECL_SIZE_UNIT (decl) == 0)
DECL_SIZE_UNIT (decl)
- = fold_convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
- bitsize_unit_node));
+ = fold_convert_loc (loc, sizetype,
+ size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
+ bitsize_unit_node));
if (code != FIELD_DECL)
/* For non-fields, update the alignment from the type. */
}
/* See if we can use an ordinary integer mode for a bit-field.
- Conditions are: a fixed size that is correct for another mode
- and occupying a complete byte or bytes on proper boundary. */
+ Conditions are: a fixed size that is correct for another mode,
+ occupying a complete byte or bytes on proper boundary,
+ and not -fstrict-volatile-bitfields. If the latter is set,
+ we unfortunately can't check TREE_THIS_VOLATILE, as a cast
+ may make a volatile object later. */
if (TYPE_SIZE (type) != 0
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
+ && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
+ && flag_strict_volatile_bitfields <= 0)
{
enum machine_mode xmode
= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
int size_as_int = TREE_INT_CST_LOW (size);
if (compare_tree_int (size, size_as_int) == 0)
- warning (OPT_Wlarger_than_eq, "size of %q+D is %d bytes", decl, size_as_int);
+ warning (OPT_Wlarger_than_, "size of %q+D is %d bytes", decl, size_as_int);
else
- warning (OPT_Wlarger_than_eq, "size of %q+D is larger than %wd bytes",
+ warning (OPT_Wlarger_than_, "size of %q+D is larger than %wd bytes",
decl, larger_than_size);
}
}
rli->offset = size_zero_node;
rli->bitpos = bitsize_zero_node;
rli->prev_field = 0;
- rli->pending_statics = 0;
+ rli->pending_statics = NULL;
rli->packed_maybe_necessary = 0;
rli->remaining_in_alignment = 0;
/* Print debugging information about the information in RLI. */
-void
+DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
print_node_brief (stderr, "type", rli->t, 0);
if (rli->packed_maybe_necessary)
fprintf (stderr, "packed may be necessary\n");
- if (rli->pending_statics)
+ if (!VEC_empty (tree, rli->pending_statics))
{
fprintf (stderr, "pending statics:\n");
- debug_tree (rli->pending_statics);
+ debug_vec_tree (rli->pending_statics);
}
}
applies if there was an immediately prior, nonzero-size
bitfield. (That's the way it is, experimentally.) */
if ((!is_bitfield && !DECL_PACKED (field))
- || (!integer_zerop (DECL_SIZE (field))
+ || ((DECL_SIZE (field) == NULL_TREE
+ || !integer_zerop (DECL_SIZE (field)))
? !DECL_PACKED (field)
: (rli->prev_field
&& DECL_BIT_FIELD_TYPE (rli->prev_field)
if (TREE_CODE (rli->t) == UNION_TYPE)
rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
- rli->offset = fold_build3 (COND_EXPR, sizetype,
- DECL_QUALIFIER (field),
+ rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
DECL_SIZE_UNIT (field), rli->offset);
}
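+
+/* Note (illustrative): for a QUAL_UNION_TYPE, as used for Ada variant
+ records, the offset thus becomes a nest of COND_EXPRs selecting the
+ size of the first variant whose DECL_QUALIFIER evaluates to true. */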
it *after* the record is laid out. */
if (TREE_CODE (field) == VAR_DECL)
{
- rli->pending_statics = tree_cons (NULL_TREE, field,
- rli->pending_statics);
+ VEC_safe_push (tree, gc, rli->pending_statics, field);
return;
}
if (STRICT_ALIGNMENT)
warning (OPT_Wattributes, "packed attribute causes "
"inefficient alignment for %q+D", field);
- else
+ /* Don't warn if DECL_PACKED was set by the type. */
+ else if (!TYPE_PACKED (rli->t))
warning (OPT_Wattributes, "packed attribute is "
"unnecessary for %q+D", field);
}
}
/* Does this field automatically have alignment it needs by virtue
- of the fields that precede it and the record's own alignment?
- We already align ms_struct fields, so don't re-align them. */
- if (known_align < desired_align
- && !targetm.ms_bitfield_layout_p (rli->t))
+ of the fields that precede it and the record's own alignment? */
+ if (known_align < desired_align)
{
/* No, we need to skip space before this field.
Bump the cumulative size to multiple of field alignment. */
- warning (OPT_Wpadded, "padding struct to align %q+D", field);
+ if (!targetm.ms_bitfield_layout_p (rli->t)
+ && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
+ warning (OPT_Wpadded, "padding struct to align %q+D", field);
/* If the alignment is still within offset_align, just align
the bit position. */
if (! TREE_CONSTANT (rli->offset))
rli->offset_align = desired_align;
-
+ if (targetm.ms_bitfield_layout_p (rli->t))
+ rli->prev_field = NULL;
}
/* Handle compatibility with PCC. Note that if the record has any
if (warn_packed_bitfield_compat == 1)
inform
(input_location,
- "Offset of packed bit-field %qD has changed in GCC 4.4",
+ "offset of packed bit-field %qD has changed in GCC 4.4",
field);
}
else
until we see a bitfield (and come by here again) we just skip
calculating it. */
if (DECL_SIZE (field) != NULL
- && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 0)
- && host_integerp (DECL_SIZE (field), 0))
+ && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
+ && host_integerp (DECL_SIZE (field), 1))
{
- HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);
- HOST_WIDE_INT typesize
+ unsigned HOST_WIDE_INT bitsize
+ = tree_low_cst (DECL_SIZE (field), 1);
+ unsigned HOST_WIDE_INT typesize
= tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);
if (typesize < bitsize)
/* If we ended a bitfield before the full length of the type then
pad the struct out to the full length of the last type. */
- if ((TREE_CHAIN (field) == NULL
- || TREE_CODE (TREE_CHAIN (field)) != FIELD_DECL)
+ if ((DECL_CHAIN (field) == NULL
+ || TREE_CODE (DECL_CHAIN (field)) != FIELD_DECL)
&& DECL_BIT_FIELD_TYPE (field)
&& !integer_zerop (DECL_SIZE (field)))
rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
= round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
if (TREE_CONSTANT (unpadded_size)
- && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0)
+ && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
+ && input_location != BUILTINS_LOCATION)
warning (OPT_Wpadded, "padding struct size to alignment boundary");
if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
{
- TYPE_PACKED (rli->t) = 0;
-
if (TYPE_NAME (rli->t))
{
- const char *name;
+ tree name;
if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
- name = IDENTIFIER_POINTER (TYPE_NAME (rli->t));
+ name = TYPE_NAME (rli->t);
else
- name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (rli->t)));
+ name = DECL_NAME (TYPE_NAME (rli->t));
if (STRICT_ALIGNMENT)
warning (OPT_Wpacked, "packed attribute causes inefficient "
- "alignment for %qs", name);
+ "alignment for %qE", name);
else
warning (OPT_Wpacked,
- "packed attribute is unnecessary for %qs", name);
+ "packed attribute is unnecessary for %qE", name);
}
else
{
/* A record which has any BLKmode members must itself be
BLKmode; it can't go in a register. Unless the member is
BLKmode only because it isn't aligned. */
- for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
{
if (TREE_CODE (field) != FIELD_DECL)
continue;
if (TYPE_SIZE (type) != 0)
{
TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
- TYPE_SIZE_UNIT (type) = round_up (TYPE_SIZE_UNIT (type),
- TYPE_ALIGN_UNIT (type));
+ TYPE_SIZE_UNIT (type)
+ = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
}
/* Evaluate nonconstant sizes only once, either now or as soon as safe. */
}
}
+/* Return a new underlying object for a bitfield started with FIELD. */
+
+static tree
+start_bitfield_representative (tree field)
+{
+ tree repr = make_node (FIELD_DECL);
+ DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
+ /* Force the representative to begin at a BITS_PER_UNIT aligned
+ boundary - C++ may use tail-padding of a base object to
+ continue packing bits so the bitfield region does not start
+ at bit zero (see g++.dg/abi/bitfield5.C for example).
+ Unallocated bits may happen for other reasons as well,
+ for example Ada which allows explicit bit-granular structure layout. */
+ DECL_FIELD_BIT_OFFSET (repr)
+ = size_binop (BIT_AND_EXPR,
+ DECL_FIELD_BIT_OFFSET (field),
+ bitsize_int (~(BITS_PER_UNIT - 1)));
+ SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
+ DECL_SIZE (repr) = DECL_SIZE (field);
+ DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
+ DECL_PACKED (repr) = DECL_PACKED (field);
+ DECL_CONTEXT (repr) = DECL_CONTEXT (field);
+ return repr;
+}
+
+/* Finish up a bitfield group whose underlying object REPR was started
+ earlier; FIELD is the last field in the group. */
+
+static void
+finish_bitfield_representative (tree repr, tree field)
+{
+ unsigned HOST_WIDE_INT bitsize, maxbitsize;
+ enum machine_mode mode;
+ tree nextf, size;
+
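+ /* Compute the extent of the representative so far: from the start
+ of REPR to the last bit of FIELD, in bits. */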
+ size = size_diffop (DECL_FIELD_OFFSET (field),
+ DECL_FIELD_OFFSET (repr));
+ gcc_assert (host_integerp (size, 1));
+ bitsize = (tree_low_cst (size, 1) * BITS_PER_UNIT
+ + tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
+ - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1)
+ + tree_low_cst (DECL_SIZE (field), 1));
+
+ /* Round up bitsize to multiples of BITS_PER_UNIT. */
+ bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
+
+ /* Now nothing tells us how to pad out bitsize ... */
+ nextf = DECL_CHAIN (field);
+ while (nextf && TREE_CODE (nextf) != FIELD_DECL)
+ nextf = DECL_CHAIN (nextf);
+ if (nextf)
+ {
+ tree maxsize;
+ /* If there was an error, the field may be not laid out
+ correctly. Don't bother to do anything. */
+ if (TREE_TYPE (nextf) == error_mark_node)
+ return;
+ maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
+ DECL_FIELD_OFFSET (repr));
+ if (host_integerp (maxsize, 1))
+ {
+ maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT
+ + tree_low_cst (DECL_FIELD_BIT_OFFSET (nextf), 1)
+ - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1));
+ /* If the group ends within a bitfield, nextf does not need to be
+ aligned to BITS_PER_UNIT. Thus round up. */
+ maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
+ }
+ else
+ maxbitsize = bitsize;
+ }
+ else
+ {
+ /* ??? If you consider that tail-padding of this struct might be
+ re-used when deriving from it, we cannot really do the following
+ and thus need to set maxsize to bitsize? Also we cannot
+ generally rely on maxsize to fold to an integer constant, so
+ use bitsize as fallback for this case. */
+ tree maxsize = size_diffop (TYPE_SIZE_UNIT (DECL_CONTEXT (field)),
+ DECL_FIELD_OFFSET (repr));
+ if (host_integerp (maxsize, 1))
+ maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT
+ - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1));
+ else
+ maxbitsize = bitsize;
+ }
+
+ /* This holds only because we never artificially break up the
+ representative in the middle of a large bitfield with different,
+ possibly overlapping representatives, and because all
+ representatives start at a byte offset. */
+ gcc_assert (maxbitsize % BITS_PER_UNIT == 0);
+
+ /* Find the smallest nice mode to use. */
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_BITSIZE (mode) >= bitsize)
+ break;
+ if (mode != VOIDmode
+ && (GET_MODE_BITSIZE (mode) > maxbitsize
+ || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE))
+ mode = VOIDmode;
+
+ if (mode == VOIDmode)
+ {
+ /* We really want a BLKmode representative only as a last resort,
+ considering the member b in
+ struct { int a : 7; int b : 17; int c; } __attribute__((packed));
+ Otherwise we simply want to split the representative up
+ allowing for overlaps within the bitfield region as required for
+ struct { int a : 7; int b : 7;
+ int c : 10; int d; } __attribute__((packed));
+ [0, 15] HImode for a and b, [8, 23] HImode for c. */
+ DECL_SIZE (repr) = bitsize_int (bitsize);
+ DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
+ DECL_MODE (repr) = BLKmode;
+ TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
+ bitsize / BITS_PER_UNIT);
+ }
+ else
+ {
+ unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
+ DECL_SIZE (repr) = bitsize_int (modesize);
+ DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
+ DECL_MODE (repr) = mode;
+ TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
+ }
+
+ /* Remember whether the bitfield group is at the end of the
+ structure or not. */
+ DECL_CHAIN (repr) = nextf;
+}
+
+/* Compute and set FIELD_DECLs for the underlying objects we should
+ use for bitfield access for the structure laid out with RLI. */
+
+static void
+finish_bitfield_layout (record_layout_info rli)
+{
+ tree field, prev;
+ tree repr = NULL_TREE;
+
+ /* Unions would be special: for the ease of type-punning optimizations
+ we could use the underlying type as a hint for the representative
+ if the bitfield would fit and the representative would not exceed
+ the union in size. */
+ if (TREE_CODE (rli->t) != RECORD_TYPE)
+ return;
+
+ for (prev = NULL_TREE, field = TYPE_FIELDS (rli->t);
+ field; field = DECL_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ /* In the C++ memory model, consecutive bit fields in a structure are
+ considered one memory location and updating a memory location
+ may not store into adjacent memory locations. */
+ if (!repr
+ && DECL_BIT_FIELD_TYPE (field))
+ {
+ /* Start new representative. */
+ repr = start_bitfield_representative (field);
+ }
+ else if (repr
+ && ! DECL_BIT_FIELD_TYPE (field))
+ {
+ /* Finish off new representative. */
+ finish_bitfield_representative (repr, prev);
+ repr = NULL_TREE;
+ }
+ else if (DECL_BIT_FIELD_TYPE (field))
+ {
+ gcc_assert (repr != NULL_TREE);
+
+ /* Zero-size bitfields finish off a representative and
+ do not have a representative themselves. This is
+ required by the C++ memory model. */
+ if (integer_zerop (DECL_SIZE (field)))
+ {
+ finish_bitfield_representative (repr, prev);
+ repr = NULL_TREE;
+ }
+
+ /* We assume that either DECL_FIELD_OFFSET of the representative
+ and each bitfield member is a constant or they are equal.
+ This is because we need to be able to compute the bit-offset
+ of each field relative to the representative in get_bit_range
+ during RTL expansion.
+ If these constraints are not met, simply force a new
+ representative to be generated. That will at most
+ generate worse code but still maintain correctness with
+ respect to the C++ memory model. */
+ else if (!((host_integerp (DECL_FIELD_OFFSET (repr), 1)
+ && host_integerp (DECL_FIELD_OFFSET (field), 1))
+ || operand_equal_p (DECL_FIELD_OFFSET (repr),
+ DECL_FIELD_OFFSET (field), 0)))
+ {
+ finish_bitfield_representative (repr, prev);
+ repr = start_bitfield_representative (field);
+ }
+ }
+ else
+ continue;
+
+ if (repr)
+ DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;
+
+ prev = field;
+ }
+
+ if (repr)
+ finish_bitfield_representative (repr, prev);
+}
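+
+/* For example (illustrative): in 'struct { int a : 3; int b : 9; char c; }'
+ a and b share one representative, typically an HImode object covering
+ the first two bytes, while c, not being a bit-field, gets none. */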
+
/* Do all of the work required to layout the type indicated by RLI,
once the fields have been laid out. This function will call `free'
for RLI, unless FREE_P is false. Passing a value other than false
/* Perform any last tweaks to the TYPE_SIZE, etc. */
finalize_type_size (rli->t);
+ /* Compute bitfield representatives. */
+ finish_bitfield_layout (rli);
+
/* Propagate TYPE_PACKED to variants. With C++ templates,
handle_packed_attribute is too early to do this. */
for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
/* Lay out any static members. This is done now because their type
may use the record's type. */
- while (rli->pending_statics)
- {
- layout_decl (TREE_VALUE (rli->pending_statics), 0);
- rli->pending_statics = TREE_CHAIN (rli->pending_statics);
- }
+ while (!VEC_empty (tree, rli->pending_statics))
+ layout_decl (VEC_pop (tree, rli->pending_statics), 0);
/* Clean up. */
if (free_p)
- free (rli);
+ {
+ VEC_free (tree, gc, rli->pending_statics);
+ free (rli);
+ }
}
\f
for (tail = NULL_TREE; fields; tail = fields, fields = next)
{
DECL_FIELD_CONTEXT (fields) = type;
- next = TREE_CHAIN (fields);
- TREE_CHAIN (fields) = tail;
+ next = DECL_CHAIN (fields);
+ DECL_CHAIN (fields) = tail;
}
TYPE_FIELDS (type) = tail;
#if 0 /* not yet, should get fixed properly later */
TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
#else
- TYPE_NAME (type) = build_decl (TYPE_DECL, get_identifier (name), type);
+ TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
+ TYPE_DECL, get_identifier (name), type);
#endif
TYPE_STUB_DECL (type) = TYPE_NAME (type);
layout_decl (TYPE_NAME (type), 0);
/* Find an appropriate mode for the vector type. */
if (TYPE_MODE (type) == VOIDmode)
- {
- enum machine_mode innermode = TYPE_MODE (innertype);
- enum machine_mode mode;
-
- /* First, look for a supported vector type. */
- if (SCALAR_FLOAT_MODE_P (innermode))
- mode = MIN_MODE_VECTOR_FLOAT;
- else if (SCALAR_FRACT_MODE_P (innermode))
- mode = MIN_MODE_VECTOR_FRACT;
- else if (SCALAR_UFRACT_MODE_P (innermode))
- mode = MIN_MODE_VECTOR_UFRACT;
- else if (SCALAR_ACCUM_MODE_P (innermode))
- mode = MIN_MODE_VECTOR_ACCUM;
- else if (SCALAR_UACCUM_MODE_P (innermode))
- mode = MIN_MODE_VECTOR_UACCUM;
- else
- mode = MIN_MODE_VECTOR_INT;
-
- /* Do not check vector_mode_supported_p here. We'll do that
- later in vector_type_mode. */
- for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
- if (GET_MODE_NUNITS (mode) == nunits
- && GET_MODE_INNER (mode) == innermode)
- break;
-
- /* For integers, try mapping it to a same-sized scalar mode. */
- if (mode == VOIDmode
- && GET_MODE_CLASS (innermode) == MODE_INT)
- mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
- MODE_INT, 0);
-
- if (mode == VOIDmode ||
- (GET_MODE_CLASS (mode) == MODE_INT
- && !have_regs_of_mode[mode]))
- SET_TYPE_MODE (type, BLKmode);
- else
- SET_TYPE_MODE (type, mode);
- }
+ SET_TYPE_MODE (type,
+ mode_for_vector (TYPE_MODE (innertype), nunits));
TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
TYPE_SIZE_UNIT (innertype),
- size_int (nunits), 0);
+ size_int (nunits));
TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
- bitsize_int (nunits), 0);
-
- /* Always naturally align vectors. This prevents ABI changes
- depending on whether or not native vector modes are supported. */
- TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
+ bitsize_int (nunits));
+
+ /* For vector types, we do not default to the mode's alignment.
+ Instead, query a target hook, defaulting to natural alignment.
+ This prevents ABI changes depending on whether or not native
+ vector modes are supported. */
+ TYPE_ALIGN (type) = targetm.vector_alignment (type);
+
+ /* However, if the underlying mode requires a bigger alignment than
+ what the target hook provides, we cannot use the mode. For now,
+ simply reject that case. */
+ gcc_assert (TYPE_ALIGN (type)
+ >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
break;
}
/* A pointer might be MODE_PARTIAL_INT,
but ptrdiff_t must be integral. */
SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
+ TYPE_PRECISION (type) = POINTER_SIZE;
break;
case FUNCTION_TYPE:
case POINTER_TYPE:
case REFERENCE_TYPE:
{
- enum machine_mode mode = ((TREE_CODE (type) == REFERENCE_TYPE
- && reference_types_internal)
- ? Pmode : TYPE_MODE (type));
-
- int nbits = GET_MODE_BITSIZE (mode);
+ enum machine_mode mode = TYPE_MODE (type);
+ if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
+ {
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
+ mode = targetm.addr_space.address_mode (as);
+ }
- TYPE_SIZE (type) = bitsize_int (nbits);
+ TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
TYPE_UNSIGNED (type) = 1;
- TYPE_PRECISION (type) = nbits;
+ TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
}
break;
{
tree ub = TYPE_MAX_VALUE (index);
tree lb = TYPE_MIN_VALUE (index);
+ tree element_size = TYPE_SIZE (element);
tree length;
- tree element_size;
-
- /* The initial subtraction should happen in the original type so
- that (possible) negative values are handled appropriately. */
- length = size_binop (PLUS_EXPR, size_one_node,
- fold_convert (sizetype,
- fold_build2 (MINUS_EXPR,
- TREE_TYPE (lb),
- ub, lb)));
-
- /* Special handling for arrays of bits (for Chill). */
- element_size = TYPE_SIZE (element);
- if (TYPE_PACKED (type) && INTEGRAL_TYPE_P (element)
- && (integer_zerop (TYPE_MAX_VALUE (element))
- || integer_onep (TYPE_MAX_VALUE (element)))
- && host_integerp (TYPE_MIN_VALUE (element), 1))
- {
- HOST_WIDE_INT maxvalue
- = tree_low_cst (TYPE_MAX_VALUE (element), 1);
- HOST_WIDE_INT minvalue
- = tree_low_cst (TYPE_MIN_VALUE (element), 1);
-
- if (maxvalue - minvalue == 1
- && (maxvalue == 1 || maxvalue == 0))
- element_size = integer_one_node;
- }
- /* If neither bound is a constant and sizetype is signed, make
- sure the size is never negative. We should really do this
- if *either* bound is non-constant, but this is the best
- compromise between C and Ada. */
- if (!TYPE_UNSIGNED (sizetype)
- && TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST
- && TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST)
- length = size_binop (MAX_EXPR, length, size_zero_node);
+ /* Make sure that an array of zero-sized elements is zero-sized
+ regardless of its extent. */
+ if (integer_zerop (element_size))
+ length = size_zero_node;
+
+ /* The computation should happen in the original signedness so
+ that (possible) negative values are handled appropriately
+ when determining overflow. */
+ else
+ length
+ = fold_convert (sizetype,
+ size_binop (PLUS_EXPR,
+ build_int_cst (TREE_TYPE (lb), 1),
+ size_binop (MINUS_EXPR, ub, lb)));
TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
fold_convert (bitsizetype,
length));
- /* If we know the size of the element, calculate the total
- size directly, rather than do some division thing below.
- This optimization helps Fortran assumed-size arrays
- (where the size of the array is determined at runtime)
- substantially.
- Note that we can't do this in the case where the size of
- the elements is one bit since TYPE_SIZE_UNIT cannot be
- set correctly in that case. */
- if (TYPE_SIZE_UNIT (element) != 0 && ! integer_onep (element_size))
+ /* If we know the size of the element, calculate the total size
+ directly, rather than do some division thing below. This
+ optimization helps Fortran assumed-size arrays (where the
+ size of the array is determined at runtime) substantially. */
+ if (TYPE_SIZE_UNIT (element))
TYPE_SIZE_UNIT (type)
= size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
}
#else
TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
#endif
- if (!TYPE_SIZE (element))
- /* We don't know the size of the underlying element type, so
- our alignment calculations will be wrong, forcing us to
- fall back on structural equality. */
- SET_TYPE_STRUCTURAL_EQUALITY (type);
TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
SET_TYPE_MODE (type, BLKmode);
if (TYPE_SIZE (type) != 0
&& (TYPE_MODE (TREE_TYPE (type)) != BLKmode
|| TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
{
- /* One-element arrays get the component type's mode. */
- if (simple_cst_equal (TYPE_SIZE (type),
- TYPE_SIZE (TREE_TYPE (type))))
- SET_TYPE_MODE (type, TYPE_MODE (TREE_TYPE (type)));
- else
- SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type),
- MODE_INT, 1));
-
+ SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
+ TYPE_SIZE (type)));
if (TYPE_MODE (type) != BLKmode
&& STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
&& TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
/* Place all the fields. */
- for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
place_field (rli, field);
if (TREE_CODE (type) == QUAL_UNION_TYPE)
change the result of vector_mode_supported_p and have_regs_of_mode
on a per-function basis. Thus the TYPE_MODE of a VECTOR_TYPE can
change on a per-function basis. */
-/* ??? Possibly a better solution is to run through all the types
+/* ??? Possibly a better solution is to run through all the types
referenced by a function and re-compute the TYPE_MODE once, rather
than make the TYPE_MODE macro call a function. */
gcc_assert (TREE_CODE (t) == VECTOR_TYPE);
- mode = t->type.mode;
+ mode = t->type_common.mode;
if (VECTOR_MODE_P (mode)
&& (!targetm.vector_mode_supported_p (mode)
|| !have_regs_of_mode[mode]))
{
- enum machine_mode innermode = TREE_TYPE (t)->type.mode;
+ enum machine_mode innermode = TREE_TYPE (t)->type_common.mode;
/* For integers, try mapping it to a same-sized scalar mode. */
if (GET_MODE_CLASS (innermode) == MODE_INT)
return type;
}
-/* Initialize sizetype and bitsizetype to a reasonable and temporary
- value to enable integer types to be created. */
-
-void
-initialize_sizetypes (bool signed_p)
-{
- tree t = make_node (INTEGER_TYPE);
- int precision = GET_MODE_BITSIZE (SImode);
-
- SET_TYPE_MODE (t, SImode);
- TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
- TYPE_USER_ALIGN (t) = 0;
- TYPE_IS_SIZETYPE (t) = 1;
- TYPE_UNSIGNED (t) = !signed_p;
- TYPE_SIZE (t) = build_int_cst (t, precision);
- TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
- TYPE_PRECISION (t) = precision;
-
- /* Set TYPE_MIN_VALUE and TYPE_MAX_VALUE. */
- set_min_and_max_values_for_integral_type (t, precision, !signed_p);
-
- sizetype = t;
- bitsizetype = build_distinct_type_copy (t);
-}
-
-/* Make sizetype a version of TYPE, and initialize *sizetype
- accordingly. We do this by overwriting the stub sizetype and
- bitsizetype nodes created by initialize_sizetypes. This makes sure
- that (a) anything stubby about them no longer exists, (b) any
- INTEGER_CSTs created with such a type, remain valid. */
+/* Initialize sizetypes so layout_type can use them. */
void
-set_sizetype (tree type)
+initialize_sizetypes (void)
{
- tree t;
- int oprecision = TYPE_PRECISION (type);
- /* The *bitsizetype types use a precision that avoids overflows when
- calculating signed sizes / offsets in bits. However, when
- cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit
- precision. */
- int precision
- = MIN (oprecision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
- precision
- = GET_MODE_PRECISION (smallest_mode_for_size (precision, MODE_INT));
- if (precision > HOST_BITS_PER_WIDE_INT * 2)
- precision = HOST_BITS_PER_WIDE_INT * 2;
-
- gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));
-
- t = build_distinct_type_copy (type);
- /* We do want to use sizetype's cache, as we will be replacing that
- type. */
- TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
- TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
- TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
- TYPE_UID (t) = TYPE_UID (sizetype);
- TYPE_IS_SIZETYPE (t) = 1;
-
- /* Replace our original stub sizetype. */
- memcpy (sizetype, t, tree_size (sizetype));
- TYPE_MAIN_VARIANT (sizetype) = sizetype;
- TYPE_CANONICAL (sizetype) = sizetype;
-
- t = make_node (INTEGER_TYPE);
- TYPE_NAME (t) = get_identifier ("bit_size_type");
- /* We do want to use bitsizetype's cache, as we will be replacing that
- type. */
- TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
- TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
- TYPE_PRECISION (t) = precision;
- TYPE_UID (t) = TYPE_UID (bitsizetype);
- TYPE_IS_SIZETYPE (t) = 1;
-
- /* Replace our original stub bitsizetype. */
- memcpy (bitsizetype, t, tree_size (bitsizetype));
- TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;
- TYPE_CANONICAL (bitsizetype) = bitsizetype;
-
- if (TYPE_UNSIGNED (type))
- {
- fixup_unsigned_type (bitsizetype);
- ssizetype = build_distinct_type_copy (make_signed_type (oprecision));
- TYPE_IS_SIZETYPE (ssizetype) = 1;
- sbitsizetype = build_distinct_type_copy (make_signed_type (precision));
- TYPE_IS_SIZETYPE (sbitsizetype) = 1;
- }
+ int precision, bprecision;
+
+ /* Get sizetype's precision from the SIZE_TYPE target macro. */
+ if (strcmp (SIZE_TYPE, "unsigned int") == 0)
+ precision = INT_TYPE_SIZE;
+ else if (strcmp (SIZE_TYPE, "long unsigned int") == 0)
+ precision = LONG_TYPE_SIZE;
+ else if (strcmp (SIZE_TYPE, "long long unsigned int") == 0)
+ precision = LONG_LONG_TYPE_SIZE;
+ else if (strcmp (SIZE_TYPE, "short unsigned int") == 0)
+ precision = SHORT_TYPE_SIZE;
else
- {
- fixup_signed_type (bitsizetype);
- ssizetype = sizetype;
- sbitsizetype = bitsizetype;
- }
-
- /* If SIZETYPE is unsigned, we need to fix TYPE_MAX_VALUE so that
- it is sign extended in a way consistent with force_fit_type. */
- if (TYPE_UNSIGNED (type))
- {
- tree orig_max, new_max;
-
- orig_max = TYPE_MAX_VALUE (sizetype);
-
- /* Build a new node with the same values, but a different type.
- Sign extend it to ensure consistency. */
- new_max = build_int_cst_wide_type (sizetype,
- TREE_INT_CST_LOW (orig_max),
- TREE_INT_CST_HIGH (orig_max));
- TYPE_MAX_VALUE (sizetype) = new_max;
- }
+ gcc_unreachable ();
+
+ bprecision
+ = MIN (precision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
+ bprecision
+ = GET_MODE_PRECISION (smallest_mode_for_size (bprecision, MODE_INT));
+ if (bprecision > HOST_BITS_PER_WIDE_INT * 2)
+ bprecision = HOST_BITS_PER_WIDE_INT * 2;
+
+ /* Create stubs for sizetype and bitsizetype so we can create constants. */
+ sizetype = make_node (INTEGER_TYPE);
+ TYPE_NAME (sizetype) = get_identifier ("sizetype");
+ TYPE_PRECISION (sizetype) = precision;
+ TYPE_UNSIGNED (sizetype) = 1;
+ TYPE_IS_SIZETYPE (sizetype) = 1;
+ bitsizetype = make_node (INTEGER_TYPE);
+ TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
+ TYPE_PRECISION (bitsizetype) = bprecision;
+ TYPE_UNSIGNED (bitsizetype) = 1;
+ TYPE_IS_SIZETYPE (bitsizetype) = 1;
+
+ /* Now layout both types manually. */
+ SET_TYPE_MODE (sizetype, smallest_mode_for_size (precision, MODE_INT));
+ TYPE_ALIGN (sizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (sizetype));
+ TYPE_SIZE (sizetype) = bitsize_int (precision);
+ TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype)));
+ set_min_and_max_values_for_integral_type (sizetype, precision,
+ /*is_unsigned=*/true);
+ /* sizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
+ sign-extended in a way consistent with force_fit_type. */
+ TYPE_MAX_VALUE (sizetype)
+ = double_int_to_tree (sizetype,
+ tree_to_double_int (TYPE_MAX_VALUE (sizetype)));
+
+ SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT));
+ TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype));
+ TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
+ TYPE_SIZE_UNIT (bitsizetype)
+ = size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype)));
+ set_min_and_max_values_for_integral_type (bitsizetype, bprecision,
+ /*is_unsigned=*/true);
+ /* bitsizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
+ sign-extended in a way consistent with force_fit_type. */
+ TYPE_MAX_VALUE (bitsizetype)
+ = double_int_to_tree (bitsizetype,
+ tree_to_double_int (TYPE_MAX_VALUE (bitsizetype)));
+
+ /* Create the signed variants of *sizetype. */
+ ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
+ TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
+ TYPE_IS_SIZETYPE (ssizetype) = 1;
+ sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
+ TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
+ TYPE_IS_SIZETYPE (sbitsizetype) = 1;
}
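+
+/* For example (illustrative): on an LP64 target where SIZE_TYPE is
+ "long unsigned int", precision is LONG_TYPE_SIZE (64); bprecision
+ starts at 64 + BITS_PER_UNIT_LOG + 1 = 68, is rounded up to the
+ precision of the smallest fitting integer mode, and is finally capped
+ at 2 * HOST_BITS_PER_WIDE_INT. */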
\f
/* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE
/* Find the best machine mode to use when referencing a bit field of length
BITSIZE bits starting at BITPOS.
+ BITREGION_START is the bit position of the first bit in this
+ sequence of bit fields. BITREGION_END is the last bit in this
+ sequence. If these two values are nonzero, we should restrict the
+ memory access to a maximum-sized chunk of
+ BITREGION_END - BITREGION_START + 1. Otherwise, we are allowed to touch
+ any adjacent non-bit-fields.
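+
+ For example (illustrative): a bit-field at bit position 3 with
+ BITSIZE 5, BITREGION_START 0 and BITREGION_END 7 restricts any
+ access to an 8-bit chunk, i.e. at most a QImode-sized mode.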
+
The underlying object is known to be aligned to a boundary of ALIGN bits.
If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
larger than LARGEST_MODE (usually SImode).
decide which of the above modes should be used. */
enum machine_mode
-get_best_mode (int bitsize, int bitpos, unsigned int align,
+get_best_mode (int bitsize, int bitpos,
+ unsigned HOST_WIDE_INT bitregion_start,
+ unsigned HOST_WIDE_INT bitregion_end,
+ unsigned int align,
enum machine_mode largest_mode, int volatilep)
{
enum machine_mode mode;
unsigned int unit = 0;
+ unsigned HOST_WIDE_INT maxbits;
+
+ /* If unset, no restriction. */
+ if (!bitregion_end)
+ maxbits = MAX_FIXED_MODE_SIZE;
+ else
+ maxbits = bitregion_end - bitregion_start + 1;
/* Find the narrowest integer mode that contains the bit field. */
for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
unit = GET_MODE_BITSIZE (mode);
- if ((bitpos % unit) + bitsize <= unit)
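+ /* Modes whose precision is smaller than their size have padding
+ bits and cannot be used to access the bit-field directly. */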
+ if (unit == GET_MODE_PRECISION (mode)
+ && (bitpos % unit) + bitsize <= unit)
break;
}
(Though at least one Unix compiler ignores this problem:
that on the Sequent 386 machine.) */
|| MIN (unit, BIGGEST_ALIGNMENT) > align
- || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
+ || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode))
+ || unit > maxbits
+ || (bitregion_end
+ && bitpos - (bitpos % unit) + unit > bitregion_end + 1))
return VOIDmode;
if ((SLOW_BYTE_ACCESS && ! volatilep)
tmode = GET_MODE_WIDER_MODE (tmode))
{
unit = GET_MODE_BITSIZE (tmode);
- if (bitpos / unit == (bitpos + bitsize - 1) / unit
+ if (unit == GET_MODE_PRECISION (tmode)
+ && bitpos / unit == (bitpos + bitsize - 1) / unit
&& unit <= BITS_PER_WORD
&& unit <= MIN (align, BIGGEST_ALIGNMENT)
+ && unit <= maxbits
&& (largest_mode == VOIDmode
- || unit <= GET_MODE_BITSIZE (largest_mode)))
+ || unit <= GET_MODE_BITSIZE (largest_mode))
+ && (bitregion_end == 0
+ || bitpos - (bitpos % unit) + unit <= bitregion_end + 1))
wide_mode = tmode;
}