/* C-compiler utilities for types and variables storage layout
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
- Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "function.h"
#include "expr.h"
#include "output.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"
+#include "cgraph.h"
+#include "tree-inline.h"
+#include "tree-dump.h"
+#include "gimple.h"
/* Data type for the expressions representing sizes of data types.
It is the first integer type laid out. */
/* If nonzero, this is an upper limit on alignment of structure fields.
The value is measured in bits. */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
-/* ... and its original value in bytes, specified via -fpack-struct=<value>. */
-unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;
-/* Nonzero if all REFERENCE_TYPEs are internal and hence should be
- allocated in Pmode, not ptr_mode. Set only by internal_reference_types
- called only by a front end. */
+/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
+ in the address spaces' address_mode, not pointer_mode. Set only by
+ internal_reference_types called only by a front end. */
static int reference_types_internal = 0;
+static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#endif
extern void debug_rli (record_layout_info);
\f
-/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
-
-static GTY(()) tree pending_sizes;
-
-/* Show that REFERENCE_TYPES are internal and should be Pmode. Called only
- by front end. */
+/* Show that REFERENCE_TYPES are internal and should use address_mode.
+ Called only by front end. */
void
internal_reference_types (void)
{
reference_types_internal = 1;
}
-/* Get a list of all the objects put on the pending sizes list. */
+/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
+ to serve as the actual size-expression for a type or decl. */
tree
-get_pending_sizes (void)
+variable_size (tree size)
{
- tree chain = pending_sizes;
+ /* Obviously. */
+ if (TREE_CONSTANT (size))
+ return size;
+
+ /* If the size is self-referential, we can't make a SAVE_EXPR (see
+ save_expr for the rationale). But we can do something else. */
+ if (CONTAINS_PLACEHOLDER_P (size))
+ return self_referential_size (size);
+
+ /* If we are in the global binding level, we can't make a SAVE_EXPR
+ since it may end up being shared across functions, so it is up
+ to the front-end to deal with this case. */
+ if (lang_hooks.decls.global_bindings_p ())
+ return size;
- pending_sizes = 0;
- return chain;
+ return save_expr (size);
}
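+
+/* For illustration: given a C99 variable-length array such as
+
+ void f (int n) { char buf[n]; }
+
+ the TYPE_SIZE of BUF's type is the non-constant expression n * 8
+ (bits, assuming an 8-bit byte), so variable_size wraps it in a
+ SAVE_EXPR and the size is evaluated just once in the containing
+ function. */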
-/* Add EXPR to the pending sizes list. */
+/* An array of functions used for self-referential size computation. */
+static GTY(()) VEC (tree, gc) *size_functions;
-void
-put_pending_size (tree expr)
+/* Look inside EXPR into simple arithmetic operations involving constants.
+ Return the outermost non-arithmetic or non-constant node. */
+
+static tree
+skip_simple_constant_arithmetic (tree expr)
{
- /* Strip any simple arithmetic from EXPR to see if it has an underlying
- SAVE_EXPR. */
- expr = skip_simple_arithmetic (expr);
+ while (true)
+ {
+ if (UNARY_CLASS_P (expr))
+ expr = TREE_OPERAND (expr, 0);
+ else if (BINARY_CLASS_P (expr))
+ {
+ if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
+ expr = TREE_OPERAND (expr, 0);
+ else if (TREE_CONSTANT (TREE_OPERAND (expr, 0)))
+ expr = TREE_OPERAND (expr, 1);
+ else
+ break;
+ }
+ else
+ break;
+ }
- if (TREE_CODE (expr) == SAVE_EXPR)
- pending_sizes = tree_cons (NULL_TREE, expr, pending_sizes);
+ return expr;
}
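+
+/* For example, given (a * 4) + 8 where A is a COMPONENT_REF, the loop
+ above peels the constant PLUS and MULT layers and returns A itself;
+ a CALL_EXPR buried in such arithmetic is exposed the same way, which
+ is what self_referential_size relies on below. */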
-/* Put a chain of objects into the pending sizes list, which must be
- empty. */
+/* Similar to copy_tree_r but do not copy component references involving
+ PLACEHOLDER_EXPRs. These nodes are spotted in find_placeholder_in_expr
+ and substituted in substitute_in_expr. */
-void
-put_pending_sizes (tree chain)
+static tree
+copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
- gcc_assert (!pending_sizes);
- pending_sizes = chain;
+ enum tree_code code = TREE_CODE (*tp);
+
+ /* Stop at types, decls, constants like copy_tree_r. */
+ if (TREE_CODE_CLASS (code) == tcc_type
+ || TREE_CODE_CLASS (code) == tcc_declaration
+ || TREE_CODE_CLASS (code) == tcc_constant)
+ {
+ *walk_subtrees = 0;
+ return NULL_TREE;
+ }
+
+ /* This is the pattern built in ada/make_aligning_type. */
+ else if (code == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
+ {
+ *walk_subtrees = 0;
+ return NULL_TREE;
+ }
+
+ /* Default case: the component reference. */
+ else if (code == COMPONENT_REF)
+ {
+ tree inner;
+ for (inner = TREE_OPERAND (*tp, 0);
+ REFERENCE_CLASS_P (inner);
+ inner = TREE_OPERAND (inner, 0))
+ ;
+
+ if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
+ {
+ *walk_subtrees = 0;
+ return NULL_TREE;
+ }
+ }
+
+ /* We're not supposed to have them in self-referential size trees
+ because we wouldn't properly control when they are evaluated.
+ However, not creating superfluous SAVE_EXPRs requires accurate
+ tracking of readonly-ness all the way down to here, which we
+ cannot always guarantee in practice. So punt in this case. */
+ else if (code == SAVE_EXPR)
+ return error_mark_node;
+
+ else if (code == STATEMENT_LIST)
+ gcc_unreachable ();
+
+ return copy_tree_r (tp, walk_subtrees, data);
}
-/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
- to serve as the actual size-expression for a type or decl. */
+/* Given a SIZE expression that is self-referential, return an equivalent
+ expression to serve as the actual size expression for a type. */
-tree
-variable_size (tree size)
+static tree
+self_referential_size (tree size)
{
- tree save;
-
- /* If the language-processor is to take responsibility for variable-sized
- items (e.g., languages which have elaboration procedures like Ada),
- just return SIZE unchanged. Likewise for self-referential sizes and
- constant sizes. */
- if (TREE_CONSTANT (size)
- || lang_hooks.decls.global_bindings_p () < 0
- || CONTAINS_PLACEHOLDER_P (size))
+ static unsigned HOST_WIDE_INT fnno = 0;
+ VEC (tree, heap) *self_refs = NULL;
+ tree param_type_list = NULL, param_decl_list = NULL;
+ tree t, ref, return_type, fntype, fnname, fndecl;
+ unsigned int i;
+ char buf[128];
+ VEC(tree,gc) *args = NULL;
+
+ /* Do not factor out simple operations. */
+ t = skip_simple_constant_arithmetic (size);
+ if (TREE_CODE (t) == CALL_EXPR)
return size;
- size = save_expr (size);
-
- /* If an array with a variable number of elements is declared, and
- the elements require destruction, we will emit a cleanup for the
- array. That cleanup is run both on normal exit from the block
- and in the exception-handler for the block. Normally, when code
- is used in both ordinary code and in an exception handler it is
- `unsaved', i.e., all SAVE_EXPRs are recalculated. However, we do
- not wish to do that here; the array-size is the same in both
- places. */
- save = skip_simple_arithmetic (size);
-
- if (cfun && cfun->x_dont_save_pending_sizes_p)
- /* The front-end doesn't want us to keep a list of the expressions
- that determine sizes for variable size objects. Trust it. */
+ /* Collect the list of self-references in the expression. */
+ find_placeholder_in_expr (size, &self_refs);
+ gcc_assert (VEC_length (tree, self_refs) > 0);
+
+ /* Obtain a private copy of the expression. */
+ t = size;
+ if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
return size;
+ size = t;
- if (lang_hooks.decls.global_bindings_p ())
+ /* Build the parameter and argument lists in parallel; also
+ substitute the former for the latter in the expression. */
+ args = VEC_alloc (tree, gc, VEC_length (tree, self_refs));
+ FOR_EACH_VEC_ELT (tree, self_refs, i, ref)
{
- if (TREE_CONSTANT (size))
- error ("type size can%'t be explicitly evaluated");
+ tree subst, param_name, param_type, param_decl;
+
+ if (DECL_P (ref))
+ {
+ /* We shouldn't have true variables here. */
+ gcc_assert (TREE_READONLY (ref));
+ subst = ref;
+ }
+ /* This is the pattern built in ada/make_aligning_type. */
+ else if (TREE_CODE (ref) == ADDR_EXPR)
+ subst = ref;
+ /* Default case: the component reference. */
+ else
+ subst = TREE_OPERAND (ref, 1);
+
+ sprintf (buf, "p%d", i);
+ param_name = get_identifier (buf);
+ param_type = TREE_TYPE (ref);
+ param_decl
+ = build_decl (input_location, PARM_DECL, param_name, param_type);
+ if (targetm.calls.promote_prototypes (NULL_TREE)
+ && INTEGRAL_TYPE_P (param_type)
+ && TYPE_PRECISION (param_type) < TYPE_PRECISION (integer_type_node))
+ DECL_ARG_TYPE (param_decl) = integer_type_node;
else
- error ("variable-size type declared outside of any function");
+ DECL_ARG_TYPE (param_decl) = param_type;
+ DECL_ARTIFICIAL (param_decl) = 1;
+ TREE_READONLY (param_decl) = 1;
+
+ size = substitute_in_expr (size, subst, param_decl);
- return size_one_node;
+ param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
+ param_decl_list = chainon (param_decl, param_decl_list);
+ VEC_quick_push (tree, args, ref);
}
- put_pending_size (save);
+ VEC_free (tree, heap, self_refs);
+
+ /* Append 'void' to indicate that the number of parameters is fixed. */
+ param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);
+
+ /* The 3 lists have been created in reverse order. */
+ param_type_list = nreverse (param_type_list);
+ param_decl_list = nreverse (param_decl_list);
+
+ /* Build the function type. */
+ return_type = TREE_TYPE (size);
+ fntype = build_function_type (return_type, param_type_list);
+
+ /* Build the function declaration. */
+ sprintf (buf, "SZ"HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
+ fnname = get_file_function_name (buf);
+ fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
+ for (t = param_decl_list; t; t = DECL_CHAIN (t))
+ DECL_CONTEXT (t) = fndecl;
+ DECL_ARGUMENTS (fndecl) = param_decl_list;
+ DECL_RESULT (fndecl)
+ = build_decl (input_location, RESULT_DECL, 0, return_type);
+ DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
+
+ /* The function has been created by the compiler and we don't
+ want to emit debug info for it. */
+ DECL_ARTIFICIAL (fndecl) = 1;
+ DECL_IGNORED_P (fndecl) = 1;
+
+ /* It is supposed to be "const" and never throw. */
+ TREE_READONLY (fndecl) = 1;
+ TREE_NOTHROW (fndecl) = 1;
+
+ /* We want it to be inlined when this is deemed profitable, as
+ well as discarded if every call has been integrated. */
+ DECL_DECLARED_INLINE_P (fndecl) = 1;
+
+ /* The function body is a single return statement. */
+ DECL_INITIAL (fndecl) = make_node (BLOCK);
+ BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
+ t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
+ DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
+ TREE_STATIC (fndecl) = 1;
+
+ /* Put it onto the list of size functions. */
+ VEC_safe_push (tree, gc, size_functions, fndecl);
+
+ /* Replace the original expression with a call to the size function. */
+ return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
+}
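+
+/* A sketch of the effect, for a self-referential size that reads
+ (PLACEHOLDER_EXPR <struct s>).n * 8: the COMPONENT_REF becomes
+ parameter P0 of a new artificial function roughly equivalent to
+
+ static size_t SZ0 (size_t p0) { return p0 * 8; }
+
+ (names and types illustrative only), and the size expression is
+ replaced by the call SZ0 ((PLACEHOLDER_EXPR <struct s>).n), whose
+ placeholder argument is substituted later per object. */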
+
+/* Take, queue and compile all the size functions. It is essential that
+ the size functions be gimplified at the very end of the compilation
+ in order to guarantee transparent handling of self-referential sizes.
+ Otherwise the GENERIC inliner would not be able to inline them back
+ at each of their call sites, thus creating artificial non-constant
+ size expressions which would trigger nasty problems later on. */
+
+void
+finalize_size_functions (void)
+{
+ unsigned int i;
+ tree fndecl;
+
+ for (i = 0; VEC_iterate(tree, size_functions, i, fndecl); i++)
+ {
+ dump_function (TDI_original, fndecl);
+ gimplify_function_tree (fndecl);
+ dump_function (TDI_generic, fndecl);
+ cgraph_finalize_function (fndecl, false);
+ }
- return size;
+ VEC_free (tree, gc, size_functions);
}
\f
-#ifndef MAX_FIXED_MODE_SIZE
-#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
-#endif
-
/* Return the machine mode to use for a nonscalar of SIZE bits. The
- mode must be in class CLASS, and have exactly that many value bits;
+ mode must be in class MCLASS, and have exactly that many value bits;
it may have padding as well. If LIMIT is nonzero, modes of wider
than MAX_FIXED_MODE_SIZE will not be used. */
enum machine_mode
-mode_for_size (unsigned int size, enum mode_class class, int limit)
+mode_for_size (unsigned int size, enum mode_class mclass, int limit)
{
enum machine_mode mode;
if (limit && size > MAX_FIXED_MODE_SIZE)
return BLKmode;
/* Get the first mode which has this size, in the specified class. */
- for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
+ for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
if (GET_MODE_PRECISION (mode) == size)
return mode;
/* Similar, except passed a tree node. */
enum machine_mode
-mode_for_size_tree (tree size, enum mode_class class, int limit)
+mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
unsigned HOST_WIDE_INT uhwi;
unsigned int ui;
ui = uhwi;
if (uhwi != ui)
return BLKmode;
- return mode_for_size (ui, class, limit);
+ return mode_for_size (ui, mclass, limit);
}
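+
+/* For example, on a typical 32-bit target mode_for_size (32, MODE_INT, 0)
+ returns SImode, whereas mode_for_size (24, MODE_INT, 0) returns
+ BLKmode because no integer mode there has exactly 24 value bits. */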
/* Similar, but never return BLKmode; return the narrowest mode that
contains at least the requested number of value bits. */
enum machine_mode
-smallest_mode_for_size (unsigned int size, enum mode_class class)
+smallest_mode_for_size (unsigned int size, enum mode_class mclass)
{
enum machine_mode mode;
/* Get the first mode which has at least this size, in the
specified class. */
- for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
+ for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
if (GET_MODE_PRECISION (mode) >= size)
return mode;
case MODE_DECIMAL_FLOAT:
case MODE_VECTOR_INT:
case MODE_VECTOR_FLOAT:
+ case MODE_FRACT:
+ case MODE_ACCUM:
+ case MODE_UFRACT:
+ case MODE_UACCUM:
+ case MODE_VECTOR_FRACT:
+ case MODE_VECTOR_ACCUM:
+ case MODE_VECTOR_UFRACT:
+ case MODE_VECTOR_UACCUM:
mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
break;
return mode;
}
+/* Find a mode that is suitable for representing a vector with
+ NUNITS elements of mode INNERMODE. Returns BLKmode if there
+ is no suitable mode. */
+
+enum machine_mode
+mode_for_vector (enum machine_mode innermode, unsigned nunits)
+{
+ enum machine_mode mode;
+
+ /* First, look for a supported vector type. */
+ if (SCALAR_FLOAT_MODE_P (innermode))
+ mode = MIN_MODE_VECTOR_FLOAT;
+ else if (SCALAR_FRACT_MODE_P (innermode))
+ mode = MIN_MODE_VECTOR_FRACT;
+ else if (SCALAR_UFRACT_MODE_P (innermode))
+ mode = MIN_MODE_VECTOR_UFRACT;
+ else if (SCALAR_ACCUM_MODE_P (innermode))
+ mode = MIN_MODE_VECTOR_ACCUM;
+ else if (SCALAR_UACCUM_MODE_P (innermode))
+ mode = MIN_MODE_VECTOR_UACCUM;
+ else
+ mode = MIN_MODE_VECTOR_INT;
+
+ /* Do not check vector_mode_supported_p here. We'll do that
+ later in vector_type_mode. */
+ for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_NUNITS (mode) == nunits
+ && GET_MODE_INNER (mode) == innermode)
+ break;
+
+ /* For integers, try mapping it to a same-sized scalar mode. */
+ if (mode == VOIDmode
+ && GET_MODE_CLASS (innermode) == MODE_INT)
+ mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
+ MODE_INT, 0);
+
+ if (mode == VOIDmode
+ || (GET_MODE_CLASS (mode) == MODE_INT
+ && !have_regs_of_mode[mode]))
+ return BLKmode;
+
+ return mode;
+}
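+
+/* For example, mode_for_vector (SImode, 4) returns V4SImode on targets
+ that define it; on a target with no 4xSImode vector mode the integer
+ fallback returns TImode when TImode registers exist, and BLKmode
+ otherwise. */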
+
/* Return the alignment of MODE. This will be bounded by 1 and
BIGGEST_ALIGNMENT. */
return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}
+/* Return the natural mode of an array, given that it is SIZE bytes in
+ total and has elements of type ELEM_TYPE. */
+
+static enum machine_mode
+mode_for_array (tree elem_type, tree size)
+{
+ tree elem_size;
+ unsigned HOST_WIDE_INT int_size, int_elem_size;
+ bool limit_p;
+
+ /* One-element arrays get the component type's mode. */
+ elem_size = TYPE_SIZE (elem_type);
+ if (simple_cst_equal (size, elem_size))
+ return TYPE_MODE (elem_type);
+
+ limit_p = true;
+ if (host_integerp (size, 1) && host_integerp (elem_size, 1))
+ {
+ int_size = tree_low_cst (size, 1);
+ int_elem_size = tree_low_cst (elem_size, 1);
+ if (int_elem_size > 0
+ && int_size % int_elem_size == 0
+ && targetm.array_mode_supported_p (TYPE_MODE (elem_type),
+ int_size / int_elem_size))
+ limit_p = false;
+ }
+ return mode_for_size_tree (size, MODE_INT, limit_p);
+}
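+
+/* For example, on a 32-bit-int target an int[1] array simply inherits
+ SImode from its element, while larger arrays go through
+ mode_for_size_tree; the size limit is lifted only when the target
+ hook reports support for an array mode of that element count, as
+ some vector ISAs do for structure load/store instructions. */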
\f
/* Subroutine of layout_decl: Force alignment required for the data type.
But if the decl itself wants greater alignment, don't override that. */
tree type = TREE_TYPE (decl);
enum tree_code code = TREE_CODE (decl);
rtx rtl = NULL_RTX;
+ location_t loc = DECL_SOURCE_LOCATION (decl);
if (code == CONST_DECL)
return;
}
else if (DECL_SIZE_UNIT (decl) == 0)
DECL_SIZE_UNIT (decl)
- = fold_convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
- bitsize_unit_node));
+ = fold_convert_loc (loc, sizetype,
+ size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
+ bitsize_unit_node));
if (code != FIELD_DECL)
/* For non-fields, update the alignment from the type. */
}
/* See if we can use an ordinary integer mode for a bit-field.
- Conditions are: a fixed size that is correct for another mode
- and occupying a complete byte or bytes on proper boundary. */
+ Conditions are: a fixed size that is correct for another mode,
+ occupying a complete byte or bytes on proper boundary,
+ and not -fstrict-volatile-bitfields. If the latter is set,
+ we unfortunately can't check TREE_THIS_VOLATILE, as a cast
+ may make a volatile object later. */
if (TYPE_SIZE (type) != 0
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
+ && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
+ && flag_strict_volatile_bitfields <= 0)
{
enum machine_mode xmode
= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
+ unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
if (xmode != BLKmode
- && (known_align == 0
- || known_align >= GET_MODE_ALIGNMENT (xmode)))
+ && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
+ && (known_align == 0 || known_align >= xalign))
{
- DECL_ALIGN (decl) = MAX (GET_MODE_ALIGNMENT (xmode),
- DECL_ALIGN (decl));
+ DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
DECL_MODE (decl) = xmode;
DECL_BIT_FIELD (decl) = 0;
}
else
do_type_align (type, decl);
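+
+/* For example, in struct { char c; int f : 8; } the bit-field F has a
+ fixed 8-bit size and, on typical targets, lands on a byte boundary,
+ so XMODE is QImode and F is demoted to an ordinary QImode field with
+ DECL_BIT_FIELD cleared. */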
- /* If the field is of variable size, we can't misalign it since we
- have no way to make a temporary to align the result. But this
- isn't an issue if the decl is not addressable. Likewise if it
- is of unknown size.
-
- Note that do_type_align may set DECL_USER_ALIGN, so we need to
- check old_user_align instead. */
+ /* If the field is packed and not explicitly aligned, give it the
+ minimum alignment. Note that do_type_align may set
+ DECL_USER_ALIGN, so we need to check old_user_align instead. */
if (packed_p
- && !old_user_align
- && (DECL_NONADDRESSABLE_P (decl)
- || DECL_SIZE_UNIT (decl) == 0
- || TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST))
+ && !old_user_align)
DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);
if (! packed_p && ! DECL_USER_ALIGN (decl))
int size_as_int = TREE_INT_CST_LOW (size);
if (compare_tree_int (size, size_as_int) == 0)
- warning (0, "size of %q+D is %d bytes", decl, size_as_int);
+ warning (OPT_Wlarger_than_, "size of %q+D is %d bytes", decl, size_as_int);
else
- warning (0, "size of %q+D is larger than %wd bytes",
+ warning (OPT_Wlarger_than_, "size of %q+D is larger than %wd bytes",
decl, larger_than_size);
}
}
{
DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
DECL_MODE (decl) = VOIDmode;
- DECL_ALIGN (decl) = 0;
+ if (!DECL_USER_ALIGN (decl))
+ DECL_ALIGN (decl) = 0;
SET_DECL_RTL (decl, 0);
layout_decl (decl, 0);
}
\f
-/* Hook for a front-end function that can modify the record layout as needed
- immediately before it is finalized. */
-
-static void (*lang_adjust_rli) (record_layout_info) = 0;
-
-void
-set_lang_adjust_rli (void (*f) (record_layout_info))
-{
- lang_adjust_rli = f;
-}
-
/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
QUAL_UNION_TYPE. Return a pointer to a struct record_layout_info which
is to be passed to all other layout functions for this record. It is the
responsibility of the caller to call `free' for the storage returned. */
record_layout_info
start_record_layout (tree t)
{
- record_layout_info rli = xmalloc (sizeof (struct record_layout_info_s));
+ record_layout_info rli = XNEW (struct record_layout_info_s);
rli->t = t;
#ifdef STRUCTURE_SIZE_BOUNDARY
/* Packed structures don't need to have minimum size. */
if (! TYPE_PACKED (t))
- rli->record_align = MAX (rli->record_align, (unsigned) STRUCTURE_SIZE_BOUNDARY);
+ {
+ unsigned tmp;
+
+ /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY. */
+ tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
+ if (maximum_field_alignment != 0)
+ tmp = MIN (tmp, maximum_field_alignment);
+ rli->record_align = MAX (rli->record_align, tmp);
+ }
#endif
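+
+/* For example, on a target with STRUCTURE_SIZE_BOUNDARY of 32, a
+ #pragma pack(1) region sets maximum_field_alignment to 8 bits, so
+ the minimum record alignment above drops from 32 to 8. */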
rli->offset = size_zero_node;
rli->bitpos = bitsize_zero_node;
rli->prev_field = 0;
- rli->pending_statics = 0;
+ rli->pending_statics = NULL;
rli->packed_maybe_necessary = 0;
rli->remaining_in_alignment = 0;
/* Print debugging information about the information in RLI. */
-void
+DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
print_node_brief (stderr, "type", rli->t, 0);
if (rli->packed_maybe_necessary)
fprintf (stderr, "packed may be necessary\n");
- if (rli->pending_statics)
+ if (!VEC_empty (tree, rli->pending_statics))
{
fprintf (stderr, "pending statics:\n");
- debug_tree (rli->pending_statics);
+ debug_vec_tree (rli->pending_statics);
}
}
applies if there was an immediately prior, nonzero-size
bitfield. (That's the way it is, experimentally.) */
if ((!is_bitfield && !DECL_PACKED (field))
- || (!integer_zerop (DECL_SIZE (field))
+ || ((DECL_SIZE (field) == NULL_TREE
+ || !integer_zerop (DECL_SIZE (field)))
? !DECL_PACKED (field)
: (rli->prev_field
&& DECL_BIT_FIELD_TYPE (rli->prev_field)
if (TREE_CODE (rli->t) == UNION_TYPE)
rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
- rli->offset = fold_build3 (COND_EXPR, sizetype,
- DECL_QUALIFIER (field),
+ rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
DECL_SIZE_UNIT (field), rli->offset);
}
it *after* the record is laid out. */
if (TREE_CODE (field) == VAR_DECL)
{
- rli->pending_statics = tree_cons (NULL_TREE, field,
- rli->pending_statics);
+ VEC_safe_push (tree, gc, rli->pending_statics, field);
return;
}
if (STRICT_ALIGNMENT)
warning (OPT_Wattributes, "packed attribute causes "
"inefficient alignment for %q+D", field);
- else
+ /* Don't warn if DECL_PACKED was set by the type. */
+ else if (!TYPE_PACKED (rli->t))
warning (OPT_Wattributes, "packed attribute is "
"unnecessary for %q+D", field);
}
}
/* Does this field automatically have alignment it needs by virtue
- of the fields that precede it and the record's own alignment?
- We already align ms_struct fields, so don't re-align them. */
- if (known_align < desired_align
- && !targetm.ms_bitfield_layout_p (rli->t))
+ of the fields that precede it and the record's own alignment? */
+ if (known_align < desired_align)
{
/* No, we need to skip space before this field.
Bump the cumulative size to multiple of field alignment. */
- warning (OPT_Wpadded, "padding struct to align %q+D", field);
+ if (!targetm.ms_bitfield_layout_p (rli->t)
+ && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
+ warning (OPT_Wpadded, "padding struct to align %q+D", field);
/* If the alignment is still within offset_align, just align
the bit position. */
if (! TREE_CONSTANT (rli->offset))
rli->offset_align = desired_align;
-
+ if (targetm.ms_bitfield_layout_p (rli->t))
+ rli->prev_field = NULL;
}
/* Handle compatibility with PCC. Note that if the record has any
variable-sized fields, we need not worry about compatibility. */
&& TREE_CODE (field) == FIELD_DECL
&& type != error_mark_node
&& DECL_BIT_FIELD (field)
- && ! DECL_PACKED (field)
+ && (! DECL_PACKED (field)
+ /* Enter for these packed fields only to issue a warning. */
+ || TYPE_ALIGN (type) <= BITS_PER_UNIT)
&& maximum_field_alignment == 0
&& ! integer_zerop (DECL_SIZE (field))
&& host_integerp (DECL_SIZE (field), 1)
/* A bit field may not span more units of alignment of its type
than its type itself. Advance to next boundary if necessary. */
if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
- rli->bitpos = round_up (rli->bitpos, type_align);
+ {
+ if (DECL_PACKED (field))
+ {
+ if (warn_packed_bitfield_compat == 1)
+ inform
+ (input_location,
+ "offset of packed bit-field %qD has changed in GCC 4.4",
+ field);
+ }
+ else
+ rli->bitpos = round_up (rli->bitpos, type_align);
+ }
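+
+/* For example, given
+
+ struct foo { char a : 4; char b : 8; } __attribute__ ((packed));
+
+ B spans two units of char alignment, so GCC 4.3 and earlier advanced
+ it to bit offset 8 while 4.4 honours the packing and keeps it at bit
+ offset 4; -Wpacked-bitfield-compat flags exactly this situation. */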
- TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
+ if (! DECL_PACKED (field))
+ TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
}
#endif
until we see a bitfield (and come by here again) we just skip
calculating it. */
if (DECL_SIZE (field) != NULL
- && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 0)
- && host_integerp (DECL_SIZE (field), 0))
+ && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
+ && host_integerp (DECL_SIZE (field), 1))
{
- HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);
- HOST_WIDE_INT typesize
+ unsigned HOST_WIDE_INT bitsize
+ = tree_low_cst (DECL_SIZE (field), 1);
+ unsigned HOST_WIDE_INT typesize
= tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);
if (typesize < bitsize)
if (DECL_SIZE (field) == 0)
/* Do nothing. */;
else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
- || TREE_CONSTANT_OVERFLOW (DECL_SIZE (field)))
+ || TREE_OVERFLOW (DECL_SIZE (field)))
{
rli->offset
= size_binop (PLUS_EXPR, rli->offset,
/* If we ended a bitfield before the full length of the type then
pad the struct out to the full length of the last type. */
- if ((TREE_CHAIN (field) == NULL
- || TREE_CODE (TREE_CHAIN (field)) != FIELD_DECL)
+ if ((DECL_CHAIN (field) == NULL
+ || TREE_CODE (DECL_CHAIN (field)) != FIELD_DECL)
&& DECL_BIT_FIELD_TYPE (field)
&& !integer_zerop (DECL_SIZE (field)))
rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
= round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
if (TREE_CONSTANT (unpadded_size)
- && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0)
+ && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
+ && input_location != BUILTINS_LOCATION)
warning (OPT_Wpadded, "padding struct size to alignment boundary");
if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
{
- TYPE_PACKED (rli->t) = 0;
-
if (TYPE_NAME (rli->t))
{
- const char *name;
+ tree name;
if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
- name = IDENTIFIER_POINTER (TYPE_NAME (rli->t));
+ name = TYPE_NAME (rli->t);
else
- name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (rli->t)));
+ name = DECL_NAME (TYPE_NAME (rli->t));
if (STRICT_ALIGNMENT)
warning (OPT_Wpacked, "packed attribute causes inefficient "
- "alignment for %qs", name);
+ "alignment for %qE", name);
else
warning (OPT_Wpacked,
- "packed attribute is unnecessary for %qs", name);
+ "packed attribute is unnecessary for %qE", name);
}
else
{
However, if possible, we use a mode that fits in a register
instead, in order to allow for better optimization down the
line. */
- TYPE_MODE (type) = BLKmode;
+ SET_TYPE_MODE (type, BLKmode);
if (! host_integerp (TYPE_SIZE (type), 1))
return;
/* A record which has any BLKmode members must itself be
BLKmode; it can't go in a register. Unless the member is
BLKmode only because it isn't aligned. */
- for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
{
if (TREE_CODE (field) != FIELD_DECL)
continue;
if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
&& host_integerp (TYPE_SIZE (type), 1)
&& GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
- TYPE_MODE (type) = mode;
+ SET_TYPE_MODE (type, mode);
else
- TYPE_MODE (type) = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);
+ SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1));
/* If structure's known alignment is less than what the scalar
mode would need, and it matters, then stick with BLKmode. */
/* If this is the only reason this type is BLKmode, then
don't force containing types to be BLKmode. */
TYPE_NO_FORCE_BLK (type) = 1;
- TYPE_MODE (type) = BLKmode;
+ SET_TYPE_MODE (type, BLKmode);
}
}
if (TYPE_SIZE (type) != 0)
{
TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
- TYPE_SIZE_UNIT (type) = round_up (TYPE_SIZE_UNIT (type),
- TYPE_ALIGN_UNIT (type));
+ TYPE_SIZE_UNIT (type)
+ = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
}
/* Evaluate nonconstant sizes only once, either now or as soon as safe. */
TYPE_SIZE_UNIT (variant) = size_unit;
TYPE_ALIGN (variant) = align;
TYPE_USER_ALIGN (variant) = user_align;
- TYPE_MODE (variant) = mode;
+ SET_TYPE_MODE (variant, mode);
+ }
+ }
+}
+
+/* Return a new underlying object for a bitfield started with FIELD. */
+
+static tree
+start_bitfield_representative (tree field)
+{
+ tree repr = make_node (FIELD_DECL);
+ DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
+ /* Force the representative to begin at a BITS_PER_UNIT aligned
+ boundary - C++ may use tail-padding of a base object to
+ continue packing bits so the bitfield region does not start
+ at bit zero (see g++.dg/abi/bitfield5.C for example).
+ Unallocated bits may happen for other reasons as well,
+ for example Ada which allows explicit bit-granular structure layout. */
+ DECL_FIELD_BIT_OFFSET (repr)
+ = size_binop (BIT_AND_EXPR,
+ DECL_FIELD_BIT_OFFSET (field),
+ bitsize_int (~(BITS_PER_UNIT - 1)));
+ SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
+ DECL_SIZE (repr) = DECL_SIZE (field);
+ DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
+ DECL_PACKED (repr) = DECL_PACKED (field);
+ DECL_CONTEXT (repr) = DECL_CONTEXT (field);
+ return repr;
+}
+
+/* Finish up a bitfield group that was started by creating the underlying
+ object REPR with the last field in the bitfield group FIELD. */
+
+static void
+finish_bitfield_representative (tree repr, tree field)
+{
+ unsigned HOST_WIDE_INT bitsize, maxbitsize;
+ enum machine_mode mode;
+ tree nextf, size;
+
+ size = size_diffop (DECL_FIELD_OFFSET (field),
+ DECL_FIELD_OFFSET (repr));
+ gcc_assert (host_integerp (size, 1));
+ bitsize = (tree_low_cst (size, 1) * BITS_PER_UNIT
+ + tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
+ - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1)
+ + tree_low_cst (DECL_SIZE (field), 1));
+
+ /* Round up bitsize to multiples of BITS_PER_UNIT. */
+ bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
+
+ /* Now nothing tells us how to pad out bitsize ... */
+ nextf = DECL_CHAIN (field);
+ while (nextf && TREE_CODE (nextf) != FIELD_DECL)
+ nextf = DECL_CHAIN (nextf);
+ if (nextf)
+ {
+ tree maxsize;
+ /* If there was an error, the field may be not laid out
+ correctly. Don't bother to do anything. */
+ if (TREE_TYPE (nextf) == error_mark_node)
+ return;
+ maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
+ DECL_FIELD_OFFSET (repr));
+ if (host_integerp (maxsize, 1))
+ {
+ maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT
+ + tree_low_cst (DECL_FIELD_BIT_OFFSET (nextf), 1)
+ - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1));
+ /* If the group ends within a bitfield nextf does not need to be
+ aligned to BITS_PER_UNIT. Thus round up. */
+ maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
+ }
+ else
+ maxbitsize = bitsize;
+ }
+ else
+ {
+ /* ??? If you consider that tail-padding of this struct might be
+ re-used when deriving from it we cannot really do the following
+ and thus need to set maxsize to bitsize? Also we cannot
+ generally rely on maxsize to fold to an integer constant, so
+ use bitsize as fallback for this case. */
+ tree maxsize = size_diffop (TYPE_SIZE_UNIT (DECL_CONTEXT (field)),
+ DECL_FIELD_OFFSET (repr));
+ if (host_integerp (maxsize, 1))
+ maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT
+ - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1));
+ else
+ maxbitsize = bitsize;
+ }
+
+ /* Only if we don't artificially break up the representative in
+ the middle of a large bitfield with different possibly
+ overlapping representatives. And all representatives start
+ at byte offset. */
+ gcc_assert (maxbitsize % BITS_PER_UNIT == 0);
+
+ /* Find the smallest nice mode to use. */
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_BITSIZE (mode) >= bitsize)
+ break;
+ if (mode != VOIDmode
+ && (GET_MODE_BITSIZE (mode) > maxbitsize
+ || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE))
+ mode = VOIDmode;
+
+ if (mode == VOIDmode)
+ {
+ /* We really want a BLKmode representative only as a last resort,
+ considering the member b in
+ struct { int a : 7; int b : 17; int c; } __attribute__((packed));
+ Otherwise we simply want to split the representative up
+ allowing for overlaps within the bitfield region as required for
+ struct { int a : 7; int b : 7;
+ int c : 10; int d; } __attribute__((packed));
+ [0, 15] HImode for a and b, [8, 23] HImode for c. */
+ DECL_SIZE (repr) = bitsize_int (bitsize);
+ DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
+ DECL_MODE (repr) = BLKmode;
+ TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
+ bitsize / BITS_PER_UNIT);
+ }
+ else
+ {
+ unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
+ DECL_SIZE (repr) = bitsize_int (modesize);
+ DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
+ DECL_MODE (repr) = mode;
+ TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
+ }
+
+ /* Remember whether the bitfield group is at the end of the
+ structure or not. */
+ DECL_CHAIN (repr) = nextf;
+}
+
+/* Compute and set FIELD_DECLs for the underlying objects we should
+ use for bitfield access for the structure laid out with RLI. */
+
+static void
+finish_bitfield_layout (record_layout_info rli)
+{
+ tree field, prev;
+ tree repr = NULL_TREE;
+
+ /* Unions would be special, for the ease of type-punning optimizations
+ we could use the underlying type as hint for the representative
+ if the bitfield would fit and the representative would not exceed
+ the union in size. */
+ if (TREE_CODE (rli->t) != RECORD_TYPE)
+ return;
+
+ for (prev = NULL_TREE, field = TYPE_FIELDS (rli->t);
+ field; field = DECL_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ /* In the C++ memory model, consecutive bit fields in a structure are
+ considered one memory location and updating a memory location
+ may not store into adjacent memory locations. */
+ if (!repr
+ && DECL_BIT_FIELD_TYPE (field))
+ {
+ /* Start new representative. */
+ repr = start_bitfield_representative (field);
+ }
+ else if (repr
+ && ! DECL_BIT_FIELD_TYPE (field))
+ {
+ /* Finish off new representative. */
+ finish_bitfield_representative (repr, prev);
+ repr = NULL_TREE;
}
+ else if (DECL_BIT_FIELD_TYPE (field))
+ {
+ gcc_assert (repr != NULL_TREE);
+
+ /* Zero-size bitfields finish off a representative and
+ do not have a representative themselves. This is
+ required by the C++ memory model. */
+ if (integer_zerop (DECL_SIZE (field)))
+ {
+ finish_bitfield_representative (repr, prev);
+ repr = NULL_TREE;
+ }
+
+ /* We assume that either DECL_FIELD_OFFSET of the representative
+ and each bitfield member is a constant or they are equal.
+ This is because we need to be able to compute the bit-offset
+ of each field relative to the representative in get_bit_range
+ during RTL expansion.
+ If these constraints are not met, simply force a new
+ representative to be generated. That will at most
+ generate worse code but still maintain correctness with
+ respect to the C++ memory model. */
+ else if (!((host_integerp (DECL_FIELD_OFFSET (repr), 1)
+ && host_integerp (DECL_FIELD_OFFSET (field), 1))
+ || operand_equal_p (DECL_FIELD_OFFSET (repr),
+ DECL_FIELD_OFFSET (field), 0)))
+ {
+ finish_bitfield_representative (repr, prev);
+ repr = start_bitfield_representative (field);
+ }
+ }
+ else
+ continue;
+
+ if (repr)
+ DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;
+
+ prev = field;
}
+
+ if (repr)
+ finish_bitfield_representative (repr, prev);
}
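+
+/* For example, in struct { int a : 3; int b : 5; char c; } the adjacent
+ bit-fields A and B share one representative: a QImode FIELD_DECL
+ covering the first byte. A store to A may thus read and rewrite B's
+ bits but can never touch C (typical ILP32/LP64 layout assumed). */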
/* Do all of the work required to layout the type indicated by RLI,
/* Perform any last tweaks to the TYPE_SIZE, etc. */
finalize_type_size (rli->t);
+ /* Compute bitfield representatives. */
+ finish_bitfield_layout (rli);
+
/* Propagate TYPE_PACKED to variants. With C++ templates,
handle_packed_attribute is too early to do this. */
for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
/* Lay out any static members. This is done now because their type
may use the record's type. */
- while (rli->pending_statics)
- {
- layout_decl (TREE_VALUE (rli->pending_statics), 0);
- rli->pending_statics = TREE_CHAIN (rli->pending_statics);
- }
+ while (!VEC_empty (tree, rli->pending_statics))
+ layout_decl (VEC_pop (tree, rli->pending_statics), 0);
/* Clean up. */
if (free_p)
- free (rli);
+ {
+ VEC_free (tree, gc, rli->pending_statics);
+ free (rli);
+ }
}
\f
for (tail = NULL_TREE; fields; tail = fields, fields = next)
{
DECL_FIELD_CONTEXT (fields) = type;
- next = TREE_CHAIN (fields);
- TREE_CHAIN (fields) = tail;
+ next = DECL_CHAIN (fields);
+ DECL_CHAIN (fields) = tail;
}
TYPE_FIELDS (type) = tail;
#if 0 /* not yet, should get fixed properly later */
TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
#else
- TYPE_NAME (type) = build_decl (TYPE_DECL, get_identifier (name), type);
+ TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
+ TYPE_DECL, get_identifier (name), type);
#endif
TYPE_STUB_DECL (type) = TYPE_NAME (type);
layout_decl (TYPE_NAME (type), 0);
&& tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
TYPE_UNSIGNED (type) = 1;
- TYPE_MODE (type) = smallest_mode_for_size (TYPE_PRECISION (type),
- MODE_INT);
+ SET_TYPE_MODE (type,
+ smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
break;
case REAL_TYPE:
- TYPE_MODE (type) = mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0);
+ SET_TYPE_MODE (type,
+ mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
break;
+ case FIXED_POINT_TYPE:
+ /* TYPE_MODE (type) has been set already. */
+ TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
+ TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
+ break;
+
case COMPLEX_TYPE:
TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
- TYPE_MODE (type)
- = mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
- (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
- ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
- 0);
+ SET_TYPE_MODE (type,
+ mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
+ (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
+ ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
+ 0));
TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
break;
case VECTOR_TYPE:
{
int nunits = TYPE_VECTOR_SUBPARTS (type);
- tree nunits_tree = build_int_cst (NULL_TREE, nunits);
tree innertype = TREE_TYPE (type);
gcc_assert (!(nunits & (nunits - 1)));
/* Find an appropriate mode for the vector type. */
if (TYPE_MODE (type) == VOIDmode)
- {
- enum machine_mode innermode = TYPE_MODE (innertype);
- enum machine_mode mode;
-
- /* First, look for a supported vector type. */
- if (SCALAR_FLOAT_MODE_P (innermode))
- mode = MIN_MODE_VECTOR_FLOAT;
- else
- mode = MIN_MODE_VECTOR_INT;
-
- for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
- if (GET_MODE_NUNITS (mode) == nunits
- && GET_MODE_INNER (mode) == innermode
- && targetm.vector_mode_supported_p (mode))
- break;
-
- /* For integers, try mapping it to a same-sized scalar mode. */
- if (mode == VOIDmode
- && GET_MODE_CLASS (innermode) == MODE_INT)
- mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
- MODE_INT, 0);
-
- if (mode == VOIDmode || !have_regs_of_mode[mode])
- TYPE_MODE (type) = BLKmode;
- else
- TYPE_MODE (type) = mode;
- }
+ SET_TYPE_MODE (type,
+ mode_for_vector (TYPE_MODE (innertype), nunits));
+ TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
TYPE_SIZE_UNIT (innertype),
- nunits_tree, 0);
+ size_int (nunits));
TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
- nunits_tree, 0);
-
- /* Always naturally align vectors. This prevents ABI changes
- depending on whether or not native vector modes are supported. */
- TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
+ bitsize_int (nunits));
+
+ /* For vector types, we do not default to the mode's alignment.
+ Instead, query a target hook, defaulting to natural alignment.
+ This prevents ABI changes depending on whether or not native
+ vector modes are supported. */
+ TYPE_ALIGN (type) = targetm.vector_alignment (type);
+
+ /* However, if the underlying mode requires a bigger alignment than
+ what the target hook provides, we cannot use the mode. For now,
+ simply reject that case. */
+ gcc_assert (TYPE_ALIGN (type)
+ >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
break;
}
/* This is an incomplete type and so doesn't have a size. */
TYPE_ALIGN (type) = 1;
TYPE_USER_ALIGN (type) = 0;
- TYPE_MODE (type) = VOIDmode;
+ SET_TYPE_MODE (type, VOIDmode);
break;
case OFFSET_TYPE:
TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
/* A pointer might be MODE_PARTIAL_INT,
but ptrdiff_t must be integral. */
- TYPE_MODE (type) = mode_for_size (POINTER_SIZE, MODE_INT, 0);
+ SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
+ TYPE_PRECISION (type) = POINTER_SIZE;
break;
case FUNCTION_TYPE:
/* It's hard to see what the mode and size of a function ought to
be, but we do know the alignment is FUNCTION_BOUNDARY, so
make it consistent with that. */
- TYPE_MODE (type) = mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0);
+ SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
break;
case POINTER_TYPE:
case REFERENCE_TYPE:
{
- enum machine_mode mode = ((TREE_CODE (type) == REFERENCE_TYPE
- && reference_types_internal)
- ? Pmode : TYPE_MODE (type));
-
- int nbits = GET_MODE_BITSIZE (mode);
-
- TYPE_SIZE (type) = bitsize_int (nbits);
+ enum machine_mode mode = TYPE_MODE (type);
+ if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
+ {
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
+ mode = targetm.addr_space.address_mode (as);
+ }
+ TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
TYPE_UNSIGNED (type) = 1;
- TYPE_PRECISION (type) = nbits;
+ TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
}
break;
{
tree ub = TYPE_MAX_VALUE (index);
tree lb = TYPE_MIN_VALUE (index);
+ tree element_size = TYPE_SIZE (element);
tree length;
- tree element_size;
-
- /* The initial subtraction should happen in the original type so
- that (possible) negative values are handled appropriately. */
- length = size_binop (PLUS_EXPR, size_one_node,
- fold_convert (sizetype,
- fold_build2 (MINUS_EXPR,
- TREE_TYPE (lb),
- ub, lb)));
-
- /* Special handling for arrays of bits (for Chill). */
- element_size = TYPE_SIZE (element);
- if (TYPE_PACKED (type) && INTEGRAL_TYPE_P (element)
- && (integer_zerop (TYPE_MAX_VALUE (element))
- || integer_onep (TYPE_MAX_VALUE (element)))
- && host_integerp (TYPE_MIN_VALUE (element), 1))
- {
- HOST_WIDE_INT maxvalue
- = tree_low_cst (TYPE_MAX_VALUE (element), 1);
- HOST_WIDE_INT minvalue
- = tree_low_cst (TYPE_MIN_VALUE (element), 1);
-
- if (maxvalue - minvalue == 1
- && (maxvalue == 1 || maxvalue == 0))
- element_size = integer_one_node;
- }
- /* If neither bound is a constant and sizetype is signed, make
- sure the size is never negative. We should really do this
- if *either* bound is non-constant, but this is the best
- compromise between C and Ada. */
- if (!TYPE_UNSIGNED (sizetype)
- && TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST
- && TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST)
- length = size_binop (MAX_EXPR, length, size_zero_node);
+ /* Make sure that an array of zero-sized element is zero-sized
+ regardless of its extent. */
+ if (integer_zerop (element_size))
+ length = size_zero_node;
+
+ /* The computation should happen in the original signedness so
+ that (possible) negative values are handled appropriately
+ when determining overflow. */
+ else
+ length
+ = fold_convert (sizetype,
+ size_binop (PLUS_EXPR,
+ build_int_cst (TREE_TYPE (lb), 1),
+ size_binop (MINUS_EXPR, ub, lb)));
TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
fold_convert (bitsizetype,
length));
- /* If we know the size of the element, calculate the total
- size directly, rather than do some division thing below.
- This optimization helps Fortran assumed-size arrays
- (where the size of the array is determined at runtime)
- substantially.
- Note that we can't do this in the case where the size of
- the elements is one bit since TYPE_SIZE_UNIT cannot be
- set correctly in that case. */
- if (TYPE_SIZE_UNIT (element) != 0 && ! integer_onep (element_size))
+ /* If we know the size of the element, calculate the total size
+ directly, rather than do some division thing below. This
+ optimization helps Fortran assumed-size arrays (where the
+ size of the array is determined at runtime) substantially. */
+ if (TYPE_SIZE_UNIT (element))
TYPE_SIZE_UNIT (type)
= size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
}
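+
+/* For example, for int[5] the bounds are LB 0 and UB 4, so LENGTH is
+ (4 - 0) + 1 = 5 and TYPE_SIZE becomes 5 * 32 bits on a 32-bit-int
+ target, while any array of a zero-sized element gets size zero no
+ matter what its bounds are. */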
TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
#endif
TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
- TYPE_MODE (type) = BLKmode;
+ SET_TYPE_MODE (type, BLKmode);
if (TYPE_SIZE (type) != 0
#ifdef MEMBER_TYPE_FORCES_BLK
&& ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
&& (TYPE_MODE (TREE_TYPE (type)) != BLKmode
|| TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
{
- /* One-element arrays get the component type's mode. */
- if (simple_cst_equal (TYPE_SIZE (type),
- TYPE_SIZE (TREE_TYPE (type))))
- TYPE_MODE (type) = TYPE_MODE (TREE_TYPE (type));
- else
- TYPE_MODE (type)
- = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);
-
+ SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
+ TYPE_SIZE (type)));
if (TYPE_MODE (type) != BLKmode
&& STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
- && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type))
- && TYPE_MODE (type) != BLKmode)
+ && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
{
TYPE_NO_FORCE_BLK (type) = 1;
- TYPE_MODE (type) = BLKmode;
+ SET_TYPE_MODE (type, BLKmode);
}
}
/* When the element size is constant, check that it is at least as
large as the element alignment. */
&& TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
/* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
TYPE_ALIGN_UNIT. */
- && !TREE_CONSTANT_OVERFLOW (TYPE_SIZE_UNIT (element))
+ && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
&& !integer_zerop (TYPE_SIZE_UNIT (element))
&& compare_tree_int (TYPE_SIZE_UNIT (element),
TYPE_ALIGN_UNIT (element)) < 0)
TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
/* Place all the fields. */
- for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
place_field (rli, field);
if (TREE_CODE (type) == QUAL_UNION_TYPE)
TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
- if (lang_adjust_rli)
- (*lang_adjust_rli) (rli);
-
/* Finish laying out the record. */
finish_record_layout (rli, /*free_p=*/true);
}
&& TREE_CODE (type) != QUAL_UNION_TYPE)
finalize_type_size (type);
- /* If an alias set has been set for this aggregate when it was incomplete,
- force it into alias set 0.
- This is too conservative, but we cannot call record_component_aliases
- here because some frontends still change the aggregates after
- layout_type. */
- if (AGGREGATE_TYPE_P (type) && TYPE_ALIAS_SET_KNOWN_P (type))
- TYPE_ALIAS_SET (type) = 0;
+ /* We should never see alias sets on incomplete aggregates, and we
+ should never call layout_type on an aggregate that is already complete. */
+ if (AGGREGATE_TYPE_P (type))
+ gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
+}
+
+/* Vector types need to re-check the target flags each time we report
+ the machine mode. We need to do this because attribute target can
+ change the result of vector_mode_supported_p and have_regs_of_mode
+ on a per-function basis. Thus the TYPE_MODE of a VECTOR_TYPE can
+ change on a per-function basis. */
+/* ??? Possibly a better solution is to run through all the types
+ referenced by a function and re-compute the TYPE_MODE once, rather
+ than make the TYPE_MODE macro call a function. */
+
+enum machine_mode
+vector_type_mode (const_tree t)
+{
+ enum machine_mode mode;
+
+ gcc_assert (TREE_CODE (t) == VECTOR_TYPE);
+
+ mode = t->type_common.mode;
+ if (VECTOR_MODE_P (mode)
+ && (!targetm.vector_mode_supported_p (mode)
+ || !have_regs_of_mode[mode]))
+ {
+ enum machine_mode innermode = TREE_TYPE (t)->type_common.mode;
+
+ /* For integers, try mapping it to a same-sized scalar mode. */
+ if (GET_MODE_CLASS (innermode) == MODE_INT)
+ {
+ mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
+ * GET_MODE_BITSIZE (innermode), MODE_INT, 0);
+
+ if (mode != VOIDmode && have_regs_of_mode[mode])
+ return mode;
+ }
+
+ return BLKmode;
+ }
+
+ return mode;
}
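+
+/* For example (x86 assumed for concreteness), a 4 x int vector laid
+ out as V4SImode will, inside a function compiled without vector
+ support, report TImode when TImode registers are available and
+ BLKmode otherwise, which is why TYPE_MODE must be re-evaluated per
+ function. */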
\f
/* Create and return a type for signed integers of PRECISION bits. */
return type;
}
\f
-/* Initialize sizetype and bitsizetype to a reasonable and temporary
- value to enable integer types to be created. */
+/* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
+ and SATP. */
-void
-initialize_sizetypes (bool signed_p)
+tree
+make_fract_type (int precision, int unsignedp, int satp)
{
- tree t = make_node (INTEGER_TYPE);
- int precision = GET_MODE_BITSIZE (SImode);
-
- TYPE_MODE (t) = SImode;
- TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
- TYPE_USER_ALIGN (t) = 0;
- TYPE_IS_SIZETYPE (t) = 1;
- TYPE_UNSIGNED (t) = !signed_p;
- TYPE_SIZE (t) = build_int_cst (t, precision);
- TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
- TYPE_PRECISION (t) = precision;
-
- /* Set TYPE_MIN_VALUE and TYPE_MAX_VALUE. */
- set_min_and_max_values_for_integral_type (t, precision, !signed_p);
-
- sizetype = t;
- bitsizetype = build_distinct_type_copy (t);
-}
+ tree type = make_node (FIXED_POINT_TYPE);
-/* Make sizetype a version of TYPE, and initialize *sizetype
- accordingly. We do this by overwriting the stub sizetype and
- bitsizetype nodes created by initialize_sizetypes. This makes sure
- that (a) anything stubby about them no longer exists, (b) any
- INTEGER_CSTs created with such a type, remain valid. */
+ TYPE_PRECISION (type) = precision;
-void
-set_sizetype (tree type)
-{
- int oprecision = TYPE_PRECISION (type);
- /* The *bitsizetype types use a precision that avoids overflows when
- calculating signed sizes / offsets in bits. However, when
- cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit
- precision. */
- int precision = MIN (oprecision + BITS_PER_UNIT_LOG + 1,
- 2 * HOST_BITS_PER_WIDE_INT);
- tree t;
-
- gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));
-
- t = build_distinct_type_copy (type);
- /* We do want to use sizetype's cache, as we will be replacing that
- type. */
- TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
- TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
- TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
- TYPE_UID (t) = TYPE_UID (sizetype);
- TYPE_IS_SIZETYPE (t) = 1;
-
- /* Replace our original stub sizetype. */
- memcpy (sizetype, t, tree_size (sizetype));
- TYPE_MAIN_VARIANT (sizetype) = sizetype;
-
- t = make_node (INTEGER_TYPE);
- TYPE_NAME (t) = get_identifier ("bit_size_type");
- /* We do want to use bitsizetype's cache, as we will be replacing that
- type. */
- TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
- TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
- TYPE_PRECISION (t) = precision;
- TYPE_UID (t) = TYPE_UID (bitsizetype);
- TYPE_IS_SIZETYPE (t) = 1;
-
- /* Replace our original stub bitsizetype. */
- memcpy (bitsizetype, t, tree_size (bitsizetype));
- TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;
-
- if (TYPE_UNSIGNED (type))
+ if (satp)
+ TYPE_SATURATING (type) = 1;
+
+ /* Lay out the type: set its alignment, size, etc. */
+ if (unsignedp)
{
- fixup_unsigned_type (bitsizetype);
- ssizetype = build_distinct_type_copy (make_signed_type (oprecision));
- TYPE_IS_SIZETYPE (ssizetype) = 1;
- sbitsizetype = build_distinct_type_copy (make_signed_type (precision));
- TYPE_IS_SIZETYPE (sbitsizetype) = 1;
+ TYPE_UNSIGNED (type) = 1;
+ SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
}
else
- {
- fixup_signed_type (bitsizetype);
- ssizetype = sizetype;
- sbitsizetype = bitsizetype;
- }
+ SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
+ layout_type (type);
- /* If SIZETYPE is unsigned, we need to fix TYPE_MAX_VALUE so that
- it is sign extended in a way consistent with force_fit_type. */
- if (TYPE_UNSIGNED (type))
- {
- tree orig_max, new_max;
+ return type;
+}
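+
+/* For example, make_fract_type (16, 1, 1) builds the type for a 16-bit
+ sat unsigned _Fract: TYPE_SATURATING and TYPE_UNSIGNED are set and
+ the mode comes out as UHQmode, assuming the usual placement of the
+ 16-bit unsigned fractional mode in the target's mode list. */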
- orig_max = TYPE_MAX_VALUE (sizetype);
+/* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
+ and SATP. */
- /* Build a new node with the same values, but a different type. */
- new_max = build_int_cst_wide (sizetype,
- TREE_INT_CST_LOW (orig_max),
- TREE_INT_CST_HIGH (orig_max));
+tree
+make_accum_type (int precision, int unsignedp, int satp)
+{
+ tree type = make_node (FIXED_POINT_TYPE);
+
+ TYPE_PRECISION (type) = precision;
- /* Now sign extend it using force_fit_type to ensure
- consistency. */
- new_max = force_fit_type (new_max, 0, 0, 0);
- TYPE_MAX_VALUE (sizetype) = new_max;
+ if (satp)
+ TYPE_SATURATING (type) = 1;
+
+ /* Lay out the type: set its alignment, size, etc. */
+ if (unsignedp)
+ {
+ TYPE_UNSIGNED (type) = 1;
+ SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
}
+ else
+ SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
+ layout_type (type);
+
+ return type;
+}
+
+/* Initialize sizetypes so layout_type can use them. */
+
+void
+initialize_sizetypes (void)
+{
+ int precision, bprecision;
+
+ /* Get sizetypes precision from the SIZE_TYPE target macro. */
+ if (strcmp (SIZE_TYPE, "unsigned int") == 0)
+ precision = INT_TYPE_SIZE;
+ else if (strcmp (SIZE_TYPE, "long unsigned int") == 0)
+ precision = LONG_TYPE_SIZE;
+ else if (strcmp (SIZE_TYPE, "long long unsigned int") == 0)
+ precision = LONG_LONG_TYPE_SIZE;
+ else if (strcmp (SIZE_TYPE, "short unsigned int") == 0)
+ precision = SHORT_TYPE_SIZE;
+ else
+ gcc_unreachable ();
+
+ bprecision
+ = MIN (precision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
+ bprecision
+ = GET_MODE_PRECISION (smallest_mode_for_size (bprecision, MODE_INT));
+ if (bprecision > HOST_BITS_PER_WIDE_INT * 2)
+ bprecision = HOST_BITS_PER_WIDE_INT * 2;
+
+ /* Create stubs for sizetype and bitsizetype so we can create constants. */
+ sizetype = make_node (INTEGER_TYPE);
+ TYPE_NAME (sizetype) = get_identifier ("sizetype");
+ TYPE_PRECISION (sizetype) = precision;
+ TYPE_UNSIGNED (sizetype) = 1;
+ TYPE_IS_SIZETYPE (sizetype) = 1;
+ bitsizetype = make_node (INTEGER_TYPE);
+ TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
+ TYPE_PRECISION (bitsizetype) = bprecision;
+ TYPE_UNSIGNED (bitsizetype) = 1;
+ TYPE_IS_SIZETYPE (bitsizetype) = 1;
+
+ /* Now layout both types manually. */
+ SET_TYPE_MODE (sizetype, smallest_mode_for_size (precision, MODE_INT));
+ TYPE_ALIGN (sizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (sizetype));
+ TYPE_SIZE (sizetype) = bitsize_int (precision);
+ TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype)));
+ set_min_and_max_values_for_integral_type (sizetype, precision,
+ /*is_unsigned=*/true);
+ /* sizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
+ sign-extended in a way consistent with force_fit_type. */
+ TYPE_MAX_VALUE (sizetype)
+ = double_int_to_tree (sizetype,
+ tree_to_double_int (TYPE_MAX_VALUE (sizetype)));
+
+ SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT));
+ TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype));
+ TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
+ TYPE_SIZE_UNIT (bitsizetype)
+ = size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype)));
+ set_min_and_max_values_for_integral_type (bitsizetype, bprecision,
+ /*is_unsigned=*/true);
+ /* bitsizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
+ sign-extended in a way consistent with force_fit_type. */
+ TYPE_MAX_VALUE (bitsizetype)
+ = double_int_to_tree (bitsizetype,
+ tree_to_double_int (TYPE_MAX_VALUE (bitsizetype)));
+
+ /* Create the signed variants of *sizetype. */
+ ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
+ TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
+ TYPE_IS_SIZETYPE (ssizetype) = 1;
+ sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
+ TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
+ TYPE_IS_SIZETYPE (sbitsizetype) = 1;
}
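+
+/* For example, on an LP64 target where SIZE_TYPE is "long unsigned int",
+ PRECISION is LONG_TYPE_SIZE (64) and sizetype gets DImode, while
+ BPRECISION is padded by BITS_PER_UNIT_LOG + 1 and rounded up to a
+ full integer mode so that bit offsets cannot overflow it. */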
\f
/* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE
/* Find the best machine mode to use when referencing a bit field of length
BITSIZE bits starting at BITPOS.
+ BITREGION_START is the bit position of the first bit in this
+ sequence of bit fields. BITREGION_END is the last bit in this
+ sequence. If these two fields are non-zero, we should restrict the
+ memory access to a maximum sized chunk of
+ BITREGION_END - BITREGION_START + 1. Otherwise, we are allowed to touch
+ any adjacent non bit-fields.
+
The underlying object is known to be aligned to a boundary of ALIGN bits.
If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
larger than LARGEST_MODE (usually SImode).
decide which of the above modes should be used. */
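+
+/* For example, in struct { char a; int b : 7; char c; } the bit region
+ of B is the single byte between A and C, so MAXBITS is 8 and only
+ QImode qualifies; a store to B therefore never touches A or C, as
+ the C++ memory model requires. */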
enum machine_mode
-get_best_mode (int bitsize, int bitpos, unsigned int align,
+get_best_mode (int bitsize, int bitpos,
+ unsigned HOST_WIDE_INT bitregion_start,
+ unsigned HOST_WIDE_INT bitregion_end,
+ unsigned int align,
enum machine_mode largest_mode, int volatilep)
{
enum machine_mode mode;
unsigned int unit = 0;
+ unsigned HOST_WIDE_INT maxbits;
+
+ /* If unset, no restriction. */
+ if (!bitregion_end)
+ maxbits = MAX_FIXED_MODE_SIZE;
+ else
+ maxbits = bitregion_end - bitregion_start + 1;
/* Find the narrowest integer mode that contains the bit field. */
for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
unit = GET_MODE_BITSIZE (mode);
- if ((bitpos % unit) + bitsize <= unit)
+ if (unit == GET_MODE_PRECISION (mode)
+ && (bitpos % unit) + bitsize <= unit)
break;
}
(Though at least one Unix compiler ignores this problem:
that on the Sequent 386 machine. */
|| MIN (unit, BIGGEST_ALIGNMENT) > align
- || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
+ || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode))
+ || unit > maxbits
+ || (bitregion_end
+ && bitpos - (bitpos % unit) + unit > bitregion_end + 1))
return VOIDmode;
if ((SLOW_BYTE_ACCESS && ! volatilep)
- || (volatilep && !targetm.narrow_volatile_bitfield()))
+ || (volatilep && !targetm.narrow_volatile_bitfield ()))
{
enum machine_mode wide_mode = VOIDmode, tmode;
tmode = GET_MODE_WIDER_MODE (tmode))
{
unit = GET_MODE_BITSIZE (tmode);
- if (bitpos / unit == (bitpos + bitsize - 1) / unit
+ if (unit == GET_MODE_PRECISION (tmode)
+ && bitpos / unit == (bitpos + bitsize - 1) / unit
&& unit <= BITS_PER_WORD
&& unit <= MIN (align, BIGGEST_ALIGNMENT)
+ && unit <= maxbits
&& (largest_mode == VOIDmode
- || unit <= GET_MODE_BITSIZE (largest_mode)))
+ || unit <= GET_MODE_BITSIZE (largest_mode))
+ && (bitregion_end == 0
+ || bitpos - (bitpos % unit) + unit <= bitregion_end + 1))
wide_mode = tmode;
}