#include "ggc.h"
#include "target.h"
#include "langhooks.h"
-
-/* Set to one when set_sizetype has been called. */
-static int sizetype_set;
-
-/* List of types created before set_sizetype has been called. We do not
- make this a GGC root since we want these nodes to be reclaimed. */
-static tree early_type_list;
+#include "regs.h"
+#include "params.h"
/* Data type for the expressions representing sizes of data types.
It is the first integer type laid out. */
/* If nonzero, this is an upper limit on alignment of structure fields.
The value is measured in bits. */
-unsigned int maximum_field_alignment;
+unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
+/* ... and its original value in bytes, specified via -fpack-struct=<value>. */
+unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;
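+/* For example, with -fpack-struct=2 on a target where BITS_PER_UNIT is 8,
+   initial_max_fld_align is 2 and maximum_field_alignment is 16.  */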
/* If nonzero, the alignment of a bitstring or (power-)set value, in bits.
May be overridden by front-ends. */
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
HOST_WIDE_INT, tree);
#endif
-static unsigned int update_alignment_for_field (record_layout_info, tree,
- unsigned int);
extern void debug_rli (record_layout_info);
\f
/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
static GTY(()) tree pending_sizes;
-/* Nonzero means cannot safely call expand_expr now,
- so put variable sizes onto `pending_sizes' instead. */
-
-int immediate_size_expand;
-
/* Show that REFERENCE_TYPES are internal and should be Pmode. Called only
by front end. */
get_pending_sizes (void)
{
tree chain = pending_sizes;
- tree t;
-
- /* Put each SAVE_EXPR into the current function. */
- for (t = chain; t; t = TREE_CHAIN (t))
- SAVE_EXPR_CONTEXT (TREE_VALUE (t)) = current_function_decl;
pending_sizes = 0;
return chain;
void
put_pending_sizes (tree chain)
{
- if (pending_sizes)
- abort ();
-
+ gcc_assert (!pending_sizes);
pending_sizes = chain;
}
|| CONTAINS_PLACEHOLDER_P (size))
return size;
- if (TREE_CODE (size) == MINUS_EXPR && integer_onep (TREE_OPERAND (size, 1)))
- /* If this is the upper bound of a C array, leave the minus 1 outside
- the SAVE_EXPR so it can be folded away. */
- TREE_OPERAND (size, 0) = save = save_expr (TREE_OPERAND (size, 0));
- else
- size = save = save_expr (size);
+ size = save_expr (size);
/* If an array with a variable number of elements is declared, and
the elements require destruction, we will emit a cleanup for the
`unsaved', i.e., all SAVE_EXPRs are recalculated. However, we do
not wish to do that here; the array-size is the same in both
places. */
- if (TREE_CODE (save) == SAVE_EXPR)
- SAVE_EXPR_PERSISTENT_P (save) = 1;
+ save = skip_simple_arithmetic (size);
+
+ if (cfun && cfun->x_dont_save_pending_sizes_p)
+ /* The front-end doesn't want us to keep a list of the expressions
+ that determine sizes for variable size objects. Trust it. */
+ return size;
if (lang_hooks.decls.global_bindings_p ())
{
if (TREE_CONSTANT (size))
- error ("type size can't be explicitly evaluated");
+ error ("type size can%'t be explicitly evaluated");
else
error ("variable-size type declared outside of any function");
return size_one_node;
}
- if (immediate_size_expand)
- expand_expr (save, const0_rtx, VOIDmode, 0);
- else if (cfun != 0 && cfun->x_dont_save_pending_sizes_p)
- /* The front-end doesn't want us to keep a list of the expressions
- that determine sizes for variable size objects. */
- ;
- else
- put_pending_size (save);
+ put_pending_size (save);
return size;
}
if (GET_MODE_PRECISION (mode) >= size)
return mode;
- abort ();
+ gcc_unreachable ();
}
/* Find an integer mode of the exact same size, or BLKmode on failure. */
case MODE_CC:
default:
- abort ();
+ gcc_unreachable ();
}
return mode;
return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}
-/* Return the value of VALUE, rounded up to a multiple of DIVISOR.
- This can only be applied to objects of a sizetype. */
-
-tree
-round_up (tree value, int divisor)
-{
- tree arg = size_int_type (divisor, TREE_TYPE (value));
-
- return size_binop (MULT_EXPR, size_binop (CEIL_DIV_EXPR, value, arg), arg);
-}
-
-/* Likewise, but round down. */
-
-tree
-round_down (tree value, int divisor)
-{
- tree arg = size_int_type (divisor, TREE_TYPE (value));
-
- return size_binop (MULT_EXPR, size_binop (FLOOR_DIV_EXPR, value, arg), arg);
-}
\f
/* Subroutine of layout_decl: Force alignment required for the data type.
But if the decl itself wants greater alignment, don't override that. */
if (code == CONST_DECL)
return;
- else if (code != VAR_DECL && code != PARM_DECL && code != RESULT_DECL
- && code != TYPE_DECL && code != FIELD_DECL)
- abort ();
-
+
+ gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
+ || code == TYPE_DECL || code == FIELD_DECL);
+
rtl = DECL_RTL_IF_SET (decl);
if (type == error_mark_node)
size in bytes from the size in bits. If we have already set the mode,
don't set it again since we can be called twice for FIELD_DECLs. */
- TREE_UNSIGNED (decl) = TREE_UNSIGNED (type);
+ DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
if (DECL_MODE (decl) == VOIDmode)
DECL_MODE (decl) = TYPE_MODE (type);
}
else if (DECL_SIZE_UNIT (decl) == 0)
DECL_SIZE_UNIT (decl)
- = convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
- bitsize_unit_node));
+ = fold_convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
+ bitsize_unit_node));
if (code != FIELD_DECL)
/* For non-fields, update the alignment from the type. */
field. */
if (integer_zerop (DECL_SIZE (decl))
&& ! DECL_PACKED (decl)
- && ! (*targetm.ms_bitfield_layout_p) (DECL_FIELD_CONTEXT (decl)))
+ && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
{
#ifdef PCC_BITFIELD_TYPE_MATTERS
if (PCC_BITFIELD_TYPE_MATTERS)
enum machine_mode xmode
= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
- if (xmode != BLKmode
+ if (xmode != BLKmode
&& (known_align == 0
|| known_align >= GET_MODE_ALIGNMENT (xmode)))
{
|| TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST))
DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);
- /* Should this be controlled by DECL_USER_ALIGN, too? */
- if (maximum_field_alignment != 0)
- DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), maximum_field_alignment);
- if (! DECL_USER_ALIGN (decl))
+ if (! DECL_USER_ALIGN (decl) && ! DECL_PACKED (decl))
{
/* Some targets (e.g. i386, VMS) limit struct field alignment
to a lower boundary than alignment of variables unless
DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
}
+
+ /* Should this be controlled by DECL_USER_ALIGN, too? */
+ if (maximum_field_alignment != 0)
+ DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), maximum_field_alignment);
}
/* Evaluate nonconstant size only once, either now or as soon as safe. */
int size_as_int = TREE_INT_CST_LOW (size);
if (compare_tree_int (size, size_as_int) == 0)
- warning ("%Jsize of '%D' is %d bytes", decl, decl, size_as_int);
+ warning ("%Jsize of %qD is %d bytes", decl, decl, size_as_int);
else
- warning ("%Jsize of '%D' is larger than %d bytes",
+ warning ("%Jsize of %qD is larger than %d bytes",
decl, decl, larger_than_size);
}
}
SET_DECL_RTL (decl, rtl);
}
}
+
+/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
+ a previous call to layout_decl and calls it again. */
+
+void
+relayout_decl (tree decl)
+{
+ DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
+ DECL_MODE (decl) = VOIDmode;
+ DECL_ALIGN (decl) = 0;
+ SET_DECL_RTL (decl, 0);
+
+ layout_decl (decl, 0);
+}
\f
/* Hook for a front-end function that can modify the record layout as needed
immediately before it is finalized. */
bit_from_pos (tree offset, tree bitpos)
{
return size_binop (PLUS_EXPR, bitpos,
- size_binop (MULT_EXPR, convert (bitsizetype, offset),
+ size_binop (MULT_EXPR,
+ fold_convert (bitsizetype, offset),
bitsize_unit_node));
}
byte_from_pos (tree offset, tree bitpos)
{
return size_binop (PLUS_EXPR, offset,
- convert (sizetype,
- size_binop (TRUNC_DIV_EXPR, bitpos,
- bitsize_unit_node)));
+ fold_convert (sizetype,
+ size_binop (TRUNC_DIV_EXPR, bitpos,
+ bitsize_unit_node)));
}
void
tree pos)
{
*poffset = size_binop (MULT_EXPR,
- convert (sizetype,
- size_binop (FLOOR_DIV_EXPR, pos,
- bitsize_int (off_align))),
+ fold_convert (sizetype,
+ size_binop (FLOOR_DIV_EXPR, pos,
+ bitsize_int (off_align))),
size_int (off_align / BITS_PER_UNIT));
*pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
}
*poffset
= size_binop (PLUS_EXPR, *poffset,
- size_binop (MULT_EXPR, convert (sizetype, extra_aligns),
+ size_binop (MULT_EXPR,
+ fold_convert (sizetype, extra_aligns),
size_int (off_align / BITS_PER_UNIT)));
*pbitpos
variable alignment fields in RLI, and return the alignment to give
the FIELD. */
-static unsigned int
+unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
unsigned int known_align)
{
/* Record must have at least as much alignment as any field.
Otherwise, the alignment of the field within the record is
meaningless. */
- if (is_bitfield && (* targetm.ms_bitfield_layout_p) (rli->t))
+ if (is_bitfield && targetm.ms_bitfield_layout_p (rli->t))
{
/* Here, the alignment of the underlying type of a bitfield can
affect the alignment of a record; even a zero-sized field
else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
{
/* Named bit-fields cause the entire structure to have the
- alignment implied by their type. */
- if (DECL_NAME (field) != 0)
+ alignment implied by their type. Some targets also apply the same
+ rules to unnamed bitfields. */
+ if (DECL_NAME (field) != 0
+ || targetm.align_anon_bitfield ())
{
unsigned int type_align = TYPE_ALIGN (type);
if (TREE_CODE (rli->t) == UNION_TYPE)
rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
- rli->offset = fold (build (COND_EXPR, sizetype,
- DECL_QUALIFIER (field),
- DECL_SIZE_UNIT (field), rli->offset));
+ rli->offset = fold (build3 (COND_EXPR, sizetype,
+ DECL_QUALIFIER (field),
+ DECL_SIZE_UNIT (field), rli->offset));
}
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
{
if (STRICT_ALIGNMENT)
warning ("%Jpacked attribute causes inefficient alignment "
- "for '%D'", field, field);
+ "for %qD", field, field);
else
- warning ("%Jpacked attribute is unnecessary for '%D'",
+ warning ("%Jpacked attribute is unnecessary for %qD",
field, field);
}
}
Bump the cumulative size to multiple of field alignment. */
if (warn_padded)
- warning ("%Jpadding struct to align '%D'", field, field);
+ warning ("%Jpadding struct to align %qD", field, field);
/* If the alignment is still within offset_align, just align
the bit position. */
/* First adjust OFFSET by the partial bits, then align. */
rli->offset
= size_binop (PLUS_EXPR, rli->offset,
- convert (sizetype,
- size_binop (CEIL_DIV_EXPR, rli->bitpos,
- bitsize_unit_node)));
+ fold_convert (sizetype,
+ size_binop (CEIL_DIV_EXPR, rli->bitpos,
+ bitsize_unit_node)));
rli->bitpos = bitsize_zero_node;
rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
variable-sized fields, we need not worry about compatibility. */
#ifdef PCC_BITFIELD_TYPE_MATTERS
if (PCC_BITFIELD_TYPE_MATTERS
- && ! (* targetm.ms_bitfield_layout_p) (rli->t)
+ && ! targetm.ms_bitfield_layout_p (rli->t)
&& TREE_CODE (field) == FIELD_DECL
&& type != error_mark_node
&& DECL_BIT_FIELD (field)
#ifdef BITFIELD_NBYTES_LIMITED
if (BITFIELD_NBYTES_LIMITED
- && ! (* targetm.ms_bitfield_layout_p) (rli->t)
+ && ! targetm.ms_bitfield_layout_p (rli->t)
&& TREE_CODE (field) == FIELD_DECL
&& type != error_mark_node
&& DECL_BIT_FIELD_TYPE (field)
Note: for compatibility, we use the type size, not the type alignment
to determine alignment, since that matches the documentation */
- if ((* targetm.ms_bitfield_layout_p) (rli->t)
+ if (targetm.ms_bitfield_layout_p (rli->t)
&& ((DECL_BIT_FIELD_TYPE (field) && ! DECL_PACKED (field))
|| (rli->prev_field && ! DECL_PACKED (rli->prev_field))))
{
rli->prev_field = NULL;
}
- rli->offset_align = tree_low_cst (TYPE_SIZE (type), 0);
normalize_rli (rli);
}
{
rli->offset
= size_binop (PLUS_EXPR, rli->offset,
- convert (sizetype,
- size_binop (CEIL_DIV_EXPR, rli->bitpos,
- bitsize_unit_node)));
+ fold_convert (sizetype,
+ size_binop (CEIL_DIV_EXPR, rli->bitpos,
+ bitsize_unit_node)));
rli->offset
= size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
rli->bitpos = bitsize_zero_node;
/* Round the size up to be a multiple of the required alignment. */
TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
- TYPE_SIZE_UNIT (rli->t) = round_up (unpadded_size_unit,
- TYPE_ALIGN (rli->t) / BITS_PER_UNIT);
+ TYPE_SIZE_UNIT (rli->t)
+ = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
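+ /* A record whose fields occupy 5 bytes but whose alignment is 4 bytes,
+ for example, is padded out to a size of 8 bytes here. */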
if (warn_padded && TREE_CONSTANT (unpadded_size)
&& simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0)
name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (rli->t)));
if (STRICT_ALIGNMENT)
- warning ("packed attribute causes inefficient alignment for `%s'", name);
+ warning ("packed attribute causes inefficient "
+ "alignment for %qs", name);
else
- warning ("packed attribute is unnecessary for `%s'", name);
+ warning ("packed attribute is unnecessary for %qs", name);
}
else
{
#endif /* MEMBER_TYPE_FORCES_BLK */
}
- /* If we only have one real field; use its mode. This only applies to
- RECORD_TYPE. This does not apply to unions. */
- if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode)
+ TYPE_MODE (type) = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);
+
+ /* If we only have one real field, use its mode if that mode's size
+ matches the type's size. This only applies to RECORD_TYPE. This
+ does not apply to unions. */
+ if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
+ && GET_MODE_SIZE (mode) == GET_MODE_SIZE (TYPE_MODE (type)))
TYPE_MODE (type) = mode;
- else
- TYPE_MODE (type) = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);
/* If structure's known alignment is less than what the scalar
mode would need, and it matters, then stick with BLKmode. */
result will fit in sizetype. We will get more efficient code using
sizetype, so we force a conversion. */
TYPE_SIZE_UNIT (type)
- = convert (sizetype,
- size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
- bitsize_unit_node));
+ = fold_convert (sizetype,
+ size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
+ bitsize_unit_node));
if (TYPE_SIZE (type) != 0)
{
TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
- TYPE_SIZE_UNIT (type)
- = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN (type) / BITS_PER_UNIT);
+ TYPE_SIZE_UNIT (type) = round_up (TYPE_SIZE_UNIT (type),
+ TYPE_ALIGN_UNIT (type));
}
/* Evaluate nonconstant sizes only once, either now or as soon as safe. */
void
layout_type (tree type)
{
- if (type == 0)
- abort ();
+ gcc_assert (type);
+
+ if (type == error_mark_node)
+ return;
/* Do nothing if type has been laid out before. */
if (TYPE_SIZE (type))
case LANG_TYPE:
/* This kind of type is the responsibility
of the language-specific code. */
- abort ();
+ gcc_unreachable ();
case BOOLEAN_TYPE: /* Used for Java, Pascal, and Chill. */
if (TYPE_PRECISION (type) == 0)
case CHAR_TYPE:
if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
&& tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
- TREE_UNSIGNED (type) = 1;
+ TYPE_UNSIGNED (type) = 1;
TYPE_MODE (type) = smallest_mode_for_size (TYPE_PRECISION (type),
MODE_INT);
break;
case COMPLEX_TYPE:
- TREE_UNSIGNED (type) = TREE_UNSIGNED (TREE_TYPE (type));
+ TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
TYPE_MODE (type)
= mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
- (TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
- ? MODE_COMPLEX_INT : MODE_COMPLEX_FLOAT),
+ (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
+ ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
0);
TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
case VECTOR_TYPE:
{
- tree subtype;
+ int nunits = TYPE_VECTOR_SUBPARTS (type);
+ tree nunits_tree = build_int_cst (NULL_TREE, nunits);
+ tree innertype = TREE_TYPE (type);
+
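+ /* The number of vector elements must be a power of two: N & (N - 1)
+ is zero exactly when N is a power of two. */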
+ gcc_assert (!(nunits & (nunits - 1)));
+
+ /* Find an appropriate mode for the vector type. */
+ if (TYPE_MODE (type) == VOIDmode)
+ {
+ enum machine_mode innermode = TYPE_MODE (innertype);
+ enum machine_mode mode;
- subtype = TREE_TYPE (type);
- TREE_UNSIGNED (type) = TREE_UNSIGNED (subtype);
- TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
- TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
+ /* First, look for a supported vector type. */
+ if (GET_MODE_CLASS (innermode) == MODE_FLOAT)
+ mode = MIN_MODE_VECTOR_FLOAT;
+ else
+ mode = MIN_MODE_VECTOR_INT;
+
+ for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_NUNITS (mode) == nunits
+ && GET_MODE_INNER (mode) == innermode
+ && targetm.vector_mode_supported_p (mode))
+ break;
+
+ /* For integers, try mapping it to a same-sized scalar mode. */
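+ /* For example, four 16-bit elements for which no corresponding
+ vector mode is supported may be mapped to a 64-bit integer mode,
+ if the target provides one. */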
+ if (mode == VOIDmode
+ && GET_MODE_CLASS (innermode) == MODE_INT)
+ mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
+ MODE_INT, 0);
+
+ if (mode == VOIDmode || !have_regs_of_mode[mode])
+ TYPE_MODE (type) = BLKmode;
+ else
+ TYPE_MODE (type) = mode;
+ }
+
+ TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
+ TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
+ TYPE_SIZE_UNIT (innertype),
+ nunits_tree, 0);
+ TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
+ nunits_tree, 0);
+
+ /* Always naturally align vectors. This prevents ABI changes
+ depending on whether or not native vector modes are supported. */
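+ /* A four-element float vector, for instance, is given 128-bit
+ alignment whether or not a native 128-bit vector mode exists. */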
+ TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
+ break;
}
- break;
case VOID_TYPE:
/* This is an incomplete type and so doesn't have a size. */
TYPE_SIZE (type) = bitsize_int (nbits);
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
- TREE_UNSIGNED (type) = 1;
+ TYPE_UNSIGNED (type) = 1;
TYPE_PRECISION (type) = nbits;
}
break;
/* The initial subtraction should happen in the original type so
that (possible) negative values are handled appropriately. */
length = size_binop (PLUS_EXPR, size_one_node,
- convert (sizetype,
- fold (build (MINUS_EXPR,
- TREE_TYPE (lb),
- ub, lb))));
+ fold_convert (sizetype,
+ fold (build2 (MINUS_EXPR,
+ TREE_TYPE (lb),
+ ub, lb))));
/* Special handling for arrays of bits (for Chill). */
element_size = TYPE_SIZE (element);
sure the size is never negative. We should really do this
if *either* bound is non-constant, but this is the best
compromise between C and Ada. */
- if (! TREE_UNSIGNED (sizetype)
+ if (!TYPE_UNSIGNED (sizetype)
&& TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST
&& TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST)
length = size_binop (MAX_EXPR, length, size_zero_node);
TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
- convert (bitsizetype, length));
+ fold_convert (bitsizetype,
+ length));
/* If we know the size of the element, calculate the total
size directly, rather than do some division thing below.
break;
case SET_TYPE: /* Used by Chill and Pascal. */
- if (TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST
- || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST)
- abort ();
- else
- {
+ {
+ unsigned int alignment;
+ HOST_WIDE_INT size_in_bits;
+ HOST_WIDE_INT rounded_size;
+
+ gcc_assert (TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
+ == INTEGER_CST);
+ gcc_assert (TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type)))
+ == INTEGER_CST);
+
#ifndef SET_WORD_SIZE
#define SET_WORD_SIZE BITS_PER_WORD
#endif
- unsigned int alignment
- = set_alignment ? set_alignment : SET_WORD_SIZE;
- HOST_WIDE_INT size_in_bits
- = (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)
- - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1);
- HOST_WIDE_INT rounded_size
- = ((size_in_bits + alignment - 1) / alignment) * alignment;
-
- if (rounded_size > (int) alignment)
- TYPE_MODE (type) = BLKmode;
- else
- TYPE_MODE (type) = mode_for_size (alignment, MODE_INT, 1);
-
- TYPE_SIZE (type) = bitsize_int (rounded_size);
- TYPE_SIZE_UNIT (type) = size_int (rounded_size / BITS_PER_UNIT);
- TYPE_ALIGN (type) = alignment;
- TYPE_USER_ALIGN (type) = 0;
- TYPE_PRECISION (type) = size_in_bits;
- }
+ alignment = set_alignment ? set_alignment : SET_WORD_SIZE;
+ size_in_bits
+ = (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)
+ - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1);
+ rounded_size
+ = ((size_in_bits + alignment - 1) / alignment) * alignment;
+
+ if (rounded_size > (int) alignment)
+ TYPE_MODE (type) = BLKmode;
+ else
+ TYPE_MODE (type) = mode_for_size (alignment, MODE_INT, 1);
+
+ TYPE_SIZE (type) = bitsize_int (rounded_size);
+ TYPE_SIZE_UNIT (type) = size_int (rounded_size / BITS_PER_UNIT);
+ TYPE_ALIGN (type) = alignment;
+ TYPE_USER_ALIGN (type) = 0;
+ TYPE_PRECISION (type) = size_in_bits;
+ }
break;
case FILE_TYPE:
break;
default:
- abort ();
+ gcc_unreachable ();
}
/* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For
&& TREE_CODE (type) != QUAL_UNION_TYPE)
finalize_type_size (type);
- /* If this type is created before sizetype has been permanently set,
- record it so set_sizetype can fix it up. */
- if (! sizetype_set)
- early_type_list = tree_cons (NULL_TREE, type, early_type_list);
-
/* If an alias set has been set for this aggregate when it was incomplete,
force it into alias set 0.
This is too conservative, but we cannot call record_component_aliases
value to enable integer types to be created. */
void
-initialize_sizetypes (void)
+initialize_sizetypes (bool signed_p)
{
tree t = make_node (INTEGER_TYPE);
- /* Set this so we do something reasonable for the build_int_2 calls
- below. */
- integer_type_node = t;
-
TYPE_MODE (t) = SImode;
TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
TYPE_USER_ALIGN (t) = 0;
- TYPE_SIZE (t) = build_int_2 (GET_MODE_BITSIZE (SImode), 0);
- TYPE_SIZE_UNIT (t) = build_int_2 (GET_MODE_SIZE (SImode), 0);
- TREE_UNSIGNED (t) = 1;
- TYPE_PRECISION (t) = GET_MODE_BITSIZE (SImode);
- TYPE_MIN_VALUE (t) = build_int_2 (0, 0);
TYPE_IS_SIZETYPE (t) = 1;
+ TYPE_UNSIGNED (t) = !signed_p;
+ TYPE_SIZE (t) = build_int_cst (t, GET_MODE_BITSIZE (SImode));
+ TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
+ TYPE_PRECISION (t) = GET_MODE_BITSIZE (SImode);
+ TYPE_MIN_VALUE (t) = build_int_cst (t, 0);
/* 1000 avoids problems with possible overflow and is certainly
larger than any size value we'd want to be storing. */
- TYPE_MAX_VALUE (t) = build_int_2 (1000, 0);
+ TYPE_MAX_VALUE (t) = build_int_cst (t, 1000);
- /* These two must be different nodes because of the caching done in
- size_int_wide. */
sizetype = t;
- bitsizetype = copy_node (t);
- integer_type_node = 0;
+ bitsizetype = build_distinct_type_copy (t);
}
-/* Set sizetype to TYPE, and initialize *sizetype accordingly.
- Also update the type of any standard type's sizes made so far. */
+/* Make sizetype a version of TYPE, and initialize *sizetype
+ accordingly. We do this by overwriting the stub sizetype and
+ bitsizetype nodes created by initialize_sizetypes. This makes sure
+ that (a) anything stubby about them no longer exists and (b) any
+ INTEGER_CSTs created with such a type remain valid. */
void
set_sizetype (tree type)
precision. */
int precision = MIN (oprecision + BITS_PER_UNIT_LOG + 1,
2 * HOST_BITS_PER_WIDE_INT);
- unsigned int i;
tree t;
- if (sizetype_set)
- abort ();
-
- /* Make copies of nodes since we'll be setting TYPE_IS_SIZETYPE. */
- sizetype = copy_node (type);
- TYPE_ORIG_SIZE_TYPE (sizetype) = type;
- TYPE_IS_SIZETYPE (sizetype) = 1;
- bitsizetype = make_node (INTEGER_TYPE);
- TYPE_NAME (bitsizetype) = TYPE_NAME (type);
- TYPE_PRECISION (bitsizetype) = precision;
- TYPE_IS_SIZETYPE (bitsizetype) = 1;
-
- if (TREE_UNSIGNED (type))
- fixup_unsigned_type (bitsizetype);
- else
- fixup_signed_type (bitsizetype);
+ gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));
- layout_type (bitsizetype);
-
- if (TREE_UNSIGNED (type))
+ t = build_distinct_type_copy (type);
+ /* We do want to use sizetype's cache, as we will be replacing that
+ type. */
+ TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
+ TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
+ TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
+ TYPE_UID (t) = TYPE_UID (sizetype);
+ TYPE_IS_SIZETYPE (t) = 1;
+
+ /* Replace our original stub sizetype. */
+ memcpy (sizetype, t, tree_size (sizetype));
+ TYPE_MAIN_VARIANT (sizetype) = sizetype;
+
+ t = make_node (INTEGER_TYPE);
+ TYPE_NAME (t) = get_identifier ("bit_size_type");
+ /* We do want to use bitsizetype's cache, as we will be replacing that
+ type. */
+ TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
+ TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
+ TYPE_PRECISION (t) = precision;
+ TYPE_UID (t) = TYPE_UID (bitsizetype);
+ TYPE_IS_SIZETYPE (t) = 1;
+ /* Replace our original stub bitsizetype. */
+ memcpy (bitsizetype, t, tree_size (bitsizetype));
+
+ if (TYPE_UNSIGNED (type))
{
- usizetype = sizetype;
- ubitsizetype = bitsizetype;
- ssizetype = copy_node (make_signed_type (oprecision));
- sbitsizetype = copy_node (make_signed_type (precision));
+ fixup_unsigned_type (bitsizetype);
+ ssizetype = build_distinct_type_copy (make_signed_type (oprecision));
+ TYPE_IS_SIZETYPE (ssizetype) = 1;
+ sbitsizetype = build_distinct_type_copy (make_signed_type (precision));
+ TYPE_IS_SIZETYPE (sbitsizetype) = 1;
}
else
{
+ fixup_signed_type (bitsizetype);
ssizetype = sizetype;
sbitsizetype = bitsizetype;
- usizetype = copy_node (make_unsigned_type (oprecision));
- ubitsizetype = copy_node (make_unsigned_type (precision));
}
-
- TYPE_NAME (bitsizetype) = get_identifier ("bit_size_type");
-
- /* Show is a sizetype, is a main type, and has no pointers to it. */
- for (i = 0; i < ARRAY_SIZE (sizetype_tab); i++)
- {
- TYPE_IS_SIZETYPE (sizetype_tab[i]) = 1;
- TYPE_MAIN_VARIANT (sizetype_tab[i]) = sizetype_tab[i];
- TYPE_NEXT_VARIANT (sizetype_tab[i]) = 0;
- TYPE_POINTER_TO (sizetype_tab[i]) = 0;
- TYPE_REFERENCE_TO (sizetype_tab[i]) = 0;
- }
-
- /* Go down each of the types we already made and set the proper type
- for the sizes in them. */
- for (t = early_type_list; t != 0; t = TREE_CHAIN (t))
- {
- if (TREE_CODE (TREE_VALUE (t)) != INTEGER_TYPE
- && TREE_CODE (TREE_VALUE (t)) != BOOLEAN_TYPE)
- abort ();
-
- TREE_TYPE (TYPE_SIZE (TREE_VALUE (t))) = bitsizetype;
- TREE_TYPE (TYPE_SIZE_UNIT (TREE_VALUE (t))) = sizetype;
- }
-
- early_type_list = 0;
- sizetype_set = 1;
}
\f
/* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE,
if (is_unsigned)
{
- min_value = build_int_2 (0, 0);
- max_value
- = build_int_2 (precision - HOST_BITS_PER_WIDE_INT >= 0
- ? -1 : ((HOST_WIDE_INT) 1 << precision) - 1,
- precision - HOST_BITS_PER_WIDE_INT > 0
- ? ((unsigned HOST_WIDE_INT) ~0
- >> (HOST_BITS_PER_WIDE_INT
- - (precision - HOST_BITS_PER_WIDE_INT)))
- : 0);
+ min_value = build_int_cst (type, 0);
+ max_value
+ = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
+ ? -1
+ : ((HOST_WIDE_INT) 1 << precision) - 1,
+ precision - HOST_BITS_PER_WIDE_INT > 0
+ ? ((unsigned HOST_WIDE_INT) ~0
+ >> (HOST_BITS_PER_WIDE_INT
+ - (precision - HOST_BITS_PER_WIDE_INT)))
+ : 0);
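+ /* For example, an 8-bit unsigned type gets the range [0, 255]. */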
}
else
{
- min_value
- = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
- ? 0 : (HOST_WIDE_INT) (-1) << (precision - 1)),
- (((HOST_WIDE_INT) (-1)
- << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
- ? precision - HOST_BITS_PER_WIDE_INT - 1
- : 0))));
+ min_value
+ = build_int_cst_wide (type,
+ (precision - HOST_BITS_PER_WIDE_INT > 0
+ ? 0
+ : (HOST_WIDE_INT) (-1) << (precision - 1)),
+ (((HOST_WIDE_INT) (-1)
+ << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? precision - HOST_BITS_PER_WIDE_INT - 1
+ : 0))));
max_value
- = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
- ? -1 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
- (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
- ? (((HOST_WIDE_INT) 1
- << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
- : 0));
+ = build_int_cst_wide (type,
+ (precision - HOST_BITS_PER_WIDE_INT > 0
+ ? -1
+ : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
+ (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? (((HOST_WIDE_INT) 1
+ << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
+ : 0));
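+ /* For example, an 8-bit signed type gets the range [-128, 127]. */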
}
- TREE_TYPE (min_value) = type;
- TREE_TYPE (max_value) = type;
TYPE_MIN_VALUE (type) = min_value;
TYPE_MAX_VALUE (type) = max_value;
}
if (precision > HOST_BITS_PER_WIDE_INT * 2)
precision = HOST_BITS_PER_WIDE_INT * 2;
- set_min_and_max_values_for_integral_type (type, precision,
+ set_min_and_max_values_for_integral_type (type, precision,
/*is_unsigned=*/false);
/* Lay out the type: set its alignment, size, etc. */
if (precision > HOST_BITS_PER_WIDE_INT * 2)
precision = HOST_BITS_PER_WIDE_INT * 2;
- set_min_and_max_values_for_integral_type (type, precision,
+ TYPE_UNSIGNED (type) = 1;
+
+ set_min_and_max_values_for_integral_type (type, precision,
/*is_unsigned=*/true);
/* Lay out the type: set its alignment, size, etc. */
}
/* Gets minimal and maximal values for MODE (signed or unsigned depending on
- SIGN). */
+ SIGN). The returned constants are made to be usable in TARGET_MODE. */
void
-get_mode_bounds (enum machine_mode mode, int sign, rtx *mmin, rtx *mmax)
+get_mode_bounds (enum machine_mode mode, int sign,
+ enum machine_mode target_mode,
+ rtx *mmin, rtx *mmax)
{
- int size = GET_MODE_BITSIZE (mode);
+ unsigned size = GET_MODE_BITSIZE (mode);
+ unsigned HOST_WIDE_INT min_val, max_val;
- if (size > HOST_BITS_PER_WIDE_INT)
- abort ();
+ gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
if (sign)
{
- *mmin = GEN_INT (-((unsigned HOST_WIDE_INT) 1 << (size - 1)));
- *mmax = GEN_INT (((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1);
+ min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
+ max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
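+ /* E.g. for an 8-bit mode, min_val is -128 and max_val is 127. */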
}
else
{
- *mmin = const0_rtx;
- *mmax = GEN_INT (((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1);
+ min_val = 0;
+ max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
}
+
+ *mmin = GEN_INT (trunc_int_for_mode (min_val, target_mode));
+ *mmax = GEN_INT (trunc_int_for_mode (max_val, target_mode));
}
#include "gt-stor-layout.h"