/* C-compiler utilities for types and variables storage layout
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1996, 1998,
- 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
This file is part of GCC.
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
HOST_WIDE_INT, tree);
#endif
+static void force_type_save_exprs_1 (tree);
static unsigned int update_alignment_for_field (record_layout_info, tree,
unsigned int);
extern void debug_rli (record_layout_info);
return chain;
}
-/* Return nonzero if EXPR is present on the pending sizes list. */
-
-int
-is_pending_size (tree expr)
-{
- tree t;
-
- for (t = pending_sizes; t; t = TREE_CHAIN (t))
- if (TREE_VALUE (t) == expr)
- return 1;
- return 0;
-}
-
/* Add EXPR to the pending sizes list. */
void
just return SIZE unchanged. Likewise for self-referential sizes and
constant sizes. */
if (TREE_CONSTANT (size)
- || (*lang_hooks.decls.global_bindings_p) () < 0
+ || lang_hooks.decls.global_bindings_p () < 0
|| CONTAINS_PLACEHOLDER_P (size))
return size;
- if (TREE_CODE (size) == MINUS_EXPR && integer_onep (TREE_OPERAND (size, 1)))
- /* If this is the upper bound of a C array, leave the minus 1 outside
- the SAVE_EXPR so it can be folded away. */
- TREE_OPERAND (size, 0) = save = save_expr (TREE_OPERAND (size, 0));
- else
- size = save = save_expr (size);
+ size = save_expr (size);
/* If an array with a variable number of elements is declared, and
the elements require destruction, we will emit a cleanup for the
`unsaved', i.e., all SAVE_EXPRs are recalculated. However, we do
not wish to do that here; the array-size is the same in both
places. */
+ save = skip_simple_arithmetic (size);
if (TREE_CODE (save) == SAVE_EXPR)
SAVE_EXPR_PERSISTENT_P (save) = 1;
- if ((*lang_hooks.decls.global_bindings_p) ())
+ if (lang_hooks.decls.global_bindings_p ())
{
if (TREE_CONSTANT (size))
error ("type size can't be explicitly evaluated");
return size;
}
+
+/* Given a type T, force elaboration of any SAVE_EXPRs used in the definition
+ of that type.  Recurses through aggregate and array types so that every
+ variable size or bound reachable from T is expanded exactly once.  */
+
+void
+force_type_save_exprs (tree t)
+{
+ tree field;
+
+ switch (TREE_CODE (t))
+ {
+ case ERROR_MARK:
+ return;
+
+ case ARRAY_TYPE:
+ case SET_TYPE:
+ case VECTOR_TYPE:
+ /* It's probably overly-conservative to force elaboration of bounds and
+ also the sizes, but it's better to be safe than sorry. */
+ force_type_save_exprs_1 (TYPE_MIN_VALUE (TYPE_DOMAIN (t)));
+ force_type_save_exprs_1 (TYPE_MAX_VALUE (TYPE_DOMAIN (t)));
+ break;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ /* Both a field's own type and its position within the aggregate may
+ contain SAVE_EXPRs; handle each FIELD_DECL for both.  */
+ for (field = TYPE_FIELDS (t); field; field = TREE_CHAIN (field))
+ if (TREE_CODE (field) == FIELD_DECL)
+ {
+ force_type_save_exprs (TREE_TYPE (field));
+ force_type_save_exprs_1 (DECL_FIELD_OFFSET (field));
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Any type, whatever its code, may have variable sizes.  */
+ force_type_save_exprs_1 (TYPE_SIZE (t));
+ force_type_save_exprs_1 (TYPE_SIZE_UNIT (t));
+}
+
+/* Utility routine of above, to verify that SIZE has been elaborated and
+ do so if it is a SAVE_EXPR and has not been.  SIZE may be null or an
+ arbitrary size expression wrapping a SAVE_EXPR in simple arithmetic.  */
+
+static void
+force_type_save_exprs_1 (tree size)
+{
+ if (size
+ && (size = skip_simple_arithmetic (size))
+ && TREE_CODE (size) == SAVE_EXPR
+ && !SAVE_EXPR_RTL (size))
+ expand_expr (size, NULL_RTX, VOIDmode, 0);
+}
\f
#ifndef MAX_FIXED_MODE_SIZE
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
#endif
-/* Return the machine mode to use for a nonscalar of SIZE bits.
- The mode must be in class CLASS, and have exactly that many bits.
- If LIMIT is nonzero, modes of wider than MAX_FIXED_MODE_SIZE will not
- be used. */
+/* Return the machine mode to use for a nonscalar of SIZE bits. The
+ mode must be in class CLASS, and have exactly that many value bits;
+ it may have padding as well. If LIMIT is nonzero, modes of wider
+ than MAX_FIXED_MODE_SIZE will not be used. */
enum machine_mode
mode_for_size (unsigned int size, enum mode_class class, int limit)
/* Get the first mode which has this size, in the specified class. */
for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
- if (GET_MODE_BITSIZE (mode) == size)
+ if (GET_MODE_PRECISION (mode) == size)
return mode;
return BLKmode;
}
/* Similar, but never return BLKmode; return the narrowest mode that
- contains at least the requested number of bits. */
+ contains at least the requested number of value bits. */
enum machine_mode
smallest_mode_for_size (unsigned int size, enum mode_class class)
specified class. */
for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
- if (GET_MODE_BITSIZE (mode) >= size)
+ if (GET_MODE_PRECISION (mode) >= size)
return mode;
abort ();
unsigned int
get_mode_alignment (enum machine_mode mode)
{
- unsigned int alignment;
-
- if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
- || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
- alignment = GET_MODE_UNIT_SIZE (mode);
- else
- alignment = GET_MODE_SIZE (mode);
-
- /* Extract the LSB of the size. */
- alignment = alignment & -alignment;
- alignment *= BITS_PER_UNIT;
-
- alignment = MIN (BIGGEST_ALIGNMENT, MAX (1, alignment));
- return alignment;
+ /* Use the precomputed per-mode byte alignment, converted to bits and
+ clamped to the range [1, BIGGEST_ALIGNMENT].  */
+ return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}
/* Return the value of VALUE, rounded up to a multiple of DIVISOR.
if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
{
DECL_ALIGN (decl) = TYPE_ALIGN (type);
- DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
+ if (TREE_CODE (decl) == FIELD_DECL)
+ DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
}
}
size in bytes from the size in bits. If we have already set the mode,
don't set it again since we can be called twice for FIELD_DECLs. */
- TREE_UNSIGNED (decl) = TREE_UNSIGNED (type);
+ DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
if (DECL_MODE (decl) == VOIDmode)
DECL_MODE (decl) = TYPE_MODE (type);
else
/* For fields, it's a bit more complicated... */
{
+ bool old_user_align = DECL_USER_ALIGN (decl);
+
if (DECL_BIT_FIELD (decl))
{
DECL_BIT_FIELD_TYPE (decl) = type;
field. */
if (integer_zerop (DECL_SIZE (decl))
&& ! DECL_PACKED (decl)
- && ! (*targetm.ms_bitfield_layout_p) (DECL_FIELD_CONTEXT (decl)))
+ && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
{
#ifdef PCC_BITFIELD_TYPE_MATTERS
if (PCC_BITFIELD_TYPE_MATTERS)
enum machine_mode xmode
= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
- if (xmode != BLKmode && known_align >= GET_MODE_ALIGNMENT (xmode))
+ if (xmode != BLKmode
+ && (known_align == 0
+ || known_align >= GET_MODE_ALIGNMENT (xmode)))
{
DECL_ALIGN (decl) = MAX (GET_MODE_ALIGNMENT (xmode),
DECL_ALIGN (decl));
}
else if (DECL_PACKED (decl) && DECL_USER_ALIGN (decl))
/* Don't touch DECL_ALIGN. For other packed fields, go ahead and
- round up; we'll reduce it again below. */;
+ round up; we'll reduce it again below. We want packing to
+ supersede USER_ALIGN inherited from the type, but defer to
+ alignment explicitly specified on the field decl. */;
else
do_type_align (type, decl);
/* If the field is of variable size, we can't misalign it since we
have no way to make a temporary to align the result. But this
isn't an issue if the decl is not addressable. Likewise if it
- is of unknown size. */
+ is of unknown size.
+
+ Note that do_type_align may set DECL_USER_ALIGN, so we need to
+ check old_user_align instead. */
if (DECL_PACKED (decl)
- && !DECL_USER_ALIGN (decl)
+ && !old_user_align
&& (DECL_NONADDRESSABLE_P (decl)
|| DECL_SIZE_UNIT (decl) == 0
|| TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST))
int size_as_int = TREE_INT_CST_LOW (size);
if (compare_tree_int (size, size_as_int) == 0)
- warning_with_decl (decl, "size of `%s' is %d bytes", size_as_int);
+ warning ("%Jsize of '%D' is %d bytes", decl, decl, size_as_int);
else
- warning_with_decl (decl, "size of `%s' is larger than %d bytes",
- larger_than_size);
+ warning ("%Jsize of '%D' is larger than %d bytes",
+ decl, decl, larger_than_size);
}
}
record_layout_info
start_record_layout (tree t)
{
- record_layout_info rli
- = (record_layout_info) xmalloc (sizeof (struct record_layout_info_s));
+ record_layout_info rli = xmalloc (sizeof (struct record_layout_info_s));
rli->t = t;
/* Record must have at least as much alignment as any field.
Otherwise, the alignment of the field within the record is
meaningless. */
- if (is_bitfield && (* targetm.ms_bitfield_layout_p) (rli->t))
+ if (is_bitfield && targetm.ms_bitfield_layout_p (rli->t))
{
/* Here, the alignment of the underlying type of a bitfield can
affect the alignment of a record; even a zero-sized field
if (TYPE_ALIGN (type) > desired_align)
{
if (STRICT_ALIGNMENT)
- warning_with_decl (field, "packed attribute causes inefficient alignment for `%s'");
+ warning ("%Jpacked attribute causes inefficient alignment "
+ "for '%D'", field, field);
else
- warning_with_decl (field, "packed attribute is unnecessary for `%s'");
+ warning ("%Jpacked attribute is unnecessary for '%D'",
+ field, field);
}
}
else
Bump the cumulative size to multiple of field alignment. */
if (warn_padded)
- warning_with_decl (field, "padding struct to align `%s'");
+ warning ("%Jpadding struct to align '%D'", field, field);
/* If the alignment is still within offset_align, just align
the bit position. */
variable-sized fields, we need not worry about compatibility. */
#ifdef PCC_BITFIELD_TYPE_MATTERS
if (PCC_BITFIELD_TYPE_MATTERS
- && ! (* targetm.ms_bitfield_layout_p) (rli->t)
+ && ! targetm.ms_bitfield_layout_p (rli->t)
&& TREE_CODE (field) == FIELD_DECL
&& type != error_mark_node
&& DECL_BIT_FIELD (field)
#ifdef BITFIELD_NBYTES_LIMITED
if (BITFIELD_NBYTES_LIMITED
- && ! (* targetm.ms_bitfield_layout_p) (rli->t)
+ && ! targetm.ms_bitfield_layout_p (rli->t)
&& TREE_CODE (field) == FIELD_DECL
&& type != error_mark_node
&& DECL_BIT_FIELD_TYPE (field)
Note: for compatibility, we use the type size, not the type alignment
to determine alignment, since that matches the documentation */
- if ((* targetm.ms_bitfield_layout_p) (rli->t)
+ if (targetm.ms_bitfield_layout_p (rli->t)
&& ((DECL_BIT_FIELD_TYPE (field) && ! DECL_PACKED (field))
|| (rli->prev_field && ! DECL_PACKED (rli->prev_field))))
{
rli->prev_field = NULL;
}
+ rli->offset_align = tree_low_cst (TYPE_SIZE (type), 0);
normalize_rli (rli);
}
if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
|| (TYPE_MODE (TREE_TYPE (field)) == BLKmode
- && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field)))
+ && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
+ && !(TYPE_SIZE (TREE_TYPE (field)) != 0
+ && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
|| ! host_integerp (bit_position (field), 1)
|| DECL_SIZE (field) == 0
|| ! host_integerp (DECL_SIZE (field), 1))
case CHAR_TYPE:
if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
&& tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
- TREE_UNSIGNED (type) = 1;
+ TYPE_UNSIGNED (type) = 1;
TYPE_MODE (type) = smallest_mode_for_size (TYPE_PRECISION (type),
MODE_INT);
break;
case COMPLEX_TYPE:
- TREE_UNSIGNED (type) = TREE_UNSIGNED (TREE_TYPE (type));
+ TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
TYPE_MODE (type)
= mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
- (TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
- ? MODE_COMPLEX_INT : MODE_COMPLEX_FLOAT),
+ (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
+ ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
0);
TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
break;
case VECTOR_TYPE:
- {
- tree subtype;
-
- subtype = TREE_TYPE (type);
- TREE_UNSIGNED (type) = TREE_UNSIGNED (subtype);
- TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
- TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
- }
+ TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
+ TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
+ TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
break;
case VOID_TYPE:
case FUNCTION_TYPE:
case METHOD_TYPE:
- TYPE_MODE (type) = mode_for_size (2 * POINTER_SIZE, MODE_INT, 0);
- TYPE_SIZE (type) = bitsize_int (2 * POINTER_SIZE);
- TYPE_SIZE_UNIT (type) = size_int ((2 * POINTER_SIZE) / BITS_PER_UNIT);
+ /* It's hard to see what the mode and size of a function ought to
+ be, but we do know the alignment is FUNCTION_BOUNDARY, so
+ make it consistent with that. */
+ TYPE_MODE (type) = mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0);
+ TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
+ TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
break;
case POINTER_TYPE:
TYPE_SIZE (type) = bitsize_int (nbits);
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
- TREE_UNSIGNED (type) = 1;
+ TYPE_UNSIGNED (type) = 1;
TYPE_PRECISION (type) = nbits;
}
break;
sure the size is never negative. We should really do this
if *either* bound is non-constant, but this is the best
compromise between C and Ada. */
- if (! TREE_UNSIGNED (sizetype)
+ if (!TYPE_UNSIGNED (sizetype)
&& TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST
&& TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST)
length = size_binop (MAX_EXPR, length, size_zero_node);
TYPE_USER_ALIGN (t) = 0;
TYPE_SIZE (t) = build_int_2 (GET_MODE_BITSIZE (SImode), 0);
TYPE_SIZE_UNIT (t) = build_int_2 (GET_MODE_SIZE (SImode), 0);
- TREE_UNSIGNED (t) = 1;
+ TYPE_UNSIGNED (t) = 1;
TYPE_PRECISION (t) = GET_MODE_BITSIZE (SImode);
TYPE_MIN_VALUE (t) = build_int_2 (0, 0);
TYPE_IS_SIZETYPE (t) = 1;
/* Make copies of nodes since we'll be setting TYPE_IS_SIZETYPE. */
sizetype = copy_node (type);
- TYPE_DOMAIN (sizetype) = type;
+ TYPE_ORIG_SIZE_TYPE (sizetype) = type;
TYPE_IS_SIZETYPE (sizetype) = 1;
bitsizetype = make_node (INTEGER_TYPE);
TYPE_NAME (bitsizetype) = TYPE_NAME (type);
TYPE_PRECISION (bitsizetype) = precision;
TYPE_IS_SIZETYPE (bitsizetype) = 1;
- if (TREE_UNSIGNED (type))
+ if (TYPE_UNSIGNED (type))
fixup_unsigned_type (bitsizetype);
else
fixup_signed_type (bitsizetype);
layout_type (bitsizetype);
- if (TREE_UNSIGNED (type))
+ if (TYPE_UNSIGNED (type))
{
usizetype = sizetype;
ubitsizetype = bitsizetype;
for the sizes in them. */
for (t = early_type_list; t != 0; t = TREE_CHAIN (t))
{
- if (TREE_CODE (TREE_VALUE (t)) != INTEGER_TYPE)
+ if (TREE_CODE (TREE_VALUE (t)) != INTEGER_TYPE
+ && TREE_CODE (TREE_VALUE (t)) != BOOLEAN_TYPE)
abort ();
TREE_TYPE (TYPE_SIZE (TREE_VALUE (t))) = bitsizetype;
sizetype_set = 1;
}
\f
+/* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE,
+ BOOLEAN_TYPE, or CHAR_TYPE.  Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
+ for TYPE, based on the PRECISION and whether or not the TYPE
+ IS_UNSIGNED.  PRECISION need not correspond to a width supported
+ natively by the hardware; for example, on a machine with 8-bit,
+ 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
+ 61.  The values are built as two-word INTEGER_CSTs, so PRECISION
+ up to 2 * HOST_BITS_PER_WIDE_INT is representable.  */
+
+void
+set_min_and_max_values_for_integral_type (tree type,
+ int precision,
+ bool is_unsigned)
+{
+ tree min_value;
+ tree max_value;
+
+ if (is_unsigned)
+ {
+ /* Unsigned: min is 0, max is all-ones in the low PRECISION bits,
+ split across the low and high words of the INTEGER_CST.  */
+ min_value = build_int_2 (0, 0);
+ max_value
+ = build_int_2 (precision - HOST_BITS_PER_WIDE_INT >= 0
+ ? -1 : ((HOST_WIDE_INT) 1 << precision) - 1,
+ precision - HOST_BITS_PER_WIDE_INT > 0
+ ? ((unsigned HOST_WIDE_INT) ~0
+ >> (HOST_BITS_PER_WIDE_INT
+ - (precision - HOST_BITS_PER_WIDE_INT)))
+ : 0);
+ }
+ else
+ {
+ /* Signed: min is -2**(PRECISION-1), max is 2**(PRECISION-1) - 1,
+ each built word by word depending on which word the sign bit
+ falls in.  */
+ min_value
+ = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
+ ? 0 : (HOST_WIDE_INT) (-1) << (precision - 1)),
+ (((HOST_WIDE_INT) (-1)
+ << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? precision - HOST_BITS_PER_WIDE_INT - 1
+ : 0))));
+ max_value
+ = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
+ ? -1 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
+ (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? (((HOST_WIDE_INT) 1
+ << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
+ : 0));
+ }
+
+ TREE_TYPE (min_value) = type;
+ TREE_TYPE (max_value) = type;
+ TYPE_MIN_VALUE (type) = min_value;
+ TYPE_MAX_VALUE (type) = max_value;
+}
+
/* Set the extreme values of TYPE based on its precision in bits,
then lay it out. Used when make_signed_type won't do
because the tree code is not INTEGER_TYPE.
if (precision > HOST_BITS_PER_WIDE_INT * 2)
precision = HOST_BITS_PER_WIDE_INT * 2;
- TYPE_MIN_VALUE (type)
- = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
- ? 0 : (HOST_WIDE_INT) (-1) << (precision - 1)),
- (((HOST_WIDE_INT) (-1)
- << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
- ? precision - HOST_BITS_PER_WIDE_INT - 1
- : 0))));
- TYPE_MAX_VALUE (type)
- = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
- ? -1 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
- (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
- ? (((HOST_WIDE_INT) 1
- << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
- : 0));
-
- TREE_TYPE (TYPE_MIN_VALUE (type)) = type;
- TREE_TYPE (TYPE_MAX_VALUE (type)) = type;
+ set_min_and_max_values_for_integral_type (type, precision,
+ /*is_unsigned=*/false);
/* Lay out the type: set its alignment, size, etc. */
layout_type (type);
if (precision > HOST_BITS_PER_WIDE_INT * 2)
precision = HOST_BITS_PER_WIDE_INT * 2;
- TYPE_MIN_VALUE (type) = build_int_2 (0, 0);
- TYPE_MAX_VALUE (type)
- = build_int_2 (precision - HOST_BITS_PER_WIDE_INT >= 0
- ? -1 : ((HOST_WIDE_INT) 1 << precision) - 1,
- precision - HOST_BITS_PER_WIDE_INT > 0
- ? ((unsigned HOST_WIDE_INT) ~0
- >> (HOST_BITS_PER_WIDE_INT
- - (precision - HOST_BITS_PER_WIDE_INT)))
- : 0);
- TREE_TYPE (TYPE_MIN_VALUE (type)) = type;
- TREE_TYPE (TYPE_MAX_VALUE (type)) = type;
+ set_min_and_max_values_for_integral_type (type, precision,
+ /*is_unsigned=*/true);
/* Lay out the type: set its alignment, size, etc. */
layout_type (type);
return mode;
}
+/* Gets minimal and maximal values for MODE (signed or unsigned depending on
+ SIGN).  Stores the bounds as CONST_INT rtxes in *MMIN and *MMAX.  Aborts
+ if MODE is wider than a HOST_WIDE_INT, since the bounds would not fit.  */
+
+void
+get_mode_bounds (enum machine_mode mode, int sign, rtx *mmin, rtx *mmax)
+{
+ int size = GET_MODE_BITSIZE (mode);
+
+ if (size > HOST_BITS_PER_WIDE_INT)
+ abort ();
+
+ if (sign)
+ {
+ *mmin = GEN_INT (-((unsigned HOST_WIDE_INT) 1 << (size - 1)));
+ *mmax = GEN_INT (((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1);
+ }
+ else
+ {
+ *mmin = const0_rtx;
+ /* Shift in two steps so that size == HOST_BITS_PER_WIDE_INT does not
+ require a full-width shift, which would be undefined behavior.  */
+ *mmax = GEN_INT (((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1);
+ }
+}
+
#include "gt-stor-layout.h"