X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=gcc%2Fstor-layout.c;h=c58237f66e6345dc52d8290c26fe8653ca294702;hp=156702f652f7567c9b30ed8d8986b6c8b0cd1af8;hb=53c218e267884350b366c89c9a67e510c7694781;hpb=00b76131e87f7da2a6ebf1e13498f5bc9f33f69f diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c index 156702f652f..c58237f66e6 100644 --- a/gcc/stor-layout.c +++ b/gcc/stor-layout.c @@ -1,6 +1,7 @@ /* C-compiler utilities for types and variables storage layout Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1996, 1998, - 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. + 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 + Free Software Foundation, Inc. This file is part of GCC. @@ -16,8 +17,8 @@ for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free -Software Foundation, 59 Temple Place - Suite 330, Boston, MA -02111-1307, USA. */ +Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA +02110-1301, USA. */ #include "config.h" @@ -30,6 +31,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "flags.h" #include "function.h" #include "expr.h" +#include "output.h" #include "toplev.h" #include "ggc.h" #include "target.h" @@ -37,24 +39,15 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "regs.h" #include "params.h" -/* Set to one when set_sizetype has been called. */ -static int sizetype_set; - -/* List of types created before set_sizetype has been called. We do not - make this a GGC root since we want these nodes to be reclaimed. */ -static tree early_type_list; - /* Data type for the expressions representing sizes of data types. It is the first integer type laid out. */ tree sizetype_tab[(int) TYPE_KIND_LAST]; /* If nonzero, this is an upper limit on alignment of structure fields. The value is measured in bits. */ -unsigned int maximum_field_alignment; - -/* If nonzero, the alignment of a bitstring or (power-)set value, in bits. - May be overridden by front-ends. */ -unsigned int set_alignment = 0; +unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT; +/* ... and its original value in bytes, specified via -fpack-struct=. */ +unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT; /* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated in Pmode, not ptr_mode. Set only by internal_reference_types @@ -113,9 +106,7 @@ put_pending_size (tree expr) void put_pending_sizes (tree chain) { - if (pending_sizes) - abort (); - + gcc_assert (!pending_sizes); pending_sizes = chain; } @@ -156,7 +147,7 @@ variable_size (tree size) if (lang_hooks.decls.global_bindings_p ()) { if (TREE_CONSTANT (size)) - error ("type size can't be explicitly evaluated"); + error ("type size can%'t be explicitly evaluated"); else error ("variable-size type declared outside of any function"); @@ -199,15 +190,16 @@ mode_for_size (unsigned int size, enum mode_class class, int limit) enum machine_mode mode_for_size_tree (tree size, enum mode_class class, int limit) { - if (TREE_CODE (size) != INTEGER_CST - || TREE_OVERFLOW (size) - /* What we really want to say here is that the size can fit in a - host integer, but we know there's no way we'd find a mode for - this many bits, so there's no point in doing the precise test. 
*/ - || compare_tree_int (size, 1000) > 0) + unsigned HOST_WIDE_INT uhwi; + unsigned int ui; + + if (!host_integerp (size, 1)) return BLKmode; - else - return mode_for_size (tree_low_cst (size, 1), class, limit); + uhwi = tree_low_cst (size, 1); + ui = uhwi; + if (uhwi != ui) + return BLKmode; + return mode_for_size (ui, class, limit); } /* Similar, but never return BLKmode; return the narrowest mode that @@ -225,7 +217,7 @@ smallest_mode_for_size (unsigned int size, enum mode_class class) if (GET_MODE_PRECISION (mode) >= size) return mode; - abort (); + gcc_unreachable (); } /* Find an integer mode of the exact same size, or BLKmode on failure. */ @@ -242,6 +234,7 @@ int_mode_for_mode (enum machine_mode mode) case MODE_COMPLEX_INT: case MODE_COMPLEX_FLOAT: case MODE_FLOAT: + case MODE_DECIMAL_FLOAT: case MODE_VECTOR_INT: case MODE_VECTOR_FLOAT: mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0); @@ -255,7 +248,7 @@ int_mode_for_mode (enum machine_mode mode) case MODE_CC: default: - abort (); + gcc_unreachable (); } return mode; @@ -307,9 +300,9 @@ layout_decl (tree decl, unsigned int known_align) if (code == CONST_DECL) return; - else if (code != VAR_DECL && code != PARM_DECL && code != RESULT_DECL - && code != TYPE_DECL && code != FIELD_DECL) - abort (); + + gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL + || code == TYPE_DECL ||code == FIELD_DECL); rtl = DECL_RTL_IF_SET (decl); @@ -337,8 +330,8 @@ layout_decl (tree decl, unsigned int known_align) } else if (DECL_SIZE_UNIT (decl) == 0) DECL_SIZE_UNIT (decl) - = convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl), - bitsize_unit_node)); + = fold_convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl), + bitsize_unit_node)); if (code != FIELD_DECL) /* For non-fields, update the alignment from the type. */ @@ -347,17 +340,22 @@ layout_decl (tree decl, unsigned int known_align) /* For fields, it's a bit more complicated... */ { bool old_user_align = DECL_USER_ALIGN (decl); + bool zero_bitfield = false; + bool packed_p = DECL_PACKED (decl); + unsigned int mfa; if (DECL_BIT_FIELD (decl)) { DECL_BIT_FIELD_TYPE (decl) = type; /* A zero-length bit-field affects the alignment of the next - field. */ + field. In essence such bit-fields are not influenced by + any packing due to #pragma pack or attribute packed. */ if (integer_zerop (DECL_SIZE (decl)) - && ! DECL_PACKED (decl) && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl))) { + zero_bitfield = true; + packed_p = false; #ifdef PCC_BITFIELD_TYPE_MATTERS if (PCC_BITFIELD_TYPE_MATTERS) do_type_align (type, decl); @@ -384,7 +382,7 @@ layout_decl (tree decl, unsigned int known_align) enum machine_mode xmode = mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1); - if (xmode != BLKmode + if (xmode != BLKmode && (known_align == 0 || known_align >= GET_MODE_ALIGNMENT (xmode))) { @@ -401,7 +399,7 @@ layout_decl (tree decl, unsigned int known_align) && DECL_ALIGN (decl) >= TYPE_ALIGN (type)) DECL_BIT_FIELD (decl) = 0; } - else if (DECL_PACKED (decl) && DECL_USER_ALIGN (decl)) + else if (packed_p && DECL_USER_ALIGN (decl)) /* Don't touch DECL_ALIGN. For other packed fields, go ahead and round up; we'll reduce it again below. We want packing to supersede USER_ALIGN inherited from the type, but defer to @@ -416,14 +414,14 @@ layout_decl (tree decl, unsigned int known_align) Note that do_type_align may set DECL_USER_ALIGN, so we need to check old_user_align instead. 
*/ - if (DECL_PACKED (decl) + if (packed_p && !old_user_align && (DECL_NONADDRESSABLE_P (decl) || DECL_SIZE_UNIT (decl) == 0 || TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST)) DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT); - if (! DECL_USER_ALIGN (decl) && ! DECL_PACKED (decl)) + if (! packed_p && ! DECL_USER_ALIGN (decl)) { /* Some targets (i.e. i386, VMS) limit struct field alignment to a lower boundary than alignment of variables unless @@ -437,9 +435,13 @@ layout_decl (tree decl, unsigned int known_align) #endif } + if (zero_bitfield) + mfa = initial_max_fld_align * BITS_PER_UNIT; + else + mfa = maximum_field_alignment; /* Should this be controlled by DECL_USER_ALIGN, too? */ - if (maximum_field_alignment != 0) - DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), maximum_field_alignment); + if (mfa != 0) + DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa); } /* Evaluate nonconstant size only once, either now or as soon as safe. */ @@ -462,10 +464,10 @@ layout_decl (tree decl, unsigned int known_align) int size_as_int = TREE_INT_CST_LOW (size); if (compare_tree_int (size, size_as_int) == 0) - warning ("%Jsize of '%D' is %d bytes", decl, decl, size_as_int); + warning (0, "size of %q+D is %d bytes", decl, size_as_int); else - warning ("%Jsize of '%D' is larger than %d bytes", - decl, decl, larger_than_size); + warning (0, "size of %q+D is larger than %wd bytes", + decl, larger_than_size); } } @@ -496,7 +498,7 @@ relayout_decl (tree decl) /* Hook for a front-end function that can modify the record layout as needed immediately before it is finalized. */ -void (*lang_adjust_rli) (record_layout_info) = 0; +static void (*lang_adjust_rli) (record_layout_info) = 0; void set_lang_adjust_rli (void (*f) (record_layout_info)) @@ -536,6 +538,7 @@ start_record_layout (tree t) rli->prev_field = 0; rli->pending_statics = 0; rli->packed_maybe_necessary = 0; + rli->remaining_in_alignment = 0; return rli; } @@ -547,7 +550,8 @@ tree bit_from_pos (tree offset, tree bitpos) { return size_binop (PLUS_EXPR, bitpos, - size_binop (MULT_EXPR, convert (bitsizetype, offset), + size_binop (MULT_EXPR, + fold_convert (bitsizetype, offset), bitsize_unit_node)); } @@ -555,9 +559,9 @@ tree byte_from_pos (tree offset, tree bitpos) { return size_binop (PLUS_EXPR, offset, - convert (sizetype, - size_binop (TRUNC_DIV_EXPR, bitpos, - bitsize_unit_node))); + fold_convert (sizetype, + size_binop (TRUNC_DIV_EXPR, bitpos, + bitsize_unit_node))); } void @@ -565,9 +569,9 @@ pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align, tree pos) { *poffset = size_binop (MULT_EXPR, - convert (sizetype, - size_binop (FLOOR_DIV_EXPR, pos, - bitsize_int (off_align))), + fold_convert (sizetype, + size_binop (FLOOR_DIV_EXPR, pos, + bitsize_int (off_align))), size_int (off_align / BITS_PER_UNIT)); *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align)); } @@ -587,7 +591,8 @@ normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align) *poffset = size_binop (PLUS_EXPR, *poffset, - size_binop (MULT_EXPR, convert (sizetype, extra_aligns), + size_binop (MULT_EXPR, + fold_convert (sizetype, extra_aligns), size_int (off_align / BITS_PER_UNIT))); *pbitpos @@ -607,6 +612,11 @@ debug_rli (record_layout_info rli) fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n", rli->record_align, rli->unpacked_align, rli->offset_align); + + /* The ms_struct code is the only that uses this. 
*/ + if (targetm.ms_bitfield_layout_p (rli->t)) + fprintf (stderr, "remaning in alignment = %u\n", rli->remaining_in_alignment); + if (rli->packed_maybe_necessary) fprintf (stderr, "packed may be necessary\n"); @@ -643,9 +653,9 @@ rli_size_so_far (record_layout_info rli) } /* FIELD is about to be added to RLI->T. The alignment (in bits) of - the next available location is given by KNOWN_ALIGN. Update the - variable alignment fields in RLI, and return the alignment to give - the FIELD. */ + the next available location within the record is given by KNOWN_ALIGN. + Update the variable alignment fields in RLI, and return the alignment + to give the FIELD. */ unsigned int update_alignment_for_field (record_layout_info rli, tree field, @@ -659,6 +669,10 @@ update_alignment_for_field (record_layout_info rli, tree field, bool user_align; bool is_bitfield; + /* Do not attempt to align an ERROR_MARK node */ + if (TREE_CODE (type) == ERROR_MARK) + return 0; + /* Lay out the field so we know what alignment it needs. */ layout_decl (field, known_align); desired_align = DECL_ALIGN (field); @@ -671,7 +685,7 @@ update_alignment_for_field (record_layout_info rli, tree field, /* Record must have at least as much alignment as any field. Otherwise, the alignment of the field within the record is meaningless. */ - if (is_bitfield && targetm.ms_bitfield_layout_p (rli->t)) + if (targetm.ms_bitfield_layout_p (rli->t)) { /* Here, the alignment of the underlying type of a bitfield can affect the alignment of a record; even a zero-sized field @@ -679,11 +693,12 @@ update_alignment_for_field (record_layout_info rli, tree field, the type, except that for zero-size bitfields this only applies if there was an immediately prior, nonzero-size bitfield. (That's the way it is, experimentally.) */ - if (! integer_zerop (DECL_SIZE (field)) - ? ! DECL_PACKED (field) - : (rli->prev_field - && DECL_BIT_FIELD_TYPE (rli->prev_field) - && ! integer_zerop (DECL_SIZE (rli->prev_field)))) + if (!is_bitfield + || (!integer_zerop (DECL_SIZE (field)) + ? !DECL_PACKED (field) + : (rli->prev_field + && DECL_BIT_FIELD_TYPE (rli->prev_field) + && ! integer_zerop (DECL_SIZE (rli->prev_field))))) { unsigned int type_align = TYPE_ALIGN (type); type_align = MAX (type_align, desired_align); @@ -709,7 +724,16 @@ update_alignment_for_field (record_layout_info rli, tree field, type_align = ADJUST_FIELD_ALIGN (field, type_align); #endif - if (maximum_field_alignment != 0) + /* Targets might chose to handle unnamed and hence possibly + zero-width bitfield. Those are not influenced by #pragmas + or packed attributes. */ + if (integer_zerop (DECL_SIZE (field))) + { + if (initial_max_fld_align) + type_align = MIN (type_align, + initial_max_fld_align * BITS_PER_UNIT); + } + else if (maximum_field_alignment != 0) type_align = MIN (type_align, maximum_field_alignment); else if (DECL_PACKED (field)) type_align = MIN (type_align, BITS_PER_UNIT); @@ -750,14 +774,20 @@ place_union_field (record_layout_info rli, tree field) DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node; SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT); + /* If this is an ERROR_MARK return *after* having set the + field at the start of the union. This helps when parsing + invalid fields. */ + if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK) + return; + /* We assume the union's size will be a multiple of a byte so we don't bother with BITPOS. 
*/ if (TREE_CODE (rli->t) == UNION_TYPE) rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field)); else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE) - rli->offset = fold (build3 (COND_EXPR, sizetype, - DECL_QUALIFIER (field), - DECL_SIZE_UNIT (field), rli->offset)); + rli->offset = fold_build3 (COND_EXPR, sizetype, + DECL_QUALIFIER (field), + DECL_SIZE_UNIT (field), rli->offset); } #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED) @@ -796,8 +826,7 @@ place_field (record_layout_info rli, tree field) /* The type of this field. */ tree type = TREE_TYPE (field); - if (TREE_CODE (field) == ERROR_MARK || TREE_CODE (type) == ERROR_MARK) - return; + gcc_assert (TREE_CODE (field) != ERROR_MARK); /* If FIELD is static, then treat it like a separate variable, not really like a structure field. If it is a FUNCTION_DECL, it's a @@ -823,13 +852,23 @@ place_field (record_layout_info rli, tree field) return; } + else if (TREE_CODE (type) == ERROR_MARK) + { + /* Place this field at the current allocation position, so we + maintain monotonicity. */ + DECL_FIELD_OFFSET (field) = rli->offset; + DECL_FIELD_BIT_OFFSET (field) = rli->bitpos; + SET_DECL_OFFSET_ALIGN (field, rli->offset_align); + return; + } + /* Work out the known alignment so far. Note that A & (-A) is the value of the least-significant bit in A that is one. */ if (! integer_zerop (rli->bitpos)) known_align = (tree_low_cst (rli->bitpos, 1) & - tree_low_cst (rli->bitpos, 1)); else if (integer_zerop (rli->offset)) - known_align = BIGGEST_ALIGNMENT; + known_align = 0; else if (host_integerp (rli->offset, 1)) known_align = (BITS_PER_UNIT * (tree_low_cst (rli->offset, 1) @@ -838,6 +877,8 @@ place_field (record_layout_info rli, tree field) known_align = rli->offset_align; desired_align = update_alignment_for_field (rli, field, known_align); + if (known_align == 0) + known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); if (warn_packed && DECL_PACKED (field)) { @@ -846,11 +887,11 @@ place_field (record_layout_info rli, tree field) if (TYPE_ALIGN (type) > desired_align) { if (STRICT_ALIGNMENT) - warning ("%Jpacked attribute causes inefficient alignment " - "for '%D'", field, field); + warning (OPT_Wattributes, "packed attribute causes " + "inefficient alignment for %q+D", field); else - warning ("%Jpacked attribute is unnecessary for '%D'", - field, field); + warning (OPT_Wattributes, "packed attribute is " + "unnecessary for %q+D", field); } } else @@ -858,14 +899,15 @@ place_field (record_layout_info rli, tree field) } /* Does this field automatically have alignment it needs by virtue - of the fields that precede it and the record's own alignment? */ - if (known_align < desired_align) + of the fields that precede it and the record's own alignment? + We already align ms_struct fields, so don't re-align them. */ + if (known_align < desired_align + && !targetm.ms_bitfield_layout_p (rli->t)) { /* No, we need to skip space before this field. Bump the cumulative size to multiple of field alignment. */ - if (warn_padded) - warning ("%Jpadding struct to align '%D'", field, field); + warning (OPT_Wpadded, "padding struct to align %q+D", field); /* If the alignment is still within offset_align, just align the bit position. */ @@ -876,9 +918,9 @@ place_field (record_layout_info rli, tree field) /* First adjust OFFSET by the partial bits, then align. 
*/ rli->offset = size_binop (PLUS_EXPR, rli->offset, - convert (sizetype, - size_binop (CEIL_DIV_EXPR, rli->bitpos, - bitsize_unit_node))); + fold_convert (sizetype, + size_binop (CEIL_DIV_EXPR, rli->bitpos, + bitsize_unit_node))); rli->bitpos = bitsize_zero_node; rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT); @@ -977,17 +1019,12 @@ place_field (record_layout_info rli, tree field) Note: for compatibility, we use the type size, not the type alignment to determine alignment, since that matches the documentation */ - if (targetm.ms_bitfield_layout_p (rli->t) - && ((DECL_BIT_FIELD_TYPE (field) && ! DECL_PACKED (field)) - || (rli->prev_field && ! DECL_PACKED (rli->prev_field)))) + if (targetm.ms_bitfield_layout_p (rli->t)) { - /* At this point, either the prior or current are bitfields, - (possibly both), and we're dealing with MS packing. */ tree prev_saved = rli->prev_field; - /* Is the prior field a bitfield? If so, handle "runs" of same - type size fields. */ - if (rli->prev_field /* necessarily a bitfield if it exists. */) + /* This is a bitfield if it exists. */ + if (rli->prev_field) { /* If both are bitfields, nonzero, and the same size, this is the middle of a run. Zero declared size fields are special @@ -1006,7 +1043,7 @@ place_field (record_layout_info rli, tree field) /* We're in the middle of a run of equal type size fields; make sure we realign if we run out of bits. (Not decl size, type size!) */ - HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 0); + HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1); if (rli->remaining_in_alignment < bitsize) { @@ -1017,7 +1054,7 @@ place_field (record_layout_info rli, tree field) DECL_FIELD_BIT_OFFSET (rli->prev_field)); rli->prev_field = field; rli->remaining_in_alignment - = tree_low_cst (TYPE_SIZE (type), 0); + = tree_low_cst (TYPE_SIZE (type), 1); } rli->remaining_in_alignment -= bitsize; @@ -1033,13 +1070,12 @@ place_field (record_layout_info rli, tree field) Note: since the beginning of the field was aligned then of course the end will be too. No round needed. */ - if (!integer_zerop (DECL_SIZE (rli->prev_field))) + if (!integer_zerop (DECL_SIZE (rli->prev_field)) + && rli->remaining_in_alignment) { - tree type_size = TYPE_SIZE (TREE_TYPE (rli->prev_field)); - rli->bitpos - = size_binop (PLUS_EXPR, type_size, - DECL_FIELD_BIT_OFFSET (rli->prev_field)); + = size_binop (PLUS_EXPR, rli->bitpos, + bitsize_int (rli->remaining_in_alignment)); } else /* We "use up" size zero fields; the code below should behave @@ -1085,21 +1121,11 @@ place_field (record_layout_info rli, tree field) && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 0) && host_integerp (DECL_SIZE (field), 0)) rli->remaining_in_alignment - = tree_low_cst (TYPE_SIZE (TREE_TYPE(field)), 0) - - tree_low_cst (DECL_SIZE (field), 0); + = tree_low_cst (TYPE_SIZE (TREE_TYPE(field)), 1) + - tree_low_cst (DECL_SIZE (field), 1); /* Now align (conventionally) for the new type. */ - if (!DECL_PACKED(field)) - type_align = MAX(TYPE_ALIGN (type), type_align); - - if (prev_saved - && DECL_BIT_FIELD_TYPE (prev_saved) - /* If the previous bit-field is zero-sized, we've already - accounted for its alignment needs (or ignored it, if - appropriate) while placing it. */ - && ! 
integer_zerop (DECL_SIZE (prev_saved))) - type_align = MAX (type_align, - TYPE_ALIGN (TREE_TYPE (prev_saved))); + type_align = TYPE_ALIGN (TREE_TYPE (field)); if (maximum_field_alignment != 0) type_align = MIN (type_align, maximum_field_alignment); @@ -1125,20 +1151,22 @@ place_field (record_layout_info rli, tree field) actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)); else if (integer_zerop (DECL_FIELD_OFFSET (field))) - actual_align = BIGGEST_ALIGNMENT; + actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); else if (host_integerp (DECL_FIELD_OFFSET (field), 1)) actual_align = (BITS_PER_UNIT * (tree_low_cst (DECL_FIELD_OFFSET (field), 1) & - tree_low_cst (DECL_FIELD_OFFSET (field), 1))); else actual_align = DECL_OFFSET_ALIGN (field); + /* ACTUAL_ALIGN is still the actual alignment *within the record* . + store / extract bit field operations will check the alignment of the + record against the mode of bit fields. */ if (known_align != actual_align) layout_decl (field, actual_align); - /* Only the MS bitfields use this. */ - if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE(field)) - rli->prev_field = field; + if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field)) + rli->prev_field = field; /* Now add size of this field to the size of the record. If the size is not constant, treat the field as being a multiple of bytes and just @@ -1149,19 +1177,34 @@ place_field (record_layout_info rli, tree field) is printed in finish_struct. */ if (DECL_SIZE (field) == 0) /* Do nothing. */; - else if (TREE_CODE (DECL_SIZE_UNIT (field)) != INTEGER_CST - || TREE_CONSTANT_OVERFLOW (DECL_SIZE_UNIT (field))) + else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST + || TREE_CONSTANT_OVERFLOW (DECL_SIZE (field))) { rli->offset = size_binop (PLUS_EXPR, rli->offset, - convert (sizetype, - size_binop (CEIL_DIV_EXPR, rli->bitpos, - bitsize_unit_node))); + fold_convert (sizetype, + size_binop (CEIL_DIV_EXPR, rli->bitpos, + bitsize_unit_node))); rli->offset = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field)); rli->bitpos = bitsize_zero_node; rli->offset_align = MIN (rli->offset_align, desired_align); } + else if (targetm.ms_bitfield_layout_p (rli->t)) + { + rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field)); + + /* If we ended a bitfield before the full length of the type then + pad the struct out to the full length of the last type. */ + if ((TREE_CHAIN (field) == NULL + || TREE_CODE (TREE_CHAIN (field)) != FIELD_DECL) + && DECL_BIT_FIELD_TYPE (field) + && !integer_zerop (DECL_SIZE (field))) + rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, + bitsize_int (rli->remaining_in_alignment)); + + normalize_rli (rli); + } else { rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field)); @@ -1202,12 +1245,12 @@ finalize_record_size (record_layout_info rli) /* Round the size up to be a multiple of the required alignment. 
*/ TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t)); - TYPE_SIZE_UNIT (rli->t) = round_up (unpadded_size_unit, - TYPE_ALIGN (rli->t) / BITS_PER_UNIT); + TYPE_SIZE_UNIT (rli->t) + = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t)); - if (warn_padded && TREE_CONSTANT (unpadded_size) + if (TREE_CONSTANT (unpadded_size) && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0) - warning ("padding struct size to alignment boundary"); + warning (OPT_Wpadded, "padding struct size to alignment boundary"); if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary @@ -1237,16 +1280,19 @@ finalize_record_size (record_layout_info rli) name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (rli->t))); if (STRICT_ALIGNMENT) - warning ("packed attribute causes inefficient alignment for `%s'", name); + warning (OPT_Wpacked, "packed attribute causes inefficient " + "alignment for %qs", name); else - warning ("packed attribute is unnecessary for `%s'", name); + warning (OPT_Wpacked, + "packed attribute is unnecessary for %qs", name); } else { if (STRICT_ALIGNMENT) - warning ("packed attribute causes inefficient alignment"); + warning (OPT_Wpacked, + "packed attribute causes inefficient alignment"); else - warning ("packed attribute is unnecessary"); + warning (OPT_Wpacked, "packed attribute is unnecessary"); } } } @@ -1302,9 +1348,12 @@ compute_record_mode (tree type) #endif /* MEMBER_TYPE_FORCES_BLK */ } - /* If we only have one real field; use its mode. This only applies to - RECORD_TYPE. This does not apply to unions. */ - if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode) + /* If we only have one real field; use its mode if that mode's size + matches the type's size. This only applies to RECORD_TYPE. This + does not apply to unions. */ + if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode + && host_integerp (TYPE_SIZE (type), 1) + && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type))) TYPE_MODE (type) = mode; else TYPE_MODE (type) = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1); @@ -1340,8 +1389,15 @@ finalize_type_size (tree type) && TREE_CODE (type) != QUAL_UNION_TYPE && TREE_CODE (type) != ARRAY_TYPE))) { - TYPE_ALIGN (type) = GET_MODE_ALIGNMENT (TYPE_MODE (type)); - TYPE_USER_ALIGN (type) = 0; + unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type)); + + /* Don't override a larger alignment requirement coming from a user + alignment of one of the fields. */ + if (mode_align >= TYPE_ALIGN (type)) + { + TYPE_ALIGN (type) = mode_align; + TYPE_USER_ALIGN (type) = 0; + } } /* Do machine-dependent extra alignment. */ @@ -1357,15 +1413,15 @@ finalize_type_size (tree type) result will fit in sizetype. We will get more efficient code using sizetype, so we force a conversion. */ TYPE_SIZE_UNIT (type) - = convert (sizetype, - size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type), - bitsize_unit_node)); + = fold_convert (sizetype, + size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type), + bitsize_unit_node)); if (TYPE_SIZE (type) != 0) { TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type)); - TYPE_SIZE_UNIT (type) - = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN (type) / BITS_PER_UNIT); + TYPE_SIZE_UNIT (type) = round_up (TYPE_SIZE_UNIT (type), + TYPE_ALIGN_UNIT (type)); } /* Evaluate nonconstant sizes only once, either now or as soon as safe. 
*/ @@ -1482,8 +1538,7 @@ finish_builtin_struct (tree type, const char *name, tree fields, void layout_type (tree type) { - if (type == 0) - abort (); + gcc_assert (type); if (type == error_mark_node) return; @@ -1497,7 +1552,7 @@ layout_type (tree type) case LANG_TYPE: /* This kind of type is the responsibility of the language-specific code. */ - abort (); + gcc_unreachable (); case BOOLEAN_TYPE: /* Used for Java, Pascal, and Chill. */ if (TYPE_PRECISION (type) == 0) @@ -1507,7 +1562,6 @@ layout_type (tree type) case INTEGER_TYPE: case ENUMERAL_TYPE: - case CHAR_TYPE: if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0) TYPE_UNSIGNED (type) = 1; @@ -1538,11 +1592,10 @@ layout_type (tree type) case VECTOR_TYPE: { int nunits = TYPE_VECTOR_SUBPARTS (type); - tree nunits_tree = build_int_cst (NULL_TREE, nunits, 0); + tree nunits_tree = build_int_cst (NULL_TREE, nunits); tree innertype = TREE_TYPE (type); - if (nunits & (nunits - 1)) - abort (); + gcc_assert (!(nunits & (nunits - 1))); /* Find an appropriate mode for the vector type. */ if (TYPE_MODE (type) == VOIDmode) @@ -1551,7 +1604,7 @@ layout_type (tree type) enum machine_mode mode; /* First, look for a supported vector type. */ - if (GET_MODE_CLASS (innermode) == MODE_FLOAT) + if (SCALAR_FLOAT_MODE_P (innermode)) mode = MIN_MODE_VECTOR_FLOAT; else mode = MIN_MODE_VECTOR_INT; @@ -1559,7 +1612,7 @@ layout_type (tree type) for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode)) if (GET_MODE_NUNITS (mode) == nunits && GET_MODE_INNER (mode) == innermode - && VECTOR_MODE_SUPPORTED_P (mode)) + && targetm.vector_mode_supported_p (mode)) break; /* For integers, try mapping it to a same-sized scalar mode. */ @@ -1580,6 +1633,10 @@ layout_type (tree type) nunits_tree, 0); TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype), nunits_tree, 0); + + /* Always naturally align vectors. This prevents ABI changes + depending on whether or not native vector modes are supported. */ + TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0); break; } @@ -1644,10 +1701,10 @@ layout_type (tree type) /* The initial subtraction should happen in the original type so that (possible) negative values are handled appropriately. */ length = size_binop (PLUS_EXPR, size_one_node, - convert (sizetype, - fold (build2 (MINUS_EXPR, - TREE_TYPE (lb), - ub, lb)))); + fold_convert (sizetype, + fold_build2 (MINUS_EXPR, + TREE_TYPE (lb), + ub, lb))); /* Special handling for arrays of bits (for Chill). */ element_size = TYPE_SIZE (element); @@ -1676,7 +1733,8 @@ layout_type (tree type) length = size_binop (MAX_EXPR, length, size_zero_node); TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size, - convert (bitsizetype, length)); + fold_convert (bitsizetype, + length)); /* If we know the size of the element, calculate the total size directly, rather than do some division thing below. @@ -1728,6 +1786,17 @@ layout_type (tree type) TYPE_MODE (type) = BLKmode; } } + /* When the element size is constant, check that it is at least as + large as the element alignment. */ + if (TYPE_SIZE_UNIT (element) + && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST + /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than + TYPE_ALIGN_UNIT. 
*/ + && !TREE_CONSTANT_OVERFLOW (TYPE_SIZE_UNIT (element)) + && !integer_zerop (TYPE_SIZE_UNIT (element)) + && compare_tree_int (TYPE_SIZE_UNIT (element), + TYPE_ALIGN_UNIT (element)) < 0) + error ("alignment of array elements is greater than element size"); break; } @@ -1762,46 +1831,8 @@ layout_type (tree type) } break; - case SET_TYPE: /* Used by Chill and Pascal. */ - if (TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST - || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST) - abort (); - else - { -#ifndef SET_WORD_SIZE -#define SET_WORD_SIZE BITS_PER_WORD -#endif - unsigned int alignment - = set_alignment ? set_alignment : SET_WORD_SIZE; - HOST_WIDE_INT size_in_bits - = (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0) - - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1); - HOST_WIDE_INT rounded_size - = ((size_in_bits + alignment - 1) / alignment) * alignment; - - if (rounded_size > (int) alignment) - TYPE_MODE (type) = BLKmode; - else - TYPE_MODE (type) = mode_for_size (alignment, MODE_INT, 1); - - TYPE_SIZE (type) = bitsize_int (rounded_size); - TYPE_SIZE_UNIT (type) = size_int (rounded_size / BITS_PER_UNIT); - TYPE_ALIGN (type) = alignment; - TYPE_USER_ALIGN (type) = 0; - TYPE_PRECISION (type) = size_in_bits; - } - break; - - case FILE_TYPE: - /* The size may vary in different languages, so the language front end - should fill in the size. */ - TYPE_ALIGN (type) = BIGGEST_ALIGNMENT; - TYPE_USER_ALIGN (type) = 0; - TYPE_MODE (type) = BLKmode; - break; - default: - abort (); + gcc_unreachable (); } /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For @@ -1812,11 +1843,6 @@ layout_type (tree type) && TREE_CODE (type) != QUAL_UNION_TYPE) finalize_type_size (type); - /* If this type is created before sizetype has been permanently set, - record it so set_sizetype can fix it up. */ - if (! sizetype_set) - early_type_list = tree_cons (NULL_TREE, type, early_type_list); - /* If an alias set has been set for this aggregate when it was incomplete, force it into alias set 0. This is too conservative, but we cannot call record_component_aliases @@ -1856,32 +1882,32 @@ make_unsigned_type (int precision) value to enable integer types to be created. */ void -initialize_sizetypes (void) +initialize_sizetypes (bool signed_p) { tree t = make_node (INTEGER_TYPE); + int precision = GET_MODE_BITSIZE (SImode); TYPE_MODE (t) = SImode; TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode); TYPE_USER_ALIGN (t) = 0; - TYPE_SIZE (t) = build_int_cst (t, GET_MODE_BITSIZE (SImode), 0); - TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode), 0); - TYPE_UNSIGNED (t) = 1; - TYPE_PRECISION (t) = GET_MODE_BITSIZE (SImode); - TYPE_MIN_VALUE (t) = build_int_cst (t, 0, 0); TYPE_IS_SIZETYPE (t) = 1; + TYPE_UNSIGNED (t) = !signed_p; + TYPE_SIZE (t) = build_int_cst (t, precision); + TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode)); + TYPE_PRECISION (t) = precision; - /* 1000 avoids problems with possible overflow and is certainly - larger than any size value we'd want to be storing. */ - TYPE_MAX_VALUE (t) = build_int_cst (t, 1000, 0); + /* Set TYPE_MIN_VALUE and TYPE_MAX_VALUE. */ + set_min_and_max_values_for_integral_type (t, precision, !signed_p); - /* These two must be different nodes because of the caching done in - size_int_wide. */ sizetype = t; - bitsizetype = copy_node (t); + bitsizetype = build_distinct_type_copy (t); } -/* Set sizetype to TYPE, and initialize *sizetype accordingly. - Also update the type of any standard type's sizes made so far. 
*/ +/* Make sizetype a version of TYPE, and initialize *sizetype + accordingly. We do this by overwriting the stub sizetype and + bitsizetype nodes created by initialize_sizetypes. This makes sure + that (a) anything stubby about them no longer exists, (b) any + INTEGER_CSTs created with such a type, remain valid. */ void set_sizetype (tree type) @@ -1893,75 +1919,74 @@ set_sizetype (tree type) precision. */ int precision = MIN (oprecision + BITS_PER_UNIT_LOG + 1, 2 * HOST_BITS_PER_WIDE_INT); - unsigned int i; tree t; - if (sizetype_set) - abort (); + gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype)); - /* Make copies of nodes since we'll be setting TYPE_IS_SIZETYPE. */ - sizetype = copy_node (type); - TYPE_CACHED_VALUES (sizetype) = make_tree_vec (INTEGER_SHARE_LIMIT); - TYPE_CACHED_VALUES_P (sizetype) = 1; - TREE_TYPE (TYPE_CACHED_VALUES (sizetype)) = type; - TYPE_IS_SIZETYPE (sizetype) = 1; - bitsizetype = make_node (INTEGER_TYPE); - TYPE_NAME (bitsizetype) = TYPE_NAME (type); - TYPE_PRECISION (bitsizetype) = precision; - TYPE_IS_SIZETYPE (bitsizetype) = 1; + t = build_distinct_type_copy (type); + /* We do want to use sizetype's cache, as we will be replacing that + type. */ + TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype); + TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype); + TREE_TYPE (TYPE_CACHED_VALUES (t)) = type; + TYPE_UID (t) = TYPE_UID (sizetype); + TYPE_IS_SIZETYPE (t) = 1; - if (TYPE_UNSIGNED (type)) - fixup_unsigned_type (bitsizetype); - else - fixup_signed_type (bitsizetype); + /* Replace our original stub sizetype. */ + memcpy (sizetype, t, tree_size (sizetype)); + TYPE_MAIN_VARIANT (sizetype) = sizetype; + + t = make_node (INTEGER_TYPE); + TYPE_NAME (t) = get_identifier ("bit_size_type"); + /* We do want to use bitsizetype's cache, as we will be replacing that + type. */ + TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype); + TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype); + TYPE_PRECISION (t) = precision; + TYPE_UID (t) = TYPE_UID (bitsizetype); + TYPE_IS_SIZETYPE (t) = 1; - layout_type (bitsizetype); + /* Replace our original stub bitsizetype. */ + memcpy (bitsizetype, t, tree_size (bitsizetype)); + TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype; if (TYPE_UNSIGNED (type)) { - usizetype = sizetype; - ubitsizetype = bitsizetype; - ssizetype = copy_node (make_signed_type (oprecision)); - sbitsizetype = copy_node (make_signed_type (precision)); + fixup_unsigned_type (bitsizetype); + ssizetype = build_distinct_type_copy (make_signed_type (oprecision)); + TYPE_IS_SIZETYPE (ssizetype) = 1; + sbitsizetype = build_distinct_type_copy (make_signed_type (precision)); + TYPE_IS_SIZETYPE (sbitsizetype) = 1; } else { + fixup_signed_type (bitsizetype); ssizetype = sizetype; sbitsizetype = bitsizetype; - usizetype = copy_node (make_unsigned_type (oprecision)); - ubitsizetype = copy_node (make_unsigned_type (precision)); } - TYPE_NAME (bitsizetype) = get_identifier ("bit_size_type"); - - /* Show is a sizetype, is a main type, and has no pointers to it. */ - for (i = 0; i < ARRAY_SIZE (sizetype_tab); i++) + /* If SIZETYPE is unsigned, we need to fix TYPE_MAX_VALUE so that + it is sign extended in a way consistent with force_fit_type. 
*/ + if (TYPE_UNSIGNED (type)) { - TYPE_IS_SIZETYPE (sizetype_tab[i]) = 1; - TYPE_MAIN_VARIANT (sizetype_tab[i]) = sizetype_tab[i]; - TYPE_NEXT_VARIANT (sizetype_tab[i]) = 0; - TYPE_POINTER_TO (sizetype_tab[i]) = 0; - TYPE_REFERENCE_TO (sizetype_tab[i]) = 0; - } + tree orig_max, new_max; - /* Go down each of the types we already made and set the proper type - for the sizes in them. */ - for (t = early_type_list; t != 0; t = TREE_CHAIN (t)) - { - if (TREE_CODE (TREE_VALUE (t)) != INTEGER_TYPE - && TREE_CODE (TREE_VALUE (t)) != BOOLEAN_TYPE) - abort (); + orig_max = TYPE_MAX_VALUE (sizetype); - TREE_TYPE (TYPE_SIZE (TREE_VALUE (t))) = bitsizetype; - TREE_TYPE (TYPE_SIZE_UNIT (TREE_VALUE (t))) = sizetype; - } + /* Build a new node with the same values, but a different type. */ + new_max = build_int_cst_wide (sizetype, + TREE_INT_CST_LOW (orig_max), + TREE_INT_CST_HIGH (orig_max)); - early_type_list = 0; - sizetype_set = 1; + /* Now sign extend it using force_fit_type to ensure + consistency. */ + new_max = force_fit_type (new_max, 0, 0, 0); + TYPE_MAX_VALUE (sizetype) = new_max; + } } -/* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE, - BOOLEAN_TYPE, or CHAR_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE +/* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE + or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE for TYPE, based on the PRECISION and whether or not the TYPE IS_UNSIGNED. PRECISION need not correspond to a width supported natively by the hardware; for example, on a machine with 8-bit, @@ -1978,34 +2003,37 @@ set_min_and_max_values_for_integral_type (tree type, if (is_unsigned) { - min_value = build_int_cst (type, 0, 0); - max_value - = build_int_cst (type, precision - HOST_BITS_PER_WIDE_INT >= 0 - ? -1 : ((HOST_WIDE_INT) 1 << precision) - 1, - precision - HOST_BITS_PER_WIDE_INT > 0 - ? ((unsigned HOST_WIDE_INT) ~0 - >> (HOST_BITS_PER_WIDE_INT - - (precision - HOST_BITS_PER_WIDE_INT))) - : 0); + min_value = build_int_cst (type, 0); + max_value + = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0 + ? -1 + : ((HOST_WIDE_INT) 1 << precision) - 1, + precision - HOST_BITS_PER_WIDE_INT > 0 + ? ((unsigned HOST_WIDE_INT) ~0 + >> (HOST_BITS_PER_WIDE_INT + - (precision - HOST_BITS_PER_WIDE_INT))) + : 0); } else { - min_value - = build_int_cst (type, - (precision - HOST_BITS_PER_WIDE_INT > 0 - ? 0 : (HOST_WIDE_INT) (-1) << (precision - 1)), - (((HOST_WIDE_INT) (-1) - << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0 - ? precision - HOST_BITS_PER_WIDE_INT - 1 - : 0)))); + min_value + = build_int_cst_wide (type, + (precision - HOST_BITS_PER_WIDE_INT > 0 + ? 0 + : (HOST_WIDE_INT) (-1) << (precision - 1)), + (((HOST_WIDE_INT) (-1) + << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0 + ? precision - HOST_BITS_PER_WIDE_INT - 1 + : 0)))); max_value - = build_int_cst (type, - (precision - HOST_BITS_PER_WIDE_INT > 0 - ? -1 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1), - (precision - HOST_BITS_PER_WIDE_INT - 1 > 0 - ? (((HOST_WIDE_INT) 1 - << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1 - : 0)); + = build_int_cst_wide (type, + (precision - HOST_BITS_PER_WIDE_INT > 0 + ? -1 + : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1), + (precision - HOST_BITS_PER_WIDE_INT - 1 > 0 + ? 
(((HOST_WIDE_INT) 1 + << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1 + : 0)); } TYPE_MIN_VALUE (type) = min_value; @@ -2028,7 +2056,7 @@ fixup_signed_type (tree type) if (precision > HOST_BITS_PER_WIDE_INT * 2) precision = HOST_BITS_PER_WIDE_INT * 2; - set_min_and_max_values_for_integral_type (type, precision, + set_min_and_max_values_for_integral_type (type, precision, /*is_unsigned=*/false); /* Lay out the type: set its alignment, size, etc. */ @@ -2051,8 +2079,8 @@ fixup_unsigned_type (tree type) precision = HOST_BITS_PER_WIDE_INT * 2; TYPE_UNSIGNED (type) = 1; - - set_min_and_max_values_for_integral_type (type, precision, + + set_min_and_max_values_for_integral_type (type, precision, /*is_unsigned=*/true); /* Lay out the type: set its alignment, size, etc. */ @@ -2066,13 +2094,17 @@ fixup_unsigned_type (tree type) If LARGEST_MODE is not VOIDmode, it means that we should not use a mode larger than LARGEST_MODE (usually SImode). - If no mode meets all these conditions, we return VOIDmode. Otherwise, if - VOLATILEP is true or SLOW_BYTE_ACCESS is false, we return the smallest - mode meeting these conditions. + If no mode meets all these conditions, we return VOIDmode. + + If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the + smallest mode meeting these conditions. + + If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the + largest mode (but a mode no wider than UNITS_PER_WORD) that meets + all the conditions. - Otherwise (VOLATILEP is false and SLOW_BYTE_ACCESS is true), we return - the largest mode (but a mode no wider than UNITS_PER_WORD) that meets - all the conditions. */ + If VOLATILEP is true the narrow_volatile_bitfields target hook is used to + decide which of the above modes should be used. */ enum machine_mode get_best_mode (int bitsize, int bitpos, unsigned int align, @@ -2102,7 +2134,8 @@ get_best_mode (int bitsize, int bitpos, unsigned int align, || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode))) return VOIDmode; - if (SLOW_BYTE_ACCESS && ! volatilep) + if ((SLOW_BYTE_ACCESS && ! volatilep) + || (volatilep && !targetm.narrow_volatile_bitfield())) { enum machine_mode wide_mode = VOIDmode, tmode; @@ -2136,8 +2169,7 @@ get_mode_bounds (enum machine_mode mode, int sign, unsigned size = GET_MODE_BITSIZE (mode); unsigned HOST_WIDE_INT min_val, max_val; - if (size > HOST_BITS_PER_WIDE_INT) - abort (); + gcc_assert (size <= HOST_BITS_PER_WIDE_INT); if (sign) { @@ -2150,8 +2182,8 @@ get_mode_bounds (enum machine_mode mode, int sign, max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1; } - *mmin = GEN_INT (trunc_int_for_mode (min_val, target_mode)); - *mmax = GEN_INT (trunc_int_for_mode (max_val, target_mode)); + *mmin = gen_int_mode (min_val, target_mode); + *mmax = gen_int_mode (max_val, target_mode); } #include "gt-stor-layout.h"
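
Illustrative sketch, not part of the patch: the rewritten mode_for_size_tree above
replaces the old "compare_tree_int (size, 1000)" heuristic with an exact check that
the size fits first in a host integer and then in an unsigned int, detecting
truncation by a narrowing round trip. The same idiom in standalone C, with
unsigned long long standing in for unsigned HOST_WIDE_INT:

#include <stdio.h>

static int
fits_in_unsigned_int (unsigned long long uhwi)
{
  unsigned int ui = (unsigned int) uhwi;
  /* The narrowing lost no bits iff the round trip compares equal.  */
  return uhwi == ui;
}

int
main (void)
{
  /* Prints "1 0": 1000 fits in an unsigned int, 2**40 does not.  */
  printf ("%d %d\n",
          fits_in_unsigned_int (1000ULL),
          fits_in_unsigned_int (1ULL << 40));
  return 0;
}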
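
Illustrative sketch, not part of the patch: the new zero_bitfield and
initial_max_fld_align handling in layout_decl and update_alignment_for_field makes
a zero-width bit-field keep its type's alignment effect even under #pragma pack,
while -fpack-struct= (recorded in initial_max_fld_align) still caps it. Assuming a
target where int has 4-byte alignment and #pragma pack is supported, the expected
layout can be checked with:

#include <stdio.h>
#include <stddef.h>

#pragma pack(1)
struct s
{
  char c;
  int : 0;   /* zero-width bit-field: not influenced by #pragma pack */
  char d;    /* expected at offset 4, the alignment of int */
};
#pragma pack()

int
main (void)
{
  /* With this change the zero-width bit-field still aligns D to int's
     boundary; without it, packing could reduce that offset to 1.  */
  printf ("offsetof (d) = %d\n", (int) offsetof (struct s, d));
  return 0;
}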
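
Illustrative sketch, not part of the patch: set_min_and_max_values_for_integral_type
above guards each shift so that a precision equal to the host word width never
shifts a value by its full width, which would be undefined behavior. A hypothetical
helper showing the same guard for up to 64-bit signed bounds:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper: bounds of an N-bit two's-complement type,
   1 <= n <= 64, never shifting a 64-bit value by 64.  */
static void
signed_bounds (int n, int64_t *min, int64_t *max)
{
  *max = (n == 64) ? INT64_MAX : (((int64_t) 1 << (n - 1)) - 1);
  *min = -*max - 1;
}

int
main (void)
{
  int64_t lo, hi;
  signed_bounds (8, &lo, &hi);
  printf ("%lld..%lld\n", (long long) lo, (long long) hi); /* -128..127 */
  return 0;
}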