diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index fed7fbd0390..391511f56a7 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -1,7 +1,7 @@
 /* Scalar Replacement of Aggregates (SRA) converts some structure
    references into scalar references, exposing them to the scalar
    optimizers.
-   Copyright (C) 2003, 2004, 2005, 2006, 2007
+   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008
    Free Software Foundation, Inc.
    Contributed by Diego Novillo <dnovillo@redhat.com>
@@ -268,6 +268,7 @@ sra_type_can_be_decomposed_p (tree type)
 	{
 	  /* Reject incorrectly represented bit fields.  */
 	  if (DECL_BIT_FIELD (t)
+	      && INTEGRAL_TYPE_P (TREE_TYPE (t))
 	      && (tree_low_cst (DECL_SIZE (t), 1)
 		  != TYPE_PRECISION (TREE_TYPE (t))))
 	    goto fail;
@@ -356,7 +357,7 @@ decl_can_be_decomposed_p (tree var)
   /* HACK: if we decompose a va_list_type_node before inlining, then we'll
      confuse tree-stdarg.c, and we won't be able to figure out which and
      how many arguments are accessed.  This really should be improved in
-     tree-stdarg.c, as the decomposition is truely a win.  This could also
+     tree-stdarg.c, as the decomposition is truly a win.  This could also
      be fixed if the stdarg pass ran early, but this can't be done until
      we've aliasing information early too.  See PR 30791.  */
   if (early_sra
@@ -486,7 +487,7 @@ sra_hash_tree (tree t)
 static hashval_t
 sra_elt_hash (const void *x)
 {
-  const struct sra_elt *e = x;
+  const struct sra_elt *const e = (const struct sra_elt *) x;
   const struct sra_elt *p;
   hashval_t h;
 
@@ -509,8 +510,8 @@ sra_elt_hash (const void *x)
 static int
 sra_elt_eq (const void *x, const void *y)
 {
-  const struct sra_elt *a = x;
-  const struct sra_elt *b = y;
+  const struct sra_elt *const a = (const struct sra_elt *) x;
+  const struct sra_elt *const b = (const struct sra_elt *) y;
   tree ae, be;
   const struct sra_elt *ap = a->parent;
   const struct sra_elt *bp = b->parent;
@@ -591,7 +592,7 @@ lookup_element (struct sra_elt *parent, tree child, tree type,
   elt = *slot;
   if (!elt && insert == INSERT)
     {
-      *slot = elt = obstack_alloc (&sra_obstack, sizeof (*elt));
+      *slot = elt = XOBNEW (&sra_obstack, struct sra_elt);
       memset (elt, 0, sizeof (*elt));
 
       elt->parent = parent;
@@ -855,18 +856,28 @@ sra_walk_expr (tree *expr_p, block_stmt_iterator *bsi, bool is_output,
 	    if (elt)
 	      elt->is_vector_lhs = true;
 	  }
+
 	/* A bit field reference (access to *multiple* fields simultaneously)
-	   is not currently scalarized.  Consider this an access to the
-	   complete outer element, to which walk_tree will bring us next.  */
-
+	   is not currently scalarized.  Consider this an access to the full
+	   outer element, to which walk_tree will bring us next.  */
 	goto use_all;
 
-      case VIEW_CONVERT_EXPR:
       case NOP_EXPR:
-	/* Similarly, a view/nop explicitly wants to look at an object in a
+	/* Similarly, a nop explicitly wants to look at an object in a
 	   type other than the one we've scalarized.  */
 	goto use_all;
 
+      case VIEW_CONVERT_EXPR:
+	/* Likewise for a view conversion, but with an additional twist:
+	   it can be on the LHS and, in this case, an access to the full
+	   outer element would mean a killing def.  So we need to punt
+	   if we haven't already a full access to the current element,
+	   because we cannot pretend to have a killing def if we only
+	   have a partial access at some level.  */
+	if (is_output && !use_all_p && inner != expr)
+	  disable_scalarization = true;
+	goto use_all;
+
       case WITH_SIZE_EXPR:
 	/* This is a transparent wrapper.  The entire inner expression really
 	   is being used.  */
@@ -1273,16 +1284,13 @@ instantiate_element (struct sra_elt *elt)
       DECL_SIZE_UNIT (var) = DECL_SIZE_UNIT (elt->element);
 
       elt->in_bitfld_block = 1;
-      elt->replacement = build3 (BIT_FIELD_REF, elt->type, var,
-				 DECL_SIZE (var),
-				 BITS_BIG_ENDIAN
-				 ? size_binop (MINUS_EXPR,
-					       TYPE_SIZE (elt->type),
-					       DECL_SIZE (var))
-				 : bitsize_int (0));
-      if (!INTEGRAL_TYPE_P (elt->type)
-	  || TYPE_UNSIGNED (elt->type))
-	BIT_FIELD_REF_UNSIGNED (elt->replacement) = 1;
+      elt->replacement = fold_build3 (BIT_FIELD_REF, elt->type, var,
+				      DECL_SIZE (var),
+				      BYTES_BIG_ENDIAN
+				      ? size_binop (MINUS_EXPR,
+						    TYPE_SIZE (elt->type),
+						    DECL_SIZE (var))
+				      : bitsize_int (0));
     }
 
   /* For vectors, if used on the left hand side with BIT_FIELD_REF,
@@ -1465,6 +1473,10 @@ try_instantiate_multiple_fields (struct sra_elt *elt, tree f)
   tree type, var;
   struct sra_elt *block;
 
+  /* Pointer fields are typically best handled as standalone entities.  */
+  if (POINTER_TYPE_P (TREE_TYPE (f)))
+    return f;
+
   if (!is_sra_scalar_type (TREE_TYPE (f))
       || !host_integerp (DECL_FIELD_OFFSET (f), 1)
       || !host_integerp (DECL_FIELD_BIT_OFFSET (f), 1)
@@ -1677,12 +1689,16 @@ try_instantiate_multiple_fields (struct sra_elt *elt, tree f)
 
   /* Create the field group as a single variable.  */
 
-  type = lang_hooks.types.type_for_mode (mode, 1);
+  /* We used to create a type for the mode above, but size turns
+     out not to be of mode-size.  As we need a matching type
+     to build a BIT_FIELD_REF, use a nonstandard integer type as
+     fallback.  */
+  type = lang_hooks.types.type_for_size (size, 1);
+  if (!type || TYPE_PRECISION (type) != size)
+    type = build_nonstandard_integer_type (size, 1);
   gcc_assert (type);
   var = build3 (BIT_FIELD_REF, type, NULL_TREE,
-		bitsize_int (size),
-		bitsize_int (bit));
-  BIT_FIELD_REF_UNSIGNED (var) = 1;
+		bitsize_int (size), bitsize_int (bit));
 
   block = instantiate_missing_elements_1 (elt, var, type);
   gcc_assert (block && block->is_scalar);
@@ -1692,11 +1708,10 @@ try_instantiate_multiple_fields (struct sra_elt *elt, tree f)
   if ((bit & ~alchk)
       || (HOST_WIDE_INT)size != tree_low_cst (DECL_SIZE (var), 1))
     {
-      block->replacement = build3 (BIT_FIELD_REF,
-				   TREE_TYPE (block->element), var,
-				   bitsize_int (size),
-				   bitsize_int (bit & ~alchk));
-      BIT_FIELD_REF_UNSIGNED (block->replacement) = 1;
+      block->replacement = fold_build3 (BIT_FIELD_REF,
+					TREE_TYPE (block->element), var,
+					bitsize_int (size),
+					bitsize_int (bit & ~alchk));
     }
 
   block->in_bitfld_block = 2;
@@ -1711,15 +1726,14 @@ try_instantiate_multiple_fields (struct sra_elt *elt, tree f)
 
       gcc_assert (fld && fld->is_scalar && !fld->replacement);
 
-      fld->replacement = build3 (BIT_FIELD_REF, field_type, var,
-				 DECL_SIZE (f),
-				 bitsize_int
-				 ((TREE_INT_CST_LOW (DECL_FIELD_OFFSET (f))
-				   * BITS_PER_UNIT
-				   + (TREE_INT_CST_LOW
-				      (DECL_FIELD_BIT_OFFSET (f))))
-				  & ~alchk));
-      BIT_FIELD_REF_UNSIGNED (fld->replacement) = TYPE_UNSIGNED (field_type);
+      fld->replacement = fold_build3 (BIT_FIELD_REF, field_type, var,
+				      DECL_SIZE (f),
+				      bitsize_int
+				      ((TREE_INT_CST_LOW (DECL_FIELD_OFFSET (f))
+					* BITS_PER_UNIT
+					+ (TREE_INT_CST_LOW
+					   (DECL_FIELD_BIT_OFFSET (f))))
+				       & ~alchk));
       fld->in_bitfld_block = 1;
     }
 
@@ -2061,7 +2075,7 @@ generate_one_element_ref (struct sra_elt *elt, tree base)
 {
   tree field = elt->element;
 
-  /* We can't test elt->in_bitfld_blk here because, when this is
+  /* We can't test elt->in_bitfld_block here because, when this is
      called from instantiate_element, we haven't set this field yet.  */
 
   if (TREE_CODE (field) == BIT_FIELD_REF)
@@ -2127,30 +2141,38 @@ static tree
 sra_build_assignment (tree dst, tree src)
 {
   /* Turning BIT_FIELD_REFs into bit operations enables other passes
-     to do a much better job at optimizing the code.  */
+     to do a much better job at optimizing the code.
+
+     From dst = BIT_FIELD_REF <var, sz, off> we produce
+
+	SR.1 = (scalar type) var;
+	SR.2 = SR.1 >> off;
+	SR.3 = SR.2 & ((1 << sz) - 1);
+	... possible sign extension of SR.3 ...
+	dst = (destination type) SR.3;
+  */
   if (scalar_bitfield_p (src))
     {
-      tree cst, cst2, mask, minshift, maxshift;
-      tree tmp, var, utype, stype;
+      tree var, shift, width;
+      tree utype, stype, stmp, utmp, dtmp;
       tree list, stmt;
-      bool unsignedp = BIT_FIELD_REF_UNSIGNED (src);
+      bool unsignedp = (INTEGRAL_TYPE_P (TREE_TYPE (src))
+			? TYPE_UNSIGNED (TREE_TYPE (src)) : true);
 
       var = TREE_OPERAND (src, 0);
-      cst = TREE_OPERAND (src, 2);
-      cst2 = size_binop (PLUS_EXPR, TREE_OPERAND (src, 1),
-			 TREE_OPERAND (src, 2));
-
-      if (BITS_BIG_ENDIAN)
+      width = TREE_OPERAND (src, 1);
+      /* The offset needs to be adjusted to a right shift quantity
+	 depending on the endianness.  */
+      if (BYTES_BIG_ENDIAN)
 	{
-	  maxshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst);
-	  minshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst2);
+	  tree tmp = size_binop (PLUS_EXPR, width, TREE_OPERAND (src, 2));
+	  shift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), tmp);
 	}
       else
-	{
-	  maxshift = cst2;
-	  minshift = cst;
-	}
+	shift = TREE_OPERAND (src, 2);
 
+      /* In weird cases we have non-integral types for the source or
+	 destination object.
+	 ??? For unknown reasons we also want an unsigned scalar type.  */
       stype = TREE_TYPE (var);
       if (!INTEGRAL_TYPE_P (stype))
 	stype = lang_hooks.types.type_for_size (TREE_INT_CST_LOW
@@ -2166,117 +2188,102 @@ sra_build_assignment (tree dst, tree src)
 	utype = unsigned_type_for (utype);
 
       list = NULL;
+      stmp = make_rename_temp (stype, "SR");
 
-      cst2 = size_binop (MINUS_EXPR, maxshift, minshift);
-      if (tree_int_cst_equal (cst2, TYPE_SIZE (utype)))
-	{
-	  unsignedp = true;
-	  mask = NULL_TREE;
-	}
-      else
-	{
-	  mask = build_int_cst_wide (utype, 1, 0);
-	  cst = int_const_binop (LSHIFT_EXPR, mask, cst2, true);
-	  mask = int_const_binop (MINUS_EXPR, cst, mask, true);
-	}
-
-      tmp = make_rename_temp (stype, "SR");
-      if (TYPE_MAIN_VARIANT (TREE_TYPE (var)) != TYPE_MAIN_VARIANT (stype))
+      /* Convert the base var of the BIT_FIELD_REF to the scalar type
+	 we use for computation if we cannot use it directly.  */
+      if (!useless_type_conversion_p (stype, TREE_TYPE (var)))
 	{
 	  if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
-	    stmt = build_gimple_modify_stmt (tmp,
+	    stmt = build_gimple_modify_stmt (stmp,
 					     fold_convert (stype, var));
 	  else
-	    stmt = build_gimple_modify_stmt (tmp,
+	    stmt = build_gimple_modify_stmt (stmp,
 					     fold_build1 (VIEW_CONVERT_EXPR,
 							  stype, var));
 	  append_to_statement_list (stmt, &list);
-
-	  var = tmp;
+	  var = stmp;
 	}
 
-      if (!integer_zerop (minshift))
+      if (!integer_zerop (shift))
 	{
-	  tmp = make_rename_temp (stype, "SR");
-	  stmt = build_gimple_modify_stmt (tmp,
+	  stmt = build_gimple_modify_stmt (stmp,
 					   fold_build2 (RSHIFT_EXPR, stype,
-							var, minshift));
+							var, shift));
 	  append_to_statement_list (stmt, &list);
-
-	  var = tmp;
+	  var = stmp;
 	}
 
-      if (TYPE_MAIN_VARIANT (utype) != TYPE_MAIN_VARIANT (stype))
+      /* If we need a masking operation, produce one.  */
+      if (TREE_INT_CST_LOW (width) == TYPE_PRECISION (stype))
+	unsignedp = true;
+      else
 	{
-	  if (!mask && unsignedp
-	      && (TYPE_MAIN_VARIANT (utype)
-		  == TYPE_MAIN_VARIANT (TREE_TYPE (dst))))
-	    tmp = dst;
-	  else
-	    tmp = make_rename_temp (utype, "SR");
+	  tree one = build_int_cst_wide (stype, 1, 0);
+	  tree mask = int_const_binop (LSHIFT_EXPR, one, width, 0);
+	  mask = int_const_binop (MINUS_EXPR, mask, one, 0);
 
-	  stmt = build_gimple_modify_stmt (tmp, fold_convert (utype, var));
+	  stmt = build_gimple_modify_stmt (stmp,
					   fold_build2 (BIT_AND_EXPR, stype,
							var, mask));
 	  append_to_statement_list (stmt, &list);
-
-	  var = tmp;
+	  var = stmp;
 	}
 
-      if (mask)
+      /* After shifting and masking, convert to the target type.  */
+      utmp = stmp;
+      if (!useless_type_conversion_p (utype, stype))
 	{
-	  if (!unsignedp
-	      || (TYPE_MAIN_VARIANT (TREE_TYPE (dst))
-		  != TYPE_MAIN_VARIANT (utype)))
-	    tmp = make_rename_temp (utype, "SR");
-	  else
-	    tmp = dst;
+	  utmp = make_rename_temp (utype, "SR");
 
-	  stmt = build_gimple_modify_stmt (tmp,
-					   fold_build2 (BIT_AND_EXPR, utype,
-							var, mask));
+	  stmt = build_gimple_modify_stmt (utmp, fold_convert (utype, var));
 	  append_to_statement_list (stmt, &list);
 
-	  var = tmp;
+	  var = utmp;
 	}
 
+      /* Perform sign extension, if required.
+	 ??? This should never be necessary.  */
       if (!unsignedp)
 	{
 	  tree signbit = int_const_binop (LSHIFT_EXPR,
 					  build_int_cst_wide (utype, 1, 0),
-					  size_binop (MINUS_EXPR, cst2,
-						      bitsize_int (1)),
-					  true);
+					  size_binop (MINUS_EXPR, width,
						      bitsize_int (1)), 0);
 
-	  tmp = make_rename_temp (utype, "SR");
-	  stmt = build_gimple_modify_stmt (tmp,
+	  stmt = build_gimple_modify_stmt (utmp,
 					   fold_build2 (BIT_XOR_EXPR, utype,
 							var, signbit));
 	  append_to_statement_list (stmt, &list);
 
-	  var = tmp;
-
-	  if (TYPE_MAIN_VARIANT (TREE_TYPE (dst)) != TYPE_MAIN_VARIANT (utype))
-	    tmp = make_rename_temp (utype, "SR");
-	  else
-	    tmp = dst;
-
-	  stmt = build_gimple_modify_stmt (tmp,
+	  stmt = build_gimple_modify_stmt (utmp,
 					   fold_build2 (MINUS_EXPR, utype,
-							var, signbit));
+							utmp, signbit));
 	  append_to_statement_list (stmt, &list);
 
-	  var = tmp;
+	  var = utmp;
 	}
 
-      if (var != dst)
+      /* Finally, move and convert to the destination.  */
+      if (!useless_type_conversion_p (TREE_TYPE (dst), TREE_TYPE (var)))
 	{
 	  if (INTEGRAL_TYPE_P (TREE_TYPE (dst)))
 	    var = fold_convert (TREE_TYPE (dst), var);
 	  else
 	    var = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (dst), var);
 
-	  stmt = build_gimple_modify_stmt (dst, var);
-	  append_to_statement_list (stmt, &list);
+	  /* If the destination is not a register the conversion needs
+	     to be a separate statement.  */
+	  if (!is_gimple_reg (dst))
+	    {
+	      dtmp = make_rename_temp (TREE_TYPE (dst), "SR");
+	      stmt = build_gimple_modify_stmt (dtmp, var);
+	      append_to_statement_list (stmt, &list);
+	      var = dtmp;
+	    }
 	}
+      stmt = build_gimple_modify_stmt (dst, var);
+      append_to_statement_list (stmt, &list);
 
       return list;
     }
@@ -2288,7 +2295,13 @@ sra_build_assignment (tree dst, tree src)
      Since such accesses under different types require compatibility
      anyway, there's little point in making tests and/or adding
      conversions to ensure the types of src and dst are the same.
-     So we just assume type differences at this point are ok.  */
+     So we just assume type differences at this point are ok.
+     The only exception we make here are pointer types, which can be different
+     in e.g. structurally equal, but non-identical RECORD_TYPEs.  */
+  if (POINTER_TYPE_P (TREE_TYPE (dst))
+      && !useless_type_conversion_p (TREE_TYPE (dst), TREE_TYPE (src)))
+    src = fold_convert (TREE_TYPE (dst), src);
+
   return build_gimple_modify_stmt (dst, src);
 }
 
@@ -2322,7 +2335,7 @@ sra_build_bf_assignment (tree dst, tree src)
 		     fold_convert (bitsizetype, TREE_OPERAND (dst, 1)),
 		     cst);
 
-  if (BITS_BIG_ENDIAN)
+  if (BYTES_BIG_ENDIAN)
     {
       maxshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst);
       minshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst2);
@@ -2343,8 +2356,14 @@ sra_build_bf_assignment (tree dst, tree src)
   utype = unsigned_type_for (type);
 
   mask = build_int_cst_wide (utype, 1, 0);
-  cst = int_const_binop (LSHIFT_EXPR, mask, maxshift, true);
-  cst2 = int_const_binop (LSHIFT_EXPR, mask, minshift, true);
+  if (TREE_INT_CST_LOW (maxshift) == TYPE_PRECISION (utype))
+    cst = build_int_cst_wide (utype, 0, 0);
+  else
+    cst = int_const_binop (LSHIFT_EXPR, mask, maxshift, true);
+  if (integer_zerop (minshift))
+    cst2 = mask;
+  else
+    cst2 = int_const_binop (LSHIFT_EXPR, mask, minshift, true);
   mask = int_const_binop (MINUS_EXPR, cst, cst2, true);
   mask = fold_build1 (BIT_NOT_EXPR, utype, mask);
 
@@ -2487,6 +2506,7 @@ sra_build_elt_assignment (struct sra_elt *elt, tree src)
 	  if (elt->in_bitfld_block == 2
 	      && TREE_CODE (src) == BIT_FIELD_REF)
 	    {
+	      tmp = src;
 	      cst = TYPE_SIZE (TREE_TYPE (var));
 	      cst2 = size_binop (MINUS_EXPR, TREE_OPERAND (src, 2),
 				 TREE_OPERAND (dst, 2));
@@ -2508,13 +2528,13 @@ sra_build_elt_assignment (struct sra_elt *elt, tree src)
 		{
 		  list = NULL;
 
-		  if (!INTEGRAL_TYPE_P (TREE_TYPE (src))
-		      || !TYPE_UNSIGNED (TREE_TYPE (src)))
+		  if (!INTEGRAL_TYPE_P (TREE_TYPE (src)))
 		    src = fold_build1 (VIEW_CONVERT_EXPR,
 				       lang_hooks.types.type_for_size
 				       (TREE_INT_CST_LOW
 					(TYPE_SIZE (TREE_TYPE (src))),
 					1), src);
+		  gcc_assert (TYPE_UNSIGNED (TREE_TYPE (src)));
 
 		  tmp = make_rename_temp (TREE_TYPE (src), "SR");
 		  stmt = build_gimple_modify_stmt (tmp, src);
@@ -2532,8 +2552,7 @@ sra_build_elt_assignment (struct sra_elt *elt, tree src)
 		}
 	      else
 		{
-		  src = fold_build3 (BIT_FIELD_REF, TREE_TYPE (var), src, cst, cst2);
-		  BIT_FIELD_REF_UNSIGNED (src) = 1;
+		  src = fold_convert (TREE_TYPE (var), tmp);
 		}
 
 	      return sra_build_assignment (var, src);
@@ -2612,7 +2631,33 @@ generate_element_copy (struct sra_elt *dst, struct sra_elt *src, tree *list_p)
 	      continue;
 	    }
 
-	  gcc_assert (sc);
+
+	  /* If DST and SRC are structs with the same elements, but do not have
+	     the same TYPE_MAIN_VARIANT, then lookup of DST FIELD_DECL in SRC
+	     will fail.  Try harder by finding the corresponding FIELD_DECL
+	     in SRC.  */
+	  if (!sc)
+	    {
+	      tree f;
+
+	      gcc_assert (useless_type_conversion_p (dst->type, src->type));
+	      gcc_assert (TREE_CODE (dc->element) == FIELD_DECL);
+	      for (f = TYPE_FIELDS (src->type); f ; f = TREE_CHAIN (f))
+		if (simple_cst_equal (DECL_FIELD_OFFSET (f),
+				      DECL_FIELD_OFFSET (dc->element)) > 0
+		    && simple_cst_equal (DECL_FIELD_BIT_OFFSET (f),
+					 DECL_FIELD_BIT_OFFSET (dc->element)) > 0
+		    && simple_cst_equal (DECL_SIZE (f),
+					 DECL_SIZE (dc->element)) > 0
+		    && (useless_type_conversion_p (TREE_TYPE (dc->element),
+						   TREE_TYPE (f))
+			|| (POINTER_TYPE_P (TREE_TYPE (dc->element))
+			    && POINTER_TYPE_P (TREE_TYPE (f)))))
+		  break;
+	      gcc_assert (f != NULL_TREE);
+	      sc = lookup_element (src, f, NULL, NO_INSERT);
+	    }
+
 	  generate_element_copy (dc, sc, list_p);
 	}
 
@@ -2870,7 +2915,7 @@ struct bitfield_overlap_info
 };
 
 /* Return true if a BIT_FIELD_REF<(FLD->parent), BLEN, BPOS>
-   expression (refereced as BF below) accesses any of the bits in FLD,
+   expression (referenced as BF below) accesses any of the bits in FLD,
    false if it doesn't.  If DATA is non-null, its field_len and
    field_pos are filled in such that BIT_FIELD_REF<(FLD->parent),
    field_len, field_pos> (referenced as BFLD below) represents the
@@ -2900,7 +2945,8 @@ bitfield_overlaps_p (tree blen, tree bpos, struct sra_elt *fld,
   else if (TREE_CODE (fld->element) == INTEGER_CST)
     {
       flen = fold_convert (bitsizetype, TYPE_SIZE (fld->type));
-      fpos = size_binop (MULT_EXPR, flen, fld->element);
+      fpos = fold_convert (bitsizetype, fld->element);
+      fpos = size_binop (MULT_EXPR, flen, fpos);
     }
   else
     gcc_unreachable ();
@@ -2976,16 +3022,22 @@ sra_explode_bitfield_assignment (tree var, tree vpos, bool to_var,
 
       if (fld->replacement)
 	{
-	  tree infld, invar, st;
+	  tree infld, invar, st, type;
 
 	  infld = fld->replacement;
 
+	  type = TREE_TYPE (infld);
+	  if (TYPE_PRECISION (type) != TREE_INT_CST_LOW (flen))
+	    type = lang_hooks.types.type_for_size (TREE_INT_CST_LOW (flen), 1);
+	  else
+	    type = unsigned_type_for (type);
+
 	  if (TREE_CODE (infld) == BIT_FIELD_REF)
 	    {
 	      fpos = size_binop (PLUS_EXPR, fpos, TREE_OPERAND (infld, 2));
 	      infld = TREE_OPERAND (infld, 0);
 	    }
-	  else if (BITS_BIG_ENDIAN && DECL_P (fld->element)
+	  else if (BYTES_BIG_ENDIAN && DECL_P (fld->element)
 		   && !tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (infld)),
 					   DECL_SIZE (fld->element)))
 	    {
@@ -2995,20 +3047,14 @@ sra_explode_bitfield_assignment (tree var, tree vpos, bool to_var,
 				 DECL_SIZE (fld->element));
 	    }
 
-	  infld = fold_build3 (BIT_FIELD_REF,
-			       lang_hooks.types.type_for_size
-			       (TREE_INT_CST_LOW (flen), 1),
-			       infld, flen, fpos);
-	  BIT_FIELD_REF_UNSIGNED (infld) = 1;
+	  infld = fold_build3 (BIT_FIELD_REF, type, infld, flen, fpos);
 
 	  invar = size_binop (MINUS_EXPR, flp.field_pos, bpos);
 	  if (flp.overlap_pos)
 	    invar = size_binop (PLUS_EXPR, invar, flp.overlap_pos);
 	  invar = size_binop (PLUS_EXPR, invar, vpos);
 
-	  invar = fold_build3 (BIT_FIELD_REF, TREE_TYPE (infld),
-			       var, flen, invar);
-	  BIT_FIELD_REF_UNSIGNED (invar) = 1;
+	  invar = fold_build3 (BIT_FIELD_REF, type, var, flen, invar);
 
 	  if (to_var)
 	    st = sra_build_bf_assignment (invar, infld);
@@ -3140,7 +3186,7 @@ scalarize_use (struct sra_elt *elt, tree *expr_p, block_stmt_iterator *bsi,
 	  if (!elt->use_block_copy)
 	    {
 	      tree type = TREE_TYPE (bfexpr);
-	      tree var = make_rename_temp (type, "SR"), tmp, st;
+	      tree var = make_rename_temp (type, "SR"), tmp, st, vpos;
 
 	      GIMPLE_STMT_OPERAND (stmt, 0) = var;
 	      update = true;
@@ -3155,8 +3201,16 @@ scalarize_use (struct sra_elt *elt, tree *expr_p, block_stmt_iterator *bsi,
 		  var = tmp;
 		}
 
+	      /* If VAR is wider than BLEN bits, it is padded at the
+		 most-significant end.  We want to set VPOS such that
+		 <BIT_FIELD_REF VAR, BLEN, VPOS> would refer to the
+		 least-significant BLEN bits of VAR.  */
+	      if (BYTES_BIG_ENDIAN)
+		vpos = size_binop (MINUS_EXPR, TYPE_SIZE (type), blen);
+	      else
+		vpos = bitsize_int (0);
 	      sra_explode_bitfield_assignment
-		(var, bitsize_int (0), false, &listafter, blen, bpos, elt);
+		(var, vpos, false, &listafter, blen, bpos, elt);
 	    }
 	  else
 	    sra_sync_for_bitfield_assignment
@@ -3192,7 +3246,7 @@ scalarize_use (struct sra_elt *elt, tree *expr_p, block_stmt_iterator *bsi,
 	  if (!elt->use_block_copy)
 	    {
 	      tree type = TREE_TYPE (bfexpr);
-	      tree var;
+	      tree var, vpos;
 
 	      if (!TYPE_UNSIGNED (type))
 		type = unsigned_type_for (type);
@@ -3203,8 +3257,16 @@ scalarize_use (struct sra_elt *elt, tree *expr_p, block_stmt_iterator *bsi,
 		   (var, build_int_cst_wide (type, 0, 0)),
 		   &list);
 
+	      /* If VAR is wider than BLEN bits, it is padded at the
+		 most-significant end.  We want to set VPOS such that
+		 <BIT_FIELD_REF VAR, BLEN, VPOS> would refer to the
+		 least-significant BLEN bits of VAR.  */
+	      if (BYTES_BIG_ENDIAN)
+		vpos = size_binop (MINUS_EXPR, TYPE_SIZE (type), blen);
+	      else
+		vpos = bitsize_int (0);
 	      sra_explode_bitfield_assignment
-		(var, bitsize_int (0), true, &list, blen, bpos, elt);
+		(var, vpos, true, &list, blen, bpos, elt);
 
 	      GIMPLE_STMT_OPERAND (stmt, 1) = var;
 	      update = true;
@@ -3331,19 +3393,20 @@ static void
 scalarize_init (struct sra_elt *lhs_elt, tree rhs, block_stmt_iterator *bsi)
 {
   bool result = true;
-  tree list = NULL;
+  tree list = NULL, init_list = NULL;
 
   /* Generate initialization statements for all members extant in the RHS.  */
   if (rhs)
     {
       /* Unshare the expression just in case this is from a decl's initial.  */
       rhs = unshare_expr (rhs);
-      result = generate_element_init (lhs_elt, rhs, &list);
+      result = generate_element_init (lhs_elt, rhs, &init_list);
     }
 
   /* CONSTRUCTOR is defined such that any member not mentioned is assigned
      a zero value.  Initialize the rest of the instantiated elements.  */
   generate_element_zero (lhs_elt, &list);
+  append_to_statement_list (init_list, &list);
 
   if (!result)
     {
@@ -3631,8 +3694,10 @@ gate_sra (void)
   return flag_tree_sra != 0;
 }
 
-struct tree_opt_pass pass_sra_early =
+struct gimple_opt_pass pass_sra_early =
 {
+ {
+  GIMPLE_PASS,
   "esra",				/* name */
   gate_sra,				/* gate */
   tree_sra_early,			/* execute */
@@ -3647,12 +3712,14 @@ struct tree_opt_pass pass_sra_early =
   TODO_dump_func
   | TODO_update_ssa
   | TODO_ggc_collect
-  | TODO_verify_ssa,			/* todo_flags_finish */
-  0					/* letter */
+  | TODO_verify_ssa			/* todo_flags_finish */
+ }
 };
 
-struct tree_opt_pass pass_sra =
+struct gimple_opt_pass pass_sra =
 {
+ {
+  GIMPLE_PASS,
   "sra",				/* name */
   gate_sra,				/* gate */
   tree_sra,				/* execute */
@@ -3667,6 +3734,6 @@ struct tree_opt_pass pass_sra =
   TODO_dump_func
   | TODO_update_ssa
   | TODO_ggc_collect
-  | TODO_verify_ssa,			/* todo_flags_finish */
-  0					/* letter */
+  | TODO_verify_ssa			/* todo_flags_finish */
+ }
 };
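
Note: the shift-and-mask lowering that the rewritten sra_build_assignment documents in
its comment (SR.1 = convert, SR.2 = shift, SR.3 = mask, optional sign extension) can be
illustrated with a small standalone C program.  This is only a sketch of the technique
on a fixed 32-bit word, not GCC code; extract_bitfield, extract_bitfield_signed and
WORD_BIG_ENDIAN are invented names for the demonstration.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for GCC's BYTES_BIG_ENDIAN target macro.  */
#define WORD_BIG_ENDIAN 0

/* Read SZ bits at bit offset OFF from the 32-bit word VAR, zero-extended:
   the SR.1 (convert) / SR.2 (shift) / SR.3 (mask) sequence from the
   sra_build_assignment comment.  */
static uint32_t
extract_bitfield (uint32_t var, unsigned off, unsigned sz)
{
  /* Adjust the offset to a right-shift quantity depending on the byte
     order, as the patch does when computing `shift'.  */
  unsigned shift = WORD_BIG_ENDIAN ? 32 - (off + sz) : off;
  uint32_t x = var >> shift;		/* SR.2 = SR.1 >> off */

  /* Mask only when the field is narrower than the whole word, mirroring
     the TREE_INT_CST_LOW (width) == TYPE_PRECISION (stype) test.  */
  if (sz < 32)
    x &= ((uint32_t) 1 << sz) - 1;	/* SR.3 = SR.2 & ((1 << sz) - 1) */
  return x;
}

/* The "possible sign extension of SR.3" step: flip the sign bit and
   subtract it, the same BIT_XOR_EXPR/MINUS_EXPR trick the patch emits
   for signed fields.  */
static int32_t
extract_bitfield_signed (uint32_t var, unsigned off, unsigned sz)
{
  uint32_t x = extract_bitfield (var, off, sz);
  uint32_t signbit = (uint32_t) 1 << (sz - 1);
  return (int32_t) ((x ^ signbit) - signbit);
}

int
main (void)
{
  uint32_t word = 0x00000e00;	/* a 4-bit field with value 0xe at bit 8 */
  printf ("%u\n", extract_bitfield (word, 8, 4));	 /* prints 14 */
  printf ("%d\n", extract_bitfield_signed (word, 8, 4)); /* prints -2 */
  return 0;
}

On a little-endian word, extract_bitfield (0x00000e00, 8, 4) shifts right by 8 and masks
with 0xf; the signed variant then sign-extends the 4-bit value 0xe to -2, exactly the
statement sequence the pass builds with RSHIFT_EXPR, BIT_AND_EXPR, BIT_XOR_EXPR and
MINUS_EXPR.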