/* Scalar Replacement of Aggregates (SRA) converts some structure
references into scalar references, exposing them to the scalar
optimizers.
- Copyright (C) 2003, 2004, 2005, 2006, 2007
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>
#include "langhooks.h"
#include "tree-inline.h"
#include "tree-flow.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "timevar.h"
/* Forward declarations. */
static tree generate_element_ref (struct sra_elt *);
-static tree sra_build_assignment (tree dst, tree src);
-static void mark_all_v_defs (tree list);
+static gimple_seq sra_build_assignment (tree dst, tree src);
+static void mark_all_v_defs_seq (gimple_seq);
+static void mark_all_v_defs_stmt (gimple);
\f
/* Return true if DECL is an SRA candidate. */
{
/* Reject incorrectly represented bit fields. */
if (DECL_BIT_FIELD (t)
+ && INTEGRAL_TYPE_P (TREE_TYPE (t))
&& (tree_low_cst (DECL_SIZE (t), 1)
!= TYPE_PRECISION (TREE_TYPE (t))))
goto fail;
return false;
}
+/* Returns true if the TYPE is one of the available va_list types.
+ Otherwise it returns false.
+ Note that for multiple calling conventions there can be more
+ than just one va_list type present. */
+
+static bool
+is_va_list_type (tree type)
+{
+ tree h;
+
+ if (type == NULL_TREE)
+ return false;
+ h = targetm.canonical_va_list_type (type);
+ if (h == NULL_TREE)
+ return false;
+ if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (h))
+ return true;
+ return false;
+}
+
/* Return true if DECL can be decomposed into a set of independent
(though not necessarily scalar) variables. */
/* HACK: if we decompose a va_list_type_node before inlining, then we'll
confuse tree-stdarg.c, and we won't be able to figure out which and
how many arguments are accessed. This really should be improved in
- tree-stdarg.c, as the decomposition is truely a win. This could also
+ tree-stdarg.c, as the decomposition is truly a win. This could also
be fixed if the stdarg pass ran early, but this can't be done until
we have aliasing information early too. See PR 30791. */
- if (early_sra
- && TYPE_MAIN_VARIANT (TREE_TYPE (var))
- == TYPE_MAIN_VARIANT (va_list_type_node))
+ if (early_sra && is_va_list_type (TREE_TYPE (var)))
return false;
return true;
static hashval_t
sra_elt_hash (const void *x)
{
- const struct sra_elt *e = x;
+ const struct sra_elt *const e = (const struct sra_elt *) x;
const struct sra_elt *p;
hashval_t h;
static int
sra_elt_eq (const void *x, const void *y)
{
- const struct sra_elt *a = x;
- const struct sra_elt *b = y;
+ const struct sra_elt *const a = (const struct sra_elt *) x;
+ const struct sra_elt *const b = (const struct sra_elt *) y;
tree ae, be;
const struct sra_elt *ap = a->parent;
const struct sra_elt *bp = b->parent;
elt = *slot;
if (!elt && insert == INSERT)
{
- *slot = elt = obstack_alloc (&sra_obstack, sizeof (*elt));
+ *slot = elt = XOBNEW (&sra_obstack, struct sra_elt);
memset (elt, 0, sizeof (*elt));
elt->parent = parent;
references, and categorize them. */
/* A set of callbacks for phases 2 and 4. They'll be invoked for the
- various kinds of references seen. In all cases, *BSI is an iterator
+ various kinds of references seen. In all cases, *GSI is an iterator
pointing to the statement being processed. */
struct sra_walk_fns
{
is a left-hand-side reference. USE_ALL is true if we saw something we
couldn't quite identify and had to force the use of the entire object. */
void (*use) (struct sra_elt *elt, tree *expr_p,
- block_stmt_iterator *bsi, bool is_output, bool use_all);
+ gimple_stmt_iterator *gsi, bool is_output, bool use_all);
/* Invoked when we have a copy between two scalarizable references. */
void (*copy) (struct sra_elt *lhs_elt, struct sra_elt *rhs_elt,
- block_stmt_iterator *bsi);
+ gimple_stmt_iterator *gsi);
/* Invoked when ELT is initialized from a constant. VALUE may be NULL,
in which case it should be treated as an empty CONSTRUCTOR. */
- void (*init) (struct sra_elt *elt, tree value, block_stmt_iterator *bsi);
+ void (*init) (struct sra_elt *elt, tree value, gimple_stmt_iterator *gsi);
/* Invoked when we have a copy between one scalarizable reference ELT
and one non-scalarizable reference OTHER without side-effects.
IS_OUTPUT is true if ELT is on the left-hand side. */
void (*ldst) (struct sra_elt *elt, tree other,
- block_stmt_iterator *bsi, bool is_output);
+ gimple_stmt_iterator *gsi, bool is_output);
/* True during phase 2, false during phase 4. */
/* ??? This is a hack. */
If we find one, invoke FNS->USE. */
static void
-sra_walk_expr (tree *expr_p, block_stmt_iterator *bsi, bool is_output,
+sra_walk_expr (tree *expr_p, gimple_stmt_iterator *gsi, bool is_output,
const struct sra_walk_fns *fns)
{
tree expr = *expr_p;
if (disable_scalarization)
elt->cannot_scalarize = true;
else
- fns->use (elt, expr_p, bsi, is_output, use_all_p);
+ fns->use (elt, expr_p, gsi, is_output, use_all_p);
}
return;
if (elt)
elt->is_vector_lhs = true;
}
+
/* A bit field reference (access to *multiple* fields simultaneously)
- is not currently scalarized. Consider this an access to the
- complete outer element, to which walk_tree will bring us next. */
-
+ is not currently scalarized. Consider this an access to the full
+ outer element, to which walk_tree will bring us next. */
goto use_all;
- case VIEW_CONVERT_EXPR:
- case NOP_EXPR:
- /* Similarly, a view/nop explicitly wants to look at an object in a
+ CASE_CONVERT:
+ /* Similarly, a nop explicitly wants to look at an object in a
type other than the one we've scalarized. */
goto use_all;
+ case VIEW_CONVERT_EXPR:
+ /* Likewise for a view conversion, but with an additional twist:
+ it can be on the LHS and, in this case, an access to the full
+ outer element would mean a killing def. So we need to punt
+ if we haven't already a full access to the current element,
+ because we cannot pretend to have a killing def if we only
+ have a partial access at some level. */
+ if (is_output && !use_all_p && inner != expr)
+ disable_scalarization = true;
+ goto use_all;
+
case WITH_SIZE_EXPR:
/* This is a transparent wrapper. The entire inner expression really
is being used. */
}
}
-/* Walk a TREE_LIST of values looking for scalarizable aggregates.
- If we find one, invoke FNS->USE. */
-
-static void
-sra_walk_tree_list (tree list, block_stmt_iterator *bsi, bool is_output,
- const struct sra_walk_fns *fns)
-{
- tree op;
- for (op = list; op ; op = TREE_CHAIN (op))
- sra_walk_expr (&TREE_VALUE (op), bsi, is_output, fns);
-}
-
-/* Walk the arguments of a CALL_EXPR looking for scalarizable aggregates.
+/* Walk the arguments of a GIMPLE_CALL looking for scalarizable aggregates.
If we find one, invoke FNS->USE. */
static void
-sra_walk_call_expr (tree expr, block_stmt_iterator *bsi,
+sra_walk_gimple_call (gimple stmt, gimple_stmt_iterator *gsi,
const struct sra_walk_fns *fns)
{
int i;
- int nargs = call_expr_nargs (expr);
+ int nargs = gimple_call_num_args (stmt);
+
for (i = 0; i < nargs; i++)
- sra_walk_expr (&CALL_EXPR_ARG (expr, i), bsi, false, fns);
+ sra_walk_expr (gimple_call_arg_ptr (stmt, i), gsi, false, fns);
+
+ if (gimple_call_lhs (stmt))
+ sra_walk_expr (gimple_call_lhs_ptr (stmt), gsi, true, fns);
}
-/* Walk the inputs and outputs of an ASM_EXPR looking for scalarizable
+/* Walk the inputs and outputs of a GIMPLE_ASM looking for scalarizable
aggregates. If we find one, invoke FNS->USE. */
static void
-sra_walk_asm_expr (tree expr, block_stmt_iterator *bsi,
+sra_walk_gimple_asm (gimple stmt, gimple_stmt_iterator *gsi,
const struct sra_walk_fns *fns)
{
- sra_walk_tree_list (ASM_INPUTS (expr), bsi, false, fns);
- sra_walk_tree_list (ASM_OUTPUTS (expr), bsi, true, fns);
+ size_t i;
+ for (i = 0; i < gimple_asm_ninputs (stmt); i++)
+ sra_walk_expr (&TREE_VALUE (gimple_asm_input_op (stmt, i)), gsi, false, fns);
+ for (i = 0; i < gimple_asm_noutputs (stmt); i++)
+ sra_walk_expr (&TREE_VALUE (gimple_asm_output_op (stmt, i)), gsi, true, fns);
}
-/* Walk a GIMPLE_MODIFY_STMT and categorize the assignment appropriately. */
+/* Walk a GIMPLE_ASSIGN and categorize the assignment appropriately. */
static void
-sra_walk_gimple_modify_stmt (tree expr, block_stmt_iterator *bsi,
- const struct sra_walk_fns *fns)
+sra_walk_gimple_assign (gimple stmt, gimple_stmt_iterator *gsi,
+ const struct sra_walk_fns *fns)
{
- struct sra_elt *lhs_elt, *rhs_elt;
+ struct sra_elt *lhs_elt = NULL, *rhs_elt = NULL;
tree lhs, rhs;
- lhs = GIMPLE_STMT_OPERAND (expr, 0);
- rhs = GIMPLE_STMT_OPERAND (expr, 1);
+ /* If there is more than one operand on the RHS, only walk the LHS. */
+ if (!gimple_assign_single_p (stmt))
+ {
+ sra_walk_expr (gimple_assign_lhs_ptr (stmt), gsi, true, fns);
+ return;
+ }
+
+ lhs = gimple_assign_lhs (stmt);
+ rhs = gimple_assign_rhs1 (stmt);
lhs_elt = maybe_lookup_element_for_expr (lhs);
rhs_elt = maybe_lookup_element_for_expr (rhs);
/* If both sides are scalarizable, this is a COPY operation. */
if (lhs_elt && rhs_elt)
{
- fns->copy (lhs_elt, rhs_elt, bsi);
+ fns->copy (lhs_elt, rhs_elt, gsi);
return;
}
if (rhs_elt)
{
if (!rhs_elt->is_scalar && !TREE_SIDE_EFFECTS (lhs))
- fns->ldst (rhs_elt, lhs, bsi, false);
+ fns->ldst (rhs_elt, lhs, gsi, false);
else
- fns->use (rhs_elt, &GIMPLE_STMT_OPERAND (expr, 1), bsi, false, false);
+ fns->use (rhs_elt, gimple_assign_rhs1_ptr (stmt), gsi, false, false);
}
/* If it isn't scalarizable, there may be scalarizable variables within, so
that the statements get inserted in the proper place, before any
copy-out operations. */
else
- {
- tree call = get_call_expr_in (rhs);
- if (call)
- sra_walk_call_expr (call, bsi, fns);
- else
- sra_walk_expr (&GIMPLE_STMT_OPERAND (expr, 1), bsi, false, fns);
- }
+ sra_walk_expr (gimple_assign_rhs1_ptr (stmt), gsi, false, fns);
/* Likewise, handle the LHS being scalarizable. We have cases similar
to those above, but also want to handle RHS being constant. */
if (TREE_CODE (rhs) == COMPLEX_EXPR
|| TREE_CODE (rhs) == COMPLEX_CST
|| TREE_CODE (rhs) == CONSTRUCTOR)
- fns->init (lhs_elt, rhs, bsi);
+ fns->init (lhs_elt, rhs, gsi);
/* If this is an assignment from read-only memory, treat this as if
we'd been passed the constructor directly. Invoke INIT. */
&& TREE_STATIC (rhs)
&& TREE_READONLY (rhs)
&& targetm.binds_local_p (rhs))
- fns->init (lhs_elt, DECL_INITIAL (rhs), bsi);
+ fns->init (lhs_elt, DECL_INITIAL (rhs), gsi);
/* If this is a copy from a non-scalarizable lvalue, invoke LDST.
The lvalue requirement prevents us from trying to directly scalarize
the function multiple times, and other evil things. */
else if (!lhs_elt->is_scalar
&& !TREE_SIDE_EFFECTS (rhs) && is_gimple_addressable (rhs))
- fns->ldst (lhs_elt, rhs, bsi, true);
+ fns->ldst (lhs_elt, rhs, gsi, true);
/* Otherwise we're being used in some context that requires the
aggregate to be seen as a whole. Invoke USE. */
else
- fns->use (lhs_elt, &GIMPLE_STMT_OPERAND (expr, 0), bsi, true, false);
+ fns->use (lhs_elt, gimple_assign_lhs_ptr (stmt), gsi, true, false);
}
/* Similarly to above, LHS_ELT being null only means that the LHS as a
whole is not a scalarizable reference. There may be occurrences of
scalarizable variables within, which implies a USE. */
else
- sra_walk_expr (&GIMPLE_STMT_OPERAND (expr, 0), bsi, true, fns);
+ sra_walk_expr (gimple_assign_lhs_ptr (stmt), gsi, true, fns);
}
/* Entry point to the walk functions. Search the entire function,
sra_walk_function (const struct sra_walk_fns *fns)
{
basic_block bb;
- block_stmt_iterator si, ni;
+ gimple_stmt_iterator si, ni;
/* ??? Phase 4 could derive some benefit to walking the function in
dominator tree order. */
FOR_EACH_BB (bb)
- for (si = bsi_start (bb); !bsi_end_p (si); si = ni)
+ for (si = gsi_start_bb (bb); !gsi_end_p (si); si = ni)
{
- tree stmt, t;
- stmt_ann_t ann;
+ gimple stmt;
- stmt = bsi_stmt (si);
- ann = stmt_ann (stmt);
+ stmt = gsi_stmt (si);
ni = si;
- bsi_next (&ni);
+ gsi_next (&ni);
/* If the statement has no virtual operands, then it doesn't
make any structure references that we care about. */
&& ZERO_SSA_OPERANDS (stmt, (SSA_OP_VIRTUAL_DEFS | SSA_OP_VUSE)))
continue;
- switch (TREE_CODE (stmt))
+ switch (gimple_code (stmt))
{
- case RETURN_EXPR:
+ case GIMPLE_RETURN:
/* If we have "return <retval>" then the return value is
already exposed for our pleasure. Walk it as a USE to
- force all the components back in place for the return.
-
- If we have an embedded assignment, then <retval> is of
- a type that gets returned in registers in this ABI, and
- we do not wish to extend their lifetimes. Treat this
- as a USE of the variable on the RHS of this assignment. */
-
- t = TREE_OPERAND (stmt, 0);
- if (t == NULL_TREE)
+ force all the components back in place for the return. */
+ if (gimple_return_retval (stmt) == NULL_TREE)
;
- else if (TREE_CODE (t) == GIMPLE_MODIFY_STMT)
- sra_walk_expr (&GIMPLE_STMT_OPERAND (t, 1), &si, false, fns);
else
- sra_walk_expr (&TREE_OPERAND (stmt, 0), &si, false, fns);
+ sra_walk_expr (gimple_return_retval_ptr (stmt), &si, false,
+ fns);
break;
- case GIMPLE_MODIFY_STMT:
- sra_walk_gimple_modify_stmt (stmt, &si, fns);
+ case GIMPLE_ASSIGN:
+ sra_walk_gimple_assign (stmt, &si, fns);
break;
- case CALL_EXPR:
- sra_walk_call_expr (stmt, &si, fns);
+ case GIMPLE_CALL:
+ sra_walk_gimple_call (stmt, &si, fns);
break;
- case ASM_EXPR:
- sra_walk_asm_expr (stmt, &si, fns);
+ case GIMPLE_ASM:
+ sra_walk_gimple_asm (stmt, &si, fns);
break;
default:
static void
scan_use (struct sra_elt *elt, tree *expr_p ATTRIBUTE_UNUSED,
- block_stmt_iterator *bsi ATTRIBUTE_UNUSED,
+ gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
bool is_output ATTRIBUTE_UNUSED, bool use_all ATTRIBUTE_UNUSED)
{
elt->n_uses += 1;
static void
scan_copy (struct sra_elt *lhs_elt, struct sra_elt *rhs_elt,
- block_stmt_iterator *bsi ATTRIBUTE_UNUSED)
+ gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED)
{
lhs_elt->n_copies += 1;
rhs_elt->n_copies += 1;
static void
scan_init (struct sra_elt *lhs_elt, tree rhs ATTRIBUTE_UNUSED,
- block_stmt_iterator *bsi ATTRIBUTE_UNUSED)
+ gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED)
{
lhs_elt->n_copies += 1;
}
static void
scan_ldst (struct sra_elt *elt, tree other ATTRIBUTE_UNUSED,
- block_stmt_iterator *bsi ATTRIBUTE_UNUSED,
+ gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
bool is_output ATTRIBUTE_UNUSED)
{
elt->n_copies += 1;
DECL_SIZE_UNIT (var) = DECL_SIZE_UNIT (elt->element);
elt->in_bitfld_block = 1;
- elt->replacement = build3 (BIT_FIELD_REF, elt->type, var,
- DECL_SIZE (var),
- BITS_BIG_ENDIAN
- ? size_binop (MINUS_EXPR,
- TYPE_SIZE (elt->type),
- DECL_SIZE (var))
- : bitsize_int (0));
- if (!INTEGRAL_TYPE_P (elt->type)
- || TYPE_UNSIGNED (elt->type))
- BIT_FIELD_REF_UNSIGNED (elt->replacement) = 1;
+ elt->replacement = fold_build3 (BIT_FIELD_REF, elt->type, var,
+ DECL_SIZE (var),
+ BYTES_BIG_ENDIAN
+ ? size_binop (MINUS_EXPR,
+ TYPE_SIZE (elt->type),
+ DECL_SIZE (var))
+ : bitsize_int (0));
}
/* For vectors, if used on the left hand side with BIT_FIELD_REF,
|| (var != elt->replacement
&& TREE_CODE (elt->replacement) == BIT_FIELD_REF))
{
- tree init = sra_build_assignment (var, fold_convert (TREE_TYPE (var),
- integer_zero_node));
- insert_edge_copies (init, ENTRY_BLOCK_PTR);
- mark_all_v_defs (init);
+ gimple_seq init = sra_build_assignment (var,
+ fold_convert (TREE_TYPE (var),
+ integer_zero_node));
+ insert_edge_copies_seq (init, ENTRY_BLOCK_PTR);
+ mark_all_v_defs_seq (init);
}
if (dump_file)
tree type, var;
struct sra_elt *block;
+ /* Pointer fields are typically best handled as standalone entities. */
+ if (POINTER_TYPE_P (TREE_TYPE (f)))
+ return f;
+
if (!is_sra_scalar_type (TREE_TYPE (f))
|| !host_integerp (DECL_FIELD_OFFSET (f), 1)
|| !host_integerp (DECL_FIELD_BIT_OFFSET (f), 1)
/* Create the field group as a single variable. */
- type = lang_hooks.types.type_for_mode (mode, 1);
+ /* We used to create a type for the mode above, but the size turns
+ out not to match the mode size. As we need a matching type
+ to build a BIT_FIELD_REF, use a nonstandard integer type as
+ a fallback. */
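+ /* E.g. a 24-bit field group needs a 24-bit unsigned integer type,
+ which type_for_size will typically not provide. */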
+ type = lang_hooks.types.type_for_size (size, 1);
+ if (!type || TYPE_PRECISION (type) != size)
+ type = build_nonstandard_integer_type (size, 1);
gcc_assert (type);
var = build3 (BIT_FIELD_REF, type, NULL_TREE,
- bitsize_int (size),
- bitsize_int (bit));
- BIT_FIELD_REF_UNSIGNED (var) = 1;
+ bitsize_int (size), bitsize_int (bit));
block = instantiate_missing_elements_1 (elt, var, type);
gcc_assert (block && block->is_scalar);
if ((bit & ~alchk)
|| (HOST_WIDE_INT)size != tree_low_cst (DECL_SIZE (var), 1))
{
- block->replacement = build3 (BIT_FIELD_REF,
- TREE_TYPE (block->element), var,
- bitsize_int (size),
- bitsize_int (bit & ~alchk));
- BIT_FIELD_REF_UNSIGNED (block->replacement) = 1;
+ block->replacement = fold_build3 (BIT_FIELD_REF,
+ TREE_TYPE (block->element), var,
+ bitsize_int (size),
+ bitsize_int (bit & ~alchk));
}
block->in_bitfld_block = 2;
gcc_assert (fld && fld->is_scalar && !fld->replacement);
- fld->replacement = build3 (BIT_FIELD_REF, field_type, var,
- DECL_SIZE (f),
- bitsize_int
- ((TREE_INT_CST_LOW (DECL_FIELD_OFFSET (f))
- * BITS_PER_UNIT
- + (TREE_INT_CST_LOW
- (DECL_FIELD_BIT_OFFSET (f))))
- & ~alchk));
- BIT_FIELD_REF_UNSIGNED (fld->replacement) = TYPE_UNSIGNED (field_type);
+ fld->replacement = fold_build3 (BIT_FIELD_REF, field_type, var,
+ DECL_SIZE (f),
+ bitsize_int
+ ((TREE_INT_CST_LOW (DECL_FIELD_OFFSET (f))
+ * BITS_PER_UNIT
+ + (TREE_INT_CST_LOW
+ (DECL_FIELD_BIT_OFFSET (f))))
+ & ~alchk));
fld->in_bitfld_block = 1;
}
sensible default. */
max_size = SRA_MAX_STRUCTURE_SIZE
? SRA_MAX_STRUCTURE_SIZE
- : MOVE_RATIO * UNITS_PER_WORD;
+ : MOVE_RATIO (optimize_function_for_speed_p (cfun)) * UNITS_PER_WORD;
max_count = SRA_MAX_STRUCTURE_COUNT
? SRA_MAX_STRUCTURE_COUNT
- : MOVE_RATIO;
+ : MOVE_RATIO (optimize_function_for_speed_p (cfun));
full_size = tree_low_cst (size_tree, 1);
full_count = count_type_elements (elt->type, false);
non-scalar. */
static void
-mark_all_v_defs_1 (tree stmt)
+mark_all_v_defs_stmt (gimple stmt)
{
tree sym;
ssa_op_iter iter;
LIST for renaming. */
static void
-mark_all_v_defs (tree list)
+mark_all_v_defs_seq (gimple_seq seq)
{
- if (TREE_CODE (list) != STATEMENT_LIST)
- mark_all_v_defs_1 (list);
- else
- {
- tree_stmt_iterator i;
- for (i = tsi_start (list); !tsi_end_p (i); tsi_next (&i))
- mark_all_v_defs_1 (tsi_stmt (i));
- }
-}
+ gimple_stmt_iterator gsi;
+ for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
+ mark_all_v_defs_stmt (gsi_stmt (gsi));
+}
/* Mark every replacement under ELT with TREE_NO_WARNING. */
{
tree field = elt->element;
- /* We can't test elt->in_bitfld_blk here because, when this is
+ /* We can't test elt->in_bitfld_block here because, when this is
called from instantiate_element, we haven't set this field
yet. */
if (TREE_CODE (field) == BIT_FIELD_REF)
/* Create an assignment statement from SRC to DST. */
-static tree
+static gimple_seq
sra_build_assignment (tree dst, tree src)
{
+ gimple stmt;
+ gimple_seq seq = NULL, seq2 = NULL;
/* Turning BIT_FIELD_REFs into bit operations enables other passes
- to do a much better job at optimizing the code. */
+ to do a much better job at optimizing the code.
+ From dst = BIT_FIELD_REF <var, sz, off> we produce
+
+ SR.1 = (scalar type) var;
+ SR.2 = SR.1 >> off;
+ SR.3 = SR.2 & ((1 << sz) - 1);
+ ... possible sign extension of SR.3 ...
+ dst = (destination type) SR.3;
+ */
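+
+ /* For instance, assuming VAR already has a 32-bit integer type, a
+ read of dst = BIT_FIELD_REF <var, 8, 16> becomes, on a little-endian
+ target,
+
+ SR.1 = var >> 16;
+ SR.2 = SR.1 & 255;
+ dst = (destination type) SR.2; */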
if (scalar_bitfield_p (src))
{
- tree cst, cst2, mask, minshift, maxshift;
- tree tmp, var, utype, stype;
- tree list, stmt;
- bool unsignedp = BIT_FIELD_REF_UNSIGNED (src);
+ tree var, shift, width;
+ tree utype, stype;
+ bool unsignedp = (INTEGRAL_TYPE_P (TREE_TYPE (src))
+ ? TYPE_UNSIGNED (TREE_TYPE (src)) : true);
+ struct gimplify_ctx gctx;
var = TREE_OPERAND (src, 0);
- cst = TREE_OPERAND (src, 2);
- cst2 = size_binop (PLUS_EXPR, TREE_OPERAND (src, 1),
- TREE_OPERAND (src, 2));
-
- if (BITS_BIG_ENDIAN)
+ width = TREE_OPERAND (src, 1);
+ /* The offset needs to be adjusted to a right shift quantity
+ depending on the endianness. */
+ if (BYTES_BIG_ENDIAN)
{
- maxshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst);
- minshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst2);
+ tree tmp = size_binop (PLUS_EXPR, width, TREE_OPERAND (src, 2));
+ shift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), tmp);
}
else
- {
- maxshift = cst2;
- minshift = cst;
- }
+ shift = TREE_OPERAND (src, 2);
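+ /* E.g. an 8-bit field at bit offset 16 of a 32-bit VAR is reached
+ with a right shift of 16 on little-endian targets, but of
+ 32 - (8 + 16) = 8 on big-endian ones. */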
+ /* In weird cases we have non-integral types for the source or
+ destination object.
+ ??? For unknown reasons we also want an unsigned scalar type. */
stype = TREE_TYPE (var);
if (!INTEGRAL_TYPE_P (stype))
stype = lang_hooks.types.type_for_size (TREE_INT_CST_LOW
else if (!TYPE_UNSIGNED (utype))
utype = unsigned_type_for (utype);
- list = NULL;
-
- cst2 = size_binop (MINUS_EXPR, maxshift, minshift);
- if (tree_int_cst_equal (cst2, TYPE_SIZE (utype)))
- {
- unsignedp = true;
- mask = NULL_TREE;
- }
+ /* Convert the base var of the BIT_FIELD_REF to the scalar type
+ we use for computation if we cannot use it directly. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
+ var = fold_convert (stype, var);
else
- {
- mask = build_int_cst_wide (utype, 1, 0);
- cst = int_const_binop (LSHIFT_EXPR, mask, cst2, true);
- mask = int_const_binop (MINUS_EXPR, cst, mask, true);
- }
+ var = fold_build1 (VIEW_CONVERT_EXPR, stype, var);
- tmp = make_rename_temp (stype, "SR");
- if (TYPE_MAIN_VARIANT (TREE_TYPE (var)) != TYPE_MAIN_VARIANT (stype))
- {
- if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
- stmt = build_gimple_modify_stmt (tmp,
- fold_convert (stype, var));
- else
- stmt = build_gimple_modify_stmt (tmp,
- fold_build1 (VIEW_CONVERT_EXPR,
- stype, var));
- append_to_statement_list (stmt, &list);
-
- var = tmp;
- }
-
- if (!integer_zerop (minshift))
- {
- tmp = make_rename_temp (stype, "SR");
- stmt = build_gimple_modify_stmt (tmp,
- fold_build2 (RSHIFT_EXPR, stype,
- var, minshift));
- append_to_statement_list (stmt, &list);
-
- var = tmp;
- }
+ if (!integer_zerop (shift))
+ var = fold_build2 (RSHIFT_EXPR, stype, var, shift);
- if (TYPE_MAIN_VARIANT (utype) != TYPE_MAIN_VARIANT (stype))
+ /* If we need a masking operation, produce one. */
+ if (TREE_INT_CST_LOW (width) == TYPE_PRECISION (stype))
+ unsignedp = true;
+ else
{
- if (!mask && unsignedp
- && (TYPE_MAIN_VARIANT (utype)
- == TYPE_MAIN_VARIANT (TREE_TYPE (dst))))
- tmp = dst;
- else
- tmp = make_rename_temp (utype, "SR");
-
- stmt = build_gimple_modify_stmt (tmp, fold_convert (utype, var));
- append_to_statement_list (stmt, &list);
-
- var = tmp;
+ tree one = build_int_cst_wide (stype, 1, 0);
+ tree mask = int_const_binop (LSHIFT_EXPR, one, width, 0);
+ mask = int_const_binop (MINUS_EXPR, mask, one, 0);
+ var = fold_build2 (BIT_AND_EXPR, stype, var, mask);
}
- if (mask)
- {
- if (!unsignedp
- || (TYPE_MAIN_VARIANT (TREE_TYPE (dst))
- != TYPE_MAIN_VARIANT (utype)))
- tmp = make_rename_temp (utype, "SR");
- else
- tmp = dst;
-
- stmt = build_gimple_modify_stmt (tmp,
- fold_build2 (BIT_AND_EXPR, utype,
- var, mask));
- append_to_statement_list (stmt, &list);
-
- var = tmp;
- }
+ /* After shifting and masking, convert to the target type. */
+ var = fold_convert (utype, var);
+ /* Perform sign extension, if required.
+ ??? This should never be necessary. */
if (!unsignedp)
{
tree signbit = int_const_binop (LSHIFT_EXPR,
build_int_cst_wide (utype, 1, 0),
- size_binop (MINUS_EXPR, cst2,
- bitsize_int (1)),
- true);
+ size_binop (MINUS_EXPR, width,
+ bitsize_int (1)), 0);
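+
+ /* The standard sign-extension idiom: with SIGNBIT set at the top of
+ the WIDTH-bit field, (VAR ^ SIGNBIT) - SIGNBIT sign-extends VAR
+ to the full precision of UTYPE. */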
- tmp = make_rename_temp (utype, "SR");
- stmt = build_gimple_modify_stmt (tmp,
- fold_build2 (BIT_XOR_EXPR, utype,
- var, signbit));
- append_to_statement_list (stmt, &list);
-
- var = tmp;
+ var = fold_build2 (BIT_XOR_EXPR, utype, var, signbit);
+ var = fold_build2 (MINUS_EXPR, utype, var, signbit);
+ }
- if (TYPE_MAIN_VARIANT (TREE_TYPE (dst)) != TYPE_MAIN_VARIANT (utype))
- tmp = make_rename_temp (utype, "SR");
- else
- tmp = dst;
+ /* fold_build3 (BIT_FIELD_REF, ...) sometimes returns a cast. */
+ STRIP_NOPS (dst);
- stmt = build_gimple_modify_stmt (tmp,
- fold_build2 (MINUS_EXPR, utype,
- var, signbit));
- append_to_statement_list (stmt, &list);
+ /* Finally, move and convert to the destination. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (dst)))
+ var = fold_convert (TREE_TYPE (dst), var);
+ else
+ var = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (dst), var);
- var = tmp;
- }
+ push_gimplify_context (&gctx);
+ gctx.into_ssa = true;
+ gctx.allow_rhs_cond_expr = true;
- if (var != dst)
- {
- if (INTEGRAL_TYPE_P (TREE_TYPE (dst)))
- var = fold_convert (TREE_TYPE (dst), var);
- else
- var = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (dst), var);
+ gimplify_assign (dst, var, &seq);
- stmt = build_gimple_modify_stmt (dst, var);
- append_to_statement_list (stmt, &list);
- }
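+ /* Register with the referenced-vars machinery any temporaries the
+ gimplifier created. */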
+ if (gimple_referenced_vars (cfun))
+ for (var = gctx.temps; var; var = TREE_CHAIN (var))
+ add_referenced_var (var);
+ pop_gimplify_context (NULL);
- return list;
+ return seq;
}
+ /* fold_build3 (BIT_FIELD_REF, ...) sometimes returns a cast. */
+ if (CONVERT_EXPR_P (dst))
+ {
+ STRIP_NOPS (dst);
+ src = fold_convert (TREE_TYPE (dst), src);
+ }
/* It was hoped that we could perform some type sanity checking
here, but since front-ends can emit accesses of fields in types
different from their nominal types and copy structures containing
Since such accesses under different types require compatibility
anyway, there's little point in making tests and/or adding
conversions to ensure the types of src and dst are the same.
- So we just assume type differences at this point are ok. */
- return build_gimple_modify_stmt (dst, src);
+ So we just assume type differences at this point are ok.
+ The only exception we make here is for pointer types, which can
+ differ in e.g. structurally equal, but non-identical RECORD_TYPEs. */
+ else if (POINTER_TYPE_P (TREE_TYPE (dst))
+ && !useless_type_conversion_p (TREE_TYPE (dst), TREE_TYPE (src)))
+ src = fold_convert (TREE_TYPE (dst), src);
+
+ /* ??? Only call the gimplifier if we need to. Otherwise we may
+ end up substituting with DECL_VALUE_EXPR - see PR37380. */
+ if (!handled_component_p (src)
+ && !SSA_VAR_P (src))
+ {
+ src = force_gimple_operand (src, &seq2, false, NULL_TREE);
+ gimple_seq_add_seq (&seq, seq2);
+ }
+ stmt = gimple_build_assign (dst, src);
+ gimple_seq_add_stmt (&seq, stmt);
+ return seq;
}
/* BIT_FIELD_REFs must not be shared. sra_build_elt_assignment()
/* Emit an assignment from SRC to DST, but if DST is a scalarizable
BIT_FIELD_REF, turn it into bit operations. */
-static tree
+static gimple_seq
sra_build_bf_assignment (tree dst, tree src)
{
tree var, type, utype, tmp, tmp2, tmp3;
- tree list, stmt;
+ gimple_seq seq;
+ gimple stmt;
tree cst, cst2, mask;
tree minshift, maxshift;
if (!scalar_bitfield_p (dst))
return sra_build_assignment (REPLDUP (dst), src);
- list = NULL;
+ seq = NULL;
cst = fold_convert (bitsizetype, TREE_OPERAND (dst, 2));
cst2 = size_binop (PLUS_EXPR,
fold_convert (bitsizetype, TREE_OPERAND (dst, 1)),
cst);
- if (BITS_BIG_ENDIAN)
+ if (BYTES_BIG_ENDIAN)
{
maxshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst);
minshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst2);
utype = unsigned_type_for (type);
mask = build_int_cst_wide (utype, 1, 0);
- cst = int_const_binop (LSHIFT_EXPR, mask, maxshift, true);
- cst2 = int_const_binop (LSHIFT_EXPR, mask, minshift, true);
+ if (TREE_INT_CST_LOW (maxshift) == TYPE_PRECISION (utype))
+ cst = build_int_cst_wide (utype, 0, 0);
+ else
+ cst = int_const_binop (LSHIFT_EXPR, mask, maxshift, true);
+ if (integer_zerop (minshift))
+ cst2 = mask;
+ else
+ cst2 = int_const_binop (LSHIFT_EXPR, mask, minshift, true);
mask = int_const_binop (MINUS_EXPR, cst, cst2, true);
mask = fold_build1 (BIT_NOT_EXPR, utype, mask);
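+ /* MASK now has zeros exactly over the destination bit-field and ones
+ elsewhere; e.g. for a field occupying bits [8,16) of a 32-bit word
+ it is 0xffff00ff. The checks above avoid forming a shift by the
+ full precision of UTYPE or a useless shift by zero. */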
tmp = var;
if (!is_gimple_variable (tmp))
tmp = unshare_expr (var);
+ else
+ TREE_NO_WARNING (var) = true;
tmp2 = make_rename_temp (utype, "SR");
if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
- stmt = build_gimple_modify_stmt (tmp2, fold_convert (utype, tmp));
+ tmp = fold_convert (utype, tmp);
else
- stmt = build_gimple_modify_stmt (tmp2, fold_build1 (VIEW_CONVERT_EXPR,
- utype, tmp));
- append_to_statement_list (stmt, &list);
+ tmp = fold_build1 (VIEW_CONVERT_EXPR, utype, tmp);
+
+ stmt = gimple_build_assign (tmp2, tmp);
+ gimple_seq_add_stmt (&seq, stmt);
}
else
tmp2 = var;
if (!integer_zerop (mask))
{
tmp = make_rename_temp (utype, "SR");
- stmt = build_gimple_modify_stmt (tmp,
- fold_build2 (BIT_AND_EXPR, utype,
+ stmt = gimple_build_assign (tmp, fold_build2 (BIT_AND_EXPR, utype,
tmp2, mask));
- append_to_statement_list (stmt, &list);
+ gimple_seq_add_stmt (&seq, stmt);
}
else
tmp = mask;
tmp2 = src;
else if (INTEGRAL_TYPE_P (TREE_TYPE (src)))
{
+ gimple_seq tmp_seq;
tmp2 = make_rename_temp (TREE_TYPE (src), "SR");
- stmt = sra_build_assignment (tmp2, src);
- append_to_statement_list (stmt, &list);
+ tmp_seq = sra_build_assignment (tmp2, src);
+ gimple_seq_add_seq (&seq, tmp_seq);
}
else
{
+ gimple_seq tmp_seq;
tmp2 = make_rename_temp
(lang_hooks.types.type_for_size
(TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (src))),
1), "SR");
- stmt = sra_build_assignment (tmp2, fold_build1 (VIEW_CONVERT_EXPR,
+ tmp_seq = sra_build_assignment (tmp2, fold_build1 (VIEW_CONVERT_EXPR,
TREE_TYPE (tmp2), src));
- append_to_statement_list (stmt, &list);
+ gimple_seq_add_seq (&seq, tmp_seq);
}
if (!TYPE_UNSIGNED (TREE_TYPE (tmp2)))
{
+ gimple_seq tmp_seq;
tree ut = unsigned_type_for (TREE_TYPE (tmp2));
tmp3 = make_rename_temp (ut, "SR");
tmp2 = fold_convert (ut, tmp2);
- stmt = sra_build_assignment (tmp3, tmp2);
- append_to_statement_list (stmt, &list);
+ tmp_seq = sra_build_assignment (tmp3, tmp2);
+ gimple_seq_add_seq (&seq, tmp_seq);
tmp2 = fold_build1 (BIT_NOT_EXPR, utype, mask);
tmp2 = int_const_binop (RSHIFT_EXPR, tmp2, minshift, true);
tmp2 = fold_convert (ut, tmp2);
- tmp2 = fold_build2 (BIT_AND_EXPR, utype, tmp3, tmp2);
+ tmp2 = fold_build2 (BIT_AND_EXPR, ut, tmp3, tmp2);
if (tmp3 != tmp2)
{
tmp3 = make_rename_temp (ut, "SR");
- stmt = sra_build_assignment (tmp3, tmp2);
- append_to_statement_list (stmt, &list);
+ tmp_seq = sra_build_assignment (tmp3, tmp2);
+ gimple_seq_add_seq (&seq, tmp_seq);
}
tmp2 = tmp3;
if (TYPE_MAIN_VARIANT (TREE_TYPE (tmp2)) != TYPE_MAIN_VARIANT (utype))
{
+ gimple_seq tmp_seq;
tmp3 = make_rename_temp (utype, "SR");
tmp2 = fold_convert (utype, tmp2);
- stmt = sra_build_assignment (tmp3, tmp2);
- append_to_statement_list (stmt, &list);
+ tmp_seq = sra_build_assignment (tmp3, tmp2);
+ gimple_seq_add_seq (&seq, tmp_seq);
tmp2 = tmp3;
}
if (!integer_zerop (minshift))
{
tmp3 = make_rename_temp (utype, "SR");
- stmt = build_gimple_modify_stmt (tmp3,
- fold_build2 (LSHIFT_EXPR, utype,
- tmp2, minshift));
- append_to_statement_list (stmt, &list);
+ stmt = gimple_build_assign (tmp3, fold_build2 (LSHIFT_EXPR, utype,
+ tmp2, minshift));
+ gimple_seq_add_stmt (&seq, stmt);
tmp2 = tmp3;
}
tmp3 = make_rename_temp (utype, "SR");
else
tmp3 = var;
- stmt = build_gimple_modify_stmt (tmp3,
- fold_build2 (BIT_IOR_EXPR, utype,
- tmp, tmp2));
- append_to_statement_list (stmt, &list);
+ stmt = gimple_build_assign (tmp3, fold_build2 (BIT_IOR_EXPR, utype,
+ tmp, tmp2));
+ gimple_seq_add_stmt (&seq, stmt);
if (tmp3 != var)
{
if (TREE_TYPE (var) == type)
- stmt = build_gimple_modify_stmt (var,
- fold_convert (type, tmp3));
+ stmt = gimple_build_assign (var, fold_convert (type, tmp3));
else
- stmt = build_gimple_modify_stmt (var,
- fold_build1 (VIEW_CONVERT_EXPR,
+ stmt = gimple_build_assign (var, fold_build1 (VIEW_CONVERT_EXPR,
TREE_TYPE (var), tmp3));
- append_to_statement_list (stmt, &list);
+ gimple_seq_add_stmt (&seq, stmt);
}
- return list;
+ return seq;
}
/* Expand an assignment of SRC to the scalarized representation of
ELT. If it is a field group, try to widen the assignment to cover
the full variable. */
-static tree
+static gimple_seq
sra_build_elt_assignment (struct sra_elt *elt, tree src)
{
tree dst = elt->replacement;
- tree var, tmp, cst, cst2, list, stmt;
+ tree var, tmp, cst, cst2;
+ gimple stmt;
+ gimple_seq seq;
if (TREE_CODE (dst) != BIT_FIELD_REF
|| !elt->in_bitfld_block)
if (elt->in_bitfld_block == 2
&& TREE_CODE (src) == BIT_FIELD_REF)
{
+ tmp = src;
cst = TYPE_SIZE (TREE_TYPE (var));
cst2 = size_binop (MINUS_EXPR, TREE_OPERAND (src, 2),
TREE_OPERAND (dst, 2));
if (TYPE_MAIN_VARIANT (TREE_TYPE (var))
!= TYPE_MAIN_VARIANT (TREE_TYPE (src)))
{
- list = NULL;
+ gimple_seq tmp_seq;
+ seq = NULL;
- if (!INTEGRAL_TYPE_P (TREE_TYPE (src))
- || !TYPE_UNSIGNED (TREE_TYPE (src)))
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (src)))
src = fold_build1 (VIEW_CONVERT_EXPR,
lang_hooks.types.type_for_size
(TREE_INT_CST_LOW
(TYPE_SIZE (TREE_TYPE (src))),
1), src);
+ gcc_assert (TYPE_UNSIGNED (TREE_TYPE (src)));
tmp = make_rename_temp (TREE_TYPE (src), "SR");
- stmt = build_gimple_modify_stmt (tmp, src);
- append_to_statement_list (stmt, &list);
+ stmt = gimple_build_assign (tmp, src);
+ gimple_seq_add_stmt (&seq, stmt);
- stmt = sra_build_assignment (var,
- fold_convert (TREE_TYPE (var),
- tmp));
- append_to_statement_list (stmt, &list);
+ tmp_seq = sra_build_assignment (var,
+ fold_convert (TREE_TYPE (var),
+ tmp));
+ gimple_seq_add_seq (&seq, tmp_seq);
- return list;
+ return seq;
}
src = fold_convert (TREE_TYPE (var), src);
}
else
{
- src = fold_build3 (BIT_FIELD_REF, TREE_TYPE (var), src, cst, cst2);
- BIT_FIELD_REF_UNSIGNED (src) = 1;
+ src = fold_convert (TREE_TYPE (var), tmp);
}
return sra_build_assignment (var, src);
static void
generate_copy_inout (struct sra_elt *elt, bool copy_out, tree expr,
- tree *list_p)
+ gimple_seq *seq_p)
{
struct sra_elt *c;
+ gimple_seq tmp_seq;
tree t;
if (!copy_out && TREE_CODE (expr) == SSA_NAME
i = c->replacement;
t = build2 (COMPLEX_EXPR, elt->type, r, i);
- t = sra_build_bf_assignment (expr, t);
- SSA_NAME_DEF_STMT (expr) = t;
- append_to_statement_list (t, list_p);
+ tmp_seq = sra_build_bf_assignment (expr, t);
+ SSA_NAME_DEF_STMT (expr) = gimple_seq_last_stmt (tmp_seq);
+ gimple_seq_add_seq (seq_p, tmp_seq);
}
else if (elt->replacement)
{
if (copy_out)
- t = sra_build_elt_assignment (elt, expr);
+ tmp_seq = sra_build_elt_assignment (elt, expr);
else
- t = sra_build_bf_assignment (expr, REPLDUP (elt->replacement));
- append_to_statement_list (t, list_p);
+ tmp_seq = sra_build_bf_assignment (expr, REPLDUP (elt->replacement));
+ gimple_seq_add_seq (seq_p, tmp_seq);
}
else
{
FOR_EACH_ACTUAL_CHILD (c, elt)
{
t = generate_one_element_ref (c, unshare_expr (expr));
- generate_copy_inout (c, copy_out, t, list_p);
+ generate_copy_inout (c, copy_out, t, seq_p);
}
}
}
correspondence of instantiated elements. */
static void
-generate_element_copy (struct sra_elt *dst, struct sra_elt *src, tree *list_p)
+generate_element_copy (struct sra_elt *dst, struct sra_elt *src, gimple_seq *seq_p)
{
struct sra_elt *dc, *sc;
{
sc = lookup_element (src, dcs->element, NULL, NO_INSERT);
gcc_assert (sc);
- generate_element_copy (dcs, sc, list_p);
+ generate_element_copy (dcs, sc, seq_p);
}
continue;
}
- gcc_assert (sc);
- generate_element_copy (dc, sc, list_p);
+
+ /* If DST and SRC are structs with the same elements, but do not have
+ the same TYPE_MAIN_VARIANT, then lookup of DST FIELD_DECL in SRC
+ will fail. Try harder by finding the corresponding FIELD_DECL
+ in SRC. */
+ if (!sc)
+ {
+ tree f;
+
+ gcc_assert (useless_type_conversion_p (dst->type, src->type));
+ gcc_assert (TREE_CODE (dc->element) == FIELD_DECL);
+ for (f = TYPE_FIELDS (src->type); f ; f = TREE_CHAIN (f))
+ if (simple_cst_equal (DECL_FIELD_OFFSET (f),
+ DECL_FIELD_OFFSET (dc->element)) > 0
+ && simple_cst_equal (DECL_FIELD_BIT_OFFSET (f),
+ DECL_FIELD_BIT_OFFSET (dc->element)) > 0
+ && simple_cst_equal (DECL_SIZE (f),
+ DECL_SIZE (dc->element)) > 0
+ && (useless_type_conversion_p (TREE_TYPE (dc->element),
+ TREE_TYPE (f))
+ || (POINTER_TYPE_P (TREE_TYPE (dc->element))
+ && POINTER_TYPE_P (TREE_TYPE (f)))))
+ break;
+ gcc_assert (f != NULL_TREE);
+ sc = lookup_element (src, f, NULL, NO_INSERT);
+ }
+
+ generate_element_copy (dc, sc, seq_p);
}
if (dst->replacement)
{
- tree t;
+ gimple_seq tmp_seq;
gcc_assert (src->replacement);
- t = sra_build_elt_assignment (dst, REPLDUP (src->replacement));
- append_to_statement_list (t, list_p);
+ tmp_seq = sra_build_elt_assignment (dst, REPLDUP (src->replacement));
+ gimple_seq_add_seq (seq_p, tmp_seq);
}
}
with generate_element_init. */
static void
-generate_element_zero (struct sra_elt *elt, tree *list_p)
+generate_element_zero (struct sra_elt *elt, gimple_seq *seq_p)
{
struct sra_elt *c;
if (!elt->in_bitfld_block)
FOR_EACH_ACTUAL_CHILD (c, elt)
- generate_element_zero (c, list_p);
+ generate_element_zero (c, seq_p);
if (elt->replacement)
{
tree t;
+ gimple_seq tmp_seq;
gcc_assert (elt->is_scalar);
t = fold_convert (elt->type, integer_zero_node);
- t = sra_build_elt_assignment (elt, t);
- append_to_statement_list (t, list_p);
+ tmp_seq = sra_build_elt_assignment (elt, t);
+ gimple_seq_add_seq (seq_p, tmp_seq);
}
}
Add the result to *LIST_P. */
static void
-generate_one_element_init (struct sra_elt *elt, tree init, tree *list_p)
+generate_one_element_init (struct sra_elt *elt, tree init, gimple_seq *seq_p)
{
- /* The replacement can be almost arbitrarily complex. Gimplify. */
- tree stmt = sra_build_elt_assignment (elt, init);
- gimplify_and_add (stmt, list_p);
+ gimple_seq tmp_seq = sra_build_elt_assignment (elt, init);
+ gimple_seq_add_seq (seq_p, tmp_seq);
}
/* Generate a set of assignment statements in *LIST_P to set all instantiated
handle. */
static bool
-generate_element_init_1 (struct sra_elt *elt, tree init, tree *list_p)
+generate_element_init_1 (struct sra_elt *elt, tree init, gimple_seq *seq_p)
{
bool result = true;
enum tree_code init_code;
{
if (elt->replacement)
{
- generate_one_element_init (elt, init, list_p);
+ generate_one_element_init (elt, init, seq_p);
elt->visited = true;
}
return result;
else
t = (init_code == COMPLEX_EXPR
? TREE_OPERAND (init, 1) : TREE_IMAGPART (init));
- result &= generate_element_init_1 (sub, t, list_p);
+ result &= generate_element_init_1 (sub, t, seq_p);
}
break;
case CONSTRUCTOR:
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), idx, purpose, value)
{
+ /* Array constructors are routinely created with NULL indices. */
+ if (purpose == NULL_TREE)
+ {
+ result = false;
+ break;
+ }
if (TREE_CODE (purpose) == RANGE_EXPR)
{
tree lower = TREE_OPERAND (purpose, 0);
{
sub = lookup_element (elt, lower, NULL, NO_INSERT);
if (sub != NULL)
- result &= generate_element_init_1 (sub, value, list_p);
+ result &= generate_element_init_1 (sub, value, seq_p);
if (tree_int_cst_equal (lower, upper))
break;
lower = int_const_binop (PLUS_EXPR, lower,
{
sub = lookup_element (elt, purpose, NULL, NO_INSERT);
if (sub != NULL)
- result &= generate_element_init_1 (sub, value, list_p);
+ result &= generate_element_init_1 (sub, value, seq_p);
}
}
break;
gimplification. */
static bool
-generate_element_init (struct sra_elt *elt, tree init, tree *list_p)
+generate_element_init (struct sra_elt *elt, tree init, gimple_seq *seq_p)
{
bool ret;
+ struct gimplify_ctx gctx;
- push_gimplify_context ();
- ret = generate_element_init_1 (elt, init, list_p);
+ push_gimplify_context (&gctx);
+ ret = generate_element_init_1 (elt, init, seq_p);
pop_gimplify_context (NULL);
/* The replacement can expose previously unreferenced variables. */
- if (ret && *list_p)
+ if (ret && *seq_p)
{
- tree_stmt_iterator i;
+ gimple_stmt_iterator i;
- for (i = tsi_start (*list_p); !tsi_end_p (i); tsi_next (&i))
- find_new_referenced_vars (tsi_stmt_ptr (i));
+ for (i = gsi_start (*seq_p); !gsi_end_p (i); gsi_next (&i))
+ find_new_referenced_vars (gsi_stmt (i));
}
return ret;
}
-/* Insert STMT on all the outgoing edges out of BB. Note that if BB
- has more than one edge, STMT will be replicated for each edge. Also,
- abnormal edges will be ignored. */
+/* Insert a gimple_seq SEQ on all the outgoing edges out of BB. Note that
+ if BB has more than one edge, SEQ will be replicated for each edge.
+ Also, abnormal edges will be ignored. */
void
-insert_edge_copies (tree stmt, basic_block bb)
+insert_edge_copies_seq (gimple_seq seq, basic_block bb)
{
edge e;
edge_iterator ei;
- bool first_copy;
+ unsigned n_copies = -1;
- first_copy = true;
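+ /* Count the non-abnormal outgoing edges, starting at -1 so that
+ N_COPIES ends up one less than their number: the last such edge
+ then takes SEQ itself while every earlier edge takes a fresh copy.
+ Abnormal edges are skipped because the value of the scalar
+ replacements is not guaranteed to be valid across them. */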
FOR_EACH_EDGE (e, ei, bb->succs)
- {
- /* We don't need to insert copies on abnormal edges. The
- value of the scalar replacement is not guaranteed to
- be valid through an abnormal edge. */
- if (!(e->flags & EDGE_ABNORMAL))
- {
- if (first_copy)
- {
- bsi_insert_on_edge (e, stmt);
- first_copy = false;
- }
- else
- bsi_insert_on_edge (e, unsave_expr_now (stmt));
- }
- }
+ if (!(e->flags & EDGE_ABNORMAL))
+ n_copies++;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (!(e->flags & EDGE_ABNORMAL))
+ gsi_insert_seq_on_edge (e, n_copies-- > 0 ? gimple_seq_copy (seq) : seq);
}
-/* Helper function to insert LIST before BSI, and set up line number info. */
+/* Helper function to insert SEQ before GSI, and set up line number info. */
void
-sra_insert_before (block_stmt_iterator *bsi, tree list)
+sra_insert_before (gimple_stmt_iterator *gsi, gimple_seq seq)
{
- tree stmt = bsi_stmt (*bsi);
+ gimple stmt = gsi_stmt (*gsi);
- if (EXPR_HAS_LOCATION (stmt))
- annotate_all_with_locus (&list, EXPR_LOCATION (stmt));
- bsi_insert_before (bsi, list, BSI_SAME_STMT);
+ if (gimple_has_location (stmt))
+ annotate_all_with_location (seq, gimple_location (stmt));
+ gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
}
-/* Similarly, but insert after BSI. Handles insertion onto edges as well. */
+/* Similarly, but insert after GSI. Handles insertion onto edges as well. */
void
-sra_insert_after (block_stmt_iterator *bsi, tree list)
+sra_insert_after (gimple_stmt_iterator *gsi, gimple_seq seq)
{
- tree stmt = bsi_stmt (*bsi);
+ gimple stmt = gsi_stmt (*gsi);
- if (EXPR_HAS_LOCATION (stmt))
- annotate_all_with_locus (&list, EXPR_LOCATION (stmt));
+ if (gimple_has_location (stmt))
+ annotate_all_with_location (seq, gimple_location (stmt));
if (stmt_ends_bb_p (stmt))
- insert_edge_copies (list, bsi->bb);
+ insert_edge_copies_seq (seq, gsi_bb (*gsi));
else
- bsi_insert_after (bsi, list, BSI_SAME_STMT);
+ gsi_insert_seq_after (gsi, seq, GSI_SAME_STMT);
}
-/* Similarly, but replace the statement at BSI. */
+/* Similarly, but replace the statement at GSI. */
static void
-sra_replace (block_stmt_iterator *bsi, tree list)
+sra_replace (gimple_stmt_iterator *gsi, gimple_seq seq)
{
- sra_insert_before (bsi, list);
- bsi_remove (bsi, false);
- if (bsi_end_p (*bsi))
- *bsi = bsi_last (bsi->bb);
+ sra_insert_before (gsi, seq);
+ gsi_remove (gsi, false);
+ if (gsi_end_p (*gsi))
+ *gsi = gsi_last (gsi_seq (*gsi));
else
- bsi_prev (bsi);
+ gsi_prev (gsi);
}
/* Data structure that bitfield_overlaps_p fills in with information
};
/* Return true if a BIT_FIELD_REF<(FLD->parent), BLEN, BPOS>
- expression (refereced as BF below) accesses any of the bits in FLD,
+ expression (referenced as BF below) accesses any of the bits in FLD,
false if it doesn't. If DATA is non-null, its field_len and
field_pos are filled in such that BIT_FIELD_REF<(FLD->parent),
field_len, field_pos> (referenced as BFLD below) represents the
flen = fold_convert (bitsizetype, TREE_OPERAND (fld->element, 1));
fpos = fold_convert (bitsizetype, TREE_OPERAND (fld->element, 2));
}
+ else if (TREE_CODE (fld->element) == INTEGER_CST)
+ {
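+ /* FLD is an array element: its width is the element type's size
+ and its bit position is its index, biased by the array's lower
+ bound, times that width. */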
+ tree domain_type = TYPE_DOMAIN (TREE_TYPE (fld->parent->element));
+ flen = fold_convert (bitsizetype, TYPE_SIZE (fld->type));
+ fpos = fold_convert (bitsizetype, fld->element);
+ if (domain_type && TYPE_MIN_VALUE (domain_type))
+ fpos = size_binop (MINUS_EXPR, fpos,
+ fold_convert (bitsizetype,
+ TYPE_MIN_VALUE (domain_type)));
+ fpos = size_binop (MULT_EXPR, flen, fpos);
+ }
else
gcc_unreachable ();
static void
sra_explode_bitfield_assignment (tree var, tree vpos, bool to_var,
- tree *listp, tree blen, tree bpos,
+ gimple_seq *seq_p, tree blen, tree bpos,
struct sra_elt *elt)
{
struct sra_elt *fld;
if (fld->replacement)
{
- tree infld, invar, st;
+ tree infld, invar, type;
+ gimple_seq st;
infld = fld->replacement;
+ type = unsigned_type_for (TREE_TYPE (infld));
+ if (TYPE_PRECISION (type) != TREE_INT_CST_LOW (flen))
+ type = build_nonstandard_integer_type (TREE_INT_CST_LOW (flen), 1);
+
if (TREE_CODE (infld) == BIT_FIELD_REF)
{
fpos = size_binop (PLUS_EXPR, fpos, TREE_OPERAND (infld, 2));
infld = TREE_OPERAND (infld, 0);
}
- else if (BITS_BIG_ENDIAN && DECL_P (fld->element)
+ else if (BYTES_BIG_ENDIAN && DECL_P (fld->element)
&& !tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (infld)),
DECL_SIZE (fld->element)))
{
DECL_SIZE (fld->element));
}
- infld = fold_build3 (BIT_FIELD_REF,
- lang_hooks.types.type_for_size
- (TREE_INT_CST_LOW (flen), 1),
- infld, flen, fpos);
- BIT_FIELD_REF_UNSIGNED (infld) = 1;
+ infld = fold_build3 (BIT_FIELD_REF, type, infld, flen, fpos);
invar = size_binop (MINUS_EXPR, flp.field_pos, bpos);
if (flp.overlap_pos)
invar = size_binop (PLUS_EXPR, invar, flp.overlap_pos);
invar = size_binop (PLUS_EXPR, invar, vpos);
- invar = fold_build3 (BIT_FIELD_REF, TREE_TYPE (infld),
- var, flen, invar);
- BIT_FIELD_REF_UNSIGNED (invar) = 1;
+ invar = fold_build3 (BIT_FIELD_REF, type, var, flen, invar);
if (to_var)
st = sra_build_bf_assignment (invar, infld);
else
st = sra_build_bf_assignment (infld, invar);
- append_to_statement_list (st, listp);
+ gimple_seq_add_seq (seq_p, st);
}
else
{
if (flp.overlap_pos)
sub = size_binop (PLUS_EXPR, sub, flp.overlap_pos);
- sra_explode_bitfield_assignment (var, sub, to_var, listp,
+ sra_explode_bitfield_assignment (var, sub, to_var, seq_p,
flen, fpos, fld);
}
}
full variable back to the scalarized variables. */
static void
-sra_sync_for_bitfield_assignment (tree *listbeforep, tree *listafterp,
+sra_sync_for_bitfield_assignment (gimple_seq *seq_before_p,
+ gimple_seq *seq_after_p,
tree blen, tree bpos,
struct sra_elt *elt)
{
if (fld->replacement || (!flp.overlap_len && !flp.overlap_pos))
{
generate_copy_inout (fld, false, generate_element_ref (fld),
- listbeforep);
+ seq_before_p);
mark_no_warning (fld);
- if (listafterp)
+ if (seq_after_p)
generate_copy_inout (fld, true, generate_element_ref (fld),
- listafterp);
+ seq_after_p);
}
else
{
tree flen = flp.overlap_len ? flp.overlap_len : flp.field_len;
tree fpos = flp.overlap_pos ? flp.overlap_pos : bitsize_int (0);
- sra_sync_for_bitfield_assignment (listbeforep, listafterp,
+ sra_sync_for_bitfield_assignment (seq_before_p, seq_after_p,
flen, fpos, fld);
}
}
aggregate. IS_OUTPUT is true if ELT is being modified. */
static void
-scalarize_use (struct sra_elt *elt, tree *expr_p, block_stmt_iterator *bsi,
+scalarize_use (struct sra_elt *elt, tree *expr_p, gimple_stmt_iterator *gsi,
bool is_output, bool use_all)
{
- tree stmt = bsi_stmt (*bsi);
+ gimple stmt = gsi_stmt (*gsi);
tree bfexpr;
if (elt->replacement)
if (is_output
&& TREE_CODE (elt->replacement) == BIT_FIELD_REF
&& is_gimple_reg (TREE_OPERAND (elt->replacement, 0))
- && TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
- && &GIMPLE_STMT_OPERAND (stmt, 0) == expr_p)
+ && is_gimple_assign (stmt)
+ && gimple_assign_lhs_ptr (stmt) == expr_p)
{
- tree newstmt = sra_build_elt_assignment
- (elt, GIMPLE_STMT_OPERAND (stmt, 1));
- if (TREE_CODE (newstmt) != STATEMENT_LIST)
- {
- tree list = NULL;
- append_to_statement_list (newstmt, &list);
- newstmt = list;
- }
- sra_replace (bsi, newstmt);
+ gimple_seq newseq;
+ /* RHS must be a single operand. */
+ gcc_assert (gimple_assign_single_p (stmt));
+ newseq = sra_build_elt_assignment (elt, gimple_assign_rhs1 (stmt));
+ sra_replace (gsi, newseq);
return;
}
else if (!is_output
&& TREE_CODE (elt->replacement) == BIT_FIELD_REF
- && TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
- && &GIMPLE_STMT_OPERAND (stmt, 1) == expr_p)
+ && is_gimple_assign (stmt)
+ && gimple_assign_rhs1_ptr (stmt) == expr_p)
{
tree tmp = make_rename_temp
- (TREE_TYPE (GIMPLE_STMT_OPERAND (stmt, 0)), "SR");
- tree newstmt = sra_build_assignment (tmp, REPLDUP (elt->replacement));
+ (TREE_TYPE (gimple_assign_lhs (stmt)), "SR");
+ gimple_seq newseq = sra_build_assignment (tmp, REPLDUP (elt->replacement));
- if (TREE_CODE (newstmt) != STATEMENT_LIST)
- {
- tree list = NULL;
- append_to_statement_list (newstmt, &list);
- newstmt = list;
- }
- sra_insert_before (bsi, newstmt);
+ sra_insert_before (gsi, newseq);
replacement = tmp;
}
if (is_output)
- mark_all_v_defs (stmt);
+ mark_all_v_defs_stmt (stmt);
*expr_p = REPLDUP (replacement);
update_stmt (stmt);
}
else if (use_all && is_output
- && TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
+ && is_gimple_assign (stmt)
&& TREE_CODE (bfexpr
- = GIMPLE_STMT_OPERAND (stmt, 0)) == BIT_FIELD_REF
+ = gimple_assign_lhs (stmt)) == BIT_FIELD_REF
&& &TREE_OPERAND (bfexpr, 0) == expr_p
&& INTEGRAL_TYPE_P (TREE_TYPE (bfexpr))
&& TREE_CODE (TREE_TYPE (*expr_p)) == RECORD_TYPE)
{
- tree listbefore = NULL, listafter = NULL;
+ gimple_seq seq_before = NULL;
+ gimple_seq seq_after = NULL;
tree blen = fold_convert (bitsizetype, TREE_OPERAND (bfexpr, 1));
tree bpos = fold_convert (bitsizetype, TREE_OPERAND (bfexpr, 2));
bool update = false;
if (!elt->use_block_copy)
{
tree type = TREE_TYPE (bfexpr);
- tree var = make_rename_temp (type, "SR"), tmp, st;
+ tree var = make_rename_temp (type, "SR"), tmp, vpos;
+ gimple st;
- GIMPLE_STMT_OPERAND (stmt, 0) = var;
+ gimple_assign_set_lhs (stmt, var);
update = true;
if (!TYPE_UNSIGNED (type))
{
type = unsigned_type_for (type);
tmp = make_rename_temp (type, "SR");
- st = build_gimple_modify_stmt (tmp,
- fold_convert (type, var));
- append_to_statement_list (st, &listafter);
+ st = gimple_build_assign (tmp, fold_convert (type, var));
+ gimple_seq_add_stmt (&seq_after, st);
var = tmp;
}
+ /* If VAR is wider than BLEN bits, it is padded at the
+ most-significant end. We want to set VPOS such that
+ <BIT_FIELD_REF VAR BLEN VPOS> would refer to the
+ least-significant BLEN bits of VAR. */
+ if (BYTES_BIG_ENDIAN)
+ vpos = size_binop (MINUS_EXPR, TYPE_SIZE (type), blen);
+ else
+ vpos = bitsize_int (0);
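+ /* E.g. with a 32-bit VAR and BLEN == 24, VPOS is 8 on big-endian
+ targets and 0 on little-endian ones. */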
sra_explode_bitfield_assignment
- (var, bitsize_int (0), false, &listafter, blen, bpos, elt);
+ (var, vpos, false, &seq_after, blen, bpos, elt);
}
else
sra_sync_for_bitfield_assignment
- (&listbefore, &listafter, blen, bpos, elt);
+ (&seq_before, &seq_after, blen, bpos, elt);
- if (listbefore)
+ if (seq_before)
{
- mark_all_v_defs (listbefore);
- sra_insert_before (bsi, listbefore);
+ mark_all_v_defs_seq (seq_before);
+ sra_insert_before (gsi, seq_before);
}
- if (listafter)
+ if (seq_after)
{
- mark_all_v_defs (listafter);
- sra_insert_after (bsi, listafter);
+ mark_all_v_defs_seq (seq_after);
+ sra_insert_after (gsi, seq_after);
}
if (update)
update_stmt (stmt);
}
else if (use_all && !is_output
- && TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
+ && is_gimple_assign (stmt)
&& TREE_CODE (bfexpr
- = GIMPLE_STMT_OPERAND (stmt, 1)) == BIT_FIELD_REF
- && &TREE_OPERAND (GIMPLE_STMT_OPERAND (stmt, 1), 0) == expr_p
+ = gimple_assign_rhs1 (stmt)) == BIT_FIELD_REF
+ && &TREE_OPERAND (gimple_assign_rhs1 (stmt), 0) == expr_p
&& INTEGRAL_TYPE_P (TREE_TYPE (bfexpr))
&& TREE_CODE (TREE_TYPE (*expr_p)) == RECORD_TYPE)
{
- tree list = NULL;
+ gimple_seq seq = NULL;
tree blen = fold_convert (bitsizetype, TREE_OPERAND (bfexpr, 1));
tree bpos = fold_convert (bitsizetype, TREE_OPERAND (bfexpr, 2));
bool update = false;
if (!elt->use_block_copy)
{
tree type = TREE_TYPE (bfexpr);
- tree var;
+ tree var = make_rename_temp (type, "SR"), tmp, vpos;
+ gimple st = NULL;
- if (!TYPE_UNSIGNED (type))
- type = unsigned_type_for (type);
+ gimple_assign_set_rhs1 (stmt, var);
+ update = true;
- var = make_rename_temp (type, "SR");
+ if (!TYPE_UNSIGNED (type))
+ {
+ type = unsigned_type_for (type);
+ tmp = make_rename_temp (type, "SR");
+ st = gimple_build_assign (var,
+ fold_convert (TREE_TYPE (var), tmp));
+ var = tmp;
+ }
- append_to_statement_list (build_gimple_modify_stmt
- (var, build_int_cst_wide (type, 0, 0)),
- &list);
+ gimple_seq_add_stmt (&seq,
+ gimple_build_assign
+ (var, build_int_cst_wide (type, 0, 0)));
+ /* If VAR is wider than BLEN bits, it is padded at the
+ most-significant end. We want to set VPOS such that
+ <BIT_FIELD_REF VAR BLEN VPOS> would refer to the
+ least-significant BLEN bits of VAR. */
+ if (BYTES_BIG_ENDIAN)
+ vpos = size_binop (MINUS_EXPR, TYPE_SIZE (type), blen);
+ else
+ vpos = bitsize_int (0);
sra_explode_bitfield_assignment
- (var, bitsize_int (0), true, &list, blen, bpos, elt);
+ (var, vpos, true, &seq, blen, bpos, elt);
- GIMPLE_STMT_OPERAND (stmt, 1) = var;
- update = true;
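+ /* The field pieces were accumulated into the unsigned temporary;
+ ST, appended last, converts the result back to the signed type
+ the original statement now reads from. */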
+ if (st)
+ gimple_seq_add_stmt (&seq, st);
}
else
sra_sync_for_bitfield_assignment
- (&list, NULL, blen, bpos, elt);
+ (&seq, NULL, blen, bpos, elt);
- if (list)
+ if (seq)
{
- mark_all_v_defs (list);
- sra_insert_before (bsi, list);
+ mark_all_v_defs_seq (seq);
+ sra_insert_before (gsi, seq);
}
if (update)
}
else
{
- tree list = NULL;
+ gimple_seq seq = NULL;
/* Otherwise we need some copies. If ELT is being read, then we
want to store all (modified) sub-elements back into the
This optimization would be most effective if sra_walk_function
processed the blocks in dominator order. */
- generate_copy_inout (elt, is_output, generate_element_ref (elt), &list);
- if (list == NULL)
+ generate_copy_inout (elt, is_output, generate_element_ref (elt), &seq);
+ if (seq == NULL)
return;
- mark_all_v_defs (list);
+ mark_all_v_defs_seq (seq);
if (is_output)
- sra_insert_after (bsi, list);
+ sra_insert_after (gsi, seq);
else
{
- sra_insert_before (bsi, list);
+ sra_insert_before (gsi, seq);
if (use_all)
mark_no_warning (elt);
}
static void
scalarize_copy (struct sra_elt *lhs_elt, struct sra_elt *rhs_elt,
- block_stmt_iterator *bsi)
+ gimple_stmt_iterator *gsi)
{
- tree list, stmt;
+ gimple_seq seq;
+ gimple stmt;
if (lhs_elt->replacement && rhs_elt->replacement)
{
/* If we have two scalar operands, modify the existing statement. */
- stmt = bsi_stmt (*bsi);
+ stmt = gsi_stmt (*gsi);
/* See the commentary in sra_walk_function concerning
RETURN_EXPR, and why we should never see one here. */
- gcc_assert (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT);
+ gcc_assert (is_gimple_assign (stmt));
+ gcc_assert (gimple_assign_copy_p (stmt));
+
- GIMPLE_STMT_OPERAND (stmt, 0) = lhs_elt->replacement;
- GIMPLE_STMT_OPERAND (stmt, 1) = REPLDUP (rhs_elt->replacement);
+ gimple_assign_set_lhs (stmt, lhs_elt->replacement);
+ gimple_assign_set_rhs1 (stmt, REPLDUP (rhs_elt->replacement));
update_stmt (stmt);
}
else if (lhs_elt->use_block_copy || rhs_elt->use_block_copy)
would at least allow those elements that are instantiated in
both structures to be optimized well. */
- list = NULL;
+ seq = NULL;
generate_copy_inout (rhs_elt, false,
- generate_element_ref (rhs_elt), &list);
- if (list)
+ generate_element_ref (rhs_elt), &seq);
+ if (seq)
{
- mark_all_v_defs (list);
- sra_insert_before (bsi, list);
+ mark_all_v_defs_seq (seq);
+ sra_insert_before (gsi, seq);
}
- list = NULL;
+ seq = NULL;
generate_copy_inout (lhs_elt, true,
- generate_element_ref (lhs_elt), &list);
- if (list)
+ generate_element_ref (lhs_elt), &seq);
+ if (seq)
{
- mark_all_v_defs (list);
- sra_insert_after (bsi, list);
+ mark_all_v_defs_seq (seq);
+ sra_insert_after (gsi, seq);
}
}
else
case perform pair-wise element assignments and replace the
original block copy statement. */
- stmt = bsi_stmt (*bsi);
- mark_all_v_defs (stmt);
+ stmt = gsi_stmt (*gsi);
+ mark_all_v_defs_stmt (stmt);
- list = NULL;
- generate_element_copy (lhs_elt, rhs_elt, &list);
- gcc_assert (list);
- mark_all_v_defs (list);
- sra_replace (bsi, list);
+ seq = NULL;
+ generate_element_copy (lhs_elt, rhs_elt, &seq);
+ gcc_assert (seq);
+ mark_all_v_defs_seq (seq);
+ sra_replace (gsi, seq);
}
}
CONSTRUCTOR. */
static void
-scalarize_init (struct sra_elt *lhs_elt, tree rhs, block_stmt_iterator *bsi)
+scalarize_init (struct sra_elt *lhs_elt, tree rhs, gimple_stmt_iterator *gsi)
{
bool result = true;
- tree list = NULL;
+ gimple_seq seq = NULL, init_seq = NULL;
/* Generate initialization statements for all members extant in the RHS. */
if (rhs)
{
/* Unshare the expression just in case this is from a decl's initial. */
rhs = unshare_expr (rhs);
- result = generate_element_init (lhs_elt, rhs, &list);
+ result = generate_element_init (lhs_elt, rhs, &init_seq);
}
- /* CONSTRUCTOR is defined such that any member not mentioned is assigned
- a zero value. Initialize the rest of the instantiated elements. */
- generate_element_zero (lhs_elt, &list);
-
if (!result)
{
/* If we failed to convert the entire initializer, then we must
constants. The easiest way to do this is to generate a complete
copy-out, and then follow that with the constant assignments
that we were able to build. DCE will clean things up. */
- tree list0 = NULL;
+ gimple_seq seq0 = NULL;
generate_copy_inout (lhs_elt, true, generate_element_ref (lhs_elt),
- &list0);
- append_to_statement_list (list, &list0);
- list = list0;
+ &seq0);
+ gimple_seq_add_seq (&seq0, seq);
+ seq = seq0;
+ }
+ else
+ {
+ /* CONSTRUCTOR is defined such that any member not mentioned is assigned
+ a zero value. Initialize the rest of the instantiated elements. */
+ generate_element_zero (lhs_elt, &seq);
+ gimple_seq_add_seq (&seq, init_seq);
}
if (lhs_elt->use_block_copy || !result)
/* Since LHS is not fully instantiated, we must leave the structure
assignment in place. Treating this case differently from a USE
exposes constants to later optimizations. */
- if (list)
+ if (seq)
{
- mark_all_v_defs (list);
- sra_insert_after (bsi, list);
+ mark_all_v_defs_seq (seq);
+ sra_insert_after (gsi, seq);
}
}
else
{
/* The LHS is fully instantiated. The list of initializations
replaces the original structure assignment. */
- gcc_assert (list);
- mark_all_v_defs (bsi_stmt (*bsi));
- mark_all_v_defs (list);
- sra_replace (bsi, list);
+ gcc_assert (seq);
+ mark_all_v_defs_stmt (gsi_stmt (*gsi));
+ mark_all_v_defs_seq (seq);
+ sra_replace (gsi, seq);
}
}
static void
scalarize_ldst (struct sra_elt *elt, tree other,
- block_stmt_iterator *bsi, bool is_output)
+ gimple_stmt_iterator *gsi, bool is_output)
{
/* Shouldn't have gotten called for a scalar. */
gcc_assert (!elt->replacement);
{
/* Since ELT is not fully instantiated, we have to leave the
block copy in place. Treat this as a USE. */
- scalarize_use (elt, NULL, bsi, is_output, false);
+ scalarize_use (elt, NULL, gsi, is_output, false);
}
else
{
case we can have each element stored/loaded directly to/from the
corresponding slot in OTHER. This avoids a block copy. */
- tree list = NULL, stmt = bsi_stmt (*bsi);
+ gimple_seq seq = NULL;
+ gimple stmt = gsi_stmt (*gsi);
- mark_all_v_defs (stmt);
- generate_copy_inout (elt, is_output, other, &list);
- gcc_assert (list);
- mark_all_v_defs (list);
+ mark_all_v_defs_stmt (stmt);
+ generate_copy_inout (elt, is_output, other, &seq);
+ gcc_assert (seq);
+ mark_all_v_defs_seq (seq);
/* Preserve EH semantics. */
if (stmt_ends_bb_p (stmt))
{
- tree_stmt_iterator tsi;
- tree first, blist = NULL;
- bool thr = (bsi->bb->flags & EDGE_COMPLEX) != 0;
+ gimple_stmt_iterator si;
+ gimple first;
+ gimple_seq blist = NULL;
+ bool thr = stmt_could_throw_p (stmt);
/* If the last statement of this BB created an EH edge
before scalarization, we have to locate the first
list will be added to normal outgoing edges of the same
BB. If they access any memory, it's the same memory, so
we can assume they won't throw. */
- tsi = tsi_start (list);
- for (first = tsi_stmt (tsi);
- thr && !tsi_end_p (tsi) && !tree_could_throw_p (first);
- first = tsi_stmt (tsi))
+ si = gsi_start (seq);
+ for (first = gsi_stmt (si);
+ thr && !gsi_end_p (si) && !stmt_could_throw_p (first);
+ first = gsi_stmt (si))
{
- tsi_delink (&tsi);
- append_to_statement_list (first, &blist);
+ gsi_remove (&si, false);
+ gimple_seq_add_stmt (&blist, first);
}
- /* Extract the first remaining statement from LIST, this is
+ /* Extract the first remaining statement from SEQ; this is
the EH statement if there is one. */
- tsi_delink (&tsi);
+ gsi_remove (&si, false);
if (blist)
- sra_insert_before (bsi, blist);
+ sra_insert_before (gsi, blist);
/* Replace the old statement with this new representative. */
- bsi_replace (bsi, first, true);
+ gsi_replace (gsi, first, true);
- if (!tsi_end_p (tsi))
+ if (!gsi_end_p (si))
{
/* If any reference would trap, then they all would. And more
to the point, the first would. Therefore none of the rest
TREE_THIS_NOTRAP in all INDIRECT_REFs. */
do
{
- walk_tree (tsi_stmt_ptr (tsi), mark_notrap, NULL, NULL);
- tsi_next (&tsi);
+ walk_gimple_stmt (&si, NULL, mark_notrap, NULL);
+ gsi_next (&si);
}
- while (!tsi_end_p (tsi));
+ while (!gsi_end_p (si));
- insert_edge_copies (list, bsi->bb);
+ insert_edge_copies_seq (seq, gsi_bb (*gsi));
}
}
else
- sra_replace (bsi, list);
+ sra_replace (gsi, seq);
}
}
static void
scalarize_parms (void)
{
- tree list = NULL;
+ gimple_seq seq = NULL;
unsigned i;
bitmap_iterator bi;
{
tree var = referenced_var (i);
struct sra_elt *elt = lookup_element (NULL, var, NULL, NO_INSERT);
- generate_copy_inout (elt, true, var, &list);
+ generate_copy_inout (elt, true, var, &seq);
}
- if (list)
+ if (seq)
{
- insert_edge_copies (list, ENTRY_BLOCK_PTR);
- mark_all_v_defs (list);
+ insert_edge_copies_seq (seq, ENTRY_BLOCK_PTR);
+ mark_all_v_defs_seq (seq);
}
}
sra_walk_function (&fns);
scalarize_parms ();
- bsi_commit_edge_inserts ();
+ gsi_commit_edge_inserts ();
}
\f
void
sra_init_cache (void)
{
- if (sra_type_decomp_cache)
+ if (sra_type_decomp_cache)
return;
sra_type_decomp_cache = BITMAP_ALLOC (NULL);
sra_type_inst_cache = BITMAP_ALLOC (NULL);
}
+
/* Main entry point. */
static unsigned int
return flag_tree_sra != 0;
}
-struct tree_opt_pass pass_sra_early =
+struct gimple_opt_pass pass_sra_early =
{
+ {
+ GIMPLE_PASS,
"esra", /* name */
gate_sra, /* gate */
tree_sra_early, /* execute */
TODO_dump_func
| TODO_update_ssa
| TODO_ggc_collect
- | TODO_verify_ssa, /* todo_flags_finish */
- 0 /* letter */
+ | TODO_verify_ssa /* todo_flags_finish */
+ }
};
-struct tree_opt_pass pass_sra =
+struct gimple_opt_pass pass_sra =
{
+ {
+ GIMPLE_PASS,
"sra", /* name */
gate_sra, /* gate */
tree_sra, /* execute */
TODO_dump_func
| TODO_update_ssa
| TODO_ggc_collect
- | TODO_verify_ssa, /* todo_flags_finish */
- 0 /* letter */
+ | TODO_verify_ssa /* todo_flags_finish */
+ }
};