/* Loop autoparallelization.
- Copyright (C) 2006, 2007, 2008, 2009, 2010
+ Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Sebastian Pop <pop@cri.ensmp.fr> and
Zdenek Dvorak <dvorakz@suse.cz>.
FOR_EACH_VEC_ELT (basic_block, body, i, bb)
if (bb != entry_bb && bb != exit_bb)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- if (gimple_debug_bind_p (gsi_stmt (gsi)))
- has_debug_stmt = true;
+ if (is_gimple_debug (gsi_stmt (gsi)))
+ {
+ if (gimple_debug_bind_p (gsi_stmt (gsi)))
+ has_debug_stmt = true;
+ }
else
eliminate_local_variables_stmt (entry, &gsi, decl_address);
replacement decls are stored in DECL_COPIES. */
static bool
-separate_decls_in_region_debug_bind (gimple stmt,
- htab_t name_copies, htab_t decl_copies)
+separate_decls_in_region_debug (gimple stmt, htab_t name_copies,
+ htab_t decl_copies)
{
use_operand_p use;
ssa_op_iter oi;
struct name_to_copy_elt elt;
void **slot, **dslot;
- var = gimple_debug_bind_get_var (stmt);
+ if (gimple_debug_bind_p (stmt))
+ var = gimple_debug_bind_get_var (stmt);
+ else if (gimple_debug_source_bind_p (stmt))
+ var = gimple_debug_source_bind_get_var (stmt);
+ else
+ return true;
if (TREE_CODE (var) == DEBUG_EXPR_DECL)
return true;
gcc_assert (DECL_P (var) && SSA_VAR_P (var));
dslot = htab_find_slot_with_hash (decl_copies, &ielt, ielt.uid, NO_INSERT);
if (!dslot)
return true;
- gimple_debug_bind_set_var (stmt, ((struct int_tree_map *) *dslot)->to);
+ if (gimple_debug_bind_p (stmt))
+ gimple_debug_bind_set_var (stmt, ((struct int_tree_map *) *dslot)->to);
+ else if (gimple_debug_source_bind_p (stmt))
+    gimple_debug_source_bind_set_var (stmt,
+				      ((struct int_tree_map *) *dslot)->to);
FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE)
{
{
gimple stmt = gsi_stmt (gsi);
- if (gimple_debug_bind_p (stmt))
+ if (is_gimple_debug (stmt))
{
- if (separate_decls_in_region_debug_bind (stmt,
- name_copies,
- decl_copies))
+ if (separate_decls_in_region_debug (stmt, name_copies,
+ decl_copies))
{
gsi_remove (&gsi, true);
continue;
gimple phi, nphi, cond_stmt, stmt, cond_nit;
gimple_stmt_iterator gsi;
tree nit_1;
+ edge exit_1;
+ tree new_rhs;
split_block_after_labels (loop->header);
orig_header = single_succ (loop->header);
control = t;
}
}
+
+  /* Set the condition so that the last iteration is peeled off:
+     if the block containing the exit condition has the latch as
+     successor, then the loop body is executed before the exit
+     condition is tested.  In that case, moving the condition to
+     the entry makes the loop iterate one time less, which is the
+     wanted outcome, since we peel off the last iteration.  If the
+     body is executed after the condition, moving the condition to
+     the entry requires decrementing one iteration.  */
+ exit_1 = EDGE_SUCC (exit->src, EDGE_SUCC (exit->src, 0) == exit);
+ if (exit_1->dest == loop->latch)
+ new_rhs = gimple_cond_rhs (cond_stmt);
+ else
+ {
+      new_rhs = fold_build2 (MINUS_EXPR,
+			     TREE_TYPE (gimple_cond_rhs (cond_stmt)),
+			     gimple_cond_rhs (cond_stmt),
+			     build_int_cst (TREE_TYPE
+					    (gimple_cond_rhs (cond_stmt)), 1));
+ if (TREE_CODE (gimple_cond_rhs (cond_stmt)) == SSA_NAME)
+ {
+ basic_block preheader;
+ gimple_stmt_iterator gsi1;
+
+	  preheader = loop_preheader_edge (loop)->src;
+ gsi1 = gsi_after_labels (preheader);
+	  new_rhs = force_gimple_operand_gsi (&gsi1, new_rhs, true,
+					      NULL_TREE, false,
+					      GSI_CONTINUE_LINKING);
+ }
+ }
+ gimple_cond_set_rhs (cond_stmt, unshare_expr (new_rhs));
+ gimple_cond_set_lhs (cond_stmt, unshare_expr (gimple_cond_lhs (cond_stmt)));
+
bbs = get_loop_body_in_dom_order (loop);
for (n = 0; bbs[n] != loop->latch; n++)