+/* Return true if IV's scalar evolution in LOOP can be analyzed and
+   has a constant integer step.  Such an induction variable's value
+   can be recomputed at any point, so the statements computing it are
+   safe to duplicate when restructuring the loop nest.  */
+
+static bool
+can_duplicate_iv (tree iv, struct loop *loop)
+{
+ tree scev = instantiate_parameters
+ (loop, analyze_scalar_evolution (loop, iv));
+
+ if (!automatically_generated_chrec_p (scev))
+ {
+ tree step = evolution_part_in_loop_num (scev, loop->num);
+
+ /* Only a known, compile-time-constant step qualifies; an unknown
+ or symbolic step would make recomputation unsafe.  */
+ if (step && step != chrec_dont_know && TREE_CODE (step) == INTEGER_CST)
+ return true;
+ }
+
+ return false;
+}
+
+/* If this is a scalar operation that can be put back into the inner
+ loop, or after the inner loop, through copying, then do so. This
+ works on the theory that any amount of scalar code we have to
+ reduplicate into or after the loops is less expensive than the win
+ we get from rearranging the memory walk the loop is doing so that
+ it has better cache behavior. */
+
+/* Return true if STMT, a GIMPLE_MODIFY_STMT belonging to LOOP, cannot
+ be duplicated into or after LOOP's inner loop, and hence blocks the
+ transformation of the nest into a perfect nest.  */
+
+static bool
+cannot_convert_modify_to_perfect_nest (tree stmt, struct loop *loop)
+{
+
+ use_operand_p use_a, use_b;
+ imm_use_iterator imm_iter;
+ ssa_op_iter op_iter, op_iter1;
+ tree op0 = GIMPLE_STMT_OPERAND (stmt, 0);
+
+ /* The statement should not define a variable used in the inner
+ loop.  (A duplicable IV is exempt: its value can be recomputed
+ wherever it is needed.)  */
+ if (TREE_CODE (op0) == SSA_NAME
+ && !can_duplicate_iv (op0, loop))
+ FOR_EACH_IMM_USE_FAST (use_a, imm_iter, op0)
+ if (bb_for_stmt (USE_STMT (use_a))->loop_father
+ == loop->inner)
+ return true;
+
+ FOR_EACH_SSA_USE_OPERAND (use_a, stmt, op_iter, SSA_OP_USE)
+ {
+ tree node, op = USE_FROM_PTR (use_a);
+
+ /* The variables should not be used in both loops.  */
+ if (!can_duplicate_iv (op, loop))
+ FOR_EACH_IMM_USE_FAST (use_b, imm_iter, op)
+ if (bb_for_stmt (USE_STMT (use_b))->loop_father
+ == loop->inner)
+ return true;
+
+ /* The statement should not use the value of a scalar that was
+ modified in the loop.  We detect this by checking whether any
+ PHI argument feeding OP is defined inside the inner loop.  */
+ node = SSA_NAME_DEF_STMT (op);
+ if (TREE_CODE (node) == PHI_NODE)
+ FOR_EACH_PHI_ARG (use_b, node, op_iter1, SSA_OP_USE)
+ {
+ tree arg = USE_FROM_PTR (use_b);
+
+ if (TREE_CODE (arg) == SSA_NAME)
+ {
+ tree arg_stmt = SSA_NAME_DEF_STMT (arg);
+
+ /* bb_for_stmt may be NULL for e.g. default definitions;
+ those cannot come from the inner loop.  */
+ if (bb_for_stmt (arg_stmt)
+ && (bb_for_stmt (arg_stmt)->loop_father
+ == loop->inner))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/* Return true when BB contains statements that can harm the transform
+ to a perfect loop nest, i.e. statements that can neither be ignored
+ nor safely moved into or after LOOP's inner loop.  */
+
+static bool
+cannot_convert_bb_to_perfect_nest (basic_block bb, struct loop *loop)
+{
+ block_stmt_iterator bsi;
+ tree exit_condition = get_loop_exit_condition (loop);
+
+ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
+ {
+ tree stmt = bsi_stmt (bsi);
+
+ /* Loop-control statements are harmless: the exit test, statements
+ we do not care about, and the IV bump of LOOP itself.  */
+ if (stmt == exit_condition
+ || not_interesting_stmt (stmt)
+ || stmt_is_bumper_for_loop (loop, stmt))
+ continue;
+
+ if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
+ {
+ if (cannot_convert_modify_to_perfect_nest (stmt, loop))
+ return true;
+
+ /* A duplicable IV assignment can always be recomputed where
+ needed, so it never blocks the transform.  */
+ if (can_duplicate_iv (GIMPLE_STMT_OPERAND (stmt, 0), loop))
+ continue;
+
+ /* Otherwise the statement is acceptable if it can be copied
+ into the inner loop or sunk after it.  */
+ if (can_put_in_inner_loop (loop->inner, stmt)
+ || can_put_after_inner_loop (loop, stmt))
+ continue;
+ }
+
+ /* If the bb of a statement we care about isn't dominated by the
+ header of the inner loop, then we can't handle this case
+ right now. This test ensures that the statement comes
+ completely *after* the inner loop. */
+ if (!dominated_by_p (CDI_DOMINATORS,
+ bb_for_stmt (stmt),
+ loop->inner->header))
+ return true;
+ }
+
+ return false;
+}