+ if (!exit_phi_for_loop_p (inner, USE_STMT (use_p)))
+ {
+ basic_block immbb = gimple_bb (USE_STMT (use_p));
+
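+ /* Any use that is not an exit PHI for INNER must itself be
+ located in a block inside INNER.  */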
+ if (!flow_bb_inside_loop_p (inner, immbb))
+ return false;
+ }
+ }
+ return true;
+}
+
+/* Return true if STMT can be put *after* the inner loop of LOOP. */
+
+static bool
+can_put_after_inner_loop (struct loop *loop, gimple stmt)
+{
+ imm_use_iterator imm_iter;
+ use_operand_p use_p;
+
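+ /* A statement with virtual operands touches memory and cannot
+ be copied past the inner loop safely.  */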
+ if (gimple_vuse (stmt))
+ return false;
+
+ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_assign_lhs (stmt))
+ {
+ if (!exit_phi_for_loop_p (loop, USE_STMT (use_p)))
+ {
+ basic_block immbb = gimple_bb (USE_STMT (use_p));
+
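+ /* The use must be located after the inner loop; failing that,
+ STMT must at least be movable into the inner loop itself.  */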
+ if (!dominated_by_p (CDI_DOMINATORS,
+ immbb,
+ loop->inner->header)
+ && !can_put_in_inner_loop (loop->inner, stmt))
+ return false;
+ }
+ }
+ return true;
+}
+
+/* Return true when the induction variable IV is simple enough to be
+ re-synthesized. */
+
+static bool
+can_duplicate_iv (tree iv, struct loop *loop)
+{
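+ /* Analyze the evolution of IV in LOOP, instantiating any
+ symbolic parameters in the result.  */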
+ tree scev = instantiate_parameters
+ (loop, analyze_scalar_evolution (loop, iv));
+
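+ /* IV can only be re-created when its evolution is known and
+ its step in LOOP is a constant integer.  */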
+ if (!automatically_generated_chrec_p (scev))
+ {
+ tree step = evolution_part_in_loop_num (scev, loop->num);
+
+ if (step && step != chrec_dont_know && TREE_CODE (step) == INTEGER_CST)
+ return true;
+ }
+
+ return false;
+}
+
+/* If this is a scalar operation that can be put back into the inner
+ loop, or after the inner loop, through copying, then do so. This
+ works on the theory that any amount of scalar code we have to
+ reduplicate into or after the loops is less expensive than the win
+ we get from rearranging the memory walk the loop is doing so that
+ it has better cache behavior. */
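+
+/* For instance, in a nest like
+
+     for (i = 0; i < N; i++)
+       {
+         x = i * 4;
+         for (j = 0; j < M; j++)
+           A[j][i] = x + j;
+       }
+
+   the assignment to X can be copied into the body of the inner
+   loop, leaving a perfect nest that can then be interchanged.  */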
+
+static bool
+cannot_convert_modify_to_perfect_nest (gimple stmt, struct loop *loop)
+{
+ use_operand_p use_a, use_b;
+ imm_use_iterator imm_iter;
+ ssa_op_iter op_iter, op_iter1;
+ tree op0 = gimple_assign_lhs (stmt);
+
+ /* The statement should not define a variable used in the inner
+ loop. */
+ if (TREE_CODE (op0) == SSA_NAME
+ && !can_duplicate_iv (op0, loop))
+ FOR_EACH_IMM_USE_FAST (use_a, imm_iter, op0)
+ if (gimple_bb (USE_STMT (use_a))->loop_father == loop->inner)
+ return true;
+
+ FOR_EACH_SSA_USE_OPERAND (use_a, stmt, op_iter, SSA_OP_USE)
+ {
+ gimple node;
+ tree op = USE_FROM_PTR (use_a);
+
+ /* The variables should not be used in both loops. */
+ if (!can_duplicate_iv (op, loop))
+ FOR_EACH_IMM_USE_FAST (use_b, imm_iter, op)
+ if (gimple_bb (USE_STMT (use_b))->loop_father == loop->inner)
+ return true;
+
+ /* The statement should not use the value of a scalar that was
+ modified in the loop. */
+ node = SSA_NAME_DEF_STMT (op);
+ if (gimple_code (node) == GIMPLE_PHI)
+ FOR_EACH_PHI_ARG (use_b, node, op_iter1, SSA_OP_USE)
+ {
+ tree arg = USE_FROM_PTR (use_b);
+
+ if (TREE_CODE (arg) == SSA_NAME)
+ {
+ gimple arg_stmt = SSA_NAME_DEF_STMT (arg);
+
+ if (gimple_bb (arg_stmt)
+ && (gimple_bb (arg_stmt)->loop_father == loop->inner))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/* Return true when BB contains statements that can harm the transform
+ to a perfect loop nest. */
+
+static bool
+cannot_convert_bb_to_perfect_nest (basic_block bb, struct loop *loop)
+{
+ gimple_stmt_iterator bsi;
+ gimple exit_condition = get_loop_exit_condition (loop);
+
+ for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
+ {
+ gimple stmt = gsi_stmt (bsi);
+
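+ /* Skip the exit test, statements irrelevant to the transform,
+ and the bump of the loop counter.  */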
+ if (stmt == exit_condition
+ || not_interesting_stmt (stmt)
+ || stmt_is_bumper_for_loop (loop, stmt))
+ continue;
+
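+ /* An assignment is tolerable when it does not interfere with
+ the inner loop and its result can either be re-synthesized as
+ a simple IV or copied into or after the inner loop.  */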
+ if (is_gimple_assign (stmt))
+ {
+ if (cannot_convert_modify_to_perfect_nest (stmt, loop))
+ return true;
+
+ if (can_duplicate_iv (gimple_assign_lhs (stmt), loop))
+ continue;
+
+ if (can_put_in_inner_loop (loop->inner, stmt)
+ || can_put_after_inner_loop (loop, stmt))
+ continue;
+ }
+
+ /* If the bb of a statement we care about isn't dominated by the
+ header of the inner loop, then we can't handle this case
+ right now. This test ensures that the statement comes
+ completely *after* the inner loop. */
+ if (!dominated_by_p (CDI_DOMINATORS,
+ gimple_bb (stmt),
+ loop->inner->header))