vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
- if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
{
fprintf (vect_dump,
"not vectorized: unsupported data-type ");
if (gimple_get_lhs (stmt) == NULL_TREE)
{
- if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
{
fprintf (vect_dump, "not vectorized: irregular stmt.");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
{
- if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
{
fprintf (vect_dump, "not vectorized: vector stmt in loop:");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
- if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
{
fprintf (vect_dump,
"not vectorized: unsupported data-type ");
fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
if (vectorization_factor <= 1)
{
- if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
fprintf (vect_dump, "not vectorized: unsupported data-type");
return false;
}
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");
- /* First - identify all inductions. */
+ /* First - identify all inductions. Reduction detection assumes that all
+ the inductions have been identified; therefore, this order must not be
+ changed. */
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
}
- /* Second - identify all reductions. */
+ /* Second - identify all reductions and nested cycles. */
while (VEC_length (gimple, worklist) > 0)
{
gimple phi = VEC_pop (gimple, worklist);
tree def = PHI_RESULT (phi);
stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
gimple reduc_stmt;
+ bool nested_cycle;
if (vect_print_dump_info (REPORT_DETAILS))
{
gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
- reduc_stmt = vect_is_simple_reduction (loop_vinfo, phi);
+ nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
+ reduc_stmt = vect_is_simple_reduction (loop_vinfo, phi, !nested_cycle);
if (reduc_stmt)
{
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "Detected reduction.");
- STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
- STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
+ if (nested_cycle)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "Detected vectorizable nested cycle.");
+
+ STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
+ STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
+ vect_nested_cycle;
+ }
+ else
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "Detected reduction.");
+
+ STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
+ STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
vect_reduction_def;
+ }
}
else
if (vect_print_dump_info (REPORT_DETAILS))
}
VEC_free (gimple, heap, worklist);
- return;
}
vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
-
/* Function vect_get_loop_niters.
Determine how many iterations the loop executes.
*number_of_iterations = niters;
if (vect_print_dump_info (REPORT_DETAILS))
- {
- fprintf (vect_dump, "==> get_loop_niters:" );
- print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
- }
+ {
+ fprintf (vect_dump, "==> get_loop_niters:" );
+ print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
+ }
}
return get_loop_exit_condition (loop);
{
gimple phi = gsi_stmt (si);
gimple_set_uid (phi, 0);
- set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res));
+ set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
}
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
{
gimple stmt = gsi_stmt (si);
gimple_set_uid (stmt, 0);
- set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));
+ set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
}
}
}
if (loop->num_nodes != 2)
{
if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
- fprintf (vect_dump, "not vectorized: too many BBs in loop.");
+ fprintf (vect_dump, "not vectorized: control flow in loop.");
return NULL;
}
if (loop->num_nodes != 5)
{
if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
- fprintf (vect_dump, "not vectorized: too many BBs in loop.");
+ fprintf (vect_dump, "not vectorized: control flow in loop.");
destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL;
}
}
else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
{
- if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
fprintf (vect_dump, "not vectorized: number of iterations = 0.");
if (inner_loop_vinfo)
destroy_loop_vec_info (inner_loop_vinfo, false);
return loop_vinfo;
}
+
+/* Function vect_analyze_loop_operations.
+
+ Scan the loop stmts and make sure they are all vectorizable. */
+
+static bool
+vect_analyze_loop_operations (loop_vec_info loop_vinfo)
+{
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
+ int nbbs = loop->num_nodes;
+ gimple_stmt_iterator si;
+ unsigned int vectorization_factor = 0;
+ int i;
+ gimple phi;
+ stmt_vec_info stmt_info;
+ bool need_to_vectorize = false;
+ int min_profitable_iters;
+ int min_scalar_loop_bound;
+ unsigned int th;
+ bool only_slp_in_loop = true, ok;
+
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "=== vect_analyze_loop_operations ===");
+
+ gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
+ vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+
+ for (i = 0; i < nbbs; i++)
+ {
+ basic_block bb = bbs[i];
+
+ for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
+ {
+ phi = gsi_stmt (si);
+ ok = true;
+
+ stmt_info = vinfo_for_stmt (phi);
+ if (vect_print_dump_info (REPORT_DETAILS))
+ {
+ fprintf (vect_dump, "examining phi: ");
+ print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
+ }
+
+ if (! is_loop_header_bb_p (bb))
+ {
+ /* inner-loop loop-closed exit phi in outer-loop vectorization
+ (i.e. a phi in the tail of the outer-loop).
+ FORNOW: we currently don't support the case that these phis
+ are not used in the outer-loop, because this case requires us
+ to actually do something here. */
+ if (!STMT_VINFO_RELEVANT_P (stmt_info)
+ || STMT_VINFO_LIVE_P (stmt_info))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump,
+ "Unsupported loop-closed phi in outer-loop.");
+ return false;
+ }
+ continue;
+ }
+
+ gcc_assert (stmt_info);
+
+ if (STMT_VINFO_LIVE_P (stmt_info))
+ {
+ /* FORNOW: not yet supported. */
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ fprintf (vect_dump, "not vectorized: value used after loop.");
+ return false;
+ }
+
+ if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
+ && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
+ {
+ /* A scalar-dependence cycle that we don't support. */
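+ /* E.g. (illustration only): x_1 = phi (x_0, x_2) used by a loop stmt,
+ with x_2 = f (x_1) forming a cycle that is neither an induction nor
+ a recognized reduction. */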
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ fprintf (vect_dump, "not vectorized: scalar dependence cycle.");
+ return false;
+ }
+
+ if (STMT_VINFO_RELEVANT_P (stmt_info))
+ {
+ need_to_vectorize = true;
+ if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
+ ok = vectorizable_induction (phi, NULL, NULL);
+ }
+
+ if (!ok)
+ {
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ {
+ fprintf (vect_dump,
+ "not vectorized: relevant phi not supported: ");
+ print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
+ }
+ return false;
+ }
+ }
+
+ for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
+ {
+ gimple stmt = gsi_stmt (si);
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+
+ gcc_assert (stmt_info);
+
+ if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
+ return false;
+
+ if (STMT_VINFO_RELEVANT_P (stmt_info) && !PURE_SLP_STMT (stmt_info))
+ /* STMT needs both SLP and loop-based vectorization. */
+ only_slp_in_loop = false;
+ }
+ } /* bbs */
+
+ /* All operations in the loop are either irrelevant (they deal with loop
+ control, or are dead), or only used outside the loop and can be moved
+ out of the loop (e.g. invariants, inductions). The loop can be
+ optimized away by scalar optimizations. We're better off not
+ touching this loop. */
+ if (!need_to_vectorize)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump,
+ "All the computation can be taken out of the loop.");
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ fprintf (vect_dump,
+ "not vectorized: redundant loop. no profit to vectorize.");
+ return false;
+ }
+
+ /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
+ vectorization factor of the loop is the unrolling factor required by the
+ SLP instances. If that unrolling factor is 1, we say that we perform
+ pure SLP on the loop - cross-iteration parallelism is not exploited. */
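+ /* E.g. (numbers for illustration only): with a loop vectorization factor
+ of 4 and an SLP unrolling factor of 2, the combined factor is
+ lcm (4, 2) = 4; an unrolling factor of 3 would yield lcm (4, 3) = 12. */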
+ if (only_slp_in_loop)
+ vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
+ else
+ vectorization_factor = least_common_multiple (vectorization_factor,
+ LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
+
+ LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
+
+ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+ && vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump,
+ "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
+ vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));
+
+ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+ && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
+ {
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ fprintf (vect_dump, "not vectorized: iteration count too small.");
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump,"not vectorized: iteration count smaller than "
+ "vectorization factor.");
+ return false;
+ }
+
+ /* Analyze cost. Decide if worthwhile to vectorize. */
+
+ /* Once VF is set, SLP costs should be updated since the number of created
+ vector stmts depends on VF. */
+ vect_update_slp_costs_according_to_vf (loop_vinfo);
+
+ min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
+ LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;
+
+ if (min_profitable_iters < 0)
+ {
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ fprintf (vect_dump, "not vectorized: vectorization not profitable.");
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "not vectorized: vector version will never be "
+ "profitable.");
+ return false;
+ }
+
+ min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
+ * vectorization_factor) - 1);
+
+ /* Use the cost model only if it is more conservative than the
+ user-specified threshold. */
+
+ th = (unsigned) min_scalar_loop_bound;
+ if (min_profitable_iters
+ && (!min_scalar_loop_bound
+ || min_profitable_iters > min_scalar_loop_bound))
+ th = (unsigned) min_profitable_iters;
+
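+ /* Worked example (assumed numbers): with PARAM_MIN_VECT_LOOP_BOUND = 2 and
+ a vectorization factor of 4, min_scalar_loop_bound is 2 * 4 - 1 = 7; if
+ the cost model returned min_profitable_iters = 10, the stricter value
+ th = 10 is the threshold checked below. */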
+ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+ && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
+ {
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ fprintf (vect_dump, "not vectorized: vectorization not "
+ "profitable.");
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "not vectorized: iteration count smaller than "
+ "user specified loop bound parameter or minimum "
+ "profitable iterations (whichever is more conservative).");
+ return false;
+ }
+
+ if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+ || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
+ || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "epilog loop required.");
+ if (!vect_can_advance_ivs_p (loop_vinfo))
+ {
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ fprintf (vect_dump,
+ "not vectorized: can't create epilog loop 1.");
+ return false;
+ }
+ if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
+ {
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ fprintf (vect_dump,
+ "not vectorized: can't create epilog loop 2.");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
/* Function vect_analyze_loop.
Apply a set of analyses on LOOP, and create a loop_vec_info struct
FORNOW: Handle only simple array references whose
alignment can be forced, and aligned pointer-references. */
- ok = vect_analyze_data_refs (loop_vinfo);
+ ok = vect_analyze_data_refs (loop_vinfo, NULL);
if (!ok)
{
if (vect_print_dump_info (REPORT_DETAILS))
/* Analyze the alignment of the data-refs in the loop.
Fail if a data reference is found that cannot be vectorized. */
- ok = vect_analyze_data_refs_alignment (loop_vinfo);
+ ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
if (!ok)
{
if (vect_print_dump_info (REPORT_DETAILS))
/* Analyze data dependences between the data-refs in the loop.
FORNOW: fail at the first data dependence that we encounter. */
- ok = vect_analyze_data_ref_dependences (loop_vinfo);
+ ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL);
if (!ok)
{
if (vect_print_dump_info (REPORT_DETAILS))
/* Analyze the access patterns of the data-refs in the loop (consecutive,
complex, etc.). FORNOW: Only handle consecutive access pattern. */
- ok = vect_analyze_data_ref_accesses (loop_vinfo);
+ ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
if (!ok)
{
if (vect_print_dump_info (REPORT_DETAILS))
}
/* Check the SLP opportunities in the loop, analyze and build SLP trees. */
- ok = vect_analyze_slp (loop_vinfo);
+ ok = vect_analyze_slp (loop_vinfo, NULL);
if (ok)
{
/* Decide which possible SLP instances to SLP. */
/* Scan all the operations in the loop and make sure they are
vectorizable. */
- ok = vect_analyze_operations (loop_vinfo);
+ ok = vect_analyze_loop_operations (loop_vinfo);
if (!ok)
{
if (vect_print_dump_info (REPORT_DETAILS))
such that:
1. operation is commutative and associative and it is safe to
- change the order of the computation.
+ change the order of the computation (if CHECK_REDUCTION is true)
2. no uses for a2 in the loop (a2 is used out of the loop)
3. no uses of a1 in the loop besides the reduction operation.
Condition 1 is tested here.
- Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized. */
+ Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
+
+ Also detect a cross-iteration def-use cycle in nested loops, i.e., nested
+ cycles, if CHECK_REDUCTION is false. */
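+
+/* For illustration only: a typical reduction detected when CHECK_REDUCTION
+ is true is s_1 = s_0 + a[i], accumulated across iterations; a nested
+ cycle is the cross-iteration def-use cycle of an inner-loop phi
+ encountered when vectorizing the outer loop. */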
gimple
-vect_is_simple_reduction (loop_vec_info loop_info, gimple phi)
+vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
+ bool check_reduction)
{
struct loop *loop = (gimple_bb (phi))->loop_father;
struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
imm_use_iterator imm_iter;
use_operand_p use_p;
- gcc_assert (loop == vect_loop || flow_loop_nested_p (vect_loop, loop));
+ /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization;
+ otherwise, we assume outer-loop vectorization. */
+ gcc_assert ((check_reduction && loop == vect_loop)
+ || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
name = PHI_RESULT (phi);
nloop_uses = 0;
code = gimple_assign_rhs_code (def_stmt);
- if (!commutative_tree_code (code) || !associative_tree_code (code))
+ if (check_reduction
+ && (!commutative_tree_code (code) || !associative_tree_code (code)))
{
if (vect_print_dump_info (REPORT_DETAILS))
report_vect_op (def_stmt, "reduction: not commutative/associative: ");
return NULL;
}
- /* Check that it's ok to change the order of the computation. */
type = TREE_TYPE (gimple_assign_lhs (def_stmt));
if (TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op1))
|| TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op2)))
return NULL;
}
- /* Generally, when vectorizing a reduction we change the order of the
+ /* Check that it's ok to change the order of the computation.
+ Generally, when vectorizing a reduction we change the order of the
computation. This may change the behavior of the program in some
cases, so we need to check that this is ok. One exception is when
vectorizing an outer-loop: the inner-loop is executed sequentially,
/* CHECKME: check for !flag_finite_math_only too? */
if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
- && !nested_in_vect_loop_p (vect_loop, def_stmt))
+ && check_reduction)
{
/* Changing the order of operations changes the semantics. */
if (vect_print_dump_info (REPORT_DETAILS))
return NULL;
}
else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
- && !nested_in_vect_loop_p (vect_loop, def_stmt))
+ && check_reduction)
{
/* Changing the order of operations changes the semantics. */
if (vect_print_dump_info (REPORT_DETAILS))
report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
return NULL;
}
- else if (SAT_FIXED_POINT_TYPE_P (type))
+ else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
{
/* Changing the order of operations changes the semantics. */
if (vect_print_dump_info (REPORT_DETAILS))
return NULL;
}
- /* reduction is safe. we're dealing with one of the following:
+ /* Reduction is safe. We're dealing with one of the following:
1) integer arithmetic and no trapv
- 2) floating point arithmetic, and special flags permit this optimization.
- */
+ 2) floating point arithmetic, and special flags permit this optimization
+ 3) nested cycle (i.e., outer loop vectorization). */
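+ /* (For floating-point types, the reassociation permission checked above
+ is flag_associative_math, i.e. -fassociative-math.) */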
def1 = SSA_NAME_DEF_STMT (op1);
def2 = SSA_NAME_DEF_STMT (op2);
if (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2))
/* Check that one def is the reduction def, defined by PHI,
- the other def is either defined in the loop ("vect_loop_def"),
+ the other def is either defined in the loop ("vect_internal_def"),
or it's an induction (defined by a loop-header phi-node). */
if (def2 == phi
&& (is_gimple_assign (def1)
|| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_induction_def
|| (gimple_code (def1) == GIMPLE_PHI
- && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_loop_def
+ && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
+ == vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def1)))))
{
if (vect_print_dump_info (REPORT_DETAILS))
- report_vect_op (def_stmt, "detected reduction:");
+ report_vect_op (def_stmt, "detected reduction: ");
return def_stmt;
}
else if (def1 == phi
&& flow_bb_inside_loop_p (loop, gimple_bb (def2))
&& (is_gimple_assign (def2)
- || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) == vect_induction_def
+ || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
+ == vect_induction_def
|| (gimple_code (def2) == GIMPLE_PHI
- && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) == vect_loop_def
+ && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
+ == vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def2)))))
{
- /* Swap operands (just for simplicity - so that the rest of the code
- can assume that the reduction variable is always the last (second)
- argument). */
- if (vect_print_dump_info (REPORT_DETAILS))
- report_vect_op (def_stmt ,
- "detected reduction: need to swap operands:");
- swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
- gimple_assign_rhs2_ptr (def_stmt));
+ if (check_reduction)
+ {
+ /* Swap operands (just for simplicity - so that the rest of the code
+ can assume that the reduction variable is always the last (second)
+ argument). */
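+ /* E.g. (illustration only): s_1 = s_0 + x is rewritten as s_1 = x + s_0. */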
+ if (vect_print_dump_info (REPORT_DETAILS))
+ report_vect_op (def_stmt,
+ "detected reduction: need to swap operands: ");
+
+ swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
+ gimple_assign_rhs2_ptr (def_stmt));
+ }
+ else
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ report_vect_op (def_stmt, "detected reduction: ");
+ }
+
return def_stmt;
}
else
{
if (vect_print_dump_info (REPORT_DETAILS))
- report_vect_op (def_stmt, "reduction: unknown pattern.");
+ report_vect_op (def_stmt, "reduction: unknown pattern: ");
+
return NULL;
}
}
}
/* Requires loop versioning tests to handle misalignment. */
- if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)))
+ if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
{
/* FIXME: Make cost depend on complexity of individual check. */
vec_outside_cost +=
"versioning to treat misalignment.\n");
}
- if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
+ /* Requires loop versioning with alias checks. */
+ if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
{
/* FIXME: Make cost depend on complexity of individual check. */
vec_outside_cost +=
"versioning aliasing.\n");
}
- if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
- || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
- {
- vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
- }
+ if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
+ || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
+ vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
/* Count statements in scalar loop. Using this as scalar cost for a single
iteration for now.
decide whether to vectorize at compile time. Hence the scalar version
does not carry cost model guard costs. */
if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
- || VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
- || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
+ || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
+ || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
{
/* Cost model check occurs at versioning. */
- if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
- || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
+ if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
+ || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
else
{
if (!nested_in_vect_loop_p (loop, orig_stmt))
{
- if (reduc_code < NUM_TREE_CODES)
+ if (reduc_code != ERROR_MARK)
outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
else
{
tree loop_arg;
gimple_stmt_iterator si;
basic_block bb = gimple_bb (iv_phi);
+ tree stepvectype;
vectype = get_vectype_for_scalar_type (scalar_type);
gcc_assert (vectype);
/* Find the first insertion point in the BB. */
si = gsi_after_labels (bb);
- if (INTEGRAL_TYPE_P (scalar_type) || POINTER_TYPE_P (scalar_type))
+ if (INTEGRAL_TYPE_P (scalar_type))
step_expr = build_int_cst (scalar_type, 0);
+ else if (POINTER_TYPE_P (scalar_type))
+ step_expr = build_int_cst (sizetype, 0);
else
step_expr = build_real (scalar_type, dconst0);
{
/* iv_loop is the loop to be vectorized. Generate:
vec_step = [VF*S, VF*S, VF*S, VF*S] */
- expr = build_int_cst (scalar_type, vf);
- new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
+ expr = build_int_cst (TREE_TYPE (step_expr), vf);
+ new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
+ expr, step_expr);
}
t = NULL_TREE;
for (i = 0; i < nunits; i++)
t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
gcc_assert (CONSTANT_CLASS_P (new_name));
- vec = build_vector (vectype, t);
- vec_step = vect_init_vector (iv_phi, vec, vectype, NULL);
+ stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
+ gcc_assert (stepvectype);
+ vec = build_vector (stepvectype, t);
+ vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
/* Create the following def-use cycle:
add_referenced_var (vec_dest);
induction_phi = create_phi_node (vec_dest, iv_loop->header);
set_vinfo_for_stmt (induction_phi,
- new_stmt_vec_info (induction_phi, loop_vinfo));
+ new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
induc_def = PHI_RESULT (induction_phi);
/* Create the iv update inside the loop */
vec_def = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, vec_def);
gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
- set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));
+ set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
+ NULL));
/* Set the arguments of the phi node: */
add_phi_arg (induction_phi, vec_init, pe);
gcc_assert (!nested_in_vect_loop);
/* Create the vector that holds the step of the induction. */
- expr = build_int_cst (scalar_type, nunits);
- new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
+ expr = build_int_cst (TREE_TYPE (step_expr), nunits);
+ new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
+ expr, step_expr);
t = NULL_TREE;
for (i = 0; i < nunits; i++)
t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
gcc_assert (CONSTANT_CLASS_P (new_name));
- vec = build_vector (vectype, t);
- vec_step = vect_init_vector (iv_phi, vec, vectype, NULL);
+ vec = build_vector (stepvectype, t);
+ vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
vec_def = induc_def;
prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
set_vinfo_for_stmt (new_stmt,
- new_stmt_vec_info (new_stmt, loop_vinfo));
+ new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
}
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
- int nunits = TYPE_VECTOR_SUBPARTS (vectype);
- tree scalar_type = TREE_TYPE (vectype);
+ tree scalar_type = TREE_TYPE (init_val);
+ tree vectype = get_vectype_for_scalar_type (scalar_type);
+ int nunits;
enum tree_code code = gimple_assign_rhs_code (stmt);
- tree type = TREE_TYPE (init_val);
- tree vecdef;
tree def_for_init;
tree init_def;
tree t = NULL_TREE;
int i;
bool nested_in_vect_loop = false;
- gcc_assert (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type));
+ gcc_assert (vectype);
+ nunits = TYPE_VECTOR_SUBPARTS (vectype);
+
+ gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
+ || SCALAR_FLOAT_TYPE_P (scalar_type));
if (nested_in_vect_loop_p (loop, stmt))
nested_in_vect_loop = true;
else
gcc_assert (loop == (gimple_bb (stmt))->loop_father);
- vecdef = vect_get_vec_def_for_operand (init_val, stmt, NULL);
-
switch (code)
{
case WIDEN_SUM_EXPR:
case DOT_PROD_EXPR:
case PLUS_EXPR:
+ case MINUS_EXPR:
if (nested_in_vect_loop)
- *adjustment_def = vecdef;
+ *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
else
*adjustment_def = init_val;
/* Create a vector of zeros for init_def. */
case MIN_EXPR:
case MAX_EXPR:
*adjustment_def = NULL_TREE;
- init_def = vecdef;
+ init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
break;
default:
in vectorizable_operation.
STMT is the scalar reduction stmt that is being vectorized.
REDUCTION_PHI is the phi-node that carries the reduction computation.
+ REDUC_INDEX is the index of the operand in the right hand side of the
+ statement that is defined by REDUCTION_PHI.
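+ (E.g., for illustration only: for s_1 = s_0 + x, REDUC_INDEX is 0;
+ for s_1 = x + s_0 it is 1.)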
This function:
1. Creates the reduction def-use cycle: sets the arguments for
vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
int ncopies,
enum tree_code reduc_code,
- gimple reduction_phi)
+ gimple reduction_phi,
+ int reduc_index)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
stmt_vec_info prev_phi_info;
switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
{
case GIMPLE_SINGLE_RHS:
- gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
- reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
+ gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
+ == ternary_op);
+ reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
break;
case GIMPLE_UNARY_RHS:
reduction_op = gimple_assign_rhs1 (stmt);
break;
case GIMPLE_BINARY_RHS:
- reduction_op = gimple_assign_rhs2 (stmt);
+ reduction_op = reduc_index ?
+ gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
break;
default:
gcc_unreachable ();
for (j = 0; j < ncopies; j++)
{
phi = create_phi_node (SSA_NAME_VAR (vect_def), exit_bb);
- set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo));
+ set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
if (j == 0)
new_phi = phi;
else
SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
prev_phi_info = vinfo_for_stmt (phi);
}
+
exit_gsi = gsi_after_labels (exit_bb);
/* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
}
+
code = gimple_assign_rhs_code (orig_stmt);
scalar_dest = gimple_assign_lhs (orig_stmt);
scalar_type = TREE_TYPE (scalar_dest);
/* 2.3 Create the reduction code, using one of the three schemes described
above. */
- if (reduc_code < NUM_TREE_CODES)
+ if (reduc_code != ERROR_MARK)
{
tree tmp;
}
else
{
- enum tree_code shift_code = 0;
+ enum tree_code shift_code = ERROR_MARK;
bool have_whole_vector_shift = true;
int bit_offset;
int element_bitsize = tree_low_cst (bitsize, 1);
{
if (nested_in_vect_loop)
{
+ /* For MINUS_EXPR we create new_temp = loop_exit_def + adjustment_def
+ since the initial value is [0,0,...,0]. */
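+ /* E.g. (illustration only): for s -= a[i] with initial value init, the
+ vector loop starts from [0,...,0], so the missing init is added back
+ here rather than subtracted, hence the PLUS_EXPR. */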
+ if (code == MINUS_EXPR)
+ code = PLUS_EXPR;
+
gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
new_dest = vect_create_destination_var (scalar_dest, vectype);
expr = build2 (code, scalar_type, new_temp, adjustment_def);
new_dest = vect_create_destination_var (scalar_dest, scalar_type);
}
+
epilog_stmt = gimple_build_assign (new_dest, expr);
new_temp = make_ssa_name (new_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
epilog_stmt = adjustment_def ? epilog_stmt : new_phi;
STMT_VINFO_VEC_STMT (stmt_vinfo) = epilog_stmt;
set_vinfo_for_stmt (epilog_stmt,
- new_stmt_vec_info (epilog_stmt, loop_vinfo));
+ new_stmt_vec_info (epilog_stmt, loop_vinfo,
+ NULL));
if (adjustment_def)
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
Check if STMT performs a reduction operation that can be vectorized.
If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at BSI.
+ stmt to replace it, put it in VEC_STMT, and insert it at GSI.
Return FALSE if not a vectorizable STMT, TRUE otherwise.
This function also handles reduction idioms (patterns) that have been
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- enum tree_code code, orig_code, epilog_reduc_code = 0;
+ enum tree_code code, orig_code, epilog_reduc_code;
enum machine_mode vec_mode;
int op_type;
optab optab, reduc_optab;
gimple new_stmt = NULL;
int j;
tree ops[3];
+ bool nested_cycle = false, found_nested_cycle_def = false;
+ gimple reduc_def_stmt = NULL;
+ /* By default, the reduction variable is the last operand in the statement. */
+ int reduc_index = 2;
if (nested_in_vect_loop_p (loop, stmt))
- loop = loop->inner;
+ {
+ loop = loop->inner;
+ nested_cycle = true;
+ }
gcc_assert (ncopies >= 1);
/* Reductions that are not used even in an enclosing outer-loop
are expected to be "live" (used out of the loop). */
- if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_loop
+ if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
&& !STMT_VINFO_LIVE_P (stmt_info))
return false;
/* Make sure it was already recognized as a reduction computation. */
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def)
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
+ && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
return false;
/* 2. Has this been recognized as a reduction pattern?
return false;
/* All uses but the last are expected to be defined in the loop.
- The last use is the reduction variable. */
+ The last use is the reduction variable. In case of a nested cycle this
+ assumption is not true: we use reduc_index to record the index of the
+ reduction variable. */
for (i = 0; i < op_type-1; i++)
{
- is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt,
+ is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, NULL, &def_stmt,
&def, &dt);
gcc_assert (is_simple_use);
- if (dt != vect_loop_def
- && dt != vect_invariant_def
+ if (dt != vect_internal_def
+ && dt != vect_external_def
&& dt != vect_constant_def
- && dt != vect_induction_def)
+ && dt != vect_induction_def
+ && dt != vect_nested_cycle)
return false;
+
+ if (dt == vect_nested_cycle)
+ {
+ found_nested_cycle_def = true;
+ reduc_def_stmt = def_stmt;
+ reduc_index = i;
+ }
}
- is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt, &def, &dt);
+ is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, NULL, &def_stmt,
+ &def, &dt);
gcc_assert (is_simple_use);
- gcc_assert (dt == vect_reduction_def);
- gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
+ gcc_assert (dt == vect_reduction_def
+ || dt == vect_nested_cycle
+ || ((dt == vect_internal_def || dt == vect_external_def
+ || dt == vect_constant_def || dt == vect_induction_def)
+ && nested_cycle && found_nested_cycle_def));
+ if (!found_nested_cycle_def)
+ reduc_def_stmt = def_stmt;
+
+ gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
if (orig_stmt)
- gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo, def_stmt));
+ gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
+ reduc_def_stmt,
+ !nested_cycle));
else
- gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, def_stmt));
+ gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
+ !nested_cycle));
- if (STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
+ if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
return false;
/* 4. Supportable by target? */
orig_code = code;
}
- if (!reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
- return false;
+ if (nested_cycle)
+ epilog_reduc_code = orig_code;
+ else if (!reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
+ return false;
+
reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype, optab_default);
if (!reduc_optab)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "no optab for reduction.");
- epilog_reduc_code = NUM_TREE_CODES;
+ epilog_reduc_code = ERROR_MARK;
}
if (optab_handler (reduc_optab, vec_mode)->insn_code == CODE_FOR_nothing)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "reduc op not supported by target.");
- epilog_reduc_code = NUM_TREE_CODES;
+ epilog_reduc_code = ERROR_MARK;
}
if (!vec_stmt) /* transformation not required. */
from the vectorized reduction operation generated in the previous iteration.
*/
- if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_loop)
+ if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
{
single_defuse_cycle = true;
epilog_copies = 1;
{
/* Create the reduction-phi that defines the reduction-operand. */
new_phi = create_phi_node (vec_dest, loop->header);
- set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo));
+ set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo,
+ NULL));
}
/* Handle uses. */
if (j == 0)
{
- loop_vec_def0 = vect_get_vec_def_for_operand (ops[0], stmt, NULL);
+ loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
+ stmt, NULL);
if (op_type == ternary_op)
{
- loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt, NULL);
+ if (reduc_index == 0)
+ loop_vec_def1 = vect_get_vec_def_for_operand (ops[2], stmt,
+ NULL);
+ else
+ loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt,
+ NULL);
}
- /* Get the vector def for the reduction variable from the phi node */
+ /* Get the vector def for the reduction variable from the phi
+ node. */
reduc_def = PHI_RESULT (new_phi);
first_phi = new_phi;
}
STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
}
+
/* Arguments are ready. Create the new vector stmt. */
if (op_type == binary_op)
- expr = build2 (code, vectype, loop_vec_def0, reduc_def);
+ {
+ if (reduc_index == 0)
+ expr = build2 (code, vectype, reduc_def, loop_vec_def0);
+ else
+ expr = build2 (code, vectype, loop_vec_def0, reduc_def);
+ }
else
- expr = build3 (code, vectype, loop_vec_def0, loop_vec_def1,
- reduc_def);
+ {
+ if (reduc_index == 0)
+ expr = build3 (code, vectype, reduc_def, loop_vec_def0,
+ loop_vec_def1);
+ else
+ {
+ if (reduc_index == 1)
+ expr = build3 (code, vectype, loop_vec_def0, reduc_def,
+ loop_vec_def1);
+ else
+ expr = build3 (code, vectype, loop_vec_def0, loop_vec_def1,
+ reduc_def);
+ }
+ }
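+ /* (Illustration: reduc_def is placed in the operand slot that the
+ reduction variable occupied in the scalar stmt, per reduc_index.) */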
+
new_stmt = gimple_build_assign (vec_dest, expr);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
if (!single_defuse_cycle)
new_temp = gimple_assign_lhs (*vec_stmt);
vect_create_epilog_for_reduction (new_temp, stmt, epilog_copies,
- epilog_reduc_code, first_phi);
+ epilog_reduc_code, first_phi, reduc_index);
return true;
}
op = TREE_OPERAND (gimple_op (stmt, 1), i);
else
op = gimple_op (stmt, i + 1);
- if (op && !vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt))
+ if (op
+ && !vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "use not simple.");
return false;
}
- if (dt != vect_invariant_def && dt != vect_constant_def)
+ if (dt != vect_external_def && dt != vect_constant_def)
return false;
}
|| (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));
- if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
- || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
+ if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
+ || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
vect_loop_versioning (loop_vinfo,
!do_peeling_for_loop_bound,
&cond_expr, &cond_expr_stmt_list);
- /* CHECKME: we wouldn't need this if we called update_ssa once
- for all loops. */
- bitmap_zero (vect_memsyms_to_rename);
-
/* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
compile time constant), or it is a constant that doesn't divide by the
vectorization factor, then an epilog loop needs to be created.
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== scheduling SLP instances ===");
- is_store = vect_schedule_slp (loop_vinfo);
-
- /* IS_STORE is true if STMT is a store. Stores cannot be of
- hybrid SLP type. They are removed in
- vect_schedule_slp_instance and their vinfo is destroyed. */
- if (is_store)
- {
- gsi_next (&si);
- continue;
- }
+ vect_schedule_slp (loop_vinfo, NULL);
}
/* Hybrid SLP stmts must be vectorized in addition to SLP. */
- if (PURE_SLP_STMT (stmt_info))
+ if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
{
gsi_next (&si);
continue;
slpeel_make_loop_iterate_ntimes (loop, ratio);
- mark_set_for_renaming (vect_memsyms_to_rename);
-
/* The memory tags and pointers in vectorized statements need to
have their SSA forms updated. FIXME, why can't this be delayed
until all the loops have been transformed? */
update_ssa (TODO_update_ssa);
- if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
+ if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
fprintf (vect_dump, "LOOP VECTORIZED.");
- if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
+ if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
fprintf (vect_dump, "OUTER LOOP VECTORIZED.");
}
-
-
-