/* Transformation Utilities for Loop Vectorization.
- Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
This file is part of GCC.
#include "real.h"
/* Utility functions for the code transformation. */
-static bool vect_transform_stmt (tree, block_stmt_iterator *, bool *);
+static bool vect_transform_stmt (tree, block_stmt_iterator *, bool *, slp_tree);
static tree vect_create_destination_var (tree, tree);
static tree vect_create_data_ref_ptr
- (tree, block_stmt_iterator *, tree, tree *, tree *, bool, tree);
-static tree vect_create_addr_base_for_vector_ref (tree, tree *, tree);
-static tree vect_setup_realignment (tree, block_stmt_iterator *, tree *);
+ (tree, struct loop*, tree, tree *, tree *, bool, tree, bool *);
+static tree vect_create_addr_base_for_vector_ref
+ (tree, tree *, tree, struct loop *);
static tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
static tree vect_get_vec_def_for_operand (tree, tree, tree *);
-static tree vect_init_vector (tree, tree, tree);
+static tree vect_init_vector (tree, tree, tree, block_stmt_iterator *);
static void vect_finish_stmt_generation
- (tree stmt, tree vec_stmt, block_stmt_iterator *bsi);
+ (tree stmt, tree vec_stmt, block_stmt_iterator *);
static bool vect_is_simple_cond (tree, loop_vec_info);
static void vect_create_epilog_for_reduction (tree, tree, enum tree_code, tree);
static tree get_initial_def_for_reduction (tree, tree, tree *);
int vec_inside_cost = 0;
int vec_outside_cost = 0;
int scalar_single_iter_cost = 0;
+ int scalar_outside_cost = 0;
+ bool runtime_test = false;
int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
- int byte_misalign;
+ int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
+ int peel_guard_costs = 0;
+ int innerloop_iters = 0, factor;
+ VEC (slp_instance, heap) *slp_instances;
+ slp_instance instance;
/* Cost model disabled. */
if (!flag_vect_cost_model)
{
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "cost model disabled.");
return 0;
}
- /* Requires loop versioning tests to handle misalignment.
- FIXME: Make cost depend on number of stmts in may_misalign list. */
+ /* If the number of iterations is unknown, or the
+ peeling-for-misalignment amount is unknown, we will have to generate
+ a runtime test to compare the loop count against the threshold. */
+ if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+ || (byte_misalign < 0))
+ runtime_test = true;
+
+ /* Requires loop versioning tests to handle misalignment. */
if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)))
{
- vec_outside_cost += TARG_COND_BRANCH_COST;
- if (vect_print_dump_info (REPORT_DETAILS))
+ /* FIXME: Make cost depend on complexity of individual check. */
+ vec_outside_cost +=
+ VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "cost model: Adding cost of checks for loop "
- "versioning.\n");
+ "versioning to treat misalignment.\n");
+ }
+
+ if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
+ {
+ /* FIXME: Make cost depend on complexity of individual check. */
+ vec_outside_cost +=
+ VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
+ if (vect_print_dump_info (REPORT_COST))
+ fprintf (vect_dump, "cost model: Adding cost of checks for loop "
+ "versioning aliasing.\n");
+ }
+
+ if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
+ || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
+ {
+ vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
}
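/* Illustrative shape of the versioning guard whose branch cost is counted
   above (a sketch only; the actual condition trees are built elsewhere in
   the vectorizer):

     if (addr_a % VS == 0 && addr_b % VS == 0 && !may_overlap (a, b))
       <vectorized loop>
     else
       <scalar loop>

   The taken-branch cost covers the jump around the vector code when any
   of the checks fails.  */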
/* Count statements in scalar loop. Using this as scalar cost for a single
   iteration for now.
TODO: Consider assigning different costs to different scalar
statements. */
+ /* FORNOW. */
+ if (loop->inner)
+ innerloop_iters = 50; /* FIXME */
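+ /* E.g. (illustrative): with this guess, a stmt in the inner-loop body is
+ counted 50 times by the FACTOR applied below, whereas a stmt in the
+ outer-loop body proper is counted once.  */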
+
for (i = 0; i < nbbs; i++)
{
block_stmt_iterator si;
basic_block bb = bbs[i];
+ if (bb->loop_father == loop->inner)
+ factor = innerloop_iters;
+ else
+ factor = 1;
+
for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
- {
- tree stmt = bsi_stmt (si);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- if (!STMT_VINFO_RELEVANT_P (stmt_info)
- && !STMT_VINFO_LIVE_P (stmt_info))
- continue;
- scalar_single_iter_cost += cost_for_stmt (stmt);
- vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info);
- vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
- }
+ {
+ tree stmt = bsi_stmt (si);
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ /* Skip stmts that are not vectorized inside the loop. */
+ if (!STMT_VINFO_RELEVANT_P (stmt_info)
+ && (!STMT_VINFO_LIVE_P (stmt_info)
+ || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
+ continue;
+ scalar_single_iter_cost += cost_for_stmt (stmt) * factor;
+ vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
+ /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
+ some of the "outside" costs are generated inside the outer-loop. */
+ vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
+ }
}
/* Add additional cost for the peeled instructions in prologue and epilogue
loop.
FORNOW: If we don't know the value of peel_iters for prologue or epilogue
- at compile-time - we assume it's (vf-1)/2 (the worst would be vf-1).
+ at compile-time - we assume it's vf/2 (the worst would be vf-1).
TODO: Build an expression that represents peel_iters for prologue and
epilogue to be used in a run-time test. */
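/* For illustration: with VF == 4 and unknown misalignment, the estimate
   below charges vf/2 == 2 scalar iterations to the prologue and 2 more
   to the epilogue.  */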
- byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
-
if (byte_misalign < 0)
{
- peel_iters_prologue = (vf - 1)/2;
- if (vect_print_dump_info (REPORT_DETAILS))
+ peel_iters_prologue = vf/2;
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "cost model: "
- "prologue peel iters set to (vf-1)/2.");
+ "prologue peel iters set to vf/2.");
/* If peeling for alignment is unknown, loop bound of main loop becomes
unknown. */
- peel_iters_epilogue = (vf - 1)/2;
- if (vect_print_dump_info (REPORT_DETAILS))
+ peel_iters_epilogue = vf/2;
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "cost model: "
- "epilogue peel iters set to (vf-1)/2 because "
+ "epilogue peel iters set to vf/2 because "
"peeling for alignment is unknown .");
+
+ /* If peeled iterations are unknown, count a taken branch and a not taken
+ branch per peeled loop. Even if scalar loop iterations are known,
+ vector iterations are not known since peeled prologue iterations are
+ not known. Hence guards remain the same. */
+ peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
+ + TARG_COND_NOT_TAKEN_BRANCH_COST);
+
}
else
{
if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
- peel_iters_epilogue = (vf - 1)/2;
- if (vect_print_dump_info (REPORT_DETAILS))
+ peel_iters_epilogue = vf/2;
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "cost model: "
- "epilogue peel iters set to (vf-1)/2 because "
+ "epilogue peel iters set to vf/2 because "
"loop iterations are unknown .");
+
+ /* If peeled iterations are known but the number of scalar loop
+ iterations is unknown, count a taken branch per peeled loop. */
+ peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST;
+
}
else
{
}
}
- /* Requires a prologue loop when peeling to handle misalignment. Add cost of
- two guards, one for the peeled loop and one for the vector loop. */
-
- if (peel_iters_prologue)
- {
- vec_outside_cost += 2 * TARG_COND_BRANCH_COST;
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "cost model: Adding cost of checks for "
- "prologue.\n");
- }
-
- /* Requires an epilogue loop to finish up remaining iterations after vector
- loop. Add cost of two guards, one for the peeled loop and one for the
- vector loop. */
-
- if (peel_iters_epilogue
- || !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
- || LOOP_VINFO_INT_NITERS (loop_vinfo) % vf)
- {
- vec_outside_cost += 2 * TARG_COND_BRANCH_COST;
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "cost model : Adding cost of checks for "
- "epilogue.\n");
- }
-
vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
- + (peel_iters_epilogue * scalar_single_iter_cost);
+ + (peel_iters_epilogue * scalar_single_iter_cost)
+ + peel_guard_costs;
+
+ /* FORNOW: The scalar outside cost is incremented in one of the
+ following ways:
+
+ 1. The vectorizer checks for alignment and aliasing and generates
+ a condition that allows dynamic vectorization. A cost model
+ check is ANDED with the versioning condition. Hence scalar code
+ path now has the added cost of the versioning check.
+
+ if (cost > th & versioning_check)
+ jmp to vector code
+
+ Hence the run-time scalar cost is incremented by a not-taken branch cost.
+
+ 2. The vectorizer then checks if a prologue is required. If the
+ cost model check was not done before during versioning, it has to
+ be done before the prologue check.
+
+ if (cost <= th)
+ prologue = scalar_iters
+ if (prologue == 0)
+ jmp to vector code
+ else
+ execute prologue
+ if (prologue == num_iters)
+ go to exit
+
+ Hence the run-time scalar cost is incremented by a taken branch,
+ plus a not-taken branch, plus a taken branch cost.
+
+ 3. The vectorizer then checks if an epilogue is required. If the
+ cost model check was not done before during prologue check, it
+ has to be done with the epilogue check.
+
+ if (prologue == 0)
+ jmp to vector code
+ else
+ execute prologue
+ if (prologue == num_iters)
+ go to exit
+ vector code:
+ if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
+ jmp to epilogue
+
+ Hence the run-time scalar cost should be incremented by 2 taken
+ branches.
+
+ TODO: The back end may reorder the BBs differently and reverse
+ conditions/branch directions. Change the estimates below to
+ something more reasonable. */
+
+ if (runtime_test)
+ {
+ /* Cost model check occurs at versioning. */
+ if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
+ || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
+ scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
+ else
+ {
+ /* Cost model check occurs at prologue generation. */
+ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
+ scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST
+ + TARG_COND_NOT_TAKEN_BRANCH_COST;
+ /* Cost model check occurs at epilogue generation. */
+ else
+ scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST;
+ }
+ }
- /* Allow targets add additional (outside-of-loop) costs. FORNOW, the only
- information we provide for the target is whether testing against the
- threshold involves a runtime test. */
- if (targetm.vectorize.builtin_vectorization_cost)
+ /* Add SLP costs. */
+ slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
+ for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
{
- bool runtime_test = false;
-
- /* If the number of iterations is unknown, or the
- peeling-for-misalignment amount is unknown, we eill have to generate
- a runtime test to test the loop count against the threshold. */
- if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
- || (byte_misalign < 0))
- runtime_test = true;
- vec_outside_cost +=
- targetm.vectorize.builtin_vectorization_cost (runtime_test);
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "cost model : Adding target out-of-loop cost = %d",
- targetm.vectorize.builtin_vectorization_cost (runtime_test));
+ vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
+ vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
}
/* Calculate number of iterations required to make the vector version
profitable, relative to the loop bodies only. The following condition
- must hold true: ((SIC*VF)-VIC)*niters > VOC*VF, where
+ must hold true:
+ SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
+ where
SIC = scalar iteration cost, VIC = vector iteration cost,
- VOC = vector outside cost and VF = vectorization factor. */
+ VOC = vector outside cost, VF = vectorization factor,
+ PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
+ SOC = scalar outside cost for run time cost model check. */
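+
+ /* A worked example with purely illustrative costs (not taken from any
+ real target): SIC = 4, VIC = 6, VF = 4, VOC = 20, SOC = 0 and no
+ peeling. The computation below gives (20 * 4) / (4 * 4 - 6) = 8, and
+ because 4 * 4 * 8 == 6 * 8 + 20 * 4 exactly, the result is bumped to 9
+ iterations before the vector version wins.  */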
if ((scalar_single_iter_cost * vf) > vec_inside_cost)
{
- if (vec_outside_cost == 0)
+ if (vec_outside_cost <= 0)
min_profitable_iters = 1;
else
{
- min_profitable_iters = (vec_outside_cost * vf)
+ min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
+ - vec_inside_cost * peel_iters_prologue
+ - vec_inside_cost * peel_iters_epilogue)
/ ((scalar_single_iter_cost * vf)
- vec_inside_cost);
if ((scalar_single_iter_cost * vf * min_profitable_iters)
<= ((vec_inside_cost * min_profitable_iters)
- + (vec_outside_cost * vf)))
+ + ((vec_outside_cost - scalar_outside_cost) * vf)))
min_profitable_iters++;
}
}
/* vector version will never be profitable. */
else
{
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "cost model: vector iteration cost = %d "
"is divisible by scalar iteration cost = %d by a factor "
"greater than or equal to the vectorization factor = %d .",
return -1;
}
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_COST))
{
fprintf (vect_dump, "Cost model analysis: \n");
fprintf (vect_dump, " Vector inside of loop cost: %d\n",
vec_inside_cost);
fprintf (vect_dump, " Vector outside of loop cost: %d\n",
vec_outside_cost);
- fprintf (vect_dump, " Scalar cost: %d\n", scalar_single_iter_cost);
+ fprintf (vect_dump, " Scalar iteration cost: %d\n",
+ scalar_single_iter_cost);
+ fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
fprintf (vect_dump, " prologue iterations: %d\n",
peel_iters_prologue);
fprintf (vect_dump, " epilogue iterations: %d\n",
peel_iters_epilogue);
fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
min_profitable_iters);
- fprintf (vect_dump, " Actual minimum iters for profitability: %d\n",
- min_profitable_iters < vf ? vf : min_profitable_iters);
}
min_profitable_iters =
  min_profitable_iters < vf ? vf : min_profitable_iters;

/* Because the condition we create is:
   if (niters <= min_profitable_iters)
     then skip the vectorized loop.  */
min_profitable_iters--;
+
+ if (vect_print_dump_info (REPORT_COST))
+ fprintf (vect_dump, " Profitability threshold = %d\n",
+ min_profitable_iters);
+
return min_profitable_iters;
}
generated within the strip-mine loop, the initial definition before
the loop, and the epilogue code that must be generated. */
-static void
+static bool
vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
int ncopies)
{
enum machine_mode mode;
tree operation = GIMPLE_STMT_OPERAND (STMT_VINFO_STMT (stmt_info), 1);
int op_type = TREE_CODE_LENGTH (TREE_CODE (operation));
+ loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
/* Cost of reduction op inside loop. */
STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST;
reduction_op = TREE_OPERAND (operation, op_type-1);
vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
+ if (!vectype)
+ {
+ if (vect_print_dump_info (REPORT_COST))
+ {
+ fprintf (vect_dump, "unsupported data-type ");
+ print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
+ }
+ return false;
+ }
+
mode = TYPE_MODE (vectype);
orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
We have a reduction operator that will reduce the vector in one statement.
Also requires scalar extract. */
- if (reduc_code < NUM_TREE_CODES)
- outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
- else
+ if (!nested_in_vect_loop_p (loop, orig_stmt))
{
- int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
- tree bitsize =
- TYPE_SIZE (TREE_TYPE ( GIMPLE_STMT_OPERAND (orig_stmt, 0)));
- int element_bitsize = tree_low_cst (bitsize, 1);
- int nelements = vec_size_in_bits / element_bitsize;
-
- optab = optab_for_tree_code (code, vectype);
-
- /* We have a whole vector shift available. */
- if (VECTOR_MODE_P (mode)
- && optab->handlers[mode].insn_code != CODE_FOR_nothing
- && vec_shr_optab->handlers[mode].insn_code != CODE_FOR_nothing)
- /* Final reduction via vector shifts and the reduction operator. Also
- requires scalar extract. */
- outer_cost += ((exact_log2(nelements) * 2) * TARG_VEC_STMT_COST
- + TARG_VEC_TO_SCALAR_COST);
- else
- /* Use extracts and reduction op for final reduction. For N elements,
- we have N extracts and N-1 reduction ops. */
- outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
+ if (reduc_code < NUM_TREE_CODES)
+ outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
+ else
+ {
+ int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
+ tree bitsize =
+ TYPE_SIZE (TREE_TYPE ( GIMPLE_STMT_OPERAND (orig_stmt, 0)));
+ int element_bitsize = tree_low_cst (bitsize, 1);
+ int nelements = vec_size_in_bits / element_bitsize;
+
+ optab = optab_for_tree_code (code, vectype);
+
+ /* We have a whole vector shift available. */
+ if (VECTOR_MODE_P (mode)
+ && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing
+ && optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
+ /* Final reduction via vector shifts and the reduction operator. Also
+ requires scalar extract. */
+ outer_cost += ((exact_log2(nelements) * 2) * TARG_VEC_STMT_COST
+ + TARG_VEC_TO_SCALAR_COST);
+ else
+ /* Use extracts and reduction op for final reduction. For N elements,
+ we have N extracts and N-1 reduction ops. */
+ outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
+ }
}
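/* For illustration only: with a 4-element vector, the whole-vector-shift
   scheme above costs exact_log2 (4) * 2 == 4 vector stmts plus one
   extract, while the fallback costs 4 extracts plus 3 reduction ops,
   i.e. 7 vector stmts.  */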
STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
"outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
+
+ return true;
}
/* prologue cost for vec_init and vec_step. */
STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST;
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
"outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
single op. Right now, this does not account for multiple insns that could
be generated for the single vector op. We will handle that shortly. */
-static void
-vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies, enum vect_def_type *dt)
+void
+vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
+ enum vect_def_type *dt, slp_tree slp_node)
{
int i;
+ int inside_cost = 0, outside_cost = 0;
- STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
+ inside_cost = ncopies * TARG_VEC_STMT_COST;
/* FORNOW: Assuming maximum 2 args per stmts. */
- for (i=0; i<2; i++)
+ for (i = 0; i < 2; i++)
{
if (dt[i] == vect_constant_def || dt[i] == vect_invariant_def)
- STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) += TARG_SCALAR_TO_VEC_COST;
+ outside_cost += TARG_SCALAR_TO_VEC_COST;
}
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
- "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
- STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
+ "outside_cost = %d .", inside_cost, outside_cost);
+
+ /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
+ stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
+ stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
Models cost for stores. In the case of strided accesses, one access
has the overhead of the strided access attributed to it. */
-static void
-vect_model_store_cost (stmt_vec_info stmt_info, int ncopies, enum vect_def_type dt)
+void
+vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
+ enum vect_def_type dt, slp_tree slp_node)
{
- int cost = 0;
int group_size;
+ int inside_cost = 0, outside_cost = 0;
if (dt == vect_constant_def || dt == vect_invariant_def)
- STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = TARG_SCALAR_TO_VEC_COST;
+ outside_cost = TARG_SCALAR_TO_VEC_COST;
/* Strided access? */
if (DR_GROUP_FIRST_DR (stmt_info))
if (group_size > 1)
{
/* Uses a high and low interleave operation for each needed permute. */
- cost = ncopies * exact_log2(group_size) * group_size
+ inside_cost = ncopies * exact_log2(group_size) * group_size
* TARG_VEC_STMT_COST;
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
group_size);
}
/* Costs of the stores. */
- cost += ncopies * TARG_VEC_STORE_COST;
-
- STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = cost;
+ inside_cost += ncopies * TARG_VEC_STORE_COST;
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
- "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
- STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
+ "outside_cost = %d .", inside_cost, outside_cost);
+
+ /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
+ stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
+ stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
accesses are supported for loads, we also account for the costs of the
access scheme chosen. */
-static void
-vect_model_load_cost (stmt_vec_info stmt_info, int ncopies)
+void
+vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
{
- int inner_cost = 0;
int group_size;
int alignment_support_cheme;
tree first_stmt;
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
+ int inside_cost = 0, outside_cost = 0;
/* Strided accesses? */
first_stmt = DR_GROUP_FIRST_DR (stmt_info);
- if (first_stmt)
+ if (first_stmt && !slp_node)
{
group_size = vect_cost_strided_group_size (stmt_info);
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
if (group_size > 1)
{
/* Uses an even and odd extract operations for each needed permute. */
- inner_cost = ncopies * exact_log2(group_size) * group_size
- * TARG_VEC_STMT_COST;
+ inside_cost = ncopies * exact_log2(group_size) * group_size
+ * TARG_VEC_STMT_COST;
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
group_size);
{
case dr_aligned:
{
- inner_cost += ncopies * TARG_VEC_LOAD_COST;
+ inside_cost += ncopies * TARG_VEC_LOAD_COST;
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_load_cost: aligned.");
break;
case dr_unaligned_supported:
{
/* Here, we assign an additional cost for the unaligned load. */
- inner_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;
+ inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
"hardware.");
break;
}
- case dr_unaligned_software_pipeline:
+ case dr_explicit_realign:
{
- int outer_cost = 0;
+ inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
- if (vect_print_dump_info (REPORT_DETAILS))
+ /* FIXME: If the misalignment remains fixed across the iterations of
+ the containing loop, the following cost should be added to the
+ outside costs. */
+ if (targetm.vectorize.builtin_mask_for_load)
+ inside_cost += TARG_VEC_STMT_COST;
+
+ break;
+ }
+ case dr_explicit_realign_optimized:
+ {
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_load_cost: unaligned software "
"pipelined.");
access in the group. Inside the loop, there is a load op
and a realignment op. */
- if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1)
+ if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
{
- outer_cost = 2*TARG_VEC_STMT_COST;
+ outside_cost = 2*TARG_VEC_STMT_COST;
if (targetm.vectorize.builtin_mask_for_load)
- outer_cost += TARG_VEC_STMT_COST;
+ outside_cost += TARG_VEC_STMT_COST;
}
-
- STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
- inner_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
+ inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
break;
}
default:
gcc_unreachable ();
}
-
- STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = inner_cost;
-
- if (vect_print_dump_info (REPORT_DETAILS))
+
+ if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
- "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
- STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
+ "outside_cost = %d .", inside_cost, outside_cost);
+ /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
+ stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
+ stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
STMT: The statement containing the data reference.
NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
OFFSET: Optional. If supplied, it is added to the initial address.
+ LOOP: Specify relative to which loop-nest should the address be computed.
+ For example, when the dataref is in an inner-loop nested in an
+ outer-loop that is now being vectorized, LOOP can be either the
+ outer-loop, or the inner-loop. The first memory location accessed
+ by the following dataref ('in' points to short):
+
+ for (i=0; i<N; i++)
+ for (j=0; j<M; j++)
+ s += in[i+j]
+
+ is as follows:
+ if LOOP=i_loop: &in (relative to i_loop)
+ if LOOP=j_loop: &in+i*2B (relative to j_loop)
Output:
1. Return an SSA_NAME whose value is the address of the memory location of
static tree
vect_create_addr_base_for_vector_ref (tree stmt,
tree *new_stmt_list,
- tree offset)
+ tree offset,
+ struct loop *loop)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
- tree data_ref_base_expr = unshare_expr (DR_BASE_ADDRESS (dr));
- tree base_name = build_fold_indirect_ref (data_ref_base_expr);
+ struct loop *containing_loop = (bb_for_stmt (stmt))->loop_father;
+ tree data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
+ tree base_name;
tree data_ref_base_var;
- tree data_ref_base;
tree new_base_stmt;
tree vec_stmt;
tree addr_base, addr_expr;
tree base_offset = unshare_expr (DR_OFFSET (dr));
tree init = unshare_expr (DR_INIT (dr));
tree vect_ptr_type, addr_expr2;
-
-
+ tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
+
+ gcc_assert (loop);
+ if (loop != containing_loop)
+ {
+ loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+
+ gcc_assert (nested_in_vect_loop_p (loop, stmt));
+
+ data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
+ base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
+ init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
+ }
+
/* Create data_ref_base */
- data_ref_base_var = create_tmp_var (TREE_TYPE (data_ref_base_expr), "batmp");
+ base_name = build_fold_indirect_ref (data_ref_base);
+ data_ref_base_var = create_tmp_var (TREE_TYPE (data_ref_base), "batmp");
add_referenced_var (data_ref_base_var);
- data_ref_base = force_gimple_operand (data_ref_base_expr, &new_base_stmt,
+ data_ref_base = force_gimple_operand (data_ref_base, &new_base_stmt,
true, data_ref_base_var);
append_to_statement_list_force(new_base_stmt, new_stmt_list);
if (offset)
{
tree tmp = create_tmp_var (sizetype, "offset");
- tree step;
-
- /* For interleaved access step we divide STEP by the size of the
- interleaving group. */
- if (DR_GROUP_SIZE (stmt_info))
- step = fold_build2 (TRUNC_DIV_EXPR, TREE_TYPE (offset), DR_STEP (dr),
- build_int_cst (TREE_TYPE (offset),
- DR_GROUP_SIZE (stmt_info)));
- else
- step = DR_STEP (dr);
add_referenced_var (tmp);
offset = fold_build2 (MULT_EXPR, TREE_TYPE (offset), offset, step);
}
/* base + base_offset */
- addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (data_ref_base), data_ref_base,
- base_offset);
+ addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (data_ref_base),
+ data_ref_base, base_offset);
vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
1. STMT: a stmt that references memory. Expected to be of the form
GIMPLE_MODIFY_STMT <name, data-ref> or
GIMPLE_MODIFY_STMT <data-ref, name>.
- 2. BSI: block_stmt_iterator where new stmts can be added.
+ 2. AT_LOOP: the loop where the vector memref is to be created.
3. OFFSET (optional): an offset to be added to the initial address accessed
by the data-ref in STMT.
4. ONLY_INIT: indicate if vp is to be updated in the loop, or remain
Return the increment stmt that updates the pointer in PTR_INCR.
- 3. Return the pointer. */
+ 3. Set INV_P to true if the access pattern of the data reference in the
+ vectorized loop is invariant. Set it to false otherwise.
+
+ 4. Return the pointer. */
static tree
-vect_create_data_ref_ptr (tree stmt,
- block_stmt_iterator *bsi ATTRIBUTE_UNUSED,
+vect_create_data_ref_ptr (tree stmt, struct loop *at_loop,
tree offset, tree *initial_address, tree *ptr_incr,
- bool only_init, tree type)
+ bool only_init, tree type, bool *inv_p)
{
tree base_name;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
+ struct loop *containing_loop = (bb_for_stmt (stmt))->loop_father;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
tree vect_ptr_type;
tree vect_ptr;
tree new_temp;
tree vec_stmt;
tree new_stmt_list = NULL_TREE;
- edge pe = loop_preheader_edge (loop);
+ edge pe;
basic_block new_bb;
tree vect_ptr_init;
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+ tree vptr;
+ block_stmt_iterator incr_bsi;
+ bool insert_after;
+ tree indx_before_incr, indx_after_incr;
+ tree incr;
+ tree step;
+
+ /* Check the step (evolution) of the load in LOOP, and record
+ whether it's invariant. */
+ if (nested_in_vect_loop)
+ step = STMT_VINFO_DR_STEP (stmt_info);
+ else
+ step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
+
+ if (tree_int_cst_compare (step, size_zero_node) == 0)
+ *inv_p = true;
+ else
+ *inv_p = false;
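+
+ /* E.g. a load whose address does not change across iterations (scalar
+ step 0, as for an access like a[0] inside the loop) is invariant; the
+ vector pointer created below must then not be advanced either.  */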
+ /* Create an expression for the first address accessed by this load
+ in LOOP. */
base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));
if (vect_print_dump_info (REPORT_DETAILS))
var_ann (vect_ptr)->subvars = DR_SUBVARS (dr);
+ /** Note: If the dataref is in an inner-loop nested in LOOP, and we are
+ vectorizing LOOP (i.e. outer-loop vectorization), we need to create two
+ def-use update cycles for the pointer: One relative to the outer-loop
+ (LOOP), which is what steps (3) and (4) below do. The other is relative
+ to the inner-loop (which is the inner-most loop containing the dataref),
+ and this is done by step (5) below.
+
+ When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
+ inner-most loop, and so steps (3),(4) work the same, and step (5) is
+ redundant. Steps (3),(4) create the following:
+
+ vp0 = &base_addr;
+ LOOP: vp1 = phi(vp0,vp2)
+ ...
+ ...
+ vp2 = vp1 + step
+ goto LOOP
+
+ If there is an inner-loop nested in loop, then step (5) will also be
+ applied, and an additional update in the inner-loop will be created:
+
+ vp0 = &base_addr;
+ LOOP: vp1 = phi(vp0,vp2)
+ ...
+ inner: vp3 = phi(vp1,vp4)
+ vp4 = vp3 + inner_step
+ if () goto inner
+ ...
+ vp2 = vp1 + step
+ if () goto LOOP */
+
/** (3) Calculate the initial address of the vector-pointer, and set
the vector-pointer to point to it before the loop: **/
/* Create: (&(base[init_val+offset]) in the loop preheader. */
+
new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
- offset);
+ offset, loop);
pe = loop_preheader_edge (loop);
new_bb = bsi_insert_on_edge_immediate (pe, new_stmt_list);
gcc_assert (!new_bb);
gcc_assert (!new_bb);
- /** (4) Handle the updating of the vector-pointer inside the loop: **/
+ /** (4) Handle the updating of the vector-pointer inside the loop.
+ This is needed when ONLY_INIT is false, and also when AT_LOOP
+ is the inner-loop nested in LOOP (during outer-loop vectorization).
+ **/
- if (only_init) /* No update in loop is required. */
+ if (only_init && at_loop == loop) /* No update in loop is required. */
{
/* Copy the points-to information if it exists. */
if (DR_PTR_INFO (dr))
duplicate_ssa_name_ptr_info (vect_ptr_init, DR_PTR_INFO (dr));
- return vect_ptr_init;
+ vptr = vect_ptr_init;
}
else
{
- block_stmt_iterator incr_bsi;
- bool insert_after;
- tree indx_before_incr, indx_after_incr;
- tree incr;
+ /* The step of the vector pointer is the Vector Size. */
+ tree step = TYPE_SIZE_UNIT (vectype);
+ /* One exception to the above is when the scalar step of the load in
+ LOOP is zero. In this case the step here is also zero. */
+ if (*inv_p)
+ step = size_zero_node;
standard_iv_increment_position (loop, &incr_bsi, &insert_after);
+
create_iv (vect_ptr_init,
- fold_convert (vect_ptr_type, TYPE_SIZE_UNIT (vectype)),
+ fold_convert (vect_ptr_type, step),
NULL_TREE, loop, &incr_bsi, insert_after,
&indx_before_incr, &indx_after_incr);
incr = bsi_stmt (incr_bsi);
if (ptr_incr)
*ptr_incr = incr;
- return indx_before_incr;
+ vptr = indx_before_incr;
+ }
+
+ if (!nested_in_vect_loop || only_init)
+ return vptr;
+
+
+ /** (5) Handle the updating of the vector-pointer inside the inner-loop
+ nested in LOOP, if exists: **/
+
+ gcc_assert (nested_in_vect_loop);
+ if (!only_init)
+ {
+ standard_iv_increment_position (containing_loop, &incr_bsi,
+ &insert_after);
+ create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), NULL_TREE,
+ containing_loop, &incr_bsi, insert_after, &indx_before_incr,
+ &indx_after_incr);
+ incr = bsi_stmt (incr_bsi);
+ set_stmt_info (stmt_ann (incr), new_stmt_vec_info (incr, loop_vinfo));
+
+ /* Copy the points-to information if it exists. */
+ if (DR_PTR_INFO (dr))
+ {
+ duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
+ duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
+ }
+ merge_alias_info (vect_ptr_init, indx_before_incr);
+ merge_alias_info (vect_ptr_init, indx_after_incr);
+ if (ptr_incr)
+ *ptr_incr = incr;
+
+ return indx_before_incr;
}
+ else
+ gcc_unreachable ();
}
/* Function bump_vector_ptr
- Increment a pointer (to a vector type) by vector-size. Connect the new
- increment stmt to the existing def-use update-chain of the pointer.
+ Increment a pointer (to a vector type) by vector-size. If requested,
+ i.e. if PTR_INCR is given, then also connect the new increment stmt
+ to the existing def-use update-chain of the pointer, by modifying
+ the PTR_INCR as illustrated below:
The pointer def-use update-chain before this function:
DATAREF_PTR = phi (p_0, p_2)
The pointer def-use update-chain after this function:
DATAREF_PTR = phi (p_0, p_2)
....
- NEW_DATAREF_PTR = DATAREF_PTR + vector_size
+ NEW_DATAREF_PTR = DATAREF_PTR + BUMP
....
PTR_INCR: p_2 = NEW_DATAREF_PTR + step
Input:
DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
in the loop.
- PTR_INCR - the stmt that updates the pointer in each iteration of the loop.
- The increment amount across iterations is also expected to be
- vector_size.
+ PTR_INCR - optional. The stmt that updates the pointer in each iteration of
+ the loop. The increment amount across iterations is expected
+ to be vector_size.
BSI - location where the new update stmt is to be placed.
STMT - the original scalar memory-access stmt that is being vectorized.
+ BUMP - optional. The offset by which to bump the pointer. If not given,
+ the offset is assumed to be vector_size.
Output: Return NEW_DATAREF_PTR as illustrated above.
static tree
bump_vector_ptr (tree dataref_ptr, tree ptr_incr, block_stmt_iterator *bsi,
- tree stmt)
+ tree stmt, tree bump)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
use_operand_p use_p;
tree new_dataref_ptr;
+ if (bump)
+ update = bump;
+
incr_stmt = build_gimple_modify_stmt (ptr_var,
build2 (POINTER_PLUS_EXPR, vptr_type,
dataref_ptr, update));
GIMPLE_STMT_OPERAND (incr_stmt, 0) = new_dataref_ptr;
vect_finish_stmt_generation (stmt, incr_stmt, bsi);
+ /* Copy the points-to information if it exists. */
+ if (DR_PTR_INFO (dr))
+ duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
+ merge_alias_info (new_dataref_ptr, dataref_ptr);
+
+ if (!ptr_incr)
+ return new_dataref_ptr;
+
/* Update the vector-pointer's cross-iteration increment. */
FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
{
gcc_assert (tree_int_cst_compare (use, update) == 0);
}
- /* Copy the points-to information if it exists. */
- if (DR_PTR_INFO (dr))
- duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
- merge_alias_info (new_dataref_ptr, dataref_ptr);
-
return new_dataref_ptr;
}
/* Function vect_init_vector.
Insert a new stmt (INIT_STMT) that initializes a new vector variable with
- the vector elements of VECTOR_VAR. Return the DEF of INIT_STMT. It will be
- used in the vectorization of STMT. */
+ the vector elements of VECTOR_VAR. Place the initialization at BSI if it
+ is not NULL. Otherwise, place the initialization at the loop preheader.
+ Return the DEF of INIT_STMT.
+ It will be used in the vectorization of STMT. */
static tree
-vect_init_vector (tree stmt, tree vector_var, tree vector_type)
+vect_init_vector (tree stmt, tree vector_var, tree vector_type,
+ block_stmt_iterator *bsi)
{
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree new_var;
tree init_stmt;
tree vec_oprnd;
new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
add_referenced_var (new_var);
-
init_stmt = build_gimple_modify_stmt (new_var, vector_var);
new_temp = make_ssa_name (new_var, init_stmt);
GIMPLE_STMT_OPERAND (init_stmt, 0) = new_temp;
- pe = loop_preheader_edge (loop);
- new_bb = bsi_insert_on_edge_immediate (pe, init_stmt);
- gcc_assert (!new_bb);
+ if (bsi)
+ vect_finish_stmt_generation (stmt, init_stmt, bsi);
+ else
+ {
+ loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+
+ if (nested_in_vect_loop_p (loop, stmt))
+ loop = loop->inner;
+ pe = loop_preheader_edge (loop);
+ new_bb = bsi_insert_on_edge_immediate (pe, init_stmt);
+ gcc_assert (!new_bb);
+ }
if (vect_print_dump_info (REPORT_DETAILS))
{
}
+/* For constant and loop invariant defs of SLP_NODE this function returns
+ (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
+ OP_NUM determines if we gather defs for operand 0 or operand 1 of the scalar
+ stmts. */
+
+static void
+vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
+ unsigned int op_num)
+{
+ VEC (tree, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
+ tree stmt = VEC_index (tree, stmts, 0);
+ stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
+ tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
+ int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ tree vec_cst;
+ tree t = NULL_TREE;
+ int j, number_of_places_left_in_vector;
+ tree vector_type;
+ tree op, vop, operation;
+ int group_size = VEC_length (tree, stmts);
+ unsigned int vec_num, i;
+ int number_of_copies = 1;
+ bool is_store = false;
+ unsigned int number_of_vectors = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ VEC (tree, heap) *voprnds = VEC_alloc (tree, heap, number_of_vectors);
+ bool constant_p;
+
+ if (STMT_VINFO_DATA_REF (stmt_vinfo))
+ is_store = true;
+
+ /* NUMBER_OF_COPIES is the number of times we need to use the same values in
+ created vectors. It is greater than 1 if unrolling is performed.
+
+ For example, we have two scalar operands, s1 and s2 (e.g., group of
+ strided accesses of size two), while NUNITS is four (i.e., four scalars
+ of this type can be packed in a vector). The output vector will contain
+ two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
+ will be 2).
+
+ If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
+ containing the operands.
+
+ For example, NUNITS is four as before, and the group size is 8
+ (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
+ {s5, s6, s7, s8}. */
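+
+ /* A further illustration: NUNITS == 4 and GROUP_SIZE == 6 give
+ least_common_multiple (4, 6) == 12, hence NUMBER_OF_COPIES == 2 and the
+ 12 scalar operands fill 12/4 == 3 vectors.  */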
+
+ number_of_copies = least_common_multiple (nunits, group_size) / group_size;
+
+ number_of_places_left_in_vector = nunits;
+ constant_p = true;
+ for (j = 0; j < number_of_copies; j++)
+ {
+ for (i = group_size - 1; VEC_iterate (tree, stmts, i, stmt); i--)
+ {
+ operation = GIMPLE_STMT_OPERAND (stmt, 1);
+ if (is_store)
+ op = operation;
+ else
+ op = TREE_OPERAND (operation, op_num);
+ if (!CONSTANT_CLASS_P (op))
+ constant_p = false;
+
+ /* Create 'vect_ = {op0,op1,...,opn}'. */
+ t = tree_cons (NULL_TREE, op, t);
+
+ number_of_places_left_in_vector--;
+
+ if (number_of_places_left_in_vector == 0)
+ {
+ number_of_places_left_in_vector = nunits;
+
+ vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
+ gcc_assert (vector_type);
+ if (constant_p)
+ vec_cst = build_vector (vector_type, t);
+ else
+ vec_cst = build_constructor_from_list (vector_type, t);
+ constant_p = true;
+ VEC_quick_push (tree, voprnds,
+ vect_init_vector (stmt, vec_cst, vector_type,
+ NULL));
+ t = NULL_TREE;
+ }
+ }
+ }
+
+  /* Since the vectors are created in the reverse order, we should reverse
+     them. */
+ vec_num = VEC_length (tree, voprnds);
+ for (j = vec_num - 1; j >= 0; j--)
+ {
+ vop = VEC_index (tree, voprnds, j);
+ VEC_quick_push (tree, *vec_oprnds, vop);
+ }
+
+ VEC_free (tree, heap, voprnds);
+
+ /* In case that VF is greater than the unrolling factor needed for the SLP
+ group of stmts, NUMBER_OF_VECTORS to be created is greater than
+ NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
+ to replicate the vectors. */
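+
+ /* E.g. (illustrative): if 4 vector defs are needed but only 2 distinct
+ ones were built above, each pass of the loop below re-pushes the 2
+ originals until 4 defs are available.  */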
+ while (number_of_vectors > VEC_length (tree, *vec_oprnds))
+ {
+ for (i = 0; VEC_iterate (tree, *vec_oprnds, i, vop) && i < vec_num; i++)
+ VEC_quick_push (tree, *vec_oprnds, vop);
+ }
+}
+
+
+/* Get vectorized definitions from SLP_NODE that contains corresponding
+ vectorized def-stmts. */
+
+static void
+vect_get_slp_vect_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds)
+{
+ tree vec_oprnd;
+ tree vec_def_stmt;
+ unsigned int i;
+
+ gcc_assert (SLP_TREE_VEC_STMTS (slp_node));
+
+ for (i = 0;
+ VEC_iterate (tree, SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt);
+ i++)
+ {
+ gcc_assert (vec_def_stmt);
+ vec_oprnd = GIMPLE_STMT_OPERAND (vec_def_stmt, 0);
+ VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
+ }
+}
+
+
+/* Get vectorized definitions for SLP_NODE.
+ If the scalar definitions are loop invariants or constants, collect them and
+ call vect_get_constant_vectors() to create vector stmts.
+ Otherwise, the def-stmts must be already vectorized and the vectorized stmts
+ must be stored in the LEFT/RIGHT node of SLP_NODE, and we call
+ vect_get_slp_vect_defs() to retrieve them.
+ If VEC_OPRNDS1 is NULL, don't get vector defs for the second operand (from
+ the right node). This is used when the second operand must remain scalar. */
+
+static void
+vect_get_slp_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds0,
+ VEC (tree,heap) **vec_oprnds1)
+{
+ tree operation, first_stmt;
+
+ /* Allocate memory for vectorized defs. */
+ *vec_oprnds0 = VEC_alloc (tree, heap,
+ SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node));
+
+ /* SLP_NODE corresponds either to a group of stores or to a group of
+ unary/binary operations. We don't call this function for loads. */
+ if (SLP_TREE_LEFT (slp_node))
+ /* The defs are already vectorized. */
+ vect_get_slp_vect_defs (SLP_TREE_LEFT (slp_node), vec_oprnds0);
+ else
+ /* Build vectors from scalar defs. */
+ vect_get_constant_vectors (slp_node, vec_oprnds0, 0);
+
+ first_stmt = VEC_index (tree, SLP_TREE_SCALAR_STMTS (slp_node), 0);
+ if (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)))
+ /* Since we don't call this function with loads, this is a group of
+ stores. */
+ return;
+
+ operation = GIMPLE_STMT_OPERAND (first_stmt, 1);
+ if (TREE_OPERAND_LENGTH (operation) == unary_op || !vec_oprnds1)
+ return;
+
+ *vec_oprnds1 = VEC_alloc (tree, heap,
+ SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node));
+
+ if (SLP_TREE_RIGHT (slp_node))
+ /* The defs are already vectorized. */
+ vect_get_slp_vect_defs (SLP_TREE_RIGHT (slp_node), vec_oprnds1);
+ else
+ /* Build vectors from scalar defs. */
+ vect_get_constant_vectors (slp_node, vec_oprnds1, 1);
+}
+
+
/* Function get_initial_def_for_induction
Input:
+ STMT - a stmt that performs an induction operation in the loop.
IV_PHI - the initial value of the induction variable
Output:
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree scalar_type = TREE_TYPE (PHI_RESULT_TREE (iv_phi));
- tree vectype = get_vectype_for_scalar_type (scalar_type);
- int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ tree vectype;
+ int nunits;
edge pe = loop_preheader_edge (loop);
+ struct loop *iv_loop;
basic_block new_bb;
- block_stmt_iterator bsi;
tree vec, vec_init, vec_step, t;
tree access_fn;
tree new_var;
int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
int i;
bool ok;
- int ncopies = vf / nunits;
+ int ncopies;
tree expr;
stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
+ bool nested_in_vect_loop = false;
tree stmts;
- tree stmt = NULL_TREE;
+ imm_use_iterator imm_iter;
+ use_operand_p use_p;
+ tree exit_phi;
+ edge latch_e;
+ tree loop_arg;
block_stmt_iterator si;
basic_block bb = bb_for_stmt (iv_phi);
+ vectype = get_vectype_for_scalar_type (scalar_type);
+ gcc_assert (vectype);
+ nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ ncopies = vf / nunits;
+
gcc_assert (phi_info);
gcc_assert (ncopies >= 1);
/* Find the first insertion point in the BB. */
si = bsi_after_labels (bb);
- stmt = bsi_stmt (si);
- access_fn = analyze_scalar_evolution (loop, PHI_RESULT (iv_phi));
+ if (INTEGRAL_TYPE_P (scalar_type))
+ step_expr = build_int_cst (scalar_type, 0);
+ else
+ step_expr = build_real (scalar_type, dconst0);
+
+ /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
+ if (nested_in_vect_loop_p (loop, iv_phi))
+ {
+ nested_in_vect_loop = true;
+ iv_loop = loop->inner;
+ }
+ else
+ iv_loop = loop;
+ gcc_assert (iv_loop == (bb_for_stmt (iv_phi))->loop_father);
+
+ latch_e = loop_latch_edge (iv_loop);
+ loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
+
+ access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
gcc_assert (access_fn);
- ok = vect_is_simple_iv_evolution (loop->num, access_fn,
- &init_expr, &step_expr);
+ ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
+ &init_expr, &step_expr);
gcc_assert (ok);
+ pe = loop_preheader_edge (iv_loop);
/* Create the vector that holds the initial_value of the induction. */
- new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
- add_referenced_var (new_var);
-
- new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
- if (stmts)
+ if (nested_in_vect_loop)
{
- new_bb = bsi_insert_on_edge_immediate (pe, stmts);
- gcc_assert (!new_bb);
+ /* iv_loop is nested in the loop to be vectorized. init_expr had already
+ been created during vectorization of previous stmts; we obtain it from
+ the STMT_VINFO_VEC_STMT of the defining stmt. */
+ tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi, loop_preheader_edge (iv_loop));
+ vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
}
-
- t = NULL_TREE;
- t = tree_cons (NULL_TREE, new_name, t);
- for (i = 1; i < nunits; i++)
+ else
{
- tree tmp;
+ /* iv_loop is the loop to be vectorized. Create:
+ vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
+ new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
+ add_referenced_var (new_var);
- /* Create: new_name = new_name + step_expr */
- tmp = fold_build2 (PLUS_EXPR, scalar_type, new_name, step_expr);
- init_stmt = build_gimple_modify_stmt (new_var, tmp);
- new_name = make_ssa_name (new_var, init_stmt);
- GIMPLE_STMT_OPERAND (init_stmt, 0) = new_name;
+ new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
+ if (stmts)
+ {
+ new_bb = bsi_insert_on_edge_immediate (pe, stmts);
+ gcc_assert (!new_bb);
+ }
- new_bb = bsi_insert_on_edge_immediate (pe, init_stmt);
- gcc_assert (!new_bb);
+ t = NULL_TREE;
+ t = tree_cons (NULL_TREE, init_expr, t);
+ for (i = 1; i < nunits; i++)
+ {
+ tree tmp;
- if (vect_print_dump_info (REPORT_DETAILS))
- {
- fprintf (vect_dump, "created new init_stmt: ");
- print_generic_expr (vect_dump, init_stmt, TDF_SLIM);
- }
- t = tree_cons (NULL_TREE, new_name, t);
+ /* Create: new_name_i = new_name + step_expr */
+ tmp = fold_build2 (PLUS_EXPR, scalar_type, new_name, step_expr);
+ init_stmt = build_gimple_modify_stmt (new_var, tmp);
+ new_name = make_ssa_name (new_var, init_stmt);
+ GIMPLE_STMT_OPERAND (init_stmt, 0) = new_name;
+
+ new_bb = bsi_insert_on_edge_immediate (pe, init_stmt);
+ gcc_assert (!new_bb);
+
+ if (vect_print_dump_info (REPORT_DETAILS))
+ {
+ fprintf (vect_dump, "created new init_stmt: ");
+ print_generic_expr (vect_dump, init_stmt, TDF_SLIM);
+ }
+ t = tree_cons (NULL_TREE, new_name, t);
+ }
+ /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
+ vec = build_constructor_from_list (vectype, nreverse (t));
+ vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
}
- vec = build_constructor_from_list (vectype, nreverse (t));
- vec_init = vect_init_vector (stmt, vec, vectype);
/* Create the vector that holds the step of the induction. */
- expr = build_int_cst (scalar_type, vf);
- new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
+ if (nested_in_vect_loop)
+ /* iv_loop is nested in the loop to be vectorized. Generate:
+ vec_step = [S, S, S, S] */
+ new_name = step_expr;
+ else
+ {
+ /* iv_loop is the loop to be vectorized. Generate:
+ vec_step = [VF*S, VF*S, VF*S, VF*S] */
+ expr = build_int_cst (scalar_type, vf);
+ new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
+ }
+
t = NULL_TREE;
for (i = 0; i < nunits; i++)
t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
- vec = build_constructor_from_list (vectype, t);
- vec_step = vect_init_vector (stmt, vec, vectype);
+ gcc_assert (CONSTANT_CLASS_P (new_name));
+ vec = build_vector (vectype, t);
+ vec_step = vect_init_vector (iv_phi, vec, vectype, NULL);
/* Create the following def-use cycle:
loop prolog:
- vec_init = [X, X+S, X+2*S, X+3*S]
- vec_step = [VF*S, VF*S, VF*S, VF*S]
+ vec_init = ...
+ vec_step = ...
loop:
vec_iv = PHI <vec_init, vec_loop>
...
/* Create the induction-phi that defines the induction-operand. */
vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
add_referenced_var (vec_dest);
- induction_phi = create_phi_node (vec_dest, loop->header);
+ induction_phi = create_phi_node (vec_dest, iv_loop->header);
set_stmt_info (get_stmt_ann (induction_phi),
new_stmt_vec_info (induction_phi, loop_vinfo));
induc_def = PHI_RESULT (induction_phi);
induc_def, vec_step));
vec_def = make_ssa_name (vec_dest, new_stmt);
GIMPLE_STMT_OPERAND (new_stmt, 0) = vec_def;
- bsi = bsi_for_stmt (stmt);
- vect_finish_stmt_generation (stmt, new_stmt, &bsi);
+ bsi_insert_before (&si, new_stmt, BSI_SAME_STMT);
+ set_stmt_info (get_stmt_ann (new_stmt),
+ new_stmt_vec_info (new_stmt, loop_vinfo));
/* Set the arguments of the phi node: */
- add_phi_arg (induction_phi, vec_init, loop_preheader_edge (loop));
- add_phi_arg (induction_phi, vec_def, loop_latch_edge (loop));
+ add_phi_arg (induction_phi, vec_init, pe);
+ add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop));
- /* In case the vectorization factor (VF) is bigger than the number
+ /* In case that the vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
more than one vector stmt - i.e - we need to "unroll" the
vector stmt by a factor VF/nunits. For more details see documentation
if (ncopies > 1)
{
stmt_vec_info prev_stmt_vinfo;
+ /* FORNOW. This restriction should be relaxed. */
+ gcc_assert (!nested_in_vect_loop);
/* Create the vector that holds the step of the induction. */
expr = build_int_cst (scalar_type, nunits);
t = NULL_TREE;
for (i = 0; i < nunits; i++)
t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
- vec = build_constructor_from_list (vectype, t);
- vec_step = vect_init_vector (stmt, vec, vectype);
+ gcc_assert (CONSTANT_CLASS_P (new_name));
+ vec = build_vector (vectype, t);
+ vec_step = vect_init_vector (iv_phi, vec, vectype, NULL);
vec_def = induc_def;
prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
{
tree tmp;
- /* vec_i = vec_prev + vec_{step*nunits} */
+ /* vec_i = vec_prev + vec_step */
tmp = build2 (PLUS_EXPR, vectype, vec_def, vec_step);
new_stmt = build_gimple_modify_stmt (NULL_TREE, tmp);
vec_def = make_ssa_name (vec_dest, new_stmt);
GIMPLE_STMT_OPERAND (new_stmt, 0) = vec_def;
- bsi = bsi_for_stmt (stmt);
- vect_finish_stmt_generation (stmt, new_stmt, &bsi);
-
+ bsi_insert_before (&si, new_stmt, BSI_SAME_STMT);
+ set_stmt_info (get_stmt_ann (new_stmt),
+ new_stmt_vec_info (new_stmt, loop_vinfo));
STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
}
}
+ if (nested_in_vect_loop)
+ {
+ /* Find the loop-closed exit-phi of the induction, and record
+ the final vector of induction results: */
+ exit_phi = NULL;
+ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
+ {
+ if (!flow_bb_inside_loop_p (iv_loop, bb_for_stmt (USE_STMT (use_p))))
+ {
+ exit_phi = USE_STMT (use_p);
+ break;
+ }
+ }
+ if (exit_phi)
+ {
+ stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
+ /* FORNOW. Currently not supporting the case that an inner-loop induction
+ is not used in the outer-loop (i.e. only outside the outer-loop). */
+ gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
+ && !STMT_VINFO_LIVE_P (stmt_vinfo));
+
+ STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
+ if (vect_print_dump_info (REPORT_DETAILS))
+ {
+ fprintf (vect_dump, "vector of inductions after inner-loop:");
+ print_generic_expr (vect_dump, new_stmt, TDF_SLIM);
+ }
+ }
+ }
+
+
if (vect_print_dump_info (REPORT_DETAILS))
{
fprintf (vect_dump, "transform induction: created def-use cycle:");
tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
int nunits = TYPE_VECTOR_SUBPARTS (vectype);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vec_inv;
tree vec_cst;
tree t = NULL_TREE;
t = tree_cons (NULL_TREE, op, t);
}
vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
+ gcc_assert (vector_type);
vec_cst = build_vector (vector_type, t);
- return vect_init_vector (stmt, vec_cst, vector_type);
+ return vect_init_vector (stmt, vec_cst, vector_type, NULL);
}
/* Case 2: operand is defined outside the loop - loop invariant. */
/* FIXME: use build_constructor directly. */
vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
+ gcc_assert (vector_type);
vec_inv = build_constructor_from_list (vector_type, t);
-
- return vect_init_vector (stmt, vec_inv, vector_type);
+ return vect_init_vector (stmt, vec_inv, vector_type, NULL);
}
/* Case 3: operand is defined inside the loop. */
def_stmt_info = vinfo_for_stmt (def_stmt);
vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
gcc_assert (vec_stmt);
- vec_oprnd = GIMPLE_STMT_OPERAND (vec_stmt, 0);
+ if (TREE_CODE (vec_stmt) == PHI_NODE)
+ vec_oprnd = PHI_RESULT (vec_stmt);
+ else
+ vec_oprnd = GIMPLE_STMT_OPERAND (vec_stmt, 0);
return vec_oprnd;
}
/* Case 4: operand is defined by a loop header phi - reduction */
case vect_reduction_def:
{
+ struct loop *loop;
+
gcc_assert (TREE_CODE (def_stmt) == PHI_NODE);
+ loop = (bb_for_stmt (def_stmt))->loop_father;
/* Get the def before the loop */
op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
{
gcc_assert (TREE_CODE (def_stmt) == PHI_NODE);
- /* Get the def before the loop */
- return get_initial_def_for_induction (def_stmt);
+ /* Get the def from the vectorized stmt. */
+ def_stmt_info = vinfo_for_stmt (def_stmt);
+ vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
+ gcc_assert (vec_stmt && (TREE_CODE (vec_stmt) == PHI_NODE));
+ vec_oprnd = PHI_RESULT (vec_stmt);
+ return vec_oprnd;
}
default:
vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
gcc_assert (vec_stmt_for_operand);
vec_oprnd = GIMPLE_STMT_OPERAND (vec_stmt_for_operand, 0);
-
return vec_oprnd;
}
+/* Get vectorized definitions for the operands to create a copy of an original
+ stmt. See vect_get_vec_def_for_stmt_copy() for details. */
+
+static void
+vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
+ VEC(tree,heap) **vec_oprnds0,
+ VEC(tree,heap) **vec_oprnds1)
+{
+ tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
+
+ vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
+ VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
+
+ if (vec_oprnds1 && *vec_oprnds1)
+ {
+ vec_oprnd = VEC_pop (tree, *vec_oprnds1);
+ vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
+ VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
+ }
+}
+
+
+/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not NULL. */
+
+static void
+vect_get_vec_defs (tree op0, tree op1, tree stmt, VEC(tree,heap) **vec_oprnds0,
+ VEC(tree,heap) **vec_oprnds1, slp_tree slp_node)
+{
+ if (slp_node)
+ vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1);
+ else
+ {
+ tree vec_oprnd;
+
+ *vec_oprnds0 = VEC_alloc (tree, heap, 1);
+ vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
+ VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
+
+ if (op1)
+ {
+ *vec_oprnds1 = VEC_alloc (tree, heap, 1);
+ vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
+ VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
+ }
+ }
+}
+
+
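/* Editor's sketch (illustrative plain C, not GCC internals; all names are
   hypothetical). When the vectorization factor exceeds the number of lanes
   per vector (here VF = 8, nunits = 4, so ncopies = 2), each scalar stmt
   becomes NCOPIES vector stmts: the first copy gets its defs via
   vect_get_vec_defs, and every later copy takes the "next" def of the
   previous one, which is what the pop/re-push in
   vect_get_vec_defs_for_stmt_copy models. */
#include <stdio.h>

int
main (void)
{
  int a[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }, b[8], lane;

  /* Copy 0: one vector stmt covering lanes 0..3. */
  for (lane = 0; lane < 4; lane++)
    b[lane] = a[lane] + 1;

  /* Copy 1: the same stmt repeated on the next vector's worth of defs. */
  for (lane = 4; lane < 8; lane++)
    b[lane] = a[lane] + 1;

  printf ("%d %d\n", b[0], b[7]); /* prints: 1 8 */
  return 0;
}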
/* Function vect_finish_stmt_generation.
Insert a new stmt. */
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ gcc_assert (stmt == bsi_stmt (*bsi));
+ gcc_assert (TREE_CODE (stmt) != LABEL_EXPR);
+
bsi_insert_before (bsi, vec_stmt, BSI_SAME_STMT);
+
set_stmt_info (get_stmt_ann (vec_stmt),
new_stmt_vec_info (vec_stmt, loop_vinfo));
/* Make sure bsi points to the stmt that is being vectorized. */
gcc_assert (stmt == bsi_stmt (*bsi));
-#ifdef USE_MAPPED_LOCATION
SET_EXPR_LOCATION (vec_stmt, EXPR_LOCATION (stmt));
-#else
- SET_EXPR_LOCUS (vec_stmt, EXPR_LOCUS (stmt));
-#endif
}
get_initial_def_for_reduction (tree stmt, tree init_val, tree *adjustment_def)
{
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
+ loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
int nunits = TYPE_VECTOR_SUBPARTS (vectype);
enum tree_code code = TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1));
tree t = NULL_TREE;
int i;
tree vector_type;
+ bool nested_in_vect_loop = false;
+
+ gcc_assert (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type));
+ if (nested_in_vect_loop_p (loop, stmt))
+ nested_in_vect_loop = true;
+ else
+ gcc_assert (loop == (bb_for_stmt (stmt))->loop_father);
- gcc_assert (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type));
vecdef = vect_get_vec_def_for_operand (init_val, stmt, NULL);
switch (code)
case WIDEN_SUM_EXPR:
case DOT_PROD_EXPR:
case PLUS_EXPR:
- *adjustment_def = init_val;
- /* Create a vector of zeros for init_def. */
- if (INTEGRAL_TYPE_P (type))
- def_for_init = build_int_cst (type, 0);
+ if (nested_in_vect_loop)
+ *adjustment_def = vecdef;
else
+ *adjustment_def = init_val;
+ /* Create a vector of zeros for init_def. */
+ if (SCALAR_FLOAT_TYPE_P (type))
def_for_init = build_real (type, dconst0);
- for (i = nunits - 1; i >= 0; --i)
- t = tree_cons (NULL_TREE, def_for_init, t);
+ else
+ def_for_init = build_int_cst (type, 0);
+ for (i = nunits - 1; i >= 0; --i)
+ t = tree_cons (NULL_TREE, def_for_init, t);
vector_type = get_vectype_for_scalar_type (TREE_TYPE (def_for_init));
+ gcc_assert (vector_type);
init_def = build_vector (vector_type, t);
break;
tree new_phi;
block_stmt_iterator exit_bsi;
tree vec_dest;
- tree new_temp;
+ tree new_temp = NULL_TREE;
tree new_name;
- tree epilog_stmt;
- tree new_scalar_dest, exit_phi;
+ tree epilog_stmt = NULL_TREE;
+ tree new_scalar_dest, exit_phi, new_dest;
tree bitsize, bitpos, bytesize;
enum tree_code code = TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1));
- tree scalar_initial_def;
+ tree adjustment_def;
tree vec_initial_def;
tree orig_name;
imm_use_iterator imm_iter;
use_operand_p use_p;
- bool extract_scalar_result;
- tree reduction_op;
+ bool extract_scalar_result = false;
+ tree reduction_op, expr;
tree orig_stmt;
tree use_stmt;
tree operation = GIMPLE_STMT_OPERAND (stmt, 1);
+ bool nested_in_vect_loop = false;
int op_type;
+ VEC(tree,heap) *phis = NULL;
+ int i;
+
+ if (nested_in_vect_loop_p (loop, stmt))
+ {
+ loop = loop->inner;
+ nested_in_vect_loop = true;
+ }
op_type = TREE_OPERAND_LENGTH (operation);
reduction_op = TREE_OPERAND (operation, op_type-1);
vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
+ gcc_assert (vectype);
mode = TYPE_MODE (vectype);
/*** 1. Create the reduction def-use cycle ***/
the scalar def before the loop, that defines the initial value
of the reduction variable. */
vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
- &scalar_initial_def);
+ &adjustment_def);
add_phi_arg (reduction_phi, vec_initial_def, loop_preheader_edge (loop));
/* 1.2 set the loop-latch arg for the reduction-phi: */
bitsize = TYPE_SIZE (scalar_type);
bytesize = TYPE_SIZE_UNIT (scalar_type);
+
+ /* In case this is a reduction in an inner-loop while vectorizing an outer
+ loop, we don't need to extract a single scalar result at the end of the
+ inner-loop. The final vector of partial results will be used in the
+ vectorized outer-loop, or reduced to a scalar result at the end of the
+ outer-loop. */
+ if (nested_in_vect_loop)
+ goto vect_finalize_reduction;
+
/* 2.3 Create the reduction code, using one of the three schemes described
above. */
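/* Editor's note: an illustrative sketch, not part of the patch. The
   whole-vector-shift scheme used below reduces a vector of partial sums
   in log2(nunits) steps, each step shifting the upper half onto the
   lower half and adding:

     { a, b, c, d }
     shift by 2, add -> { a+c, b+d, _, _ }
     shift by 1, add -> { a+c+b+d, _, _, _ }

   after which the scalar result sits in lane 0 and is extracted with a
   BIT_FIELD_REF. */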
int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
tree vec_temp;
- if (vec_shr_optab->handlers[mode].insn_code != CODE_FOR_nothing)
+ if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
shift_code = VEC_RSHIFT_EXPR;
else
have_whole_vector_shift = false;
else
{
optab optab = optab_for_tree_code (code, vectype);
- if (optab->handlers[mode].insn_code == CODE_FOR_nothing)
+ if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing)
have_whole_vector_shift = false;
}
vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
bitsize_zero_node);
- BIT_FIELD_REF_UNSIGNED (rhs) = TYPE_UNSIGNED (scalar_type);
epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, rhs);
new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
bitpos);
- BIT_FIELD_REF_UNSIGNED (rhs) = TYPE_UNSIGNED (scalar_type);
epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, rhs);
new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_name;
{
tree rhs;
+ gcc_assert (!nested_in_vect_loop);
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "extract scalar result");
bitpos = bitsize_zero_node;
rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
- BIT_FIELD_REF_UNSIGNED (rhs) = TYPE_UNSIGNED (scalar_type);
epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, rhs);
new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
}
- /* 2.4 Adjust the final result by the initial value of the reduction
+vect_finalize_reduction:
+
+ /* 2.5 Adjust the final result by the initial value of the reduction
variable. (When such adjustment is not needed, then
- 'scalar_initial_def' is zero).
+ 'adjustment_def' is zero). For example, if code is PLUS we create:
+ new_temp = loop_exit_def + adjustment_def */
- Create:
- s_out4 = scalar_expr <s_out3, scalar_initial_def> */
-
- if (scalar_initial_def)
+ if (adjustment_def)
{
- tree tmp = build2 (code, scalar_type, new_temp, scalar_initial_def);
- epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, tmp);
- new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
+ if (nested_in_vect_loop)
+ {
+ gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
+ expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
+ new_dest = vect_create_destination_var (scalar_dest, vectype);
+ }
+ else
+ {
+ gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
+ expr = build2 (code, scalar_type, new_temp, adjustment_def);
+ new_dest = vect_create_destination_var (scalar_dest, scalar_type);
+ }
+ epilog_stmt = build_gimple_modify_stmt (new_dest, expr);
+ new_temp = make_ssa_name (new_dest, epilog_stmt);
GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
}
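/* Editor's note: an illustrative sketch, not part of the patch. For a
   PLUS reduction "s = init; loop: s += a[i];" the vector accumulator is
   seeded with zeros, so the reduced value excludes INIT, and the stmt
   built above re-applies it exactly once:

     loop_exit_def = v0 + v1 + v2 + v3;          (partial sums; init 0)
     new_temp = loop_exit_def + adjustment_def;  (adjustment_def = init)

   In the nested (outer-loop) case ADJUSTMENT_DEF is itself a vector, so
   the same add is built in the vector domain instead. */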
- /* 2.6 Replace uses of s_out0 with uses of s_out3 */
- /* Find the loop-closed-use at the loop exit of the original scalar result.
+ /* 2.6 Handle the loop-exit phi */
+
+ /* Replace uses of s_out0 with uses of s_out3:
+ Find the loop-closed-use at the loop exit of the original scalar result.
(The reduction result is expected to have two immediate uses - one at the
latch block, and one at the loop exit). */
- exit_phi = NULL;
+ phis = VEC_alloc (tree, heap, 10);
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
{
if (!flow_bb_inside_loop_p (loop, bb_for_stmt (USE_STMT (use_p))))
{
exit_phi = USE_STMT (use_p);
- break;
+ VEC_quick_push (tree, phis, exit_phi);
}
}
/* We expect to have found an exit_phi because of loop-closed-ssa form. */
- gcc_assert (exit_phi);
- /* Replace the uses: */
- orig_name = PHI_RESULT (exit_phi);
- FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
- FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
- SET_USE (use_p, new_temp);
+ gcc_assert (!VEC_empty (tree, phis));
+
+ for (i = 0; VEC_iterate (tree, phis, i, exit_phi); i++)
+ {
+ if (nested_in_vect_loop)
+ {
+ stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
+
+ /* FORNOW. Currently not supporting the case that an inner-loop reduction
+ is not used in the outer-loop (but only outside the outer-loop). */
+ gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
+ && !STMT_VINFO_LIVE_P (stmt_vinfo));
+
+ epilog_stmt = adjustment_def ? epilog_stmt : new_phi;
+ STMT_VINFO_VEC_STMT (stmt_vinfo) = epilog_stmt;
+ set_stmt_info (get_stmt_ann (epilog_stmt),
+ new_stmt_vec_info (epilog_stmt, loop_vinfo));
+ continue;
+ }
+
+ /* Replace the uses: */
+ orig_name = PHI_RESULT (exit_phi);
+ FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
+ FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
+ SET_USE (use_p, new_temp);
+ }
+ VEC_free (tree, heap, phis);
}
tree new_stmt = NULL_TREE;
int j;
+ if (nested_in_vect_loop_p (loop, stmt))
+ {
+ loop = loop->inner;
+ /* FORNOW. This restriction should be relaxed. */
+ if (ncopies > 1)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "multiple types in nested loop.");
+ return false;
+ }
+ }
+
gcc_assert (ncopies >= 1);
+ /* FORNOW: SLP not supported. */
+ if (STMT_SLP_TYPE (stmt_info))
+ return false;
+
/* 1. Is vectorizable reduction? */
/* Not supportable if the reduction variable is used in the loop. */
- if (STMT_VINFO_RELEVANT_P (stmt_info))
+ if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer)
return false;
- if (!STMT_VINFO_LIVE_P (stmt_info))
+ /* Reductions that are not used even in an enclosing outer-loop
+ are expected to be "live" (used outside the loop). */
+ if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_loop
+ && !STMT_VINFO_LIVE_P (stmt_info))
return false;
/* Make sure it was already recognized as a reduction computation. */
return false;
scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
scalar_type = TREE_TYPE (scalar_dest);
+ if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
+ && !SCALAR_FLOAT_TYPE_P (scalar_type))
+ return false;
/* All uses but the last are expected to be defined in the loop.
The last use is the reduction variable. */
gcc_assert (dt == vect_reduction_def);
gcc_assert (TREE_CODE (def_stmt) == PHI_NODE);
if (orig_stmt)
- gcc_assert (orig_stmt == vect_is_simple_reduction (loop, def_stmt));
+ gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo, def_stmt));
else
- gcc_assert (stmt == vect_is_simple_reduction (loop, def_stmt));
+ gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, def_stmt));
if (STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
return false;
return false;
}
vec_mode = TYPE_MODE (vectype);
- if (optab->handlers[(int) vec_mode].insn_code == CODE_FOR_nothing)
+ if (optab_handler (optab, vec_mode)->insn_code == CODE_FOR_nothing)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "op not supported by target.");
reduction variable, and get the tree-code from orig_stmt. */
orig_code = TREE_CODE (GIMPLE_STMT_OPERAND (orig_stmt, 1));
vectype = get_vectype_for_scalar_type (TREE_TYPE (def));
+ if (!vectype)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ {
+ fprintf (vect_dump, "unsupported data-type ");
+ print_generic_expr (vect_dump, TREE_TYPE (def), TDF_SLIM);
+ }
+ return false;
+ }
+
vec_mode = TYPE_MODE (vectype);
}
else
fprintf (vect_dump, "no optab for reduction.");
epilog_reduc_code = NUM_TREE_CODES;
}
- if (reduc_optab->handlers[(int) vec_mode].insn_code == CODE_FOR_nothing)
+ if (optab_handler (reduc_optab, vec_mode)->insn_code == CODE_FOR_nothing)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "reduc op not supported by target.");
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
- vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies);
+ if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
+ return false;
return true;
}
int nunits_in;
int nunits_out;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree fndecl, rhs, new_temp, def, def_stmt, rhs_type, lhs_type;
enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
tree new_stmt;
enum { NARROW, NONE, WIDEN } modifier;
if (!STMT_VINFO_RELEVANT_P (stmt_info))
- return false;
-
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
- return false;
-
- /* FORNOW: not yet supported. */
- if (STMT_VINFO_LIVE_P (stmt_info))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "value used after loop.");
- return false;
- }
+ return false;
+
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
+ return false;
+
+ /* FORNOW: SLP not supported. */
+ if (STMT_SLP_TYPE (stmt_info))
+ return false;
/* Is STMT a vectorizable call? */
if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
return false;
vectype_in = get_vectype_for_scalar_type (rhs_type);
+ if (!vectype_in)
+ return false;
nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
lhs_type = TREE_TYPE (GIMPLE_STMT_OPERAND (stmt, 0));
vectype_out = get_vectype_for_scalar_type (lhs_type);
+ if (!vectype_out)
+ return false;
nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
/* FORNOW */
needs to be generated. */
gcc_assert (ncopies >= 1);
+ /* FORNOW. This restriction should be relaxed. */
+ if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "multiple types in nested loop.");
+ return false;
+ }
+
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vectorizable_call ===");
- vect_model_simple_cost (stmt_info, ncopies, dt);
+ vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
return true;
}
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "transform operation.");
+ /* FORNOW. This restriction should be relaxed. */
+ if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "multiple types in nested loop.");
+ return false;
+ }
+
/* Handle def. */
scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
}
-/* Function vectorizable_conversion.
-
-Check if STMT performs a conversion operation, that can be vectorized.
-If VEC_STMT is also passed, vectorize the STMT: create a vectorized
-stmt to replace it, put it in VEC_STMT, and insert it at BSI.
-Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+/* Check if STMT performs a conversion operation that can be vectorized.
+ If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+ stmt to replace it, put it in VEC_STMT, and insert it at BSI.
+ Return FALSE if not a vectorizable STMT, TRUE otherwise. */
bool
-vectorizable_conversion (tree stmt, block_stmt_iterator * bsi,
- tree * vec_stmt)
+vectorizable_conversion (tree stmt, block_stmt_iterator *bsi,
+ tree *vec_stmt, slp_tree slp_node)
{
tree vec_dest;
tree scalar_dest;
tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
tree decl1 = NULL_TREE, decl2 = NULL_TREE;
tree new_temp;
tree def, def_stmt;
- enum vect_def_type dt0;
- tree new_stmt;
+ enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
+ tree new_stmt = NULL_TREE;
stmt_vec_info prev_stmt_info;
int nunits_in;
int nunits_out;
tree rhs_type, lhs_type;
tree builtin_decl;
enum { NARROW, NONE, WIDEN } modifier;
+ int i;
+ VEC(tree,heap) *vec_oprnds0 = NULL;
+ tree vop0;
/* Is STMT a vectorizable conversion? */
if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
return false;
- if (STMT_VINFO_LIVE_P (stmt_info))
- {
- /* FORNOW: not yet supported. */
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "value used after loop.");
- return false;
- }
-
if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
return false;
if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
return false;
- /* Check types of lhs and rhs */
+ /* Check types of lhs and rhs. */
op0 = TREE_OPERAND (operation, 0);
rhs_type = TREE_TYPE (op0);
vectype_in = get_vectype_for_scalar_type (rhs_type);
+ if (!vectype_in)
+ return false;
nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
lhs_type = TREE_TYPE (scalar_dest);
vectype_out = get_vectype_for_scalar_type (lhs_type);
+ if (!vectype_out)
+ return false;
nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
/* FORNOW */
if (modifier == NONE)
gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out);
- /* Bail out if the types are both integral or non-integral */
+ /* Bail out if the types are both integral or non-integral. */
if ((INTEGRAL_TYPE_P (rhs_type) && INTEGRAL_TYPE_P (lhs_type))
|| (!INTEGRAL_TYPE_P (rhs_type) && !INTEGRAL_TYPE_P (lhs_type)))
return false;
else
ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
+ /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
+ this, so we can safely override NCOPIES with 1 here. */
+ if (slp_node)
+ ncopies = 1;
+
/* Sanity check: make sure that at least one copy of the vectorized stmt
needs to be generated. */
gcc_assert (ncopies >= 1);
+ /* FORNOW. This restriction should be relaxed. */
+ if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "multiple types in nested loop.");
+ return false;
+ }
+
/* Check the operands of the operation. */
- if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt0))
+ if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0]))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "use not simple.");
}
if (modifier != NONE)
- STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
+ {
+ STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
+ /* FORNOW: SLP not supported. */
+ if (STMT_SLP_TYPE (stmt_info))
+ return false;
+ }
if (!vec_stmt) /* transformation not required. */
{
/* Handle def. */
vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
+ if (modifier == NONE && !slp_node)
+ vec_oprnds0 = VEC_alloc (tree, heap, 1);
+
prev_stmt_info = NULL;
switch (modifier)
{
ssa_op_iter iter;
if (j == 0)
- vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
+ vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
else
- vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
+ vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
builtin_decl =
targetm.vectorize.builtin_conversion (code, vectype_in);
- new_stmt = build_call_expr (builtin_decl, 1, vec_oprnd0);
+ for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
+ {
+ new_stmt = build_call_expr (builtin_decl, 1, vop0);
- /* Arguments are ready. create the new vector stmt. */
- new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt);
- new_temp = make_ssa_name (vec_dest, new_stmt);
- GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
- vect_finish_stmt_generation (stmt, new_stmt, bsi);
- FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, SSA_OP_ALL_VIRTUALS)
- {
- if (TREE_CODE (sym) == SSA_NAME)
- sym = SSA_NAME_VAR (sym);
- mark_sym_for_renaming (sym);
+ /* Arguments are ready. Create the new vector stmt. */
+ new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
+ vect_finish_stmt_generation (stmt, new_stmt, bsi);
+ FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter,
+ SSA_OP_ALL_VIRTUALS)
+ {
+ if (TREE_CODE (sym) == SSA_NAME)
+ sym = SSA_NAME_VAR (sym);
+ mark_sym_for_renaming (sym);
+ }
+ if (slp_node)
+ VEC_quick_push (tree, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
}
if (j == 0)
if (j == 0)
vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
else
- vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
+ vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
if (j == 0)
{
vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
- vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
+ vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
}
else
{
- vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd1);
- vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
+ vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
+ vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
}
/* Arguments are ready. Create the new vector stmt. */
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
}
+
+ if (vec_oprnds0)
+ VEC_free (tree, heap, vec_oprnds0);
+
return true;
}
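/* Editor's sketch (illustrative plain C; the function below is
   hypothetical): the shape of loop vectorizable_conversion handles with
   modifier NONE - an int/float conversion with equal lane counts on both
   sides, mapped to targetm.vectorize.builtin_conversion when the target
   provides one. */
void
convert_loop (const int *a, float *b, int n)
{
  int i;
  for (i = 0; i < n; i++)
    b[i] = (float) a[i]; /* FLOAT_EXPR applied lane by lane */
}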
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
bool
-vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
+vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt,
+ slp_tree slp_node)
{
tree vec_dest;
tree scalar_dest;
tree op;
- tree vec_oprnd;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
int nunits = TYPE_VECTOR_SUBPARTS (vectype);
int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
+ int i;
+ VEC(tree,heap) *vec_oprnds = NULL;
+ tree vop;
gcc_assert (ncopies >= 1);
if (ncopies > 1)
if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
return false;
- /* FORNOW: not yet supported. */
- if (STMT_VINFO_LIVE_P (stmt_info))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "value used after loop.");
- return false;
- }
-
/* Is vectorizable assignment? */
if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
return false;
STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vectorizable_assignment ===");
- vect_model_simple_cost (stmt_info, ncopies, dt);
+ vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
return true;
}
vec_dest = vect_create_destination_var (scalar_dest, vectype);
/* Handle use. */
- op = GIMPLE_STMT_OPERAND (stmt, 1);
- vec_oprnd = vect_get_vec_def_for_operand (op, stmt, NULL);
+ vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
/* Arguments are ready. create the new vector stmt. */
- *vec_stmt = build_gimple_modify_stmt (vec_dest, vec_oprnd);
- new_temp = make_ssa_name (vec_dest, *vec_stmt);
- GIMPLE_STMT_OPERAND (*vec_stmt, 0) = new_temp;
- vect_finish_stmt_generation (stmt, *vec_stmt, bsi);
+ for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
+ {
+ *vec_stmt = build_gimple_modify_stmt (vec_dest, vop);
+ new_temp = make_ssa_name (vec_dest, *vec_stmt);
+ GIMPLE_STMT_OPERAND (*vec_stmt, 0) = new_temp;
+ vect_finish_stmt_generation (stmt, *vec_stmt, bsi);
+ STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt;
+
+ if (slp_node)
+ VEC_quick_push (tree, SLP_TREE_VEC_STMTS (slp_node), *vec_stmt);
+ }
+ VEC_free (tree, heap, vec_oprnds);
return true;
}
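/* Editor's sketch (illustrative plain C; the function below is
   hypothetical): the kind of stmt vectorizable_assignment handles - a
   plain lane-for-lane copy. Under SLP, each group member gets its own
   vector copy pushed onto SLP_TREE_VEC_STMTS. */
void
copy_loop (const int *a, int *b, int n)
{
  int i;
  for (i = 0; i < n; i++)
    b[i] = a[i]; /* GIMPLE_MODIFY_STMT with no computation */
}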
if (!STMT_VINFO_RELEVANT_P (stmt_info))
return false;
- gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
+ /* FORNOW: SLP not supported. */
+ if (STMT_SLP_TYPE (stmt_info))
+ return false;
- if (STMT_VINFO_LIVE_P (stmt_info))
- {
- /* FORNOW: not yet supported. */
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "value used after loop.");
- return false;
- }
+ gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
if (TREE_CODE (phi) != PHI_NODE)
return false;
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
bool
-vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
+vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt,
+ slp_tree slp_node)
{
tree vec_dest;
tree scalar_dest;
tree operation;
tree op0, op1 = NULL;
- tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
+ tree vec_oprnd1 = NULL_TREE;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum tree_code code;
enum machine_mode vec_mode;
tree new_temp;
enum machine_mode optab_op2_mode;
tree def, def_stmt;
enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
- tree new_stmt;
+ tree new_stmt = NULL_TREE;
stmt_vec_info prev_stmt_info;
int nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
int nunits_out;
tree vectype_out;
int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
- int j;
-
+ int j, i;
+ VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
+ tree vop0, vop1;
+ unsigned int k;
+ bool scalar_shift_arg = false;
+
+ /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
+ this, so we can safely override NCOPIES with 1 here. */
+ if (slp_node)
+ ncopies = 1;
gcc_assert (ncopies >= 1);
+ /* FORNOW. This restriction should be relaxed. */
+ if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "multiple types in nested loop.");
+ return false;
+ }
if (!STMT_VINFO_RELEVANT_P (stmt_info))
return false;
if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
return false;
- /* FORNOW: not yet supported. */
- if (STMT_VINFO_LIVE_P (stmt_info))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "value used after loop.");
- return false;
- }
-
/* Is STMT a vectorizable binary/unary operation? */
if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
return false;
scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
+ if (!vectype_out)
+ return false;
nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
if (nunits_out != nunits_in)
return false;
return false;
}
vec_mode = TYPE_MODE (vectype);
- icode = (int) optab->handlers[(int) vec_mode].insn_code;
+ icode = (int) optab_handler (optab, vec_mode)->insn_code;
if (icode == CODE_FOR_nothing)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "op not supported by target.");
+ /* Check only during analysis. */
if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
- || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
- < vect_min_worthwhile_factor (code))
+ || (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
+ < vect_min_worthwhile_factor (code)
+ && !vec_stmt))
return false;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "proceeding using word mode.");
}
- /* Worthwhile without SIMD support? */
+ /* Worthwhile without SIMD support? Check only during analysis. */
if (!VECTOR_MODE_P (TYPE_MODE (vectype))
&& LOOP_VINFO_VECT_FACTOR (loop_vinfo)
- < vect_min_worthwhile_factor (code))
+ < vect_min_worthwhile_factor (code)
+ && !vec_stmt)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "not worthwhile without SIMD support.");
/* Invariant argument is needed for a vector shift
by a scalar shift operand. */
optab_op2_mode = insn_data[icode].operand[2].mode;
- if (! (VECTOR_MODE_P (optab_op2_mode)
- || dt[1] == vect_constant_def
- || dt[1] == vect_invariant_def))
+ if (!VECTOR_MODE_P (optab_op2_mode))
{
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "operand mode requires invariant argument.");
- return false;
- }
+ if (dt[1] != vect_constant_def && dt[1] != vect_invariant_def)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "operand mode requires invariant"
+ " argument.");
+ return false;
+ }
+
+ scalar_shift_arg = true;
+ }
}
if (!vec_stmt) /* transformation not required. */
STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vectorizable_operation ===");
- vect_model_simple_cost (stmt_info, ncopies, dt);
+ vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
return true;
}
/* Handle def. */
vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ /* Allocate VECs for vector operands. In case of SLP, vector operands are
+ created in the previous stages of the recursion, so no allocation is
+ needed, except for the case of shift with scalar shift argument. In that
+ case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
+ be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
+ In case of loop-based vectorization we allocate VECs of size 1. We
+ allocate VEC_OPRNDS1 only in case of binary operation. */
+ if (!slp_node)
+ {
+ vec_oprnds0 = VEC_alloc (tree, heap, 1);
+ if (op_type == binary_op)
+ vec_oprnds1 = VEC_alloc (tree, heap, 1);
+ }
+ else if (scalar_shift_arg)
+ vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
+
/* In case the vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
more than one vector stmt - i.e - we need to "unroll" the
/* Handle uses. */
if (j == 0)
{
- vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
- if (op_type == binary_op)
+ if (op_type == binary_op
+ && (code == LSHIFT_EXPR || code == RSHIFT_EXPR))
{
- if (code == LSHIFT_EXPR || code == RSHIFT_EXPR)
- {
- /* Vector shl and shr insn patterns can be defined with
- scalar operand 2 (shift operand). In this case, use
- constant or loop invariant op1 directly, without
- extending it to vector mode first. */
- optab_op2_mode = insn_data[icode].operand[2].mode;
- if (!VECTOR_MODE_P (optab_op2_mode))
+ /* Vector shl and shr insn patterns can be defined with scalar
+ operand 2 (shift operand). In this case, use constant or loop
+ invariant op1 directly, without extending it to vector mode
+ first. */
+ optab_op2_mode = insn_data[icode].operand[2].mode;
+ if (!VECTOR_MODE_P (optab_op2_mode))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "operand 1 using scalar mode.");
+ vec_oprnd1 = op1;
+ VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
+ if (slp_node)
{
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "operand 1 using scalar mode.");
- vec_oprnd1 = op1;
+ /* Store vec_oprnd1 for every vector stmt to be created
+ for SLP_NODE. We check during the analysis that all the
+ shift arguments are the same.
+ TODO: Allow different constants for different vector
+ stmts generated for an SLP instance. */
+ for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
+ VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
}
- }
- if (!vec_oprnd1)
- vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
+ }
}
+
+ /* vec_oprnd1 is available if operand 1 should be of a scalar type
+ (a special case for certain kinds of vector shifts); otherwise,
+ operand 1 should be of a vector type (the usual case). */
+ if (op_type == binary_op && !vec_oprnd1)
+ vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
+ slp_node);
+ else
+ vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
+ slp_node);
}
else
- {
- vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
- if (op_type == binary_op)
- vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
- }
+ vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
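/* Editor's note: an illustrative sketch, not part of the patch. A
   loop-invariant shift count can stay scalar when the target's shift
   pattern takes a scalar operand 2; every lane is shifted by the same
   amount, so no vector def is built for it:

     for (i = 0; i < n; i++)
       b[i] = a[i] << k;   -- K invariant: one vector shl, scalar count

   When operand 2 must be a vector, the usual vect_get_vec_defs path
   above is taken instead. */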
- /* Arguments are ready. create the new vector stmt. */
+ /* Arguments are ready. Create the new vector stmt. */
+ for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
+ {
+ if (op_type == binary_op)
+ {
+ vop1 = VEC_index (tree, vec_oprnds1, i);
+ new_stmt = build_gimple_modify_stmt (vec_dest,
+ build2 (code, vectype, vop0, vop1));
+ }
+ else
+ new_stmt = build_gimple_modify_stmt (vec_dest,
+ build1 (code, vectype, vop0));
- if (op_type == binary_op)
- new_stmt = build_gimple_modify_stmt (vec_dest,
- build2 (code, vectype, vec_oprnd0, vec_oprnd1));
- else
- new_stmt = build_gimple_modify_stmt (vec_dest,
- build1 (code, vectype, vec_oprnd0));
- new_temp = make_ssa_name (vec_dest, new_stmt);
- GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
- vect_finish_stmt_generation (stmt, new_stmt, bsi);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
+ vect_finish_stmt_generation (stmt, new_stmt, bsi);
+ if (slp_node)
+ VEC_quick_push (tree, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+ }
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
prev_stmt_info = vinfo_for_stmt (new_stmt);
}
+ VEC_free (tree, heap, vec_oprnds0);
+ if (vec_oprnds1)
+ VEC_free (tree, heap, vec_oprnds1);
+
return true;
}
tree vec_oprnd0=NULL, vec_oprnd1=NULL;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum tree_code code, code1 = ERROR_MARK;
tree new_temp;
tree def, def_stmt;
if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
return false;
- /* FORNOW: not yet supported. */
- if (STMT_VINFO_LIVE_P (stmt_info))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "value used after loop.");
- return false;
- }
-
/* Is STMT a vectorizable type-demotion operation? */
if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
return false;
op0 = TREE_OPERAND (operation, 0);
vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
+ if (!vectype_in)
+ return false;
nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
+ if (!vectype_out)
+ return false;
nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
if (nunits_in != nunits_out / 2) /* FORNOW */
return false;
ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
gcc_assert (ncopies >= 1);
+ /* FORNOW. This restriction should be relaxed. */
+ if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "multiple types in nested loop.");
+ return false;
+ }
if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
&& INTEGRAL_TYPE_P (TREE_TYPE (op0)))
STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vectorizable_demotion ===");
- vect_model_simple_cost (stmt_info, ncopies, dt);
+ vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
return true;
}
tree vec_oprnd0=NULL, vec_oprnd1=NULL;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
tree decl1 = NULL_TREE, decl2 = NULL_TREE;
int op_type;
if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
return false;
- /* FORNOW: not yet supported. */
- if (STMT_VINFO_LIVE_P (stmt_info))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "value used after loop.");
- return false;
- }
-
/* Is STMT a vectorizable type-promotion operation? */
if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
return false;
op0 = TREE_OPERAND (operation, 0);
vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
+ if (!vectype_in)
+ return false;
nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
+ if (!vectype_out)
+ return false;
nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
if (nunits_out != nunits_in / 2) /* FORNOW */
return false;
ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
gcc_assert (ncopies >= 1);
+ /* FORNOW. This restriction should be relaxed. */
+ if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "multiple types in nested loop.");
+ return false;
+ }
if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
&& INTEGRAL_TYPE_P (TREE_TYPE (op0)))
STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vectorizable_promotion ===");
- vect_model_simple_cost (stmt_info, 2*ncopies, dt);
+ vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
return true;
}
return false;
}
- if (interleave_high_optab->handlers[(int) mode].insn_code
+ if (optab_handler (interleave_high_optab, mode)->insn_code
== CODE_FOR_nothing
- || interleave_low_optab->handlers[(int) mode].insn_code
+ || optab_handler (interleave_low_optab, mode)->insn_code
== CODE_FOR_nothing)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "interleave op not supported by target.");
return false;
}
+
return true;
}
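/* Editor's sketch (illustrative plain C; the exact lane order is target
   and endian dependent): what the interleave_high/interleave_low optab
   pair computes for two 4-lane vectors, producing the element order an
   interleaved (strided) store needs in memory. */
#include <stdio.h>

int
main (void)
{
  int a[4] = { 0, 1, 2, 3 }, b[4] = { 4, 5, 6, 7 };
  int high[4], low[4], i;

  for (i = 0; i < 2; i++)
    {
      high[2 * i] = a[i];       /* interleave lanes 0,1 of A and B */
      high[2 * i + 1] = b[i];
      low[2 * i] = a[i + 2];    /* interleave lanes 2,3 of A and B */
      low[2 * i + 1] = b[i + 2];
    }

  printf ("high: %d %d %d %d\n", high[0], high[1], high[2], high[3]);
  printf ("low: %d %d %d %d\n", low[0], low[1], low[2], low[3]);
  return 0; /* high: 0 4 1 5, low: 2 6 3 7 */
}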
tree scalar_dest, tmp;
int i;
unsigned int j;
- VEC(tree,heap) *first, *second;
scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
- first = VEC_alloc (tree, heap, length/2);
- second = VEC_alloc (tree, heap, length/2);
/* Check that the operation is supported. */
if (!vect_strided_store_supported (vectype))
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
bool
-vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
+vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt,
+ slp_tree slp_node)
{
tree scalar_dest;
tree data_ref;
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum machine_mode vec_mode;
tree dummy;
- enum dr_alignment_support alignment_support_cheme;
+ enum dr_alignment_support alignment_support_scheme;
tree def, def_stmt;
enum vect_def_type dt;
stmt_vec_info prev_stmt_info = NULL;
int nunits = TYPE_VECTOR_SUBPARTS (vectype);
int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
int j;
- tree next_stmt, first_stmt;
+ tree next_stmt, first_stmt = NULL_TREE;
bool strided_store = false;
unsigned int group_size, i;
VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
- gcc_assert (ncopies >= 1);
+ bool inv_p;
+ VEC(tree,heap) *vec_oprnds = NULL;
+ bool slp = (slp_node != NULL);
+ stmt_vec_info first_stmt_vinfo;
+ unsigned int vec_num;
- if (!STMT_VINFO_RELEVANT_P (stmt_info))
- return false;
+ /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
+ this, so we can safely override NCOPIES with 1 here. */
+ if (slp)
+ ncopies = 1;
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
- return false;
+ gcc_assert (ncopies >= 1);
- if (STMT_VINFO_LIVE_P (stmt_info))
+ /* FORNOW. This restriction should be relaxed. */
+ if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
{
if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "value used after loop.");
+ fprintf (vect_dump, "multiple types in nested loop.");
return false;
}
+ if (!STMT_VINFO_RELEVANT_P (stmt_info))
+ return false;
+
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
+ return false;
+
/* Is vectorizable store? */
if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
if (TREE_CODE (scalar_dest) != ARRAY_REF
&& TREE_CODE (scalar_dest) != INDIRECT_REF
- && !DR_GROUP_FIRST_DR (stmt_info))
+ && !STMT_VINFO_STRIDED_ACCESS (stmt_info))
return false;
op = GIMPLE_STMT_OPERAND (stmt, 1);
vec_mode = TYPE_MODE (vectype);
/* FORNOW. In some cases can vectorize even if data-type not supported
(e.g. - array initialization with 0). */
- if (mov_optab->handlers[(int)vec_mode].insn_code == CODE_FOR_nothing)
+ if (optab_handler (mov_optab, vec_mode)->insn_code == CODE_FOR_nothing)
return false;
if (!STMT_VINFO_DATA_REF (stmt_info))
return false;
- if (DR_GROUP_FIRST_DR (stmt_info))
+ if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
{
strided_store = true;
- if (!vect_strided_store_supported (vectype))
- return false;
+ first_stmt = DR_GROUP_FIRST_DR (stmt_info);
+ if (!vect_strided_store_supported (vectype)
+ && !PURE_SLP_STMT (stmt_info) && !slp)
+ return false;
+
+ if (first_stmt == stmt)
+ {
+ /* STMT is the leader of the group. Check the operands of all the
+ stmts of the group. */
+ next_stmt = DR_GROUP_NEXT_DR (stmt_info);
+ while (next_stmt)
+ {
+ op = GIMPLE_STMT_OPERAND (next_stmt, 1);
+ if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "use not simple.");
+ return false;
+ }
+ next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
+ }
+ }
}
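/* Editor's note: an illustrative sketch, not part of the patch. An
   interleaved store group of size 2 looks like:

     for (i = 0; i < n; i++)
       { p[i].x = a[i]; p[i].y = b[i]; }

   The store to p[i].x is the group leader (DR_GROUP_FIRST_DR); the whole
   group is vectorized only once the last store of the group is reached,
   after the vector defs are permuted into interleaved order. */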
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
- vect_model_store_cost (stmt_info, ncopies, dt);
+ if (!PURE_SLP_STMT (stmt_info))
+ vect_model_store_cost (stmt_info, ncopies, dt, NULL);
return true;
}
if (strided_store)
{
- first_stmt = DR_GROUP_FIRST_DR (stmt_info);
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
+ /* FORNOW */
+ gcc_assert (!nested_in_vect_loop_p (loop, stmt));
+
/* We vectorize all the stmts of the interleaving group when we
reach the last stmt in the group. */
if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
- < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt)))
+ < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
+ && !slp)
{
*vec_stmt = NULL_TREE;
return true;
}
+
+ if (slp)
+ strided_store = false;
+
+ /* VEC_NUM is the number of vect stmts to be created for this group. */
+ if (slp && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) < group_size)
+ vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ else
+ vec_num = group_size;
}
else
{
first_stmt = stmt;
first_dr = dr;
- group_size = 1;
+ group_size = vec_num = 1;
+ first_stmt_vinfo = stmt_info;
}
if (vect_print_dump_info (REPORT_DETAILS))
dr_chain = VEC_alloc (tree, heap, group_size);
oprnds = VEC_alloc (tree, heap, group_size);
- alignment_support_cheme = vect_supportable_dr_alignment (first_dr);
- gcc_assert (alignment_support_cheme);
- gcc_assert (alignment_support_cheme == dr_aligned); /* FORNOW */
+ alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
+ gcc_assert (alignment_support_scheme);
+ gcc_assert (alignment_support_scheme == dr_aligned); /* FORNOW */
/* In case the vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
if (j == 0)
{
- /* For interleaved stores we collect vectorized defs for all the
- stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then used
- as an input to vect_permute_store_chain(), and OPRNDS as an input
- to vect_get_vec_def_for_stmt_copy() for the next copy.
- If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
- OPRNDS are of size 1. */
- next_stmt = first_stmt;
- for (i = 0; i < group_size; i++)
- {
- /* Since gaps are not supported for interleaved stores, GROUP_SIZE
- is the exact number of stmts in the chain. Therefore, NEXT_STMT
- can't be NULL_TREE. In case that there is no interleaving,
- GROUP_SIZE is 1, and only one iteration of the loop will be
- executed. */
- gcc_assert (next_stmt);
- op = GIMPLE_STMT_OPERAND (next_stmt, 1);
- vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt, NULL);
- VEC_quick_push(tree, dr_chain, vec_oprnd);
- VEC_quick_push(tree, oprnds, vec_oprnd);
- next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
+ if (slp)
+ {
+ /* Get vectorized arguments for SLP_NODE. */
+ vect_get_slp_defs (slp_node, &vec_oprnds, NULL);
+
+ vec_oprnd = VEC_index (tree, vec_oprnds, 0);
+ }
+ else
+ {
+ /* For interleaved stores we collect vectorized defs for all the
+ stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
+ used as an input to vect_permute_store_chain(), and OPRNDS as
+ an input to vect_get_vec_def_for_stmt_copy() for the next copy.
+
+ If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
+ OPRNDS are of size 1. */
+ next_stmt = first_stmt;
+ for (i = 0; i < group_size; i++)
+ {
+ /* Since gaps are not supported for interleaved stores,
+ GROUP_SIZE is the exact number of stmts in the chain.
+ Therefore, NEXT_STMT can't be NULL_TREE. In case that
+ there is no interleaving, GROUP_SIZE is 1, and only one
+ iteration of the loop will be executed. */
+ gcc_assert (next_stmt);
+ op = GIMPLE_STMT_OPERAND (next_stmt, 1);
+
+ vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
+ NULL);
+ VEC_quick_push(tree, dr_chain, vec_oprnd);
+ VEC_quick_push(tree, oprnds, vec_oprnd);
+ next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
+ }
}
- dataref_ptr = vect_create_data_ref_ptr (first_stmt, bsi, NULL_TREE,
+ dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
&dummy, &ptr_incr, false,
- TREE_TYPE (vec_oprnd));
+ TREE_TYPE (vec_oprnd), &inv_p);
+ gcc_assert (!inv_p);
}
else
{
+ /* FORNOW SLP doesn't work for multiple types. */
+ gcc_assert (!slp);
+
/* For interleaved stores we created vectorized defs for all the
defs stored in OPRNDS in the previous iteration (previous copy).
DR_CHAIN is then used as an input to vect_permute_store_chain(),
OPRNDS are of size 1. */
for (i = 0; i < group_size; i++)
{
- vec_oprnd = vect_get_vec_def_for_stmt_copy (dt,
- VEC_index (tree, oprnds, i));
+ op = VEC_index (tree, oprnds, i);
+ vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt);
+ vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
VEC_replace(tree, dr_chain, i, vec_oprnd);
VEC_replace(tree, oprnds, i, vec_oprnd);
}
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt);
+ dataref_ptr =
+ bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt, NULL_TREE);
}
if (strided_store)
}
next_stmt = first_stmt;
- for (i = 0; i < group_size; i++)
+ for (i = 0; i < vec_num; i++)
{
- /* For strided stores vectorized defs are interleaved in
- vect_permute_store_chain(). */
- if (strided_store)
- vec_oprnd = VEC_index(tree, result_chain, i);
+ if (i > 0)
+ /* Bump the vector pointer. */
+ dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt,
+ NULL_TREE);
+
+ if (slp)
+ vec_oprnd = VEC_index (tree, vec_oprnds, i);
+ else if (strided_store)
+ /* For strided stores vectorized defs are interleaved in
+ vect_permute_store_chain(). */
+ vec_oprnd = VEC_index (tree, result_chain, i);
data_ref = build_fold_indirect_ref (dataref_ptr);
/* Arguments are ready. Create the new vector stmt. */
next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
if (!next_stmt)
break;
- /* Bump the vector pointer. */
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt);
}
}
+ VEC_free (tree, heap, dr_chain);
+ VEC_free (tree, heap, oprnds);
+ if (result_chain)
+ VEC_free (tree, heap, result_chain);
+
return true;
}
/* Function vect_setup_realignment
This function is called when vectorizing an unaligned load using
- the dr_unaligned_software_pipeline scheme.
+ the dr_explicit_realign[_optimized] scheme.
This function generates the following code at the loop prolog:
p = initial_addr;
- msq_init = *(floor(p)); # prolog load
+ x msq_init = *(floor(p)); # prolog load
realignment_token = call target_builtin;
loop:
- msq = phi (msq_init, ---)
+ x msq = phi (msq_init, ---)
+
+ The stmts marked with x are generated only for the case of
+ dr_explicit_realign_optimized.
The code above sets up a new (vector) pointer, pointing to the first
location accessed by STMT, and a "floor-aligned" load using that pointer.
whose arguments are the result of the prolog-load (created by this
function) and the result of a load that takes place in the loop (to be
created by the caller to this function).
+
+ For the case of dr_explicit_realign_optimized:
The caller to this function uses the phi-result (msq) to create the
realignment code inside the loop, and sets up the missing phi argument,
as follows:
-
loop:
msq = phi (msq_init, lsq)
lsq = *(floor(p')); # load in loop
result = realign_load (msq, lsq, realignment_token);
+ For the case of dr_explicit_realign:
+ loop:
+ msq = *(floor(p)); # load in loop
+ p' = p + (VS-1);
+ lsq = *(floor(p')); # load in loop
+ result = realign_load (msq, lsq, realignment_token);
+
Input:
STMT - (scalar) load stmt to be vectorized. This load accesses
a memory location that may be unaligned.
BSI - place where new code is to be inserted.
+ ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
+ is used.
Output:
REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
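/* Editor's sketch (illustrative plain C, not GCC internals): how
   REALIGN_LOAD conceptually assembles a misaligned vector from two
   floor-aligned loads - MSQ holds the vector at floor(p), LSQ the one
   after it, and the realignment token selects the straddling lanes. */
#include <stdio.h>
#include <string.h>

int
main (void)
{
  int mem[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  int misalign = 1; /* p points one element past an aligned boundary */
  int msq[4], lsq[4], result[4], i;

  memcpy (msq, &mem[0], sizeof msq); /* msq = *(floor(p)) */
  memcpy (lsq, &mem[4], sizeof lsq); /* lsq = *(floor(p + VS - 1)) */

  /* result = realign_load (msq, lsq, realignment_token) */
  for (i = 0; i < 4; i++)
    result[i] = (i + misalign < 4) ? msq[i + misalign]
                                   : lsq[i + misalign - 4];

  printf ("%d %d %d %d\n", result[0], result[1], result[2], result[3]);
  return 0; /* prints: 1 2 3 4 */
}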
static tree
vect_setup_realignment (tree stmt, block_stmt_iterator *bsi,
- tree *realignment_token)
+ tree *realignment_token,
+ enum dr_alignment_support alignment_support_scheme,
+ tree init_addr,
+ struct loop **at_loop)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- edge pe = loop_preheader_edge (loop);
+ edge pe;
tree scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
tree vec_dest;
- tree init_addr;
tree inc;
tree ptr;
tree data_ref;
tree new_stmt;
basic_block new_bb;
- tree msq_init;
+ tree msq_init = NULL_TREE;
tree new_temp;
tree phi_stmt;
- tree msq;
+ tree msq = NULL_TREE;
+ tree stmts = NULL_TREE;
+ bool inv_p;
+ bool compute_in_loop = false;
+ bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
+ struct loop *containing_loop = (bb_for_stmt (stmt))->loop_father;
+ struct loop *loop_for_initial_load;
+
+ gcc_assert (alignment_support_scheme == dr_explicit_realign
+ || alignment_support_scheme == dr_explicit_realign_optimized);
+
+ /* We need to generate three things:
+ 1. the misalignment computation.
+ 2. the extra vector load (for the optimized realignment scheme).
+ 3. the phi node for the two vectors from which the realignment is
+ done (for the optimized realignment scheme). */
+
+ /* 1. Determine where to generate the misalignment computation.
+
+ If INIT_ADDR is NULL_TREE, this indicates that the misalignment
+ calculation will be generated by this function, outside the loop (in the
+ preheader). Otherwise, INIT_ADDR has already been computed for us by the
+ caller, inside the loop.
+
+ Background: If the misalignment remains fixed throughout the iterations of
+ the loop, then both realignment schemes are applicable, and also the
+ misalignment computation can be done outside LOOP. This is because we are
+ vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
+ are a multiple of VS (the Vector Size), and therefore the misalignment in
+ different vectorized LOOP iterations is always the same.
+ The problem arises only if the memory access is in an inner-loop nested
+ inside LOOP, which is now being vectorized using outer-loop vectorization.
+ This is the only case when the misalignment of the memory access may not
+ remain fixed throughout the iterations of the inner-loop (as explained in
+ detail in vect_supportable_dr_alignment). In this case, not only is the
+ optimized realignment scheme not applicable, but also the misalignment
+ computation (and generation of the realignment token that is passed to
+ REALIGN_LOAD) has to be done inside the loop.
+
+ In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
+ or not, which in turn determines if the misalignment is computed inside
+ the inner-loop, or outside LOOP. */
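/* Editor's note: an illustrative sketch, not part of the patch. For
   float a[n][5], outer-loop vectorization of

     for (i = 0; i < n; i++)      -- vectorized (outer) loop
       for (j = 0; j < 5; j++)
         s[i] += a[i][j];         -- inner-loop memory access

   has the inner access starting at &a[i][0], which advances by 5 floats
   per outer iteration. Since 5 is not a multiple of the vector size,
   the access's misalignment differs from one outer iteration to the
   next, so the realignment token must be computed inside the loop. */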
+
+ if (init_addr != NULL_TREE)
+ {
+ compute_in_loop = true;
+ gcc_assert (alignment_support_scheme == dr_explicit_realign);
+ }
+
+
+ /* 2. Determine where to generate the extra vector load.
+
+ For the optimized realignment scheme, instead of generating two vector
+ loads in each iteration, we generate a single extra vector load in the
+ preheader of the loop, and in each iteration reuse the result of the
+ vector load from the previous iteration. In case the memory access is in
+ an inner-loop nested inside LOOP, which is now being vectorized using
+ outer-loop vectorization, we need to determine whether this initial vector
+ load should be generated at the preheader of the inner-loop, or can be
+ generated at the preheader of LOOP. If the memory access has no evolution
+ in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
+ to be generated inside LOOP (in the preheader of the inner-loop). */
+
+ if (nested_in_vect_loop)
+ {
+ tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
+ bool invariant_in_outerloop =
+ (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
+ loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
+ }
+ else
+ loop_for_initial_load = loop;
+ if (at_loop)
+ *at_loop = loop_for_initial_load;
- /* 1. Create msq_init = *(floor(p1)) in the loop preheader */
- vec_dest = vect_create_destination_var (scalar_dest, vectype);
- ptr = vect_create_data_ref_ptr (stmt, bsi, NULL_TREE, &init_addr, &inc, true,
- NULL_TREE);
- data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
- new_stmt = build_gimple_modify_stmt (vec_dest, data_ref);
- new_temp = make_ssa_name (vec_dest, new_stmt);
- GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
- new_bb = bsi_insert_on_edge_immediate (pe, new_stmt);
- gcc_assert (!new_bb);
- msq_init = GIMPLE_STMT_OPERAND (new_stmt, 0);
+ /* 3. For the case of the optimized realignment, create the first vector
+ load at the loop preheader. */
+
+ if (alignment_support_scheme == dr_explicit_realign_optimized)
+ {
+ /* Create msq_init = *(floor(p1)) in the loop preheader */
+
+ gcc_assert (!compute_in_loop);
+ pe = loop_preheader_edge (loop_for_initial_load);
+ vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ ptr = vect_create_data_ref_ptr (stmt, loop_for_initial_load, NULL_TREE,
+ &init_addr, &inc, true, NULL_TREE, &inv_p);
+ data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
+ new_stmt = build_gimple_modify_stmt (vec_dest, data_ref);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
+ mark_symbols_for_renaming (new_stmt);
+ new_bb = bsi_insert_on_edge_immediate (pe, new_stmt);
+ gcc_assert (!new_bb);
+ msq_init = GIMPLE_STMT_OPERAND (new_stmt, 0);
+ }
+
+ /* 4. Create realignment token using a target builtin, if available.
+ It is done either inside the containing loop, or before LOOP (as
+ determined above). */
- /* 2. Create permutation mask, if required, in loop preheader. */
if (targetm.vectorize.builtin_mask_for_load)
{
tree builtin_decl;
+ /* Compute INIT_ADDR - the initial address accessed by this memref. */
+ if (compute_in_loop)
+ gcc_assert (init_addr); /* already computed by the caller. */
+ else
+ {
+ /* Generate the INIT_ADDR computation outside LOOP. */
+ init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
+ NULL_TREE, loop);
+ pe = loop_preheader_edge (loop);
+ new_bb = bsi_insert_on_edge_immediate (pe, stmts);
+ gcc_assert (!new_bb);
+ }
+
builtin_decl = targetm.vectorize.builtin_mask_for_load ();
new_stmt = build_call_expr (builtin_decl, 1, init_addr);
vec_dest = vect_create_destination_var (scalar_dest,
new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt);
new_temp = make_ssa_name (vec_dest, new_stmt);
GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
- new_bb = bsi_insert_on_edge_immediate (pe, new_stmt);
- gcc_assert (!new_bb);
+
+ if (compute_in_loop)
+ bsi_insert_before (bsi, new_stmt, BSI_SAME_STMT);
+ else
+ {
+ /* Generate the misalignment computation outside LOOP. */
+ pe = loop_preheader_edge (loop);
+ new_bb = bsi_insert_on_edge_immediate (pe, new_stmt);
+ gcc_assert (!new_bb);
+ }
+
*realignment_token = GIMPLE_STMT_OPERAND (new_stmt, 0);
/* The result of the CALL_EXPR to this builtin is determined from
gcc_assert (TREE_READONLY (builtin_decl));
}
- /* 3. Create msq = phi <msq_init, lsq> in loop */
+ if (alignment_support_scheme == dr_explicit_realign)
+ return msq;
+
+ gcc_assert (!compute_in_loop);
+ gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
+
+ /* 5. Create msq = phi <msq_init, lsq> in loop */
+
+ pe = loop_preheader_edge (containing_loop);
vec_dest = vect_create_destination_var (scalar_dest, vectype);
msq = make_ssa_name (vec_dest, NULL_TREE);
- phi_stmt = create_phi_node (msq, loop->header);
+ phi_stmt = create_phi_node (msq, containing_loop->header);
SSA_NAME_DEF_STMT (msq) = phi_stmt;
- add_phi_arg (phi_stmt, msq_init, loop_preheader_edge (loop));
+ add_phi_arg (phi_stmt, msq_init, pe);
return msq;
}
return false;
}
- if (perm_even_optab->handlers[mode].insn_code == CODE_FOR_nothing)
+ if (optab_handler (perm_even_optab, mode)->insn_code == CODE_FOR_nothing)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "perm_even op not supported by target.");
return false;
}
- if (perm_odd_optab->handlers[mode].insn_code == CODE_FOR_nothing)
+ if (optab_handler (perm_odd_optab, mode)->insn_code == CODE_FOR_nothing)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "perm_odd op not supported by target.");
break;
}
}
+
+ VEC_free (tree, heap, result_chain);
return true;
}
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
bool
-vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
+vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt,
+ slp_tree slp_node)
{
tree scalar_dest;
tree vec_dest = NULL;
stmt_vec_info prev_stmt_info;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ struct loop *containing_loop = (bb_for_stmt (stmt))->loop_father;
+ bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
tree new_temp;
int mode;
tree new_stmt = NULL_TREE;
tree dummy;
- enum dr_alignment_support alignment_support_cheme;
+ enum dr_alignment_support alignment_support_scheme;
tree dataref_ptr = NULL_TREE;
tree ptr_incr;
int nunits = TYPE_VECTOR_SUBPARTS (vectype);
tree msq = NULL_TREE, lsq;
tree offset = NULL_TREE;
tree realignment_token = NULL_TREE;
- tree phi_stmt = NULL_TREE;
+ tree phi = NULL_TREE;
VEC(tree,heap) *dr_chain = NULL;
bool strided_load = false;
tree first_stmt;
+ tree scalar_type;
+ bool inv_p;
+ bool compute_in_loop = false;
+ struct loop *at_loop;
+ int vec_num;
+ bool slp = (slp_node != NULL);
- if (!STMT_VINFO_RELEVANT_P (stmt_info))
- return false;
+ /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
+ this, so we can safely override NCOPIES with 1 here. */
+ if (slp)
+ ncopies = 1;
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
- return false;
+ gcc_assert (ncopies >= 1);
- /* FORNOW: not yet supported. */
- if (STMT_VINFO_LIVE_P (stmt_info))
+ /* FORNOW. This restriction should be relaxed. */
+ if (nested_in_vect_loop && ncopies > 1)
{
if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "value used after loop.");
+ fprintf (vect_dump, "multiple types in nested loop.");
return false;
}
+ if (!STMT_VINFO_RELEVANT_P (stmt_info))
+ return false;
+
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
+ return false;
+
/* Is vectorizable load? */
if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
return false;
op = GIMPLE_STMT_OPERAND (stmt, 1);
if (TREE_CODE (op) != ARRAY_REF
&& TREE_CODE (op) != INDIRECT_REF
- && !DR_GROUP_FIRST_DR (stmt_info))
+ && !STMT_VINFO_STRIDED_ACCESS (stmt_info))
return false;
if (!STMT_VINFO_DATA_REF (stmt_info))
return false;
+ scalar_type = TREE_TYPE (DR_REF (dr));
mode = (int) TYPE_MODE (vectype);
/* FORNOW. In some cases can vectorize even if data-type not supported
(e.g. - data copies). */
- if (mov_optab->handlers[mode].insn_code == CODE_FOR_nothing)
+ if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Aligned load, but unsupported type.");
}
/* Check if the load is a part of an interleaving chain. */
- if (DR_GROUP_FIRST_DR (stmt_info))
+ if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
{
strided_load = true;
+ /* FORNOW */
+ gcc_assert (! nested_in_vect_loop);
/* Check if interleaving is supported. */
- if (!vect_strided_load_supported (vectype))
+ if (!vect_strided_load_supported (vectype)
+ && !PURE_SLP_STMT (stmt_info) && !slp)
return false;
}
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
- vect_model_load_cost (stmt_info, ncopies);
+ vect_model_load_cost (stmt_info, ncopies, NULL);
return true;
}
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
dr_chain = VEC_alloc (tree, heap, group_size);
+
+ /* VEC_NUM is the number of vect stmts to be created for this group. */
+ if (slp)
+ {
+ strided_load = false;
+ vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ }
+ else
+ vec_num = group_size;
}
else
{
first_stmt = stmt;
first_dr = dr;
- group_size = 1;
+ group_size = vec_num = 1;
}
- alignment_support_cheme = vect_supportable_dr_alignment (first_dr);
- gcc_assert (alignment_support_cheme);
-
+ alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
+ gcc_assert (alignment_support_scheme);
/* In case the vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
}
Otherwise, the data reference is potentially unaligned on a target that
- does not support unaligned accesses (dr_unaligned_software_pipeline) -
+ does not support unaligned accesses (dr_explicit_realign_optimized) -
then generate the following code, in which the data in each iteration is
obtained by two vector loads, one from the previous iteration, and one
from the current iteration:
msq = lsq;
} */
- if (alignment_support_cheme == dr_unaligned_software_pipeline)
+ /* If the misalignment remains the same throughout the execution of the
+ loop, we can create the init_addr and permutation mask at the loop
+ preheader. Otherwise, it needs to be created inside the loop.
+ This can only occur when vectorizing memory accesses in the inner-loop
+ nested within an outer-loop that is being vectorized. */
+
+ if (nested_in_vect_loop_p (loop, stmt)
+ && (TREE_INT_CST_LOW (DR_STEP (dr)) % UNITS_PER_SIMD_WORD != 0))
+ {
+ gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
+ compute_in_loop = true;
+ }
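+
+ /* E.g., a data reference whose step w.r.t. the outer-loop is 4 bytes
+ while UNITS_PER_SIMD_WORD is 16 has a different misalignment in each
+ outer-loop iteration, so its realignment token must be recomputed
+ inside the loop (illustrative numbers). */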
+
+ if ((alignment_support_scheme == dr_explicit_realign_optimized
+ || alignment_support_scheme == dr_explicit_realign)
+ && !compute_in_loop)
{
- msq = vect_setup_realignment (first_stmt, bsi, &realignment_token);
- phi_stmt = SSA_NAME_DEF_STMT (msq);
- offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
+ msq = vect_setup_realignment (first_stmt, bsi, &realignment_token,
+ alignment_support_scheme, NULL_TREE,
+ &at_loop);
+ if (alignment_support_scheme == dr_explicit_realign_optimized)
+ {
+ phi = SSA_NAME_DEF_STMT (msq);
+ offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
+ }
}
+ else
+ at_loop = loop;
prev_stmt_info = NULL;
for (j = 0; j < ncopies; j++)
{
/* 1. Create the vector pointer update chain. */
if (j == 0)
- dataref_ptr = vect_create_data_ref_ptr (first_stmt, bsi, offset, &dummy,
- &ptr_incr, false, NULL_TREE);
+ dataref_ptr = vect_create_data_ref_ptr (first_stmt,
+ at_loop, offset,
+ &dummy, &ptr_incr, false,
+ NULL_TREE, &inv_p);
else
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt);
+ dataref_ptr =
+ bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt, NULL_TREE);
- for (i = 0; i < group_size; i++)
+ for (i = 0; i < vec_num; i++)
{
+ if (i > 0)
+ dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt,
+ NULL_TREE);
+
/* 2. Create the vector-load in the loop. */
- switch (alignment_support_cheme)
+ switch (alignment_support_scheme)
{
case dr_aligned:
gcc_assert (aligned_access_p (first_dr));
int mis = DR_MISALIGNMENT (first_dr);
tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
- gcc_assert (!aligned_access_p (first_dr));
tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
data_ref =
build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
break;
}
- case dr_unaligned_software_pipeline:
- gcc_assert (!aligned_access_p (first_dr));
+ case dr_explicit_realign:
+ {
+ tree ptr, bump;
+ tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
+
+ if (compute_in_loop)
+ msq = vect_setup_realignment (first_stmt, bsi,
+ &realignment_token,
+ dr_explicit_realign,
+ dataref_ptr, NULL);
+
+ data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
+ vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ new_stmt = build_gimple_modify_stmt (vec_dest, data_ref);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
+ vect_finish_stmt_generation (stmt, new_stmt, bsi);
+ copy_virtual_operands (new_stmt, stmt);
+ mark_symbols_for_renaming (new_stmt);
+ msq = new_temp;
+
+ bump = size_binop (MULT_EXPR, vs_minus_1,
+ TYPE_SIZE_UNIT (scalar_type));
+ ptr = bump_vector_ptr (dataref_ptr, NULL_TREE, bsi, stmt, bump);
+ data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
+ break;
+ }
+ case dr_explicit_realign_optimized:
data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
break;
default:
vect_finish_stmt_generation (stmt, new_stmt, bsi);
mark_symbols_for_renaming (new_stmt);
- /* 3. Handle explicit realignment if necessary/supported. */
- if (alignment_support_cheme == dr_unaligned_software_pipeline)
+ /* 3. Handle explicit realignment if necessary/supported. Create in
+ loop: vec_dest = realign_load (msq, lsq, realignment_token) */
+ if (alignment_support_scheme == dr_explicit_realign_optimized
+ || alignment_support_scheme == dr_explicit_realign)
{
- /* Create in loop:
- <vec_dest = realign_load (msq, lsq, realignment_token)> */
lsq = GIMPLE_STMT_OPERAND (new_stmt, 0);
if (!realignment_token)
realignment_token = dataref_ptr;
vec_dest = vect_create_destination_var (scalar_dest, vectype);
- new_stmt =
- build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq, realignment_token);
+ new_stmt = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
+ realignment_token);
new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt);
new_temp = make_ssa_name (vec_dest, new_stmt);
GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
vect_finish_stmt_generation (stmt, new_stmt, bsi);
- if (i == group_size - 1 && j == ncopies - 1)
- add_phi_arg (phi_stmt, lsq, loop_latch_edge (loop));
- msq = lsq;
+
+ if (alignment_support_scheme == dr_explicit_realign_optimized)
+ {
+ if (i == vec_num - 1 && j == ncopies - 1)
+ add_phi_arg (phi, lsq, loop_latch_edge (containing_loop));
+ msq = lsq;
+ }
+ }
+
+ /* 4. Handle invariant-load. */
+ if (inv_p)
+ {
+ gcc_assert (!strided_load);
+ gcc_assert (nested_in_vect_loop_p (loop, stmt));
+ if (j == 0)
+ {
+ int k;
+ tree t = NULL_TREE;
+ tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
+
+ /* CHECKME: bitpos depends on endianness? */
+ bitpos = bitsize_zero_node;
+ vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
+ bitsize, bitpos);
+ vec_dest =
+ vect_create_destination_var (scalar_dest, NULL_TREE);
+ new_stmt = build_gimple_modify_stmt (vec_dest, vec_inv);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
+ vect_finish_stmt_generation (stmt, new_stmt, bsi);
+
+ for (k = nunits - 1; k >= 0; --k)
+ t = tree_cons (NULL_TREE, new_temp, t);
+ /* FIXME: use build_constructor directly. */
+ vec_inv = build_constructor_from_list (vectype, t);
+ new_temp = vect_init_vector (stmt, vec_inv, vectype, bsi);
+ new_stmt = SSA_NAME_DEF_STMT (new_temp);
+ }
+ else
+ gcc_unreachable (); /* FORNOW. */
}
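+
+ /* The code above builds, e.g., vec_inv = { s, s, s, s } for a
+ four-element vectype, where s is the invariant scalar extracted by
+ the BIT_FIELD_REF (illustrative). */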
- if (strided_load)
- VEC_quick_push (tree, dr_chain, new_temp);
- if (i < group_size - 1)
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt);
+
+ /* Collect vector loads and later create their permutation in
+ vect_transform_strided_load (). */
+ if (strided_load)
+ VEC_quick_push (tree, dr_chain, new_temp);
+
+ /* Store vector loads in the corresponding SLP_NODE. */
+ if (slp)
+ VEC_quick_push (tree, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
}
+ /* FORNOW: SLP with multiple types is unsupported. */
+ if (slp)
+ return true;
+
if (strided_load)
{
if (!vect_transform_strided_load (stmt, dr_chain, group_size, bsi))
return false;
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
+ VEC_free (tree, heap, dr_chain);
dr_chain = VEC_alloc (tree, heap, group_size);
}
else
}
}
+ if (dr_chain)
+ VEC_free (tree, heap, dr_chain);
+
return true;
}
tree operation;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
int i;
int op_type;
tree op;
if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME)
return false;
+ /* FORNOW. CHECKME. */
+ if (nested_in_vect_loop_p (loop, stmt))
+ return false;
+
operation = GIMPLE_STMT_OPERAND (stmt, 1);
op_type = TREE_OPERAND_LENGTH (operation);
for (i = 0; i < op_type; i++)
{
op = TREE_OPERAND (operation, i);
- if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt))
+ if (op && !vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "use not simple.");
if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
return false;
+ /* FORNOW: SLP not supported. */
+ if (STMT_SLP_TYPE (stmt_info))
+ return false;
+
/* FORNOW: not yet supported. */
if (STMT_VINFO_LIVE_P (stmt_info))
{
return true;
}
+
/* Function vect_transform_stmt.
Create a vectorized stmt to replace STMT, and insert it at BSI. */
-bool
-vect_transform_stmt (tree stmt, block_stmt_iterator *bsi, bool *strided_store)
+static bool
+vect_transform_stmt (tree stmt, block_stmt_iterator *bsi, bool *strided_store,
+ slp_tree slp_node)
{
bool is_store = false;
tree vec_stmt = NULL_TREE;
switch (STMT_VINFO_TYPE (stmt_info))
{
case type_demotion_vec_info_type:
+ gcc_assert (!slp_node);
done = vectorizable_type_demotion (stmt, bsi, &vec_stmt);
gcc_assert (done);
break;
case type_promotion_vec_info_type:
+ gcc_assert (!slp_node);
done = vectorizable_type_promotion (stmt, bsi, &vec_stmt);
gcc_assert (done);
break;
case type_conversion_vec_info_type:
- done = vectorizable_conversion (stmt, bsi, &vec_stmt);
+ done = vectorizable_conversion (stmt, bsi, &vec_stmt, slp_node);
gcc_assert (done);
break;
case induc_vec_info_type:
+ gcc_assert (!slp_node);
done = vectorizable_induction (stmt, bsi, &vec_stmt);
gcc_assert (done);
break;
case op_vec_info_type:
- done = vectorizable_operation (stmt, bsi, &vec_stmt);
+ done = vectorizable_operation (stmt, bsi, &vec_stmt, slp_node);
gcc_assert (done);
break;
case assignment_vec_info_type:
- done = vectorizable_assignment (stmt, bsi, &vec_stmt);
+ done = vectorizable_assignment (stmt, bsi, &vec_stmt, slp_node);
gcc_assert (done);
break;
case load_vec_info_type:
- done = vectorizable_load (stmt, bsi, &vec_stmt);
+ done = vectorizable_load (stmt, bsi, &vec_stmt, slp_node);
gcc_assert (done);
break;
case store_vec_info_type:
- done = vectorizable_store (stmt, bsi, &vec_stmt);
+ done = vectorizable_store (stmt, bsi, &vec_stmt, slp_node);
gcc_assert (done);
- if (DR_GROUP_FIRST_DR (stmt_info))
+ if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
{
/* In case of interleaving, the whole chain is vectorized when the
last store in the chain is reached. Store stmts before the last
break;
case condition_vec_info_type:
+ gcc_assert (!slp_node);
done = vectorizable_condition (stmt, bsi, &vec_stmt);
gcc_assert (done);
break;
case call_vec_info_type:
+ gcc_assert (!slp_node);
done = vectorizable_call (stmt, bsi, &vec_stmt);
break;
case reduc_vec_info_type:
+ gcc_assert (!slp_node);
done = vectorizable_reduction (stmt, bsi, &vec_stmt);
gcc_assert (done);
break;
}
}
+/* Return the more conservative of the min_profitable_iters returned
+ by the cost model and the user-specified threshold, if provided. */
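+
+/* E.g., with PARAM_MIN_VECT_LOOP_BOUND == 2 and a vectorization factor
+ of 4, MIN_SCALAR_LOOP_BOUND is 2 * 4 - 1 == 7; a cost-model estimate of
+ 10 profitable iterations then yields a threshold of 10 (illustrative
+ numbers). */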
+
+static unsigned int
+conservative_cost_threshold (loop_vec_info loop_vinfo,
+ int min_profitable_iters)
+{
+ unsigned int th;
+ int min_scalar_loop_bound;
+
+ min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
+ * LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 1);
+
+ /* Use the cost model only if it is more conservative than the
+ user-specified threshold. */
+ th = (unsigned) min_scalar_loop_bound;
+ if (min_profitable_iters
+ && (!min_scalar_loop_bound
+ || min_profitable_iters > min_scalar_loop_bound))
+ th = (unsigned) min_profitable_iters;
+
+ if (th && vect_print_dump_info (REPORT_COST))
+ fprintf (vect_dump, "Vectorization may not be profitable.");
+
+ return th;
+}
/* Function vect_do_peeling_for_loop_bound
edge update_e;
basic_block preheader;
int loop_num;
- unsigned int th;
- int min_scalar_loop_bound;
+ bool check_profitability = false;
+ unsigned int th = 0;
int min_profitable_iters;
if (vect_print_dump_info (REPORT_DETAILS))
loop_num = loop->num;
- /* Analyze cost to set threshhold for vectorized loop. */
- min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);
- min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND))
- * LOOP_VINFO_VECT_FACTOR (loop_vinfo);
-
- /* Use the cost model only if it is more conservative than user specified
- threshold. */
+ /* If the cost-model check was not done during versioning or during
+ peeling for alignment, do it now. */
+ if (!VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
+ && !VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))
+ && !LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
+ {
+ check_profitability = true;
- th = (unsigned) min_scalar_loop_bound;
- if (min_profitable_iters
- && (!min_scalar_loop_bound
- || min_profitable_iters > min_scalar_loop_bound))
- th = (unsigned) min_profitable_iters;
+ /* Get profitability threshold for vectorized loop. */
+ min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);
- if (min_profitable_iters
- && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
- && vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "vectorization may not be profitable.");
+ th = conservative_cost_threshold (loop_vinfo,
+ min_profitable_iters);
+ }
new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
ratio_mult_vf_name, ni_name, false,
- th);
+ th, check_profitability);
gcc_assert (new_loop);
gcc_assert (loop_num == loop->num);
#ifdef ENABLE_CHECKING
int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
int nelements = TYPE_VECTOR_SUBPARTS (vectype);
- if (DR_GROUP_FIRST_DR (stmt_info))
+ if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
{
/* For interleaved access element size must be multiplied by the size of
the interleaved group. */
else
{
tree new_stmts = NULL_TREE;
- tree start_addr =
- vect_create_addr_base_for_vector_ref (dr_stmt, &new_stmts, NULL_TREE);
+ tree start_addr = vect_create_addr_base_for_vector_ref (dr_stmt,
+ &new_stmts, NULL_TREE, loop);
tree ptr_type = TREE_TYPE (start_addr);
tree size = TYPE_SIZE (ptr_type);
tree type = lang_hooks.types.type_for_size (tree_low_cst (size, 1), 1);
tree niters_of_prolog_loop, ni_name;
tree n_iters;
struct loop *new_loop;
+ bool check_profitability = false;
+ unsigned int th = 0;
+ int min_profitable_iters;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vect_do_peeling_for_alignment ===");
ni_name = vect_build_loop_niters (loop_vinfo);
niters_of_prolog_loop = vect_gen_niters_for_prolog_loop (loop_vinfo, ni_name);
+
+ /* If the cost-model check was not done during versioning, do it now. */
+ if (!VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
+ && !VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
+ {
+ check_profitability = true;
+
+ /* Get profitability threshold for vectorized loop. */
+ min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);
+
+ th = conservative_cost_threshold (loop_vinfo,
+ min_profitable_iters);
+ }
+
/* Peel the prolog loop and iterate it niters_of_prolog_loop. */
- new_loop =
- slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop),
- niters_of_prolog_loop, ni_name, true, 0);
+ new_loop =
+ slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop),
+ niters_of_prolog_loop, ni_name, true,
+ th, check_profitability);
+
gcc_assert (new_loop);
#ifdef ENABLE_CHECKING
slpeel_verify_cfg_after_peeling (new_loop, loop);
checked at runtime.
Input:
+ COND_EXPR - input conditional expression. New conditions will be chained
+ with a logical AND operation.
LOOP_VINFO - two fields of the loop information are used.
LOOP_VINFO_PTR_MASK is the mask used to check the alignment.
LOOP_VINFO_MAY_MISALIGN_STMTS contains the refs to be checked.
test can be done as a&(n-1) == 0. For example, for 16
byte vectors the test is a&0xf == 0. */
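+
+/* E.g., for two potentially misaligned addresses a1 and a2 and 16 byte
+ vectors, the emitted runtime test is ((a1 | a2) & 0xf) == 0
+ (illustrative). */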
-static tree
+static void
vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
+ tree *cond_expr,
tree *cond_expr_stmt_list)
{
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
VEC(tree,heap) *may_misalign_stmts
= LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
tree ref_stmt, tmp;
tree or_tmp_name = NULL_TREE;
tree and_tmp, and_tmp_name, and_stmt;
tree ptrsize_zero;
+ tree part_cond_expr;
/* Check that mask is one less than a power of 2, i.e., mask is
all zeros followed by all ones. */
/* create: addr_tmp = (int)(address_of_first_vector) */
addr_base = vect_create_addr_base_for_vector_ref (ref_stmt,
- &new_stmt_list,
- NULL_TREE);
+ &new_stmt_list, NULL_TREE, loop);
if (new_stmt_list != NULL_TREE)
append_to_statement_list_force (new_stmt_list, cond_expr_stmt_list);
/* Make and_tmp the left operand of the conditional test against zero.
if and_tmp has a nonzero bit then some address is unaligned. */
ptrsize_zero = build_int_cst (int_ptrsize_type, 0);
- return build2 (EQ_EXPR, boolean_type_node,
- and_tmp_name, ptrsize_zero);
+ part_cond_expr = fold_build2 (EQ_EXPR, boolean_type_node,
+ and_tmp_name, ptrsize_zero);
+ if (*cond_expr)
+ *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
+ *cond_expr, part_cond_expr);
+ else
+ *cond_expr = part_cond_expr;
+}
+
+/* Function vect_vfa_segment_size.
+
+ Create an expression that computes the size of the segment
+ that will be accessed for a data reference. The function takes into
+ account that realignment loads may access one more vector.
+
+ Input:
+ DR: The data reference.
+ VECT_FACTOR: vectorization factor.
+
+ Return an expression whose value is the size of the segment which will
+ be accessed by DR. */
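+
+/* E.g., with DR_STEP == 4 bytes and VECT_FACTOR == 4 the segment spans
+ 4 * 4 == 16 bytes; under dr_explicit_realign_optimized with a 16-byte
+ vectype one extra vector is added, giving 32 bytes (illustrative
+ numbers). */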
+
+static tree
+vect_vfa_segment_size (struct data_reference *dr, tree vect_factor)
+{
+ tree segment_length = fold_build2 (MULT_EXPR, integer_type_node,
+ DR_STEP (dr), vect_factor);
+
+ if (vect_supportable_dr_alignment (dr) == dr_explicit_realign_optimized)
+ {
+ tree vector_size = TYPE_SIZE_UNIT
+ (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
+
+ segment_length = fold_build2 (PLUS_EXPR, integer_type_node,
+ segment_length, vector_size);
+ }
+ return fold_convert (sizetype, segment_length);
+}
+
+/* Function vect_create_cond_for_alias_checks.
+
+ Create a conditional expression that represents the run-time checks for
+ overlapping of address ranges represented by a list of data references
+ relations passed as input.
+
+ Input:
+ COND_EXPR - input conditional expression. New conditions will be chained
+ with a logical AND operation.
+ LOOP_VINFO - field LOOP_VINFO_MAY_ALIAS_STMTS contains the list of ddrs
+ to be checked.
+
+ Output:
+ COND_EXPR - conditional expression.
+ COND_EXPR_STMT_LIST - statements needed to construct the conditional
+ expression.
+
+ The resulting conditional expression is used in the if statement that
+ controls which version of the loop gets executed at runtime. */
+
+static void
+vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
+ tree * cond_expr,
+ tree * cond_expr_stmt_list)
+{
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ VEC (ddr_p, heap) * may_alias_ddrs =
+ LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
+ tree vect_factor =
+ build_int_cst (integer_type_node, LOOP_VINFO_VECT_FACTOR (loop_vinfo));
+
+ ddr_p ddr;
+ unsigned int i;
+ tree part_cond_expr;
+
+ /* Create the expression
+ (((store_ptr_0 + store_segment_length_0) < load_ptr_0)
+ || ((load_ptr_0 + load_segment_length_0) < store_ptr_0))
+ &&
+ ...
+ &&
+ (((store_ptr_n + store_segment_length_n) < load_ptr_n)
+ || ((load_ptr_n + load_segment_length_n) < store_ptr_n)) */
+
+ if (VEC_empty (ddr_p, may_alias_ddrs))
+ return;
+
+ for (i = 0; VEC_iterate (ddr_p, may_alias_ddrs, i, ddr); i++)
+ {
+ struct data_reference *dr_a, *dr_b;
+ tree dr_group_first_a, dr_group_first_b;
+ tree addr_base_a, addr_base_b;
+ tree segment_length_a, segment_length_b;
+ tree stmt_a, stmt_b;
+
+ dr_a = DDR_A (ddr);
+ stmt_a = DR_STMT (DDR_A (ddr));
+ dr_group_first_a = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_a));
+ if (dr_group_first_a)
+ {
+ stmt_a = dr_group_first_a;
+ dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
+ }
+
+ dr_b = DDR_B (ddr);
+ stmt_b = DR_STMT (DDR_B (ddr));
+ dr_group_first_b = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_b));
+ if (dr_group_first_b)
+ {
+ stmt_b = dr_group_first_b;
+ dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
+ }
+
+ addr_base_a =
+ vect_create_addr_base_for_vector_ref (stmt_a, cond_expr_stmt_list,
+ NULL_TREE, loop);
+ addr_base_b =
+ vect_create_addr_base_for_vector_ref (stmt_b, cond_expr_stmt_list,
+ NULL_TREE, loop);
+
+ segment_length_a = vect_vfa_segment_size (dr_a, vect_factor);
+ segment_length_b = vect_vfa_segment_size (dr_b, vect_factor);
+
+ if (vect_print_dump_info (REPORT_DR_DETAILS))
+ {
+ fprintf (vect_dump,
+ "create runtime check for data references ");
+ print_generic_expr (vect_dump, DR_REF (dr_a), TDF_SLIM);
+ fprintf (vect_dump, " and ");
+ print_generic_expr (vect_dump, DR_REF (dr_b), TDF_SLIM);
+ }
+
+ part_cond_expr =
+ fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
+ fold_build2 (LT_EXPR, boolean_type_node,
+ fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (addr_base_a),
+ addr_base_a,
+ segment_length_a),
+ addr_base_b),
+ fold_build2 (LT_EXPR, boolean_type_node,
+ fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (addr_base_b),
+ addr_base_b,
+ segment_length_b),
+ addr_base_a));
+
+ if (*cond_expr)
+ *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
+ *cond_expr, part_cond_expr);
+ else
+ *cond_expr = part_cond_expr;
+ }
+ if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
+ fprintf (vect_dump, "created %u versioning-for-alias checks.\n",
+ VEC_length (ddr_p, may_alias_ddrs));
+}
+
+/* Function vect_loop_versioning.
+
+ If the loop has data references that may or may not be aligned and/or
+ has data reference relations whose independence was not proven, then
+ two versions of the loop need to be generated, one which is vectorized
+ and one which isn't. A test is then generated to control which of the
+ loops is executed. The test checks for the alignment of all of the
+ data references that may or may not be aligned. An additional
+ sequence of runtime tests is generated for each pair of DDRs whose
+ independence was not proven. The vectorized version of the loop is
+ executed only if both the alias and the alignment tests pass.
+
+ The test generated to check which version of the loop is executed
+ is also modified to check the profitability threshold indicated by
+ the cost model. */
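+
+/* Schematically, the guard generated below is (illustrative):
+
+ if (scalar_loop_iters > th
+ && <alignment checks pass>
+ && <alias checks pass>)
+ <execute the vectorized loop>
+ else
+ <execute the scalar loop> */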
+
+static void
+vect_loop_versioning (loop_vec_info loop_vinfo)
+{
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ struct loop *nloop;
+ tree cond_expr = NULL_TREE;
+ tree cond_expr_stmt_list = NULL_TREE;
+ basic_block condition_bb;
+ block_stmt_iterator cond_exp_bsi;
+ basic_block merge_bb;
+ basic_block new_exit_bb;
+ edge new_exit_e, e;
+ tree orig_phi, new_phi, arg;
+ unsigned prob = 4 * REG_BR_PROB_BASE / 5;
+ tree gimplify_stmt_list;
+ tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
+ int min_profitable_iters = 0;
+ unsigned int th;
+
+ /* Get profitability threshold for vectorized loop. */
+ min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);
+
+ th = conservative_cost_threshold (loop_vinfo,
+ min_profitable_iters);
+
+ cond_expr =
+ build2 (GT_EXPR, boolean_type_node, scalar_loop_iters,
+ build_int_cst (TREE_TYPE (scalar_loop_iters), th));
+
+ cond_expr = force_gimple_operand (cond_expr, &cond_expr_stmt_list,
+ false, NULL_TREE);
+
+ if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)))
+ vect_create_cond_for_align_checks (loop_vinfo, &cond_expr,
+ &cond_expr_stmt_list);
+
+ if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
+ vect_create_cond_for_alias_checks (loop_vinfo, &cond_expr,
+ &cond_expr_stmt_list);
+
+ cond_expr =
+ fold_build2 (NE_EXPR, boolean_type_node, cond_expr, integer_zero_node);
+ cond_expr =
+ force_gimple_operand (cond_expr, &gimplify_stmt_list, true,
+ NULL_TREE);
+ append_to_statement_list (gimplify_stmt_list, &cond_expr_stmt_list);
+
+ initialize_original_copy_tables ();
+ nloop = loop_version (loop, cond_expr, &condition_bb,
+ prob, prob, REG_BR_PROB_BASE - prob, true);
+ free_original_copy_tables ();
+
+ /* Loop versioning violates an assumption we try to maintain during
+ vectorization - that the loop exit block has a single predecessor.
+ After versioning, the exit block of both loop versions is the same
+ basic block (i.e. it has two predecessors). Just in order to simplify
+ following transformations in the vectorizer, we fix this situation
+ here by adding a new (empty) block on the exit-edge of the loop,
+ with the proper loop-exit phis to maintain loop-closed-form. */
+
+ merge_bb = single_exit (loop)->dest;
+ gcc_assert (EDGE_COUNT (merge_bb->preds) == 2);
+ new_exit_bb = split_edge (single_exit (loop));
+ new_exit_e = single_exit (loop);
+ e = EDGE_SUCC (new_exit_bb, 0);
+
+ for (orig_phi = phi_nodes (merge_bb); orig_phi;
+ orig_phi = PHI_CHAIN (orig_phi))
+ {
+ new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
+ new_exit_bb);
+ arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
+ add_phi_arg (new_phi, arg, new_exit_e);
+ SET_PHI_ARG_DEF (orig_phi, e->dest_idx, PHI_RESULT (new_phi));
+ }
+
+ /* End loop-exit-fixes after versioning. */
+
+ update_ssa (TODO_update_ssa);
+ if (cond_expr_stmt_list)
+ {
+ cond_exp_bsi = bsi_last (condition_bb);
+ bsi_insert_before (&cond_exp_bsi, cond_expr_stmt_list, BSI_SAME_STMT);
+ }
+}
+
+/* Remove a group of stores (for SLP or interleaving) and free their
+ stmt_vec_info. */
+
+static void
+vect_remove_stores (tree first_stmt)
+{
+ tree next = first_stmt;
+ tree tmp;
+ block_stmt_iterator next_si;
+
+ while (next)
+ {
+ /* Free the attached stmt_vec_info and remove the stmt. */
+ next_si = bsi_for_stmt (next);
+ bsi_remove (&next_si, true);
+ tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
+ free_stmt_vec_info (next);
+ next = tmp;
+ }
+}
+
+
+/* Vectorize SLP instance tree in postorder. */
+
+static bool
+vect_schedule_slp_instance (slp_tree node, unsigned int vec_stmts_size)
+{
+ tree stmt;
+ bool strided_store, is_store;
+ block_stmt_iterator si;
+ stmt_vec_info stmt_info;
+
+ if (!node)
+ return false;
+
+ vect_schedule_slp_instance (SLP_TREE_LEFT (node), vec_stmts_size);
+ vect_schedule_slp_instance (SLP_TREE_RIGHT (node), vec_stmts_size);
+
+ stmt = VEC_index (tree, SLP_TREE_SCALAR_STMTS (node), 0);
+ stmt_info = vinfo_for_stmt (stmt);
+ SLP_TREE_VEC_STMTS (node) = VEC_alloc (tree, heap, vec_stmts_size);
+ SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
+
+ if (vect_print_dump_info (REPORT_DETAILS))
+ {
+ fprintf (vect_dump, "------>vectorizing SLP node starting from: ");
+ print_generic_expr (vect_dump, stmt, TDF_SLIM);
+ }
+
+ si = bsi_for_stmt (stmt);
+ is_store = vect_transform_stmt (stmt, &si, &strided_store, node);
+ if (is_store)
+ {
+ if (DR_GROUP_FIRST_DR (stmt_info))
+ /* If IS_STORE is TRUE, the vectorization of the
+ interleaving chain was completed - free all the stores in
+ the chain. */
+ vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
+ else
+ /* FORNOW: SLP originates only from strided stores. */
+ gcc_unreachable ();
+
+ return true;
+ }
+
+ /* FORNOW: SLP originates only from strided stores. */
+ return false;
}
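+
+/* Schedule all the SLP instances of LOOP_VINFO. NUNITS is the number of
+ elements in a vector. Return TRUE if the stmt transformed last was a
+ store. */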
+static bool
+vect_schedule_slp (loop_vec_info loop_vinfo, unsigned int nunits)
+{
+ VEC (slp_instance, heap) *slp_instances =
+ LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
+ slp_instance instance;
+ unsigned int vec_stmts_size;
+ unsigned int group_size, i;
+ unsigned int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ bool is_store = false;
+
+ for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
+ {
+ group_size = SLP_INSTANCE_GROUP_SIZE (instance);
+ /* For each SLP instance calculate the number of vector stmts to be
+ created for the scalar stmts in each node of the SLP tree. The number
+ of vector elements in one vector iteration is the number of scalar
+ elements in one scalar iteration (GROUP_SIZE) multiplied by VF and
+ divided by the vector size (NUNITS). */
+ vec_stmts_size = vectorization_factor * group_size / nunits;
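+
+ /* E.g., GROUP_SIZE == 2, VF == 4 and NUNITS == 4 give
+ VEC_STMTS_SIZE == 2 * 4 / 4 == 2 vector stmts per node
+ (illustrative numbers). */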
+
+ /* Schedule the tree of INSTANCE. */
+ is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
+ vec_stmts_size);
+
+ if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS)
+ || vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
+ fprintf (vect_dump, "vectorizing stmts using SLP.");
+ }
+
+ return is_store;
+}
+
/* Function vect_transform_loop.
The analysis phase has determined that the loop is vectorizable.
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
- block_stmt_iterator si, next_si;
+ block_stmt_iterator si;
int i;
tree ratio = NULL;
int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
bool strided_store;
+ bool slp_scheduled = false;
+ unsigned int nunits;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vec_transform_loop ===");
- /* If the loop has data references that may or may not be aligned then
- two versions of the loop need to be generated, one which is vectorized
- and one which isn't. A test is then generated to control which of the
- loops is executed. The test checks for the alignment of all of the
- data references that may or may not be aligned. */
-
- if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)))
- {
- struct loop *nloop;
- tree cond_expr;
- tree cond_expr_stmt_list = NULL_TREE;
- basic_block condition_bb;
- block_stmt_iterator cond_exp_bsi;
- basic_block merge_bb;
- basic_block new_exit_bb;
- edge new_exit_e, e;
- tree orig_phi, new_phi, arg;
- unsigned prob = 4 * REG_BR_PROB_BASE / 5;
-
- cond_expr = vect_create_cond_for_align_checks (loop_vinfo,
- &cond_expr_stmt_list);
- initialize_original_copy_tables ();
- nloop = loop_version (loop, cond_expr, &condition_bb,
- prob, prob, REG_BR_PROB_BASE - prob, true);
- free_original_copy_tables();
-
- /** Loop versioning violates an assumption we try to maintain during
- vectorization - that the loop exit block has a single predecessor.
- After versioning, the exit block of both loop versions is the same
- basic block (i.e. it has two predecessors). Just in order to simplify
- following transformations in the vectorizer, we fix this situation
- here by adding a new (empty) block on the exit-edge of the loop,
- with the proper loop-exit phis to maintain loop-closed-form. **/
-
- merge_bb = single_exit (loop)->dest;
- gcc_assert (EDGE_COUNT (merge_bb->preds) == 2);
- new_exit_bb = split_edge (single_exit (loop));
- new_exit_e = single_exit (loop);
- e = EDGE_SUCC (new_exit_bb, 0);
-
- for (orig_phi = phi_nodes (merge_bb); orig_phi;
- orig_phi = PHI_CHAIN (orig_phi))
- {
- new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
- new_exit_bb);
- arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
- add_phi_arg (new_phi, arg, new_exit_e);
- SET_PHI_ARG_DEF (orig_phi, e->dest_idx, PHI_RESULT (new_phi));
- }
-
- /** end loop-exit-fixes after versioning **/
-
- update_ssa (TODO_update_ssa);
- cond_exp_bsi = bsi_last (condition_bb);
- bsi_insert_before (&cond_exp_bsi, cond_expr_stmt_list, BSI_SAME_STMT);
- }
+ if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
+ || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
+ vect_loop_versioning (loop_vinfo);
/* CHECKME: we wouldn't need this if we called update_ssa once
for all loops. */
stmt_info = vinfo_for_stmt (phi);
if (!stmt_info)
continue;
+
if (!STMT_VINFO_RELEVANT_P (stmt_info)
&& !STMT_VINFO_LIVE_P (stmt_info))
continue;
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "transform phi.");
- vect_transform_stmt (phi, NULL, NULL);
+ vect_transform_stmt (phi, NULL, NULL, NULL);
}
}
fprintf (vect_dump, "------>vectorizing statement: ");
print_generic_expr (vect_dump, stmt, TDF_SLIM);
}
+
stmt_info = vinfo_for_stmt (stmt);
- gcc_assert (stmt_info);
+
+ /* vector stmts created in the outer-loop during vectorization of
+ stmts in an inner-loop may not have a stmt_info, and do not
+ need to be vectorized. */
+ if (!stmt_info)
+ {
+ bsi_next (&si);
+ continue;
+ }
+
if (!STMT_VINFO_RELEVANT_P (stmt_info)
&& !STMT_VINFO_LIVE_P (stmt_info))
{
}
gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
- if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
- != (unsigned HOST_WIDE_INT) vectorization_factor)
- && vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "multiple-types.");
+ nunits =
+ (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
+ if (!STMT_SLP_TYPE (stmt_info)
+ && nunits != (unsigned int) vectorization_factor
+ && vect_print_dump_info (REPORT_DETAILS))
+ /* For SLP, VF is set according to the unrolling factor, and not to
+ the vector size; hence this print is not valid for SLP. */
+ fprintf (vect_dump, "multiple-types.");
+
+ /* SLP. Schedule all the SLP instances when the first SLP stmt is
+ reached. */
+ if (STMT_SLP_TYPE (stmt_info))
+ {
+ if (!slp_scheduled)
+ {
+ slp_scheduled = true;
+
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "=== scheduling SLP instances ===");
+
+ is_store = vect_schedule_slp (loop_vinfo, nunits);
+
+ /* IS_STORE is true if STMT is a store. Stores cannot be of
+ hybrid SLP type. They are removed in
+ vect_schedule_slp_instance and their vinfo is destroyed. */
+ if (is_store)
+ {
+ bsi_next (&si);
+ continue;
+ }
+ }
+ /* Hybrid SLP stmts must be vectorized in addition to SLP. */
+ if (PURE_SLP_STMT (stmt_info))
+ {
+ bsi_next (&si);
+ continue;
+ }
+ }
+
/* -------- vectorize statement ------------ */
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "transform statement.");
strided_store = false;
- is_store = vect_transform_stmt (stmt, &si, &strided_store);
+ is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL);
if (is_store)
{
- stmt_ann_t ann;
- if (DR_GROUP_FIRST_DR (stmt_info))
+ if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
{
/* Interleaving. If IS_STORE is TRUE, the vectorization of the
interleaving chain was completed - free all the stores in
the chain. */
- tree next = DR_GROUP_FIRST_DR (stmt_info);
- tree tmp;
- stmt_vec_info next_stmt_info;
-
- while (next)
- {
- next_si = bsi_for_stmt (next);
- next_stmt_info = vinfo_for_stmt (next);
- /* Free the attached stmt_vec_info and remove the stmt. */
- ann = stmt_ann (next);
- tmp = DR_GROUP_NEXT_DR (next_stmt_info);
- free (next_stmt_info);
- set_stmt_info (ann, NULL);
- bsi_remove (&next_si, true);
- next = tmp;
- }
+ vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
bsi_remove (&si, true);
continue;
}
else
{
/* Free the attached stmt_vec_info and remove the stmt. */
- ann = stmt_ann (stmt);
- free (stmt_info);
- set_stmt_info (ann, NULL);
+ free_stmt_vec_info (stmt);
bsi_remove (&si, true);
continue;
}
if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
fprintf (vect_dump, "LOOP VECTORIZED.");
+ if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
+ fprintf (vect_dump, "OUTER LOOP VECTORIZED.");
}