diff --git a/gcc/tree-vect-transform.c b/gcc/tree-vect-transform.c
index d9152f95a6b..2db01676bdb 100644
--- a/gcc/tree-vect-transform.c
+++ b/gcc/tree-vect-transform.c
@@ -1,12 +1,12 @@
 /* Transformation Utilities for Loop Vectorization.
-   Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
    Contributed by Dorit Naishlos <dorit@il.ibm.com>
 
 This file is part of GCC.
 
 GCC is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
 version.
 
 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
@@ -15,9 +15,8 @@ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 for more details.
 
 You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING.  If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA.  */
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
 
 #include "config.h"
 #include "system.h"
@@ -47,21 +46,22 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
 #include "real.h"
 
 /* Utility functions for the code transformation.  */
-static bool vect_transform_stmt (tree, block_stmt_iterator *, bool *);
+static bool vect_transform_stmt (gimple, gimple_stmt_iterator *, bool *,
+                                 slp_tree, slp_instance);
 static tree vect_create_destination_var (tree, tree);
 static tree vect_create_data_ref_ptr
-  (tree, block_stmt_iterator *, tree, tree *, tree *, bool, tree);
-static tree vect_create_addr_base_for_vector_ref (tree, tree *, tree);
-static tree vect_setup_realignment (tree, block_stmt_iterator *, tree *);
+  (gimple, struct loop*, tree, tree *, gimple *, bool, bool *, tree);
+static tree vect_create_addr_base_for_vector_ref
+  (gimple, gimple_seq *, tree, struct loop *);
 static tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
-static tree vect_get_vec_def_for_operand (tree, tree, tree *);
-static tree vect_init_vector (tree, tree, tree);
+static tree vect_get_vec_def_for_operand (tree, gimple, tree *);
+static tree vect_init_vector (gimple, tree, tree, gimple_stmt_iterator *);
 static void vect_finish_stmt_generation
-  (tree stmt, tree vec_stmt, block_stmt_iterator *bsi);
+  (gimple stmt, gimple vec_stmt, gimple_stmt_iterator *);
 static bool vect_is_simple_cond (tree, loop_vec_info);
-static void update_vuses_to_preheader (tree, struct loop*);
-static void vect_create_epilog_for_reduction (tree, tree, enum tree_code, tree);
-static tree get_initial_def_for_reduction (tree, tree, tree *);
+static void vect_create_epilog_for_reduction
+  (tree, gimple, int, enum tree_code, gimple);
+static tree get_initial_def_for_reduction (gimple, tree, tree *);
 
 /* Utility function dealing with loop peeling (not peeling itself).
*/ static void vect_generate_tmps_on_preheader @@ -74,6 +74,34 @@ static void vect_update_inits_of_drs (loop_vec_info, tree); static int vect_min_worthwhile_factor (enum tree_code); +static int +cost_for_stmt (gimple stmt) +{ + stmt_vec_info stmt_info = vinfo_for_stmt (stmt); + + switch (STMT_VINFO_TYPE (stmt_info)) + { + case load_vec_info_type: + return TARG_SCALAR_LOAD_COST; + case store_vec_info_type: + return TARG_SCALAR_STORE_COST; + case op_vec_info_type: + case condition_vec_info_type: + case assignment_vec_info_type: + case reduc_vec_info_type: + case induc_vec_info_type: + case type_promotion_vec_info_type: + case type_demotion_vec_info_type: + case type_conversion_vec_info_type: + case call_vec_info_type: + return TARG_SCALAR_STMT_COST; + case undef_vec_info_type: + default: + gcc_unreachable (); + } +} + + /* Function vect_estimate_min_profitable_iters Return the number of iterations required for the vector version of the @@ -93,54 +121,59 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo) int vec_inside_cost = 0; int vec_outside_cost = 0; int scalar_single_iter_cost = 0; + int scalar_outside_cost = 0; + bool runtime_test = false; int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes; + int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo); + int peel_guard_costs = 0; + int innerloop_iters = 0, factor; + VEC (slp_instance, heap) *slp_instances; + slp_instance instance; /* Cost model disabled. */ if (!flag_vect_cost_model) { - if (vect_print_dump_info (REPORT_DETAILS)) + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "cost model disabled."); return 0; } - /* Requires loop versioning tests to handle misalignment. - FIXME: Make cost depend on number of stmts in may_misalign list. */ + /* If the number of iterations is unknown, or the + peeling-for-misalignment amount is unknown, we will have to generate + a runtime test to test the loop count against the threshold. */ + if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) + || (byte_misalign < 0)) + runtime_test = true; + + /* Requires loop versioning tests to handle misalignment. */ - if (LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) + if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))) { - vec_outside_cost += TARG_COND_BRANCH_COST; - if (vect_print_dump_info (REPORT_DETAILS)) + /* FIXME: Make cost depend on complexity of individual check. */ + vec_outside_cost += + VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)); + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "cost model: Adding cost of checks for loop " - "versioning.\n"); + "versioning to treat misalignment.\n"); } - /* Requires a prologue loop when peeling to handle misalignment. Add cost of - two guards, one for the peeled loop and one for the vector loop. */ - - peel_iters_prologue = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo); - if (peel_iters_prologue) + if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) { - vec_outside_cost += 2 * TARG_COND_BRANCH_COST; - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "cost model: Adding cost of checks for " - "prologue.\n"); + /* FIXME: Make cost depend on complexity of individual check. 
*/ + vec_outside_cost += + VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)); + if (vect_print_dump_info (REPORT_COST)) + fprintf (vect_dump, "cost model: Adding cost of checks for loop " + "versioning aliasing.\n"); } - /* Requires an epilogue loop to finish up remaining iterations after vector - loop. Add cost of two guards, one for the peeled loop and one for the - vector loop. */ - - if ((peel_iters_prologue < 0) - || !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) - || LOOP_VINFO_INT_NITERS (loop_vinfo) % vf) + if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) + || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) { - vec_outside_cost += 2 * TARG_COND_BRANCH_COST; - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "cost model : Adding cost of checks for " - "epilogue.\n"); + vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST; } /* Count statements in scalar loop. Using this as scalar cost for a single @@ -151,95 +184,217 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo) TODO: Consider assigning different costs to different scalar statements. */ + /* FORNOW. */ + if (loop->inner) + innerloop_iters = 50; /* FIXME */ + for (i = 0; i < nbbs; i++) { - block_stmt_iterator si; + gimple_stmt_iterator si; basic_block bb = bbs[i]; - for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si)) - { - tree stmt = bsi_stmt (si); - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); - if (!STMT_VINFO_RELEVANT_P (stmt_info) - && !STMT_VINFO_LIVE_P (stmt_info)) - continue; - scalar_single_iter_cost++; - vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info); - vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info); - } + if (bb->loop_father == loop->inner) + factor = innerloop_iters; + else + factor = 1; + + for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) + { + gimple stmt = gsi_stmt (si); + stmt_vec_info stmt_info = vinfo_for_stmt (stmt); + /* Skip stmts that are not vectorized inside the loop. */ + if (!STMT_VINFO_RELEVANT_P (stmt_info) + && (!STMT_VINFO_LIVE_P (stmt_info) + || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def)) + continue; + scalar_single_iter_cost += cost_for_stmt (stmt) * factor; + vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor; + /* FIXME: for stmts in the inner-loop in outer-loop vectorization, + some of the "outside" costs are generated inside the outer-loop. */ + vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info); + } } /* Add additional cost for the peeled instructions in prologue and epilogue loop. - FORNOW: If we dont know the value of peel_iters for prologue or epilogue - at compile-time - we assume the worst. + FORNOW: If we don't know the value of peel_iters for prologue or epilogue + at compile-time - we assume it's vf/2 (the worst would be vf-1). TODO: Build an expression that represents peel_iters for prologue and epilogue to be used in a run-time test. */ - peel_iters_prologue = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo); - - if (peel_iters_prologue < 0) + if (byte_misalign < 0) { - peel_iters_prologue = vf - 1; - if (vect_print_dump_info (REPORT_DETAILS)) + peel_iters_prologue = vf/2; + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "cost model: " - "prologue peel iters set conservatively."); + "prologue peel iters set to vf/2."); /* If peeling for alignment is unknown, loop bound of main loop becomes unknown. 
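For illustration (the numbers here are assumed, not taken from the patch): with vf == 4 and unknown misalignment, both peel counts are estimated below as vf/2 == 2. If instead the misalignment is known, say byte_misalign == 8 for a V4SI access (element_size == 4, nelements == 4), the known-alignment branch below computes

      peel_iters_prologue = nelements - (byte_misalign / element_size)
                          = 4 - 8/4 = 2.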
*/ - peel_iters_epilogue = vf - 1; - if (vect_print_dump_info (REPORT_DETAILS)) + peel_iters_epilogue = vf/2; + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "cost model: " - "epilogue peel iters set conservatively because " + "epilogue peel iters set to vf/2 because " "peeling for alignment is unknown ."); + + /* If peeled iterations are unknown, count a taken branch and a not taken + branch per peeled loop. Even if scalar loop iterations are known, + vector iterations are not known since peeled prologue iterations are + not known. Hence guards remain the same. */ + peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST + + TARG_COND_NOT_TAKEN_BRANCH_COST); + } else { + if (byte_misalign) + { + struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo); + int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr)))); + tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))); + int nelements = TYPE_VECTOR_SUBPARTS (vectype); + + peel_iters_prologue = nelements - (byte_misalign / element_size); + } + else + peel_iters_prologue = 0; + if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { - peel_iters_epilogue = vf - 1; - if (vect_print_dump_info (REPORT_DETAILS)) + peel_iters_epilogue = vf/2; + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "cost model: " - "epilogue peel iters set conservatively because " + "epilogue peel iters set to vf/2 because " "loop iterations are unknown ."); + + /* If peeled iterations are known but number of scalar loop + iterations are unknown, count a taken branch per peeled loop. */ + peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST; + } else - peel_iters_epilogue = - (LOOP_VINFO_INT_NITERS (loop_vinfo) - peel_iters_prologue) - % vf; + { + int niters = LOOP_VINFO_INT_NITERS (loop_vinfo); + peel_iters_prologue = niters < peel_iters_prologue ? + niters : peel_iters_prologue; + peel_iters_epilogue = (niters - peel_iters_prologue) % vf; + } } vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost) - + (peel_iters_epilogue * scalar_single_iter_cost); + + (peel_iters_epilogue * scalar_single_iter_cost) + + peel_guard_costs; + + /* FORNOW: The scalar outside cost is incremented in one of the + following ways: + + 1. The vectorizer checks for alignment and aliasing and generates + a condition that allows dynamic vectorization. A cost model + check is ANDED with the versioning condition. Hence scalar code + path now has the added cost of the versioning check. + + if (cost > th & versioning_check) + jmp to vector code + + Hence run-time scalar is incremented by not-taken branch cost. + + 2. The vectorizer then checks if a prologue is required. If the + cost model check was not done before during versioning, it has to + be done before the prologue check. + + if (cost <= th) + prologue = scalar_iters + if (prologue == 0) + jmp to vector code + else + execute prologue + if (prologue == num_iters) + go to exit + + Hence the run-time scalar cost is incremented by a taken branch, + plus a not-taken branch, plus a taken branch cost. + + 3. The vectorizer then checks if an epilogue is required. If the + cost model check was not done before during prologue check, it + has to be done with the epilogue check. + + if (prologue == 0) + jmp to vector code + else + execute prologue + if (prologue == num_iters) + go to exit + vector code: + if ((cost <= th) | (scalar_iters-prologue-epilogue == 0)) + jmp to epilogue + + Hence the run-time scalar cost should be incremented by 2 taken + branches. 
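To make the three cases concrete (the branch costs are target hooks; the values below are assumed purely for illustration), take TARG_COND_TAKEN_BRANCH_COST == 3 and TARG_COND_NOT_TAKEN_BRANCH_COST == 1. The increments applied by the code below are then:

      case 1 (check folded into versioning):    scalar_outside_cost += 1
      case 2 (check at prologue, known niters): scalar_outside_cost += 2*3 + 1 == 7
      case 3 (check at epilogue):               scalar_outside_cost += 2*3 == 6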
+ + TODO: The back end may reorder the BBS's differently and reverse + conditions/branch directions. Change the estimates below to + something more reasonable. */ + + if (runtime_test) + { + /* Cost model check occurs at versioning. */ + if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) + || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) + scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST; + else + { + /* Cost model occurs at prologue generation. */ + if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) + scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST + + TARG_COND_NOT_TAKEN_BRANCH_COST; + /* Cost model check occurs at epilogue generation. */ + else + scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST; + } + } + + /* Add SLP costs. */ + slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo); + for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++) + { + vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance); + vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance); + } /* Calculate number of iterations required to make the vector version profitable, relative to the loop bodies only. The following condition - must hold true: ((SIC*VF)-VIC)*niters > VOC*VF, where + must hold true: + SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + where SIC = scalar iteration cost, VIC = vector iteration cost, - VOC = vector outside cost and VF = vectorization factor. */ + VOC = vector outside cost, VF = vectorization factor, + PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations + SOC = scalar outside cost for run time cost model check. */ if ((scalar_single_iter_cost * vf) > vec_inside_cost) { - if (vec_outside_cost == 0) + if (vec_outside_cost <= 0) min_profitable_iters = 1; else { - min_profitable_iters = (vec_outside_cost * vf) + min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf + - vec_inside_cost * peel_iters_prologue + - vec_inside_cost * peel_iters_epilogue) / ((scalar_single_iter_cost * vf) - vec_inside_cost); if ((scalar_single_iter_cost * vf * min_profitable_iters) <= ((vec_inside_cost * min_profitable_iters) - + (vec_outside_cost * vf))) + + ((vec_outside_cost - scalar_outside_cost) * vf))) min_profitable_iters++; } } /* vector version will never be profitable. */ else { - if (vect_print_dump_info (REPORT_DETAILS)) + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "cost model: vector iteration cost = %d " "is divisible by scalar iteration cost = %d by a factor " "greater than or equal to the vectorization factor = %d .", @@ -247,25 +402,37 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo) return -1; } - if (vect_print_dump_info (REPORT_DETAILS)) + if (vect_print_dump_info (REPORT_COST)) { fprintf (vect_dump, "Cost model analysis: \n"); fprintf (vect_dump, " Vector inside of loop cost: %d\n", vec_inside_cost); fprintf (vect_dump, " Vector outside of loop cost: %d\n", vec_outside_cost); - fprintf (vect_dump, " Scalar cost: %d\n", scalar_single_iter_cost); + fprintf (vect_dump, " Scalar iteration cost: %d\n", + scalar_single_iter_cost); + fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost); fprintf (vect_dump, " prologue iterations: %d\n", peel_iters_prologue); fprintf (vect_dump, " epilogue iterations: %d\n", peel_iters_epilogue); fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n", min_profitable_iters); - fprintf (vect_dump, " Actual minimum iters for profitability: %d\n", - min_profitable_iters < vf ? 
vf : min_profitable_iters); } - return min_profitable_iters < vf ? vf : min_profitable_iters; + min_profitable_iters = + min_profitable_iters < vf ? vf : min_profitable_iters; + + /* Because the condition we create is: + if (niters <= min_profitable_iters) + then skip the vectorized loop. */ + min_profitable_iters--; + + if (vect_print_dump_info (REPORT_COST)) + fprintf (vect_dump, " Profitability threshold = %d\n", + min_profitable_iters); + + return min_profitable_iters; } @@ -278,7 +445,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo) generated within the strip-mine loop, the initial definition before the loop, and the epilogue code that must be generated. */ -static void +static bool vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code, int ncopies) { @@ -286,63 +453,98 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code, enum tree_code code; optab optab; tree vectype; - tree orig_stmt; + gimple stmt, orig_stmt; tree reduction_op; enum machine_mode mode; - tree operation = GIMPLE_STMT_OPERAND (STMT_VINFO_STMT (stmt_info), 1); - int op_type = TREE_CODE_LENGTH (TREE_CODE (operation)); + loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + /* Cost of reduction op inside loop. */ STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST; - reduction_op = TREE_OPERAND (operation, op_type-1); + stmt = STMT_VINFO_STMT (stmt_info); + + switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))) + { + case GIMPLE_SINGLE_RHS: + gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op); + reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2); + break; + case GIMPLE_UNARY_RHS: + reduction_op = gimple_assign_rhs1 (stmt); + break; + case GIMPLE_BINARY_RHS: + reduction_op = gimple_assign_rhs2 (stmt); + break; + default: + gcc_unreachable (); + } + vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op)); + if (!vectype) + { + if (vect_print_dump_info (REPORT_COST)) + { + fprintf (vect_dump, "unsupported data-type "); + print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM); + } + return false; + } + mode = TYPE_MODE (vectype); orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info); if (!orig_stmt) orig_stmt = STMT_VINFO_STMT (stmt_info); - code = TREE_CODE (GIMPLE_STMT_OPERAND (orig_stmt, 1)); + code = gimple_assign_rhs_code (orig_stmt); /* Add in cost for initial definition. */ - outer_cost += TARG_VEC_STMT_COST; + outer_cost += TARG_SCALAR_TO_VEC_COST; /* Determine cost of epilogue code. We have a reduction operator that will reduce the vector in one statement. Also requires scalar extract. */ - if (reduc_code < NUM_TREE_CODES) - outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST; - else + if (!nested_in_vect_loop_p (loop, orig_stmt)) { - int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1); - tree bitsize = - TYPE_SIZE (TREE_TYPE ( GIMPLE_STMT_OPERAND (orig_stmt, 0))); - int element_bitsize = tree_low_cst (bitsize, 1); - int nelements = vec_size_in_bits / element_bitsize; - - optab = optab_for_tree_code (code, vectype); - - /* We have a whole vector shift available. */ - if (!VECTOR_MODE_P (mode) - || optab->handlers[mode].insn_code == CODE_FOR_nothing) - /* Final reduction via vector shifts and the reduction operator. Also - requires scalar extract. */ - outer_cost += ((exact_log2(nelements) * 2 + 1) * TARG_VEC_STMT_COST); - else - /* Use extracts and reduction op for final reduction. 
For N elements, - we have N extracts and N-1 reduction ops. */ - outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST); + if (reduc_code < NUM_TREE_CODES) + outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST; + else + { + int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1); + tree bitsize = + TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt))); + int element_bitsize = tree_low_cst (bitsize, 1); + int nelements = vec_size_in_bits / element_bitsize; + + optab = optab_for_tree_code (code, vectype, optab_default); + + /* We have a whole vector shift available. */ + if (VECTOR_MODE_P (mode) + && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing + && optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing) + /* Final reduction via vector shifts and the reduction operator. Also + requires scalar extract. */ + outer_cost += ((exact_log2(nelements) * 2) * TARG_VEC_STMT_COST + + TARG_VEC_TO_SCALAR_COST); + else + /* Use extracts and reduction op for final reduction. For N elements, + we have N extracts and N-1 reduction ops. */ + outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST); + } } STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost; - if (vect_print_dump_info (REPORT_DETAILS)) + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, " "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info), STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)); + + return true; } @@ -356,9 +558,9 @@ vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies) /* loop cost for vec_loop. */ STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST; /* prologue cost for vec_init and vec_step. */ - STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_VEC_STMT_COST; + STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST; - if (vect_print_dump_info (REPORT_DETAILS)) + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, " "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info), STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)); @@ -371,15 +573,33 @@ vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies) single op. Right now, this does not account for multiple insns that could be generated for the single vector op. We will handle that shortly. */ -static void -vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies) +void +vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies, + enum vect_def_type *dt, slp_tree slp_node) { - STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST; + int i; + int inside_cost = 0, outside_cost = 0; - if (vect_print_dump_info (REPORT_DETAILS)) + /* The SLP costs were already calculated during SLP tree build. */ + if (PURE_SLP_STMT (stmt_info)) + return; + + inside_cost = ncopies * TARG_VEC_STMT_COST; + + /* FORNOW: Assuming maximum 2 args per stmts. */ + for (i = 0; i < 2; i++) + { + if (dt[i] == vect_constant_def || dt[i] == vect_invariant_def) + outside_cost += TARG_SCALAR_TO_VEC_COST; + } + + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, " - "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info), - STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)); + "outside_cost = %d .", inside_cost, outside_cost); + + /* Set the costs either in STMT_INFO or SLP_NODE (if exists). 
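For example (the statement is made up for illustration): vectorizing a_i = b_i + 5 with ncopies == 2 yields inside_cost == 2 * TARG_VEC_STMT_COST, plus outside_cost == TARG_SCALAR_TO_VEC_COST for broadcasting the constant 5 into a vector; those two values are what gets recorded on the stmt_info or the SLP node below.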
*/ + stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost); + stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost); } @@ -392,7 +612,7 @@ vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies) static int vect_cost_strided_group_size (stmt_vec_info stmt_info) { - tree first_stmt = DR_GROUP_FIRST_DR (stmt_info); + gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info); if (first_stmt == STMT_VINFO_STMT (stmt_info)) return DR_GROUP_SIZE (stmt_info); @@ -406,14 +626,22 @@ vect_cost_strided_group_size (stmt_vec_info stmt_info) Models cost for stores. In the case of strided accesses, one access has the overhead of the strided access attributed to it. */ -static void -vect_model_store_cost (stmt_vec_info stmt_info, int ncopies) +void +vect_model_store_cost (stmt_vec_info stmt_info, int ncopies, + enum vect_def_type dt, slp_tree slp_node) { - int cost = 0; int group_size; + int inside_cost = 0, outside_cost = 0; + + /* The SLP costs were already calculated during SLP tree build. */ + if (PURE_SLP_STMT (stmt_info)) + return; + + if (dt == vect_constant_def || dt == vect_invariant_def) + outside_cost = TARG_SCALAR_TO_VEC_COST; /* Strided access? */ - if (DR_GROUP_FIRST_DR (stmt_info)) + if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node) group_size = vect_cost_strided_group_size (stmt_info); /* Not a strided access. */ else @@ -424,24 +652,25 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies) if (group_size > 1) { /* Uses a high and low interleave operation for each needed permute. */ - cost = ncopies * exact_log2(group_size) * group_size + inside_cost = ncopies * exact_log2(group_size) * group_size * TARG_VEC_STMT_COST; - if (vect_print_dump_info (REPORT_DETAILS)) + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .", group_size); } /* Costs of the stores. */ - cost += ncopies * TARG_VEC_STORE_COST; - - STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = cost; + inside_cost += ncopies * TARG_VEC_STORE_COST; - if (vect_print_dump_info (REPORT_DETAILS)) + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, " - "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info), - STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)); + "outside_cost = %d .", inside_cost, outside_cost); + + /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */ + stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost); + stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost); } @@ -452,19 +681,23 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies) accesses are supported for loads, we also account for the costs of the access scheme chosen. */ -static void -vect_model_load_cost (stmt_vec_info stmt_info, int ncopies) +void +vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node) { - int inner_cost = 0; int group_size; int alignment_support_cheme; - tree first_stmt; + gimple first_stmt; struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr; + int inside_cost = 0, outside_cost = 0; + + /* The SLP costs were already calculated during SLP tree build. */ + if (PURE_SLP_STMT (stmt_info)) + return; /* Strided accesses? 
*/ first_stmt = DR_GROUP_FIRST_DR (stmt_info); - if (first_stmt) + if (first_stmt && !slp_node) { group_size = vect_cost_strided_group_size (stmt_info); first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); @@ -483,10 +716,10 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies) if (group_size > 1) { /* Uses an even and odd extract operations for each needed permute. */ - inner_cost = ncopies * exact_log2(group_size) * group_size - * TARG_VEC_STMT_COST; + inside_cost = ncopies * exact_log2(group_size) * group_size + * TARG_VEC_STMT_COST; - if (vect_print_dump_info (REPORT_DETAILS)) + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .", group_size); @@ -497,9 +730,9 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies) { case dr_aligned: { - inner_cost += ncopies * TARG_VEC_LOAD_COST; + inside_cost += ncopies * TARG_VEC_LOAD_COST; - if (vect_print_dump_info (REPORT_DETAILS)) + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "vect_model_load_cost: aligned."); break; @@ -507,19 +740,29 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies) case dr_unaligned_supported: { /* Here, we assign an additional cost for the unaligned load. */ - inner_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST; + inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST; - if (vect_print_dump_info (REPORT_DETAILS)) + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "vect_model_load_cost: unaligned supported by " "hardware."); break; } - case dr_unaligned_software_pipeline: + case dr_explicit_realign: { - int outer_cost = 0; + inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST); - if (vect_print_dump_info (REPORT_DETAILS)) + /* FIXME: If the misalignment remains fixed across the iterations of + the containing loop, the following cost should be added to the + outside costs. */ + if (targetm.vectorize.builtin_mask_for_load) + inside_cost += TARG_VEC_STMT_COST; + + break; + } + case dr_explicit_realign_optimized: + { + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "vect_model_load_cost: unaligned software " "pipelined."); @@ -530,16 +773,14 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies) access in the group. Inside the loop, there is a load op and a realignment op. */ - if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1) + if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node) { - outer_cost = 2*TARG_VEC_STMT_COST; + outside_cost = 2*TARG_VEC_STMT_COST; if (targetm.vectorize.builtin_mask_for_load) - outer_cost += TARG_VEC_STMT_COST; + outside_cost += TARG_VEC_STMT_COST; } - - STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost; - inner_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST); + inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST); break; } @@ -547,14 +788,14 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies) default: gcc_unreachable (); } - - STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = inner_cost; - - if (vect_print_dump_info (REPORT_DETAILS)) + + if (vect_print_dump_info (REPORT_COST)) fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, " - "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info), - STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)); + "outside_cost = %d .", inside_cost, outside_cost); + /* Set the costs either in STMT_INFO or SLP_NODE (if exists). 
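As a worked instance (sizes assumed for illustration): an aligned strided load with group_size == 4 and ncopies == 1 needs exact_log2(4) * 4 == 8 extract operations plus one vector load, i.e.

      inside_cost == 8 * TARG_VEC_STMT_COST + TARG_VEC_LOAD_COST.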
*/ + stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost); + stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost); } @@ -587,7 +828,11 @@ vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name) } if (name) - new_vect_var = create_tmp_var (type, concat (prefix, name, NULL)); + { + char* tmp = concat (prefix, name, NULL); + new_vect_var = create_tmp_var (type, tmp); + free (tmp); + } else new_vect_var = create_tmp_var (type, prefix); @@ -608,6 +853,19 @@ vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name) STMT: The statement containing the data reference. NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list. OFFSET: Optional. If supplied, it is be added to the initial address. + LOOP: Specify relative to which loop-nest should the address be computed. + For example, when the dataref is in an inner-loop nested in an + outer-loop that is now being vectorized, LOOP can be either the + outer-loop, or the inner-loop. The first memory location accessed + by the following dataref ('in' points to short): + + for (i=0; iloop_father; tree data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr)); - tree base_name = build_fold_indirect_ref (data_ref_base); + tree base_name; + tree data_ref_base_var; tree vec_stmt; tree addr_base, addr_expr; - tree dest, new_stmt; + tree dest; + gimple_seq seq = NULL; tree base_offset = unshare_expr (DR_OFFSET (dr)); tree init = unshare_expr (DR_INIT (dr)); tree vect_ptr_type, addr_expr2; + tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr))); + + gcc_assert (loop); + if (loop != containing_loop) + { + loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + + gcc_assert (nested_in_vect_loop_p (loop, stmt)); + + data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info)); + base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info)); + init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info)); + } + + /* Create data_ref_base */ + base_name = build_fold_indirect_ref (data_ref_base); + data_ref_base_var = create_tmp_var (TREE_TYPE (data_ref_base), "batmp"); + add_referenced_var (data_ref_base_var); + data_ref_base = force_gimple_operand (data_ref_base, &seq, true, + data_ref_base_var); + gimple_seq_add_seq (new_stmt_list, seq); /* Create base_offset */ - base_offset = size_binop (PLUS_EXPR, base_offset, init); - base_offset = fold_convert (sizetype, base_offset); - dest = create_tmp_var (TREE_TYPE (base_offset), "base_off"); + base_offset = size_binop (PLUS_EXPR, + fold_convert (sizetype, base_offset), + fold_convert (sizetype, init)); + dest = create_tmp_var (sizetype, "base_off"); add_referenced_var (dest); - base_offset = force_gimple_operand (base_offset, &new_stmt, false, dest); - append_to_statement_list_force (new_stmt, new_stmt_list); + base_offset = force_gimple_operand (base_offset, &seq, true, dest); + gimple_seq_add_seq (new_stmt_list, seq); if (offset) { tree tmp = create_tmp_var (sizetype, "offset"); - tree step; - - /* For interleaved access step we divide STEP by the size of the - interleaving group. 
*/ - if (DR_GROUP_SIZE (stmt_info)) - step = fold_build2 (TRUNC_DIV_EXPR, TREE_TYPE (offset), DR_STEP (dr), - build_int_cst (TREE_TYPE (offset), - DR_GROUP_SIZE (stmt_info))); - else - step = DR_STEP (dr); add_referenced_var (tmp); - offset = fold_build2 (MULT_EXPR, TREE_TYPE (offset), offset, step); - base_offset = fold_build2 (PLUS_EXPR, TREE_TYPE (base_offset), + offset = fold_build2 (MULT_EXPR, sizetype, + fold_convert (sizetype, offset), step); + base_offset = fold_build2 (PLUS_EXPR, sizetype, base_offset, offset); - base_offset = force_gimple_operand (base_offset, &new_stmt, false, tmp); - append_to_statement_list_force (new_stmt, new_stmt_list); + base_offset = force_gimple_operand (base_offset, &seq, false, tmp); + gimple_seq_add_seq (new_stmt_list, seq); } - + /* base + base_offset */ - addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (data_ref_base), data_ref_base, - base_offset); + addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (data_ref_base), + data_ref_base, base_offset); vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info)); @@ -677,8 +953,8 @@ vect_create_addr_base_for_vector_ref (tree stmt, addr_expr2 = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, get_name (base_name)); add_referenced_var (addr_expr2); - vec_stmt = force_gimple_operand (vec_stmt, &new_stmt, false, addr_expr2); - append_to_statement_list_force (new_stmt, new_stmt_list); + vec_stmt = force_gimple_operand (vec_stmt, &seq, false, addr_expr2); + gimple_seq_add_seq (new_stmt_list, seq); if (vect_print_dump_info (REPORT_DETAILS)) { @@ -700,14 +976,14 @@ vect_create_addr_base_for_vector_ref (tree stmt, Input: 1. STMT: a stmt that references memory. Expected to be of the form - GIMPLE_MODIFY_STMT or - GIMPLE_MODIFY_STMT . - 2. BSI: block_stmt_iterator where new stmts can be added. + GIMPLE_ASSIGN or + GIMPLE_ASSIGN . + 2. AT_LOOP: the loop where the vector memref is to be created. 3. OFFSET (optional): an offset to be added to the initial address accessed by the data-ref in STMT. 4. ONLY_INIT: indicate if vp is to be updated in the loop, or remain pointing to the initial address. - 5. TYPE: if not NULL indicates the required type of the data-ref + 5. TYPE: if not NULL indicates the required type of the data-ref. Output: 1. Declare a new ptr to vector_type, and have it point to the base of the @@ -729,31 +1005,55 @@ vect_create_addr_base_for_vector_ref (tree stmt, Return the increment stmt that updates the pointer in PTR_INCR. - 3. Return the pointer. */ + 3. Set INV_P to true if the access pattern of the data reference in the + vectorized loop is invariant. Set it to false otherwise. + + 4. Return the pointer. 
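A typical call looks as follows (a sketch only; the variable names are invented here, the signature is the one defined below):

      dataref_ptr = vect_create_data_ref_ptr (stmt, loop, NULL_TREE,
                                              &init_addr, &ptr_incr,
                                              false, &inv_p, NULL_TREE);

i.e. no extra OFFSET, a pointer that is updated inside the loop (ONLY_INIT == false), and the vector type defaulted from STMT.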
*/ static tree -vect_create_data_ref_ptr (tree stmt, - block_stmt_iterator *bsi ATTRIBUTE_UNUSED, - tree offset, tree *initial_address, tree *ptr_incr, - bool only_init, tree type) +vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop, + tree offset, tree *initial_address, gimple *ptr_incr, + bool only_init, bool *inv_p, tree type) { tree base_name; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt); + struct loop *containing_loop = (gimple_bb (stmt))->loop_father; tree vectype = STMT_VINFO_VECTYPE (stmt_info); tree vect_ptr_type; tree vect_ptr; tree tag; tree new_temp; - tree vec_stmt; - tree new_stmt_list = NULL_TREE; - edge pe = loop_preheader_edge (loop); + gimple vec_stmt; + gimple_seq new_stmt_list = NULL; + edge pe; basic_block new_bb; tree vect_ptr_init; struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); + tree vptr; + gimple_stmt_iterator incr_gsi; + bool insert_after; + tree indx_before_incr, indx_after_incr; + gimple incr; + tree step; + + /* Check the step (evolution) of the load in LOOP, and record + whether it's invariant. */ + if (nested_in_vect_loop) + step = STMT_VINFO_DR_STEP (stmt_info); + else + step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info)); + + if (tree_int_cst_compare (step, size_zero_node) == 0) + *inv_p = true; + else + *inv_p = false; - base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr))); + /* Create an expression for the first address accessed by this load + in LOOP. */ + base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr))); if (vect_print_dump_info (REPORT_DETAILS)) { @@ -772,12 +1072,24 @@ vect_create_data_ref_ptr (tree stmt, } /** (1) Create the new vector-pointer variable: **/ - if (type) + if (type) vect_ptr_type = build_pointer_type (type); else vect_ptr_type = build_pointer_type (vectype); + + if (TREE_CODE (DR_BASE_ADDRESS (dr)) == SSA_NAME + && TYPE_RESTRICT (TREE_TYPE (DR_BASE_ADDRESS (dr)))) + vect_ptr_type = build_qualified_type (vect_ptr_type, TYPE_QUAL_RESTRICT); vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, get_name (base_name)); + if (TREE_CODE (DR_BASE_ADDRESS (dr)) == SSA_NAME + && TYPE_RESTRICT (TREE_TYPE (DR_BASE_ADDRESS (dr)))) + { + get_alias_set (base_name); + DECL_POINTER_ALIAS_SET (vect_ptr) + = DECL_POINTER_ALIAS_SET (SSA_NAME_VAR (DR_BASE_ADDRESS (dr))); + } + add_referenced_var (vect_ptr); /** (2) Add aliasing information to the new vector-pointer: @@ -793,52 +1105,123 @@ vect_create_data_ref_ptr (tree stmt, else set_symbol_mem_tag (vect_ptr, tag); - var_ann (vect_ptr)->subvars = DR_SUBVARS (dr); + /** Note: If the dataref is in an inner-loop nested in LOOP, and we are + vectorizing LOOP (i.e. outer-loop vectorization), we need to create two + def-use update cycles for the pointer: One relative to the outer-loop + (LOOP), which is what steps (3) and (4) below do. The other is relative + to the inner-loop (which is the inner-most loop containing the dataref), + and this is done be step (5) below. + + When vectorizing inner-most loops, the vectorized loop (LOOP) is also the + inner-most loop, and so steps (3),(4) work the same, and step (5) is + redundant. Steps (3),(4) create the following: + + vp0 = &base_addr; + LOOP: vp1 = phi(vp0,vp2) + ... + ... 
+ vp2 = vp1 + step + goto LOOP + + If there is an inner-loop nested in loop, then step (5) will also be + applied, and an additional update in the inner-loop will be created: + + vp0 = &base_addr; + LOOP: vp1 = phi(vp0,vp2) + ... + inner: vp3 = phi(vp1,vp4) + vp4 = vp3 + inner_step + if () goto inner + ... + vp2 = vp1 + step + if () goto LOOP */ /** (3) Calculate the initial address the vector-pointer, and set the vector-pointer to point to it before the loop: **/ /* Create: (&(base[init_val+offset]) in the loop preheader. */ + new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list, - offset); + offset, loop); pe = loop_preheader_edge (loop); - new_bb = bsi_insert_on_edge_immediate (pe, new_stmt_list); - gcc_assert (!new_bb); + if (new_stmt_list) + { + new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list); + gcc_assert (!new_bb); + } + *initial_address = new_temp; /* Create: p = (vectype *) initial_base */ - vec_stmt = fold_convert (vect_ptr_type, new_temp); - vec_stmt = build_gimple_modify_stmt (vect_ptr, vec_stmt); + vec_stmt = gimple_build_assign (vect_ptr, + fold_convert (vect_ptr_type, new_temp)); vect_ptr_init = make_ssa_name (vect_ptr, vec_stmt); - GIMPLE_STMT_OPERAND (vec_stmt, 0) = vect_ptr_init; - new_bb = bsi_insert_on_edge_immediate (pe, vec_stmt); + gimple_assign_set_lhs (vec_stmt, vect_ptr_init); + new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt); gcc_assert (!new_bb); - /** (4) Handle the updating of the vector-pointer inside the loop: **/ + /** (4) Handle the updating of the vector-pointer inside the loop. + This is needed when ONLY_INIT is false, and also when AT_LOOP + is the inner-loop nested in LOOP (during outer-loop vectorization). + **/ - if (only_init) /* No update in loop is required. */ + if (only_init && at_loop == loop) /* No update in loop is required. */ { /* Copy the points-to information if it exists. */ if (DR_PTR_INFO (dr)) duplicate_ssa_name_ptr_info (vect_ptr_init, DR_PTR_INFO (dr)); - return vect_ptr_init; + vptr = vect_ptr_init; } else { - block_stmt_iterator incr_bsi; - bool insert_after; - tree indx_before_incr, indx_after_incr; - tree incr; + /* The step of the vector pointer is the Vector Size. */ + tree step = TYPE_SIZE_UNIT (vectype); + /* One exception to the above is when the scalar step of the load in + LOOP is zero. In this case the step here is also zero. */ + if (*inv_p) + step = size_zero_node; + + standard_iv_increment_position (loop, &incr_gsi, &insert_after); - standard_iv_increment_position (loop, &incr_bsi, &insert_after); create_iv (vect_ptr_init, - fold_convert (vect_ptr_type, TYPE_SIZE_UNIT (vectype)), - NULL_TREE, loop, &incr_bsi, insert_after, + fold_convert (vect_ptr_type, step), + vect_ptr, loop, &incr_gsi, insert_after, &indx_before_incr, &indx_after_incr); - incr = bsi_stmt (incr_bsi); - set_stmt_info (stmt_ann (incr), - new_stmt_vec_info (incr, loop_vinfo)); + incr = gsi_stmt (incr_gsi); + set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo)); + + /* Copy the points-to information if it exists. 
*/ + if (DR_PTR_INFO (dr)) + { + duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr)); + duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr)); + } + merge_alias_info (vect_ptr_init, indx_before_incr); + merge_alias_info (vect_ptr_init, indx_after_incr); + if (ptr_incr) + *ptr_incr = incr; + + vptr = indx_before_incr; + } + + if (!nested_in_vect_loop || only_init) + return vptr; + + + /** (5) Handle the updating of the vector-pointer inside the inner-loop + nested in LOOP, if exists: **/ + + gcc_assert (nested_in_vect_loop); + if (!only_init) + { + standard_iv_increment_position (containing_loop, &incr_gsi, + &insert_after); + create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), vect_ptr, + containing_loop, &incr_gsi, insert_after, &indx_before_incr, + &indx_after_incr); + incr = gsi_stmt (incr_gsi); + set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo)); /* Copy the points-to information if it exists. */ if (DR_PTR_INFO (dr)) @@ -851,15 +1234,19 @@ vect_create_data_ref_ptr (tree stmt, if (ptr_incr) *ptr_incr = incr; - return indx_before_incr; + return indx_before_incr; } + else + gcc_unreachable (); } /* Function bump_vector_ptr - Increment a pointer (to a vector type) by vector-size. Connect the new - increment stmt to the existing def-use update-chain of the pointer. + Increment a pointer (to a vector type) by vector-size. If requested, + i.e. if PTR-INCR is given, then also connect the new increment stmt + to the existing def-use update-chain of the pointer, by modifying + the PTR_INCR as illustrated below: The pointer def-use update-chain before this function: DATAREF_PTR = phi (p_0, p_2) @@ -869,44 +1256,55 @@ vect_create_data_ref_ptr (tree stmt, The pointer def-use update-chain after this function: DATAREF_PTR = phi (p_0, p_2) .... - NEW_DATAREF_PTR = DATAREF_PTR + vector_size + NEW_DATAREF_PTR = DATAREF_PTR + BUMP .... PTR_INCR: p_2 = NEW_DATAREF_PTR + step Input: DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated in the loop. - PTR_INCR - the stmt that updates the pointer in each iteration of the loop. - The increment amount across iterations is also expected to be - vector_size. + PTR_INCR - optional. The stmt that updates the pointer in each iteration of + the loop. The increment amount across iterations is expected + to be vector_size. BSI - location where the new update stmt is to be placed. STMT - the original scalar memory-access stmt that is being vectorized. + BUMP - optional. The offset by which to bump the pointer. If not given, + the offset is assumed to be vector_size. Output: Return NEW_DATAREF_PTR as illustrated above. 
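For instance (a sketch; the names are illustrative, not from this patch), a load that consumes two adjacent vectors per iteration could do:

      first_vec = *dataref_ptr;
      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                     NULL_TREE);
      second_vec = *dataref_ptr;

leaving BUMP as NULL_TREE so the pointer advances by one vector_size.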
*/ static tree -bump_vector_ptr (tree dataref_ptr, tree ptr_incr, block_stmt_iterator *bsi, - tree stmt) +bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi, + gimple stmt, tree bump) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); tree vectype = STMT_VINFO_VECTYPE (stmt_info); - tree vptr_type = TREE_TYPE (dataref_ptr); tree ptr_var = SSA_NAME_VAR (dataref_ptr); tree update = TYPE_SIZE_UNIT (vectype); - tree incr_stmt; + gimple incr_stmt; ssa_op_iter iter; use_operand_p use_p; tree new_dataref_ptr; - incr_stmt = build_gimple_modify_stmt (ptr_var, - build2 (POINTER_PLUS_EXPR, vptr_type, - dataref_ptr, update)); + if (bump) + update = bump; + + incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, ptr_var, + dataref_ptr, update); new_dataref_ptr = make_ssa_name (ptr_var, incr_stmt); - GIMPLE_STMT_OPERAND (incr_stmt, 0) = new_dataref_ptr; - vect_finish_stmt_generation (stmt, incr_stmt, bsi); + gimple_assign_set_lhs (incr_stmt, new_dataref_ptr); + vect_finish_stmt_generation (stmt, incr_stmt, gsi); + + /* Copy the points-to information if it exists. */ + if (DR_PTR_INFO (dr)) + duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr)); + merge_alias_info (new_dataref_ptr, dataref_ptr); + + if (!ptr_incr) + return new_dataref_ptr; /* Update the vector-pointer's cross-iteration increment. */ FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE) @@ -919,11 +1317,6 @@ bump_vector_ptr (tree dataref_ptr, tree ptr_incr, block_stmt_iterator *bsi, gcc_assert (tree_int_cst_compare (use, update) == 0); } - /* Copy the points-to information if it exists. */ - if (DR_PTR_INFO (dr)) - duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr)); - merge_alias_info (new_dataref_ptr, dataref_ptr); - return new_dataref_ptr; } @@ -958,17 +1351,18 @@ vect_create_destination_var (tree scalar_dest, tree vectype) /* Function vect_init_vector. Insert a new stmt (INIT_STMT) that initializes a new vector variable with - the vector elements of VECTOR_VAR. Return the DEF of INIT_STMT. It will be - used in the vectorization of STMT. */ + the vector elements of VECTOR_VAR. Place the initialization at BSI if it + is not NULL. Otherwise, place the initialization at the loop preheader. + Return the DEF of INIT_STMT. + It will be used in the vectorization of STMT. 
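For example (illustrative only; the exact SSA name is made up): called with VECTOR_VAR == {3,3,3,3} and GSI == NULL, the function emits

      vect_cst_.N = { 3, 3, 3, 3 };

on the loop-preheader edge and returns the SSA name vect_cst_.N.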
*/ static tree -vect_init_vector (tree stmt, tree vector_var, tree vector_type) +vect_init_vector (gimple stmt, tree vector_var, tree vector_type, + gimple_stmt_iterator *gsi) { stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); - struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree new_var; - tree init_stmt; + gimple init_stmt; tree vec_oprnd; edge pe; tree new_temp; @@ -976,131 +1370,415 @@ vect_init_vector (tree stmt, tree vector_var, tree vector_type) new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_"); add_referenced_var (new_var); - - init_stmt = build_gimple_modify_stmt (new_var, vector_var); + init_stmt = gimple_build_assign (new_var, vector_var); new_temp = make_ssa_name (new_var, init_stmt); - GIMPLE_STMT_OPERAND (init_stmt, 0) = new_temp; + gimple_assign_set_lhs (init_stmt, new_temp); - pe = loop_preheader_edge (loop); - new_bb = bsi_insert_on_edge_immediate (pe, init_stmt); - gcc_assert (!new_bb); + if (gsi) + vect_finish_stmt_generation (stmt, init_stmt, gsi); + else + { + loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + + if (nested_in_vect_loop_p (loop, stmt)) + loop = loop->inner; + pe = loop_preheader_edge (loop); + new_bb = gsi_insert_on_edge_immediate (pe, init_stmt); + gcc_assert (!new_bb); + } if (vect_print_dump_info (REPORT_DETAILS)) { fprintf (vect_dump, "created new init_stmt: "); - print_generic_expr (vect_dump, init_stmt, TDF_SLIM); + print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM); } - vec_oprnd = GIMPLE_STMT_OPERAND (init_stmt, 0); + vec_oprnd = gimple_assign_lhs (init_stmt); return vec_oprnd; } -/* Function get_initial_def_for_induction +/* For constant and loop invariant defs of SLP_NODE this function returns + (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts. + OP_NUM determines if we gather defs for operand 0 or operand 1 of the scalar + stmts. NUMBER_OF_VECTORS is the number of vector defs to create. */ - Input: - IV_PHI - the initial value of the induction variable +static void +vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds, + unsigned int op_num, unsigned int number_of_vectors) +{ + VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node); + gimple stmt = VEC_index (gimple, stmts, 0); + stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); + tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo); + int nunits; + tree vec_cst; + tree t = NULL_TREE; + int j, number_of_places_left_in_vector; + tree vector_type; + tree op, vop; + int group_size = VEC_length (gimple, stmts); + unsigned int vec_num, i; + int number_of_copies = 1; + VEC (tree, heap) *voprnds = VEC_alloc (tree, heap, number_of_vectors); + bool constant_p, is_store; - Output: - Return a vector variable, initialized with the first VF values of - the induction variable. E.g., for an iv with IV_PHI='X' and - evolution S, for a vector of 4 units, we want to return: - [X, X + S, X + 2*S, X + 3*S]. 
*/ + if (STMT_VINFO_DATA_REF (stmt_vinfo)) + { + is_store = true; + op = gimple_assign_rhs1 (stmt); + } + else + { + is_store = false; + op = gimple_op (stmt, op_num + 1); + } -static tree -get_initial_def_for_induction (tree iv_phi) -{ - stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi); - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); - struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); - tree scalar_type = TREE_TYPE (PHI_RESULT_TREE (iv_phi)); - tree vectype = get_vectype_for_scalar_type (scalar_type); - int nunits = TYPE_VECTOR_SUBPARTS (vectype); - edge pe = loop_preheader_edge (loop); - basic_block new_bb; - block_stmt_iterator bsi; - tree vec, vec_init, vec_step, t; - tree access_fn; - tree new_var; - tree new_name; - tree init_stmt; - tree induction_phi, induc_def, new_stmt, vec_def, vec_dest; - tree init_expr, step_expr; - int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); - int i; - bool ok; - int ncopies = vf / nunits; - tree expr; - stmt_vec_info phi_info = vinfo_for_stmt (iv_phi); - tree stmts; - tree stmt = NULL_TREE; - block_stmt_iterator si; - basic_block bb = bb_for_stmt (iv_phi); + if (CONSTANT_CLASS_P (op)) + { + vector_type = vectype; + constant_p = true; + } + else + { + vector_type = get_vectype_for_scalar_type (TREE_TYPE (op)); + gcc_assert (vector_type); + constant_p = false; + } - gcc_assert (phi_info); - gcc_assert (ncopies >= 1); + nunits = TYPE_VECTOR_SUBPARTS (vector_type); - /* Find the first insertion point in the BB. */ - si = bsi_after_labels (bb); - stmt = bsi_stmt (si); + /* NUMBER_OF_COPIES is the number of times we need to use the same values in + created vectors. It is greater than 1 if unrolling is performed. - access_fn = analyze_scalar_evolution (loop, PHI_RESULT (iv_phi)); - gcc_assert (access_fn); - ok = vect_is_simple_iv_evolution (loop->num, access_fn, - &init_expr, &step_expr); - gcc_assert (ok); + For example, we have two scalar operands, s1 and s2 (e.g., group of + strided accesses of size two), while NUNITS is four (i.e., four scalars + of this type can be packed in a vector). The output vector will contain + two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES + will be 2). - /* Create the vector that holds the initial_value of the induction. */ - new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_"); - add_referenced_var (new_var); + If GROUP_SIZE > NUNITS, the scalars will be split into several vectors + containing the operands. - new_name = force_gimple_operand (init_expr, &stmts, false, new_var); - if (stmts) + For example, NUNITS is four as before, and the group size is 8 + (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and + {s5, s6, s7, s8}. */ + + number_of_copies = least_common_multiple (nunits, group_size) / group_size; + + number_of_places_left_in_vector = nunits; + for (j = 0; j < number_of_copies; j++) { - new_bb = bsi_insert_on_edge_immediate (pe, stmts); - gcc_assert (!new_bb); + for (i = group_size - 1; VEC_iterate (gimple, stmts, i, stmt); i--) + { + if (is_store) + op = gimple_assign_rhs1 (stmt); + else + op = gimple_op (stmt, op_num + 1); + + /* Create 'vect_ = {op0,op1,...,opn}'. 
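Continuing the earlier example (group s1,s2 with nunits == 4): the group is walked from its last stmt backwards, so the TREE_LIST built below reads s1,s2,s1,s2 from its head and the constructor yields {s1,s2,s1,s2}; the same reversed creation order is why the voprnds vector is inverted afterwards.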
*/ + t = tree_cons (NULL_TREE, op, t); + + number_of_places_left_in_vector--; + + if (number_of_places_left_in_vector == 0) + { + number_of_places_left_in_vector = nunits; + + if (constant_p) + vec_cst = build_vector (vector_type, t); + else + vec_cst = build_constructor_from_list (vector_type, t); + VEC_quick_push (tree, voprnds, + vect_init_vector (stmt, vec_cst, vector_type, NULL)); + t = NULL_TREE; + } + } } - t = NULL_TREE; - t = tree_cons (NULL_TREE, new_name, t); - for (i = 1; i < nunits; i++) + /* Since the vectors are created in the reverse order, we should invert + them. */ + vec_num = VEC_length (tree, voprnds); + for (j = vec_num - 1; j >= 0; j--) { - tree tmp; + vop = VEC_index (tree, voprnds, j); + VEC_quick_push (tree, *vec_oprnds, vop); + } - /* Create: new_name = new_name + step_expr */ - tmp = fold_build2 (PLUS_EXPR, scalar_type, new_name, step_expr); - init_stmt = build_gimple_modify_stmt (new_var, tmp); - new_name = make_ssa_name (new_var, init_stmt); - GIMPLE_STMT_OPERAND (init_stmt, 0) = new_name; + VEC_free (tree, heap, voprnds); - new_bb = bsi_insert_on_edge_immediate (pe, init_stmt); - gcc_assert (!new_bb); + /* In case that VF is greater than the unrolling factor needed for the SLP + group of stmts, NUMBER_OF_VECTORS to be created is greater than + NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have + to replicate the vectors. */ + while (number_of_vectors > VEC_length (tree, *vec_oprnds)) + { + for (i = 0; VEC_iterate (tree, *vec_oprnds, i, vop) && i < vec_num; i++) + VEC_quick_push (tree, *vec_oprnds, vop); + } +} - if (vect_print_dump_info (REPORT_DETAILS)) + +/* Get vectorized definitions from SLP_NODE that contains corresponding + vectorized def-stmts. */ + +static void +vect_get_slp_vect_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds) +{ + tree vec_oprnd; + gimple vec_def_stmt; + unsigned int i; + + gcc_assert (SLP_TREE_VEC_STMTS (slp_node)); + + for (i = 0; + VEC_iterate (gimple, SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt); + i++) + { + gcc_assert (vec_def_stmt); + vec_oprnd = gimple_get_lhs (vec_def_stmt); + VEC_quick_push (tree, *vec_oprnds, vec_oprnd); + } +} + + +/* Get vectorized definitions for SLP_NODE. + If the scalar definitions are loop invariants or constants, collect them and + call vect_get_constant_vectors() to create vector stmts. + Otherwise, the def-stmts must be already vectorized and the vectorized stmts + must be stored in the LEFT/RIGHT node of SLP_NODE, and we call + vect_get_slp_vect_defs() to retrieve them. + If VEC_OPRNDS1 is NULL, don't get vector defs for the second operand (from + the right node. This is used when the second operand must remain scalar. */ + +static void +vect_get_slp_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds0, + VEC (tree,heap) **vec_oprnds1) +{ + gimple first_stmt; + enum tree_code code; + int number_of_vects; + HOST_WIDE_INT lhs_size_unit, rhs_size_unit; + + first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0); + /* The number of vector defs is determined by the number of vector statements + in the node from which we get those statements. */ + if (SLP_TREE_LEFT (slp_node)) + number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_LEFT (slp_node)); + else + { + number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + /* Number of vector stmts was calculated according to LHS in + vect_schedule_slp_instance(), fix it by replacing LHS with RHS, if + necessary. See vect_get_smallest_scalar_type() for details. 
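For example (types assumed for illustration): a group of 8 stmts of the form int_x = (int) short_y needs 2 V4SI statements on the LHS side, while the 8 short operands fit into a single V8HI vector; the scaling below computes exactly that: 2 * 2 (rhs_size_unit) / 4 (lhs_size_unit) == 1.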
*/ + vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit, + &rhs_size_unit); + if (rhs_size_unit != lhs_size_unit) { - fprintf (vect_dump, "created new init_stmt: "); - print_generic_expr (vect_dump, init_stmt, TDF_SLIM); + number_of_vects *= rhs_size_unit; + number_of_vects /= lhs_size_unit; } - t = tree_cons (NULL_TREE, new_name, t); } - vec = build_constructor_from_list (vectype, nreverse (t)); - vec_init = vect_init_vector (stmt, vec, vectype); + + /* Allocate memory for vectorized defs. */ + *vec_oprnds0 = VEC_alloc (tree, heap, number_of_vects); + + /* SLP_NODE corresponds either to a group of stores or to a group of + unary/binary operations. We don't call this function for loads. */ + if (SLP_TREE_LEFT (slp_node)) + /* The defs are already vectorized. */ + vect_get_slp_vect_defs (SLP_TREE_LEFT (slp_node), vec_oprnds0); + else + /* Build vectors from scalar defs. */ + vect_get_constant_vectors (slp_node, vec_oprnds0, 0, number_of_vects); + + if (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt))) + /* Since we don't call this function with loads, this is a group of + stores. */ + return; + + code = gimple_assign_rhs_code (first_stmt); + if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS || !vec_oprnds1) + return; + + /* The number of vector defs is determined by the number of vector statements + in the node from which we get those statements. */ + if (SLP_TREE_RIGHT (slp_node)) + number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_RIGHT (slp_node)); + else + number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + + *vec_oprnds1 = VEC_alloc (tree, heap, number_of_vects); + + if (SLP_TREE_RIGHT (slp_node)) + /* The defs are already vectorized. */ + vect_get_slp_vect_defs (SLP_TREE_RIGHT (slp_node), vec_oprnds1); + else + /* Build vectors from scalar defs. */ + vect_get_constant_vectors (slp_node, vec_oprnds1, 1, number_of_vects); +} + + +/* Function get_initial_def_for_induction + + Input: + STMT - a stmt that performs an induction operation in the loop. + IV_PHI - the initial value of the induction variable + + Output: + Return a vector variable, initialized with the first VF values of + the induction variable. E.g., for an iv with IV_PHI='X' and + evolution S, for a vector of 4 units, we want to return: + [X, X + S, X + 2*S, X + 3*S]. */ + +static tree +get_initial_def_for_induction (gimple iv_phi) +{ + stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi); + loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + tree scalar_type = TREE_TYPE (gimple_phi_result (iv_phi)); + tree vectype; + int nunits; + edge pe = loop_preheader_edge (loop); + struct loop *iv_loop; + basic_block new_bb; + tree vec, vec_init, vec_step, t; + tree access_fn; + tree new_var; + tree new_name; + gimple init_stmt, induction_phi, new_stmt; + tree induc_def, vec_def, vec_dest; + tree init_expr, step_expr; + int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); + int i; + bool ok; + int ncopies; + tree expr; + stmt_vec_info phi_info = vinfo_for_stmt (iv_phi); + bool nested_in_vect_loop = false; + gimple_seq stmts = NULL; + imm_use_iterator imm_iter; + use_operand_p use_p; + gimple exit_phi; + edge latch_e; + tree loop_arg; + gimple_stmt_iterator si; + basic_block bb = gimple_bb (iv_phi); + + vectype = get_vectype_for_scalar_type (scalar_type); + gcc_assert (vectype); + nunits = TYPE_VECTOR_SUBPARTS (vectype); + ncopies = vf / nunits; + + gcc_assert (phi_info); + gcc_assert (ncopies >= 1); + + /* Find the first insertion point in the BB. 
*/ + si = gsi_after_labels (bb); + + if (INTEGRAL_TYPE_P (scalar_type) || POINTER_TYPE_P (scalar_type)) + step_expr = build_int_cst (scalar_type, 0); + else + step_expr = build_real (scalar_type, dconst0); + + /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */ + if (nested_in_vect_loop_p (loop, iv_phi)) + { + nested_in_vect_loop = true; + iv_loop = loop->inner; + } + else + iv_loop = loop; + gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father); + + latch_e = loop_latch_edge (iv_loop); + loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e); + + access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi)); + gcc_assert (access_fn); + ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn, + &init_expr, &step_expr); + gcc_assert (ok); + pe = loop_preheader_edge (iv_loop); + + /* Create the vector that holds the initial_value of the induction. */ + if (nested_in_vect_loop) + { + /* iv_loop is nested in the loop to be vectorized. init_expr had already + been created during vectorization of previous stmts; We obtain it from + the STMT_VINFO_VEC_STMT of the defining stmt. */ + tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi, loop_preheader_edge (iv_loop)); + vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL); + } + else + { + /* iv_loop is the loop to be vectorized. Create: + vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */ + new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_"); + add_referenced_var (new_var); + + new_name = force_gimple_operand (init_expr, &stmts, false, new_var); + if (stmts) + { + new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); + gcc_assert (!new_bb); + } + + t = NULL_TREE; + t = tree_cons (NULL_TREE, init_expr, t); + for (i = 1; i < nunits; i++) + { + /* Create: new_name_i = new_name + step_expr */ + enum tree_code code = POINTER_TYPE_P (scalar_type) + ? POINTER_PLUS_EXPR : PLUS_EXPR; + init_stmt = gimple_build_assign_with_ops (code, new_var, + new_name, step_expr); + new_name = make_ssa_name (new_var, init_stmt); + gimple_assign_set_lhs (init_stmt, new_name); + + new_bb = gsi_insert_on_edge_immediate (pe, init_stmt); + gcc_assert (!new_bb); + + if (vect_print_dump_info (REPORT_DETAILS)) + { + fprintf (vect_dump, "created new init_stmt: "); + print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM); + } + t = tree_cons (NULL_TREE, new_name, t); + } + /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */ + vec = build_constructor_from_list (vectype, nreverse (t)); + vec_init = vect_init_vector (iv_phi, vec, vectype, NULL); + } /* Create the vector that holds the step of the induction. */ - expr = build_int_cst (scalar_type, vf); - new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr); + if (nested_in_vect_loop) + /* iv_loop is nested in the loop to be vectorized. Generate: + vec_step = [S, S, S, S] */ + new_name = step_expr; + else + { + /* iv_loop is the loop to be vectorized. 
Generate: + vec_step = [VF*S, VF*S, VF*S, VF*S] */ + expr = build_int_cst (scalar_type, vf); + new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr); + } + t = NULL_TREE; for (i = 0; i < nunits; i++) t = tree_cons (NULL_TREE, unshare_expr (new_name), t); - vec = build_constructor_from_list (vectype, t); - vec_step = vect_init_vector (stmt, vec, vectype); + gcc_assert (CONSTANT_CLASS_P (new_name)); + vec = build_vector (vectype, t); + vec_step = vect_init_vector (iv_phi, vec, vectype, NULL); /* Create the following def-use cycle: loop prolog: - vec_init = [X, X+S, X+2*S, X+3*S] - vec_step = [VF*S, VF*S, VF*S, VF*S] + vec_init = ... + vec_step = ... loop: vec_iv = PHI ... @@ -1111,26 +1789,25 @@ get_initial_def_for_induction (tree iv_phi) /* Create the induction-phi that defines the induction-operand. */ vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"); add_referenced_var (vec_dest); - induction_phi = create_phi_node (vec_dest, loop->header); - set_stmt_info (get_stmt_ann (induction_phi), - new_stmt_vec_info (induction_phi, loop_vinfo)); + induction_phi = create_phi_node (vec_dest, iv_loop->header); + set_vinfo_for_stmt (induction_phi, + new_stmt_vec_info (induction_phi, loop_vinfo)); induc_def = PHI_RESULT (induction_phi); /* Create the iv update inside the loop */ - new_stmt = build_gimple_modify_stmt (NULL_TREE, - build2 (PLUS_EXPR, vectype, - induc_def, vec_step)); + new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest, + induc_def, vec_step); vec_def = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = vec_def; - bsi = bsi_for_stmt (stmt); - vect_finish_stmt_generation (stmt, new_stmt, &bsi); + gimple_assign_set_lhs (new_stmt, vec_def); + gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); + set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo)); /* Set the arguments of the phi node: */ - add_phi_arg (induction_phi, vec_init, loop_preheader_edge (loop)); - add_phi_arg (induction_phi, vec_def, loop_latch_edge (loop)); + add_phi_arg (induction_phi, vec_init, pe); + add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop)); - /* In case the vectorization factor (VF) is bigger than the number + /* In case that vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. For more details see documentation @@ -1139,6 +1816,8 @@ get_initial_def_for_induction (tree iv_phi) if (ncopies > 1) { stmt_vec_info prev_stmt_vinfo; + /* FORNOW. This restriction should be relaxed. */ + gcc_assert (!nested_in_vect_loop); /* Create the vector that holds the step of the induction. 
*/ expr = build_int_cst (scalar_type, nunits); @@ -1146,34 +1825,65 @@ get_initial_def_for_induction (tree iv_phi) t = NULL_TREE; for (i = 0; i < nunits; i++) t = tree_cons (NULL_TREE, unshare_expr (new_name), t); - vec = build_constructor_from_list (vectype, t); - vec_step = vect_init_vector (stmt, vec, vectype); + gcc_assert (CONSTANT_CLASS_P (new_name)); + vec = build_vector (vectype, t); + vec_step = vect_init_vector (iv_phi, vec, vectype, NULL); vec_def = induc_def; prev_stmt_vinfo = vinfo_for_stmt (induction_phi); for (i = 1; i < ncopies; i++) { - tree tmp; - - /* vec_i = vec_prev + vec_{step*nunits} */ - tmp = build2 (PLUS_EXPR, vectype, vec_def, vec_step); - new_stmt = build_gimple_modify_stmt (NULL_TREE, tmp); + /* vec_i = vec_prev + vec_step */ + new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest, + vec_def, vec_step); vec_def = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = vec_def; - bsi = bsi_for_stmt (stmt); - vect_finish_stmt_generation (stmt, new_stmt, &bsi); + gimple_assign_set_lhs (new_stmt, vec_def); + gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); + set_vinfo_for_stmt (new_stmt, + new_stmt_vec_info (new_stmt, loop_vinfo)); STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt; prev_stmt_vinfo = vinfo_for_stmt (new_stmt); } } + if (nested_in_vect_loop) + { + /* Find the loop-closed exit-phi of the induction, and record + the final vector of induction results: */ + exit_phi = NULL; + FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg) + { + if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p)))) + { + exit_phi = USE_STMT (use_p); + break; + } + } + if (exit_phi) + { + stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi); + /* FORNOW. Currently not supporting the case that an inner-loop induction + is not used in the outer-loop (i.e. only outside the outer-loop). */ + gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo) + && !STMT_VINFO_LIVE_P (stmt_vinfo)); + + STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt; + if (vect_print_dump_info (REPORT_DETAILS)) + { + fprintf (vect_dump, "vector of inductions after inner-loop:"); + print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM); + } + } + } + + if (vect_print_dump_info (REPORT_DETAILS)) { - fprintf (vect_dump, "transform induction: created def-use cycle:"); - print_generic_expr (vect_dump, induction_phi, TDF_SLIM); + fprintf (vect_dump, "transform induction: created def-use cycle: "); + print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM); fprintf (vect_dump, "\n"); - print_generic_expr (vect_dump, SSA_NAME_DEF_STMT (vec_def), TDF_SLIM); + print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM); } STMT_VINFO_VEC_STMT (phi_info) = induction_phi; @@ -1193,17 +1903,16 @@ get_initial_def_for_induction (tree iv_phi) needs to be introduced. 
*/ static tree -vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def) +vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def) { tree vec_oprnd; - tree vec_stmt; - tree def_stmt; + gimple vec_stmt; + gimple def_stmt; stmt_vec_info def_stmt_info = NULL; stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo); - int nunits = TYPE_VECTOR_SUBPARTS (vectype); + unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); - struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree vec_inv; tree vec_cst; tree t = NULL_TREE; @@ -1231,7 +1940,7 @@ vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def) if (def_stmt) { fprintf (vect_dump, " def_stmt = "); - print_generic_expr (vect_dump, def_stmt, TDF_SLIM); + print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM); } } @@ -1251,15 +1960,17 @@ vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def) { t = tree_cons (NULL_TREE, op, t); } - vector_type = get_vectype_for_scalar_type (TREE_TYPE (op)); - vec_cst = build_vector (vector_type, t); - - return vect_init_vector (stmt, vec_cst, vector_type); + vec_cst = build_vector (vectype, t); + return vect_init_vector (stmt, vec_cst, vectype, NULL); } /* Case 2: operand is defined outside the loop - loop invariant. */ case vect_invariant_def: { + vector_type = get_vectype_for_scalar_type (TREE_TYPE (def)); + gcc_assert (vector_type); + nunits = TYPE_VECTOR_SUBPARTS (vector_type); + if (scalar_def) *scalar_def = def; @@ -1273,29 +1984,36 @@ vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def) } /* FIXME: use build_constructor directly. */ - vector_type = get_vectype_for_scalar_type (TREE_TYPE (def)); vec_inv = build_constructor_from_list (vector_type, t); - return vect_init_vector (stmt, vec_inv, vector_type); + return vect_init_vector (stmt, vec_inv, vector_type, NULL); } /* Case 3: operand is defined inside the loop. */ case vect_loop_def: { if (scalar_def) - *scalar_def = def_stmt; + *scalar_def = NULL/* FIXME tuples: def_stmt*/; /* Get the def from the vectorized stmt. */ def_stmt_info = vinfo_for_stmt (def_stmt); vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info); gcc_assert (vec_stmt); - vec_oprnd = GIMPLE_STMT_OPERAND (vec_stmt, 0); + if (gimple_code (vec_stmt) == GIMPLE_PHI) + vec_oprnd = PHI_RESULT (vec_stmt); + else if (is_gimple_call (vec_stmt)) + vec_oprnd = gimple_call_lhs (vec_stmt); + else + vec_oprnd = gimple_assign_lhs (vec_stmt); return vec_oprnd; } /* Case 4: operand is defined by a loop header phi - reduction */ case vect_reduction_def: { - gcc_assert (TREE_CODE (def_stmt) == PHI_NODE); + struct loop *loop; + + gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI); + loop = (gimple_bb (def_stmt))->loop_father; /* Get the def before the loop */ op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop)); @@ -1305,10 +2023,14 @@ vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def) /* Case 5: operand is defined by loop-header phi - induction. */ case vect_induction_def: { - gcc_assert (TREE_CODE (def_stmt) == PHI_NODE); + gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI); - /* Get the def before the loop */ - return get_initial_def_for_induction (def_stmt); + /* Get the def from the vectorized stmt. 
*/ + def_stmt_info = vinfo_for_stmt (def_stmt); + vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info); + gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI); + vec_oprnd = PHI_RESULT (vec_stmt); + return vec_oprnd; } default: @@ -1376,7 +2098,7 @@ vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def) static tree vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd) { - tree vec_stmt_for_operand; + gimple vec_stmt_for_operand; stmt_vec_info def_stmt_info; /* Do nothing; can reuse same def. */ @@ -1388,41 +2110,88 @@ vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd) gcc_assert (def_stmt_info); vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info); gcc_assert (vec_stmt_for_operand); - vec_oprnd = GIMPLE_STMT_OPERAND (vec_stmt_for_operand, 0); - + vec_oprnd = gimple_get_lhs (vec_stmt_for_operand); + if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI) + vec_oprnd = PHI_RESULT (vec_stmt_for_operand); + else + vec_oprnd = gimple_get_lhs (vec_stmt_for_operand); return vec_oprnd; } +/* Get vectorized definitions for the operands to create a copy of an original + stmt. See vect_get_vec_def_for_stmt_copy() for details. */ + +static void +vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt, + VEC(tree,heap) **vec_oprnds0, + VEC(tree,heap) **vec_oprnds1) +{ + tree vec_oprnd = VEC_pop (tree, *vec_oprnds0); + + vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd); + VEC_quick_push (tree, *vec_oprnds0, vec_oprnd); + + if (vec_oprnds1 && *vec_oprnds1) + { + vec_oprnd = VEC_pop (tree, *vec_oprnds1); + vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd); + VEC_quick_push (tree, *vec_oprnds1, vec_oprnd); + } +} + + +/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not NULL. */ + +static void +vect_get_vec_defs (tree op0, tree op1, gimple stmt, + VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1, + slp_tree slp_node) +{ + if (slp_node) + vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1); + else + { + tree vec_oprnd; + + *vec_oprnds0 = VEC_alloc (tree, heap, 1); + vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL); + VEC_quick_push (tree, *vec_oprnds0, vec_oprnd); + + if (op1) + { + *vec_oprnds1 = VEC_alloc (tree, heap, 1); + vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL); + VEC_quick_push (tree, *vec_oprnds1, vec_oprnd); + } + } +} + + /* Function vect_finish_stmt_generation. Insert a new stmt. */ static void -vect_finish_stmt_generation (tree stmt, tree vec_stmt, - block_stmt_iterator *bsi) +vect_finish_stmt_generation (gimple stmt, gimple vec_stmt, + gimple_stmt_iterator *gsi) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); - bsi_insert_before (bsi, vec_stmt, BSI_SAME_STMT); - set_stmt_info (get_stmt_ann (vec_stmt), - new_stmt_vec_info (vec_stmt, loop_vinfo)); + gcc_assert (gimple_code (stmt) != GIMPLE_LABEL); + + gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT); + + set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo)); if (vect_print_dump_info (REPORT_DETAILS)) { fprintf (vect_dump, "add new stmt: "); - print_generic_expr (vect_dump, vec_stmt, TDF_SLIM); + print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM); } - /* Make sure bsi points to the stmt that is being vectorized. 
*/ - gcc_assert (stmt == bsi_stmt (*bsi)); - -#ifdef USE_MAPPED_LOCATION - SET_EXPR_LOCATION (vec_stmt, EXPR_LOCATION (stmt)); -#else - SET_EXPR_LOCUS (vec_stmt, EXPR_LOCUS (stmt)); -#endif + gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi))); } @@ -1470,21 +2239,29 @@ vect_finish_stmt_generation (tree stmt, tree vec_stmt, A cost model should help decide between these two schemes. */ static tree -get_initial_def_for_reduction (tree stmt, tree init_val, tree *adjustment_def) +get_initial_def_for_reduction (gimple stmt, tree init_val, tree *adjustment_def) { stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); + loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo); int nunits = TYPE_VECTOR_SUBPARTS (vectype); - enum tree_code code = TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1)); + tree scalar_type = TREE_TYPE (vectype); + enum tree_code code = gimple_assign_rhs_code (stmt); tree type = TREE_TYPE (init_val); tree vecdef; tree def_for_init; tree init_def; tree t = NULL_TREE; int i; - tree vector_type; + bool nested_in_vect_loop = false; + + gcc_assert (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type)); + if (nested_in_vect_loop_p (loop, stmt)) + nested_in_vect_loop = true; + else + gcc_assert (loop == (gimple_bb (stmt))->loop_father); - gcc_assert (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type)); vecdef = vect_get_vec_def_for_operand (init_val, stmt, NULL); switch (code) @@ -1492,16 +2269,19 @@ get_initial_def_for_reduction (tree stmt, tree init_val, tree *adjustment_def) case WIDEN_SUM_EXPR: case DOT_PROD_EXPR: case PLUS_EXPR: - *adjustment_def = init_val; + if (nested_in_vect_loop) + *adjustment_def = vecdef; + else + *adjustment_def = init_val; /* Create a vector of zeros for init_def. */ - if (INTEGRAL_TYPE_P (type)) - def_for_init = build_int_cst (type, 0); + if (SCALAR_FLOAT_TYPE_P (scalar_type)) + def_for_init = build_real (scalar_type, dconst0); else - def_for_init = build_real (type, dconst0); - for (i = nunits - 1; i >= 0; --i) - t = tree_cons (NULL_TREE, def_for_init, t); - vector_type = get_vectype_for_scalar_type (TREE_TYPE (def_for_init)); - init_def = build_vector (vector_type, t); + def_for_init = build_int_cst (scalar_type, 0); + + for (i = nunits - 1; i >= 0; --i) + t = tree_cons (NULL_TREE, def_for_init, t); + init_def = build_vector (vectype, t); break; case MIN_EXPR: @@ -1525,6 +2305,11 @@ get_initial_def_for_reduction (tree stmt, tree init_val, tree *adjustment_def) VECT_DEF is a vector of partial results. REDUC_CODE is the tree-code for the epilog reduction. + NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the + number of elements that we can fit in a vectype (nunits). In this case + we have to generate more than one vector stmt - i.e - we need to "unroll" + the vector stmt by a factor VF/nunits. For more details see documentation + in vectorizable_operation. STMT is the scalar reduction stmt that is being vectorized. REDUCTION_PHI is the phi-node that carries the reduction computation. 
@@ -1567,10 +2352,13 @@ get_initial_def_for_reduction (tree stmt, tree init_val, tree *adjustment_def) */ static void -vect_create_epilog_for_reduction (tree vect_def, tree stmt, - enum tree_code reduc_code, tree reduction_phi) +vect_create_epilog_for_reduction (tree vect_def, gimple stmt, + int ncopies, + enum tree_code reduc_code, + gimple reduction_phi) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); + stmt_vec_info prev_phi_info; tree vectype; enum machine_mode mode; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); @@ -1578,53 +2366,86 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, basic_block exit_bb; tree scalar_dest; tree scalar_type; - tree new_phi; - block_stmt_iterator exit_bsi; + gimple new_phi = NULL, phi; + gimple_stmt_iterator exit_gsi; tree vec_dest; - tree new_temp; + tree new_temp = NULL_TREE; tree new_name; - tree epilog_stmt; - tree new_scalar_dest, exit_phi; + gimple epilog_stmt = NULL; + tree new_scalar_dest, new_dest; + gimple exit_phi; tree bitsize, bitpos, bytesize; - enum tree_code code = TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1)); - tree scalar_initial_def; - tree vec_initial_def; + enum tree_code code = gimple_assign_rhs_code (stmt); + tree adjustment_def; + tree vec_initial_def, def; tree orig_name; imm_use_iterator imm_iter; use_operand_p use_p; - bool extract_scalar_result; - tree reduction_op; - tree orig_stmt; - tree use_stmt; - tree operation = GIMPLE_STMT_OPERAND (stmt, 1); - int op_type; + bool extract_scalar_result = false; + tree reduction_op, expr; + gimple orig_stmt; + gimple use_stmt; + bool nested_in_vect_loop = false; + VEC(gimple,heap) *phis = NULL; + enum vect_def_type dt = vect_unknown_def_type; + int j, i; + + if (nested_in_vect_loop_p (loop, stmt)) + { + loop = loop->inner; + nested_in_vect_loop = true; + } - op_type = TREE_OPERAND_LENGTH (operation); - reduction_op = TREE_OPERAND (operation, op_type-1); + switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))) + { + case GIMPLE_SINGLE_RHS: + gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op); + reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2); + break; + case GIMPLE_UNARY_RHS: + reduction_op = gimple_assign_rhs1 (stmt); + break; + case GIMPLE_BINARY_RHS: + reduction_op = gimple_assign_rhs2 (stmt); + break; + default: + gcc_unreachable (); + } + vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op)); + gcc_assert (vectype); mode = TYPE_MODE (vectype); /*** 1. Create the reduction def-use cycle ***/ - /* 1.1 set the loop-entry arg of the reduction-phi: */ /* For the case of reduction, vect_get_vec_def_for_operand returns the scalar def before the loop, that defines the initial value of the reduction variable. 
*/ vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt, - &scalar_initial_def); - add_phi_arg (reduction_phi, vec_initial_def, loop_preheader_edge (loop)); + &adjustment_def); - /* 1.2 set the loop-latch arg for the reduction-phi: */ - add_phi_arg (reduction_phi, vect_def, loop_latch_edge (loop)); - - if (vect_print_dump_info (REPORT_DETAILS)) + phi = reduction_phi; + def = vect_def; + for (j = 0; j < ncopies; j++) { - fprintf (vect_dump, "transform reduction: created def-use cycle:"); - print_generic_expr (vect_dump, reduction_phi, TDF_SLIM); - fprintf (vect_dump, "\n"); - print_generic_expr (vect_dump, SSA_NAME_DEF_STMT (vect_def), TDF_SLIM); - } + /* 1.1 set the loop-entry arg of the reduction-phi: */ + add_phi_arg (phi, vec_initial_def, loop_preheader_edge (loop)); + + /* 1.2 set the loop-latch arg for the reduction-phi: */ + if (j > 0) + def = vect_get_vec_def_for_stmt_copy (dt, def); + add_phi_arg (phi, def, loop_latch_edge (loop)); + if (vect_print_dump_info (REPORT_DETAILS)) + { + fprintf (vect_dump, "transform reduction: created def-use cycle: "); + print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM); + fprintf (vect_dump, "\n"); + print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0, TDF_SLIM); + } + + phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)); + } /*** 2. Create epilog code The reduction epilog code operates across the elements of the vector @@ -1648,7 +2469,7 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, s_out3 = extract_field # step 2 s_out4 = adjust_result # step 3 - (step 3 is optional, and step2 1 and 2 may be combined). + (step 3 is optional, and steps 1 and 2 may be combined). Lastly, the uses of s_out0 are replaced by s_out4. ***/ @@ -1657,9 +2478,23 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, v_out1 = phi */ exit_bb = single_exit (loop)->dest; - new_phi = create_phi_node (SSA_NAME_VAR (vect_def), exit_bb); - SET_PHI_ARG_DEF (new_phi, single_exit (loop)->dest_idx, vect_def); - exit_bsi = bsi_after_labels (exit_bb); + def = vect_def; + prev_phi_info = NULL; + for (j = 0; j < ncopies; j++) + { + phi = create_phi_node (SSA_NAME_VAR (vect_def), exit_bb); + set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo)); + if (j == 0) + new_phi = phi; + else + { + def = vect_get_vec_def_for_stmt_copy (dt, def); + STMT_VINFO_RELATED_STMT (prev_phi_info) = phi; + } + SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def); + prev_phi_info = vinfo_for_stmt (phi); + } + exit_gsi = gsi_after_labels (exit_bb); /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3 (i.e. when reduc_code is not available) and in the final adjustment @@ -1683,13 +2518,25 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo)); gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt); } - code = TREE_CODE (GIMPLE_STMT_OPERAND (orig_stmt, 1)); - scalar_dest = GIMPLE_STMT_OPERAND (orig_stmt, 0); + code = gimple_assign_rhs_code (orig_stmt); + scalar_dest = gimple_assign_lhs (orig_stmt); scalar_type = TREE_TYPE (scalar_dest); new_scalar_dest = vect_create_destination_var (scalar_dest, NULL); bitsize = TYPE_SIZE (scalar_type); bytesize = TYPE_SIZE_UNIT (scalar_type); + + /* In case this is a reduction in an inner-loop while vectorizing an outer + loop - we don't need to extract a single scalar result at the end of the + inner-loop. The final vector of partial results will be used in the + vectorized outer-loop, or reduced to a scalar result at the end of the + outer-loop. 
*/ + if (nested_in_vect_loop) + goto vect_finalize_reduction; + + /* FORNOW */ + gcc_assert (ncopies == 1); + /* 2.3 Create the reduction code, using one of the three schemes described above. */ @@ -1705,10 +2552,10 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, vec_dest = vect_create_destination_var (scalar_dest, vectype); tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi)); - epilog_stmt = build_gimple_modify_stmt (vec_dest, tmp); + epilog_stmt = gimple_build_assign (vec_dest, tmp); new_temp = make_ssa_name (vec_dest, epilog_stmt); - GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp; - bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT); + gimple_assign_set_lhs (epilog_stmt, new_temp); + gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); extract_scalar_result = true; } @@ -1721,7 +2568,7 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1); tree vec_temp; - if (vec_shr_optab->handlers[mode].insn_code != CODE_FOR_nothing) + if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing) shift_code = VEC_RSHIFT_EXPR; else have_whole_vector_shift = false; @@ -1736,8 +2583,8 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, have_whole_vector_shift = false; else { - optab optab = optab_for_tree_code (code, vectype); - if (optab->handlers[mode].insn_code == CODE_FOR_nothing) + optab optab = optab_for_tree_code (code, vectype, optab_default); + if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing) have_whole_vector_shift = false; } @@ -1761,17 +2608,17 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, bit_offset /= 2) { tree bitpos = size_int (bit_offset); - tree tmp = build2 (shift_code, vectype, new_temp, bitpos); - epilog_stmt = build_gimple_modify_stmt (vec_dest, tmp); + epilog_stmt = gimple_build_assign_with_ops (shift_code, vec_dest, + new_temp, bitpos); new_name = make_ssa_name (vec_dest, epilog_stmt); - GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_name; - bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT); + gimple_assign_set_lhs (epilog_stmt, new_name); + gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); - tmp = build2 (code, vectype, new_name, new_temp); - epilog_stmt = build_gimple_modify_stmt (vec_dest, tmp); + epilog_stmt = gimple_build_assign_with_ops (code, vec_dest, + new_name, new_temp); new_temp = make_ssa_name (vec_dest, epilog_stmt); - GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp; - bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT); + gimple_assign_set_lhs (epilog_stmt, new_temp); + gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } extract_scalar_result = true; @@ -1797,32 +2644,30 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1); rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize, bitsize_zero_node); - BIT_FIELD_REF_UNSIGNED (rhs) = TYPE_UNSIGNED (scalar_type); - epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, rhs); + epilog_stmt = gimple_build_assign (new_scalar_dest, rhs); new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); - GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp; - bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT); + gimple_assign_set_lhs (epilog_stmt, new_temp); + gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); for (bit_offset = element_bitsize; bit_offset < vec_size_in_bits; bit_offset += element_bitsize) { - tree tmp; tree bitpos = bitsize_int (bit_offset); tree rhs 
= build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize, bitpos); - BIT_FIELD_REF_UNSIGNED (rhs) = TYPE_UNSIGNED (scalar_type); - epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, rhs); + epilog_stmt = gimple_build_assign (new_scalar_dest, rhs); new_name = make_ssa_name (new_scalar_dest, epilog_stmt); - GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_name; - bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT); + gimple_assign_set_lhs (epilog_stmt, new_name); + gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); - tmp = build2 (code, scalar_type, new_name, new_temp); - epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, tmp); + epilog_stmt = gimple_build_assign_with_ops (code, + new_scalar_dest, + new_name, new_temp); new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); - GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp; - bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT); + gimple_assign_set_lhs (epilog_stmt, new_temp); + gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } extract_scalar_result = false; @@ -1836,6 +2681,7 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, { tree rhs; + gcc_assert (!nested_in_vect_loop); if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "extract scalar result"); @@ -1847,50 +2693,88 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, bitpos = bitsize_zero_node; rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos); - BIT_FIELD_REF_UNSIGNED (rhs) = TYPE_UNSIGNED (scalar_type); - epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, rhs); + epilog_stmt = gimple_build_assign (new_scalar_dest, rhs); new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); - GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp; - bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT); + gimple_assign_set_lhs (epilog_stmt, new_temp); + gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } - /* 2.4 Adjust the final result by the initial value of the reduction +vect_finalize_reduction: + + /* 2.5 Adjust the final result by the initial value of the reduction variable. (When such adjustment is not needed, then - 'scalar_initial_def' is zero). + 'adjustment_def' is zero). For example, if code is PLUS we create: + new_temp = loop_exit_def + adjustment_def */ - Create: - s_out4 = scalar_expr */ - - if (scalar_initial_def) + if (adjustment_def) { - tree tmp = build2 (code, scalar_type, new_temp, scalar_initial_def); - epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, tmp); - new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); - GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp; - bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT); + if (nested_in_vect_loop) + { + gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE); + expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def); + new_dest = vect_create_destination_var (scalar_dest, vectype); + } + else + { + gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE); + expr = build2 (code, scalar_type, new_temp, adjustment_def); + new_dest = vect_create_destination_var (scalar_dest, scalar_type); + } + epilog_stmt = gimple_build_assign (new_dest, expr); + new_temp = make_ssa_name (new_dest, epilog_stmt); + gimple_assign_set_lhs (epilog_stmt, new_temp); + SSA_NAME_DEF_STMT (new_temp) = epilog_stmt; + gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } - /* 2.6 Replace uses of s_out0 with uses of s_out3 */ - /* Find the loop-closed-use at the loop exit of the original scalar result. 
+ /* 2.6 Handle the loop-exit phi */ + + /* Replace uses of s_out0 with uses of s_out3: + Find the loop-closed-use at the loop exit of the original scalar result. (The reduction result is expected to have two immediate uses - one at the latch block, and one at the loop exit). */ - exit_phi = NULL; + phis = VEC_alloc (gimple, heap, 10); FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) { - if (!flow_bb_inside_loop_p (loop, bb_for_stmt (USE_STMT (use_p)))) + if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))) { exit_phi = USE_STMT (use_p); - break; + VEC_quick_push (gimple, phis, exit_phi); } } /* We expect to have found an exit_phi because of loop-closed-ssa form. */ - gcc_assert (exit_phi); - /* Replace the uses: */ - orig_name = PHI_RESULT (exit_phi); - FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name) - FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter) - SET_USE (use_p, new_temp); + gcc_assert (!VEC_empty (gimple, phis)); + + for (i = 0; VEC_iterate (gimple, phis, i, exit_phi); i++) + { + if (nested_in_vect_loop) + { + stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi); + + /* FORNOW. Currently not supporting the case that an inner-loop + reduction is not used in the outer-loop (but only outside the + outer-loop). */ + gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo) + && !STMT_VINFO_LIVE_P (stmt_vinfo)); + + epilog_stmt = adjustment_def ? epilog_stmt : new_phi; + STMT_VINFO_VEC_STMT (stmt_vinfo) = epilog_stmt; + set_vinfo_for_stmt (epilog_stmt, + new_stmt_vec_info (epilog_stmt, loop_vinfo)); + if (adjustment_def) + STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) = + STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi)); + continue; + } + + /* Replace the uses: */ + orig_name = PHI_RESULT (exit_phi); + FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name) + FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter) + SET_USE (use_p, new_temp); + } + VEC_free (gimple, heap, phis); } @@ -1935,47 +2819,61 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, does *NOT* necessarily hold for reduction patterns. */ bool -vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) +vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi, + gimple *vec_stmt) { tree vec_dest; tree scalar_dest; - tree op; tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype = STMT_VINFO_VECTYPE (stmt_info); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); - tree operation; enum tree_code code, orig_code, epilog_reduc_code = 0; enum machine_mode vec_mode; int op_type; optab optab, reduc_optab; tree new_temp = NULL_TREE; - tree def, def_stmt; + tree def; + gimple def_stmt; enum vect_def_type dt; - tree new_phi; + gimple new_phi = NULL; tree scalar_type; bool is_simple_use; - tree orig_stmt; + gimple orig_stmt; stmt_vec_info orig_stmt_info; tree expr = NULL_TREE; int i; int nunits = TYPE_VECTOR_SUBPARTS (vectype); int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; - stmt_vec_info prev_stmt_info; + int epilog_copies; + stmt_vec_info prev_stmt_info, prev_phi_info; + gimple first_phi = NULL; + bool single_defuse_cycle = false; tree reduc_def; - tree new_stmt = NULL_TREE; + gimple new_stmt = NULL; int j; + tree ops[3]; + + if (nested_in_vect_loop_p (loop, stmt)) + loop = loop->inner; gcc_assert (ncopies >= 1); + /* FORNOW: SLP not supported. */ + if (STMT_SLP_TYPE (stmt_info)) + return false; + /* 1. Is vectorizable reduction? 
*/ /* Not supportable if the reduction variable is used in the loop. */ - if (STMT_VINFO_RELEVANT_P (stmt_info)) + if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer) return false; - if (!STMT_VINFO_LIVE_P (stmt_info)) + /* Reductions that are not used even in an enclosing outer-loop, + are expected to be "live" (used out of the loop). */ + if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_loop + && !STMT_VINFO_LIVE_P (stmt_info)) return false; /* Make sure it was already recognized as a reduction computation. */ @@ -2002,39 +2900,68 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) inside the loop body. The last operand is the reduction variable, which is defined by the loop-header-phi. */ - gcc_assert (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT); - - operation = GIMPLE_STMT_OPERAND (stmt, 1); - code = TREE_CODE (operation); - op_type = TREE_OPERAND_LENGTH (operation); - if (op_type != binary_op && op_type != ternary_op) - return false; - scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); - scalar_type = TREE_TYPE (scalar_dest); + gcc_assert (is_gimple_assign (stmt)); - /* All uses but the last are expected to be defined in the loop. - The last use is the reduction variable. */ - for (i = 0; i < op_type-1; i++) + /* Flatten RHS */ + switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))) { - op = TREE_OPERAND (operation, i); - is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt); - gcc_assert (is_simple_use); - if (dt != vect_loop_def - && dt != vect_invariant_def + case GIMPLE_SINGLE_RHS: + op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)); + if (op_type == ternary_op) + { + tree rhs = gimple_assign_rhs1 (stmt); + ops[0] = TREE_OPERAND (rhs, 0); + ops[1] = TREE_OPERAND (rhs, 1); + ops[2] = TREE_OPERAND (rhs, 2); + code = TREE_CODE (rhs); + } + else + return false; + break; + + case GIMPLE_BINARY_RHS: + code = gimple_assign_rhs_code (stmt); + op_type = TREE_CODE_LENGTH (code); + gcc_assert (op_type == binary_op); + ops[0] = gimple_assign_rhs1 (stmt); + ops[1] = gimple_assign_rhs2 (stmt); + break; + + case GIMPLE_UNARY_RHS: + return false; + + default: + gcc_unreachable (); + } + + scalar_dest = gimple_assign_lhs (stmt); + scalar_type = TREE_TYPE (scalar_dest); + if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type) + && !SCALAR_FLOAT_TYPE_P (scalar_type)) + return false; + + /* All uses but the last are expected to be defined in the loop. + The last use is the reduction variable. 
*/ + for (i = 0; i < op_type-1; i++) + { + is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt, + &def, &dt); + gcc_assert (is_simple_use); + if (dt != vect_loop_def + && dt != vect_invariant_def && dt != vect_constant_def && dt != vect_induction_def) return false; } - op = TREE_OPERAND (operation, i); - is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt); + is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt, &def, &dt); gcc_assert (is_simple_use); gcc_assert (dt == vect_reduction_def); - gcc_assert (TREE_CODE (def_stmt) == PHI_NODE); + gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI); if (orig_stmt) - gcc_assert (orig_stmt == vect_is_simple_reduction (loop, def_stmt)); + gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo, def_stmt)); else - gcc_assert (stmt == vect_is_simple_reduction (loop, def_stmt)); + gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, def_stmt)); if (STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt))) return false; @@ -2042,7 +2969,7 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) /* 4. Supportable by target? */ /* 4.1. check support for the operation in the loop */ - optab = optab_for_tree_code (code, vectype); + optab = optab_for_tree_code (code, vectype, optab_default); if (!optab) { if (vect_print_dump_info (REPORT_DETAILS)) @@ -2050,7 +2977,7 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) return false; } vec_mode = TYPE_MODE (vectype); - if (optab->handlers[(int) vec_mode].insn_code == CODE_FOR_nothing) + if (optab_handler (optab, vec_mode)->insn_code == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "op not supported by target."); @@ -2110,8 +3037,18 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) { /* This is a reduction pattern: get the vectype from the type of the reduction variable, and get the tree-code from orig_stmt. */ - orig_code = TREE_CODE (GIMPLE_STMT_OPERAND (orig_stmt, 1)); + orig_code = gimple_assign_rhs_code (orig_stmt); vectype = get_vectype_for_scalar_type (TREE_TYPE (def)); + if (!vectype) + { + if (vect_print_dump_info (REPORT_DETAILS)) + { + fprintf (vect_dump, "unsupported data-type "); + print_generic_expr (vect_dump, TREE_TYPE (def), TDF_SLIM); + } + return false; + } + vec_mode = TYPE_MODE (vectype); } else @@ -2123,14 +3060,14 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (!reduction_code_for_scalar_code (orig_code, &epilog_reduc_code)) return false; - reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype); + reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype, optab_default); if (!reduc_optab) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "no optab for reduction."); epilog_reduc_code = NUM_TREE_CODES; } - if (reduc_optab->handlers[(int) vec_mode].insn_code == CODE_FOR_nothing) + if (optab_handler (reduc_optab, vec_mode)->insn_code == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "reduc op not supported by target."); @@ -2140,7 +3077,8 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (!vec_stmt) /* transformation not required. 
*/ { STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type; - vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies); + if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies)) + return false; return true; } @@ -2152,31 +3090,64 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) /* Create the destination vector */ vec_dest = vect_create_destination_var (scalar_dest, vectype); - /* Create the reduction-phi that defines the reduction-operand. */ - new_phi = create_phi_node (vec_dest, loop->header); - /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. For more details see documentation in vectorizable_operation. */ + /* If the reduction is used in an outer loop we need to generate + VF intermediate results, like so (e.g. for ncopies=2): + r0 = phi (init, r0) + r1 = phi (init, r1) + r0 = x0 + r0; + r1 = x1 + r1; + (i.e. we generate VF results in 2 registers). + In this case we have a separate def-use cycle for each copy, and therefore + for each copy we get the vector def for the reduction variable from the + respective phi node created for this copy. + + Otherwise (the reduction is unused in the loop nest), we can combine + together intermediate results, like so (e.g. for ncopies=2): + r = phi (init, r) + r = x0 + r; + r = x1 + r; + (i.e. we generate VF/2 results in a single register). + In this case for each copy we get the vector def for the reduction variable + from the vectorized reduction operation generated in the previous iteration. + */ + + if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_loop) + { + single_defuse_cycle = true; + epilog_copies = 1; + } + else + epilog_copies = ncopies; + prev_stmt_info = NULL; + prev_phi_info = NULL; for (j = 0; j < ncopies; j++) { + if (j == 0 || !single_defuse_cycle) + { + /* Create the reduction-phi that defines the reduction-operand. */ + new_phi = create_phi_node (vec_dest, loop->header); + set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo)); + } + /* Handle uses. */ if (j == 0) { - op = TREE_OPERAND (operation, 0); - loop_vec_def0 = vect_get_vec_def_for_operand (op, stmt, NULL); + loop_vec_def0 = vect_get_vec_def_for_operand (ops[0], stmt, NULL); if (op_type == ternary_op) { - op = TREE_OPERAND (operation, 1); - loop_vec_def1 = vect_get_vec_def_for_operand (op, stmt, NULL); + loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt, NULL); } /* Get the vector def for the reduction variable from the phi node */ reduc_def = PHI_RESULT (new_phi); + first_phi = new_phi; } else { @@ -2185,9 +3156,12 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (op_type == ternary_op) loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def1); - /* Get the vector def for the reduction variable from the vectorized - reduction operation generated in the previous iteration (j-1) */ - reduc_def = GIMPLE_STMT_OPERAND (new_stmt ,0); + if (single_defuse_cycle) + reduc_def = gimple_assign_lhs (new_stmt); + else + reduc_def = PHI_RESULT (new_phi); + + STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi; } /* Arguments are ready. create the new vector stmt. 
*/ @@ -2196,21 +3170,25 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) else expr = build3 (code, vectype, loop_vec_def0, loop_vec_def1, reduc_def); - new_stmt = build_gimple_modify_stmt (vec_dest, expr); + new_stmt = gimple_build_assign (vec_dest, expr); new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, new_stmt, bsi); + gimple_assign_set_lhs (new_stmt, new_temp); + vect_finish_stmt_generation (stmt, new_stmt, gsi); if (j == 0) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; else STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; prev_stmt_info = vinfo_for_stmt (new_stmt); + prev_phi_info = vinfo_for_stmt (new_phi); } - /* Finalize the reduction-phi (set it's arguments) and create the + /* Finalize the reduction-phi (set its arguments) and create the epilog reduction code. */ - vect_create_epilog_for_reduction (new_temp, stmt, epilog_reduc_code, new_phi); + if (!single_defuse_cycle) + new_temp = gimple_assign_lhs (*vec_stmt); + vect_create_epilog_for_reduction (new_temp, stmt, epilog_copies, + epilog_reduc_code, first_phi); return true; } @@ -2219,14 +3197,14 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) of the function, or NULL_TREE if the function cannot be vectorized. */ tree -vectorizable_function (tree call, tree vectype_out, tree vectype_in) +vectorizable_function (gimple call, tree vectype_out, tree vectype_in) { - tree fndecl = get_callee_fndecl (call); + tree fndecl = gimple_call_fndecl (call); enum built_in_function code; /* We only handle functions that do not read or clobber memory -- i.e. const or novops ones. */ - if (!(call_expr_flags (call) & (ECF_CONST | ECF_NOVOPS))) + if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS))) return NULL_TREE; if (!fndecl @@ -2247,19 +3225,25 @@ vectorizable_function (tree call, tree vectype_out, tree vectype_in) Return FALSE if not a vectorizable STMT, TRUE otherwise. */ bool -vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) +vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt) { tree vec_dest; tree scalar_dest; - tree operation; tree op, type; + tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info; tree vectype_out, vectype_in; + int nunits_in; + int nunits_out; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); - tree fndecl, rhs, new_temp, def, def_stmt, rhs_type, lhs_type; - enum vect_def_type dt[2]; - int ncopies, j, nargs; - call_expr_arg_iterator iter; + tree fndecl, new_temp, def, rhs_type, lhs_type; + gimple def_stmt; + enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; + gimple new_stmt; + int ncopies, j; + VEC(tree, heap) *vargs = NULL; + enum { NARROW, NONE, WIDEN } modifier; + size_t i, nargs; if (!STMT_VINFO_RELEVANT_P (stmt_info)) return false; @@ -2267,37 +3251,30 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) return false; - /* FORNOW: not yet supported. */ - if (STMT_VINFO_LIVE_P (stmt_info)) - { - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "value used after loop."); - return false; - } - - /* Is STMT a vectorizable call? */ - if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT) + /* FORNOW: SLP not supported. 
*/ + if (STMT_SLP_TYPE (stmt_info)) return false; - if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME) + /* Is STMT a vectorizable call? */ + if (!is_gimple_call (stmt)) return false; - operation = GIMPLE_STMT_OPERAND (stmt, 1); - if (TREE_CODE (operation) != CALL_EXPR) + if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME) return false; /* Process function arguments. */ rhs_type = NULL_TREE; - nargs = 0; - FOR_EACH_CALL_EXPR_ARG (op, iter, operation) - { - ++nargs; + nargs = gimple_call_num_args (stmt); - /* Bail out if the function has more than two arguments, we - do not have interesting builtin functions to vectorize with - more than two arguments. */ - if (nargs > 2) - return false; + /* Bail out if the function has more than two arguments, we + do not have interesting builtin functions to vectorize with + more than two arguments. No arguments is also not good. */ + if (nargs == 0 || nargs > 2) + return false; + + for (i = 0; i < nargs; i++) + { + op = gimple_call_arg (stmt, i); /* We can only handle calls with arguments of the same type. */ if (rhs_type @@ -2309,7 +3286,7 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) } rhs_type = TREE_TYPE (op); - if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt[nargs-1])) + if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt[i])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -2317,27 +3294,32 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) } } - /* No arguments is also not good. */ - if (nargs == 0) - return false; - vectype_in = get_vectype_for_scalar_type (rhs_type); + if (!vectype_in) + return false; + nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); - lhs_type = TREE_TYPE (GIMPLE_STMT_OPERAND (stmt, 0)); + lhs_type = TREE_TYPE (gimple_call_lhs (stmt)); vectype_out = get_vectype_for_scalar_type (lhs_type); + if (!vectype_out) + return false; + nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); - /* Only handle the case of vectors with the same number of elements. - FIXME: We need a way to handle for example the SSE2 cvtpd2dq - instruction which converts V2DFmode to V4SImode but only - using the lower half of the V4SImode result. */ - if (TYPE_VECTOR_SUBPARTS (vectype_in) != TYPE_VECTOR_SUBPARTS (vectype_out)) + /* FORNOW */ + if (nunits_in == nunits_out / 2) + modifier = NARROW; + else if (nunits_out == nunits_in) + modifier = NONE; + else if (nunits_out == nunits_in / 2) + modifier = WIDEN; + else return false; /* For now, we only vectorize functions if a target specific builtin is available. TODO -- in some cases, it might be profitable to insert the calls for pieces of the vector, in order to be able to vectorize other operations in the loop. */ - fndecl = vectorizable_function (operation, vectype_out, vectype_in); + fndecl = vectorizable_function (stmt, vectype_out, vectype_in); if (fndecl == NULL_TREE) { if (vect_print_dump_info (REPORT_DETAILS)) @@ -2348,15 +3330,21 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) gcc_assert (ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS)); - ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo) - / TYPE_VECTOR_SUBPARTS (vectype_out)); + if (modifier == NARROW) + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out; + else + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; + + /* Sanity check: make sure that at least one copy of the vectorized stmt + needs to be generated. */ + gcc_assert (ncopies >= 1); if (!vec_stmt) /* transformation not required. 
*/ { STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "=== vectorizable_call ==="); - vect_model_simple_cost (stmt_info, ncopies); + vect_model_simple_cost (stmt_info, ncopies, dt, NULL); return true; } @@ -2365,58 +3353,120 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "transform operation."); - gcc_assert (ncopies >= 1); - /* Handle def. */ - scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); + scalar_dest = gimple_call_lhs (stmt); vec_dest = vect_create_destination_var (scalar_dest, vectype_out); prev_stmt_info = NULL; - for (j = 0; j < ncopies; ++j) + switch (modifier) { - tree new_stmt, vargs; - tree vec_oprnd[2]; - int n; + case NONE: + for (j = 0; j < ncopies; ++j) + { + /* Build argument list for the vectorized call. */ + if (j == 0) + vargs = VEC_alloc (tree, heap, nargs); + else + VEC_truncate (tree, vargs, 0); - /* Build argument list for the vectorized call. */ - /* FIXME: Rewrite this so that it doesn't construct a temporary - list. */ - vargs = NULL_TREE; - n = -1; - FOR_EACH_CALL_EXPR_ARG (op, iter, operation) + for (i = 0; i < nargs; i++) + { + op = gimple_call_arg (stmt, i); + if (j == 0) + vec_oprnd0 + = vect_get_vec_def_for_operand (op, stmt, NULL); + else + vec_oprnd0 + = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0); + + VEC_quick_push (tree, vargs, vec_oprnd0); + } + + new_stmt = gimple_build_call_vec (fndecl, vargs); + new_temp = make_ssa_name (vec_dest, new_stmt); + gimple_call_set_lhs (new_stmt, new_temp); + + vect_finish_stmt_generation (stmt, new_stmt, gsi); + + if (j == 0) + STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; + else + STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; + + prev_stmt_info = vinfo_for_stmt (new_stmt); + } + + break; + + case NARROW: + for (j = 0; j < ncopies; ++j) { - ++n; + /* Build argument list for the vectorized call. 
*/ + if (j == 0) + vargs = VEC_alloc (tree, heap, nargs * 2); + else + VEC_truncate (tree, vargs, 0); + + for (i = 0; i < nargs; i++) + { + op = gimple_call_arg (stmt, i); + if (j == 0) + { + vec_oprnd0 + = vect_get_vec_def_for_operand (op, stmt, NULL); + vec_oprnd1 + = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0); + } + else + { + vec_oprnd0 + = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd1); + vec_oprnd1 + = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0); + } + + VEC_quick_push (tree, vargs, vec_oprnd0); + VEC_quick_push (tree, vargs, vec_oprnd1); + } + + new_stmt = gimple_build_call_vec (fndecl, vargs); + new_temp = make_ssa_name (vec_dest, new_stmt); + gimple_call_set_lhs (new_stmt, new_temp); + + vect_finish_stmt_generation (stmt, new_stmt, gsi); if (j == 0) - vec_oprnd[n] = vect_get_vec_def_for_operand (op, stmt, NULL); + STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; else - vec_oprnd[n] = vect_get_vec_def_for_stmt_copy (dt[n], vec_oprnd[n]); + STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; - vargs = tree_cons (NULL_TREE, vec_oprnd[n], vargs); + prev_stmt_info = vinfo_for_stmt (new_stmt); } - vargs = nreverse (vargs); - rhs = build_function_call_expr (fndecl, vargs); - new_stmt = build_gimple_modify_stmt (vec_dest, rhs); - new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; + *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); - vect_finish_stmt_generation (stmt, new_stmt, bsi); + break; - if (j == 0) - STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; - else - STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; - prev_stmt_info = vinfo_for_stmt (new_stmt); + case WIDEN: + /* No current target implements this case. */ + return false; } - /* The call in STMT might prevent it from being removed in dce. We however - cannot remove it here, due to the way the ssa name it defines is mapped - to the new definition. So just replace rhs of the statement with something - harmless. */ + VEC_free (tree, heap, vargs); + + /* The call in STMT might prevent it from being removed in dce. + We however cannot remove it here, due to the way the ssa name + it defines is mapped to the new definition. So just replace + rhs of the statement with something harmless. */ + type = TREE_TYPE (scalar_dest); - GIMPLE_STMT_OPERAND (stmt, 1) = fold_convert (type, integer_zero_node); - update_stmt (stmt); + new_stmt = gimple_build_assign (gimple_call_lhs (stmt), + fold_convert (type, integer_zero_node)); + set_vinfo_for_stmt (new_stmt, stmt_info); + set_vinfo_for_stmt (stmt, NULL); + STMT_VINFO_STMT (stmt_info) = new_stmt; + gsi_replace (gsi, new_stmt, false); + SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt; return true; } @@ -2425,20 +3475,20 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) /* Function vect_gen_widened_results_half Create a vector stmt whose code, type, number of arguments, and result - variable are CODE, VECTYPE, OP_TYPE, and VEC_DEST, and its arguments are + variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI. In the case that CODE is a CALL_EXPR, this means that a call to DECL needs to be created (DECL is a function-decl of a target-builtin). STMT is the original scalar stmt that we are vectorizing. 
*/ -static tree -vect_gen_widened_results_half (enum tree_code code, tree vectype, tree decl, +static gimple +vect_gen_widened_results_half (enum tree_code code, + tree decl, tree vec_oprnd0, tree vec_oprnd1, int op_type, - tree vec_dest, block_stmt_iterator *bsi, - tree stmt) + tree vec_dest, gimple_stmt_iterator *gsi, + gimple stmt) { - tree expr; - tree new_stmt; + gimple new_stmt; tree new_temp; tree sym; ssa_op_iter iter; @@ -2448,23 +3498,24 @@ vect_gen_widened_results_half (enum tree_code code, tree vectype, tree decl, { /* Target specific support */ if (op_type == binary_op) - expr = build_call_expr (decl, 2, vec_oprnd0, vec_oprnd1); + new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1); else - expr = build_call_expr (decl, 1, vec_oprnd0); + new_stmt = gimple_build_call (decl, 1, vec_oprnd0); + new_temp = make_ssa_name (vec_dest, new_stmt); + gimple_call_set_lhs (new_stmt, new_temp); } else - { + { /* Generic support */ gcc_assert (op_type == TREE_CODE_LENGTH (code)); - if (op_type == binary_op) - expr = build2 (code, vectype, vec_oprnd0, vec_oprnd1); - else - expr = build1 (code, vectype, vec_oprnd0); + if (op_type != binary_op) + vec_oprnd1 = NULL; + new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0, + vec_oprnd1); + new_temp = make_ssa_name (vec_dest, new_stmt); + gimple_assign_set_lhs (new_stmt, new_temp); } - new_stmt = build_gimple_modify_stmt (vec_dest, expr); - new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, new_stmt, bsi); + vect_finish_stmt_generation (stmt, new_stmt, gsi); if (code == CALL_EXPR) { @@ -2480,20 +3531,17 @@ vect_gen_widened_results_half (enum tree_code code, tree vectype, tree decl, } -/* Function vectorizable_conversion. - -Check if STMT performs a conversion operation, that can be vectorized. -If VEC_STMT is also passed, vectorize the STMT: create a vectorized -stmt to replace it, put it in VEC_STMT, and insert it at BSI. -Return FALSE if not a vectorizable STMT, TRUE otherwise. */ +/* Check if STMT performs a conversion operation, that can be vectorized. + If VEC_STMT is also passed, vectorize the STMT: create a vectorized + stmt to replace it, put it in VEC_STMT, and insert it at BSI. + Return FALSE if not a vectorizable STMT, TRUE otherwise. */ bool -vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, - tree * vec_stmt) +vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, + gimple *vec_stmt, slp_tree slp_node) { tree vec_dest; tree scalar_dest; - tree operation; tree op0; tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); @@ -2501,9 +3549,10 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK; tree decl1 = NULL_TREE, decl2 = NULL_TREE; tree new_temp; - tree def, def_stmt; - enum vect_def_type dt0; - tree new_stmt; + tree def; + gimple def_stmt; + enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; + gimple new_stmt = NULL; stmt_vec_info prev_stmt_info; int nunits_in; int nunits_out; @@ -2513,6 +3562,12 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, tree rhs_type, lhs_type; tree builtin_decl; enum { NARROW, NONE, WIDEN } modifier; + int i; + VEC(tree,heap) *vec_oprnds0 = NULL; + tree vop0; + tree integral_type; + VEC(tree,heap) *dummy = NULL; + int dummy_int; /* Is STMT a vectorizable conversion? 
*/ @@ -2522,34 +3577,29 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) return false; - if (STMT_VINFO_LIVE_P (stmt_info)) - { - /* FORNOW: not yet supported. */ - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "value used after loop."); - return false; - } - - if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT) + if (!is_gimple_assign (stmt)) return false; - if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME) + if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) return false; - operation = GIMPLE_STMT_OPERAND (stmt, 1); - code = TREE_CODE (operation); + code = gimple_assign_rhs_code (stmt); if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR) return false; - /* Check types of lhs and rhs */ - op0 = TREE_OPERAND (operation, 0); + /* Check types of lhs and rhs. */ + op0 = gimple_assign_rhs1 (stmt); rhs_type = TREE_TYPE (op0); vectype_in = get_vectype_for_scalar_type (rhs_type); + if (!vectype_in) + return false; nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); - scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); + scalar_dest = gimple_assign_lhs (stmt); lhs_type = TREE_TYPE (scalar_dest); vectype_out = get_vectype_for_scalar_type (lhs_type); + if (!vectype_out) + return false; nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); /* FORNOW */ @@ -2565,22 +3615,29 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, if (modifier == NONE) gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out); - /* Bail out if the types are both integral or non-integral */ + /* Bail out if the types are both integral or non-integral. */ if ((INTEGRAL_TYPE_P (rhs_type) && INTEGRAL_TYPE_P (lhs_type)) || (!INTEGRAL_TYPE_P (rhs_type) && !INTEGRAL_TYPE_P (lhs_type))) return false; + integral_type = INTEGRAL_TYPE_P (rhs_type) ? vectype_in : vectype_out; + if (modifier == NARROW) ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out; else ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; + /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies + this, so we can safely override NCOPIES with 1 here. */ + if (slp_node) + ncopies = 1; + /* Sanity check: make sure that at least one copy of the vectorized stmt needs to be generated. */ gcc_assert (ncopies >= 1); /* Check the operands of the operation. */ - if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt0)) + if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -2589,22 +3646,28 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, /* Supportable by target? */ if ((modifier == NONE - && !targetm.vectorize.builtin_conversion (code, vectype_in)) + && !targetm.vectorize.builtin_conversion (code, integral_type)) || (modifier == WIDEN && !supportable_widening_operation (code, stmt, vectype_in, &decl1, &decl2, - &code1, &code2)) + &code1, &code2, + &dummy_int, &dummy)) || (modifier == NARROW && !supportable_narrowing_operation (code, stmt, vectype_in, - &code1))) + &code1, &dummy_int, &dummy))) { if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "op not supported by target."); + fprintf (vect_dump, "conversion not supported by target."); return false; } if (modifier != NONE) - STMT_VINFO_VECTYPE (stmt_info) = vectype_in; + { + STMT_VINFO_VECTYPE (stmt_info) = vectype_in; + /* FORNOW: SLP not supported. */ + if (STMT_SLP_TYPE (stmt_info)) + return false; + } if (!vec_stmt) /* transformation not required. 
*/ { @@ -2619,6 +3682,9 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, /* Handle def. */ vec_dest = vect_create_destination_var (scalar_dest, vectype_out); + if (modifier == NONE && !slp_node) + vec_oprnds0 = VEC_alloc (tree, heap, 1); + prev_stmt_info = NULL; switch (modifier) { @@ -2629,24 +3695,28 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, ssa_op_iter iter; if (j == 0) - vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); + vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node); else - vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); + vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL); builtin_decl = - targetm.vectorize.builtin_conversion (code, vectype_in); - new_stmt = build_call_expr (builtin_decl, 1, vec_oprnd0); - - /* Arguments are ready. create the new vector stmt. */ - new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt); - new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, new_stmt, bsi); - FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, SSA_OP_ALL_VIRTUALS) - { - if (TREE_CODE (sym) == SSA_NAME) - sym = SSA_NAME_VAR (sym); - mark_sym_for_renaming (sym); + targetm.vectorize.builtin_conversion (code, integral_type); + for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++) + { + /* Arguments are ready. create the new vector stmt. */ + new_stmt = gimple_build_call (builtin_decl, 1, vop0); + new_temp = make_ssa_name (vec_dest, new_stmt); + gimple_call_set_lhs (new_stmt, new_temp); + vect_finish_stmt_generation (stmt, new_stmt, gsi); + FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, + SSA_OP_ALL_VIRTUALS) + { + if (TREE_CODE (sym) == SSA_NAME) + sym = SSA_NAME_VAR (sym); + mark_sym_for_renaming (sym); + } + if (slp_node) + VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt); } if (j == 0) @@ -2667,15 +3737,15 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, if (j == 0) vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); else - vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); + vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); STMT_VINFO_VECTYPE (stmt_info) = vectype_in; /* Generate first half of the widened result: */ new_stmt - = vect_gen_widened_results_half (code1, vectype_out, decl1, + = vect_gen_widened_results_half (code1, decl1, vec_oprnd0, vec_oprnd1, - unary_op, vec_dest, bsi, stmt); + unary_op, vec_dest, gsi, stmt); if (j == 0) STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; else @@ -2684,9 +3754,9 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, /* Generate second half of the widened result: */ new_stmt - = vect_gen_widened_results_half (code2, vectype_out, decl2, + = vect_gen_widened_results_half (code2, decl2, vec_oprnd0, vec_oprnd1, - unary_op, vec_dest, bsi, stmt); + unary_op, vec_dest, gsi, stmt); STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; prev_stmt_info = vinfo_for_stmt (new_stmt); } @@ -2703,20 +3773,21 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, if (j == 0) { vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); - vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); + vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); } else { - vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd1); - vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); + vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1); + vec_oprnd1 = 
vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); } /* Arguments are ready. Create the new vector stmt. */ expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1); - new_stmt = build_gimple_modify_stmt (vec_dest, expr); + new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0, + vec_oprnd1); new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, new_stmt, bsi); + gimple_assign_set_lhs (new_stmt, new_temp); + vect_finish_stmt_generation (stmt, new_stmt, gsi); if (j == 0) STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; @@ -2728,6 +3799,10 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); } + + if (vec_oprnds0) + VEC_free (tree, heap, vec_oprnds0); + return true; } @@ -2740,20 +3815,32 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, Return FALSE if not a vectorizable STMT, TRUE otherwise. */ bool -vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) +vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi, + gimple *vec_stmt, slp_tree slp_node) { tree vec_dest; tree scalar_dest; tree op; - tree vec_oprnd; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype = STMT_VINFO_VECTYPE (stmt_info); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); tree new_temp; - tree def, def_stmt; - enum vect_def_type dt; + tree def; + gimple def_stmt; + enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; int nunits = TYPE_VECTOR_SUBPARTS (vectype); - int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; + int ncopies; + int i; + VEC(tree,heap) *vec_oprnds = NULL; + tree vop; + + /* Multiple types in SLP are handled by creating the appropriate number of + vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in + case of SLP. */ + if (slp_node) + ncopies = 1; + else + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; gcc_assert (ncopies >= 1); if (ncopies > 1) @@ -2765,24 +3852,21 @@ vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) return false; - /* FORNOW: not yet supported. */ - if (STMT_VINFO_LIVE_P (stmt_info)) - { - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "value used after loop."); - return false; - } - /* Is vectorizable assignment? 
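   I.e., a plain single-rhs copy such as x_1 = y_2 (or its PAREN_EXPR
   variant), which vectorizes into a vector-to-vector copy (vector type
   illustrative):
     VS1: vx.4si_1 = vy.4si_2 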
*/ - if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT) + if (!is_gimple_assign (stmt)) return false; - scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); + scalar_dest = gimple_assign_lhs (stmt); if (TREE_CODE (scalar_dest) != SSA_NAME) return false; - op = GIMPLE_STMT_OPERAND (stmt, 1); - if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt)) + if (gimple_assign_single_p (stmt) + || gimple_assign_rhs_code (stmt) == PAREN_EXPR) + op = gimple_assign_rhs1 (stmt); + else + return false; + + if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt[0])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -2794,7 +3878,7 @@ vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type; if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "=== vectorizable_assignment ==="); - vect_model_simple_cost (stmt_info, ncopies); + vect_model_simple_cost (stmt_info, ncopies, dt, NULL); return true; } @@ -2806,15 +3890,22 @@ vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) vec_dest = vect_create_destination_var (scalar_dest, vectype); /* Handle use. */ - op = GIMPLE_STMT_OPERAND (stmt, 1); - vec_oprnd = vect_get_vec_def_for_operand (op, stmt, NULL); + vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node); /* Arguments are ready. create the new vector stmt. */ - *vec_stmt = build_gimple_modify_stmt (vec_dest, vec_oprnd); - new_temp = make_ssa_name (vec_dest, *vec_stmt); - GIMPLE_STMT_OPERAND (*vec_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, *vec_stmt, bsi); + for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++) + { + *vec_stmt = gimple_build_assign (vec_dest, vop); + new_temp = make_ssa_name (vec_dest, *vec_stmt); + gimple_assign_set_lhs (*vec_stmt, new_temp); + vect_finish_stmt_generation (stmt, *vec_stmt, gsi); + STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt; + + if (slp_node) + VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), *vec_stmt); + } + VEC_free (tree, heap, vec_oprnds); return true; } @@ -2854,32 +3945,36 @@ vect_min_worthwhile_factor (enum tree_code code) Return FALSE if not a vectorizable STMT, TRUE otherwise. */ bool -vectorizable_induction (tree phi, block_stmt_iterator *bsi ATTRIBUTE_UNUSED, - tree *vec_stmt) +vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, + gimple *vec_stmt) { stmt_vec_info stmt_info = vinfo_for_stmt (phi); tree vectype = STMT_VINFO_VECTYPE (stmt_info); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); int nunits = TYPE_VECTOR_SUBPARTS (vectype); int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; tree vec_def; gcc_assert (ncopies >= 1); + /* FORNOW. This restriction should be relaxed. */ + if (nested_in_vect_loop_p (loop, phi) && ncopies > 1) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "multiple types in nested loop."); + return false; + } if (!STMT_VINFO_RELEVANT_P (stmt_info)) return false; - gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def); + /* FORNOW: SLP not supported. */ + if (STMT_SLP_TYPE (stmt_info)) + return false; - if (STMT_VINFO_LIVE_P (stmt_info)) - { - /* FORNOW: not yet supported. 
*/ - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "value used after loop."); - return false; - } + gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def); - if (TREE_CODE (phi) != PHI_NODE) + if (gimple_code (phi) != GIMPLE_PHI) return false; if (!vec_stmt) /* transformation not required. */ @@ -2910,13 +4005,13 @@ vectorizable_induction (tree phi, block_stmt_iterator *bsi ATTRIBUTE_UNUSED, Return FALSE if not a vectorizable STMT, TRUE otherwise. */ bool -vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) +vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi, + gimple *vec_stmt, slp_tree slp_node) { tree vec_dest; tree scalar_dest; - tree operation; tree op0, op1 = NULL; - tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; + tree vec_oprnd1 = NULL_TREE; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype = STMT_VINFO_VECTYPE (stmt_info); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); @@ -2927,15 +4022,29 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) optab optab; int icode; enum machine_mode optab_op2_mode; - tree def, def_stmt; - enum vect_def_type dt0, dt1; - tree new_stmt; + tree def; + gimple def_stmt; + enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; + gimple new_stmt = NULL; stmt_vec_info prev_stmt_info; int nunits_in = TYPE_VECTOR_SUBPARTS (vectype); int nunits_out; tree vectype_out; - int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; - int j; + int ncopies; + int j, i; + VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL; + tree vop0, vop1; + unsigned int k; + bool shift_p = false; + bool scalar_shift_arg = false; + + /* Multiple types in SLP are handled by creating the appropriate number of + vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in + case of SLP. */ + if (slp_node) + ncopies = 1; + else + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; gcc_assert (ncopies >= 1); @@ -2945,33 +4054,30 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) return false; - /* FORNOW: not yet supported. */ - if (STMT_VINFO_LIVE_P (stmt_info)) - { - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "value used after loop."); - return false; - } - /* Is STMT a vectorizable binary/unary operation? */ - if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT) + if (!is_gimple_assign (stmt)) return false; - if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME) + if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) return false; - scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); + scalar_dest = gimple_assign_lhs (stmt); vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest)); + if (!vectype_out) + return false; nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); if (nunits_out != nunits_in) return false; - operation = GIMPLE_STMT_OPERAND (stmt, 1); - code = TREE_CODE (operation); - optab = optab_for_tree_code (code, vectype); + code = gimple_assign_rhs_code (stmt); + + /* For pointer addition, we should use the normal plus for + the vector addition. */ + if (code == POINTER_PLUS_EXPR) + code = PLUS_EXPR; /* Support only unary or binary operations. 
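   E.g., a binary PLUS_EXPR (two operands) or a unary NEGATE_EXPR (one
   operand); rhs codes with any other arity are rejected just below. 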
*/ - op_type = TREE_OPERAND_LENGTH (operation); + op_type = TREE_CODE_LENGTH (code); if (op_type != unary_op && op_type != binary_op) { if (vect_print_dump_info (REPORT_DETAILS)) @@ -2979,8 +4085,8 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) return false; } - op0 = TREE_OPERAND (operation, 0); - if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt0)) + op0 = gimple_assign_rhs1 (stmt); + if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -2989,8 +4095,8 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (op_type == binary_op) { - op1 = TREE_OPERAND (operation, 1); - if (!vect_is_simple_use (op1, loop_vinfo, &def_stmt, &def, &dt1)) + op1 = gimple_assign_rhs2 (stmt); + if (!vect_is_simple_use (op1, loop_vinfo, &def_stmt, &def, &dt[1])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -2998,6 +4104,56 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) } } + /* If this is a shift/rotate, determine whether the shift amount is a vector, + or scalar. If the shift/rotate amount is a vector, use the vector/vector + shift optabs. */ + if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR + || code == RROTATE_EXPR) + { + shift_p = true; + + /* vector shifted by vector */ + if (dt[1] == vect_loop_def) + { + optab = optab_for_tree_code (code, vectype, optab_vector); + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "vector/vector shift/rotate found."); + } + + /* See if the machine has a vector shifted by scalar insn and if not + then see if it has a vector shifted by vector insn */ + else if (dt[1] == vect_constant_def || dt[1] == vect_invariant_def) + { + optab = optab_for_tree_code (code, vectype, optab_scalar); + if (optab + && (optab_handler (optab, TYPE_MODE (vectype))->insn_code + != CODE_FOR_nothing)) + { + scalar_shift_arg = true; + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "vector/scalar shift/rotate found."); + } + else + { + optab = optab_for_tree_code (code, vectype, optab_vector); + if (vect_print_dump_info (REPORT_DETAILS) + && optab + && (optab_handler (optab, TYPE_MODE (vectype))->insn_code + != CODE_FOR_nothing)) + fprintf (vect_dump, "vector/vector shift/rotate found."); + } + } + + else + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "operand mode requires invariant argument."); + return false; + } + } + else + optab = optab_for_tree_code (code, vectype, optab_default); + /* Supportable by target? */ if (!optab) { @@ -3006,54 +4162,38 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) return false; } vec_mode = TYPE_MODE (vectype); - icode = (int) optab->handlers[(int) vec_mode].insn_code; + icode = (int) optab_handler (optab, vec_mode)->insn_code; if (icode == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "op not supported by target."); + /* Check only during analysis. */ if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD - || LOOP_VINFO_VECT_FACTOR (loop_vinfo) - < vect_min_worthwhile_factor (code)) + || (LOOP_VINFO_VECT_FACTOR (loop_vinfo) + < vect_min_worthwhile_factor (code) + && !vec_stmt)) return false; if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "proceeding using word mode."); } - /* Worthwhile without SIMD support? */ + /* Worthwhile without SIMD support? 
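   (E.g., emulating a vector add with plain word-mode arithmetic only
   pays off once the vectorization factor reaches the threshold that
   vect_min_worthwhile_factor returns for this code.) 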
Check only during analysis. */ if (!VECTOR_MODE_P (TYPE_MODE (vectype)) && LOOP_VINFO_VECT_FACTOR (loop_vinfo) - < vect_min_worthwhile_factor (code)) + < vect_min_worthwhile_factor (code) + && !vec_stmt) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "not worthwhile without SIMD support."); return false; } - if (code == LSHIFT_EXPR || code == RSHIFT_EXPR) - { - /* FORNOW: not yet supported. */ - if (!VECTOR_MODE_P (vec_mode)) - return false; - - /* Invariant argument is needed for a vector shift - by a scalar shift operand. */ - optab_op2_mode = insn_data[icode].operand[2].mode; - if (! (VECTOR_MODE_P (optab_op2_mode) - || dt1 == vect_constant_def - || dt1 == vect_invariant_def)) - { - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "operand mode requires invariant argument."); - return false; - } - } - if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = op_vec_info_type; if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "=== vectorizable_operation ==="); - vect_model_simple_cost (stmt_info, ncopies); + vect_model_simple_cost (stmt_info, ncopies, dt, NULL); return true; } @@ -3065,6 +4205,22 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) /* Handle def. */ vec_dest = vect_create_destination_var (scalar_dest, vectype); + /* Allocate VECs for vector operands. In case of SLP, vector operands are + created in the previous stages of the recursion, so no allocation is + needed, except for the case of shift with scalar shift argument. In that + case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to + be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE. + In case of loop-based vectorization we allocate VECs of size 1. We + allocate VEC_OPRNDS1 only in case of binary operation. */ + if (!slp_node) + { + vec_oprnds0 = VEC_alloc (tree, heap, 1); + if (op_type == binary_op) + vec_oprnds1 = VEC_alloc (tree, heap, 1); + } + else if (scalar_shift_arg) + vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size); + /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the @@ -3124,45 +4280,60 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) /* Handle uses. */ if (j == 0) { - vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); - if (op_type == binary_op) + if (op_type == binary_op && scalar_shift_arg) { - if (code == LSHIFT_EXPR || code == RSHIFT_EXPR) - { - /* Vector shl and shr insn patterns can be defined with - scalar operand 2 (shift operand). In this case, use - constant or loop invariant op1 directly, without - extending it to vector mode first. */ - optab_op2_mode = insn_data[icode].operand[2].mode; - if (!VECTOR_MODE_P (optab_op2_mode)) + /* Vector shl and shr insn patterns can be defined with scalar + operand 2 (shift operand). In this case, use constant or loop + invariant op1 directly, without extending it to vector mode + first. 
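   For instance, in x_1 = y_2 << 3 the constant 3 can stay a scalar
   operand if the target's shift pattern takes a scalar operand 2; the
   optab_op2_mode test below makes exactly that distinction. 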
*/ + optab_op2_mode = insn_data[icode].operand[2].mode; + if (!VECTOR_MODE_P (optab_op2_mode)) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "operand 1 using scalar mode."); + vec_oprnd1 = op1; + VEC_quick_push (tree, vec_oprnds1, vec_oprnd1); + if (slp_node) { - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "operand 1 using scalar mode."); - vec_oprnd1 = op1; + /* Store vec_oprnd1 for every vector stmt to be created + for SLP_NODE. We check during the analysis that all the + shift arguments are the same. + TODO: Allow different constants for different vector + stmts generated for an SLP instance. */ + for (k = 0; k < slp_node->vec_stmts_size - 1; k++) + VEC_quick_push (tree, vec_oprnds1, vec_oprnd1); } - } - if (!vec_oprnd1) - vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL); + } } + + /* vec_oprnd1 is available if operand 1 should be of a scalar-type + (a special case for certain kind of vector shifts); otherwise, + operand 1 should be of a vector type (the usual case). */ + if (op_type == binary_op && !vec_oprnd1) + vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1, + slp_node); + else + vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, + slp_node); } else - { - vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); - if (op_type == binary_op) - vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt1, vec_oprnd1); - } + vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1); - /* Arguments are ready. create the new vector stmt. */ + /* Arguments are ready. Create the new vector stmt. */ + for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++) + { + vop1 = ((op_type == binary_op) + ? VEC_index (tree, vec_oprnds1, i) : NULL); + new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1); + new_temp = make_ssa_name (vec_dest, new_stmt); + gimple_assign_set_lhs (new_stmt, new_temp); + vect_finish_stmt_generation (stmt, new_stmt, gsi); + if (slp_node) + VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt); + } - if (op_type == binary_op) - new_stmt = build_gimple_modify_stmt (vec_dest, - build2 (code, vectype, vec_oprnd0, vec_oprnd1)); - else - new_stmt = build_gimple_modify_stmt (vec_dest, - build1 (code, vectype, vec_oprnd0)); - new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, new_stmt, bsi); + if (slp_node) + continue; if (j == 0) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; @@ -3171,10 +4342,117 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) prev_stmt_info = vinfo_for_stmt (new_stmt); } + VEC_free (tree, heap, vec_oprnds0); + if (vec_oprnds1) + VEC_free (tree, heap, vec_oprnds1); + return true; } +/* Get vectorized definitions for loop-based vectorization. For the first + operand we call vect_get_vec_def_for_operand() (with OPRND containing + scalar operand), and for the rest we get a copy with + vect_get_vec_def_for_stmt_copy() using the previous vector definition + (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details. + The vectors are collected into VEC_OPRNDS. */ + +static void +vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt, + VEC (tree, heap) **vec_oprnds, int multi_step_cvt) +{ + tree vec_oprnd; + + /* Get first vector operand. */ + /* All the vector operands except the very first one (that is scalar oprnd) + are stmt copies. 
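   E.g., for a scalar operand X and MULTI_STEP_CVT == 1 this pushes the
   chain (names illustrative):
     vx0 = vect_get_vec_def_for_operand (x);  vx1 = copy of vx0;
     vx2 = copy of vx1;                       vx3 = copy of vx2;
   i.e., two operands per recursion level. 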
*/ + if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE) + vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL); + else + vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd); + + VEC_quick_push (tree, *vec_oprnds, vec_oprnd); + + /* Get second vector operand. */ + vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd); + VEC_quick_push (tree, *vec_oprnds, vec_oprnd); + + *oprnd = vec_oprnd; + + /* For conversion in multiple steps, continue to get operands + recursively. */ + if (multi_step_cvt) + vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1); +} + + +/* Create vectorized demotion statements for vector operands from VEC_OPRNDS. + For multi-step conversions store the resulting vectors and call the function + recursively. */ + +static void +vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds, + int multi_step_cvt, gimple stmt, + VEC (tree, heap) *vec_dsts, + gimple_stmt_iterator *gsi, + slp_tree slp_node, enum tree_code code, + stmt_vec_info *prev_stmt_info) +{ + unsigned int i; + tree vop0, vop1, new_tmp, vec_dest; + gimple new_stmt; + stmt_vec_info stmt_info = vinfo_for_stmt (stmt); + + vec_dest = VEC_pop (tree, vec_dsts); + + for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2) + { + /* Create demotion operation. */ + vop0 = VEC_index (tree, *vec_oprnds, i); + vop1 = VEC_index (tree, *vec_oprnds, i + 1); + new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1); + new_tmp = make_ssa_name (vec_dest, new_stmt); + gimple_assign_set_lhs (new_stmt, new_tmp); + vect_finish_stmt_generation (stmt, new_stmt, gsi); + + if (multi_step_cvt) + /* Store the resulting vector for next recursive call. */ + VEC_replace (tree, *vec_oprnds, i/2, new_tmp); + else + { + /* This is the last step of the conversion sequence. Store the + vectors in SLP_NODE or in vector info of the scalar statement + (or in STMT_VINFO_RELATED_STMT chain). */ + if (slp_node) + VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt); + else + { + if (!*prev_stmt_info) + STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; + else + STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt; + + *prev_stmt_info = vinfo_for_stmt (new_stmt); + } + } + } + + /* For multi-step demotion operations we first generate demotion operations + from the source type to the intermediate types, and then combine the + results (stored in VEC_OPRNDS) in a demotion operation to the destination + type. */ + if (multi_step_cvt) + { + /* At each level of recursion we have half of the operands we had at the + previous level. */ + VEC_truncate (tree, *vec_oprnds, (i+1)/2); + vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1, + stmt, vec_dsts, gsi, slp_node, + code, prev_stmt_info); + } +} + + /* Function vectorizable_type_demotion Check if STMT performs a binary or unary operation that involves type demotion, and if it can be vectorized. Return FALSE if not a vectorizable STMT, TRUE otherwise. 
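   E.g., short_1 = (short) int_2 packs two V4SI operand vectors into one
   V8HI result per vector stmt; a char <- int demotion needs an
   intermediate short step and is handled as a multi-step conversion
   (vector types here are illustrative). 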
*/ bool -vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, - tree *vec_stmt) +vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi, + gimple *vec_stmt, slp_tree slp_node) { tree vec_dest; tree scalar_dest; - tree operation; tree op0; - tree vec_oprnd0=NULL, vec_oprnd1=NULL; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); enum tree_code code, code1 = ERROR_MARK; - tree new_temp; - tree def, def_stmt; - enum vect_def_type dt0; - tree new_stmt; + tree def; + gimple def_stmt; + enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; stmt_vec_info prev_stmt_info; int nunits_in; int nunits_out; tree vectype_out; int ncopies; - int j; - tree expr; + int j, i; tree vectype_in; + int multi_step_cvt = 0; + VEC (tree, heap) *vec_oprnds0 = NULL; + VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL; + tree last_oprnd, intermediate_type; if (!STMT_VINFO_RELEVANT_P (stmt_info)) return false; @@ -3214,48 +4492,50 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) return false; - /* FORNOW: not yet supported. */ - if (STMT_VINFO_LIVE_P (stmt_info)) - { - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "value used after loop."); - return false; - } - /* Is STMT a vectorizable type-demotion operation? */ - if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT) + if (!is_gimple_assign (stmt)) return false; - if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME) + if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) return false; - operation = GIMPLE_STMT_OPERAND (stmt, 1); - code = TREE_CODE (operation); - if (code != NOP_EXPR && code != CONVERT_EXPR) + code = gimple_assign_rhs_code (stmt); + if (!CONVERT_EXPR_CODE_P (code)) return false; - op0 = TREE_OPERAND (operation, 0); + op0 = gimple_assign_rhs1 (stmt); vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0)); + if (!vectype_in) + return false; nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); - scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); + scalar_dest = gimple_assign_lhs (stmt); vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest)); + if (!vectype_out) + return false; nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); - if (nunits_in != nunits_out / 2) /* FORNOW */ + if (nunits_in >= nunits_out) return false; - ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out; + /* Multiple types in SLP are handled by creating the appropriate number of + vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in + case of SLP. */ + if (slp_node) + ncopies = 1; + else + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out; + gcc_assert (ncopies >= 1); if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest)) && INTEGRAL_TYPE_P (TREE_TYPE (op0))) || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest)) && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0)) - && (code == NOP_EXPR || code == CONVERT_EXPR)))) + && CONVERT_EXPR_CODE_P (code)))) return false; /* Check the operands of the operation. */ - if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt0)) + if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -3263,7 +4543,8 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, } /* Supportable by target? 
*/ - if (!supportable_narrowing_operation (code, stmt, vectype_in, &code1)) + if (!supportable_narrowing_operation (code, stmt, vectype_in, &code1, + &multi_step_cvt, &interm_types)) return false; STMT_VINFO_VECTYPE (stmt_info) = vectype_in; @@ -3273,7 +4554,7 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type; if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "=== vectorizable_demotion ==="); - vect_model_simple_cost (stmt_info, ncopies); + vect_model_simple_cost (stmt_info, ncopies, dt, NULL); return true; } @@ -3282,45 +4563,157 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, fprintf (vect_dump, "transform type demotion operation. ncopies = %d.", ncopies); - /* Handle def. */ + /* In case of multi-step demotion, we first generate demotion operations to + the intermediate types, and then from those types to the final one. + We create vector destinations for the intermediate type (TYPES) received + from supportable_narrowing_operation, and store them in the correct order + for future use in vect_create_vectorized_demotion_stmts(). */ + if (multi_step_cvt) + vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1); + else + vec_dsts = VEC_alloc (tree, heap, 1); + vec_dest = vect_create_destination_var (scalar_dest, vectype_out); - + VEC_quick_push (tree, vec_dsts, vec_dest); + + if (multi_step_cvt) + { + for (i = VEC_length (tree, interm_types) - 1; + VEC_iterate (tree, interm_types, i, intermediate_type); i--) + { + vec_dest = vect_create_destination_var (scalar_dest, + intermediate_type); + VEC_quick_push (tree, vec_dsts, vec_dest); + } + } + /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. */ + last_oprnd = op0; prev_stmt_info = NULL; for (j = 0; j < ncopies; j++) { /* Handle uses. */ - if (j == 0) - { - vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); - vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); - } + if (slp_node) + vect_get_slp_defs (slp_node, &vec_oprnds0, NULL); else - { - vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd1); - vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); - } + { + VEC_free (tree, heap, vec_oprnds0); + vec_oprnds0 = VEC_alloc (tree, heap, + (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2)); + vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0, + vect_pow2 (multi_step_cvt) - 1); + } - /* Arguments are ready. Create the new vector stmt. */ - expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1); - new_stmt = build_gimple_modify_stmt (vec_dest, expr); - new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, new_stmt, bsi); + /* Arguments are ready. Create the new vector stmts. 
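   Each demotion step below consumes the operands pairwise, e.g.
   (illustrative):
     VS1: vres.8hi = VEC_PACK_TRUNC_EXPR <vx0.4si, vx1.4si>
   and for a multi-step conversion the results are fed back in as the
   operands of the next, narrower step. 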
*/ + tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts); + vect_create_vectorized_demotion_stmts (&vec_oprnds0, + multi_step_cvt, stmt, tmp_vec_dsts, + gsi, slp_node, code1, + &prev_stmt_info); + } - if (j == 0) - STMT_VINFO_VEC_STMT (stmt_info) = new_stmt + VEC_free (tree, heap, vec_oprnds0); + VEC_free (tree, heap, vec_dsts); + VEC_free (tree, heap, tmp_vec_dsts); + VEC_free (tree, heap, interm_types); + + *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); + return true; +} + + +/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0 + and VEC_OPRNDS1 (for binary operations). For multi-step conversions store + the resulting vectors and call the function recursively. */ + +static void +vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0, + VEC (tree, heap) **vec_oprnds1, + int multi_step_cvt, gimple stmt, + VEC (tree, heap) *vec_dsts, + gimple_stmt_iterator *gsi, + slp_tree slp_node, enum tree_code code1, + enum tree_code code2, tree decl1, + tree decl2, int op_type, + stmt_vec_info *prev_stmt_info) +{ + int i; + tree vop0, vop1, new_tmp1, new_tmp2, vec_dest; + gimple new_stmt1, new_stmt2; + stmt_vec_info stmt_info = vinfo_for_stmt (stmt); + VEC (tree, heap) *vec_tmp; + + vec_dest = VEC_pop (tree, vec_dsts); + vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2); + + for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++) + { + if (op_type == binary_op) + vop1 = VEC_index (tree, *vec_oprnds1, i); else - vop1 = NULL_TREE; + + /* Generate the two halves of promotion operation. */ + new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1, + op_type, vec_dest, gsi, stmt); + new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1, + op_type, vec_dest, gsi, stmt); + if (is_gimple_call (new_stmt1)) + { + new_tmp1 = gimple_call_lhs (new_stmt1); + new_tmp2 = gimple_call_lhs (new_stmt2); + } + else + { + new_tmp1 = gimple_assign_lhs (new_stmt1); + new_tmp2 = gimple_assign_lhs (new_stmt2); + } - if (j == 0) - STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; + if (multi_step_cvt) + { + /* Store the results for the recursive call. */ + VEC_quick_push (tree, vec_tmp, new_tmp1); + VEC_quick_push (tree, vec_tmp, new_tmp2); + } + else + { + /* Last step of promotion sequence - store the results. */ + if (slp_node) + { + VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1); + VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2); + } + else + { + if (!*prev_stmt_info) + STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1; + else + STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1; + + *prev_stmt_info = vinfo_for_stmt (new_stmt1); + STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2; + *prev_stmt_info = vinfo_for_stmt (new_stmt2); + } + } } - *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); - return true; + if (multi_step_cvt) + { + /* For multi-step promotion operations we call the + function recursively for every stage. We start from the input type, + create promotion operations to the intermediate types, and then + create promotions to the output type. 
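   E.g., a char -> int promotion proceeds as char -> short -> int; each
   stage doubles the element width and thus doubles the number of result
   vectors (illustratively, one V16QI -> two V8HI -> four V4SI). 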
*/ + *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp); + VEC_free (tree, heap, vec_tmp); + vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1, + multi_step_cvt - 1, stmt, + vec_dsts, gsi, slp_node, code1, + code2, decl1, decl2, op_type, + prev_stmt_info); + } } @@ -3333,12 +4726,11 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, Return FALSE if not a vectorizable STMT, TRUE otherwise. */ bool -vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, - tree *vec_stmt) +vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi, + gimple *vec_stmt, slp_tree slp_node) { tree vec_dest; tree scalar_dest; - tree operation; tree op0, op1 = NULL; tree vec_oprnd0=NULL, vec_oprnd1=NULL; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); @@ -3346,16 +4738,20 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK; tree decl1 = NULL_TREE, decl2 = NULL_TREE; int op_type; - tree def, def_stmt; - enum vect_def_type dt0, dt1; - tree new_stmt; + tree def; + gimple def_stmt; + enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; stmt_vec_info prev_stmt_info; int nunits_in; int nunits_out; tree vectype_out; int ncopies; - int j; + int j, i; tree vectype_in; + tree intermediate_type = NULL_TREE; + int multi_step_cvt = 0; + VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL; + VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL; if (!STMT_VINFO_RELEVANT_P (stmt_info)) return false; @@ -3363,49 +4759,51 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) return false; - /* FORNOW: not yet supported. */ - if (STMT_VINFO_LIVE_P (stmt_info)) - { - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "value used after loop."); - return false; - } - /* Is STMT a vectorizable type-promotion operation? */ - if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT) + if (!is_gimple_assign (stmt)) return false; - if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME) + if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) return false; - operation = GIMPLE_STMT_OPERAND (stmt, 1); - code = TREE_CODE (operation); - if (code != NOP_EXPR && code != CONVERT_EXPR + code = gimple_assign_rhs_code (stmt); + if (!CONVERT_EXPR_CODE_P (code) && code != WIDEN_MULT_EXPR) return false; - op0 = TREE_OPERAND (operation, 0); + op0 = gimple_assign_rhs1 (stmt); vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0)); + if (!vectype_in) + return false; nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); - scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); + scalar_dest = gimple_assign_lhs (stmt); vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest)); + if (!vectype_out) + return false; nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); - if (nunits_out != nunits_in / 2) /* FORNOW */ + if (nunits_in <= nunits_out) return false; - ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; + /* Multiple types in SLP are handled by creating the appropriate number of + vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in + case of SLP. */ + if (slp_node) + ncopies = 1; + else + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; + gcc_assert (ncopies >= 1); if (! 
((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest)) && INTEGRAL_TYPE_P (TREE_TYPE (op0))) || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest)) && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0)) - && (code == CONVERT_EXPR || code == NOP_EXPR)))) + && CONVERT_EXPR_CODE_P (code)))) return false; /* Check the operands of the operation. */ - if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt0)) + if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -3415,8 +4813,8 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, op_type = TREE_CODE_LENGTH (code); if (op_type == binary_op) { - op1 = TREE_OPERAND (operation, 1); - if (!vect_is_simple_use (op1, loop_vinfo, &def_stmt, &def, &dt1)) + op1 = gimple_assign_rhs2 (stmt); + if (!vect_is_simple_use (op1, loop_vinfo, &def_stmt, &def, &dt[1])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -3426,9 +4824,14 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, /* Supportable by target? */ if (!supportable_widening_operation (code, stmt, vectype_in, - &decl1, &decl2, &code1, &code2)) + &decl1, &decl2, &code1, &code2, + &multi_step_cvt, &interm_types)) return false; + /* Binary widening operation can only be supported directly by the + architecture. */ + gcc_assert (!(multi_step_cvt && op_type == binary_op)); + STMT_VINFO_VECTYPE (stmt_info) = vectype_in; if (!vec_stmt) /* transformation not required. */ @@ -3436,7 +4839,7 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type; if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "=== vectorizable_promotion ==="); - vect_model_simple_cost (stmt_info, 2*ncopies); + vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL); return true; } @@ -3447,7 +4850,38 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, ncopies); /* Handle def. */ + /* In case of multi-step promotion, we first generate promotion operations + to the intermediate types, and then from those types to the final one. + We store the vector destinations in VEC_DSTS in the correct order for + recursive creation of promotion operations in + vect_create_vectorized_promotion_stmts(). Vector destinations are created + according to TYPES received from supportable_widening_operation(). */ + if (multi_step_cvt) + vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1); + else + vec_dsts = VEC_alloc (tree, heap, 1); + vec_dest = vect_create_destination_var (scalar_dest, vectype_out); + VEC_quick_push (tree, vec_dsts, vec_dest); + + if (multi_step_cvt) + { + for (i = VEC_length (tree, interm_types) - 1; + VEC_iterate (tree, interm_types, i, intermediate_type); i--) + { + vec_dest = vect_create_destination_var (scalar_dest, + intermediate_type); + VEC_quick_push (tree, vec_dsts, vec_dest); + } + } + + if (!slp_node) + { + vec_oprnds0 = VEC_alloc (tree, heap, + (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1)); + if (op_type == binary_op) + vec_oprnds1 = VEC_alloc (tree, heap, 1); + } /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate @@ -3460,38 +4894,46 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, /* Handle uses. 
*/ if (j == 0) { - vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); - if (op_type == binary_op) - vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL); + if (slp_node) + vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1); + else + { + vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); + VEC_quick_push (tree, vec_oprnds0, vec_oprnd0); + if (op_type == binary_op) + { + vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL); + VEC_quick_push (tree, vec_oprnds1, vec_oprnd1); + } + } } else { - vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); - if (op_type == binary_op) - vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt1, vec_oprnd1); + vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); + VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0); + if (op_type == binary_op) + { + vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1); + VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1); + } } - /* Arguments are ready. Create the new vector stmt. We are creating - two vector defs because the widened result does not fit in one vector. - The vectorized stmt can be expressed as a call to a taregt builtin, - or a using a tree-code. */ - /* Generate first half of the widened result: */ - new_stmt = vect_gen_widened_results_half (code1, vectype_out, decl1, - vec_oprnd0, vec_oprnd1, op_type, vec_dest, bsi, stmt); - if (j == 0) - STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; - else - STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; - prev_stmt_info = vinfo_for_stmt (new_stmt); - - /* Generate second half of the widened result: */ - new_stmt = vect_gen_widened_results_half (code2, vectype_out, decl2, - vec_oprnd0, vec_oprnd1, op_type, vec_dest, bsi, stmt); - STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; - prev_stmt_info = vinfo_for_stmt (new_stmt); - + /* Arguments are ready. Create the new vector stmts. */ + tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts); + vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1, + multi_step_cvt, stmt, + tmp_vec_dsts, + gsi, slp_node, code1, code2, + decl1, decl2, op_type, + &prev_stmt_info); } + VEC_free (tree, heap, vec_dsts); + VEC_free (tree, heap, tmp_vec_dsts); + VEC_free (tree, heap, interm_types); + VEC_free (tree, heap, vec_oprnds0); + VEC_free (tree, heap, vec_oprnds1); + *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); return true; } @@ -3512,9 +4954,9 @@ vect_strided_store_supported (tree vectype) /* Check that the operation is supported. 
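   I.e., the target must provide optab handlers for both
   VEC_INTERLEAVE_HIGH_EXPR and VEC_INTERLEAVE_LOW_EXPR in this vector
   mode; e.g. interleaving {a0,a1,a2,a3} with {b0,b1,b2,b3} yields
   {a0,b0,a1,b1} and {a2,b2,a3,b3}. 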
*/ interleave_high_optab = optab_for_tree_code (VEC_INTERLEAVE_HIGH_EXPR, - vectype); + vectype, optab_default); interleave_low_optab = optab_for_tree_code (VEC_INTERLEAVE_LOW_EXPR, - vectype); + vectype, optab_default); if (!interleave_high_optab || !interleave_low_optab) { if (vect_print_dump_info (REPORT_DETAILS)) @@ -3522,15 +4964,16 @@ vect_strided_store_supported (tree vectype) return false; } - if (interleave_high_optab->handlers[(int) mode].insn_code + if (optab_handler (interleave_high_optab, mode)->insn_code == CODE_FOR_nothing - || interleave_low_optab->handlers[(int) mode].insn_code + || optab_handler (interleave_low_optab, mode)->insn_code == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "interleave op not supported by target."); return false; } + return true; } @@ -3599,20 +5042,19 @@ vect_strided_store_supported (tree vectype) static bool vect_permute_store_chain (VEC(tree,heap) *dr_chain, unsigned int length, - tree stmt, - block_stmt_iterator *bsi, + gimple stmt, + gimple_stmt_iterator *gsi, VEC(tree,heap) **result_chain) { - tree perm_dest, perm_stmt, vect1, vect2, high, low; + tree perm_dest, vect1, vect2, high, low; + gimple perm_stmt; tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)); - tree scalar_dest, tmp; + tree scalar_dest; int i; unsigned int j; - VEC(tree,heap) *first, *second; + enum tree_code high_code, low_code; - scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); - first = VEC_alloc (tree, heap, length/2); - second = VEC_alloc (tree, heap, length/2); + scalar_dest = gimple_assign_lhs (stmt); /* Check that the operation is supported. */ if (!vect_strided_store_supported (vectype)) @@ -3636,13 +5078,20 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain, DECL_GIMPLE_REG_P (perm_dest) = 1; add_referenced_var (perm_dest); if (BYTES_BIG_ENDIAN) - tmp = build2 (VEC_INTERLEAVE_HIGH_EXPR, vectype, vect1, vect2); + { + high_code = VEC_INTERLEAVE_HIGH_EXPR; + low_code = VEC_INTERLEAVE_LOW_EXPR; + } else - tmp = build2 (VEC_INTERLEAVE_LOW_EXPR, vectype, vect1, vect2); - perm_stmt = build_gimple_modify_stmt (perm_dest, tmp); + { + low_code = VEC_INTERLEAVE_HIGH_EXPR; + high_code = VEC_INTERLEAVE_LOW_EXPR; + } + perm_stmt = gimple_build_assign_with_ops (high_code, perm_dest, + vect1, vect2); high = make_ssa_name (perm_dest, perm_stmt); - GIMPLE_STMT_OPERAND (perm_stmt, 0) = high; - vect_finish_stmt_generation (stmt, perm_stmt, bsi); + gimple_assign_set_lhs (perm_stmt, high); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); VEC_replace (tree, *result_chain, 2*j, high); /* Create interleaving stmt: @@ -3653,14 +5102,11 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain, perm_dest = create_tmp_var (vectype, "vect_inter_low"); DECL_GIMPLE_REG_P (perm_dest) = 1; add_referenced_var (perm_dest); - if (BYTES_BIG_ENDIAN) - tmp = build2 (VEC_INTERLEAVE_LOW_EXPR, vectype, vect1, vect2); - else - tmp = build2 (VEC_INTERLEAVE_HIGH_EXPR, vectype, vect1, vect2); - perm_stmt = build_gimple_modify_stmt (perm_dest, tmp); + perm_stmt = gimple_build_assign_with_ops (low_code, perm_dest, + vect1, vect2); low = make_ssa_name (perm_dest, perm_stmt); - GIMPLE_STMT_OPERAND (perm_stmt, 0) = low; - vect_finish_stmt_generation (stmt, perm_stmt, bsi); + gimple_assign_set_lhs (perm_stmt, low); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); VEC_replace (tree, *result_chain, 2*j+1, low); } dr_chain = VEC_copy (tree, heap, *result_chain); @@ -3678,7 +5124,8 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain, Return FALSE if not a vectorizable 
STMT, TRUE otherwise. */ bool -vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) +vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, + slp_tree slp_node) { tree scalar_dest; tree data_ref; @@ -3688,49 +5135,65 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL; tree vectype = STMT_VINFO_VECTYPE (stmt_info); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); enum machine_mode vec_mode; tree dummy; - enum dr_alignment_support alignment_support_cheme; - ssa_op_iter iter; - def_operand_p def_p; - tree def, def_stmt; + enum dr_alignment_support alignment_support_scheme; + tree def; + gimple def_stmt; enum vect_def_type dt; stmt_vec_info prev_stmt_info = NULL; tree dataref_ptr = NULL_TREE; int nunits = TYPE_VECTOR_SUBPARTS (vectype); - int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; + int ncopies; int j; - tree next_stmt, first_stmt; + gimple next_stmt, first_stmt = NULL; bool strided_store = false; unsigned int group_size, i; VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL; - gcc_assert (ncopies >= 1); - - if (!STMT_VINFO_RELEVANT_P (stmt_info)) - return false; + bool inv_p; + VEC(tree,heap) *vec_oprnds = NULL; + bool slp = (slp_node != NULL); + stmt_vec_info first_stmt_vinfo; + unsigned int vec_num; + + /* Multiple types in SLP are handled by creating the appropriate number of + vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in + case of SLP. */ + if (slp) + ncopies = 1; + else + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; - if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) - return false; + gcc_assert (ncopies >= 1); - if (STMT_VINFO_LIVE_P (stmt_info)) + /* FORNOW. This restriction should be relaxed. */ + if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1) { if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "value used after loop."); + fprintf (vect_dump, "multiple types in nested loop."); return false; } + if (!STMT_VINFO_RELEVANT_P (stmt_info)) + return false; + + if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) + return false; + /* Is vectorizable store? */ - if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT) + if (!is_gimple_assign (stmt)) return false; - scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); + scalar_dest = gimple_assign_lhs (stmt); if (TREE_CODE (scalar_dest) != ARRAY_REF && TREE_CODE (scalar_dest) != INDIRECT_REF - && !DR_GROUP_FIRST_DR (stmt_info)) + && !STMT_VINFO_STRIDED_ACCESS (stmt_info)) return false; - op = GIMPLE_STMT_OPERAND (stmt, 1); + gcc_assert (gimple_assign_single_p (stmt)); + op = gimple_assign_rhs1 (stmt); if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt)) { if (vect_print_dump_info (REPORT_DETAILS)) @@ -3738,26 +5201,56 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) return false; } + /* The scalar rhs type needs to be trivially convertible to the vector + component type. This should always be the case. */ + if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op))) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "??? operands of different types"); + return false; + } + vec_mode = TYPE_MODE (vectype); /* FORNOW. In some cases can vectorize even if data-type not supported (e.g. - array initialization with 0). 
*/ - if (mov_optab->handlers[(int)vec_mode].insn_code == CODE_FOR_nothing) + if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing) return false; if (!STMT_VINFO_DATA_REF (stmt_info)) return false; - if (DR_GROUP_FIRST_DR (stmt_info)) + if (STMT_VINFO_STRIDED_ACCESS (stmt_info)) { strided_store = true; - if (!vect_strided_store_supported (vectype)) - return false; + first_stmt = DR_GROUP_FIRST_DR (stmt_info); + if (!vect_strided_store_supported (vectype) + && !PURE_SLP_STMT (stmt_info) && !slp) + return false; + + if (first_stmt == stmt) + { + /* STMT is the leader of the group. Check the operands of all the + stmts of the group. */ + next_stmt = DR_GROUP_NEXT_DR (stmt_info); + while (next_stmt) + { + gcc_assert (gimple_assign_single_p (next_stmt)); + op = gimple_assign_rhs1 (next_stmt); + if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt)) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "use not simple."); + return false; + } + next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt)); + } + } } if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = store_vec_info_type; - vect_model_store_cost (stmt_info, ncopies); + vect_model_store_cost (stmt_info, ncopies, dt, NULL); return true; } @@ -3765,26 +5258,39 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (strided_store) { - first_stmt = DR_GROUP_FIRST_DR (stmt_info); first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt)); DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++; + /* FORNOW */ + gcc_assert (!nested_in_vect_loop_p (loop, stmt)); + /* We vectorize all the stmts of the interleaving group when we reach the last stmt in the group. */ if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt)) - < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))) + < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt)) + && !slp) { - *vec_stmt = NULL_TREE; + *vec_stmt = NULL; return true; } + + if (slp) + strided_store = false; + + /* VEC_NUM is the number of vect stmts to be created for this group. */ + if (slp) + vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + else + vec_num = group_size; } else { first_stmt = stmt; first_dr = dr; - group_size = 1; + group_size = vec_num = 1; + first_stmt_vinfo = stmt_info; } if (vect_print_dump_info (REPORT_DETAILS)) @@ -3793,9 +5299,9 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) dr_chain = VEC_alloc (tree, heap, group_size); oprnds = VEC_alloc (tree, heap, group_size); - alignment_support_cheme = vect_supportable_dr_alignment (first_dr); - gcc_assert (alignment_support_cheme); - gcc_assert (alignment_support_cheme == dr_aligned); /* FORNOW */ + alignment_support_scheme = vect_supportable_dr_alignment (first_dr); + gcc_assert (alignment_support_scheme); + gcc_assert (alignment_support_scheme == dr_aligned); /* FORNOW */ /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate @@ -3839,35 +5345,54 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) prev_stmt_info = NULL; for (j = 0; j < ncopies; j++) { - tree new_stmt; - tree ptr_incr; + gimple new_stmt; + gimple ptr_incr; if (j == 0) { - /* For interleaved stores we collect vectorized defs for all the - stores in the group in DR_CHAIN and OPRNDS. 
DR_CHAIN is then used - as an input to vect_permute_store_chain(), and OPRNDS as an input - to vect_get_vec_def_for_stmt_copy() for the next copy. - If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and - OPRNDS are of size 1. */ - next_stmt = first_stmt; - for (i = 0; i < group_size; i++) - { - /* Since gaps are not supported for interleaved stores, GROUP_SIZE - is the exact number of stmts in the chain. Therefore, NEXT_STMT - can't be NULL_TREE. In case that there is no interleaving, - GROUP_SIZE is 1, and only one iteration of the loop will be - executed. */ - gcc_assert (next_stmt); - op = GIMPLE_STMT_OPERAND (next_stmt, 1); - vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt, NULL); - VEC_quick_push(tree, dr_chain, vec_oprnd); - VEC_quick_push(tree, oprnds, vec_oprnd); - next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt)); + if (slp) + { + /* Get vectorized arguments for SLP_NODE. */ + vect_get_slp_defs (slp_node, &vec_oprnds, NULL); + + vec_oprnd = VEC_index (tree, vec_oprnds, 0); + } + else + { + /* For interleaved stores we collect vectorized defs for all the + stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then + used as an input to vect_permute_store_chain(), and OPRNDS as + an input to vect_get_vec_def_for_stmt_copy() for the next copy. + + If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and + OPRNDS are of size 1. */ + next_stmt = first_stmt; + for (i = 0; i < group_size; i++) + { + /* Since gaps are not supported for interleaved stores, + GROUP_SIZE is the exact number of stmts in the chain. + Therefore, NEXT_STMT can't be NULL_TREE. In case that + there is no interleaving, GROUP_SIZE is 1, and only one + iteration of the loop will be executed. */ + gcc_assert (next_stmt + && gimple_assign_single_p (next_stmt)); + op = gimple_assign_rhs1 (next_stmt); + + vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt, + NULL); + VEC_quick_push(tree, dr_chain, vec_oprnd); + VEC_quick_push(tree, oprnds, vec_oprnd); + next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt)); + } } - dataref_ptr = vect_create_data_ref_ptr (first_stmt, bsi, NULL_TREE, - &dummy, &ptr_incr, false, - TREE_TYPE (vec_oprnd)); + + /* We should have catched mismatched types earlier. */ + gcc_assert (useless_type_conversion_p (vectype, + TREE_TYPE (vec_oprnd))); + dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE, + &dummy, &ptr_incr, false, + &inv_p, NULL); + gcc_assert (!inv_p); } else { @@ -3880,75 +5405,67 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) OPRNDS are of size 1. */ for (i = 0; i < group_size; i++) { - vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, - VEC_index (tree, oprnds, i)); + op = VEC_index (tree, oprnds, i); + vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt); + vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op); VEC_replace(tree, dr_chain, i, vec_oprnd); VEC_replace(tree, oprnds, i, vec_oprnd); } - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt); + dataref_ptr = + bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE); } if (strided_store) { result_chain = VEC_alloc (tree, heap, group_size); /* Permute. */ - if (!vect_permute_store_chain (dr_chain, group_size, stmt, bsi, + if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi, &result_chain)) return false; } next_stmt = first_stmt; - for (i = 0; i < group_size; i++) + for (i = 0; i < vec_num; i++) { - /* For strided stores vectorized defs are interleaved in - vect_permute_store_chain(). 
*/ - if (strided_store) - vec_oprnd = VEC_index(tree, result_chain, i); + if (i > 0) + /* Bump the vector pointer. */ + dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, + NULL_TREE); + + if (slp) + vec_oprnd = VEC_index (tree, vec_oprnds, i); + else if (strided_store) + /* For strided stores vectorized defs are interleaved in + vect_permute_store_chain(). */ + vec_oprnd = VEC_index (tree, result_chain, i); data_ref = build_fold_indirect_ref (dataref_ptr); + /* Arguments are ready. Create the new vector stmt. */ - new_stmt = build_gimple_modify_stmt (data_ref, vec_oprnd); - vect_finish_stmt_generation (stmt, new_stmt, bsi); - - /* Set the VDEFs for the vector pointer. If this virtual def - has a use outside the loop and a loop peel is performed - then the def may be renamed by the peel. Mark it for - renaming so the later use will also be renamed. */ - copy_virtual_operands (new_stmt, next_stmt); - if (j == 0) - { - /* The original store is deleted so the same SSA_NAMEs - can be used. */ - FOR_EACH_SSA_TREE_OPERAND (def, next_stmt, iter, SSA_OP_VDEF) - { - SSA_NAME_DEF_STMT (def) = new_stmt; - mark_sym_for_renaming (SSA_NAME_VAR (def)); - } - - STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; - } + new_stmt = gimple_build_assign (data_ref, vec_oprnd); + vect_finish_stmt_generation (stmt, new_stmt, gsi); + mark_symbols_for_renaming (new_stmt); + + if (slp) + continue; + + if (j == 0) + STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; else - { - /* Create new names for all the definitions created by COPY and - add replacement mappings for each new name. */ - FOR_EACH_SSA_DEF_OPERAND (def_p, new_stmt, iter, SSA_OP_VDEF) - { - create_new_def_for (DEF_FROM_PTR (def_p), new_stmt, def_p); - mark_sym_for_renaming (SSA_NAME_VAR (DEF_FROM_PTR (def_p))); - } - - STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; - } + STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; prev_stmt_info = vinfo_for_stmt (new_stmt); next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt)); if (!next_stmt) break; - /* Bump the vector pointer. */ - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt); } } + VEC_free (tree, heap, dr_chain); + VEC_free (tree, heap, oprnds); + if (result_chain) + VEC_free (tree, heap, result_chain); + return true; } @@ -3956,14 +5473,17 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) /* Function vect_setup_realignment This function is called when vectorizing an unaligned load using - the dr_unaligned_software_pipeline scheme. + the dr_explicit_realign[_optimized] scheme. This function generates the following code at the loop prolog: p = initial_addr; - msq_init = *(floor(p)); # prolog load + x msq_init = *(floor(p)); # prolog load realignment_token = call target_builtin; loop: - msq = phi (msq_init, ---) + x msq = phi (msq_init, ---) + + The stmts marked with x are generated only for the case of + dr_explicit_realign_optimized. The code above sets up a new (vector) pointer, pointing to the first location accessed by STMT, and a "floor-aligned" load using that pointer. @@ -3972,19 +5492,29 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) whose arguments are the result of the prolog-load (created by this function) and the result of a load that takes place in the loop (to be created by the caller to this function). 
+ + For the case of dr_explicit_realign_optimized: The caller to this function uses the phi-result (msq) to create the realignment code inside the loop, and sets up the missing phi argument, as follows: - loop: msq = phi (msq_init, lsq) lsq = *(floor(p')); # load in loop result = realign_load (msq, lsq, realignment_token); + For the case of dr_explicit_realign: + loop: + msq = *(floor(p)); # load in loop + p' = p + (VS-1); + lsq = *(floor(p')); # load in loop + result = realign_load (msq, lsq, realignment_token); + Input: STMT - (scalar) load stmt to be vectorized. This load accesses a memory location that may be unaligned. BSI - place where new code is to be inserted. + ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes + is used. Output: REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load @@ -3992,56 +5522,165 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) Return value - the result of the loop-header phi node. */ static tree -vect_setup_realignment (tree stmt, block_stmt_iterator *bsi, - tree *realignment_token) +vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi, + tree *realignment_token, + enum dr_alignment_support alignment_support_scheme, + tree init_addr, + struct loop **at_loop) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype = STMT_VINFO_VECTYPE (stmt_info); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); - edge pe = loop_preheader_edge (loop); - tree scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); + edge pe; + tree scalar_dest = gimple_assign_lhs (stmt); tree vec_dest; - tree init_addr; - tree inc; + gimple inc; tree ptr; tree data_ref; - tree new_stmt; + gimple new_stmt; basic_block new_bb; - tree msq_init; + tree msq_init = NULL_TREE; tree new_temp; - tree phi_stmt; - tree msq; + gimple phi_stmt; + tree msq = NULL_TREE; + gimple_seq stmts = NULL; + bool inv_p; + bool compute_in_loop = false; + bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt); + struct loop *containing_loop = (gimple_bb (stmt))->loop_father; + struct loop *loop_for_initial_load; + + gcc_assert (alignment_support_scheme == dr_explicit_realign + || alignment_support_scheme == dr_explicit_realign_optimized); + + /* We need to generate three things: + 1. the misalignment computation + 2. the extra vector load (for the optimized realignment scheme). + 3. the phi node for the two vectors from which the realignment is + done (for the optimized realignment scheme). + */ + + /* 1. Determine where to generate the misalignment computation. + + If INIT_ADDR is NULL_TREE, this indicates that the misalignment + calculation will be generated by this function, outside the loop (in the + preheader). Otherwise, INIT_ADDR had already been computed for us by the + caller, inside the loop. + + Background: If the misalignment remains fixed throughout the iterations of + the loop, then both realignment schemes are applicable, and also the + misalignment computation can be done outside LOOP. This is because we are + vectorizing LOOP, and so the memory accesses in LOOP advance in steps that + are a multiple of VS (the Vector Size), and therefore the misalignment in + different vectorized LOOP iterations is always the same. + The problem arises only if the memory access is in an inner-loop nested + inside LOOP, which is now being vectorized using outer-loop vectorization. 
+ This is the only case when the misalignment of the memory access may not + remain fixed throughout the iterations of the inner-loop (as explained in + detail in vect_supportable_dr_alignment). In this case, not only is the + optimized realignment scheme not applicable, but also the misalignment + computation (and generation of the realignment token that is passed to + REALIGN_LOAD) have to be done inside the loop. + + In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode + or not, which in turn determines if the misalignment is computed inside + the inner-loop, or outside LOOP. */ + + if (init_addr != NULL_TREE) + { + compute_in_loop = true; + gcc_assert (alignment_support_scheme == dr_explicit_realign); + } + + + /* 2. Determine where to generate the extra vector load. + + For the optimized realignment scheme, instead of generating two vector + loads in each iteration, we generate a single extra vector load in the + preheader of the loop, and in each iteration reuse the result of the + vector load from the previous iteration. In case the memory access is in + an inner-loop nested inside LOOP, which is now being vectorized using + outer-loop vectorization, we need to determine whether this initial vector + load should be generated at the preheader of the inner-loop, or can be + generated at the preheader of LOOP. If the memory access has no evolution + in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has + to be generated inside LOOP (in the preheader of the inner-loop). */ + + if (nested_in_vect_loop) + { + tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info); + bool invariant_in_outerloop = + (tree_int_cst_compare (outerloop_step, size_zero_node) == 0); + loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner); + } + else + loop_for_initial_load = loop; + if (at_loop) + *at_loop = loop_for_initial_load; - /* 1. Create msq_init = *(floor(p1)) in the loop preheader */ - vec_dest = vect_create_destination_var (scalar_dest, vectype); - ptr = vect_create_data_ref_ptr (stmt, bsi, NULL_TREE, &init_addr, &inc, true, - NULL_TREE); - data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr); - new_stmt = build_gimple_modify_stmt (vec_dest, data_ref); - new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - new_bb = bsi_insert_on_edge_immediate (pe, new_stmt); - gcc_assert (!new_bb); - msq_init = GIMPLE_STMT_OPERAND (new_stmt, 0); - copy_virtual_operands (new_stmt, stmt); - update_vuses_to_preheader (new_stmt, loop); + /* 3. For the case of the optimized realignment, create the first vector + load at the loop preheader. */ + + if (alignment_support_scheme == dr_explicit_realign_optimized) + { + /* Create msq_init = *(floor(p1)) in the loop preheader */ + + gcc_assert (!compute_in_loop); + pe = loop_preheader_edge (loop_for_initial_load); + vec_dest = vect_create_destination_var (scalar_dest, vectype); + ptr = vect_create_data_ref_ptr (stmt, loop_for_initial_load, NULL_TREE, + &init_addr, &inc, true, &inv_p, NULL_TREE); + data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr); + new_stmt = gimple_build_assign (vec_dest, data_ref); + new_temp = make_ssa_name (vec_dest, new_stmt); + gimple_assign_set_lhs (new_stmt, new_temp); + mark_symbols_for_renaming (new_stmt); + new_bb = gsi_insert_on_edge_immediate (pe, new_stmt); + gcc_assert (!new_bb); + msq_init = gimple_assign_lhs (new_stmt); + } + + /* 4. Create realignment token using a target builtin, if available. 
+ It is done either inside the containing loop, or before LOOP (as + determined above). */ - /* 2. Create permutation mask, if required, in loop preheader. */ if (targetm.vectorize.builtin_mask_for_load) { tree builtin_decl; + /* Compute INIT_ADDR - the initial addressed accessed by this memref. */ + if (compute_in_loop) + gcc_assert (init_addr); /* already computed by the caller. */ + else + { + /* Generate the INIT_ADDR computation outside LOOP. */ + init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts, + NULL_TREE, loop); + pe = loop_preheader_edge (loop); + new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); + gcc_assert (!new_bb); + } + builtin_decl = targetm.vectorize.builtin_mask_for_load (); - new_stmt = build_call_expr (builtin_decl, 1, init_addr); - vec_dest = vect_create_destination_var (scalar_dest, - TREE_TYPE (new_stmt)); - new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt); + new_stmt = gimple_build_call (builtin_decl, 1, init_addr); + vec_dest = + vect_create_destination_var (scalar_dest, + gimple_call_return_type (new_stmt)); new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - new_bb = bsi_insert_on_edge_immediate (pe, new_stmt); - gcc_assert (!new_bb); - *realignment_token = GIMPLE_STMT_OPERAND (new_stmt, 0); + gimple_call_set_lhs (new_stmt, new_temp); + + if (compute_in_loop) + gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); + else + { + /* Generate the misalignment computation outside LOOP. */ + pe = loop_preheader_edge (loop); + new_bb = gsi_insert_on_edge_immediate (pe, new_stmt); + gcc_assert (!new_bb); + } + + *realignment_token = gimple_call_lhs (new_stmt); /* The result of the CALL_EXPR to this builtin is determined from the value of the parameter and no global variables are touched @@ -4051,12 +5690,21 @@ vect_setup_realignment (tree stmt, block_stmt_iterator *bsi, gcc_assert (TREE_READONLY (builtin_decl)); } - /* 3. Create msq = phi in loop */ + if (alignment_support_scheme == dr_explicit_realign) + return msq; + + gcc_assert (!compute_in_loop); + gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized); + + + /* 5. 
Create msq = phi in loop */ + + pe = loop_preheader_edge (containing_loop); vec_dest = vect_create_destination_var (scalar_dest, vectype); - msq = make_ssa_name (vec_dest, NULL_TREE); - phi_stmt = create_phi_node (msq, loop->header); + msq = make_ssa_name (vec_dest, NULL); + phi_stmt = create_phi_node (msq, containing_loop->header); SSA_NAME_DEF_STMT (msq) = phi_stmt; - add_phi_arg (phi_stmt, msq_init, loop_preheader_edge (loop)); + add_phi_arg (phi_stmt, msq_init, pe); return msq; } @@ -4075,7 +5723,8 @@ vect_strided_load_supported (tree vectype) mode = (int) TYPE_MODE (vectype); - perm_even_optab = optab_for_tree_code (VEC_EXTRACT_EVEN_EXPR, vectype); + perm_even_optab = optab_for_tree_code (VEC_EXTRACT_EVEN_EXPR, vectype, + optab_default); if (!perm_even_optab) { if (vect_print_dump_info (REPORT_DETAILS)) @@ -4083,14 +5732,15 @@ vect_strided_load_supported (tree vectype) return false; } - if (perm_even_optab->handlers[mode].insn_code == CODE_FOR_nothing) + if (optab_handler (perm_even_optab, mode)->insn_code == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "perm_even op not supported by target."); return false; } - perm_odd_optab = optab_for_tree_code (VEC_EXTRACT_ODD_EXPR, vectype); + perm_odd_optab = optab_for_tree_code (VEC_EXTRACT_ODD_EXPR, vectype, + optab_default); if (!perm_odd_optab) { if (vect_print_dump_info (REPORT_DETAILS)) @@ -4098,7 +5748,7 @@ vect_strided_load_supported (tree vectype) return false; } - if (perm_odd_optab->handlers[mode].insn_code == CODE_FOR_nothing) + if (optab_handler (perm_odd_optab, mode)->insn_code == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "perm_odd op not supported by target."); @@ -4187,13 +5837,13 @@ vect_strided_load_supported (tree vectype) static bool vect_permute_load_chain (VEC(tree,heap) *dr_chain, unsigned int length, - tree stmt, - block_stmt_iterator *bsi, + gimple stmt, + gimple_stmt_iterator *gsi, VEC(tree,heap) **result_chain) { - tree perm_dest, perm_stmt, data_ref, first_vect, second_vect; + tree perm_dest, data_ref, first_vect, second_vect; + gimple perm_stmt; tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)); - tree tmp; int i; unsigned int j; @@ -4214,13 +5864,13 @@ vect_permute_load_chain (VEC(tree,heap) *dr_chain, DECL_GIMPLE_REG_P (perm_dest) = 1; add_referenced_var (perm_dest); - tmp = build2 (VEC_EXTRACT_EVEN_EXPR, vectype, - first_vect, second_vect); - perm_stmt = build_gimple_modify_stmt (perm_dest, tmp); + perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_EVEN_EXPR, + perm_dest, first_vect, + second_vect); data_ref = make_ssa_name (perm_dest, perm_stmt); - GIMPLE_STMT_OPERAND (perm_stmt, 0) = data_ref; - vect_finish_stmt_generation (stmt, perm_stmt, bsi); + gimple_assign_set_lhs (perm_stmt, data_ref); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); mark_symbols_for_renaming (perm_stmt); VEC_replace (tree, *result_chain, j/2, data_ref); @@ -4230,12 +5880,12 @@ vect_permute_load_chain (VEC(tree,heap) *dr_chain, DECL_GIMPLE_REG_P (perm_dest) = 1; add_referenced_var (perm_dest); - tmp = build2 (VEC_EXTRACT_ODD_EXPR, vectype, - first_vect, second_vect); - perm_stmt = build_gimple_modify_stmt (perm_dest, tmp); + perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_ODD_EXPR, + perm_dest, first_vect, + second_vect); data_ref = make_ssa_name (perm_dest, perm_stmt); - GIMPLE_STMT_OPERAND (perm_stmt, 0) = data_ref; - vect_finish_stmt_generation (stmt, perm_stmt, bsi); + gimple_assign_set_lhs (perm_stmt, data_ref); + 
vect_finish_stmt_generation (stmt, perm_stmt, gsi); mark_symbols_for_renaming (perm_stmt); VEC_replace (tree, *result_chain, j/2+length/2, data_ref); @@ -4254,12 +5904,12 @@ vect_permute_load_chain (VEC(tree,heap) *dr_chain, */ static bool -vect_transform_strided_load (tree stmt, VEC(tree,heap) *dr_chain, int size, - block_stmt_iterator *bsi) +vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size, + gimple_stmt_iterator *gsi) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); - tree first_stmt = DR_GROUP_FIRST_DR (stmt_info); - tree next_stmt, new_stmt; + gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info); + gimple next_stmt, new_stmt; VEC(tree,heap) *result_chain = NULL; unsigned int i, gap_count; tree tmp_data_ref; @@ -4269,7 +5919,7 @@ vect_transform_strided_load (tree stmt, VEC(tree,heap) *dr_chain, int size, vectors, that are ready for vector computation. */ result_chain = VEC_alloc (tree, heap, size); /* Permute. */ - if (!vect_permute_load_chain (dr_chain, size, stmt, bsi, &result_chain)) + if (!vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain)) return false; /* Put a permuted data-ref in the VECTORIZED_STMT field. @@ -4277,18 +5927,20 @@ vect_transform_strided_load (tree stmt, VEC(tree,heap) *dr_chain, int size, corresponds the order of data-refs in RESULT_CHAIN. */ next_stmt = first_stmt; gap_count = 1; - for (i = 0; VEC_iterate(tree, result_chain, i, tmp_data_ref); i++) + for (i = 0; VEC_iterate (tree, result_chain, i, tmp_data_ref); i++) { if (!next_stmt) break; /* Skip the gaps. Loads created for the gaps will be removed by dead - code elimination pass later. + code elimination pass later. No need to check for the first stmt in + the group, since it always exists. DR_GROUP_GAP is the number of steps in elements from the previous access (if there is no gap DR_GROUP_GAP is 1). We skip loads that correspond to the gaps. 
*/ - if (gap_count < DR_GROUP_GAP (vinfo_for_stmt (next_stmt))) + if (next_stmt != first_stmt + && gap_count < DR_GROUP_GAP (vinfo_for_stmt (next_stmt))) { gap_count++; continue; @@ -4304,16 +5956,24 @@ vect_transform_strided_load (tree stmt, VEC(tree,heap) *dr_chain, int size, STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt; else { - tree prev_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)); - tree rel_stmt = STMT_VINFO_RELATED_STMT ( - vinfo_for_stmt (prev_stmt)); - while (rel_stmt) - { - prev_stmt = rel_stmt; - rel_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt)); - } - STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) = new_stmt; + if (!DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt))) + { + gimple prev_stmt = + STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)); + gimple rel_stmt = + STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)); + while (rel_stmt) + { + prev_stmt = rel_stmt; + rel_stmt = + STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt)); + } + + STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) = + new_stmt; + } } + next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt)); gap_count = 1; /* If NEXT_STMT accesses the same DR as the previous statement, @@ -4323,10 +5983,318 @@ vect_transform_strided_load (tree stmt, VEC(tree,heap) *dr_chain, int size, break; } } + + VEC_free (tree, heap, result_chain); + return true; +} + + +/* Create NCOPIES permutation statements using the mask MASK_BYTES (by + building a vector of type MASK_TYPE from it) and two input vectors placed in + DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy and + shifting by STRIDE elements of DR_CHAIN for every copy. + (STRIDE is the number of vectorized stmts for NODE divided by the number of + copies). + VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where + the created stmts must be inserted. */ + +static inline void +vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt, + int *mask_array, int mask_nunits, + tree mask_element_type, tree mask_type, + int first_vec_indx, int second_vec_indx, + gimple_stmt_iterator *gsi, slp_tree node, + tree builtin_decl, tree vectype, + VEC(tree,heap) *dr_chain, + int ncopies, int vect_stmts_counter) +{ + tree t = NULL_TREE, mask_vec, mask, perm_dest; + gimple perm_stmt = NULL; + stmt_vec_info next_stmt_info; + int i, group_size, stride, dr_chain_size; + tree first_vec, second_vec, data_ref; + tree sym; + ssa_op_iter iter; + VEC (tree, heap) *params = NULL; + + /* Create a vector mask. */ + for (i = mask_nunits - 1; i >= 0; --i) + t = tree_cons (NULL_TREE, build_int_cst (mask_element_type, mask_array[i]), + t); + mask_vec = build_vector (mask_type, t); + mask = vect_init_vector (stmt, mask_vec, mask_type, NULL); + + group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (node)); + stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies; + dr_chain_size = VEC_length (tree, dr_chain); + + /* Initialize the vect stmts of NODE to properly insert the generated + stmts later. */ + for (i = VEC_length (gimple, SLP_TREE_VEC_STMTS (node)); + i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++) + VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (node), NULL); + + perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype); + for (i = 0; i < ncopies; i++) + { + first_vec = VEC_index (tree, dr_chain, first_vec_indx); + second_vec = VEC_index (tree, dr_chain, second_vec_indx); + + /* Build argument list for the vectorized call. 
*/ + VEC_free (tree, heap, params); + params = VEC_alloc (tree, heap, 3); + VEC_quick_push (tree, params, first_vec); + VEC_quick_push (tree, params, second_vec); + VEC_quick_push (tree, params, mask); + + /* Generate the permute statement. */ + perm_stmt = gimple_build_call_vec (builtin_decl, params); + data_ref = make_ssa_name (perm_dest, perm_stmt); + gimple_call_set_lhs (perm_stmt, data_ref); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); + FOR_EACH_SSA_TREE_OPERAND (sym, perm_stmt, iter, SSA_OP_ALL_VIRTUALS) + { + if (TREE_CODE (sym) == SSA_NAME) + sym = SSA_NAME_VAR (sym); + mark_sym_for_renaming (sym); + } + + /* Store the vector statement in NODE. */ + VEC_replace (gimple, SLP_TREE_VEC_STMTS (node), + stride * i + vect_stmts_counter, perm_stmt); + + first_vec_indx += stride; + second_vec_indx += stride; + } + + /* Mark the scalar stmt as vectorized. */ + next_stmt_info = vinfo_for_stmt (next_scalar_stmt); + STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt; +} + + +/* Given FIRST_MASK_ELEMENT - the mask element in element representation, + return in CURRENT_MASK_ELEMENT its equivalent in target specific + representation. Check that the mask is valid and return FALSE if not. + Return TRUE in NEED_NEXT_VECTOR if the permutation requires to move to + the next vector, i.e., the current first vector is not needed. */ + +static bool +vect_get_mask_element (gimple stmt, int first_mask_element, int m, + int mask_nunits, bool only_one_vec, int index, + int *mask, int *current_mask_element, + bool *need_next_vector) +{ + int i; + static int number_of_mask_fixes = 1; + static bool mask_fixed = false; + static bool needs_first_vector = false; + + /* Convert to target specific representation. */ + *current_mask_element = first_mask_element + m; + /* Adjust the value in case it's a mask for second and third vectors. */ + *current_mask_element -= mask_nunits * (number_of_mask_fixes - 1); + + if (*current_mask_element < mask_nunits) + needs_first_vector = true; + + /* We have only one input vector to permute but the mask accesses values in + the next vector as well. */ + if (only_one_vec && *current_mask_element >= mask_nunits) + { + if (vect_print_dump_info (REPORT_DETAILS)) + { + fprintf (vect_dump, "permutation requires at least two vectors "); + print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); + } + + return false; + } + + /* The mask requires the next vector. */ + if (*current_mask_element >= mask_nunits * 2) + { + if (needs_first_vector || mask_fixed) + { + /* We either need the first vector too or have already moved to the + next vector. In both cases, this permutation needs three + vectors. */ + if (vect_print_dump_info (REPORT_DETAILS)) + { + fprintf (vect_dump, "permutation requires at " + "least three vectors "); + print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); + } + + return false; + } + + /* We move to the next vector, dropping the first one and working with + the second and the third - we need to adjust the values of the mask + accordingly. */ + *current_mask_element -= mask_nunits * number_of_mask_fixes; + + for (i = 0; i < index; i++) + mask[i] -= mask_nunits * number_of_mask_fixes; + + (number_of_mask_fixes)++; + mask_fixed = true; + } + + *need_next_vector = mask_fixed; + + /* This was the last element of this mask. Start a new one. */ + if (index == mask_nunits - 1) + { + number_of_mask_fixes = 1; + mask_fixed = false; + needs_first_vector = false; + } + return true; } +/* Generate vector permute statements from a list of loads in DR_CHAIN. 
+ If ANALYZE_ONLY is TRUE, only check that it is possible to create valid + permute statements for SLP_NODE_INSTANCE. */ +bool +vect_transform_slp_perm_load (gimple stmt, VEC (tree, heap) *dr_chain, + gimple_stmt_iterator *gsi, int vf, + slp_instance slp_node_instance, bool analyze_only) +{ + stmt_vec_info stmt_info = vinfo_for_stmt (stmt); + tree mask_element_type = NULL_TREE, mask_type; + int i, j, k, m, scale, mask_nunits, nunits, vec_index = 0, scalar_index; + slp_tree node; + tree vectype = STMT_VINFO_VECTYPE (stmt_info), builtin_decl; + gimple next_scalar_stmt; + int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance); + int first_mask_element; + int index, unroll_factor, *mask, current_mask_element, ncopies; + bool only_one_vec = false, need_next_vector = false; + int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter; + + if (!targetm.vectorize.builtin_vec_perm) + { + if (vect_print_dump_info (REPORT_DETAILS)) + { + fprintf (vect_dump, "no builtin for vect permute for "); + print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); + } + + return false; + } + + builtin_decl = targetm.vectorize.builtin_vec_perm (vectype, + &mask_element_type); + if (!builtin_decl || !mask_element_type) + { + if (vect_print_dump_info (REPORT_DETAILS)) + { + fprintf (vect_dump, "no builtin for vect permute for "); + print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); + } + + return false; + } + + mask_type = get_vectype_for_scalar_type (mask_element_type); + mask_nunits = TYPE_VECTOR_SUBPARTS (mask_type); + mask = (int *) xmalloc (sizeof (int) * mask_nunits); + nunits = TYPE_VECTOR_SUBPARTS (vectype); + scale = mask_nunits / nunits; + unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance); + + /* The number of vector stmts to generate based only on SLP_NODE_INSTANCE + unrolling factor. */ + orig_vec_stmts_num = group_size * + SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits; + if (orig_vec_stmts_num == 1) + only_one_vec = true; + + /* Number of copies is determined by the final vectorization factor + relatively to SLP_NODE_INSTANCE unrolling factor. */ + ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance); + + /* Generate permutation masks for every NODE. Number of masks for each NODE + is equal to GROUP_SIZE. + E.g., we have a group of three nodes with three loads from the same + location in each node, and the vector size is 4. I.e., we have a + a0b0c0a1b1c1... sequence and we need to create the following vectors: + for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3 + for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3 + ... + + The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9} (in target + scpecific type, e.g., in bytes for Altivec. + The last mask is illegal since we assume two operands for permute + operation, and the mask element values can't be outside that range. Hence, + the last mask must be converted into {2,5,5,5}. + For the first two permutations we need the first and the second input + vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation + we need the second and the third vectors: {b1,c1,a2,b2} and + {c2,a3,b3,c3}. 
*/ + + for (i = 0; + VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (slp_node_instance), + i, node); + i++) + { + scalar_index = 0; + index = 0; + vect_stmts_counter = 0; + vec_index = 0; + first_vec_index = vec_index++; + if (only_one_vec) + second_vec_index = first_vec_index; + else + second_vec_index = vec_index++; + + for (j = 0; j < unroll_factor; j++) + { + for (k = 0; k < group_size; k++) + { + first_mask_element = (i + j * group_size) * scale; + for (m = 0; m < scale; m++) + { + if (!vect_get_mask_element (stmt, first_mask_element, m, + mask_nunits, only_one_vec, index, mask, + ¤t_mask_element, &need_next_vector)) + return false; + + mask[index++] = current_mask_element; + } + + if (index == mask_nunits) + { + index = 0; + if (!analyze_only) + { + if (need_next_vector) + { + first_vec_index = second_vec_index; + second_vec_index = vec_index; + } + + next_scalar_stmt = VEC_index (gimple, + SLP_TREE_SCALAR_STMTS (node), scalar_index++); + + vect_create_mask_and_perm (stmt, next_scalar_stmt, + mask, mask_nunits, mask_element_type, mask_type, + first_vec_index, second_vec_index, gsi, node, + builtin_decl, vectype, dr_chain, ncopies, + vect_stmts_counter++); + } + } + } + } + } + + free (mask); + return true; +} + /* vectorizable_load. Check if STMT reads a non scalar data-ref (array/pointer/structure) that @@ -4336,92 +6304,128 @@ vect_transform_strided_load (tree stmt, VEC(tree,heap) *dr_chain, int size, Return FALSE if not a vectorizable STMT, TRUE otherwise. */ bool -vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) +vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, + slp_tree slp_node, slp_instance slp_node_instance) { tree scalar_dest; tree vec_dest = NULL; tree data_ref = NULL; - tree op; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); stmt_vec_info prev_stmt_info; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + struct loop *containing_loop = (gimple_bb (stmt))->loop_father; + bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt); struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr; tree vectype = STMT_VINFO_VECTYPE (stmt_info); tree new_temp; int mode; - tree new_stmt = NULL_TREE; + gimple new_stmt = NULL; tree dummy; - enum dr_alignment_support alignment_support_cheme; + enum dr_alignment_support alignment_support_scheme; tree dataref_ptr = NULL_TREE; - tree ptr_incr; + gimple ptr_incr; int nunits = TYPE_VECTOR_SUBPARTS (vectype); - int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; + int ncopies; int i, j, group_size; tree msq = NULL_TREE, lsq; tree offset = NULL_TREE; tree realignment_token = NULL_TREE; - tree phi_stmt = NULL_TREE; + gimple phi = NULL; VEC(tree,heap) *dr_chain = NULL; bool strided_load = false; - tree first_stmt; + gimple first_stmt; + tree scalar_type; + bool inv_p; + bool compute_in_loop = false; + struct loop *at_loop; + int vec_num; + bool slp = (slp_node != NULL); + bool slp_perm = false; + enum tree_code code; - if (!STMT_VINFO_RELEVANT_P (stmt_info)) - return false; + /* Multiple types in SLP are handled by creating the appropriate number of + vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in + case of SLP. */ + if (slp) + ncopies = 1; + else + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; - if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) - return false; + gcc_assert (ncopies >= 1); - /* FORNOW: not yet supported. */ - if (STMT_VINFO_LIVE_P (stmt_info)) + /* FORNOW. 
This restriction should be relaxed. */ + if (nested_in_vect_loop && ncopies > 1) { if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "value used after loop."); + fprintf (vect_dump, "multiple types in nested loop."); return false; } + if (slp && SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance)) + slp_perm = true; + + if (!STMT_VINFO_RELEVANT_P (stmt_info)) + return false; + + if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) + return false; + /* Is vectorizable load? */ - if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT) + if (!is_gimple_assign (stmt)) return false; - scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); + scalar_dest = gimple_assign_lhs (stmt); if (TREE_CODE (scalar_dest) != SSA_NAME) return false; - op = GIMPLE_STMT_OPERAND (stmt, 1); - if (TREE_CODE (op) != ARRAY_REF - && TREE_CODE (op) != INDIRECT_REF - && !DR_GROUP_FIRST_DR (stmt_info)) + code = gimple_assign_rhs_code (stmt); + if (code != ARRAY_REF + && code != INDIRECT_REF + && !STMT_VINFO_STRIDED_ACCESS (stmt_info)) return false; if (!STMT_VINFO_DATA_REF (stmt_info)) return false; + scalar_type = TREE_TYPE (DR_REF (dr)); mode = (int) TYPE_MODE (vectype); /* FORNOW. In some cases can vectorize even if data-type not supported (e.g. - data copies). */ - if (mov_optab->handlers[mode].insn_code == CODE_FOR_nothing) + if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "Aligned load, but unsupported type."); return false; } + /* The vector component type needs to be trivially convertible to the + scalar lhs. This should always be the case. */ + if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype))) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "??? operands of different types"); + return false; + } + /* Check if the load is a part of an interleaving chain. */ - if (DR_GROUP_FIRST_DR (stmt_info)) + if (STMT_VINFO_STRIDED_ACCESS (stmt_info)) { strided_load = true; + /* FORNOW */ + gcc_assert (! nested_in_vect_loop); /* Check if interleaving is supported. */ - if (!vect_strided_load_supported (vectype)) + if (!vect_strided_load_supported (vectype) + && !PURE_SLP_STMT (stmt_info) && !slp) return false; } if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = load_vec_info_type; - vect_model_load_cost (stmt_info, ncopies); + vect_model_load_cost (stmt_info, ncopies, NULL); return true; } @@ -4441,18 +6445,27 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) } first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt)); - dr_chain = VEC_alloc (tree, heap, group_size); + + /* VEC_NUM is the number of vect stmts to be created for this group. 
*/ + if (slp) + { + strided_load = false; + vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + } + else + vec_num = group_size; + + dr_chain = VEC_alloc (tree, heap, vec_num); } else { first_stmt = stmt; first_dr = dr; - group_size = 1; + group_size = vec_num = 1; } - alignment_support_cheme = vect_supportable_dr_alignment (first_dr); - gcc_assert (alignment_support_cheme); - + alignment_support_scheme = vect_supportable_dr_alignment (first_dr); + gcc_assert (alignment_support_scheme); /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate @@ -4534,7 +6547,7 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) } Otherwise, the data reference is potentially unaligned on a target that - does not support unaligned accesses (dr_unaligned_software_pipeline) - + does not support unaligned accesses (dr_explicit_realign_optimized) - then generate the following code, in which the data in each iteration is obtained by two vector loads, one from the previous iteration, and one from the current iteration: @@ -4551,27 +6564,57 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) msq = lsq; } */ - if (alignment_support_cheme == dr_unaligned_software_pipeline) + /* If the misalignment remains the same throughout the execution of the + loop, we can create the init_addr and permutation mask at the loop + preheader. Otherwise, it needs to be created inside the loop. + This can only occur when vectorizing memory accesses in the inner-loop + nested within an outer-loop that is being vectorized. */ + + if (nested_in_vect_loop_p (loop, stmt) + && (TREE_INT_CST_LOW (DR_STEP (dr)) + % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)) + { + gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized); + compute_in_loop = true; + } + + if ((alignment_support_scheme == dr_explicit_realign_optimized + || alignment_support_scheme == dr_explicit_realign) + && !compute_in_loop) { - msq = vect_setup_realignment (first_stmt, bsi, &realignment_token); - phi_stmt = SSA_NAME_DEF_STMT (msq); - offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1); + msq = vect_setup_realignment (first_stmt, gsi, &realignment_token, + alignment_support_scheme, NULL_TREE, + &at_loop); + if (alignment_support_scheme == dr_explicit_realign_optimized) + { + phi = SSA_NAME_DEF_STMT (msq); + offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1); + } } + else + at_loop = loop; prev_stmt_info = NULL; for (j = 0; j < ncopies; j++) { /* 1. Create the vector pointer update chain. */ if (j == 0) - dataref_ptr = vect_create_data_ref_ptr (first_stmt, bsi, offset, &dummy, - &ptr_incr, false, NULL_TREE); + dataref_ptr = vect_create_data_ref_ptr (first_stmt, + at_loop, offset, + &dummy, &ptr_incr, false, + &inv_p, NULL_TREE); else - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt); + dataref_ptr = + bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE); - for (i = 0; i < group_size; i++) + for (i = 0; i < vec_num; i++) { + if (i > 0) + dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, + NULL_TREE); + /* 2. Create the vector-load in the loop. */ - switch (alignment_support_cheme) + switch (alignment_support_scheme) { case dr_aligned: gcc_assert (aligned_access_p (first_dr)); @@ -4582,69 +6625,159 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) int mis = DR_MISALIGNMENT (first_dr); tree tmis = (mis == -1 ? 
size_zero_node : size_int (mis)); - gcc_assert (!aligned_access_p (first_dr)); tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT)); data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis); break; } - case dr_unaligned_software_pipeline: - gcc_assert (!aligned_access_p (first_dr)); + case dr_explicit_realign: + { + tree ptr, bump; + tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1); + + if (compute_in_loop) + msq = vect_setup_realignment (first_stmt, gsi, + &realignment_token, + dr_explicit_realign, + dataref_ptr, NULL); + + data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr); + vec_dest = vect_create_destination_var (scalar_dest, vectype); + new_stmt = gimple_build_assign (vec_dest, data_ref); + new_temp = make_ssa_name (vec_dest, new_stmt); + gimple_assign_set_lhs (new_stmt, new_temp); + vect_finish_stmt_generation (stmt, new_stmt, gsi); + copy_virtual_operands (new_stmt, stmt); + mark_symbols_for_renaming (new_stmt); + msq = new_temp; + + bump = size_binop (MULT_EXPR, vs_minus_1, + TYPE_SIZE_UNIT (scalar_type)); + ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump); + data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr); + break; + } + case dr_explicit_realign_optimized: data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr); break; default: gcc_unreachable (); } vec_dest = vect_create_destination_var (scalar_dest, vectype); - new_stmt = build_gimple_modify_stmt (vec_dest, data_ref); + new_stmt = gimple_build_assign (vec_dest, data_ref); new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, new_stmt, bsi); - copy_virtual_operands (new_stmt, stmt); + gimple_assign_set_lhs (new_stmt, new_temp); + vect_finish_stmt_generation (stmt, new_stmt, gsi); mark_symbols_for_renaming (new_stmt); - /* 3. Handle explicit realignment if necessary/supported. */ - if (alignment_support_cheme == dr_unaligned_software_pipeline) + /* 3. Handle explicit realignment if necessary/supported. 
Create in + loop: vec_dest = realign_load (msq, lsq, realignment_token) */ + if (alignment_support_scheme == dr_explicit_realign_optimized + || alignment_support_scheme == dr_explicit_realign) { - /* Create in loop: - */ - lsq = GIMPLE_STMT_OPERAND (new_stmt, 0); + tree tmp; + + lsq = gimple_assign_lhs (new_stmt); if (!realignment_token) realignment_token = dataref_ptr; vec_dest = vect_create_destination_var (scalar_dest, vectype); - new_stmt = - build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq, realignment_token); - new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt); + tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq, + realignment_token); + new_stmt = gimple_build_assign (vec_dest, tmp); new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, new_stmt, bsi); - if (i == group_size - 1 && j == ncopies - 1) - add_phi_arg (phi_stmt, lsq, loop_latch_edge (loop)); - msq = lsq; + gimple_assign_set_lhs (new_stmt, new_temp); + vect_finish_stmt_generation (stmt, new_stmt, gsi); + + if (alignment_support_scheme == dr_explicit_realign_optimized) + { + gcc_assert (phi); + if (i == vec_num - 1 && j == ncopies - 1) + add_phi_arg (phi, lsq, loop_latch_edge (containing_loop)); + msq = lsq; + } } - if (strided_load) - VEC_quick_push (tree, dr_chain, new_temp); - if (i < group_size - 1) - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt); - } - if (strided_load) - { - if (!vect_transform_strided_load (stmt, dr_chain, group_size, bsi)) - return false; - *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); - dr_chain = VEC_alloc (tree, heap, group_size); + /* 4. Handle invariant-load. */ + if (inv_p) + { + gcc_assert (!strided_load); + gcc_assert (nested_in_vect_loop_p (loop, stmt)); + if (j == 0) + { + int k; + tree t = NULL_TREE; + tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type); + + /* CHECKME: bitpos depends on endianess? */ + bitpos = bitsize_zero_node; + vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp, + bitsize, bitpos); + vec_dest = + vect_create_destination_var (scalar_dest, NULL_TREE); + new_stmt = gimple_build_assign (vec_dest, vec_inv); + new_temp = make_ssa_name (vec_dest, new_stmt); + gimple_assign_set_lhs (new_stmt, new_temp); + vect_finish_stmt_generation (stmt, new_stmt, gsi); + + for (k = nunits - 1; k >= 0; --k) + t = tree_cons (NULL_TREE, new_temp, t); + /* FIXME: use build_constructor directly. */ + vec_inv = build_constructor_from_list (vectype, t); + new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi); + new_stmt = SSA_NAME_DEF_STMT (new_temp); + } + else + gcc_unreachable (); /* FORNOW. */ + } + + /* Collect vector loads and later create their permutation in + vect_transform_strided_load (). */ + if (strided_load || slp_perm) + VEC_quick_push (tree, dr_chain, new_temp); + + /* Store vector loads in the corresponding SLP_NODE. 
*/ + if (slp && !slp_perm) + VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt); } + + if (slp && !slp_perm) + continue; + + if (slp_perm) + { + if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, + LOOP_VINFO_VECT_FACTOR (loop_vinfo), + slp_node_instance, false)) + { + VEC_free (tree, heap, dr_chain); + return false; + } + } else - { - if (j == 0) - STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; - else - STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; - prev_stmt_info = vinfo_for_stmt (new_stmt); - } + { + if (strided_load) + { + if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi)) + return false; + + *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); + VEC_free (tree, heap, dr_chain); + dr_chain = VEC_alloc (tree, heap, group_size); + } + else + { + if (j == 0) + STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; + else + STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; + prev_stmt_info = vinfo_for_stmt (new_stmt); + } + } } + if (dr_chain) + VEC_free (tree, heap, dr_chain); + return true; } @@ -4655,32 +6788,42 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) it can be supported. */ bool -vectorizable_live_operation (tree stmt, - block_stmt_iterator *bsi ATTRIBUTE_UNUSED, - tree *vec_stmt ATTRIBUTE_UNUSED) +vectorizable_live_operation (gimple stmt, + gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, + gimple *vec_stmt ATTRIBUTE_UNUSED) { - tree operation; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); int i; int op_type; tree op; - tree def, def_stmt; + tree def; + gimple def_stmt; enum vect_def_type dt; + enum tree_code code; + enum gimple_rhs_class rhs_class; gcc_assert (STMT_VINFO_LIVE_P (stmt_info)); - if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def) + if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def) + return false; + + if (!is_gimple_assign (stmt)) return false; - if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT) + if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) return false; - if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME) + /* FORNOW. CHECKME. */ + if (nested_in_vect_loop_p (loop, stmt)) return false; - operation = GIMPLE_STMT_OPERAND (stmt, 1); - op_type = TREE_OPERAND_LENGTH (operation); + code = gimple_assign_rhs_code (stmt); + op_type = TREE_CODE_LENGTH (code); + rhs_class = get_gimple_rhs_class (code); + gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op); + gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op); /* FORNOW: support only if all uses are invariant. This means that the scalar operations can remain in place, unvectorized. 
@@ -4688,8 +6831,11 @@ vectorizable_live_operation (tree stmt, for (i = 0; i < op_type; i++) { - op = TREE_OPERAND (operation, i); - if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt)) + if (rhs_class == GIMPLE_SINGLE_RHS) + op = TREE_OPERAND (gimple_op (stmt, 1), i); + else + op = gimple_op (stmt, i + 1); + if (op && !vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt)) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -4729,20 +6875,22 @@ vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo) if (TREE_CODE (lhs) == SSA_NAME) { - tree lhs_def_stmt = SSA_NAME_DEF_STMT (lhs); + gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs); if (!vect_is_simple_use (lhs, loop_vinfo, &lhs_def_stmt, &def, &dt)) return false; } - else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST) + else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST + && TREE_CODE (lhs) != FIXED_CST) return false; if (TREE_CODE (rhs) == SSA_NAME) { - tree rhs_def_stmt = SSA_NAME_DEF_STMT (rhs); + gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs); if (!vect_is_simple_use (rhs, loop_vinfo, &rhs_def_stmt, &def, &dt)) return false; } - else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST) + else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST + && TREE_CODE (rhs) != FIXED_CST) return false; return true; @@ -4758,7 +6906,8 @@ vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo) Return FALSE if not a vectorizable STMT, TRUE otherwise. */ bool -vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) +vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi, + gimple *vec_stmt) { tree scalar_dest = NULL_TREE; tree vec_dest = NULL_TREE; @@ -4775,6 +6924,7 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) enum vect_def_type dt; int nunits = TYPE_VECTOR_SUBPARTS (vectype); int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; + enum tree_code code; gcc_assert (ncopies >= 1); if (ncopies > 1) @@ -4786,6 +6936,10 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) return false; + /* FORNOW: SLP not supported. */ + if (STMT_SLP_TYPE (stmt_info)) + return false; + /* FORNOW: not yet supported. */ if (STMT_VINFO_LIVE_P (stmt_info)) { @@ -4795,14 +6949,16 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) } /* Is vectorizable conditional operation? 
*/ - if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT) + if (!is_gimple_assign (stmt)) return false; - op = GIMPLE_STMT_OPERAND (stmt, 1); + code = gimple_assign_rhs_code (stmt); - if (TREE_CODE (op) != COND_EXPR) + if (code != COND_EXPR) return false; + gcc_assert (gimple_assign_single_p (stmt)); + op = gimple_assign_rhs1 (stmt); cond_expr = TREE_OPERAND (op, 0); then_clause = TREE_OPERAND (op, 1); else_clause = TREE_OPERAND (op, 2); @@ -4817,24 +6973,26 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (TREE_CODE (then_clause) == SSA_NAME) { - tree then_def_stmt = SSA_NAME_DEF_STMT (then_clause); + gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause); if (!vect_is_simple_use (then_clause, loop_vinfo, &then_def_stmt, &def, &dt)) return false; } else if (TREE_CODE (then_clause) != INTEGER_CST - && TREE_CODE (then_clause) != REAL_CST) + && TREE_CODE (then_clause) != REAL_CST + && TREE_CODE (then_clause) != FIXED_CST) return false; if (TREE_CODE (else_clause) == SSA_NAME) { - tree else_def_stmt = SSA_NAME_DEF_STMT (else_clause); + gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause); if (!vect_is_simple_use (else_clause, loop_vinfo, &else_def_stmt, &def, &dt)) return false; } else if (TREE_CODE (else_clause) != INTEGER_CST - && TREE_CODE (else_clause) != REAL_CST) + && TREE_CODE (else_clause) != REAL_CST + && TREE_CODE (else_clause) != FIXED_CST) return false; @@ -4849,7 +7007,7 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) /* Transform */ /* Handle def. */ - scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); + scalar_dest = gimple_assign_lhs (stmt); vec_dest = vect_create_destination_var (scalar_dest, vectype); /* Handle cond expr. */ @@ -4860,74 +7018,81 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL); vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL); - /* Arguments are ready. create the new vector stmt. */ + /* Arguments are ready. Create the new vector stmt. */ vec_compare = build2 (TREE_CODE (cond_expr), vectype, vec_cond_lhs, vec_cond_rhs); vec_cond_expr = build3 (VEC_COND_EXPR, vectype, vec_compare, vec_then_clause, vec_else_clause); - *vec_stmt = build_gimple_modify_stmt (vec_dest, vec_cond_expr); + *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr); new_temp = make_ssa_name (vec_dest, *vec_stmt); - GIMPLE_STMT_OPERAND (*vec_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, *vec_stmt, bsi); + gimple_assign_set_lhs (*vec_stmt, new_temp); + vect_finish_stmt_generation (stmt, *vec_stmt, gsi); return true; } + /* Function vect_transform_stmt. Create a vectorized stmt to replace STMT, and insert it at BSI. 
*/ -bool -vect_transform_stmt (tree stmt, block_stmt_iterator *bsi, bool *strided_store) +static bool +vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi, + bool *strided_store, slp_tree slp_node, + slp_instance slp_node_instance) { bool is_store = false; - tree vec_stmt = NULL_TREE; + gimple vec_stmt = NULL; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); - tree orig_stmt_in_pattern; + gimple orig_stmt_in_pattern; bool done; + loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); switch (STMT_VINFO_TYPE (stmt_info)) { case type_demotion_vec_info_type: - done = vectorizable_type_demotion (stmt, bsi, &vec_stmt); + done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node); gcc_assert (done); break; case type_promotion_vec_info_type: - done = vectorizable_type_promotion (stmt, bsi, &vec_stmt); + done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node); gcc_assert (done); break; case type_conversion_vec_info_type: - done = vectorizable_conversion (stmt, bsi, &vec_stmt); + done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node); gcc_assert (done); break; case induc_vec_info_type: - done = vectorizable_induction (stmt, bsi, &vec_stmt); + gcc_assert (!slp_node); + done = vectorizable_induction (stmt, gsi, &vec_stmt); gcc_assert (done); break; case op_vec_info_type: - done = vectorizable_operation (stmt, bsi, &vec_stmt); + done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node); gcc_assert (done); break; case assignment_vec_info_type: - done = vectorizable_assignment (stmt, bsi, &vec_stmt); + done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node); gcc_assert (done); break; case load_vec_info_type: - done = vectorizable_load (stmt, bsi, &vec_stmt); + done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node, + slp_node_instance); gcc_assert (done); break; case store_vec_info_type: - done = vectorizable_store (stmt, bsi, &vec_stmt); + done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node); gcc_assert (done); - if (DR_GROUP_FIRST_DR (stmt_info)) + if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node) { /* In case of interleaving, the whole chain is vectorized when the last store in the chain is reached. Store stmts before the last @@ -4942,16 +7107,19 @@ vect_transform_stmt (tree stmt, block_stmt_iterator *bsi, bool *strided_store) break; case condition_vec_info_type: - done = vectorizable_condition (stmt, bsi, &vec_stmt); + gcc_assert (!slp_node); + done = vectorizable_condition (stmt, gsi, &vec_stmt); gcc_assert (done); break; case call_vec_info_type: - done = vectorizable_call (stmt, bsi, &vec_stmt); + gcc_assert (!slp_node); + done = vectorizable_call (stmt, gsi, &vec_stmt); break; case reduc_vec_info_type: - done = vectorizable_reduction (stmt, bsi, &vec_stmt); + gcc_assert (!slp_node); + done = vectorizable_reduction (stmt, gsi, &vec_stmt); gcc_assert (done); break; @@ -4964,10 +7132,47 @@ vect_transform_stmt (tree stmt, block_stmt_iterator *bsi, bool *strided_store) } } + /* Handle inner-loop stmts whose DEF is used in the loop-nest that + is being vectorized, but outside the immediately enclosing loop. 
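+ For example (illustrative nest): outer-loop: for (i) { inner-loop: for (j) { s = ...; } ... = use of s; } + When the outer loop is vectorized, the vector def created for S inside the inner loop is recorded on the inner-loop exit phi (found below via the immediate uses of the scalar def), so that vectorized outer-loop stmts that use S can find it.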
*/ + if (vec_stmt + && nested_in_vect_loop_p (loop, stmt) + && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type + && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer + || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer_by_reduction)) + { + struct loop *innerloop = loop->inner; + imm_use_iterator imm_iter; + use_operand_p use_p; + tree scalar_dest; + gimple exit_phi; + + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "Record the vdef for outer-loop vectorization."); + + /* Find the relevant loop-exit phi-node, and record the vec_stmt there + (to be used when vectorizing outer-loop stmts that use the DEF of + STMT). */ + if (gimple_code (stmt) == GIMPLE_PHI) + scalar_dest = PHI_RESULT (stmt); + else + scalar_dest = gimple_assign_lhs (stmt); + + FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) + { + if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p)))) + { + exit_phi = USE_STMT (use_p); + STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt; + } + } + } + + /* Handle stmts whose DEF is used outside the loop-nest that is + being vectorized. */ if (STMT_VINFO_LIVE_P (stmt_info) && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type) { - done = vectorizable_live_operation (stmt, bsi, &vec_stmt); + done = vectorizable_live_operation (stmt, gsi, &vec_stmt); gcc_assert (done); } @@ -5001,19 +7206,20 @@ static tree vect_build_loop_niters (loop_vec_info loop_vinfo) { - tree ni_name, stmt, var; + tree ni_name, var; + gimple_seq stmts = NULL; edge pe; struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo)); var = create_tmp_var (TREE_TYPE (ni), "niters"); add_referenced_var (var); - ni_name = force_gimple_operand (ni, &stmt, false, var); + ni_name = force_gimple_operand (ni, &stmts, false, var); pe = loop_preheader_edge (loop); - if (stmt) + if (stmts) { - basic_block new_bb = bsi_insert_on_edge_immediate (pe, stmt); + basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); gcc_assert (!new_bb); } @@ -5038,7 +7244,8 @@ vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo, edge pe; basic_block new_bb; - tree stmt, ni_name; + gimple_seq stmts; + tree ni_name; tree var; tree ratio_name; tree ratio_mult_vf_name; @@ -5063,9 +7270,10 @@ vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo, var = create_tmp_var (TREE_TYPE (ni), "bnd"); add_referenced_var (var); - ratio_name = force_gimple_operand (ratio_name, &stmt, true, var); + stmts = NULL; + ratio_name = force_gimple_operand (ratio_name, &stmts, true, var); pe = loop_preheader_edge (loop); - new_bb = bsi_insert_on_edge_immediate (pe, stmt); + new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); gcc_assert (!new_bb); } @@ -5078,10 +7286,11 @@ vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo, var = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf"); add_referenced_var (var); - ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmt, + stmts = NULL; + ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmts, true, var); pe = loop_preheader_edge (loop); - new_bb = bsi_insert_on_edge_immediate (pe, stmt); + new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); gcc_assert (!new_bb); } @@ -5093,82 +7302,6 @@ vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo, } -/* Function update_vuses_to_preheader. - - Input: - STMT - a statement with potential VUSEs. - LOOP - the loop whose preheader will contain STMT.
- - It's possible to vectorize a loop even though an SSA_NAME from a VUSE - appears to be defined in a VDEF in another statement in a loop. - One such case is when the VUSE is at the dereference of a __restricted__ - pointer in a load and the VDEF is at the dereference of a different - __restricted__ pointer in a store. Vectorization may result in - copy_virtual_uses being called to copy the problematic VUSE to a new - statement that is being inserted in the loop preheader. This procedure - is called to change the SSA_NAME in the new statement's VUSE from the - SSA_NAME updated in the loop to the related SSA_NAME available on the - path entering the loop. - - When this function is called, we have the following situation: - - # vuse - S1: vload - do { - # name1 = phi < name0 , name2> - - # vuse - S2: vload - - # name2 = vdef - S3: vstore - - }while... - - Stmt S1 was created in the loop preheader block as part of misaligned-load - handling. This function fixes the name of the vuse of S1 from 'name1' to - 'name0'. */ - -static void -update_vuses_to_preheader (tree stmt, struct loop *loop) -{ - basic_block header_bb = loop->header; - edge preheader_e = loop_preheader_edge (loop); - ssa_op_iter iter; - use_operand_p use_p; - - FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_VUSE) - { - tree ssa_name = USE_FROM_PTR (use_p); - tree def_stmt = SSA_NAME_DEF_STMT (ssa_name); - tree name_var = SSA_NAME_VAR (ssa_name); - basic_block bb = bb_for_stmt (def_stmt); - - /* For a use before any definitions, def_stmt is a NOP_EXPR. */ - if (!IS_EMPTY_STMT (def_stmt) - && flow_bb_inside_loop_p (loop, bb)) - { - /* If the block containing the statement defining the SSA_NAME - is in the loop then it's necessary to find the definition - outside the loop using the PHI nodes of the header. */ - tree phi; - bool updated = false; - - for (phi = phi_nodes (header_bb); phi; phi = PHI_CHAIN (phi)) - { - if (SSA_NAME_VAR (PHI_RESULT (phi)) == name_var) - { - SET_USE (use_p, PHI_ARG_DEF (phi, preheader_e->dest_idx)); - updated = true; - break; - } - } - gcc_assert (updated); - } - } -} - - /* Function vect_update_ivs_after_vectorizer. "Advance" the induction variables of LOOP to the value they should take @@ -5216,7 +7349,8 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters, { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block exit_bb = single_exit (loop)->dest; - tree phi, phi1; + gimple phi, phi1; + gimple_stmt_iterator gsi, gsi1; basic_block update_bb = update_e->dest; /* gcc_assert (vect_can_advance_ivs_p (loop_vinfo)); */ @@ -5224,21 +7358,23 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters, /* Make sure there exists a single-predecessor exit bb: */ gcc_assert (single_pred_p (exit_bb)); - for (phi = phi_nodes (loop->header), phi1 = phi_nodes (update_bb); - phi && phi1; - phi = PHI_CHAIN (phi), phi1 = PHI_CHAIN (phi1)) + for (gsi = gsi_start_phis (loop->header), gsi1 = gsi_start_phis (update_bb); + !gsi_end_p (gsi) && !gsi_end_p (gsi1); + gsi_next (&gsi), gsi_next (&gsi1)) { tree access_fn = NULL; tree evolution_part; tree init_expr; tree step_expr; - tree var, stmt, ni, ni_name; - block_stmt_iterator last_bsi; + tree var, ni, ni_name; + gimple_stmt_iterator last_gsi; + phi = gsi_stmt (gsi); + phi1 = gsi_stmt (gsi1); if (vect_print_dump_info (REPORT_DETAILS)) { fprintf (vect_dump, "vect_update_ivs_after_vectorizer: phi: "); - print_generic_expr (vect_dump, phi, TDF_SLIM); + print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM); } /* Skip virtual phi's. 
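(Virtual phis carry no induction variable to advance. For a real IV with evolution init + i * step, the code below conceptually rebuilds ni = init + niters * step, the value the IV should take when the vector loop exits.)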
*/ @@ -5259,6 +7395,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters, access_fn = analyze_scalar_evolution (loop, PHI_RESULT (phi)); gcc_assert (access_fn); + STRIP_NOPS (access_fn); evolution_part = unshare_expr (evolution_part_in_loop_num (access_fn, loop->num)); gcc_assert (evolution_part != NULL_TREE); @@ -5290,18 +7427,42 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters, var = create_tmp_var (TREE_TYPE (init_expr), "tmp"); add_referenced_var (var); - ni_name = force_gimple_operand (ni, &stmt, false, var); + last_gsi = gsi_last_bb (exit_bb); + ni_name = force_gimple_operand_gsi (&last_gsi, ni, false, var, + true, GSI_SAME_STMT); - /* Insert stmt into exit_bb. */ - last_bsi = bsi_last (exit_bb); - if (stmt) - bsi_insert_before (&last_bsi, stmt, BSI_SAME_STMT); - /* Fix phi expressions in the successor bb. */ SET_PHI_ARG_DEF (phi1, update_e->dest_idx, ni_name); } } +/* Return the more conservative threshold between the + min_profitable_iters returned by the cost model and the user + specified threshold, if provided. */ + +static unsigned int +conservative_cost_threshold (loop_vec_info loop_vinfo, + int min_profitable_iters) +{ + unsigned int th; + int min_scalar_loop_bound; + + min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND) + * LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 1); + + /* Use the cost model only if it is more conservative than user specified + threshold. */ + th = (unsigned) min_scalar_loop_bound; + if (min_profitable_iters + && (!min_scalar_loop_bound + || min_profitable_iters > min_scalar_loop_bound)) + th = (unsigned) min_profitable_iters; + + if (th && vect_print_dump_info (REPORT_COST)) + fprintf (vect_dump, "Vectorization may not be profitable."); + + return th; +} /* Function vect_do_peeling_for_loop_bound @@ -5322,8 +7483,8 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio) edge update_e; basic_block preheader; int loop_num; - unsigned int th; - int min_scalar_loop_bound; + bool check_profitability = false; + unsigned int th = 0; int min_profitable_iters; if (vect_print_dump_info (REPORT_DETAILS)) @@ -5341,27 +7502,24 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio) loop_num = loop->num; - /* Analyze cost to set threshhold for vectorized loop. */ - min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo); - - min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)) - * LOOP_VINFO_VECT_FACTOR (loop_vinfo); - - /* Use the cost model only if it is more conservative than user specified - threshold. */ + /* If cost model check not done during versioning and + peeling for alignment. */ + if (!VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) + && !VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)) + && !LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo)) + { + check_profitability = true; - th = (unsigned) min_scalar_loop_bound; - if (min_profitable_iters - && (!min_scalar_loop_bound - || min_profitable_iters > min_scalar_loop_bound)) - th = (unsigned) min_profitable_iters; + /* Get profitability threshold for vectorized loop. 
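+ Illustrative numbers: with vectorization factor 4 and PARAM_MIN_VECT_LOOP_BOUND equal to 2, min_scalar_loop_bound is (2 * 4) - 1 = 7; if the cost model returned min_profitable_iters = 10, conservative_cost_threshold above picks the more conservative th = 10.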
*/ + min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo); - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "vectorization may not be profitable."); + th = conservative_cost_threshold (loop_vinfo, + min_profitable_iters); + } new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop), ratio_mult_vf_name, ni_name, false, - th); + th, check_profitability); gcc_assert (new_loop); gcc_assert (loop_num == loop->num); #ifdef ENABLE_CHECKING @@ -5406,16 +7564,14 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio) Else, compute address misalignment in bytes: addr_mis = addr & (vectype_size - 1) - prolog_niters = min ( LOOP_NITERS , (VF - addr_mis/elem_size)&(VF-1) ) - - (elem_size = element type size; an element is the scalar element - whose type is the inner type of the vectype) + prolog_niters = min (LOOP_NITERS, ((VF - addr_mis/elem_size)&(VF-1))/step) - For interleaving, + (elem_size = element type size; an element is the scalar element whose type + is the inner type of the vectype) - prolog_niters = min ( LOOP_NITERS , - (VF/group_size - addr_mis/elem_size)&(VF/group_size-1) ) - where group_size is the size of the interleaved group. + When the step of the data-ref in the loop is not 1 (as in interleaved data + and SLP), the number of iterations of the prolog must be divided by the step + (which is equal to the size of interleaved group). The above formulas assume that VF == number of elements in the vector. This may not hold when there are multiple-types in the loop. @@ -5428,27 +7584,22 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters) { struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); - tree var, stmt; + tree var; + gimple_seq stmts; tree iters, iters_name; edge pe; basic_block new_bb; - tree dr_stmt = DR_STMT (dr); + gimple dr_stmt = DR_STMT (dr); stmt_vec_info stmt_info = vinfo_for_stmt (dr_stmt); tree vectype = STMT_VINFO_VECTYPE (stmt_info); int vectype_align = TYPE_ALIGN (vectype) / BITS_PER_UNIT; tree niters_type = TREE_TYPE (loop_niters); - int group_size = 1; + int step = 1; int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr)))); int nelements = TYPE_VECTOR_SUBPARTS (vectype); - if (DR_GROUP_FIRST_DR (stmt_info)) - { - /* For interleaved access element size must be multiplied by the size of - the interleaved group. 
*/ - group_size = DR_GROUP_SIZE (vinfo_for_stmt ( - DR_GROUP_FIRST_DR (stmt_info))); - element_size *= group_size; - } + if (STMT_VINFO_STRIDED_ACCESS (stmt_info)) + step = DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_DR (stmt_info))); pe = loop_preheader_edge (loop); @@ -5459,14 +7610,15 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters) if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "known alignment = %d.", byte_misalign); - iters = build_int_cst (niters_type, - (nelements - elem_misalign)&(nelements/group_size-1)); + + iters = build_int_cst (niters_type, + (((nelements - elem_misalign) & (nelements - 1)) / step)); } else { - tree new_stmts = NULL_TREE; - tree start_addr = - vect_create_addr_base_for_vector_ref (dr_stmt, &new_stmts, NULL_TREE); + gimple_seq new_stmts = NULL; + tree start_addr = vect_create_addr_base_for_vector_ref (dr_stmt, + &new_stmts, NULL_TREE, loop); tree ptr_type = TREE_TYPE (start_addr); tree size = TYPE_SIZE (ptr_type); tree type = lang_hooks.types.type_for_size (tree_low_cst (size, 1), 1); @@ -5478,7 +7630,7 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters) tree byte_misalign; tree elem_misalign; - new_bb = bsi_insert_on_edge_immediate (pe, new_stmts); + new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmts); gcc_assert (!new_bb); /* Create: byte_misalign = addr & (vectype_size - 1) */ @@ -5510,12 +7662,13 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters) var = create_tmp_var (niters_type, "prolog_loop_niters"); add_referenced_var (var); - iters_name = force_gimple_operand (iters, &stmt, false, var); + stmts = NULL; + iters_name = force_gimple_operand (iters, &stmts, false, var); /* Insert stmt on loop preheader edge. */ - if (stmt) + if (stmts) { - basic_block new_bb = bsi_insert_on_edge_immediate (pe, stmt); + basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); gcc_assert (!new_bb); } @@ -5535,8 +7688,10 @@ vect_update_init_of_dr (struct data_reference *dr, tree niters) { tree offset = DR_OFFSET (dr); - niters = fold_build2 (MULT_EXPR, TREE_TYPE (niters), niters, DR_STEP (dr)); - offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset, niters); + niters = fold_build2 (MULT_EXPR, sizetype, + fold_convert (sizetype, niters), + fold_convert (sizetype, DR_STEP (dr))); + offset = fold_build2 (PLUS_EXPR, sizetype, offset, niters); DR_OFFSET (dr) = offset; } @@ -5579,6 +7734,9 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo) tree niters_of_prolog_loop, ni_name; tree n_iters; struct loop *new_loop; + bool check_profitability = false; + unsigned int th = 0; + int min_profitable_iters; if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "=== vect_do_peeling_for_alignment ==="); @@ -5588,10 +7746,26 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo) ni_name = vect_build_loop_niters (loop_vinfo); niters_of_prolog_loop = vect_gen_niters_for_prolog_loop (loop_vinfo, ni_name); + + /* If cost model check not done during versioning. */ + if (!VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) + && !VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) + { + check_profitability = true; + + /* Get profitability threshold for vectorized loop. */ + min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo); + + th = conservative_cost_threshold (loop_vinfo, + min_profitable_iters); + } + /* Peel the prolog loop and iterate it niters_of_prolog_loop. 
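A worked example of the compile-time case above: with 4 elements per vector (nelements), element size 4 and a known byte misalignment of 8, elem_misalign is 8 / 4 = 2, so for step 1 the prolog executes (4 - 2) & (4 - 1) = 2 iterations before the main loop sees aligned accesses.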
*/ - new_loop = - slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop), - niters_of_prolog_loop, ni_name, true, 0); + new_loop = + slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop), + niters_of_prolog_loop, ni_name, true, + th, check_profitability); + gcc_assert (new_loop); #ifdef ENABLE_CHECKING slpeel_verify_cfg_after_peeling (new_loop, loop); @@ -5619,6 +7793,8 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo) checked at runtime. Input: + COND_EXPR - input conditional expression. New conditions will be chained + with logical AND operation. LOOP_VINFO - two fields of the loop information are used. LOOP_VINFO_PTR_MASK is the mask used to check the alignment. LOOP_VINFO_MAY_MISALIGN_STMTS contains the refs to be checked. @@ -5635,13 +7811,15 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo) test can be done as a&(n-1) == 0. For example, for 16 byte vectors the test is a&0xf == 0. */ -static tree +static void vect_create_cond_for_align_checks (loop_vec_info loop_vinfo, - tree *cond_expr_stmt_list) + tree *cond_expr, + gimple_seq *cond_expr_stmt_list) { - VEC(tree,heap) *may_misalign_stmts + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + VEC(gimple,heap) *may_misalign_stmts = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo); - tree ref_stmt, tmp; + gimple ref_stmt; int mask = LOOP_VINFO_PTR_MASK (loop_vinfo); tree mask_cst; unsigned int i; @@ -5649,8 +7827,10 @@ vect_create_cond_for_align_checks (loop_vec_info loop_vinfo, tree int_ptrsize_type; char tmp_name[20]; tree or_tmp_name = NULL_TREE; - tree and_tmp, and_tmp_name, and_stmt; + tree and_tmp, and_tmp_name; + gimple and_stmt; tree ptrsize_zero; + tree part_cond_expr; /* Check that mask is one less than a power of 2, i.e., mask is all zeros followed by all ones. */ @@ -5665,29 +7845,29 @@ vect_create_cond_for_align_checks (loop_vec_info loop_vinfo, /* Create expression (mask & (dr_1 || ... || dr_n)) where dr_i is the address of the first vector of the i'th data reference. */ - for (i = 0; VEC_iterate (tree, may_misalign_stmts, i, ref_stmt); i++) + for (i = 0; VEC_iterate (gimple, may_misalign_stmts, i, ref_stmt); i++) { - tree new_stmt_list = NULL_TREE; + gimple_seq new_stmt_list = NULL; tree addr_base; - tree addr_tmp, addr_tmp_name, addr_stmt; - tree or_tmp, new_or_tmp_name, or_stmt; + tree addr_tmp, addr_tmp_name; + tree or_tmp, new_or_tmp_name; + gimple addr_stmt, or_stmt; /* create: addr_tmp = (int)(address_of_first_vector) */ - addr_base = vect_create_addr_base_for_vector_ref (ref_stmt, - &new_stmt_list, - NULL_TREE); - - if (new_stmt_list != NULL_TREE) - append_to_statement_list_force (new_stmt_list, cond_expr_stmt_list); + addr_base = + vect_create_addr_base_for_vector_ref (ref_stmt, &new_stmt_list, + NULL_TREE, loop); + if (new_stmt_list != NULL) + gimple_seq_add_seq (cond_expr_stmt_list, new_stmt_list); sprintf (tmp_name, "%s%d", "addr2int", i); addr_tmp = create_tmp_var (int_ptrsize_type, tmp_name); add_referenced_var (addr_tmp); - addr_tmp_name = make_ssa_name (addr_tmp, NULL_TREE); - addr_stmt = fold_convert (int_ptrsize_type, addr_base); - addr_stmt = build_gimple_modify_stmt (addr_tmp_name, addr_stmt); + addr_tmp_name = make_ssa_name (addr_tmp, NULL); + addr_stmt = gimple_build_assign_with_ops (NOP_EXPR, addr_tmp_name, + addr_base, NULL_TREE); SSA_NAME_DEF_STMT (addr_tmp_name) = addr_stmt; - append_to_statement_list_force (addr_stmt, cond_expr_stmt_list); + gimple_seq_add_stmt (cond_expr_stmt_list, addr_stmt); /* The addresses are OR together. 
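For example (illustrative addresses): if two first-vector addresses end in 0x10 and 0x28 and the mask for 16-byte vectors is 0xf, the OR ends in 0x38, 0x38 & 0xf is nonzero, the EQ_EXPR test built below is false, and the unvectorized loop version is selected at runtime.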
*/ @@ -5697,12 +7877,12 @@ vect_create_cond_for_align_checks (loop_vec_info loop_vinfo, sprintf (tmp_name, "%s%d", "orptrs", i); or_tmp = create_tmp_var (int_ptrsize_type, tmp_name); add_referenced_var (or_tmp); - new_or_tmp_name = make_ssa_name (or_tmp, NULL_TREE); - tmp = build2 (BIT_IOR_EXPR, int_ptrsize_type, - or_tmp_name, addr_tmp_name); - or_stmt = build_gimple_modify_stmt (new_or_tmp_name, tmp); + new_or_tmp_name = make_ssa_name (or_tmp, NULL); + or_stmt = gimple_build_assign_with_ops (BIT_IOR_EXPR, + new_or_tmp_name, + or_tmp_name, addr_tmp_name); SSA_NAME_DEF_STMT (new_or_tmp_name) = or_stmt; - append_to_statement_list_force (or_stmt, cond_expr_stmt_list); + gimple_seq_add_stmt (cond_expr_stmt_list, or_stmt); or_tmp_name = new_or_tmp_name; } else @@ -5715,21 +7895,417 @@ vect_create_cond_for_align_checks (loop_vec_info loop_vinfo, /* create: and_tmp = or_tmp & mask */ and_tmp = create_tmp_var (int_ptrsize_type, "andmask" ); add_referenced_var (and_tmp); - and_tmp_name = make_ssa_name (and_tmp, NULL_TREE); + and_tmp_name = make_ssa_name (and_tmp, NULL); - tmp = build2 (BIT_AND_EXPR, int_ptrsize_type, or_tmp_name, mask_cst); - and_stmt = build_gimple_modify_stmt (and_tmp_name, tmp); + and_stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, and_tmp_name, + or_tmp_name, mask_cst); SSA_NAME_DEF_STMT (and_tmp_name) = and_stmt; - append_to_statement_list_force (and_stmt, cond_expr_stmt_list); + gimple_seq_add_stmt (cond_expr_stmt_list, and_stmt); /* Make and_tmp the left operand of the conditional test against zero. if and_tmp has a nonzero bit then some address is unaligned. */ ptrsize_zero = build_int_cst (int_ptrsize_type, 0); - return build2 (EQ_EXPR, boolean_type_node, - and_tmp_name, ptrsize_zero); + part_cond_expr = fold_build2 (EQ_EXPR, boolean_type_node, + and_tmp_name, ptrsize_zero); + if (*cond_expr) + *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, + *cond_expr, part_cond_expr); + else + *cond_expr = part_cond_expr; +} + +/* Function vect_vfa_segment_size. + + Create an expression that computes the size of segment + that will be accessed for a data reference. The function takes into + account that realignment loads may access one more vector. + + Input: + DR: The data reference. + VECT_FACTOR: vectorization factor. + + Return an expression whose value is the size of segment which will be + accessed by DR. */ + +static tree +vect_vfa_segment_size (struct data_reference *dr, tree vect_factor) +{ + tree segment_length = fold_build2 (MULT_EXPR, integer_type_node, + DR_STEP (dr), vect_factor); + + if (vect_supportable_dr_alignment (dr) == dr_explicit_realign_optimized) + { + tree vector_size = TYPE_SIZE_UNIT + (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)))); + + segment_length = fold_build2 (PLUS_EXPR, integer_type_node, + segment_length, vector_size); + } + return fold_convert (sizetype, segment_length); +} + +/* Function vect_create_cond_for_alias_checks. + + Create a conditional expression that represents the run-time checks for + overlapping of address ranges represented by a list of data references + relations passed as input. + + Input: + COND_EXPR - input conditional expression. New conditions will be chained + with logical AND operation. + LOOP_VINFO - field LOOP_VINFO_MAY_ALIAS_STMTS contains the list of ddrs + to be checked. + + Output: + COND_EXPR - conditional expression. + COND_EXPR_STMT_LIST - statements needed to construct the conditional + expression.
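+ Illustrative segment sizes: for a data-ref with DR_STEP 4 and vectorization factor 4, vect_vfa_segment_size above yields 16 bytes, plus one additional vector (e.g. 16 more bytes) when the ref uses the dr_explicit_realign_optimized scheme.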
+ + + The returned value is the conditional expression to be used in the if + statement that controls which version of the loop gets executed at runtime. +*/ + +static void +vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo, + tree * cond_expr, + gimple_seq * cond_expr_stmt_list) +{ + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + VEC (ddr_p, heap) * may_alias_ddrs = + LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo); + tree vect_factor = + build_int_cst (integer_type_node, LOOP_VINFO_VECT_FACTOR (loop_vinfo)); + + ddr_p ddr; + unsigned int i; + tree part_cond_expr; + + /* Create expression + ((store_ptr_0 + store_segment_length_0) < load_ptr_0) + || (load_ptr_0 + load_segment_length_0) < store_ptr_0)) + && + ... + && + ((store_ptr_n + store_segment_length_n) < load_ptr_n) + || (load_ptr_n + load_segment_length_n) < store_ptr_n)) */ + + if (VEC_empty (ddr_p, may_alias_ddrs)) + return; + + for (i = 0; VEC_iterate (ddr_p, may_alias_ddrs, i, ddr); i++) + { + struct data_reference *dr_a, *dr_b; + gimple dr_group_first_a, dr_group_first_b; + tree addr_base_a, addr_base_b; + tree segment_length_a, segment_length_b; + gimple stmt_a, stmt_b; + + dr_a = DDR_A (ddr); + stmt_a = DR_STMT (DDR_A (ddr)); + dr_group_first_a = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_a)); + if (dr_group_first_a) + { + stmt_a = dr_group_first_a; + dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a)); + } + + dr_b = DDR_B (ddr); + stmt_b = DR_STMT (DDR_B (ddr)); + dr_group_first_b = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_b)); + if (dr_group_first_b) + { + stmt_b = dr_group_first_b; + dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b)); + } + + addr_base_a = + vect_create_addr_base_for_vector_ref (stmt_a, cond_expr_stmt_list, + NULL_TREE, loop); + addr_base_b = + vect_create_addr_base_for_vector_ref (stmt_b, cond_expr_stmt_list, + NULL_TREE, loop); + + segment_length_a = vect_vfa_segment_size (dr_a, vect_factor); + segment_length_b = vect_vfa_segment_size (dr_b, vect_factor); + + if (vect_print_dump_info (REPORT_DR_DETAILS)) + { + fprintf (vect_dump, + "create runtime check for data references "); + print_generic_expr (vect_dump, DR_REF (dr_a), TDF_SLIM); + fprintf (vect_dump, " and "); + print_generic_expr (vect_dump, DR_REF (dr_b), TDF_SLIM); + } + + + part_cond_expr = + fold_build2 (TRUTH_OR_EXPR, boolean_type_node, + fold_build2 (LT_EXPR, boolean_type_node, + fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (addr_base_a), + addr_base_a, + segment_length_a), + addr_base_b), + fold_build2 (LT_EXPR, boolean_type_node, + fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (addr_base_b), + addr_base_b, + segment_length_b), + addr_base_a)); + + if (*cond_expr) + *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, + *cond_expr, part_cond_expr); + else + *cond_expr = part_cond_expr; + } + if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS)) + fprintf (vect_dump, "created %u versioning for alias checks.\n", + VEC_length (ddr_p, may_alias_ddrs)); + +} + +/* Function vect_loop_versioning. + + If the loop has data references that may or may not be aligned and/or + has data reference relations whose independence was not proven then + two versions of the loop need to be generated, one which is vectorized + and one which isn't. A test is then generated to control which of the + loops is executed. The test checks for the alignment of all of the + data references that may or may not be aligned. An additional + sequence of runtime tests is generated for each pair of DDRs whose + independence was not proven.
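+ Conceptually, the generated control flow is: if (niters > th && all-aligned && no-overlap) vectorized-loop; else scalar-loop;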
The vectorized version of the loop is + executed only if both alias and alignment tests are passed. + + The test generated to check which version of the loop is executed + is modified to also check for profitability as indicated by the + cost model initially. */ + +static void +vect_loop_versioning (loop_vec_info loop_vinfo) +{ + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + struct loop *nloop; + tree cond_expr = NULL_TREE; + gimple_seq cond_expr_stmt_list = NULL; + basic_block condition_bb; + gimple_stmt_iterator gsi, cond_exp_gsi; + basic_block merge_bb; + basic_block new_exit_bb; + edge new_exit_e, e; + gimple orig_phi, new_phi; + tree arg; + unsigned prob = 4 * REG_BR_PROB_BASE / 5; + gimple_seq gimplify_stmt_list = NULL; + tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo); + int min_profitable_iters = 0; + unsigned int th; + + /* Get profitability threshold for vectorized loop. */ + min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo); + + th = conservative_cost_threshold (loop_vinfo, + min_profitable_iters); + + cond_expr = + build2 (GT_EXPR, boolean_type_node, scalar_loop_iters, + build_int_cst (TREE_TYPE (scalar_loop_iters), th)); + + cond_expr = force_gimple_operand (cond_expr, &cond_expr_stmt_list, + false, NULL_TREE); + + if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))) + vect_create_cond_for_align_checks (loop_vinfo, &cond_expr, + &cond_expr_stmt_list); + + if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) + vect_create_cond_for_alias_checks (loop_vinfo, &cond_expr, + &cond_expr_stmt_list); + + cond_expr = + fold_build2 (NE_EXPR, boolean_type_node, cond_expr, integer_zero_node); + cond_expr = + force_gimple_operand (cond_expr, &gimplify_stmt_list, true, NULL_TREE); + gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list); + + initialize_original_copy_tables (); + nloop = loop_version (loop, cond_expr, &condition_bb, + prob, prob, REG_BR_PROB_BASE - prob, true); + free_original_copy_tables(); + + /* Loop versioning violates an assumption we try to maintain during + vectorization - that the loop exit block has a single predecessor. + After versioning, the exit block of both loop versions is the same + basic block (i.e. it has two predecessors). Just in order to simplify + following transformations in the vectorizer, we fix this situation + here by adding a new (empty) block on the exit-edge of the loop, + with the proper loop-exit phis to maintain loop-closed-form. */ + + merge_bb = single_exit (loop)->dest; + gcc_assert (EDGE_COUNT (merge_bb->preds) == 2); + new_exit_bb = split_edge (single_exit (loop)); + new_exit_e = single_exit (loop); + e = EDGE_SUCC (new_exit_bb, 0); + + for (gsi = gsi_start_phis (merge_bb); !gsi_end_p (gsi); gsi_next (&gsi)) + { + orig_phi = gsi_stmt (gsi); + new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)), + new_exit_bb); + arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e); + add_phi_arg (new_phi, arg, new_exit_e); + SET_PHI_ARG_DEF (orig_phi, e->dest_idx, PHI_RESULT (new_phi)); + } + + /* End loop-exit-fixes after versioning. */ + + update_ssa (TODO_update_ssa); + if (cond_expr_stmt_list) + { + cond_exp_gsi = gsi_last_bb (condition_bb); + gsi_insert_seq_before (&cond_exp_gsi, cond_expr_stmt_list, GSI_SAME_STMT); + } +} + +/* Remove a group of stores (for SLP or interleaving), free their + stmt_vec_info.
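+ For example, for an interleaved store group S1 -> S2 -> S3 (chained through DR_GROUP_NEXT_DR), all three scalar stores are removed, and their stmt_vec_infos freed, once the vector code for the whole chain has been emitted.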
*/ + +static void +vect_remove_stores (gimple first_stmt) +{ + gimple next = first_stmt; + gimple tmp; + gimple_stmt_iterator next_si; + + while (next) + { + /* Free the attached stmt_vec_info and remove the stmt. */ + next_si = gsi_for_stmt (next); + gsi_remove (&next_si, true); + tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next)); + free_stmt_vec_info (next); + next = tmp; + } +} + + +/* Vectorize SLP instance tree in postorder. */ + +static bool +vect_schedule_slp_instance (slp_tree node, slp_instance instance, + unsigned int vectorization_factor) +{ + gimple stmt; + bool strided_store, is_store; + gimple_stmt_iterator si; + stmt_vec_info stmt_info; + unsigned int vec_stmts_size, nunits, group_size; + tree vectype; + int i; + slp_tree loads_node; + + if (!node) + return false; + + vect_schedule_slp_instance (SLP_TREE_LEFT (node), instance, + vectorization_factor); + vect_schedule_slp_instance (SLP_TREE_RIGHT (node), instance, + vectorization_factor); + + stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0); + stmt_info = vinfo_for_stmt (stmt); + + /* VECTYPE is the type of the destination. */ + vectype = get_vectype_for_scalar_type (TREE_TYPE (gimple_assign_lhs (stmt))); + nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype); + group_size = SLP_INSTANCE_GROUP_SIZE (instance); + + /* For each SLP instance calculate number of vector stmts to be created + for the scalar stmts in each node of the SLP tree. Number of vector + elements in one vector iteration is the number of scalar elements in + one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector + size. */ + vec_stmts_size = (vectorization_factor * group_size) / nunits; + + /* In case of load permutation we have to allocate vectorized statements for + all the nodes that participate in that permutation. */ + if (SLP_INSTANCE_LOAD_PERMUTATION (instance)) + { + for (i = 0; + VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (instance), i, loads_node); + i++) + { + if (!SLP_TREE_VEC_STMTS (loads_node)) + { + SLP_TREE_VEC_STMTS (loads_node) = VEC_alloc (gimple, heap, + vec_stmts_size); + SLP_TREE_NUMBER_OF_VEC_STMTS (loads_node) = vec_stmts_size; + } + } + } + + if (!SLP_TREE_VEC_STMTS (node)) + { + SLP_TREE_VEC_STMTS (node) = VEC_alloc (gimple, heap, vec_stmts_size); + SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size; + } + + if (vect_print_dump_info (REPORT_DETAILS)) + { + fprintf (vect_dump, "------>vectorizing SLP node starting from: "); + print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); + } + + /* Loads should be inserted before the first load. */ + if (SLP_INSTANCE_FIRST_LOAD_STMT (instance) + && STMT_VINFO_STRIDED_ACCESS (stmt_info) + && !REFERENCE_CLASS_P (gimple_get_lhs (stmt))) + si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance)); + else + si = gsi_for_stmt (stmt); + + is_store = vect_transform_stmt (stmt, &si, &strided_store, node, instance); + if (is_store) + { + if (DR_GROUP_FIRST_DR (stmt_info)) + /* If IS_STORE is TRUE, the vectorization of the + interleaving chain was completed - free all the stores in + the chain. */ + vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info)); + else + /* FORNOW: SLP originates only from strided stores. */ + gcc_unreachable (); + + return true; + } + + /* FORNOW: SLP originates only from strided stores. 
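+ (The root of an SLP instance is thus always a store group. As an illustration of the sizing above: group_size 2, vectorization factor 4 and 4 elements per vector give vec_stmts_size = (4 * 2) / 4 = 2 vector stmts per node.)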
*/ + return false; } +static bool +vect_schedule_slp (loop_vec_info loop_vinfo) +{ + VEC (slp_instance, heap) *slp_instances = + LOOP_VINFO_SLP_INSTANCES (loop_vinfo); + slp_instance instance; + unsigned int i; + bool is_store = false; + + for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++) + { + /* Schedule the tree of INSTANCE. */ + is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance), + instance, LOOP_VINFO_VECT_FACTOR (loop_vinfo)); + + if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS) + || vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) + fprintf (vect_dump, "vectorizing stmts using SLP."); + } + + return is_store; +} + /* Function vect_transform_loop. The analysis phase has determined that the loop is vectorizable. @@ -5742,71 +8318,20 @@ vect_transform_loop (loop_vec_info loop_vinfo) struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes; - block_stmt_iterator si, next_si; + gimple_stmt_iterator si; int i; tree ratio = NULL; int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); bool strided_store; + bool slp_scheduled = false; + unsigned int nunits; if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "=== vec_transform_loop ==="); - /* If the loop has data references that may or may not be aligned then - two versions of the loop need to be generated, one which is vectorized - and one which isn't. A test is then generated to control which of the - loops is executed. The test checks for the alignment of all of the - data references that may or may not be aligned. */ - - if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))) - { - struct loop *nloop; - tree cond_expr; - tree cond_expr_stmt_list = NULL_TREE; - basic_block condition_bb; - block_stmt_iterator cond_exp_bsi; - basic_block merge_bb; - basic_block new_exit_bb; - edge new_exit_e, e; - tree orig_phi, new_phi, arg; - unsigned prob = 4 * REG_BR_PROB_BASE / 5; - - cond_expr = vect_create_cond_for_align_checks (loop_vinfo, - &cond_expr_stmt_list); - initialize_original_copy_tables (); - nloop = loop_version (loop, cond_expr, &condition_bb, - prob, prob, REG_BR_PROB_BASE - prob, true); - free_original_copy_tables(); - - /** Loop versioning violates an assumption we try to maintain during - vectorization - that the loop exit block has a single predecessor. - After versioning, the exit block of both loop versions is the same - basic block (i.e. it has two predecessors). Just in order to simplify - following transformations in the vectorizer, we fix this situation - here by adding a new (empty) block on the exit-edge of the loop, - with the proper loop-exit phis to maintain loop-closed-form. 
**/ - - merge_bb = single_exit (loop)->dest; - gcc_assert (EDGE_COUNT (merge_bb->preds) == 2); - new_exit_bb = split_edge (single_exit (loop)); - new_exit_e = single_exit (loop); - e = EDGE_SUCC (new_exit_bb, 0); - - for (orig_phi = phi_nodes (merge_bb); orig_phi; - orig_phi = PHI_CHAIN (orig_phi)) - { - new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)), - new_exit_bb); - arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e); - add_phi_arg (new_phi, arg, new_exit_e); - SET_PHI_ARG_DEF (orig_phi, e->dest_idx, PHI_RESULT (new_phi)); - } - - /** end loop-exit-fixes after versioning **/ - - update_ssa (TODO_update_ssa); - cond_exp_bsi = bsi_last (condition_bb); - bsi_insert_before (&cond_exp_bsi, cond_expr_stmt_list, BSI_SAME_STMT); - } + if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) + || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) + vect_loop_versioning (loop_vinfo); /* CHECKME: we wouldn't need this if we called update_ssa once for all loops. */ @@ -5850,18 +8375,20 @@ vect_transform_loop (loop_vec_info loop_vinfo) { basic_block bb = bbs[i]; stmt_vec_info stmt_info; - tree phi; + gimple phi; - for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) + for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { + phi = gsi_stmt (si); if (vect_print_dump_info (REPORT_DETAILS)) { fprintf (vect_dump, "------>vectorizing phi: "); - print_generic_expr (vect_dump, phi, TDF_SLIM); + print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM); } stmt_info = vinfo_for_stmt (phi); if (!stmt_info) continue; + if (!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) continue; @@ -5875,79 +8402,106 @@ vect_transform_loop (loop_vec_info loop_vinfo) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "transform phi."); - vect_transform_stmt (phi, NULL, NULL); + vect_transform_stmt (phi, NULL, NULL, NULL, NULL); } } - for (si = bsi_start (bb); !bsi_end_p (si);) + for (si = gsi_start_bb (bb); !gsi_end_p (si);) { - tree stmt = bsi_stmt (si); + gimple stmt = gsi_stmt (si); bool is_store; if (vect_print_dump_info (REPORT_DETAILS)) { fprintf (vect_dump, "------>vectorizing statement: "); - print_generic_expr (vect_dump, stmt, TDF_SLIM); + print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); } + stmt_info = vinfo_for_stmt (stmt); - gcc_assert (stmt_info); + + /* vector stmts created in the outer-loop during vectorization of + stmts in an inner-loop may not have a stmt_info, and do not + need to be vectorized. */ + if (!stmt_info) + { + gsi_next (&si); + continue; + } + if (!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) { - bsi_next (&si); + gsi_next (&si); continue; } gcc_assert (STMT_VINFO_VECTYPE (stmt_info)); - if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)) - != (unsigned HOST_WIDE_INT) vectorization_factor) - && vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "multiple-types."); + nunits = + (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)); + if (!STMT_SLP_TYPE (stmt_info) + && nunits != (unsigned int) vectorization_factor + && vect_print_dump_info (REPORT_DETAILS)) + /* For SLP VF is set according to unrolling factor, and not to + vector size, hence for SLP this print is not valid. */ + fprintf (vect_dump, "multiple-types."); + + /* SLP. Schedule all the SLP instances when the first SLP stmt is + reached. 
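+ Once scheduled, pure SLP stmts are skipped by the loop-based vectorization below, while hybrid SLP stmts (used both in SLP and in non-SLP contexts) are additionally vectorized by the loop-based code, as the checks that follow implement.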
*/ + if (STMT_SLP_TYPE (stmt_info)) + { + if (!slp_scheduled) + { + slp_scheduled = true; + + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "=== scheduling SLP instances ==="); + + is_store = vect_schedule_slp (loop_vinfo); + + /* IS_STORE is true if STMT is a store. Stores cannot be of + hybrid SLP type. They are removed in + vect_schedule_slp_instance and their vinfo is destroyed. */ + if (is_store) + { + gsi_next (&si); + continue; + } + } + /* Hybrid SLP stmts must be vectorized in addition to SLP. */ + if (PURE_SLP_STMT (stmt_info)) + { + gsi_next (&si); + continue; + } + } + /* -------- vectorize statement ------------ */ if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "transform statement."); strided_store = false; - is_store = vect_transform_stmt (stmt, &si, &strided_store); + is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL); if (is_store) { - stmt_ann_t ann; - if (DR_GROUP_FIRST_DR (stmt_info)) + if (STMT_VINFO_STRIDED_ACCESS (stmt_info)) { /* Interleaving. If IS_STORE is TRUE, the vectorization of the interleaving chain was completed - free all the stores in the chain. */ - tree next = DR_GROUP_FIRST_DR (stmt_info); - tree tmp; - stmt_vec_info next_stmt_info; - - while (next) - { - next_si = bsi_for_stmt (next); - next_stmt_info = vinfo_for_stmt (next); - /* Free the attached stmt_vec_info and remove the stmt. */ - ann = stmt_ann (next); - tmp = DR_GROUP_NEXT_DR (next_stmt_info); - free (next_stmt_info); - set_stmt_info (ann, NULL); - bsi_remove (&next_si, true); - next = tmp; - } - bsi_remove (&si, true); + vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info)); + gsi_remove (&si, true); continue; } else { /* Free the attached stmt_vec_info and remove the stmt. */ - ann = stmt_ann (stmt); - free (stmt_info); - set_stmt_info (ann, NULL); - bsi_remove (&si, true); + free_stmt_vec_info (stmt); + gsi_remove (&si, true); continue; } } - bsi_next (&si); + gsi_next (&si); } /* stmts in BB */ } /* BBs in loop */ @@ -5962,4 +8516,6 @@ vect_transform_loop (loop_vec_info loop_vinfo) if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS)) fprintf (vect_dump, "LOOP VECTORIZED."); + if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOOPS)) + fprintf (vect_dump, "OUTER LOOP VECTORIZED."); }
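For orientation, a minimal example of user code (illustrative, not part of this patch) that exercises the machinery above: the two stores form a strided (interleaved) group of size 2, so the loop is a candidate for SLP scheduling via vect_schedule_slp_instance, and, because out may alias a or b, also for the runtime alias-check versioning done by vect_loop_versioning.

/* Interleaved stores with step 2: a strided store group of size 2.  */
void
interleave (float *out, const float *a, const float *b, int n)
{
  int i;
  for (i = 0; i < n; i++)
    {
      out[2 * i] = a[i];
      out[2 * i + 1] = b[i];
    }
}

With vectorization enabled (e.g. -O2 -ftree-vectorize), such a loop can be versioned, possibly peeled for alignment, and its store group vectorized as described above.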