X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Ftree-vect-transform.c;h=c718e0742127284762526bcc17542349d737cedf;hb=c7684b8e2ecd2622f304401306439aad2a7f7949;hp=4775e2cd58282db48b992ea500b1dc9ea8398612;hpb=30f263a419dd1cf9804dea818dab0c2c40fda265;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/tree-vect-transform.c b/gcc/tree-vect-transform.c
index 4775e2cd582..c718e074212 100644
--- a/gcc/tree-vect-transform.c
+++ b/gcc/tree-vect-transform.c
@@ -6,7 +6,7 @@ This file is part of GCC.
 
 GCC is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
 version.
 
 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
@@ -15,9 +15,8 @@ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 for more details.
 
 You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING.  If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA.  */
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
 
 #include "config.h"
 #include "system.h"
@@ -47,19 +46,18 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
 #include "real.h"
 
 /* Utility functions for the code transformation.  */
-static bool vect_transform_stmt (tree, block_stmt_iterator *, bool *);
+static bool vect_transform_stmt (tree, block_stmt_iterator *, bool *, slp_tree);
 static tree vect_create_destination_var (tree, tree);
 static tree vect_create_data_ref_ptr 
-  (tree, block_stmt_iterator *, tree, tree *, tree *, bool, tree);
-static tree vect_create_addr_base_for_vector_ref (tree, tree *, tree);
-static tree vect_setup_realignment (tree, block_stmt_iterator *, tree *);
+  (tree, struct loop*, tree, tree *, tree *, bool, tree, bool *);
+static tree vect_create_addr_base_for_vector_ref 
+  (tree, tree *, tree, struct loop *);
 static tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
 static tree vect_get_vec_def_for_operand (tree, tree, tree *);
-static tree vect_init_vector (tree, tree, tree);
+static tree vect_init_vector (tree, tree, tree, block_stmt_iterator *);
 static void vect_finish_stmt_generation 
-  (tree stmt, tree vec_stmt, block_stmt_iterator *bsi);
+  (tree stmt, tree vec_stmt, block_stmt_iterator *);
 static bool vect_is_simple_cond (tree, loop_vec_info);
-static void update_vuses_to_preheader (tree, struct loop*);
 static void vect_create_epilog_for_reduction (tree, tree, enum tree_code, tree);
 static tree get_initial_def_for_reduction (tree, tree, tree *);
 
@@ -74,6 +72,628 @@ static void vect_update_inits_of_drs (loop_vec_info, tree);
 static int vect_min_worthwhile_factor (enum tree_code);
 
 
+static int
+cost_for_stmt (tree stmt)
+{
+  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+
+  switch (STMT_VINFO_TYPE (stmt_info))
+    {
+    case load_vec_info_type:
+      return TARG_SCALAR_LOAD_COST;
+    case store_vec_info_type:
+      return TARG_SCALAR_STORE_COST;
+    case op_vec_info_type:
+    case condition_vec_info_type:
+    case assignment_vec_info_type:
+    case reduc_vec_info_type:
+    case induc_vec_info_type:
+    case type_promotion_vec_info_type:
+    case type_demotion_vec_info_type:
+    case type_conversion_vec_info_type:
+    case call_vec_info_type:
+      return TARG_SCALAR_STMT_COST;
+    case undef_vec_info_type:
+    default:
+      gcc_unreachable ();
+    }
+}
+
+
+/* Function 
vect_estimate_min_profitable_iters
+
+   Return the number of iterations required for the vector version of the
+   loop to be profitable relative to the cost of the scalar version of the
+   loop.
+
+   TODO: Take profile info into account before making vectorization
+   decisions, if available.  */
+
+int
+vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
+{
+  int i;
+  int min_profitable_iters;
+  int peel_iters_prologue;
+  int peel_iters_epilogue;
+  int vec_inside_cost = 0;
+  int vec_outside_cost = 0;
+  int scalar_single_iter_cost = 0;
+  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
+  int nbbs = loop->num_nodes;
+  int byte_misalign;
+  int peel_guard_costs = 0;
+  int innerloop_iters = 0, factor;
+  VEC (slp_instance, heap) *slp_instances;
+  slp_instance instance;
+
+  /* Cost model disabled.  */
+  if (!flag_vect_cost_model)
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+	fprintf (vect_dump, "cost model disabled.");
+      return 0;
+    }
+
+  /* Requires loop versioning tests to handle misalignment.  */
+
+  if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)))
+    {
+      /*  FIXME: Make cost depend on complexity of individual check.  */
+      vec_outside_cost +=
+	VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
+      if (vect_print_dump_info (REPORT_DETAILS))
+	fprintf (vect_dump, "cost model: Adding cost of checks for loop "
+		 "versioning to treat misalignment.\n");
+    }
+
+  if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
+    {
+      /*  FIXME: Make cost depend on complexity of individual check.  */
+      vec_outside_cost +=
+	VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
+      if (vect_print_dump_info (REPORT_DETAILS))
+	fprintf (vect_dump, "cost model: Adding cost of checks for loop "
+		 "versioning aliasing.\n");
+    }
+
+  if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
+      || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
+    {
+      vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
+    }
+
+  /* Count statements in scalar loop.  Using this as scalar cost for a single
+     iteration for now.
+
+     TODO: Add outer loop support.
+
+     TODO: Consider assigning different costs to different scalar
+     statements.  */
+
+  /* FORNOW.  */
+  if (loop->inner)
+    innerloop_iters = 50; /* FIXME */
+
+  for (i = 0; i < nbbs; i++)
+    {
+      block_stmt_iterator si;
+      basic_block bb = bbs[i];
+
+      if (bb->loop_father == loop->inner)
+	factor = innerloop_iters;
+      else
+	factor = 1;
+
+      for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
+	{
+	  tree stmt = bsi_stmt (si);
+	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+	  if (!STMT_VINFO_RELEVANT_P (stmt_info)
+	      && !STMT_VINFO_LIVE_P (stmt_info))
+	    continue;
+	  scalar_single_iter_cost += cost_for_stmt (stmt) * factor;
+	  vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
+	  /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
+	     some of the "outside" costs are generated inside the outer-loop.  */
+	  vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
+	}
+    }
+
+  /* Add additional cost for the peeled instructions in prologue and epilogue
+     loop.
+
+     FORNOW: If we don't know the value of peel_iters for prologue or epilogue
+     at compile-time - we assume it's vf/2 (the worst would be vf-1).
+
+     TODO: Build an expression that represents peel_iters for prologue and
+     epilogue to be used in a run-time test. 
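+
+     As an illustration (the numbers are made up, not taken from any
+     particular target): with vf = 4 and unknown misalignment, vf/2 = 2
+     peeled scalar iterations are charged to each of the prologue and
+     epilogue, plus the taken/not-taken branch guards accounted for below.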
*/
+
+  byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
+
+  if (byte_misalign < 0)
+    {
+      peel_iters_prologue = vf/2;
+      if (vect_print_dump_info (REPORT_DETAILS))
+	fprintf (vect_dump, "cost model: "
+		 "prologue peel iters set to vf/2.");
+
+      /* If peeling for alignment is unknown, loop bound of main loop becomes
+	 unknown.  */
+      peel_iters_epilogue = vf/2;
+      if (vect_print_dump_info (REPORT_DETAILS))
+	fprintf (vect_dump, "cost model: "
+		 "epilogue peel iters set to vf/2 because "
+		 "peeling for alignment is unknown .");
+
+      /* If peeled iterations are unknown, count a taken branch and a not taken
+	 branch per peeled loop.  Even if scalar loop iterations are known,
+	 vector iterations are not known since peeled prologue iterations are
+	 not known.  Hence guards remain the same.  */
+      peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
+			       + TARG_COND_NOT_TAKEN_BRANCH_COST);
+    }
+  else
+    {
+      if (byte_misalign)
+	{
+	  struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
+	  int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
+	  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
+	  int nelements = TYPE_VECTOR_SUBPARTS (vectype);
+
+	  peel_iters_prologue = nelements - (byte_misalign / element_size);
+	}
+      else
+	peel_iters_prologue = 0;
+
+      if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
+	{
+	  peel_iters_epilogue = vf/2;
+	  if (vect_print_dump_info (REPORT_DETAILS))
+	    fprintf (vect_dump, "cost model: "
+		     "epilogue peel iters set to vf/2 because "
+		     "loop iterations are unknown .");
+
+	  /* If peeled iterations are known but number of scalar loop
+	     iterations are unknown, count a taken branch per peeled loop.  */
+	  peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST;
+	}
+      else
+	{
+	  int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
+	  peel_iters_prologue = niters < peel_iters_prologue ?
+	    niters : peel_iters_prologue;
+	  peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
+	}
+    }
+
+  vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
+		      + (peel_iters_epilogue * scalar_single_iter_cost)
+		      + peel_guard_costs;
+
+  /* Allow targets to add additional (outside-of-loop) costs.  FORNOW, the
+     only information we provide for the target is whether testing against
+     the threshold involves a runtime test.  */
+  if (targetm.vectorize.builtin_vectorization_cost)
+    {
+      bool runtime_test = false;
+
+      /* If the number of iterations is unknown, or the
+	 peeling-for-misalignment amount is unknown, we will have to generate
+	 a runtime test to test the loop count against the threshold.  */
+      if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+	  || (byte_misalign < 0))
+	runtime_test = true;
+      vec_outside_cost +=
+	targetm.vectorize.builtin_vectorization_cost (runtime_test);
+      if (vect_print_dump_info (REPORT_DETAILS))
+	fprintf (vect_dump, "cost model : Adding target out-of-loop cost = %d",
+		 targetm.vectorize.builtin_vectorization_cost (runtime_test));
+    }
+
+  /* Add SLP costs.  */
+  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
+  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
+    {
+      vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
+      vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
+    }
+
+  /* Calculate number of iterations required to make the vector version
+     profitable, relative to the loop bodies only.  The following condition
+     must hold true: ((SIC*VF)-VIC)*niters > VOC*VF, where
+     SIC = scalar iteration cost, VIC = vector iteration cost,
+     VOC = vector outside cost and VF = vectorization factor. 
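+
+     A worked example with made-up costs: if SIC = 4, VIC = 6, VOC = 20 and
+     VF = 4, the condition reads (4*4 - 6) * niters > 20*4, i.e. the vector
+     loop starts to pay off once niters exceeds 8.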
*/ + + if ((scalar_single_iter_cost * vf) > vec_inside_cost) + { + if (vec_outside_cost <= 0) + min_profitable_iters = 1; + else + { + min_profitable_iters = (vec_outside_cost * vf + - vec_inside_cost * peel_iters_prologue + - vec_inside_cost * peel_iters_epilogue) + / ((scalar_single_iter_cost * vf) + - vec_inside_cost); + + if ((scalar_single_iter_cost * vf * min_profitable_iters) + <= ((vec_inside_cost * min_profitable_iters) + + (vec_outside_cost * vf))) + min_profitable_iters++; + } + } + /* vector version will never be profitable. */ + else + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "cost model: vector iteration cost = %d " + "is divisible by scalar iteration cost = %d by a factor " + "greater than or equal to the vectorization factor = %d .", + vec_inside_cost, scalar_single_iter_cost, vf); + return -1; + } + + if (vect_print_dump_info (REPORT_DETAILS)) + { + fprintf (vect_dump, "Cost model analysis: \n"); + fprintf (vect_dump, " Vector inside of loop cost: %d\n", + vec_inside_cost); + fprintf (vect_dump, " Vector outside of loop cost: %d\n", + vec_outside_cost); + fprintf (vect_dump, " Scalar cost: %d\n", scalar_single_iter_cost); + fprintf (vect_dump, " prologue iterations: %d\n", + peel_iters_prologue); + fprintf (vect_dump, " epilogue iterations: %d\n", + peel_iters_epilogue); + fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n", + min_profitable_iters); + } + + min_profitable_iters = + min_profitable_iters < vf ? vf : min_profitable_iters; + + /* Because the condition we create is: + if (niters <= min_profitable_iters) + then skip the vectorized loop. */ + min_profitable_iters--; + + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, " Profitability threshold = %d\n", + min_profitable_iters); + + return min_profitable_iters; +} + + +/* TODO: Close dependency between vect_model_*_cost and vectorizable_* + functions. Design better to avoid maintenance issues. */ + +/* Function vect_model_reduction_cost. + + Models cost for a reduction operation, including the vector ops + generated within the strip-mine loop, the initial definition before + the loop, and the epilogue code that must be generated. */ + +static void +vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code, + int ncopies) +{ + int outer_cost = 0; + enum tree_code code; + optab optab; + tree vectype; + tree orig_stmt; + tree reduction_op; + enum machine_mode mode; + tree operation = GIMPLE_STMT_OPERAND (STMT_VINFO_STMT (stmt_info), 1); + int op_type = TREE_CODE_LENGTH (TREE_CODE (operation)); + loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + + /* Cost of reduction op inside loop. */ + STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST; + + reduction_op = TREE_OPERAND (operation, op_type-1); + vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op)); + mode = TYPE_MODE (vectype); + orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info); + + if (!orig_stmt) + orig_stmt = STMT_VINFO_STMT (stmt_info); + + code = TREE_CODE (GIMPLE_STMT_OPERAND (orig_stmt, 1)); + + /* Add in cost for initial definition. */ + outer_cost += TARG_SCALAR_TO_VEC_COST; + + /* Determine cost of epilogue code. + + We have a reduction operator that will reduce the vector in one statement. + Also requires scalar extract. 
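+
+     For illustration (the costs are hypothetical): reducing a 4-element
+     vector without a single-stmt reduc operation costs 2*log2(4) = 4 vector
+     stmts plus one vector-to-scalar extract with the whole-vector-shift
+     scheme below, versus 4 + 3 = 7 vector stmts with the extract-based
+     fallback.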
*/ + + if (!nested_in_vect_loop_p (loop, orig_stmt)) + { + if (reduc_code < NUM_TREE_CODES) + outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST; + else + { + int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1); + tree bitsize = + TYPE_SIZE (TREE_TYPE ( GIMPLE_STMT_OPERAND (orig_stmt, 0))); + int element_bitsize = tree_low_cst (bitsize, 1); + int nelements = vec_size_in_bits / element_bitsize; + + optab = optab_for_tree_code (code, vectype); + + /* We have a whole vector shift available. */ + if (VECTOR_MODE_P (mode) + && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing + && optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing) + /* Final reduction via vector shifts and the reduction operator. Also + requires scalar extract. */ + outer_cost += ((exact_log2(nelements) * 2) * TARG_VEC_STMT_COST + + TARG_VEC_TO_SCALAR_COST); + else + /* Use extracts and reduction op for final reduction. For N elements, + we have N extracts and N-1 reduction ops. */ + outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST); + } + } + + STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost; + + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, " + "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info), + STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)); +} + + +/* Function vect_model_induction_cost. + + Models cost for induction operations. */ + +static void +vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies) +{ + /* loop cost for vec_loop. */ + STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST; + /* prologue cost for vec_init and vec_step. */ + STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST; + + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, " + "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info), + STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)); +} + + +/* Function vect_model_simple_cost. + + Models cost for simple operations, i.e. those that only emit ncopies of a + single op. Right now, this does not account for multiple insns that could + be generated for the single vector op. We will handle that shortly. */ + +void +vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies, + enum vect_def_type *dt, slp_tree slp_node) +{ + int i; + int inside_cost = 0, outside_cost = 0; + + inside_cost = ncopies * TARG_VEC_STMT_COST; + + /* FORNOW: Assuming maximum 2 args per stmts. */ + for (i = 0; i < 2; i++) + { + if (dt[i] == vect_constant_def || dt[i] == vect_invariant_def) + outside_cost += TARG_SCALAR_TO_VEC_COST; + } + + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, " + "outside_cost = %d .", inside_cost, outside_cost); + + /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */ + stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost); + stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost); +} + + +/* Function vect_cost_strided_group_size + + For strided load or store, return the group_size only if it is the first + load or store of a group, else return 1. This ensures that group size is + only returned once per group. 
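+
+   For example, for an interleaved group of four stores S1..S4, where S1 is
+   the DR_GROUP_FIRST_DR, this returns 4 at S1 and 1 at S2..S4, so the
+   permute overhead is charged exactly once per group.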
*/
+
+static int
+vect_cost_strided_group_size (stmt_vec_info stmt_info)
+{
+  tree first_stmt = DR_GROUP_FIRST_DR (stmt_info);
+
+  if (first_stmt == STMT_VINFO_STMT (stmt_info))
+    return DR_GROUP_SIZE (stmt_info);
+
+  return 1;
+}
+
+
+/* Function vect_model_store_cost
+
+   Models cost for stores.  In the case of strided accesses, one access
+   has the overhead of the strided access attributed to it.  */
+
+void
+vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
+		       enum vect_def_type dt, slp_tree slp_node)
+{
+  int group_size;
+  int inside_cost = 0, outside_cost = 0;
+
+  if (dt == vect_constant_def || dt == vect_invariant_def)
+    outside_cost = TARG_SCALAR_TO_VEC_COST;
+
+  /* Strided access?  */
+  if (DR_GROUP_FIRST_DR (stmt_info))
+    group_size = vect_cost_strided_group_size (stmt_info);
+  /* Not a strided access.  */
+  else
+    group_size = 1;
+
+  /* Is this an access in a group of stores, which provide strided access?
+     If so, add in the cost of the permutes.  */
+  if (group_size > 1)
+    {
+      /* Uses a high and low interleave operation for each needed permute.  */
+      inside_cost = ncopies * exact_log2(group_size) * group_size
+	* TARG_VEC_STMT_COST;
+
+      if (vect_print_dump_info (REPORT_DETAILS))
+	fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
+		 group_size);
+
+    }
+
+  /* Costs of the stores.  */
+  inside_cost += ncopies * TARG_VEC_STORE_COST;
+
+  if (vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
+	     "outside_cost = %d .", inside_cost, outside_cost);
+
+  /* Set the costs either in STMT_INFO or SLP_NODE (if it exists).  */
+  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
+  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
+}
+
+
+/* Function vect_model_load_cost
+
+   Models cost for loads.  In the case of strided accesses, the last access
+   has the overhead of the strided access attributed to it.  Since unaligned
+   accesses are supported for loads, we also account for the costs of the
+   access scheme chosen.  */
+
+void
+vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
+
+{
+  int group_size;
+  int alignment_support_scheme;
+  tree first_stmt;
+  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
+  int inside_cost = 0, outside_cost = 0;
+
+  /* Strided accesses?  */
+  first_stmt = DR_GROUP_FIRST_DR (stmt_info);
+  if (first_stmt && !slp_node)
+    {
+      group_size = vect_cost_strided_group_size (stmt_info);
+      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+    }
+  /* Not a strided access.  */
+  else
+    {
+      group_size = 1;
+      first_dr = dr;
+    }
+
+  alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
+
+  /* Is this an access in a group of loads providing strided access?
+     If so, add in the cost of the permutes.  */
+  if (group_size > 1)
+    {
+      /* Uses even and odd extract operations for each needed permute.  */
+      inside_cost = ncopies * exact_log2(group_size) * group_size
+	* TARG_VEC_STMT_COST;
+
+      if (vect_print_dump_info (REPORT_DETAILS))
+	fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
+		 group_size);
+
+    }
+
+  /* The loads themselves.  */
+  switch (alignment_support_scheme)
+    {
+    case dr_aligned:
+      {
+	inside_cost += ncopies * TARG_VEC_LOAD_COST;
+
+	if (vect_print_dump_info (REPORT_DETAILS))
+	  fprintf (vect_dump, "vect_model_load_cost: aligned.");
+
+	break;
+      }
+    case dr_unaligned_supported:
+      {
+	/* Here, we assign an additional cost for the unaligned load. 
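+	   How much extra is entirely target-defined; for instance, a target
+	   on which a misaligned vector load is about twice as expensive as
+	   an aligned one would set TARG_VEC_UNALIGNED_LOAD_COST to roughly
+	   2 * TARG_VEC_LOAD_COST (an illustrative choice, not a requirement).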
*/
+	inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;
+
+	if (vect_print_dump_info (REPORT_DETAILS))
+	  fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
+		   "hardware.");
+
+	break;
+      }
+    case dr_explicit_realign:
+      {
+	inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
+
+	/* FIXME: If the misalignment remains fixed across the iterations of
+	   the containing loop, the following cost should be added to the
+	   outside costs.  */
+	if (targetm.vectorize.builtin_mask_for_load)
+	  inside_cost += TARG_VEC_STMT_COST;
+
+	break;
+      }
+    case dr_explicit_realign_optimized:
+      {
+	if (vect_print_dump_info (REPORT_DETAILS))
+	  fprintf (vect_dump, "vect_model_load_cost: unaligned software "
+		   "pipelined.");
+
+	/* Unaligned software pipeline has a load of an address, an initial
+	   load, and possibly a mask operation to "prime" the loop.  However,
+	   if this is an access in a group of loads, which provide strided
+	   access, then the above cost should only be considered for one
+	   access in the group.  Inside the loop, there is a load op
+	   and a realignment op.  */
+
+	if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
+	  {
+	    outside_cost = 2*TARG_VEC_STMT_COST;
+	    if (targetm.vectorize.builtin_mask_for_load)
+	      outside_cost += TARG_VEC_STMT_COST;
+	  }
+
+	inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
+
+	break;
+      }
+
+    default:
+      gcc_unreachable ();
+    }
+
+  if (vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
+	     "outside_cost = %d .", inside_cost, outside_cost);
+
+  /* Set the costs either in STMT_INFO or SLP_NODE (if it exists).  */
+  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
+  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
+}
+
+
 /* Function vect_get_new_vect_var.
 
    Returns a name for a new variable.  The current naming scheme appends the
@@ -103,7 +723,11 @@ vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
   }
 
   if (name)
-    new_vect_var = create_tmp_var (type, concat (prefix, name, NULL));
+    {
+      char* tmp = concat (prefix, name, NULL);
+      new_vect_var = create_tmp_var (type, tmp);
+      free (tmp);
+    }
   else
     new_vect_var = create_tmp_var (type, prefix);
 
@@ -124,6 +748,19 @@ vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
    STMT: The statement containing the data reference.
    NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
    OFFSET: Optional.  If supplied, it will be added to the initial address.
+   LOOP:    Specify the loop-nest relative to which the address should be
+            computed.  For example, when the dataref is in an inner-loop
+	    nested in an outer-loop that is now being vectorized, LOOP can
+	    be either the outer-loop, or the inner-loop. 
The first memory location accessed
+            by the following dataref ('in' points to short):
+
+		for (i=0; i<N; i++)
+		   for (j=0; j<M; j++)
+		     s += in[i+j]
+
+	    is as follows: if LOOP=i_loop:  &in        (relative to i_loop)
+			   if LOOP=j_loop:  &in+i*2B   (relative to j_loop)
 
    Output:
    1. Return an SSA_NAME whose value is the address of the memory location of
       the first vector of the data reference.
    2. If new_stmt_list is not NULL_TREE after return then the caller must insert
       these statement(s) which define the returned SSA_NAME.
 
    FORNOW: We are only handling array accesses with step 1.  */
 
 static tree
 vect_create_addr_base_for_vector_ref (tree stmt,
 				      tree *new_stmt_list,
-				      tree offset)
+				      tree offset,
+				      struct loop *loop)
 {
   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+  struct loop *containing_loop = (bb_for_stmt (stmt))->loop_father;
   tree data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
-  tree base_name = build_fold_indirect_ref (data_ref_base);
+  tree base_name;
+  tree data_ref_base_var;
+  tree new_base_stmt;
   tree vec_stmt;
   tree addr_base, addr_expr;
   tree dest, new_stmt;
   tree base_offset = unshare_expr (DR_OFFSET (dr));
   tree init = unshare_expr (DR_INIT (dr));
   tree vect_ptr_type, addr_expr2;
+  tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
+
+  gcc_assert (loop);
+  if (loop != containing_loop)
+    {
+      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+      struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+
+      gcc_assert (nested_in_vect_loop_p (loop, stmt));
+
+      data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
+      base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
+      init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
+    }
+
+  /* Create data_ref_base */
+  base_name = build_fold_indirect_ref (data_ref_base);
+  data_ref_base_var = create_tmp_var (TREE_TYPE (data_ref_base), "batmp");
+  add_referenced_var (data_ref_base_var);
+  data_ref_base = force_gimple_operand (data_ref_base, &new_base_stmt,
+					true, data_ref_base_var);
+  append_to_statement_list_force(new_base_stmt, new_stmt_list);
 
   /* Create base_offset */
   base_offset = size_binop (PLUS_EXPR, base_offset, init);
+  base_offset = fold_convert (sizetype, base_offset);
   dest = create_tmp_var (TREE_TYPE (base_offset), "base_off");
   add_referenced_var (dest);
-  base_offset = force_gimple_operand (base_offset, &new_stmt, false, dest);
+  base_offset = force_gimple_operand (base_offset, &new_stmt, true, dest);
   append_to_statement_list_force (new_stmt, new_stmt_list);
 
   if (offset)
     {
-      tree tmp = create_tmp_var (TREE_TYPE (base_offset), "offset");
-      tree step;
-
-      /* For interleaved access step we divide STEP by the size of the
-         interleaving group.  */
-      if (DR_GROUP_SIZE (stmt_info))
-	step = fold_build2 (TRUNC_DIV_EXPR, TREE_TYPE (offset), DR_STEP (dr),
-			    build_int_cst (TREE_TYPE (offset),
-					   DR_GROUP_SIZE (stmt_info)));
-      else
-	step = DR_STEP (dr);
+      tree tmp = create_tmp_var (sizetype, "offset");
 
       add_referenced_var (tmp);
       offset = fold_build2 (MULT_EXPR, TREE_TYPE (offset), offset, step);
@@ -179,8 +833,8 @@ vect_create_addr_base_for_vector_ref (tree stmt,
     }
 
   /* base + base_offset */
-  addr_base = fold_build2 (PLUS_EXPR, TREE_TYPE (data_ref_base), data_ref_base,
-			   base_offset);
+  addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (data_ref_base),
+			   data_ref_base, base_offset);
 
   vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
 
@@ -210,14 +864,14 @@ vect_create_addr_base_for_vector_ref (tree stmt,
    accessed in the loop by STMT, along with the def-use update chain to
    appropriately advance the pointer through the loop iterations.  Also set
    aliasing information for the pointer.  This vector pointer is used by the
-   callers to this function to create a memory reference expression for vector 
+   callers to this function to create a memory reference expression for vector
   load/store access.
 
   Input:
   1. STMT: a stmt that references memory. Expected to be of the form
         GIMPLE_MODIFY_STMT <name, data-ref> or
	 GIMPLE_MODIFY_STMT <data-ref, name>.
-  2. BSI: block_stmt_iterator where new stmts can be added.
+  2. AT_LOOP: the loop where the vector memref is to be created.
   3. OFFSET (optional): an offset to be added to the initial address accessed
         by the data-ref in STMT.
   4. 
ONLY_INIT: indicate if vp is to be updated in the loop, or remain
@@ -244,18 +898,22 @@ vect_create_addr_base_for_vector_ref (tree stmt,
          Return the increment stmt that updates the pointer in PTR_INCR.
 
-   3. Return the pointer.  */
+   3. Set INV_P to true if the access pattern of the data reference in the
+      vectorized loop is invariant.  Set it to false otherwise.
+
+   4. Return the pointer.  */
 
 static tree
-vect_create_data_ref_ptr (tree stmt,
-			  block_stmt_iterator *bsi ATTRIBUTE_UNUSED,
+vect_create_data_ref_ptr (tree stmt, struct loop *at_loop,
 			  tree offset, tree *initial_address, tree *ptr_incr,
-			  bool only_init, tree type)
+			  bool only_init, tree type, bool *inv_p)
 {
   tree base_name;
   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+  bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
+  struct loop *containing_loop = (bb_for_stmt (stmt))->loop_father;
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
   tree vect_ptr_type;
   tree vect_ptr;
@@ -263,11 +921,31 @@ vect_create_data_ref_ptr (tree stmt,
   tree new_temp;
   tree vec_stmt;
   tree new_stmt_list = NULL_TREE;
-  edge pe = loop_preheader_edge (loop);
+  edge pe;
   basic_block new_bb;
   tree vect_ptr_init;
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+  tree vptr;
+  block_stmt_iterator incr_bsi;
+  bool insert_after;
+  tree indx_before_incr, indx_after_incr;
+  tree incr;
+  tree step;
+
+  /* Check the step (evolution) of the load in LOOP, and record
+     whether it's invariant.  */
+  if (nested_in_vect_loop)
+    step = STMT_VINFO_DR_STEP (stmt_info);
+  else
+    step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
+
+  if (tree_int_cst_compare (step, size_zero_node) == 0)
+    *inv_p = true;
+  else
+    *inv_p = false;
 
+  /* Create an expression for the first address accessed by this load
+     in LOOP.  */
   base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));
 
   if (vect_print_dump_info (REPORT_DETAILS))
@@ -298,7 +976,7 @@ vect_create_data_ref_ptr (tree stmt,
   /** (2) Add aliasing information to the new vector-pointer:
           (The points-to info (DR_PTR_INFO) may be defined later.)  **/
 
-  tag = DR_MEMTAG (dr);
+  tag = DR_SYMBOL_TAG (dr);
   gcc_assert (tag);
 
   /* If tag is a variable (and NOT_A_TAG) then a new symbol memory
@@ -310,12 +988,44 @@ vect_create_data_ref_ptr (tree stmt,
 
   var_ann (vect_ptr)->subvars = DR_SUBVARS (dr);
 
+  /** Note: If the dataref is in an inner-loop nested in LOOP, and we are
+      vectorizing LOOP (i.e. outer-loop vectorization), we need to create two
+      def-use update cycles for the pointer: One relative to the outer-loop
+      (LOOP), which is what steps (3) and (4) below do.  The other is relative
+      to the inner-loop (which is the inner-most loop containing the dataref),
+      and this is done by step (5) below.
+
+      When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
+      inner-most loop, and so steps (3),(4) work the same, and step (5) is
+      redundant.  Steps (3),(4) create the following:
+
+	vp0 = &base_addr;
+	LOOP:	vp1 = phi(vp0,vp2)
+		...
+		...
+		vp2 = vp1 + step
+		goto LOOP
+
+      If there is an inner-loop nested in loop, then step (5) will also be
+      applied, and an additional update in the inner-loop will be created:
+
+	vp0 = &base_addr;
+	LOOP:	vp1 = phi(vp0,vp2)
+		...
+		inner:     vp3 = phi(vp1,vp4)
+			   vp4 = vp3 + inner_step
+			   if () goto inner
+		... 
+ vp2 = vp1 + step + if () goto LOOP */ + /** (3) Calculate the initial address the vector-pointer, and set the vector-pointer to point to it before the loop: **/ /* Create: (&(base[init_val+offset]) in the loop preheader. */ + new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list, - offset); + offset, loop); pe = loop_preheader_edge (loop); new_bb = bsi_insert_on_edge_immediate (pe, new_stmt_list); gcc_assert (!new_bb); @@ -330,25 +1040,31 @@ vect_create_data_ref_ptr (tree stmt, gcc_assert (!new_bb); - /** (4) Handle the updating of the vector-pointer inside the loop: **/ + /** (4) Handle the updating of the vector-pointer inside the loop. + This is needed when ONLY_INIT is false, and also when AT_LOOP + is the inner-loop nested in LOOP (during outer-loop vectorization). + **/ - if (only_init) /* No update in loop is required. */ + if (only_init && at_loop == loop) /* No update in loop is required. */ { /* Copy the points-to information if it exists. */ if (DR_PTR_INFO (dr)) duplicate_ssa_name_ptr_info (vect_ptr_init, DR_PTR_INFO (dr)); - return vect_ptr_init; + vptr = vect_ptr_init; } else { - block_stmt_iterator incr_bsi; - bool insert_after; - tree indx_before_incr, indx_after_incr; - tree incr; + /* The step of the vector pointer is the Vector Size. */ + tree step = TYPE_SIZE_UNIT (vectype); + /* One exception to the above is when the scalar step of the load in + LOOP is zero. In this case the step here is also zero. */ + if (*inv_p) + step = size_zero_node; standard_iv_increment_position (loop, &incr_bsi, &insert_after); + create_iv (vect_ptr_init, - fold_convert (vect_ptr_type, TYPE_SIZE_UNIT (vectype)), + fold_convert (vect_ptr_type, step), NULL_TREE, loop, &incr_bsi, insert_after, &indx_before_incr, &indx_after_incr); incr = bsi_stmt (incr_bsi); @@ -366,15 +1082,51 @@ vect_create_data_ref_ptr (tree stmt, if (ptr_incr) *ptr_incr = incr; - return indx_before_incr; + vptr = indx_before_incr; } + + if (!nested_in_vect_loop || only_init) + return vptr; + + + /** (5) Handle the updating of the vector-pointer inside the inner-loop + nested in LOOP, if exists: **/ + + gcc_assert (nested_in_vect_loop); + if (!only_init) + { + standard_iv_increment_position (containing_loop, &incr_bsi, + &insert_after); + create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), NULL_TREE, + containing_loop, &incr_bsi, insert_after, &indx_before_incr, + &indx_after_incr); + incr = bsi_stmt (incr_bsi); + set_stmt_info (stmt_ann (incr), new_stmt_vec_info (incr, loop_vinfo)); + + /* Copy the points-to information if it exists. */ + if (DR_PTR_INFO (dr)) + { + duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr)); + duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr)); + } + merge_alias_info (vect_ptr_init, indx_before_incr); + merge_alias_info (vect_ptr_init, indx_after_incr); + if (ptr_incr) + *ptr_incr = incr; + + return indx_before_incr; + } + else + gcc_unreachable (); } /* Function bump_vector_ptr - Increment a pointer (to a vector type) by vector-size. Connect the new - increment stmt to the existing def-use update-chain of the pointer. + Increment a pointer (to a vector type) by vector-size. If requested, + i.e. 
if PTR-INCR is given, then also connect the new increment stmt + to the existing def-use update-chain of the pointer, by modifying + the PTR_INCR as illustrated below: The pointer def-use update-chain before this function: DATAREF_PTR = phi (p_0, p_2) @@ -384,18 +1136,20 @@ vect_create_data_ref_ptr (tree stmt, The pointer def-use update-chain after this function: DATAREF_PTR = phi (p_0, p_2) .... - NEW_DATAREF_PTR = DATAREF_PTR + vector_size + NEW_DATAREF_PTR = DATAREF_PTR + BUMP .... PTR_INCR: p_2 = NEW_DATAREF_PTR + step Input: DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated in the loop. - PTR_INCR - the stmt that updates the pointer in each iteration of the loop. - The increment amount across iterations is also expected to be - vector_size. + PTR_INCR - optional. The stmt that updates the pointer in each iteration of + the loop. The increment amount across iterations is expected + to be vector_size. BSI - location where the new update stmt is to be placed. STMT - the original scalar memory-access stmt that is being vectorized. + BUMP - optional. The offset by which to bump the pointer. If not given, + the offset is assumed to be vector_size. Output: Return NEW_DATAREF_PTR as illustrated above. @@ -403,26 +1157,37 @@ vect_create_data_ref_ptr (tree stmt, static tree bump_vector_ptr (tree dataref_ptr, tree ptr_incr, block_stmt_iterator *bsi, - tree stmt) + tree stmt, tree bump) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); tree vectype = STMT_VINFO_VECTYPE (stmt_info); tree vptr_type = TREE_TYPE (dataref_ptr); tree ptr_var = SSA_NAME_VAR (dataref_ptr); - tree update = fold_convert (vptr_type, TYPE_SIZE_UNIT (vectype)); + tree update = TYPE_SIZE_UNIT (vectype); tree incr_stmt; ssa_op_iter iter; use_operand_p use_p; tree new_dataref_ptr; + if (bump) + update = bump; + incr_stmt = build_gimple_modify_stmt (ptr_var, - build2 (PLUS_EXPR, vptr_type, + build2 (POINTER_PLUS_EXPR, vptr_type, dataref_ptr, update)); new_dataref_ptr = make_ssa_name (ptr_var, incr_stmt); GIMPLE_STMT_OPERAND (incr_stmt, 0) = new_dataref_ptr; vect_finish_stmt_generation (stmt, incr_stmt, bsi); + /* Copy the points-to information if it exists. */ + if (DR_PTR_INFO (dr)) + duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr)); + merge_alias_info (new_dataref_ptr, dataref_ptr); + + if (!ptr_incr) + return new_dataref_ptr; + /* Update the vector-pointer's cross-iteration increment. */ FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE) { @@ -434,11 +1199,6 @@ bump_vector_ptr (tree dataref_ptr, tree ptr_incr, block_stmt_iterator *bsi, gcc_assert (tree_int_cst_compare (use, update) == 0); } - /* Copy the points-to information if it exists. */ - if (DR_PTR_INFO (dr)) - duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr)); - merge_alias_info (new_dataref_ptr, dataref_ptr); - return new_dataref_ptr; } @@ -473,15 +1233,16 @@ vect_create_destination_var (tree scalar_dest, tree vectype) /* Function vect_init_vector. Insert a new stmt (INIT_STMT) that initializes a new vector variable with - the vector elements of VECTOR_VAR. Return the DEF of INIT_STMT. It will be - used in the vectorization of STMT. */ + the vector elements of VECTOR_VAR. Place the initialization at BSI if it + is not NULL. Otherwise, place the initialization at the loop preheader. + Return the DEF of INIT_STMT. + It will be used in the vectorization of STMT. 
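+
+   For example, the constant and invariant vectors built by
+   vect_get_vec_def_for_operand and vect_get_constant_vectors pass a NULL
+   BSI so that their single initialization is hoisted to the loop preheader,
+   while callers that need the init emitted next to the vectorized use pass
+   their own BSI.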
*/
 
 static tree
-vect_init_vector (tree stmt, tree vector_var, tree vector_type)
+vect_init_vector (tree stmt, tree vector_var, tree vector_type,
+		  block_stmt_iterator *bsi)
 {
   stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
-  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
-  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
   tree new_var;
   tree init_stmt;
   tree vec_oprnd;
@@ -491,14 +1252,23 @@ vect_init_vector (tree stmt, tree vector_var, tree vector_type)
 
   new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
   add_referenced_var (new_var);
-
   init_stmt = build_gimple_modify_stmt (new_var, vector_var);
   new_temp = make_ssa_name (new_var, init_stmt);
   GIMPLE_STMT_OPERAND (init_stmt, 0) = new_temp;
 
-  pe = loop_preheader_edge (loop);
-  new_bb = bsi_insert_on_edge_immediate (pe, init_stmt);
-  gcc_assert (!new_bb);
+  if (bsi)
+    vect_finish_stmt_generation (stmt, init_stmt, bsi);
+  else
+    {
+      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
+      struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+
+      if (nested_in_vect_loop_p (loop, stmt))
+	loop = loop->inner;
+      pe = loop_preheader_edge (loop);
+      new_bb = bsi_insert_on_edge_immediate (pe, init_stmt);
+      gcc_assert (!new_bb);
+    }
 
   if (vect_print_dump_info (REPORT_DETAILS))
     {
@@ -511,9 +1281,183 @@ vect_init_vector (tree stmt, tree vector_var, tree vector_type)
 }
 
 
+/* For constant and loop invariant defs of SLP_NODE this function returns
+   (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
+   OP_NUM determines if we gather defs for operand 0 or operand 1 of the scalar
+   stmts.  */
+
+static void
+vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
+			   unsigned int op_num)
+{
+  VEC (tree, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
+  tree stmt = VEC_index (tree, stmts, 0);
+  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
+  tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
+  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+  tree vec_cst;
+  tree t = NULL_TREE;
+  int j, number_of_places_left_in_vector;
+  tree vector_type;
+  tree op, vop, operation;
+  int group_size = VEC_length (tree, stmts);
+  unsigned int vec_num, i;
+  int number_of_copies = 1;
+  bool is_store = false;
+  unsigned int number_of_vectors = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+  VEC (tree, heap) *voprnds = VEC_alloc (tree, heap, number_of_vectors);
+
+  if (STMT_VINFO_DATA_REF (stmt_vinfo))
+    is_store = true;
+
+  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
+     created vectors. It is greater than 1 if unrolling is performed. 
+
+     For example, we have two scalar operands, s1 and s2 (e.g., group of
+     strided accesses of size two), while NUNITS is four (i.e., four scalars
+     of this type can be packed in a vector). The output vector will contain
+     two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
+     will be 2).
+
+     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
+     containing the operands.
+
+     For example, NUNITS is four as before, and the group size is 8
+     (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
+     {s5, s6, s7, s8}. 
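+
+     In both examples this is least_common_multiple (nunits, group_size)
+     / group_size, as computed below: lcm (4, 2) / 2 = 2 copies, and
+     lcm (4, 8) / 8 = 1 copy (spread over two vectors).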
*/ + + number_of_copies = least_common_multiple (nunits, group_size) / group_size; + + number_of_places_left_in_vector = nunits; + for (j = 0; j < number_of_copies; j++) + { + for (i = group_size - 1; VEC_iterate (tree, stmts, i, stmt); i--) + { + operation = GIMPLE_STMT_OPERAND (stmt, 1); + if (is_store) + op = operation; + else + op = TREE_OPERAND (operation, op_num); + + /* Create 'vect_ = {op0,op1,...,opn}'. */ + t = tree_cons (NULL_TREE, op, t); + + number_of_places_left_in_vector--; + + if (number_of_places_left_in_vector == 0) + { + number_of_places_left_in_vector = nunits; + + vector_type = get_vectype_for_scalar_type (TREE_TYPE (op)); + vec_cst = build_constructor_from_list (vector_type, t); + VEC_quick_push (tree, voprnds, + vect_init_vector (stmt, vec_cst, vector_type, + NULL)); + t = NULL_TREE; + } + } + } + + /* Since the vectors are created in the reverse order, we should invert + them. */ + vec_num = VEC_length (tree, voprnds); + for (j = vec_num - 1; j >= 0; j--) + { + vop = VEC_index (tree, voprnds, j); + VEC_quick_push (tree, *vec_oprnds, vop); + } + + VEC_free (tree, heap, voprnds); + + /* In case that VF is greater than the unrolling factor needed for the SLP + group of stmts, NUMBER_OF_VECTORS to be created is greater than + NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have + to replicate the vectors. */ + while (number_of_vectors > VEC_length (tree, *vec_oprnds)) + { + for (i = 0; VEC_iterate (tree, *vec_oprnds, i, vop) && i < vec_num; i++) + VEC_quick_push (tree, *vec_oprnds, vop); + } +} + + +/* Get vectorized definitions from SLP_NODE that contains corresponding + vectorized def-stmts. */ + +static void +vect_get_slp_vect_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds) +{ + tree vec_oprnd; + tree vec_def_stmt; + unsigned int i; + + gcc_assert (SLP_TREE_VEC_STMTS (slp_node)); + + for (i = 0; + VEC_iterate (tree, SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt); + i++) + { + gcc_assert (vec_def_stmt); + vec_oprnd = GIMPLE_STMT_OPERAND (vec_def_stmt, 0); + VEC_quick_push (tree, *vec_oprnds, vec_oprnd); + } +} + + +/* Get vectorized definitions for SLP_NODE. + If the scalar definitions are loop invariants or constants, collect them and + call vect_get_constant_vectors() to create vector stmts. + Otherwise, the def-stmts must be already vectorized and the vectorized stmts + must be stored in the LEFT/RIGHT node of SLP_NODE, and we call + vect_get_slp_vect_defs() to retrieve them. + If VEC_OPRNDS1 is NULL, don't get vector defs for the second operand (from + the right node. This is used when the second operand must remain scalar. */ + +static void +vect_get_slp_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds0, + VEC (tree,heap) **vec_oprnds1) +{ + tree operation, first_stmt; + + /* Allocate memory for vectorized defs. */ + *vec_oprnds0 = VEC_alloc (tree, heap, + SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node)); + + /* SLP_NODE corresponds either to a group of stores or to a group of + unary/binary operations. We don't call this function for loads. */ + if (SLP_TREE_LEFT (slp_node)) + /* The defs are already vectorized. */ + vect_get_slp_vect_defs (SLP_TREE_LEFT (slp_node), vec_oprnds0); + else + /* Build vectors from scalar defs. */ + vect_get_constant_vectors (slp_node, vec_oprnds0, 0); + + first_stmt = VEC_index (tree, SLP_TREE_SCALAR_STMTS (slp_node), 0); + if (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt))) + /* Since we don't call this function with loads, this is a group of + stores. 
*/ + return; + + operation = GIMPLE_STMT_OPERAND (first_stmt, 1); + if (TREE_OPERAND_LENGTH (operation) == unary_op || !vec_oprnds1) + return; + + *vec_oprnds1 = VEC_alloc (tree, heap, + SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node)); + + if (SLP_TREE_RIGHT (slp_node)) + /* The defs are already vectorized. */ + vect_get_slp_vect_defs (SLP_TREE_RIGHT (slp_node), vec_oprnds1); + else + /* Build vectors from scalar defs. */ + vect_get_constant_vectors (slp_node, vec_oprnds1, 1); +} + + /* Function get_initial_def_for_induction Input: + STMT - a stmt that performs an induction operation in the loop. IV_PHI - the initial value of the induction variable Output: @@ -532,8 +1476,8 @@ get_initial_def_for_induction (tree iv_phi) tree vectype = get_vectype_for_scalar_type (scalar_type); int nunits = TYPE_VECTOR_SUBPARTS (vectype); edge pe = loop_preheader_edge (loop); + struct loop *iv_loop; basic_block new_bb; - block_stmt_iterator bsi; tree vec, vec_init, vec_step, t; tree access_fn; tree new_var; @@ -547,8 +1491,13 @@ get_initial_def_for_induction (tree iv_phi) int ncopies = vf / nunits; tree expr; stmt_vec_info phi_info = vinfo_for_stmt (iv_phi); + bool nested_in_vect_loop = false; tree stmts; - tree stmt = NULL_TREE; + imm_use_iterator imm_iter; + use_operand_p use_p; + tree exit_phi; + edge latch_e; + tree loop_arg; block_stmt_iterator si; basic_block bb = bb_for_stmt (iv_phi); @@ -557,65 +1506,107 @@ get_initial_def_for_induction (tree iv_phi) /* Find the first insertion point in the BB. */ si = bsi_after_labels (bb); - stmt = bsi_stmt (si); - access_fn = analyze_scalar_evolution (loop, PHI_RESULT (iv_phi)); + if (INTEGRAL_TYPE_P (scalar_type)) + step_expr = build_int_cst (scalar_type, 0); + else + step_expr = build_real (scalar_type, dconst0); + + /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */ + if (nested_in_vect_loop_p (loop, iv_phi)) + { + nested_in_vect_loop = true; + iv_loop = loop->inner; + } + else + iv_loop = loop; + gcc_assert (iv_loop == (bb_for_stmt (iv_phi))->loop_father); + + latch_e = loop_latch_edge (iv_loop); + loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e); + + access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi)); gcc_assert (access_fn); - ok = vect_is_simple_iv_evolution (loop->num, access_fn, - &init_expr, &step_expr); + ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn, + &init_expr, &step_expr); gcc_assert (ok); + pe = loop_preheader_edge (iv_loop); /* Create the vector that holds the initial_value of the induction. */ - new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_"); - add_referenced_var (new_var); - - new_name = force_gimple_operand (init_expr, &stmts, false, new_var); - if (stmts) + if (nested_in_vect_loop) { - new_bb = bsi_insert_on_edge_immediate (pe, stmts); - gcc_assert (!new_bb); + /* iv_loop is nested in the loop to be vectorized. init_expr had already + been created during vectorization of previous stmts; We obtain it from + the STMT_VINFO_VEC_STMT of the defining stmt. */ + tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi, loop_preheader_edge (iv_loop)); + vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL); } - - t = NULL_TREE; - t = tree_cons (NULL_TREE, new_name, t); - for (i = 1; i < nunits; i++) + else { - tree tmp; + /* iv_loop is the loop to be vectorized. 
Create:
+	 vec_init = [X, X+S, X+2*S, X+3*S]  (S = step_expr, X = init_expr)  */
+      new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
+      add_referenced_var (new_var);
 
+      new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
+      if (stmts)
+	{
+	  new_bb = bsi_insert_on_edge_immediate (pe, stmts);
+	  gcc_assert (!new_bb);
+	}
 
+      t = NULL_TREE;
+      t = tree_cons (NULL_TREE, init_expr, t);
+      for (i = 1; i < nunits; i++)
+	{
+	  tree tmp;
 
-      /* Create: new_name = new_name + step_expr */
-      tmp = fold_build2 (PLUS_EXPR, scalar_type, new_name, step_expr);
-      init_stmt = build_gimple_modify_stmt (new_var, tmp);
-      new_name = make_ssa_name (new_var, init_stmt);
-      GIMPLE_STMT_OPERAND (init_stmt, 0) = new_name;
 
-      new_bb = bsi_insert_on_edge_immediate (pe, init_stmt);
-      gcc_assert (!new_bb);
 
-      if (vect_print_dump_info (REPORT_DETAILS))
-	{
-	  fprintf (vect_dump, "created new init_stmt: ");
-	  print_generic_expr (vect_dump, init_stmt, TDF_SLIM);
-	}
-      t = tree_cons (NULL_TREE, new_name, t);
+	  /* Create: new_name_i = new_name + step_expr */
+	  tmp = fold_build2 (PLUS_EXPR, scalar_type, new_name, step_expr);
+	  init_stmt = build_gimple_modify_stmt (new_var, tmp);
+	  new_name = make_ssa_name (new_var, init_stmt);
+	  GIMPLE_STMT_OPERAND (init_stmt, 0) = new_name;
+
+	  new_bb = bsi_insert_on_edge_immediate (pe, init_stmt);
+	  gcc_assert (!new_bb);
+
+	  if (vect_print_dump_info (REPORT_DETAILS))
+	    {
+	      fprintf (vect_dump, "created new init_stmt: ");
+	      print_generic_expr (vect_dump, init_stmt, TDF_SLIM);
+	    }
+	  t = tree_cons (NULL_TREE, new_name, t);
+	}
+      /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
+      vec = build_constructor_from_list (vectype, nreverse (t));
+      vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
     }
-  vec = build_constructor_from_list (vectype, nreverse (t));
-  vec_init = vect_init_vector (stmt, vec, vectype);
 
 
   /* Create the vector that holds the step of the induction.  */
-  expr = build_int_cst (scalar_type, vf);
-  new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
+  if (nested_in_vect_loop)
+    /* iv_loop is nested in the loop to be vectorized. Generate:
+       vec_step = [S, S, S, S]  */
+    new_name = step_expr;
+  else
+    {
+      /* iv_loop is the loop to be vectorized. Generate:
+	 vec_step = [VF*S, VF*S, VF*S, VF*S]  */
+      expr = build_int_cst (scalar_type, vf);
+      new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
+    }
+
   t = NULL_TREE;
   for (i = 0; i < nunits; i++)
     t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
   vec = build_constructor_from_list (vectype, t);
-  vec_step = vect_init_vector (stmt, vec, vectype);
+  vec_step = vect_init_vector (iv_phi, vec, vectype, NULL);
 
 
   /* Create the following def-use cycle:
     loop prolog:
-	  vec_init = [X, X+S, X+2*S, X+3*S]
-	  vec_step = [VF*S, VF*S, VF*S, VF*S]
+	  vec_init = ...
+	  vec_step = ...
     loop:
	  vec_iv = PHI <vec_init, vec_loop>
	...
@@ -626,7 +1617,7 @@
 
   /* Create the induction-phi that defines the induction-operand. 
*/ vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"); add_referenced_var (vec_dest); - induction_phi = create_phi_node (vec_dest, loop->header); + induction_phi = create_phi_node (vec_dest, iv_loop->header); set_stmt_info (get_stmt_ann (induction_phi), new_stmt_vec_info (induction_phi, loop_vinfo)); induc_def = PHI_RESULT (induction_phi); @@ -637,15 +1628,16 @@ get_initial_def_for_induction (tree iv_phi) induc_def, vec_step)); vec_def = make_ssa_name (vec_dest, new_stmt); GIMPLE_STMT_OPERAND (new_stmt, 0) = vec_def; - bsi = bsi_for_stmt (stmt); - vect_finish_stmt_generation (stmt, new_stmt, &bsi); + bsi_insert_before (&si, new_stmt, BSI_SAME_STMT); + set_stmt_info (get_stmt_ann (new_stmt), + new_stmt_vec_info (new_stmt, loop_vinfo)); /* Set the arguments of the phi node: */ - add_phi_arg (induction_phi, vec_init, loop_preheader_edge (loop)); - add_phi_arg (induction_phi, vec_def, loop_latch_edge (loop)); + add_phi_arg (induction_phi, vec_init, pe); + add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop)); - /* In case the vectorization factor (VF) is bigger than the number + /* In case that vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. For more details see documentation @@ -654,6 +1646,8 @@ get_initial_def_for_induction (tree iv_phi) if (ncopies > 1) { stmt_vec_info prev_stmt_vinfo; + /* FORNOW. This restriction should be relaxed. */ + gcc_assert (!nested_in_vect_loop); /* Create the vector that holds the step of the induction. */ expr = build_int_cst (scalar_type, nunits); @@ -662,7 +1656,7 @@ get_initial_def_for_induction (tree iv_phi) for (i = 0; i < nunits; i++) t = tree_cons (NULL_TREE, unshare_expr (new_name), t); vec = build_constructor_from_list (vectype, t); - vec_step = vect_init_vector (stmt, vec, vectype); + vec_step = vect_init_vector (iv_phi, vec, vectype, NULL); vec_def = induc_def; prev_stmt_vinfo = vinfo_for_stmt (induction_phi); @@ -670,19 +1664,50 @@ get_initial_def_for_induction (tree iv_phi) { tree tmp; - /* vec_i = vec_prev + vec_{step*nunits} */ + /* vec_i = vec_prev + vec_step */ tmp = build2 (PLUS_EXPR, vectype, vec_def, vec_step); new_stmt = build_gimple_modify_stmt (NULL_TREE, tmp); vec_def = make_ssa_name (vec_dest, new_stmt); GIMPLE_STMT_OPERAND (new_stmt, 0) = vec_def; - bsi = bsi_for_stmt (stmt); - vect_finish_stmt_generation (stmt, new_stmt, &bsi); - + bsi_insert_before (&si, new_stmt, BSI_SAME_STMT); + set_stmt_info (get_stmt_ann (new_stmt), + new_stmt_vec_info (new_stmt, loop_vinfo)); STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt; prev_stmt_vinfo = vinfo_for_stmt (new_stmt); } } + if (nested_in_vect_loop) + { + /* Find the loop-closed exit-phi of the induction, and record + the final vector of induction results: */ + exit_phi = NULL; + FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg) + { + if (!flow_bb_inside_loop_p (iv_loop, bb_for_stmt (USE_STMT (use_p)))) + { + exit_phi = USE_STMT (use_p); + break; + } + } + if (exit_phi) + { + stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi); + /* FORNOW. Currently not supporting the case that an inner-loop induction + is not used in the outer-loop (i.e. only outside the outer-loop). 
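+	     The supported shape, for illustration, is:
+
+		for (i = 0; i < N; i++)       (outer-loop, being vectorized)
+		  {
+		    for (j = 0; j < M; j++)   (inner-loop)
+		      k++;                    (inner-loop induction)
+		    a[i] = k;                 (induction used in outer-loop)
+		  }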
*/ + gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo) + && !STMT_VINFO_LIVE_P (stmt_vinfo)); + + STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt; + if (vect_print_dump_info (REPORT_DETAILS)) + { + fprintf (vect_dump, "vector of inductions after inner-loop:"); + print_generic_expr (vect_dump, new_stmt, TDF_SLIM); + } + } + } + + if (vect_print_dump_info (REPORT_DETAILS)) { fprintf (vect_dump, "transform induction: created def-use cycle:"); @@ -718,7 +1743,6 @@ vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def) tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo); int nunits = TYPE_VECTOR_SUBPARTS (vectype); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); - struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree vec_inv; tree vec_cst; tree t = NULL_TREE; @@ -769,7 +1793,7 @@ vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def) vector_type = get_vectype_for_scalar_type (TREE_TYPE (op)); vec_cst = build_vector (vector_type, t); - return vect_init_vector (stmt, vec_cst, vector_type); + return vect_init_vector (stmt, vec_cst, vector_type, NULL); } /* Case 2: operand is defined outside the loop - loop invariant. */ @@ -790,7 +1814,7 @@ vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def) /* FIXME: use build_constructor directly. */ vector_type = get_vectype_for_scalar_type (TREE_TYPE (def)); vec_inv = build_constructor_from_list (vector_type, t); - return vect_init_vector (stmt, vec_inv, vector_type); + return vect_init_vector (stmt, vec_inv, vector_type, NULL); } /* Case 3: operand is defined inside the loop. */ @@ -803,14 +1827,20 @@ vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def) def_stmt_info = vinfo_for_stmt (def_stmt); vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info); gcc_assert (vec_stmt); - vec_oprnd = GIMPLE_STMT_OPERAND (vec_stmt, 0); + if (TREE_CODE (vec_stmt) == PHI_NODE) + vec_oprnd = PHI_RESULT (vec_stmt); + else + vec_oprnd = GIMPLE_STMT_OPERAND (vec_stmt, 0); return vec_oprnd; } /* Case 4: operand is defined by a loop header phi - reduction */ case vect_reduction_def: { + struct loop *loop; + gcc_assert (TREE_CODE (def_stmt) == PHI_NODE); + loop = (bb_for_stmt (def_stmt))->loop_father; /* Get the def before the loop */ op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop)); @@ -822,8 +1852,12 @@ vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def) { gcc_assert (TREE_CODE (def_stmt) == PHI_NODE); - /* Get the def before the loop */ - return get_initial_def_for_induction (def_stmt); + /* Get the def from the vectorized stmt. */ + def_stmt_info = vinfo_for_stmt (def_stmt); + vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info); + gcc_assert (vec_stmt && (TREE_CODE (vec_stmt) == PHI_NODE)); + vec_oprnd = PHI_RESULT (vec_stmt); + return vec_oprnd; } default: @@ -904,11 +1938,58 @@ vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd) vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info); gcc_assert (vec_stmt_for_operand); vec_oprnd = GIMPLE_STMT_OPERAND (vec_stmt_for_operand, 0); - return vec_oprnd; } +/* Get vectorized definitions for the operands to create a copy of an original + stmt. See vect_get_vec_def_for_stmt_copy() for details. 
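+   (Each call pops the defs used for the previous copy off VEC_OPRNDS0/1
+   and pushes the defs to be used for the next copy, so the vectors always
+   hold the operands of the latest stmt copy.)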
*/ + +static void +vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt, + VEC(tree,heap) **vec_oprnds0, + VEC(tree,heap) **vec_oprnds1) +{ + tree vec_oprnd = VEC_pop (tree, *vec_oprnds0); + + vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd); + VEC_quick_push (tree, *vec_oprnds0, vec_oprnd); + + if (vec_oprnds1 && *vec_oprnds1) + { + vec_oprnd = VEC_pop (tree, *vec_oprnds1); + vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd); + VEC_quick_push (tree, *vec_oprnds1, vec_oprnd); + } +} + + +/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not NULL. */ + +static void +vect_get_vec_defs (tree op0, tree op1, tree stmt, VEC(tree,heap) **vec_oprnds0, + VEC(tree,heap) **vec_oprnds1, slp_tree slp_node) +{ + if (slp_node) + vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1); + else + { + tree vec_oprnd; + + *vec_oprnds0 = VEC_alloc (tree, heap, 1); + vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL); + VEC_quick_push (tree, *vec_oprnds0, vec_oprnd); + + if (op1) + { + *vec_oprnds1 = VEC_alloc (tree, heap, 1); + vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL); + VEC_quick_push (tree, *vec_oprnds1, vec_oprnd); + } + } +} + + /* Function vect_finish_stmt_generation. Insert a new stmt. */ @@ -920,7 +2001,11 @@ vect_finish_stmt_generation (tree stmt, tree vec_stmt, stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + gcc_assert (stmt == bsi_stmt (*bsi)); + gcc_assert (TREE_CODE (stmt) != LABEL_EXPR); + bsi_insert_before (bsi, vec_stmt, BSI_SAME_STMT); + set_stmt_info (get_stmt_ann (vec_stmt), new_stmt_vec_info (vec_stmt, loop_vinfo)); @@ -988,6 +2073,8 @@ static tree get_initial_def_for_reduction (tree stmt, tree init_val, tree *adjustment_def) { stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); + loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo); int nunits = TYPE_VECTOR_SUBPARTS (vectype); enum tree_code code = TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1)); @@ -998,8 +2085,14 @@ get_initial_def_for_reduction (tree stmt, tree init_val, tree *adjustment_def) tree t = NULL_TREE; int i; tree vector_type; + bool nested_in_vect_loop = false; gcc_assert (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type)); + if (nested_in_vect_loop_p (loop, stmt)) + nested_in_vect_loop = true; + else + gcc_assert (loop == (bb_for_stmt (stmt))->loop_father); + vecdef = vect_get_vec_def_for_operand (init_val, stmt, NULL); switch (code) @@ -1007,7 +2100,10 @@ get_initial_def_for_reduction (tree stmt, tree init_val, tree *adjustment_def) case WIDEN_SUM_EXPR: case DOT_PROD_EXPR: case PLUS_EXPR: - *adjustment_def = init_val; + if (nested_in_vect_loop) + *adjustment_def = vecdef; + else + *adjustment_def = init_val; /* Create a vector of zeros for init_def. 
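	 e.g. {0, 0, 0, 0} for an integer vectype, or {0.0, 0.0, 0.0, 0.0}
	 (built from dconst0) for a floating-point one.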
*/ if (INTEGRAL_TYPE_P (type)) def_for_init = build_int_cst (type, 0); @@ -1096,23 +2192,32 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, tree new_phi; block_stmt_iterator exit_bsi; tree vec_dest; - tree new_temp; + tree new_temp = NULL_TREE; tree new_name; - tree epilog_stmt; - tree new_scalar_dest, exit_phi; + tree epilog_stmt = NULL_TREE; + tree new_scalar_dest, exit_phi, new_dest; tree bitsize, bitpos, bytesize; enum tree_code code = TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1)); - tree scalar_initial_def; + tree adjustment_def; tree vec_initial_def; tree orig_name; imm_use_iterator imm_iter; use_operand_p use_p; - bool extract_scalar_result; - tree reduction_op; + bool extract_scalar_result = false; + tree reduction_op, expr; tree orig_stmt; tree use_stmt; tree operation = GIMPLE_STMT_OPERAND (stmt, 1); + bool nested_in_vect_loop = false; int op_type; + VEC(tree,heap) *phis = NULL; + int i; + + if (nested_in_vect_loop_p (loop, stmt)) + { + loop = loop->inner; + nested_in_vect_loop = true; + } op_type = TREE_OPERAND_LENGTH (operation); reduction_op = TREE_OPERAND (operation, op_type-1); @@ -1126,7 +2231,7 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, the scalar def before the loop, that defines the initial value of the reduction variable. */ vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt, - &scalar_initial_def); + &adjustment_def); add_phi_arg (reduction_phi, vec_initial_def, loop_preheader_edge (loop)); /* 1.2 set the loop-latch arg for the reduction-phi: */ @@ -1205,6 +2310,15 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, bitsize = TYPE_SIZE (scalar_type); bytesize = TYPE_SIZE_UNIT (scalar_type); + + /* In case this is a reduction in an inner-loop while vectorizing an outer + loop - we don't need to extract a single scalar result at the end of the + inner-loop. The final vector of partial results will be used in the + vectorized outer-loop, or reduced to a scalar result at the end of the + outer-loop. */ + if (nested_in_vect_loop) + goto vect_finalize_reduction; + /* 2.3 Create the reduction code, using one of the three schemes described above. */ @@ -1236,7 +2350,7 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1); tree vec_temp; - if (vec_shr_optab->handlers[mode].insn_code != CODE_FOR_nothing) + if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing) shift_code = VEC_RSHIFT_EXPR; else have_whole_vector_shift = false; @@ -1252,7 +2366,7 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, else { optab optab = optab_for_tree_code (code, vectype); - if (optab->handlers[mode].insn_code == CODE_FOR_nothing) + if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing) have_whole_vector_shift = false; } @@ -1351,6 +2465,7 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, { tree rhs; + gcc_assert (!nested_in_vect_loop); if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "extract scalar result"); @@ -1369,43 +2484,77 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt, bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT); } - /* 2.4 Adjust the final result by the initial value of the reduction +vect_finalize_reduction: + + /* 2.5 Adjust the final result by the initial value of the reduction variable. (When such adjustment is not needed, then - 'scalar_initial_def' is zero). + 'adjustment_def' is zero). 
For example, if code is PLUS we create: + new_temp = loop_exit_def + adjustment_def */ - Create: - s_out4 = scalar_expr */ - - if (scalar_initial_def) + if (adjustment_def) { - tree tmp = build2 (code, scalar_type, new_temp, scalar_initial_def); - epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, tmp); - new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); + if (nested_in_vect_loop) + { + gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE); + expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def); + new_dest = vect_create_destination_var (scalar_dest, vectype); + } + else + { + gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE); + expr = build2 (code, scalar_type, new_temp, adjustment_def); + new_dest = vect_create_destination_var (scalar_dest, scalar_type); + } + epilog_stmt = build_gimple_modify_stmt (new_dest, expr); + new_temp = make_ssa_name (new_dest, epilog_stmt); GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp; bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT); } - /* 2.6 Replace uses of s_out0 with uses of s_out3 */ - /* Find the loop-closed-use at the loop exit of the original scalar result. + /* 2.6 Handle the loop-exit phi */ + + /* Replace uses of s_out0 with uses of s_out3: + Find the loop-closed-use at the loop exit of the original scalar result. (The reduction result is expected to have two immediate uses - one at the latch block, and one at the loop exit). */ - exit_phi = NULL; + phis = VEC_alloc (tree, heap, 10); FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) { if (!flow_bb_inside_loop_p (loop, bb_for_stmt (USE_STMT (use_p)))) { exit_phi = USE_STMT (use_p); - break; + VEC_quick_push (tree, phis, exit_phi); } } /* We expect to have found an exit_phi because of loop-closed-ssa form. */ - gcc_assert (exit_phi); - /* Replace the uses: */ - orig_name = PHI_RESULT (exit_phi); - FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name) - FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter) - SET_USE (use_p, new_temp); + gcc_assert (!VEC_empty (tree, phis)); + + for (i = 0; VEC_iterate (tree, phis, i, exit_phi); i++) + { + if (nested_in_vect_loop) + { + stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi); + + /* FORNOW. Currently not supporting the case that an inner-loop reduction + is not used in the outer-loop (but only outside the outer-loop). */ + gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo) + && !STMT_VINFO_LIVE_P (stmt_vinfo)); + + epilog_stmt = adjustment_def ? epilog_stmt : new_phi; + STMT_VINFO_VEC_STMT (stmt_vinfo) = epilog_stmt; + set_stmt_info (get_stmt_ann (epilog_stmt), + new_stmt_vec_info (epilog_stmt, loop_vinfo)); + continue; + } + + /* Replace the uses: */ + orig_name = PHI_RESULT (exit_phi); + FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name) + FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter) + SET_USE (use_p, new_temp); + } + VEC_free (tree, heap, phis); } @@ -1482,15 +2631,34 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) tree new_stmt = NULL_TREE; int j; + if (nested_in_vect_loop_p (loop, stmt)) + { + loop = loop->inner; + /* FORNOW. This restriction should be relaxed. */ + if (ncopies > 1) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "multiple types in nested loop."); + return false; + } + } + gcc_assert (ncopies >= 1); + /* FORNOW: SLP not supported. */ + if (STMT_SLP_TYPE (stmt_info)) + return false; + /* 1. Is vectorizable reduction? */ /* Not supportable if the reduction variable is used in the loop. 
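   For example (illustrative):
     loop:
       sum_1 = phi (sum_init, sum_2)
       sum_2 = sum_1 + a[i]
       b[i] = sum_2        <-- partial sum used inside the loop
   cannot be handled as a reduction, because the intermediate partial
   sums would be needed in every iteration.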
*/ - if (STMT_VINFO_RELEVANT_P (stmt_info)) + if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer) return false; - if (!STMT_VINFO_LIVE_P (stmt_info)) + /* Reductions that are not used even in an enclosing outer-loop, + are expected to be "live" (used out of the loop). */ + if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_loop + && !STMT_VINFO_LIVE_P (stmt_info)) return false; /* Make sure it was already recognized as a reduction computation. */ @@ -1547,9 +2715,9 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) gcc_assert (dt == vect_reduction_def); gcc_assert (TREE_CODE (def_stmt) == PHI_NODE); if (orig_stmt) - gcc_assert (orig_stmt == vect_is_simple_reduction (loop, def_stmt)); + gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo, def_stmt)); else - gcc_assert (stmt == vect_is_simple_reduction (loop, def_stmt)); + gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, def_stmt)); if (STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt))) return false; @@ -1565,7 +2733,7 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) return false; } vec_mode = TYPE_MODE (vectype); - if (optab->handlers[(int) vec_mode].insn_code == CODE_FOR_nothing) + if (optab_handler (optab, vec_mode)->insn_code == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "op not supported by target."); @@ -1645,7 +2813,7 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) fprintf (vect_dump, "no optab for reduction."); epilog_reduc_code = NUM_TREE_CODES; } - if (reduc_optab->handlers[(int) vec_mode].insn_code == CODE_FOR_nothing) + if (optab_handler (reduc_optab, vec_mode)->insn_code == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "reduc op not supported by target."); @@ -1655,6 +2823,7 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type; + vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies); return true; } @@ -1767,13 +2936,20 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) tree scalar_dest; tree operation; tree op, type; + tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info; tree vectype_out, vectype_in; + int nunits_in; + int nunits_out; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree fndecl, rhs, new_temp, def, def_stmt, rhs_type, lhs_type; - enum vect_def_type dt[2]; + enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; + tree new_stmt; int ncopies, j, nargs; call_expr_arg_iterator iter; + tree vargs; + enum { NARROW, NONE, WIDEN } modifier; if (!STMT_VINFO_RELEVANT_P (stmt_info)) return false; @@ -1781,6 +2957,10 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) return false; + /* FORNOW: SLP not supported. */ + if (STMT_SLP_TYPE (stmt_info)) + return false; + /* FORNOW: not yet supported. */ if (STMT_VINFO_LIVE_P (stmt_info)) { @@ -1805,12 +2985,10 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) nargs = 0; FOR_EACH_CALL_EXPR_ARG (op, iter, operation) { - ++nargs; - /* Bail out if the function has more than two arguments, we do not have interesting builtin functions to vectorize with more than two arguments. 
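   Typical candidates are thus one- and two-argument math builtins,
   e.g. (a sketch, assuming the target provides a vectorized variant,
   here given the made-up name vsqrtf):
     a[i] = sqrtf (b[i])
   can become
     va = vsqrtf (vb)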
*/ - if (nargs > 2) + if (nargs >= 2) return false; /* We can only handle calls with arguments of the same type. */ @@ -1823,12 +3001,14 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) } rhs_type = TREE_TYPE (op); - if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt[nargs-1])) + if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt[nargs])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); return false; } + + ++nargs; } /* No arguments is also not good. */ @@ -1836,15 +3016,24 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) return false; vectype_in = get_vectype_for_scalar_type (rhs_type); + if (!vectype_in) + return false; + nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); lhs_type = TREE_TYPE (GIMPLE_STMT_OPERAND (stmt, 0)); vectype_out = get_vectype_for_scalar_type (lhs_type); + if (!vectype_out) + return false; + nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); - /* Only handle the case of vectors with the same number of elements. - FIXME: We need a way to handle for example the SSE2 cvtpd2dq - instruction which converts V2DFmode to V4SImode but only - using the lower half of the V4SImode result. */ - if (TYPE_VECTOR_SUBPARTS (vectype_in) != TYPE_VECTOR_SUBPARTS (vectype_out)) + /* FORNOW */ + if (nunits_in == nunits_out / 2) + modifier = NARROW; + else if (nunits_out == nunits_in) + modifier = NONE; + else if (nunits_out == nunits_in / 2) + modifier = WIDEN; + else return false; /* For now, we only vectorize functions if a target specific builtin @@ -1862,9 +3051,29 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) gcc_assert (ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS)); + if (modifier == NARROW) + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out; + else + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; + + /* Sanity check: make sure that at least one copy of the vectorized stmt + needs to be generated. */ + gcc_assert (ncopies >= 1); + + /* FORNOW. This restriction should be relaxed. */ + if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "multiple types in nested loop."); + return false; + } + if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "=== vectorizable_call ==="); + vect_model_simple_cost (stmt_info, ncopies, dt, NULL); return true; } @@ -1873,94 +3082,222 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "transform operation."); - ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo) - / TYPE_VECTOR_SUBPARTS (vectype_out)); - gcc_assert (ncopies >= 1); + /* FORNOW. This restriction should be relaxed. */ + if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "multiple types in nested loop."); + return false; + } /* Handle def. */ scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); vec_dest = vect_create_destination_var (scalar_dest, vectype_out); prev_stmt_info = NULL; - for (j = 0; j < ncopies; ++j) - { - tree new_stmt, vargs; - tree vec_oprnd[2]; - int n; - - /* Build argument list for the vectorized call. */ - /* FIXME: Rewrite this so that it doesn't construct a temporary - list. 
*/ - vargs = NULL_TREE; - n = -1; - FOR_EACH_CALL_EXPR_ARG (op, iter, operation) + switch (modifier) + { + case NONE: + for (j = 0; j < ncopies; ++j) { - ++n; + /* Build argument list for the vectorized call. */ + /* FIXME: Rewrite this so that it doesn't + construct a temporary list. */ + vargs = NULL_TREE; + nargs = 0; + FOR_EACH_CALL_EXPR_ARG (op, iter, operation) + { + if (j == 0) + vec_oprnd0 + = vect_get_vec_def_for_operand (op, stmt, NULL); + else + vec_oprnd0 + = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0); + + vargs = tree_cons (NULL_TREE, vec_oprnd0, vargs); + + ++nargs; + } + vargs = nreverse (vargs); + + rhs = build_function_call_expr (fndecl, vargs); + new_stmt = build_gimple_modify_stmt (vec_dest, rhs); + new_temp = make_ssa_name (vec_dest, new_stmt); + GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; + + vect_finish_stmt_generation (stmt, new_stmt, bsi); if (j == 0) - vec_oprnd[n] = vect_get_vec_def_for_operand (op, stmt, NULL); + STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; else - vec_oprnd[n] = vect_get_vec_def_for_stmt_copy (dt[n], vec_oprnd[n]); + STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; - vargs = tree_cons (NULL_TREE, vec_oprnd[n], vargs); + prev_stmt_info = vinfo_for_stmt (new_stmt); } - vargs = nreverse (vargs); - rhs = build_function_call_expr (fndecl, vargs); - new_stmt = build_gimple_modify_stmt (vec_dest, rhs); - new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; + break; - vect_finish_stmt_generation (stmt, new_stmt, bsi); + case NARROW: + for (j = 0; j < ncopies; ++j) + { + /* Build argument list for the vectorized call. */ + /* FIXME: Rewrite this so that it doesn't + construct a temporary list. */ + vargs = NULL_TREE; + nargs = 0; + FOR_EACH_CALL_EXPR_ARG (op, iter, operation) + { + if (j == 0) + { + vec_oprnd0 + = vect_get_vec_def_for_operand (op, stmt, NULL); + vec_oprnd1 + = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0); + } + else + { + vec_oprnd0 + = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd1); + vec_oprnd1 + = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0); + } - if (j == 0) - STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; - else - STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; - prev_stmt_info = vinfo_for_stmt (new_stmt); + vargs = tree_cons (NULL_TREE, vec_oprnd0, vargs); + vargs = tree_cons (NULL_TREE, vec_oprnd1, vargs); + + ++nargs; + } + vargs = nreverse (vargs); + + rhs = build_function_call_expr (fndecl, vargs); + new_stmt = build_gimple_modify_stmt (vec_dest, rhs); + new_temp = make_ssa_name (vec_dest, new_stmt); + GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; + + vect_finish_stmt_generation (stmt, new_stmt, bsi); + + if (j == 0) + STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; + else + STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; + + prev_stmt_info = vinfo_for_stmt (new_stmt); + } + + *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); + + break; + + case WIDEN: + /* No current target implements this case. */ + return false; } - /* The call in STMT might prevent it from being removed in dce. We however - cannot remove it here, due to the way the ssa name it defines is mapped - to the new definition. So just replace rhs of the statement with something - harmless. */ + /* The call in STMT might prevent it from being removed in dce. + We however cannot remove it here, due to the way the ssa name + it defines is mapped to the new definition. So just replace + rhs of the statement with something harmless. 
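+     For example (a sketch), the scalar call
+       x_1 = foo (a_2);
+     is left behind as the harmless
+       x_1 = 0;
+     which can later be cleaned up by dce.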
*/ type = TREE_TYPE (scalar_dest); GIMPLE_STMT_OPERAND (stmt, 1) = fold_convert (type, integer_zero_node); + update_stmt (stmt); return true; } -/* Function vectorizable_conversion. +/* Function vect_gen_widened_results_half + + Create a vector stmt whose code, type, number of arguments, and result + variable are CODE, VECTYPE, OP_TYPE, and VEC_DEST, and its arguments are + VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI. + In the case that CODE is a CALL_EXPR, this means that a call to DECL + needs to be created (DECL is a function-decl of a target-builtin). + STMT is the original scalar stmt that we are vectorizing. */ + +static tree +vect_gen_widened_results_half (enum tree_code code, tree vectype, tree decl, + tree vec_oprnd0, tree vec_oprnd1, int op_type, + tree vec_dest, block_stmt_iterator *bsi, + tree stmt) +{ + tree expr; + tree new_stmt; + tree new_temp; + tree sym; + ssa_op_iter iter; + + /* Generate half of the widened result: */ + if (code == CALL_EXPR) + { + /* Target specific support */ + if (op_type == binary_op) + expr = build_call_expr (decl, 2, vec_oprnd0, vec_oprnd1); + else + expr = build_call_expr (decl, 1, vec_oprnd0); + } + else + { + /* Generic support */ + gcc_assert (op_type == TREE_CODE_LENGTH (code)); + if (op_type == binary_op) + expr = build2 (code, vectype, vec_oprnd0, vec_oprnd1); + else + expr = build1 (code, vectype, vec_oprnd0); + } + new_stmt = build_gimple_modify_stmt (vec_dest, expr); + new_temp = make_ssa_name (vec_dest, new_stmt); + GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; + vect_finish_stmt_generation (stmt, new_stmt, bsi); + + if (code == CALL_EXPR) + { + FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, SSA_OP_ALL_VIRTUALS) + { + if (TREE_CODE (sym) == SSA_NAME) + sym = SSA_NAME_VAR (sym); + mark_sym_for_renaming (sym); + } + } + + return new_stmt; +} + -Check if STMT performs a conversion operation, that can be vectorized. -If VEC_STMT is also passed, vectorize the STMT: create a vectorized -stmt to replace it, put it in VEC_STMT, and insert it at BSI. -Return FALSE if not a vectorizable STMT, TRUE otherwise. */ +/* Check if STMT performs a conversion operation, that can be vectorized. + If VEC_STMT is also passed, vectorize the STMT: create a vectorized + stmt to replace it, put it in VEC_STMT, and insert it at BSI. + Return FALSE if not a vectorizable STMT, TRUE otherwise. 
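+
+   The conversion may also change the number of elements per vector;
+   schematically (illustrative):
+     NONE:   nunits_out == nunits_in      (e.g. int <-> float of the
+                                           same width)
+     WIDEN:  nunits_out == nunits_in / 2  (each input vector yields
+                                           two wider result vectors)
+     NARROW: nunits_in == nunits_out / 2  (two input vectors are
+                                           packed into one result)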
*/ bool -vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, - tree * vec_stmt) +vectorizable_conversion (tree stmt, block_stmt_iterator *bsi, + tree *vec_stmt, slp_tree slp_node) { tree vec_dest; tree scalar_dest; tree operation; tree op0; - tree vec_oprnd0 = NULL_TREE; + tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); - enum tree_code code; + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK; + tree decl1 = NULL_TREE, decl2 = NULL_TREE; tree new_temp; tree def, def_stmt; - enum vect_def_type dt0; - tree new_stmt; + enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; + tree new_stmt = NULL_TREE; + stmt_vec_info prev_stmt_info; int nunits_in; int nunits_out; - int ncopies, j; tree vectype_out, vectype_in; + int ncopies, j; + tree expr; tree rhs_type, lhs_type; tree builtin_decl; - stmt_vec_info prev_stmt_info; + enum { NARROW, NONE, WIDEN } modifier; + int i; + VEC(tree,heap) *vec_oprnds0 = NULL; + tree vop0; /* Is STMT a vectorizable conversion? */ @@ -1989,33 +3326,63 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR) return false; - /* Check types of lhs and rhs */ + /* Check types of lhs and rhs. */ op0 = TREE_OPERAND (operation, 0); rhs_type = TREE_TYPE (op0); vectype_in = get_vectype_for_scalar_type (rhs_type); + if (!vectype_in) + return false; nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); lhs_type = TREE_TYPE (scalar_dest); vectype_out = get_vectype_for_scalar_type (lhs_type); - gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out); + if (!vectype_out) + return false; nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); - /* FORNOW: need to extend to support short<->float conversions as well. */ - if (nunits_out != nunits_in) + /* FORNOW */ + if (nunits_in == nunits_out / 2) + modifier = NARROW; + else if (nunits_out == nunits_in) + modifier = NONE; + else if (nunits_out == nunits_in / 2) + modifier = WIDEN; + else return false; - /* Bail out if the types are both integral or non-integral */ + if (modifier == NONE) + gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out); + + /* Bail out if the types are both integral or non-integral. */ if ((INTEGRAL_TYPE_P (rhs_type) && INTEGRAL_TYPE_P (lhs_type)) || (!INTEGRAL_TYPE_P (rhs_type) && !INTEGRAL_TYPE_P (lhs_type))) return false; + if (modifier == NARROW) + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out; + else + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; + + /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies + this, so we can safely override NCOPIES with 1 here. */ + if (slp_node) + ncopies = 1; + /* Sanity check: make sure that at least one copy of the vectorized stmt needs to be generated. */ - ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; gcc_assert (ncopies >= 1); - if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt0)) + /* FORNOW. This restriction should be relaxed. */ + if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "multiple types in nested loop."); + return false; + } + + /* Check the operands of the operation. 
*/ + if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -2023,60 +3390,161 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, } /* Supportable by target? */ - if (!targetm.vectorize.builtin_conversion (code, vectype_in)) + if ((modifier == NONE + && !targetm.vectorize.builtin_conversion (code, vectype_in)) + || (modifier == WIDEN + && !supportable_widening_operation (code, stmt, vectype_in, + &decl1, &decl2, + &code1, &code2)) + || (modifier == NARROW + && !supportable_narrowing_operation (code, stmt, vectype_in, + &code1))) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "op not supported by target."); return false; } + if (modifier != NONE) + { + STMT_VINFO_VECTYPE (stmt_info) = vectype_in; + /* FORNOW: SLP not supported. */ + if (STMT_SLP_TYPE (stmt_info)) + return false; + } + if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type; return true; } - /** Transform. **/ - + /** Transform. **/ if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "transform conversion."); /* Handle def. */ vec_dest = vect_create_destination_var (scalar_dest, vectype_out); + if (modifier == NONE && !slp_node) + vec_oprnds0 = VEC_alloc (tree, heap, 1); + prev_stmt_info = NULL; - for (j = 0; j < ncopies; j++) + switch (modifier) { - tree sym; - ssa_op_iter iter; + case NONE: + for (j = 0; j < ncopies; j++) + { + tree sym; + ssa_op_iter iter; + + if (j == 0) + vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node); + else + vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL); + + builtin_decl = + targetm.vectorize.builtin_conversion (code, vectype_in); + for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++) + { + new_stmt = build_call_expr (builtin_decl, 1, vop0); + + /* Arguments are ready. create the new vector stmt. */ + new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt); + new_temp = make_ssa_name (vec_dest, new_stmt); + GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; + vect_finish_stmt_generation (stmt, new_stmt, bsi); + FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, + SSA_OP_ALL_VIRTUALS) + { + if (TREE_CODE (sym) == SSA_NAME) + sym = SSA_NAME_VAR (sym); + mark_sym_for_renaming (sym); + } + if (slp_node) + VEC_quick_push (tree, SLP_TREE_VEC_STMTS (slp_node), new_stmt); + } + + if (j == 0) + STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; + else + STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; + prev_stmt_info = vinfo_for_stmt (new_stmt); + } + break; + + case WIDEN: + /* In case the vectorization factor (VF) is bigger than the number + of elements that we can fit in a vectype (nunits), we have to + generate more than one vector stmt - i.e - we need to "unroll" + the vector stmt by a factor VF/nunits. 
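+         For example (a sketch, assuming V8HI operands widened into
+         V4SI results), each input vector yields two result vectors:
+           vlo = widen_lo (vx)       first half of the widened result
+           vhi = widen_hi (vx)       second half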
*/ + for (j = 0; j < ncopies; j++) + { + if (j == 0) + vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); + else + vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); + + STMT_VINFO_VECTYPE (stmt_info) = vectype_in; + + /* Generate first half of the widened result: */ + new_stmt + = vect_gen_widened_results_half (code1, vectype_out, decl1, + vec_oprnd0, vec_oprnd1, + unary_op, vec_dest, bsi, stmt); + if (j == 0) + STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; + else + STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; + prev_stmt_info = vinfo_for_stmt (new_stmt); + + /* Generate second half of the widened result: */ + new_stmt + = vect_gen_widened_results_half (code2, vectype_out, decl2, + vec_oprnd0, vec_oprnd1, + unary_op, vec_dest, bsi, stmt); + STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; + prev_stmt_info = vinfo_for_stmt (new_stmt); + } + break; + + case NARROW: + /* In case the vectorization factor (VF) is bigger than the number + of elements that we can fit in a vectype (nunits), we have to + generate more than one vector stmt - i.e - we need to "unroll" + the vector stmt by a factor VF/nunits. */ + for (j = 0; j < ncopies; j++) + { + /* Handle uses. */ + if (j == 0) + { + vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); + vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); + } + else + { + vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1); + vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); + } - if (j == 0) - vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); - else - vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); + /* Arguments are ready. Create the new vector stmt. */ + expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1); + new_stmt = build_gimple_modify_stmt (vec_dest, expr); + new_temp = make_ssa_name (vec_dest, new_stmt); + GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; + vect_finish_stmt_generation (stmt, new_stmt, bsi); - builtin_decl = - targetm.vectorize.builtin_conversion (code, vectype_in); - new_stmt = build_call_expr (builtin_decl, 1, vec_oprnd0); + if (j == 0) + STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; + else + STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; - /* Arguments are ready. create the new vector stmt. */ - new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt); - new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, new_stmt, bsi); - FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, SSA_OP_ALL_VIRTUALS) - { - if (TREE_CODE (sym) == SSA_NAME) - sym = SSA_NAME_VAR (sym); - mark_sym_for_renaming (sym); - } + prev_stmt_info = vinfo_for_stmt (new_stmt); + } - if (j == 0) - STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; - else - STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; - prev_stmt_info = vinfo_for_stmt (new_stmt); + *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); } + return true; } @@ -2089,20 +3557,23 @@ vectorizable_conversion (tree stmt, block_stmt_iterator * bsi, Return FALSE if not a vectorizable STMT, TRUE otherwise. 
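   For example (illustrative), the scalar copy
     S1: x = y
   is transformed into the vector copy
     VS1: vx = vy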
*/ bool -vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) +vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt, + slp_tree slp_node) { tree vec_dest; tree scalar_dest; tree op; - tree vec_oprnd; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype = STMT_VINFO_VECTYPE (stmt_info); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); tree new_temp; tree def, def_stmt; - enum vect_def_type dt; + enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; int nunits = TYPE_VECTOR_SUBPARTS (vectype); int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits; + int i; + VEC(tree,heap) *vec_oprnds = NULL; + tree vop; gcc_assert (ncopies >= 1); if (ncopies > 1) @@ -2131,7 +3602,7 @@ vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) return false; op = GIMPLE_STMT_OPERAND (stmt, 1); - if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt)) + if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt[0])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -2141,6 +3612,9 @@ vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type; + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "=== vectorizable_assignment ==="); + vect_model_simple_cost (stmt_info, ncopies, dt, NULL); return true; } @@ -2152,15 +3626,22 @@ vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) vec_dest = vect_create_destination_var (scalar_dest, vectype); /* Handle use. */ - op = GIMPLE_STMT_OPERAND (stmt, 1); - vec_oprnd = vect_get_vec_def_for_operand (op, stmt, NULL); + vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node); /* Arguments are ready. create the new vector stmt. */ - *vec_stmt = build_gimple_modify_stmt (vec_dest, vec_oprnd); - new_temp = make_ssa_name (vec_dest, *vec_stmt); - GIMPLE_STMT_OPERAND (*vec_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, *vec_stmt, bsi); + for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++) + { + *vec_stmt = build_gimple_modify_stmt (vec_dest, vop); + new_temp = make_ssa_name (vec_dest, *vec_stmt); + GIMPLE_STMT_OPERAND (*vec_stmt, 0) = new_temp; + vect_finish_stmt_generation (stmt, *vec_stmt, bsi); + STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt; + + if (slp_node) + VEC_quick_push (tree, SLP_TREE_VEC_STMTS (slp_node), *vec_stmt); + } + VEC_free (tree, heap, vec_oprnds); return true; } @@ -2215,6 +3696,10 @@ vectorizable_induction (tree phi, block_stmt_iterator *bsi ATTRIBUTE_UNUSED, if (!STMT_VINFO_RELEVANT_P (stmt_info)) return false; + /* FORNOW: SLP not supported. */ + if (STMT_SLP_TYPE (stmt_info)) + return false; + gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def); if (STMT_VINFO_LIVE_P (stmt_info)) @@ -2231,6 +3716,9 @@ vectorizable_induction (tree phi, block_stmt_iterator *bsi ATTRIBUTE_UNUSED, if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type; + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "=== vectorizable_induction ==="); + vect_model_induction_cost (stmt_info, ncopies); return true; } @@ -2253,16 +3741,18 @@ vectorizable_induction (tree phi, block_stmt_iterator *bsi ATTRIBUTE_UNUSED, Return FALSE if not a vectorizable STMT, TRUE otherwise. 
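   For example (illustrative), the scalar
     S1: x = y + z
   is transformed into
     VS1: vx = vy + vz
   with VF/nunits such vector stmts created per scalar stmt.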
*/ bool -vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) +vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt, + slp_tree slp_node) { tree vec_dest; tree scalar_dest; tree operation; tree op0, op1 = NULL; - tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; + tree vec_oprnd1 = NULL_TREE; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype = STMT_VINFO_VECTYPE (stmt_info); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); enum tree_code code; enum machine_mode vec_mode; tree new_temp; @@ -2271,16 +3761,29 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) int icode; enum machine_mode optab_op2_mode; tree def, def_stmt; - enum vect_def_type dt0, dt1; - tree new_stmt; + enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; + tree new_stmt = NULL_TREE; stmt_vec_info prev_stmt_info; int nunits_in = TYPE_VECTOR_SUBPARTS (vectype); int nunits_out; tree vectype_out; int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; - int j; - + int j, i; + VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL; + tree vop0, vop1; + + /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies + this, so we can safely override NCOPIES with 1 here. */ + if (slp_node) + ncopies = 1; gcc_assert (ncopies >= 1); + /* FORNOW. This restriction should be relaxed. */ + if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "multiple types in nested loop."); + return false; + } if (!STMT_VINFO_RELEVANT_P (stmt_info)) return false; @@ -2305,12 +3808,20 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest)); + if (!vectype_out) + return false; nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); if (nunits_out != nunits_in) return false; operation = GIMPLE_STMT_OPERAND (stmt, 1); code = TREE_CODE (operation); + + /* For pointer addition, we should use the normal plus for + the vector addition. */ + if (code == POINTER_PLUS_EXPR) + code = PLUS_EXPR; + optab = optab_for_tree_code (code, vectype); /* Support only unary or binary operations. 
*/ @@ -2323,7 +3834,7 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) } op0 = TREE_OPERAND (operation, 0); - if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt0)) + if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -2333,7 +3844,7 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (op_type == binary_op) { op1 = TREE_OPERAND (operation, 1); - if (!vect_is_simple_use (op1, loop_vinfo, &def_stmt, &def, &dt1)) + if (!vect_is_simple_use (op1, loop_vinfo, &def_stmt, &def, &dt[1])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -2349,7 +3860,7 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) return false; } vec_mode = TYPE_MODE (vectype); - icode = (int) optab->handlers[(int) vec_mode].insn_code; + icode = (int) optab_handler (optab, vec_mode)->insn_code; if (icode == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) @@ -2382,8 +3893,8 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) by a scalar shift operand. */ optab_op2_mode = insn_data[icode].operand[2].mode; if (! (VECTOR_MODE_P (optab_op2_mode) - || dt1 == vect_constant_def - || dt1 == vect_invariant_def)) + || dt[1] == vect_constant_def + || dt[1] == vect_invariant_def)) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "operand mode requires invariant argument."); @@ -2394,6 +3905,9 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = op_vec_info_type; + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "=== vectorizable_operation ==="); + vect_model_simple_cost (stmt_info, ncopies, dt, NULL); return true; } @@ -2405,6 +3919,11 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) /* Handle def. */ vec_dest = vect_create_destination_var (scalar_dest, vectype); + if (!slp_node) + vec_oprnds0 = VEC_alloc (tree, heap, 1); + if (op_type == binary_op) + vec_oprnds1 = VEC_alloc (tree, heap, 1); + /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the @@ -2464,45 +3983,55 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) /* Handle uses. */ if (j == 0) { - vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); - if (op_type == binary_op) + if (op_type == binary_op + && (code == LSHIFT_EXPR || code == RSHIFT_EXPR)) { - if (code == LSHIFT_EXPR || code == RSHIFT_EXPR) - { - /* Vector shl and shr insn patterns can be defined with - scalar operand 2 (shift operand). In this case, use - constant or loop invariant op1 directly, without - extending it to vector mode first. */ - optab_op2_mode = insn_data[icode].operand[2].mode; - if (!VECTOR_MODE_P (optab_op2_mode)) - { - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "operand 1 using scalar mode."); - vec_oprnd1 = op1; - } - } - if (!vec_oprnd1) - vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL); + /* Vector shl and shr insn patterns can be defined with scalar + operand 2 (shift operand). In this case, use constant or loop + invariant op1 directly, without extending it to vector mode + first. 
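+                For example (a sketch), for
+                  a[i] = b[i] << 3
+                such a target allows generating
+                  va = vb << 3
+                instead of first building the vector {3,3,3,3}.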
*/ + optab_op2_mode = insn_data[icode].operand[2].mode; + if (!VECTOR_MODE_P (optab_op2_mode)) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "operand 1 using scalar mode."); + vec_oprnd1 = op1; + VEC_quick_push (tree, vec_oprnds1, vec_oprnd1); + } } + + /* vec_oprnd is available if operand 1 should be of a scalar-type + (a special case for certain kind of vector shifts); otherwise, + operand 1 should be of a vector type (the usual case). */ + if (op_type == binary_op && !vec_oprnd1) + vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1, + slp_node); + else + vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, + slp_node); } else - { - vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); - if (op_type == binary_op) - vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt1, vec_oprnd1); - } + vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1); - /* Arguments are ready. create the new vector stmt. */ + /* Arguments are ready. Create the new vector stmt. */ + for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++) + { + if (op_type == binary_op) + { + vop1 = VEC_index (tree, vec_oprnds1, i); + new_stmt = build_gimple_modify_stmt (vec_dest, + build2 (code, vectype, vop0, vop1)); + } + else + new_stmt = build_gimple_modify_stmt (vec_dest, + build1 (code, vectype, vop0)); - if (op_type == binary_op) - new_stmt = build_gimple_modify_stmt (vec_dest, - build2 (code, vectype, vec_oprnd0, vec_oprnd1)); - else - new_stmt = build_gimple_modify_stmt (vec_dest, - build1 (code, vectype, vec_oprnd0)); - new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, new_stmt, bsi); + new_temp = make_ssa_name (vec_dest, new_stmt); + GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; + vect_finish_stmt_generation (stmt, new_stmt, bsi); + if (slp_node) + VEC_quick_push (tree, SLP_TREE_VEC_STMTS (slp_node), new_stmt); + } if (j == 0) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; @@ -2511,6 +4040,10 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) prev_stmt_info = vinfo_for_stmt (new_stmt); } + VEC_free (tree, heap, vec_oprnds0); + if (vec_oprnds1) + VEC_free (tree, heap, vec_oprnds1); + return true; } @@ -2525,7 +4058,7 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) bool vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, - tree *vec_stmt) + tree *vec_stmt) { tree vec_dest; tree scalar_dest; @@ -2534,10 +4067,11 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, tree vec_oprnd0=NULL, vec_oprnd1=NULL; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); - enum tree_code code; + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + enum tree_code code, code1 = ERROR_MARK; tree new_temp; tree def, def_stmt; - enum vect_def_type dt0; + enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; tree new_stmt; stmt_vec_info prev_stmt_info; int nunits_in; @@ -2547,9 +4081,6 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, int j; tree expr; tree vectype_in; - tree scalar_type; - optab optab; - enum machine_mode vec_mode; if (!STMT_VINFO_RELEVANT_P (stmt_info)) return false; @@ -2579,17 +4110,27 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, op0 = TREE_OPERAND (operation, 0); vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0)); + if (!vectype_in) + return false; nunits_in = 
TYPE_VECTOR_SUBPARTS (vectype_in); scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); - scalar_type = TREE_TYPE (scalar_dest); - vectype_out = get_vectype_for_scalar_type (scalar_type); + vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest)); + if (!vectype_out) + return false; nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); if (nunits_in != nunits_out / 2) /* FORNOW */ return false; ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out; gcc_assert (ncopies >= 1); + /* FORNOW. This restriction should be relaxed. */ + if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "multiple types in nested loop."); + return false; + } if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest)) && INTEGRAL_TYPE_P (TREE_TYPE (op0))) @@ -2599,7 +4140,7 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, return false; /* Check the operands of the operation. */ - if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt0)) + if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -2607,13 +4148,7 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, } /* Supportable by target? */ - code = VEC_PACK_TRUNC_EXPR; - optab = optab_for_tree_code (code, vectype_in); - if (!optab) - return false; - - vec_mode = TYPE_MODE (vectype_in); - if (optab->handlers[(int) vec_mode].insn_code == CODE_FOR_nothing) + if (!supportable_narrowing_operation (code, stmt, vectype_in, &code1)) return false; STMT_VINFO_VECTYPE (stmt_info) = vectype_in; @@ -2621,6 +4156,9 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type; + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "=== vectorizable_demotion ==="); + vect_model_simple_cost (stmt_info, ncopies, dt, NULL); return true; } @@ -2643,16 +4181,16 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, if (j == 0) { vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); - vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); + vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); } else { - vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd1); - vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); + vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1); + vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); } /* Arguments are ready. Create the new vector stmt. */ - expr = build2 (code, vectype_out, vec_oprnd0, vec_oprnd1); + expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1); new_stmt = build_gimple_modify_stmt (vec_dest, expr); new_temp = make_ssa_name (vec_dest, new_stmt); GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; @@ -2671,64 +4209,6 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi, } -/* Function vect_gen_widened_results_half - - Create a vector stmt whose code, type, number of arguments, and result - variable are CODE, VECTYPE, OP_TYPE, and VEC_DEST, and its arguments are - VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI. - In the case that CODE is a CALL_EXPR, this means that a call to DECL - needs to be created (DECL is a function-decl of a target-builtin). - STMT is the original scalar stmt that we are vectorizing. 
*/ - -static tree -vect_gen_widened_results_half (enum tree_code code, tree vectype, tree decl, - tree vec_oprnd0, tree vec_oprnd1, int op_type, - tree vec_dest, block_stmt_iterator *bsi, - tree stmt) -{ - tree expr; - tree new_stmt; - tree new_temp; - tree sym; - ssa_op_iter iter; - - /* Generate half of the widened result: */ - if (code == CALL_EXPR) - { - /* Target specific support */ - if (op_type == binary_op) - expr = build_call_expr (decl, 2, vec_oprnd0, vec_oprnd1); - else - expr = build_call_expr (decl, 1, vec_oprnd0); - } - else - { - /* Generic support */ - gcc_assert (op_type == TREE_CODE_LENGTH (code)); - if (op_type == binary_op) - expr = build2 (code, vectype, vec_oprnd0, vec_oprnd1); - else - expr = build1 (code, vectype, vec_oprnd0); - } - new_stmt = build_gimple_modify_stmt (vec_dest, expr); - new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - vect_finish_stmt_generation (stmt, new_stmt, bsi); - - if (code == CALL_EXPR) - { - FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, SSA_OP_ALL_VIRTUALS) - { - if (TREE_CODE (sym) == SSA_NAME) - sym = SSA_NAME_VAR (sym); - mark_sym_for_renaming (sym); - } - } - - return new_stmt; -} - - /* Function vectorizable_type_promotion Check if STMT performs a binary or unary operation that involves @@ -2748,11 +4228,12 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, tree vec_oprnd0=NULL, vec_oprnd1=NULL; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); - enum tree_code code, code1 = CODE_FOR_nothing, code2 = CODE_FOR_nothing; + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK; tree decl1 = NULL_TREE, decl2 = NULL_TREE; int op_type; tree def, def_stmt; - enum vect_def_type dt0, dt1; + enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; tree new_stmt; stmt_vec_info prev_stmt_info; int nunits_in; @@ -2785,21 +4266,34 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, operation = GIMPLE_STMT_OPERAND (stmt, 1); code = TREE_CODE (operation); - if (code != NOP_EXPR && code != WIDEN_MULT_EXPR) + if (code != NOP_EXPR && code != CONVERT_EXPR + && code != WIDEN_MULT_EXPR) return false; op0 = TREE_OPERAND (operation, 0); vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0)); + if (!vectype_in) + return false; nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); - ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; - gcc_assert (ncopies >= 1); scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest)); + if (!vectype_out) + return false; nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); if (nunits_out != nunits_in / 2) /* FORNOW */ return false; + ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in; + gcc_assert (ncopies >= 1); + /* FORNOW. This restriction should be relaxed. */ + if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "multiple types in nested loop."); + return false; + } + if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest)) && INTEGRAL_TYPE_P (TREE_TYPE (op0))) || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest)) @@ -2808,7 +4302,7 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, return false; /* Check the operands of the operation. 
*/ - if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt0)) + if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -2819,7 +4313,7 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, if (op_type == binary_op) { op1 = TREE_OPERAND (operation, 1); - if (!vect_is_simple_use (op1, loop_vinfo, &def_stmt, &def, &dt1)) + if (!vect_is_simple_use (op1, loop_vinfo, &def_stmt, &def, &dt[1])) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "use not simple."); @@ -2837,6 +4331,9 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type; + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "=== vectorizable_promotion ==="); + vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL); return true; } @@ -2866,9 +4363,9 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi, } else { - vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0); + vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); if (op_type == binary_op) - vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt1, vec_oprnd1); + vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1); } /* Arguments are ready. Create the new vector stmt. We are creating @@ -2922,15 +4419,16 @@ vect_strided_store_supported (tree vectype) return false; } - if (interleave_high_optab->handlers[(int) mode].insn_code + if (optab_handler (interleave_high_optab, mode)->insn_code == CODE_FOR_nothing - || interleave_low_optab->handlers[(int) mode].insn_code + || optab_handler (interleave_low_optab, mode)->insn_code == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "interleave op not supported by target."); return false; } + return true; } @@ -3078,7 +4576,8 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain, Return FALSE if not a vectorizable STMT, TRUE otherwise. */ bool -vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) +vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt, + slp_tree slp_node) { tree scalar_dest; tree data_ref; @@ -3088,11 +4587,10 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL; tree vectype = STMT_VINFO_VECTYPE (stmt_info); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); enum machine_mode vec_mode; tree dummy; - enum dr_alignment_support alignment_support_cheme; - ssa_op_iter iter; - def_operand_p def_p; + enum dr_alignment_support alignment_support_scheme; tree def, def_stmt; enum vect_def_type dt; stmt_vec_info prev_stmt_info = NULL; @@ -3104,8 +4602,27 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) bool strided_store = false; unsigned int group_size, i; VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL; + bool inv_p; + VEC(tree,heap) *vec_oprnds = NULL; + bool slp = (slp_node != NULL); + stmt_vec_info first_stmt_vinfo; + unsigned int vec_num; + + /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies + this, so we can safely override NCOPIES with 1 here. */ + if (slp) + ncopies = 1; + gcc_assert (ncopies >= 1); + /* FORNOW. This restriction should be relaxed. 
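+     That is, in outer-loop vectorization an inner-loop store is
+     handled only when one vector stmt covers it; e.g. with VF == 8
+     and V4SI vectors, ncopies == 2 and the store is rejected for now.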
*/ + if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "multiple types in nested loop."); + return false; + } + if (!STMT_VINFO_RELEVANT_P (stmt_info)) return false; @@ -3127,7 +4644,7 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); if (TREE_CODE (scalar_dest) != ARRAY_REF && TREE_CODE (scalar_dest) != INDIRECT_REF - && !DR_GROUP_FIRST_DR (stmt_info)) + && !STMT_VINFO_STRIDED_ACCESS (stmt_info)) return false; op = GIMPLE_STMT_OPERAND (stmt, 1); @@ -3141,30 +4658,30 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) vec_mode = TYPE_MODE (vectype); /* FORNOW. In some cases can vectorize even if data-type not supported (e.g. - array initialization with 0). */ - if (mov_optab->handlers[(int)vec_mode].insn_code == CODE_FOR_nothing) + if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing) return false; if (!STMT_VINFO_DATA_REF (stmt_info)) return false; - if (DR_GROUP_FIRST_DR (stmt_info)) + if (STMT_VINFO_STRIDED_ACCESS (stmt_info)) { strided_store = true; - if (!vect_strided_store_supported (vectype)) + if (!vect_strided_store_supported (vectype) + && !PURE_SLP_STMT (stmt_info) && !slp) return false; } if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = store_vec_info_type; + if (!PURE_SLP_STMT (stmt_info)) + vect_model_store_cost (stmt_info, ncopies, dt, NULL); return true; } /** Transform. **/ - if (vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "transform store. ncopies = %d",ncopies); - if (strided_store) { first_stmt = DR_GROUP_FIRST_DR (stmt_info); @@ -3173,28 +4690,45 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++; + /* FORNOW */ + gcc_assert (!nested_in_vect_loop_p (loop, stmt)); + /* We vectorize all the stmts of the interleaving group when we reach the last stmt in the group. */ if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt)) - < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))) + < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt)) + && !slp) { *vec_stmt = NULL_TREE; return true; } + + if (slp) + strided_store = false; + + /* VEC_NUM is the number of vect stmts to be created for this group. */ + if (slp && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) < group_size) + vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + else + vec_num = group_size; } else { first_stmt = stmt; first_dr = dr; - group_size = 1; + group_size = vec_num = 1; + first_stmt_vinfo = stmt_info; } + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "transform store. ncopies = %d",ncopies); + dr_chain = VEC_alloc (tree, heap, group_size); oprnds = VEC_alloc (tree, heap, group_size); - alignment_support_cheme = vect_supportable_dr_alignment (first_dr); - gcc_assert (alignment_support_cheme); - gcc_assert (alignment_support_cheme == dr_aligned); /* FORNOW */ + alignment_support_scheme = vect_supportable_dr_alignment (first_dr); + gcc_assert (alignment_support_scheme); + gcc_assert (alignment_support_scheme == dr_aligned); /* FORNOW */ /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate @@ -3243,33 +4777,50 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (j == 0) { - /* For interleaved stores we collect vectorized defs for all the - stores in the group in DR_CHAIN and OPRNDS. 
DR_CHAIN is then used - as an input to vect_permute_store_chain(), and OPRNDS as an input - to vect_get_vec_def_for_stmt_copy() for the next copy. - If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and - OPRNDS are of size 1. */ - next_stmt = first_stmt; - for (i = 0; i < group_size; i++) - { - /* Since gaps are not supported for interleaved stores, GROUP_SIZE - is the exact number of stmts in the chain. Therefore, NEXT_STMT - can't be NULL_TREE. In case that there is no interleaving, - GROUP_SIZE is 1, and only one iteration of the loop will be - executed. */ - gcc_assert (next_stmt); - op = GIMPLE_STMT_OPERAND (next_stmt, 1); - vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt, NULL); - VEC_quick_push(tree, dr_chain, vec_oprnd); - VEC_quick_push(tree, oprnds, vec_oprnd); - next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt)); + if (slp) + { + /* Get vectorized arguments for SLP_NODE. */ + vect_get_slp_defs (slp_node, &vec_oprnds, NULL); + + vec_oprnd = VEC_index (tree, vec_oprnds, 0); + } + else + { + /* For interleaved stores we collect vectorized defs for all the + stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then + used as an input to vect_permute_store_chain(), and OPRNDS as + an input to vect_get_vec_def_for_stmt_copy() for the next copy. + + If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and + OPRNDS are of size 1. */ + next_stmt = first_stmt; + for (i = 0; i < group_size; i++) + { + /* Since gaps are not supported for interleaved stores, + GROUP_SIZE is the exact number of stmts in the chain. + Therefore, NEXT_STMT can't be NULL_TREE. In case that + there is no interleaving, GROUP_SIZE is 1, and only one + iteration of the loop will be executed. */ + gcc_assert (next_stmt); + op = GIMPLE_STMT_OPERAND (next_stmt, 1); + + vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt, + NULL); + VEC_quick_push(tree, dr_chain, vec_oprnd); + VEC_quick_push(tree, oprnds, vec_oprnd); + next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt)); + } } - dataref_ptr = vect_create_data_ref_ptr (first_stmt, bsi, NULL_TREE, + dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE, &dummy, &ptr_incr, false, - TREE_TYPE (vec_oprnd)); + TREE_TYPE (vec_oprnd), &inv_p); + gcc_assert (!inv_p); } else { + /* FORNOW SLP doesn't work for multiple types. */ + gcc_assert (!slp); + /* For interleaved stores we created vectorized defs for all the defs stored in OPRNDS in the previous iteration (previous copy). DR_CHAIN is then used as an input to vect_permute_store_chain(), @@ -3284,7 +4835,8 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) VEC_replace(tree, dr_chain, i, vec_oprnd); VEC_replace(tree, oprnds, i, vec_oprnd); } - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt); + dataref_ptr = + bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt, NULL_TREE); } if (strided_store) @@ -3297,54 +4849,35 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) } next_stmt = first_stmt; - for (i = 0; i < group_size; i++) + for (i = 0; i < vec_num; i++) { - /* For strided stores vectorized defs are interleaved in - vect_permute_store_chain(). */ - if (strided_store) - vec_oprnd = VEC_index(tree, result_chain, i); + if (i > 0) + /* Bump the vector pointer. 
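+            (advance DATAREF_PTR by one vector size for each vector
+            stmt created for the group; the first stmt, i == 0, uses
+            the pointer as created or updated above).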
*/ + dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt, + NULL_TREE); + + if (slp) + vec_oprnd = VEC_index (tree, vec_oprnds, i); + else if (strided_store) + /* For strided stores vectorized defs are interleaved in + vect_permute_store_chain(). */ + vec_oprnd = VEC_index (tree, result_chain, i); data_ref = build_fold_indirect_ref (dataref_ptr); /* Arguments are ready. Create the new vector stmt. */ new_stmt = build_gimple_modify_stmt (data_ref, vec_oprnd); vect_finish_stmt_generation (stmt, new_stmt, bsi); - - /* Set the VDEFs for the vector pointer. If this virtual def - has a use outside the loop and a loop peel is performed - then the def may be renamed by the peel. Mark it for - renaming so the later use will also be renamed. */ - copy_virtual_operands (new_stmt, next_stmt); - if (j == 0) - { - /* The original store is deleted so the same SSA_NAMEs - can be used. */ - FOR_EACH_SSA_TREE_OPERAND (def, next_stmt, iter, SSA_OP_VDEF) - { - SSA_NAME_DEF_STMT (def) = new_stmt; - mark_sym_for_renaming (SSA_NAME_VAR (def)); - } - - STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; - } + mark_symbols_for_renaming (new_stmt); + + if (j == 0) + STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; else - { - /* Create new names for all the definitions created by COPY and - add replacement mappings for each new name. */ - FOR_EACH_SSA_DEF_OPERAND (def_p, new_stmt, iter, SSA_OP_VDEF) - { - create_new_def_for (DEF_FROM_PTR (def_p), new_stmt, def_p); - mark_sym_for_renaming (SSA_NAME_VAR (DEF_FROM_PTR (def_p))); - } - - STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; - } + STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; prev_stmt_info = vinfo_for_stmt (new_stmt); next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt)); if (!next_stmt) break; - /* Bump the vector pointer. */ - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt); } } @@ -3355,14 +4888,17 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) /* Function vect_setup_realignment This function is called when vectorizing an unaligned load using - the dr_unaligned_software_pipeline scheme. + the dr_explicit_realign[_optimized] scheme. This function generates the following code at the loop prolog: p = initial_addr; - msq_init = *(floor(p)); # prolog load + x msq_init = *(floor(p)); # prolog load realignment_token = call target_builtin; loop: - msq = phi (msq_init, ---) + x msq = phi (msq_init, ---) + + The stmts marked with x are generated only for the case of + dr_explicit_realign_optimized. The code above sets up a new (vector) pointer, pointing to the first location accessed by STMT, and a "floor-aligned" load using that pointer. @@ -3371,19 +4907,29 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) whose arguments are the result of the prolog-load (created by this function) and the result of a load that takes place in the loop (to be created by the caller to this function). + + For the case of dr_explicit_realign_optimized: The caller to this function uses the phi-result (msq) to create the realignment code inside the loop, and sets up the missing phi argument, as follows: - loop: msq = phi (msq_init, lsq) lsq = *(floor(p')); # load in loop result = realign_load (msq, lsq, realignment_token); + For the case of dr_explicit_realign: + loop: + msq = *(floor(p)); # load in loop + p' = p + (VS-1); + lsq = *(floor(p')); # load in loop + result = realign_load (msq, lsq, realignment_token); + Input: STMT - (scalar) load stmt to be vectorized. 
This load accesses a memory location that may be unaligned. BSI - place where new code is to be inserted. + ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes + is used. Output: REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load @@ -3392,45 +4938,144 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) static tree vect_setup_realignment (tree stmt, block_stmt_iterator *bsi, - tree *realignment_token) + tree *realignment_token, + enum dr_alignment_support alignment_support_scheme, + tree init_addr, + struct loop **at_loop) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype = STMT_VINFO_VECTYPE (stmt_info); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); - edge pe = loop_preheader_edge (loop); + edge pe; tree scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0); tree vec_dest; - tree init_addr; tree inc; tree ptr; tree data_ref; tree new_stmt; basic_block new_bb; - tree msq_init; + tree msq_init = NULL_TREE; tree new_temp; tree phi_stmt; - tree msq; + tree msq = NULL_TREE; + tree stmts = NULL_TREE; + bool inv_p; + bool compute_in_loop = false; + bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt); + struct loop *containing_loop = (bb_for_stmt (stmt))->loop_father; + struct loop *loop_for_initial_load; + + gcc_assert (alignment_support_scheme == dr_explicit_realign + || alignment_support_scheme == dr_explicit_realign_optimized); + + /* We need to generate three things: + 1. the misalignment computation + 2. the extra vector load (for the optimized realignment scheme). + 3. the phi node for the two vectors from which the realignment is + done (for the optimized realignment scheme). + */ + + /* 1. Determine where to generate the misalignment computation. + + If INIT_ADDR is NULL_TREE, this indicates that the misalignment + calculation will be generated by this function, outside the loop (in the + preheader). Otherwise, INIT_ADDR had already been computed for us by the + caller, inside the loop. + + Background: If the misalignment remains fixed throughout the iterations of + the loop, then both realignment schemes are applicable, and also the + misalignment computation can be done outside LOOP. This is because we are + vectorizing LOOP, and so the memory accesses in LOOP advance in steps that + are a multiple of VS (the Vector Size), and therefore the misalignment in + different vectorized LOOP iterations is always the same. + The problem arises only if the memory access is in an inner-loop nested + inside LOOP, which is now being vectorized using outer-loop vectorization. + This is the only case when the misalignment of the memory access may not + remain fixed throughout the iterations of the inner-loop (as explained in + detail in vect_supportable_dr_alignment). In this case, not only is the + optimized realignment scheme not applicable, but also the misalignment + computation (and generation of the realignment token that is passed to + REALIGN_LOAD) have to be done inside the loop. + + In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode + or not, which in turn determines if the misalignment is computed inside + the inner-loop, or outside LOOP. */ + + if (init_addr != NULL_TREE) + { + compute_in_loop = true; + gcc_assert (alignment_support_scheme == dr_explicit_realign); + } - /* 1. 
Create msq_init = *(floor(p1)) in the loop preheader */ - vec_dest = vect_create_destination_var (scalar_dest, vectype); - ptr = vect_create_data_ref_ptr (stmt, bsi, NULL_TREE, &init_addr, &inc, true, - NULL_TREE); - data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr); - new_stmt = build_gimple_modify_stmt (vec_dest, data_ref); - new_temp = make_ssa_name (vec_dest, new_stmt); - GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - new_bb = bsi_insert_on_edge_immediate (pe, new_stmt); - gcc_assert (!new_bb); - msq_init = GIMPLE_STMT_OPERAND (new_stmt, 0); - copy_virtual_operands (new_stmt, stmt); - update_vuses_to_preheader (new_stmt, loop); - /* 2. Create permutation mask, if required, in loop preheader. */ + /* 2. Determine where to generate the extra vector load. + + For the optimized realignment scheme, instead of generating two vector + loads in each iteration, we generate a single extra vector load in the + preheader of the loop, and in each iteration reuse the result of the + vector load from the previous iteration. In case the memory access is in + an inner-loop nested inside LOOP, which is now being vectorized using + outer-loop vectorization, we need to determine whether this initial vector + load should be generated at the preheader of the inner-loop, or can be + generated at the preheader of LOOP. If the memory access has no evolution + in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has + to be generated inside LOOP (in the preheader of the inner-loop). */ + + if (nested_in_vect_loop) + { + tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info); + bool invariant_in_outerloop = + (tree_int_cst_compare (outerloop_step, size_zero_node) == 0); + loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner); + } + else + loop_for_initial_load = loop; + if (at_loop) + *at_loop = loop_for_initial_load; + + /* 3. For the case of the optimized realignment, create the first vector + load at the loop preheader. */ + + if (alignment_support_scheme == dr_explicit_realign_optimized) + { + /* Create msq_init = *(floor(p1)) in the loop preheader */ + + gcc_assert (!compute_in_loop); + pe = loop_preheader_edge (loop_for_initial_load); + vec_dest = vect_create_destination_var (scalar_dest, vectype); + ptr = vect_create_data_ref_ptr (stmt, loop_for_initial_load, NULL_TREE, + &init_addr, &inc, true, NULL_TREE, &inv_p); + data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr); + new_stmt = build_gimple_modify_stmt (vec_dest, data_ref); + new_temp = make_ssa_name (vec_dest, new_stmt); + GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; + new_bb = bsi_insert_on_edge_immediate (pe, new_stmt); + gcc_assert (!new_bb); + msq_init = GIMPLE_STMT_OPERAND (new_stmt, 0); + } + + /* 4. Create realignment token using a target builtin, if available. + It is done either inside the containing loop, or before LOOP (as + determined above). */ + if (targetm.vectorize.builtin_mask_for_load) { tree builtin_decl; + /* Compute INIT_ADDR - the initial addressed accessed by this memref. */ + if (compute_in_loop) + gcc_assert (init_addr); /* already computed by the caller. */ + else + { + /* Generate the INIT_ADDR computation outside LOOP. 
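[Editorial illustration, not part of the patch: a standalone stand-in for what the mask_for_load builtin consumes. The realignment token is a function of INIT_ADDR alone, which is why it can be computed once outside LOOP whenever the misalignment is loop-invariant. The function name and the 16-byte vector size are assumptions of the sketch.]

#include <stdint.h>

static unsigned
realign_token_model (const void *init_addr)
{
  /* With 16-byte vectors the token reduces to the low address bits.  */
  return (unsigned) ((uintptr_t) init_addr & 15);
}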
*/ + init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts, + NULL_TREE, loop); + pe = loop_preheader_edge (loop); + new_bb = bsi_insert_on_edge_immediate (pe, stmts); + gcc_assert (!new_bb); + } + builtin_decl = targetm.vectorize.builtin_mask_for_load (); new_stmt = build_call_expr (builtin_decl, 1, init_addr); vec_dest = vect_create_destination_var (scalar_dest, @@ -3438,8 +5083,17 @@ vect_setup_realignment (tree stmt, block_stmt_iterator *bsi, new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt); new_temp = make_ssa_name (vec_dest, new_stmt); GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; - new_bb = bsi_insert_on_edge_immediate (pe, new_stmt); - gcc_assert (!new_bb); + + if (compute_in_loop) + bsi_insert_before (bsi, new_stmt, BSI_SAME_STMT); + else + { + /* Generate the misalignment computation outside LOOP. */ + pe = loop_preheader_edge (loop); + new_bb = bsi_insert_on_edge_immediate (pe, new_stmt); + gcc_assert (!new_bb); + } + *realignment_token = GIMPLE_STMT_OPERAND (new_stmt, 0); /* The result of the CALL_EXPR to this builtin is determined from @@ -3450,12 +5104,21 @@ vect_setup_realignment (tree stmt, block_stmt_iterator *bsi, gcc_assert (TREE_READONLY (builtin_decl)); } - /* 3. Create msq = phi in loop */ + if (alignment_support_scheme == dr_explicit_realign) + return msq; + + gcc_assert (!compute_in_loop); + gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized); + + + /* 5. Create msq = phi in loop */ + + pe = loop_preheader_edge (containing_loop); vec_dest = vect_create_destination_var (scalar_dest, vectype); msq = make_ssa_name (vec_dest, NULL_TREE); - phi_stmt = create_phi_node (msq, loop->header); + phi_stmt = create_phi_node (msq, containing_loop->header); SSA_NAME_DEF_STMT (msq) = phi_stmt; - add_phi_arg (phi_stmt, msq_init, loop_preheader_edge (loop)); + add_phi_arg (phi_stmt, msq_init, pe); return msq; } @@ -3482,7 +5145,7 @@ vect_strided_load_supported (tree vectype) return false; } - if (perm_even_optab->handlers[mode].insn_code == CODE_FOR_nothing) + if (optab_handler (perm_even_optab, mode)->insn_code == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "perm_even op not supported by target."); @@ -3497,7 +5160,7 @@ vect_strided_load_supported (tree vectype) return false; } - if (perm_odd_optab->handlers[mode].insn_code == CODE_FOR_nothing) + if (optab_handler (perm_odd_optab, mode)->insn_code == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "perm_odd op not supported by target."); @@ -3676,7 +5339,7 @@ vect_transform_strided_load (tree stmt, VEC(tree,heap) *dr_chain, int size, corresponds the order of data-refs in RESULT_CHAIN. */ next_stmt = first_stmt; gap_count = 1; - for (i = 0; VEC_iterate(tree, result_chain, i, tmp_data_ref); i++) + for (i = 0; VEC_iterate (tree, result_chain, i, tmp_data_ref); i++) { if (!next_stmt) break; @@ -3735,7 +5398,8 @@ vect_transform_strided_load (tree stmt, VEC(tree,heap) *dr_chain, int size, Return FALSE if not a vectorizable STMT, TRUE otherwise. 
*/ bool -vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) +vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt, + slp_tree slp_node) { tree scalar_dest; tree vec_dest = NULL; @@ -3745,13 +5409,15 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) stmt_vec_info prev_stmt_info; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + struct loop *containing_loop = (bb_for_stmt (stmt))->loop_father; + bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt); struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr; tree vectype = STMT_VINFO_VECTYPE (stmt_info); tree new_temp; int mode; tree new_stmt = NULL_TREE; tree dummy; - enum dr_alignment_support alignment_support_cheme; + enum dr_alignment_support alignment_support_scheme; tree dataref_ptr = NULL_TREE; tree ptr_incr; int nunits = TYPE_VECTOR_SUBPARTS (vectype); @@ -3760,10 +5426,31 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) tree msq = NULL_TREE, lsq; tree offset = NULL_TREE; tree realignment_token = NULL_TREE; - tree phi_stmt = NULL_TREE; + tree phi = NULL_TREE; VEC(tree,heap) *dr_chain = NULL; bool strided_load = false; tree first_stmt; + tree scalar_type; + bool inv_p; + bool compute_in_loop = false; + struct loop *at_loop; + int vec_num; + bool slp = (slp_node != NULL); + + /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies + this, so we can safely override NCOPIES with 1 here. */ + if (slp) + ncopies = 1; + + gcc_assert (ncopies >= 1); + + /* FORNOW. This restriction should be relaxed. */ + if (nested_in_vect_loop && ncopies > 1) + { + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "multiple types in nested loop."); + return false; + } if (!STMT_VINFO_RELEVANT_P (stmt_info)) return false; @@ -3790,17 +5477,18 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) op = GIMPLE_STMT_OPERAND (stmt, 1); if (TREE_CODE (op) != ARRAY_REF && TREE_CODE (op) != INDIRECT_REF - && !DR_GROUP_FIRST_DR (stmt_info)) + && !STMT_VINFO_STRIDED_ACCESS (stmt_info)) return false; if (!STMT_VINFO_DATA_REF (stmt_info)) return false; + scalar_type = TREE_TYPE (DR_REF (dr)); mode = (int) TYPE_MODE (vectype); /* FORNOW. In some cases can vectorize even if data-type not supported (e.g. - data copies). */ - if (mov_optab->handlers[mode].insn_code == CODE_FOR_nothing) + if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "Aligned load, but unsupported type."); @@ -3808,26 +5496,30 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) } /* Check if the load is a part of an interleaving chain. */ - if (DR_GROUP_FIRST_DR (stmt_info)) + if (STMT_VINFO_STRIDED_ACCESS (stmt_info)) { strided_load = true; + /* FORNOW */ + gcc_assert (! nested_in_vect_loop); /* Check if interleaving is supported. */ - if (!vect_strided_load_supported (vectype)) + if (!vect_strided_load_supported (vectype) + && !PURE_SLP_STMT (stmt_info) && !slp) return false; } if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = load_vec_info_type; + vect_model_load_cost (stmt_info, ncopies, NULL); return true; } - /** Transform. **/ - if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "transform load."); + /** Transform. 
**/ + if (strided_load) { first_stmt = DR_GROUP_FIRST_DR (stmt_info); @@ -3840,17 +5532,25 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt)); dr_chain = VEC_alloc (tree, heap, group_size); + + /* VEC_NUM is the number of vect stmts to be created for this group. */ + if (slp) + { + strided_load = false; + vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + } + else + vec_num = group_size; } else { first_stmt = stmt; first_dr = dr; - group_size = 1; + group_size = vec_num = 1; } - alignment_support_cheme = vect_supportable_dr_alignment (first_dr); - gcc_assert (alignment_support_cheme); - + alignment_support_scheme = vect_supportable_dr_alignment (first_dr); + gcc_assert (alignment_support_scheme); /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate @@ -3932,7 +5632,7 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) } Otherwise, the data reference is potentially unaligned on a target that - does not support unaligned accesses (dr_unaligned_software_pipeline) - + does not support unaligned accesses (dr_explicit_realign_optimized) - then generate the following code, in which the data in each iteration is obtained by two vector loads, one from the previous iteration, and one from the current iteration: @@ -3949,27 +5649,56 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) msq = lsq; } */ - if (alignment_support_cheme == dr_unaligned_software_pipeline) + /* If the misalignment remains the same throughout the execution of the + loop, we can create the init_addr and permutation mask at the loop + preheader. Otherwise, it needs to be created inside the loop. + This can only occur when vectorizing memory accesses in the inner-loop + nested within an outer-loop that is being vectorized. */ + + if (nested_in_vect_loop_p (loop, stmt) + && (TREE_INT_CST_LOW (DR_STEP (dr)) % UNITS_PER_SIMD_WORD != 0)) + { + gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized); + compute_in_loop = true; + } + + if ((alignment_support_scheme == dr_explicit_realign_optimized + || alignment_support_scheme == dr_explicit_realign) + && !compute_in_loop) { - msq = vect_setup_realignment (first_stmt, bsi, &realignment_token); - phi_stmt = SSA_NAME_DEF_STMT (msq); - offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1); + msq = vect_setup_realignment (first_stmt, bsi, &realignment_token, + alignment_support_scheme, NULL_TREE, + &at_loop); + if (alignment_support_scheme == dr_explicit_realign_optimized) + { + phi = SSA_NAME_DEF_STMT (msq); + offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1); + } } + else + at_loop = loop; prev_stmt_info = NULL; for (j = 0; j < ncopies; j++) { /* 1. Create the vector pointer update chain. */ if (j == 0) - dataref_ptr = vect_create_data_ref_ptr (first_stmt, bsi, offset, &dummy, - &ptr_incr, false, NULL_TREE); + dataref_ptr = vect_create_data_ref_ptr (first_stmt, + at_loop, offset, + &dummy, &ptr_incr, false, + NULL_TREE, &inv_p); else - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt); + dataref_ptr = + bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt, NULL_TREE); - for (i = 0; i < group_size; i++) + for (i = 0; i < vec_num; i++) { + if (i > 0) + dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt, + NULL_TREE); + /* 2. Create the vector-load in the loop. 
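[Editorial illustration, not part of the patch: a minimal standalone C model of the two-load realignment described above. Two aligned loads bracket the unaligned address and are merged according to the misalignment; a byte-wise shift-merge stands in for REALIGN_LOAD. The 16-byte width is an assumption, and the aligned over-read past the accessed bytes is assumed safe, as the real schemes also require.]

#include <stdint.h>
#include <string.h>

static void
realign_load_model (const unsigned char *p, unsigned char out[16])
{
  uintptr_t addr = (uintptr_t) p;
  uintptr_t floor_addr = addr & ~(uintptr_t) 15;       /* floor (p) */
  unsigned mis = (unsigned) (addr - floor_addr);       /* realignment token */
  unsigned char msq[16], lsq[16];

  memcpy (msq, (const void *) floor_addr, 16);         /* msq = *(floor (p)) */
  memcpy (lsq, (const void *) (floor_addr + 16), 16);  /* lsq = *(floor (p) + VS) */
  memcpy (out, msq + mis, 16 - mis);                   /* realign_load (msq, lsq, tok) */
  memcpy (out + (16 - mis), lsq, mis);
}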
*/
-	  switch (alignment_support_cheme)
+	  switch (alignment_support_scheme)
	    {
	    case dr_aligned:
	      gcc_assert (aligned_access_p (first_dr));
@@ -3980,14 +5709,39 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
		int mis = DR_MISALIGNMENT (first_dr);
		tree tmis = (mis == -1 ? size_zero_node : size_int (mis));

-		gcc_assert (!aligned_access_p (first_dr));
		tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
		data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
		break;
	      }
-	    case dr_unaligned_software_pipeline:
-	      gcc_assert (!aligned_access_p (first_dr));
+	    case dr_explicit_realign:
+	      {
+		tree ptr, bump;
+		tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
+
+		if (compute_in_loop)
+		  msq = vect_setup_realignment (first_stmt, bsi,
+						&realignment_token,
+						dr_explicit_realign,
+						dataref_ptr, NULL);
+
+		data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
+		vec_dest = vect_create_destination_var (scalar_dest, vectype);
+		new_stmt = build_gimple_modify_stmt (vec_dest, data_ref);
+		new_temp = make_ssa_name (vec_dest, new_stmt);
+		GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
+		vect_finish_stmt_generation (stmt, new_stmt, bsi);
+		copy_virtual_operands (new_stmt, stmt);
+		mark_symbols_for_renaming (new_stmt);
+		msq = new_temp;
+
+		bump = size_binop (MULT_EXPR, vs_minus_1,
+				   TYPE_SIZE_UNIT (scalar_type));
+		ptr = bump_vector_ptr (dataref_ptr, NULL_TREE, bsi, stmt, bump);
+		data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
+		break;
+	      }
+	    case dr_explicit_realign_optimized:
	      data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
	      break;
	    default:
@@ -3998,34 +5752,81 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
	  vect_finish_stmt_generation (stmt, new_stmt, bsi);
-	  copy_virtual_operands (new_stmt, stmt);
	  mark_symbols_for_renaming (new_stmt);

-	  /* 3. Handle explicit realignment if necessary/supported. */
-	  if (alignment_support_cheme == dr_unaligned_software_pipeline)
+	  /* 3. Handle explicit realignment if necessary/supported. Create in
+	     loop: vec_dest = realign_load (msq, lsq, realignment_token) */
+	  if (alignment_support_scheme == dr_explicit_realign_optimized
+	      || alignment_support_scheme == dr_explicit_realign)
	    {
-	      /* Create in loop:
-		 <vec_dest = realign_load (msq, lsq, realignment_token)> */
	      lsq = GIMPLE_STMT_OPERAND (new_stmt, 0);
	      if (!realignment_token)
		realignment_token = dataref_ptr;
	      vec_dest = vect_create_destination_var (scalar_dest, vectype);
-	      new_stmt =
-		build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq, realignment_token);
+	      new_stmt = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
+				 realignment_token);
	      new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
	      vect_finish_stmt_generation (stmt, new_stmt, bsi);
-	      if (i == group_size - 1 && j == ncopies - 1)
-		add_phi_arg (phi_stmt, lsq, loop_latch_edge (loop));
-	      msq = lsq;
+
+	      if (alignment_support_scheme == dr_explicit_realign_optimized)
+		{
+		  if (i == vec_num - 1 && j == ncopies - 1)
+		    add_phi_arg (phi, lsq, loop_latch_edge (containing_loop));
+		  msq = lsq;
+		}
	    }
+
+	  /* 4. Handle invariant-load.  */
+	  if (inv_p)
+	    {
+	      gcc_assert (!strided_load);
+	      gcc_assert (nested_in_vect_loop_p (loop, stmt));
+	      if (j == 0)
+		{
+		  int k;
+		  tree t = NULL_TREE;
+		  tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
+
+		  /* CHECKME: bitpos depends on endianness?
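[Editorial illustration, not part of the patch: a standalone C model of the invariant-load handling built here. The scalar is loaded once and replicated into every vector lane, which is what the CONSTRUCTOR of NUNITS copies of NEW_TEMP achieves. The v4sf type and the function name are assumptions of the sketch.]

typedef float v4sf __attribute__ ((vector_size (16)));

static v4sf
splat_invariant_model (const float *p)
{
  float s = *p;                   /* the single scalar load */
  return (v4sf) { s, s, s, s };   /* vec_inv: NUNITS copies of s */
}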
*/ + bitpos = bitsize_zero_node; + vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp, + bitsize, bitpos); + BIT_FIELD_REF_UNSIGNED (vec_inv) = + TYPE_UNSIGNED (scalar_type); + vec_dest = + vect_create_destination_var (scalar_dest, NULL_TREE); + new_stmt = build_gimple_modify_stmt (vec_dest, vec_inv); + new_temp = make_ssa_name (vec_dest, new_stmt); + GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; + vect_finish_stmt_generation (stmt, new_stmt, bsi); + + for (k = nunits - 1; k >= 0; --k) + t = tree_cons (NULL_TREE, new_temp, t); + /* FIXME: use build_constructor directly. */ + vec_inv = build_constructor_from_list (vectype, t); + new_temp = vect_init_vector (stmt, vec_inv, vectype, bsi); + new_stmt = SSA_NAME_DEF_STMT (new_temp); + } + else + gcc_unreachable (); /* FORNOW. */ } - if (strided_load) - VEC_quick_push (tree, dr_chain, new_temp); - if (i < group_size - 1) - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt); + + /* Collect vector loads and later create their permutation in + vect_transform_strided_load (). */ + if (strided_load) + VEC_quick_push (tree, dr_chain, new_temp); + + /* Store vector loads in the corresponding SLP_NODE. */ + if (slp) + VEC_quick_push (tree, SLP_TREE_VEC_STMTS (slp_node), new_stmt); } + /* FORNOW: SLP with multiple types is unsupported. */ + if (slp) + return true; + if (strided_load) { if (!vect_transform_strided_load (stmt, dr_chain, group_size, bsi)) @@ -4060,6 +5861,7 @@ vectorizable_live_operation (tree stmt, tree operation; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); int i; int op_type; tree op; @@ -4077,6 +5879,10 @@ vectorizable_live_operation (tree stmt, if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME) return false; + /* FORNOW. CHECKME. */ + if (nested_in_vect_loop_p (loop, stmt)) + return false; + operation = GIMPLE_STMT_OPERAND (stmt, 1); op_type = TREE_OPERAND_LENGTH (operation); @@ -4131,7 +5937,8 @@ vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo) if (!vect_is_simple_use (lhs, loop_vinfo, &lhs_def_stmt, &def, &dt)) return false; } - else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST) + else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST + && TREE_CODE (lhs) != FIXED_CST) return false; if (TREE_CODE (rhs) == SSA_NAME) @@ -4140,7 +5947,8 @@ vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo) if (!vect_is_simple_use (rhs, loop_vinfo, &rhs_def_stmt, &def, &dt)) return false; } - else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST) + else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST + && TREE_CODE (rhs) != FIXED_CST) return false; return true; @@ -4184,6 +5992,10 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def) return false; + /* FORNOW: SLP not supported. */ + if (STMT_SLP_TYPE (stmt_info)) + return false; + /* FORNOW: not yet supported. 
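[Editorial illustration, not part of the patch: a standalone C model of the computation a vectorized COND_EXPR performs, namely an element-wise select, one lane per scalar condition. The scalar loop stands in for the target's vector select; the names are assumptions of the sketch.]

static void
vec_cond_model (const int *a, const int *b, int *r, int n,
                int then_val, int else_val)
{
  /* One lane per scalar COND_EXPR: r[i] = a[i] < b[i] ? then : else.  */
  for (int i = 0; i < n; i++)
    r[i] = (a[i] < b[i]) ? then_val : else_val;
}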
*/ if (STMT_VINFO_LIVE_P (stmt_info)) { @@ -4221,7 +6033,8 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) return false; } else if (TREE_CODE (then_clause) != INTEGER_CST - && TREE_CODE (then_clause) != REAL_CST) + && TREE_CODE (then_clause) != REAL_CST + && TREE_CODE (then_clause) != FIXED_CST) return false; if (TREE_CODE (else_clause) == SSA_NAME) @@ -4232,7 +6045,8 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) return false; } else if (TREE_CODE (else_clause) != INTEGER_CST - && TREE_CODE (else_clause) != REAL_CST) + && TREE_CODE (else_clause) != REAL_CST + && TREE_CODE (else_clause) != FIXED_CST) return false; @@ -4272,12 +6086,14 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt) return true; } + /* Function vect_transform_stmt. Create a vectorized stmt to replace STMT, and insert it at BSI. */ -bool -vect_transform_stmt (tree stmt, block_stmt_iterator *bsi, bool *strided_store) +static bool +vect_transform_stmt (tree stmt, block_stmt_iterator *bsi, bool *strided_store, + slp_tree slp_node) { bool is_store = false; tree vec_stmt = NULL_TREE; @@ -4288,44 +6104,47 @@ vect_transform_stmt (tree stmt, block_stmt_iterator *bsi, bool *strided_store) switch (STMT_VINFO_TYPE (stmt_info)) { case type_demotion_vec_info_type: + gcc_assert (!slp_node); done = vectorizable_type_demotion (stmt, bsi, &vec_stmt); gcc_assert (done); break; case type_promotion_vec_info_type: + gcc_assert (!slp_node); done = vectorizable_type_promotion (stmt, bsi, &vec_stmt); gcc_assert (done); break; case type_conversion_vec_info_type: - done = vectorizable_conversion (stmt, bsi, &vec_stmt); + done = vectorizable_conversion (stmt, bsi, &vec_stmt, slp_node); gcc_assert (done); break; case induc_vec_info_type: + gcc_assert (!slp_node); done = vectorizable_induction (stmt, bsi, &vec_stmt); gcc_assert (done); break; case op_vec_info_type: - done = vectorizable_operation (stmt, bsi, &vec_stmt); + done = vectorizable_operation (stmt, bsi, &vec_stmt, slp_node); gcc_assert (done); break; case assignment_vec_info_type: - done = vectorizable_assignment (stmt, bsi, &vec_stmt); + done = vectorizable_assignment (stmt, bsi, &vec_stmt, slp_node); gcc_assert (done); break; case load_vec_info_type: - done = vectorizable_load (stmt, bsi, &vec_stmt); + done = vectorizable_load (stmt, bsi, &vec_stmt, slp_node); gcc_assert (done); break; case store_vec_info_type: - done = vectorizable_store (stmt, bsi, &vec_stmt); + done = vectorizable_store (stmt, bsi, &vec_stmt, slp_node); gcc_assert (done); - if (DR_GROUP_FIRST_DR (stmt_info)) + if (STMT_VINFO_STRIDED_ACCESS (stmt_info)) { /* In case of interleaving, the whole chain is vectorized when the last store in the chain is reached. Store stmts before the last @@ -4340,15 +6159,18 @@ vect_transform_stmt (tree stmt, block_stmt_iterator *bsi, bool *strided_store) break; case condition_vec_info_type: + gcc_assert (!slp_node); done = vectorizable_condition (stmt, bsi, &vec_stmt); gcc_assert (done); break; case call_vec_info_type: + gcc_assert (!slp_node); done = vectorizable_call (stmt, bsi, &vec_stmt); break; case reduc_vec_info_type: + gcc_assert (!slp_node); done = vectorizable_reduction (stmt, bsi, &vec_stmt); gcc_assert (done); break; @@ -4491,82 +6313,6 @@ vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo, } -/* Function update_vuses_to_preheader. - - Input: - STMT - a statement with potential VUSEs. - LOOP - the loop whose preheader will contain STMT. 
- - It's possible to vectorize a loop even though an SSA_NAME from a VUSE - appears to be defined in a VDEF in another statement in a loop. - One such case is when the VUSE is at the dereference of a __restricted__ - pointer in a load and the VDEF is at the dereference of a different - __restricted__ pointer in a store. Vectorization may result in - copy_virtual_uses being called to copy the problematic VUSE to a new - statement that is being inserted in the loop preheader. This procedure - is called to change the SSA_NAME in the new statement's VUSE from the - SSA_NAME updated in the loop to the related SSA_NAME available on the - path entering the loop. - - When this function is called, we have the following situation: - - # vuse - S1: vload - do { - # name1 = phi < name0 , name2> - - # vuse - S2: vload - - # name2 = vdef - S3: vstore - - }while... - - Stmt S1 was created in the loop preheader block as part of misaligned-load - handling. This function fixes the name of the vuse of S1 from 'name1' to - 'name0'. */ - -static void -update_vuses_to_preheader (tree stmt, struct loop *loop) -{ - basic_block header_bb = loop->header; - edge preheader_e = loop_preheader_edge (loop); - ssa_op_iter iter; - use_operand_p use_p; - - FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_VUSE) - { - tree ssa_name = USE_FROM_PTR (use_p); - tree def_stmt = SSA_NAME_DEF_STMT (ssa_name); - tree name_var = SSA_NAME_VAR (ssa_name); - basic_block bb = bb_for_stmt (def_stmt); - - /* For a use before any definitions, def_stmt is a NOP_EXPR. */ - if (!IS_EMPTY_STMT (def_stmt) - && flow_bb_inside_loop_p (loop, bb)) - { - /* If the block containing the statement defining the SSA_NAME - is in the loop then it's necessary to find the definition - outside the loop using the PHI nodes of the header. */ - tree phi; - bool updated = false; - - for (phi = phi_nodes (header_bb); phi; phi = PHI_CHAIN (phi)) - { - if (SSA_NAME_VAR (PHI_RESULT (phi)) == name_var) - { - SET_USE (use_p, PHI_ARG_DEF (phi, preheader_e->dest_idx)); - updated = true; - break; - } - } - gcc_assert (updated); - } - } -} - - /* Function vect_update_ivs_after_vectorizer. "Advance" the induction variables of LOOP to the value they should take @@ -4630,7 +6376,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters, tree evolution_part; tree init_expr; tree step_expr; - tree var, stmt, ni, ni_name; + tree var, ni, ni_name; block_stmt_iterator last_bsi; if (vect_print_dump_info (REPORT_DETAILS)) @@ -4669,23 +6415,29 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters, init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop->num)); - ni = fold_build2 (PLUS_EXPR, TREE_TYPE (init_expr), - fold_build2 (MULT_EXPR, TREE_TYPE (init_expr), - fold_convert (TREE_TYPE (init_expr), - niters), - step_expr), - init_expr); + if (POINTER_TYPE_P (TREE_TYPE (init_expr))) + ni = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (init_expr), + init_expr, + fold_convert (sizetype, + fold_build2 (MULT_EXPR, TREE_TYPE (niters), + niters, step_expr))); + else + ni = fold_build2 (PLUS_EXPR, TREE_TYPE (init_expr), + fold_build2 (MULT_EXPR, TREE_TYPE (init_expr), + fold_convert (TREE_TYPE (init_expr), + niters), + step_expr), + init_expr); + + var = create_tmp_var (TREE_TYPE (init_expr), "tmp"); add_referenced_var (var); - ni_name = force_gimple_operand (ni, &stmt, false, var); - - /* Insert stmt into exit_bb. 
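[Editorial illustration, not part of the patch: a standalone model of the NI expression built above. After the vectorized loop has covered NITERS scalar iterations, each induction variable must enter the epilog at init + niters * step; pointer IVs take the same step in bytes, which is the POINTER_PLUS_EXPR case. Names and types are assumptions of the sketch.]

static long
iv_exit_value_model (long init, long step, long niters)
{
  return init + niters * step;   /* ni = init_expr + niters * step_expr */
}

static char *
ptr_iv_exit_value_model (char *init, long step, long niters)
{
  return init + niters * step;   /* the POINTER_PLUS_EXPR variant */
}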
*/
      last_bsi = bsi_last (exit_bb);
-      if (stmt)
-        bsi_insert_before (&last_bsi, stmt, BSI_SAME_STMT);
-
+      ni_name = force_gimple_operand_bsi (&last_bsi, ni, false, var,
+					  true, BSI_SAME_STMT);
+
       /* Fix phi expressions in the successor bb.  */
       SET_PHI_ARG_DEF (phi1, update_e->dest_idx, ni_name);
     }
@@ -4712,6 +6464,8 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio)
   basic_block preheader;
   int loop_num;
   unsigned int th;
+  int min_scalar_loop_bound;
+  int min_profitable_iters;

   if (vect_print_dump_info (REPORT_DETAILS))
     fprintf (vect_dump, "=== vect_do_peeling_for_loop_bound ===");
@@ -4727,11 +6481,29 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio)
				   &ratio_mult_vf_name, ratio);

   loop_num  = loop->num;
-  /* Threshold for vectorized loop. */
-  th = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)) *
-			LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+
+  /* Analyze cost to set threshold for vectorized loop.  */
+  min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);
+  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
+                            * LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 1);
+
+  /* Use the cost model only if it is more conservative than the
+     user-specified threshold.  */
+
+  th = (unsigned) min_scalar_loop_bound;
+  if (min_profitable_iters
+      && (!min_scalar_loop_bound
+          || min_profitable_iters > min_scalar_loop_bound))
+    th = (unsigned) min_profitable_iters;
+
+  if (((LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
+       || !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
+      && vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "vectorization may not be profitable.");
+
   new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
-					    ratio_mult_vf_name, ni_name, false, th);
+					    ratio_mult_vf_name, ni_name, false,
+					    th);
   gcc_assert (new_loop);
   gcc_assert (loop_num == loop->num);
#ifdef ENABLE_CHECKING
@@ -4811,7 +6583,7 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
       int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
       int nelements = TYPE_VECTOR_SUBPARTS (vectype);

-      if (DR_GROUP_FIRST_DR (stmt_info))
+      if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
	{
	  /* For interleaved access element size must be multiplied by the size
	     of the interleaved group.  */
@@ -4835,8 +6607,8 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
  else
    {
      tree new_stmts = NULL_TREE;
-      tree start_addr =
-	vect_create_addr_base_for_vector_ref (dr_stmt, &new_stmts, NULL_TREE);
+      tree start_addr = vect_create_addr_base_for_vector_ref (dr_stmt,
+						&new_stmts, NULL_TREE, loop);
      tree ptr_type = TREE_TYPE (start_addr);
      tree size = TYPE_SIZE (ptr_type);
      tree type = lang_hooks.types.type_for_size (tree_low_cst (size, 1), 1);
@@ -4853,7 +6625,7 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)

      /* Create:  byte_misalign = addr & (vectype_size - 1)  */
      byte_misalign =
-        fold_build2 (BIT_AND_EXPR, type, start_addr, vectype_size_minus_1);
+        fold_build2 (BIT_AND_EXPR, type, fold_convert (type, start_addr), vectype_size_minus_1);

      /* Create:  elem_misalign = byte_misalign / element_size  */
      elem_misalign =
@@ -4916,8 +6688,8 @@ vect_update_init_of_dr (struct data_reference *dr, tree niters)
   NITERS iterations were peeled from the loop represented by LOOP_VINFO.
   This function updates the information recorded for the data references in
   the loop to account for the fact that the first NITERS iterations had
-  already been executed.
Specifically, it updates the initial_condition of the - access_function of all the data_references in the loop. */ + already been executed. Specifically, it updates the initial_condition of + the access_function of all the data_references in the loop. */ static void vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters) @@ -4926,7 +6698,7 @@ vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters) VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); struct data_reference *dr; - if (vect_dump && (dump_flags & TDF_DETAILS)) + if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "=== vect_update_inits_of_dr ==="); for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++) @@ -5009,6 +6781,7 @@ static tree vect_create_cond_for_align_checks (loop_vec_info loop_vinfo, tree *cond_expr_stmt_list) { + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); VEC(tree,heap) *may_misalign_stmts = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo); tree ref_stmt, tmp; @@ -5044,8 +6817,7 @@ vect_create_cond_for_align_checks (loop_vec_info loop_vinfo, /* create: addr_tmp = (int)(address_of_first_vector) */ addr_base = vect_create_addr_base_for_vector_ref (ref_stmt, - &new_stmt_list, - NULL_TREE); + &new_stmt_list, NULL_TREE, loop); if (new_stmt_list != NULL_TREE) append_to_statement_list_force (new_stmt_list, cond_expr_stmt_list); @@ -5099,6 +6871,348 @@ vect_create_cond_for_align_checks (loop_vec_info loop_vinfo, and_tmp_name, ptrsize_zero); } +/* Function vect_vfa_segment_size. + + Create an expression that computes the size of segment + that will be accessed for a data reference. The functions takes into + account that realignment loads may access one more vector. + + Input: + DR: The data reference. + VECT_FACTOR: vectorization factor. + + Return an expression whose value is the size of segment which will be + accessed by DR. */ + +static tree +vect_vfa_segment_size (struct data_reference *dr, tree vect_factor) +{ + tree segment_length = fold_build2 (MULT_EXPR, integer_type_node, + DR_STEP (dr), vect_factor); + + if (vect_supportable_dr_alignment (dr) == dr_explicit_realign_optimized) + { + tree vector_size = TYPE_SIZE_UNIT + (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)))); + + segment_length = fold_build2 (PLUS_EXPR, integer_type_node, + segment_length, vector_size); + } + return fold_convert (sizetype, segment_length); +} + +/* Function vect_create_cond_for_alias_checks. + + Create a conditional expression that represents the run-time checks for + overlapping of address ranges represented by a list of data references + relations passed as input. + + Input: + COND_EXPR - input conditional expression. New conditions will be chained + with logical and operation. + LOOP_VINFO - field LOOP_VINFO_MAY_ALIAS_STMTS contains the list of ddrs + to be checked. + + Output: + COND_EXPR - conditional expression. + COND_EXPR_STMT_LIST - statements needed to construct the conditional + expression. + + + The returned value is the conditional expression to be used in the if + statement that controls which version of the loop gets executed at runtime. 
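[Editorial illustration, not part of the patch: a standalone C model of one run-time alias test in the conjunction above. The segment lengths are those computed by vect_vfa_segment_size, that is DR_STEP times the vectorization factor, plus one vector for realigning loads; the strict '<' matches the expression in the comment. Names are assumptions of the sketch.]

#include <stdbool.h>
#include <stdint.h>

static bool
segments_independent_model (uintptr_t addr_a, uintptr_t seg_len_a,
                            uintptr_t addr_b, uintptr_t seg_len_b)
{
  /* ((a + len_a) < b) || ((b + len_b) < a)  */
  return addr_a + seg_len_a < addr_b || addr_b + seg_len_b < addr_a;
}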
+*/ + +static void +vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo, + tree * cond_expr, + tree * cond_expr_stmt_list) +{ + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + VEC (ddr_p, heap) * may_alias_ddrs = + LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo); + tree vect_factor = + build_int_cst (integer_type_node, LOOP_VINFO_VECT_FACTOR (loop_vinfo)); + + ddr_p ddr; + unsigned int i; + tree part_cond_expr; + + /* Create expression + ((store_ptr_0 + store_segment_length_0) < load_ptr_0) + || (load_ptr_0 + load_segment_length_0) < store_ptr_0)) + && + ... + && + ((store_ptr_n + store_segment_length_n) < load_ptr_n) + || (load_ptr_n + load_segment_length_n) < store_ptr_n)) */ + + if (VEC_empty (ddr_p, may_alias_ddrs)) + return; + + for (i = 0; VEC_iterate (ddr_p, may_alias_ddrs, i, ddr); i++) + { + struct data_reference *dr_a, *dr_b; + tree dr_group_first_a, dr_group_first_b; + tree addr_base_a, addr_base_b; + tree segment_length_a, segment_length_b; + tree stmt_a, stmt_b; + + dr_a = DDR_A (ddr); + stmt_a = DR_STMT (DDR_A (ddr)); + dr_group_first_a = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_a)); + if (dr_group_first_a) + { + stmt_a = dr_group_first_a; + dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a)); + } + + dr_b = DDR_B (ddr); + stmt_b = DR_STMT (DDR_B (ddr)); + dr_group_first_b = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_b)); + if (dr_group_first_b) + { + stmt_b = dr_group_first_b; + dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b)); + } + + addr_base_a = + vect_create_addr_base_for_vector_ref (stmt_a, cond_expr_stmt_list, + NULL_TREE, loop); + addr_base_b = + vect_create_addr_base_for_vector_ref (stmt_b, cond_expr_stmt_list, + NULL_TREE, loop); + + segment_length_a = vect_vfa_segment_size (dr_a, vect_factor); + segment_length_b = vect_vfa_segment_size (dr_b, vect_factor); + + if (vect_print_dump_info (REPORT_DR_DETAILS)) + { + fprintf (vect_dump, + "create runtime check for data references "); + print_generic_expr (vect_dump, DR_REF (dr_a), TDF_SLIM); + fprintf (vect_dump, " and "); + print_generic_expr (vect_dump, DR_REF (dr_b), TDF_SLIM); + } + + + part_cond_expr = + fold_build2 (TRUTH_OR_EXPR, boolean_type_node, + fold_build2 (LT_EXPR, boolean_type_node, + fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (addr_base_a), + addr_base_a, + segment_length_a), + addr_base_b), + fold_build2 (LT_EXPR, boolean_type_node, + fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (addr_base_b), + addr_base_b, + segment_length_b), + addr_base_a)); + + if (*cond_expr) + *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, + *cond_expr, part_cond_expr); + else + *cond_expr = part_cond_expr; + } + if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS)) + fprintf (vect_dump, "created %u versioning for alias checks.\n", + VEC_length (ddr_p, may_alias_ddrs)); + +} + +/* Function vect_loop_versioning. + + If the loop has data references that may or may not be aligned or/and + has data reference relations whose independence was not proven then + two versions of the loop need to be generated, one which is vectorized + and one which isn't. A test is then generated to control which of the + loops is executed. The test checks for the alignment of all of the + data references that may or may not be aligned. An additional + sequence of runtime tests is generated for each pairs of DDRs whose + independence was not proven. The vectorized version of loop is + executed only if both alias and alignment tests are passed. 
*/ + +static void +vect_loop_versioning (loop_vec_info loop_vinfo) +{ + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + struct loop *nloop; + tree cond_expr = NULL_TREE; + tree cond_expr_stmt_list = NULL_TREE; + basic_block condition_bb; + block_stmt_iterator cond_exp_bsi; + basic_block merge_bb; + basic_block new_exit_bb; + edge new_exit_e, e; + tree orig_phi, new_phi, arg; + unsigned prob = 4 * REG_BR_PROB_BASE / 5; + tree gimplify_stmt_list; + + if (!VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) + && !VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) + return; + + if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))) + cond_expr = + vect_create_cond_for_align_checks (loop_vinfo, &cond_expr_stmt_list); + + if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) + vect_create_cond_for_alias_checks (loop_vinfo, &cond_expr, &cond_expr_stmt_list); + + cond_expr = + fold_build2 (NE_EXPR, boolean_type_node, cond_expr, integer_zero_node); + cond_expr = + force_gimple_operand (cond_expr, &gimplify_stmt_list, true, + NULL_TREE); + append_to_statement_list (gimplify_stmt_list, &cond_expr_stmt_list); + + initialize_original_copy_tables (); + nloop = loop_version (loop, cond_expr, &condition_bb, + prob, prob, REG_BR_PROB_BASE - prob, true); + free_original_copy_tables(); + + /* Loop versioning violates an assumption we try to maintain during + vectorization - that the loop exit block has a single predecessor. + After versioning, the exit block of both loop versions is the same + basic block (i.e. it has two predecessors). Just in order to simplify + following transformations in the vectorizer, we fix this situation + here by adding a new (empty) block on the exit-edge of the loop, + with the proper loop-exit phis to maintain loop-closed-form. */ + + merge_bb = single_exit (loop)->dest; + gcc_assert (EDGE_COUNT (merge_bb->preds) == 2); + new_exit_bb = split_edge (single_exit (loop)); + new_exit_e = single_exit (loop); + e = EDGE_SUCC (new_exit_bb, 0); + + for (orig_phi = phi_nodes (merge_bb); orig_phi; + orig_phi = PHI_CHAIN (orig_phi)) + { + new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)), + new_exit_bb); + arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e); + add_phi_arg (new_phi, arg, new_exit_e); + SET_PHI_ARG_DEF (orig_phi, e->dest_idx, PHI_RESULT (new_phi)); + } + + /* End loop-exit-fixes after versioning. */ + + update_ssa (TODO_update_ssa); + if (cond_expr_stmt_list) + { + cond_exp_bsi = bsi_last (condition_bb); + bsi_insert_before (&cond_exp_bsi, cond_expr_stmt_list, BSI_SAME_STMT); + } +} + +/* Remove a group of stores (for SLP or interleaving), free their + stmt_vec_info. */ + +static void +vect_remove_stores (tree first_stmt) +{ + stmt_ann_t ann; + tree next = first_stmt; + tree tmp; + stmt_vec_info next_stmt_info; + block_stmt_iterator next_si; + + while (next) + { + /* Free the attached stmt_vec_info and remove the stmt. */ + next_si = bsi_for_stmt (next); + bsi_remove (&next_si, true); + next_stmt_info = vinfo_for_stmt (next); + ann = stmt_ann (next); + tmp = DR_GROUP_NEXT_DR (next_stmt_info); + free (next_stmt_info); + set_stmt_info (ann, NULL); + next = tmp; + } +} + + +/* Vectorize SLP instance tree in postorder. 
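[Editorial illustration, not part of the patch: a standalone C model of the postorder walk below. The sub-trees that define a node's operands are vectorized before the node that consumes them. The struct and names are hypothetical stand-ins for slp_tree.]

struct slp_node_model
{
  struct slp_node_model *left, *right;   /* operand sub-trees */
};

static void
schedule_postorder_model (struct slp_node_model *node,
                          void (*emit) (struct slp_node_model *))
{
  if (!node)
    return;
  schedule_postorder_model (node->left, emit);
  schedule_postorder_model (node->right, emit);
  emit (node);   /* vector stmts for NODE, after its operands exist */
}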
*/ + +static bool +vect_schedule_slp_instance (slp_tree node, unsigned int vec_stmts_size) +{ + tree stmt; + bool strided_store, is_store; + block_stmt_iterator si; + stmt_vec_info stmt_info; + + if (!node) + return false; + + vect_schedule_slp_instance (SLP_TREE_LEFT (node), vec_stmts_size); + vect_schedule_slp_instance (SLP_TREE_RIGHT (node), vec_stmts_size); + + stmt = VEC_index(tree, SLP_TREE_SCALAR_STMTS (node), 0); + stmt_info = vinfo_for_stmt (stmt); + SLP_TREE_VEC_STMTS (node) = VEC_alloc (tree, heap, vec_stmts_size); + SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size; + + if (vect_print_dump_info (REPORT_DETAILS)) + { + fprintf (vect_dump, "------>vectorizing SLP node starting from: "); + print_generic_expr (vect_dump, stmt, TDF_SLIM); + } + + si = bsi_for_stmt (stmt); + is_store = vect_transform_stmt (stmt, &si, &strided_store, node); + if (is_store) + { + if (DR_GROUP_FIRST_DR (stmt_info)) + /* If IS_STORE is TRUE, the vectorization of the + interleaving chain was completed - free all the stores in + the chain. */ + vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info)); + else + /* FORNOW: SLP originates only from strided stores. */ + gcc_unreachable (); + + return true; + } + + /* FORNOW: SLP originates only from strided stores. */ + return false; +} + + +static bool +vect_schedule_slp (loop_vec_info loop_vinfo, unsigned int nunits) +{ + VEC (slp_instance, heap) *slp_instances = + LOOP_VINFO_SLP_INSTANCES (loop_vinfo); + slp_instance instance; + unsigned int vec_stmts_size; + unsigned int group_size, i; + unsigned int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); + bool is_store = false; + + for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++) + { + group_size = SLP_INSTANCE_GROUP_SIZE (instance); + /* For each SLP instance calculate number of vector stmts to be created + for the scalar stmts in each node of the SLP tree. Number of vector + elements in one vector iteration is the number of scalar elements in + one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector + size. */ + vec_stmts_size = vectorization_factor * group_size / nunits; + + /* Schedule the tree of INSTANCE. */ + is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance), + vec_stmts_size); + + if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS) + || vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) + fprintf (vect_dump, "vectorizing stmts using SLP."); + } + + return is_store; +} /* Function vect_transform_loop. @@ -5117,66 +7231,12 @@ vect_transform_loop (loop_vec_info loop_vinfo) tree ratio = NULL; int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); bool strided_store; + bool slp_scheduled = false; + unsigned int nunits; if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "=== vec_transform_loop ==="); - - /* If the loop has data references that may or may not be aligned then - two versions of the loop need to be generated, one which is vectorized - and one which isn't. A test is then generated to control which of the - loops is executed. The test checks for the alignment of all of the - data references that may or may not be aligned. 
*/ - - if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))) - { - struct loop *nloop; - tree cond_expr; - tree cond_expr_stmt_list = NULL_TREE; - basic_block condition_bb; - block_stmt_iterator cond_exp_bsi; - basic_block merge_bb; - basic_block new_exit_bb; - edge new_exit_e, e; - tree orig_phi, new_phi, arg; - unsigned prob = 4 * REG_BR_PROB_BASE / 5; - - cond_expr = vect_create_cond_for_align_checks (loop_vinfo, - &cond_expr_stmt_list); - initialize_original_copy_tables (); - nloop = loop_version (loop, cond_expr, &condition_bb, - prob, prob, REG_BR_PROB_BASE - prob, true); - free_original_copy_tables(); - - /** Loop versioning violates an assumption we try to maintain during - vectorization - that the loop exit block has a single predecessor. - After versioning, the exit block of both loop versions is the same - basic block (i.e. it has two predecessors). Just in order to simplify - following transformations in the vectorizer, we fix this situation - here by adding a new (empty) block on the exit-edge of the loop, - with the proper loop-exit phis to maintain loop-closed-form. **/ - - merge_bb = single_exit (loop)->dest; - gcc_assert (EDGE_COUNT (merge_bb->preds) == 2); - new_exit_bb = split_edge (single_exit (loop)); - new_exit_e = single_exit (loop); - e = EDGE_SUCC (new_exit_bb, 0); - - for (orig_phi = phi_nodes (merge_bb); orig_phi; - orig_phi = PHI_CHAIN (orig_phi)) - { - new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)), - new_exit_bb); - arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e); - add_phi_arg (new_phi, arg, new_exit_e); - SET_PHI_ARG_DEF (orig_phi, e->dest_idx, PHI_RESULT (new_phi)); - } - - /** end loop-exit-fixes after versioning **/ - - update_ssa (TODO_update_ssa); - cond_exp_bsi = bsi_last (condition_bb); - bsi_insert_before (&cond_exp_bsi, cond_expr_stmt_list, BSI_SAME_STMT); - } + vect_loop_versioning (loop_vinfo); /* CHECKME: we wouldn't need this if we called update_ssa once for all loops. */ @@ -5232,6 +7292,7 @@ vect_transform_loop (loop_vec_info loop_vinfo) stmt_info = vinfo_for_stmt (phi); if (!stmt_info) continue; + if (!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) continue; @@ -5245,7 +7306,7 @@ vect_transform_loop (loop_vec_info loop_vinfo) { if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "transform phi."); - vect_transform_stmt (phi, NULL, NULL); + vect_transform_stmt (phi, NULL, NULL, NULL); } } @@ -5259,8 +7320,18 @@ vect_transform_loop (loop_vec_info loop_vinfo) fprintf (vect_dump, "------>vectorizing statement: "); print_generic_expr (vect_dump, stmt, TDF_SLIM); } + stmt_info = vinfo_for_stmt (stmt); - gcc_assert (stmt_info); + + /* vector stmts created in the outer-loop during vectorization of + stmts in an inner-loop may not have a stmt_info, and do not + need to be vectorized. 
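[Editorial note, not part of the patch: a worked instance of the VEC_STMTS_SIZE formula in vect_schedule_slp above, with illustrative numbers. For GROUP_SIZE = 8 scalar stmts per iteration, a vectorization factor VF = 4, and NUNITS = 4 vector elements, vec_stmts_size = VF * GROUP_SIZE / NUNITS = 4 * 8 / 4 = 8 vector stmts per SLP node.]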
*/ + if (!stmt_info) + { + bsi_next (&si); + continue; + } + if (!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) { @@ -5269,21 +7340,56 @@ vect_transform_loop (loop_vec_info loop_vinfo) } gcc_assert (STMT_VINFO_VECTYPE (stmt_info)); - if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)) - != (unsigned HOST_WIDE_INT) vectorization_factor) - && vect_print_dump_info (REPORT_DETAILS)) - fprintf (vect_dump, "multiple-types."); + nunits = + (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)); + if (!STMT_SLP_TYPE (stmt_info) + && nunits != (unsigned int) vectorization_factor + && vect_print_dump_info (REPORT_DETAILS)) + /* For SLP VF is set according to unrolling factor, and not to + vector size, hence for SLP this print is not valid. */ + fprintf (vect_dump, "multiple-types."); + + /* SLP. Schedule all the SLP instances when the first SLP stmt is + reached. */ + if (STMT_SLP_TYPE (stmt_info)) + { + if (!slp_scheduled) + { + slp_scheduled = true; + + if (vect_print_dump_info (REPORT_DETAILS)) + fprintf (vect_dump, "=== scheduling SLP instances ==="); + + is_store = vect_schedule_slp (loop_vinfo, nunits); + /* IS_STORE is true if STMT is a store. Stores cannot be of + hybrid SLP type. They are removed in + vect_schedule_slp_instance and their vinfo is destroyed. */ + if (is_store) + { + bsi_next (&si); + continue; + } + } + + /* Hybrid SLP stmts must be vectorized in addition to SLP. */ + if (PURE_SLP_STMT (stmt_info)) + { + bsi_next (&si); + continue; + } + } + /* -------- vectorize statement ------------ */ if (vect_print_dump_info (REPORT_DETAILS)) fprintf (vect_dump, "transform statement."); strided_store = false; - is_store = vect_transform_stmt (stmt, &si, &strided_store); + is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL); if (is_store) { stmt_ann_t ann; - if (DR_GROUP_FIRST_DR (stmt_info)) + if (STMT_VINFO_STRIDED_ACCESS (stmt_info)) { /* Interleaving. If IS_STORE is TRUE, the vectorization of the interleaving chain was completed - free all the stores in @@ -5332,4 +7438,6 @@ vect_transform_loop (loop_vec_info loop_vinfo) if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS)) fprintf (vect_dump, "LOOP VECTORIZED."); + if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOOPS)) + fprintf (vect_dump, "OUTER LOOP VECTORIZED."); }