/* Transformation Utilities for Loop Vectorization.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "tree-data-ref.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "tree-pass.h"
/* Utility functions for the code transformation.  */
static bool vect_transform_stmt (gimple, gimple_stmt_iterator *, bool *,
                                 slp_tree, slp_instance);
static tree vect_create_destination_var (tree, tree);
static tree vect_create_data_ref_ptr
  (gimple, struct loop*, tree, tree *, gimple *, bool, bool *, tree);
static tree vect_create_addr_base_for_vector_ref
  (gimple, gimple_seq *, tree, struct loop *);
static tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
static tree vect_get_vec_def_for_operand (tree, gimple, tree *);
static tree vect_init_vector (gimple, tree, tree, gimple_stmt_iterator *);
static void vect_finish_stmt_generation
  (gimple stmt, gimple vec_stmt, gimple_stmt_iterator *);
static bool vect_is_simple_cond (tree, loop_vec_info);
static void vect_create_epilog_for_reduction
  (tree, gimple, int, enum tree_code, gimple);
static tree get_initial_def_for_reduction (gimple, tree, tree *);
/* Utility functions dealing with loop peeling (not peeling itself).  */
static void vect_generate_tmps_on_preheader
  (loop_vec_info, tree *, tree *, tree *);
static tree vect_build_loop_niters (loop_vec_info);
static void vect_update_ivs_after_vectorizer (loop_vec_info, tree, edge);
static tree vect_gen_niters_for_prolog_loop (loop_vec_info, tree);
static void vect_update_init_of_dr (struct data_reference *, tree niters);
static void vect_update_inits_of_drs (loop_vec_info, tree);
static int vect_min_worthwhile_factor (enum tree_code);
static int
cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case load_vec_info_type:
      return TARG_SCALAR_LOAD_COST;
    case store_vec_info_type:
      return TARG_SCALAR_STORE_COST;
    case op_vec_info_type:
    case condition_vec_info_type:
    case assignment_vec_info_type:
    case reduc_vec_info_type:
    case induc_vec_info_type:
    case type_promotion_vec_info_type:
    case type_demotion_vec_info_type:
    case type_conversion_vec_info_type:
    case call_vec_info_type:
      return TARG_SCALAR_STMT_COST;
    case undef_vec_info_type:
    default:
      gcc_unreachable ();
    }
}
/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.

   TODO: Take profile info into account before making vectorization
   decisions, if available.  */
static int
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
{
  int i;
  int min_profitable_iters;
  int peel_iters_prologue;
  int peel_iters_epilogue;
  int vec_inside_cost = 0;
  int vec_outside_cost = 0;
  int scalar_single_iter_cost = 0;
  int scalar_outside_cost = 0;
  bool runtime_test = false;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
  int peel_guard_costs = 0;
  int innerloop_iters = 0, factor;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;
  /* Cost model disabled.  */
  if (!flag_vect_cost_model)
    {
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model disabled.");
      return 0;
    }
  /* If the number of iterations is unknown, or the
     peeling-for-misalignment amount is unknown, we will have to generate
     a runtime test to test the loop count against the threshold.  */
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || (byte_misalign < 0))
    runtime_test = true;
  /* Requires loop versioning tests to handle misalignment.  */
  if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      vec_outside_cost +=
        VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: Adding cost of checks for loop "
                 "versioning to treat misalignment.\n");
    }

  /* Requires loop versioning tests to handle aliasing.  */
  if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      vec_outside_cost +=
        VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: Adding cost of checks for loop "
                 "versioning aliasing.\n");
    }

  if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
      || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
    vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  /* FORNOW.  */
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      if (bb->loop_father == loop->inner)
        factor = innerloop_iters;
      else
        factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
          /* Skip stmts that are not vectorized inside the loop.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && (!STMT_VINFO_LIVE_P (stmt_info)
                  || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
            continue;
          scalar_single_iter_cost += cost_for_stmt (stmt) * factor;
          vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
          /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
             some of the "outside" costs are generated inside the outer-loop.  */
          vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
        }
    }
  /* Add additional cost for the peeled instructions in prologue and epilogue
     loop.

     FORNOW: If we don't know the value of peel_iters for prologue or epilogue
     at compile-time - we assume it's vf/2 (the worst would be vf-1).

     TODO: Build an expression that represents peel_iters for prologue and
     epilogue to be used in a run-time test.  */

  if (byte_misalign < 0)
    {
      peel_iters_prologue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "prologue peel iters set to vf/2.");

      /* If peeling for alignment is unknown, the loop bound of the main loop
         becomes unknown.  */
      peel_iters_epilogue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "epilogue peel iters set to vf/2 because "
                 "peeling for alignment is unknown.");

      /* If peeled iterations are unknown, count a taken branch and a not taken
         branch per peeled loop.  Even if scalar loop iterations are known,
         vector iterations are not known since peeled prologue iterations are
         not known.  Hence guards remain the same.  */
      peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
                               + TARG_COND_NOT_TAKEN_BRANCH_COST);
    }
  else
    {
      if (byte_misalign)
        {
          struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
          int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
          tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
          int nelements = TYPE_VECTOR_SUBPARTS (vectype);

          peel_iters_prologue = nelements - (byte_misalign / element_size);
        }
      else
        peel_iters_prologue = 0;

      if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
        {
          peel_iters_epilogue = vf/2;
          if (vect_print_dump_info (REPORT_COST))
            fprintf (vect_dump, "cost model: "
                     "epilogue peel iters set to vf/2 because "
                     "loop iterations are unknown.");

          /* If peeled iterations are known but the number of scalar loop
             iterations is unknown, count a taken branch per peeled loop.  */
          peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST;
        }
      else
        {
          int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
          peel_iters_prologue = niters < peel_iters_prologue ?
                                niters : peel_iters_prologue;
          peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
        }
    }
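  /* For illustration (the numbers are hypothetical, not from any target):
     with V8HI vectors (vf = nelements = 8, element_size = 2) and a known
     byte_misalign of 6, the prologue peels 8 - 6/2 = 5 scalar iterations
     to reach an aligned address.  If niters = 100 is also known, the
     epilogue then peels (100 - 5) % 8 = 7 leftover iterations.  */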
  vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
                      + (peel_iters_epilogue * scalar_single_iter_cost)
                      + peel_guard_costs;
  /* FORNOW: The scalar outside cost is incremented in one of the
     following ways:

     1. The vectorizer checks for alignment and aliasing and generates
     a condition that allows dynamic vectorization.  A cost model
     check is ANDed with the versioning condition.  Hence scalar code
     path now has the added cost of the versioning check.

       if (cost > th & versioning_check)
         jmp to vector code

     Hence run-time scalar is incremented by not-taken branch cost.

     2. The vectorizer then checks if a prologue is required.  If the
     cost model check was not done before during versioning, it has to
     be done before the prologue check.

       if (cost <= th)
         prologue = scalar_iters
       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit

     Hence the run-time scalar cost is incremented by a taken branch,
     plus a not-taken branch, plus a taken branch cost.

     3. The vectorizer then checks if an epilogue is required.  If the
     cost model check was not done before during the prologue check, it
     has to be done with the epilogue check.

       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit
       vector code:
         if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
           jmp to epilogue

     Hence the run-time scalar cost should be incremented by 2 taken
     branches.

     TODO: The back end may reorder the BBs differently and reverse
     conditions/branch directions.  Change the estimates below to
     something more reasonable.  */
  if (runtime_test)
    {
      /* Cost model check occurs at versioning.  */
      if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
          || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
        scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
      else
        {
          /* Cost model check occurs at prologue generation.  */
          if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
            scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST
                                   + TARG_COND_NOT_TAKEN_BRANCH_COST;
          /* Cost model check occurs at epilogue generation.  */
          else
            scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST;
        }
    }
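  /* For illustration, with the hypothetical branch costs
     TARG_COND_TAKEN_BRANCH_COST = 3 and TARG_COND_NOT_TAKEN_BRANCH_COST = 1
     (actual values are target-defined), the scalar path pays 1 when the
     cost check is folded into the versioning condition, 2*3 + 1 = 7 when
     it guards the prologue, and 2*3 = 6 when it guards the epilogue.  */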
  /* Add SLP costs.  */
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
    {
      vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
      vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
    }
  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:
     SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
     where
     SIC = scalar iteration cost, VIC = vector iteration cost,
     VOC = vector outside cost, VF = vectorization factor,
     PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
     SOC = scalar outside cost for run time cost model check.  */
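  /* A worked example with made-up costs (not from any real target):
     SIC = 4, VIC = 6, VF = 4, VOC = 20, SOC = 6, PL_ITERS = EP_ITERS = 2.
     Solving the condition above for niters gives
       niters > ((VOC - SOC) * VF - VIC * (PL_ITERS + EP_ITERS))
                / (SIC * VF - VIC)
              = (56 - 24) / (16 - 6) = 3.2,
     so the vector loop starts paying off at 4 iterations.  The code below
     computes 32 / 10 = 3 in integer arithmetic and then corrects the
     truncation by incrementing to 4.  */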
  if ((scalar_single_iter_cost * vf) > vec_inside_cost)
    {
      if (vec_outside_cost <= 0)
        min_profitable_iters = 1;
      else
        {
          min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
                                  - vec_inside_cost * peel_iters_prologue
                                  - vec_inside_cost * peel_iters_epilogue)
                                 / ((scalar_single_iter_cost * vf)
                                    - vec_inside_cost);

          if ((scalar_single_iter_cost * vf * min_profitable_iters)
              <= ((vec_inside_cost * min_profitable_iters)
                  + ((vec_outside_cost - scalar_outside_cost) * vf)))
            min_profitable_iters++;
        }
    }
  /* vector version will never be profitable.  */
  else
    {
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: vector iteration cost = %d "
                 "is divisible by scalar iteration cost = %d by a factor "
                 "greater than or equal to the vectorization factor = %d.",
                 vec_inside_cost, scalar_single_iter_cost, vf);
      return -1;
    }
  if (vect_print_dump_info (REPORT_COST))
    {
      fprintf (vect_dump, "Cost model analysis: \n");
      fprintf (vect_dump, "  Vector inside of loop cost: %d\n",
               vec_inside_cost);
      fprintf (vect_dump, "  Vector outside of loop cost: %d\n",
               vec_outside_cost);
      fprintf (vect_dump, "  Scalar iteration cost: %d\n",
               scalar_single_iter_cost);
      fprintf (vect_dump, "  Scalar outside cost: %d\n", scalar_outside_cost);
      fprintf (vect_dump, "  prologue iterations: %d\n",
               peel_iters_prologue);
      fprintf (vect_dump, "  epilogue iterations: %d\n",
               peel_iters_epilogue);
      fprintf (vect_dump, "  Calculated minimum iters for profitability: %d\n",
               min_profitable_iters);
    }
  min_profitable_iters =
        min_profitable_iters < vf ? vf : min_profitable_iters;

  /* Because the condition we create is:
     if (niters <= min_profitable_iters)
       then skip the vectorized loop.  */
  min_profitable_iters--;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "  Profitability threshold = %d\n",
             min_profitable_iters);

  return min_profitable_iters;
}
/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
   functions.  Design better to avoid maintenance issues.  */

/* Function vect_model_reduction_cost.

   Models cost for a reduction operation, including the vector ops
   generated within the strip-mine loop, the initial definition before
   the loop, and the epilogue code that must be generated.  */
static bool
vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
                           int ncopies)
{
  int outer_cost = 0;
  enum tree_code code;
  optab optab;
  tree vectype;
  tree reduction_op;
  gimple stmt, orig_stmt;
  enum machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Cost of reduction op inside loop.  */
  STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST;

  stmt = STMT_VINFO_STMT (stmt_info);

  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
      reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
      break;
    case GIMPLE_UNARY_RHS:
      reduction_op = gimple_assign_rhs1 (stmt);
      break;
    case GIMPLE_BINARY_RHS:
      reduction_op = gimple_assign_rhs2 (stmt);
      break;
    default:
      gcc_unreachable ();
    }

  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
  if (!vectype)
    {
      if (vect_print_dump_info (REPORT_COST))
        {
          fprintf (vect_dump, "unsupported data-type ");
          print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
        }
      return false;
    }
  mode = TYPE_MODE (vectype);
  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    orig_stmt = STMT_VINFO_STMT (stmt_info);

  code = gimple_assign_rhs_code (orig_stmt);

  /* Add in cost for initial definition.  */
  outer_cost += TARG_SCALAR_TO_VEC_COST;
  /* Determine cost of epilogue code.

     We have a reduction operator that will reduce the vector in one statement.
     Also requires scalar extract.  */

  if (!nested_in_vect_loop_p (loop, orig_stmt))
    {
      if (reduc_code < NUM_TREE_CODES)
        outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
      else
        {
          int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
          tree bitsize =
            TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
          int element_bitsize = tree_low_cst (bitsize, 1);
          int nelements = vec_size_in_bits / element_bitsize;

          optab = optab_for_tree_code (code, vectype, optab_default);

          /* We have a whole vector shift available.  */
          if (VECTOR_MODE_P (mode)
              && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing
              && optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
            /* Final reduction via vector shifts and the reduction operator.  Also
               requires scalar extract.  */
            outer_cost += ((exact_log2(nelements) * 2) * TARG_VEC_STMT_COST
                           + TARG_VEC_TO_SCALAR_COST);
          else
            /* Use extracts and reduction op for final reduction.  For N elements,
               we have N extracts and N-1 reduction ops.  */
            outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
        }
    }
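  /* For illustration: reducing a V4SI vector (nelements = 4) with the
     whole-vector-shift scheme costs exact_log2(4) * 2 = 4 vector stmts
     plus one extract, whereas the extract-based fallback costs
     4 + 3 = 7 vector stmts (per-stmt unit costs come from the target's
     TARG_VEC_* macros).  */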
  STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
             "outside_cost = %d.", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
             STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));

  return true;
}
/* Function vect_model_induction_cost.

   Models cost for induction operations.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
{
  /* loop cost for vec_loop.  */
  STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
  /* prologue cost for vec_init and vec_step.  */
  STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
             "outside_cost = %d.", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
             STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt, slp_tree slp_node)
{
  int i;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * TARG_VEC_STMT_COST;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_invariant_def)
        outside_cost += TARG_SCALAR_TO_VEC_COST;
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
             "outside_cost = %d.", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Function vect_cost_strided_group_size

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */
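/* For example, for a group of four interleaved stores to a[4*i],
   a[4*i+1], a[4*i+2], a[4*i+3], the call for the first store returns 4
   and the calls for the remaining three return 1, so the group's permute
   overhead is charged exactly once.  */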
static int
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return DR_GROUP_SIZE (stmt_info);

  return 1;
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */
static void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       enum vect_def_type dt, slp_tree slp_node)
{
  int group_size;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_invariant_def)
    outside_cost = TARG_SCALAR_TO_VEC_COST;

  /* Strided access?  */
  if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
    group_size = vect_cost_strided_group_size (stmt_info);
  /* Not a strided access.  */
  else
    group_size = 1;

  /* Is this an access in a group of stores, which provide strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2(group_size) * group_size
                    * TARG_VEC_STMT_COST;
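      /* For illustration: interleaving a group of 4 stores needs
         exact_log2(4) * 4 = 8 high/low interleave operations per copy
         (the per-stmt unit cost is the target's TARG_VEC_STMT_COST).  */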
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d.",
                 group_size);
    }

  /* Costs of the stores.  */
  inside_cost += ncopies * TARG_VEC_STORE_COST;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
             "outside_cost = %d.", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */
static void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
{
  int group_size;
  gimple first_stmt;
  int alignment_support_scheme;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = DR_GROUP_FIRST_DR (stmt_info);
  if (first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr);

  /* Is this an access in a group of loads providing strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses even and odd extract operations for each needed permute.  */
      inside_cost = ncopies * exact_log2(group_size) * group_size
                    * TARG_VEC_STMT_COST;

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d.",
                 group_size);
    }
  /* The loads themselves.  */
  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        inside_cost += ncopies * TARG_VEC_LOAD_COST;

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: aligned.");

        break;
      }
    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned load.  */
        inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                   "hardware.");

        break;
      }
    case dr_explicit_realign:
      {
        inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           outside costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          inside_cost += TARG_VEC_STMT_COST;

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
                   "pipelined.");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide strided
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
          {
            outside_cost = 2*TARG_VEC_STMT_COST;
            if (targetm.vectorize.builtin_mask_for_load)
              outside_cost += TARG_VEC_STMT_COST;
          }

        inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
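        /* For illustration: with ncopies = 2 and a mask-for-load builtin
           available, this scheme pays 3 stmts outside the loop (the address
           load, the initial load, and the mask "priming" op) and
           2 * (load + realign) = 4 stmts inside (hypothetical counts; unit
           costs are the target's TARG_VEC_* macros).  */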
        break;
      }
    default:
      gcc_unreachable ();
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
             "outside_cost = %d.", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Function vect_get_new_vect_var.

   Returns a name for a new variable.  The current naming scheme appends the
   prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
   the name of vectorizer generated variables, and appends that to NAME if
   provided.  */

static tree
vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
{
  const char *prefix;
  tree new_vect_var;

  switch (var_kind)
    {
    case vect_simple_var:
      prefix = "vect_";
      break;
    case vect_scalar_var:
      prefix = "stmp_";
      break;
    case vect_pointer_var:
      prefix = "vect_p";
      break;
    default:
      gcc_unreachable ();
    }

  if (name)
    {
      char* tmp = concat (prefix, name, NULL);
      new_vect_var = create_tmp_var (type, tmp);
      free (tmp);
    }
  else
    new_vect_var = create_tmp_var (type, prefix);

  /* Mark vector typed variable as a gimple register variable.  */
  if (TREE_CODE (type) == VECTOR_TYPE)
    DECL_GIMPLE_REG_P (new_vect_var) = true;

  return new_vect_var;
}
/* Function vect_create_addr_base_for_vector_ref.

   Create an expression that computes the address of the first memory location
   that will be accessed for a data reference.

   Input:
   STMT: The statement containing the data reference.
   NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
   OFFSET: Optional.  If supplied, it will be added to the initial address.
   LOOP:   Specify relative to which loop-nest should the address be computed.
           For example, when the dataref is in an inner-loop nested in an
           outer-loop that is now being vectorized, LOOP can be either the
           outer-loop, or the inner-loop.  The first memory location accessed
           by the following dataref ('in' points to short):

               for (i = 0; i < N; i++)
                  for (j = 0; j < M; j++)
                    s += in[i+j]

           is as follows:
           if LOOP=i_loop: &in            (relative to i_loop)
           if LOOP=j_loop: &in+i*2B       (relative to j_loop)

   Output:
   1. Return an SSA_NAME whose value is the address of the memory location of
      the first vector of the data reference.
   2. If new_stmt_list is not NULL_TREE after return then the caller must insert
      these statement(s) which define the returned SSA_NAME.

   FORNOW: We are only handling array accesses with step 1.  */
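/* A sketch of what this builds for the j_loop case above (names are
   illustrative only): the base address is gimplified into a "batmp"
   temporary, DR_OFFSET + DR_INIT (plus OFFSET * step, when OFFSET is
   supplied) into a "base_off" temporary, and the returned SSA_NAME is
   defined as batmp + base_off, converted to a pointer to the vector
   type.  */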
static tree
vect_create_addr_base_for_vector_ref (gimple stmt,
                                      gimple_seq *new_stmt_list,
                                      tree offset,
                                      struct loop *loop)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  tree data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
  tree base_name;
  tree data_ref_base_var;
  tree vec_stmt;
  tree addr_base, addr_expr;
  tree dest;
  gimple_seq seq = NULL;
  tree base_offset = unshare_expr (DR_OFFSET (dr));
  tree init = unshare_expr (DR_INIT (dr));
  tree vect_ptr_type, addr_expr2;
  tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));

  if (loop != containing_loop)
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
      struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

      gcc_assert (nested_in_vect_loop_p (loop, stmt));

      data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
      base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
      init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
    }

  /* Create data_ref_base.  */
  base_name = build_fold_indirect_ref (data_ref_base);
  data_ref_base_var = create_tmp_var (TREE_TYPE (data_ref_base), "batmp");
  add_referenced_var (data_ref_base_var);
  data_ref_base = force_gimple_operand (data_ref_base, &seq, true,
                                        data_ref_base_var);
  gimple_seq_add_seq (new_stmt_list, seq);

  /* Create base_offset.  */
  base_offset = size_binop (PLUS_EXPR,
                            fold_convert (sizetype, base_offset),
                            fold_convert (sizetype, init));
  dest = create_tmp_var (sizetype, "base_off");
  add_referenced_var (dest);
  base_offset = force_gimple_operand (base_offset, &seq, true, dest);
  gimple_seq_add_seq (new_stmt_list, seq);

  if (offset)
    {
      tree tmp = create_tmp_var (sizetype, "offset");

      add_referenced_var (tmp);
      offset = fold_build2 (MULT_EXPR, sizetype,
                            fold_convert (sizetype, offset), step);
      base_offset = fold_build2 (PLUS_EXPR, sizetype,
                                 base_offset, offset);
      base_offset = force_gimple_operand (base_offset, &seq, false, tmp);
      gimple_seq_add_seq (new_stmt_list, seq);
    }

  /* base + base_offset.  */
  addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (data_ref_base),
                           data_ref_base, base_offset);

  vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));

  /* addr_expr = addr_base.  */
  addr_expr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
                                     get_name (base_name));
  add_referenced_var (addr_expr);
  vec_stmt = fold_convert (vect_ptr_type, addr_base);
  addr_expr2 = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
                                      get_name (base_name));
  add_referenced_var (addr_expr2);
  vec_stmt = force_gimple_operand (vec_stmt, &seq, false, addr_expr2);
  gimple_seq_add_seq (new_stmt_list, seq);

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created ");
      print_generic_expr (vect_dump, vec_stmt, TDF_SLIM);
    }

  return vec_stmt;
}
/* Function vect_create_data_ref_ptr.

   Create a new pointer to vector type (vp), that points to the first location
   accessed in the loop by STMT, along with the def-use update chain to
   appropriately advance the pointer through the loop iterations.  Also set
   aliasing information for the pointer.  This vector pointer is used by the
   callers to this function to create a memory reference expression for vector
   load/store access.

   Input:
   1. STMT: a stmt that references memory.  Expected to be of the form
         GIMPLE_ASSIGN <name, data-ref> or
         GIMPLE_ASSIGN <data-ref, name>.
   2. AT_LOOP: the loop where the vector memref is to be created.
   3. OFFSET (optional): an offset to be added to the initial address accessed
        by the data-ref in STMT.
   4. ONLY_INIT: indicate if vp is to be updated in the loop, or remain
        pointing to the initial address.
   5. TYPE: if not NULL indicates the required type of the data-ref.

   Output:
   1. Declare a new ptr to vector_type, and have it point to the base of the
      data reference (initial address accessed by the data reference).
      For example, for vector of type V8HI, the following code is generated:

      v8hi *vp;
      vp = (v8hi *)initial_address;

      if OFFSET is not supplied:
         initial_address = &a[init];
      if OFFSET is supplied:
         initial_address = &a[init + OFFSET];

      Return the initial_address in INITIAL_ADDRESS.

   2. If ONLY_INIT is true, just return the initial pointer.  Otherwise, also
      update the pointer in each iteration of the loop.

      Return the increment stmt that updates the pointer in PTR_INCR.

   3. Set INV_P to true if the access pattern of the data reference in the
      vectorized loop is invariant.  Set it to false otherwise.

   4. Return the pointer.  */
static tree
vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
                          tree offset, tree *initial_address, gimple *ptr_incr,
                          bool only_init, bool *inv_p, tree type)
{
  tree base_name;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vect_ptr_type;
  tree vect_ptr;
  tree tag;
  tree new_temp;
  gimple vec_stmt;
  gimple_seq new_stmt_list = NULL;
  edge pe;
  basic_block new_bb;
  tree vect_ptr_init;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vptr;
  gimple_stmt_iterator incr_gsi;
  bool insert_after;
  tree indx_before_incr, indx_after_incr;
  gimple incr;
  tree step;

  /* Check the step (evolution) of the load in LOOP, and record
     whether it's invariant.  */
  if (nested_in_vect_loop)
    step = STMT_VINFO_DR_STEP (stmt_info);
  else
    step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));

  if (tree_int_cst_compare (step, size_zero_node) == 0)
    *inv_p = true;
  else
    *inv_p = false;

  /* Create an expression for the first address accessed by this load
     in LOOP.  */
  base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      tree data_ref_base = base_name;
      fprintf (vect_dump, "create vector-pointer variable to type: ");
      print_generic_expr (vect_dump, vectype, TDF_SLIM);
      if (TREE_CODE (data_ref_base) == VAR_DECL)
        fprintf (vect_dump, "  vectorizing a one dimensional array ref: ");
      else if (TREE_CODE (data_ref_base) == ARRAY_REF)
        fprintf (vect_dump, "  vectorizing a multidimensional array ref: ");
      else if (TREE_CODE (data_ref_base) == COMPONENT_REF)
        fprintf (vect_dump, "  vectorizing a record based array ref: ");
      else if (TREE_CODE (data_ref_base) == SSA_NAME)
        fprintf (vect_dump, "  vectorizing a pointer ref: ");
      print_generic_expr (vect_dump, base_name, TDF_SLIM);
    }

  /** (1) Create the new vector-pointer variable:  **/
  if (type)
    vect_ptr_type = build_pointer_type (type);
  else
    vect_ptr_type = build_pointer_type (vectype);
  vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
                                    get_name (base_name));
  add_referenced_var (vect_ptr);

  /** (2) Add aliasing information to the new vector-pointer:
          (The points-to info (DR_PTR_INFO) may be defined later.)  **/

  tag = DR_SYMBOL_TAG (dr);
  gcc_assert (tag);

  /* If tag is a variable (and NOT_A_TAG) then a new symbol memory
     tag must be created with tag added to its may alias list.  */
  if (!MTAG_P (tag))
    new_type_alias (vect_ptr, tag, DR_REF (dr));
  else
    set_symbol_mem_tag (vect_ptr, tag);

  /** Note: If the dataref is in an inner-loop nested in LOOP, and we are
      vectorizing LOOP (i.e. outer-loop vectorization), we need to create two
      def-use update cycles for the pointer: One relative to the outer-loop
      (LOOP), which is what steps (3) and (4) below do.  The other is relative
      to the inner-loop (which is the inner-most loop containing the dataref),
      and this is done by step (5) below.

      When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
      inner-most loop, and so steps (3),(4) work the same, and step (5) is
      redundant.  Steps (3),(4) create the following:

        vp0 = &base_addr;
        LOOP:   vp1 = phi(vp0,vp2)
                ...
                vp2 = vp1 + step
                goto LOOP

      If there is an inner-loop nested in loop, then step (5) will also be
      applied, and an additional update in the inner-loop will be created:

        vp0 = &base_addr;
        LOOP:   vp1 = phi(vp0,vp2)
                ...
        inner:     vp3 = phi(vp1,vp4)
                   vp4 = vp3 + inner_step
                   if () goto inner
                ...
                vp2 = vp1 + step
                if () goto LOOP   **/

  /** (3) Calculate the initial address of the vector-pointer, and set
          the vector-pointer to point to it before the loop:  **/

  /* Create: &(base[init_val+offset]) in the loop preheader.  */

  new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
                                                   offset, loop);
  pe = loop_preheader_edge (loop);
  if (new_stmt_list)
    {
      new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
      gcc_assert (!new_bb);
    }

  *initial_address = new_temp;

  /* Create: p = (vectype *) initial_base  */
  vec_stmt = gimple_build_assign (vect_ptr,
                                  fold_convert (vect_ptr_type, new_temp));
  vect_ptr_init = make_ssa_name (vect_ptr, vec_stmt);
  gimple_assign_set_lhs (vec_stmt, vect_ptr_init);
  new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
  gcc_assert (!new_bb);

  /** (4) Handle the updating of the vector-pointer inside the loop.
          This is needed when ONLY_INIT is false, and also when AT_LOOP
          is the inner-loop nested in LOOP (during outer-loop vectorization).
   **/

  if (only_init && at_loop == loop) /* No update in loop is required.  */
    {
      /* Copy the points-to information if it exists.  */
      if (DR_PTR_INFO (dr))
        duplicate_ssa_name_ptr_info (vect_ptr_init, DR_PTR_INFO (dr));
      vptr = vect_ptr_init;
    }
  else
    {
      /* The step of the vector pointer is the Vector Size.  */
      tree step = TYPE_SIZE_UNIT (vectype);
      /* One exception to the above is when the scalar step of the load in
         LOOP is zero.  In this case the step here is also zero.  */
      if (*inv_p)
        step = size_zero_node;

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (vect_ptr_init,
                 fold_convert (vect_ptr_type, step),
                 NULL_TREE, loop, &incr_gsi, insert_after,
                 &indx_before_incr, &indx_after_incr);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      /* Copy the points-to information if it exists.  */
      if (DR_PTR_INFO (dr))
        {
          duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
          duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
        }
      merge_alias_info (vect_ptr_init, indx_before_incr);
      merge_alias_info (vect_ptr_init, indx_after_incr);
      if (ptr_incr)
        *ptr_incr = incr;

      vptr = indx_before_incr;
    }

  if (!nested_in_vect_loop || only_init)
    return vptr;

  /** (5) Handle the updating of the vector-pointer inside the inner-loop
          nested in LOOP, if exists: **/

  gcc_assert (nested_in_vect_loop);

  standard_iv_increment_position (containing_loop, &incr_gsi,
                                  &insert_after);
  create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), NULL_TREE,
             containing_loop, &incr_gsi, insert_after, &indx_before_incr,
             &indx_after_incr);
  incr = gsi_stmt (incr_gsi);
  set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

  /* Copy the points-to information if it exists.  */
  if (DR_PTR_INFO (dr))
    {
      duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
      duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
    }
  merge_alias_info (vect_ptr_init, indx_before_incr);
  merge_alias_info (vect_ptr_init, indx_after_incr);
  if (ptr_incr)
    *ptr_incr = incr;

  return indx_before_incr;
}
/* Function bump_vector_ptr

   Increment a pointer (to a vector type) by vector-size.  If requested,
   i.e. if PTR-INCR is given, then also connect the new increment stmt
   to the existing def-use update-chain of the pointer, by modifying
   the PTR_INCR as illustrated below:

   The pointer def-use update-chain before this function:
                        DATAREF_PTR = phi (p_0, p_2)
                        ....
        PTR_INCR:       p_2 = DATAREF_PTR + step

   The pointer def-use update-chain after this function:
                        DATAREF_PTR = phi (p_0, p_2)
                        ....
                        NEW_DATAREF_PTR = DATAREF_PTR + BUMP
                        ....
        PTR_INCR:       p_2 = NEW_DATAREF_PTR + step

   Input:
   DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
                 in the loop.
   PTR_INCR - optional.  The stmt that updates the pointer in each iteration of
              the loop.  The increment amount across iterations is expected
              to be vector_size.
   BSI - location where the new update stmt is to be placed.
   STMT - the original scalar memory-access stmt that is being vectorized.
   BUMP - optional.  The offset by which to bump the pointer.  If not given,
          the offset is assumed to be vector_size.

   Output: Return NEW_DATAREF_PTR as illustrated above.  */
static tree
bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
                 gimple stmt, tree bump)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree ptr_var = SSA_NAME_VAR (dataref_ptr);
  tree update = TYPE_SIZE_UNIT (vectype);
  gimple incr_stmt;
  ssa_op_iter iter;
  use_operand_p use_p;
  tree new_dataref_ptr;

  if (bump)
    update = bump;

  incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, ptr_var,
                                            dataref_ptr, update);
  new_dataref_ptr = make_ssa_name (ptr_var, incr_stmt);
  gimple_assign_set_lhs (incr_stmt, new_dataref_ptr);
  vect_finish_stmt_generation (stmt, incr_stmt, gsi);

  /* Copy the points-to information if it exists.  */
  if (DR_PTR_INFO (dr))
    duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
  merge_alias_info (new_dataref_ptr, dataref_ptr);

  if (!ptr_incr)
    return new_dataref_ptr;

  /* Update the vector-pointer's cross-iteration increment.  */
  FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
    {
      tree use = USE_FROM_PTR (use_p);

      if (use == dataref_ptr)
        SET_USE (use_p, new_dataref_ptr);
      else
        gcc_assert (tree_int_cst_compare (use, update) == 0);
    }

  return new_dataref_ptr;
}
/* Function vect_create_destination_var.

   Create a new temporary of type VECTYPE.  */

static tree
vect_create_destination_var (tree scalar_dest, tree vectype)
{
  tree vec_dest;
  const char *new_name;
  tree type;
  enum vect_var_kind kind;

  kind = vectype ? vect_simple_var : vect_scalar_var;
  type = vectype ? vectype : TREE_TYPE (scalar_dest);

  gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);

  new_name = get_name (scalar_dest);
  if (!new_name)
    new_name = "var_";
  vec_dest = vect_get_new_vect_var (type, kind, new_name);
  add_referenced_var (vec_dest);

  return vec_dest;
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at BSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */
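/* For example (SSA names here are illustrative only), for a V4SI constant
   {0,0,0,0} this emits

     vect_cst_.5 = { 0, 0, 0, 0 };

   and returns the SSA name defined by that stmt.  */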
static tree
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
                  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  edge pe;
  tree new_temp;
  basic_block new_bb;

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
      struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

      if (nested_in_vect_loop_p (loop, stmt))
        loop = loop->inner;
      pe = loop_preheader_edge (loop);
      new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
      gcc_assert (!new_bb);
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
/* For constant and loop invariant defs of SLP_NODE this function returns
   (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
   OP_NUM determines if we gather defs for operand 0 or operand 1 of the scalar
   stmts.  NUMBER_OF_VECTORS is the number of vector defs to create.  */
static void
vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
                           unsigned int op_num, unsigned int number_of_vectors)
{
  VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple stmt = VEC_index (gimple, stmts, 0);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  tree vec_cst;
  tree t = NULL_TREE;
  int j, number_of_places_left_in_vector;
  tree vector_type;
  tree op, vop;
  int group_size = VEC_length (gimple, stmts);
  unsigned int vec_num, i;
  int number_of_copies = 1;
  bool is_store = false;
  VEC (tree, heap) *voprnds = VEC_alloc (tree, heap, number_of_vectors);
  bool constant_p = true;

  if (STMT_VINFO_DATA_REF (stmt_vinfo))
    is_store = true;

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
     containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */

  number_of_copies = least_common_multiple (nunits, group_size) / group_size;
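  /* For example, with nunits = 4 and group_size = 6 (s1...s6), this gives
     least_common_multiple (4, 6) / 6 = 12 / 6 = 2 copies, filling three
     vectors: {s1,s2,s3,s4}, {s5,s6,s1,s2} and {s3,s4,s5,s6}.  */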
  number_of_places_left_in_vector = nunits;

  for (j = 0; j < number_of_copies; j++)
    {
      for (i = group_size - 1; VEC_iterate (gimple, stmts, i, stmt); i--)
        {
          if (is_store)
            op = gimple_assign_rhs1 (stmt);
          else
            op = gimple_op (stmt, op_num + 1);
          if (!CONSTANT_CLASS_P (op))
            constant_p = false;

          /* Create 'vect_ = {op0,op1,...,opn}'.  */
          t = tree_cons (NULL_TREE, op, t);
          number_of_places_left_in_vector--;

          if (number_of_places_left_in_vector == 0)
            {
              number_of_places_left_in_vector = nunits;

              vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
              gcc_assert (vector_type);
              if (constant_p)
                vec_cst = build_vector (vector_type, t);
              else
                vec_cst = build_constructor_from_list (vector_type, t);
              constant_p = true;
              VEC_quick_push (tree, voprnds,
                              vect_init_vector (stmt, vec_cst, vector_type,
                                                NULL));
              t = NULL_TREE;
            }
        }
    }

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = VEC_length (tree, voprnds);
  for (j = vec_num - 1; j >= 0; j--)
    {
      vop = VEC_index (tree, voprnds, j);
      VEC_quick_push (tree, *vec_oprnds, vop);
    }

  VEC_free (tree, heap, voprnds);

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
  while (number_of_vectors > VEC_length (tree, *vec_oprnds))
    {
      for (i = 0; VEC_iterate (tree, *vec_oprnds, i, vop) && i < vec_num; i++)
        VEC_quick_push (tree, *vec_oprnds, vop);
    }
}
/* Get vectorized definitions from SLP_NODE that contains corresponding
   vectorized def-stmts.  */

static void
vect_get_slp_vect_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds)
{
  tree vec_oprnd;
  gimple vec_def_stmt;
  unsigned int i;

  gcc_assert (SLP_TREE_VEC_STMTS (slp_node));

  for (i = 0;
       VEC_iterate (gimple, SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt);
       i++)
    {
      gcc_assert (vec_def_stmt);
      vec_oprnd = gimple_get_lhs (vec_def_stmt);
      VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
    }
}
/* Get vectorized definitions for SLP_NODE.
   If the scalar definitions are loop invariants or constants, collect them and
   call vect_get_constant_vectors() to create vector stmts.
   Otherwise, the def-stmts must be already vectorized and the vectorized stmts
   must be stored in the LEFT/RIGHT node of SLP_NODE, and we call
   vect_get_slp_vect_defs() to retrieve them.
   If VEC_OPRNDS1 is NULL, don't get vector defs for the second operand (from
   the right node).  This is used when the second operand must remain scalar.  */

static void
vect_get_slp_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds0,
                   VEC (tree,heap) **vec_oprnds1)
{
  gimple first_stmt;
  enum tree_code code;
  int number_of_vects;
  HOST_WIDE_INT lhs_size_unit, rhs_size_unit;

  first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
  /* The number of vector defs is determined by the number of vector statements
     in the node from which we get those statements.  */
  if (SLP_TREE_LEFT (slp_node))
    number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_LEFT (slp_node));
  else
    {
      number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      /* Number of vector stmts was calculated according to LHS in
         vect_schedule_slp_instance(), fix it by replacing LHS with RHS, if
         necessary.  See vect_get_smallest_scalar_type() for details.  */
      vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
                                     &rhs_size_unit);
      if (rhs_size_unit != lhs_size_unit)
        {
          number_of_vects *= rhs_size_unit;
          number_of_vects /= lhs_size_unit;
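          /* For example (sizes are illustrative): if the LHS type is int
             (lhs_size_unit = 4) and the RHS type is short (rhs_size_unit = 2),
             twice as many RHS elements fit in a vector, so only half as many
             vector defs are needed: number_of_vects * 2 / 4.  */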
        }
    }

  /* Allocate memory for vectorized defs.  */
  *vec_oprnds0 = VEC_alloc (tree, heap, number_of_vects);

  /* SLP_NODE corresponds either to a group of stores or to a group of
     unary/binary operations.  We don't call this function for loads.  */
  if (SLP_TREE_LEFT (slp_node))
    /* The defs are already vectorized.  */
    vect_get_slp_vect_defs (SLP_TREE_LEFT (slp_node), vec_oprnds0);
  else
    /* Build vectors from scalar defs.  */
    vect_get_constant_vectors (slp_node, vec_oprnds0, 0, number_of_vects);

  if (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)))
    /* Since we don't call this function with loads, this is a group of
       stores.  */
    return;

  code = gimple_assign_rhs_code (first_stmt);
  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS || !vec_oprnds1)
    return;

  /* The number of vector defs is determined by the number of vector statements
     in the node from which we get those statements.  */
  if (SLP_TREE_RIGHT (slp_node))
    number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_RIGHT (slp_node));
  else
    number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

  *vec_oprnds1 = VEC_alloc (tree, heap, number_of_vects);

  if (SLP_TREE_RIGHT (slp_node))
    /* The defs are already vectorized.  */
    vect_get_slp_vect_defs (SLP_TREE_RIGHT (slp_node), vec_oprnds1);
  else
    /* Build vectors from scalar defs.  */
    vect_get_constant_vectors (slp_node, vec_oprnds1, 1, number_of_vects);
}
/* Function get_initial_def_for_induction

   Input:
   STMT - a stmt that performs an induction operation in the loop.
   IV_PHI - the initial value of the induction variable

   Output:
   Return a vector variable, initialized with the first VF values of
   the induction variable.  E.g., for an iv with IV_PHI='X' and
   evolution S, for a vector of 4 units, we want to return:
   [X, X + S, X + 2*S, X + 3*S].  */
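/* A concrete instance: for X = 3, S = 2 and VF = 4 this produces the
   vector [3, 5, 7, 9] in the loop preheader.  */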
static tree
get_initial_def_for_induction (gimple iv_phi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (gimple_phi_result (iv_phi));
  tree vectype;
  int nunits;
  edge pe = loop_preheader_edge (loop);
  struct loop *iv_loop;
  basic_block new_bb;
  tree vec, vec_init, vec_step, t;
  tree access_fn;
  tree new_var;
  tree new_name;
  gimple init_stmt, induction_phi, new_stmt;
  tree induc_def, vec_def, vec_dest;
  tree init_expr, step_expr;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int i;
  bool ok;
  int ncopies;
  tree expr;
  stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
  bool nested_in_vect_loop = false;
  gimple_seq stmts = NULL;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  gimple exit_phi;
  edge latch_e;
  tree loop_arg;
  gimple_stmt_iterator si;
  basic_block bb = gimple_bb (iv_phi);

  vectype = get_vectype_for_scalar_type (scalar_type);
  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  ncopies = vf / nunits;

  gcc_assert (phi_info);
  gcc_assert (ncopies >= 1);

  /* Find the first insertion point in the BB.  */
  si = gsi_after_labels (bb);

  if (INTEGRAL_TYPE_P (scalar_type) || POINTER_TYPE_P (scalar_type))
    step_expr = build_int_cst (scalar_type, 0);
  else
    step_expr = build_real (scalar_type, dconst0);

  /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop?  */
  if (nested_in_vect_loop_p (loop, iv_phi))
    {
      nested_in_vect_loop = true;
      iv_loop = loop->inner;
    }
  else
    iv_loop = loop;
  gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);

  latch_e = loop_latch_edge (iv_loop);
  loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);

  access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
  gcc_assert (access_fn);
  ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
                                    &init_expr, &step_expr);
  gcc_assert (ok);
  pe = loop_preheader_edge (iv_loop);

  /* Create the vector that holds the initial_value of the induction.  */
  if (nested_in_vect_loop)
    {
      /* iv_loop is nested in the loop to be vectorized.  init_expr had already
         been created during vectorization of previous stmts; We obtain it from
         the STMT_VINFO_VEC_STMT of the defining stmt.  */
      tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi, loop_preheader_edge (iv_loop));
      vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
    }
  else
    {
      /* iv_loop is the loop to be vectorized.  Create:
         vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr)  */
      new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
      add_referenced_var (new_var);

      new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
      if (stmts)
        {
          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }

      t = NULL_TREE;
      t = tree_cons (NULL_TREE, init_expr, t);
      for (i = 1; i < nunits; i++)
        {
          /* Create: new_name_i = new_name + step_expr  */
          enum tree_code code = POINTER_TYPE_P (scalar_type)
                                ? POINTER_PLUS_EXPR : PLUS_EXPR;
          init_stmt = gimple_build_assign_with_ops (code, new_var,
                                                    new_name, step_expr);
          new_name = make_ssa_name (new_var, init_stmt);
          gimple_assign_set_lhs (init_stmt, new_name);

          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "created new init_stmt: ");
              print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
            }
          t = tree_cons (NULL_TREE, new_name, t);
        }
      /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1]  */
      vec = build_constructor_from_list (vectype, nreverse (t));
      vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
    }

  /* Create the vector that holds the step of the induction.  */
  if (nested_in_vect_loop)
    /* iv_loop is nested in the loop to be vectorized.  Generate:
       vec_step = [S, S, S, S]  */
    new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized.  Generate:
         vec_step = [VF*S, VF*S, VF*S, VF*S]  */
      expr = build_int_cst (scalar_type, vf);
      new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
    }

  t = NULL_TREE;
  for (i = 0; i < nunits; i++)
    t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
  gcc_assert (CONSTANT_CLASS_P (new_name));
  vec = build_vector (vectype, t);
  vec_step = vect_init_vector (iv_phi, vec, vectype, NULL);


  /* Create the following def-use cycle:
     loop prolog:
         vec_init = ...
         vec_step = ...
     loop:
         vec_iv = PHI <vec_init, vec_loop>
         ...
         STMT
         ...
         vec_loop = vec_iv + vec_step;  */

  /* Create the induction-phi that defines the induction-operand.  */
  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
  add_referenced_var (vec_dest);
  induction_phi = create_phi_node (vec_dest, iv_loop->header);
  set_vinfo_for_stmt (induction_phi,
                      new_stmt_vec_info (induction_phi, loop_vinfo));
  induc_def = PHI_RESULT (induction_phi);

  /* Create the iv update inside the loop  */
  new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
                                           induc_def, vec_step);
  vec_def = make_ssa_name (vec_dest, new_stmt);
  gimple_assign_set_lhs (new_stmt, vec_def);
  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));

  /* Set the arguments of the phi node:  */
  add_phi_arg (induction_phi, vec_init, pe);
  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop));


  /* In case that vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */
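  /* For example, with VF = 8 and nunits = 4 (ncopies = 2), the first
     vector is [X, X+S, X+2*S, X+3*S] and the copy created below adds
     [4*S, 4*S, 4*S, 4*S] to it, giving [X+4*S, X+5*S, X+6*S, X+7*S].  */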
  if (ncopies > 1)
    {
      stmt_vec_info prev_stmt_vinfo;
      /* FORNOW.  This restriction should be relaxed.  */
      gcc_assert (!nested_in_vect_loop);

      /* Create the vector that holds the step of the induction.  */
      expr = build_int_cst (scalar_type, nunits);
      new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
      t = NULL_TREE;
      for (i = 0; i < nunits; i++)
        t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
      gcc_assert (CONSTANT_CLASS_P (new_name));
      vec = build_vector (vectype, t);
      vec_step = vect_init_vector (iv_phi, vec, vectype, NULL);

      vec_def = induc_def;
      prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
      for (i = 1; i < ncopies; i++)
        {
          /* vec_i = vec_prev + vec_step  */
          new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
                                                   vec_def, vec_step);
          vec_def = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, vec_def);

          gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
          set_vinfo_for_stmt (new_stmt,
                              new_stmt_vec_info (new_stmt, loop_vinfo));
          STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
          prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
        }
    }

  if (nested_in_vect_loop)
    {
      /* Find the loop-closed exit-phi of the induction, and record
         the final vector of induction results:  */
      exit_phi = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
        {
          if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              break;
            }
        }
      if (exit_phi)
        {
          stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
          /* FORNOW.  Currently not supporting the case that an inner-loop
             induction is not used in the outer-loop (i.e. only outside the
             outer-loop).  */
          gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
                      && !STMT_VINFO_LIVE_P (stmt_vinfo));

          STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vector of inductions after inner-loop:");
              print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
            }
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "transform induction: created def-use cycle: ");
      print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
      fprintf (vect_dump, "\n");
      print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
    }

  STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
  return induc_def;
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  */
1881 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
1886 stmt_vec_info def_stmt_info = NULL;
1887 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1888 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1889 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1890 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1896 enum vect_def_type dt;
1900 if (vect_print_dump_info (REPORT_DETAILS))
1902 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
1903 print_generic_expr (vect_dump, op, TDF_SLIM);
1906 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt);
1907 gcc_assert (is_simple_use);
1908 if (vect_print_dump_info (REPORT_DETAILS))
1912 fprintf (vect_dump, "def = ");
1913 print_generic_expr (vect_dump, def, TDF_SLIM);
1917 fprintf (vect_dump, " def_stmt = ");
1918 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1924 /* Case 1: operand is a constant. */
1925 case vect_constant_def:
1930 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1931 if (vect_print_dump_info (REPORT_DETAILS))
1932 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
1934 for (i = nunits - 1; i >= 0; --i)
1936 t = tree_cons (NULL_TREE, op, t);
1938 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1939 gcc_assert (vector_type);
1940 vec_cst = build_vector (vector_type, t);
1942 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
1945 /* Case 2: operand is defined outside the loop - loop invariant. */
1946 case vect_invariant_def:
1951 /* Create 'vec_inv = {inv,inv,..,inv}' */
1952 if (vect_print_dump_info (REPORT_DETAILS))
1953 fprintf (vect_dump, "Create vector_inv.");
1955 for (i = nunits - 1; i >= 0; --i)
1957 t = tree_cons (NULL_TREE, def, t);
1960 /* FIXME: use build_constructor directly. */
1961 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1962 gcc_assert (vector_type);
1963 vec_inv = build_constructor_from_list (vector_type, t);
1964 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
1967 /* Case 3: operand is defined inside the loop. */
1971 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1973 /* Get the def from the vectorized stmt. */
1974 def_stmt_info = vinfo_for_stmt (def_stmt);
1975 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1976 gcc_assert (vec_stmt);
1977 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1978 vec_oprnd = PHI_RESULT (vec_stmt);
1979 else if (is_gimple_call (vec_stmt))
1980 vec_oprnd = gimple_call_lhs (vec_stmt);
1982 vec_oprnd = gimple_assign_lhs (vec_stmt);
1986 /* Case 4: operand is defined by a loop header phi - reduction */
1987 case vect_reduction_def:
1991 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1992 loop = (gimple_bb (def_stmt))->loop_father;
1994 /* Get the def before the loop */
1995 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1996 return get_initial_def_for_reduction (stmt, op, scalar_def);
1999 /* Case 5: operand is defined by loop-header phi - induction. */
2000 case vect_induction_def:
2002 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
2004 /* Get the def from the vectorized stmt. */
2005 def_stmt_info = vinfo_for_stmt (def_stmt);
2006 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
2007 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
2008 vec_oprnd = PHI_RESULT (vec_stmt);
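
/* An illustration of "Case 1" above, not used by the compiler itself:
   a scalar constant operand is materialized as a vector with the
   constant replicated into every lane.  With GCC's vector extension,
   'vect_cst_ = {cst,cst,...,cst}' for a V4SI vectype is simply:  */
#if 0
typedef int v4si __attribute__ ((vector_size (16)));

static v4si
splat_constant_sketch (int cst)
{
  v4si vect_cst_ = { cst, cst, cst, cst };  /* one copy per lane (nunits == 4) */
  return vect_cst_;
}
#endif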
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT
   field of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
   In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of
   the vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:       vectorized into:          STMT_VINFO_RELATED_STMT

   S1: x = load       VS1.0: vx.0 = memref0     VS1.1
                      VS1.1: vx.1 = memref1     VS1.2
                      VS1.2: vx.2 = memref2     VS1.3
                      VS1.3: vx.3 = memref3

   S2: z = x + ...    VSnew.0: vz0 = vx.0 + ... VSnew.1
                      VSnew.1: vz1 = vx.1 + ... VSnew.2
                      VSnew.2: vz2 = vx.2 + ... VSnew.3
                      VSnew.3: vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
   To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

   To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

   For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

static tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_invariant_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
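
/* A sketch of the walk performed above, for illustration only -- the
   types are hypothetical, not the GCC API.  The copies of a vectorized
   stmt form a singly linked list threaded through STMT_VINFO_RELATED_STMT,
   so fetching the def for copy j+1 is one hop from the stmt that defined
   the operand of copy j.  */
#if 0
struct vstmt_sketch
{
  const char *lhs;               /* e.g. "vx.0"  */
  struct vstmt_sketch *related;  /* the next copy, e.g. the def of "vx.1"  */
};

static const char *
next_copy_def_sketch (struct vstmt_sketch *def_stmt_of_vec_oprnd)
{
  return def_stmt_of_vec_oprnd->related->lhs;
}
#endif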
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 VEC(tree,heap) **vec_oprnds0,
                                 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}


/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
   NULL.  */

static void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
        {
          *vec_oprnds1 = VEC_alloc (tree, heap, 1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
          VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
        }
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

static void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);

  gcc_assert (stmt == gsi_stmt (*gsi));
  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  /* Make sure gsi points to the stmt that is being vectorized.  */
  gcc_assert (stmt == gsi_stmt (*gsi));

  gimple_set_location (vec_stmt, gimple_location (stmt));
}
/* Function get_initial_def_for_reduction

   Input:
   STMT - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
        of the reduction (used for adjusting the epilog - see below).
   Return a vector variable, initialized according to the operation that STMT
        performs.  This vector will be used as the initial value of the
        vector of partial results.

   Option1 (adjust in epilog): Initialize the vector as follows:
     add:         [0,0,...,0,0]
     mult:        [1,1,...,1,1]
     min/max:     [init_val,init_val,..,init_val,init_val]
     bit and/or:  [init_val,init_val,..,init_val,init_val]
   and when necessary (e.g. add/mult case) let the caller know
   that it needs to adjust the result by init_val.

   Option2: Initialize the vector as follows:
     add:         [0,0,...,0,init_val]
     mult:        [1,1,...,1,init_val]
     min/max:     [init_val,init_val,...,init_val]
     bit and/or:  [init_val,init_val,...,init_val]
   and no adjustments are needed.

   For example, for the following code:

     s = init_val;
     for (i = 0; i < n; i++)
       s = s + a[i];

   STMT is 's = s + a[i]', and the reduction variable is 's'.
   For a vector of 4 units, we want to return either [0,0,0,init_val],
   or [0,0,0,0] and let the caller know that it needs to adjust
   the result at the end by 'init_val'.

   FORNOW, we are using the 'adjust in epilog' scheme, because this way the
   initialization vector is simpler (same element in all entries).
   A cost model should help decide between these two schemes.  */

static tree
get_initial_def_for_reduction (gimple stmt, tree init_val, tree *adjustment_def)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree type = TREE_TYPE (init_val);
  tree vecdef;
  tree def_for_init;
  tree init_def;
  tree t = NULL_TREE;
  int i;
  tree vector_type;
  bool nested_in_vect_loop = false;

  gcc_assert (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type)
              || SCALAR_FLOAT_TYPE_P (type));
  if (nested_in_vect_loop_p (loop, stmt))
    nested_in_vect_loop = true;
  else
    gcc_assert (loop == (gimple_bb (stmt))->loop_father);

  vecdef = vect_get_vec_def_for_operand (init_val, stmt, NULL);

  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case PLUS_EXPR:
      if (nested_in_vect_loop)
        *adjustment_def = vecdef;
      else
        *adjustment_def = init_val;
      /* Create a vector of zeros for init_def.  */
      if (SCALAR_FLOAT_TYPE_P (type))
        def_for_init = build_real (type, dconst0);
      else
        def_for_init = build_int_cst (type, 0);
      for (i = nunits - 1; i >= 0; --i)
        t = tree_cons (NULL_TREE, def_for_init, t);
      vector_type = get_vectype_for_scalar_type (TREE_TYPE (def_for_init));
      gcc_assert (vector_type);
      init_def = build_vector (vector_type, t);
      break;

    case MIN_EXPR:
    case MAX_EXPR:
      *adjustment_def = NULL_TREE;
      init_def = vecdef;
      break;

    default:
      gcc_unreachable ();
    }

  return init_def;
}
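
/* An illustration of the 'adjust in epilog' scheme, not used by the
   compiler itself: a sum reduction starts from an all-zeros vector and
   init_val is added back once after the loop.  Sketch in GCC's vector
   extension (hypothetical names; assumes 'a' is 16-byte aligned, n is a
   multiple of 4, and a GCC that supports vector subscripting).  */
#if 0
typedef float v4sf __attribute__ ((vector_size (16)));

static float
sum_reduction_sketch (const float *a, int n, float init_val)
{
  v4sf vec_acc = { 0.0f, 0.0f, 0.0f, 0.0f };  /* init_def: Option1 for add  */
  int i;

  for (i = 0; i < n; i += 4)
    vec_acc += *(const v4sf *) (a + i);        /* vector of partial sums  */

  /* Reduce the partial sums and apply the epilog adjustment.  */
  return vec_acc[0] + vec_acc[1] + vec_acc[2] + vec_acc[3] + init_val;
}
#endif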
/* Function vect_create_epilog_for_reduction

   Create code at the loop-epilog to finalize the result of a reduction
   computation.

   VECT_DEF is a vector of partial results.
   REDUC_CODE is the tree-code for the epilog reduction.
   NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
     number of elements that we can fit in a vectype (nunits).  In this case
     we have to generate more than one vector stmt - i.e - we need to "unroll"
     the vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.
   STMT is the scalar reduction stmt that is being vectorized.
   REDUCTION_PHI is the phi-node that carries the reduction computation.

   This function:
   1. Creates the reduction def-use cycle: sets the arguments for
      REDUCTION_PHI:
      The loop-entry argument is the vectorized initial-value of the reduction.
      The loop-latch argument is VECT_DEF - the vector of partial sums.
   2. "Reduces" the vector of partial results VECT_DEF into a single result,
      by applying the operation specified by REDUC_CODE if available, or by
      other means (whole-vector shifts or a scalar loop).
      The function also creates a new phi node at the loop exit to preserve
      loop-closed form, as illustrated below.

   The flow at the entry to this function:

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          use <s_out0>
          use <s_out0>

   The above is transformed by this function into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>  */

static void
vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
                                  int ncopies,
                                  enum tree_code reduc_code,
                                  gimple reduction_phi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_phi_info;
  tree vectype;
  enum machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block exit_bb;
  tree scalar_dest;
  tree scalar_type;
  gimple new_phi = NULL, phi;
  gimple_stmt_iterator exit_gsi;
  tree vec_dest;
  tree new_temp = NULL_TREE;
  tree new_name;
  gimple epilog_stmt = NULL;
  tree new_scalar_dest, new_dest;
  gimple exit_phi;
  tree bitsize, bitpos, bytesize;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree adjustment_def;
  tree vec_initial_def, def;
  tree orig_name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool extract_scalar_result = false;
  tree reduction_op, expr;
  tree rhs;
  gimple orig_stmt;
  gimple use_stmt;
  bool nested_in_vect_loop = false;
  VEC(gimple,heap) *phis = NULL;
  enum vect_def_type dt = vect_unknown_def_type;
  int j, i;

  if (nested_in_vect_loop_p (loop, stmt))
    {
      loop = loop->inner;
      nested_in_vect_loop = true;
    }
  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
                  == ternary_op);
      reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
      break;
    case GIMPLE_UNARY_RHS:
      reduction_op = gimple_assign_rhs1 (stmt);
      break;
    case GIMPLE_BINARY_RHS:
      reduction_op = gimple_assign_rhs2 (stmt);
      break;
    default:
      gcc_unreachable ();
    }

  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
  gcc_assert (vectype);
  mode = TYPE_MODE (vectype);

  /*** 1. Create the reduction def-use cycle  ***/

  /* For the case of reduction, vect_get_vec_def_for_operand returns
     the scalar def before the loop, that defines the initial value
     of the reduction variable.  */
  vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
                                                  &adjustment_def);

  phi = reduction_phi;
  def = vect_def;
  for (j = 0; j < ncopies; j++)
    {
      /* 1.1 set the loop-entry arg of the reduction-phi:  */
      add_phi_arg (phi, vec_initial_def, loop_preheader_edge (loop));

      /* 1.2 set the loop-latch arg for the reduction-phi:  */
      if (j > 0)
        def = vect_get_vec_def_for_stmt_copy (dt, def);
      add_phi_arg (phi, def, loop_latch_edge (loop));

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "transform reduction: created def-use cycle: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
          fprintf (vect_dump, "\n");
          print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0, TDF_SLIM);
        }

      phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
    }

  /*** 2. Create epilog code
          The reduction epilog code operates across the elements of the vector
          of partial results computed by the vectorized loop.
          The reduction epilog code consists of:
          step 1: compute the scalar result in a vector (v_out2)
          step 2: extract the scalar result (s_out3) from the vector (v_out2)
          step 3: adjust the scalar result (s_out3) if needed.

          Step 1 can be accomplished using one of the following three schemes:
          (scheme 1) using reduc_code, if available.
          (scheme 2) using whole-vector shifts, if available.
          (scheme 3) using a scalar loop.  In this case steps 1+2 above are
                     combined.

          The overall epilog code looks like this:

          s_out0 = phi <s_loop>                 # original EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>              # step 1
          s_out3 = extract_field <v_out2, 0>    # step 2
          s_out4 = adjust_result <s_out3>       # step 3

          (step 3 is optional, and steps 1 and 2 may be combined).
          Lastly, the uses of s_out0 are replaced by s_out4.  ***/

  /* 2.1 Create new loop-exit-phi to preserve loop-closed form:
         v_out1 = phi <v_loop>  */

  exit_bb = single_exit (loop)->dest;
  prev_phi_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      phi = create_phi_node (SSA_NAME_VAR (vect_def), exit_bb);
      set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo));
      if (j == 0)
        def = vect_def;
      else
        {
          def = vect_get_vec_def_for_stmt_copy (dt, def);
          STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
        }
      SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
      prev_phi_info = vinfo_for_stmt (phi);
    }
  exit_gsi = gsi_after_labels (exit_bb);

  /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
         (i.e. when reduc_code is not available) and in the final adjustment
         code (if needed).  Also get the original scalar reduction variable as
         defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
         represents a reduction pattern), the tree-code and scalar-def are
         taken from the original stmt that the pattern-stmt (STMT) replaces.
         Otherwise (it is a regular reduction) - the tree-code and scalar-def
         are taken from STMT.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    {
      /* Regular reduction  */
      orig_stmt = stmt;
    }
  else
    {
      /* Reduction pattern  */
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
    }
  code = gimple_assign_rhs_code (orig_stmt);
  scalar_dest = gimple_assign_lhs (orig_stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
  bitsize = TYPE_SIZE (scalar_type);
  bytesize = TYPE_SIZE_UNIT (scalar_type);

  /* In case this is a reduction in an inner-loop while vectorizing an outer
     loop - we don't need to extract a single scalar result at the end of the
     inner-loop.  The final vector of partial results will be used in the
     vectorized outer-loop, or reduced to a scalar result at the end of the
     outer-loop.  */
  if (nested_in_vect_loop)
    goto vect_finalize_reduction;

  /* FORNOW */
  gcc_assert (ncopies == 1);

  /* 2.3 Create the reduction code, using one of the three schemes described
         above.  */

  if (reduc_code < NUM_TREE_CODES)
    {
      tree tmp;

      /*** Case 1:  Create:
           v_out2 = reduc_expr <v_out1>  */

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Reduce using direct vector reduction.");

      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
      epilog_stmt = gimple_build_assign (vec_dest, tmp);
      new_temp = make_ssa_name (vec_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

      extract_scalar_result = true;
    }
  else
    {
      enum tree_code shift_code = 0;
      bool have_whole_vector_shift = true;
      int bit_offset;
      int element_bitsize = tree_low_cst (bitsize, 1);
      int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
      tree vec_temp;

      if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
        shift_code = VEC_RSHIFT_EXPR;
      else
        have_whole_vector_shift = false;

      /* Regardless of whether we have a whole vector shift, if we're
         emulating the operation via tree-vect-generic, we don't want
         to use it.  Only the first round of the reduction is likely
         to still be profitable via emulation.  */
      /* ??? It might be better to emit a reduction tree code here, so that
         tree-vect-generic can expand the first round via bit tricks.  */
      if (!VECTOR_MODE_P (mode))
        have_whole_vector_shift = false;
      else
        {
          optab optab = optab_for_tree_code (code, vectype, optab_default);
          if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing)
            have_whole_vector_shift = false;
        }

      if (have_whole_vector_shift)
        {
          /*** Case 2: Create:
             for (offset = VS/2; offset >= element_size; offset/=2)
                {
                  Create:  va' = vec_shift <va, offset>
                  Create:  va = vop <va, va'>
                }  */

          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "Reduce using vector shifts");

          vec_dest = vect_create_destination_var (scalar_dest, vectype);
          new_temp = PHI_RESULT (new_phi);

          for (bit_offset = vec_size_in_bits / 2;
               bit_offset >= element_bitsize;
               bit_offset /= 2)
            {
              tree bitpos = size_int (bit_offset);
              epilog_stmt = gimple_build_assign_with_ops (shift_code, vec_dest,
                                                          new_temp, bitpos);
              new_name = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_name);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
                                                          new_name, new_temp);
              new_temp = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }

          extract_scalar_result = true;
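
          /* A concrete trace of the loop above, for illustration: with
             V4SI (vec_size_in_bits == 128, element_bitsize == 32) and
             code == PLUS_EXPR, two rounds are emitted:
               va' = vec_shift <va, 64>    va = va + va'
               va' = vec_shift <va, 32>    va = va + va'
             after which the first element of va holds the sum of all
             four partial results, and only the extraction in 2.4 below
             remains.  */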
        }
      else
        {
          /*** Case 3: Create:
             s = extract_field <v_out2, 0>
             for (offset = element_size;
                  offset < vector_size;
                  offset += element_size;)
               {
                 Create:  s' = extract_field <v_out2, offset>
                 Create:  s = op <s, s'>
               }  */

          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "Reduce using scalar code. ");

          vec_temp = PHI_RESULT (new_phi);
          vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
          rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
                        bitsize_zero_node);
          epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
          new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
          gimple_assign_set_lhs (epilog_stmt, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

          for (bit_offset = element_bitsize;
               bit_offset < vec_size_in_bits;
               bit_offset += element_bitsize)
            {
              tree bitpos = bitsize_int (bit_offset);
              tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
                                 bitpos);

              epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
              new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_name);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              epilog_stmt = gimple_build_assign_with_ops (code,
                                                          new_scalar_dest,
                                                          new_name, new_temp);
              new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }

          extract_scalar_result = false;
        }
    }
  /* 2.4  Extract the final scalar result.  Create:
          s_out3 = extract_field <v_out2, bitpos>  */

  if (extract_scalar_result)
    {
      gcc_assert (!nested_in_vect_loop);
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "extract scalar result");

      if (BYTES_BIG_ENDIAN)
        bitpos = size_binop (MULT_EXPR,
                             bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
                             TYPE_SIZE (scalar_type));
      else
        bitpos = bitsize_zero_node;

      rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
      epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    }

vect_finalize_reduction:

  /* 2.5 Adjust the final result by the initial value of the reduction
         variable. (When such adjustment is not needed, then
         'adjustment_def' is zero).  For example, if code is PLUS we create:
         new_temp = loop_exit_def + adjustment_def  */

  if (adjustment_def)
    {
      if (nested_in_vect_loop)
        {
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
          expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, vectype);
        }
      else
        {
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
          expr = build2 (code, scalar_type, new_temp, adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, scalar_type);
        }
      epilog_stmt = gimple_build_assign (new_dest, expr);
      new_temp = make_ssa_name (new_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    }


  /* 2.6  Handle the loop-exit phi  */

  /* Replace uses of s_out0 with uses of s_out3:
     Find the loop-closed-use at the loop exit of the original scalar result.
     (The reduction result is expected to have two immediate uses - one at the
     latch block, and one at the loop exit).  */
  phis = VEC_alloc (gimple, heap, 10);
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
    {
      if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
        {
          exit_phi = USE_STMT (use_p);
          VEC_quick_push (gimple, phis, exit_phi);
        }
    }

  /* We expect to have found an exit_phi because of loop-closed-ssa form.  */
  gcc_assert (!VEC_empty (gimple, phis));

  for (i = 0; VEC_iterate (gimple, phis, i, exit_phi); i++)
    {
      if (nested_in_vect_loop)
        {
          stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);

          /* FORNOW. Currently not supporting the case that an inner-loop
             reduction is not used in the outer-loop (but only outside the
             outer-loop).  */
          gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
                      && !STMT_VINFO_LIVE_P (stmt_vinfo));

          epilog_stmt = adjustment_def ? epilog_stmt : new_phi;
          STMT_VINFO_VEC_STMT (stmt_vinfo) = epilog_stmt;
          set_vinfo_for_stmt (epilog_stmt,
                              new_stmt_vec_info (epilog_stmt, loop_vinfo));
          if (adjustment_def)
            STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
                STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
          continue;
        }

      /* Replace the uses:  */
      orig_name = PHI_RESULT (exit_phi);
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
        FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
          SET_USE (use_p, new_temp);
    }
  VEC_free (gimple, heap, phis);
}
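
/* An executable illustration of scheme 2 (and the extraction step), not
   used by the compiler itself: the whole-vector shift is emulated here
   with __builtin_shuffle (hypothetical names; assumes a GCC that
   supports __builtin_shuffle and vector subscripting).  */
#if 0
typedef int v4si __attribute__ ((vector_size (16)));

static int
reduce_plus_sketch (v4si va)
{
  /* Round 1: shift the vector down by two lanes, then add.  */
  v4si t = __builtin_shuffle (va, (v4si) { 2, 3, 2, 3 });
  va += t;
  /* Round 2: shift down by one lane, then add.  */
  t = __builtin_shuffle (va, (v4si) { 1, 1, 1, 1 });
  va += t;
  /* Step 2.4: extract the scalar result from the first element.  */
  return va[0];
}
#endif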
/* Function vectorizable_reduction.

   Check if STMT performs a reduction operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.

   This function also handles reduction idioms (patterns) that have been
   recognized in advance during vect_pattern_recog.  In this case, STMT may be
   of this form:
     X = pattern_expr (arg0, arg1, ..., X)
   and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
   sequence that had been detected and replaced by the pattern-stmt (STMT).

   In some cases of reduction patterns, the type of the reduction variable X is
   different than the type of the other arguments of STMT.
   In such cases, the vectype that is used when transforming STMT into a vector
   stmt is different than the vectype that is used to determine the
   vectorization factor, because it consists of a different number of elements
   than the actual number of elements that are being operated upon in parallel.

   For example, consider an accumulation of shorts into an int accumulator.
   On some targets it's possible to vectorize this pattern operating on 8
   shorts at a time (hence, the vectype for purposes of determining the
   vectorization factor should be V8HI); on the other hand, the vectype that
   is used to create the vector form is actually V4SI (the type of the result).

   Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
   indicates what is the actual level of parallelism (V8HI in the example), so
   that the right vectorization factor would be derived.  This vectype
   corresponds to the type of arguments to the reduction stmt, and should *NOT*
   be used to create the vectorized stmt.  The right vectype for the vectorized
   stmt is obtained from the type of the result X:
      get_vectype_for_scalar_type (TREE_TYPE (X))

   This means that, contrary to "regular" reductions (or "regular" stmts in
   general), the following equation:
      STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
   does *NOT* necessarily hold for reduction patterns.  */
static bool
vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  enum tree_code code, orig_code, epilog_reduc_code = 0;
  enum machine_mode vec_mode;
  int op_type;
  optab optab, reduc_optab;
  tree new_temp = NULL_TREE;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  gimple new_phi = NULL;
  tree scalar_type;
  bool is_simple_use;
  gimple orig_stmt;
  stmt_vec_info orig_stmt_info;
  tree expr = NULL_TREE;
  int i;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  int epilog_copies;
  stmt_vec_info prev_stmt_info, prev_phi_info;
  gimple first_phi = NULL;
  bool single_defuse_cycle = false;
  int j;
  tree ops[3];
  tree reduc_def;
  gimple new_stmt = NULL;

  if (nested_in_vect_loop_p (loop, stmt))
    loop = loop->inner;

  gcc_assert (ncopies >= 1);

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* 1. Is vectorizable reduction?  */

  /* Not supportable if the reduction variable is used in the loop.  */
  if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer)
    return false;

  /* Reductions that are not used even in an enclosing outer-loop,
     are expected to be "live" (used out of the loop).  */
  if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_loop
      && !STMT_VINFO_LIVE_P (stmt_info))
    return false;

  /* Make sure it was already recognized as a reduction computation.  */
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def)
    return false;

  /* 2. Has this been recognized as a reduction pattern?

     Check if STMT represents a pattern that has been recognized
     in earlier analysis stages.  For stmts that represent a pattern,
     the STMT_VINFO_RELATED_STMT field records the last stmt in
     the original sequence that constitutes the pattern.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (orig_stmt)
    {
      orig_stmt_info = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
      gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
    }

  /* 3. Check the operands of the operation.  The first operands are defined
        inside the loop body.  The last operand is the reduction variable,
        which is defined by the loop-header-phi.  */

  gcc_assert (is_gimple_assign (stmt));

  /* Flatten RHS  */
  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
      if (op_type == ternary_op)
        {
          tree rhs = gimple_assign_rhs1 (stmt);
          ops[0] = TREE_OPERAND (rhs, 0);
          ops[1] = TREE_OPERAND (rhs, 1);
          ops[2] = TREE_OPERAND (rhs, 2);
          code = TREE_CODE (rhs);
        }
      else
        return false;
      break;

    case GIMPLE_BINARY_RHS:
      code = gimple_assign_rhs_code (stmt);
      op_type = TREE_CODE_LENGTH (code);
      gcc_assert (op_type == binary_op);
      ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);
      break;

    case GIMPLE_UNARY_RHS:
      return false;

    default:
      gcc_unreachable ();
    }

  scalar_dest = gimple_assign_lhs (stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
      && !SCALAR_FLOAT_TYPE_P (scalar_type))
    return false;

  /* All uses but the last are expected to be defined in the loop.
     The last use is the reduction variable.  */
  for (i = 0; i < op_type - 1; i++)
    {
      is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt,
                                          &def, &dt);
      gcc_assert (is_simple_use);
      if (dt != vect_loop_def
          && dt != vect_invariant_def
          && dt != vect_constant_def
          && dt != vect_induction_def)
        return false;
    }

  is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt, &def, &dt);
  gcc_assert (is_simple_use);
  gcc_assert (dt == vect_reduction_def);
  gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
  if (orig_stmt)
    gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo, def_stmt));
  else
    gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, def_stmt));

  if (STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
    return false;

  /* 4. Supportable by target?  */

  /* 4.1. check support for the operation in the loop  */
  optab = optab_for_tree_code (code, vectype, optab_default);
  if (!optab)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no optab.");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  if (optab_handler (optab, vec_mode)->insn_code == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "op not supported by target.");
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
             < vect_min_worthwhile_factor (code))
        return false;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "proceeding using word mode.");
    }

  /* Worthwhile without SIMD support?  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
         < vect_min_worthwhile_factor (code))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not worthwhile without SIMD support.");
      return false;
    }

  /* 4.2. Check support for the epilog operation.

          If STMT represents a reduction pattern, then the type of the
          reduction variable may be different than the type of the rest
          of the arguments.  For example, consider the case of accumulation
          of shorts into an int accumulator; the original code:
                        S1: int_a = (int) short_a;
          orig_stmt->   S2: int_acc = plus <int_a ,int_acc>;

          was replaced with:
                        STMT: int_acc = widen_sum <short_a, int_acc>

          This means that:
          1. The tree-code that is used to create the vector operation in the
             epilog code (that reduces the partial results) is not the
             tree-code of STMT, but is rather the tree-code of the original
             stmt from the pattern that STMT is replacing.  I.e, in the example
             above we want to use 'widen_sum' in the loop, but 'plus' in the
             epilog.
          2. The type (mode) we use to check available target support
             for the vector operation to be created in the *epilog*, is
             determined by the type of the reduction variable (in the example
             above we'd check this: plus_optab[vect_int_mode]).
             However the type (mode) we use to check available target support
             for the vector operation to be created *inside the loop*, is
             determined by the type of the other arguments to STMT (in the
             example we'd check this: widen_sum_optab[vect_short_mode]).

          This is contrary to "regular" reductions, in which the types of all
          the arguments are the same as the type of the reduction variable.
          For "regular" reductions we can therefore use the same vector type
          (and also the same tree-code) when generating the epilog code and
          when generating the code inside the loop.  */

  if (orig_stmt)
    {
      /* This is a reduction pattern: get the vectype from the type of the
         reduction variable, and get the tree-code from orig_stmt.  */
      orig_code = gimple_assign_rhs_code (orig_stmt);
      vectype = get_vectype_for_scalar_type (TREE_TYPE (def));
      if (!vectype)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "unsupported data-type ");
              print_generic_expr (vect_dump, TREE_TYPE (def), TDF_SLIM);
            }
          return false;
        }

      vec_mode = TYPE_MODE (vectype);
    }
  else
    {
      /* Regular reduction: use the same vectype and tree-code as used for
         the vector code inside the loop can be used for the epilog code.  */
      orig_code = code;
    }

  if (!reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
    return false;
  reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype, optab_default);
  if (!reduc_optab)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no optab for reduction.");
      epilog_reduc_code = NUM_TREE_CODES;
    }
  if (optab_handler (reduc_optab, vec_mode)->insn_code == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc op not supported by target.");
      epilog_reduc_code = NUM_TREE_CODES;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
      if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
        return false;
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform reduction.");

  /* Create the destination vector  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  /* If the reduction is used in an outer loop we need to generate
     VF intermediate results, like so (e.g. for ncopies=2):
        r0 = phi (init, r0)
        r1 = phi (init, r1)
        r0 = x0 + r0
        r1 = x1 + r1
     (i.e. we generate VF results in 2 registers).
     In this case we have a separate def-use cycle for each copy, and therefore
     for each copy we get the vector def for the reduction variable from the
     respective phi node created for this copy.

     Otherwise (the reduction is unused in the loop nest), we can combine
     together intermediate results, like so (e.g. for ncopies=2):
        r = phi (init, r)
        r = x0 + r
        r = x1 + r
     (i.e. we generate VF/2 results in a single register).
     In this case for each copy we get the vector def for the reduction
     variable from the vectorized reduction operation generated in the
     previous iteration.  */

  if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_loop)
    {
      single_defuse_cycle = true;
      epilog_copies = 1;
    }
  else
    epilog_copies = ncopies;

  prev_stmt_info = NULL;
  prev_phi_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      if (j == 0 || !single_defuse_cycle)
        {
          /* Create the reduction-phi that defines the reduction-operand.  */
          new_phi = create_phi_node (vec_dest, loop->header);
          set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo));
        }

      /* Handle uses.  */
      if (j == 0)
        {
          loop_vec_def0 = vect_get_vec_def_for_operand (ops[0], stmt, NULL);
          if (op_type == ternary_op)
            {
              loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt, NULL);
            }

          /* Get the vector def for the reduction variable from the phi node */
          reduc_def = PHI_RESULT (new_phi);
          first_phi = new_phi;
        }
      else
        {
          enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
          loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
          if (op_type == ternary_op)
            loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def1);

          if (single_defuse_cycle)
            reduc_def = gimple_assign_lhs (new_stmt);
          else
            reduc_def = PHI_RESULT (new_phi);

          STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      if (op_type == binary_op)
        expr = build2 (code, vectype, loop_vec_def0, reduc_def);
      else
        expr = build3 (code, vectype, loop_vec_def0, loop_vec_def1,
                       reduc_def);
      new_stmt = gimple_build_assign (vec_dest, expr);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
      prev_phi_info = vinfo_for_stmt (new_phi);
    }

  /* Finalize the reduction-phi (set its arguments) and create the
     epilog reduction code.  */
  if (!single_defuse_cycle)
    new_temp = gimple_assign_lhs (*vec_stmt);
  vect_create_epilog_for_reduction (new_temp, stmt, epilog_copies,
                                    epilog_reduc_code, first_phi);

  return true;
}
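
/* An illustration of the reduction pattern discussed above, not used by
   the compiler itself: accumulation of shorts into an int accumulator.
   vect_pattern_recog replaces the add in the loop below with
   'acc = widen_sum <a[i], acc>', so the VF is derived from the 8-element
   short vectype (V8HI) while the accumulator vector is V4SI.  */
#if 0
static int
widen_sum_sketch (const short *a, int n)
{
  int acc = 0;  /* the reduction variable, wider than the elements  */
  int i;

  for (i = 0; i < n; i++)
    acc += a[i];
  return acc;
}
#endif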
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);
  enum built_in_function code;

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  code = DECL_FUNCTION_CODE (fndecl);
  return targetm.vectorize.builtin_vectorized_function (code, vectype_out,
                                                        vectype_in);
}
/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type, lhs_type;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt;
  int ncopies, j;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than two arguments; we do not have
     interesting builtin functions to vectorize with more than two arguments,
     and no arguments is also not good.  */
  if (nargs == 0 || nargs > 2)
    return false;

  for (i = 0; i < nargs; i++)
    {
      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && rhs_type != TREE_TYPE (op))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument types differ.");
          return false;
        }
      rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt[i]))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }
    }

  vectype_in = get_vectype_for_scalar_type (rhs_type);
  if (!vectype_in)
    return false;
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);

  lhs_type = TREE_TYPE (gimple_call_lhs (stmt));
  vectype_out = get_vectype_for_scalar_type (lhs_type);
  if (!vectype_out)
    return false;
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);

  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");
      return false;
    }

  gcc_assert (ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS));

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform operation.");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt, NULL);
              else
                vec_oprnd0
                  = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0);

              VEC_quick_push (tree, vargs, vec_oprnd0);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs * 2);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt, NULL);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0);
                }
              else
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
              VEC_quick_push (tree, vargs, vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  fold_convert (type, integer_zero_node));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}
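
/* An illustration of a call this function can vectorize, not used by
   the compiler itself: a const math builtin with two same-typed
   arguments.  Whether it is actually vectorized depends on the target
   hook targetm.vectorize.builtin_vectorized_function.  */
#if 0
#include <math.h>

static void
call_loop_sketch (float *restrict out, const float *a, const float *b, int n)
{
  int i;
  for (i = 0; i < n; i++)
    out[i] = copysignf (a[i], b[i]);  /* two args, one type, ECF_CONST  */
}
#endif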
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at GSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple
vect_gen_widened_results_half (enum tree_code code,
                               tree decl,
                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
                               tree vec_dest, gimple_stmt_iterator *gsi,
                               gimple stmt)
{
  gimple new_stmt;
  tree new_temp;
  tree sym;
  ssa_op_iter iter;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support  */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
        vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
                                               vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  if (code == CALL_EXPR)
    {
      FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, SSA_OP_ALL_VIRTUALS)
        {
          if (TREE_CODE (sym) == SSA_NAME)
            sym = SSA_NAME_VAR (sym);
          mark_sym_for_renaming (sym);
        }
    }

  return new_stmt;
}
/* Function vectorizable_conversion.

   Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
                         gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, j;
  tree expr;
  tree rhs_type, lhs_type;
  tree builtin_decl;
  tree integral_type;
  enum { NARROW, NONE, WIDEN } modifier;
  int i;
  VEC(tree,heap) *vec_oprnds0 = NULL;
  tree vop0;
  tree sym;
  ssa_op_iter iter;
  int dummy_int;
  VEC(tree,heap) *dummy = NULL;

  /* Is STMT a vectorizable conversion?  */

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
    return false;

  /* Check types of lhs and rhs.  */
  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);
  vectype_in = get_vectype_for_scalar_type (rhs_type);
  if (!vectype_in)
    return false;
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);

  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = get_vectype_for_scalar_type (lhs_type);
  if (!vectype_out)
    return false;
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);

  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  if (modifier == NONE)
    gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out);

  /* Bail out if the types are both integral or non-integral.  */
  if ((INTEGRAL_TYPE_P (rhs_type) && INTEGRAL_TYPE_P (lhs_type))
      || (!INTEGRAL_TYPE_P (rhs_type) && !INTEGRAL_TYPE_P (lhs_type)))
    return false;

  integral_type = INTEGRAL_TYPE_P (rhs_type) ? vectype_in : vectype_out;

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* FORNOW: SLP with multiple types is not supported.  The SLP analysis
     verifies this, so we can safely override NCOPIES with 1 here.  */
  if (slp_node)
    ncopies = 1;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0]))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  /* Supportable by target?  */
  if ((modifier == NONE
       && !targetm.vectorize.builtin_conversion (code, integral_type))
      || (modifier == WIDEN
          && !supportable_widening_operation (code, stmt, vectype_in,
                                              &decl1, &decl2,
                                              &code1, &code2,
                                              &dummy_int, &dummy))
      || (modifier == NARROW
          && !supportable_narrowing_operation (code, stmt, vectype_in,
                                               &code1, &dummy_int, &dummy)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "conversion not supported by target.");
      return false;
    }

  if (modifier != NONE)
    {
      STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
      /* FORNOW: SLP not supported.  */
      if (STMT_SLP_TYPE (stmt_info))
        return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform conversion.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  if (modifier == NONE && !slp_node)
    vec_oprnds0 = VEC_alloc (tree, heap, 1);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (j == 0)
            vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
          else
            vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

          builtin_decl =
            targetm.vectorize.builtin_conversion (code, integral_type);
          for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
            {
              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_call (builtin_decl, 1, vop0);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter,
                                         SSA_OP_ALL_VIRTUALS)
                {
                  if (TREE_CODE (sym) == SSA_NAME)
                    sym = SSA_NAME_VAR (sym);
                  mark_sym_for_renaming (sym);
                }
              if (slp_node)
                VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
            }

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (j == 0)
            vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
          else
            vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);

          STMT_VINFO_VECTYPE (stmt_info) = vectype_in;

          /* Generate first half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code1, decl1,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);

          /* Generate second half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code2, decl2,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (j == 0)
            {
              vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }

          /* Arguments are ready.  Create the new vector stmt.  */
          expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1);
          new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
                                                   vec_oprnd1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  VEC_free (tree, heap, vec_oprnds0);

  return true;
}
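
/* An illustration of the MODIFIER classification above, not used by the
   compiler itself.  Assuming 128-bit vectors:

     int    -> float    V4SI -> V4SF   nunits_out == nunits_in      NONE
     int    -> double   V4SI -> V2DF   nunits_out == nunits_in / 2  WIDEN
     double -> int      V2DF -> V4SI   nunits_in == nunits_out / 2  NARROW

   E.g. a WIDEN conversion, in which every input vector produces two
   "halves" of the widened result:  */
#if 0
static void
widen_float_sketch (double *restrict out, const int *in, int n)
{
  int i;
  for (i = 0; i < n; i++)
    out[i] = (double) in[i];  /* FLOAT_EXPR, modifier == WIDEN  */
}
#endif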
3791 /* Function vectorizable_assignment.
3793 Check if STMT performs an assignment (copy) that can be vectorized.
3794 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3795 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3796 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
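/* A minimal example (names illustrative): for a scalar copy
     S: a = b;
   the transformation below emits
     VS: va = vb;
   where 'vb' is the vector def obtained for 'b' and 'va' is a fresh
   vector SSA name created from the destination variable. */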
3799 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
3800 gimple *vec_stmt, slp_tree slp_node)
3805 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3806 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3807 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3811 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3812 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3815 VEC(tree,heap) *vec_oprnds = NULL;
3818 /* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
case of SLP. */
3824 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3826 gcc_assert (ncopies >= 1);
3828 return false; /* FORNOW */
3830 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3833 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
3836 /* Is vectorizable assignment? */
3837 if (!is_gimple_assign (stmt))
3840 scalar_dest = gimple_assign_lhs (stmt);
3841 if (TREE_CODE (scalar_dest) != SSA_NAME)
3844 if (gimple_assign_single_p (stmt)
3845 || gimple_assign_rhs_code (stmt) == PAREN_EXPR)
3846 op = gimple_assign_rhs1 (stmt);
3850 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt[0]))
3852 if (vect_print_dump_info (REPORT_DETAILS))
3853 fprintf (vect_dump, "use not simple.");
3857 if (!vec_stmt) /* transformation not required. */
3859 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
3860 if (vect_print_dump_info (REPORT_DETAILS))
3861 fprintf (vect_dump, "=== vectorizable_assignment ===");
3862 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
3867 if (vect_print_dump_info (REPORT_DETAILS))
3868 fprintf (vect_dump, "transform assignment.");
3871 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3874 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
/* Arguments are ready. Create the new vector stmt. */
3877 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
3879 *vec_stmt = gimple_build_assign (vec_dest, vop);
3880 new_temp = make_ssa_name (vec_dest, *vec_stmt);
3881 gimple_assign_set_lhs (*vec_stmt, new_temp);
3882 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
3883 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt;
3886 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), *vec_stmt);
3889 VEC_free (tree, heap, vec_oprnds);
3894 /* Function vect_min_worthwhile_factor.
3896 For a loop where we could vectorize the operation indicated by CODE,
3897 return the minimum vectorization factor that makes it worthwhile
3898 to use generic vectors. */
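/* Example of the intent (no target-specific numbers assumed here): if a
   vector operation would merely be emulated piecewise in scalar
   registers, a small vectorization factor cannot amortize the
   packing/unpacking overhead, so this function returns a threshold below
   which the loop is better left scalar. */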
3900 vect_min_worthwhile_factor (enum tree_code code)
3921 /* Function vectorizable_induction
3923 Check if PHI performs an induction computation that can be vectorized.
3924 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
3925 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
3926 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
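/* Illustrative example (assuming VF = 4): for the IV in
     for (i = 0; i < N; i++)  a[i] = i;
   the vectorized PHI starts from the initial def {0,1,2,3} and each copy
   advances by the vector step {4,4,4,4}; the details are handled by
   get_initial_def_for_induction. */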
3929 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
3932 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
3933 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3934 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3935 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3936 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3937 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3940 gcc_assert (ncopies >= 1);
3941 /* FORNOW. This restriction should be relaxed. */
3942 if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
3944 if (vect_print_dump_info (REPORT_DETAILS))
3945 fprintf (vect_dump, "multiple types in nested loop.");
3949 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3952 /* FORNOW: SLP not supported. */
3953 if (STMT_SLP_TYPE (stmt_info))
3956 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
3958 if (gimple_code (phi) != GIMPLE_PHI)
3961 if (!vec_stmt) /* transformation not required. */
3963 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
3964 if (vect_print_dump_info (REPORT_DETAILS))
3965 fprintf (vect_dump, "=== vectorizable_induction ===");
3966 vect_model_induction_cost (stmt_info, ncopies);
3972 if (vect_print_dump_info (REPORT_DETAILS))
3973 fprintf (vect_dump, "transform induction phi.");
3975 vec_def = get_initial_def_for_induction (phi);
3976 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
3981 /* Function vectorizable_operation.
3983 Check if STMT performs a binary or unary operation that can be vectorized.
3984 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3985 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3986 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3989 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
3990 gimple *vec_stmt, slp_tree slp_node)
3994 tree op0, op1 = NULL;
3995 tree vec_oprnd1 = NULL_TREE;
3996 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3997 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3998 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3999 enum tree_code code;
4000 enum machine_mode vec_mode;
4005 enum machine_mode optab_op2_mode;
4008 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4009 gimple new_stmt = NULL;
4010 stmt_vec_info prev_stmt_info;
4011 int nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4016 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
4019 bool shift_p = false;
4020 bool scalar_shift_arg = false;
4022 /* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
case of SLP. */
4028 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4030 gcc_assert (ncopies >= 1);
4032 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4035 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
4038 /* Is STMT a vectorizable binary/unary operation? */
4039 if (!is_gimple_assign (stmt))
4042 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4045 scalar_dest = gimple_assign_lhs (stmt);
4046 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
4049 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4050 if (nunits_out != nunits_in)
4053 code = gimple_assign_rhs_code (stmt);
4055 /* For pointer addition, we should use the normal plus for
4056 the vector addition. */
4057 if (code == POINTER_PLUS_EXPR)
4060 /* Support only unary or binary operations. */
4061 op_type = TREE_CODE_LENGTH (code);
4062 if (op_type != unary_op && op_type != binary_op)
4064 if (vect_print_dump_info (REPORT_DETAILS))
4065 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
4069 op0 = gimple_assign_rhs1 (stmt);
4070 if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0]))
4072 if (vect_print_dump_info (REPORT_DETAILS))
4073 fprintf (vect_dump, "use not simple.");
4077 if (op_type == binary_op)
4079 op1 = gimple_assign_rhs2 (stmt);
4080 if (!vect_is_simple_use (op1, loop_vinfo, &def_stmt, &def, &dt[1]))
4082 if (vect_print_dump_info (REPORT_DETAILS))
4083 fprintf (vect_dump, "use not simple.");
/* If this is a shift/rotate, determine whether the shift amount is a vector,
or scalar. If the shift/rotate amount is a vector, use the vector/vector
shift optabs. */
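/* E.g. (illustrative sources): in 'a[i] = b[i] << c[i]' the shift amount
   is a loop def, so a vector/vector shift is required, while
   'a[i] = b[i] << 3' can use a vector/scalar shift when the target
   provides one. */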
4091 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4092 || code == RROTATE_EXPR)
4096 /* vector shifted by vector */
4097 if (dt[1] == vect_loop_def)
4099 optab = optab_for_tree_code (code, vectype, optab_vector);
4100 if (vect_print_dump_info (REPORT_DETAILS))
4101 fprintf (vect_dump, "vector/vector shift/rotate found.");
4104 /* See if the machine has a vector shifted by scalar insn and if not
4105 then see if it has a vector shifted by vector insn */
4106 else if (dt[1] == vect_constant_def || dt[1] == vect_invariant_def)
4108 optab = optab_for_tree_code (code, vectype, optab_scalar);
4110 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
4111 != CODE_FOR_nothing))
4113 scalar_shift_arg = true;
4114 if (vect_print_dump_info (REPORT_DETAILS))
4115 fprintf (vect_dump, "vector/scalar shift/rotate found.");
4119 optab = optab_for_tree_code (code, vectype, optab_vector);
4120 if (vect_print_dump_info (REPORT_DETAILS)
4122 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
4123 != CODE_FOR_nothing))
4124 fprintf (vect_dump, "vector/vector shift/rotate found.");
4130 if (vect_print_dump_info (REPORT_DETAILS))
4131 fprintf (vect_dump, "operand mode requires invariant argument.");
4136 optab = optab_for_tree_code (code, vectype, optab_default);
4138 /* Supportable by target? */
4141 if (vect_print_dump_info (REPORT_DETAILS))
4142 fprintf (vect_dump, "no optab.");
4145 vec_mode = TYPE_MODE (vectype);
4146 icode = (int) optab_handler (optab, vec_mode)->insn_code;
4147 if (icode == CODE_FOR_nothing)
4149 if (vect_print_dump_info (REPORT_DETAILS))
4150 fprintf (vect_dump, "op not supported by target.");
4151 /* Check only during analysis. */
4152 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4153 || (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4154 < vect_min_worthwhile_factor (code)
4157 if (vect_print_dump_info (REPORT_DETAILS))
4158 fprintf (vect_dump, "proceeding using word mode.");
4161 /* Worthwhile without SIMD support? Check only during analysis. */
4162 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4163 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4164 < vect_min_worthwhile_factor (code)
4167 if (vect_print_dump_info (REPORT_DETAILS))
4168 fprintf (vect_dump, "not worthwhile without SIMD support.");
4172 if (!vec_stmt) /* transformation not required. */
4174 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
4175 if (vect_print_dump_info (REPORT_DETAILS))
4176 fprintf (vect_dump, "=== vectorizable_operation ===");
4177 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
4183 if (vect_print_dump_info (REPORT_DETAILS))
4184 fprintf (vect_dump, "transform binary/unary operation.");
4187 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4189 /* Allocate VECs for vector operands. In case of SLP, vector operands are
4190 created in the previous stages of the recursion, so no allocation is
4191 needed, except for the case of shift with scalar shift argument. In that
4192 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
4193 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
4194 In case of loop-based vectorization we allocate VECs of size 1. We
4195 allocate VEC_OPRNDS1 only in case of binary operation. */
4198 vec_oprnds0 = VEC_alloc (tree, heap, 1);
4199 if (op_type == binary_op)
4200 vec_oprnds1 = VEC_alloc (tree, heap, 1);
4202 else if (scalar_shift_arg)
4203 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
4205 /* In case the vectorization factor (VF) is bigger than the number
4206 of elements that we can fit in a vectype (nunits), we have to generate
more than one vector stmt - i.e., we need to "unroll" the
4208 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4209 from one copy of the vector stmt to the next, in the field
4210 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4211 stages to find the correct vector defs to be used when vectorizing
4212 stmts that use the defs of the current stmt. The example below illustrates
the vectorization process when VF=16 and nunits=4 (i.e., we need to create
4214 4 vectorized stmts):
4216 before vectorization:
4217 RELATED_STMT VEC_STMT
4221 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4223 RELATED_STMT VEC_STMT
4224 VS1_0: vx0 = memref0 VS1_1 -
4225 VS1_1: vx1 = memref1 VS1_2 -
4226 VS1_2: vx2 = memref2 VS1_3 -
4227 VS1_3: vx3 = memref3 - -
4228 S1: x = load - VS1_0
step 2: vectorize stmt S2 (done here):
4232 To vectorize stmt S2 we first need to find the relevant vector
4233 def for the first operand 'x'. This is, as usual, obtained from
4234 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4235 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4236 relevant vector def 'vx0'. Having found 'vx0' we can generate
4237 the vector stmt VS2_0, and as usual, record it in the
4238 STMT_VINFO_VEC_STMT of stmt S2.
4239 When creating the second copy (VS2_1), we obtain the relevant vector
4240 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4241 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4242 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4243 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4244 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4245 chain of stmts and pointers:
4246 RELATED_STMT VEC_STMT
4247 VS1_0: vx0 = memref0 VS1_1 -
4248 VS1_1: vx1 = memref1 VS1_2 -
4249 VS1_2: vx2 = memref2 VS1_3 -
4250 VS1_3: vx3 = memref3 - -
4251 S1: x = load - VS1_0
4252 VS2_0: vz0 = vx0 + v1 VS2_1 -
4253 VS2_1: vz1 = vx1 + v1 VS2_2 -
4254 VS2_2: vz2 = vx2 + v1 VS2_3 -
4255 VS2_3: vz3 = vx3 + v1 - -
4256 S2: z = x + 1 - VS2_0 */
4258 prev_stmt_info = NULL;
4259 for (j = 0; j < ncopies; j++)
4264 if (op_type == binary_op && scalar_shift_arg)
4266 /* Vector shl and shr insn patterns can be defined with scalar
4267 operand 2 (shift operand). In this case, use constant or loop
4268 invariant op1 directly, without extending it to vector mode
4270 optab_op2_mode = insn_data[icode].operand[2].mode;
4271 if (!VECTOR_MODE_P (optab_op2_mode))
4273 if (vect_print_dump_info (REPORT_DETAILS))
4274 fprintf (vect_dump, "operand 1 using scalar mode.");
4276 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
4279 /* Store vec_oprnd1 for every vector stmt to be created
4280 for SLP_NODE. We check during the analysis that all the
4281 shift arguments are the same.
4282 TODO: Allow different constants for different vector
4283 stmts generated for an SLP instance. */
4284 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4285 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
4290 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4291 (a special case for certain kind of vector shifts); otherwise,
4292 operand 1 should be of a vector type (the usual case). */
4293 if (op_type == binary_op && !vec_oprnd1)
4294 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4297 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4301 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4303 /* Arguments are ready. Create the new vector stmt. */
4304 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
4306 vop1 = ((op_type == binary_op)
4307 ? VEC_index (tree, vec_oprnds1, i) : NULL);
4308 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
4309 new_temp = make_ssa_name (vec_dest, new_stmt);
4310 gimple_assign_set_lhs (new_stmt, new_temp);
4311 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4313 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4320 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4322 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4323 prev_stmt_info = vinfo_for_stmt (new_stmt);
4326 VEC_free (tree, heap, vec_oprnds0);
4328 VEC_free (tree, heap, vec_oprnds1);
4334 /* Get vectorized definitions for loop-based vectorization. For the first
4335 operand we call vect_get_vec_def_for_operand() (with OPRND containing
4336 scalar operand), and for the rest we get a copy with
4337 vect_get_vec_def_for_stmt_copy() using the previous vector definition
4338 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
4339 The vectors are collected into VEC_OPRNDS. */
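/* Illustrative use: each invocation pushes two defs, so for a two-step
   demotion (see the call in vectorizable_type_demotion) the recursion
   collects the four input vectors that one result vector requires. */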
4342 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
4343 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
4347 /* Get first vector operand. */
/* All the vector operands except the very first one (which is the scalar
oprnd) are stmt copies. */
4350 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
4351 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
4353 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
4355 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
4357 /* Get second vector operand. */
4358 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
4359 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
/* For conversion in multiple steps, continue to get operands recursively. */
4366 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
4370 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
For multi-step conversions store the resulting vectors and call the function
recursively. */
4375 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
4376 int multi_step_cvt, gimple stmt,
4377 VEC (tree, heap) *vec_dsts,
4378 gimple_stmt_iterator *gsi,
4379 slp_tree slp_node, enum tree_code code,
4380 stmt_vec_info *prev_stmt_info)
4383 tree vop0, vop1, new_tmp, vec_dest;
4385 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4387 vec_dest = VEC_pop (tree, vec_dsts);
4389 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
4391 /* Create demotion operation. */
4392 vop0 = VEC_index (tree, *vec_oprnds, i);
4393 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
4394 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
4395 new_tmp = make_ssa_name (vec_dest, new_stmt);
4396 gimple_assign_set_lhs (new_stmt, new_tmp);
4397 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4400 /* Store the resulting vector for next recursive call. */
4401 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
4404 /* This is the last step of the conversion sequence. Store the
vectors in SLP_NODE or in the vector info of the scalar statement
4406 (or in STMT_VINFO_RELATED_STMT chain). */
4408 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4411 if (!*prev_stmt_info)
4412 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4414 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
4416 *prev_stmt_info = vinfo_for_stmt (new_stmt);
4421 /* For multi-step demotion operations we first generate demotion operations
4422 from the source type to the intermediate types, and then combine the
results (stored in VEC_OPRNDS) in a demotion operation to the destination
type. */
/* At each level of recursion we have half of the operands we had at the
previous level. */
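/* E.g. (vector types assumed): demoting int to char with VF = 16 starts
   from four V4SI operands; the first level narrows them to two V8HI
   vectors and the second level to the single V16QI result. */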
4429 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
4430 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
4431 stmt, vec_dsts, gsi, slp_node,
4432 code, prev_stmt_info);
4437 /* Function vectorizable_type_demotion
4439 Check if STMT performs a binary or unary operation that involves
4440 type demotion, and if it can be vectorized.
4441 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4442 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4443 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
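/* Illustrative example (types assumed): for
     S: s = (short) i;
   with V4SI defs of 'i' and a V8HI result, each pair of input vectors is
   packed into one output vector, typically by the VEC_PACK_TRUNC_EXPR
   code returned by supportable_narrowing_operation. */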
4446 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
4447 gimple *vec_stmt, slp_tree slp_node)
4452 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4453 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4454 enum tree_code code, code1 = ERROR_MARK;
4457 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4458 stmt_vec_info prev_stmt_info;
4465 int multi_step_cvt = 0;
4466 VEC (tree, heap) *vec_oprnds0 = NULL;
4467 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
4468 tree last_oprnd, intermediate_type;
4470 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4473 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
4476 /* Is STMT a vectorizable type-demotion operation? */
4477 if (!is_gimple_assign (stmt))
4480 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4483 code = gimple_assign_rhs_code (stmt);
4484 if (!CONVERT_EXPR_CODE_P (code))
4487 op0 = gimple_assign_rhs1 (stmt);
4488 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
4491 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4493 scalar_dest = gimple_assign_lhs (stmt);
4494 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
4497 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4498 if (nunits_in >= nunits_out)
4501 /* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
case of SLP. */
4507 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
4509 gcc_assert (ncopies >= 1);
4511 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4512 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
4513 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
4514 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
4515 && CONVERT_EXPR_CODE_P (code))))
4518 /* Check the operands of the operation. */
4519 if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0]))
4521 if (vect_print_dump_info (REPORT_DETAILS))
4522 fprintf (vect_dump, "use not simple.");
4526 /* Supportable by target? */
4527 if (!supportable_narrowing_operation (code, stmt, vectype_in, &code1,
4528 &multi_step_cvt, &interm_types))
4531 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
4533 if (!vec_stmt) /* transformation not required. */
4535 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4536 if (vect_print_dump_info (REPORT_DETAILS))
4537 fprintf (vect_dump, "=== vectorizable_demotion ===");
4538 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
4543 if (vect_print_dump_info (REPORT_DETAILS))
4544 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
4547 /* In case of multi-step demotion, we first generate demotion operations to
the intermediate types, and then from those types to the final one.
4549 We create vector destinations for the intermediate type (TYPES) received
4550 from supportable_narrowing_operation, and store them in the correct order
4551 for future use in vect_create_vectorized_demotion_stmts(). */
4553 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
4555 vec_dsts = VEC_alloc (tree, heap, 1);
4557 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
4558 VEC_quick_push (tree, vec_dsts, vec_dest);
4562 for (i = VEC_length (tree, interm_types) - 1;
4563 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
4565 vec_dest = vect_create_destination_var (scalar_dest,
4567 VEC_quick_push (tree, vec_dsts, vec_dest);
4571 /* In case the vectorization factor (VF) is bigger than the number
4572 of elements that we can fit in a vectype (nunits), we have to generate
more than one vector stmt - i.e., we need to "unroll" the
4574 vector stmt by a factor VF/nunits. */
4576 prev_stmt_info = NULL;
4577 for (j = 0; j < ncopies; j++)
4581 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL);
4584 VEC_free (tree, heap, vec_oprnds0);
4585 vec_oprnds0 = VEC_alloc (tree, heap,
4586 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
4587 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4588 vect_pow2 (multi_step_cvt) - 1);
4591 /* Arguments are ready. Create the new vector stmts. */
4592 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
4593 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
4594 multi_step_cvt, stmt, tmp_vec_dsts,
4595 gsi, slp_node, code1,
4599 VEC_free (tree, heap, vec_oprnds0);
4600 VEC_free (tree, heap, vec_dsts);
4601 VEC_free (tree, heap, tmp_vec_dsts);
4602 VEC_free (tree, heap, interm_types);
4604 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4609 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4610 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
4611 the resulting vectors and call the function recursively. */
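/* E.g. (types assumed): a single V8HI operand is widened into two V4SI
   halves, the low half via CODE1/DECL1 and the high half via
   CODE2/DECL2, one statement per half. */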
4614 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
4615 VEC (tree, heap) **vec_oprnds1,
4616 int multi_step_cvt, gimple stmt,
4617 VEC (tree, heap) *vec_dsts,
4618 gimple_stmt_iterator *gsi,
4619 slp_tree slp_node, enum tree_code code1,
4620 enum tree_code code2, tree decl1,
4621 tree decl2, int op_type,
4622 stmt_vec_info *prev_stmt_info)
4625 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
4626 gimple new_stmt1, new_stmt2;
4627 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4628 VEC (tree, heap) *vec_tmp;
4630 vec_dest = VEC_pop (tree, vec_dsts);
4631 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
4633 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
4635 if (op_type == binary_op)
4636 vop1 = VEC_index (tree, *vec_oprnds1, i);
/* Generate the two halves of the promotion operation. */
4641 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4642 op_type, vec_dest, gsi, stmt);
4643 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4644 op_type, vec_dest, gsi, stmt);
4645 if (is_gimple_call (new_stmt1))
4647 new_tmp1 = gimple_call_lhs (new_stmt1);
4648 new_tmp2 = gimple_call_lhs (new_stmt2);
4652 new_tmp1 = gimple_assign_lhs (new_stmt1);
4653 new_tmp2 = gimple_assign_lhs (new_stmt2);
4658 /* Store the results for the recursive call. */
4659 VEC_quick_push (tree, vec_tmp, new_tmp1);
4660 VEC_quick_push (tree, vec_tmp, new_tmp2);
/* Last step of the promotion sequence - store the results. */
4667 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
4668 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
4672 if (!*prev_stmt_info)
4673 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
4675 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
4677 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
4678 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
4679 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
/* For a multi-step promotion operation we call the function recursively
for every stage. We start from the input type,
4688 create promotion operations to the intermediate types, and then
4689 create promotions to the output type. */
4690 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
4691 VEC_free (tree, heap, vec_tmp);
4692 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
4693 multi_step_cvt - 1, stmt,
4694 vec_dsts, gsi, slp_node, code1,
code2, decl1, decl2, op_type,
4701 /* Function vectorizable_type_promotion
4703 Check if STMT performs a binary or unary operation that involves
4704 type promotion, and if it can be vectorized.
4705 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4706 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4707 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
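/* Illustrative example (types assumed): for
     S: i = (int) s;
   with a V8HI def of 's' and V4SI results, every input vector yields two
   output vectors, typically via the VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR
   codes chosen by supportable_widening_operation. */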
4710 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
4711 gimple *vec_stmt, slp_tree slp_node)
4715 tree op0, op1 = NULL;
4716 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
4717 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4718 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4719 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4720 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4724 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4725 stmt_vec_info prev_stmt_info;
4732 tree intermediate_type = NULL_TREE;
4733 int multi_step_cvt = 0;
4734 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
4735 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
4737 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4740 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
4743 /* Is STMT a vectorizable type-promotion operation? */
4744 if (!is_gimple_assign (stmt))
4747 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4750 code = gimple_assign_rhs_code (stmt);
4751 if (!CONVERT_EXPR_CODE_P (code)
4752 && code != WIDEN_MULT_EXPR)
4755 op0 = gimple_assign_rhs1 (stmt);
4756 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
4759 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4761 scalar_dest = gimple_assign_lhs (stmt);
4762 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
4765 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4766 if (nunits_in <= nunits_out)
4769 /* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
case of SLP. */
4775 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4777 gcc_assert (ncopies >= 1);
4779 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4780 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
4781 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
4782 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
4783 && CONVERT_EXPR_CODE_P (code))))
4786 /* Check the operands of the operation. */
4787 if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0]))
4789 if (vect_print_dump_info (REPORT_DETAILS))
4790 fprintf (vect_dump, "use not simple.");
4794 op_type = TREE_CODE_LENGTH (code);
4795 if (op_type == binary_op)
4797 op1 = gimple_assign_rhs2 (stmt);
4798 if (!vect_is_simple_use (op1, loop_vinfo, &def_stmt, &def, &dt[1]))
4800 if (vect_print_dump_info (REPORT_DETAILS))
4801 fprintf (vect_dump, "use not simple.");
4806 /* Supportable by target? */
4807 if (!supportable_widening_operation (code, stmt, vectype_in,
4808 &decl1, &decl2, &code1, &code2,
4809 &multi_step_cvt, &interm_types))
4812 /* Binary widening operation can only be supported directly by the
4814 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4816 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
4818 if (!vec_stmt) /* transformation not required. */
4820 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4821 if (vect_print_dump_info (REPORT_DETAILS))
4822 fprintf (vect_dump, "=== vectorizable_promotion ===");
4823 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
4829 if (vect_print_dump_info (REPORT_DETAILS))
4830 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
4834 /* In case of multi-step promotion, we first generate promotion operations
to the intermediate types, and then from those types to the final one.
We store the vector destinations in VEC_DSTS in the correct order for
recursive creation of promotion operations in
vect_create_vectorized_promotion_stmts(). Vector destinations are created
according to TYPES received from supportable_widening_operation(). */
4841 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
4843 vec_dsts = VEC_alloc (tree, heap, 1);
4845 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
4846 VEC_quick_push (tree, vec_dsts, vec_dest);
4850 for (i = VEC_length (tree, interm_types) - 1;
4851 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
4853 vec_dest = vect_create_destination_var (scalar_dest,
4855 VEC_quick_push (tree, vec_dsts, vec_dest);
4861 vec_oprnds0 = VEC_alloc (tree, heap,
4862 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4863 if (op_type == binary_op)
4864 vec_oprnds1 = VEC_alloc (tree, heap, 1);
4867 /* In case the vectorization factor (VF) is bigger than the number
4868 of elements that we can fit in a vectype (nunits), we have to generate
more than one vector stmt - i.e., we need to "unroll" the
4870 vector stmt by a factor VF/nunits. */
4872 prev_stmt_info = NULL;
4873 for (j = 0; j < ncopies; j++)
4879 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1);
4882 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
4883 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
4884 if (op_type == binary_op)
4886 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
4887 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
4893 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4894 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
4895 if (op_type == binary_op)
4897 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
4898 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
4902 /* Arguments are ready. Create the new vector stmts. */
4903 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
4904 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
4905 multi_step_cvt, stmt,
4907 gsi, slp_node, code1, code2,
4908 decl1, decl2, op_type,
4912 VEC_free (tree, heap, vec_dsts);
4913 VEC_free (tree, heap, tmp_vec_dsts);
4914 VEC_free (tree, heap, interm_types);
4915 VEC_free (tree, heap, vec_oprnds0);
4916 VEC_free (tree, heap, vec_oprnds1);
4918 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4923 /* Function vect_strided_store_supported.
Returns TRUE if INTERLEAVE_HIGH and INTERLEAVE_LOW operations are supported,
4926 and FALSE otherwise. */
4929 vect_strided_store_supported (tree vectype)
4931 optab interleave_high_optab, interleave_low_optab;
4934 mode = (int) TYPE_MODE (vectype);
4936 /* Check that the operation is supported. */
4937 interleave_high_optab = optab_for_tree_code (VEC_INTERLEAVE_HIGH_EXPR,
4938 vectype, optab_default);
4939 interleave_low_optab = optab_for_tree_code (VEC_INTERLEAVE_LOW_EXPR,
4940 vectype, optab_default);
4941 if (!interleave_high_optab || !interleave_low_optab)
4943 if (vect_print_dump_info (REPORT_DETAILS))
4944 fprintf (vect_dump, "no optab for interleave.");
4948 if (optab_handler (interleave_high_optab, mode)->insn_code
4950 || optab_handler (interleave_low_optab, mode)->insn_code
4951 == CODE_FOR_nothing)
4953 if (vect_print_dump_info (REPORT_DETAILS))
4954 fprintf (vect_dump, "interleave op not supported by target.");
4962 /* Function vect_permute_store_chain.
4964 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
4965 a power of 2, generate interleave_high/low stmts to reorder the data
4966 correctly for the stores. Return the final references for stores in
4969 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
4970 The input is 4 vectors each containing 8 elements. We assign a number to each
element; the input sequence is:
4973 1st vec: 0 1 2 3 4 5 6 7
4974 2nd vec: 8 9 10 11 12 13 14 15
4975 3rd vec: 16 17 18 19 20 21 22 23
4976 4th vec: 24 25 26 27 28 29 30 31
4978 The output sequence should be:
4980 1st vec: 0 8 16 24 1 9 17 25
4981 2nd vec: 2 10 18 26 3 11 19 27
3rd vec: 4 12 20 28 5 13 21 29
4983 4th vec: 6 14 22 30 7 15 23 31
4985 i.e., we interleave the contents of the four vectors in their order.
4987 We use interleave_high/low instructions to create such output. The input of
4988 each interleave_high/low operation is two vectors:
4991 the even elements of the result vector are obtained left-to-right from the
4992 high/low elements of the first vector. The odd elements of the result are
4993 obtained left-to-right from the high/low elements of the second vector.
4994 The output of interleave_high will be: 0 4 1 5
4995 and of interleave_low: 2 6 3 7
4998 The permutation is done in log LENGTH stages. In each stage interleave_high
4999 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
5000 where the first argument is taken from the first half of DR_CHAIN and the
second argument from its second half.
5004 I1: interleave_high (1st vec, 3rd vec)
5005 I2: interleave_low (1st vec, 3rd vec)
5006 I3: interleave_high (2nd vec, 4th vec)
5007 I4: interleave_low (2nd vec, 4th vec)
5009 The output for the first stage is:
5011 I1: 0 16 1 17 2 18 3 19
5012 I2: 4 20 5 21 6 22 7 23
5013 I3: 8 24 9 25 10 26 11 27
5014 I4: 12 28 13 29 14 30 15 31
5016 The output of the second stage, i.e. the final result is:
5018 I1: 0 8 16 24 1 9 17 25
5019 I2: 2 10 18 26 3 11 19 27
I3: 4 12 20 28 5 13 21 29
5021 I4: 6 14 22 30 7 15 23 31. */
5024 vect_permute_store_chain (VEC(tree,heap) *dr_chain,
5025 unsigned int length,
5027 gimple_stmt_iterator *gsi,
5028 VEC(tree,heap) **result_chain)
5030 tree perm_dest, vect1, vect2, high, low;
5032 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5036 enum tree_code high_code, low_code;
5038 scalar_dest = gimple_assign_lhs (stmt);
5040 /* Check that the operation is supported. */
5041 if (!vect_strided_store_supported (vectype))
5044 *result_chain = VEC_copy (tree, heap, dr_chain);
5046 for (i = 0; i < exact_log2 (length); i++)
5048 for (j = 0; j < length/2; j++)
5050 vect1 = VEC_index (tree, dr_chain, j);
5051 vect2 = VEC_index (tree, dr_chain, j+length/2);
5053 /* Create interleaving stmt:
5054 in the case of big endian:
5055 high = interleave_high (vect1, vect2)
5056 and in the case of little endian:
5057 high = interleave_low (vect1, vect2). */
5058 perm_dest = create_tmp_var (vectype, "vect_inter_high");
5059 DECL_GIMPLE_REG_P (perm_dest) = 1;
5060 add_referenced_var (perm_dest);
5061 if (BYTES_BIG_ENDIAN)
5063 high_code = VEC_INTERLEAVE_HIGH_EXPR;
5064 low_code = VEC_INTERLEAVE_LOW_EXPR;
5068 low_code = VEC_INTERLEAVE_HIGH_EXPR;
5069 high_code = VEC_INTERLEAVE_LOW_EXPR;
5071 perm_stmt = gimple_build_assign_with_ops (high_code, perm_dest,
5073 high = make_ssa_name (perm_dest, perm_stmt);
5074 gimple_assign_set_lhs (perm_stmt, high);
5075 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5076 VEC_replace (tree, *result_chain, 2*j, high);
5078 /* Create interleaving stmt:
5079 in the case of big endian:
5080 low = interleave_low (vect1, vect2)
5081 and in the case of little endian:
5082 low = interleave_high (vect1, vect2). */
5083 perm_dest = create_tmp_var (vectype, "vect_inter_low");
5084 DECL_GIMPLE_REG_P (perm_dest) = 1;
5085 add_referenced_var (perm_dest);
5086 perm_stmt = gimple_build_assign_with_ops (low_code, perm_dest,
5088 low = make_ssa_name (perm_dest, perm_stmt);
5089 gimple_assign_set_lhs (perm_stmt, low);
5090 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5091 VEC_replace (tree, *result_chain, 2*j+1, low);
5093 dr_chain = VEC_copy (tree, heap, *result_chain);
5099 /* Function vectorizable_store.
Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5103 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5104 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5105 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5108 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
5114 tree vec_oprnd = NULL_TREE;
5115 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5116 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5117 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5118 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5119 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5120 enum machine_mode vec_mode;
5122 enum dr_alignment_support alignment_support_scheme;
5125 enum vect_def_type dt;
5126 stmt_vec_info prev_stmt_info = NULL;
5127 tree dataref_ptr = NULL_TREE;
5128 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5131 gimple next_stmt, first_stmt = NULL;
5132 bool strided_store = false;
5133 unsigned int group_size, i;
5134 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
5136 VEC(tree,heap) *vec_oprnds = NULL;
5137 bool slp = (slp_node != NULL);
5138 stmt_vec_info first_stmt_vinfo;
5139 unsigned int vec_num;
5141 /* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
case of SLP. */
5147 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5149 gcc_assert (ncopies >= 1);
5151 /* FORNOW. This restriction should be relaxed. */
5152 if (nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5154 if (vect_print_dump_info (REPORT_DETAILS))
5155 fprintf (vect_dump, "multiple types in nested loop.");
5159 if (!STMT_VINFO_RELEVANT_P (stmt_info))
5162 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
5165 /* Is vectorizable store? */
5167 if (!is_gimple_assign (stmt))
5170 scalar_dest = gimple_assign_lhs (stmt);
5171 if (TREE_CODE (scalar_dest) != ARRAY_REF
5172 && TREE_CODE (scalar_dest) != INDIRECT_REF
5173 && !STMT_VINFO_STRIDED_ACCESS (stmt_info))
5176 gcc_assert (gimple_assign_single_p (stmt));
5177 op = gimple_assign_rhs1 (stmt);
5178 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt))
5180 if (vect_print_dump_info (REPORT_DETAILS))
5181 fprintf (vect_dump, "use not simple.");
5185 /* The type of the vector store is determined by the rhs. */
5186 vectype = get_vectype_for_scalar_type (TREE_TYPE (op));
5188 /* If accesses through a pointer to vectype do not alias the original
5189 memory reference we have a problem. */
5190 if (get_alias_set (vectype) != get_alias_set (TREE_TYPE (scalar_dest))
5191 && !alias_set_subset_of (get_alias_set (vectype),
5192 get_alias_set (TREE_TYPE (scalar_dest))))
5194 if (vect_print_dump_info (REPORT_DETAILS))
5195 fprintf (vect_dump, "vector type does not alias scalar type");
5199 if (!useless_type_conversion_p (TREE_TYPE (op), TREE_TYPE (scalar_dest)))
5201 if (vect_print_dump_info (REPORT_DETAILS))
5202 fprintf (vect_dump, "operands of different types");
5206 vec_mode = TYPE_MODE (vectype);
/* FORNOW. In some cases we can vectorize even if the data-type is not
supported (e.g., array initialization with 0). */
5209 if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
5212 if (!STMT_VINFO_DATA_REF (stmt_info))
5215 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
5217 strided_store = true;
5218 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
5219 if (!vect_strided_store_supported (vectype)
5220 && !PURE_SLP_STMT (stmt_info) && !slp)
5223 if (first_stmt == stmt)
5225 /* STMT is the leader of the group. Check the operands of all the
5226 stmts of the group. */
5227 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
5230 gcc_assert (gimple_assign_single_p (next_stmt));
5231 op = gimple_assign_rhs1 (next_stmt);
5232 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt))
5234 if (vect_print_dump_info (REPORT_DETAILS))
5235 fprintf (vect_dump, "use not simple.");
5238 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
5243 if (!vec_stmt) /* transformation not required. */
5245 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5246 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
5254 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5255 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
5257 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5260 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5262 /* We vectorize all the stmts of the interleaving group when we
5263 reach the last stmt in the group. */
5264 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5265 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
5273 strided_store = false;
5275 /* VEC_NUM is the number of vect stmts to be created for this group. */
5277 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5279 vec_num = group_size;
5285 group_size = vec_num = 1;
5286 first_stmt_vinfo = stmt_info;
5289 if (vect_print_dump_info (REPORT_DETAILS))
5290 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
5292 dr_chain = VEC_alloc (tree, heap, group_size);
5293 oprnds = VEC_alloc (tree, heap, group_size);
5295 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
5296 gcc_assert (alignment_support_scheme);
5297 gcc_assert (alignment_support_scheme == dr_aligned); /* FORNOW */
5299 /* In case the vectorization factor (VF) is bigger than the number
5300 of elements that we can fit in a vectype (nunits), we have to generate
more than one vector stmt - i.e., we need to "unroll" the
5302 vector stmt by a factor VF/nunits. For more details see documentation in
5303 vect_get_vec_def_for_copy_stmt. */
5305 /* In case of interleaving (non-unit strided access):
5312 We create vectorized stores starting from base address (the access of the
5313 first stmt in the chain (S2 in the above example), when the last store stmt
5314 of the chain (S4) is reached:
5317 VS2: &base + vec_size*1 = vx0
5318 VS3: &base + vec_size*2 = vx1
5319 VS4: &base + vec_size*3 = vx3
5321 Then permutation statements are generated:
5323 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
5324 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
5327 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5328 (the order of the data-refs in the output of vect_permute_store_chain
5329 corresponds to the order of scalar stmts in the interleaving chain - see
5330 the documentation of vect_permute_store_chain()).
In case of both multiple types and interleaving, the above vector stores and
5333 permutation stmts are created for every copy. The result vector stmts are
5334 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5335 STMT_VINFO_RELATED_STMT for the next copies.
5338 prev_stmt_info = NULL;
5339 for (j = 0; j < ncopies; j++)
5348 /* Get vectorized arguments for SLP_NODE. */
5349 vect_get_slp_defs (slp_node, &vec_oprnds, NULL);
5351 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
5355 /* For interleaved stores we collect vectorized defs for all the
5356 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5357 used as an input to vect_permute_store_chain(), and OPRNDS as
5358 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5360 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
5361 OPRNDS are of size 1. */
5362 next_stmt = first_stmt;
5363 for (i = 0; i < group_size; i++)
5365 /* Since gaps are not supported for interleaved stores,
5366 GROUP_SIZE is the exact number of stmts in the chain.
5367 Therefore, NEXT_STMT can't be NULL_TREE. In case that
5368 there is no interleaving, GROUP_SIZE is 1, and only one
5369 iteration of the loop will be executed. */
5370 gcc_assert (next_stmt);
5371 gcc_assert (gimple_assign_single_p (next_stmt));
5372 op = gimple_assign_rhs1 (next_stmt);
5374 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
5376 VEC_quick_push(tree, dr_chain, vec_oprnd);
5377 VEC_quick_push(tree, oprnds, vec_oprnd);
5378 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
5382 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
5383 &dummy, &ptr_incr, false,
5384 &inv_p, TREE_TYPE (vec_oprnd));
5385 gcc_assert (!inv_p);
5389 /* For interleaved stores we created vectorized defs for all the
5390 defs stored in OPRNDS in the previous iteration (previous copy).
5391 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5392 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5394 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
5395 OPRNDS are of size 1. */
5396 for (i = 0; i < group_size; i++)
5398 op = VEC_index (tree, oprnds, i);
5399 vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt);
5400 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5401 VEC_replace(tree, dr_chain, i, vec_oprnd);
5402 VEC_replace(tree, oprnds, i, vec_oprnd);
5405 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
5410 result_chain = VEC_alloc (tree, heap, group_size);
5412 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5417 next_stmt = first_stmt;
5418 for (i = 0; i < vec_num; i++)
5421 /* Bump the vector pointer. */
5422 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5426 vec_oprnd = VEC_index (tree, vec_oprnds, i);
5427 else if (strided_store)
5428 /* For strided stores vectorized defs are interleaved in
5429 vect_permute_store_chain(). */
5430 vec_oprnd = VEC_index (tree, result_chain, i);
5432 data_ref = build_fold_indirect_ref (dataref_ptr);
5433 /* Arguments are ready. Create the new vector stmt. */
5434 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5435 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5436 mark_symbols_for_renaming (new_stmt);
5442 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5444 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5446 prev_stmt_info = vinfo_for_stmt (new_stmt);
5447 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
5453 VEC_free (tree, heap, dr_chain);
5454 VEC_free (tree, heap, oprnds);
5456 VEC_free (tree, heap, result_chain);
5462 /* Function vect_setup_realignment
5464 This function is called when vectorizing an unaligned load using
5465 the dr_explicit_realign[_optimized] scheme.
5466 This function generates the following code at the loop prolog:
5469 x msq_init = *(floor(p)); # prolog load
5470 realignment_token = call target_builtin;
5472 x msq = phi (msq_init, ---)
5474 The stmts marked with x are generated only for the case of
5475 dr_explicit_realign_optimized.
5477 The code above sets up a new (vector) pointer, pointing to the first
5478 location accessed by STMT, and a "floor-aligned" load using that pointer.
5479 It also generates code to compute the "realignment-token" (if the relevant
5480 target hook was defined), and creates a phi-node at the loop-header bb
5481 whose arguments are the result of the prolog-load (created by this
5482 function) and the result of a load that takes place in the loop (to be
5483 created by the caller to this function).
5485 For the case of dr_explicit_realign_optimized:
5486 The caller to this function uses the phi-result (msq) to create the
5487 realignment code inside the loop, and sets up the missing phi argument,
5490 msq = phi (msq_init, lsq)
5491 lsq = *(floor(p')); # load in loop
5492 result = realign_load (msq, lsq, realignment_token);
5494 For the case of dr_explicit_realign:
5496 msq = *(floor(p)); # load in loop
5498 lsq = *(floor(p')); # load in loop
5499 result = realign_load (msq, lsq, realignment_token);
5502 STMT - (scalar) load stmt to be vectorized. This load accesses
5503 a memory location that may be unaligned.
5504 BSI - place where new code is to be inserted.
5505 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
5509 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
5510 target hook, if defined.
5511 Return value - the result of the loop-header phi node. */
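/* Illustrative example (16-byte vectors assumed): for a load from P with
   P % 16 == 4, the prolog loads the vector at floor(P), each iteration
   loads the vector at the next floor-aligned address covering the access,
   and REALIGN_LOAD combines the two using the realignment token so that
   the 16 bytes starting at P are extracted. */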
5514 vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
5515 tree *realignment_token,
5516 enum dr_alignment_support alignment_support_scheme,
5518 struct loop **at_loop)
5520 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5521 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5522 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5523 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5525 tree scalar_dest = gimple_assign_lhs (stmt);
5532 tree msq_init = NULL_TREE;
5535 tree msq = NULL_TREE;
5536 gimple_seq stmts = NULL;
5538 bool compute_in_loop = false;
5539 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
5540 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
5541 struct loop *loop_for_initial_load;
5543 gcc_assert (alignment_support_scheme == dr_explicit_realign
5544 || alignment_support_scheme == dr_explicit_realign_optimized);
5546 /* We need to generate three things:
5547 1. the misalignment computation
5548 2. the extra vector load (for the optimized realignment scheme).
5549 3. the phi node for the two vectors from which the realignment is
5550 done (for the optimized realignment scheme).
5553 /* 1. Determine where to generate the misalignment computation.
5555 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
5556 calculation will be generated by this function, outside the loop (in the
5557 preheader). Otherwise, INIT_ADDR had already been computed for us by the
5558 caller, inside the loop.
5560 Background: If the misalignment remains fixed throughout the iterations of
5561 the loop, then both realignment schemes are applicable, and also the
5562 misalignment computation can be done outside LOOP. This is because we are
5563 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
5564 are a multiple of VS (the Vector Size), and therefore the misalignment in
5565 different vectorized LOOP iterations is always the same.
5566 The problem arises only if the memory access is in an inner-loop nested
5567 inside LOOP, which is now being vectorized using outer-loop vectorization.
5568 This is the only case when the misalignment of the memory access may not
5569 remain fixed throughout the iterations of the inner-loop (as explained in
5570 detail in vect_supportable_dr_alignment). In this case, not only is the
5571 optimized realignment scheme not applicable, but also the misalignment
5572 computation (and generation of the realignment token that is passed to
REALIGN_LOAD) has to be done inside the loop.
5575 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
5576 or not, which in turn determines if the misalignment is computed inside
5577 the inner-loop, or outside LOOP. */
5579 if (init_addr != NULL_TREE)
5581 compute_in_loop = true;
5582 gcc_assert (alignment_support_scheme == dr_explicit_realign);
5586 /* 2. Determine where to generate the extra vector load.
5588 For the optimized realignment scheme, instead of generating two vector
5589 loads in each iteration, we generate a single extra vector load in the
5590 preheader of the loop, and in each iteration reuse the result of the
5591 vector load from the previous iteration. In case the memory access is in
5592 an inner-loop nested inside LOOP, which is now being vectorized using
5593 outer-loop vectorization, we need to determine whether this initial vector
5594 load should be generated at the preheader of the inner-loop, or can be
5595 generated at the preheader of LOOP. If the memory access has no evolution
5596 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
5597 to be generated inside LOOP (in the preheader of the inner-loop). */
5599 if (nested_in_vect_loop)
5601 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
5602 bool invariant_in_outerloop =
5603 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
5604 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
5607 loop_for_initial_load = loop;
5609 *at_loop = loop_for_initial_load;
5611 /* 3. For the case of the optimized realignment, create the first vector
5612 load at the loop preheader. */
5614 if (alignment_support_scheme == dr_explicit_realign_optimized)
5616 /* Create msq_init = *(floor(p1)) in the loop preheader */
5618 gcc_assert (!compute_in_loop);
5619 pe = loop_preheader_edge (loop_for_initial_load);
5620 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5621 ptr = vect_create_data_ref_ptr (stmt, loop_for_initial_load, NULL_TREE,
5622 &init_addr, &inc, true, &inv_p, NULL_TREE);
5623 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
5624 new_stmt = gimple_build_assign (vec_dest, data_ref);
5625 new_temp = make_ssa_name (vec_dest, new_stmt);
5626 gimple_assign_set_lhs (new_stmt, new_temp);
5627 mark_symbols_for_renaming (new_stmt);
5628 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
5629 gcc_assert (!new_bb);
5630 msq_init = gimple_assign_lhs (new_stmt);
5633 /* 4. Create realignment token using a target builtin, if available.
5634 It is done either inside the containing loop, or before LOOP (as
5635 determined above). */
5637 if (targetm.vectorize.builtin_mask_for_load)
/* Compute INIT_ADDR - the initial address accessed by this memref. */
5642 if (compute_in_loop)
5643 gcc_assert (init_addr); /* already computed by the caller. */
5646 /* Generate the INIT_ADDR computation outside LOOP. */
5647 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
5649 pe = loop_preheader_edge (loop);
5650 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
5651 gcc_assert (!new_bb);
5654 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
5655 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
5657 vect_create_destination_var (scalar_dest,
5658 gimple_call_return_type (new_stmt));
5659 new_temp = make_ssa_name (vec_dest, new_stmt);
5660 gimple_call_set_lhs (new_stmt, new_temp);
5662 if (compute_in_loop)
5663 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5666 /* Generate the misalignment computation outside LOOP. */
5667 pe = loop_preheader_edge (loop);
5668 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
5669 gcc_assert (!new_bb);
5672 *realignment_token = gimple_call_lhs (new_stmt);
5674 /* The result of the CALL_EXPR to this builtin is determined from
the value of the parameter, and no global variables are touched, which
makes the builtin a "const" function. Requiring the
5677 builtin to have the "const" attribute makes it unnecessary
5678 to call mark_call_clobbered. */
5679 gcc_assert (TREE_READONLY (builtin_decl));
5682 if (alignment_support_scheme == dr_explicit_realign)
5685 gcc_assert (!compute_in_loop);
5686 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
5689 /* 5. Create msq = phi <msq_init, lsq> in loop */
5691 pe = loop_preheader_edge (containing_loop);
5692 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5693 msq = make_ssa_name (vec_dest, NULL);
5694 phi_stmt = create_phi_node (msq, containing_loop->header);
5695 SSA_NAME_DEF_STMT (msq) = phi_stmt;
5696 add_phi_arg (phi_stmt, msq_init, pe);
5702 /* Function vect_strided_load_supported.
Returns TRUE if EXTRACT_EVEN and EXTRACT_ODD operations are supported,
5705 and FALSE otherwise. */
5708 vect_strided_load_supported (tree vectype)
5710 optab perm_even_optab, perm_odd_optab;
5713 mode = (int) TYPE_MODE (vectype);
5715 perm_even_optab = optab_for_tree_code (VEC_EXTRACT_EVEN_EXPR, vectype,
5717 if (!perm_even_optab)
5719 if (vect_print_dump_info (REPORT_DETAILS))
5720 fprintf (vect_dump, "no optab for perm_even.");
5724 if (optab_handler (perm_even_optab, mode)->insn_code == CODE_FOR_nothing)
5726 if (vect_print_dump_info (REPORT_DETAILS))
5727 fprintf (vect_dump, "perm_even op not supported by target.");
5731 perm_odd_optab = optab_for_tree_code (VEC_EXTRACT_ODD_EXPR, vectype,
5733 if (!perm_odd_optab)
5735 if (vect_print_dump_info (REPORT_DETAILS))
5736 fprintf (vect_dump, "no optab for perm_odd.");
5740 if (optab_handler (perm_odd_optab, mode)->insn_code == CODE_FOR_nothing)
5742 if (vect_print_dump_info (REPORT_DETAILS))
5743 fprintf (vect_dump, "perm_odd op not supported by target.");
5750 /* Function vect_permute_load_chain.
5752 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
5753 a power of 2, generate extract_even/odd stmts to reorder the input data
5754 correctly. Return the final references for loads in RESULT_CHAIN.
5756 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
5757 The input is 4 vectors each containing 8 elements. We assign a number to each
5758 element, the input sequence is:
5760 1st vec: 0 1 2 3 4 5 6 7
5761 2nd vec: 8 9 10 11 12 13 14 15
5762 3rd vec: 16 17 18 19 20 21 22 23
5763 4th vec: 24 25 26 27 28 29 30 31
5765 The output sequence should be:
5767 1st vec: 0 4 8 12 16 20 24 28
5768 2nd vec: 1 5 9 13 17 21 25 29
5769 3rd vec: 2 6 10 14 18 22 26 30
5770 4th vec: 3 7 11 15 19 23 27 31
5772 i.e., the first output vector should contain the first elements of each
5773 interleaving group, etc.
5775 We use extract_even/odd instructions to create such output. The input of each
   extract_even/odd operation is two vectors

     1st vec    2nd vec
     0 1 2 3    4 5 6 7
5780 and the output is the vector of extracted even/odd elements. The output of
5781 extract_even will be: 0 2 4 6
5782 and of extract_odd: 1 3 5 7
5785 The permutation is done in log LENGTH stages. In each stage extract_even and
5786 extract_odd stmts are created for each pair of vectors in DR_CHAIN in their
5787 order. In our example,
5789 E1: extract_even (1st vec, 2nd vec)
5790 E2: extract_odd (1st vec, 2nd vec)
5791 E3: extract_even (3rd vec, 4th vec)
5792 E4: extract_odd (3rd vec, 4th vec)
5794 The output for the first stage will be:
5796 E1: 0 2 4 6 8 10 12 14
5797 E2: 1 3 5 7 9 11 13 15
5798 E3: 16 18 20 22 24 26 28 30
5799 E4: 17 19 21 23 25 27 29 31
5801 In order to proceed and create the correct sequence for the next stage (or
5802 for the correct output, if the second stage is the last one, as in our
5803 example), we first put the output of extract_even operation and then the
5804 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
5805 The input for the second stage is:
5807 1st vec (E1): 0 2 4 6 8 10 12 14
5808 2nd vec (E3): 16 18 20 22 24 26 28 30
5809 3rd vec (E2): 1 3 5 7 9 11 13 15
5810 4th vec (E4): 17 19 21 23 25 27 29 31
5812 The output of the second stage:
5814 E1: 0 4 8 12 16 20 24 28
5815 E2: 2 6 10 14 18 22 26 30
5816 E3: 1 5 9 13 17 21 25 29
5817 E4: 3 7 11 15 19 23 27 31
5819 And RESULT_CHAIN after reordering:
5821 1st vec (E1): 0 4 8 12 16 20 24 28
5822 2nd vec (E3): 1 5 9 13 17 21 25 29
5823 3rd vec (E2): 2 6 10 14 18 22 26 30
5824 4th vec (E4): 3 7 11 15 19 23 27 31. */
5827 vect_permute_load_chain (VEC(tree,heap) *dr_chain,
5828 unsigned int length,
5830 gimple_stmt_iterator *gsi,
5831 VEC(tree,heap) **result_chain)
5833 tree perm_dest, data_ref, first_vect, second_vect;
5835 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5839 /* Check that the operation is supported. */
5840 if (!vect_strided_load_supported (vectype))
5843 *result_chain = VEC_copy (tree, heap, dr_chain);
5844 for (i = 0; i < exact_log2 (length); i++)
5846 for (j = 0; j < length; j +=2)
5848 first_vect = VEC_index (tree, dr_chain, j);
5849 second_vect = VEC_index (tree, dr_chain, j+1);
5851 /* data_ref = permute_even (first_data_ref, second_data_ref); */
5852 perm_dest = create_tmp_var (vectype, "vect_perm_even");
5853 DECL_GIMPLE_REG_P (perm_dest) = 1;
5854 add_referenced_var (perm_dest);
5856 perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_EVEN_EXPR,
						    perm_dest, first_vect,
						    second_vect);
5860 data_ref = make_ssa_name (perm_dest, perm_stmt);
5861 gimple_assign_set_lhs (perm_stmt, data_ref);
5862 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5863 mark_symbols_for_renaming (perm_stmt);
5865 VEC_replace (tree, *result_chain, j/2, data_ref);
5867 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
5868 perm_dest = create_tmp_var (vectype, "vect_perm_odd");
5869 DECL_GIMPLE_REG_P (perm_dest) = 1;
5870 add_referenced_var (perm_dest);
5872 perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_ODD_EXPR,
						    perm_dest, first_vect,
						    second_vect);
5875 data_ref = make_ssa_name (perm_dest, perm_stmt);
5876 gimple_assign_set_lhs (perm_stmt, data_ref);
5877 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5878 mark_symbols_for_renaming (perm_stmt);
5880 VEC_replace (tree, *result_chain, j/2+length/2, data_ref);
      dr_chain = VEC_copy (tree, heap, *result_chain);
    }

  return true;
}
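/* A worked model of the permutation network above, using hypothetical
   scalar arrays (an illustrative sketch, not vectorizer code): for
   LENGTH == 4 vectors of N elements, each stage pairs up the chain and
   splits even and odd elements:

       for (j = 0; j < 4; j += 2)
	 for (k = 0; k < N; k++)
	   {
	     out[j/2][k]     = concat (in[j], in[j+1]) [2*k];
	     out[j/2 + 2][k] = concat (in[j], in[j+1]) [2*k + 1];
	   }

   where concat (a,b)[p] denotes element p of the 2N-element concatenation
   of A and B.  Running this exact_log2 (4) == 2 times, copying OUT back to
   IN in between just as DR_CHAIN is recopied above, leaves out[0] holding
   elements 0,4,8,... of the original access stream, matching the
   RESULT_CHAIN tables in the function comment.  */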
5888 /* Function vect_transform_strided_load.
5890 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
   to perform their permutation and ascribe the resulting vectorized
   statements to the scalar statements.
5896 vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
5897 gimple_stmt_iterator *gsi)
5899 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5900 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
5901 gimple next_stmt, new_stmt;
5902 VEC(tree,heap) *result_chain = NULL;
5903 unsigned int i, gap_count;
5906 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
5907 RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
5908 vectors, that are ready for vector computation. */
5909 result_chain = VEC_alloc (tree, heap, size);
  if (!vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain))
    return false;
5914 /* Put a permuted data-ref in the VECTORIZED_STMT field.
     Since we scan the chain starting from its first node, their order
     corresponds to the order of data-refs in RESULT_CHAIN.  */
  next_stmt = first_stmt;
  gap_count = 1;
5919 for (i = 0; VEC_iterate (tree, result_chain, i, tmp_data_ref); i++)
5924 /* Skip the gaps. Loads created for the gaps will be removed by dead
5925 code elimination pass later. No need to check for the first stmt in
5926 the group, since it always exists.
5927 DR_GROUP_GAP is the number of steps in elements from the previous
5928 access (if there is no gap DR_GROUP_GAP is 1). We skip loads that
	 correspond to the gaps.  */
5931 if (next_stmt != first_stmt
	  && gap_count < DR_GROUP_GAP (vinfo_for_stmt (next_stmt)))
	{
	  gap_count++;
	  continue;
	}

      while (next_stmt)
	{
5940 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
5941 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
	     copies, and we put the new vector statement in the first available
	     RELATED_STMT.  */
5944 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
5945 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
	  else
	    {
	      gimple prev_stmt =
		STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
	      gimple rel_stmt =
		STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
	      while (rel_stmt)
		{
5954 prev_stmt = rel_stmt;
5955 rel_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
		}
	      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) = new_stmt;
	    }
	  next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
	  gap_count = 1;
5961 /* If NEXT_STMT accesses the same DR as the previous statement,
5962 put the same TMP_DATA_REF as its vectorized statement; otherwise
5963 get the next data-ref from RESULT_CHAIN. */
	  if (!next_stmt || !DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
	    break;
	}
    }
  VEC_free (tree, heap, result_chain);
  return true;
}
5974 /* Create NCOPIES permutation statements using the mask MASK_BYTES (by
5975 building a vector of type MASK_TYPE from it) and two input vectors placed in
5976 DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy and
5977 shifting by STRIDE elements of DR_CHAIN for every copy.
   (STRIDE is the number of vectorized stmts for NODE divided by the number of
   copies).
5980 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
5981 the created stmts must be inserted. */
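/* For instance (illustrative numbers): if 8 vectorized stmts are needed for
   NODE and NCOPIES == 2, then STRIDE == 4, and copy number I takes its two
   inputs from DR_CHAIN slots FIRST_VEC_INDX + I*4 and SECOND_VEC_INDX + I*4.  */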
5984 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
5985 int *mask_array, int mask_nunits,
5986 tree mask_element_type, tree mask_type,
5987 int first_vec_indx, int second_vec_indx,
5988 gimple_stmt_iterator *gsi, slp_tree node,
5989 tree builtin_decl, tree vectype,
5990 VEC(tree,heap) *dr_chain,
5991 int ncopies, int vect_stmts_counter)
5993 tree t = NULL_TREE, mask_vec, mask, perm_dest;
5994 gimple perm_stmt = NULL;
5995 stmt_vec_info next_stmt_info;
5996 int i, group_size, stride, dr_chain_size;
5997 tree first_vec, second_vec, data_ref;
6000 VEC (tree, heap) *params = NULL;
6002 /* Create a vector mask. */
6003 for (i = mask_nunits - 1; i >= 0; --i)
6004 t = tree_cons (NULL_TREE, build_int_cst (mask_element_type, mask_array[i]),
6007 mask_vec = build_vector (mask_type, t);
6008 mask = vect_init_vector (stmt, mask_vec, mask_type, NULL);
6010 group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (node));
6011 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
6012 dr_chain_size = VEC_length (tree, dr_chain);
  /* Initialize the vect stmts of NODE to properly insert the generated
     stmts later.  */
6016 for (i = VEC_length (gimple, SLP_TREE_VEC_STMTS (node));
6017 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
6018 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (node), NULL);
6020 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
6021 for (i = 0; i < ncopies; i++)
6023 first_vec = VEC_index (tree, dr_chain, first_vec_indx);
6024 second_vec = VEC_index (tree, dr_chain, second_vec_indx);
6026 /* Build argument list for the vectorized call. */
6027 VEC_free (tree, heap, params);
6028 params = VEC_alloc (tree, heap, 3);
6029 VEC_quick_push (tree, params, first_vec);
6030 VEC_quick_push (tree, params, second_vec);
6031 VEC_quick_push (tree, params, mask);
6033 /* Generate the permute statement. */
6034 perm_stmt = gimple_build_call_vec (builtin_decl, params);
6035 data_ref = make_ssa_name (perm_dest, perm_stmt);
6036 gimple_call_set_lhs (perm_stmt, data_ref);
6037 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6038 FOR_EACH_SSA_TREE_OPERAND (sym, perm_stmt, iter, SSA_OP_ALL_VIRTUALS)
6040 if (TREE_CODE (sym) == SSA_NAME)
6041 sym = SSA_NAME_VAR (sym);
6042 mark_sym_for_renaming (sym);
6045 /* Store the vector statement in NODE. */
6046 VEC_replace (gimple, SLP_TREE_VEC_STMTS (node),
6047 stride * i + vect_stmts_counter, perm_stmt);
6049 first_vec_indx += stride;
6050 second_vec_indx += stride;
6053 /* Mark the scalar stmt as vectorized. */
6054 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
6055 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
6059 /* Given FIRST_MASK_ELEMENT - the mask element in element representation,
6060 return in CURRENT_MASK_ELEMENT its equivalent in target specific
6061 representation. Check that the mask is valid and return FALSE if not.
6062 Return TRUE in NEED_NEXT_VECTOR if the permutation requires to move to
6063 the next vector, i.e., the current first vector is not needed. */
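/* Example (illustrative): with MASK_NUNITS == 4, a two-operand permutation
   selects from the 8-element concatenation of its input vectors, so a mask
   element value of 7 (say FIRST_MASK_ELEMENT == 6 and M == 1, before any
   mask fixes) names element 3 of the second input vector.  */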
6066 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
6067 int mask_nunits, bool only_one_vec, int index,
6068 int *mask, int *current_mask_element,
6069 bool *need_next_vector)
6072 static int number_of_mask_fixes = 1;
6073 static bool mask_fixed = false;
6074 static bool needs_first_vector = false;
6076 /* Convert to target specific representation. */
6077 *current_mask_element = first_mask_element + m;
6078 /* Adjust the value in case it's a mask for second and third vectors. */
6079 *current_mask_element -= mask_nunits * (number_of_mask_fixes - 1);
6081 if (*current_mask_element < mask_nunits)
6082 needs_first_vector = true;
6084 /* We have only one input vector to permute but the mask accesses values in
6085 the next vector as well. */
6086 if (only_one_vec && *current_mask_element >= mask_nunits)
6088 if (vect_print_dump_info (REPORT_DETAILS))
6090 fprintf (vect_dump, "permutation requires at least two vectors ");
6091 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
6097 /* The mask requires the next vector. */
6098 if (*current_mask_element >= mask_nunits * 2)
6100 if (needs_first_vector || mask_fixed)
6102 /* We either need the first vector too or have already moved to the
	     next vector. In both cases, this permutation needs three
	     vectors.  */
6105 if (vect_print_dump_info (REPORT_DETAILS))
6107 fprintf (vect_dump, "permutation requires at "
6108 "least three vectors ");
6109 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
6115 /* We move to the next vector, dropping the first one and working with
	 the second and the third - we need to adjust the values of the mask
	 elements accordingly.  */
6118 *current_mask_element -= mask_nunits * number_of_mask_fixes;
6120 for (i = 0; i < index; i++)
6121 mask[i] -= mask_nunits * number_of_mask_fixes;
      number_of_mask_fixes++;
      mask_fixed = true;
    }
6127 *need_next_vector = mask_fixed;
6129 /* This was the last element of this mask. Start a new one. */
6130 if (index == mask_nunits - 1)
    {
      number_of_mask_fixes = 1;
      mask_fixed = false;
      needs_first_vector = false;
    }

  return true;
}
6141 /* Generate vector permute statements from a list of loads in DR_CHAIN.
6142 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
6143 permute statements for SLP_NODE_INSTANCE. */
6145 vect_transform_slp_perm_load (gimple stmt, VEC (tree, heap) *dr_chain,
6146 gimple_stmt_iterator *gsi, int vf,
6147 slp_instance slp_node_instance, bool analyze_only)
6149 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6150 tree mask_element_type = NULL_TREE, mask_type;
6151 int i, j, k, m, scale, mask_nunits, nunits, vec_index = 0, scalar_index;
6153 tree vectype = STMT_VINFO_VECTYPE (stmt_info), builtin_decl;
6154 gimple next_scalar_stmt;
6155 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
6156 int first_mask_element;
6157 int index, unroll_factor, *mask, current_mask_element, ncopies;
6158 bool only_one_vec = false, need_next_vector = false;
6159 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
6161 if (!targetm.vectorize.builtin_vec_perm)
6163 if (vect_print_dump_info (REPORT_DETAILS))
6165 fprintf (vect_dump, "no builtin for vect permute for ");
6166 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
6172 builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
6173 &mask_element_type);
6174 if (!builtin_decl || !mask_element_type)
6176 if (vect_print_dump_info (REPORT_DETAILS))
6178 fprintf (vect_dump, "no builtin for vect permute for ");
6179 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
6185 mask_type = get_vectype_for_scalar_type (mask_element_type);
6186 mask_nunits = TYPE_VECTOR_SUBPARTS (mask_type);
6187 mask = (int *) xmalloc (sizeof (int) * mask_nunits);
6188 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6189 scale = mask_nunits / nunits;
6190 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
6192 /* The number of vector stmts to generate based only on SLP_NODE_INSTANCE
6193 unrolling factor. */
6194 orig_vec_stmts_num = group_size *
6195 SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
6196 if (orig_vec_stmts_num == 1)
6197 only_one_vec = true;
6199 /* Number of copies is determined by the final vectorization factor
     relative to the SLP_NODE_INSTANCE unrolling factor.  */
6201 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
6203 /* Generate permutation masks for every NODE. Number of masks for each NODE
6204 is equal to GROUP_SIZE.
6205 E.g., we have a group of three nodes with three loads from the same
     location in each node, and the vector size is 4. I.e., we have an
6207 a0b0c0a1b1c1... sequence and we need to create the following vectors:
6208 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
6209 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
6212 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9} (in target
     specific type, e.g., in bytes for Altivec).
6214 The last mask is illegal since we assume two operands for permute
6215 operation, and the mask element values can't be outside that range. Hence,
6216 the last mask must be converted into {2,5,5,5}.
6217 For the first two permutations we need the first and the second input
6218 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
     we need the second and the third vectors: {b1,c1,a2,b2} and
     {c2,a3,b3,c3}.  */
6223 VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (slp_node_instance),
6229 vect_stmts_counter = 0;
6231 first_vec_index = vec_index++;
6233 second_vec_index = first_vec_index;
6235 second_vec_index = vec_index++;
6237 for (j = 0; j < unroll_factor; j++)
6239 for (k = 0; k < group_size; k++)
6241 first_mask_element = (i + j * group_size) * scale;
6242 for (m = 0; m < scale; m++)
6244 if (!vect_get_mask_element (stmt, first_mask_element, m,
6245 mask_nunits, only_one_vec, index, mask,
					  &current_mask_element, &need_next_vector))
6249 mask[index++] = current_mask_element;
6252 if (index == mask_nunits)
6257 if (need_next_vector)
6259 first_vec_index = second_vec_index;
6260 second_vec_index = vec_index;
6263 next_scalar_stmt = VEC_index (gimple,
6264 SLP_TREE_SCALAR_STMTS (node), scalar_index++);
6266 vect_create_mask_and_perm (stmt, next_scalar_stmt,
6267 mask, mask_nunits, mask_element_type, mask_type,
6268 first_vec_index, second_vec_index, gsi, node,
6269 builtin_decl, vectype, dr_chain, ncopies,
6270 vect_stmts_counter++);
6281 /* vectorizable_load.
   Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
   can be vectorized.
6285 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6286 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6287 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6290 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
6291 slp_tree slp_node, slp_instance slp_node_instance)
6294 tree vec_dest = NULL;
6295 tree data_ref = NULL;
6296 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6297 stmt_vec_info prev_stmt_info;
6298 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6299 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6300 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
6301 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6302 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
6303 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6306 gimple new_stmt = NULL;
6308 enum dr_alignment_support alignment_support_scheme;
6309 tree dataref_ptr = NULL_TREE;
6311 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6313 int i, j, group_size;
6314 tree msq = NULL_TREE, lsq;
6315 tree offset = NULL_TREE;
6316 tree realignment_token = NULL_TREE;
6318 VEC(tree,heap) *dr_chain = NULL;
6319 bool strided_load = false;
6323 bool compute_in_loop = false;
6324 struct loop *at_loop;
6326 bool slp = (slp_node != NULL);
6327 bool slp_perm = false;
6328 enum tree_code code;
6330 /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6338 gcc_assert (ncopies >= 1);
6340 /* FORNOW. This restriction should be relaxed. */
6341 if (nested_in_vect_loop && ncopies > 1)
6343 if (vect_print_dump_info (REPORT_DETAILS))
6344 fprintf (vect_dump, "multiple types in nested loop.");
6348 if (slp && SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
6351 if (!STMT_VINFO_RELEVANT_P (stmt_info))
6354 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
6357 /* Is vectorizable load? */
6358 if (!is_gimple_assign (stmt))
6361 scalar_dest = gimple_assign_lhs (stmt);
6362 if (TREE_CODE (scalar_dest) != SSA_NAME)
6365 code = gimple_assign_rhs_code (stmt);
6366 if (code != ARRAY_REF
6367 && code != INDIRECT_REF
6368 && !STMT_VINFO_STRIDED_ACCESS (stmt_info))
6371 if (!STMT_VINFO_DATA_REF (stmt_info))
6374 scalar_type = TREE_TYPE (DR_REF (dr));
6375 mode = (int) TYPE_MODE (vectype);
  /* FORNOW. In some cases we can vectorize even if the data-type is not supported
6378 (e.g. - data copies). */
6379 if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
6381 if (vect_print_dump_info (REPORT_DETAILS))
6382 fprintf (vect_dump, "Aligned load, but unsupported type.");
6386 /* If accesses through a pointer to vectype do not alias the original
6387 memory reference we have a problem. */
6388 if (get_alias_set (vectype) != get_alias_set (scalar_type)
6389 && !alias_set_subset_of (get_alias_set (vectype),
6390 get_alias_set (scalar_type)))
6392 if (vect_print_dump_info (REPORT_DETAILS))
6393 fprintf (vect_dump, "vector type does not alias scalar type");
6397 /* Check if the load is a part of an interleaving chain. */
6398 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
6400 strided_load = true;
6402 gcc_assert (! nested_in_vect_loop);
6404 /* Check if interleaving is supported. */
6405 if (!vect_strided_load_supported (vectype)
6406 && !PURE_SLP_STMT (stmt_info) && !slp)
6410 if (!vec_stmt) /* transformation not required. */
6412 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6413 vect_model_load_cost (stmt_info, ncopies, NULL);
6417 if (vect_print_dump_info (REPORT_DETAILS))
6418 fprintf (vect_dump, "transform load.");
6424 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
6425 /* Check if the chain of loads is already vectorized. */
6426 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
6428 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6431 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6432 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
6434 /* VEC_NUM is the number of vect stmts to be created for this group. */
6437 strided_load = false;
6438 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6441 vec_num = group_size;
6443 dr_chain = VEC_alloc (tree, heap, vec_num);
6449 group_size = vec_num = 1;
6452 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
6453 gcc_assert (alignment_support_scheme);
6455 /* In case the vectorization factor (VF) is bigger than the number
6456 of elements that we can fit in a vectype (nunits), we have to generate
6457 more than one vector stmt - i.e - we need to "unroll" the
6458 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6459 from one copy of the vector stmt to the next, in the field
6460 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6461 stages to find the correct vector defs to be used when vectorizing
6462 stmts that use the defs of the current stmt. The example below illustrates
6463 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
6464 4 vectorized stmts):
6466 before vectorization:
6467 RELATED_STMT VEC_STMT
6471 step 1: vectorize stmt S1:
6472 We first create the vector stmt VS1_0, and, as usual, record a
6473 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6474 Next, we create the vector stmt VS1_1, and record a pointer to
6475 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6476 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6478 RELATED_STMT VEC_STMT
6479 VS1_0: vx0 = memref0 VS1_1 -
6480 VS1_1: vx1 = memref1 VS1_2 -
6481 VS1_2: vx2 = memref2 VS1_3 -
6482 VS1_3: vx3 = memref3 - -
6483 S1: x = load - VS1_0
     See the documentation of vect_get_vec_def_for_stmt_copy for how the
     information we recorded in the RELATED_STMT field is used to vectorize
     stmt S2.  */
6490 /* In case of interleaving (non-unit strided access):
6497 Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
6501 VS2: vx1 = &base + vec_size*1
6502 VS3: vx3 = &base + vec_size*2
6503 VS4: vx4 = &base + vec_size*3
6505 Then permutation statements are generated:
6507 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
6508 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
6511 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6512 (the order of the data-refs in the output of vect_permute_load_chain
6513 corresponds to the order of scalar stmts in the interleaving chain - see
6514 the documentation of vect_permute_load_chain()).
6515 The generation of permutation stmts and recording them in
6516 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
6518 In case of both multiple types and interleaving, the vector loads and
6519 permutation stmts above are created for every copy. The result vector stmts
6520 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6521 STMT_VINFO_RELATED_STMT for the next copies. */
6523 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6524 on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

         p = initial_addr;
         indx = 0;
         loop {
           p = p + indx * vectype_size;
           vec_dest = *(p);
           indx = indx + 1;
         }
6534 Otherwise, the data reference is potentially unaligned on a target that
6535 does not support unaligned accesses (dr_explicit_realign_optimized) -
6536 then generate the following code, in which the data in each iteration is
6537 obtained by two vector loads, one from the previous iteration, and one
6538 from the current iteration:
6540 msq_init = *(floor(p1))
6541 p2 = initial_addr + VS - 1;
6542 realignment_token = call target_builtin;
           indx = 0;
           loop {
             p2 = p2 + indx * vectype_size
             lsq = *(floor(p2))
             vec_dest = realign_load (msq, lsq, realignment_token)
             indx = indx + 1;
             msq = lsq;
           }  */
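/* The realign_load operation above conceptually concatenates the two vectors
   loaded from floor-aligned addresses and extracts the VS elements spanning
   the unaligned access; e.g. (illustrative, VS == 4, misalignment of one
   element):

     msq = {a b c d}, lsq = {e f g h}  ==>  vec_dest = {b c d e}

   REALIGNMENT_TOKEN encodes the misalignment in a target specific way (on
   Altivec, for example, it is a permute control vector).  */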
6552 /* If the misalignment remains the same throughout the execution of the
6553 loop, we can create the init_addr and permutation mask at the loop
6554 preheader. Otherwise, it needs to be created inside the loop.
6555 This can only occur when vectorizing memory accesses in the inner-loop
6556 nested within an outer-loop that is being vectorized. */
6558 if (nested_in_vect_loop_p (loop, stmt)
6559 && (TREE_INT_CST_LOW (DR_STEP (dr))
6560 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6562 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6563 compute_in_loop = true;
6566 if ((alignment_support_scheme == dr_explicit_realign_optimized
6567 || alignment_support_scheme == dr_explicit_realign)
6568 && !compute_in_loop)
6570 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6571 alignment_support_scheme, NULL_TREE,
6573 if (alignment_support_scheme == dr_explicit_realign_optimized)
6575 phi = SSA_NAME_DEF_STMT (msq);
6576 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
6582 prev_stmt_info = NULL;
6583 for (j = 0; j < ncopies; j++)
6585 /* 1. Create the vector pointer update chain. */
      if (j == 0)
	dataref_ptr = vect_create_data_ref_ptr (first_stmt, at_loop, offset,
						&dummy, &ptr_incr, false,
						&inv_p, NULL_TREE);
      else
	dataref_ptr =
	  bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
      for (i = 0; i < vec_num; i++)
	{
	  if (i > 0)
	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					   NULL_TREE);
6601 /* 2. Create the vector-load in the loop. */
	  switch (alignment_support_scheme)
	    {
	    case dr_aligned:
	      gcc_assert (aligned_access_p (first_dr));
	      data_ref = build_fold_indirect_ref (dataref_ptr);
	      break;
6608 case dr_unaligned_supported:
6610 int mis = DR_MISALIGNMENT (first_dr);
6611 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
6613 tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
		data_ref =
		  build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
		break;
	      }
6618 case dr_explicit_realign:
6621 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
6623 if (compute_in_loop)
		  msq = vect_setup_realignment (first_stmt, gsi,
						&realignment_token,
						dr_explicit_realign,
						dataref_ptr, NULL);
6629 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
6630 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6631 new_stmt = gimple_build_assign (vec_dest, data_ref);
6632 new_temp = make_ssa_name (vec_dest, new_stmt);
6633 gimple_assign_set_lhs (new_stmt, new_temp);
6634 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6635 copy_virtual_operands (new_stmt, stmt);
6636 mark_symbols_for_renaming (new_stmt);
6639 bump = size_binop (MULT_EXPR, vs_minus_1,
6640 TYPE_SIZE_UNIT (scalar_type));
6641 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
		data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
		break;
	      }
6645 case dr_explicit_realign_optimized:
	      data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
	      break;
	    default:
	      gcc_unreachable ();
	    }
6651 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6652 new_stmt = gimple_build_assign (vec_dest, data_ref);
6653 new_temp = make_ssa_name (vec_dest, new_stmt);
6654 gimple_assign_set_lhs (new_stmt, new_temp);
6655 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6656 mark_symbols_for_renaming (new_stmt);
6658 /* 3. Handle explicit realignment if necessary/supported. Create in
6659 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
6660 if (alignment_support_scheme == dr_explicit_realign_optimized
6661 || alignment_support_scheme == dr_explicit_realign)
6665 lsq = gimple_assign_lhs (new_stmt);
6666 if (!realignment_token)
6667 realignment_token = dataref_ptr;
6668 vec_dest = vect_create_destination_var (scalar_dest, vectype);
	      tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
			    realignment_token);
6671 new_stmt = gimple_build_assign (vec_dest, tmp);
6672 new_temp = make_ssa_name (vec_dest, new_stmt);
6673 gimple_assign_set_lhs (new_stmt, new_temp);
6674 vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (alignment_support_scheme == dr_explicit_realign_optimized)
		{
		  if (i == vec_num - 1 && j == ncopies - 1)
		    add_phi_arg (phi, lsq, loop_latch_edge (containing_loop));
		  msq = lsq;
		}
6685 /* 4. Handle invariant-load. */
	  if (inv_p)
	    {
	      gcc_assert (!strided_load);
6689 gcc_assert (nested_in_vect_loop_p (loop, stmt));
	      if (j == 0)
		{
		  int k;
		  tree t = NULL_TREE;
		  tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
		  /* CHECKME: bitpos depends on endianness?  */
6697 bitpos = bitsize_zero_node;
		  vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
				    bitsize, bitpos);
		  vec_dest =
		    vect_create_destination_var (scalar_dest, NULL_TREE);
6702 new_stmt = gimple_build_assign (vec_dest, vec_inv);
6703 new_temp = make_ssa_name (vec_dest, new_stmt);
6704 gimple_assign_set_lhs (new_stmt, new_temp);
6705 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6707 for (k = nunits - 1; k >= 0; --k)
6708 t = tree_cons (NULL_TREE, new_temp, t);
6709 /* FIXME: use build_constructor directly. */
6710 vec_inv = build_constructor_from_list (vectype, t);
6711 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6712 new_stmt = SSA_NAME_DEF_STMT (new_temp);
		}
	      else
		gcc_unreachable (); /* FORNOW. */
	    }
6718 /* Collect vector loads and later create their permutation in
6719 vect_transform_strided_load (). */
6720 if (strided_load || slp_perm)
6721 VEC_quick_push (tree, dr_chain, new_temp);
6723 /* Store vector loads in the corresponding SLP_NODE. */
6724 if (slp && !slp_perm)
6725 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
6728 if (slp && !slp_perm)
6733 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi,
6734 LOOP_VINFO_VECT_FACTOR (loop_vinfo),
6735 slp_node_instance, false))
6737 VEC_free (tree, heap, dr_chain);
6745 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
6748 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6749 VEC_free (tree, heap, dr_chain);
6750 dr_chain = VEC_alloc (tree, heap, group_size);
6755 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6757 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6758 prev_stmt_info = vinfo_for_stmt (new_stmt);
6764 VEC_free (tree, heap, dr_chain);
6770 /* Function vectorizable_live_operation.
6772 STMT computes a value that is used outside the loop. Check if
6773 it can be supported. */
6776 vectorizable_live_operation (gimple stmt,
6777 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
6778 gimple *vec_stmt ATTRIBUTE_UNUSED)
6780 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6781 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6782 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6788 enum vect_def_type dt;
6789 enum tree_code code;
6790 enum gimple_rhs_class rhs_class;
6792 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
6794 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
6797 if (!is_gimple_assign (stmt))
6800 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
6803 /* FORNOW. CHECKME. */
6804 if (nested_in_vect_loop_p (loop, stmt))
6807 code = gimple_assign_rhs_code (stmt);
6808 op_type = TREE_CODE_LENGTH (code);
6809 rhs_class = get_gimple_rhs_class (code);
6810 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
6811 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
6813 /* FORNOW: support only if all uses are invariant. This means
6814 that the scalar operations can remain in place, unvectorized.
6815 The original last scalar value that they compute will be used. */
6817 for (i = 0; i < op_type; i++)
6819 if (rhs_class == GIMPLE_SINGLE_RHS)
6820 op = TREE_OPERAND (gimple_op (stmt, 1), i);
6822 op = gimple_op (stmt, i + 1);
6823 if (op && !vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt))
6825 if (vect_print_dump_info (REPORT_DETAILS))
6826 fprintf (vect_dump, "use not simple.");
6830 if (dt != vect_invariant_def && dt != vect_constant_def)
6834 /* No transformation is required for the cases we currently support. */
6839 /* Function vect_is_simple_cond.
6842 LOOP - the loop that is being vectorized.
6843 COND - Condition that is checked for simple use.
6845 Returns whether a COND can be vectorized. Checks whether
   condition operands are supportable using vect_is_simple_use.  */
6849 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
6853 enum vect_def_type dt;
6855 if (!COMPARISON_CLASS_P (cond))
6858 lhs = TREE_OPERAND (cond, 0);
6859 rhs = TREE_OPERAND (cond, 1);
6861 if (TREE_CODE (lhs) == SSA_NAME)
6863 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
6864 if (!vect_is_simple_use (lhs, loop_vinfo, &lhs_def_stmt, &def, &dt))
6867 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
6868 && TREE_CODE (lhs) != FIXED_CST)
6871 if (TREE_CODE (rhs) == SSA_NAME)
6873 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
6874 if (!vect_is_simple_use (rhs, loop_vinfo, &rhs_def_stmt, &def, &dt))
6877 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
6878 && TREE_CODE (rhs) != FIXED_CST)
6884 /* vectorizable_condition.
6886 Check if STMT is conditional modify expression that can be vectorized.
6887 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.
6891 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
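/* For example (illustrative): the scalar statement

     x = a < b ? c : d;

   is transformed, per vector copy, into

     vx = VEC_COND_EXPR <va < vb, vc, vd>;  */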
6894 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
6897 tree scalar_dest = NULL_TREE;
6898 tree vec_dest = NULL_TREE;
6899 tree op = NULL_TREE;
6900 tree cond_expr, then_clause, else_clause;
6901 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6902 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6903 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
6904 tree vec_compare, vec_cond_expr;
6906 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6907 enum machine_mode vec_mode;
6909 enum vect_def_type dt;
6910 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6911 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6912 enum tree_code code;
6914 gcc_assert (ncopies >= 1);
  if (ncopies > 1)
    return false; /* FORNOW */
6918 if (!STMT_VINFO_RELEVANT_P (stmt_info))
6921 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
6924 /* FORNOW: SLP not supported. */
6925 if (STMT_SLP_TYPE (stmt_info))
6928 /* FORNOW: not yet supported. */
6929 if (STMT_VINFO_LIVE_P (stmt_info))
6931 if (vect_print_dump_info (REPORT_DETAILS))
6932 fprintf (vect_dump, "value used after loop.");
6936 /* Is vectorizable conditional operation? */
6937 if (!is_gimple_assign (stmt))
6940 code = gimple_assign_rhs_code (stmt);
6942 if (code != COND_EXPR)
6945 gcc_assert (gimple_assign_single_p (stmt));
6946 op = gimple_assign_rhs1 (stmt);
6947 cond_expr = TREE_OPERAND (op, 0);
6948 then_clause = TREE_OPERAND (op, 1);
6949 else_clause = TREE_OPERAND (op, 2);
6951 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
  /* We do not handle two different vector types for the condition
     and the values.  */
6956 if (TREE_TYPE (TREE_OPERAND (cond_expr, 0)) != TREE_TYPE (vectype))
6959 if (TREE_CODE (then_clause) == SSA_NAME)
6961 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
6962 if (!vect_is_simple_use (then_clause, loop_vinfo,
6963 &then_def_stmt, &def, &dt))
6966 else if (TREE_CODE (then_clause) != INTEGER_CST
6967 && TREE_CODE (then_clause) != REAL_CST
6968 && TREE_CODE (then_clause) != FIXED_CST)
6971 if (TREE_CODE (else_clause) == SSA_NAME)
6973 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
6974 if (!vect_is_simple_use (else_clause, loop_vinfo,
6975 &else_def_stmt, &def, &dt))
6978 else if (TREE_CODE (else_clause) != INTEGER_CST
6979 && TREE_CODE (else_clause) != REAL_CST
6980 && TREE_CODE (else_clause) != FIXED_CST)
6984 vec_mode = TYPE_MODE (vectype);
  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (op, vec_mode);
    }

  /* Transform.  */
6995 scalar_dest = gimple_assign_lhs (stmt);
6996 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6998 /* Handle cond expr. */
  vec_cond_lhs =
    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
  vec_cond_rhs =
    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
7003 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
7004 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
7006 /* Arguments are ready. Create the new vector stmt. */
7007 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
7008 vec_cond_lhs, vec_cond_rhs);
7009 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
7010 vec_compare, vec_then_clause, vec_else_clause);
7012 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
7013 new_temp = make_ssa_name (vec_dest, *vec_stmt);
7014 gimple_assign_set_lhs (*vec_stmt, new_temp);
7015 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
7021 /* Function vect_transform_stmt.
7023 Create a vectorized stmt to replace STMT, and insert it at BSI. */
7026 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
7027 bool *strided_store, slp_tree slp_node,
7028 slp_instance slp_node_instance)
7030 bool is_store = false;
7031 gimple vec_stmt = NULL;
7032 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7033 gimple orig_stmt_in_pattern;
7036 switch (STMT_VINFO_TYPE (stmt_info))
7038 case type_demotion_vec_info_type:
7039 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
7043 case type_promotion_vec_info_type:
7044 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
7048 case type_conversion_vec_info_type:
7049 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7053 case induc_vec_info_type:
7054 gcc_assert (!slp_node);
7055 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7059 case op_vec_info_type:
7060 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7064 case assignment_vec_info_type:
7065 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7069 case load_vec_info_type:
7070 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
7075 case store_vec_info_type:
7076 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
7078 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
7080 /* In case of interleaving, the whole chain is vectorized when the
7081 last store in the chain is reached. Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
7084 *strided_store = true;
7085 if (STMT_VINFO_VEC_STMT (stmt_info))
7092 case condition_vec_info_type:
7093 gcc_assert (!slp_node);
7094 done = vectorizable_condition (stmt, gsi, &vec_stmt);
7098 case call_vec_info_type:
7099 gcc_assert (!slp_node);
7100 done = vectorizable_call (stmt, gsi, &vec_stmt);
7103 case reduc_vec_info_type:
7104 gcc_assert (!slp_node);
7105 done = vectorizable_reduction (stmt, gsi, &vec_stmt);
7110 if (!STMT_VINFO_LIVE_P (stmt_info))
7112 if (vect_print_dump_info (REPORT_DETAILS))
7113 fprintf (vect_dump, "stmt not supported.");
7118 if (STMT_VINFO_LIVE_P (stmt_info)
7119 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7121 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
7127 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
7128 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
7129 if (orig_stmt_in_pattern)
7131 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
7132 /* STMT was inserted by the vectorizer to replace a computation idiom.
7133 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
7134 computed this idiom. We need to record a pointer to VEC_STMT in
7135 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
7136 documentation of vect_pattern_recog. */
7137 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
7139 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
7140 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
/* This function builds ni_name = number of iterations the loop executes,
   on the loop preheader.  */
7153 vect_build_loop_niters (loop_vec_info loop_vinfo)
7156 gimple_seq stmts = NULL;
7158 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7159 tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));
7161 var = create_tmp_var (TREE_TYPE (ni), "niters");
7162 add_referenced_var (var);
7163 ni_name = force_gimple_operand (ni, &stmts, false, var);
7165 pe = loop_preheader_edge (loop);
7168 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7169 gcc_assert (!new_bb);
7176 /* This function generates the following statements:
7178 ni_name = number of iterations loop executes
7179 ratio = ni_name / vf
7180 ratio_mult_vf_name = ratio * vf
7182 and places them at the loop preheader edge. */
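/* For example: with ni_name == 100 and vf == 8, ratio is 100 >> 3 == 12 and
   ratio_mult_vf_name is 12 << 3 == 96; the remaining 4 iterations are then
   executed by the epilog loop created in vect_do_peeling_for_loop_bound.  */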
7185 vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
7187 tree *ratio_mult_vf_name_ptr,
7188 tree *ratio_name_ptr)
7197 tree ratio_mult_vf_name;
7198 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7199 tree ni = LOOP_VINFO_NITERS (loop_vinfo);
7200 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7203 pe = loop_preheader_edge (loop);
  /* Generate a temporary variable that contains the number of iterations
     the loop executes.  */
7208 ni_name = vect_build_loop_niters (loop_vinfo);
7209 log_vf = build_int_cst (TREE_TYPE (ni), exact_log2 (vf));
7211 /* Create: ratio = ni >> log2(vf) */
7213 ratio_name = fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_name), ni_name, log_vf);
7214 if (!is_gimple_val (ratio_name))
7216 var = create_tmp_var (TREE_TYPE (ni), "bnd");
7217 add_referenced_var (var);
7220 ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
7221 pe = loop_preheader_edge (loop);
7222 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7223 gcc_assert (!new_bb);
7226 /* Create: ratio_mult_vf = ratio << log2 (vf). */
7228 ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
7229 ratio_name, log_vf);
7230 if (!is_gimple_val (ratio_mult_vf_name))
7232 var = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf");
7233 add_referenced_var (var);
7236 ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmts,
7238 pe = loop_preheader_edge (loop);
7239 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7240 gcc_assert (!new_bb);
7243 *ni_name_ptr = ni_name;
7244 *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
7245 *ratio_name_ptr = ratio_name;
7251 /* Function vect_update_ivs_after_vectorizer.
7253 "Advance" the induction variables of LOOP to the value they should take
7254 after the execution of LOOP. This is currently necessary because the
7255 vectorizer does not handle induction variables that are used after the
   loop. Such a situation occurs when the last iterations of LOOP are
   peeled, because:
7258 1. We introduced new uses after LOOP for IVs that were not originally used
7259 after LOOP: the IVs of LOOP are now used by an epilog loop.
7260 2. LOOP is going to be vectorized; this means that it will iterate N/VF
7261 times, whereas the loop IVs should be bumped N times.
7264 - LOOP - a loop that is going to be vectorized. The last few iterations
7265 of LOOP were peeled.
7266 - NITERS - the number of iterations that LOOP executes (before it is
7267 vectorized). i.e, the number of times the ivs should be bumped.
7268 - UPDATE_E - a successor edge of LOOP->exit that is on the (only) path
7269 coming out from LOOP on which there are uses of the LOOP ivs
7270 (this is the path from LOOP->exit to epilog_loop->preheader).
7272 The new definitions of the ivs are placed in LOOP->exit.
7273 The phi args associated with the edge UPDATE_E in the bb
7274 UPDATE_E->dest are updated accordingly.
7276 Assumption 1: Like the rest of the vectorizer, this function assumes
7277 a single loop exit that has a single predecessor.
7279 Assumption 2: The phi nodes in the LOOP header and in update_bb are
7280 organized in the same order.
7282 Assumption 3: The access function of the ivs is simple enough (see
7283 vect_can_advance_ivs_p). This assumption will be relaxed in the future.
7285 Assumption 4: Exactly one of the successors of LOOP exit-bb is on a path
7286 coming out of LOOP on which the ivs of LOOP are used (this is the path
7287 that leads to the epilog loop; other paths skip the epilog loop). This
7288 path starts with the edge UPDATE_E, and its destination (denoted update_bb)
   needs to have its phis updated.  */
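/* For instance (illustrative): an IV with initial value 0 and step 4 in a
   loop that originally executed NITERS == 100 iterations is advanced to
   0 + 100 * 4 == 400 at the loop exit, which is exactly the value the
   epilog loop must start from.  */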
7293 vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
7296 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7297 basic_block exit_bb = single_exit (loop)->dest;
7299 gimple_stmt_iterator gsi, gsi1;
7300 basic_block update_bb = update_e->dest;
7302 /* gcc_assert (vect_can_advance_ivs_p (loop_vinfo)); */
7304 /* Make sure there exists a single-predecessor exit bb: */
7305 gcc_assert (single_pred_p (exit_bb));
7307 for (gsi = gsi_start_phis (loop->header), gsi1 = gsi_start_phis (update_bb);
7308 !gsi_end_p (gsi) && !gsi_end_p (gsi1);
7309 gsi_next (&gsi), gsi_next (&gsi1))
7311 tree access_fn = NULL;
7312 tree evolution_part;
7315 tree var, ni, ni_name;
7316 gimple_stmt_iterator last_gsi;
7318 phi = gsi_stmt (gsi);
7319 phi1 = gsi_stmt (gsi1);
7320 if (vect_print_dump_info (REPORT_DETAILS))
7322 fprintf (vect_dump, "vect_update_ivs_after_vectorizer: phi: ");
7323 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
7326 /* Skip virtual phi's. */
7327 if (!is_gimple_reg (SSA_NAME_VAR (PHI_RESULT (phi))))
7329 if (vect_print_dump_info (REPORT_DETAILS))
7330 fprintf (vect_dump, "virtual phi. skip.");
7334 /* Skip reduction phis. */
7335 if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (phi)) == vect_reduction_def)
7337 if (vect_print_dump_info (REPORT_DETAILS))
7338 fprintf (vect_dump, "reduc phi. skip.");
7342 access_fn = analyze_scalar_evolution (loop, PHI_RESULT (phi));
7343 gcc_assert (access_fn);
7344 STRIP_NOPS (access_fn);
      evolution_part
	= unshare_expr (evolution_part_in_loop_num (access_fn, loop->num));
7347 gcc_assert (evolution_part != NULL_TREE);
7349 /* FORNOW: We do not support IVs whose evolution function is a polynomial
7350 of degree >= 2 or exponential. */
7351 gcc_assert (!tree_is_chrec (evolution_part));
7353 step_expr = evolution_part;
7354 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
7357 if (POINTER_TYPE_P (TREE_TYPE (init_expr)))
	ni = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (init_expr),
			  init_expr,
			  fold_convert (sizetype,
					fold_build2 (MULT_EXPR, TREE_TYPE (niters),
						     niters, step_expr)));
      else
	ni = fold_build2 (PLUS_EXPR, TREE_TYPE (init_expr),
			  fold_build2 (MULT_EXPR, TREE_TYPE (init_expr),
				       fold_convert (TREE_TYPE (init_expr),
						     niters),
				       step_expr),
			  init_expr);
7373 var = create_tmp_var (TREE_TYPE (init_expr), "tmp");
7374 add_referenced_var (var);
7376 last_gsi = gsi_last_bb (exit_bb);
7377 ni_name = force_gimple_operand_gsi (&last_gsi, ni, false, var,
7378 true, GSI_SAME_STMT);
7380 /* Fix phi expressions in the successor bb. */
7381 SET_PHI_ARG_DEF (phi1, update_e->dest_idx, ni_name);
7385 /* Return the more conservative threshold between the
7386 min_profitable_iters returned by the cost model and the user
7387 specified threshold, if provided. */
7390 conservative_cost_threshold (loop_vec_info loop_vinfo,
7391 int min_profitable_iters)
7394 int min_scalar_loop_bound;
7396 min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
7397 * LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 1);
  /* Use the cost model only if it is more conservative than the user specified
     threshold.  */
7401 th = (unsigned) min_scalar_loop_bound;
7402 if (min_profitable_iters
7403 && (!min_scalar_loop_bound
7404 || min_profitable_iters > min_scalar_loop_bound))
7405 th = (unsigned) min_profitable_iters;
  if (th && vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "Vectorization may not be profitable.");

  return th;
}
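/* For example (illustrative numbers): with PARAM_MIN_VECT_LOOP_BOUND == 2 and
   a vectorization factor of 4, min_scalar_loop_bound is 2 * 4 - 1 == 7.  A
   cost model estimate of 5 profitable iterations is then overridden and the
   returned threshold is 7, whereas an estimate of 10 yields 10.  */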
7413 /* Function vect_do_peeling_for_loop_bound
7415 Peel the last iterations of the loop represented by LOOP_VINFO.
7416 The peeled iterations form a new epilog loop. Given that the loop now
7417 iterates NITERS times, the new epilog loop iterates
7418 NITERS % VECTORIZATION_FACTOR times.
7420 The original loop will later be made to iterate
7421 NITERS / VECTORIZATION_FACTOR times (this value is placed into RATIO). */
7424 vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio)
7426 tree ni_name, ratio_mult_vf_name;
7427 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7428 struct loop *new_loop;
7430 basic_block preheader;
7432 bool check_profitability = false;
7433 unsigned int th = 0;
7434 int min_profitable_iters;
7436 if (vect_print_dump_info (REPORT_DETAILS))
7437 fprintf (vect_dump, "=== vect_do_peeling_for_loop_bound ===");
7439 initialize_original_copy_tables ();
7441 /* Generate the following variables on the preheader of original loop:
     ni_name = number of iterations the original loop executes
7444 ratio = ni_name / vf
7445 ratio_mult_vf_name = ratio * vf */
7446 vect_generate_tmps_on_preheader (loop_vinfo, &ni_name,
7447 &ratio_mult_vf_name, ratio);
7449 loop_num = loop->num;
  /* If the cost model check was not done during versioning or during
     peeling for alignment, do it here.  */
7453 if (!VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
7454 && !VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))
7455 && !LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
7457 check_profitability = true;
7459 /* Get profitability threshold for vectorized loop. */
7460 min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);
7462 th = conservative_cost_threshold (loop_vinfo,
7463 min_profitable_iters);
7466 new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
7467 ratio_mult_vf_name, ni_name, false,
7468 th, check_profitability);
7469 gcc_assert (new_loop);
7470 gcc_assert (loop_num == loop->num);
7471 #ifdef ENABLE_CHECKING
  slpeel_verify_cfg_after_peeling (loop, new_loop);
#endif
7475 /* A guard that controls whether the new_loop is to be executed or skipped
7476 is placed in LOOP->exit. LOOP->exit therefore has two successors - one
7477 is the preheader of NEW_LOOP, where the IVs from LOOP are used. The other
7478 is a bb after NEW_LOOP, where these IVs are not used. Find the edge that
7479 is on the path where the LOOP IVs are used and need to be updated. */
7481 preheader = loop_preheader_edge (new_loop)->src;
7482 if (EDGE_PRED (preheader, 0)->src == single_exit (loop)->dest)
7483 update_e = EDGE_PRED (preheader, 0);
7485 update_e = EDGE_PRED (preheader, 1);
7487 /* Update IVs of original loop as if they were advanced
7488 by ratio_mult_vf_name steps. */
7489 vect_update_ivs_after_vectorizer (loop_vinfo, ratio_mult_vf_name, update_e);
  /* After peeling we have to reset scalar evolution analyzer.  */
  scev_reset ();
7494 free_original_copy_tables ();
7498 /* Function vect_gen_niters_for_prolog_loop
7500 Set the number of iterations for the loop represented by LOOP_VINFO
7501 to the minimum between LOOP_NITERS (the original iteration count of the loop)
7502 and the misalignment of DR - the data reference recorded in
7503 LOOP_VINFO_UNALIGNED_DR (LOOP_VINFO). As a result, after the execution of
7504 this loop, the data reference DR will refer to an aligned location.
7506 The following computation is generated:
7508 If the misalignment of DR is known at compile time:
7509 addr_mis = int mis = DR_MISALIGNMENT (dr);
7510 Else, compute address misalignment in bytes:
7511 addr_mis = addr & (vectype_size - 1)
7513 prolog_niters = min (LOOP_NITERS, ((VF - addr_mis/elem_size)&(VF-1))/step)
7515 (elem_size = element type size; an element is the scalar element whose type
7516 is the inner type of the vectype)
7518 When the step of the data-ref in the loop is not 1 (as in interleaved data
7519 and SLP), the number of iterations of the prolog must be divided by the step
7520 (which is equal to the size of interleaved group).
7522 The above formulas assume that VF == number of elements in the vector. This
7523 may not hold when there are multiple-types in the loop.
7524 In this case, for some data-references in the loop the VF does not represent
7525 the number of elements that fit in the vector. Therefore, instead of VF we
7526 use TYPE_VECTOR_SUBPARTS. */
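/* A worked example with illustrative numbers: for a V4SI access
   (nelements == 4, element_size == 4 bytes) whose known misalignment is
   8 bytes, elem_misalign == 2 and the prolog executes
   ((4 - 2) & 3) / step iterations, i.e. 2 iterations when step == 1,
   after which the data reference is aligned.  */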
7529 vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
7531 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
7532 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7535 tree iters, iters_name;
7538 gimple dr_stmt = DR_STMT (dr);
7539 stmt_vec_info stmt_info = vinfo_for_stmt (dr_stmt);
7540 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7541 int vectype_align = TYPE_ALIGN (vectype) / BITS_PER_UNIT;
7542 tree niters_type = TREE_TYPE (loop_niters);
7544 int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
7545 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
7547 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
7548 step = DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_DR (stmt_info)));
7550 pe = loop_preheader_edge (loop);
7552 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
7554 int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
7555 int elem_misalign = byte_misalign / element_size;
7557 if (vect_print_dump_info (REPORT_DETAILS))
7558 fprintf (vect_dump, "known alignment = %d.", byte_misalign);
7560 iters = build_int_cst (niters_type,
7561 (((nelements - elem_misalign) & (nelements - 1)) / step));
7565 gimple_seq new_stmts = NULL;
7566 tree start_addr = vect_create_addr_base_for_vector_ref (dr_stmt,
7567 &new_stmts, NULL_TREE, loop);
7568 tree ptr_type = TREE_TYPE (start_addr);
7569 tree size = TYPE_SIZE (ptr_type);
7570 tree type = lang_hooks.types.type_for_size (tree_low_cst (size, 1), 1);
7571 tree vectype_size_minus_1 = build_int_cst (type, vectype_align - 1);
7572 tree elem_size_log =
7573 build_int_cst (type, exact_log2 (vectype_align/nelements));
7574 tree nelements_minus_1 = build_int_cst (type, nelements - 1);
      tree nelements_tree = build_int_cst (type, nelements);
      tree byte_misalign;
      tree elem_misalign;

      new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmts);
7580 gcc_assert (!new_bb);
7582 /* Create: byte_misalign = addr & (vectype_size - 1) */
      byte_misalign =
	fold_build2 (BIT_AND_EXPR, type, fold_convert (type, start_addr),
		     vectype_size_minus_1);
7586 /* Create: elem_misalign = byte_misalign / element_size */
      elem_misalign =
	fold_build2 (RSHIFT_EXPR, type, byte_misalign, elem_size_log);
7590 /* Create: (niters_type) (nelements - elem_misalign)&(nelements - 1) */
7591 iters = fold_build2 (MINUS_EXPR, type, nelements_tree, elem_misalign);
7592 iters = fold_build2 (BIT_AND_EXPR, type, iters, nelements_minus_1);
7593 iters = fold_convert (niters_type, iters);
7596 /* Create: prolog_loop_niters = min (iters, loop_niters) */
7597 /* If the loop bound is known at compile time we already verified that it is
7598 greater than vf; since the misalignment ('iters') is at most vf, there's
7599 no need to generate the MIN_EXPR in this case. */
7600 if (TREE_CODE (loop_niters) != INTEGER_CST)
7601 iters = fold_build2 (MIN_EXPR, niters_type, iters, loop_niters);
7603 if (vect_print_dump_info (REPORT_DETAILS))
7605 fprintf (vect_dump, "niters for prolog loop: ");
7606 print_generic_expr (vect_dump, iters, TDF_SLIM);
7609 var = create_tmp_var (niters_type, "prolog_loop_niters");
7610 add_referenced_var (var);
7612 iters_name = force_gimple_operand (iters, &stmts, false, var);
7614 /* Insert stmt on loop preheader edge. */
7617 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7618 gcc_assert (!new_bb);
7625 /* Function vect_update_init_of_dr
7627 NITERS iterations were peeled from LOOP. DR represents a data reference
7628 in LOOP. This function updates the information recorded in DR to
7629 account for the fact that the first NITERS iterations had already been
7630 executed. Specifically, it updates the OFFSET field of DR. */
static void
vect_update_init_of_dr (struct data_reference *dr, tree niters)
{
  tree offset = DR_OFFSET (dr);

  niters = fold_build2 (MULT_EXPR, sizetype,
                        fold_convert (sizetype, niters),
                        fold_convert (sizetype, DR_STEP (dr)));
  offset = fold_build2 (PLUS_EXPR, sizetype, offset, niters);
  DR_OFFSET (dr) = offset;
}
/* Function vect_update_inits_of_drs

   NITERS iterations were peeled from the loop represented by LOOP_VINFO.
   This function updates the information recorded for the data references in
   the loop to account for the fact that the first NITERS iterations had
   already been executed.  Specifically, it updates the initial_condition of
   the access_function of all the data_references in the loop.  */

static void
vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
{
  unsigned int i;
  VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct data_reference *dr;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_update_inits_of_dr ===");

  for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
    vect_update_init_of_dr (dr, niters);
}
/* Function vect_do_peeling_for_alignment

   Peel the first 'niters' iterations of the loop represented by LOOP_VINFO.
   'niters' is set to the misalignment of one of the data references in the
   loop, thereby forcing it to refer to an aligned location at the beginning
   of the execution of this loop.  The data reference for which we are
   peeling is recorded in LOOP_VINFO_UNALIGNED_DR.  */

static void
vect_do_peeling_for_alignment (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree niters_of_prolog_loop, ni_name;
  tree n_iters;
  struct loop *new_loop;
  bool check_profitability = false;
  unsigned int th = 0;
  int min_profitable_iters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_do_peeling_for_alignment ===");

  initialize_original_copy_tables ();

  ni_name = vect_build_loop_niters (loop_vinfo);
  niters_of_prolog_loop = vect_gen_niters_for_prolog_loop (loop_vinfo, ni_name);

  /* If the cost model check was not done during versioning, do it now.  */
  if (!VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
      && !VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
    {
      check_profitability = true;

      /* Get profitability threshold for vectorized loop.  */
      min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);

      th = conservative_cost_threshold (loop_vinfo,
                                        min_profitable_iters);
    }

  /* Peel the prolog loop and iterate it niters_of_prolog_loop times.  */
  new_loop =
    slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop),
                                   niters_of_prolog_loop, ni_name, true,
                                   th, check_profitability);

  gcc_assert (new_loop);
#ifdef ENABLE_CHECKING
  slpeel_verify_cfg_after_peeling (new_loop, loop);
#endif

  /* Update number of times loop executes.  */
  n_iters = LOOP_VINFO_NITERS (loop_vinfo);
  LOOP_VINFO_NITERS (loop_vinfo) = fold_build2 (MINUS_EXPR,
                TREE_TYPE (n_iters), n_iters, niters_of_prolog_loop);

  /* Update the init conditions of the access functions of all data refs.  */
  vect_update_inits_of_drs (loop_vinfo, niters_of_prolog_loop);

  /* After peeling we have to reset the scalar evolution analyzer.  */
  scev_reset ();

  free_original_copy_tables ();
}
/* Function vect_create_cond_for_align_checks.

   Create a conditional expression that represents the alignment checks for
   all of the data references (array element references) whose alignment must
   be checked at runtime.

   Input:
   COND_EXPR  - input conditional expression.  New conditions will be chained
                with logical AND operation.
   LOOP_VINFO - two fields of the loop information are used.
                LOOP_VINFO_PTR_MASK is the mask used to check the alignment.
                LOOP_VINFO_MAY_MISALIGN_STMTS contains the refs to be checked.

   Output:
   COND_EXPR_STMT_LIST - statements needed to construct the conditional
                         expression.
   The returned value is the conditional expression to be used in the if
   statement that controls which version of the loop gets executed at runtime.

   The algorithm makes two assumptions:
     1) The number of bytes "n" in a vector is a power of 2.
     2) An address "a" is aligned if a%n is zero and this test can be
        done as a&(n-1) == 0.  For example, for 16-byte vectors the test
        is a&0xf == 0.  */
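/* For instance, with two potentially misaligned references and 16-byte
   vectors (mask == 0xf), the sequence built below amounts to:
       addr2int0 = (ptrsize_int) addr_a
       addr2int1 = (ptrsize_int) addr_b
       orptrs1 = addr2int0 | addr2int1
       andmask = orptrs1 & 0xf
       cond = (andmask == 0)
   where "ptrsize_int" stands for the pointer-sized integer type chosen
   below.  */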
static void
vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
                                   tree *cond_expr,
                                   gimple_seq *cond_expr_stmt_list)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  VEC(gimple,heap) *may_misalign_stmts
    = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
  gimple ref_stmt;
  int mask = LOOP_VINFO_PTR_MASK (loop_vinfo);
  tree mask_cst;
  unsigned int i;
  tree psize;
  tree int_ptrsize_type;
  char tmp_name[20];
  tree or_tmp_name = NULL_TREE;
  tree and_tmp, and_tmp_name;
  gimple and_stmt;
  tree ptrsize_zero;
  tree part_cond_expr;

  /* Check that mask is one less than a power of 2, i.e., mask is
     all zeros followed by all ones.  */
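  /* E.g. for 16-byte vectors mask == 0xf, and indeed 0xf & 0x10 == 0.  */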
  gcc_assert ((mask != 0) && ((mask & (mask+1)) == 0));

  /* CHECKME: what is the best integer or unsigned type to use to hold a
     cast from a pointer value?  */
  psize = TYPE_SIZE (ptr_type_node);
  int_ptrsize_type
    = lang_hooks.types.type_for_size (tree_low_cst (psize, 1), 0);
  /* Create expression (mask & (dr_1 || ... || dr_n)) where dr_i is the
     address of the first vector of the i'th data reference.  */

  for (i = 0; VEC_iterate (gimple, may_misalign_stmts, i, ref_stmt); i++)
    {
      gimple_seq new_stmt_list = NULL;
      tree addr_base;
      tree addr_tmp, addr_tmp_name;
      tree or_tmp, new_or_tmp_name;
      gimple addr_stmt, or_stmt;

      /* create: addr_tmp = (int)(address_of_first_vector)  */
      addr_base =
        vect_create_addr_base_for_vector_ref (ref_stmt, &new_stmt_list,
                                              NULL_TREE, loop);
      if (new_stmt_list != NULL)
        gimple_seq_add_seq (cond_expr_stmt_list, new_stmt_list);

      sprintf (tmp_name, "%s%d", "addr2int", i);
      addr_tmp = create_tmp_var (int_ptrsize_type, tmp_name);
      add_referenced_var (addr_tmp);
      addr_tmp_name = make_ssa_name (addr_tmp, NULL);
      addr_stmt = gimple_build_assign_with_ops (NOP_EXPR, addr_tmp_name,
                                                addr_base, NULL_TREE);
      SSA_NAME_DEF_STMT (addr_tmp_name) = addr_stmt;
      gimple_seq_add_stmt (cond_expr_stmt_list, addr_stmt);

      /* The addresses are ORed together.  */

      if (or_tmp_name != NULL_TREE)
        {
          /* create: or_tmp = or_tmp | addr_tmp  */
          sprintf (tmp_name, "%s%d", "orptrs", i);
          or_tmp = create_tmp_var (int_ptrsize_type, tmp_name);
          add_referenced_var (or_tmp);
          new_or_tmp_name = make_ssa_name (or_tmp, NULL);
          or_stmt = gimple_build_assign_with_ops (BIT_IOR_EXPR,
                                                  new_or_tmp_name,
                                                  or_tmp_name, addr_tmp_name);
          SSA_NAME_DEF_STMT (new_or_tmp_name) = or_stmt;
          gimple_seq_add_stmt (cond_expr_stmt_list, or_stmt);
          or_tmp_name = new_or_tmp_name;
        }
      else
        or_tmp_name = addr_tmp_name;
    }

  mask_cst = build_int_cst (int_ptrsize_type, mask);

  /* create: and_tmp = or_tmp & mask  */
  and_tmp = create_tmp_var (int_ptrsize_type, "andmask");
  add_referenced_var (and_tmp);
  and_tmp_name = make_ssa_name (and_tmp, NULL);

  and_stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, and_tmp_name,
                                           or_tmp_name, mask_cst);
  SSA_NAME_DEF_STMT (and_tmp_name) = and_stmt;
  gimple_seq_add_stmt (cond_expr_stmt_list, and_stmt);

  /* Make and_tmp the left operand of the conditional test against zero.
     If and_tmp has a nonzero bit then some address is unaligned.  */
  ptrsize_zero = build_int_cst (int_ptrsize_type, 0);
  part_cond_expr = fold_build2 (EQ_EXPR, boolean_type_node,
                                and_tmp_name, ptrsize_zero);
  if (*cond_expr)
    *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                              *cond_expr, part_cond_expr);
  else
    *cond_expr = part_cond_expr;
}
/* Function vect_vfa_segment_size.

   Create an expression that computes the size of the segment that will be
   accessed for a data reference.  The function takes into account that
   realignment loads may access one more vector.

   Input:
     DR: The data reference.
     VECT_FACTOR: vectorization factor.

   Return an expression whose value is the size of the segment which will
   be accessed by DR.  */

static tree
vect_vfa_segment_size (struct data_reference *dr, tree vect_factor)
{
  tree segment_length = fold_build2 (MULT_EXPR, integer_type_node,
                                     DR_STEP (dr), vect_factor);

  if (vect_supportable_dr_alignment (dr) == dr_explicit_realign_optimized)
    {
      tree vector_size = TYPE_SIZE_UNIT
                          (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));

      segment_length = fold_build2 (PLUS_EXPR, integer_type_node,
                                    segment_length, vector_size);
    }
  return fold_convert (sizetype, segment_length);
}
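/* For example (illustrative values): for an int access (DR_STEP == 4)
   and VECT_FACTOR == 4 the segment is 4 * 4 == 16 bytes, with one extra
   vector (e.g. 16 more bytes) added when the optimized realignment
   scheme is used.  */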
/* Function vect_create_cond_for_alias_checks.

   Create a conditional expression that represents the run-time checks for
   overlapping of address ranges represented by a list of data reference
   relations passed as input.

   Input:
   COND_EXPR  - input conditional expression.  New conditions will be chained
                with logical AND operation.
   LOOP_VINFO - field LOOP_VINFO_MAY_ALIAS_DDRS contains the list of ddrs
                to be checked.

   Output:
   COND_EXPR - conditional expression.
   COND_EXPR_STMT_LIST - statements needed to construct the conditional
                         expression.

   The returned value is the conditional expression to be used in the if
   statement that controls which version of the loop gets executed at
   runtime.  */

static void
vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
                                   tree * cond_expr,
                                   gimple_seq * cond_expr_stmt_list)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  VEC (ddr_p, heap) * may_alias_ddrs =
    LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
  tree vect_factor =
    build_int_cst (integer_type_node, LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  ddr_p ddr;
  unsigned int i;
  tree part_cond_expr;

  /* Create expression
     ((store_ptr_0 + store_segment_length_0) < load_ptr_0)
     || (load_ptr_0 + load_segment_length_0) < store_ptr_0))
     &&
     ...
     &&
     ((store_ptr_n + store_segment_length_n) < load_ptr_n)
     || (load_ptr_n + load_segment_length_n) < store_ptr_n))  */
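  /* For instance, for one store to a and one load from b with 16-byte
     segments, the generated test amounts to
        (&a + 16 < &b) || (&b + 16 < &a),
     i.e. the two accessed segments do not overlap.  */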
  if (VEC_empty (ddr_p, may_alias_ddrs))
    return;

  for (i = 0; VEC_iterate (ddr_p, may_alias_ddrs, i, ddr); i++)
    {
      struct data_reference *dr_a, *dr_b;
      gimple dr_group_first_a, dr_group_first_b;
      tree addr_base_a, addr_base_b;
      tree segment_length_a, segment_length_b;
      gimple stmt_a, stmt_b;

      dr_a = DDR_A (ddr);
      stmt_a = DR_STMT (DDR_A (ddr));
      dr_group_first_a = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_a));
      if (dr_group_first_a)
        {
          stmt_a = dr_group_first_a;
          dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
        }

      dr_b = DDR_B (ddr);
      stmt_b = DR_STMT (DDR_B (ddr));
      dr_group_first_b = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_b));
      if (dr_group_first_b)
        {
          stmt_b = dr_group_first_b;
          dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
        }

      addr_base_a =
        vect_create_addr_base_for_vector_ref (stmt_a, cond_expr_stmt_list,
                                              NULL_TREE, loop);
      addr_base_b =
        vect_create_addr_base_for_vector_ref (stmt_b, cond_expr_stmt_list,
                                              NULL_TREE, loop);

      segment_length_a = vect_vfa_segment_size (dr_a, vect_factor);
      segment_length_b = vect_vfa_segment_size (dr_b, vect_factor);

      if (vect_print_dump_info (REPORT_DR_DETAILS))
        {
          fprintf (vect_dump,
                   "create runtime check for data references ");
          print_generic_expr (vect_dump, DR_REF (dr_a), TDF_SLIM);
          fprintf (vect_dump, " and ");
          print_generic_expr (vect_dump, DR_REF (dr_b), TDF_SLIM);
        }

      part_cond_expr =
        fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
          fold_build2 (LT_EXPR, boolean_type_node,
            fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (addr_base_a),
              addr_base_a,
              segment_length_a),
            addr_base_b),
          fold_build2 (LT_EXPR, boolean_type_node,
            fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (addr_base_b),
              addr_base_b,
              segment_length_b),
            addr_base_a));

      if (*cond_expr)
        *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                  *cond_expr, part_cond_expr);
      else
        *cond_expr = part_cond_expr;
    }

  if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
    fprintf (vect_dump, "created %u versioning for alias checks.\n",
             VEC_length (ddr_p, may_alias_ddrs));
}
/* Function vect_loop_versioning.

   If the loop has data references that may or may not be aligned and/or
   has data reference relations whose independence was not proven, then
   two versions of the loop need to be generated: one which is vectorized
   and one which isn't.  A test is then generated to control which of the
   loops is executed.  The test checks for the alignment of all of the
   data references that may or may not be aligned.  An additional
   sequence of runtime tests is generated for each pair of DDRs whose
   independence was not proven.  The vectorized version of the loop is
   executed only if both the alias and alignment tests pass.

   The test generated to check which version of the loop is executed
   is also extended to check the profitability threshold indicated by
   the cost model.  */
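/* Illustrative sketch of the resulting guard (hypothetical names), for a
   cost threshold TH combined with an alignment check on two pointers a
   and b and an alias test:

     if (n > TH && ((a | b) & 0xf) == 0 && no_alias_test)
       <vectorized loop>
     else
       <scalar loop>  */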
static void
vect_loop_versioning (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  struct loop *nloop;
  tree cond_expr = NULL_TREE;
  gimple_seq cond_expr_stmt_list = NULL;
  basic_block condition_bb;
  gimple_stmt_iterator gsi, cond_exp_gsi;
  basic_block merge_bb;
  basic_block new_exit_bb;
  edge new_exit_e, e;
  gimple orig_phi, new_phi;
  tree arg;
  unsigned prob = 4 * REG_BR_PROB_BASE / 5;
  gimple_seq gimplify_stmt_list = NULL;
  tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
  int min_profitable_iters = 0;
  unsigned int th;

  /* Get profitability threshold for vectorized loop.  */
  min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);

  th = conservative_cost_threshold (loop_vinfo,
                                    min_profitable_iters);

  cond_expr =
    build2 (GT_EXPR, boolean_type_node, scalar_loop_iters,
            build_int_cst (TREE_TYPE (scalar_loop_iters), th));

  cond_expr = force_gimple_operand (cond_expr, &cond_expr_stmt_list,
                                    false, NULL_TREE);

  if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)))
    vect_create_cond_for_align_checks (loop_vinfo, &cond_expr,
                                       &cond_expr_stmt_list);

  if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
    vect_create_cond_for_alias_checks (loop_vinfo, &cond_expr,
                                       &cond_expr_stmt_list);

  cond_expr =
    fold_build2 (NE_EXPR, boolean_type_node, cond_expr, integer_zero_node);
  cond_expr =
    force_gimple_operand (cond_expr, &gimplify_stmt_list, true, NULL_TREE);
  gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);

  initialize_original_copy_tables ();
  nloop = loop_version (loop, cond_expr, &condition_bb,
                        prob, prob, REG_BR_PROB_BASE - prob, true);
  free_original_copy_tables ();
  /* Loop versioning violates an assumption we try to maintain during
     vectorization - that the loop exit block has a single predecessor.
     After versioning, the exit block of both loop versions is the same
     basic block (i.e. it has two predecessors).  Just in order to simplify
     following transformations in the vectorizer, we fix this situation
     here by adding a new (empty) block on the exit-edge of the loop,
     with the proper loop-exit phis to maintain loop-closed-form.  */

  merge_bb = single_exit (loop)->dest;
  gcc_assert (EDGE_COUNT (merge_bb->preds) == 2);
  new_exit_bb = split_edge (single_exit (loop));
  new_exit_e = single_exit (loop);
  e = EDGE_SUCC (new_exit_bb, 0);

  for (gsi = gsi_start_phis (merge_bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      orig_phi = gsi_stmt (gsi);
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 new_exit_bb);
      arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
      add_phi_arg (new_phi, arg, new_exit_e);
      SET_PHI_ARG_DEF (orig_phi, e->dest_idx, PHI_RESULT (new_phi));
    }

  /* End loop-exit-fixes after versioning.  */

  update_ssa (TODO_update_ssa);
  if (cond_expr_stmt_list)
    {
      cond_exp_gsi = gsi_last_bb (condition_bb);
      gsi_insert_seq_before (&cond_exp_gsi, cond_expr_stmt_list, GSI_SAME_STMT);
    }
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

static void
vect_remove_stores (gimple first_stmt)
{
  gimple next = first_stmt;
  gimple tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      gsi_remove (&next_si, true);
      tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Vectorize SLP instance tree in postorder.  */

static bool
vect_schedule_slp_instance (slp_tree node, slp_instance instance,
                            unsigned int vectorization_factor)
{
  gimple stmt;
  bool strided_store, is_store;
  gimple_stmt_iterator si;
  stmt_vec_info stmt_info;
  unsigned int vec_stmts_size, nunits, group_size;
  tree vectype;
  int i;
  slp_tree loads_node;

  if (!node)
    return false;

  vect_schedule_slp_instance (SLP_TREE_LEFT (node), instance,
                              vectorization_factor);
  vect_schedule_slp_instance (SLP_TREE_RIGHT (node), instance,
                              vectorization_factor);

  stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
  stmt_info = vinfo_for_stmt (stmt);
  /* VECTYPE is the type of the destination.  */
  vectype = get_vectype_for_scalar_type (TREE_TYPE (gimple_assign_lhs (stmt)));
  nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
  group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* For each SLP instance calculate number of vector stmts to be created
     for the scalar stmts in each node of the SLP tree.  Number of vector
     elements in one vector iteration is the number of scalar elements in
     one scalar iteration (GROUP_SIZE) multiplied by VF divided by the
     vector size.  */
  vec_stmts_size = (vectorization_factor * group_size) / nunits;
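  /* E.g. for GROUP_SIZE == 2, VF == 4 and V4SI vectors (nunits == 4),
     (4 * 2) / 4 == 2 vector stmts are created for each node.  */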
  /* In case of load permutation we have to allocate vectorized statements for
     all the nodes that participate in that permutation.  */
  if (SLP_INSTANCE_LOAD_PERMUTATION (instance))
    {
      for (i = 0;
           VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (instance), i, loads_node);
           i++)
        {
          if (!SLP_TREE_VEC_STMTS (loads_node))
            {
              SLP_TREE_VEC_STMTS (loads_node) = VEC_alloc (gimple, heap,
                                                           vec_stmts_size);
              SLP_TREE_NUMBER_OF_VEC_STMTS (loads_node) = vec_stmts_size;
            }
        }
    }

  if (!SLP_TREE_VEC_STMTS (node))
    {
      SLP_TREE_VEC_STMTS (node) = VEC_alloc (gimple, heap, vec_stmts_size);
      SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "------>vectorizing SLP node starting from: ");
      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
    }

  si = gsi_for_stmt (stmt);
  is_store = vect_transform_stmt (stmt, &si, &strided_store, node, instance);
  if (is_store)
    {
      if (DR_GROUP_FIRST_DR (stmt_info))
        /* If IS_STORE is TRUE, the vectorization of the
           interleaving chain was completed - free all the stores in
           the chain.  */
        vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
      else
        /* FORNOW: SLP originates only from strided stores.  */
        gcc_unreachable ();

      return true;
    }

  /* FORNOW: SLP originates only from strided stores.  */
  return false;
}
static bool
vect_schedule_slp (loop_vec_info loop_vinfo)
{
  VEC (slp_instance, heap) *slp_instances =
    LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  unsigned int i;
  bool is_store = false;

  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
    {
      /* Schedule the tree of INSTANCE.  */
      is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
                          instance, LOOP_VINFO_VECT_FACTOR (loop_vinfo));

      if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS)
          || vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
        fprintf (vect_dump, "vectorizing stmts using SLP.");
    }

  return is_store;
}
/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - create vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.  */

void
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  int i;
  tree ratio = NULL;
  int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  bool strided_store;
  bool slp_scheduled = false;
  unsigned int nunits;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vec_transform_loop ===");

  if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
      || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
    vect_loop_versioning (loop_vinfo);

  /* CHECKME: we wouldn't need this if we called update_ssa once
     for all loops.  */
  bitmap_zero (vect_memsyms_to_rename);

  /* Peel the loop if there are data refs with unknown alignment.
     Only one data ref with unknown alignment is allowed.  */

  if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    vect_do_peeling_for_alignment (loop_vinfo);

  /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
     compile time constant), or it is a constant that doesn't divide by the
     vectorization factor, then an epilog loop needs to be created.
     We therefore duplicate the loop: the original loop will be vectorized,
     and will compute the first (n/VF) iterations.  The second copy of the
     loop will remain scalar and will compute the remaining (n%VF)
     iterations.  (VF is the vectorization factor.)  */
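  /* For example (illustrative values): with n == 102 and VF == 4, the
     vectorized loop executes 102/4 == 25 iterations and the scalar
     epilog loop the remaining 102%4 == 2.  */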
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
          && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0))
    vect_do_peeling_for_loop_bound (loop_vinfo, &ratio);
  else
    ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
                LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);

  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  /* FORNOW: the vectorizer supports only loops whose body consists of one
     basic block (header + empty latch).  When the vectorizer supports more
     involved loop forms, the order in which the BBs are traversed needs
     to be reconsidered.  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;
      gimple phi;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "------>vectorizing phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }
          stmt_info = vinfo_for_stmt (phi);
          if (!stmt_info)
            continue;

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            continue;

          if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
               != (unsigned HOST_WIDE_INT) vectorization_factor)
              && vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "multiple-types.");

          if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "transform phi.");
              vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
            }
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si);)
        {
          gimple stmt = gsi_stmt (si);
          bool is_store;

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "------>vectorizing statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          stmt_info = vinfo_for_stmt (stmt);

          /* vector stmts created in the outer-loop during vectorization of
             stmts in an inner-loop may not have a stmt_info, and do not
             need to be vectorized.  */
          if (!stmt_info)
            {
              gsi_next (&si);
              continue;
            }

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              gsi_next (&si);
              continue;
            }

          gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
          nunits =
            (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
          if (!STMT_SLP_TYPE (stmt_info)
              && nunits != (unsigned int) vectorization_factor
              && vect_print_dump_info (REPORT_DETAILS))
            /* For SLP VF is set according to the unrolling factor, and not to
               vector size, hence for SLP this print is not valid.  */
            fprintf (vect_dump, "multiple-types.");

          /* SLP.  Schedule all the SLP instances when the first SLP stmt is
             reached.  */
          if (STMT_SLP_TYPE (stmt_info))
            {
              if (!slp_scheduled)
                {
                  slp_scheduled = true;

                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "=== scheduling SLP instances ===");

                  is_store = vect_schedule_slp (loop_vinfo);

                  /* IS_STORE is true if STMT is a store.  Stores cannot be of
                     hybrid SLP type.  They are removed in
                     vect_schedule_slp_instance and their vinfo is
                     destroyed.  */
                  if (is_store)
                    {
                      gsi_next (&si);
                      continue;
                    }
                }

              /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
              if (PURE_SLP_STMT (stmt_info))
                {
                  gsi_next (&si);
                  continue;
                }
            }

          /* -------- vectorize statement ------------ */
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "transform statement.");

          strided_store = false;
          is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
          if (is_store)
            {
              if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
                {
                  /* Interleaving.  If IS_STORE is TRUE, the vectorization of
                     the interleaving chain was completed - free all the
                     stores in the chain.  */
                  vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
                  gsi_remove (&si, true);
                  continue;
                }
              else
                {
                  /* Free the attached stmt_vec_info and remove the stmt.  */
                  free_stmt_vec_info (stmt);
                  gsi_remove (&si, true);
                  continue;
                }
            }
          gsi_next (&si);
        }                       /* stmts in BB */
    }                           /* BBs in loop */
  slpeel_make_loop_iterate_ntimes (loop, ratio);

  mark_set_for_renaming (vect_memsyms_to_rename);

  /* The memory tags and pointers in vectorized statements need to
     have their SSA forms updated.  FIXME, why can't this be delayed
     until all the loops have been transformed?  */
  update_ssa (TODO_update_ssa);

  if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
    fprintf (vect_dump, "LOOP VECTORIZED.");
  if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
    fprintf (vect_dump, "OUTER LOOP VECTORIZED.");
}