1 /* Transformation Utilities for Loop Vectorization.
2 Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
29 #include "basic-block.h"
30 #include "diagnostic.h"
31 #include "tree-flow.h"
32 #include "tree-dump.h"
39 #include "tree-data-ref.h"
40 #include "tree-chrec.h"
41 #include "tree-scalar-evolution.h"
42 #include "tree-vectorizer.h"
43 #include "langhooks.h"
44 #include "tree-pass.h"
48 /* Forward declarations of file-local utility functions for the code
   transformation phase of the loop vectorizer.  All are static; their
   definitions follow below in this file. */
49 static bool vect_transform_stmt (tree, block_stmt_iterator *, bool *);
50 static tree vect_create_destination_var (tree, tree);
51 static tree vect_create_data_ref_ptr
52 (tree, block_stmt_iterator *, tree, tree *, tree *, bool, tree);
53 static tree vect_create_addr_base_for_vector_ref (tree, tree *, tree);
54 static tree vect_setup_realignment (tree, block_stmt_iterator *, tree *);
55 static tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
56 static tree vect_get_vec_def_for_operand (tree, tree, tree *);
57 static tree vect_init_vector (tree, tree, tree);
58 static void vect_finish_stmt_generation
59 (tree stmt, tree vec_stmt, block_stmt_iterator *bsi);
60 static bool vect_is_simple_cond (tree, loop_vec_info);
61 static void update_vuses_to_preheader (tree, struct loop*);
62 static void vect_create_epilog_for_reduction (tree, tree, enum tree_code, tree);
63 static tree get_initial_def_for_reduction (tree, tree, tree *);
65 /* Utility functions dealing with loop peeling (not the peeling itself). */
66 static void vect_generate_tmps_on_preheader
67 (loop_vec_info, tree *, tree *, tree *);
68 static tree vect_build_loop_niters (loop_vec_info);
69 static void vect_update_ivs_after_vectorizer (loop_vec_info, tree, edge);
70 static tree vect_gen_niters_for_prolog_loop (loop_vec_info, tree);
71 static void vect_update_init_of_dr (struct data_reference *, tree niters);
72 static void vect_update_inits_of_drs (loop_vec_info, tree);
73 static int vect_min_worthwhile_factor (enum tree_code);
/* Function cost_for_stmt.

   Return the cost of executing STMT as a *scalar* statement, dispatched
   on the vectorization kind recorded in its stmt_vec_info.  Loads and
   stores get dedicated target costs; all other handled statement kinds
   share the generic scalar statement cost.
   NOTE(review): the handling after undef_vec_info_type (and the closing
   of the switch) is elided in this excerpt -- confirm in the full source
   what undef_vec_info_type does (presumably an abort/gcc_unreachable). */
77 cost_for_stmt (tree stmt)
79 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
81 switch (STMT_VINFO_TYPE (stmt_info))
83 case load_vec_info_type:
84 return TARG_SCALAR_LOAD_COST;
85 case store_vec_info_type:
86 return TARG_SCALAR_STORE_COST;
87 case op_vec_info_type:
88 case condition_vec_info_type:
89 case assignment_vec_info_type:
90 case reduc_vec_info_type:
91 case induc_vec_info_type:
92 case type_promotion_vec_info_type:
93 case type_demotion_vec_info_type:
94 case type_conversion_vec_info_type:
95 case call_vec_info_type:
96 return TARG_SCALAR_STMT_COST;
97 case undef_vec_info_type:
104 /* Function vect_estimate_min_profitable_iters
106 Return the number of iterations required for the vector version of the
107 loop to be profitable relative to the cost of the scalar version of the
110 TODO: Take profile info into account before making vectorization
111 decisions, if available. */
114 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
117 int min_profitable_iters;
118 int peel_iters_prologue;
119 int peel_iters_epilogue;
120 int vec_inside_cost = 0;
121 int vec_outside_cost = 0;
122 int scalar_single_iter_cost = 0;
123 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
124 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
125 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
126 int nbbs = loop->num_nodes;
129 /* Cost model disabled: vectorize unconditionally (the elided return
   value here is not visible in this excerpt). */
130 if (!flag_vect_cost_model)
132 if (vect_print_dump_info (REPORT_DETAILS))
133 fprintf (vect_dump, "cost model disabled.");
137 /* Requires loop versioning tests to handle misalignment.
138 FIXME: Make cost depend on number of stmts in may_misalign list. */
140 if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)))
142 vec_outside_cost += TARG_COND_BRANCH_COST;
143 if (vect_print_dump_info (REPORT_DETAILS))
144 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
148 /* Count statements in scalar loop. Using this as scalar cost for a single
151 TODO: Add outer loop support.
153 TODO: Consider assigning different costs to different scalar
156 for (i = 0; i < nbbs; i++)
158 block_stmt_iterator si;
159 basic_block bb = bbs[i];
161 for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
163 tree stmt = bsi_stmt (si);
164 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
/* Irrelevant and non-live stmts contribute nothing; the continue for
   this condition is elided in this excerpt. */
165 if (!STMT_VINFO_RELEVANT_P (stmt_info)
166 && !STMT_VINFO_LIVE_P (stmt_info))
168 scalar_single_iter_cost += cost_for_stmt (stmt);
169 vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info);
170 vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
174 /* Add additional cost for the peeled instructions in prologue and epilogue
177 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
178 at compile-time - we assume it's (vf-1)/2 (the worst would be vf-1).
180 TODO: Build an expression that represents peel_iters for prologue and
181 epilogue to be used in a run-time test. */
183 byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
/* A negative value means the peeling amount is unknown at compile time. */
185 if (byte_misalign < 0)
187 peel_iters_prologue = (vf - 1)/2;
188 if (vect_print_dump_info (REPORT_DETAILS))
189 fprintf (vect_dump, "cost model: "
190 "prologue peel iters set to (vf-1)/2.");
192 /* If peeling for alignment is unknown, loop bound of main loop becomes
194 peel_iters_epilogue = (vf - 1)/2;
195 if (vect_print_dump_info (REPORT_DETAILS))
196 fprintf (vect_dump, "cost model: "
197 "epilogue peel iters set to (vf-1)/2 because "
198 "peeling for alignment is unknown .");
/* Known misalignment: compute the exact prologue peel count from the
   unaligned data reference's element size. */
204 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
205 int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
206 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
207 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
209 peel_iters_prologue = nelements - (byte_misalign / element_size);
212 peel_iters_prologue = 0;
214 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
216 peel_iters_epilogue = (vf - 1)/2;
217 if (vect_print_dump_info (REPORT_DETAILS))
218 fprintf (vect_dump, "cost model: "
219 "epilogue peel iters set to (vf-1)/2 because "
220 "loop iterations are unknown .");
/* Known iteration count: clamp the prologue peel to niters and derive
   the epilogue peel as the leftover after whole vector iterations. */
224 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
225 peel_iters_prologue = niters < peel_iters_prologue ?
226 niters : peel_iters_prologue;
227 peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
231 /* Requires a prologue loop when peeling to handle misalignment. Add cost of
232 two guards, one for the peeled loop and one for the vector loop. */
234 if (peel_iters_prologue)
236 vec_outside_cost += 2 * TARG_COND_BRANCH_COST;
237 if (vect_print_dump_info (REPORT_DETAILS))
238 fprintf (vect_dump, "cost model: Adding cost of checks for "
242 /* Requires an epilogue loop to finish up remaining iterations after vector
243 loop. Add cost of two guards, one for the peeled loop and one for the
246 if (peel_iters_epilogue
247 || !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
248 || LOOP_VINFO_INT_NITERS (loop_vinfo) % vf)
250 vec_outside_cost += 2 * TARG_COND_BRANCH_COST;
251 if (vect_print_dump_info (REPORT_DETAILS))
252 fprintf (vect_dump, "cost model : Adding cost of checks for "
/* Peeled iterations execute the scalar loop body; charge them at the
   scalar single-iteration cost. */
256 vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
257 + (peel_iters_epilogue * scalar_single_iter_cost);
259 /* Allow targets add additional (outside-of-loop) costs. FORNOW, the only
260 information we provide for the target is whether testing against the
261 threshold involves a runtime test. */
262 if (targetm.vectorize.builtin_vectorization_cost)
264 bool runtime_test = false;
266 /* If the number of iterations is unknown, or the
267 peeling-for-misalignment amount is unknown, we will have to generate
268 a runtime test to test the loop count against the threshold. */
269 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
270 || (byte_misalign < 0))
273 targetm.vectorize.builtin_vectorization_cost (runtime_test);
274 if (vect_print_dump_info (REPORT_DETAILS))
275 fprintf (vect_dump, "cost model : Adding target out-of-loop cost = %d",
276 targetm.vectorize.builtin_vectorization_cost (runtime_test));
279 /* Calculate number of iterations required to make the vector version
280 profitable, relative to the loop bodies only. The following condition
281 must hold true: ((SIC*VF)-VIC)*niters > VOC*VF, where
282 SIC = scalar iteration cost, VIC = vector iteration cost,
283 VOC = vector outside cost and VF = vectorization factor. */
285 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
287 if (vec_outside_cost == 0)
288 min_profitable_iters = 1;
/* Integer division rounds down; the check below bumps the result up by
   one when the truncated quotient does not yet satisfy the inequality. */
291 min_profitable_iters = (vec_outside_cost * vf)
292 / ((scalar_single_iter_cost * vf)
295 if ((scalar_single_iter_cost * vf * min_profitable_iters)
296 <= ((vec_inside_cost * min_profitable_iters)
297 + (vec_outside_cost * vf)))
298 min_profitable_iters++;
301 /* vector version will never be profitable. */
304 if (vect_print_dump_info (REPORT_DETAILS))
305 fprintf (vect_dump, "cost model: vector iteration cost = %d "
306 "is divisible by scalar iteration cost = %d by a factor "
307 "greater than or equal to the vectorization factor = %d .",
308 vec_inside_cost, scalar_single_iter_cost, vf);
312 if (vect_print_dump_info (REPORT_DETAILS))
314 fprintf (vect_dump, "Cost model analysis: \n");
315 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
317 fprintf (vect_dump, " Vector outside of loop cost: %d\n",
319 fprintf (vect_dump, " Scalar cost: %d\n", scalar_single_iter_cost);
320 fprintf (vect_dump, " prologue iterations: %d\n",
321 peel_iters_prologue);
322 fprintf (vect_dump, " epilogue iterations: %d\n",
323 peel_iters_epilogue);
324 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
325 min_profitable_iters);
326 fprintf (vect_dump, " Actual minimum iters for profitability: %d\n",
327 min_profitable_iters < vf ? vf : min_profitable_iters);
/* The vector loop always runs at least VF iterations' worth of work,
   so the threshold can never be below VF. */
330 min_profitable_iters =
331 min_profitable_iters < vf ? vf : min_profitable_iters;
333 /* Because the condition we create is:
334 if (niters <= min_profitable_iters)
335 then skip the vectorized loop. */
336 min_profitable_iters--;
337 return min_profitable_iters;
341 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
342 functions. Design better to avoid maintenance issues. */
344 /* Function vect_model_reduction_cost.
346 Models cost for a reduction operation, including the vector ops
347 generated within the strip-mine loop, the initial definition before
348 the loop, and the epilogue code that must be generated.
   NOTE(review): declarations of outer_cost, reduction_op, vectype, code,
   orig_stmt, optab and bitsize are elided in this excerpt; outer_cost is
   presumably initialized to 0 before the first "+=" -- confirm in the
   full source. */
351 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
360 enum machine_mode mode;
361 tree operation = GIMPLE_STMT_OPERAND (STMT_VINFO_STMT (stmt_info), 1);
362 int op_type = TREE_CODE_LENGTH (TREE_CODE (operation));
364 /* Cost of reduction op inside loop: one vector stmt per copy. */
365 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST;
/* The reduction operand is the last operand of the operation. */
367 reduction_op = TREE_OPERAND (operation, op_type-1);
368 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
369 mode = TYPE_MODE (vectype);
370 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
/* Fall back to the stmt itself if there is no related original stmt
   (the guarding condition is elided in this excerpt). */
373 orig_stmt = STMT_VINFO_STMT (stmt_info);
375 code = TREE_CODE (GIMPLE_STMT_OPERAND (orig_stmt, 1));
377 /* Add in cost for initial definition. */
378 outer_cost += TARG_SCALAR_TO_VEC_COST;
380 /* Determine cost of epilogue code.
382 We have a reduction operator that will reduce the vector in one statement.
383 Also requires scalar extract. */
385 if (reduc_code < NUM_TREE_CODES)
386 outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
/* No single-stmt reduction code: estimate element count from the vector
   and scalar result sizes. */
389 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
391 TYPE_SIZE (TREE_TYPE ( GIMPLE_STMT_OPERAND (orig_stmt, 0)));
392 int element_bitsize = tree_low_cst (bitsize, 1);
393 int nelements = vec_size_in_bits / element_bitsize;
395 optab = optab_for_tree_code (code, vectype);
397 /* We have a whole vector shift available. */
398 if (VECTOR_MODE_P (mode)
399 && optab->handlers[mode].insn_code != CODE_FOR_nothing
400 && vec_shr_optab->handlers[mode].insn_code != CODE_FOR_nothing)
401 /* Final reduction via vector shifts and the reduction operator. Also
402 requires scalar extract. */
403 outer_cost += ((exact_log2(nelements) * 2) * TARG_VEC_STMT_COST
404 + TARG_VEC_TO_SCALAR_COST);
406 /* Use extracts and reduction op for final reduction. For N elements,
407 we have N extracts and N-1 reduction ops. */
408 outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
411 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
413 if (vect_print_dump_info (REPORT_DETAILS))
414 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
415 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
416 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
420 /* Function vect_model_induction_cost.
422 Models cost for induction operations: NCOPIES vector stmts inside the
   loop, plus two scalar-to-vector conversions (vec_init and vec_step)
   in the loop prologue.  Results are stored into STMT_INFO's inside/
   outside cost fields. */
425 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
427 /* loop cost for vec_loop. */
428 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
429 /* prologue cost for vec_init and vec_step. */
430 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST;
432 if (vect_print_dump_info (REPORT_DETAILS))
433 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
434 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
435 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
439 /* Function vect_model_simple_cost.
441 Models cost for simple operations, i.e. those that only emit ncopies of a
442 single op. Right now, this does not account for multiple insns that could
443 be generated for the single vector op. We will handle that shortly.
   DT describes the definition kind of each operand; constant/invariant
   operands add a scalar-to-vector conversion to the outside cost. */
446 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies, enum vect_def_type *dt)
450 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
452 /* FORNOW: Assuming maximum 2 args per stmts.
   (The loop header over i is elided in this excerpt.) */
455 if (dt[i] == vect_constant_def || dt[i] == vect_invariant_def)
456 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) += TARG_SCALAR_TO_VEC_COST;
459 if (vect_print_dump_info (REPORT_DETAILS))
460 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
461 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
462 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
466 /* Function vect_cost_strided_group_size
468 For strided load or store, return the group_size only if it is the first
469 load or store of a group, else return 1. This ensures that group size is
470 only returned once per group.
   (The fallthrough "return 1" is elided in this excerpt.) */
473 vect_cost_strided_group_size (stmt_vec_info stmt_info)
475 tree first_stmt = DR_GROUP_FIRST_DR (stmt_info);
477 if (first_stmt == STMT_VINFO_STMT (stmt_info))
478 return DR_GROUP_SIZE (stmt_info);
484 /* Function vect_model_store_cost
486 Models cost for stores. In the case of strided accesses, one access
487 has the overhead of the strided access attributed to it.
   DT is the definition kind of the stored value; constant/invariant
   values add a scalar-to-vector conversion to the outside cost.
   NOTE(review): the declarations/initial values of group_size and cost,
   and the else-arm setting group_size for the non-strided case, are
   elided in this excerpt. */
490 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies, enum vect_def_type dt)
495 if (dt == vect_constant_def || dt == vect_invariant_def)
496 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = TARG_SCALAR_TO_VEC_COST;
498 /* Strided access? */
499 if (DR_GROUP_FIRST_DR (stmt_info))
500 group_size = vect_cost_strided_group_size (stmt_info);
501 /* Not a strided access. */
505 /* Is this an access in a group of stores, which provide strided access?
506 If so, add in the cost of the permutes. */
509 /* Uses a high and low interleave operation for each needed permute. */
510 cost = ncopies * exact_log2(group_size) * group_size
511 * TARG_VEC_STMT_COST;
513 if (vect_print_dump_info (REPORT_DETAILS))
514 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
519 /* Costs of the stores. */
520 cost += ncopies * TARG_VEC_STORE_COST;
522 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = cost;
524 if (vect_print_dump_info (REPORT_DETAILS))
525 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
526 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
527 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
531 /* Function vect_model_load_cost
533 Models cost for loads. In the case of strided accesses, the last access
534 has the overhead of the strided access attributed to it. Since unaligned
535 accesses are supported for loads, we also account for the costs of the
536 access scheme chosen.
   NOTE(review): "cheme" below is a long-standing typo for "scheme" in
   this variable's name; it is the actual identifier, so it is preserved
   here.  Declarations of inner_cost, outer_cost, group_size and
   first_stmt are partly elided in this excerpt. */
539 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies)
544 int alignment_support_cheme;
546 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
548 /* Strided accesses?  If so, charge the group overhead to the first
   stmt of the group and use its data reference for alignment checks. */
549 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
552 group_size = vect_cost_strided_group_size (stmt_info);
553 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
555 /* Not a strided access. */
562 alignment_support_cheme = vect_supportable_dr_alignment (first_dr);
564 /* Is this an access in a group of loads providing strided access?
565 If so, add in the cost of the permutes. */
568 /* Uses an even and odd extract operations for each needed permute. */
569 inner_cost = ncopies * exact_log2(group_size) * group_size
570 * TARG_VEC_STMT_COST;
572 if (vect_print_dump_info (REPORT_DETAILS))
573 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
578 /* The loads themselves, costed per the chosen alignment scheme. */
579 switch (alignment_support_cheme)
/* Aligned case (the case label is elided in this excerpt). */
583 inner_cost += ncopies * TARG_VEC_LOAD_COST;
585 if (vect_print_dump_info (REPORT_DETAILS))
586 fprintf (vect_dump, "vect_model_load_cost: aligned.");
590 case dr_unaligned_supported:
592 /* Here, we assign an additional cost for the unaligned load. */
593 inner_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;
595 if (vect_print_dump_info (REPORT_DETAILS))
596 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
601 case dr_unaligned_software_pipeline:
605 if (vect_print_dump_info (REPORT_DETAILS))
606 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
609 /* Unaligned software pipeline has a load of an address, an initial
610 load, and possibly a mask operation to "prime" the loop. However,
611 if this is an access in a group of loads, which provide strided
612 access, then the above cost should only be considered for one
613 access in the group. Inside the loop, there is a load op
614 and a realignment op. */
616 if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1)
618 outer_cost = 2*TARG_VEC_STMT_COST;
619 if (targetm.vectorize.builtin_mask_for_load)
620 outer_cost += TARG_VEC_STMT_COST;
623 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
625 inner_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
634 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = inner_cost;
636 if (vect_print_dump_info (REPORT_DETAILS))
637 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
638 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
639 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
644 /* Function vect_get_new_vect_var.
646 Returns a name for a new variable. The current naming scheme appends the
647 prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
648 the name of vectorizer generated variables, and appends that to NAME if
   supplied.  Vector-typed results are marked as gimple registers so the
   SSA machinery treats them as scalars. */
652 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
/* Select the prefix according to VAR_KIND (prefix assignments are
   elided in this excerpt). */
659 case vect_simple_var:
662 case vect_scalar_var:
665 case vect_pointer_var:
/* NAME given: concatenate it with the prefix. */
674 char* tmp = concat (prefix, name, NULL);
675 new_vect_var = create_tmp_var (type, tmp);
/* No NAME: the bare prefix is the name. */
679 new_vect_var = create_tmp_var (type, prefix);
681 /* Mark vector typed variable as a gimple register variable. */
682 if (TREE_CODE (type) == VECTOR_TYPE)
683 DECL_GIMPLE_REG_P (new_vect_var) = true;
689 /* Function vect_create_addr_base_for_vector_ref.
691 Create an expression that computes the address of the first memory location
692 that will be accessed for a data reference.
695 STMT: The statement containing the data reference.
696 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
697 OFFSET: Optional. If supplied, it is be added to the initial address.
700 1. Return an SSA_NAME whose value is the address of the memory location of
701 the first vector of the data reference.
702 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
703 these statement(s) which define the returned SSA_NAME.
705 FORNOW: We are only handling array accesses with step 1. */
708 vect_create_addr_base_for_vector_ref (tree stmt,
712 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
713 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
714 tree data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
715 tree base_name = build_fold_indirect_ref (data_ref_base);
717 tree addr_base, addr_expr;
719 tree base_offset = unshare_expr (DR_OFFSET (dr));
720 tree init = unshare_expr (DR_INIT (dr));
721 tree vect_ptr_type, addr_expr2;
723 /* Create base_offset = DR_OFFSET + DR_INIT, gimplified into DEST. */
724 base_offset = size_binop (PLUS_EXPR, base_offset, init);
725 base_offset = fold_convert (sizetype, base_offset);
726 dest = create_tmp_var (TREE_TYPE (base_offset), "base_off");
727 add_referenced_var (dest);
728 base_offset = force_gimple_operand (base_offset, &new_stmt, false, dest);
729 append_to_statement_list_force (new_stmt, new_stmt_list);
/* If OFFSET was supplied, scale it by the step and fold it into
   base_offset (the guarding condition is elided in this excerpt). */
733 tree tmp = create_tmp_var (sizetype, "offset");
736 /* For interleaved access step we divide STEP by the size of the
737 interleaving group. */
738 if (DR_GROUP_SIZE (stmt_info))
739 step = fold_build2 (TRUNC_DIV_EXPR, TREE_TYPE (offset), DR_STEP (dr),
740 build_int_cst (TREE_TYPE (offset),
741 DR_GROUP_SIZE (stmt_info)));
745 add_referenced_var (tmp);
746 offset = fold_build2 (MULT_EXPR, TREE_TYPE (offset), offset, step);
747 base_offset = fold_build2 (PLUS_EXPR, TREE_TYPE (base_offset),
748 base_offset, offset);
749 base_offset = force_gimple_operand (base_offset, &new_stmt, false, tmp);
750 append_to_statement_list_force (new_stmt, new_stmt_list);
753 /* base + base_offset */
754 addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (data_ref_base), data_ref_base,
757 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
759 /* addr_expr = addr_base, converted to a pointer-to-vectype and
   gimplified into a fresh vectorizer pointer variable. */
760 addr_expr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
761 get_name (base_name));
762 add_referenced_var (addr_expr);
763 vec_stmt = fold_convert (vect_ptr_type, addr_base);
764 addr_expr2 = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
765 get_name (base_name));
766 add_referenced_var (addr_expr2);
767 vec_stmt = force_gimple_operand (vec_stmt, &new_stmt, false, addr_expr2);
768 append_to_statement_list_force (new_stmt, new_stmt_list);
770 if (vect_print_dump_info (REPORT_DETAILS))
772 fprintf (vect_dump, "created ");
773 print_generic_expr (vect_dump, vec_stmt, TDF_SLIM);
779 /* Function vect_create_data_ref_ptr.
781 Create a new pointer to vector type (vp), that points to the first location
782 accessed in the loop by STMT, along with the def-use update chain to
783 appropriately advance the pointer through the loop iterations. Also set
784 aliasing information for the pointer. This vector pointer is used by the
785 callers to this function to create a memory reference expression for vector
789 1. STMT: a stmt that references memory. Expected to be of the form
790 GIMPLE_MODIFY_STMT <name, data-ref> or
791 GIMPLE_MODIFY_STMT <data-ref, name>.
792 2. BSI: block_stmt_iterator where new stmts can be added.
793 3. OFFSET (optional): an offset to be added to the initial address accessed
794 by the data-ref in STMT.
795 4. ONLY_INIT: indicate if vp is to be updated in the loop, or remain
796 pointing to the initial address.
797 5. TYPE: if not NULL indicates the required type of the data-ref
800 1. Declare a new ptr to vector_type, and have it point to the base of the
801 data reference (initial addressed accessed by the data reference).
802 For example, for vector of type V8HI, the following code is generated:
805 vp = (v8hi *)initial_address;
807 if OFFSET is not supplied:
808 initial_address = &a[init];
809 if OFFSET is supplied:
810 initial_address = &a[init + OFFSET];
812 Return the initial_address in INITIAL_ADDRESS.
814 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
815 update the pointer in each iteration of the loop.
817 Return the increment stmt that updates the pointer in PTR_INCR.
819 3. Return the pointer. */
822 vect_create_data_ref_ptr (tree stmt,
823 block_stmt_iterator *bsi ATTRIBUTE_UNUSED,
824 tree offset, tree *initial_address, tree *ptr_incr,
825 bool only_init, tree type)
828 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
829 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
830 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
831 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
837 tree new_stmt_list = NULL_TREE;
838 edge pe = loop_preheader_edge (loop);
841 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
843 base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));
845 if (vect_print_dump_info (REPORT_DETAILS))
847 tree data_ref_base = base_name;
848 fprintf (vect_dump, "create vector-pointer variable to type: ");
849 print_generic_expr (vect_dump, vectype, TDF_SLIM);
850 if (TREE_CODE (data_ref_base) == VAR_DECL)
851 fprintf (vect_dump, " vectorizing a one dimensional array ref: ");
852 else if (TREE_CODE (data_ref_base) == ARRAY_REF)
853 fprintf (vect_dump, " vectorizing a multidimensional array ref: ");
854 else if (TREE_CODE (data_ref_base) == COMPONENT_REF)
855 fprintf (vect_dump, " vectorizing a record based array ref: ");
856 else if (TREE_CODE (data_ref_base) == SSA_NAME)
857 fprintf (vect_dump, " vectorizing a pointer ref: ");
858 print_generic_expr (vect_dump, base_name, TDF_SLIM);
861 /** (1) Create the new vector-pointer variable.  TYPE, if given,
     overrides the vectype as the pointed-to type: **/
863 vect_ptr_type = build_pointer_type (type);
865 vect_ptr_type = build_pointer_type (vectype);
866 vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
867 get_name (base_name));
868 add_referenced_var (vect_ptr);
870 /** (2) Add aliasing information to the new vector-pointer:
871 (The points-to info (DR_PTR_INFO) may be defined later.) **/
873 tag = DR_SYMBOL_TAG (dr);
/* (The gcc_assert on tag is elided in this excerpt.) */
876 /* If tag is a variable (and NOT_A_TAG) than a new symbol memory
877 tag must be created with tag added to its may alias list. */
879 new_type_alias (vect_ptr, tag, DR_REF (dr));
881 set_symbol_mem_tag (vect_ptr, tag);
883 var_ann (vect_ptr)->subvars = DR_SUBVARS (dr);
885 /** (3) Calculate the initial address the vector-pointer, and set
886 the vector-pointer to point to it before the loop: **/
888 /* Create: (&(base[init_val+offset]) in the loop preheader. */
889 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
891 pe = loop_preheader_edge (loop);
892 new_bb = bsi_insert_on_edge_immediate (pe, new_stmt_list);
893 gcc_assert (!new_bb);
894 *initial_address = new_temp;
896 /* Create: p = (vectype *) initial_base */
897 vec_stmt = fold_convert (vect_ptr_type, new_temp);
898 vec_stmt = build_gimple_modify_stmt (vect_ptr, vec_stmt);
899 vect_ptr_init = make_ssa_name (vect_ptr, vec_stmt);
900 GIMPLE_STMT_OPERAND (vec_stmt, 0) = vect_ptr_init;
901 new_bb = bsi_insert_on_edge_immediate (pe, vec_stmt);
902 gcc_assert (!new_bb);
905 /** (4) Handle the updating of the vector-pointer inside the loop: **/
907 if (only_init) /* No update in loop is required. */
909 /* Copy the points-to information if it exists. */
910 if (DR_PTR_INFO (dr))
911 duplicate_ssa_name_ptr_info (vect_ptr_init, DR_PTR_INFO (dr));
912 return vect_ptr_init;
/* Otherwise create an IV that advances the pointer by the vector size
   each iteration. */
916 block_stmt_iterator incr_bsi;
918 tree indx_before_incr, indx_after_incr;
921 standard_iv_increment_position (loop, &incr_bsi, &insert_after);
922 create_iv (vect_ptr_init,
923 fold_convert (vect_ptr_type, TYPE_SIZE_UNIT (vectype)),
924 NULL_TREE, loop, &incr_bsi, insert_after,
925 &indx_before_incr, &indx_after_incr);
926 incr = bsi_stmt (incr_bsi);
927 set_stmt_info (stmt_ann (incr),
928 new_stmt_vec_info (incr, loop_vinfo));
930 /* Copy the points-to information if it exists. */
931 if (DR_PTR_INFO (dr))
933 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
934 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
936 merge_alias_info (vect_ptr_init, indx_before_incr);
937 merge_alias_info (vect_ptr_init, indx_after_incr);
/* NOTE(review): the assignment of the increment stmt to *ptr_incr is
   elided in this excerpt -- confirm it happens before this return. */
941 return indx_before_incr;
946 /* Function bump_vector_ptr
948 Increment a pointer (to a vector type) by vector-size. Connect the new
949 increment stmt to the existing def-use update-chain of the pointer.
951 The pointer def-use update-chain before this function:
952 DATAREF_PTR = phi (p_0, p_2)
954 PTR_INCR: p_2 = DATAREF_PTR + step
956 The pointer def-use update-chain after this function:
957 DATAREF_PTR = phi (p_0, p_2)
959 NEW_DATAREF_PTR = DATAREF_PTR + vector_size
961 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
964 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
966 PTR_INCR - the stmt that updates the pointer in each iteration of the loop.
967 The increment amount across iterations is also expected to be
969 BSI - location where the new update stmt is to be placed.
970 STMT - the original scalar memory-access stmt that is being vectorized.
972 Output: Return NEW_DATAREF_PTR as illustrated above.
   */
977 bump_vector_ptr (tree dataref_ptr, tree ptr_incr, block_stmt_iterator *bsi,
980 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
981 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
982 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
983 tree vptr_type = TREE_TYPE (dataref_ptr);
984 tree ptr_var = SSA_NAME_VAR (dataref_ptr);
985 tree update = TYPE_SIZE_UNIT (vectype);
989 tree new_dataref_ptr;
/* Emit NEW_DATAREF_PTR = DATAREF_PTR + vector_size at BSI. */
991 incr_stmt = build_gimple_modify_stmt (ptr_var,
992 build2 (POINTER_PLUS_EXPR, vptr_type,
993 dataref_ptr, update));
994 new_dataref_ptr = make_ssa_name (ptr_var, incr_stmt);
995 GIMPLE_STMT_OPERAND (incr_stmt, 0) = new_dataref_ptr;
996 vect_finish_stmt_generation (stmt, incr_stmt, bsi);
998 /* Update the vector-pointer's cross-iteration increment: redirect
   PTR_INCR's use of DATAREF_PTR to the new name. */
999 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
1001 tree use = USE_FROM_PTR (use_p);
1003 if (use == dataref_ptr)
1004 SET_USE (use_p, new_dataref_ptr);
/* Any other use must be the step operand (else-branch elided here). */
1006 gcc_assert (tree_int_cst_compare (use, update) == 0);
1009 /* Copy the points-to information if it exists. */
1010 if (DR_PTR_INFO (dr))
1011 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
1012 merge_alias_info (new_dataref_ptr, dataref_ptr);
1014 return new_dataref_ptr;
1018 /* Function vect_create_destination_var.
1020 Create a new temporary of type VECTYPE, named after SCALAR_DEST.
   If VECTYPE is NULL, a scalar temporary of SCALAR_DEST's type is
   created instead. */
1023 vect_create_destination_var (tree scalar_dest, tree vectype)
1026 const char *new_name;
1028 enum vect_var_kind kind;
1030 kind = vectype ? vect_simple_var : vect_scalar_var;
1031 type = vectype ? vectype : TREE_TYPE (scalar_dest);
1033 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
1035 new_name = get_name (scalar_dest);
1038 vec_dest = vect_get_new_vect_var (type, kind, new_name);
1039 add_referenced_var (vec_dest);
1045 /* Function vect_init_vector.
1047 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
1048 the vector elements of VECTOR_VAR. Return the DEF of INIT_STMT. It will be
1049 used in the vectorization of STMT.  The init stmt is inserted on the
   loop preheader edge so the value is available before the loop. */
1052 vect_init_vector (tree stmt, tree vector_var, tree vector_type)
1054 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1055 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1056 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1064 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
1065 add_referenced_var (new_var);
1067 init_stmt = build_gimple_modify_stmt (new_var, vector_var);
1068 new_temp = make_ssa_name (new_var, init_stmt);
1069 GIMPLE_STMT_OPERAND (init_stmt, 0) = new_temp;
1071 pe = loop_preheader_edge (loop);
1072 new_bb = bsi_insert_on_edge_immediate (pe, init_stmt);
/* Inserting on the preheader edge must not have split it. */
1073 gcc_assert (!new_bb);
1075 if (vect_print_dump_info (REPORT_DETAILS))
1077 fprintf (vect_dump, "created new init_stmt: ");
1078 print_generic_expr (vect_dump, init_stmt, TDF_SLIM);
1081 vec_oprnd = GIMPLE_STMT_OPERAND (init_stmt, 0);
1086 /* Function get_initial_def_for_induction
1089 IV_PHI - the initial value of the induction variable
1092 Return a vector variable, initialized with the first VF values of
1093 the induction variable. E.g., for an iv with IV_PHI='X' and
1094 evolution S, for a vector of 4 units, we want to return:
1095 [X, X + S, X + 2*S, X + 3*S]. */
1098 get_initial_def_for_induction (tree iv_phi)
1100 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
1101 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1102 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1103 tree scalar_type = TREE_TYPE (PHI_RESULT_TREE (iv_phi));
1104 tree vectype = get_vectype_for_scalar_type (scalar_type);
1105 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1106 edge pe = loop_preheader_edge (loop);
1108 block_stmt_iterator bsi;
1109 tree vec, vec_init, vec_step, t;
1114 tree induction_phi, induc_def, new_stmt, vec_def, vec_dest;
1115 tree init_expr, step_expr;
1116 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
/* NCOPIES > 1 when the iv's vectype holds fewer elements than the
   vectorization factor; the def-use cycle is then "unrolled" below.  */
1119 int ncopies = vf / nunits;
1121 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
1123 tree stmt = NULL_TREE;
1124 block_stmt_iterator si;
1125 basic_block bb = bb_for_stmt (iv_phi);
1127 gcc_assert (phi_info);
1128 gcc_assert (ncopies >= 1);
1130 /* Find the first insertion point in the BB. */
1131 si = bsi_after_labels (bb);
1132 stmt = bsi_stmt (si);
/* Decompose the iv's scalar evolution into initial value and step.  */
1134 access_fn = analyze_scalar_evolution (loop, PHI_RESULT (iv_phi));
1135 gcc_assert (access_fn);
1136 ok = vect_is_simple_iv_evolution (loop->num, access_fn,
1137 &init_expr, &step_expr);
1140 /* Create the vector that holds the initial_value of the induction. */
1141 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
1142 add_referenced_var (new_var);
1144 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
/* Any stmts created by gimplification go on the preheader edge; the
   insertion must not create a new block.  */
1147 new_bb = bsi_insert_on_edge_immediate (pe, stmts);
1148 gcc_assert (!new_bb);
/* Build the element list [X, X+S, X+2*S, ...] one PLUS at a time,
   each intermediate value materialized on the preheader edge.  */
1152 t = tree_cons (NULL_TREE, new_name, t);
1153 for (i = 1; i < nunits; i++)
1157 /* Create: new_name = new_name + step_expr */
1158 tmp = fold_build2 (PLUS_EXPR, scalar_type, new_name, step_expr);
1159 init_stmt = build_gimple_modify_stmt (new_var, tmp);
1160 new_name = make_ssa_name (new_var, init_stmt);
1161 GIMPLE_STMT_OPERAND (init_stmt, 0) = new_name;
1163 new_bb = bsi_insert_on_edge_immediate (pe, init_stmt);
1164 gcc_assert (!new_bb);
1166 if (vect_print_dump_info (REPORT_DETAILS))
1168 fprintf (vect_dump, "created new init_stmt: ");
1169 print_generic_expr (vect_dump, init_stmt, TDF_SLIM);
1171 t = tree_cons (NULL_TREE, new_name, t);
/* The list was built back-to-front; reverse before constructing.  */
1173 vec = build_constructor_from_list (vectype, nreverse (t));
1174 vec_init = vect_init_vector (stmt, vec, vectype);
1177 /* Create the vector that holds the step of the induction. */
/* Cross-iteration stride of the vector iv is VF*S, splat into every
   element.  */
1178 expr = build_int_cst (scalar_type, vf);
1179 new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
1181 for (i = 0; i < nunits; i++)
1182 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
1183 vec = build_constructor_from_list (vectype, t);
1184 vec_step = vect_init_vector (stmt, vec, vectype);
1187 /* Create the following def-use cycle:
1189 vec_init = [X, X+S, X+2*S, X+3*S]
1190 vec_step = [VF*S, VF*S, VF*S, VF*S]
1192 vec_iv = PHI <vec_init, vec_loop>
1196 vec_loop = vec_iv + vec_step; */
1198 /* Create the induction-phi that defines the induction-operand. */
1199 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
1200 add_referenced_var (vec_dest);
1201 induction_phi = create_phi_node (vec_dest, loop->header);
1202 set_stmt_info (get_stmt_ann (induction_phi),
1203 new_stmt_vec_info (induction_phi, loop_vinfo));
1204 induc_def = PHI_RESULT (induction_phi);
1206 /* Create the iv update inside the loop */
1207 new_stmt = build_gimple_modify_stmt (NULL_TREE,
1208 build2 (PLUS_EXPR, vectype,
1209 induc_def, vec_step));
1210 vec_def = make_ssa_name (vec_dest, new_stmt);
1211 GIMPLE_STMT_OPERAND (new_stmt, 0) = vec_def;
1212 bsi = bsi_for_stmt (stmt);
1213 vect_finish_stmt_generation (stmt, new_stmt, &bsi);
1215 /* Set the arguments of the phi node: */
1216 add_phi_arg (induction_phi, vec_init, loop_preheader_edge (loop));
1217 add_phi_arg (induction_phi, vec_def, loop_latch_edge (loop));
1220 /* In case the vectorization factor (VF) is bigger than the number
1221 of elements that we can fit in a vectype (nunits), we have to generate
1222 more than one vector stmt - i.e - we need to "unroll" the
1223 vector stmt by a factor VF/nunits. For more details see documentation
1224 in vectorizable_operation. */
1228 stmt_vec_info prev_stmt_vinfo;
1230 /* Create the vector that holds the step of the induction. */
/* For the extra copies the per-copy stride is NUNITS*S (one vector's
   worth), not VF*S.  */
1231 expr = build_int_cst (scalar_type, nunits);
1232 new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
1234 for (i = 0; i < nunits; i++)
1235 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
1236 vec = build_constructor_from_list (vectype, t);
1237 vec_step = vect_init_vector (stmt, vec, vectype);
1239 vec_def = induc_def;
1240 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
1241 for (i = 1; i < ncopies; i++)
1245 /* vec_i = vec_prev + vec_{step*nunits} */
1246 tmp = build2 (PLUS_EXPR, vectype, vec_def, vec_step);
1247 new_stmt = build_gimple_modify_stmt (NULL_TREE, tmp);
1248 vec_def = make_ssa_name (vec_dest, new_stmt);
1249 GIMPLE_STMT_OPERAND (new_stmt, 0) = vec_def;
1250 bsi = bsi_for_stmt (stmt);
1251 vect_finish_stmt_generation (stmt, new_stmt, &bsi);
/* Chain the copies via RELATED_STMT so later copies can be found from
   earlier ones (see vect_get_vec_def_for_stmt_copy).  */
1253 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
1254 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
1258 if (vect_print_dump_info (REPORT_DETAILS))
1260 fprintf (vect_dump, "transform induction: created def-use cycle:");
1261 print_generic_expr (vect_dump, induction_phi, TDF_SLIM);
1262 fprintf (vect_dump, "\n");
1263 print_generic_expr (vect_dump, SSA_NAME_DEF_STMT (vec_def), TDF_SLIM);
/* Record the vectorized form of the iv phi.  */
1266 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
1271 /* Function vect_get_vec_def_for_operand.
1273 OP is an operand in STMT. This function returns a (vector) def that will be
1274 used in the vectorized stmt for STMT.
1276 In the case that OP is an SSA_NAME which is defined in the loop, then
1277 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1279 In case OP is an invariant or constant, a new stmt that creates a vector def
1280 needs to be introduced. */
1283 vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def)
1288 stmt_vec_info def_stmt_info = NULL;
1289 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1290 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1291 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1292 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1293 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1299 enum vect_def_type dt;
1303 if (vect_print_dump_info (REPORT_DETAILS))
1305 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
1306 print_generic_expr (vect_dump, op, TDF_SLIM);
/* Classify OP; must succeed since analysis already validated it.  */
1309 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt);
1310 gcc_assert (is_simple_use);
1311 if (vect_print_dump_info (REPORT_DETAILS))
1315 fprintf (vect_dump, "def = ");
1316 print_generic_expr (vect_dump, def, TDF_SLIM);
1320 fprintf (vect_dump, " def_stmt = ");
1321 print_generic_expr (vect_dump, def_stmt, TDF_SLIM);
1327 /* Case 1: operand is a constant. */
1328 case vect_constant_def:
1333 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1334 if (vect_print_dump_info (REPORT_DETAILS))
1335 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
/* Build the element list in reverse so element 0 ends up first.  */
1337 for (i = nunits - 1; i >= 0; --i)
1339 t = tree_cons (NULL_TREE, op, t);
1341 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1342 vec_cst = build_vector (vector_type, t);
1344 return vect_init_vector (stmt, vec_cst, vector_type);
1347 /* Case 2: operand is defined outside the loop - loop invariant. */
1348 case vect_invariant_def:
1353 /* Create 'vec_inv = {inv,inv,..,inv}' */
1354 if (vect_print_dump_info (REPORT_DETAILS))
1355 fprintf (vect_dump, "Create vector_inv.");
1357 for (i = nunits - 1; i >= 0; --i)
1359 t = tree_cons (NULL_TREE, def, t);
1362 /* FIXME: use build_constructor directly. */
1363 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1364 vec_inv = build_constructor_from_list (vector_type, t);
1366 return vect_init_vector (stmt, vec_inv, vector_type);
1369 /* Case 3: operand is defined inside the loop. */
1373 *scalar_def = def_stmt;
1375 /* Get the def from the vectorized stmt. */
1376 def_stmt_info = vinfo_for_stmt (def_stmt);
1377 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
/* The defining stmt must already have been vectorized.  */
1378 gcc_assert (vec_stmt);
1379 vec_oprnd = GIMPLE_STMT_OPERAND (vec_stmt, 0);
1383 /* Case 4: operand is defined by a loop header phi - reduction */
1384 case vect_reduction_def:
1386 gcc_assert (TREE_CODE (def_stmt) == PHI_NODE);
1388 /* Get the def before the loop */
1389 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1390 return get_initial_def_for_reduction (stmt, op, scalar_def);
1393 /* Case 5: operand is defined by loop-header phi - induction. */
1394 case vect_induction_def:
1396 gcc_assert (TREE_CODE (def_stmt) == PHI_NODE);
1398 /* Get the def before the loop */
1399 return get_initial_def_for_induction (def_stmt);
1408 /* Function vect_get_vec_def_for_stmt_copy
1410 Return a vector-def for an operand. This function is used when the
1411 vectorized stmt to be created (by the caller to this function) is a "copy"
1412 created in case the vectorized result cannot fit in one vector, and several
1413 copies of the vector-stmt are required. In this case the vector-def is
1414 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1415 of the stmt that defines VEC_OPRND.
1416 DT is the type of the vector def VEC_OPRND.
1419 In case the vectorization factor (VF) is bigger than the number
1420 of elements that can fit in a vectype (nunits), we have to generate
1421 more than one vector stmt to vectorize the scalar stmt. This situation
1422 arises when there are multiple data-types operated upon in the loop; the
1423 smallest data-type determines the VF, and as a result, when vectorizing
1424 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1425 vector stmt (each computing a vector of 'nunits' results, and together
1426 computing 'VF' results in each iteration). This function is called when
1427 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1428 which VF=16 and nunits=4, so the number of copies required is 4):
1430 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1432 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1433 VS1.1: vx.1 = memref1 VS1.2
1434 VS1.2: vx.2 = memref2 VS1.3
1435 VS1.3: vx.3 = memref3
1437 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1438 VSnew.1: vz1 = vx.1 + ... VSnew.2
1439 VSnew.2: vz2 = vx.2 + ... VSnew.3
1440 VSnew.3: vz3 = vx.3 + ...
1442 The vectorization of S1 is explained in vectorizable_load.
1443 The vectorization of S2:
1444 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1445 the function 'vect_get_vec_def_for_operand' is called to
1446 get the relevant vector-def for each operand of S2. For operand x it
1447 returns the vector-def 'vx.0'.
1449 To create the remaining copies of the vector-stmt (VSnew.j), this
1450 function is called to get the relevant vector-def for each operand. It is
1451 obtained from the respective VS1.j stmt, which is recorded in the
1452 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1454 For example, to obtain the vector-def 'vx.1' in order to create the
1455 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1456 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1457 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1458 and return its def ('vx.1').
1459 Overall, to create the above sequence this function will be called 3 times:
1460 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1461 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1462 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1465 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1467 tree vec_stmt_for_operand;
1468 stmt_vec_info def_stmt_info;
1470 /* Do nothing; can reuse same def. */
/* Invariants and constants are the same for every copy, so the original
   vector def is returned unchanged.  */
1471 if (dt == vect_invariant_def || dt == vect_constant_def )
/* Walk one link along the RELATED_STMT chain to the next copy and
   return its LHS.  */
1474 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1475 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1476 gcc_assert (def_stmt_info);
1477 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1478 gcc_assert (vec_stmt_for_operand);
1479 vec_oprnd = GIMPLE_STMT_OPERAND (vec_stmt_for_operand, 0);
1485 /* Function vect_finish_stmt_generation.
1487 Insert a new stmt. */
1490 vect_finish_stmt_generation (tree stmt, tree vec_stmt,
1491 block_stmt_iterator *bsi)
1493 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1494 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
/* Insert VEC_STMT just before the scalar stmt being vectorized and
   attach fresh vectorizer bookkeeping to it.  */
1496 bsi_insert_before (bsi, vec_stmt, BSI_SAME_STMT);
1497 set_stmt_info (get_stmt_ann (vec_stmt),
1498 new_stmt_vec_info (vec_stmt, loop_vinfo));
1500 if (vect_print_dump_info (REPORT_DETAILS))
1502 fprintf (vect_dump, "add new stmt: ");
1503 print_generic_expr (vect_dump, vec_stmt, TDF_SLIM);
1506 /* Make sure bsi points to the stmt that is being vectorized. */
1507 gcc_assert (stmt == bsi_stmt (*bsi));
/* Propagate the scalar stmt's source location to the new stmt so
   debug info stays sensible.  */
1509 #ifdef USE_MAPPED_LOCATION
1510 SET_EXPR_LOCATION (vec_stmt, EXPR_LOCATION (stmt));
1512 SET_EXPR_LOCUS (vec_stmt, EXPR_LOCUS (stmt));
1517 /* Function get_initial_def_for_reduction
1520 STMT - a stmt that performs a reduction operation in the loop.
1521 INIT_VAL - the initial value of the reduction variable
1524 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
1525 of the reduction (used for adjusting the epilog - see below).
1526 Return a vector variable, initialized according to the operation that STMT
1527 performs. This vector will be used as the initial value of the
1528 vector of partial results.
1530 Option1 (adjust in epilog): Initialize the vector as follows:
1533 min/max: [init_val,init_val,..,init_val,init_val]
1534 bit and/or: [init_val,init_val,..,init_val,init_val]
1535 and when necessary (e.g. add/mult case) let the caller know
1536 that it needs to adjust the result by init_val.
1538 Option2: Initialize the vector as follows:
1539 add: [0,0,...,0,init_val]
1540 mult: [1,1,...,1,init_val]
1541 min/max: [init_val,init_val,...,init_val]
1542 bit and/or: [init_val,init_val,...,init_val]
1543 and no adjustments are needed.
1545 For example, for the following code:
1551 STMT is 's = s + a[i]', and the reduction variable is 's'.
1552 For a vector of 4 units, we want to return either [0,0,0,init_val],
1553 or [0,0,0,0] and let the caller know that it needs to adjust
1554 the result at the end by 'init_val'.
1556 FORNOW, we are using the 'adjust in epilog' scheme, because this way the
1557 initialization vector is simpler (same element in all entries).
1558 A cost model should help decide between these two schemes. */
1561 get_initial_def_for_reduction (tree stmt, tree init_val, tree *adjustment_def)
1563 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1564 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1565 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1566 enum tree_code code = TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1));
1567 tree type = TREE_TYPE (init_val);
/* Only integral and scalar-float reductions are handled here.  */
1575 gcc_assert (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type));
1576 vecdef = vect_get_vec_def_for_operand (init_val, stmt, NULL);
/* Dispatch on the reduction's tree code (switch header not visible in
   this view).  For sums: start from an all-zeros vector and tell the
   caller to add INIT_VAL back in the epilog.  */
1580 case WIDEN_SUM_EXPR:
1583 *adjustment_def = init_val;
1584 /* Create a vector of zeros for init_def. */
1585 if (INTEGRAL_TYPE_P (type))
1586 def_for_init = build_int_cst (type, 0);
1588 def_for_init = build_real (type, dconst0);
1589 for (i = nunits - 1; i >= 0; --i)
1590 t = tree_cons (NULL_TREE, def_for_init, t);
1591 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def_for_init));
1592 init_def = build_vector (vector_type, t);
/* Other codes: no epilog adjustment is required.  */
1597 *adjustment_def = NULL_TREE;
1609 /* Function vect_create_epilog_for_reduction
1611 Create code at the loop-epilog to finalize the result of a reduction
1614 VECT_DEF is a vector of partial results.
1615 REDUC_CODE is the tree-code for the epilog reduction.
1616 STMT is the scalar reduction stmt that is being vectorized.
1617 REDUCTION_PHI is the phi-node that carries the reduction computation.
1620 1. Creates the reduction def-use cycle: sets the arguments for
1622 The loop-entry argument is the vectorized initial-value of the reduction.
1623 The loop-latch argument is VECT_DEF - the vector of partial sums.
1624 2. "Reduces" the vector of partial results VECT_DEF into a single result,
1625 by applying the operation specified by REDUC_CODE if available, or by
1626 other means (whole-vector shifts or a scalar loop).
1627 The function also creates a new phi node at the loop exit to preserve
1628 loop-closed form, as illustrated below.
1630 The flow at the entry to this function:
1633 vec_def = phi <null, null> # REDUCTION_PHI
1634 VECT_DEF = vector_stmt # vectorized form of STMT
1635 s_loop = scalar_stmt # (scalar) STMT
1637 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
1641 The above is transformed by this function into:
1644 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
1645 VECT_DEF = vector_stmt # vectorized form of STMT
1646 s_loop = scalar_stmt # (scalar) STMT
1648 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
1649 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
1650 v_out2 = reduce <v_out1>
1651 s_out3 = extract_field <v_out2, 0>
1652 s_out4 = adjust_result <s_out3>
1658 vect_create_epilog_for_reduction (tree vect_def, tree stmt,
1659 enum tree_code reduc_code, tree reduction_phi)
1661 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1663 enum machine_mode mode;
1664 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1665 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1666 basic_block exit_bb;
1670 block_stmt_iterator exit_bsi;
1675 tree new_scalar_dest, exit_phi;
1676 tree bitsize, bitpos, bytesize;
1677 enum tree_code code = TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1));
1678 tree scalar_initial_def;
1679 tree vec_initial_def;
1681 imm_use_iterator imm_iter;
1682 use_operand_p use_p;
1683 bool extract_scalar_result;
1687 tree operation = GIMPLE_STMT_OPERAND (stmt, 1);
/* The reduction variable is, by convention, the last operand of the
   (binary or ternary) reduction operation.  */
1690 op_type = TREE_OPERAND_LENGTH (operation);
1691 reduction_op = TREE_OPERAND (operation, op_type-1);
1692 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
1693 mode = TYPE_MODE (vectype);
1695 /*** 1. Create the reduction def-use cycle ***/
1697 /* 1.1 set the loop-entry arg of the reduction-phi: */
1698 /* For the case of reduction, vect_get_vec_def_for_operand returns
1699 the scalar def before the loop, that defines the initial value
1700 of the reduction variable. */
1701 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
1702 &scalar_initial_def);
1703 add_phi_arg (reduction_phi, vec_initial_def, loop_preheader_edge (loop));
1705 /* 1.2 set the loop-latch arg for the reduction-phi: */
1706 add_phi_arg (reduction_phi, vect_def, loop_latch_edge (loop));
1708 if (vect_print_dump_info (REPORT_DETAILS))
1710 fprintf (vect_dump, "transform reduction: created def-use cycle:");
1711 print_generic_expr (vect_dump, reduction_phi, TDF_SLIM);
1712 fprintf (vect_dump, "\n");
1713 print_generic_expr (vect_dump, SSA_NAME_DEF_STMT (vect_def), TDF_SLIM);
1717 /*** 2. Create epilog code
1718 The reduction epilog code operates across the elements of the vector
1719 of partial results computed by the vectorized loop.
1720 The reduction epilog code consists of:
1721 step 1: compute the scalar result in a vector (v_out2)
1722 step 2: extract the scalar result (s_out3) from the vector (v_out2)
1723 step 3: adjust the scalar result (s_out3) if needed.
1725 Step 1 can be accomplished using one the following three schemes:
1726 (scheme 1) using reduc_code, if available.
1727 (scheme 2) using whole-vector shifts, if available.
1728 (scheme 3) using a scalar loop. In this case steps 1+2 above are
1731 The overall epilog code looks like this:
1733 s_out0 = phi <s_loop> # original EXIT_PHI
1734 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
1735 v_out2 = reduce <v_out1> # step 1
1736 s_out3 = extract_field <v_out2, 0> # step 2
1737 s_out4 = adjust_result <s_out3> # step 3
1739 (step 3 is optional, and step2 1 and 2 may be combined).
1740 Lastly, the uses of s_out0 are replaced by s_out4.
1744 /* 2.1 Create new loop-exit-phi to preserve loop-closed form:
1745 v_out1 = phi <v_loop> */
/* The loop is assumed to have a single exit here.  */
1747 exit_bb = single_exit (loop)->dest;
1748 new_phi = create_phi_node (SSA_NAME_VAR (vect_def), exit_bb);
1749 SET_PHI_ARG_DEF (new_phi, single_exit (loop)->dest_idx, vect_def);
1750 exit_bsi = bsi_after_labels (exit_bb);
1752 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
1753 (i.e. when reduc_code is not available) and in the final adjustment
1754 code (if needed). Also get the original scalar reduction variable as
1755 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
1756 represents a reduction pattern), the tree-code and scalar-def are
1757 taken from the original stmt that the pattern-stmt (STMT) replaces.
1758 Otherwise (it is a regular reduction) - the tree-code and scalar-def
1759 are taken from STMT. */
1761 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
1764 /* Regular reduction */
1769 /* Reduction pattern */
1770 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
1771 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
1772 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
1774 code = TREE_CODE (GIMPLE_STMT_OPERAND (orig_stmt, 1));
1775 scalar_dest = GIMPLE_STMT_OPERAND (orig_stmt, 0);
1776 scalar_type = TREE_TYPE (scalar_dest);
1777 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
1778 bitsize = TYPE_SIZE (scalar_type);
1779 bytesize = TYPE_SIZE_UNIT (scalar_type);
1781 /* 2.3 Create the reduction code, using one of the three schemes described
/* REDUC_CODE >= NUM_TREE_CODES encodes "no direct reduction available"
   (caller's convention); otherwise scheme 1 applies.  */
1784 if (reduc_code < NUM_TREE_CODES)
1788 /*** Case 1: Create:
1789 v_out2 = reduc_expr <v_out1> */
1791 if (vect_print_dump_info (REPORT_DETAILS))
1792 fprintf (vect_dump, "Reduce using direct vector reduction.");
1794 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1795 tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
1796 epilog_stmt = build_gimple_modify_stmt (vec_dest, tmp);
1797 new_temp = make_ssa_name (vec_dest, epilog_stmt);
1798 GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
1799 bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
1801 extract_scalar_result = true;
1805 enum tree_code shift_code = 0;
1806 bool have_whole_vector_shift = true;
1808 int element_bitsize = tree_low_cst (bitsize, 1);
1809 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
/* Scheme 2 requires a whole-vector right-shift for this mode.  */
1812 if (vec_shr_optab->handlers[mode].insn_code != CODE_FOR_nothing)
1813 shift_code = VEC_RSHIFT_EXPR;
1815 have_whole_vector_shift = false;
1817 /* Regardless of whether we have a whole vector shift, if we're
1818 emulating the operation via tree-vect-generic, we don't want
1819 to use it. Only the first round of the reduction is likely
1820 to still be profitable via emulation. */
1821 /* ??? It might be better to emit a reduction tree code here, so that
1822 tree-vect-generic can expand the first round via bit tricks. */
1823 if (!VECTOR_MODE_P (mode))
1824 have_whole_vector_shift = false;
1827 optab optab = optab_for_tree_code (code, vectype);
1828 if (optab->handlers[mode].insn_code == CODE_FOR_nothing)
1829 have_whole_vector_shift = false;
1832 if (have_whole_vector_shift)
1834 /*** Case 2: Create:
1835 for (offset = VS/2; offset >= element_size; offset/=2)
1837 Create: va' = vec_shift <va, offset>
1838 Create: va = vop <va, va'>
1841 if (vect_print_dump_info (REPORT_DETAILS))
1842 fprintf (vect_dump, "Reduce using vector shifts");
1844 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1845 new_temp = PHI_RESULT (new_phi);
/* Log2 halving: each iteration folds the upper half of the vector
   into the lower half.  */
1847 for (bit_offset = vec_size_in_bits/2;
1848 bit_offset >= element_bitsize;
1851 tree bitpos = size_int (bit_offset);
1852 tree tmp = build2 (shift_code, vectype, new_temp, bitpos);
1853 epilog_stmt = build_gimple_modify_stmt (vec_dest, tmp);
1854 new_name = make_ssa_name (vec_dest, epilog_stmt);
1855 GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_name;
1856 bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
1858 tmp = build2 (code, vectype, new_name, new_temp);
1859 epilog_stmt = build_gimple_modify_stmt (vec_dest, tmp);
1860 new_temp = make_ssa_name (vec_dest, epilog_stmt);
1861 GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
1862 bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
1865 extract_scalar_result = true;
1871 /*** Case 3: Create:
1872 s = extract_field <v_out2, 0>
1873 for (offset = element_size;
1874 offset < vector_size;
1875 offset += element_size;)
1877 Create: s' = extract_field <v_out2, offset>
1878 Create: s = op <s, s'>
1881 if (vect_print_dump_info (REPORT_DETAILS))
1882 fprintf (vect_dump, "Reduce using scalar code. ");
1884 vec_temp = PHI_RESULT (new_phi);
1885 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
/* Extract element 0 to seed the scalar accumulation.  */
1886 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
1888 BIT_FIELD_REF_UNSIGNED (rhs) = TYPE_UNSIGNED (scalar_type);
1889 epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, rhs);
1890 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
1891 GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
1892 bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
1894 for (bit_offset = element_bitsize;
1895 bit_offset < vec_size_in_bits;
1896 bit_offset += element_bitsize)
1899 tree bitpos = bitsize_int (bit_offset);
1900 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
1903 BIT_FIELD_REF_UNSIGNED (rhs) = TYPE_UNSIGNED (scalar_type);
1904 epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, rhs);
1905 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
1906 GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_name;
1907 bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
1909 tmp = build2 (code, scalar_type, new_name, new_temp);
1910 epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, tmp);
1911 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
1912 GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
1913 bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
/* Scheme 3 already produced a scalar; no extraction step needed.  */
1916 extract_scalar_result = false;
1920 /* 2.4 Extract the final scalar result. Create:
1921 s_out3 = extract_field <v_out2, bitpos> */
1923 if (extract_scalar_result)
1927 if (vect_print_dump_info (REPORT_DETAILS))
1928 fprintf (vect_dump, "extract scalar result");
/* On big-endian targets the result lands in the last element, so the
   bit position is (nunits-1) * element_size rather than 0.  */
1930 if (BYTES_BIG_ENDIAN)
1931 bitpos = size_binop (MULT_EXPR,
1932 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
1933 TYPE_SIZE (scalar_type));
1935 bitpos = bitsize_zero_node;
1937 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
1938 BIT_FIELD_REF_UNSIGNED (rhs) = TYPE_UNSIGNED (scalar_type);
1939 epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, rhs);
1940 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
1941 GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
1942 bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
1945 /* 2.4 Adjust the final result by the initial value of the reduction
1946 variable. (When such adjustment is not needed, then
1947 'scalar_initial_def' is zero).
1950 s_out4 = scalar_expr <s_out3, scalar_initial_def> */
1952 if (scalar_initial_def)
1954 tree tmp = build2 (code, scalar_type, new_temp, scalar_initial_def);
1955 epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, tmp);
1956 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
1957 GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
1958 bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
1961 /* 2.6 Replace uses of s_out0 with uses of s_out3 */
1963 /* Find the loop-closed-use at the loop exit of the original scalar result.
1964 (The reduction result is expected to have two immediate uses - one at the
1965 latch block, and one at the loop exit). */
1967 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
1969 if (!flow_bb_inside_loop_p (loop, bb_for_stmt (USE_STMT (use_p))))
1971 exit_phi = USE_STMT (use_p);
1975 /* We expect to have found an exit_phi because of loop-closed-ssa form. */
1976 gcc_assert (exit_phi);
1977 /* Replace the uses: */
1978 orig_name = PHI_RESULT (exit_phi);
1979 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
1980 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
1981 SET_USE (use_p, new_temp);
1985 /* Function vectorizable_reduction.
1987 Check if STMT performs a reduction operation that can be vectorized.
1988 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1989 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1990 Return FALSE if not a vectorizable STMT, TRUE otherwise.
1992 This function also handles reduction idioms (patterns) that have been
1993 recognized in advance during vect_pattern_recog. In this case, STMT may be
1995 X = pattern_expr (arg0, arg1, ..., X)
1996 and it's STMT_VINFO_RELATED_STMT points to the last stmt in the original
1997 sequence that had been detected and replaced by the pattern-stmt (STMT).
1999 In some cases of reduction patterns, the type of the reduction variable X is
2000 different than the type of the other arguments of STMT.
2001 In such cases, the vectype that is used when transforming STMT into a vector
2002 stmt is different than the vectype that is used to determine the
2003 vectorization factor, because it consists of a different number of elements
2004 than the actual number of elements that are being operated upon in parallel.
2006 For example, consider an accumulation of shorts into an int accumulator.
2007 On some targets it's possible to vectorize this pattern operating on 8
2008 shorts at a time (hence, the vectype for purposes of determining the
2009 vectorization factor should be V8HI); on the other hand, the vectype that
2010 is used to create the vector form is actually V4SI (the type of the result).
2012 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
2013 indicates what is the actual level of parallelism (V8HI in the example), so
2014 that the right vectorization factor would be derived. This vectype
2015 corresponds to the type of arguments to the reduction stmt, and should *NOT*
2016 be used to create the vectorized stmt. The right vectype for the vectorized
2017 stmt is obtained from the type of the result X:
2018 get_vectype_for_scalar_type (TREE_TYPE (X))
2020 This means that, contrary to "regular" reductions (or "regular" stmts in
2021 general), the following equation:
2022 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
2023 does *NOT* necessarily hold for reduction patterns. */
/* Function vectorizable_reduction.

   Check if STMT performs a reduction computation that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT: create the vectorized
   loop-body statement(s), the loop-header reduction phi, and the epilog
   reduction code (via vect_create_epilog_for_reduction).

   NOTE(review): this excerpt is non-contiguous -- the leading original
   line numbers show elided lines (braces, early `return false`s, some
   declarations).  Read control flow against the full file before
   changing anything here.  */
2026 vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
2031 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
2032 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2033 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2034 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2035 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2037 enum tree_code code, orig_code, epilog_reduc_code = 0;
2038 enum machine_mode vec_mode;
2040 optab optab, reduc_optab;
2041 tree new_temp = NULL_TREE;
2043 enum vect_def_type dt;
2048 stmt_vec_info orig_stmt_info;
2049 tree expr = NULL_TREE;
/* ncopies = number of vector stmts needed to cover one scalar iteration
   at the loop's vectorization factor.  */
2051 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2052 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2053 stmt_vec_info prev_stmt_info;
2055 tree new_stmt = NULL_TREE;
2058 gcc_assert (ncopies >= 1);
2060 /* 1. Is vectorizable reduction? */
2062 /* Not supportable if the reduction variable is used in the loop. */
2063 if (STMT_VINFO_RELEVANT_P (stmt_info))
2066 if (!STMT_VINFO_LIVE_P (stmt_info))
2069 /* Make sure it was already recognized as a reduction computation. */
2070 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def)
2073 /* 2. Has this been recognized as a reduction pattern?
2075 Check if STMT represents a pattern that has been recognized
2076 in earlier analysis stages. For stmts that represent a pattern,
2077 the STMT_VINFO_RELATED_STMT field records the last stmt in
2078 the original sequence that constitutes the pattern. */
2080 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
/* NOTE(review): the guard around this sanity-check group (presumably
   `if (orig_stmt)`) is elided in this excerpt -- confirm in full file.  */
2083 orig_stmt_info = vinfo_for_stmt (orig_stmt);
2084 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
2085 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
2086 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
2089 /* 3. Check the operands of the operation. The first operands are defined
2090 inside the loop body. The last operand is the reduction variable,
2091 which is defined by the loop-header-phi. */
2093 gcc_assert (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT);
2095 operation = GIMPLE_STMT_OPERAND (stmt, 1);
2096 code = TREE_CODE (operation);
2097 op_type = TREE_OPERAND_LENGTH (operation);
2098 if (op_type != binary_op && op_type != ternary_op)
2100 scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
2101 scalar_type = TREE_TYPE (scalar_dest);
2103 /* All uses but the last are expected to be defined in the loop.
2104 The last use is the reduction variable. */
2105 for (i = 0; i < op_type-1; i++)
2107 op = TREE_OPERAND (operation, i);
2108 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt);
2109 gcc_assert (is_simple_use);
2110 if (dt != vect_loop_def
2111 && dt != vect_invariant_def
2112 && dt != vect_constant_def
2113 && dt != vect_induction_def)
/* Last operand: must be the reduction variable, defined by a loop-header
   phi, and recognized by vect_is_simple_reduction.  */
2117 op = TREE_OPERAND (operation, i);
2118 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt);
2119 gcc_assert (is_simple_use);
2120 gcc_assert (dt == vect_reduction_def);
2121 gcc_assert (TREE_CODE (def_stmt) == PHI_NODE);
/* NOTE(review): the two asserts below appear to be the then/else arms of
   an elided `if (orig_stmt) ... else ...` -- the pattern stmt vs. regular
   reduction cases.  Confirm against the full file.  */
2123 gcc_assert (orig_stmt == vect_is_simple_reduction (loop, def_stmt));
2125 gcc_assert (stmt == vect_is_simple_reduction (loop, def_stmt));
2127 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
2130 /* 4. Supportable by target? */
2132 /* 4.1. check support for the operation in the loop */
2133 optab = optab_for_tree_code (code, vectype);
2136 if (vect_print_dump_info (REPORT_DETAILS))
2137 fprintf (vect_dump, "no optab.");
2140 vec_mode = TYPE_MODE (vectype);
2141 if (optab->handlers[(int) vec_mode].insn_code == CODE_FOR_nothing)
2143 if (vect_print_dump_info (REPORT_DETAILS))
2144 fprintf (vect_dump, "op not supported by target.");
/* No direct vector insn: fall back to word-mode emulation only when the
   mode fits a word and the vectorization factor makes it worthwhile.  */
2145 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2146 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2147 < vect_min_worthwhile_factor (code))
2149 if (vect_print_dump_info (REPORT_DETAILS))
2150 fprintf (vect_dump, "proceeding using word mode.");
2153 /* Worthwhile without SIMD support? */
2154 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2155 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2156 < vect_min_worthwhile_factor (code))
2158 if (vect_print_dump_info (REPORT_DETAILS))
2159 fprintf (vect_dump, "not worthwhile without SIMD support.");
2163 /* 4.2. Check support for the epilog operation.
2165 If STMT represents a reduction pattern, then the type of the
2166 reduction variable may be different than the type of the rest
2167 of the arguments. For example, consider the case of accumulation
2168 of shorts into an int accumulator; The original code:
2169 S1: int_a = (int) short_a;
2170 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
2173 STMT: int_acc = widen_sum <short_a, int_acc>
2176 1. The tree-code that is used to create the vector operation in the
2177 epilog code (that reduces the partial results) is not the
2178 tree-code of STMT, but is rather the tree-code of the original
2179 stmt from the pattern that STMT is replacing. I.e, in the example
2180 above we want to use 'widen_sum' in the loop, but 'plus' in the
2182 2. The type (mode) we use to check available target support
2183 for the vector operation to be created in the *epilog*, is
2184 determined by the type of the reduction variable (in the example
2185 above we'd check this: plus_optab[vect_int_mode]).
2186 However the type (mode) we use to check available target support
2187 for the vector operation to be created *inside the loop*, is
2188 determined by the type of the other arguments to STMT (in the
2189 example we'd check this: widen_sum_optab[vect_short_mode]).
2191 This is contrary to "regular" reductions, in which the types of all
2192 the arguments are the same as the type of the reduction variable.
2193 For "regular" reductions we can therefore use the same vector type
2194 (and also the same tree-code) when generating the epilog code and
2195 when generating the code inside the loop. */
2199 /* This is a reduction pattern: get the vectype from the type of the
2200 reduction variable, and get the tree-code from orig_stmt. */
2201 orig_code = TREE_CODE (GIMPLE_STMT_OPERAND (orig_stmt, 1));
2202 vectype = get_vectype_for_scalar_type (TREE_TYPE (def));
2203 vec_mode = TYPE_MODE (vectype);
2207 /* Regular reduction: use the same vectype and tree-code as used for
2208 the vector code inside the loop can be used for the epilog code. */
/* Map the scalar reduction code to its vector epilog code (e.g. PLUS ->
   REDUC_PLUS); NUM_TREE_CODES is used below as a "no epilog insn" marker.  */
2212 if (!reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
2214 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype);
2217 if (vect_print_dump_info (REPORT_DETAILS))
2218 fprintf (vect_dump, "no optab for reduction.");
2219 epilog_reduc_code = NUM_TREE_CODES;
2221 if (reduc_optab->handlers[(int) vec_mode].insn_code == CODE_FOR_nothing)
2223 if (vect_print_dump_info (REPORT_DETAILS))
2224 fprintf (vect_dump, "reduc op not supported by target.");
2225 epilog_reduc_code = NUM_TREE_CODES;
2228 if (!vec_stmt) /* transformation not required. */
2230 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
2231 vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies);
/* ** Transform. ** */
2237 if (vect_print_dump_info (REPORT_DETAILS))
2238 fprintf (vect_dump, "transform reduction.");
2240 /* Create the destination vector */
2241 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2243 /* Create the reduction-phi that defines the reduction-operand. */
2244 new_phi = create_phi_node (vec_dest, loop->header);
2246 /* In case the vectorization factor (VF) is bigger than the number
2247 of elements that we can fit in a vectype (nunits), we have to generate
2248 more than one vector stmt - i.e - we need to "unroll" the
2249 vector stmt by a factor VF/nunits. For more details see documentation
2250 in vectorizable_operation. */
2252 prev_stmt_info = NULL;
2253 for (j = 0; j < ncopies; j++)
/* j == 0: create fresh vector defs for the loop operands.  */
2258 op = TREE_OPERAND (operation, 0);
2259 loop_vec_def0 = vect_get_vec_def_for_operand (op, stmt, NULL);
2260 if (op_type == ternary_op)
2262 op = TREE_OPERAND (operation, 1);
2263 loop_vec_def1 = vect_get_vec_def_for_operand (op, stmt, NULL);
2266 /* Get the vector def for the reduction variable from the phi node */
2267 reduc_def = PHI_RESULT (new_phi);
/* NOTE(review): else-branch for j > 0 (the enclosing if/else lines are
   elided): reuse the defs created for the previous copy.  */
2271 enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
2272 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
2273 if (op_type == ternary_op)
2274 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def1);
2276 /* Get the vector def for the reduction variable from the vectorized
2277 reduction operation generated in the previous iteration (j-1) */
2278 reduc_def = GIMPLE_STMT_OPERAND (new_stmt ,0);
2281 /* Arguments are ready. create the new vector stmt. */
2282 if (op_type == binary_op)
2283 expr = build2 (code, vectype, loop_vec_def0, reduc_def);
2285 expr = build3 (code, vectype, loop_vec_def0, loop_vec_def1,
2287 new_stmt = build_gimple_modify_stmt (vec_dest, expr);
2288 new_temp = make_ssa_name (vec_dest, new_stmt);
2289 GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
2290 vect_finish_stmt_generation (stmt, new_stmt, bsi);
/* Chain the copies via STMT_VINFO_RELATED_STMT; the first copy becomes
   the recorded VEC_STMT.  */
2293 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2295 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2296 prev_stmt_info = vinfo_for_stmt (new_stmt);
2299 /* Finalize the reduction-phi (set its arguments) and create the
2300 epilog reduction code. */
2301 vect_create_epilog_for_reduction (new_temp, stmt, epilog_reduc_code, new_phi);
2305 /* Checks if CALL can be vectorized in type VECTYPE. Returns
2306 a function declaration if the target has a vectorized version
2307 of the function, or NULL_TREE if the function cannot be vectorized. */
/* Return the target builtin decl that vectorizes CALL for output/input
   vector types VECTYPE_OUT/VECTYPE_IN, or NULL_TREE (via elided returns)
   when the call cannot be vectorized.
   NOTE(review): excerpt is non-contiguous; the early-return lines are
   elided here.  */
2310 vectorizable_function (tree call, tree vectype_out, tree vectype_in)
2312 tree fndecl = get_callee_fndecl (call);
2313 enum built_in_function code;
2315 /* We only handle functions that do not read or clobber memory -- i.e.
2316 const or novops ones. */
2317 if (!(call_expr_flags (call) & (ECF_CONST | ECF_NOVOPS)))
/* Only known builtins qualify (the leading condition of this test is
   elided -- presumably a NULL fndecl check).  */
2321 || TREE_CODE (fndecl) != FUNCTION_DECL
2322 || !DECL_BUILT_IN (fndecl))
/* Ask the target whether it provides a vectorized form of this builtin.  */
2325 code = DECL_FUNCTION_CODE (fndecl);
2326 return targetm.vectorize.builtin_vectorized_function (code, vectype_out,
2330 /* Function vectorizable_call.
2332 Check if STMT performs a function call that can be vectorized.
2333 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2334 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2335 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
/* Check if STMT is a call that can be vectorized via a target builtin;
   if VEC_STMT is passed, emit the vectorized call(s) at BSI.
   NOTE(review): excerpt is non-contiguous -- braces, `return false`
   lines and the switch on `modifier` are elided; confirm structure in
   the full file.  */
2338 vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
2344 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2345 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2346 tree vectype_out, vectype_in;
2349 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2350 tree fndecl, rhs, new_temp, def, def_stmt, rhs_type, lhs_type;
2351 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2353 int ncopies, j, nargs;
2354 call_expr_arg_iterator iter;
/* Relation between input and output vector widths: WIDEN = output wider,
   NARROW = output narrower, NONE = same number of units.  */
2356 enum { NARROW, NONE, WIDEN } modifier;
2358 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2361 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
2364 /* FORNOW: not yet supported. */
2365 if (STMT_VINFO_LIVE_P (stmt_info))
2367 if (vect_print_dump_info (REPORT_DETAILS))
2368 fprintf (vect_dump, "value used after loop.");
2372 /* Is STMT a vectorizable call? */
2373 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
2376 if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME)
2379 operation = GIMPLE_STMT_OPERAND (stmt, 1);
2380 if (TREE_CODE (operation) != CALL_EXPR)
2383 /* Process function arguments. */
2384 rhs_type = NULL_TREE;
2386 FOR_EACH_CALL_EXPR_ARG (op, iter, operation)
2388 /* Bail out if the function has more than two arguments, we
2389 do not have interesting builtin functions to vectorize with
2390 more than two arguments. */
2394 /* We can only handle calls with arguments of the same type. */
2396 && rhs_type != TREE_TYPE (op))
2398 if (vect_print_dump_info (REPORT_DETAILS))
2399 fprintf (vect_dump, "argument types differ.");
2402 rhs_type = TREE_TYPE (op);
2404 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt[nargs]))
2406 if (vect_print_dump_info (REPORT_DETAILS))
2407 fprintf (vect_dump, "use not simple.");
2414 /* No arguments is also not good. */
/* Derive the input/output vector types and decide the modifier.  */
2418 vectype_in = get_vectype_for_scalar_type (rhs_type);
2419 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2421 lhs_type = TREE_TYPE (GIMPLE_STMT_OPERAND (stmt, 0));
2422 vectype_out = get_vectype_for_scalar_type (lhs_type);
2423 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2426 if (nunits_in == nunits_out / 2)
2428 else if (nunits_out == nunits_in)
2430 else if (nunits_out == nunits_in / 2)
2435 /* For now, we only vectorize functions if a target specific builtin
2436 is available. TODO -- in some cases, it might be profitable to
2437 insert the calls for pieces of the vector, in order to be able
2438 to vectorize other operations in the loop. */
2439 fndecl = vectorizable_function (operation, vectype_out, vectype_in);
2440 if (fndecl == NULL_TREE)
2442 if (vect_print_dump_info (REPORT_DETAILS))
2443 fprintf (vect_dump, "function is not vectorizable.");
/* Const/novops calls must carry no virtual operands.  */
2448 gcc_assert (ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS));
2450 if (modifier == NARROW)
2451 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2453 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2455 /* Sanity check: make sure that at least one copy of the vectorized stmt
2456 needs to be generated. */
2457 gcc_assert (ncopies >= 1);
2459 if (!vec_stmt) /* transformation not required. */
2461 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2462 if (vect_print_dump_info (REPORT_DETAILS))
2463 fprintf (vect_dump, "=== vectorizable_call ===");
2464 vect_model_simple_cost (stmt_info, ncopies, dt);
/* ** Transform. ** */
2470 if (vect_print_dump_info (REPORT_DETAILS))
2471 fprintf (vect_dump, "transform operation.");
2474 scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
2475 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2477 prev_stmt_info = NULL;
/* NOTE(review): presumably the `case NONE:` path (switch labels elided):
   one vector operand per scalar argument, one call per copy.  */
2481 for (j = 0; j < ncopies; ++j)
2483 /* Build argument list for the vectorized call. */
2484 /* FIXME: Rewrite this so that it doesn't
2485 construct a temporary list. */
2488 FOR_EACH_CALL_EXPR_ARG (op, iter, operation)
2492 = vect_get_vec_def_for_operand (op, stmt, NULL);
2495 = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0);
2497 vargs = tree_cons (NULL_TREE, vec_oprnd0, vargs);
2501 vargs = nreverse (vargs);
2503 rhs = build_function_call_expr (fndecl, vargs);
2504 new_stmt = build_gimple_modify_stmt (vec_dest, rhs);
2505 new_temp = make_ssa_name (vec_dest, new_stmt);
2506 GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
2508 vect_finish_stmt_generation (stmt, new_stmt, bsi);
2511 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2513 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2515 prev_stmt_info = vinfo_for_stmt (new_stmt);
/* NOTE(review): presumably the `case NARROW:` path (switch labels
   elided): two input vectors are consumed per argument per copy.  */
2521 for (j = 0; j < ncopies; ++j)
2523 /* Build argument list for the vectorized call. */
2524 /* FIXME: Rewrite this so that it doesn't
2525 construct a temporary list. */
2528 FOR_EACH_CALL_EXPR_ARG (op, iter, operation)
2533 = vect_get_vec_def_for_operand (op, stmt, NULL);
2535 = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0);
2540 = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd1);
2542 = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0);
2545 vargs = tree_cons (NULL_TREE, vec_oprnd0, vargs);
2546 vargs = tree_cons (NULL_TREE, vec_oprnd1, vargs);
2550 vargs = nreverse (vargs);
2552 rhs = build_function_call_expr (fndecl, vargs);
2553 new_stmt = build_gimple_modify_stmt (vec_dest, rhs);
2554 new_temp = make_ssa_name (vec_dest, new_stmt);
2555 GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
2557 vect_finish_stmt_generation (stmt, new_stmt, bsi);
2560 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2562 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2564 prev_stmt_info = vinfo_for_stmt (new_stmt);
2567 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2572 /* No current target implements this case. */
2576 /* The call in STMT might prevent it from being removed in dce.
2577 We however cannot remove it here, due to the way the ssa name
2578 it defines is mapped to the new definition. So just replace
2579 rhs of the statement with something harmless. */
2580 type = TREE_TYPE (scalar_dest);
2581 GIMPLE_STMT_OPERAND (stmt, 1) = fold_convert (type, integer_zero_node);
2588 /* Function vect_gen_widened_results_half
2590 Create a vector stmt whose code, type, number of arguments, and result
2591 variable are CODE, VECTYPE, OP_TYPE, and VEC_DEST, and its arguments are
2592 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
2593 In the case that CODE is a CALL_EXPR, this means that a call to DECL
2594 needs to be created (DECL is a function-decl of a target-builtin).
2595 STMT is the original scalar stmt that we are vectorizing. */
/* Emit one half of a widened vector result (see header comment above):
   build either a target builtin call (CODE == CALL_EXPR, via DECL) or a
   generic unary/binary expression, assign it to a fresh SSA name based on
   VEC_DEST, insert it at BSI, and (elided here) return the new stmt.
   NOTE(review): excerpt is non-contiguous; braces/return are elided.  */
2598 vect_gen_widened_results_half (enum tree_code code, tree vectype, tree decl,
2599 tree vec_oprnd0, tree vec_oprnd1, int op_type,
2600 tree vec_dest, block_stmt_iterator *bsi,
2609 /* Generate half of the widened result: */
2610 if (code == CALL_EXPR)
2612 /* Target specific support */
2613 if (op_type == binary_op)
2614 expr = build_call_expr (decl, 2, vec_oprnd0, vec_oprnd1);
2616 expr = build_call_expr (decl, 1, vec_oprnd0);
2620 /* Generic support */
2621 gcc_assert (op_type == TREE_CODE_LENGTH (code));
2622 if (op_type == binary_op)
2623 expr = build2 (code, vectype, vec_oprnd0, vec_oprnd1);
2625 expr = build1 (code, vectype, vec_oprnd0);
2627 new_stmt = build_gimple_modify_stmt (vec_dest, expr);
2628 new_temp = make_ssa_name (vec_dest, new_stmt);
2629 GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
2630 vect_finish_stmt_generation (stmt, new_stmt, bsi);
/* A builtin call may introduce virtual operands; queue their symbols for
   SSA renaming.  */
2632 if (code == CALL_EXPR)
2634 FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, SSA_OP_ALL_VIRTUALS)
2636 if (TREE_CODE (sym) == SSA_NAME)
2637 sym = SSA_NAME_VAR (sym);
2638 mark_sym_for_renaming (sym);
2646 /* Function vectorizable_conversion.
2648 Check if STMT performs a conversion operation, that can be vectorized.
2649 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2650 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2651 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
/* Check if STMT is an int<->float conversion (FIX_TRUNC_EXPR/FLOAT_EXPR)
   that can be vectorized; if VEC_STMT is passed, emit the vectorized
   conversion(s) at BSI.  Handles same-width (NONE), widening (WIDEN) and
   narrowing (NARROW) conversions.
   NOTE(review): excerpt is non-contiguous -- braces, `return false`s and
   the switch on `modifier` are elided; confirm structure in full file.  */
2654 vectorizable_conversion (tree stmt, block_stmt_iterator * bsi,
2661 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2662 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2663 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2664 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2665 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2668 enum vect_def_type dt0;
2670 stmt_vec_info prev_stmt_info;
2673 tree vectype_out, vectype_in;
2676 tree rhs_type, lhs_type;
2678 enum { NARROW, NONE, WIDEN } modifier;
2680 /* Is STMT a vectorizable conversion? */
2682 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2685 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
2688 if (STMT_VINFO_LIVE_P (stmt_info))
2690 /* FORNOW: not yet supported. */
2691 if (vect_print_dump_info (REPORT_DETAILS))
2692 fprintf (vect_dump, "value used after loop.");
2696 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
2699 if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME)
2702 operation = GIMPLE_STMT_OPERAND (stmt, 1);
2703 code = TREE_CODE (operation);
2704 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
2707 /* Check types of lhs and rhs */
2708 op0 = TREE_OPERAND (operation, 0);
2709 rhs_type = TREE_TYPE (op0);
2710 vectype_in = get_vectype_for_scalar_type (rhs_type);
2711 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2713 scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
2714 lhs_type = TREE_TYPE (scalar_dest);
2715 vectype_out = get_vectype_for_scalar_type (lhs_type);
2716 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
/* Classify by relative vector widths (assignments to `modifier` elided).  */
2719 if (nunits_in == nunits_out / 2)
2721 else if (nunits_out == nunits_in)
2723 else if (nunits_out == nunits_in / 2)
2728 if (modifier == NONE)
2729 gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out);
2731 /* Bail out if the types are both integral or non-integral */
2732 if ((INTEGRAL_TYPE_P (rhs_type) && INTEGRAL_TYPE_P (lhs_type))
2733 || (!INTEGRAL_TYPE_P (rhs_type) && !INTEGRAL_TYPE_P (lhs_type)))
2736 if (modifier == NARROW)
2737 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2739 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2741 /* Sanity check: make sure that at least one copy of the vectorized stmt
2742 needs to be generated. */
2743 gcc_assert (ncopies >= 1);
2745 /* Check the operands of the operation. */
2746 if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt0))
2748 if (vect_print_dump_info (REPORT_DETAILS))
2749 fprintf (vect_dump, "use not simple.");
2753 /* Supportable by target? */
2754 if ((modifier == NONE
2755 && !targetm.vectorize.builtin_conversion (code, vectype_in))
2756 || (modifier == WIDEN
2757 && !supportable_widening_operation (code, stmt, vectype_in,
2760 || (modifier == NARROW
2761 && !supportable_narrowing_operation (code, stmt, vectype_in,
2764 if (vect_print_dump_info (REPORT_DETAILS))
2765 fprintf (vect_dump, "op not supported by target.");
/* For widen/narrow, defs are obtained relative to the input vectype.  */
2769 if (modifier != NONE)
2770 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2772 if (!vec_stmt) /* transformation not required. */
2774 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
/* ** Transform. ** */
2779 if (vect_print_dump_info (REPORT_DETAILS))
2780 fprintf (vect_dump, "transform conversion.");
2783 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2785 prev_stmt_info = NULL;
/* NOTE(review): presumably the `case NONE:` path (switch labels elided):
   one target-builtin call per copy.  */
2789 for (j = 0; j < ncopies; j++)
2795 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2797 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
2800 targetm.vectorize.builtin_conversion (code, vectype_in);
2801 new_stmt = build_call_expr (builtin_decl, 1, vec_oprnd0);
2803 /* Arguments are ready. create the new vector stmt. */
2804 new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt);
2805 new_temp = make_ssa_name (vec_dest, new_stmt);
2806 GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
2807 vect_finish_stmt_generation (stmt, new_stmt, bsi);
/* Builtin call may add virtual operands: queue symbols for renaming.  */
2808 FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, SSA_OP_ALL_VIRTUALS)
2810 if (TREE_CODE (sym) == SSA_NAME)
2811 sym = SSA_NAME_VAR (sym);
2812 mark_sym_for_renaming (sym);
2816 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2818 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2819 prev_stmt_info = vinfo_for_stmt (new_stmt);
/* NOTE(review): presumably the `case WIDEN:` path (labels elided).  */
2824 /* In case the vectorization factor (VF) is bigger than the number
2825 of elements that we can fit in a vectype (nunits), we have to
2826 generate more than one vector stmt - i.e - we need to "unroll"
2827 the vector stmt by a factor VF/nunits. */
2828 for (j = 0; j < ncopies; j++)
2831 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2833 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
2835 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2837 /* Generate first half of the widened result: */
2839 = vect_gen_widened_results_half (code1, vectype_out, decl1,
2840 vec_oprnd0, vec_oprnd1,
2841 unary_op, vec_dest, bsi, stmt);
2843 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2845 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2846 prev_stmt_info = vinfo_for_stmt (new_stmt);
2848 /* Generate second half of the widened result: */
2850 = vect_gen_widened_results_half (code2, vectype_out, decl2,
2851 vec_oprnd0, vec_oprnd1,
2852 unary_op, vec_dest, bsi, stmt);
2853 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2854 prev_stmt_info = vinfo_for_stmt (new_stmt);
/* NOTE(review): presumably the `case NARROW:` path (labels elided):
   two input vectors are combined into one output vector per copy.  */
2859 /* In case the vectorization factor (VF) is bigger than the number
2860 of elements that we can fit in a vectype (nunits), we have to
2861 generate more than one vector stmt - i.e - we need to "unroll"
2862 the vector stmt by a factor VF/nunits. */
2863 for (j = 0; j < ncopies; j++)
2868 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2869 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
2873 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd1);
2874 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
2877 /* Arguments are ready. Create the new vector stmt. */
2878 expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1);
2879 new_stmt = build_gimple_modify_stmt (vec_dest, expr);
2880 new_temp = make_ssa_name (vec_dest, new_stmt);
2881 GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
2882 vect_finish_stmt_generation (stmt, new_stmt, bsi);
2885 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2887 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2889 prev_stmt_info = vinfo_for_stmt (new_stmt);
2892 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2898 /* Function vectorizable_assignment.
2900 Check if STMT performs an assignment (copy) that can be vectorized.
2901 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2902 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2903 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
/* Check if STMT is a plain SSA copy that can be vectorized; if VEC_STMT
   is passed, emit the single vectorized copy at BSI.
   NOTE(review): excerpt is non-contiguous; braces and `return false`
   lines are elided.  */
2906 vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
2912 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2913 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2914 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2917 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2918 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2919 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2921 gcc_assert (ncopies >= 1);
/* NOTE(review): the guarding condition for this early return is elided
   in this excerpt -- presumably a multiple-copies (ncopies > 1) check,
   unsupported FORNOW; confirm against the full file.  */
2923 return false; /* FORNOW */
2925 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2928 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
2931 /* FORNOW: not yet supported. */
2932 if (STMT_VINFO_LIVE_P (stmt_info))
2934 if (vect_print_dump_info (REPORT_DETAILS))
2935 fprintf (vect_dump, "value used after loop.");
2939 /* Is vectorizable assignment? */
2940 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
2943 scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
2944 if (TREE_CODE (scalar_dest) != SSA_NAME)
2947 op = GIMPLE_STMT_OPERAND (stmt, 1);
2948 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt[0]))
2950 if (vect_print_dump_info (REPORT_DETAILS))
2951 fprintf (vect_dump, "use not simple.");
2955 if (!vec_stmt) /* transformation not required. */
2957 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
2958 if (vect_print_dump_info (REPORT_DETAILS))
2959 fprintf (vect_dump, "=== vectorizable_assignment ===");
2960 vect_model_simple_cost (stmt_info, ncopies, dt);
/* ** Transform. ** */
2965 if (vect_print_dump_info (REPORT_DETAILS))
2966 fprintf (vect_dump, "transform assignment.");
2969 vec_dest = vect_create_destination_var (scalar_dest, vectype);
/* Handle use: fetch the vectorized def of the copied operand.  */
2972 op = GIMPLE_STMT_OPERAND (stmt, 1);
2973 vec_oprnd = vect_get_vec_def_for_operand (op, stmt, NULL);
2975 /* Arguments are ready. create the new vector stmt. */
2976 *vec_stmt = build_gimple_modify_stmt (vec_dest, vec_oprnd);
2977 new_temp = make_ssa_name (vec_dest, *vec_stmt);
2978 GIMPLE_STMT_OPERAND (*vec_stmt, 0) = new_temp;
2979 vect_finish_stmt_generation (stmt, *vec_stmt, bsi);
2985 /* Function vect_min_worthwhile_factor.
2987 For a loop where we could vectorize the operation indicated by CODE,
2988 return the minimum vectorization factor that makes it worthwhile
2989 to use generic vectors. */
2991 vect_min_worthwhile_factor (enum tree_code code)
3012 /* Function vectorizable_induction
3014 Check if PHI performs an induction computation that can be vectorized.
3015 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
3016 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
3017 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
/* Check if PHI is an induction that can be vectorized; if VEC_STMT is
   passed, replace it with the vectorized induction produced by
   get_initial_def_for_induction (BSI is unused -- the new code is placed
   in the same basic block as the phi).
   NOTE(review): excerpt is non-contiguous; braces and `return false`
   lines are elided.  */
3020 vectorizable_induction (tree phi, block_stmt_iterator *bsi ATTRIBUTE_UNUSED,
3023 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
3024 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3025 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3026 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3027 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3030 gcc_assert (ncopies >= 1);
3032 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3035 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
3037 if (STMT_VINFO_LIVE_P (stmt_info))
3039 /* FORNOW: not yet supported. */
3040 if (vect_print_dump_info (REPORT_DETAILS))
3041 fprintf (vect_dump, "value used after loop.");
3045 if (TREE_CODE (phi) != PHI_NODE)
3048 if (!vec_stmt) /* transformation not required. */
3050 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
3051 if (vect_print_dump_info (REPORT_DETAILS))
3052 fprintf (vect_dump, "=== vectorizable_induction ===");
3053 vect_model_induction_cost (stmt_info, ncopies);
/* ** Transform. ** */
3059 if (vect_print_dump_info (REPORT_DETAILS))
3060 fprintf (vect_dump, "transform induction phi.");
/* The returned SSA name's defining stmt becomes the vectorized phi.  */
3062 vec_def = get_initial_def_for_induction (phi);
3063 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
3068 /* Function vectorizable_operation.
3070 Check if STMT performs a binary or unary operation that can be vectorized.
3071 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3072 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3073 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3076 vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
3081 tree op0, op1 = NULL;
3082 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3083 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3084 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3085 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3086 enum tree_code code;
3087 enum machine_mode vec_mode;
3092 enum machine_mode optab_op2_mode;
3094 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3096 stmt_vec_info prev_stmt_info;
3097 int nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
3100 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3103 gcc_assert (ncopies >= 1);
3105 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3108 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
3111 /* FORNOW: not yet supported. */
3112 if (STMT_VINFO_LIVE_P (stmt_info))
3114 if (vect_print_dump_info (REPORT_DETAILS))
3115 fprintf (vect_dump, "value used after loop.");
3119 /* Is STMT a vectorizable binary/unary operation? */
3120 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
3123 if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME)
3126 scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
3127 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
3128 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3129 if (nunits_out != nunits_in)
3132 operation = GIMPLE_STMT_OPERAND (stmt, 1);
3133 code = TREE_CODE (operation);
3135 /* For pointer addition, we should use the normal plus for
3136 the vector addition. */
3137 if (code == POINTER_PLUS_EXPR)
3140 optab = optab_for_tree_code (code, vectype);
3142 /* Support only unary or binary operations. */
3143 op_type = TREE_OPERAND_LENGTH (operation);
3144 if (op_type != unary_op && op_type != binary_op)
3146 if (vect_print_dump_info (REPORT_DETAILS))
3147 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
3151 op0 = TREE_OPERAND (operation, 0);
3152 if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0]))
3154 if (vect_print_dump_info (REPORT_DETAILS))
3155 fprintf (vect_dump, "use not simple.");
3159 if (op_type == binary_op)
3161 op1 = TREE_OPERAND (operation, 1);
3162 if (!vect_is_simple_use (op1, loop_vinfo, &def_stmt, &def, &dt[1]))
3164 if (vect_print_dump_info (REPORT_DETAILS))
3165 fprintf (vect_dump, "use not simple.");
3170 /* Supportable by target? */
3173 if (vect_print_dump_info (REPORT_DETAILS))
3174 fprintf (vect_dump, "no optab.");
3177 vec_mode = TYPE_MODE (vectype);
3178 icode = (int) optab->handlers[(int) vec_mode].insn_code;
3179 if (icode == CODE_FOR_nothing)
3181 if (vect_print_dump_info (REPORT_DETAILS))
3182 fprintf (vect_dump, "op not supported by target.");
3183 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
3184 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3185 < vect_min_worthwhile_factor (code))
3187 if (vect_print_dump_info (REPORT_DETAILS))
3188 fprintf (vect_dump, "proceeding using word mode.");
3191 /* Worthwhile without SIMD support? */
3192 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
3193 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3194 < vect_min_worthwhile_factor (code))
3196 if (vect_print_dump_info (REPORT_DETAILS))
3197 fprintf (vect_dump, "not worthwhile without SIMD support.");
3201 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR)
3203 /* FORNOW: not yet supported. */
3204 if (!VECTOR_MODE_P (vec_mode))
3207 /* Invariant argument is needed for a vector shift
3208 by a scalar shift operand. */
3209 optab_op2_mode = insn_data[icode].operand[2].mode;
3210 if (! (VECTOR_MODE_P (optab_op2_mode)
3211 || dt[1] == vect_constant_def
3212 || dt[1] == vect_invariant_def))
3214 if (vect_print_dump_info (REPORT_DETAILS))
3215 fprintf (vect_dump, "operand mode requires invariant argument.");
3220 if (!vec_stmt) /* transformation not required. */
3222 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
3223 if (vect_print_dump_info (REPORT_DETAILS))
3224 fprintf (vect_dump, "=== vectorizable_operation ===");
3225 vect_model_simple_cost (stmt_info, ncopies, dt);
3231 if (vect_print_dump_info (REPORT_DETAILS))
3232 fprintf (vect_dump, "transform binary/unary operation.");
3235 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3237 /* In case the vectorization factor (VF) is bigger than the number
3238 of elements that we can fit in a vectype (nunits), we have to generate
3239 more than one vector stmt - i.e - we need to "unroll" the
3240 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3241 from one copy of the vector stmt to the next, in the field
3242 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3243 stages to find the correct vector defs to be used when vectorizing
3244 stmts that use the defs of the current stmt. The example below illustrates
3245 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3246 4 vectorized stmts):
3248 before vectorization:
3249 RELATED_STMT VEC_STMT
3253 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
3255 RELATED_STMT VEC_STMT
3256 VS1_0: vx0 = memref0 VS1_1 -
3257 VS1_1: vx1 = memref1 VS1_2 -
3258 VS1_2: vx2 = memref2 VS1_3 -
3259 VS1_3: vx3 = memref3 - -
3260 S1: x = load - VS1_0
3263 step2: vectorize stmt S2 (done here):
3264 To vectorize stmt S2 we first need to find the relevant vector
3265 def for the first operand 'x'. This is, as usual, obtained from
3266 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
3267 that defines 'x' (S1). This way we find the stmt VS1_0, and the
3268 relevant vector def 'vx0'. Having found 'vx0' we can generate
3269 the vector stmt VS2_0, and as usual, record it in the
3270 STMT_VINFO_VEC_STMT of stmt S2.
3271 When creating the second copy (VS2_1), we obtain the relevant vector
3272 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
3273 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
3274 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
3275 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
3276 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
3277 chain of stmts and pointers:
3278 RELATED_STMT VEC_STMT
3279 VS1_0: vx0 = memref0 VS1_1 -
3280 VS1_1: vx1 = memref1 VS1_2 -
3281 VS1_2: vx2 = memref2 VS1_3 -
3282 VS1_3: vx3 = memref3 - -
3283 S1: x = load - VS1_0
3284 VS2_0: vz0 = vx0 + v1 VS2_1 -
3285 VS2_1: vz1 = vx1 + v1 VS2_2 -
3286 VS2_2: vz2 = vx2 + v1 VS2_3 -
3287 VS2_3: vz3 = vx3 + v1 - -
3288 S2: z = x + 1 - VS2_0 */
3290 prev_stmt_info = NULL;
3291 for (j = 0; j < ncopies; j++)
3296 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3297 if (op_type == binary_op)
3299 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR)
3301 /* Vector shl and shr insn patterns can be defined with
3302 scalar operand 2 (shift operand). In this case, use
3303 constant or loop invariant op1 directly, without
3304 extending it to vector mode first. */
3305 optab_op2_mode = insn_data[icode].operand[2].mode;
3306 if (!VECTOR_MODE_P (optab_op2_mode))
3308 if (vect_print_dump_info (REPORT_DETAILS))
3309 fprintf (vect_dump, "operand 1 using scalar mode.");
3314 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3319 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3320 if (op_type == binary_op)
3321 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3324 /* Arguments are ready. create the new vector stmt. */
3326 if (op_type == binary_op)
3327 new_stmt = build_gimple_modify_stmt (vec_dest,
3328 build2 (code, vectype, vec_oprnd0, vec_oprnd1));
3330 new_stmt = build_gimple_modify_stmt (vec_dest,
3331 build1 (code, vectype, vec_oprnd0));
3332 new_temp = make_ssa_name (vec_dest, new_stmt);
3333 GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
3334 vect_finish_stmt_generation (stmt, new_stmt, bsi);
3337 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3339 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3340 prev_stmt_info = vinfo_for_stmt (new_stmt);
3347 /* Function vectorizable_type_demotion
3349 Check if STMT performs a binary or unary operation that involves
3350 type demotion, and if it can be vectorized.
3351 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3352 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3353 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
/* NOTE(review): this excerpt is a lossy numbered listing — closing braces
   and return statements are elided; confirm exact control flow against the
   full tree-vect-transform.c before editing code. */
3356 vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi,
3363 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
3364 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3365 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3366 enum tree_code code, code1 = ERROR_MARK;
3369 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3371 stmt_vec_info prev_stmt_info;
/* Only stmts marked relevant and defined inside the loop are handled.  */
3380 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3383 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
3386 /* FORNOW: not yet supported. */
3387 if (STMT_VINFO_LIVE_P (stmt_info))
3389 if (vect_print_dump_info (REPORT_DETAILS))
3390 fprintf (vect_dump, "value used after loop.");
3394 /* Is STMT a vectorizable type-demotion operation? */
3395 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
3398 if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME)
3401 operation = GIMPLE_STMT_OPERAND (stmt, 1);
3402 code = TREE_CODE (operation);
3403 if (code != NOP_EXPR && code != CONVERT_EXPR)
3406 op0 = TREE_OPERAND (operation, 0);
3407 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
3408 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3410 scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
3411 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
3412 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
/* Demotion halves the element width, so the output vector holds twice as
   many elements as the input vector; only that exact 2x ratio is handled.  */
3413 if (nunits_in != nunits_out / 2) /* FORNOW */
3416 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3417 gcc_assert (ncopies >= 1);
/* Both source and destination must be integral, or both floating-point
   under a conversion code.  */
3419 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
3420 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3421 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
3422 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
3423 && (code == NOP_EXPR || code == CONVERT_EXPR))))
3426 /* Check the operands of the operation. */
3427 if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0]))
3429 if (vect_print_dump_info (REPORT_DETAILS))
3430 fprintf (vect_dump, "use not simple.");
3434 /* Supportable by target? */
3435 if (!supportable_narrowing_operation (code, stmt, vectype_in, &code1))
3438 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
3440 if (!vec_stmt) /* transformation not required. */
3442 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3443 if (vect_print_dump_info (REPORT_DETAILS))
3444 fprintf (vect_dump, "=== vectorizable_demotion ===");
3445 vect_model_simple_cost (stmt_info, ncopies, dt);
3450 if (vect_print_dump_info (REPORT_DETAILS))
3451 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
3455 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3457 /* In case the vectorization factor (VF) is bigger than the number
3458 of elements that we can fit in a vectype (nunits), we have to generate
3459 more than one vector stmt - i.e - we need to "unroll" the
3460 vector stmt by a factor VF/nunits. */
3461 prev_stmt_info = NULL;
3462 for (j = 0; j < ncopies; j++)
/* Each narrowed result consumes TWO input vectors, so two input defs are
   fetched per copy (j == 0 uses the operand's def; later copies chain
   through vect_get_vec_def_for_stmt_copy).  */
3467 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3468 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3472 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
3473 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3476 /* Arguments are ready. Create the new vector stmt. */
3477 expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1);
3478 new_stmt = build_gimple_modify_stmt (vec_dest, expr);
3479 new_temp = make_ssa_name (vec_dest, new_stmt);
3480 GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
3481 vect_finish_stmt_generation (stmt, new_stmt, bsi);
/* Record the first copy in VEC_STMT; chain later copies via RELATED_STMT.  */
3484 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3486 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3488 prev_stmt_info = vinfo_for_stmt (new_stmt);
3491 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3496 /* Function vectorizable_type_promotion
3498 Check if STMT performs a binary or unary operation that involves
3499 type promotion, and if it can be vectorized.
3500 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3501 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3502 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
/* NOTE(review): lossy numbered listing — braces/returns elided; verify
   against the full tree-vect-transform.c.  */
3505 vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi,
3511 tree op0, op1 = NULL;
3512 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
3513 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3514 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3515 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3516 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3519 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3521 stmt_vec_info prev_stmt_info;
3529 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3532 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
3535 /* FORNOW: not yet supported. */
3536 if (STMT_VINFO_LIVE_P (stmt_info))
3538 if (vect_print_dump_info (REPORT_DETAILS))
3539 fprintf (vect_dump, "value used after loop.");
3543 /* Is STMT a vectorizable type-promotion operation? */
3544 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
3547 if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME)
3550 operation = GIMPLE_STMT_OPERAND (stmt, 1);
3551 code = TREE_CODE (operation);
/* Widening multiply is handled here in addition to plain conversions.  */
3552 if (code != NOP_EXPR && code != CONVERT_EXPR
3553 && code != WIDEN_MULT_EXPR)
3556 op0 = TREE_OPERAND (operation, 0);
3557 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
3558 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3560 scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
3561 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
3562 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
/* Promotion doubles the element width, so the output vector holds half as
   many elements as the input vector; only that exact ratio is handled.  */
3563 if (nunits_out != nunits_in / 2) /* FORNOW */
3566 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3567 gcc_assert (ncopies >= 1);
3569 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
3570 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3571 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
3572 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
3573 && (code == CONVERT_EXPR || code == NOP_EXPR))))
3576 /* Check the operands of the operation. */
3577 if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt[0]))
3579 if (vect_print_dump_info (REPORT_DETAILS))
3580 fprintf (vect_dump, "use not simple.");
3584 op_type = TREE_CODE_LENGTH (code);
3585 if (op_type == binary_op)
3587 op1 = TREE_OPERAND (operation, 1);
3588 if (!vect_is_simple_use (op1, loop_vinfo, &def_stmt, &def, &dt[1]))
3590 if (vect_print_dump_info (REPORT_DETAILS))
3591 fprintf (vect_dump, "use not simple.");
3596 /* Supportable by target? */
3597 if (!supportable_widening_operation (code, stmt, vectype_in,
3598 &decl1, &decl2, &code1, &code2))
3601 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
3603 if (!vec_stmt) /* transformation not required. */
3605 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3606 if (vect_print_dump_info (REPORT_DETAILS))
3607 fprintf (vect_dump, "=== vectorizable_promotion ===");
/* Cost is 2*ncopies because every copy emits two widening half-stmts.  */
3608 vect_model_simple_cost (stmt_info, 2*ncopies, dt);
3614 if (vect_print_dump_info (REPORT_DETAILS))
3615 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
3619 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3621 /* In case the vectorization factor (VF) is bigger than the number
3622 of elements that we can fit in a vectype (nunits), we have to generate
3623 more than one vector stmt - i.e - we need to "unroll" the
3624 vector stmt by a factor VF/nunits. */
3626 prev_stmt_info = NULL;
3627 for (j = 0; j < ncopies; j++)
3632 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3633 if (op_type == binary_op)
3634 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3638 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3639 if (op_type == binary_op)
3640 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3643 /* Arguments are ready. Create the new vector stmt. We are creating
3644 two vector defs because the widened result does not fit in one vector.
3645 The vectorized stmt can be expressed as a call to a target builtin,
3646 or using a tree-code. */
3647 /* Generate first half of the widened result: */
3648 new_stmt = vect_gen_widened_results_half (code1, vectype_out, decl1,
3649 vec_oprnd0, vec_oprnd1, op_type, vec_dest, bsi, stmt);
3651 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3653 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3654 prev_stmt_info = vinfo_for_stmt (new_stmt);
3656 /* Generate second half of the widened result: */
3657 new_stmt = vect_gen_widened_results_half (code2, vectype_out, decl2,
3658 vec_oprnd0, vec_oprnd1, op_type, vec_dest, bsi, stmt);
3659 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3660 prev_stmt_info = vinfo_for_stmt (new_stmt);
3664 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3669 /* Function vect_strided_store_supported.
3671 Returns TRUE if INTERLEAVE_HIGH and INTERLEAVE_LOW operations are supported,
3672 and FALSE otherwise. */
/* NOTE(review): lossy numbered listing — the FALSE/TRUE return statements
   are elided between the dump calls; verify against the full source.  */
3675 vect_strided_store_supported (tree vectype)
3677 optab interleave_high_optab, interleave_low_optab;
3680 mode = (int) TYPE_MODE (vectype);
3682 /* Check that the operation is supported. */
/* Both the high and the low interleave optabs must exist ...  */
3683 interleave_high_optab = optab_for_tree_code (VEC_INTERLEAVE_HIGH_EXPR,
3685 interleave_low_optab = optab_for_tree_code (VEC_INTERLEAVE_LOW_EXPR,
3687 if (!interleave_high_optab || !interleave_low_optab)
3689 if (vect_print_dump_info (REPORT_DETAILS))
3690 fprintf (vect_dump, "no optab for interleave.");
/* ... and both must have an insn for this vector mode.  */
3694 if (interleave_high_optab->handlers[(int) mode].insn_code
3696 || interleave_low_optab->handlers[(int) mode].insn_code
3697 == CODE_FOR_nothing)
3699 if (vect_print_dump_info (REPORT_DETAILS))
3700 fprintf (vect_dump, "interleave op not supported by target.");
3707 /* Function vect_permute_store_chain.
3709 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
3710 a power of 2, generate interleave_high/low stmts to reorder the data
3711 correctly for the stores. Return the final references for stores in
3714 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
3715 The input is 4 vectors each containing 8 elements. We assign a number to each
3716 element, the input sequence is:
3718 1st vec: 0 1 2 3 4 5 6 7
3719 2nd vec: 8 9 10 11 12 13 14 15
3720 3rd vec: 16 17 18 19 20 21 22 23
3721 4th vec: 24 25 26 27 28 29 30 31
3723 The output sequence should be:
3725 1st vec: 0 8 16 24 1 9 17 25
3726 2nd vec: 2 10 18 26 3 11 19 27
3727 3rd vec: 4 12 20 28 5 13 21 29
3728 4th vec: 6 14 22 30 7 15 23 31
3730 i.e., we interleave the contents of the four vectors in their order.
3732 We use interleave_high/low instructions to create such output. The input of
3733 each interleave_high/low operation is two vectors:
3736 the even elements of the result vector are obtained left-to-right from the
3737 high/low elements of the first vector. The odd elements of the result are
3738 obtained left-to-right from the high/low elements of the second vector.
3739 The output of interleave_high will be: 0 4 1 5
3740 and of interleave_low: 2 6 3 7
3743 The permutation is done in log LENGTH stages. In each stage interleave_high
3744 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
3745 where the first argument is taken from the first half of DR_CHAIN and the
3746 second argument from its second half.
3749 I1: interleave_high (1st vec, 3rd vec)
3750 I2: interleave_low (1st vec, 3rd vec)
3751 I3: interleave_high (2nd vec, 4th vec)
3752 I4: interleave_low (2nd vec, 4th vec)
3754 The output for the first stage is:
3756 I1: 0 16 1 17 2 18 3 19
3757 I2: 4 20 5 21 6 22 7 23
3758 I3: 8 24 9 25 10 26 11 27
3759 I4: 12 28 13 29 14 30 15 31
3761 The output of the second stage, i.e. the final result is:
3763 I1: 0 8 16 24 1 9 17 25
3764 I2: 2 10 18 26 3 11 19 27
3765 I3: 4 12 20 28 5 13 21 29
3766 I4: 6 14 22 30 7 15 23 31. */
/* NOTE(review): lossy numbered listing — braces elided; verify against the
   full tree-vect-transform.c.  */
3769 vect_permute_store_chain (VEC(tree,heap) *dr_chain,
3770 unsigned int length,
3772 block_stmt_iterator *bsi,
3773 VEC(tree,heap) **result_chain)
3775 tree perm_dest, perm_stmt, vect1, vect2, high, low;
3776 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
3777 tree scalar_dest, tmp;
3780 VEC(tree,heap) *first, *second;
3782 scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
3783 first = VEC_alloc (tree, heap, length/2);
3784 second = VEC_alloc (tree, heap, length/2);
3786 /* Check that the operation is supported. */
3787 if (!vect_strided_store_supported (vectype))
3790 *result_chain = VEC_copy (tree, heap, dr_chain);
/* log2(length) permutation stages; each stage pairs element j of the first
   half of DR_CHAIN with element j of the second half.  */
3792 for (i = 0; i < exact_log2 (length); i++)
3794 for (j = 0; j < length/2; j++)
3796 vect1 = VEC_index (tree, dr_chain, j);
3797 vect2 = VEC_index (tree, dr_chain, j+length/2);
3799 /* Create interleaving stmt:
3800 in the case of big endian:
3801 high = interleave_high (vect1, vect2)
3802 and in the case of little endian:
3803 high = interleave_low (vect1, vect2). */
3804 perm_dest = create_tmp_var (vectype, "vect_inter_high");
3805 DECL_GIMPLE_REG_P (perm_dest) = 1;
3806 add_referenced_var (perm_dest);
3807 if (BYTES_BIG_ENDIAN)
3808 tmp = build2 (VEC_INTERLEAVE_HIGH_EXPR, vectype, vect1, vect2);
3810 tmp = build2 (VEC_INTERLEAVE_LOW_EXPR, vectype, vect1, vect2);
3811 perm_stmt = build_gimple_modify_stmt (perm_dest, tmp);
3812 high = make_ssa_name (perm_dest, perm_stmt);
3813 GIMPLE_STMT_OPERAND (perm_stmt, 0) = high;
3814 vect_finish_stmt_generation (stmt, perm_stmt, bsi);
3815 VEC_replace (tree, *result_chain, 2*j, high);
3817 /* Create interleaving stmt:
3818 in the case of big endian:
3819 low = interleave_low (vect1, vect2)
3820 and in the case of little endian:
3821 low = interleave_high (vect1, vect2). */
3822 perm_dest = create_tmp_var (vectype, "vect_inter_low");
3823 DECL_GIMPLE_REG_P (perm_dest) = 1;
3824 add_referenced_var (perm_dest);
3825 if (BYTES_BIG_ENDIAN)
3826 tmp = build2 (VEC_INTERLEAVE_LOW_EXPR, vectype, vect1, vect2);
3828 tmp = build2 (VEC_INTERLEAVE_HIGH_EXPR, vectype, vect1, vect2);
3829 perm_stmt = build_gimple_modify_stmt (perm_dest, tmp);
3830 low = make_ssa_name (perm_dest, perm_stmt);
3831 GIMPLE_STMT_OPERAND (perm_stmt, 0) = low;
3832 vect_finish_stmt_generation (stmt, perm_stmt, bsi);
3833 VEC_replace (tree, *result_chain, 2*j+1, low);
/* The stage's output becomes the next stage's input.  */
3835 dr_chain = VEC_copy (tree, heap, *result_chain);
3841 /* Function vectorizable_store.
3843 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
3845 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3846 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3847 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
/* NOTE(review): lossy numbered listing — braces/returns elided; verify
   against the full tree-vect-transform.c.  */
3850 vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
3855 tree vec_oprnd = NULL_TREE;
3856 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3857 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3858 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3859 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3860 enum machine_mode vec_mode;
/* NOTE(review): 'alignment_support_cheme' is a long-standing misspelling of
   'scheme'; renaming would be a code change, so it is kept as-is here.  */
3862 enum dr_alignment_support alignment_support_cheme;
3864 def_operand_p def_p;
3866 enum vect_def_type dt;
3867 stmt_vec_info prev_stmt_info = NULL;
3868 tree dataref_ptr = NULL_TREE;
3869 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3870 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3872 tree next_stmt, first_stmt;
3873 bool strided_store = false;
3874 unsigned int group_size, i;
3875 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3876 gcc_assert (ncopies >= 1);
3878 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3881 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
3884 if (STMT_VINFO_LIVE_P (stmt_info))
3886 if (vect_print_dump_info (REPORT_DETAILS))
3887 fprintf (vect_dump, "value used after loop.");
3891 /* Is vectorizable store? */
3893 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
3896 scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
3897 if (TREE_CODE (scalar_dest) != ARRAY_REF
3898 && TREE_CODE (scalar_dest) != INDIRECT_REF
3899 && !DR_GROUP_FIRST_DR (stmt_info))
3902 op = GIMPLE_STMT_OPERAND (stmt, 1);
3903 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt))
3905 if (vect_print_dump_info (REPORT_DETAILS))
3906 fprintf (vect_dump, "use not simple.");
3910 vec_mode = TYPE_MODE (vectype);
3911 /* FORNOW. In some cases can vectorize even if data-type not supported
3912 (e.g. - array initialization with 0). */
3913 if (mov_optab->handlers[(int)vec_mode].insn_code == CODE_FOR_nothing)
3916 if (!STMT_VINFO_DATA_REF (stmt_info))
/* A non-NULL DR_GROUP_FIRST_DR marks this store as part of an
   interleaving (strided-access) group.  */
3919 if (DR_GROUP_FIRST_DR (stmt_info))
3921 strided_store = true;
3922 if (!vect_strided_store_supported (vectype))
3926 if (!vec_stmt) /* transformation not required. */
3928 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3929 vect_model_store_cost (stmt_info, ncopies, dt);
3937 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3938 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3939 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3941 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3943 /* We vectorize all the stmts of the interleaving group when we
3944 reach the last stmt in the group. */
3945 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3946 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt)))
3948 *vec_stmt = NULL_TREE;
3959 if (vect_print_dump_info (REPORT_DETAILS))
3960 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3962 dr_chain = VEC_alloc (tree, heap, group_size);
3963 oprnds = VEC_alloc (tree, heap, group_size);
3965 alignment_support_cheme = vect_supportable_dr_alignment (first_dr);
3966 gcc_assert (alignment_support_cheme);
3967 gcc_assert (alignment_support_cheme == dr_aligned); /* FORNOW */
3969 /* In case the vectorization factor (VF) is bigger than the number
3970 of elements that we can fit in a vectype (nunits), we have to generate
3971 more than one vector stmt - i.e - we need to "unroll" the
3972 vector stmt by a factor VF/nunits. For more details see documentation in
3973 vect_get_vec_def_for_copy_stmt. */
3975 /* In case of interleaving (non-unit strided access):
3982 We create vectorized stores starting from base address (the access of the
3983 first stmt in the chain (S2 in the above example), when the last store stmt
3984 of the chain (S4) is reached:
3987 VS2: &base + vec_size*1 = vx0
3988 VS3: &base + vec_size*2 = vx1
3989 VS4: &base + vec_size*3 = vx3
3991 Then permutation statements are generated:
3993 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3994 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3997 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3998 (the order of the data-refs in the output of vect_permute_store_chain
3999 corresponds to the order of scalar stmts in the interleaving chain - see
4000 the documentation of vect_permute_store_chain()).
4002 In case of both multiple types and interleaving, above vector stores and
4003 permutation stmts are created for every copy. The result vector stmts are
4004 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
4005 STMT_VINFO_RELATED_STMT for the next copies.
4008 prev_stmt_info = NULL;
4009 for (j = 0; j < ncopies; j++)
4016 /* For interleaved stores we collect vectorized defs for all the
4017 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then used
4018 as an input to vect_permute_store_chain(), and OPRNDS as an input
4019 to vect_get_vec_def_for_stmt_copy() for the next copy.
4020 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
4021 OPRNDS are of size 1. */
4022 next_stmt = first_stmt;
4023 for (i = 0; i < group_size; i++)
4025 /* Since gaps are not supported for interleaved stores, GROUP_SIZE
4026 is the exact number of stmts in the chain. Therefore, NEXT_STMT
4027 can't be NULL_TREE. In case that there is no interleaving,
4028 GROUP_SIZE is 1, and only one iteration of the loop will be
4030 gcc_assert (next_stmt);
4031 op = GIMPLE_STMT_OPERAND (next_stmt, 1);
4032 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt, NULL);
4033 VEC_quick_push(tree, dr_chain, vec_oprnd);
4034 VEC_quick_push(tree, oprnds, vec_oprnd);
4035 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
4037 dataref_ptr = vect_create_data_ref_ptr (first_stmt, bsi, NULL_TREE,
4038 &dummy, &ptr_incr, false,
4039 TREE_TYPE (vec_oprnd));
4043 /* For interleaved stores we created vectorized defs for all the
4044 defs stored in OPRNDS in the previous iteration (previous copy).
4045 DR_CHAIN is then used as an input to vect_permute_store_chain(),
4046 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
4048 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
4049 OPRNDS are of size 1. */
4050 for (i = 0; i < group_size; i++)
4052 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt,
4053 VEC_index (tree, oprnds, i));
4054 VEC_replace(tree, dr_chain, i, vec_oprnd);
4055 VEC_replace(tree, oprnds, i, vec_oprnd);
4057 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt);
4062 result_chain = VEC_alloc (tree, heap, group_size);
4064 if (!vect_permute_store_chain (dr_chain, group_size, stmt, bsi,
4069 next_stmt = first_stmt;
4070 for (i = 0; i < group_size; i++)
4072 /* For strided stores vectorized defs are interleaved in
4073 vect_permute_store_chain(). */
4075 vec_oprnd = VEC_index(tree, result_chain, i);
4077 data_ref = build_fold_indirect_ref (dataref_ptr);
4078 /* Arguments are ready. Create the new vector stmt. */
4079 new_stmt = build_gimple_modify_stmt (data_ref, vec_oprnd);
4080 vect_finish_stmt_generation (stmt, new_stmt, bsi);
4082 /* Set the VDEFs for the vector pointer. If this virtual def
4083 has a use outside the loop and a loop peel is performed
4084 then the def may be renamed by the peel. Mark it for
4085 renaming so the later use will also be renamed. */
4086 copy_virtual_operands (new_stmt, next_stmt);
4089 /* The original store is deleted so the same SSA_NAMEs
4091 FOR_EACH_SSA_TREE_OPERAND (def, next_stmt, iter, SSA_OP_VDEF)
4093 SSA_NAME_DEF_STMT (def) = new_stmt;
4094 mark_sym_for_renaming (SSA_NAME_VAR (def));
4097 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4101 /* Create new names for all the definitions created by COPY and
4102 add replacement mappings for each new name. */
4103 FOR_EACH_SSA_DEF_OPERAND (def_p, new_stmt, iter, SSA_OP_VDEF)
4105 create_new_def_for (DEF_FROM_PTR (def_p), new_stmt, def_p);
4106 mark_sym_for_renaming (SSA_NAME_VAR (DEF_FROM_PTR (def_p)));
4109 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4112 prev_stmt_info = vinfo_for_stmt (new_stmt);
4113 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
4116 /* Bump the vector pointer. */
4117 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt);
4125 /* Function vect_setup_realignment
4127 This function is called when vectorizing an unaligned load using
4128 the dr_unaligned_software_pipeline scheme.
4129 This function generates the following code at the loop prolog:
4132 msq_init = *(floor(p)); # prolog load
4133 realignment_token = call target_builtin;
4135 msq = phi (msq_init, ---)
4137 The code above sets up a new (vector) pointer, pointing to the first
4138 location accessed by STMT, and a "floor-aligned" load using that pointer.
4139 It also generates code to compute the "realignment-token" (if the relevant
4140 target hook was defined), and creates a phi-node at the loop-header bb
4141 whose arguments are the result of the prolog-load (created by this
4142 function) and the result of a load that takes place in the loop (to be
4143 created by the caller to this function).
4144 The caller to this function uses the phi-result (msq) to create the
4145 realignment code inside the loop, and sets up the missing phi argument,
4149 msq = phi (msq_init, lsq)
4150 lsq = *(floor(p')); # load in loop
4151 result = realign_load (msq, lsq, realignment_token);
4154 STMT - (scalar) load stmt to be vectorized. This load accesses
4155 a memory location that may be unaligned.
4156 BSI - place where new code is to be inserted.
4159 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
4160 target hook, if defined.
4161 Return value - the result of the loop-header phi node. */
/* NOTE(review): lossy numbered listing — some declarations/braces elided;
   verify against the full tree-vect-transform.c.  */
4164 vect_setup_realignment (tree stmt, block_stmt_iterator *bsi,
4165 tree *realignment_token)
4167 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4168 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4169 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4170 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4171 edge pe = loop_preheader_edge (loop);
4172 tree scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
4185 /* 1. Create msq_init = *(floor(p1)) in the loop preheader */
4186 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4187 ptr = vect_create_data_ref_ptr (stmt, bsi, NULL_TREE, &init_addr, &inc, true,
/* ALIGN_INDIRECT_REF performs the "floor" (round-down-to-alignment) load.  */
4189 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
4190 new_stmt = build_gimple_modify_stmt (vec_dest, data_ref);
4191 new_temp = make_ssa_name (vec_dest, new_stmt);
4192 GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
/* Insertion on the preheader edge must not split it into a new bb.  */
4193 new_bb = bsi_insert_on_edge_immediate (pe, new_stmt);
4194 gcc_assert (!new_bb);
4195 msq_init = GIMPLE_STMT_OPERAND (new_stmt, 0);
4196 copy_virtual_operands (new_stmt, stmt);
4197 update_vuses_to_preheader (new_stmt, loop);
4199 /* 2. Create permutation mask, if required, in loop preheader. */
4200 if (targetm.vectorize.builtin_mask_for_load)
4204 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
4205 new_stmt = build_call_expr (builtin_decl, 1, init_addr);
4206 vec_dest = vect_create_destination_var (scalar_dest,
4207 TREE_TYPE (new_stmt));
4208 new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt);
4209 new_temp = make_ssa_name (vec_dest, new_stmt);
4210 GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
4211 new_bb = bsi_insert_on_edge_immediate (pe, new_stmt);
4212 gcc_assert (!new_bb);
4213 *realignment_token = GIMPLE_STMT_OPERAND (new_stmt, 0);
4215 /* The result of the CALL_EXPR to this builtin is determined from
4216 the value of the parameter and no global variables are touched
4217 which makes the builtin a "const" function. Requiring the
4218 builtin to have the "const" attribute makes it unnecessary
4219 to call mark_call_clobbered. */
4220 gcc_assert (TREE_READONLY (builtin_decl));
4223 /* 3. Create msq = phi <msq_init, lsq> in loop */
/* The second phi argument (lsq, the in-loop load) is filled in later by
   the caller; only the preheader argument is added here.  */
4224 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4225 msq = make_ssa_name (vec_dest, NULL_TREE);
4226 phi_stmt = create_phi_node (msq, loop->header);
4227 SSA_NAME_DEF_STMT (msq) = phi_stmt;
4228 add_phi_arg (phi_stmt, msq_init, loop_preheader_edge (loop));
4234 /* Function vect_strided_load_supported.
4236 Returns TRUE if EXTRACT_EVEN and EXTRACT_ODD operations are supported,
4237 and FALSE otherwise. */
/* NOTE(review): lossy numbered listing — the FALSE/TRUE return statements
   are elided between the dump calls; verify against the full source.  */
4240 vect_strided_load_supported (tree vectype)
4242 optab perm_even_optab, perm_odd_optab;
4245 mode = (int) TYPE_MODE (vectype);
/* Both even and odd extraction must have optabs with an insn for this
   vector mode.  */
4247 perm_even_optab = optab_for_tree_code (VEC_EXTRACT_EVEN_EXPR, vectype);
4248 if (!perm_even_optab)
4250 if (vect_print_dump_info (REPORT_DETAILS))
4251 fprintf (vect_dump, "no optab for perm_even.");
4255 if (perm_even_optab->handlers[mode].insn_code == CODE_FOR_nothing)
4257 if (vect_print_dump_info (REPORT_DETAILS))
4258 fprintf (vect_dump, "perm_even op not supported by target.");
4262 perm_odd_optab = optab_for_tree_code (VEC_EXTRACT_ODD_EXPR, vectype);
4263 if (!perm_odd_optab)
4265 if (vect_print_dump_info (REPORT_DETAILS))
4266 fprintf (vect_dump, "no optab for perm_odd.");
4270 if (perm_odd_optab->handlers[mode].insn_code == CODE_FOR_nothing)
4272 if (vect_print_dump_info (REPORT_DETAILS))
4273 fprintf (vect_dump, "perm_odd op not supported by target.");
4280 /* Function vect_permute_load_chain.
4282 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
4283 a power of 2, generate extract_even/odd stmts to reorder the input data
4284 correctly. Return the final references for loads in RESULT_CHAIN.
4286 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
4287 The input is 4 vectors each containing 8 elements. We assign a number to each
4288 element, the input sequence is:
4290 1st vec: 0 1 2 3 4 5 6 7
4291 2nd vec: 8 9 10 11 12 13 14 15
4292 3rd vec: 16 17 18 19 20 21 22 23
4293 4th vec: 24 25 26 27 28 29 30 31
4295 The output sequence should be:
4297 1st vec: 0 4 8 12 16 20 24 28
4298 2nd vec: 1 5 9 13 17 21 25 29
4299 3rd vec: 2 6 10 14 18 22 26 30
4300 4th vec: 3 7 11 15 19 23 27 31
4302 i.e., the first output vector should contain the first elements of each
4303 interleaving group, etc.
4305 We use extract_even/odd instructions to create such output. The input of each
4306 extract_even/odd operation is two vectors
4310 and the output is the vector of extracted even/odd elements. The output of
4311 extract_even will be: 0 2 4 6
4312 and of extract_odd: 1 3 5 7
4315 The permutation is done in log LENGTH stages. In each stage extract_even and
4316 extract_odd stmts are created for each pair of vectors in DR_CHAIN in their
4317 order. In our example,
4319 E1: extract_even (1st vec, 2nd vec)
4320 E2: extract_odd (1st vec, 2nd vec)
4321 E3: extract_even (3rd vec, 4th vec)
4322 E4: extract_odd (3rd vec, 4th vec)
4324 The output for the first stage will be:
4326 E1: 0 2 4 6 8 10 12 14
4327 E2: 1 3 5 7 9 11 13 15
4328 E3: 16 18 20 22 24 26 28 30
4329 E4: 17 19 21 23 25 27 29 31
4331 In order to proceed and create the correct sequence for the next stage (or
4332 for the correct output, if the second stage is the last one, as in our
4333 example), we first put the output of extract_even operation and then the
4334 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
4335 The input for the second stage is:
4337 1st vec (E1): 0 2 4 6 8 10 12 14
4338 2nd vec (E3): 16 18 20 22 24 26 28 30
4339 3rd vec (E2): 1 3 5 7 9 11 13 15
4340 4th vec (E4): 17 19 21 23 25 27 29 31
4342 The output of the second stage:
4344 E1: 0 4 8 12 16 20 24 28
4345 E2: 2 6 10 14 18 22 26 30
4346 E3: 1 5 9 13 17 21 25 29
4347 E4: 3 7 11 15 19 23 27 31
4349 And RESULT_CHAIN after reordering:
4351 1st vec (E1): 0 4 8 12 16 20 24 28
4352 2nd vec (E3): 1 5 9 13 17 21 25 29
4353 3rd vec (E2): 2 6 10 14 18 22 26 30
4354 4th vec (E4): 3 7 11 15 19 23 27 31. */
4357 vect_permute_load_chain (VEC(tree,heap) *dr_chain,
4358 unsigned int length,
4360 block_stmt_iterator *bsi,
4361 VEC(tree,heap) **result_chain)
4363 tree perm_dest, perm_stmt, data_ref, first_vect, second_vect;
4364 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
4369 /* Check that the operation is supported. */
4370 if (!vect_strided_load_supported (vectype))
4373 *result_chain = VEC_copy (tree, heap, dr_chain);
4374 for (i = 0; i < exact_log2 (length); i++)
4376 for (j = 0; j < length; j +=2)
4378 first_vect = VEC_index (tree, dr_chain, j);
4379 second_vect = VEC_index (tree, dr_chain, j+1);
4381 /* data_ref = permute_even (first_data_ref, second_data_ref); */
4382 perm_dest = create_tmp_var (vectype, "vect_perm_even");
4383 DECL_GIMPLE_REG_P (perm_dest) = 1;
4384 add_referenced_var (perm_dest);
4386 tmp = build2 (VEC_EXTRACT_EVEN_EXPR, vectype,
4387 first_vect, second_vect);
4388 perm_stmt = build_gimple_modify_stmt (perm_dest, tmp);
4390 data_ref = make_ssa_name (perm_dest, perm_stmt);
4391 GIMPLE_STMT_OPERAND (perm_stmt, 0) = data_ref;
4392 vect_finish_stmt_generation (stmt, perm_stmt, bsi);
4393 mark_symbols_for_renaming (perm_stmt);
4395 VEC_replace (tree, *result_chain, j/2, data_ref);
4397 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
4398 perm_dest = create_tmp_var (vectype, "vect_perm_odd");
4399 DECL_GIMPLE_REG_P (perm_dest) = 1;
4400 add_referenced_var (perm_dest);
4402 tmp = build2 (VEC_EXTRACT_ODD_EXPR, vectype,
4403 first_vect, second_vect);
4404 perm_stmt = build_gimple_modify_stmt (perm_dest, tmp);
4405 data_ref = make_ssa_name (perm_dest, perm_stmt);
4406 GIMPLE_STMT_OPERAND (perm_stmt, 0) = data_ref;
4407 vect_finish_stmt_generation (stmt, perm_stmt, bsi);
4408 mark_symbols_for_renaming (perm_stmt);
4410 VEC_replace (tree, *result_chain, j/2+length/2, data_ref);
4412 dr_chain = VEC_copy (tree, heap, *result_chain);
4418 /* Function vect_transform_strided_load.
4420 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
4421 to perform their permutation and ascribe the result vectorized statements to
4422 the scalar statements.
4426 vect_transform_strided_load (tree stmt, VEC(tree,heap) *dr_chain, int size,
4427 block_stmt_iterator *bsi)
4429 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4430 tree first_stmt = DR_GROUP_FIRST_DR (stmt_info);
4431 tree next_stmt, new_stmt;
4432 VEC(tree,heap) *result_chain = NULL;
4433 unsigned int i, gap_count;
4436 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
4437 RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
4438 vectors, that are ready for vector computation. */
4439 result_chain = VEC_alloc (tree, heap, size);
4441 if (!vect_permute_load_chain (dr_chain, size, stmt, bsi, &result_chain))
4444 /* Put a permuted data-ref in the VECTORIZED_STMT field.
4445 Since we scan the chain starting from it's first node, their order
4446 corresponds the order of data-refs in RESULT_CHAIN. */
4447 next_stmt = first_stmt;
4449 for (i = 0; VEC_iterate (tree, result_chain, i, tmp_data_ref); i++)
4454 /* Skip the gaps. Loads created for the gaps will be removed by dead
4455 code elimination pass later.
4456 DR_GROUP_GAP is the number of steps in elements from the previous
4457 access (if there is no gap DR_GROUP_GAP is 1). We skip loads that
4458 correspond to the gaps.
4460 if (gap_count < DR_GROUP_GAP (vinfo_for_stmt (next_stmt)))
4468 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
4469 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
4470 copies, and we put the new vector statement in the first available
4472 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
4473 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
4476 tree prev_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
4477 tree rel_stmt = STMT_VINFO_RELATED_STMT (
4478 vinfo_for_stmt (prev_stmt));
4481 prev_stmt = rel_stmt;
4482 rel_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
4484 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) = new_stmt;
4486 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
4488 /* If NEXT_STMT accesses the same DR as the previous statement,
4489 put the same TMP_DATA_REF as its vectorized statement; otherwise
4490 get the next data-ref from RESULT_CHAIN. */
4491 if (!next_stmt || !DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
4499 /* vectorizable_load.
4501 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
4503 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4504 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4505 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4508 vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
4511 tree vec_dest = NULL;
4512 tree data_ref = NULL;
4514 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4515 stmt_vec_info prev_stmt_info;
4516 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4517 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4518 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
4519 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4522 tree new_stmt = NULL_TREE;
4524 enum dr_alignment_support alignment_support_cheme;
4525 tree dataref_ptr = NULL_TREE;
4527 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4528 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4529 int i, j, group_size;
4530 tree msq = NULL_TREE, lsq;
4531 tree offset = NULL_TREE;
4532 tree realignment_token = NULL_TREE;
4533 tree phi_stmt = NULL_TREE;
4534 VEC(tree,heap) *dr_chain = NULL;
4535 bool strided_load = false;
4538 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4541 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
4544 /* FORNOW: not yet supported. */
4545 if (STMT_VINFO_LIVE_P (stmt_info))
4547 if (vect_print_dump_info (REPORT_DETAILS))
4548 fprintf (vect_dump, "value used after loop.");
4552 /* Is vectorizable load? */
4553 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
4556 scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
4557 if (TREE_CODE (scalar_dest) != SSA_NAME)
4560 op = GIMPLE_STMT_OPERAND (stmt, 1);
4561 if (TREE_CODE (op) != ARRAY_REF
4562 && TREE_CODE (op) != INDIRECT_REF
4563 && !DR_GROUP_FIRST_DR (stmt_info))
4566 if (!STMT_VINFO_DATA_REF (stmt_info))
4569 mode = (int) TYPE_MODE (vectype);
4571 /* FORNOW. In some cases can vectorize even if data-type not supported
4572 (e.g. - data copies). */
4573 if (mov_optab->handlers[mode].insn_code == CODE_FOR_nothing)
4575 if (vect_print_dump_info (REPORT_DETAILS))
4576 fprintf (vect_dump, "Aligned load, but unsupported type.");
4580 /* Check if the load is a part of an interleaving chain. */
4581 if (DR_GROUP_FIRST_DR (stmt_info))
4583 strided_load = true;
4585 /* Check if interleaving is supported. */
4586 if (!vect_strided_load_supported (vectype))
4590 if (!vec_stmt) /* transformation not required. */
4592 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
4593 vect_model_load_cost (stmt_info, ncopies);
4597 if (vect_print_dump_info (REPORT_DETAILS))
4598 fprintf (vect_dump, "transform load.");
4604 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
4605 /* Check if the chain of loads is already vectorized. */
4606 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
4608 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4611 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
4612 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
4613 dr_chain = VEC_alloc (tree, heap, group_size);
4622 alignment_support_cheme = vect_supportable_dr_alignment (first_dr);
4623 gcc_assert (alignment_support_cheme);
4626 /* In case the vectorization factor (VF) is bigger than the number
4627 of elements that we can fit in a vectype (nunits), we have to generate
4628 more than one vector stmt - i.e - we need to "unroll" the
4629 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4630 from one copy of the vector stmt to the next, in the field
4631 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4632 stages to find the correct vector defs to be used when vectorizing
4633 stmts that use the defs of the current stmt. The example below illustrates
4634 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
4635 4 vectorized stmts):
4637 before vectorization:
4638 RELATED_STMT VEC_STMT
4642 step 1: vectorize stmt S1:
4643 We first create the vector stmt VS1_0, and, as usual, record a
4644 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
4645 Next, we create the vector stmt VS1_1, and record a pointer to
4646 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
4647 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
4649 RELATED_STMT VEC_STMT
4650 VS1_0: vx0 = memref0 VS1_1 -
4651 VS1_1: vx1 = memref1 VS1_2 -
4652 VS1_2: vx2 = memref2 VS1_3 -
4653 VS1_3: vx3 = memref3 - -
4654 S1: x = load - VS1_0
4657 See in documentation in vect_get_vec_def_for_stmt_copy for how the
4658 information we recorded in RELATED_STMT field is used to vectorize
4661 /* In case of interleaving (non-unit strided access):
4668 Vectorized loads are created in the order of memory accesses
4669 starting from the access of the first stmt of the chain:
4672 VS2: vx1 = &base + vec_size*1
4673 VS3: vx3 = &base + vec_size*2
4674 VS4: vx4 = &base + vec_size*3
4676 Then permutation statements are generated:
4678 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
4679 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
4682 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
4683 (the order of the data-refs in the output of vect_permute_load_chain
4684 corresponds to the order of scalar stmts in the interleaving chain - see
4685 the documentation of vect_permute_load_chain()).
4686 The generation of permutation stmts and recording them in
4687 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
4689 In case of both multiple types and interleaving, the vector loads and
4690 permutation stmts above are created for every copy. The result vector stmts
4691 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
4692 STMT_VINFO_RELATED_STMT for the next copies. */
4694 /* If the data reference is aligned (dr_aligned) or potentially unaligned
4695 on a target that supports unaligned accesses (dr_unaligned_supported)
4696 we generate the following code:
4700 p = p + indx * vectype_size;
4705 Otherwise, the data reference is potentially unaligned on a target that
4706 does not support unaligned accesses (dr_unaligned_software_pipeline) -
4707 then generate the following code, in which the data in each iteration is
4708 obtained by two vector loads, one from the previous iteration, and one
4709 from the current iteration:
4711 msq_init = *(floor(p1))
4712 p2 = initial_addr + VS - 1;
4713 realignment_token = call target_builtin;
4716 p2 = p2 + indx * vectype_size
4718 vec_dest = realign_load (msq, lsq, realignment_token)
4723 if (alignment_support_cheme == dr_unaligned_software_pipeline)
4725 msq = vect_setup_realignment (first_stmt, bsi, &realignment_token);
4726 phi_stmt = SSA_NAME_DEF_STMT (msq);
4727 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4730 prev_stmt_info = NULL;
4731 for (j = 0; j < ncopies; j++)
4733 /* 1. Create the vector pointer update chain. */
4735 dataref_ptr = vect_create_data_ref_ptr (first_stmt, bsi, offset, &dummy,
4736 &ptr_incr, false, NULL_TREE);
4738 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt);
4740 for (i = 0; i < group_size; i++)
4742 /* 2. Create the vector-load in the loop. */
4743 switch (alignment_support_cheme)
4746 gcc_assert (aligned_access_p (first_dr));
4747 data_ref = build_fold_indirect_ref (dataref_ptr);
4749 case dr_unaligned_supported:
4751 int mis = DR_MISALIGNMENT (first_dr);
4752 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
4754 gcc_assert (!aligned_access_p (first_dr));
4755 tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
4757 build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
4760 case dr_unaligned_software_pipeline:
4761 gcc_assert (!aligned_access_p (first_dr));
4762 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
4767 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4768 new_stmt = build_gimple_modify_stmt (vec_dest, data_ref);
4769 new_temp = make_ssa_name (vec_dest, new_stmt);
4770 GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
4771 vect_finish_stmt_generation (stmt, new_stmt, bsi);
4772 copy_virtual_operands (new_stmt, stmt);
4773 mark_symbols_for_renaming (new_stmt);
4775 /* 3. Handle explicit realignment if necessary/supported. */
4776 if (alignment_support_cheme == dr_unaligned_software_pipeline)
4779 <vec_dest = realign_load (msq, lsq, realignment_token)> */
4780 lsq = GIMPLE_STMT_OPERAND (new_stmt, 0);
4781 if (!realignment_token)
4782 realignment_token = dataref_ptr;
4783 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4785 build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq, realignment_token);
4786 new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt);
4787 new_temp = make_ssa_name (vec_dest, new_stmt);
4788 GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
4789 vect_finish_stmt_generation (stmt, new_stmt, bsi);
4790 if (i == group_size - 1 && j == ncopies - 1)
4791 add_phi_arg (phi_stmt, lsq, loop_latch_edge (loop));
4795 VEC_quick_push (tree, dr_chain, new_temp);
4796 if (i < group_size - 1)
4797 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, bsi, stmt);
4802 if (!vect_transform_strided_load (stmt, dr_chain, group_size, bsi))
4804 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4805 dr_chain = VEC_alloc (tree, heap, group_size);
4810 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4812 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4813 prev_stmt_info = vinfo_for_stmt (new_stmt);
4821 /* Function vectorizable_live_operation.
4823 STMT computes a value that is used outside the loop. Check if
4824 it can be supported. */
4827 vectorizable_live_operation (tree stmt,
4828 block_stmt_iterator *bsi ATTRIBUTE_UNUSED,
4829 tree *vec_stmt ATTRIBUTE_UNUSED)
4832 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4833 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4838 enum vect_def_type dt;
4840 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
4842 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
4845 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
4848 if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME)
4851 operation = GIMPLE_STMT_OPERAND (stmt, 1);
4852 op_type = TREE_OPERAND_LENGTH (operation);
4854 /* FORNOW: support only if all uses are invariant. This means
4855 that the scalar operations can remain in place, unvectorized.
4856 The original last scalar value that they compute will be used. */
4858 for (i = 0; i < op_type; i++)
4860 op = TREE_OPERAND (operation, i);
4861 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt))
4863 if (vect_print_dump_info (REPORT_DETAILS))
4864 fprintf (vect_dump, "use not simple.");
4868 if (dt != vect_invariant_def && dt != vect_constant_def)
4872 /* No transformation is required for the cases we currently support. */
4877 /* Function vect_is_simple_cond.
4880 LOOP - the loop that is being vectorized.
4881 COND - Condition that is checked for simple use.
4883 Returns whether a COND can be vectorized. Checks whether
4884 condition operands are supportable using vec_is_simple_use. */
4887 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
4891 enum vect_def_type dt;
4893 if (!COMPARISON_CLASS_P (cond))
4896 lhs = TREE_OPERAND (cond, 0);
4897 rhs = TREE_OPERAND (cond, 1);
4899 if (TREE_CODE (lhs) == SSA_NAME)
4901 tree lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
4902 if (!vect_is_simple_use (lhs, loop_vinfo, &lhs_def_stmt, &def, &dt))
4905 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST)
4908 if (TREE_CODE (rhs) == SSA_NAME)
4910 tree rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
4911 if (!vect_is_simple_use (rhs, loop_vinfo, &rhs_def_stmt, &def, &dt))
4914 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST)
4920 /* vectorizable_condition.
4922 Check if STMT is conditional modify expression that can be vectorized.
4923 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4924 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4927 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4930 vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
4932 tree scalar_dest = NULL_TREE;
4933 tree vec_dest = NULL_TREE;
4934 tree op = NULL_TREE;
4935 tree cond_expr, then_clause, else_clause;
4936 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4937 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4938 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
4939 tree vec_compare, vec_cond_expr;
4941 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4942 enum machine_mode vec_mode;
4944 enum vect_def_type dt;
4945 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4946 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4948 gcc_assert (ncopies >= 1);
4950 return false; /* FORNOW */
4952 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4955 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
4958 /* FORNOW: not yet supported. */
4959 if (STMT_VINFO_LIVE_P (stmt_info))
4961 if (vect_print_dump_info (REPORT_DETAILS))
4962 fprintf (vect_dump, "value used after loop.");
4966 /* Is vectorizable conditional operation? */
4967 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
4970 op = GIMPLE_STMT_OPERAND (stmt, 1);
4972 if (TREE_CODE (op) != COND_EXPR)
4975 cond_expr = TREE_OPERAND (op, 0);
4976 then_clause = TREE_OPERAND (op, 1);
4977 else_clause = TREE_OPERAND (op, 2);
4979 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4982 /* We do not handle two different vector types for the condition
4984 if (TREE_TYPE (TREE_OPERAND (cond_expr, 0)) != TREE_TYPE (vectype))
4987 if (TREE_CODE (then_clause) == SSA_NAME)
4989 tree then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
4990 if (!vect_is_simple_use (then_clause, loop_vinfo,
4991 &then_def_stmt, &def, &dt))
4994 else if (TREE_CODE (then_clause) != INTEGER_CST
4995 && TREE_CODE (then_clause) != REAL_CST)
4998 if (TREE_CODE (else_clause) == SSA_NAME)
5000 tree else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
5001 if (!vect_is_simple_use (else_clause, loop_vinfo,
5002 &else_def_stmt, &def, &dt))
5005 else if (TREE_CODE (else_clause) != INTEGER_CST
5006 && TREE_CODE (else_clause) != REAL_CST)
5010 vec_mode = TYPE_MODE (vectype);
5014 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
5015 return expand_vec_cond_expr_p (op, vec_mode);
5021 scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
5022 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5024 /* Handle cond expr. */
5026 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
5028 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
5029 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
5030 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
5032 /* Arguments are ready. create the new vector stmt. */
5033 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
5034 vec_cond_lhs, vec_cond_rhs);
5035 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
5036 vec_compare, vec_then_clause, vec_else_clause);
5038 *vec_stmt = build_gimple_modify_stmt (vec_dest, vec_cond_expr);
5039 new_temp = make_ssa_name (vec_dest, *vec_stmt);
5040 GIMPLE_STMT_OPERAND (*vec_stmt, 0) = new_temp;
5041 vect_finish_stmt_generation (stmt, *vec_stmt, bsi);
5046 /* Function vect_transform_stmt.
5048 Create a vectorized stmt to replace STMT, and insert it at BSI. */
5051 vect_transform_stmt (tree stmt, block_stmt_iterator *bsi, bool *strided_store)
5053 bool is_store = false;
5054 tree vec_stmt = NULL_TREE;
5055 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5056 tree orig_stmt_in_pattern;
5059 switch (STMT_VINFO_TYPE (stmt_info))
5061 case type_demotion_vec_info_type:
5062 done = vectorizable_type_demotion (stmt, bsi, &vec_stmt);
5066 case type_promotion_vec_info_type:
5067 done = vectorizable_type_promotion (stmt, bsi, &vec_stmt);
5071 case type_conversion_vec_info_type:
5072 done = vectorizable_conversion (stmt, bsi, &vec_stmt);
5076 case induc_vec_info_type:
5077 done = vectorizable_induction (stmt, bsi, &vec_stmt);
5081 case op_vec_info_type:
5082 done = vectorizable_operation (stmt, bsi, &vec_stmt);
5086 case assignment_vec_info_type:
5087 done = vectorizable_assignment (stmt, bsi, &vec_stmt);
5091 case load_vec_info_type:
5092 done = vectorizable_load (stmt, bsi, &vec_stmt);
5096 case store_vec_info_type:
5097 done = vectorizable_store (stmt, bsi, &vec_stmt);
5099 if (DR_GROUP_FIRST_DR (stmt_info))
5101 /* In case of interleaving, the whole chain is vectorized when the
5102 last store in the chain is reached. Store stmts before the last
5103 one are skipped, and there vec_stmt_info shouldn't be freed
5105 *strided_store = true;
5106 if (STMT_VINFO_VEC_STMT (stmt_info))
5113 case condition_vec_info_type:
5114 done = vectorizable_condition (stmt, bsi, &vec_stmt);
5118 case call_vec_info_type:
5119 done = vectorizable_call (stmt, bsi, &vec_stmt);
5122 case reduc_vec_info_type:
5123 done = vectorizable_reduction (stmt, bsi, &vec_stmt);
5128 if (!STMT_VINFO_LIVE_P (stmt_info))
5130 if (vect_print_dump_info (REPORT_DETAILS))
5131 fprintf (vect_dump, "stmt not supported.");
5136 if (STMT_VINFO_LIVE_P (stmt_info)
5137 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
5139 done = vectorizable_live_operation (stmt, bsi, &vec_stmt);
5145 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
5146 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
5147 if (orig_stmt_in_pattern)
5149 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
5150 /* STMT was inserted by the vectorizer to replace a computation idiom.
5151 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
5152 computed this idiom. We need to record a pointer to VEC_STMT in
5153 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
5154 documentation of vect_pattern_recog. */
5155 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
5157 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
5158 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
5167 /* This function builds ni_name = number of iterations loop executes
5168 on the loop preheader. */
5171 vect_build_loop_niters (loop_vec_info loop_vinfo)
5173 tree ni_name, stmt, var;
5175 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5176 tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));
5178 var = create_tmp_var (TREE_TYPE (ni), "niters");
5179 add_referenced_var (var);
5180 ni_name = force_gimple_operand (ni, &stmt, false, var);
5182 pe = loop_preheader_edge (loop);
5185 basic_block new_bb = bsi_insert_on_edge_immediate (pe, stmt);
5186 gcc_assert (!new_bb);
5193 /* This function generates the following statements:
5195 ni_name = number of iterations loop executes
5196 ratio = ni_name / vf
5197 ratio_mult_vf_name = ratio * vf
5199 and places them at the loop preheader edge. */
5202 vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
5204 tree *ratio_mult_vf_name_ptr,
5205 tree *ratio_name_ptr)
5213 tree ratio_mult_vf_name;
5214 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5215 tree ni = LOOP_VINFO_NITERS (loop_vinfo);
5216 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5219 pe = loop_preheader_edge (loop);
5221 /* Generate temporary variable that contains
5222 number of iterations loop executes. */
5224 ni_name = vect_build_loop_niters (loop_vinfo);
5225 log_vf = build_int_cst (TREE_TYPE (ni), exact_log2 (vf));
5227 /* Create: ratio = ni >> log2(vf) */
5229 ratio_name = fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_name), ni_name, log_vf);
5230 if (!is_gimple_val (ratio_name))
5232 var = create_tmp_var (TREE_TYPE (ni), "bnd");
5233 add_referenced_var (var);
5235 ratio_name = force_gimple_operand (ratio_name, &stmt, true, var);
5236 pe = loop_preheader_edge (loop);
5237 new_bb = bsi_insert_on_edge_immediate (pe, stmt);
5238 gcc_assert (!new_bb);
5241 /* Create: ratio_mult_vf = ratio << log2 (vf). */
5243 ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
5244 ratio_name, log_vf);
5245 if (!is_gimple_val (ratio_mult_vf_name))
5247 var = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf");
5248 add_referenced_var (var);
5250 ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmt,
5252 pe = loop_preheader_edge (loop);
5253 new_bb = bsi_insert_on_edge_immediate (pe, stmt);
5254 gcc_assert (!new_bb);
5257 *ni_name_ptr = ni_name;
5258 *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
5259 *ratio_name_ptr = ratio_name;
5265 /* Function update_vuses_to_preheader.
5268 STMT - a statement with potential VUSEs.
5269 LOOP - the loop whose preheader will contain STMT.
5271 It's possible to vectorize a loop even though an SSA_NAME from a VUSE
5272 appears to be defined in a VDEF in another statement in a loop.
5273 One such case is when the VUSE is at the dereference of a __restricted__
5274 pointer in a load and the VDEF is at the dereference of a different
5275 __restricted__ pointer in a store. Vectorization may result in
5276 copy_virtual_uses being called to copy the problematic VUSE to a new
5277 statement that is being inserted in the loop preheader. This procedure
5278 is called to change the SSA_NAME in the new statement's VUSE from the
5279 SSA_NAME updated in the loop to the related SSA_NAME available on the
5280 path entering the loop.
5282 When this function is called, we have the following situation:
5287 # name1 = phi < name0 , name2>
5292 # name2 = vdef <name1>
5297 Stmt S1 was created in the loop preheader block as part of misaligned-load
5298 handling. This function fixes the name of the vuse of S1 from 'name1' to
5302 update_vuses_to_preheader (tree stmt, struct loop *loop)
5304 basic_block header_bb = loop->header;
5305 edge preheader_e = loop_preheader_edge (loop);
5307 use_operand_p use_p;
5309 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_VUSE)
5311 tree ssa_name = USE_FROM_PTR (use_p);
5312 tree def_stmt = SSA_NAME_DEF_STMT (ssa_name);
5313 tree name_var = SSA_NAME_VAR (ssa_name);
5314 basic_block bb = bb_for_stmt (def_stmt);
5316 /* For a use before any definitions, def_stmt is a NOP_EXPR. */
5317 if (!IS_EMPTY_STMT (def_stmt)
5318 && flow_bb_inside_loop_p (loop, bb))
5320 /* If the block containing the statement defining the SSA_NAME
5321 is in the loop then it's necessary to find the definition
5322 outside the loop using the PHI nodes of the header. */
5324 bool updated = false;
5326 for (phi = phi_nodes (header_bb); phi; phi = PHI_CHAIN (phi))
5328 if (SSA_NAME_VAR (PHI_RESULT (phi)) == name_var)
5330 SET_USE (use_p, PHI_ARG_DEF (phi, preheader_e->dest_idx));
5335 gcc_assert (updated);
5341 /* Function vect_update_ivs_after_vectorizer.
5343 "Advance" the induction variables of LOOP to the value they should take
5344 after the execution of LOOP. This is currently necessary because the
5345 vectorizer does not handle induction variables that are used after the
5346 loop. Such a situation occurs when the last iterations of LOOP are
5348 1. We introduced new uses after LOOP for IVs that were not originally used
5349 after LOOP: the IVs of LOOP are now used by an epilog loop.
5350 2. LOOP is going to be vectorized; this means that it will iterate N/VF
5351 times, whereas the loop IVs should be bumped N times.
5354 - LOOP - a loop that is going to be vectorized. The last few iterations
5355 of LOOP were peeled.
5356 - NITERS - the number of iterations that LOOP executes (before it is
5357 vectorized). i.e, the number of times the ivs should be bumped.
5358 - UPDATE_E - a successor edge of LOOP->exit that is on the (only) path
5359 coming out from LOOP on which there are uses of the LOOP ivs
5360 (this is the path from LOOP->exit to epilog_loop->preheader).
5362 The new definitions of the ivs are placed in LOOP->exit.
5363 The phi args associated with the edge UPDATE_E in the bb
5364 UPDATE_E->dest are updated accordingly.
5366 Assumption 1: Like the rest of the vectorizer, this function assumes
5367 a single loop exit that has a single predecessor.
5369 Assumption 2: The phi nodes in the LOOP header and in update_bb are
5370 organized in the same order.
5372 Assumption 3: The access function of the ivs is simple enough (see
5373 vect_can_advance_ivs_p). This assumption will be relaxed in the future.
5375 Assumption 4: Exactly one of the successors of LOOP exit-bb is on a path
5376 coming out of LOOP on which the ivs of LOOP are used (this is the path
5377 that leads to the epilog loop; other paths skip the epilog loop). This
5378 path starts with the edge UPDATE_E, and its destination (denoted update_bb)
5379 needs to have its phis updated.
/* Advance the scalar IVs of LOOP as if NITERS iterations had already
   executed, so that the epilog loop reached through UPDATE_E starts from
   the correct initial values (see the block comment above for the full
   contract and assumptions).
   NOTE(review): this excerpt appears truncated -- the return-type line,
   some local declarations, `else` keywords and closing braces are not
   visible in this view.  */
5383 vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
5386 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5387 basic_block exit_bb = single_exit (loop)->dest;
5389 basic_block update_bb = update_e->dest;
5391 /* gcc_assert (vect_can_advance_ivs_p (loop_vinfo)); */
5393 /* Make sure there exists a single-predecessor exit bb: */
5394 gcc_assert (single_pred_p (exit_bb));
/* Walk the phis of LOOP's header and of update_bb in lock-step
   (Assumption 2 above: both chains are organized in the same order).  */
5396 for (phi = phi_nodes (loop->header), phi1 = phi_nodes (update_bb);
5398 phi = PHI_CHAIN (phi), phi1 = PHI_CHAIN (phi1))
5400 tree access_fn = NULL;
5401 tree evolution_part;
5404 tree var, ni, ni_name;
5405 block_stmt_iterator last_bsi;
5407 if (vect_print_dump_info (REPORT_DETAILS))
5409 fprintf (vect_dump, "vect_update_ivs_after_vectorizer: phi: ");
5410 print_generic_expr (vect_dump, phi, TDF_SLIM);
5413 /* Skip virtual phi's. */
5414 if (!is_gimple_reg (SSA_NAME_VAR (PHI_RESULT (phi))))
5416 if (vect_print_dump_info (REPORT_DETAILS))
5417 fprintf (vect_dump, "virtual phi. skip.");
5421 /* Skip reduction phis. */
5422 if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (phi)) == vect_reduction_def)
5424 if (vect_print_dump_info (REPORT_DETAILS))
5425 fprintf (vect_dump, "reduc phi. skip.");
/* Extract the step of this IV from its scalar evolution.  */
5429 access_fn = analyze_scalar_evolution (loop, PHI_RESULT (phi));
5430 gcc_assert (access_fn);
/* NOTE(review): the assignment target of this call (presumably
   evolution_part, asserted non-NULL just below) is missing from the
   visible text.  */
5432 unshare_expr (evolution_part_in_loop_num (access_fn, loop->num));
5433 gcc_assert (evolution_part != NULL_TREE);
5435 /* FORNOW: We do not support IVs whose evolution function is a polynomial
5436 of degree >= 2 or exponential. */
5437 gcc_assert (!tree_is_chrec (evolution_part));
5439 step_expr = evolution_part;
5440 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
/* Build ni = init_expr + niters * step_expr; pointer-typed IVs need
   POINTER_PLUS_EXPR with a sizetype offset.  */
5443 if (POINTER_TYPE_P (TREE_TYPE (init_expr)))
5444 ni = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (init_expr),
5446 fold_convert (sizetype,
5447 fold_build2 (MULT_EXPR, TREE_TYPE (niters),
5448 niters, step_expr)));
5450 ni = fold_build2 (PLUS_EXPR, TREE_TYPE (init_expr),
5451 fold_build2 (MULT_EXPR, TREE_TYPE (init_expr),
5452 fold_convert (TREE_TYPE (init_expr),
5459 var = create_tmp_var (TREE_TYPE (init_expr), "tmp");
5460 add_referenced_var (var);
/* Gimplify NI at the end of the exit bb so the new IV value is defined
   before the edge into update_bb.  */
5462 last_bsi = bsi_last (exit_bb);
5463 ni_name = force_gimple_operand_bsi (&last_bsi, ni, false, var,
5464 true, BSI_SAME_STMT);
5466 /* Fix phi expressions in the successor bb. */
5467 SET_PHI_ARG_DEF (phi1, update_e->dest_idx, ni_name);
5472 /* Function vect_do_peeling_for_loop_bound
5474 Peel the last iterations of the loop represented by LOOP_VINFO.
5475 The peeled iterations form a new epilog loop. Given that the loop now
5476 iterates NITERS times, the new epilog loop iterates
5477 NITERS % VECTORIZATION_FACTOR times.
5479 The original loop will later be made to iterate
5480 NITERS / VECTORIZATION_FACTOR times (this value is placed into RATIO). */
/* Peel the last NITERS % VF iterations of LOOP into a scalar epilog loop
   (see the function comment above).  RATIO receives NITERS / VF.
   NOTE(review): excerpt appears truncated -- the return-type line, some
   local declarations, an `else` before line 5548 and the #endif matching
   line 5534 are not visible here.  */
5483 vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio)
5485 tree ni_name, ratio_mult_vf_name;
5486 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5487 struct loop *new_loop;
5489 basic_block preheader;
5492 int min_scalar_loop_bound;
5493 int min_profitable_iters;
5495 if (vect_print_dump_info (REPORT_DETAILS))
5496 fprintf (vect_dump, "=== vect_do_peeling_for_loop_bound ===");
5498 initialize_original_copy_tables ();
5500 /* Generate the following variables on the preheader of original loop:
5502 ni_name = number of iteration the original loop executes
5503 ratio = ni_name / vf
5504 ratio_mult_vf_name = ratio * vf */
5505 vect_generate_tmps_on_preheader (loop_vinfo, &ni_name,
5506 &ratio_mult_vf_name, ratio);
/* Remember the loop number so we can assert peeling left it intact.  */
5508 loop_num = loop->num;
5510 /* Analyze cost to set threshold for vectorized loop. */
5511 min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);
5512 min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND))
5513 * LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5515 /* Use the cost model only if it is more conservative than user specified
/* The guard threshold is the larger of the user bound and the cost-model
   minimum profitable iteration count.  */
5518 th = (unsigned) min_scalar_loop_bound;
5519 if (min_profitable_iters
5520 && (!min_scalar_loop_bound
5521 || min_profitable_iters > min_scalar_loop_bound))
5522 th = (unsigned) min_profitable_iters;
5524 if (min_profitable_iters
5525 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5526 && vect_print_dump_info (REPORT_DETAILS))
5527 fprintf (vect_dump, "vectorization may not be profitable.");
5529 new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
5530 ratio_mult_vf_name, ni_name, false,
5532 gcc_assert (new_loop);
5533 gcc_assert (loop_num == loop->num);
5534 #ifdef ENABLE_CHECKING
5535 slpeel_verify_cfg_after_peeling (loop, new_loop);
5538 /* A guard that controls whether the new_loop is to be executed or skipped
5539 is placed in LOOP->exit. LOOP->exit therefore has two successors - one
5540 is the preheader of NEW_LOOP, where the IVs from LOOP are used. The other
5541 is a bb after NEW_LOOP, where these IVs are not used. Find the edge that
5542 is on the path where the LOOP IVs are used and need to be updated. */
5544 preheader = loop_preheader_edge (new_loop)->src;
5545 if (EDGE_PRED (preheader, 0)->src == single_exit (loop)->dest)
5546 update_e = EDGE_PRED (preheader, 0);
5548 update_e = EDGE_PRED (preheader, 1);
5550 /* Update IVs of original loop as if they were advanced
5551 by ratio_mult_vf_name steps. */
5552 vect_update_ivs_after_vectorizer (loop_vinfo, ratio_mult_vf_name, update_e);
5554 /* After peeling we have to reset scalar evolution analyzer. */
5557 free_original_copy_tables ();
5561 /* Function vect_gen_niters_for_prolog_loop
5563 Set the number of iterations for the loop represented by LOOP_VINFO
5564 to the minimum between LOOP_NITERS (the original iteration count of the loop)
5565 and the misalignment of DR - the data reference recorded in
5566 LOOP_VINFO_UNALIGNED_DR (LOOP_VINFO). As a result, after the execution of
5567 this loop, the data reference DR will refer to an aligned location.
5569 The following computation is generated:
5571 If the misalignment of DR is known at compile time:
5572 addr_mis = int mis = DR_MISALIGNMENT (dr);
5573 Else, compute address misalignment in bytes:
5574 addr_mis = addr & (vectype_size - 1)
5576 prolog_niters = min ( LOOP_NITERS , (VF - addr_mis/elem_size)&(VF-1) )
5578 (elem_size = element type size; an element is the scalar element
5579 whose type is the inner type of the vectype)
5583 prolog_niters = min ( LOOP_NITERS ,
5584 (VF/group_size - addr_mis/elem_size)&(VF/group_size-1) )
5585 where group_size is the size of the interleaved group.
5587 The above formulas assume that VF == number of elements in the vector. This
5588 may not hold when there are multiple-types in the loop.
5589 In this case, for some data-references in the loop the VF does not represent
5590 the number of elements that fit in the vector. Therefore, instead of VF we
5591 use TYPE_VECTOR_SUBPARTS. */
/* Compute the iteration count of the alignment prolog loop for the
   unaligned data ref recorded in LOOP_VINFO (formulas in the comment
   above).  Returns the gimplified count, inserted on the preheader edge.
   NOTE(review): excerpt appears truncated -- the return-type line, some
   declarations (e.g. the target of the vect_create_addr_base_for_vector_ref
   call), `else` keywords and braces are not visible here.  */
5594 vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
5596 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
5597 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5599 tree iters, iters_name;
5602 tree dr_stmt = DR_STMT (dr);
5603 stmt_vec_info stmt_info = vinfo_for_stmt (dr_stmt);
5604 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5605 int vectype_align = TYPE_ALIGN (vectype) / BITS_PER_UNIT;
5606 tree niters_type = TREE_TYPE (loop_niters);
5608 int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
5609 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
5611 if (DR_GROUP_FIRST_DR (stmt_info))
5613 /* For interleaved access element size must be multiplied by the size of
5614 the interleaved group. */
5615 group_size = DR_GROUP_SIZE (vinfo_for_stmt (
5616 DR_GROUP_FIRST_DR (stmt_info)));
5617 element_size *= group_size;
5620 pe = loop_preheader_edge (loop);
/* Case 1: misalignment known at compile time -- fold it to a constant.  */
5622 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
5624 int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
5625 int elem_misalign = byte_misalign / element_size;
5627 if (vect_print_dump_info (REPORT_DETAILS))
5628 fprintf (vect_dump, "known alignment = %d.", byte_misalign);
5629 iters = build_int_cst (niters_type,
5630 (nelements - elem_misalign)&(nelements/group_size-1));
/* Case 2: misalignment unknown -- emit runtime address arithmetic.  */
5634 tree new_stmts = NULL_TREE;
/* NOTE(review): the assignment target of this call (presumably
   start_addr, used just below) is missing from the visible text.  */
5636 vect_create_addr_base_for_vector_ref (dr_stmt, &new_stmts, NULL_TREE);
5637 tree ptr_type = TREE_TYPE (start_addr);
5638 tree size = TYPE_SIZE (ptr_type);
5639 tree type = lang_hooks.types.type_for_size (tree_low_cst (size, 1), 1);
5640 tree vectype_size_minus_1 = build_int_cst (type, vectype_align - 1);
5641 tree elem_size_log =
5642 build_int_cst (type, exact_log2 (vectype_align/nelements));
5643 tree nelements_minus_1 = build_int_cst (type, nelements - 1);
5644 tree nelements_tree = build_int_cst (type, nelements);
/* Insert the address-computation stmts on the preheader edge; the edge
   must not be split (new_bb must be NULL).  */
5648 new_bb = bsi_insert_on_edge_immediate (pe, new_stmts);
5649 gcc_assert (!new_bb);
5651 /* Create: byte_misalign = addr & (vectype_size - 1) */
5653 fold_build2 (BIT_AND_EXPR, type, fold_convert (type, start_addr), vectype_size_minus_1);
5655 /* Create: elem_misalign = byte_misalign / element_size */
5657 fold_build2 (RSHIFT_EXPR, type, byte_misalign, elem_size_log);
5659 /* Create: (niters_type) (nelements - elem_misalign)&(nelements - 1) */
5660 iters = fold_build2 (MINUS_EXPR, type, nelements_tree, elem_misalign);
5661 iters = fold_build2 (BIT_AND_EXPR, type, iters, nelements_minus_1);
5662 iters = fold_convert (niters_type, iters);
5665 /* Create: prolog_loop_niters = min (iters, loop_niters) */
5666 /* If the loop bound is known at compile time we already verified that it is
5667 greater than vf; since the misalignment ('iters') is at most vf, there's
5668 no need to generate the MIN_EXPR in this case. */
5669 if (TREE_CODE (loop_niters) != INTEGER_CST)
5670 iters = fold_build2 (MIN_EXPR, niters_type, iters, loop_niters);
5672 if (vect_print_dump_info (REPORT_DETAILS))
5674 fprintf (vect_dump, "niters for prolog loop: ");
5675 print_generic_expr (vect_dump, iters, TDF_SLIM);
5678 var = create_tmp_var (niters_type, "prolog_loop_niters");
5679 add_referenced_var (var);
5680 iters_name = force_gimple_operand (iters, &stmt, false, var);
5682 /* Insert stmt on loop preheader edge. */
5685 basic_block new_bb = bsi_insert_on_edge_immediate (pe, stmt);
5686 gcc_assert (!new_bb);
5693 /* Function vect_update_init_of_dr
5695 NITERS iterations were peeled from LOOP. DR represents a data reference
5696 in LOOP. This function updates the information recorded in DR to
5697 account for the fact that the first NITERS iterations had already been
5698 executed. Specifically, it updates the OFFSET field of DR. */
/* Account for NITERS peeled iterations in DR: add NITERS * DR_STEP to
   DR_OFFSET so DR describes the first access of the non-peeled loop.
   NOTE(review): excerpt appears truncated -- the return-type line and
   braces are not visible here.  */
5701 vect_update_init_of_dr (struct data_reference *dr, tree niters)
5703 tree offset = DR_OFFSET (dr);
/* NITERS is reused to hold the total adjustment NITERS * DR_STEP.  */
5705 niters = fold_build2 (MULT_EXPR, TREE_TYPE (niters), niters, DR_STEP (dr));
5706 offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset, niters);
5707 DR_OFFSET (dr) = offset;
5711 /* Function vect_update_inits_of_drs
5713 NITERS iterations were peeled from the loop represented by LOOP_VINFO.
5714 This function updates the information recorded for the data references in
5715 the loop to account for the fact that the first NITERS iterations had
5716 already been executed. Specifically, it updates the initial_condition of
5717 the access_function of all the data_references in the loop. */
/* Apply vect_update_init_of_dr to every data reference of LOOP_VINFO,
   accounting for the NITERS iterations that were peeled off.
   NOTE(review): excerpt appears truncated -- the return-type line, the
   declaration of `i` and braces are not visible here.  */
5720 vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
5723 VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
5724 struct data_reference *dr;
5726 if (vect_print_dump_info (REPORT_DETAILS))
5727 fprintf (vect_dump, "=== vect_update_inits_of_dr ===");
5729 for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
5730 vect_update_init_of_dr (dr, niters);
5734 /* Function vect_do_peeling_for_alignment
5736 Peel the first 'niters' iterations of the loop represented by LOOP_VINFO.
5737 'niters' is set to the misalignment of one of the data references in the
5738 loop, thereby forcing it to refer to an aligned location at the beginning
5739 of the execution of this loop. The data reference for which we are
5740 peeling is recorded in LOOP_VINFO_UNALIGNED_DR. */
/* Peel the first misalignment-many iterations of LOOP into a scalar
   prolog so LOOP_VINFO_UNALIGNED_DR becomes aligned (see comment above).
   NOTE(review): excerpt appears truncated -- the return-type line, the
   declaration of n_iters, the #endif matching line 5764 and braces are
   not visible here.  */
5743 vect_do_peeling_for_alignment (loop_vec_info loop_vinfo)
5745 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5746 tree niters_of_prolog_loop, ni_name;
5748 struct loop *new_loop;
5750 if (vect_print_dump_info (REPORT_DETAILS))
5751 fprintf (vect_dump, "=== vect_do_peeling_for_alignment ===");
5753 initialize_original_copy_tables ();
5755 ni_name = vect_build_loop_niters (loop_vinfo);
5756 niters_of_prolog_loop = vect_gen_niters_for_prolog_loop (loop_vinfo, ni_name);
5758 /* Peel the prolog loop and iterate it niters_of_prolog_loop. */
5760 slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop),
5761 niters_of_prolog_loop, ni_name, true, 0);
5762 gcc_assert (new_loop);
5763 #ifdef ENABLE_CHECKING
5764 slpeel_verify_cfg_after_peeling (new_loop, loop);
5767 /* Update number of times loop executes. */
5768 n_iters = LOOP_VINFO_NITERS (loop_vinfo);
/* The main loop now runs n_iters - niters_of_prolog_loop times.  */
5769 LOOP_VINFO_NITERS (loop_vinfo) = fold_build2 (MINUS_EXPR,
5770 TREE_TYPE (n_iters), n_iters, niters_of_prolog_loop);
5772 /* Update the init conditions of the access functions of all data refs. */
5773 vect_update_inits_of_drs (loop_vinfo, niters_of_prolog_loop);
5775 /* After peeling we have to reset scalar evolution analyzer. */
5778 free_original_copy_tables ();
5782 /* Function vect_create_cond_for_align_checks.
5784 Create a conditional expression that represents the alignment checks for
5785 all of data references (array element references) whose alignment must be
5789 LOOP_VINFO - two fields of the loop information are used.
5790 LOOP_VINFO_PTR_MASK is the mask used to check the alignment.
5791 LOOP_VINFO_MAY_MISALIGN_STMTS contains the refs to be checked.
5794 COND_EXPR_STMT_LIST - statements needed to construct the conditional
5796 The returned value is the conditional expression to be used in the if
5797 statement that controls which version of the loop gets executed at runtime.
5799 The algorithm makes two assumptions:
5800 1) The number of bytes "n" in a vector is a power of 2.
5801 2) An address "a" is aligned if a%n is zero and that this
5802 test can be done as a&(n-1) == 0. For example, for 16
5803 byte vectors the test is a&0xf == 0. */
/* Build the runtime alignment test ((addr_1 | ... | addr_n) & mask) == 0
   for all may-misalign refs in LOOP_VINFO; the generated statements are
   appended to *COND_EXPR_STMT_LIST and the EQ_EXPR condition is returned
   (see the full contract in the comment above).
   NOTE(review): excerpt appears truncated -- the return-type line, some
   declarations (ref_stmt, tmp_name, addr_base, psize, mask_cst,
   ptrsize_zero, tmp), `else` keywords and braces are not visible.  */
5806 vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
5807 tree *cond_expr_stmt_list)
5809 VEC(tree,heap) *may_misalign_stmts
5810 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
5812 int mask = LOOP_VINFO_PTR_MASK (loop_vinfo);
5816 tree int_ptrsize_type;
5818 tree or_tmp_name = NULL_TREE;
5819 tree and_tmp, and_tmp_name, and_stmt;
5822 /* Check that mask is one less than a power of 2, i.e., mask is
5823 all zeros followed by all ones. */
5824 gcc_assert ((mask != 0) && ((mask & (mask+1)) == 0));
5826 /* CHECKME: what is the best integer or unsigned type to use to hold a
5827 cast from a pointer value? */
5828 psize = TYPE_SIZE (ptr_type_node);
5830 = lang_hooks.types.type_for_size (tree_low_cst (psize, 1), 0);
5832 /* Create expression (mask & (dr_1 || ... || dr_n)) where dr_i is the address
5833 of the first vector of the i'th data reference. */
5835 for (i = 0; VEC_iterate (tree, may_misalign_stmts, i, ref_stmt); i++)
5837 tree new_stmt_list = NULL_TREE;
5839 tree addr_tmp, addr_tmp_name, addr_stmt;
5840 tree or_tmp, new_or_tmp_name, or_stmt;
5842 /* create: addr_tmp = (int)(address_of_first_vector) */
5843 addr_base = vect_create_addr_base_for_vector_ref (ref_stmt,
/* Any statements needed to compute the base address go first.  */
5847 if (new_stmt_list != NULL_TREE)
5848 append_to_statement_list_force (new_stmt_list, cond_expr_stmt_list);
5850 sprintf (tmp_name, "%s%d", "addr2int", i);
5851 addr_tmp = create_tmp_var (int_ptrsize_type, tmp_name);
5852 add_referenced_var (addr_tmp);
5853 addr_tmp_name = make_ssa_name (addr_tmp, NULL_TREE);
5854 addr_stmt = fold_convert (int_ptrsize_type, addr_base);
5855 addr_stmt = build_gimple_modify_stmt (addr_tmp_name, addr_stmt);
5856 SSA_NAME_DEF_STMT (addr_tmp_name) = addr_stmt;
5857 append_to_statement_list_force (addr_stmt, cond_expr_stmt_list);
5859 /* The addresses are OR together. */
/* First iteration seeds or_tmp_name; later ones accumulate with BIT_IOR.  */
5861 if (or_tmp_name != NULL_TREE)
5863 /* create: or_tmp = or_tmp | addr_tmp */
5864 sprintf (tmp_name, "%s%d", "orptrs", i);
5865 or_tmp = create_tmp_var (int_ptrsize_type, tmp_name);
5866 add_referenced_var (or_tmp);
5867 new_or_tmp_name = make_ssa_name (or_tmp, NULL_TREE);
5868 tmp = build2 (BIT_IOR_EXPR, int_ptrsize_type,
5869 or_tmp_name, addr_tmp_name);
5870 or_stmt = build_gimple_modify_stmt (new_or_tmp_name, tmp);
5871 SSA_NAME_DEF_STMT (new_or_tmp_name) = or_stmt;
5872 append_to_statement_list_force (or_stmt, cond_expr_stmt_list);
5873 or_tmp_name = new_or_tmp_name;
5876 or_tmp_name = addr_tmp_name;
5880 mask_cst = build_int_cst (int_ptrsize_type, mask);
5882 /* create: and_tmp = or_tmp & mask */
5883 and_tmp = create_tmp_var (int_ptrsize_type, "andmask" );
5884 add_referenced_var (and_tmp);
5885 and_tmp_name = make_ssa_name (and_tmp, NULL_TREE);
5887 tmp = build2 (BIT_AND_EXPR, int_ptrsize_type, or_tmp_name, mask_cst);
5888 and_stmt = build_gimple_modify_stmt (and_tmp_name, tmp);
5889 SSA_NAME_DEF_STMT (and_tmp_name) = and_stmt;
5890 append_to_statement_list_force (and_stmt, cond_expr_stmt_list);
5892 /* Make and_tmp the left operand of the conditional test against zero.
5893 if and_tmp has a nonzero bit then some address is unaligned. */
5894 ptrsize_zero = build_int_cst (int_ptrsize_type, 0);
5895 return build2 (EQ_EXPR, boolean_type_node,
5896 and_tmp_name, ptrsize_zero);
5900 /* Function vect_transform_loop.
5902 The analysis phase has determined that the loop is vectorizable.
5903 Vectorize the loop - created vectorized stmts to replace the scalar
5904 stmts in the loop, and update the loop exit condition. */
5907 vect_transform_loop (loop_vec_info loop_vinfo)
5909 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5910 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
5911 int nbbs = loop->num_nodes;
5912 block_stmt_iterator si, next_si;
5915 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5918 if (vect_print_dump_info (REPORT_DETAILS))
5919 fprintf (vect_dump, "=== vec_transform_loop ===");
5921 /* If the loop has data references that may or may not be aligned then
5922 two versions of the loop need to be generated, one which is vectorized
5923 and one which isn't. A test is then generated to control which of the
5924 loops is executed. The test checks for the alignment of all of the
5925 data references that may or may not be aligned. */
5927 if (VEC_length (tree, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)))
5931 tree cond_expr_stmt_list = NULL_TREE;
5932 basic_block condition_bb;
5933 block_stmt_iterator cond_exp_bsi;
5934 basic_block merge_bb;
5935 basic_block new_exit_bb;
5937 tree orig_phi, new_phi, arg;
5938 unsigned prob = 4 * REG_BR_PROB_BASE / 5;
5940 cond_expr = vect_create_cond_for_align_checks (loop_vinfo,
5941 &cond_expr_stmt_list);
5942 initialize_original_copy_tables ();
5943 nloop = loop_version (loop, cond_expr, &condition_bb,
5944 prob, prob, REG_BR_PROB_BASE - prob, true);
5945 free_original_copy_tables();
5947 /** Loop versioning violates an assumption we try to maintain during
5948 vectorization - that the loop exit block has a single predecessor.
5949 After versioning, the exit block of both loop versions is the same
5950 basic block (i.e. it has two predecessors). Just in order to simplify
5951 following transformations in the vectorizer, we fix this situation
5952 here by adding a new (empty) block on the exit-edge of the loop,
5953 with the proper loop-exit phis to maintain loop-closed-form. **/
5955 merge_bb = single_exit (loop)->dest;
5956 gcc_assert (EDGE_COUNT (merge_bb->preds) == 2);
5957 new_exit_bb = split_edge (single_exit (loop));
5958 new_exit_e = single_exit (loop);
5959 e = EDGE_SUCC (new_exit_bb, 0);
5961 for (orig_phi = phi_nodes (merge_bb); orig_phi;
5962 orig_phi = PHI_CHAIN (orig_phi))
5964 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
5966 arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
5967 add_phi_arg (new_phi, arg, new_exit_e);
5968 SET_PHI_ARG_DEF (orig_phi, e->dest_idx, PHI_RESULT (new_phi));
5971 /** end loop-exit-fixes after versioning **/
5973 update_ssa (TODO_update_ssa);
5974 cond_exp_bsi = bsi_last (condition_bb);
5975 bsi_insert_before (&cond_exp_bsi, cond_expr_stmt_list, BSI_SAME_STMT);
5978 /* CHECKME: we wouldn't need this if we called update_ssa once
5980 bitmap_zero (vect_memsyms_to_rename);
5982 /* Peel the loop if there are data refs with unknown alignment.
5983 Only one data ref with unknown store is allowed. */
5985 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
5986 vect_do_peeling_for_alignment (loop_vinfo);
5988 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
5989 compile time constant), or it is a constant that doesn't divide by the
5990 vectorization factor, then an epilog loop needs to be created.
5991 We therefore duplicate the loop: the original loop will be vectorized,
5992 and will compute the first (n/VF) iterations. The second copy of the loop
5993 will remain scalar and will compute the remaining (n%VF) iterations.
5994 (VF is the vectorization factor). */
5996 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5997 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5998 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0))
5999 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio);
6001 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
6002 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
6004 /* 1) Make sure the loop header has exactly two entries
6005 2) Make sure we have a preheader basic block. */
6007 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
6009 split_edge (loop_preheader_edge (loop));
6011 /* FORNOW: the vectorizer supports only loops which body consist
6012 of one basic block (header + empty latch). When the vectorizer will
6013 support more involved loop forms, the order by which the BBs are
6014 traversed need to be reconsidered. */
6016 for (i = 0; i < nbbs; i++)
6018 basic_block bb = bbs[i];
6019 stmt_vec_info stmt_info;
6022 for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
6024 if (vect_print_dump_info (REPORT_DETAILS))
6026 fprintf (vect_dump, "------>vectorizing phi: ");
6027 print_generic_expr (vect_dump, phi, TDF_SLIM);
6029 stmt_info = vinfo_for_stmt (phi);
6032 if (!STMT_VINFO_RELEVANT_P (stmt_info)
6033 && !STMT_VINFO_LIVE_P (stmt_info))
6036 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
6037 != (unsigned HOST_WIDE_INT) vectorization_factor)
6038 && vect_print_dump_info (REPORT_DETAILS))
6039 fprintf (vect_dump, "multiple-types.");
6041 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
6043 if (vect_print_dump_info (REPORT_DETAILS))
6044 fprintf (vect_dump, "transform phi.");
6045 vect_transform_stmt (phi, NULL, NULL);
6049 for (si = bsi_start (bb); !bsi_end_p (si);)
6051 tree stmt = bsi_stmt (si);
6054 if (vect_print_dump_info (REPORT_DETAILS))
6056 fprintf (vect_dump, "------>vectorizing statement: ");
6057 print_generic_expr (vect_dump, stmt, TDF_SLIM);
6059 stmt_info = vinfo_for_stmt (stmt);
6060 gcc_assert (stmt_info);
6061 if (!STMT_VINFO_RELEVANT_P (stmt_info)
6062 && !STMT_VINFO_LIVE_P (stmt_info))
6068 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
6069 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
6070 != (unsigned HOST_WIDE_INT) vectorization_factor)
6071 && vect_print_dump_info (REPORT_DETAILS))
6072 fprintf (vect_dump, "multiple-types.");
6074 /* -------- vectorize statement ------------ */
6075 if (vect_print_dump_info (REPORT_DETAILS))
6076 fprintf (vect_dump, "transform statement.");
6078 strided_store = false;
6079 is_store = vect_transform_stmt (stmt, &si, &strided_store);
6083 if (DR_GROUP_FIRST_DR (stmt_info))
6085 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
6086 interleaving chain was completed - free all the stores in
6088 tree next = DR_GROUP_FIRST_DR (stmt_info);
6090 stmt_vec_info next_stmt_info;
6094 next_si = bsi_for_stmt (next);
6095 next_stmt_info = vinfo_for_stmt (next);
6096 /* Free the attached stmt_vec_info and remove the stmt. */
6097 ann = stmt_ann (next);
6098 tmp = DR_GROUP_NEXT_DR (next_stmt_info);
6099 free (next_stmt_info);
6100 set_stmt_info (ann, NULL);
6101 bsi_remove (&next_si, true);
6104 bsi_remove (&si, true);
6109 /* Free the attached stmt_vec_info and remove the stmt. */
6110 ann = stmt_ann (stmt);
6112 set_stmt_info (ann, NULL);
6113 bsi_remove (&si, true);
6121 slpeel_make_loop_iterate_ntimes (loop, ratio);
6123 mark_set_for_renaming (vect_memsyms_to_rename);
6125 /* The memory tags and pointers in vectorized statements need to
6126 have their SSA forms updated. FIXME, why can't this be delayed
6127 until all the loops have been transformed? */
6128 update_ssa (TODO_update_ssa);
6130 if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
6131 fprintf (vect_dump, "LOOP VECTORIZED.");