/* Loop Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
   Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "toplev.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"

/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it had been manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;

        for (i=0; i<N/8; i++){
          pa[i] = pb[i] + pc[i];
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   successfully passed the analysis phase.
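
   For instance (an illustrative sketch only, simplified from the actual
   driver code), the overall shape of vectorize_loops() is:

        FOR_EACH_LOOP (li, loop, 0)
          {
            loop_vinfo = vect_analyze_loop (loop);
            if (loop_vinfo && LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
              vect_transform_loop (loop_vinfo);
          }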

   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMEs), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFs whose base is an array DECL
   (not a pointer), and INDIRECT_REFs through pointers; both array and
   pointer accesses are required to have a simple (consecutive) access
   pattern.
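
   For example (illustrative): in "a[i] = b[i] + x", the accesses a[i] and
   b[i] are data-refs (ARRAY_REFs whose base is the array DECL), while 'x'
   and the temporary holding the value loaded from b[i] are scalars
   (SSA_NAMEs).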

   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.
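
   For example (illustrative), this information is typically retrieved via
   accessors such as:

        stmt_info  = vinfo_for_stmt (stmt);
        vectype    = STMT_VINFO_VECTYPE (stmt_info);
        loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);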

   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Currently the only target-specific information that is used is the
   size of the vector (in bytes) - "UNITS_PER_SIMD_WORD".  Targets that can
   support different sizes of vectors will, for now, need to specify one
   value for "UNITS_PER_SIMD_WORD".  More flexibility will be added in the
   future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)->insn_code).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.
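
   For example (an illustrative sketch of that check):

        if (optab_handler (add_optab, V8HImode)->insn_code
            == CODE_FOR_nothing)
          return false;   /* No target support - can't vectorize.  */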

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/

/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data
   elements that are operated upon in parallel in a single iteration of the
   vectorized loop.  For example, when vectorizing a loop that operates on
   4-byte elements, on a target with vector size (VS) of 16 bytes, the VF is
   set to 4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.
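
   For example (illustrative): a loop computing "int_a[i] = short_b[i]"
   operates on both 2-byte and 4-byte types, so under the current
   single-size restriction it would be rejected.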

   VF is also the factor by which the loop iterations are strip-mined, e.g.:

   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }  */

static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  unsigned int nunits;
  tree scalar_type;
  tree vectype;
  stmt_vec_info stmt_info;
  HOST_WIDE_INT dummy;
  int i;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple phi = gsi_stmt (si);

          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "vectype: ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "nunits = %d", nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (stmt);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "skip.");
              continue;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: irregular stmt.");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: vector stmt in loop:");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for
                 stmts that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)
                          && !is_pattern_stmt_p (stmt_info));

              scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                           &dummy);
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;
            }

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vectype: ");
              print_generic_expr (vect_dump, vectype, TDF_SLIM);
            }

          nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "nunits = %d", nunits);

          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;
        }
    }

  /* TODO: Analyze cost.  Decide if worthwhile to vectorize.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported data-type");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}

/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */
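
/* For example (illustrative): for the IV 'i' in "for (i = 0; i < n; i += 4)"
   the access function computed by scev is the chrec {0, +, 4}_1: initial
   value 0 and constant step 4, which this predicate accepts.  An evolution
   whose step is itself a chrec (a polynomial of degree >= 2) is rejected.  */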

static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree *init,
                             tree *step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
                                                           loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ", init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}

/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree dumy;
  VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
  gimple_stmt_iterator gsi;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");

  /* First - identify all inductions.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phis.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (!is_gimple_reg (SSA_NAME_VAR (def)))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn && vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
        {
          VEC_safe_push (gimple, heap, worklist, phi);
          continue;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected induction.");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }


  /* Second - identify all reductions.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      gimple phi = VEC_pop (gimple, worklist);
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      reduc_stmt = vect_is_simple_reduction (loop_vinfo, phi);
      if (reduc_stmt)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "Detected reduction.");
          STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
          STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
            vect_reduction_def;
        }
      else
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Unknown def-use cycle pattern.");
    }

  VEC_free (gimple, heap, worklist);
}

/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.

   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such an inner-loop therefore have different properties
     than the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the
        original scalar loop, so we can't change the order of computation
        when vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}

/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed.
   If an expression that represents the number of iterations
   can be constructed, place it in NUMBER_OF_ITERATIONS.
   Return the loop exit condition.  */

static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== get_loop_niters ===");

  niters = number_of_exit_cond_executions (loop);

  if (niters != NULL_TREE
      && niters != chrec_dont_know)
    {
      *number_of_iterations = niters;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "==> get_loop_niters:");
          print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
        }
    }

  return get_loop_exit_condition (loop);
}

/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *) data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}

/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (gimple, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;

  return res;
}

/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all
   the stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  unsigned int j;
  gimple_stmt_iterator si;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;

  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }

  for (j = 0; j < (unsigned int) nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (stmt_info)
            {
              /* Check if this is a "pattern stmt" (introduced by the
                 vectorizer during the pattern recognition pass).  */
              bool remove_stmt_p = false;
              gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
              if (orig_stmt)
                {
                  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
                  if (orig_stmt_info
                      && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
                    remove_stmt_p = true;
                }

              /* Free stmt_vec_info.  */
              free_stmt_vec_info (stmt);

              /* Remove dead "pattern stmts".  */
              if (remove_stmt_p)
                gsi_remove (&si, true);
            }
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (j = 0; VEC_iterate (slp_instance, slp_instances, j, instance); j++)
    vect_free_slp_instance (instance);

  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}

/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest_1 =====");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.)  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad inner-loop form.");
      return NULL;
    }

  return loop_vinfo;
}

/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */
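
/* For example (illustrative): "for (i = 0; i < n; i++)" is a countable
   loop - its number of iterations can be computed - whereas a loop such
   as "while (*p++ != 0)" generally is not, and would be rejected here.  */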

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_form ===");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  */
      if (loop->num_nodes != 2)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: too many BBs in loop.");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: empty loop.");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge backedge, entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and that the number of BBs is exactly
         5.  The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: multiple nested loops.");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: Bad inner loop.");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: inner-loop count not invariant.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: too many BBs in loop.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      backedge = EDGE_PRED (innerloop->header, 1);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        {
          backedge = EDGE_PRED (innerloop->header, 0);
          entryedge = EDGE_PRED (innerloop->header, 1);
        }

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: unsupported outerloop form.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Considering outer-loop vectorization.");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        {
          if (!single_exit (loop))
            fprintf (vect_dump, "not vectorized: multiple exits.");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            fprintf (vect_dump, "not vectorized: too many incoming edges.");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
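  /* For example (an illustrative source-level shape of such a loop):

         if (n > 0)          <-- if-guard before the loop
           do
             {
               ...           <-- all executable stmts, including the
             }                   exit test, live in the loop header
           while (++i < n);  <-- the latch block itself is empty
   */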
  if (!empty_block_p (loop->latch)
      || phi_nodes (loop->latch))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: unexpected loop form.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "split exit edge.");
        }
      else
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: complicated exit condition.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump,
                 "not vectorized: number of iterations cannot be computed.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (chrec_contains_undetermined (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "Infinite number of iterations.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!NITERS_KNOWN_P (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Symbolic number of iterations is ");
          print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
        }
    }
  else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: number of iterations = 0.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, false);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}

/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_operations ===");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (! is_loop_header_bb_p (bb))
            {
              /* Inner-loop loop-closed exit phi in outer-loop vectorization
                 (i.e. a phi in the tail of the outer-loop).
                 FORNOW: we currently don't support the case that these phis
                 are not used in the outer-loop, because this case requires
                 us to actually do something here.  */
              if (!STMT_VINFO_RELEVANT_P (stmt_info)
                  || STMT_VINFO_LIVE_P (stmt_info))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "Unsupported loop-closed phi in outer-loop.");
                  return false;
                }
              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: value used after loop.");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump,
                         "not vectorized: scalar dependence cycle.");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);

              if (!ok)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: relevant phi not supported: ");
                      print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
                    }
                  return false;
                }
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          gcc_assert (stmt_info);

          if (!vect_analyze_stmt (stmt, &need_to_vectorize))
            return false;

          if (STMT_VINFO_RELEVANT_P (stmt_info) && !PURE_SLP_STMT (stmt_info))
            /* STMT needs both SLP and loop-based vectorization.  */
            only_slp_in_loop = false;
        }
    }

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump,
                 "All the computation can be taken out of the loop.");
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump,
                 "not vectorized: redundant loop. no profit to vectorize.");
      return false;
    }

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
     vectorization factor of the loop is the unrolling factor required by
     the SLP instances.  If that unrolling factor is 1, we say that we
     perform pure SLP on the loop - cross iteration parallelism is not
     exploited.  */
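  /* E.g. (illustrative): with a loop-based vectorization factor of 4 and
     an SLP unrolling factor of 6, the combined factor computed below is
     least_common_multiple (4, 6) = 12.  */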
  if (only_slp_in_loop)
    vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
  else
    vectorization_factor = least_common_multiple (vectorization_factor,
                             LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump,
             "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
             vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: iteration count too small.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "vectorization factor.");
      return false;
    }

  /* Analyze cost.  Decide if worthwhile to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: vector version will never be "
                 "profitable.");
      return false;
    }

  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than the user
     specified threshold.  */
  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not "
                 "profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "user specified loop bound parameter or minimum "
                 "profitable iterations (whichever is more conservative).");
      return false;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
      || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "epilog loop required.");
      if (!vect_can_advance_ivs_p (loop_vinfo))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 1.");
          return false;
        }
      if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 2.");
          return false;
        }
    }

  return true;
}

/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */

loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  bool ok;
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest =====");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop already vectorized.");
      return NULL;
    }

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.)  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad loop form.");
      return NULL;
    }

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.

     FORNOW: Handle only simple, array references, whose
     alignment can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data references.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected pattern.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "can't determine vectorization factor.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze data dependences between the data-refs in the loop.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data access.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "too long list of versioning for alias "
                 "run-time tests.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad operation or unsupported loop bound.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

  return loop_vinfo;
}

/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector).

   Return TRUE if a corresponding REDUC_CODE was found, FALSE otherwise.  */

static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
    case MAX_EXPR:
      *reduc_code = REDUC_MAX_EXPR;
      return true;

    case MIN_EXPR:
      *reduc_code = REDUC_MIN_EXPR;
      return true;

    case PLUS_EXPR:
      *reduc_code = REDUC_PLUS_EXPR;
      return true;

    default:
      return false;
    }
}
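
/* For example (illustrative): for a "sum += a[i]" reduction the scalar
   code is PLUS_EXPR, so the function above returns true and sets
   *REDUC_CODE to REDUC_PLUS_EXPR, the code used to reduce the vector of
   partial sums into a single scalar result.  */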

/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_vect_op (gimple stmt, const char *msg)
{
  fprintf (vect_dump, "%s", msg);
  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}

/* Function vect_is_simple_reduction

   Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation.
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation.

   Condition 1 is tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.  */
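
/* For example (illustrative), the classic sum reduction

       for (i = 0; i < n; i++)
         sum = sum + a[i];

   appears in the loop header as

       sum_1 = PHI <sum_0, sum_2>        <-- a1 = phi <a0, a2>
       ...
       sum_2 = val_3 + sum_1;            <-- a2 = operation (a3, a1)  */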

gimple
vect_is_simple_reduction (loop_vec_info loop_info, gimple phi)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  gimple def_stmt, def1, def2;
  enum tree_code code;
  tree op1, op2;
  tree type;
  tree name;
  int nloop_uses;
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  gcc_assert (loop == vect_loop || flow_loop_nested_p (vect_loop, loop));

  name = PHI_RESULT (phi);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }

  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not ssa_name: ");
          print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
        }
      return NULL;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (!def_stmt)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduction: no def_stmt.");
      return NULL;
    }

  if (!is_gimple_assign (def_stmt))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
      return NULL;
    }

  name = gimple_assign_lhs (def_stmt);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }

  code = gimple_assign_rhs_code (def_stmt);

  if (!commutative_tree_code (code) || !associative_tree_code (code))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: not commutative/associative: ");
      return NULL;
    }

  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: not binary operation: ");
      return NULL;
    }

  op1 = gimple_assign_rhs1 (def_stmt);
  op2 = gimple_assign_rhs2 (def_stmt);
  if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
      return NULL;
    }

  /* Check that it's ok to change the order of the computation.  */
  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if (TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op1))
      || TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op2)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: multiple types: operation type: ");
          print_generic_expr (vect_dump, type, TDF_SLIM);
          fprintf (vect_dump, ", operands types: ");
          print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
          fprintf (vect_dump, ",");
          print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
        }
      return NULL;
    }

  /* Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */

  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
      && !nested_in_vect_loop_p (vect_loop, def_stmt))
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt,
                        "reduction: unsafe fp math optimization: ");
      return NULL;
    }
  else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
           && !nested_in_vect_loop_p (vect_loop, def_stmt))
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt,
                        "reduction: unsafe int math optimization: ");
      return NULL;
    }
  else if (SAT_FIXED_POINT_TYPE_P (type))
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt,
                        "reduction: unsafe fixed-point math optimization: ");
      return NULL;
    }

  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this
        optimization.  */
  def1 = SSA_NAME_DEF_STMT (op1);
  def2 = SSA_NAME_DEF_STMT (op2);
  if (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: no defs for operands: ");
      return NULL;
    }

  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_internal_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2 == phi
      && flow_bb_inside_loop_p (loop, gimple_bb (def1))
      && (is_gimple_assign (def1)
          || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_induction_def
          || (gimple_code (def1) == GIMPLE_PHI
              && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                 == vect_internal_def
              && !is_loop_header_bb_p (gimple_bb (def1)))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "detected reduction:");
      return def_stmt;
    }
  else if (def1 == phi
           && flow_bb_inside_loop_p (loop, gimple_bb (def2))
           && (is_gimple_assign (def2)
               || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                  == vect_induction_def
               || (gimple_code (def2) == GIMPLE_PHI
                   && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                      == vect_internal_def
                   && !is_loop_header_bb_p (gimple_bb (def2)))))
    {
      /* Swap operands (just for simplicity - so that the rest of the code
         can assume that the reduction variable is always the last (second)
         argument).  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt,
                        "detected reduction: need to swap operands:");
      swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
                          gimple_assign_rhs2_ptr (def_stmt));
      return def_stmt;
    }
  else
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unknown pattern.");
      return NULL;
    }
}

/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.

   TODO: Take profile info into account before making vectorization
   decisions, if available.  */

int
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
{
  int i;
  int min_profitable_iters;
  int peel_iters_prologue;
  int peel_iters_epilogue;
  int vec_inside_cost = 0;
  int vec_outside_cost = 0;
  int scalar_single_iter_cost = 0;
  int scalar_outside_cost = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
  int peel_guard_costs = 0;
  int innerloop_iters = 0, factor;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  /* Cost model disabled.  */
  if (!flag_vect_cost_model)
    {
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model disabled.");
      return 0;
    }

  /* Requires loop versioning tests to handle misalignment.  */
  if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      vec_outside_cost +=
        VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: Adding cost of checks for loop "
                 "versioning to treat misalignment.\n");
    }

  /* Requires loop versioning with alias checks.  */
  if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      vec_outside_cost +=
        VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: Adding cost of checks for loop "
                 "versioning aliasing.\n");
    }

  if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
      || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
    vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;

  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  /* FORNOW.  */
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      if (bb->loop_father == loop->inner)
        factor = innerloop_iters;
      else
        factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
          /* Skip stmts that are not vectorized inside the loop.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && (!STMT_VINFO_LIVE_P (stmt_info)
                  || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
            continue;
          scalar_single_iter_cost += cost_for_stmt (stmt) * factor;
          vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
                             * factor;
          /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
             some of the "outside" costs are generated inside the
             outer-loop.  */
          vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
        }
    }

  /* Add additional cost for the peeled instructions in prologue and epilogue
     loop.

     FORNOW: If we don't know the value of peel_iters for prologue or epilogue
     at compile-time - we assume it's vf/2 (the worst would be vf-1).

     TODO: Build an expression that represents peel_iters for prologue and
     epilogue to be used in a run-time test.  */

  if (byte_misalign < 0)
    {
      peel_iters_prologue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "prologue peel iters set to vf/2.");

      /* If peeling for alignment is unknown, loop bound of main loop becomes
         unknown.  */
      peel_iters_epilogue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "epilogue peel iters set to vf/2 because "
                 "peeling for alignment is unknown.");

      /* If peeled iterations are unknown, count a taken branch and a not
         taken branch per peeled loop.  Even if scalar loop iterations are
         known, vector iterations are not known since peeled prologue
         iterations are not known.  Hence guards remain the same.  */
      peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
                               + TARG_COND_NOT_TAKEN_BRANCH_COST);
    }
  else
    {
      if (byte_misalign)
        {
          struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
          int element_size
            = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
          tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
          int nelements = TYPE_VECTOR_SUBPARTS (vectype);

          peel_iters_prologue = nelements - (byte_misalign / element_size);
        }
      else
        peel_iters_prologue = 0;

      if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
        {
          peel_iters_epilogue = vf/2;
          if (vect_print_dump_info (REPORT_COST))
            fprintf (vect_dump, "cost model: "
                     "epilogue peel iters set to vf/2 because "
                     "loop iterations are unknown.");

          /* If peeled iterations are known but number of scalar loop
             iterations are unknown, count a taken branch per peeled loop.  */
          peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST;
        }
      else
        {
          int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
          peel_iters_prologue = niters < peel_iters_prologue ?
                                niters : peel_iters_prologue;
          peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
        }
    }

  vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
                      + (peel_iters_epilogue * scalar_single_iter_cost)
                      + peel_guard_costs;

  /* FORNOW: The scalar outside cost is incremented in one of the
     following ways:

     1. The vectorizer checks for alignment and aliasing and generates
     a condition that allows dynamic vectorization.  A cost model
     check is ANDed with the versioning condition.  Hence the scalar code
     path now has the added cost of the versioning check.

       if (cost > th & versioning_check)
         jmp to vector code

     Hence run-time scalar is incremented by not-taken branch cost.

     2. The vectorizer then checks if a prologue is required.  If the
     cost model check was not done before during versioning, it has to
     be done before the prologue check.

       if (cost <= th)
         prologue = scalar_iters
       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit

     Hence the run-time scalar cost is incremented by a taken branch,
     plus a not-taken branch, plus a taken branch cost.

     3. The vectorizer then checks if an epilogue is required.  If the
     cost model check was not done before during the prologue check, it
     has to be done with the epilogue check.

       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit
       vector code:
         if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
           jmp to epilogue

     Hence the run-time scalar cost should be incremented by 2 taken
     branches.

     TODO: The back end may reorder the BBs differently and reverse
     conditions/branch directions.  Change the estimates below to
     something more reasonable.  */

  /* If the number of iterations is known and we do not do versioning, we can
     decide whether to vectorize at compile time.  Hence the scalar version
     does not carry cost model guard costs.  */
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
      || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
    {
      /* Cost model check occurs at versioning.  */
      if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
          || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
        scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
      else
        {
          /* Cost model check occurs at prologue generation.  */
          if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
            scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST
                                   + TARG_COND_NOT_TAKEN_BRANCH_COST;
          /* Cost model check occurs at epilogue generation.  */
          else
            scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST;
        }
    }

  /* Add SLP costs.  */
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
    {
      vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
      vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
    }

  /* Calculate the number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:
     SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
     where
     SIC = scalar iteration cost, VIC = vector iteration cost,
     VOC = vector outside cost, VF = vectorization factor,
     PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
     SOC = scalar outside cost for run time cost model check.  */
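
  /* Worked example (illustrative numbers only): with SIC = 1, VIC = 6,
     VOC = 14, SOC = 0, VF = 8 and no peeling, the condition above becomes
     niters > 6 * niters / 8 + 14, i.e. niters > 56: the scalar loop must
     run at least 57 iterations before the vector version wins.  */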
1986 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
1988 if (vec_outside_cost <= 0)
1989 min_profitable_iters = 1;
1992 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
1993 - vec_inside_cost * peel_iters_prologue
1994 - vec_inside_cost * peel_iters_epilogue)
1995 / ((scalar_single_iter_cost * vf)
1998 if ((scalar_single_iter_cost * vf * min_profitable_iters)
1999 <= ((vec_inside_cost * min_profitable_iters)
2000 + ((vec_outside_cost - scalar_outside_cost) * vf)))
2001 min_profitable_iters++;
        }
    }
  /* vector version will never be profitable.  */
  else
    {
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: vector iteration cost = %d "
                 "is divisible by scalar iteration cost = %d by a factor "
                 "greater than or equal to the vectorization factor = %d .",
                 vec_inside_cost, scalar_single_iter_cost, vf);
      return -1;
    }
2015 if (vect_print_dump_info (REPORT_COST))
    {
      fprintf (vect_dump, "Cost model analysis: \n");
      fprintf (vect_dump, "  Vector inside of loop cost: %d\n",
               vec_inside_cost);
      fprintf (vect_dump, "  Vector outside of loop cost: %d\n",
               vec_outside_cost);
2022 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2023 scalar_single_iter_cost);
2024 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2025 fprintf (vect_dump, " prologue iterations: %d\n",
2026 peel_iters_prologue);
2027 fprintf (vect_dump, " epilogue iterations: %d\n",
2028 peel_iters_epilogue);
2029 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
               min_profitable_iters);
    }
2033 min_profitable_iters =
2034 min_profitable_iters < vf ? vf : min_profitable_iters;
2036 /* Because the condition we create is:
2037 if (niters <= min_profitable_iters)
2038 then skip the vectorized loop. */
2039 min_profitable_iters--;
2041 if (vect_print_dump_info (REPORT_COST))
2042 fprintf (vect_dump, " Profitability threshold = %d\n",
2043 min_profitable_iters);
2045 return min_profitable_iters;
2049 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
2050 functions. Design better to avoid maintenance issues. */
2052 /* Function vect_model_reduction_cost.
2054 Models cost for a reduction operation, including the vector ops
2055 generated within the strip-mine loop, the initial definition before
2056 the loop, and the epilogue code that must be generated. */
static bool
vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
                           int ncopies)
2063 enum tree_code code;
2066 gimple stmt, orig_stmt;
2068 enum machine_mode mode;
2069 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2070 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2073 /* Cost of reduction op inside loop. */
2074 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST;
2076 stmt = STMT_VINFO_STMT (stmt_info);
  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
                  == ternary_op);
      reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
      break;
    case GIMPLE_UNARY_RHS:
      reduction_op = gimple_assign_rhs1 (stmt);
      break;
    case GIMPLE_BINARY_RHS:
      reduction_op = gimple_assign_rhs2 (stmt);
      break;
    default:
      gcc_unreachable ();
    }
  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
  if (!vectype)
    {
      if (vect_print_dump_info (REPORT_COST))
        {
          fprintf (vect_dump, "unsupported data-type ");
          print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
        }
      return false;
    }
2105 mode = TYPE_MODE (vectype);
  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    orig_stmt = STMT_VINFO_STMT (stmt_info);
2111 code = gimple_assign_rhs_code (orig_stmt);
2113 /* Add in cost for initial definition. */
2114 outer_cost += TARG_SCALAR_TO_VEC_COST;
2116 /* Determine cost of epilogue code.
2118 We have a reduction operator that will reduce the vector in one statement.
2119 Also requires scalar extract. */
2121 if (!nested_in_vect_loop_p (loop, orig_stmt))
    {
      if (reduc_code != ERROR_MARK)
2124 outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
      else
        {
          int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
          tree bitsize =
            TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2130 int element_bitsize = tree_low_cst (bitsize, 1);
2131 int nelements = vec_size_in_bits / element_bitsize;
2133 optab = optab_for_tree_code (code, vectype, optab_default);
2135 /* We have a whole vector shift available. */
2136 if (VECTOR_MODE_P (mode)
2137 && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing
2138 && optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
2139 /* Final reduction via vector shifts and the reduction operator. Also
2140 requires scalar extract. */
2141 outer_cost += ((exact_log2(nelements) * 2) * TARG_VEC_STMT_COST
2142 + TARG_VEC_TO_SCALAR_COST);
          else
            /* Use extracts and reduction op for final reduction.  For N
               elements, we have N extracts and N-1 reduction ops.  */
            outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
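          /* Illustrative comparison (not in the original source): for a
             4-element vector (nelements == 4) the shift-based scheme above
             costs exact_log2 (4) * 2 == 4 vector stmts plus one extract,
             while this extract-based fallback costs 4 + 3 == 7
             vector-stmt equivalents; the gap grows with nelements.  */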
        }
    }

  STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
2152 if (vect_print_dump_info (REPORT_COST))
2153 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2154 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
             STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));

  return true;
}
2161 /* Function vect_model_induction_cost.
2163 Models cost for induction operations. */
static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2168 /* loop cost for vec_loop. */
2169 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
2170 /* prologue cost for vec_init and vec_step. */
2171 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST;
2173 if (vect_print_dump_info (REPORT_COST))
2174 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2175 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2176 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
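/* For instance (illustrative, not from the original source): a V4SI
   induction with ncopies == 1 is modeled as one vector add per loop
   iteration, plus two scalar-to-vector broadcasts (one for vec_init,
   one for vec_step) in the loop preheader.  */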
/* Function get_initial_def_for_induction

   Input:
   STMT - a stmt that performs an induction operation in the loop.
   IV_PHI - the loop-header phi of the induction variable

   Output:
   Return a vector variable, initialized with the first VF values of
2188 the induction variable. E.g., for an iv with IV_PHI='X' and
2189 evolution S, for a vector of 4 units, we want to return:
2190 [X, X + S, X + 2*S, X + 3*S]. */
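/* Illustrative instance (not in the original source): for an iv with
   X == 0, S == 3 and VF == 4, the routine below returns [0, 3, 6, 9]
   and also builds the step vector [12, 12, 12, 12], i.e.
   [VF*S, VF*S, VF*S, VF*S], used to advance the vector iv by a whole
   vector iteration at a time.  */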
static tree
get_initial_def_for_induction (gimple iv_phi)
2195 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2196 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2197 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2198 tree scalar_type = TREE_TYPE (gimple_phi_result (iv_phi));
2201 edge pe = loop_preheader_edge (loop);
2202 struct loop *iv_loop;
2204 tree vec, vec_init, vec_step, t;
2208 gimple init_stmt, induction_phi, new_stmt;
2209 tree induc_def, vec_def, vec_dest;
2210 tree init_expr, step_expr;
2211 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2216 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2217 bool nested_in_vect_loop = false;
2218 gimple_seq stmts = NULL;
2219 imm_use_iterator imm_iter;
2220 use_operand_p use_p;
2224 gimple_stmt_iterator si;
2225 basic_block bb = gimple_bb (iv_phi);
2228 vectype = get_vectype_for_scalar_type (scalar_type);
2229 gcc_assert (vectype);
2230 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2231 ncopies = vf / nunits;
2233 gcc_assert (phi_info);
2234 gcc_assert (ncopies >= 1);
2236 /* Find the first insertion point in the BB. */
2237 si = gsi_after_labels (bb);
2239 if (INTEGRAL_TYPE_P (scalar_type))
2240 step_expr = build_int_cst (scalar_type, 0);
2241 else if (POINTER_TYPE_P (scalar_type))
2242 step_expr = build_int_cst (sizetype, 0);
  else
    step_expr = build_real (scalar_type, dconst0);
2246 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2247 if (nested_in_vect_loop_p (loop, iv_phi))
    {
      nested_in_vect_loop = true;
      iv_loop = loop->inner;
    }
  else
    iv_loop = loop;
  gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2256 latch_e = loop_latch_edge (iv_loop);
2257 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2259 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
2260 gcc_assert (access_fn);
2261 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
                                    &init_expr, &step_expr);
  gcc_assert (ok);
2264 pe = loop_preheader_edge (iv_loop);
2266 /* Create the vector that holds the initial_value of the induction. */
2267 if (nested_in_vect_loop)
2269 /* iv_loop is nested in the loop to be vectorized. init_expr had already
2270 been created during vectorization of previous stmts; We obtain it from
2271 the STMT_VINFO_VEC_STMT of the defining stmt. */
2272 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi, loop_preheader_edge (iv_loop));
2273 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
2277 /* iv_loop is the loop to be vectorized. Create:
2278 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
2279 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
2280 add_referenced_var (new_var);
2282 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
      if (stmts)
        {
          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }

      t = NULL_TREE;
      t = tree_cons (NULL_TREE, init_expr, t);
2291 for (i = 1; i < nunits; i++)
2293 /* Create: new_name_i = new_name + step_expr */
2294 enum tree_code code = POINTER_TYPE_P (scalar_type)
2295 ? POINTER_PLUS_EXPR : PLUS_EXPR;
2296 init_stmt = gimple_build_assign_with_ops (code, new_var,
2297 new_name, step_expr);
2298 new_name = make_ssa_name (new_var, init_stmt);
2299 gimple_assign_set_lhs (init_stmt, new_name);
2301 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
2302 gcc_assert (!new_bb);
2304 if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "created new init_stmt: ");
              print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
            }
2309 t = tree_cons (NULL_TREE, new_name, t);
2311 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
2312 vec = build_constructor_from_list (vectype, nreverse (t));
2313 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
2317 /* Create the vector that holds the step of the induction. */
2318 if (nested_in_vect_loop)
2319 /* iv_loop is nested in the loop to be vectorized. Generate:
2320 vec_step = [S, S, S, S] */
2321 new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized. Generate:
2325 vec_step = [VF*S, VF*S, VF*S, VF*S] */
2326 expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                              expr, step_expr);
    }
  t = NULL_TREE;
  for (i = 0; i < nunits; i++)
2333 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2334 gcc_assert (CONSTANT_CLASS_P (new_name));
2335 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
2336 gcc_assert (stepvectype);
2337 vec = build_vector (stepvectype, t);
2338 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
  /* Create the following def-use cycle:
     loop:
       vec_iv = PHI <vec_init, vec_loop>
       ...
       vec_loop = vec_iv + vec_step;  */
2352 /* Create the induction-phi that defines the induction-operand. */
2353 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
2354 add_referenced_var (vec_dest);
2355 induction_phi = create_phi_node (vec_dest, iv_loop->header);
2356 set_vinfo_for_stmt (induction_phi,
2357 new_stmt_vec_info (induction_phi, loop_vinfo));
2358 induc_def = PHI_RESULT (induction_phi);
2360 /* Create the iv update inside the loop */
2361 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2362 induc_def, vec_step);
2363 vec_def = make_ssa_name (vec_dest, new_stmt);
2364 gimple_assign_set_lhs (new_stmt, vec_def);
2365 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2366 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));
2368 /* Set the arguments of the phi node: */
2369 add_phi_arg (induction_phi, vec_init, pe);
2370 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop));
2373 /* In case that vectorization factor (VF) is bigger than the number
2374 of elements that we can fit in a vectype (nunits), we have to generate
2375 more than one vector stmt - i.e - we need to "unroll" the
2376 vector stmt by a factor VF/nunits. For more details see documentation
2377 in vectorizable_operation. */
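  /* Illustrative instance (not in the original source): with VF == 8 on
     a 4-element vector type we get ncopies == 2; the code below then adds
     a second vector iv advanced by [nunits*S, nunits*S, nunits*S, nunits*S]
     == [4*S, ...], so the two copies together cover 8 consecutive iv
     values per vector iteration.  */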
  if (ncopies > 1)
    {
      stmt_vec_info prev_stmt_vinfo;
2382 /* FORNOW. This restriction should be relaxed. */
2383 gcc_assert (!nested_in_vect_loop);
2385 /* Create the vector that holds the step of the induction. */
2386 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                              expr, step_expr);
      t = NULL_TREE;
2390 for (i = 0; i < nunits; i++)
2391 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2392 gcc_assert (CONSTANT_CLASS_P (new_name));
2393 vec = build_vector (stepvectype, t);
2394 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2396 vec_def = induc_def;
2397 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
2398 for (i = 1; i < ncopies; i++)
        {
          /* vec_i = vec_prev + vec_step  */
          new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
                                                   vec_def, vec_step);
2403 vec_def = make_ssa_name (vec_dest, new_stmt);
2404 gimple_assign_set_lhs (new_stmt, vec_def);
2406 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2407 set_vinfo_for_stmt (new_stmt,
2408 new_stmt_vec_info (new_stmt, loop_vinfo));
2409 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
          prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
        }
    }
2414 if (nested_in_vect_loop)
2416 /* Find the loop-closed exit-phi of the induction, and record
2417 the final vector of induction results: */
      exit_phi = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
        if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
          {
            exit_phi = USE_STMT (use_p);
            break;
          }
      if (exit_phi)
        {
          stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
2430 /* FORNOW. Currently not supporting the case that an inner-loop induction
2431 is not used in the outer-loop (i.e. only outside the outer-loop). */
2432 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
2433 && !STMT_VINFO_LIVE_P (stmt_vinfo));
2435 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
2436 if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vector of inductions after inner-loop:");
              print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
            }
        }
2445 if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "transform induction: created def-use cycle: ");
      print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
      fprintf (vect_dump, "\n");
      print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
    }
  STMT_VINFO_VEC_STMT (phi_info) = induction_phi;

  return induc_def;
}
2458 /* Function get_initial_def_for_reduction
2461 STMT - a stmt that performs a reduction operation in the loop.
2462 INIT_VAL - the initial value of the reduction variable
2465 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
2466 of the reduction (used for adjusting the epilog - see below).
2467 Return a vector variable, initialized according to the operation that STMT
2468 performs. This vector will be used as the initial value of the
2469 vector of partial results.
2471 Option1 (adjust in epilog): Initialize the vector as follows:
     add: [0,0,...,0,0]
     mult: [1,1,...,1,1]
     min/max: [init_val,init_val,..,init_val,init_val]
2475 bit and/or: [init_val,init_val,..,init_val,init_val]
2476 and when necessary (e.g. add/mult case) let the caller know
2477 that it needs to adjust the result by init_val.
2479 Option2: Initialize the vector as follows:
2480 add: [0,0,...,0,init_val]
2481 mult: [1,1,...,1,init_val]
2482 min/max: [init_val,init_val,...,init_val]
2483 bit and/or: [init_val,init_val,...,init_val]
2484 and no adjustments are needed.
   For example, for the following code:

     s = init_val;
     for (i = 0; i < n; i++)
       s = s + a[i];

   STMT is 's = s + a[i]', and the reduction variable is 's'.
2493 For a vector of 4 units, we want to return either [0,0,0,init_val],
2494 or [0,0,0,0] and let the caller know that it needs to adjust
2495 the result at the end by 'init_val'.
2497 FORNOW, we are using the 'adjust in epilog' scheme, because this way the
2498 initialization vector is simpler (same element in all entries).
2499 A cost model should help decide between these two schemes. */
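/* Illustrative instance (not in the original source): for a product
   reduction with init_val == 5 on a 4-element vector, Option1 starts
   the vector of partial results at [1,1,1,1] and reports an adjustment
   of 5 (the caller multiplies the final scalar result by it), whereas
   Option2 would start at [1,1,1,5] with no later adjustment.  */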
static tree
get_initial_def_for_reduction (gimple stmt, tree init_val, tree *adjustment_def)
2504 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2505 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2506 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2507 tree scalar_type = TREE_TYPE (init_val);
2508 tree vectype = get_vectype_for_scalar_type (scalar_type);
2510 enum tree_code code = gimple_assign_rhs_code (stmt);
2515 bool nested_in_vect_loop = false;
2517 gcc_assert (vectype);
2518 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2520 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
2521 || SCALAR_FLOAT_TYPE_P (scalar_type));
2522 if (nested_in_vect_loop_p (loop, stmt))
2523 nested_in_vect_loop = true;
  else
    gcc_assert (loop == (gimple_bb (stmt))->loop_father);
  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case PLUS_EXPR:
2532 if (nested_in_vect_loop)
      else
        *adjustment_def = init_val;
2535 *adjustment_def = init_val;
2536 /* Create a vector of zeros for init_def. */
2537 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2538 def_for_init = build_real (scalar_type, dconst0);
      else
        def_for_init = build_int_cst (scalar_type, 0);
2542 for (i = nunits - 1; i >= 0; --i)
2543 t = tree_cons (NULL_TREE, def_for_init, t);
2544 init_def = build_vector (vectype, t);
      break;

    case MIN_EXPR:
    case MAX_EXPR:
      *adjustment_def = NULL_TREE;
      init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
      break;
    }

  return init_def;
}
2561 /* Function vect_create_epilog_for_reduction
   Create code at the loop-epilog to finalize the result of a reduction
   computation.
2566 VECT_DEF is a vector of partial results.
2567 REDUC_CODE is the tree-code for the epilog reduction.
2568 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
2569 number of elements that we can fit in a vectype (nunits). In this case
2570 we have to generate more than one vector stmt - i.e - we need to "unroll"
2571 the vector stmt by a factor VF/nunits. For more details see documentation
2572 in vectorizable_operation.
2573 STMT is the scalar reduction stmt that is being vectorized.
2574 REDUCTION_PHI is the phi-node that carries the reduction computation.
   This function:
   1. Creates the reduction def-use cycle: sets the arguments for
      REDUCTION_PHI:
2579 The loop-entry argument is the vectorized initial-value of the reduction.
2580 The loop-latch argument is VECT_DEF - the vector of partial sums.
2581 2. "Reduces" the vector of partial results VECT_DEF into a single result,
2582 by applying the operation specified by REDUC_CODE if available, or by
2583 other means (whole-vector shifts or a scalar loop).
2584 The function also creates a new phi node at the loop exit to preserve
2585 loop-closed form, as illustrated below.
2587 The flow at the entry to this function:
        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          use <s_out0>
          use <s_out0>
2598 The above is transformed by this function into:
        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>  */
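/* Illustrative walk-through (not in the original source) of the
   whole-vector-shift variant of step 1 (case 2 below), for
   v_out1 = [a0,a1,a2,a3]:

     t = vec_shift <v_out1, VS/2>     ; t = [a2,a3, _, _]
     v = vop <v_out1, t>              ; v = [a0+a2, a1+a3, _, _]
     t = vec_shift <v, VS/4>          ; t = [a1+a3, _, _, _]
     v = vop <v, t>                   ; v = [a0+a1+a2+a3, _, _, _]

   after which element 0 holds the scalar result that step 2 extracts.  */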
static void
vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
                                  int ncopies,
                                  enum tree_code reduc_code,
2618 gimple reduction_phi)
2620 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2621 stmt_vec_info prev_phi_info;
2623 enum machine_mode mode;
2624 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2625 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2626 basic_block exit_bb;
2629 gimple new_phi = NULL, phi;
2630 gimple_stmt_iterator exit_gsi;
2632 tree new_temp = NULL_TREE;
2634 gimple epilog_stmt = NULL;
2635 tree new_scalar_dest, new_dest;
2637 tree bitsize, bitpos, bytesize;
2638 enum tree_code code = gimple_assign_rhs_code (stmt);
2639 tree adjustment_def;
2640 tree vec_initial_def, def;
2642 imm_use_iterator imm_iter;
2643 use_operand_p use_p;
2644 bool extract_scalar_result = false;
2645 tree reduction_op, expr;
2648 bool nested_in_vect_loop = false;
2649 VEC(gimple,heap) *phis = NULL;
2650 enum vect_def_type dt = vect_unknown_def_type;
2653 if (nested_in_vect_loop_p (loop, stmt))
    {
      loop = loop->inner;
      nested_in_vect_loop = true;
    }
  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
                  == ternary_op);
      reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
      break;
    case GIMPLE_UNARY_RHS:
      reduction_op = gimple_assign_rhs1 (stmt);
      break;
    case GIMPLE_BINARY_RHS:
      reduction_op = gimple_assign_rhs2 (stmt);
      break;
    default:
      gcc_unreachable ();
    }
2675 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2676 gcc_assert (vectype);
2677 mode = TYPE_MODE (vectype);
2679 /*** 1. Create the reduction def-use cycle ***/
2681 /* For the case of reduction, vect_get_vec_def_for_operand returns
2682 the scalar def before the loop, that defines the initial value
2683 of the reduction variable. */
  vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
                                                  &adjustment_def);
2687 phi = reduction_phi;
2689 for (j = 0; j < ncopies; j++)
2691 /* 1.1 set the loop-entry arg of the reduction-phi: */
2692 add_phi_arg (phi, vec_initial_def, loop_preheader_edge (loop));
2694 /* 1.2 set the loop-latch arg for the reduction-phi: */
      if (j == 0)
        def = vect_def;
      else
        def = vect_get_vec_def_for_stmt_copy (dt, def);
2697 add_phi_arg (phi, def, loop_latch_edge (loop));
2699 if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "transform reduction: created def-use cycle: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
          fprintf (vect_dump, "\n");
          print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0, TDF_SLIM);
        }
2707 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
2710 /*** 2. Create epilog code
2711 The reduction epilog code operates across the elements of the vector
2712 of partial results computed by the vectorized loop.
2713 The reduction epilog code consists of:
2714 step 1: compute the scalar result in a vector (v_out2)
2715 step 2: extract the scalar result (s_out3) from the vector (v_out2)
2716 step 3: adjust the scalar result (s_out3) if needed.
     Step 1 can be accomplished using one of the following three schemes:
2719 (scheme 1) using reduc_code, if available.
2720 (scheme 2) using whole-vector shifts, if available.
          (scheme 3) using a scalar loop. In this case steps 1+2 above are
                     combined.
2724 The overall epilog code looks like this:
2726 s_out0 = phi <s_loop> # original EXIT_PHI
2727 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
2728 v_out2 = reduce <v_out1> # step 1
2729 s_out3 = extract_field <v_out2, 0> # step 2
2730 s_out4 = adjust_result <s_out3> # step 3
2732 (step 3 is optional, and steps 1 and 2 may be combined).
2733 Lastly, the uses of s_out0 are replaced by s_out4.
2737 /* 2.1 Create new loop-exit-phi to preserve loop-closed form:
2738 v_out1 = phi <v_loop> */
2740 exit_bb = single_exit (loop)->dest;
  def = vect_def;
  prev_phi_info = NULL;
2743 for (j = 0; j < ncopies; j++)
2745 phi = create_phi_node (SSA_NAME_VAR (vect_def), exit_bb);
      set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo));
      if (j == 0)
        new_phi = phi;
      else
        {
          def = vect_get_vec_def_for_stmt_copy (dt, def);
          STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
        }
2754 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
2755 prev_phi_info = vinfo_for_stmt (phi);
2757 exit_gsi = gsi_after_labels (exit_bb);
2759 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
2760 (i.e. when reduc_code is not available) and in the final adjustment
2761 code (if needed). Also get the original scalar reduction variable as
2762 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
2763 represents a reduction pattern), the tree-code and scalar-def are
2764 taken from the original stmt that the pattern-stmt (STMT) replaces.
2765 Otherwise (it is a regular reduction) - the tree-code and scalar-def
2766 are taken from STMT. */
2768 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    /* Regular reduction  */
    orig_stmt = stmt;
  else
    {
      /* Reduction pattern  */
2777 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
2778 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
2779 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
    }

  code = gimple_assign_rhs_code (orig_stmt);
2782 scalar_dest = gimple_assign_lhs (orig_stmt);
2783 scalar_type = TREE_TYPE (scalar_dest);
2784 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
2785 bitsize = TYPE_SIZE (scalar_type);
2786 bytesize = TYPE_SIZE_UNIT (scalar_type);
2789 /* In case this is a reduction in an inner-loop while vectorizing an outer
2790 loop - we don't need to extract a single scalar result at the end of the
2791 inner-loop. The final vector of partial results will be used in the
     vectorized outer-loop, or reduced to a scalar result at the end of the
     outer-loop.  */
2794 if (nested_in_vect_loop)
2795 goto vect_finalize_reduction;
2798 gcc_assert (ncopies == 1);
  /* 2.3 Create the reduction code, using one of the three schemes described
         above.  */
2803 if (reduc_code != ERROR_MARK)
    {
      tree tmp;

      /*** Case 1: Create:
2808 v_out2 = reduc_expr <v_out1> */
2810 if (vect_print_dump_info (REPORT_DETAILS))
2811 fprintf (vect_dump, "Reduce using direct vector reduction.");
2813 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2814 tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
2815 epilog_stmt = gimple_build_assign (vec_dest, tmp);
2816 new_temp = make_ssa_name (vec_dest, epilog_stmt);
2817 gimple_assign_set_lhs (epilog_stmt, new_temp);
2818 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
2820 extract_scalar_result = true;
    }
  else
    {
      enum tree_code shift_code = ERROR_MARK;
2825 bool have_whole_vector_shift = true;
2827 int element_bitsize = tree_low_cst (bitsize, 1);
2828 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2831 if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
        shift_code = VEC_RSHIFT_EXPR;
      else
        have_whole_vector_shift = false;
2836 /* Regardless of whether we have a whole vector shift, if we're
2837 emulating the operation via tree-vect-generic, we don't want
2838 to use it. Only the first round of the reduction is likely
2839 to still be profitable via emulation. */
2840 /* ??? It might be better to emit a reduction tree code here, so that
2841 tree-vect-generic can expand the first round via bit tricks. */
2842 if (!VECTOR_MODE_P (mode))
2843 have_whole_vector_shift = false;
      else
        {
          optab optab = optab_for_tree_code (code, vectype, optab_default);
          if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing)
            have_whole_vector_shift = false;
        }
2851 if (have_whole_vector_shift)
        {
          /*** Case 2: Create:
2854 for (offset = VS/2; offset >= element_size; offset/=2)
2856 Create: va' = vec_shift <va, offset>
2857 Create: va = vop <va, va'>
2860 if (vect_print_dump_info (REPORT_DETAILS))
2861 fprintf (vect_dump, "Reduce using vector shifts");
2863 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2864 new_temp = PHI_RESULT (new_phi);
2866 for (bit_offset = vec_size_in_bits/2;
               bit_offset >= element_bitsize;
               bit_offset /= 2)
            {
2870 tree bitpos = size_int (bit_offset);
              epilog_stmt = gimple_build_assign_with_ops (shift_code, vec_dest,
                                                          new_temp, bitpos);
2873 new_name = make_ssa_name (vec_dest, epilog_stmt);
2874 gimple_assign_set_lhs (epilog_stmt, new_name);
2875 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
2877 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
2878 new_name, new_temp);
2879 new_temp = make_ssa_name (vec_dest, epilog_stmt);
2880 gimple_assign_set_lhs (epilog_stmt, new_temp);
2881 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }
          extract_scalar_result = true;
        }
      else
        {
          tree rhs;

          /*** Case 3: Create:
2891 s = extract_field <v_out2, 0>
2892 for (offset = element_size;
2893 offset < vector_size;
2894 offset += element_size;)
2896 Create: s' = extract_field <v_out2, offset>
2897 Create: s = op <s, s'>
2900 if (vect_print_dump_info (REPORT_DETAILS))
2901 fprintf (vect_dump, "Reduce using scalar code. ");
2903 vec_temp = PHI_RESULT (new_phi);
2904 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
          rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
                        bitsize_zero_node);
2907 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
2908 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
2909 gimple_assign_set_lhs (epilog_stmt, new_temp);
2910 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
2912 for (bit_offset = element_bitsize;
2913 bit_offset < vec_size_in_bits;
2914 bit_offset += element_bitsize)
            {
              tree bitpos = bitsize_int (bit_offset);
              tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
                                 bitpos);
2920 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
2921 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
2922 gimple_assign_set_lhs (epilog_stmt, new_name);
2923 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
              epilog_stmt = gimple_build_assign_with_ops (code,
                                                          new_scalar_dest,
                                                          new_name, new_temp);
2928 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
2929 gimple_assign_set_lhs (epilog_stmt, new_temp);
2930 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }

          extract_scalar_result = false;
        }
    }
2937 /* 2.4 Extract the final scalar result. Create:
2938 s_out3 = extract_field <v_out2, bitpos> */
2940 if (extract_scalar_result)
    {
      tree rhs;

      gcc_assert (!nested_in_vect_loop);
2945 if (vect_print_dump_info (REPORT_DETAILS))
2946 fprintf (vect_dump, "extract scalar result");
2948 if (BYTES_BIG_ENDIAN)
2949 bitpos = size_binop (MULT_EXPR,
2950 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
2951 TYPE_SIZE (scalar_type));
2953 bitpos = bitsize_zero_node;
2955 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
2956 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
2957 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
2958 gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    }
2962 vect_finalize_reduction:
2964 /* 2.5 Adjust the final result by the initial value of the reduction
2965 variable. (When such adjustment is not needed, then
2966 'adjustment_def' is zero). For example, if code is PLUS we create:
2967 new_temp = loop_exit_def + adjustment_def */
  if (adjustment_def)
    {
      if (nested_in_vect_loop)
        {
2973 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
2974 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
2975 new_dest = vect_create_destination_var (scalar_dest, vectype);
        }
      else
        {
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
2980 expr = build2 (code, scalar_type, new_temp, adjustment_def);
2981 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
        }
      epilog_stmt = gimple_build_assign (new_dest, expr);
2984 new_temp = make_ssa_name (new_dest, epilog_stmt);
2985 gimple_assign_set_lhs (epilog_stmt, new_temp);
2986 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    }
2991 /* 2.6 Handle the loop-exit phi */
2993 /* Replace uses of s_out0 with uses of s_out3:
2994 Find the loop-closed-use at the loop exit of the original scalar result.
2995 (The reduction result is expected to have two immediate uses - one at the
2996 latch block, and one at the loop exit). */
2997 phis = VEC_alloc (gimple, heap, 10);
2998 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
    if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
      {
        exit_phi = USE_STMT (use_p);
        VEC_quick_push (gimple, phis, exit_phi);
      }
3006 /* We expect to have found an exit_phi because of loop-closed-ssa form. */
3007 gcc_assert (!VEC_empty (gimple, phis));
3009 for (i = 0; VEC_iterate (gimple, phis, i, exit_phi); i++)
    {
      if (nested_in_vect_loop)
        {
          stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3015 /* FORNOW. Currently not supporting the case that an inner-loop
3016 reduction is not used in the outer-loop (but only outside the
3018 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
3019 && !STMT_VINFO_LIVE_P (stmt_vinfo));
3021 epilog_stmt = adjustment_def ? epilog_stmt : new_phi;
3022 STMT_VINFO_VEC_STMT (stmt_vinfo) = epilog_stmt;
3023 set_vinfo_for_stmt (epilog_stmt,
3024 new_stmt_vec_info (epilog_stmt, loop_vinfo));
          if (adjustment_def)
            STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
                STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
          continue;
        }
3031 /* Replace the uses: */
3032 orig_name = PHI_RESULT (exit_phi);
3033 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3034 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
          SET_USE (use_p, new_temp);
    }
3037 VEC_free (gimple, heap, phis);
3041 /* Function vectorizable_reduction.
3043 Check if STMT performs a reduction operation that can be vectorized.
3044 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3045 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3046 Return FALSE if not a vectorizable STMT, TRUE otherwise.
3048 This function also handles reduction idioms (patterns) that have been
3049 recognized in advance during vect_pattern_recog. In this case, STMT may be
3051 X = pattern_expr (arg0, arg1, ..., X)
   and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
3053 sequence that had been detected and replaced by the pattern-stmt (STMT).
3055 In some cases of reduction patterns, the type of the reduction variable X is
3056 different than the type of the other arguments of STMT.
3057 In such cases, the vectype that is used when transforming STMT into a vector
3058 stmt is different than the vectype that is used to determine the
3059 vectorization factor, because it consists of a different number of elements
3060 than the actual number of elements that are being operated upon in parallel.
3062 For example, consider an accumulation of shorts into an int accumulator.
3063 On some targets it's possible to vectorize this pattern operating on 8
3064 shorts at a time (hence, the vectype for purposes of determining the
3065 vectorization factor should be V8HI); on the other hand, the vectype that
3066 is used to create the vector form is actually V4SI (the type of the result).
3068 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
3069 indicates what is the actual level of parallelism (V8HI in the example), so
3070 that the right vectorization factor would be derived. This vectype
3071 corresponds to the type of arguments to the reduction stmt, and should *NOT*
3072 be used to create the vectorized stmt. The right vectype for the vectorized
3073 stmt is obtained from the type of the result X:
3074 get_vectype_for_scalar_type (TREE_TYPE (X))
3076 This means that, contrary to "regular" reductions (or "regular" stmts in
3077 general), the following equation:
3078 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
3079 does *NOT* necessarily hold for reduction patterns. */
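/* Illustrative instance (not in the original source): for the widening
   summation "int_acc += (int) short_x" recognized as a pattern, the
   vectype recorded in STMT_VINFO_VECTYPE would be V8HI (eight shorts
   determine the vectorization factor), while the vectorized stmt itself
   is built with get_vectype_for_scalar_type (int), i.e. V4SI.  */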
static bool
vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt)
3087 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
3088 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3089 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3090 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3091 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3092 enum tree_code code, orig_code, epilog_reduc_code;
3093 enum machine_mode vec_mode;
3095 optab optab, reduc_optab;
3096 tree new_temp = NULL_TREE;
3099 enum vect_def_type dt;
3100 gimple new_phi = NULL;
3104 stmt_vec_info orig_stmt_info;
3105 tree expr = NULL_TREE;
3107 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3108 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3110 stmt_vec_info prev_stmt_info, prev_phi_info;
3111 gimple first_phi = NULL;
3112 bool single_defuse_cycle = false;
3114 gimple new_stmt = NULL;
  if (nested_in_vect_loop_p (loop, stmt))
    loop = loop->inner;
3121 gcc_assert (ncopies >= 1);
3123 /* FORNOW: SLP not supported. */
  if (STMT_SLP_TYPE (stmt_info))
    return false;
3127 /* 1. Is vectorizable reduction? */
3129 /* Not supportable if the reduction variable is used in the loop. */
  if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer)
    return false;
3133 /* Reductions that are not used even in an enclosing outer-loop,
3134 are expected to be "live" (used out of the loop). */
3135 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
      && !STMT_VINFO_LIVE_P (stmt_info))
    return false;
3139 /* Make sure it was already recognized as a reduction computation. */
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def)
    return false;
3143 /* 2. Has this been recognized as a reduction pattern?
3145 Check if STMT represents a pattern that has been recognized
3146 in earlier analysis stages. For stmts that represent a pattern,
3147 the STMT_VINFO_RELATED_STMT field records the last stmt in
3148 the original sequence that constitutes the pattern. */
3150 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (orig_stmt)
    {
      orig_stmt_info = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
      gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
    }
3159 /* 3. Check the operands of the operation. The first operands are defined
3160 inside the loop body. The last operand is the reduction variable,
3161 which is defined by the loop-header-phi. */
3163 gcc_assert (is_gimple_assign (stmt));
3166 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
3169 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
3170 if (op_type == ternary_op)
        {
          tree rhs = gimple_assign_rhs1 (stmt);
3173 ops[0] = TREE_OPERAND (rhs, 0);
3174 ops[1] = TREE_OPERAND (rhs, 1);
3175 ops[2] = TREE_OPERAND (rhs, 2);
          code = TREE_CODE (rhs);
        }
      else
        return false;
      break;
3182 case GIMPLE_BINARY_RHS:
3183 code = gimple_assign_rhs_code (stmt);
3184 op_type = TREE_CODE_LENGTH (code);
3185 gcc_assert (op_type == binary_op);
3186 ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);
      break;

    case GIMPLE_UNARY_RHS:
      return false;

    default:
      gcc_unreachable ();
    }
3197 scalar_dest = gimple_assign_lhs (stmt);
3198 scalar_type = TREE_TYPE (scalar_dest);
3199 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
      && !SCALAR_FLOAT_TYPE_P (scalar_type))
    return false;
3203 /* All uses but the last are expected to be defined in the loop.
3204 The last use is the reduction variable. */
3205 for (i = 0; i < op_type-1; i++)
    {
      is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt,
                                          &def, &dt);
3209 gcc_assert (is_simple_use);
3210 if (dt != vect_internal_def
3211 && dt != vect_external_def
3212 && dt != vect_constant_def
          && dt != vect_induction_def)
        return false;
    }
  is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt, &def,
                                      &dt);
3219 gcc_assert (is_simple_use);
3220 gcc_assert (dt == vect_reduction_def);
3221 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
  if (orig_stmt)
    gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo, def_stmt));
  else
    gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, def_stmt));
  if (STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
    return false;
3230 /* 4. Supportable by target? */
3232 /* 4.1. check support for the operation in the loop */
3233 optab = optab_for_tree_code (code, vectype, optab_default);
  if (!optab)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no optab.");
      return false;
    }
3240 vec_mode = TYPE_MODE (vectype);
3241 if (optab_handler (optab, vec_mode)->insn_code == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
3244 fprintf (vect_dump, "op not supported by target.");
3245 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
3246 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
          < vect_min_worthwhile_factor (code))
        return false;
3249 if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "proceeding using word mode.");
    }
3253 /* Worthwhile without SIMD support? */
3254 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
3255 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3256 < vect_min_worthwhile_factor (code))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not worthwhile without SIMD support.");
      return false;
    }
3263 /* 4.2. Check support for the epilog operation.
3265 If STMT represents a reduction pattern, then the type of the
3266 reduction variable may be different than the type of the rest
3267 of the arguments. For example, consider the case of accumulation
3268 of shorts into an int accumulator; The original code:
3269 S1: int_a = (int) short_a;
3270 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
        was replaced with:
                  STMT: int_acc = widen_sum <short_a, int_acc>

        This means that:
3276 1. The tree-code that is used to create the vector operation in the
3277 epilog code (that reduces the partial results) is not the
3278 tree-code of STMT, but is rather the tree-code of the original
3279 stmt from the pattern that STMT is replacing. I.e, in the example
          above we want to use 'widen_sum' in the loop, but 'plus' in the
          epilog.
3282 2. The type (mode) we use to check available target support
3283 for the vector operation to be created in the *epilog*, is
3284 determined by the type of the reduction variable (in the example
3285 above we'd check this: plus_optab[vect_int_mode]).
3286 However the type (mode) we use to check available target support
3287 for the vector operation to be created *inside the loop*, is
3288 determined by the type of the other arguments to STMT (in the
3289 example we'd check this: widen_sum_optab[vect_short_mode]).
3291 This is contrary to "regular" reductions, in which the types of all
3292 the arguments are the same as the type of the reduction variable.
3293 For "regular" reductions we can therefore use the same vector type
3294 (and also the same tree-code) when generating the epilog code and
3295 when generating the code inside the loop. */
  if (orig_stmt)
    {
      /* This is a reduction pattern: get the vectype from the type of the
3300 reduction variable, and get the tree-code from orig_stmt. */
3301 orig_code = gimple_assign_rhs_code (orig_stmt);
      vectype = get_vectype_for_scalar_type (TREE_TYPE (def));
      if (!vectype)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "unsupported data-type ");
              print_generic_expr (vect_dump, TREE_TYPE (def), TDF_SLIM);
            }
          return false;
        }
      vec_mode = TYPE_MODE (vectype);
    }
  else
    /* Regular reduction: the same vectype and tree-code as used for the
       vector code inside the loop can be used for the epilog code.  */
    orig_code = code;
  if (!reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
    return false;

  reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype,
                                     optab_default);
  if (!reduc_optab)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no optab for reduction.");
      epilog_reduc_code = ERROR_MARK;
    }
  if (optab_handler (reduc_optab, vec_mode)->insn_code == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc op not supported by target.");
      epilog_reduc_code = ERROR_MARK;
    }
3338 if (!vec_stmt) /* transformation not required. */
    {
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
      if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
        return false;
      return true;
    }

  /** Transform **/
3348 if (vect_print_dump_info (REPORT_DETAILS))
3349 fprintf (vect_dump, "transform reduction.");
3351 /* Create the destination vector */
3352 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3354 /* In case the vectorization factor (VF) is bigger than the number
3355 of elements that we can fit in a vectype (nunits), we have to generate
3356 more than one vector stmt - i.e - we need to "unroll" the
3357 vector stmt by a factor VF/nunits. For more details see documentation
3358 in vectorizable_operation. */
3360 /* If the reduction is used in an outer loop we need to generate
     VF intermediate results, like so (e.g. for ncopies=2):
        r0 = phi (init, r0)
        r1 = phi (init, r1)
        r0 = x0 + r0;
        r1 = x1 + r1;
    (i.e. we generate VF results in 2 registers).
3367 In this case we have a separate def-use cycle for each copy, and therefore
3368 for each copy we get the vector def for the reduction variable from the
3369 respective phi node created for this copy.
3371 Otherwise (the reduction is unused in the loop nest), we can combine
     together intermediate results, like so (e.g. for ncopies=2):
        r = phi (init, r)
        r = x0 + r;
        r = x1 + r;
   (i.e. we generate VF/2 results in a single register).
3377 In this case for each copy we get the vector def for the reduction variable
3378 from the vectorized reduction operation generated in the previous iteration.
3381 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
    {
      single_defuse_cycle = true;
      epilog_copies = 1;
    }
  else
    epilog_copies = ncopies;
3389 prev_stmt_info = NULL;
3390 prev_phi_info = NULL;
3391 for (j = 0; j < ncopies; j++)
    {
      if (j == 0 || !single_defuse_cycle)
        {
          /* Create the reduction-phi that defines the reduction-operand.  */
3396 new_phi = create_phi_node (vec_dest, loop->header);
          set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo));
        }

      /* Handle uses.  */
      if (j == 0)
        {
          loop_vec_def0 = vect_get_vec_def_for_operand (ops[0], stmt, NULL);
3404 if (op_type == ternary_op)
3406 loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt, NULL);
3409 /* Get the vector def for the reduction variable from the phi node */
3410 reduc_def = PHI_RESULT (new_phi);
3411 first_phi = new_phi;
        }
      else
        {
          enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
3416 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
3417 if (op_type == ternary_op)
3418 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def1);
3420 if (single_defuse_cycle)
3421 reduc_def = gimple_assign_lhs (new_stmt);
          else
            reduc_def = PHI_RESULT (new_phi);
          STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
        }

      /* Arguments are ready.  Create the new vector stmt.  */
3429 if (op_type == binary_op)
3430 expr = build2 (code, vectype, loop_vec_def0, reduc_def);
      else
        expr = build3 (code, vectype, loop_vec_def0, loop_vec_def1,
                       reduc_def);
3434 new_stmt = gimple_build_assign (vec_dest, expr);
3435 new_temp = make_ssa_name (vec_dest, new_stmt);
3436 gimple_assign_set_lhs (new_stmt, new_temp);
3437 vect_finish_stmt_generation (stmt, new_stmt, gsi);
      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3443 prev_stmt_info = vinfo_for_stmt (new_stmt);
      prev_phi_info = vinfo_for_stmt (new_phi);
    }
3447 /* Finalize the reduction-phi (set its arguments) and create the
3448 epilog reduction code. */
3449 if (!single_defuse_cycle)
3450 new_temp = gimple_assign_lhs (*vec_stmt);
3451 vect_create_epilog_for_reduction (new_temp, stmt, epilog_copies,
                                     epilog_reduc_code, first_phi);
  return true;
}
3456 /* Function vect_min_worthwhile_factor.
3458 For a loop where we could vectorize the operation indicated by CODE,
3459 return the minimum vectorization factor that makes it worthwhile
3460 to use generic vectors. */
3462 vect_min_worthwhile_factor (enum tree_code code)
3483 /* Function vectorizable_induction
3485 Check if PHI performs an induction computation that can be vectorized.
3486 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
3487 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
3488 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
bool
vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                        gimple *vec_stmt)
3494 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
3495 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3496 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3497 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3498 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3499 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3502 gcc_assert (ncopies >= 1);
3503 /* FORNOW. This restriction should be relaxed. */
3504 if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in nested loop.");
      return false;
    }
  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;
3514 /* FORNOW: SLP not supported. */
  if (STMT_SLP_TYPE (stmt_info))
    return false;
3518 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;
3523 if (!vec_stmt) /* transformation not required. */
    {
      STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
3526 if (vect_print_dump_info (REPORT_DETAILS))
3527 fprintf (vect_dump, "=== vectorizable_induction ===");
      vect_model_induction_cost (stmt_info, ncopies);
      return true;
    }

  /** Transform **/
3534 if (vect_print_dump_info (REPORT_DETAILS))
3535 fprintf (vect_dump, "transform induction phi.");
3537 vec_def = get_initial_def_for_induction (phi);
  *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
  return true;
}
3542 /* Function vectorizable_live_operation.
3544 STMT computes a value that is used outside the loop. Check if
3545 it can be supported. */
3548 vectorizable_live_operation (gimple stmt,
3549 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
3550 gimple *vec_stmt ATTRIBUTE_UNUSED)
3552 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3553 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3554 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3560 enum vect_def_type dt;
3561 enum tree_code code;
3562 enum gimple_rhs_class rhs_class;
3564 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;
  if (!is_gimple_assign (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;
  /* FORNOW. CHECKME. */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;
3579 code = gimple_assign_rhs_code (stmt);
3580 op_type = TREE_CODE_LENGTH (code);
3581 rhs_class = get_gimple_rhs_class (code);
3582 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
3583 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
3585 /* FORNOW: support only if all uses are invariant. This means
3586 that the scalar operations can remain in place, unvectorized.
3587 The original last scalar value that they compute will be used. */
3589 for (i = 0; i < op_type; i++)
    {
      if (rhs_class == GIMPLE_SINGLE_RHS)
3592 op = TREE_OPERAND (gimple_op (stmt, 1), i);
      else
        op = gimple_op (stmt, i + 1);
3595 if (op && !vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (dt != vect_external_def && dt != vect_constant_def)
        return false;
    }
  /* No transformation is required for the cases we currently support.  */
  return true;
}
3610 /* Function vect_transform_loop.
3612 The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - create vectorized stmts to replace the scalar
3614 stmts in the loop, and update the loop exit condition. */
3617 vect_transform_loop (loop_vec_info loop_vinfo)
3619 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3620 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
3621 int nbbs = loop->num_nodes;
3622 gimple_stmt_iterator si;
3625 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3627 bool slp_scheduled = false;
3628 unsigned int nunits;
3629 tree cond_expr = NULL_TREE;
3630 gimple_seq cond_expr_stmt_list = NULL;
3631 bool do_peeling_for_loop_bound;
3633 if (vect_print_dump_info (REPORT_DETAILS))
3634 fprintf (vect_dump, "=== vec_transform_loop ===");
3636 /* Peel the loop if there are data refs with unknown alignment.
     Only one data ref with unknown alignment is allowed.  */
3639 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
3640 vect_do_peeling_for_alignment (loop_vinfo);
3642 do_peeling_for_loop_bound
3643 = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3644 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3645 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));
3647 if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
3648 || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
3649 vect_loop_versioning (loop_vinfo,
3650 !do_peeling_for_loop_bound,
3651 &cond_expr, &cond_expr_stmt_list);
  /* CHECKME: we wouldn't need this if we called update_ssa once
     for all loops.  */
3655 bitmap_zero (vect_memsyms_to_rename);
3657 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
3658 compile time constant), or it is a constant that doesn't divide by the
3659 vectorization factor, then an epilog loop needs to be created.
3660 We therefore duplicate the loop: the original loop will be vectorized,
3661 and will compute the first (n/VF) iterations. The second copy of the loop
3662 will remain scalar and will compute the remaining (n%VF) iterations.
3663 (VF is the vectorization factor). */
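  /* Worked instance (illustrative): with VF == 4 and a symbolic n, the
     vectorized loop runs floor (n/4) iterations and the scalar epilog
     loop the remaining n % 4; with a known n == 100 no epilog is needed
     and the ratio below is simply 100/4 == 25.  */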
3665 if (do_peeling_for_loop_bound)
3666 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
3667 cond_expr, cond_expr_stmt_list);
  else
    ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
3670 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
3672 /* 1) Make sure the loop header has exactly two entries
3673 2) Make sure we have a preheader basic block. */
3675 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
3677 split_edge (loop_preheader_edge (loop));
  /* FORNOW: the vectorizer supports only loops whose body consists
     of one basic block (header + empty latch).  When the vectorizer
     supports more involved loop forms, the order in which the BBs are
     traversed will need to be reconsidered.  */
3684 for (i = 0; i < nbbs; i++)
3686 basic_block bb = bbs[i];
3687 stmt_vec_info stmt_info;
3690 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
3692 phi = gsi_stmt (si);
3693 if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "------>vectorizing phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }
3698 stmt_info = vinfo_for_stmt (phi);
3702 if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            continue;
3706 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
3707 != (unsigned HOST_WIDE_INT) vectorization_factor)
3708 && vect_print_dump_info (REPORT_DETAILS))
3709 fprintf (vect_dump, "multiple-types.");
3711 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "transform phi.");
              vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
            }
3719 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
3721 gimple stmt = gsi_stmt (si);
3724 if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "------>vectorizing statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }
3730 stmt_info = vinfo_for_stmt (stmt);
3732 /* vector stmts created in the outer-loop during vectorization of
3733 stmts in an inner-loop may not have a stmt_info, and do not
3734 need to be vectorized. */
          if (!stmt_info)
            {
              gsi_next (&si);
              continue;
            }

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              gsi_next (&si);
              continue;
            }
3748 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
          nunits =
            (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
3751 if (!STMT_SLP_TYPE (stmt_info)
3752 && nunits != (unsigned int) vectorization_factor
3753 && vect_print_dump_info (REPORT_DETAILS))
3754 /* For SLP VF is set according to unrolling factor, and not to
3755 vector size, hence for SLP this print is not valid. */
3756 fprintf (vect_dump, "multiple-types.");
          /* SLP.  Schedule all the SLP instances when the first SLP stmt is
             reached.  */
          if (STMT_SLP_TYPE (stmt_info))
            {
              if (!slp_scheduled)
                {
                  slp_scheduled = true;
3766 if (vect_print_dump_info (REPORT_DETAILS))
3767 fprintf (vect_dump, "=== scheduling SLP instances ===");
                  vect_schedule_slp (loop_vinfo);
                }

              /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
              if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
                {
                  gsi_next (&si);
                  continue;
                }
            }
3780 /* -------- vectorize statement ------------ */
3781 if (vect_print_dump_info (REPORT_DETAILS))
3782 fprintf (vect_dump, "transform statement.");
3784 strided_store = false;
3785 is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
          if (is_store)
            {
              if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
                {
                  /* Interleaving.  If IS_STORE is TRUE, the vectorization of
                     the interleaving chain was completed - free all the
                     stores in the chain.  */
                  vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
                  gsi_remove (&si, true);
                  continue;
                }
              /* Free the attached stmt_vec_info and remove the stmt.  */
              free_stmt_vec_info (stmt);
              gsi_remove (&si, true);
              continue;
            }
          gsi_next (&si);
3809 slpeel_make_loop_iterate_ntimes (loop, ratio);
3811 mark_set_for_renaming (vect_memsyms_to_rename);
3813 /* The memory tags and pointers in vectorized statements need to
3814 have their SSA forms updated. FIXME, why can't this be delayed
3815 until all the loops have been transformed? */
3816 update_ssa (TODO_update_ssa);
3818 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
3819 fprintf (vect_dump, "LOOP VECTORIZED.");
3820 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
3821 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");