/* Loop Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "toplev.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it were manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;

        for (i=0; i<N/8; i++){
          pa[i] = pb[i] + pc[i];
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.
   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.
   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.
   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.
   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "UNITS_PER_SIMD_WORD".  Targets that can
   support different sizes of vectors will, for now, need to specify one
   value for "UNITS_PER_SIMD_WORD".  More flexibility will be added in the
   future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)->insn_code).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
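
/* As an illustration of the optab query described above, a minimal sketch
   (the overall shape follows this era's optab API; 'code' and 'vectype'
   are hypothetical locals, not names defined in this file):

     optab op = optab_for_tree_code (code, vectype, optab_default);
     if (!op
         || optab_handler (op, TYPE_MODE (vectype))->insn_code
            == CODE_FOR_nothing)
       return false;   -- no target support, stmt cannot be vectorized.  */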
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data
   elements that are operated upon in parallel in a single iteration of the
   vectorized loop.  For example, when vectorizing a loop that operates on
   4-byte elements, on a target with vector size (VS) of 16 bytes, the VF is
   set to 4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
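
/* Concretely (an illustrative sketch, not code this pass emits): with
   2-byte shorts and 16-byte vectors, VF = 8, and when N is not a multiple
   of VF the strip-mined loop is followed by a scalar epilogue:

     for (i = 0; i + 8 <= N; i += 8)
       pa[i/8] = pb[i/8] + pc[i/8];      -- one vector stmt per 8 elements
     for (; i < N; i++)
       a[i] = b[i] + c[i];               -- leftover iterations  */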
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  tree vf_vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "vectype: ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "nunits = %d", nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (stmt);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "skip.");
              continue;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: irregular stmt.");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: vector stmt in loop:");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for
                 stmts that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)
                          && !is_pattern_stmt_p (stmt_info));

              scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;
            }
          /* The vectorization factor is according to the smallest
             scalar type (or the largest vector size, but we only
             support one vector size per loop).  */
          scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                       &dummy);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "get vectype for scalar type: ");
              print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
            }
          vf_vectype = get_vectype_for_scalar_type (scalar_type);
          if (!vf_vectype)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: unsupported data-type ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: different sized vector "
                           "types in statement, ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                  fprintf (vect_dump, " and ");
                  print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
                }
              return false;
            }

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vectype: ");
              print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "nunits = %d", nunits);

          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;
        }
    }
  /* TODO: Analyze cost.  Decide if worthwhile to vectorize.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported data-type");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */
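
/* For illustration: for an IV defined by 'i = i0; ...; i += 3', scev
   represents the access function as the chrec {i0, +, 3}_loop, whose
   evolution part is the constant 3 - "simple" in the sense above.  A step
   that itself varies across iterations (a nested chrec) is rejected by the
   tree_is_chrec check below, and a symbolic step fails the INTEGER_CST
   check.  */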
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is constant.  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
                                                           loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ",  init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree dumy;
  VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");
  /* First - identify all inductions.  Reduction detection assumes that all
     the inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (!is_gimple_reg (SSA_NAME_VAR (def)))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn && vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
        {
          VEC_safe_push (gimple, heap, worklist, phi);
          continue;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected induction.");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }
  /* Second - identify all reductions and nested cycles.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      gimple phi = VEC_pop (gimple, worklist);
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_is_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                             &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "Detected double reduction.");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                    vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected vectorizable nested cycle.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                            vect_nested_cycle;
                }
              else
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected reduction.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                           vect_reduction_def;
                }
            }
        }
      else
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Unknown def-use cycle pattern.");
    }

  VEC_free (gimple, heap, worklist);
}
/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables; classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.

   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the
        original scalar loop, so we can't change the order of computation
        when vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed.
   If an expression that represents the number of iterations
   can be constructed, place it in NUMBER_OF_ITERATIONS.
   Return the loop exit condition.  */

static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== get_loop_niters ===");

  niters = number_of_exit_cond_executions (loop);

  if (niters != NULL_TREE
      && niters != chrec_dont_know)
    {
      *number_of_iterations = niters;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "==> get_loop_niters:");
          print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
        }
    }

  return get_loop_exit_condition (loop);
}
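
/* For illustration: for a loop written as 'for (i = 0; i < n; i++)' (in
   the guarded do-while form this pass expects), the expression placed in
   NUMBER_OF_ITERATIONS is roughly 'n' - the number of times the exit
   condition executes - and the returned stmt is the GIMPLE_COND testing
   'i < n' that controls the loop exit.  */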
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}
/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;
  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }
  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (gimple, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;

  return res;
}
/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all
   the stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;

  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }
  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (stmt_info)
            {
              /* Check if this is a "pattern stmt" (introduced by the
                 vectorizer during the pattern recognition pass).  */
              bool remove_stmt_p = false;
              gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
              if (orig_stmt)
                {
                  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
                  if (orig_stmt_info
                      && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
                    remove_stmt_p = true;
                }

              /* Free stmt_vec_info.  */
              free_stmt_vec_info (stmt);

              /* Remove dead "pattern stmts".  */
              if (remove_stmt_p)
                gsi_remove (&si, true);
            }
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (j = 0; VEC_iterate (slp_instance, slp_instances, j, instance); j++)
    vect_free_slp_instance (instance);

  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}
/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest_1 =====");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad inner-loop form.");
      return NULL;
    }

  return loop_vinfo;
}
/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */

static loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_form ===");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: empty loop.");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: multiple nested loops.");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: Bad inner loop.");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: inner-loop count not invariant.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Considering outer-loop vectorization.");
    }
  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        {
          if (!single_exit (loop))
            fprintf (vect_dump, "not vectorized: multiple exits.");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            fprintf (vect_dump, "not vectorized: too many incoming edges.");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: unexpected loop form.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "split exit edge.");
        }
      else
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }
  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: complicated exit condition.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump,
                 "not vectorized: number of iterations cannot be computed.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (chrec_contains_undetermined (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "Infinite number of iterations.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!NITERS_KNOWN_P (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Symbolic number of iterations is ");
          print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
        }
    }
  else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: number of iterations = 0.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, false);
      return NULL;
    }
  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}
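
/* As an illustration of the expected form (a sketch, not a check performed
   here): a source loop such as

     i = 0;
     do
       {
         a[i] = b[i];
         i++;
       }
     while (i < n);

   has a header containing all executable stmts, an empty latch, a single
   exit guarded by the 'i < n' condition, and a computable iteration count,
   so it passes the restrictions verified above.  */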
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_operations ===");
  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (! is_loop_header_bb_p (bb))
            {
              /* Inner-loop loop-closed exit phi in outer-loop vectorization
                 (i.e., a phi in the tail of the outer-loop).
                 FORNOW: we currently don't support the case that these phis
                 are not used in the outer-loop (unless it is double
                 reduction, i.e., this phi is vect_reduction_def), because
                 this case would require us to actually do something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "Unsupported loop-closed phi in outer-loop.");
                  return false;
                }
              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: value used after loop.");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump,
                         "not vectorized: scalar dependence cycle.");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: relevant phi not supported: ");
                  print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
                }
              return false;
            }
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          gcc_assert (stmt_info);

          if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;

          if ((STMT_VINFO_RELEVANT_P (stmt_info)
               || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
              && !PURE_SLP_STMT (stmt_info))
            /* STMT needs both SLP and loop-based vectorization.  */
            only_slp_in_loop = false;
        }
    } /* bbs */
  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump,
                 "All the computation can be taken out of the loop.");
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump,
                 "not vectorized: redundant loop. no profit to vectorize.");
      return false;
    }
  /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
     vectorization factor of the loop is the unrolling factor required by
     the SLP instances.  If that unrolling factor is 1, we say that we
     perform pure SLP on the loop - cross iteration parallelism is not
     exploited.  */
  if (only_slp_in_loop)
    vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
  else
    vectorization_factor = least_common_multiple (vectorization_factor,
                                LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump,
             "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
             vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: iteration count too small.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "vectorization factor.");
      return false;
    }
  /* Analyze cost.  Decide if worthwhile to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: vector version will never be "
                 "profitable.");
      return false;
    }

  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than user specified
     threshold.  */

  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not "
                 "profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "user specified loop bound parameter or minimum "
                 "profitable iterations (whichever is more conservative).");
      return false;
    }
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
      || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "epilog loop required.");
      if (!vect_can_advance_ivs_p (loop_vinfo))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 1.");
          return false;
        }
      if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 2.");
          return false;
        }
    }

  return true;
}
/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */

loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  bool ok;
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest =====");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop already vectorized.");
      return NULL;
    }

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad loop form.");
      return NULL;
    }
  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.

     FORNOW: Handle only simple array references, whose
     alignment can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data references.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected pattern.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "can't determine vectorization factor.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze data dependences between the data-refs in the loop.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data access.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "too long list of versioning for alias "
                 "run-time tests.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }
  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad operation or unsupported loop bound.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

  return loop_vinfo;
}
/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector), or ERROR_MARK if the operation is
      a supported reduction operation, but does not have such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as a reduction.  */

static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
    case MAX_EXPR:
      *reduc_code = REDUC_MAX_EXPR;
      return true;

    case MIN_EXPR:
      *reduc_code = REDUC_MIN_EXPR;
      return true;

    case PLUS_EXPR:
      *reduc_code = REDUC_PLUS_EXPR;
      return true;

    case MULT_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_AND_EXPR:
      *reduc_code = ERROR_MARK;
      return true;

    default:
      return false;
    }
}
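
/* For instance (an illustrative note): for the scalar loop
   'for (i=0; i<N; i++) sum += a[i];' the reduction operation is PLUS_EXPR,
   so *reduc_code is set to REDUC_PLUS_EXPR, which collapses the vector of
   partial sums into a single scalar at the loop exit.  An operation that
   gets ERROR_MARK is still accepted as a reduction, but its final
   cross-lane reduction must be generated by other means, since no single
   reduction tree-code exists for it.  */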
/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_vect_op (gimple stmt, const char *msg)
{
  fprintf (vect_dump, "%s", msg);
  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
/* Function vect_is_simple_reduction

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation (if CHECK_REDUCTION is true)
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation.

   Condition 1 is tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles, if CHECK_REDUCTION is false.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

   a1 = phi < a0, a2 >
   inner loop (def of a3)
   a2 = phi < a3 >  */

static gimple
vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
                          bool check_reduction, bool *double_reduc)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  gimple def_stmt, def1 = NULL, def2 = NULL;
  enum tree_code code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree type;
  tree name;
  int nloop_uses;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool phi_def;

  *double_reduc = false;

  /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
     otherwise, we assume outer loop vectorization.  */
  gcc_assert ((check_reduction && loop == vect_loop)
              || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
  name = PHI_RESULT (phi);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }
  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not ssa_name: ");
          print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
        }
      return NULL;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (!def_stmt)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduction: no def_stmt.");
      return NULL;
    }

  if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
      return NULL;
    }

  if (is_gimple_assign (def_stmt))
    {
      name = gimple_assign_lhs (def_stmt);
      phi_def = false;
    }
  else
    {
      name = PHI_RESULT (def_stmt);
      phi_def = true;
    }
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }
  /* If DEF_STMT is a phi node itself, we expect it to have a single argument
     defined in the inner loop.  */
  if (phi_def)
    {
      op1 = PHI_ARG_DEF (def_stmt, 0);

      if (gimple_phi_num_args (def_stmt) != 1
          || TREE_CODE (op1) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "unsupported phi node definition.");
          return NULL;
        }

      def1 = SSA_NAME_DEF_STMT (op1);
      if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
          && loop->inner
          && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
          && is_gimple_assign (def1))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "detected double reduction: ");

          *double_reduc = true;
          return def_stmt;
        }

      return NULL;
    }
  code = gimple_assign_rhs_code (def_stmt);

  if (check_reduction
      && (!commutative_tree_code (code) || !associative_tree_code (code)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: not commutative/associative: ");
      return NULL;
    }

  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
    {
      if (code != COND_EXPR)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: not binary operation: ");
          return NULL;
        }

      op3 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
      if (COMPARISON_CLASS_P (op3))
        {
          op4 = TREE_OPERAND (op3, 1);
          op3 = TREE_OPERAND (op3, 0);
        }

      op1 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 1);
      op2 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 2);

      if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
          return NULL;
        }
    }
  else
    {
      op1 = gimple_assign_rhs1 (def_stmt);
      op2 = gimple_assign_rhs2 (def_stmt);

      if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
          return NULL;
        }
    }

  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if ((TREE_CODE (op1) == SSA_NAME
       && !types_compatible_p (type, TREE_TYPE (op1)))
      || (TREE_CODE (op2) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op2)))
      || (op3 && TREE_CODE (op3) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op3)))
      || (op4 && TREE_CODE (op4) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op4))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: multiple types: operation type: ");
          print_generic_expr (vect_dump, type, TDF_SLIM);
          fprintf (vect_dump, ", operands types: ");
          print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
          fprintf (vect_dump, ",");
          print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
          if (op3)
            {
              fprintf (vect_dump, ",");
              print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
            }
          if (op4)
            {
              fprintf (vect_dump, ",");
              print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
            }
        }
      return NULL;
    }
  /* Check that it's ok to change the order of the computation.
     Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */

  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
      && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
      return NULL;
    }
  else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
           && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
      return NULL;
    }
  else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt,
                        "reduction: unsafe fixed-point math optimization: ");
      return NULL;
    }
  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this optimization
     3) nested cycle (i.e., outer loop vectorization).  */
  if (TREE_CODE (op1) == SSA_NAME)
    def1 = SSA_NAME_DEF_STMT (op1);

  if (TREE_CODE (op2) == SSA_NAME)
    def2 = SSA_NAME_DEF_STMT (op2);

  if (code != COND_EXPR
      && (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: no defs for operands: ");
      return NULL;
    }
  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_internal_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2 && def2 == phi
      && (code == COND_EXPR
          || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
              && (is_gimple_assign (def1)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                     == vect_induction_def
                  || (gimple_code (def1) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                         == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def1)))))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "detected reduction: ");
      return def_stmt;
    }
  else if (def1 && def1 == phi
           && (code == COND_EXPR
               || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
                   && (is_gimple_assign (def2)
                       || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                          == vect_induction_def
                       || (gimple_code (def2) == GIMPLE_PHI
                           && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                              == vect_internal_def
                           && !is_loop_header_bb_p (gimple_bb (def2)))))))
    {
      if (check_reduction)
        {
          /* Swap operands (just for simplicity - so that the rest of the
             code can assume that the reduction variable is always the last
             (second) operand).  */
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt,
                            "detected reduction: need to swap operands: ");

          swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
                              gimple_assign_rhs2_ptr (def_stmt));
        }
      else
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "detected reduction: ");
        }

      return def_stmt;
    }
  else
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unknown pattern: ");

      return NULL;
    }
}
/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.

   TODO: Take profile info into account before making vectorization
   decisions, if available.  */
int
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
{
  int i;
  int min_profitable_iters;
  int peel_iters_prologue;
  int peel_iters_epilogue;
  int vec_inside_cost = 0;
  int vec_outside_cost = 0;
  int scalar_single_iter_cost = 0;
  int scalar_outside_cost = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
  int peel_guard_costs = 0;
  int innerloop_iters = 0, factor;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;
  /* Cost model disabled.  */
  if (!flag_vect_cost_model)
    {
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model disabled.");
      return 0;
    }

  /* Requires loop versioning tests to handle misalignment.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      vec_outside_cost +=
        VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: Adding cost of checks for loop "
                 "versioning to treat misalignment.\n");
    }

  /* Requires loop versioning with alias checks.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      vec_outside_cost +=
        VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: Adding cost of checks for loop "
                 "versioning aliasing.\n");
    }

  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  /* FORNOW.  */
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      if (bb->loop_father == loop->inner)
        factor = innerloop_iters;
      else
        factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
          /* Skip stmts that are not vectorized inside the loop.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && (!STMT_VINFO_LIVE_P (stmt_info)
                  || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
            continue;
          scalar_single_iter_cost += cost_for_stmt (stmt) * factor;
          vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
                             * factor;
          /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
             some of the "outside" costs are generated inside the
             outer-loop.  */
          vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
        }
    }
  /* Add additional cost for the peeled instructions in prologue and epilogue
     loop.

     FORNOW: If we don't know the value of peel_iters for prologue or
     epilogue at compile-time - we assume it's vf/2 (the worst would be
     vf-1).

     TODO: Build an expression that represents peel_iters for prologue and
     epilogue to be used in a run-time test.  */

  if (byte_misalign < 0)
    {
      peel_iters_prologue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "prologue peel iters set to vf/2.");

      /* If peeling for alignment is unknown, loop bound of main loop
         becomes unknown.  */
      peel_iters_epilogue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "epilogue peel iters set to vf/2 because "
                 "peeling for alignment is unknown.");

      /* If peeled iterations are unknown, count a taken branch and a not
         taken branch per peeled loop.  Even if scalar loop iterations are
         known, vector iterations are not known since peeled prologue
         iterations are not known.  Hence guards remain the same.  */
      peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
                               + TARG_COND_NOT_TAKEN_BRANCH_COST);
    }
2063 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2064 int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
2065 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
2066 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
2068 peel_iters_prologue = nelements - (byte_misalign / element_size);
2071 peel_iters_prologue = 0;
2073 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2075 peel_iters_epilogue = vf/2;
2076 if (vect_print_dump_info (REPORT_COST))
2077 fprintf (vect_dump, "cost model: "
2078 "epilogue peel iters set to vf/2 because "
2079 "loop iterations are unknown .");
2081 /* If peeled iterations are known but number of scalar loop
2082 iterations is unknown, count a taken branch per peeled loop. */
2083 peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST;
2088 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2089 peel_iters_prologue = niters < peel_iters_prologue ?
2090 niters : peel_iters_prologue;
2091 peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2095 vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
2096 + (peel_iters_epilogue * scalar_single_iter_cost)
2099 /* FORNOW: The scalar outside cost is incremented in one of the following ways:
2102 1. The vectorizer checks for alignment and aliasing and generates
2103 a condition that allows dynamic vectorization. A cost model
2104 check is ANDED with the versioning condition. Hence scalar code
2105 path now has the added cost of the versioning check.
2107 if (cost > th & versioning_check)
2110 Hence the run-time scalar cost is incremented by a not-taken branch cost.
2112 2. The vectorizer then checks if a prologue is required. If the
2113 cost model check was not done before during versioning, it has to
2114 be done before the prologue check.
2117 prologue = scalar_iters
2122 if (prologue == num_iters)
2125 Hence the run-time scalar cost is incremented by a taken branch,
2126 plus a not-taken branch, plus a taken branch cost.
2128 3. The vectorizer then checks if an epilogue is required. If the
2129 cost model check was not done before during prologue check, it
2130 has to be done with the epilogue check.
2136 if (prologue == num_iters)
2139 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2142 Hence the run-time scalar cost should be incremented by 2 taken branches.
2145 TODO: The back end may reorder the BBs differently and reverse
2146 conditions/branch directions. Change the estimates below to
2147 something more reasonable. */
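  /* Illustration only (these are not real target costs): with a
     hypothetical TARG_COND_TAKEN_BRANCH_COST of 3 and
     TARG_COND_NOT_TAKEN_BRANCH_COST of 1, case 1 above adds 1 to the
     scalar outside cost, case 2 adds 2*3 + 1 = 7, and case 3 adds
     2*3 = 6, matching the three increments below.  */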
2149 /* If the number of iterations is known and we do not do versioning, we can
2150 decide whether to vectorize at compile time. Hence the scalar version
2151 does not carry cost model guard costs. */
2152 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2153 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2154 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2156 /* Cost model check occurs at versioning. */
2157 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2158 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2159 scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
2162 /* Cost model check occurs at prologue generation. */
2163 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2164 scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST
2165 + TARG_COND_NOT_TAKEN_BRANCH_COST;
2166 /* Cost model check occurs at epilogue generation. */
2168 scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST;
2172 /* Add SLP costs. */
2173 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2174 for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
2176 vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
2177 vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
2180 /* Calculate number of iterations required to make the vector version
2181 profitable, relative to the loop bodies only. The following condition must hold true:
2183 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2185 SIC = scalar iteration cost, VIC = vector iteration cost,
2186 VOC = vector outside cost, VF = vectorization factor,
2187 PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
2188 SOC = scalar outside cost for run time cost model check. */
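  /* Rearranging the condition above (algebra sketch only):

       SIC * niters + SOC > VIC * (niters - PL_ITERS - EP_ITERS) / VF + VOC
       <=>  niters * (SIC * VF - VIC)
              > (VOC - SOC) * VF - VIC * (PL_ITERS + EP_ITERS)
       <=>  niters > ((VOC - SOC) * VF - VIC * (PL_ITERS + EP_ITERS))
                       / (SIC * VF - VIC)

     which is what the code below computes when SIC * VF > VIC.  With the
     made-up values SIC = 2, VIC = 4, VOC = 12, SOC = 0, VF = 4 and no
     peeling, the threshold is 48 / 4 = 12; at 12 iterations both versions
     cost 24, so the first profitable iteration count is 13 (hence the
     increment below when the costs are still equal).  */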
2190 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
2192 if (vec_outside_cost <= 0)
2193 min_profitable_iters = 1;
2196 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2197 - vec_inside_cost * peel_iters_prologue
2198 - vec_inside_cost * peel_iters_epilogue)
2199 / ((scalar_single_iter_cost * vf)
2202 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2203 <= ((vec_inside_cost * min_profitable_iters)
2204 + ((vec_outside_cost - scalar_outside_cost) * vf)))
2205 min_profitable_iters++;
2208 /* vector version will never be profitable. */
2211 if (vect_print_dump_info (REPORT_COST))
2212 fprintf (vect_dump, "cost model: the vector iteration cost = %d "
2213 "divided by the scalar iteration cost = %d "
2214 "is greater or equal to the vectorization factor = %d.",
2215 vec_inside_cost, scalar_single_iter_cost, vf);
2219 if (vect_print_dump_info (REPORT_COST))
2221 fprintf (vect_dump, "Cost model analysis: \n");
2222 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
2224 fprintf (vect_dump, " Vector outside of loop cost: %d\n",
2226 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2227 scalar_single_iter_cost);
2228 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2229 fprintf (vect_dump, " prologue iterations: %d\n",
2230 peel_iters_prologue);
2231 fprintf (vect_dump, " epilogue iterations: %d\n",
2232 peel_iters_epilogue);
2233 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
2234 min_profitable_iters);
2237 min_profitable_iters =
2238 min_profitable_iters < vf ? vf : min_profitable_iters;
2240 /* Because the condition we create is:
2241 if (niters <= min_profitable_iters)
2242 then skip the vectorized loop. */
2243 min_profitable_iters--;
2245 if (vect_print_dump_info (REPORT_COST))
2246 fprintf (vect_dump, " Profitability threshold = %d\n",
2247 min_profitable_iters);
2249 return min_profitable_iters;
2253 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
2254 functions. Design better to avoid maintenance issues. */
2256 /* Function vect_model_reduction_cost.
2258 Models cost for a reduction operation, including the vector ops
2259 generated within the strip-mine loop, the initial definition before
2260 the loop, and the epilogue code that must be generated. */
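/* Rough illustration (not real target costs): for a V4SI reduction
   (nelements = 4) the epilogue amounts to one vector stmt plus one
   vec-to-scalar extract when a direct reduc_code is available, to
   2*log2(4) = 4 vector stmts plus one extract when only whole-vector
   shifts are available, and to 4 extracts plus 3 scalar reduction ops
   otherwise.  */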
2263 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
2267 enum tree_code code;
2270 gimple stmt, orig_stmt;
2272 enum machine_mode mode;
2273 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2274 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2277 /* Cost of reduction op inside loop. */
2278 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST;
2280 stmt = STMT_VINFO_STMT (stmt_info);
2282 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2284 case GIMPLE_SINGLE_RHS:
2285 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2286 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2288 case GIMPLE_UNARY_RHS:
2289 reduction_op = gimple_assign_rhs1 (stmt);
2291 case GIMPLE_BINARY_RHS:
2292 reduction_op = gimple_assign_rhs2 (stmt);
2298 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2301 if (vect_print_dump_info (REPORT_COST))
2303 fprintf (vect_dump, "unsupported data-type ");
2304 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
2309 mode = TYPE_MODE (vectype);
2310 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2313 orig_stmt = STMT_VINFO_STMT (stmt_info);
2315 code = gimple_assign_rhs_code (orig_stmt);
2317 /* Add in cost for initial definition. */
2318 outer_cost += TARG_SCALAR_TO_VEC_COST;
2320 /* Determine cost of epilogue code.
2322 We have a reduction operator that will reduce the vector in one statement.
2323 Also requires scalar extract. */
2325 if (!nested_in_vect_loop_p (loop, orig_stmt))
2327 if (reduc_code != ERROR_MARK)
2328 outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
2331 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2333 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2334 int element_bitsize = tree_low_cst (bitsize, 1);
2335 int nelements = vec_size_in_bits / element_bitsize;
2337 optab = optab_for_tree_code (code, vectype, optab_default);
2339 /* We have a whole vector shift available. */
2340 if (VECTOR_MODE_P (mode)
2341 && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing
2342 && optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
2343 /* Final reduction via vector shifts and the reduction operator. Also
2344 requires scalar extract. */
2345 outer_cost += ((exact_log2(nelements) * 2) * TARG_VEC_STMT_COST
2346 + TARG_VEC_TO_SCALAR_COST);
2348 /* Use extracts and reduction op for final reduction. For N elements,
2349 we have N extracts and N-1 reduction ops. */
2350 outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
2354 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
2356 if (vect_print_dump_info (REPORT_COST))
2357 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2358 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2359 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2365 /* Function vect_model_induction_cost.
2367 Models cost for induction operations. */
2370 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2372 /* loop cost for vec_loop. */
2373 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
2374 /* prologue cost for vec_init and vec_step. */
2375 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST;
2377 if (vect_print_dump_info (REPORT_COST))
2378 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2379 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2380 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2384 /* Function get_initial_def_for_induction
2387 STMT - a stmt that performs an induction operation in the loop.
2388 IV_PHI - the initial value of the induction variable
2391 Return a vector variable, initialized with the first VF values of
2392 the induction variable. E.g., for an iv with IV_PHI='X' and
2393 evolution S, for a vector of 4 units, we want to return:
2394 [X, X + S, X + 2*S, X + 3*S]. */
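/* For instance (illustrative values): with IV_PHI = 3, step S = 2 and a
   4-unit vector, the returned vector is [3, 5, 7, 9]; the step vector used
   by the update stmt inside the loop is then [VF*S, VF*S, VF*S, VF*S]
   = [8, 8, 8, 8] for VF = 4.  */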
2397 get_initial_def_for_induction (gimple iv_phi)
2399 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2400 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2401 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2402 tree scalar_type = TREE_TYPE (gimple_phi_result (iv_phi));
2405 edge pe = loop_preheader_edge (loop);
2406 struct loop *iv_loop;
2408 tree vec, vec_init, vec_step, t;
2412 gimple init_stmt, induction_phi, new_stmt;
2413 tree induc_def, vec_def, vec_dest;
2414 tree init_expr, step_expr;
2415 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2420 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2421 bool nested_in_vect_loop = false;
2422 gimple_seq stmts = NULL;
2423 imm_use_iterator imm_iter;
2424 use_operand_p use_p;
2428 gimple_stmt_iterator si;
2429 basic_block bb = gimple_bb (iv_phi);
2432 vectype = get_vectype_for_scalar_type (scalar_type);
2433 gcc_assert (vectype);
2434 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2435 ncopies = vf / nunits;
2437 gcc_assert (phi_info);
2438 gcc_assert (ncopies >= 1);
2440 /* Find the first insertion point in the BB. */
2441 si = gsi_after_labels (bb);
2443 if (INTEGRAL_TYPE_P (scalar_type))
2444 step_expr = build_int_cst (scalar_type, 0);
2445 else if (POINTER_TYPE_P (scalar_type))
2446 step_expr = build_int_cst (sizetype, 0);
2448 step_expr = build_real (scalar_type, dconst0);
2450 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2451 if (nested_in_vect_loop_p (loop, iv_phi))
2453 nested_in_vect_loop = true;
2454 iv_loop = loop->inner;
2458 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2460 latch_e = loop_latch_edge (iv_loop);
2461 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2463 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
2464 gcc_assert (access_fn);
2465 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
2466 &init_expr, &step_expr);
2468 pe = loop_preheader_edge (iv_loop);
2470 /* Create the vector that holds the initial_value of the induction. */
2471 if (nested_in_vect_loop)
2473 /* iv_loop is nested in the loop to be vectorized. init_expr has already
2474 been created during vectorization of previous stmts; we obtain it from
2475 the STMT_VINFO_VEC_STMT of the defining stmt. */
2476 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
2477 loop_preheader_edge (iv_loop));
2478 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
2482 /* iv_loop is the loop to be vectorized. Create:
2483 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
2484 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
2485 add_referenced_var (new_var);
2487 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
2490 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2491 gcc_assert (!new_bb);
2495 t = tree_cons (NULL_TREE, init_expr, t);
2496 for (i = 1; i < nunits; i++)
2498 /* Create: new_name_i = new_name + step_expr */
2499 enum tree_code code = POINTER_TYPE_P (scalar_type)
2500 ? POINTER_PLUS_EXPR : PLUS_EXPR;
2501 init_stmt = gimple_build_assign_with_ops (code, new_var,
2502 new_name, step_expr);
2503 new_name = make_ssa_name (new_var, init_stmt);
2504 gimple_assign_set_lhs (init_stmt, new_name);
2506 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
2507 gcc_assert (!new_bb);
2509 if (vect_print_dump_info (REPORT_DETAILS))
2511 fprintf (vect_dump, "created new init_stmt: ");
2512 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
2514 t = tree_cons (NULL_TREE, new_name, t);
2516 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
2517 vec = build_constructor_from_list (vectype, nreverse (t));
2518 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
2522 /* Create the vector that holds the step of the induction. */
2523 if (nested_in_vect_loop)
2524 /* iv_loop is nested in the loop to be vectorized. Generate:
2525 vec_step = [S, S, S, S] */
2526 new_name = step_expr;
2529 /* iv_loop is the loop to be vectorized. Generate:
2530 vec_step = [VF*S, VF*S, VF*S, VF*S] */
2531 expr = build_int_cst (TREE_TYPE (step_expr), vf);
2532 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2537 for (i = 0; i < nunits; i++)
2538 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2539 gcc_assert (CONSTANT_CLASS_P (new_name));
2540 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
2541 gcc_assert (stepvectype);
2542 vec = build_vector (stepvectype, t);
2543 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2546 /* Create the following def-use cycle:
2551 vec_iv = PHI <vec_init, vec_loop>
2555 vec_loop = vec_iv + vec_step; */
2557 /* Create the induction-phi that defines the induction-operand. */
2558 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
2559 add_referenced_var (vec_dest);
2560 induction_phi = create_phi_node (vec_dest, iv_loop->header);
2561 set_vinfo_for_stmt (induction_phi,
2562 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
2563 induc_def = PHI_RESULT (induction_phi);
2565 /* Create the iv update inside the loop */
2566 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2567 induc_def, vec_step);
2568 vec_def = make_ssa_name (vec_dest, new_stmt);
2569 gimple_assign_set_lhs (new_stmt, vec_def);
2570 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2571 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
2574 /* Set the arguments of the phi node: */
2575 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
2576 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
2580 /* In case that vectorization factor (VF) is bigger than the number
2581 of elements that we can fit in a vectype (nunits), we have to generate
2582 more than one vector stmt - i.e - we need to "unroll" the
2583 vector stmt by a factor VF/nunits. For more details see documentation
2584 in vectorizable_operation. */
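  /* E.g. (illustrative): with VF = 8 and a 4-unit vectype, ncopies = 2.
     The extra copy is chained off the induction phi with a second step
     vector [nunits*S, ...] = [4*S, 4*S, 4*S, 4*S]:
       vec_1 = vec_0 + [4*S, 4*S, 4*S, 4*S];  */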
2588 stmt_vec_info prev_stmt_vinfo;
2589 /* FORNOW. This restriction should be relaxed. */
2590 gcc_assert (!nested_in_vect_loop);
2592 /* Create the vector that holds the step of the induction. */
2593 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
2594 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2597 for (i = 0; i < nunits; i++)
2598 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2599 gcc_assert (CONSTANT_CLASS_P (new_name));
2600 vec = build_vector (stepvectype, t);
2601 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2603 vec_def = induc_def;
2604 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
2605 for (i = 1; i < ncopies; i++)
2607 /* vec_i = vec_prev + vec_step */
2608 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2610 vec_def = make_ssa_name (vec_dest, new_stmt);
2611 gimple_assign_set_lhs (new_stmt, vec_def);
2613 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2614 set_vinfo_for_stmt (new_stmt,
2615 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
2616 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
2617 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
2621 if (nested_in_vect_loop)
2623 /* Find the loop-closed exit-phi of the induction, and record
2624 the final vector of induction results: */
2626 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
2628 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
2630 exit_phi = USE_STMT (use_p);
2636 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
2637 /* FORNOW. Currently not supporting the case that an inner-loop induction
2638 is not used in the outer-loop (i.e. only outside the outer-loop). */
2639 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
2640 && !STMT_VINFO_LIVE_P (stmt_vinfo));
2642 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
2643 if (vect_print_dump_info (REPORT_DETAILS))
2645 fprintf (vect_dump, "vector of inductions after inner-loop:");
2646 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
2652 if (vect_print_dump_info (REPORT_DETAILS))
2654 fprintf (vect_dump, "transform induction: created def-use cycle: ");
2655 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
2656 fprintf (vect_dump, "\n");
2657 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
2660 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
2665 /* Function get_initial_def_for_reduction
2668 STMT - a stmt that performs a reduction operation in the loop.
2669 INIT_VAL - the initial value of the reduction variable
2672 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
2673 of the reduction (used for adjusting the epilog - see below).
2674 Return a vector variable, initialized according to the operation that STMT
2675 performs. This vector will be used as the initial value of the
2676 vector of partial results.
2678 Option1 (adjust in epilog): Initialize the vector as follows:
2679 add/bit or/xor: [0,0,...,0,0]
2680 mult/bit and: [1,1,...,1,1]
2681 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
2682 and when necessary (e.g. add/mult case) let the caller know
2683 that it needs to adjust the result by init_val.
2685 Option2: Initialize the vector as follows:
2686 add/bit or/xor: [init_val,0,0,...,0]
2687 mult/bit and: [init_val,1,1,...,1]
2688 min/max/cond_expr: [init_val,init_val,...,init_val]
2689 and no adjustments are needed.
2691 For example, for the following code:
2697 STMT is 's = s + a[i]', and the reduction variable is 's'.
2698 For a vector of 4 units, we want to return either [0,0,0,init_val],
2699 or [0,0,0,0] and let the caller know that it needs to adjust
2700 the result at the end by 'init_val'.
2702 FORNOW, we use the 'adjust in epilog' scheme (Option1) when ADJUSTMENT_DEF
2703 is not NULL, because this way the initialization vector is simpler (the
2704 same element in all entries), and Option2 otherwise.
2706 A cost model should help decide between these two schemes. */
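/* For instance (illustrative only): for a product reduction 's *= a[i]'
   with init_val = 5 and a 4-unit vector, Option1 uses the initial vector
   [1,1,1,1] and the caller multiplies the final reduced result by 5 in the
   epilog, while Option2 uses [5,1,1,1] and needs no adjustment.  */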
2709 get_initial_def_for_reduction (gimple stmt, tree init_val,
2710 tree *adjustment_def)
2712 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2713 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2714 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2715 tree scalar_type = TREE_TYPE (init_val);
2716 tree vectype = get_vectype_for_scalar_type (scalar_type);
2718 enum tree_code code = gimple_assign_rhs_code (stmt);
2723 bool nested_in_vect_loop = false;
2725 REAL_VALUE_TYPE real_init_val = dconst0;
2726 int int_init_val = 0;
2727 gimple def_stmt = NULL;
2729 gcc_assert (vectype);
2730 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2732 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
2733 || SCALAR_FLOAT_TYPE_P (scalar_type));
2735 if (nested_in_vect_loop_p (loop, stmt))
2736 nested_in_vect_loop = true;
2738 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
2740 /* In case of double reduction we only create a vector variable to be put
2741 in the reduction phi node. The actual statement creation is done in
2742 vect_create_epilog_for_reduction. */
2743 if (adjustment_def && nested_in_vect_loop
2744 && TREE_CODE (init_val) == SSA_NAME
2745 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
2746 && gimple_code (def_stmt) == GIMPLE_PHI
2747 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2748 && vinfo_for_stmt (def_stmt)
2749 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2750 == vect_double_reduction_def)
2752 *adjustment_def = NULL;
2753 return vect_create_destination_var (init_val, vectype);
2756 if (TREE_CONSTANT (init_val))
2758 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2759 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
2761 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
2764 init_value = init_val;
2768 case WIDEN_SUM_EXPR:
2776 /* ADJUSTMENT_DEF is NULL when called from
2777 vect_create_epilog_for_reduction to vectorize double reduction. */
2780 if (nested_in_vect_loop)
2781 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
2784 *adjustment_def = init_val;
2787 if (code == MULT_EXPR || code == BIT_AND_EXPR)
2789 real_init_val = dconst1;
2793 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2794 def_for_init = build_real (scalar_type, real_init_val);
2796 def_for_init = build_int_cst (scalar_type, int_init_val);
2798 /* Create a vector of '0' or '1' except the first element. */
2799 for (i = nunits - 2; i >= 0; --i)
2800 t = tree_cons (NULL_TREE, def_for_init, t);
2802 /* Option1: the first element is '0' or '1' as well. */
2805 t = tree_cons (NULL_TREE, def_for_init, t);
2806 init_def = build_vector (vectype, t);
2810 /* Option2: the first element is INIT_VAL. */
2811 t = tree_cons (NULL_TREE, init_value, t);
2812 if (TREE_CONSTANT (init_val))
2813 init_def = build_vector (vectype, t);
2815 init_def = build_constructor_from_list (vectype, t);
2824 *adjustment_def = NULL_TREE;
2825 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
2829 for (i = nunits - 1; i >= 0; --i)
2830 t = tree_cons (NULL_TREE, init_value, t);
2832 if (TREE_CONSTANT (init_val))
2833 init_def = build_vector (vectype, t);
2835 init_def = build_constructor_from_list (vectype, t);
2847 /* Function vect_create_epilog_for_reduction
2849 Create code at the loop-epilog to finalize the result of a reduction
2852 VECT_DEF is a vector of partial results.
2853 REDUC_CODE is the tree-code for the epilog reduction.
2854 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
2855 number of elements that we can fit in a vectype (nunits). In this case
2856 we have to generate more than one vector stmt - i.e - we need to "unroll"
2857 the vector stmt by a factor VF/nunits. For more details see documentation
2858 in vectorizable_operation.
2859 STMT is the scalar reduction stmt that is being vectorized.
2860 REDUCTION_PHI is the phi-node that carries the reduction computation.
2861 REDUC_INDEX is the index of the operand in the right hand side of the
2862 statement that is defined by REDUCTION_PHI.
2863 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
2866 1. Creates the reduction def-use cycle: sets the arguments for
2868 The loop-entry argument is the vectorized initial-value of the reduction.
2869 The loop-latch argument is VECT_DEF - the vector of partial sums.
2870 2. "Reduces" the vector of partial results VECT_DEF into a single result,
2871 by applying the operation specified by REDUC_CODE if available, or by
2872 other means (whole-vector shifts or a scalar loop).
2873 The function also creates a new phi node at the loop exit to preserve
2874 loop-closed form, as illustrated below.
2876 The flow at the entry to this function:
2879 vec_def = phi <null, null> # REDUCTION_PHI
2880 VECT_DEF = vector_stmt # vectorized form of STMT
2881 s_loop = scalar_stmt # (scalar) STMT
2883 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2887 The above is transformed by this function into:
2890 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
2891 VECT_DEF = vector_stmt # vectorized form of STMT
2892 s_loop = scalar_stmt # (scalar) STMT
2894 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2895 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
2896 v_out2 = reduce <v_out1>
2897 s_out3 = extract_field <v_out2, 0>
2898 s_out4 = adjust_result <s_out3>
2904 vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
2906 enum tree_code reduc_code,
2907 gimple reduction_phi,
2911 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2912 stmt_vec_info prev_phi_info;
2914 enum machine_mode mode;
2915 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2916 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
2917 basic_block exit_bb;
2920 gimple new_phi = NULL, phi;
2921 gimple_stmt_iterator exit_gsi;
2923 tree new_temp = NULL_TREE;
2925 gimple epilog_stmt = NULL;
2926 tree new_scalar_dest, new_dest;
2928 tree bitsize, bitpos;
2929 enum tree_code code = gimple_assign_rhs_code (stmt);
2930 tree adjustment_def;
2931 tree vec_initial_def, def;
2933 imm_use_iterator imm_iter;
2934 use_operand_p use_p;
2935 bool extract_scalar_result = false;
2936 tree reduction_op, expr;
2939 bool nested_in_vect_loop = false;
2940 VEC(gimple,heap) *phis = NULL;
2941 enum vect_def_type dt = vect_unknown_def_type;
2944 if (nested_in_vect_loop_p (loop, stmt))
2948 nested_in_vect_loop = true;
2951 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2953 case GIMPLE_SINGLE_RHS:
2954 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
2956 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
2958 case GIMPLE_UNARY_RHS:
2959 reduction_op = gimple_assign_rhs1 (stmt);
2961 case GIMPLE_BINARY_RHS:
2962 reduction_op = reduc_index ?
2963 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
2969 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2970 gcc_assert (vectype);
2971 mode = TYPE_MODE (vectype);
2973 /*** 1. Create the reduction def-use cycle ***/
2975 /* For the case of reduction, vect_get_vec_def_for_operand returns
2976 the scalar def before the loop, that defines the initial value
2977 of the reduction variable. */
2978 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
2981 phi = reduction_phi;
2983 for (j = 0; j < ncopies; j++)
2985 /* 1.1 set the loop-entry arg of the reduction-phi: */
2986 add_phi_arg (phi, vec_initial_def, loop_preheader_edge (loop),
2989 /* 1.2 set the loop-latch arg for the reduction-phi: */
2991 def = vect_get_vec_def_for_stmt_copy (dt, def);
2992 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
2994 if (vect_print_dump_info (REPORT_DETAILS))
2996 fprintf (vect_dump, "transform reduction: created def-use cycle: ");
2997 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
2998 fprintf (vect_dump, "\n");
2999 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0, TDF_SLIM);
3002 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3005 /*** 2. Create epilog code
3006 The reduction epilog code operates across the elements of the vector
3007 of partial results computed by the vectorized loop.
3008 The reduction epilog code consists of:
3009 step 1: compute the scalar result in a vector (v_out2)
3010 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3011 step 3: adjust the scalar result (s_out3) if needed.
3013 Step 1 can be accomplished using one of the following three schemes:
3014 (scheme 1) using reduc_code, if available.
3015 (scheme 2) using whole-vector shifts, if available.
3016 (scheme 3) using a scalar loop. In this case steps 1+2 above are combined.
3019 The overall epilog code looks like this:
3021 s_out0 = phi <s_loop> # original EXIT_PHI
3022 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3023 v_out2 = reduce <v_out1> # step 1
3024 s_out3 = extract_field <v_out2, 0> # step 2
3025 s_out4 = adjust_result <s_out3> # step 3
3027 (step 3 is optional, and steps 1 and 2 may be combined).
3028 Lastly, the uses of s_out0 are replaced by s_out4.
3032 /* 2.1 Create new loop-exit-phi to preserve loop-closed form:
3033 v_out1 = phi <v_loop> */
3035 exit_bb = single_exit (loop)->dest;
3037 prev_phi_info = NULL;
3038 for (j = 0; j < ncopies; j++)
3040 phi = create_phi_node (SSA_NAME_VAR (vect_def), exit_bb);
3041 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3046 def = vect_get_vec_def_for_stmt_copy (dt, def);
3047 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3049 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3050 prev_phi_info = vinfo_for_stmt (phi);
3053 exit_gsi = gsi_after_labels (exit_bb);
3055 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
3056 (i.e. when reduc_code is not available) and in the final adjustment
3057 code (if needed). Also get the original scalar reduction variable as
3058 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
3059 represents a reduction pattern), the tree-code and scalar-def are
3060 taken from the original stmt that the pattern-stmt (STMT) replaces.
3061 Otherwise (it is a regular reduction) - the tree-code and scalar-def
3062 are taken from STMT. */
3064 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3067 /* Regular reduction */
3072 /* Reduction pattern */
3073 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
3074 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
3075 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
3078 code = gimple_assign_rhs_code (orig_stmt);
3079 scalar_dest = gimple_assign_lhs (orig_stmt);
3080 scalar_type = TREE_TYPE (scalar_dest);
3081 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
3082 bitsize = TYPE_SIZE (scalar_type);
3084 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
3085 partial results are added and not subtracted. */
3086 if (code == MINUS_EXPR)
3089 /* In case this is a reduction in an inner-loop while vectorizing an outer
3090 loop - we don't need to extract a single scalar result at the end of the
3091 inner-loop (unless it is double reduction, i.e., the use of reduction is
3092 outside the outer-loop). The final vector of partial results will be used
3093 in the vectorized outer-loop, or reduced to a scalar result at the end of
3095 if (nested_in_vect_loop && !double_reduc)
3096 goto vect_finalize_reduction;
3098 /* The epilogue is created for the outer-loop, i.e., for the loop being vectorized.
3104 gcc_assert (ncopies == 1);
3106 /* 2.3 Create the reduction code, using one of the three schemes described
3109 if (reduc_code != ERROR_MARK)
3113 /*** Case 1: Create:
3114 v_out2 = reduc_expr <v_out1> */
3116 if (vect_print_dump_info (REPORT_DETAILS))
3117 fprintf (vect_dump, "Reduce using direct vector reduction.");
3119 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3120 tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
3121 epilog_stmt = gimple_build_assign (vec_dest, tmp);
3122 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3123 gimple_assign_set_lhs (epilog_stmt, new_temp);
3124 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3126 extract_scalar_result = true;
3130 enum tree_code shift_code = ERROR_MARK;
3131 bool have_whole_vector_shift = true;
3133 int element_bitsize = tree_low_cst (bitsize, 1);
3134 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3137 if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
3138 shift_code = VEC_RSHIFT_EXPR;
3140 have_whole_vector_shift = false;
3142 /* Regardless of whether we have a whole vector shift, if we're
3143 emulating the operation via tree-vect-generic, we don't want
3144 to use it. Only the first round of the reduction is likely
3145 to still be profitable via emulation. */
3146 /* ??? It might be better to emit a reduction tree code here, so that
3147 tree-vect-generic can expand the first round via bit tricks. */
3148 if (!VECTOR_MODE_P (mode))
3149 have_whole_vector_shift = false;
3152 optab optab = optab_for_tree_code (code, vectype, optab_default);
3153 if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing)
3154 have_whole_vector_shift = false;
3157 if (have_whole_vector_shift)
3159 /*** Case 2: Create:
3160 for (offset = VS/2; offset >= element_size; offset/=2)
3162 Create: va' = vec_shift <va, offset>
3163 Create: va = vop <va, va'>
3166 if (vect_print_dump_info (REPORT_DETAILS))
3167 fprintf (vect_dump, "Reduce using vector shifts");
3169 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3170 new_temp = PHI_RESULT (new_phi);
3172 for (bit_offset = vec_size_in_bits/2;
3173 bit_offset >= element_bitsize;
3176 tree bitpos = size_int (bit_offset);
3178 epilog_stmt = gimple_build_assign_with_ops (shift_code, vec_dest,
3180 new_name = make_ssa_name (vec_dest, epilog_stmt);
3181 gimple_assign_set_lhs (epilog_stmt, new_name);
3182 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3184 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
3185 new_name, new_temp);
3186 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3187 gimple_assign_set_lhs (epilog_stmt, new_temp);
3188 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3191 extract_scalar_result = true;
3197 /*** Case 3: Create:
3198 s = extract_field <v_out2, 0>
3199 for (offset = element_size;
3200 offset < vector_size;
3201 offset += element_size;)
3203 Create: s' = extract_field <v_out2, offset>
3204 Create: s = op <s, s'>
3207 if (vect_print_dump_info (REPORT_DETAILS))
3208 fprintf (vect_dump, "Reduce using scalar code. ");
3210 vec_temp = PHI_RESULT (new_phi);
3211 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3212 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3214 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3215 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3216 gimple_assign_set_lhs (epilog_stmt, new_temp);
3217 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3219 for (bit_offset = element_bitsize;
3220 bit_offset < vec_size_in_bits;
3221 bit_offset += element_bitsize)
3223 tree bitpos = bitsize_int (bit_offset);
3224 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3227 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3228 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
3229 gimple_assign_set_lhs (epilog_stmt, new_name);
3230 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3232 epilog_stmt = gimple_build_assign_with_ops (code,
3234 new_name, new_temp);
3235 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3236 gimple_assign_set_lhs (epilog_stmt, new_temp);
3237 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3240 extract_scalar_result = false;
3244 /* 2.4 Extract the final scalar result. Create:
3245 s_out3 = extract_field <v_out2, bitpos> */
3247 if (extract_scalar_result)
3251 gcc_assert (!nested_in_vect_loop || double_reduc);
3252 if (vect_print_dump_info (REPORT_DETAILS))
3253 fprintf (vect_dump, "extract scalar result");
3255 if (BYTES_BIG_ENDIAN)
3256 bitpos = size_binop (MULT_EXPR,
3257 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
3258 TYPE_SIZE (scalar_type));
3260 bitpos = bitsize_zero_node;
3262 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
3263 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3264 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3265 gimple_assign_set_lhs (epilog_stmt, new_temp);
3266 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3269 vect_finalize_reduction:
3274 /* 2.5 Adjust the final result by the initial value of the reduction
3275 variable. (When such adjustment is not needed, then
3276 'adjustment_def' is zero). For example, if code is PLUS we create:
3277 new_temp = loop_exit_def + adjustment_def */
3281 if (nested_in_vect_loop)
3283 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
3284 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
3285 new_dest = vect_create_destination_var (scalar_dest, vectype);
3289 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
3290 expr = build2 (code, scalar_type, new_temp, adjustment_def);
3291 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
3294 epilog_stmt = gimple_build_assign (new_dest, expr);
3295 new_temp = make_ssa_name (new_dest, epilog_stmt);
3296 gimple_assign_set_lhs (epilog_stmt, new_temp);
3297 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
3298 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3302 /* 2.6 Handle the loop-exit phi */
3304 /* Replace uses of s_out0 with uses of s_out3:
3305 Find the loop-closed-use at the loop exit of the original scalar result.
3306 (The reduction result is expected to have two immediate uses - one at the
3307 latch block, and one at the loop exit). */
3308 phis = VEC_alloc (gimple, heap, 10);
3309 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
3311 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
3313 exit_phi = USE_STMT (use_p);
3314 VEC_quick_push (gimple, phis, exit_phi);
3318 /* We expect to have found an exit_phi because of loop-closed-ssa form. */
3319 gcc_assert (!VEC_empty (gimple, phis));
3321 for (i = 0; VEC_iterate (gimple, phis, i, exit_phi); i++)
3323 if (nested_in_vect_loop)
3325 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3328 /* FORNOW. Currently not supporting the case that an inner-loop
3329 reduction is not used in the outer-loop (but only outside the
3330 outer-loop), unless it is double reduction. */
3331 gcc_assert ((STMT_VINFO_RELEVANT_P (stmt_vinfo)
3332 && !STMT_VINFO_LIVE_P (stmt_vinfo)) || double_reduc);
3334 epilog_stmt = adjustment_def ? epilog_stmt : new_phi;
3335 STMT_VINFO_VEC_STMT (stmt_vinfo) = epilog_stmt;
3336 set_vinfo_for_stmt (epilog_stmt,
3337 new_stmt_vec_info (epilog_stmt, loop_vinfo,
3340 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
3341 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
3344 || STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_double_reduction_def)
3347 /* Handle double reduction:
3349 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
3350 stmt2: s3 = phi <s1, s4> - (regular) reduction phi (inner loop)
3351 stmt3: s4 = use (s3) - (regular) reduction stmt (inner loop)
3352 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
3354 At that point the regular reduction (stmt2 and stmt3) is already
3355 vectorized, as well as the exit phi node, stmt4.
3356 Here we vectorize the phi node of double reduction, stmt1, and
3357 update all relevant statements. */
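              /* A concrete shape of such a nest (names purely illustrative):

                   outer loop:  s1 = phi <s0, s2>     <- stmt1
                   inner loop:  s3 = phi <s1, s4>     <- stmt2
                                s4 = s3 + a[i][j]     <- stmt3
                   outer loop:  s2 = phi <s4>         <- stmt4
                   after outer loop:  use (s2)  */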
3359 /* Go through all the uses of s2 to find double reduction phi node,
3360 i.e., stmt1 above. */
3361 orig_name = PHI_RESULT (exit_phi);
3362 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3364 stmt_vec_info use_stmt_vinfo = vinfo_for_stmt (use_stmt);
3365 stmt_vec_info new_phi_vinfo;
3366 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
3367 basic_block bb = gimple_bb (use_stmt);
3370 /* Check that USE_STMT is really a double reduction phi node. */
3371 if (gimple_code (use_stmt) != GIMPLE_PHI
3372 || gimple_phi_num_args (use_stmt) != 2
3374 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
3375 != vect_double_reduction_def
3376 || bb->loop_father != outer_loop)
3379 /* Create vector phi node for double reduction:
3380 vs1 = phi <vs0, vs2>
3381 vs1 was created previously in this function by a call to
3382 vect_get_vec_def_for_operand and is stored in vec_initial_def;
3383 vs2 is defined by EPILOG_STMT, the vectorized EXIT_PHI;
3384 vs0 is created here. */
3386 /* Create vector phi node. */
3387 vect_phi = create_phi_node (vec_initial_def, bb);
3388 new_phi_vinfo = new_stmt_vec_info (vect_phi,
3389 loop_vec_info_for_loop (outer_loop), NULL);
3390 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
3392 /* Create vs0 - initial def of the double reduction phi. */
3393 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
3394 loop_preheader_edge (outer_loop));
3395 init_def = get_initial_def_for_reduction (stmt, preheader_arg,
3397 vect_phi_init = vect_init_vector (use_stmt, init_def, vectype,
3400 /* Update phi node arguments with vs0 and vs2. */
3401 add_phi_arg (vect_phi, vect_phi_init,
3402 loop_preheader_edge (outer_loop), UNKNOWN_LOCATION);
3403 add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
3404 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
3405 if (vect_print_dump_info (REPORT_DETAILS))
3407 fprintf (vect_dump, "created double reduction phi node: ");
3408 print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
3411 vect_phi_res = PHI_RESULT (vect_phi);
3413 /* Replace the use, i.e., set the correct vs1 in the regular
3414 reduction phi node. FORNOW, NCOPIES is always 1, so the loop
3416 use = reduction_phi;
3417 for (j = 0; j < ncopies; j++)
3419 edge pr_edge = loop_preheader_edge (loop);
3420 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
3421 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
3426 /* Replace the uses: */
3427 orig_name = PHI_RESULT (exit_phi);
3428 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3429 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
3430 SET_USE (use_p, new_temp);
3433 VEC_free (gimple, heap, phis);
3437 /* Function vectorizable_reduction.
3439 Check if STMT performs a reduction operation that can be vectorized.
3440 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3441 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3442 Return FALSE if not a vectorizable STMT, TRUE otherwise.
3444 This function also handles reduction idioms (patterns) that have been
3445 recognized in advance during vect_pattern_recog. In this case, STMT may be
3447 X = pattern_expr (arg0, arg1, ..., X)
3448 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
3449 sequence that had been detected and replaced by the pattern-stmt (STMT).
3451 In some cases of reduction patterns, the type of the reduction variable X is
3452 different than the type of the other arguments of STMT.
3453 In such cases, the vectype that is used when transforming STMT into a vector
3454 stmt is different than the vectype that is used to determine the
3455 vectorization factor, because it consists of a different number of elements
3456 than the actual number of elements that are being operated upon in parallel.
3458 For example, consider an accumulation of shorts into an int accumulator.
3459 On some targets it's possible to vectorize this pattern operating on 8
3460 shorts at a time (hence, the vectype for purposes of determining the
3461 vectorization factor should be V8HI); on the other hand, the vectype that
3462 is used to create the vector form is actually V4SI (the type of the result).
3464 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
3465 indicates what is the actual level of parallelism (V8HI in the example), so
3466 that the right vectorization factor would be derived. This vectype
3467 corresponds to the type of arguments to the reduction stmt, and should *NOT*
3468 be used to create the vectorized stmt. The right vectype for the vectorized
3469 stmt is obtained from the type of the result X:
3470 get_vectype_for_scalar_type (TREE_TYPE (X))
3472 This means that, contrary to "regular" reductions (or "regular" stmts in
3473 general), the following equation:
3474 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
3475 does *NOT* necessarily hold for reduction patterns. */
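/* A concrete (purely illustrative) instance of such a pattern: the scalar
   source

     short a[N]; int sum = 0;
     for (i = 0; i < N; i++)
       sum += (int) a[i];

   may be replaced during pattern recognition by
     STMT: sum = widen_sum <a[i], sum>
   whose STMT_VINFO_VECTYPE is V8HI (this determines the vectorization
   factor), while the vectorized stmt itself is created with vectype V4SI,
   obtained from get_vectype_for_scalar_type (TREE_TYPE (sum)).  */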
3478 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
3483 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
3484 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3485 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3486 tree vectype_in = NULL_TREE;
3487 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3488 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3489 enum tree_code code, orig_code, epilog_reduc_code;
3490 enum machine_mode vec_mode;
3492 optab optab, reduc_optab;
3493 tree new_temp = NULL_TREE;
3496 enum vect_def_type dt;
3497 gimple new_phi = NULL;
3501 stmt_vec_info orig_stmt_info;
3502 tree expr = NULL_TREE;
3506 stmt_vec_info prev_stmt_info, prev_phi_info;
3507 gimple first_phi = NULL;
3508 bool single_defuse_cycle = false;
3509 tree reduc_def = NULL_TREE;
3510 gimple new_stmt = NULL;
3513 bool nested_cycle = false, found_nested_cycle_def = false;
3514 gimple reduc_def_stmt = NULL;
3515 /* The default is that the reduction variable is the last operand in the statement. */
3516 int reduc_index = 2;
3517 bool double_reduc = false, dummy;
3519 struct loop * def_stmt_loop, *outer_loop = NULL;
3521 gimple def_arg_stmt;
3523 if (nested_in_vect_loop_p (loop, stmt))
3527 nested_cycle = true;
3530 /* FORNOW: SLP not supported. */
3531 if (STMT_SLP_TYPE (stmt_info))
3534 /* 1. Is vectorizable reduction? */
3535 /* Not supportable if the reduction variable is used in the loop. */
3536 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer)
3539 /* Reductions that are not used even in an enclosing outer-loop
3540 are expected to be "live" (used out of the loop). */
3541 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
3542 && !STMT_VINFO_LIVE_P (stmt_info))
3545 /* Make sure it was already recognized as a reduction computation. */
3546 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
3547 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
3550 /* 2. Has this been recognized as a reduction pattern?
3552 Check if STMT represents a pattern that has been recognized
3553 in earlier analysis stages. For stmts that represent a pattern,
3554 the STMT_VINFO_RELATED_STMT field records the last stmt in
3555 the original sequence that constitutes the pattern. */
3557 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3560 orig_stmt_info = vinfo_for_stmt (orig_stmt);
3561 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
3562 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
3563 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
3566 /* 3. Check the operands of the operation. The first operands are defined
3567 inside the loop body. The last operand is the reduction variable,
3568 which is defined by the loop-header-phi. */
3570 gcc_assert (is_gimple_assign (stmt));
3573 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3575 case GIMPLE_SINGLE_RHS:
3576 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
3577 if (op_type == ternary_op)
3579 tree rhs = gimple_assign_rhs1 (stmt);
3580 ops[0] = TREE_OPERAND (rhs, 0);
3581 ops[1] = TREE_OPERAND (rhs, 1);
3582 ops[2] = TREE_OPERAND (rhs, 2);
3583 code = TREE_CODE (rhs);
3589 case GIMPLE_BINARY_RHS:
3590 code = gimple_assign_rhs_code (stmt);
3591 op_type = TREE_CODE_LENGTH (code);
3592 gcc_assert (op_type == binary_op);
3593 ops[0] = gimple_assign_rhs1 (stmt);
3594 ops[1] = gimple_assign_rhs2 (stmt);
3597 case GIMPLE_UNARY_RHS:
3604 scalar_dest = gimple_assign_lhs (stmt);
3605 scalar_type = TREE_TYPE (scalar_dest);
3606 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
3607 && !SCALAR_FLOAT_TYPE_P (scalar_type))
3610 /* All uses but the last are expected to be defined in the loop.
3611 The last use is the reduction variable. In case of nested cycle this
3612 assumption is not true: we use reduc_index to record the index of the
3613 reduction variable. */
3614 for (i = 0; i < op_type-1; i++)
3618 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
3619 if (i == 0 && code == COND_EXPR)
3622 is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL,
3623 &def_stmt, &def, &dt, &tem);
3626 gcc_assert (is_simple_use);
3627 if (dt != vect_internal_def
3628 && dt != vect_external_def
3629 && dt != vect_constant_def
3630 && dt != vect_induction_def
3631 && !(dt == vect_nested_cycle && nested_cycle))
3634 if (dt == vect_nested_cycle)
3636 found_nested_cycle_def = true;
3637 reduc_def_stmt = def_stmt;
3642 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, NULL, &def_stmt,
3644 gcc_assert (is_simple_use);
3645 gcc_assert (dt == vect_reduction_def
3646 || dt == vect_nested_cycle
3647 || ((dt == vect_internal_def || dt == vect_external_def
3648 || dt == vect_constant_def || dt == vect_induction_def)
3649 && nested_cycle && found_nested_cycle_def));
3650 if (!found_nested_cycle_def)
3651 reduc_def_stmt = def_stmt;
3653 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
3655 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
3660 gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
3661 !nested_cycle, &dummy));
3663 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
3667 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3668 / TYPE_VECTOR_SUBPARTS (vectype_in));
3669 gcc_assert (ncopies >= 1);
3671 vec_mode = TYPE_MODE (vectype_in);
3673 if (code == COND_EXPR)
3675 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0))
3677 if (vect_print_dump_info (REPORT_DETAILS))
3678 fprintf (vect_dump, "unsupported condition in reduction");
3685 /* 4. Supportable by target? */
3687 /* 4.1. check support for the operation in the loop */
3688 optab = optab_for_tree_code (code, vectype_in, optab_default);
3691 if (vect_print_dump_info (REPORT_DETAILS))
3692 fprintf (vect_dump, "no optab.");
3697 if (optab_handler (optab, vec_mode)->insn_code == CODE_FOR_nothing)
3699 if (vect_print_dump_info (REPORT_DETAILS))
3700 fprintf (vect_dump, "op not supported by target.");
3702 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
3703 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3704 < vect_min_worthwhile_factor (code))
3707 if (vect_print_dump_info (REPORT_DETAILS))
3708 fprintf (vect_dump, "proceeding using word mode.");
3711 /* Worthwhile without SIMD support? */
3712 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
3713 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3714 < vect_min_worthwhile_factor (code))
3716 if (vect_print_dump_info (REPORT_DETAILS))
3717 fprintf (vect_dump, "not worthwhile without SIMD support.");
3723 /* 4.2. Check support for the epilog operation.
3725 If STMT represents a reduction pattern, then the type of the
3726 reduction variable may be different than the type of the rest
3727 of the arguments. For example, consider the case of accumulation
3728 of shorts into an int accumulator; The original code:
3729 S1: int_a = (int) short_a;
3730 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
3733 STMT: int_acc = widen_sum <short_a, int_acc>
3736 1. The tree-code that is used to create the vector operation in the
3737 epilog code (that reduces the partial results) is not the
3738 tree-code of STMT, but is rather the tree-code of the original
3739 stmt from the pattern that STMT is replacing. I.e, in the example
3740 above we want to use 'widen_sum' in the loop, but 'plus' in the epilog.
3742 2. The type (mode) we use to check available target support
3743 for the vector operation to be created in the *epilog*, is
3744 determined by the type of the reduction variable (in the example
3745 above we'd check this: plus_optab[vect_int_mode]).
3746 However the type (mode) we use to check available target support
3747 for the vector operation to be created *inside the loop*, is
3748 determined by the type of the other arguments to STMT (in the
3749 example we'd check this: widen_sum_optab[vect_short_mode]).
3751 This is contrary to "regular" reductions, in which the types of all
3752 the arguments are the same as the type of the reduction variable.
3753 For "regular" reductions we can therefore use the same vector type
3754 (and also the same tree-code) when generating the epilog code and
3755 when generating the code inside the loop. */
3759 /* This is a reduction pattern: get the vectype from the type of the
3760 reduction variable, and get the tree-code from orig_stmt. */
3761 orig_code = gimple_assign_rhs_code (orig_stmt);
3762 gcc_assert (vectype_out);
3763 vec_mode = TYPE_MODE (vectype_out);
3767 /* Regular reduction: the same vectype and tree-code that are used for
3768 the vector code inside the loop can also be used for the epilog code. */
3774 def_bb = gimple_bb (reduc_def_stmt);
3775 def_stmt_loop = def_bb->loop_father;
3776 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
3777 loop_preheader_edge (def_stmt_loop));
3778 if (TREE_CODE (def_arg) == SSA_NAME
3779 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
3780 && gimple_code (def_arg_stmt) == GIMPLE_PHI
3781 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
3782 && vinfo_for_stmt (def_arg_stmt)
3783 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
3784 == vect_double_reduction_def)
3785 double_reduc = true;
3788 epilog_reduc_code = ERROR_MARK;
3789 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
3791 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
3795 if (vect_print_dump_info (REPORT_DETAILS))
3796 fprintf (vect_dump, "no optab for reduction.");
3798 epilog_reduc_code = ERROR_MARK;
3802 && optab_handler (reduc_optab, vec_mode)->insn_code
3803 == CODE_FOR_nothing)
3805 if (vect_print_dump_info (REPORT_DETAILS))
3806 fprintf (vect_dump, "reduc op not supported by target.");
3808 epilog_reduc_code = ERROR_MARK;
3813 if (!nested_cycle || double_reduc)
3815 if (vect_print_dump_info (REPORT_DETAILS))
3816 fprintf (vect_dump, "no reduc code for scalar code.");
3822 if (double_reduc && ncopies > 1)
3824 if (vect_print_dump_info (REPORT_DETAILS))
3825 fprintf (vect_dump, "multiple types in double reduction");
3830 if (!vec_stmt) /* transformation not required. */
3832 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3833 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
3840 if (vect_print_dump_info (REPORT_DETAILS))
3841 fprintf (vect_dump, "transform reduction.");
3843 /* FORNOW: Multiple types are not supported for condition. */
3844 if (code == COND_EXPR)
3845 gcc_assert (ncopies == 1);
3847 /* Create the destination vector */
3848 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3850 /* In case the vectorization factor (VF) is bigger than the number
3851 of elements that we can fit in a vectype (nunits), we have to generate
3852 more than one vector stmt - i.e - we need to "unroll" the
3853 vector stmt by a factor VF/nunits. For more details see documentation
3854 in vectorizable_operation. */
3856 /* If the reduction is used in an outer loop we need to generate
3857 VF intermediate results, like so (e.g. for ncopies=2):
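        r0 = phi (init, r0)
        r1 = phi (init, r1)
        r0 = x0 + r0;
        r1 = x1 + r1;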
3862 (i.e. we generate VF results in 2 registers).
3863 In this case we have a separate def-use cycle for each copy, and therefore
3864 for each copy we get the vector def for the reduction variable from the
3865 respective phi node created for this copy.
3867 Otherwise (the reduction is unused in the loop nest), we can combine
3868 together intermediate results, like so (e.g. for ncopies=2):
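        r = phi (init, r)
        r = x0 + r;
        r = x1 + r;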
3872 (i.e. we generate VF/2 results in a single register).
3873 In this case for each copy we get the vector def for the reduction variable
3874 from the vectorized reduction operation generated in the previous iteration.
3877 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
3879 single_defuse_cycle = true;
3883 epilog_copies = ncopies;
  prev_stmt_info = NULL;
  prev_phi_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      if (j == 0 || !single_defuse_cycle)
        {
          /* Create the reduction-phi that defines the reduction-operand.  */
          new_phi = create_phi_node (vec_dest, loop->header);
          set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo,
                                                          NULL));
          /* Get the vector def for the reduction variable from the phi
             node.  */
          reduc_def = PHI_RESULT (new_phi);
        }

      if (code == COND_EXPR)
        {
          first_phi = new_phi;
          vectorizable_condition (stmt, gsi, vec_stmt, reduc_def, reduc_index);
          /* Multiple types are not supported for condition.  */
          break;
        }

      /* Handle uses.  */
      if (j == 0)
        {
          loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
                                                        stmt, NULL);
          if (op_type == ternary_op)
            {
              if (reduc_index == 0)
                loop_vec_def1 = vect_get_vec_def_for_operand (ops[2], stmt,
                                                              NULL);
              else
                loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt,
                                                              NULL);
            }

          /* Get the vector def for the reduction variable from the phi
             node.  */
          first_phi = new_phi;
        }
      else
        {
          enum vect_def_type dt = vect_unknown_def_type; /* Dummy.  */
          loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
          if (op_type == ternary_op)
            loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def1);

          if (single_defuse_cycle)
            reduc_def = gimple_assign_lhs (new_stmt);
          else
            reduc_def = PHI_RESULT (new_phi);

          STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
        }
      /* Arguments are ready.  Create the new vector stmt.  */
      if (op_type == binary_op)
        {
          if (reduc_index == 0)
            expr = build2 (code, vectype_out, reduc_def, loop_vec_def0);
          else
            expr = build2 (code, vectype_out, loop_vec_def0, reduc_def);
        }
      else
        {
          if (reduc_index == 0)
            expr = build3 (code, vectype_out, reduc_def, loop_vec_def0,
                           loop_vec_def1);
          else
            {
              if (reduc_index == 1)
                expr = build3 (code, vectype_out, loop_vec_def0, reduc_def,
                               loop_vec_def1);
              else
                expr = build3 (code, vectype_out, loop_vec_def0, loop_vec_def1,
                               reduc_def);
            }
        }
      new_stmt = gimple_build_assign (vec_dest, expr);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
      prev_phi_info = vinfo_for_stmt (new_phi);
    }
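
  /* To make the above concrete: for a plus-reduction with VF=4 and
     ncopies=1, the loop body now contains something like (a sketch; the
     SSA names are illustrative only):

        vect_sum.5_20 = PHI <vect_init_18 (preheader), vect_sum.5_22 (latch)>
        vect__x.7_21  = ...vectorized load...
        vect_sum.5_22 = vect__x.7_21 + vect_sum.5_20;

     i.e. four partial sums are accumulated in the four lanes of a single
     vector register.  */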
  /* Finalize the reduction-phi (set its arguments) and create the
     epilog reduction code.  */
  if (!single_defuse_cycle || code == COND_EXPR)
    new_temp = gimple_assign_lhs (*vec_stmt);

  vect_create_epilog_for_reduction (new_temp, stmt, epilog_copies,
                                    epilog_reduc_code, first_phi, reduc_index,
                                    double_reduc);
  return true;
}
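
/* The epilog created above reduces the VF partial results to a single
   scalar.  Roughly (a sketch; see vect_create_epilog_for_reduction for the
   details): it emits either a single whole-vector reduction operation
   (e.g. REDUC_PLUS_EXPR) when the target supports one, or a log2(nunits)
   sequence of whole-vector shifts and adds, and finally extracts the scalar
   result with a BIT_FIELD_REF.  */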
/* Function vect_min_worthwhile_factor.

   For a loop where we could vectorize the operation indicated by CODE,
   return the minimum vectorization factor that makes it worthwhile
   to use generic vectors.  */

int
vect_min_worthwhile_factor (enum tree_code code)
{
  switch (code)
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
      return 4;

    case ABS_EXPR:
    case MAX_EXPR:
    case MIN_EXPR:
      return 2;

    default:
      return INT_MAX;
    }
}
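
/* The values above are heuristic.  E.g. a generic V4SI addition emulated
   with word operations costs several scalar instructions, so a PLUS_EXPR
   is assumed to pay off only at VF >= 4, while cheap lane-wise operations
   like MIN/MAX already pay off at VF >= 2.  */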
/* Function vectorizable_induction.

   Check if PHI performs an induction computation that can be vectorized.
   If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
   phi to replace it, put it in VEC_STMT, and add it to the same basic block.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
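
/* For example (a sketch; SSA names and constants are illustrative), for
   VF=4 the scalar induction

      i_1 = PHI <0 (preheader), i_2 (latch)>
      i_2 = i_1 + 1;

   is vectorized via get_initial_def_for_induction into

      vect_i_3 = PHI <{0, 1, 2, 3} (preheader), vect_i_4 (latch)>
      vect_i_4 = vect_i_3 + {4, 4, 4, 4};  */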
bool
vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                        gimple *vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (phi);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  tree vec_def;

  gcc_assert (ncopies >= 1);
  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);

  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_induction ===");
      vect_model_induction_cost (stmt_info, ncopies);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform induction phi.");

  vec_def = get_initial_def_for_induction (phi);
  *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
  return true;
}
/* Function vectorizable_live_operation.

   STMT computes a value that is used outside the loop.  Check if
   it can be supported.  */
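
/* For example (a sketch; names are illustrative):

      for (i = 0; i < N; i++)
        x_3 = a_1 + b_2;          // a_1, b_2 defined outside the loop
      ... = x_3;                  // use after the loop

   Because all operands are loop-invariant, the scalar statement can simply
   stay in place and its final value be used - the only case currently
   handled below.  */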
bool
vectorizable_live_operation (gimple stmt,
                             gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                             gimple *vec_stmt ATTRIBUTE_UNUSED)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int i;
  int op_type;
  tree op;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  enum tree_code code;
  enum gimple_rhs_class rhs_class;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  /* FORNOW. CHECKME.  */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);
  op_type = TREE_CODE_LENGTH (code);
  rhs_class = get_gimple_rhs_class (code);
  gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
  gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);

  /* FORNOW: support only if all uses are invariant.  This means
     that the scalar operations can remain in place, unvectorized.
     The original last scalar value that they compute will be used.  */

  for (i = 0; i < op_type; i++)
    {
      if (rhs_class == GIMPLE_SINGLE_RHS)
        op = TREE_OPERAND (gimple_op (stmt, 1), i);
      else
        op = gimple_op (stmt, i + 1);
      if (op
          && !vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (dt != vect_external_def && dt != vect_constant_def)
        return false;
    }

  /* No transformation is required for the cases we currently support.  */
  return true;
}
/* Kill any debug uses outside LOOP of SSA names defined in STMT.  */

static void
vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
{
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  def_operand_p def_p;
  gimple ustmt;

  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb;

          if (!is_gimple_debug (ustmt))
            continue;

          bb = gimple_bb (ustmt);
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (gimple_debug_bind_p (ustmt))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "killing debug use");
                  gimple_debug_bind_reset_value (ustmt);
                  update_stmt (ustmt);
                }
              else
                gcc_unreachable ();
            }
        }
    }
}
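
/* For instance (binds shown here are illustrative): if i_3 is defined in
   the loop and a debug bind "# DEBUG i => i_3" exists after the loop,
   resetting the bind makes its value unknown, so the debugger reports the
   variable as optimized out instead of reading a stale scalar definition
   that is about to be removed.  */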
/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - create vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.  */
void
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  int i;
  tree ratio = NULL;
  int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  bool strided_store;
  bool slp_scheduled = false;
  unsigned int nunits;
  tree cond_expr = NULL_TREE;
  gimple_seq cond_expr_stmt_list = NULL;
  bool do_peeling_for_loop_bound;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_transform_loop ===");

  /* Peel the loop if there are data refs with unknown alignment.
     Only one data ref with unknown alignment can be handled this way.  */

  if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    vect_do_peeling_for_alignment (loop_vinfo);
  do_peeling_for_loop_bound
    = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
       || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
           && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));
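
  /* E.g. with VF == 8: an unknown iteration count always requires an epilog
     loop; a known count of 100 requires one as well (100 % 8 == 4 iterations
     are left over), whereas a known count of 128 does not (128 % 8 == 0).  */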
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    vect_loop_versioning (loop_vinfo,
                          !do_peeling_for_loop_bound,
                          &cond_expr, &cond_expr_stmt_list);
  /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
     compile time constant), or it is a constant that is not a multiple of
     the vectorization factor, then an epilog loop needs to be created.
     We therefore duplicate the loop: the original loop will be vectorized,
     and will compute the first (n/VF) iterations.  The second copy of the
     loop will remain scalar and will compute the remaining (n%VF) iterations.
     (VF is the vectorization factor).  */

  if (do_peeling_for_loop_bound)
    vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
                                    cond_expr, cond_expr_stmt_list);
  else
    ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
                LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
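
  /* For instance, with n == 100 and VF == 8 the vectorized loop executes
     12 iterations (covering 96 scalar iterations) and the scalar epilog
     executes the remaining 4; with n == 128 no epilog is needed and RATIO
     is simply 128 / 8 == 16.  */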
  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  /* FORNOW: the vectorizer supports only loops whose body consists
     of one basic block (header + empty latch).  When the vectorizer
     supports more involved loop forms, the order in which the BBs are
     traversed will need to be reconsidered.  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;
      gimple phi;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "------>vectorizing phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }
          stmt_info = vinfo_for_stmt (phi);
          if (!stmt_info)
            continue;

          if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
            vect_loop_kill_debug_uses (loop, phi);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            continue;

          if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
               != (unsigned HOST_WIDE_INT) vectorization_factor)
              && vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "multiple-types.");

          if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "transform phi.");
              vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
            }
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si);)
        {
          gimple stmt = gsi_stmt (si);
          bool is_store;

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "------>vectorizing statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }
          stmt_info = vinfo_for_stmt (stmt);

          /* vector stmts created in the outer-loop during vectorization of
             stmts in an inner-loop may not have a stmt_info, and do not
             need to be vectorized.  */
          if (!stmt_info)
            {
              gsi_next (&si);
              continue;
            }

          if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
            vect_loop_kill_debug_uses (loop, stmt);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              gsi_next (&si);
              continue;
            }

          gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
          nunits =
            (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
          if (!STMT_SLP_TYPE (stmt_info)
              && nunits != (unsigned int) vectorization_factor
              && vect_print_dump_info (REPORT_DETAILS))
            /* For SLP, VF is set according to the unrolling factor, and not
               to the vector size, hence for SLP this print is not valid.  */
            fprintf (vect_dump, "multiple-types.");

          /* SLP.  Schedule all the SLP instances when the first SLP stmt is
             reached.  */
          if (STMT_SLP_TYPE (stmt_info))
            {
              if (!slp_scheduled)
                {
                  slp_scheduled = true;
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "=== scheduling SLP instances ===");
                  vect_schedule_slp (loop_vinfo, NULL);
                }

              /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
              if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
                {
                  gsi_next (&si);
                  continue;
                }
            }
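
          /* A stmt is "hybrid" when it takes part in an SLP instance but
             also has uses outside of it - e.g. (an illustrative case) a
             load that feeds both an SLP group of stores and a reduction.
             Pure SLP stmts were fully handled by vect_schedule_slp above
             and are skipped; hybrid stmts fall through to the loop-based
             vectorization code below.  */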
          /* -------- vectorize statement ------------ */
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "transform statement.");

          strided_store = false;
          is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
          if (is_store)
            {
              if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
                {
                  /* Interleaving.  If IS_STORE is TRUE, the vectorization of
                     the interleaving chain was completed - free all the
                     stores in the chain.  */
                  vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
                  gsi_remove (&si, true);
                  continue;
                }
              else
                {
                  /* Free the attached stmt_vec_info and remove the stmt.  */
                  free_stmt_vec_info (stmt);
                  gsi_remove (&si, true);
                  continue;
                }
            }
          gsi_next (&si);
        }
    }
  slpeel_make_loop_iterate_ntimes (loop, ratio);
  /* The memory tags and pointers in vectorized statements need to
     have their SSA forms updated.  FIXME: why can't this be delayed
     until all the loops have been transformed?  */
  update_ssa (TODO_update_ssa);

  if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
    fprintf (vect_dump, "LOOP VECTORIZED.");
  if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
    fprintf (vect_dump, "OUTER LOOP VECTORIZED.");
}