2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Loop Vectorization Pass.
24 This pass tries to vectorize loops. This first implementation focuses on
25 simple inner-most loops, with no conditional control flow, and a set of
26 simple operations which vector form can be expressed using existing
27 tree codes (PLUS, MULT etc).
29 For example, the vectorizer transforms the following simple loop:
31 short a[N]; short b[N]; short c[N]; int i;
37 as if it was manually vectorized by rewriting the source code into:
39 typedef int __attribute__((mode(V8HI))) v8hi;
40 short a[N]; short b[N]; short c[N]; int i;
41 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
44 for (i=0; i<N/8; i++){
51 The main entry to this pass is vectorize_loops(), in which
52 the vectorizer applies a set of analyses on a given set of loops,
53 followed by the actual vectorization transformation for the loops that
54 had successfully passed the analysis phase.
56 Throughout this pass we make a distinction between two types of
57 data: scalars (which are represented by SSA_NAMES), and memory references
58 ("data-refs"). These two types of data require different handling both
59 during analysis and transformation. The types of data-refs that the
60 vectorizer currently supports are ARRAY_REFS which base is an array DECL
61 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
62 accesses are required to have a simple (consecutive) access pattern.
66 The driver for the analysis phase is vect_analyze_loop_nest().
67 It applies a set of analyses, some of which rely on the scalar evolution
68 analyzer (scev) developed by Sebastian Pop.
70 During the analysis phase the vectorizer records some information
71 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
72 loop, as well as general information about the loop as a whole, which is
73 recorded in a "loop_vec_info" struct attached to each loop.
77 The loop transformation phase scans all the stmts in the loop, and
78 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
79 the loop that needs to be vectorized. It inserts the vector code sequence
80 just before the scalar stmt S, and records a pointer to the vector code
81 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
82 attached to S). This pointer will be used for the vectorization of following
83 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
84 otherwise, we rely on dead code elimination for removing it.
86 For example, say stmt S1 was vectorized into stmt VS1:
89 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
92 To vectorize stmt S2, the vectorizer first finds the stmt that defines
93 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
94 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
95 resulting sequence would be:
98 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
100 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
102 Operands that are not SSA_NAMEs, are data-refs that appear in
103 load/store operations (like 'x[i]' in S1), and are handled differently.
107 Currently the only target specific information that is used is the
108 size of the vector (in bytes) - "UNITS_PER_SIMD_WORD". Targets that can
109 support different sizes of vectors, for now will need to specify one value
110 for "UNITS_PER_SIMD_WORD". More flexibility will be added in the future.
112 Since we only vectorize operations which vector form can be
113 expressed using existing tree codes, to verify that an operation is
114 supported, the vectorizer checks the relevant optab at the relevant
115 machine_mode (e.g, optab_handler (add_optab, V8HImode)->insn_code). If
116 the value found is CODE_FOR_nothing, then there's no target support, and
117 we can't vectorize the stmt.
119 For additional information on this project see:
120 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
125 #include "coretypes.h"
131 #include "basic-block.h"
132 #include "diagnostic.h"
133 #include "tree-flow.h"
134 #include "tree-dump.h"
137 #include "cfglayout.h"
143 #include "tree-chrec.h"
144 #include "tree-data-ref.h"
145 #include "tree-scalar-evolution.h"
148 #include "tree-vectorizer.h"
149 #include "tree-pass.h"
151 /*************************************************************************
152 General Vectorization Utilities
153 *************************************************************************/
155 /* vect_dump will be set to stderr or dump_file if it exists. */
158 /* vect_verbosity_level set to an invalid value
159 to mark that it's uninitialized. */
160 enum verbosity_levels vect_verbosity_level = MAX_VERBOSITY_LEVEL;
163 static LOC vect_loop_location;
165 /* Bitmap of virtual variables to be renamed. */
166 bitmap vect_memsyms_to_rename;
168 /* Vector mapping GIMPLE stmt to stmt_vec_info. */
169 VEC(vec_void_p,heap) *stmt_vec_info_vec;
172 /*************************************************************************
173 Simple Loop Peeling Utilities
175 Utilities to support loop peeling for vectorization purposes.
176 *************************************************************************/
179 /* Renames the use *OP_P. */
/* Replace the SSA_NAME used at *OP_P with its current definition, as
   recorded by get_current_def.  Non-SSA_NAME uses are left untouched.
   NOTE(review): interior lines of this function (head, braces, the
   new_name declaration, early returns) are elided from this view; only
   comments were added, the visible code is unchanged.  */
182 rename_use_op (use_operand_p op_p)
/* Only SSA names participate in renaming.  */
186 if (TREE_CODE (USE_FROM_PTR (op_p)) != SSA_NAME)
189 new_name = get_current_def (USE_FROM_PTR (op_p));
191 /* Something defined outside of the loop. */
195 /* An ordinary ssa name defined in the loop. */
197 SET_USE (op_p, new_name);
201 /* Renames the variables in basic block BB. */
/* Rename all SSA uses in basic block BB via rename_use_op, then rename
   the PHI arguments in successor blocks that lie outside BB's loop
   (i.e. loop-closed-ssa exit phis reached along BB's outgoing edges).
   NOTE(review): several declaration lines (stmt, use_p, iter, e, ei)
   are elided from this view; code lines are unchanged.  */
204 rename_variables_in_bb (basic_block bb)
206 gimple_stmt_iterator gsi;
212 struct loop *loop = bb->loop_father;
/* First pass: every SSA use operand of every stmt in BB.  */
214 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
216 stmt = gsi_stmt (gsi);
217 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
218 rename_use_op (use_p);
/* Second pass: phi args in successors outside the loop, i.e. the
   arguments flowing along BB's loop-exit edges.  */
221 FOR_EACH_EDGE (e, ei, bb->succs)
223 if (!flow_bb_inside_loop_p (loop, e->dest))
225 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
226 rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (gsi_stmt (gsi), e));
231 /* Renames variables in new generated LOOP. */
/* Apply rename_variables_in_bb to every basic block of LOOP.
   NOTE(review): the declarations of `bbs' and `i', and the free of the
   get_loop_body array, are elided from this view.  */
234 rename_variables_in_loop (struct loop *loop)
239 bbs = get_loop_body (loop);
241 for (i = 0; i < loop->num_nodes; i++)
242 rename_variables_in_bb (bbs[i]);
248 /* Update the PHI nodes of NEW_LOOP.
250 NEW_LOOP is a duplicate of ORIG_LOOP.
251 AFTER indicates whether NEW_LOOP executes before or after ORIG_LOOP:
252 AFTER is true if NEW_LOOP executes after ORIG_LOOP, and false if it
253 executes before it. */
/* See the block comment above: fills in both phi arguments of every
   loop-header phi of NEW_LOOP (entry arg in step 1, latch arg in
   step 2), and, when NEW_LOOP precedes ORIG_LOOP, also updates the
   phis of ORIG_LOOP's header (step 3, case 1).
   NOTE(review): interior lines (declarations of def/new_ssa_name,
   braces, conditionals guarding steps) are elided from this view;
   only comments were added.  */
256 slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
257 struct loop *new_loop, bool after)
260 gimple phi_new, phi_orig;
262 edge orig_loop_latch = loop_latch_edge (orig_loop);
263 edge orig_entry_e = loop_preheader_edge (orig_loop);
264 edge new_loop_exit_e = single_exit (new_loop);
265 edge new_loop_entry_e = loop_preheader_edge (new_loop);
/* If NEW_LOOP runs after ORIG_LOOP, its entry value is the value
   that flows around ORIG_LOOP's latch; otherwise it is ORIG_LOOP's
   preheader value.  */
266 edge entry_arg_e = (after ? orig_loop_latch : orig_entry_e);
267 gimple_stmt_iterator gsi_new, gsi_orig;
270 step 1. For each loop-header-phi:
271 Add the first phi argument for the phi in NEW_LOOP
272 (the one associated with the entry of NEW_LOOP)
274 step 2. For each loop-header-phi:
275 Add the second phi argument for the phi in NEW_LOOP
276 (the one associated with the latch of NEW_LOOP)
278 step 3. Update the phis in the successor block of NEW_LOOP.
280 case 1: NEW_LOOP was placed before ORIG_LOOP:
281 The successor block of NEW_LOOP is the header of ORIG_LOOP.
282 Updating the phis in the successor block can therefore be done
283 along with the scanning of the loop header phis, because the
284 header blocks of ORIG_LOOP and NEW_LOOP have exactly the same
285 phi nodes, organized in the same order.
287 case 2: NEW_LOOP was placed after ORIG_LOOP:
288 The successor block of NEW_LOOP is the original exit block of
289 ORIG_LOOP - the phis to be updated are the loop-closed-ssa phis.
290 We postpone updating these phis to a later stage (when
291 loop guards are added).
295 /* Scan the phis in the headers of the old and new loops
296 (they are organized in exactly the same order). */
298 for (gsi_new = gsi_start_phis (new_loop->header),
299 gsi_orig = gsi_start_phis (orig_loop->header);
300 !gsi_end_p (gsi_new) && !gsi_end_p (gsi_orig);
301 gsi_next (&gsi_new), gsi_next (&gsi_orig))
303 phi_new = gsi_stmt (gsi_new);
304 phi_orig = gsi_stmt (gsi_orig);
/* step 1: copy the entry value from the corresponding orig phi.  */
307 def = PHI_ARG_DEF_FROM_EDGE (phi_orig, entry_arg_e);
308 add_phi_arg (phi_new, def, new_loop_entry_e);
/* step 2: the latch value.  Constants need no renaming.  */
311 def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
312 if (TREE_CODE (def) != SSA_NAME)
315 new_ssa_name = get_current_def (def);
318 /* This only happens if there are no definitions
319 inside the loop. use the phi_result in this case. */
320 new_ssa_name = PHI_RESULT (phi_new);
323 /* An ordinary ssa name defined in the loop. */
324 add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop));
326 /* step 3 (case 1). */
/* NEW_LOOP's exit must feed ORIG_LOOP's preheader in this case.  */
329 gcc_assert (new_loop_exit_e == orig_entry_e);
330 SET_PHI_ARG_DEF (phi_orig,
331 new_loop_exit_e->dest_idx,
338 /* Update PHI nodes for a guard of the LOOP.
341 - LOOP, GUARD_EDGE: LOOP is a loop for which we added guard code that
342 controls whether LOOP is to be executed. GUARD_EDGE is the edge that
343 originates from the guard-bb, skips LOOP and reaches the (unique) exit
344 bb of LOOP. This loop-exit-bb is an empty bb with one successor.
345 We denote this bb NEW_MERGE_BB because before the guard code was added
346 it had a single predecessor (the LOOP header), and now it became a merge
347 point of two paths - the path that ends with the LOOP exit-edge, and
348 the path that ends with GUARD_EDGE.
349 - NEW_EXIT_BB: New basic block that is added by this function between LOOP
350 and NEW_MERGE_BB. It is used to place loop-closed-ssa-form exit-phis.
352 ===> The CFG before the guard-code was added:
355 if (exit_loop) goto update_bb
356 else goto LOOP_header_bb
359 ==> The CFG after the guard-code was added:
361 if (LOOP_guard_condition) goto new_merge_bb
362 else goto LOOP_header_bb
365 if (exit_loop_condition) goto new_merge_bb
366 else goto LOOP_header_bb
371 ==> The CFG after this function:
373 if (LOOP_guard_condition) goto new_merge_bb
374 else goto LOOP_header_bb
377 if (exit_loop_condition) goto new_exit_bb
378 else goto LOOP_header_bb
385 1. creates and updates the relevant phi nodes to account for the new
386 incoming edge (GUARD_EDGE) into NEW_MERGE_BB. This involves:
387 1.1. Create phi nodes at NEW_MERGE_BB.
388 1.2. Update the phi nodes at the successor of NEW_MERGE_BB (denoted
389 UPDATE_BB). UPDATE_BB was the exit-bb of LOOP before NEW_MERGE_BB
390 2. preserves loop-closed-ssa-form by creating the required phi nodes
391 at the exit of LOOP (i.e, in NEW_EXIT_BB).
393 There are two flavors to this function:
395 slpeel_update_phi_nodes_for_guard1:
396 Here the guard controls whether we enter or skip LOOP, where LOOP is a
397 prolog_loop (loop1 below), and the new phis created in NEW_MERGE_BB are
398 for variables that have phis in the loop header.
400 slpeel_update_phi_nodes_for_guard2:
401 Here the guard controls whether we enter or skip LOOP, where LOOP is an
402 epilog_loop (loop2 below), and the new phis created in NEW_MERGE_BB are
403 for variables that have phis in the loop exit.
405 I.E., the overall structure is:
408 guard1 (goto loop1/merge1_bb)
411 guard2 (goto merge1_bb/merge2_bb)
418 slpeel_update_phi_nodes_for_guard1 takes care of creating phis in
419 loop1_exit_bb and merge1_bb. These are entry phis (phis for the vars
420 that have phis in loop1->header).
422 slpeel_update_phi_nodes_for_guard2 takes care of creating phis in
423 loop2_exit_bb and merge2_bb. These are exit phis (phis for the vars
424 that have phis in next_bb). It also adds some of these phis to
427 slpeel_update_phi_nodes_for_guard1 is always called before
428 slpeel_update_phi_nodes_for_guard2. They are both needed in order
429 to create correct data-flow and loop-closed-ssa-form.
431 Generally slpeel_update_phi_nodes_for_guard1 creates phis for variables
432 that change between iterations of a loop (and therefore have a phi-node
433 at the loop entry), whereas slpeel_update_phi_nodes_for_guard2 creates
434 phis for variables that are used out of the loop (and therefore have
435 loop-closed exit phis). Some variables may be both updated between
436 iterations and used after the loop. This is why in loop1_exit_bb we
437 may need both entry_phis (created by slpeel_update_phi_nodes_for_guard1)
438 and exit phis (created by slpeel_update_phi_nodes_for_guard2).
440 - IS_NEW_LOOP: if IS_NEW_LOOP is true, then LOOP is a newly created copy of
441 an original loop. i.e., we have:
444 guard_bb (goto LOOP/new_merge)
450 If IS_NEW_LOOP is false, then LOOP is an original loop, in which case we
454 guard_bb (goto LOOP/new_merge)
460 The SSA names defined in the original loop have a current
461 reaching definition that records the corresponding new
462 ssa-name used in the new duplicated loop copy.
465 /* Function slpeel_update_phi_nodes_for_guard1
468 - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
469 - DEFS - a bitmap of ssa names to mark new names for which we recorded
472 In the context of the overall structure, we have:
475 guard1 (goto loop1/merge1_bb)
478 guard2 (goto merge1_bb/merge2_bb)
485 For each name updated between loop iterations (i.e - for each name that has
486 an entry (loop-header) phi in LOOP) we create a new phi in:
487 1. merge1_bb (to account for the edge from guard1)
488 2. loop1_exit_bb (an exit-phi to keep LOOP in loop-closed form)
/* See the "Function slpeel_update_phi_nodes_for_guard1" comment above:
   for every loop-header phi of LOOP, create a merge phi in NEW_MERGE_BB
   (step 1) and a loop-closed exit phi in NEW_EXIT_BB (step 2), and
   record the new exit-phi name via set_current_def so that guard2 can
   find it later.
   NOTE(review): interior lines (the `name' and `new_exit_e'
   declarations, braces, some create_phi_node arguments, the
   is_new_loop conditional around 2.4) are elided from this view;
   only comments were added.  */
492 slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
493 bool is_new_loop, basic_block *new_exit_bb,
496 gimple orig_phi, new_phi;
497 gimple update_phi, update_phi2;
498 tree guard_arg, loop_arg;
499 basic_block new_merge_bb = guard_edge->dest;
500 edge e = EDGE_SUCC (new_merge_bb, 0);
501 basic_block update_bb = e->dest;
502 basic_block orig_bb = loop->header;
504 tree current_new_name;
506 gimple_stmt_iterator gsi_orig, gsi_update;
508 /* Create new bb between loop and new_merge_bb. */
509 *new_exit_bb = split_edge (single_exit (loop));
511 new_exit_e = EDGE_SUCC (*new_exit_bb, 0);
/* Walk the header phis of LOOP and the phis of UPDATE_BB in lockstep;
   they are organized in the same order (see the comment above
   slpeel_update_phis_for_duplicate_loop).  */
513 for (gsi_orig = gsi_start_phis (orig_bb),
514 gsi_update = gsi_start_phis (update_bb);
515 !gsi_end_p (gsi_orig) && !gsi_end_p (gsi_update);
516 gsi_next (&gsi_orig), gsi_next (&gsi_update))
518 orig_phi = gsi_stmt (gsi_orig);
519 update_phi = gsi_stmt (gsi_update);
521 /* Virtual phi; Mark it for renaming. We actually want to call
522 mark_sym_for_renaming, but since all ssa renaming datastructures
523 are going to be freed before we get to call ssa_update, we just
524 record this name for now in a bitmap, and will mark it for
526 name = PHI_RESULT (orig_phi);
527 if (!is_gimple_reg (SSA_NAME_VAR (name)))
528 bitmap_set_bit (vect_memsyms_to_rename, DECL_UID (SSA_NAME_VAR (name)));
530 /** 1. Handle new-merge-point phis **/
532 /* 1.1. Generate new phi node in NEW_MERGE_BB: */
533 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
536 /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
537 of LOOP. Set the two phi args in NEW_PHI for these edges: */
538 loop_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, EDGE_SUCC (loop->latch, 0));
539 guard_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, loop_preheader_edge (loop));
541 add_phi_arg (new_phi, loop_arg, new_exit_e);
542 add_phi_arg (new_phi, guard_arg, guard_edge);
544 /* 1.3. Update phi in successor block. */
545 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == loop_arg
546 || PHI_ARG_DEF_FROM_EDGE (update_phi, e) == guard_arg);
547 SET_PHI_ARG_DEF (update_phi, e->dest_idx, PHI_RESULT (new_phi));
548 update_phi2 = new_phi;
551 /** 2. Handle loop-closed-ssa-form phis **/
/* Only real (gimple register) names need loop-closed exit phis.  */
553 if (!is_gimple_reg (PHI_RESULT (orig_phi)))
556 /* 2.1. Generate new phi node in NEW_EXIT_BB: */
557 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
560 /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop. */
561 add_phi_arg (new_phi, loop_arg, single_exit (loop));
563 /* 2.3. Update phi in successor of NEW_EXIT_BB: */
564 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
565 SET_PHI_ARG_DEF (update_phi2, new_exit_e->dest_idx, PHI_RESULT (new_phi));
567 /* 2.4. Record the newly created name with set_current_def.
568 We want to find a name such that
569 name = get_current_def (orig_loop_name)
570 and to set its current definition as follows:
571 set_current_def (name, new_phi_name)
573 If LOOP is a new loop then loop_arg is already the name we're
574 looking for. If LOOP is the original loop, then loop_arg is
575 the orig_loop_name and the relevant name is recorded in its
576 current reaching definition. */
578 current_new_name = loop_arg;
581 current_new_name = get_current_def (loop_arg);
582 /* current_def is not available only if the variable does not
583 change inside the loop, in which case we also don't care
584 about recording a current_def for it because we won't be
585 trying to create loop-exit-phis for it. */
586 if (!current_new_name)
589 gcc_assert (get_current_def (current_new_name) == NULL_TREE);
591 set_current_def (current_new_name, PHI_RESULT (new_phi));
592 bitmap_set_bit (*defs, SSA_NAME_VERSION (current_new_name));
597 /* Function slpeel_update_phi_nodes_for_guard2
600 - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
602 In the context of the overall structure, we have:
605 guard1 (goto loop1/merge1_bb)
608 guard2 (goto merge1_bb/merge2_bb)
615 For each name used outside the loop (i.e - for each name that has an exit
616 phi in next_bb) we create a new phi in:
617 1. merge2_bb (to account for the edge from guard_bb)
618 2. loop2_exit_bb (an exit-phi to keep LOOP in loop-closed form)
619 3. guard2 bb (an exit phi to keep the preceding loop in loop-closed form),
620 if needed (if it wasn't handled by slpeel_update_phi_nodes_for_guard1).
/* See the "Function slpeel_update_phi_nodes_for_guard2" comment above:
   for every loop-closed exit phi in UPDATE_BB, create a merge phi in
   NEW_MERGE_BB (step 1), an exit phi in NEW_EXIT_BB (step 2), and, if
   guard1 did not already do so, an exit phi in GUARD_BB for the
   preceding loop (step 3).
   NOTE(review): interior lines (the `new_exit_e' / `arg' declarations,
   braces, some create_phi_node arguments, the loop_arg assignment and
   the conditional ladder selecting guard_arg) are elided from this
   view; only comments were added.  */
624 slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
625 bool is_new_loop, basic_block *new_exit_bb)
627 gimple orig_phi, new_phi;
628 gimple update_phi, update_phi2;
629 tree guard_arg, loop_arg;
630 basic_block new_merge_bb = guard_edge->dest;
631 edge e = EDGE_SUCC (new_merge_bb, 0);
632 basic_block update_bb = e->dest;
634 tree orig_def, orig_def_new_name;
635 tree new_name, new_name2;
637 gimple_stmt_iterator gsi;
639 /* Create new bb between loop and new_merge_bb. */
640 *new_exit_bb = split_edge (single_exit (loop));
642 new_exit_e = EDGE_SUCC (*new_exit_bb, 0);
644 for (gsi = gsi_start_phis (update_bb); !gsi_end_p (gsi); gsi_next (&gsi))
646 update_phi = gsi_stmt (gsi);
647 orig_phi = update_phi;
648 orig_def = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
649 /* This loop-closed-phi actually doesn't represent a use
650 out of the loop - the phi arg is a constant. */
651 if (TREE_CODE (orig_def) != SSA_NAME)
653 orig_def_new_name = get_current_def (orig_def);
656 /** 1. Handle new-merge-point phis **/
658 /* 1.1. Generate new phi node in NEW_MERGE_BB: */
659 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
662 /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
663 of LOOP. Set the two PHI args in NEW_PHI for these edges: */
665 new_name2 = NULL_TREE;
666 if (orig_def_new_name)
668 new_name = orig_def_new_name;
669 /* Some variables have both loop-entry-phis and loop-exit-phis.
670 Such variables were given yet newer names by phis placed in
671 guard_bb by slpeel_update_phi_nodes_for_guard1. I.e:
672 new_name2 = get_current_def (get_current_def (orig_name)). */
673 new_name2 = get_current_def (new_name);
/* guard_arg is picked from orig_def / new_name / new_name2 depending
   on which renamings exist (elided conditional ladder).  */
678 guard_arg = orig_def;
683 guard_arg = new_name;
687 guard_arg = new_name2;
689 add_phi_arg (new_phi, loop_arg, new_exit_e);
690 add_phi_arg (new_phi, guard_arg, guard_edge);
692 /* 1.3. Update phi in successor block. */
693 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == orig_def);
694 SET_PHI_ARG_DEF (update_phi, e->dest_idx, PHI_RESULT (new_phi));
695 update_phi2 = new_phi;
698 /** 2. Handle loop-closed-ssa-form phis **/
700 /* 2.1. Generate new phi node in NEW_EXIT_BB: */
701 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
704 /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop. */
705 add_phi_arg (new_phi, loop_arg, single_exit (loop));
707 /* 2.3. Update phi in successor of NEW_EXIT_BB: */
708 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
709 SET_PHI_ARG_DEF (update_phi2, new_exit_e->dest_idx, PHI_RESULT (new_phi));
712 /** 3. Handle loop-closed-ssa-form phis for first loop **/
714 /* 3.1. Find the relevant names that need an exit-phi in
715 GUARD_BB, i.e. names for which
716 slpeel_update_phi_nodes_for_guard1 had not already created a
717 phi node. This is the case for names that are used outside
718 the loop (and therefore need an exit phi) but are not updated
719 across loop iterations (and therefore don't have a
722 slpeel_update_phi_nodes_for_guard1 is responsible for
723 creating loop-exit phis in GUARD_BB for names that have a
724 loop-header-phi. When such a phi is created we also record
725 the new name in its current definition. If this new name
726 exists, then guard_arg was set to this new name (see 1.2
727 above). Therefore, if guard_arg is not this new name, this
728 is an indication that an exit-phi in GUARD_BB was not yet
729 created, so we take care of it here. */
730 if (guard_arg == new_name2)
734 /* 3.2. Generate new phi node in GUARD_BB: */
735 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
738 /* 3.3. GUARD_BB has one incoming edge: */
739 gcc_assert (EDGE_COUNT (guard_edge->src->preds) == 1);
740 add_phi_arg (new_phi, arg, EDGE_PRED (guard_edge->src, 0));
742 /* 3.4. Update phi in successor of GUARD_BB: */
743 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, guard_edge)
745 SET_PHI_ARG_DEF (update_phi2, guard_edge->dest_idx, PHI_RESULT (new_phi));
750 /* Make the LOOP iterate NITERS times. This is done by adding a new IV
751 that starts at zero, increases by one and its limit is NITERS.
753 Assumption: the exit-condition of LOOP is the last stmt in the loop. */
/* Rewrite LOOP's exit test so the loop runs exactly NITERS times:
   create a fresh IV counting 0,1,2,... and replace the old exit
   condition with a compare of the incremented IV against NITERS.
   NOTE(review): some lines (declarations of orig_cond/cond_stmt/code/
   loop_loc/insert_after, a GSI flag argument, braces) are elided from
   this view; only comments were added.  */
756 slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
758 tree indx_before_incr, indx_after_incr;
761 edge exit_edge = single_exit (loop);
762 gimple_stmt_iterator loop_cond_gsi;
763 gimple_stmt_iterator incr_gsi;
/* The new IV starts at 0 and steps by 1, in NITERS's type.  */
765 tree init = build_int_cst (TREE_TYPE (niters), 0);
766 tree step = build_int_cst (TREE_TYPE (niters), 1);
770 orig_cond = get_loop_exit_condition (loop);
771 gcc_assert (orig_cond);
772 loop_cond_gsi = gsi_for_stmt (orig_cond);
774 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
775 create_iv (init, step, NULL_TREE, loop,
776 &incr_gsi, insert_after, &indx_before_incr, &indx_after_incr);
/* Make sure both compare operands are gimple values at the exit test.  */
778 indx_after_incr = force_gimple_operand_gsi (&loop_cond_gsi, indx_after_incr,
779 true, NULL_TREE, true,
781 niters = force_gimple_operand_gsi (&loop_cond_gsi, niters, true, NULL_TREE,
782 true, GSI_SAME_STMT);
/* Choose the compare code so the branch sense matches the old exit edge.  */
784 code = (exit_edge->flags & EDGE_TRUE_VALUE) ? GE_EXPR : LT_EXPR;
785 cond_stmt = gimple_build_cond (code, indx_after_incr, niters, NULL_TREE,
788 gsi_insert_before (&loop_cond_gsi, cond_stmt, GSI_SAME_STMT);
790 /* Remove old loop exit test: */
791 gsi_remove (&loop_cond_gsi, true);
/* Dump the new exit condition when detailed dumping is enabled.  */
793 loop_loc = find_loop_location (loop);
794 if (dump_file && (dump_flags & TDF_DETAILS))
796 if (loop_loc != UNKNOWN_LOC)
797 fprintf (dump_file, "\nloop at %s:%d: ",
798 LOC_FILE (loop_loc), LOC_LINE (loop_loc));
799 print_gimple_stmt (dump_file, cond_stmt, 0, TDF_SLIM);
/* Record the (symbolic) iteration count on the loop.  */
802 loop->nb_iterations = niters;
806 /* Given LOOP this function generates a new copy of it and puts it
807 on E which is either the entry or exit of LOOP. */
/* Duplicate LOOP and insert the copy on edge E, which must be either
   LOOP's preheader edge or its single exit edge.  Returns the new loop
   (return statements elided from this view).  Phi nodes at the shared
   join points are patched so values also flow from the copy.
   NOTE(review): many interior lines (declarations of phi/phi_arg/
   exit/new_exit/was_imm_dom/at_exit, NULL-returning failure paths,
   braces, free of bbs/new_bbs) are elided; only comments were added.  */
810 slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
812 struct loop *new_loop;
813 basic_block *new_bbs, *bbs;
816 basic_block exit_dest;
820 gimple_stmt_iterator gsi;
/* E must be the entry or the exit of LOOP; otherwise bail out.  */
822 at_exit = (e == single_exit (loop));
823 if (!at_exit && e != loop_preheader_edge (loop))
826 bbs = get_loop_body (loop);
828 /* Check whether duplication is possible. */
829 if (!can_copy_bbs_p (bbs, loop->num_nodes))
835 /* Generate new loop structure. */
836 new_loop = duplicate_loop (loop, loop_outer (loop))
843 exit_dest = single_exit (loop)->dest;
/* Remember whether LOOP's header immediately dominated the exit
   destination, so dominators can be fixed after insertion.  */
844 was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
845 exit_dest) == loop->header ?
848 new_bbs = XNEWVEC (basic_block, loop->num_nodes);
850 exit = single_exit (loop);
851 copy_bbs (bbs, loop->num_nodes, new_bbs,
852 &exit, 1, &new_exit, NULL,
855 /* Duplicating phi args at exit bbs as coming
856 also from exit of duplicated loop. */
857 for (gsi = gsi_start_phis (exit_dest); !gsi_end_p (gsi); gsi_next (&gsi))
859 phi = gsi_stmt (gsi);
860 phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, single_exit (loop));
863 edge new_loop_exit_edge;
/* The copy's exit is whichever header successor is not the latch.  */
865 if (EDGE_SUCC (new_loop->header, 0)->dest == new_loop->latch)
866 new_loop_exit_edge = EDGE_SUCC (new_loop->header, 1);
868 new_loop_exit_edge = EDGE_SUCC (new_loop->header, 0);
870 add_phi_arg (phi, phi_arg, new_loop_exit_edge);
874 if (at_exit) /* Add the loop copy at exit. */
876 redirect_edge_and_branch_force (e, new_loop->header);
877 PENDING_STMT (e) = NULL;
878 set_immediate_dominator (CDI_DOMINATORS, new_loop->header, e->src);
880 set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_loop->header);
882 else /* Add the copy at entry. */
885 edge entry_e = loop_preheader_edge (loop);
886 basic_block preheader = entry_e->src;
/* Find the copy's exit edge: the header successor outside the copy.  */
888 if (!flow_bb_inside_loop_p (new_loop,
889 EDGE_SUCC (new_loop->header, 0)->dest))
890 new_exit_e = EDGE_SUCC (new_loop->header, 0);
892 new_exit_e = EDGE_SUCC (new_loop->header, 1);
894 redirect_edge_and_branch_force (new_exit_e, loop->header);
895 PENDING_STMT (new_exit_e) = NULL;
896 set_immediate_dominator (CDI_DOMINATORS, loop->header,
899 /* We have to add phi args to the loop->header here as coming
900 from new_exit_e edge. */
901 for (gsi = gsi_start_phis (loop->header);
905 phi = gsi_stmt (gsi);
906 phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, entry_e);
908 add_phi_arg (phi, phi_arg, new_exit_e);
911 redirect_edge_and_branch_force (entry_e, new_loop->header);
912 PENDING_STMT (entry_e) = NULL;
913 set_immediate_dominator (CDI_DOMINATORS, new_loop->header, preheader);
923 /* Given the condition statement COND, put it as the last statement
924 of GUARD_BB; EXIT_BB is the basic block to skip the loop;
925 Assumes that this is the single exit of the guarded loop.
926 Returns the skip edge. */
/* Insert COND at the end of GUARD_BB as a COND != 0 test and create the
   skip edge GUARD_BB -> EXIT_BB (EDGE_TRUE_VALUE); the pre-existing
   successor edge becomes the EDGE_FALSE_VALUE fall-into-loop path.
   Returns the new skip edge (return line elided from this view).
   NOTE(review): declarations of enter_e/new_e/cond_stmt, the dom_bb
   parameter line, and the force_gimple_operand result handling are
   elided; only comments were added.  */
929 slpeel_add_loop_guard (basic_block guard_bb, tree cond, basic_block exit_bb,
932 gimple_stmt_iterator gsi;
935 gimple_seq gimplify_stmt_list = NULL;
/* Repurpose the existing fallthru successor as the "enter loop" edge.  */
937 enter_e = EDGE_SUCC (guard_bb, 0);
938 enter_e->flags &= ~EDGE_FALLTHRU;
939 enter_e->flags |= EDGE_FALSE_VALUE;
940 gsi = gsi_last_bb (guard_bb);
/* Gimplify COND; any statements it needs are emitted before the test.  */
943 force_gimple_operand (cond, &gimplify_stmt_list, true,
945 cond_stmt = gimple_build_cond (NE_EXPR, cond, integer_zero_node,
946 NULL_TREE, NULL_TREE);
947 if (gimplify_stmt_list)
948 gsi_insert_seq_after (&gsi, gimplify_stmt_list, GSI_NEW_STMT);
950 gsi = gsi_last_bb (guard_bb);
951 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
953 /* Add new edge to connect guard block to the merge/loop-exit block. */
954 new_e = make_edge (guard_bb, exit_bb, EDGE_TRUE_VALUE);
955 set_immediate_dominator (CDI_DOMINATORS, exit_bb, dom_bb);
960 /* This function verifies that the following restrictions apply to LOOP:
962 (2) it consists of exactly 2 basic blocks - header, and an empty latch.
963 (3) it is single entry, single exit
964 (4) its exit condition is the last stmt in the header
965 (5) E is the entry/exit edge of LOOP.
/* Check the restrictions listed in the comment above (inner loop with
   empty latch, single entry/exit, modifiable exit condition, E being
   the entry or exit edge).  The return statements are elided from this
   view; the visible condition is the rejection test.
   NOTE(review): only comments were added; code lines are unchanged.  */
971 slpeel_can_duplicate_loop_p (const struct loop *loop, const_edge e)
972 edge exit_e = single_exit (loop);
973 edge entry_e = loop_preheader_edge (loop);
974 gimple orig_cond = get_loop_exit_condition (loop);
975 gimple_stmt_iterator loop_exit_gsi = gsi_last_bb (exit_e->src);
/* Pending SSA updates would invalidate the peeling machinery.  */
976 if (need_ssa_update_p ())
980 /* All loops have an outer scope; the only case loop->outer is NULL is for
981 the function itself. */
982 || !loop_outer (loop)
983 || loop->num_nodes != 2
984 || !empty_block_p (loop->latch)
985 || !single_exit (loop)
986 /* Verify that new loop exit condition can be trivially modified. */
987 || (!orig_cond || orig_cond != gsi_stmt (loop_exit_gsi))
988 || (e != exit_e && e != entry_e))
994 #ifdef ENABLE_CHECKING
/* Debug-only (ENABLE_CHECKING) sanity check that peeling produced the
   expected CFG shape between FIRST_LOOP and SECOND_LOOP: the first
   loop's exit block has exactly two successors, and the second loop's
   preheader is reached both from that exit block and from the block
   preceding the first loop (the guard bypass path).
   NOTE(review): some lines (braces, the second assertion mentioned at
   the trailing comment) are elided from this view.  */
996 slpeel_verify_cfg_after_peeling (struct loop *first_loop,
997 struct loop *second_loop)
999 basic_block loop1_exit_bb = single_exit (first_loop)->dest;
1000 basic_block loop2_entry_bb = loop_preheader_edge (second_loop)->src;
1001 basic_block loop1_entry_bb = loop_preheader_edge (first_loop)->src;
1003 /* A guard that controls whether the second_loop is to be executed or skipped
1004 is placed in first_loop->exit. first_loop->exit therefore has two
1005 successors - one is the preheader of second_loop, and the other is a bb
1008 gcc_assert (EDGE_COUNT (loop1_exit_bb->succs) == 2);
1010 /* 1. Verify that one of the successors of first_loop->exit is the preheader
1013 /* The preheader of new_loop is expected to have two predecessors:
1014 first_loop->exit and the block that precedes first_loop. */
/* Accept either ordering of the two predecessor edges.  */
1016 gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
1017 && ((EDGE_PRED (loop2_entry_bb, 0)->src == loop1_exit_bb
1018 && EDGE_PRED (loop2_entry_bb, 1)->src == loop1_entry_bb)
1019 || (EDGE_PRED (loop2_entry_bb, 1)->src == loop1_exit_bb
1020 && EDGE_PRED (loop2_entry_bb, 0)->src == loop1_entry_bb)));
1022 /* Verify that the other successor of first_loop->exit is after the
1022 /* Verify that the other successor of first_loop->exit is after the
1028 /* If the run time cost model check determines that vectorization is
1029 not profitable and hence scalar loop should be generated then set
1030 FIRST_NITERS to prologue peeled iterations. This will allow all the
1031 iterations to be executed in the prologue peeled scalar loop. */
/* Implements the cost-model fallback described above: build a guard
   that compares the unchanged scalar iteration count against threshold
   TH, and merge via a new phi so that FIRST_NITERS becomes the full
   scalar count when vectorization is deemed unprofitable.
   NOTE(review): several lines (remaining parameter lines incl.
   first_niters/loop/th, declarations of e/cond_stmt/newphi, the
   make_single_succ_edge flag argument, braces) are elided from this
   view; only comments were added.  */
1034 set_prologue_iterations (basic_block bb_before_first_loop,
1040 basic_block cond_bb, then_bb;
1041 tree var, prologue_after_cost_adjust_name;
1042 gimple_stmt_iterator gsi;
1044 edge e_true, e_false, e_fallthru;
1046 gimple_seq gimplify_stmt_list = NULL, stmts = NULL;
1047 tree cost_pre_condition = NULL_TREE;
1048 tree scalar_loop_iters =
1049 unshare_expr (LOOP_VINFO_NITERS_UNCHANGED (loop_vec_info_for_loop (loop)));
/* Split twice to create cond_bb (holds the guard) and then_bb (the
   taken path) in front of bb_before_first_loop.  */
1051 e = single_pred_edge (bb_before_first_loop);
1052 cond_bb = split_edge(e);
1054 e = single_pred_edge (bb_before_first_loop);
1055 then_bb = split_edge(e);
1056 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
1058 e_false = make_single_succ_edge (cond_bb, bb_before_first_loop,
1060 set_immediate_dominator (CDI_DOMINATORS, bb_before_first_loop, cond_bb);
1062 e_true = EDGE_PRED (then_bb, 0);
1063 e_true->flags &= ~EDGE_FALLTHRU;
1064 e_true->flags |= EDGE_TRUE_VALUE;
1066 e_fallthru = EDGE_SUCC (then_bb, 0);
/* Guard: take the scalar path when scalar_loop_iters <= TH.  */
1068 cost_pre_condition =
1069 build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
1070 build_int_cst (TREE_TYPE (scalar_loop_iters), th));
1071 cost_pre_condition =
1072 force_gimple_operand (cost_pre_condition, &gimplify_stmt_list,
1074 cond_stmt = gimple_build_cond (NE_EXPR, cost_pre_condition,
1075 integer_zero_node, NULL_TREE, NULL_TREE);
1077 gsi = gsi_last_bb (cond_bb);
1078 if (gimplify_stmt_list)
1079 gsi_insert_seq_after (&gsi, gimplify_stmt_list, GSI_NEW_STMT);
1081 gsi = gsi_last_bb (cond_bb);
1082 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
/* On the taken path, materialize the full scalar iteration count.  */
1084 var = create_tmp_var (TREE_TYPE (scalar_loop_iters),
1085 "prologue_after_cost_adjust");
1086 add_referenced_var (var);
1087 prologue_after_cost_adjust_name =
1088 force_gimple_operand (scalar_loop_iters, &stmts, false, var);
1090 gsi = gsi_last_bb (then_bb);
1092 gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
/* Merge the two values: full scalar count from then_bb, the original
   FIRST_NITERS from the false edge.  */
1094 newphi = create_phi_node (var, bb_before_first_loop);
1095 add_phi_arg (newphi, prologue_after_cost_adjust_name, e_fallthru);
1096 add_phi_arg (newphi, first_niters, e_false);
1098 first_niters = PHI_RESULT (newphi);
1102 /* Function slpeel_tree_peel_loop_to_edge.
1104 Peel the first (last) iterations of LOOP into a new prolog (epilog) loop
1105 that is placed on the entry (exit) edge E of LOOP. After this transformation
1106 we have two loops one after the other - first-loop iterates FIRST_NITERS
1107 times, and second-loop iterates the remainder NITERS - FIRST_NITERS times.
1108 If the cost model indicates that it is profitable to emit a scalar
1109 loop instead of the vector one, then the prolog (epilog) loop will iterate
1110 for the entire unchanged scalar iterations of the loop.
1113 - LOOP: the loop to be peeled.
1114 - E: the exit or entry edge of LOOP.
1115 If it is the entry edge, we peel the first iterations of LOOP. In this
1116 case first-loop is LOOP, and second-loop is the newly created loop.
1117 If it is the exit edge, we peel the last iterations of LOOP. In this
1118 case, first-loop is the newly created loop, and second-loop is LOOP.
1119 - NITERS: the number of iterations that LOOP iterates.
1120 - FIRST_NITERS: the number of iterations that the first-loop should iterate.
1121 - UPDATE_FIRST_LOOP_COUNT: specifies whether this function is responsible
1122 for updating the loop bound of the first-loop to FIRST_NITERS. If it
1123 is false, the caller of this function may want to take care of this
1124 (this can be useful if we don't want new stmts added to first-loop).
1125 - TH: cost model profitability threshold of iterations for vectorization.
1126 - CHECK_PROFITABILITY: specify whether cost model check has not occurred
1127 during versioning and hence needs to occur during
1128 prologue generation or whether cost model check
1129 has not occurred during prologue generation and hence
1130 needs to occur during epilogue generation.
1134 The function returns a pointer to the new loop-copy, or NULL if it failed
1135 to perform the transformation.
1137 The function generates two if-then-else guards: one before the first loop,
1138 and the other before the second loop:
1140 if (FIRST_NITERS == 0) then skip the first loop,
1141 and go directly to the second loop.
1142 The second guard is:
1143 if (FIRST_NITERS == NITERS) then skip the second loop.
1145 FORNOW only simple loops are supported (see slpeel_can_duplicate_loop_p).
1146 FORNOW the resulting code will not be in loop-closed-ssa form.
1150 slpeel_tree_peel_loop_to_edge (struct loop *loop,
1151 edge e, tree first_niters,
1152 tree niters, bool update_first_loop_count,
1153 unsigned int th, bool check_profitability)
1155 struct loop *new_loop = NULL, *first_loop, *second_loop;
1157 tree pre_condition = NULL_TREE;
1159 basic_block bb_before_second_loop, bb_after_second_loop;
1160 basic_block bb_before_first_loop;
1161 basic_block bb_between_loops;
1162 basic_block new_exit_bb;
1163 edge exit_e = single_exit (loop);
1165 tree cost_pre_condition = NULL_TREE;
1167 if (!slpeel_can_duplicate_loop_p (loop, e))
1170 /* We have to initialize cfg_hooks. Then, when calling
1171 cfg_hooks->split_edge, the function tree_split_edge
1172 is actually called and, when calling cfg_hooks->duplicate_block,
1173 the function tree_duplicate_bb is called. */
1174 gimple_register_cfg_hooks ();
1177 /* 1. Generate a copy of LOOP and put it on E (E is the entry/exit of LOOP).
1178 Resulting CFG would be:
1191 if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e)))
1193 loop_loc = find_loop_location (loop);
1194 if (dump_file && (dump_flags & TDF_DETAILS))
1196 if (loop_loc != UNKNOWN_LOC)
1197 fprintf (dump_file, "\n%s:%d: note: ",
1198 LOC_FILE (loop_loc), LOC_LINE (loop_loc));
1199 fprintf (dump_file, "tree_duplicate_loop_to_edge_cfg failed.\n");
1206 /* NEW_LOOP was placed after LOOP. */
1208 second_loop = new_loop;
1212 /* NEW_LOOP was placed before LOOP. */
1213 first_loop = new_loop;
1217 definitions = ssa_names_to_replace ();
1218 slpeel_update_phis_for_duplicate_loop (loop, new_loop, e == exit_e);
1219 rename_variables_in_loop (new_loop);
1222 /* 2. Add the guard code in one of the following ways:
1224 2.a Add the guard that controls whether the first loop is executed.
1225 This occurs when this function is invoked for prologue or epilogue
1226 generation and when the cost model check can be done at compile time.
1228 Resulting CFG would be:
1230 bb_before_first_loop:
1231 if (FIRST_NITERS == 0) GOTO bb_before_second_loop
1238 bb_before_second_loop:
1246 2.b Add the cost model check that allows the prologue
1247 to iterate for the entire unchanged scalar
1248 iterations of the loop in the event that the cost
1249 model indicates that the scalar loop is more
1250 profitable than the vector one. This occurs when
1251 this function is invoked for prologue generation
1252 and the cost model check needs to be done at run
1255 Resulting CFG after prologue peeling would be:
1257 if (scalar_loop_iterations <= th)
1258 FIRST_NITERS = scalar_loop_iterations
1260 bb_before_first_loop:
1261 if (FIRST_NITERS == 0) GOTO bb_before_second_loop
1268 bb_before_second_loop:
1276 2.c Add the cost model check that allows the epilogue
1277 to iterate for the entire unchanged scalar
1278 iterations of the loop in the event that the cost
1279 model indicates that the scalar loop is more
1280 profitable than the vector one. This occurs when
1281 this function is invoked for epilogue generation
1282 and the cost model check needs to be done at run
1285 Resulting CFG after prologue peeling would be:
1287 bb_before_first_loop:
1288 if ((scalar_loop_iterations <= th)
1290 FIRST_NITERS == 0) GOTO bb_before_second_loop
1297 bb_before_second_loop:
1306 bb_before_first_loop = split_edge (loop_preheader_edge (first_loop));
1307 bb_before_second_loop = split_edge (single_exit (first_loop));
1309 /* Epilogue peeling. */
1310 if (!update_first_loop_count)
1313 fold_build2 (LE_EXPR, boolean_type_node, first_niters,
1314 build_int_cst (TREE_TYPE (first_niters), 0));
1315 if (check_profitability)
1317 tree scalar_loop_iters
1318 = unshare_expr (LOOP_VINFO_NITERS_UNCHANGED
1319 (loop_vec_info_for_loop (loop)));
1320 cost_pre_condition =
1321 build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
1322 build_int_cst (TREE_TYPE (scalar_loop_iters), th));
1324 pre_condition = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1325 cost_pre_condition, pre_condition);
1329 /* Prologue peeling. */
1332 if (check_profitability)
1333 set_prologue_iterations (bb_before_first_loop, first_niters,
1337 fold_build2 (LE_EXPR, boolean_type_node, first_niters,
1338 build_int_cst (TREE_TYPE (first_niters), 0));
1341 skip_e = slpeel_add_loop_guard (bb_before_first_loop, pre_condition,
1342 bb_before_second_loop, bb_before_first_loop);
1343 slpeel_update_phi_nodes_for_guard1 (skip_e, first_loop,
1344 first_loop == new_loop,
1345 &new_exit_bb, &definitions);
1348 /* 3. Add the guard that controls whether the second loop is executed.
1349 Resulting CFG would be:
1351 bb_before_first_loop:
1352 if (FIRST_NITERS == 0) GOTO bb_before_second_loop (skip first loop)
1360 if (FIRST_NITERS == NITERS) GOTO bb_after_second_loop (skip second loop)
1361 GOTO bb_before_second_loop
1363 bb_before_second_loop:
1369 bb_after_second_loop:
1374 bb_between_loops = new_exit_bb;
1375 bb_after_second_loop = split_edge (single_exit (second_loop));
1378 fold_build2 (EQ_EXPR, boolean_type_node, first_niters, niters);
1379 skip_e = slpeel_add_loop_guard (bb_between_loops, pre_condition,
1380 bb_after_second_loop, bb_before_first_loop);
1381 slpeel_update_phi_nodes_for_guard2 (skip_e, second_loop,
1382 second_loop == new_loop, &new_exit_bb);
1384 /* 4. Make first-loop iterate FIRST_NITERS times, if requested.
1386 if (update_first_loop_count)
1387 slpeel_make_loop_iterate_ntimes (first_loop, first_niters);
1389 BITMAP_FREE (definitions);
1390 delete_update_ssa ();
1395 /* Function vect_get_loop_location.
1397 Extract the location of the loop in the source code.
1398 If the loop is not well formed for vectorization, an estimated
1399 location is calculated.
1400 Return the loop location if succeed and NULL if not. */
1403 find_loop_location (struct loop *loop)
1407 gimple_stmt_iterator si;
1412 stmt = get_loop_exit_condition (loop);
1414 if (stmt && gimple_location (stmt) != UNKNOWN_LOC)
1415 return gimple_location (stmt);
1417 /* If we got here the loop is probably not "well formed",
1418 try to estimate the loop location */
1425 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1427 stmt = gsi_stmt (si);
1428 if (gimple_location (stmt) != UNKNOWN_LOC)
1429 return gimple_location (stmt);
1436 /*************************************************************************
1437 Vectorization Debug Information.
1438 *************************************************************************/
1440 /* Function vect_set_verbosity_level.
1442 Called from toplev.c upon detection of the
1443 -ftree-vectorizer-verbose=N option. */
1446 vect_set_verbosity_level (const char *val)
1451 if (vl < MAX_VERBOSITY_LEVEL)
1452 vect_verbosity_level = vl;
1454 vect_verbosity_level = MAX_VERBOSITY_LEVEL - 1;
1458 /* Function vect_set_dump_settings.
1460 Fix the verbosity level of the vectorizer if the
1461 requested level was not set explicitly using the flag
1462 -ftree-vectorizer-verbose=N.
1463 Decide where to print the debugging information (dump_file/stderr).
1464 If the user defined the verbosity level, but there is no dump file,
1465 print to stderr, otherwise print to the dump file. */
1468 vect_set_dump_settings (void)
1470 vect_dump = dump_file;
1472 /* Check if the verbosity level was defined by the user: */
1473 if (vect_verbosity_level != MAX_VERBOSITY_LEVEL)
1475 /* If there is no dump file, print to stderr. */
1481 /* User didn't specify verbosity level: */
1482 if (dump_file && (dump_flags & TDF_DETAILS))
1483 vect_verbosity_level = REPORT_DETAILS;
1484 else if (dump_file && (dump_flags & TDF_STATS))
1485 vect_verbosity_level = REPORT_UNVECTORIZED_LOOPS;
1487 vect_verbosity_level = REPORT_NONE;
1489 gcc_assert (dump_file || vect_verbosity_level == REPORT_NONE);
1493 /* Function debug_loop_details.
1495 For vectorization debug dumps. */
1498 vect_print_dump_info (enum verbosity_levels vl)
1500 if (vl > vect_verbosity_level)
1503 if (!current_function_decl || !vect_dump)
1506 if (vect_loop_location == UNKNOWN_LOC)
1507 fprintf (vect_dump, "\n%s:%d: note: ",
1508 DECL_SOURCE_FILE (current_function_decl),
1509 DECL_SOURCE_LINE (current_function_decl));
1511 fprintf (vect_dump, "\n%s:%d: note: ",
1512 LOC_FILE (vect_loop_location), LOC_LINE (vect_loop_location));
1518 /*************************************************************************
1519 Vectorization Utilities.
1520 *************************************************************************/
1522 /* Function new_stmt_vec_info.
1524 Create and initialize a new stmt_vec_info struct for STMT. */
1527 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo)
1530 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
1532 STMT_VINFO_TYPE (res) = undef_vec_info_type;
1533 STMT_VINFO_STMT (res) = stmt;
1534 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
1535 STMT_VINFO_RELEVANT (res) = 0;
1536 STMT_VINFO_LIVE_P (res) = false;
1537 STMT_VINFO_VECTYPE (res) = NULL;
1538 STMT_VINFO_VEC_STMT (res) = NULL;
1539 STMT_VINFO_IN_PATTERN_P (res) = false;
1540 STMT_VINFO_RELATED_STMT (res) = NULL;
1541 STMT_VINFO_DATA_REF (res) = NULL;
1543 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
1544 STMT_VINFO_DR_OFFSET (res) = NULL;
1545 STMT_VINFO_DR_INIT (res) = NULL;
1546 STMT_VINFO_DR_STEP (res) = NULL;
1547 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
1549 if (gimple_code (stmt) == GIMPLE_PHI
1550 && is_loop_header_bb_p (gimple_bb (stmt)))
1551 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
1553 STMT_VINFO_DEF_TYPE (res) = vect_loop_def;
1554 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
1555 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
1556 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
1557 STMT_SLP_TYPE (res) = 0;
1558 DR_GROUP_FIRST_DR (res) = NULL;
1559 DR_GROUP_NEXT_DR (res) = NULL;
1560 DR_GROUP_SIZE (res) = 0;
1561 DR_GROUP_STORE_COUNT (res) = 0;
1562 DR_GROUP_GAP (res) = 0;
1563 DR_GROUP_SAME_DR_STMT (res) = NULL;
1564 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
1569 /* Create a hash table for stmt_vec_info. */
1572 init_stmt_vec_info_vec (void)
1574 gcc_assert (!stmt_vec_info_vec);
1575 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
1578 /* Free hash table for stmt_vec_info. */
1581 free_stmt_vec_info_vec (void)
1583 gcc_assert (stmt_vec_info_vec);
1584 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
1587 /* Free stmt vectorization related info. */
1590 free_stmt_vec_info (gimple stmt)
1592 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1597 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
1598 set_vinfo_for_stmt (stmt, NULL);
1603 /* Function bb_in_loop_p
1605 Used as predicate for dfs order traversal of the loop bbs. */
1608 bb_in_loop_p (const_basic_block bb, const void *data)
1610 const struct loop *const loop = (const struct loop *)data;
1611 if (flow_bb_inside_loop_p (loop, bb))
1617 /* Function new_loop_vec_info.
1619 Create and initialize a new loop_vec_info struct for LOOP, as well as
1620 stmt_vec_info structs for all the stmts in LOOP. */
1623 new_loop_vec_info (struct loop *loop)
1627 gimple_stmt_iterator si;
1628 unsigned int i, nbbs;
1630 res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
1631 LOOP_VINFO_LOOP (res) = loop;
1633 bbs = get_loop_body (loop);
1635 /* Create/Update stmt_info for all stmts in the loop. */
1636 for (i = 0; i < loop->num_nodes; i++)
1638 basic_block bb = bbs[i];
1640 /* BBs in a nested inner-loop will have been already processed (because
1641 we will have called vect_analyze_loop_form for any nested inner-loop).
1642 Therefore, for stmts in an inner-loop we just want to update the
1643 STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
1644 loop_info of the outer-loop we are currently considering to vectorize
1645 (instead of the loop_info of the inner-loop).
1646 For stmts in other BBs we need to create a stmt_info from scratch. */
1647 if (bb->loop_father != loop)
1649 /* Inner-loop bb. */
1650 gcc_assert (loop->inner && bb->loop_father == loop->inner);
1651 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1653 gimple phi = gsi_stmt (si);
1654 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
1655 loop_vec_info inner_loop_vinfo =
1656 STMT_VINFO_LOOP_VINFO (stmt_info);
1657 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
1658 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
1660 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1662 gimple stmt = gsi_stmt (si);
1663 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1664 loop_vec_info inner_loop_vinfo =
1665 STMT_VINFO_LOOP_VINFO (stmt_info);
1666 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
1667 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
1672 /* bb in current nest. */
1673 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1675 gimple phi = gsi_stmt (si);
1676 gimple_set_uid (phi, 0);
1677 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res));
1680 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1682 gimple stmt = gsi_stmt (si);
1683 gimple_set_uid (stmt, 0);
1684 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));
1689 /* CHECKME: We want to visit all BBs before their successors (except for
1690 latch blocks, for which this assertion wouldn't hold). In the simple
1691 case of the loop forms we allow, a dfs order of the BBs would the same
1692 as reversed postorder traversal, so we are safe. */
1695 bbs = XCNEWVEC (basic_block, loop->num_nodes);
1696 nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
1697 bbs, loop->num_nodes, loop);
1698 gcc_assert (nbbs == loop->num_nodes);
1700 LOOP_VINFO_BBS (res) = bbs;
1701 LOOP_VINFO_NITERS (res) = NULL;
1702 LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
1703 LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
1704 LOOP_VINFO_VECTORIZABLE_P (res) = 0;
1705 LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
1706 LOOP_VINFO_VECT_FACTOR (res) = 0;
1707 LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
1708 LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
1709 LOOP_VINFO_UNALIGNED_DR (res) = NULL;
1710 LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
1711 VEC_alloc (gimple, heap,
1712 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
1713 LOOP_VINFO_MAY_ALIAS_DDRS (res) =
1714 VEC_alloc (ddr_p, heap,
1715 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
1716 LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
1717 LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
1718 LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
1724 /* Function destroy_loop_vec_info.
1726 Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
1727 stmts in the loop. */
1730 destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
1735 gimple_stmt_iterator si;
1737 VEC (slp_instance, heap) *slp_instances;
1738 slp_instance instance;
1743 loop = LOOP_VINFO_LOOP (loop_vinfo);
1745 bbs = LOOP_VINFO_BBS (loop_vinfo);
1746 nbbs = loop->num_nodes;
1750 free (LOOP_VINFO_BBS (loop_vinfo));
1751 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
1752 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
1753 VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
1760 for (j = 0; j < nbbs; j++)
1762 basic_block bb = bbs[j];
1764 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1765 free_stmt_vec_info (gsi_stmt (si));
1767 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
1769 gimple stmt = gsi_stmt (si);
1770 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1774 /* Check if this is a "pattern stmt" (introduced by the
1775 vectorizer during the pattern recognition pass). */
1776 bool remove_stmt_p = false;
1777 gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
1780 stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
1782 && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
1783 remove_stmt_p = true;
1786 /* Free stmt_vec_info. */
1787 free_stmt_vec_info (stmt);
1789 /* Remove dead "pattern stmts". */
1791 gsi_remove (&si, true);
1797 free (LOOP_VINFO_BBS (loop_vinfo));
1798 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
1799 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
1800 VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
1801 VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
1802 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
1803 for (j = 0; VEC_iterate (slp_instance, slp_instances, j, instance); j++)
1804 vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
1805 VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
1806 VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));
1813 /* Function vect_force_dr_alignment_p.
1815 Returns whether the alignment of a DECL can be forced to be aligned
1816 on ALIGNMENT bit boundary. */
1819 vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
1821 if (TREE_CODE (decl) != VAR_DECL)
1824 if (DECL_EXTERNAL (decl))
1827 if (TREE_ASM_WRITTEN (decl))
1830 if (TREE_STATIC (decl))
1831 return (alignment <= MAX_OFILE_ALIGNMENT);
1833 return (alignment <= MAX_STACK_ALIGNMENT);
1837 /* Function get_vectype_for_scalar_type.
1839 Returns the vector type corresponding to SCALAR_TYPE as supported
1843 get_vectype_for_scalar_type (tree scalar_type)
1845 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
1846 int nbytes = GET_MODE_SIZE (inner_mode);
1850 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
1853 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
1855 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
1857 vectype = build_vector_type (scalar_type, nunits);
1858 if (vect_print_dump_info (REPORT_DETAILS))
1860 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
1861 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
1867 if (vect_print_dump_info (REPORT_DETAILS))
1869 fprintf (vect_dump, "vectype: ");
1870 print_generic_expr (vect_dump, vectype, TDF_SLIM);
1873 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1874 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
1876 if (vect_print_dump_info (REPORT_DETAILS))
1877 fprintf (vect_dump, "mode not supported by target.");
1885 /* Function vect_supportable_dr_alignment
1887 Return whether the data reference DR is supported with respect to its
1890 enum dr_alignment_support
1891 vect_supportable_dr_alignment (struct data_reference *dr)
1893 gimple stmt = DR_STMT (dr);
1894 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1895 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1896 enum machine_mode mode = (int) TYPE_MODE (vectype);
1897 struct loop *vect_loop = LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info));
1898 bool nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
1899 bool invariant_in_outerloop = false;
1901 if (aligned_access_p (dr))
1904 if (nested_in_vect_loop)
1906 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
1907 invariant_in_outerloop =
1908 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
1911 /* Possibly unaligned access. */
1913 /* We can choose between using the implicit realignment scheme (generating
1914 a misaligned_move stmt) and the explicit realignment scheme (generating
1915 aligned loads with a REALIGN_LOAD). There are two variants to the explicit
1916 realignment scheme: optimized, and unoptimized.
1917 We can optimize the realignment only if the step between consecutive
1918 vector loads is equal to the vector size. Since the vector memory
1919 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
1920 is guaranteed that the misalignment amount remains the same throughout the
1921 execution of the vectorized loop. Therefore, we can create the
1922 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
1923 at the loop preheader.
1925 However, in the case of outer-loop vectorization, when vectorizing a
1926 memory access in the inner-loop nested within the LOOP that is now being
1927 vectorized, while it is guaranteed that the misalignment of the
1928 vectorized memory access will remain the same in different outer-loop
1929 iterations, it is *not* guaranteed that is will remain the same throughout
1930 the execution of the inner-loop. This is because the inner-loop advances
1931 with the original scalar step (and not in steps of VS). If the inner-loop
1932 step happens to be a multiple of VS, then the misalignment remains fixed
1933 and we can use the optimized realignment scheme. For example:
1939 When vectorizing the i-loop in the above example, the step between
1940 consecutive vector loads is 1, and so the misalignment does not remain
1941 fixed across the execution of the inner-loop, and the realignment cannot
1942 be optimized (as illustrated in the following pseudo vectorized loop):
1944 for (i=0; i<N; i+=4)
1945 for (j=0; j<M; j++){
1946 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
1947 // when j is {0,1,2,3,4,5,6,7,...} respectively.
1948 // (assuming that we start from an aligned address).
1951 We therefore have to use the unoptimized realignment scheme:
1953 for (i=0; i<N; i+=4)
1954 for (j=k; j<M; j+=4)
1955 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
1956 // that the misalignment of the initial address is
1959 The loop can then be vectorized as follows:
1961 for (k=0; k<4; k++){
1962 rt = get_realignment_token (&vp[k]);
1963 for (i=0; i<N; i+=4){
1965 for (j=k; j<M; j+=4){
1967 va = REALIGN_LOAD <v1,v2,rt>;
1974 if (DR_IS_READ (dr))
1976 if (optab_handler (vec_realign_load_optab, mode)->insn_code !=
1978 && (!targetm.vectorize.builtin_mask_for_load
1979 || targetm.vectorize.builtin_mask_for_load ()))
1981 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1982 if (nested_in_vect_loop
1983 && (TREE_INT_CST_LOW (DR_STEP (dr))
1984 != GET_MODE_SIZE (TYPE_MODE (vectype))))
1985 return dr_explicit_realign;
1987 return dr_explicit_realign_optimized;
1990 if (optab_handler (movmisalign_optab, mode)->insn_code !=
1992 /* Can't software pipeline the loads, but can at least do them. */
1993 return dr_unaligned_supported;
1997 return dr_unaligned_unsupported;
2001 /* Function vect_is_simple_use.
2004 LOOP - the loop that is being vectorized.
2005 OPERAND - operand of a stmt in LOOP.
2006 DEF - the defining stmt in case OPERAND is an SSA_NAME.
2008 Returns whether a stmt with OPERAND can be vectorized.
2009 Supportable operands are constants, loop invariants, and operands that are
2010 defined by the current iteration of the loop. Unsupportable operands are
2011 those that are defined by a previous iteration of the loop (as is the case
2012 in reduction/induction computations). */
2015 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo, gimple *def_stmt,
2016 tree *def, enum vect_def_type *dt)
2019 stmt_vec_info stmt_vinfo;
2020 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2025 if (vect_print_dump_info (REPORT_DETAILS))
2027 fprintf (vect_dump, "vect_is_simple_use: operand ");
2028 print_generic_expr (vect_dump, operand, TDF_SLIM);
2031 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
2033 *dt = vect_constant_def;
2036 if (is_gimple_min_invariant (operand))
2039 *dt = vect_invariant_def;
2043 if (TREE_CODE (operand) == PAREN_EXPR)
2045 if (vect_print_dump_info (REPORT_DETAILS))
2046 fprintf (vect_dump, "non-associatable copy.");
2047 operand = TREE_OPERAND (operand, 0);
2049 if (TREE_CODE (operand) != SSA_NAME)
2051 if (vect_print_dump_info (REPORT_DETAILS))
2052 fprintf (vect_dump, "not ssa-name.");
2056 *def_stmt = SSA_NAME_DEF_STMT (operand);
2057 if (*def_stmt == NULL)
2059 if (vect_print_dump_info (REPORT_DETAILS))
2060 fprintf (vect_dump, "no def_stmt.");
2064 if (vect_print_dump_info (REPORT_DETAILS))
2066 fprintf (vect_dump, "def_stmt: ");
2067 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
2070 /* empty stmt is expected only in case of a function argument.
2071 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
2072 if (gimple_nop_p (*def_stmt))
2075 *dt = vect_invariant_def;
2079 bb = gimple_bb (*def_stmt);
2080 if (!flow_bb_inside_loop_p (loop, bb))
2081 *dt = vect_invariant_def;
2084 stmt_vinfo = vinfo_for_stmt (*def_stmt);
2085 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
2088 if (*dt == vect_unknown_def_type)
2090 if (vect_print_dump_info (REPORT_DETAILS))
2091 fprintf (vect_dump, "Unsupported pattern.");
2095 if (vect_print_dump_info (REPORT_DETAILS))
2096 fprintf (vect_dump, "type of def: %d.",*dt);
2098 switch (gimple_code (*def_stmt))
2101 *def = gimple_phi_result (*def_stmt);
2105 *def = gimple_assign_lhs (*def_stmt);
2109 *def = gimple_call_lhs (*def_stmt);
2114 if (vect_print_dump_info (REPORT_DETAILS))
2115 fprintf (vect_dump, "unsupported defining stmt: ");
2123 /* Function supportable_widening_operation
2125 Check whether an operation represented by the code CODE is a
2126 widening operation that is supported by the target platform in
2127 vector form (i.e., when operating on arguments of type VECTYPE).
2129 Widening operations we currently support are NOP (CONVERT), FLOAT
2130 and WIDEN_MULT. This function checks if these operations are supported
2131 by the target platform either directly (via vector tree-codes), or via
2135 - CODE1 and CODE2 are codes of vector operations to be used when
2136 vectorizing the operation, if available.
2137 - DECL1 and DECL2 are decls of target builtin functions to be used
2138 when vectorizing the operation, if available. In this case,
2139 CODE1 and CODE2 are CALL_EXPR. */
2142 supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
2143 tree *decl1, tree *decl2,
2144 enum tree_code *code1, enum tree_code *code2)
2146 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2147 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
2148 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2150 enum machine_mode vec_mode;
2151 enum insn_code icode1, icode2;
2152 optab optab1, optab2;
2153 tree type = gimple_expr_type (stmt);
2154 tree wide_vectype = get_vectype_for_scalar_type (type);
2155 enum tree_code c1, c2;
2157 /* The result of a vectorized widening operation usually requires two vectors
2158 (because the widened results do not fit int one vector). The generated
2159 vector results would normally be expected to be generated in the same
2160 order as in the original scalar computation, i.e. if 8 results are
2161 generated in each vector iteration, they are to be organized as follows:
2162 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
2164 However, in the special case that the result of the widening operation is
2165 used in a reduction computation only, the order doesn't matter (because
2166 when vectorizing a reduction we change the order of the computation).
2167 Some targets can take advantage of this and generate more efficient code.
2168 For example, targets like Altivec, that support widen_mult using a sequence
2169 of {mult_even,mult_odd} generate the following vectors:
2170 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
2172 When vectorizing outer-loops, we execute the inner-loop sequentially
2173 (each vectorized inner-loop iteration contributes to VF outer-loop
2174 iterations in parallel). We therefore don't allow to change the order
2175 of the computation in the inner-loop during outer-loop vectorization. */
2177 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
2178 && !nested_in_vect_loop_p (vect_loop, stmt))
2184 && code == WIDEN_MULT_EXPR
2185 && targetm.vectorize.builtin_mul_widen_even
2186 && targetm.vectorize.builtin_mul_widen_even (vectype)
2187 && targetm.vectorize.builtin_mul_widen_odd
2188 && targetm.vectorize.builtin_mul_widen_odd (vectype))
2190 if (vect_print_dump_info (REPORT_DETAILS))
2191 fprintf (vect_dump, "Unordered widening operation detected.");
2193 *code1 = *code2 = CALL_EXPR;
2194 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
2195 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
2201 case WIDEN_MULT_EXPR:
2202 if (BYTES_BIG_ENDIAN)
2204 c1 = VEC_WIDEN_MULT_HI_EXPR;
2205 c2 = VEC_WIDEN_MULT_LO_EXPR;
2209 c2 = VEC_WIDEN_MULT_HI_EXPR;
2210 c1 = VEC_WIDEN_MULT_LO_EXPR;
2215 if (BYTES_BIG_ENDIAN)
2217 c1 = VEC_UNPACK_HI_EXPR;
2218 c2 = VEC_UNPACK_LO_EXPR;
2222 c2 = VEC_UNPACK_HI_EXPR;
2223 c1 = VEC_UNPACK_LO_EXPR;
2228 if (BYTES_BIG_ENDIAN)
2230 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
2231 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
2235 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
2236 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
2240 case FIX_TRUNC_EXPR:
2241 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
2242 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
2243 computing the operation. */
2250 if (code == FIX_TRUNC_EXPR)
2252 /* The signedness is determined from output operand. */
2253 optab1 = optab_for_tree_code (c1, type, optab_default);
2254 optab2 = optab_for_tree_code (c2, type, optab_default);
2258 optab1 = optab_for_tree_code (c1, vectype, optab_default);
2259 optab2 = optab_for_tree_code (c2, vectype, optab_default);
2262 if (!optab1 || !optab2)
2265 vec_mode = TYPE_MODE (vectype);
2266 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
2267 || insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
2268 || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
2270 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
2279 /* Function supportable_narrowing_operation
2281 Check whether an operation represented by the code CODE is a
2282 narrowing operation that is supported by the target platform in
2283 vector form (i.e., when operating on arguments of type VECTYPE).
2285 Narrowing operations we currently support are NOP (CONVERT) and
2286 FIX_TRUNC. This function checks if these operations are supported by
2287 the target platform directly via vector tree-codes.
2290 - CODE1 is the code of a vector operation to be used when
2291 vectorizing the operation, if available. */
/* NOTE(review): this extract elides lines (the function's braces, the
   switch framing around the case labels, and the return statements are
   missing).  Code lines below are preserved verbatim.  */
2294 supportable_narrowing_operation (enum tree_code code,
2295 const_gimple stmt, const_tree vectype,
2296 enum tree_code *code1)
2298 enum machine_mode vec_mode;
2299 enum insn_code icode1;
/* TYPE is the (narrow) scalar type of the statement's result; the
   corresponding narrow vector type is what the pack must produce.  */
2301 tree type = gimple_expr_type (stmt);
2302 tree narrow_vectype = get_vectype_for_scalar_type (type);
/* NOP/CONVERT narrowing maps to a vector pack-with-truncation.  */
2308 c1 = VEC_PACK_TRUNC_EXPR;
2311 case FIX_TRUNC_EXPR:
2312 c1 = VEC_PACK_FIX_TRUNC_EXPR;
2316 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
2317 tree code and optabs used for computing the operation. */
2324 if (code == FIX_TRUNC_EXPR)
2325 /* The signedness is determined from output operand. */
2326 optab1 = optab_for_tree_code (c1, type, optab_default);
2328 optab1 = optab_for_tree_code (c1, vectype, optab_default);
/* Verify the target has an insn for this optab in the input vector mode
   whose result mode matches the narrow vector type.  */
2333 vec_mode = TYPE_MODE (vectype);
2334 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
2335 || insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
2343 /* Function reduction_code_for_scalar_code
2346 Input: CODE - tree_code of a reduction operations.
2349 Output: REDUC_CODE - the corresponding tree-code to be used to reduce the
2350 vector of partial results into a single scalar result (which
2351 will also reside in a vector).
2353 Return TRUE if a corresponding REDUC_CODE was found, FALSE otherwise. */
/* NOTE(review): this extract elides lines (the switch statement, case
   labels, braces and returns are missing).  The visible assignments map
   MAX -> REDUC_MAX_EXPR, MIN -> REDUC_MIN_EXPR, PLUS -> REDUC_PLUS_EXPR.  */
2356 reduction_code_for_scalar_code (enum tree_code code,
2357 enum tree_code *reduc_code)
2362 *reduc_code = REDUC_MAX_EXPR;
2366 *reduc_code = REDUC_MIN_EXPR;
2370 *reduc_code = REDUC_PLUS_EXPR;
2378 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
2379 STMT is printed with a message MSG. */
/* Emits MSG to the vectorizer dump stream, then STMT in slim form.  */
2382 report_vect_op (gimple stmt, const char *msg)
2384 fprintf (vect_dump, "%s", msg);
2385 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2388 /* Function vect_is_simple_reduction
2390 Detect a cross-iteration def-use cycle that represents a simple
2391 reduction computation. We look for the following pattern:
2396 a2 = operation (a3, a1)
2399 1. operation is commutative and associative and it is safe to
2400 change the order of the computation.
2401 2. no uses for a2 in the loop (a2 is used out of the loop)
2402 3. no uses of a1 in the loop besides the reduction operation.
2404 Condition 1 is tested here.
2405 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized. */
/* NOTE(review): this extract elides lines (braces, `return NULL`/`return
   def_stmt` statements, and some declarations such as `tree name, op1,
   op2, type;` are missing).  Code lines below are preserved verbatim.  */
2408 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi)
2410 struct loop *loop = (gimple_bb (phi))->loop_father;
2411 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
/* LOOP_ARG is the value flowing into PHI along the latch edge — the
   candidate for the cycle's in-loop definition.  */
2412 edge latch_e = loop_latch_edge (loop);
2413 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2414 gimple def_stmt, def1, def2;
2415 enum tree_code code;
2420 imm_use_iterator imm_iter;
2421 use_operand_p use_p;
/* PHI must belong either to the loop being vectorized or to a loop
   nested inside it.  */
2423 gcc_assert (loop == vect_loop || flow_loop_nested_p (vect_loop, loop));
/* Reject if the PHI result has an in-loop use other than the reduction
   statement itself (condition 3 above).  */
2425 name = PHI_RESULT (phi);
2427 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2429 gimple use_stmt = USE_STMT (use_p);
2430 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2431 && vinfo_for_stmt (use_stmt)
2432 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2436 if (vect_print_dump_info (REPORT_DETAILS))
2437 fprintf (vect_dump, "reduction used in loop.");
/* The latch value must be an SSA name defined by a binary GIMPLE
   assignment inside the loop.  */
2442 if (TREE_CODE (loop_arg) != SSA_NAME)
2444 if (vect_print_dump_info (REPORT_DETAILS))
2446 fprintf (vect_dump, "reduction: not ssa_name: ");
2447 print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
2452 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
2455 if (vect_print_dump_info (REPORT_DETAILS))
2456 fprintf (vect_dump, "reduction: no def_stmt.");
2460 if (!is_gimple_assign (def_stmt))
2462 if (vect_print_dump_info (REPORT_DETAILS))
2463 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
/* Likewise, the reduction statement's own result must not be used
   elsewhere inside the loop (condition 2 above).  */
2467 name = gimple_assign_lhs (def_stmt);
2469 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2471 gimple use_stmt = USE_STMT (use_p);
2472 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2473 && vinfo_for_stmt (use_stmt)
2474 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2478 if (vect_print_dump_info (REPORT_DETAILS))
2479 fprintf (vect_dump, "reduction used in loop.");
/* Condition 1: the operation must be reorderable — commutative,
   associative, and a binary RHS on SSA-name operands.  */
2484 code = gimple_assign_rhs_code (def_stmt);
2486 if (!commutative_tree_code (code) || !associative_tree_code (code))
2488 if (vect_print_dump_info (REPORT_DETAILS))
2489 report_vect_op (def_stmt, "reduction: not commutative/associative: ");
2493 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
2495 if (vect_print_dump_info (REPORT_DETAILS))
2496 report_vect_op (def_stmt, "reduction: not binary operation: ");
2500 op1 = gimple_assign_rhs1 (def_stmt);
2501 op2 = gimple_assign_rhs2 (def_stmt);
2502 if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
2504 if (vect_print_dump_info (REPORT_DETAILS))
2505 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
2509 /* Check that it's ok to change the order of the computation. */
2510 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
2511 if (TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op1))
2512 || TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op2)))
2514 if (vect_print_dump_info (REPORT_DETAILS))
2516 fprintf (vect_dump, "reduction: multiple types: operation type: ");
2517 print_generic_expr (vect_dump, type, TDF_SLIM);
2518 fprintf (vect_dump, ", operands types: ");
2519 print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
2520 fprintf (vect_dump, ",");
2521 print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
2526 /* Generally, when vectorizing a reduction we change the order of the
2527 computation. This may change the behavior of the program in some
2528 cases, so we need to check that this is ok. One exception is when
2529 vectorizing an outer-loop: the inner-loop is executed sequentially,
2530 and therefore vectorizing reductions in the inner-loop during
2531 outer-loop vectorization is safe. */
2533 /* CHECKME: check for !flag_finite_math_only too? */
2534 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
2535 && !nested_in_vect_loop_p (vect_loop, def_stmt))
2537 /* Changing the order of operations changes the semantics. */
2538 if (vect_print_dump_info (REPORT_DETAILS))
2539 report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
2542 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
2543 && !nested_in_vect_loop_p (vect_loop, def_stmt))
2545 /* Changing the order of operations changes the semantics. */
2546 if (vect_print_dump_info (REPORT_DETAILS))
2547 report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
2550 else if (SAT_FIXED_POINT_TYPE_P (type))
2552 /* Changing the order of operations changes the semantics. */
2553 if (vect_print_dump_info (REPORT_DETAILS))
2554 report_vect_op (def_stmt,
2555 "reduction: unsafe fixed-point math optimization: ");
2559 /* reduction is safe. we're dealing with one of the following:
2560 1) integer arithmetic and no trapv
2561 2) floating point arithmetic, and special flags permit this optimization.
2563 def1 = SSA_NAME_DEF_STMT (op1);
2564 def2 = SSA_NAME_DEF_STMT (op2);
2565 if (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2))
2567 if (vect_print_dump_info (REPORT_DETAILS))
2568 report_vect_op (def_stmt, "reduction: no defs for operands: ");
2573 /* Check that one def is the reduction def, defined by PHI,
2574 the other def is either defined in the loop ("vect_loop_def"),
2575 or it's an induction (defined by a loop-header phi-node). */
/* NOTE(review): the condition head of the first branch (presumably
   `if (def2 == phi ...`) is among the elided lines.  */
2578 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
2579 && (is_gimple_assign (def1)
2580 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_induction_def
2581 || (gimple_code (def1) == GIMPLE_PHI
2582 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_loop_def
2583 && !is_loop_header_bb_p (gimple_bb (def1)))))
2585 if (vect_print_dump_info (REPORT_DETAILS))
2586 report_vect_op (def_stmt, "detected reduction:");
/* Mirror case: the reduction PHI feeds operand 1 instead of operand 2.  */
2589 else if (def1 == phi
2590 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
2591 && (is_gimple_assign (def2)
2592 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) == vect_induction_def
2593 || (gimple_code (def2) == GIMPLE_PHI
2594 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) == vect_loop_def
2595 && !is_loop_header_bb_p (gimple_bb (def2)))))
2597 /* Swap operands (just for simplicity - so that the rest of the code
2598 can assume that the reduction variable is always the last (second)
2600 if (vect_print_dump_info (REPORT_DETAILS))
2601 report_vect_op (def_stmt ,
2602 "detected reduction: need to swap operands:");
2603 swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
2604 gimple_assign_rhs2_ptr (def_stmt));
/* Neither operand closes the cycle through PHI: not a simple reduction.  */
2609 if (vect_print_dump_info (REPORT_DETAILS))
2610 report_vect_op (def_stmt, "reduction: unknown pattern.");
2616 /* Function vect_is_simple_iv_evolution.
2618 FORNOW: A simple evolution of an induction variables in the loop is
2619 considered a polynomial evolution with constant step. */
/* NOTE(review): this extract elides lines (braces, the declarations of
   `init_expr`/`step_expr`, the `*init`/`*step` out-assignments and the
   return statements are missing).  Code lines are preserved verbatim.  */
2622 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
2627 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
2629 /* When there is no evolution in this loop, the evolution function
2630 is not "simple". */
2631 if (evolution_part == NULL_TREE)
2634 /* When the evolution is a polynomial of degree >= 2
2635 the evolution function is not "simple". */
2636 if (tree_is_chrec (evolution_part))
/* Split the access function into its initial value and per-iteration step.  */
2639 step_expr = evolution_part;
2640 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
2642 if (vect_print_dump_info (REPORT_DETAILS))
2644 fprintf (vect_dump, "step: ");
2645 print_generic_expr (vect_dump, step_expr, TDF_SLIM);
2646 fprintf (vect_dump, ", init: ");
2647 print_generic_expr (vect_dump, init_expr, TDF_SLIM);
/* Only constant integer steps qualify as a simple IV evolution.  */
2653 if (TREE_CODE (step_expr) != INTEGER_CST)
2655 if (vect_print_dump_info (REPORT_DETAILS))
2656 fprintf (vect_dump, "step unknown.");
2664 /* Function vectorize_loops.
2666 Entry Point to loop vectorization phase. */
/* NOTE(review): this extract elides lines (braces, the declarations of
   `i`, `loop`, and the `loop_iterator li`, plus `continue` statements,
   are missing).  Code lines below are preserved verbatim.  */
2669 vectorize_loops (void)
2672 unsigned int num_vectorized_loops = 0;
2673 unsigned int vect_loops_num;
2677 vect_loops_num = number_of_loops ();
2679 /* Bail out if there are no loops. */
2680 if (vect_loops_num <= 1)
2683 /* Fix the verbosity level if not defined explicitly by the user. */
2684 vect_set_dump_settings ();
2686 /* Allocate the bitmap that records which virtual variables that
2687 need to be renamed. */
2688 vect_memsyms_to_rename = BITMAP_ALLOC (NULL);
2690 init_stmt_vec_info_vec ();
2692 /* ----------- Analyze loops. ----------- */
2694 /* If some loop was duplicated, it gets bigger number
2695 than all previously defined loops. This fact allows us to run
2696 only over initial loops skipping newly generated ones. */
2697 FOR_EACH_LOOP (li, loop, 0)
2699 loop_vec_info loop_vinfo;
2701 vect_loop_location = find_loop_location (loop);
2702 loop_vinfo = vect_analyze_loop (loop);
2703 loop->aux = loop_vinfo;
2705 if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
2708 vect_transform_loop (loop_vinfo);
2709 num_vectorized_loops++;
2711 vect_loop_location = UNKNOWN_LOC;
2713 statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
2714 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)
2715 || (vect_print_dump_info (REPORT_VECTORIZED_LOOPS)
2716 && num_vectorized_loops > 0))
2717 fprintf (vect_dump, "vectorized %u loops in function.\n",
2718 num_vectorized_loops);
2720 /* ----------- Finalize. ----------- */
2722 BITMAP_FREE (vect_memsyms_to_rename);
/* Release the per-loop analysis data stashed in loop->aux above.  */
2724 for (i = 1; i < vect_loops_num; i++)
2726 loop_vec_info loop_vinfo;
2728 loop = get_loop (i);
2731 loop_vinfo = (loop_vec_info) loop->aux;
2732 destroy_loop_vec_info (loop_vinfo, true);
2736 free_stmt_vec_info_vec ();
/* Request CFG cleanup only if something was actually vectorized.  */
2738 return num_vectorized_loops > 0 ? TODO_cleanup_cfg : 0;
2741 /* Increase alignment of global arrays to improve vectorization potential.
2743 TODO:
2743 - Consider also structs that have an array field.
2744 - Use ipa analysis to prune arrays that can't be vectorized?
2745 This should involve global alignment analysis and in the future also
2746 array padding. */
/* NOTE(review): this extract elides lines (braces, `continue` statements,
   the NULL-vectype check, and the function's return are missing).
   Code lines below are preserved verbatim.  */
2749 increase_alignment (void)
2751 struct varpool_node *vnode;
2753 /* Increase the alignment of all global arrays for vectorization. */
2754 for (vnode = varpool_nodes_queue;
2756 vnode = vnode->next_needed)
2758 tree vectype, decl = vnode->decl;
2759 unsigned int alignment;
/* Only global array declarations are considered.  */
2761 if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
2763 vectype = get_vectype_for_scalar_type (TREE_TYPE (TREE_TYPE (decl)));
/* Skip decls already aligned at least as much as the vector type needs.  */
2766 alignment = TYPE_ALIGN (vectype);
2767 if (DECL_ALIGN (decl) >= alignment)
2770 if (vect_can_force_dr_alignment_p (decl, alignment))
2772 DECL_ALIGN (decl) = TYPE_ALIGN (vectype);
2773 DECL_USER_ALIGN (decl) = 1;
2776 fprintf (dump_file, "Increasing alignment of decl: ");
2777 print_generic_expr (dump_file, decl, TDF_SLIM);
/* Gate for the IPA increase-alignment pass: run only when both section
   anchors and tree vectorization are enabled.  */
2785 gate_increase_alignment (void)
2787 return flag_section_anchors && flag_tree_vectorize;
2790 struct simple_ipa_opt_pass pass_ipa_increase_alignment =
2794 "increase_alignment", /* name */
2795 gate_increase_alignment, /* gate */
2796 increase_alignment, /* execute */
2799 0, /* static_pass_number */
2801 0, /* properties_required */
2802 0, /* properties_provided */
2803 0, /* properties_destroyed */
2804 0, /* todo_flags_start */
2805 0 /* todo_flags_finish */