2 Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
22 /* Loop Vectorization Pass.
24 This pass tries to vectorize loops. This first implementation focuses on
25 simple inner-most loops, with no conditional control flow, and a set of
26 simple operations whose vector form can be expressed using existing
27 tree codes (PLUS, MULT etc).
29 For example, the vectorizer transforms the following simple loop:
31 short a[N]; short b[N]; short c[N]; int i;
37 as if it was manually vectorized by rewriting the source code into:
39 typedef int __attribute__((mode(V8HI))) v8hi;
40 short a[N]; short b[N]; short c[N]; int i;
41 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
44 for (i=0; i<N/8; i++){
51 The main entry to this pass is vectorize_loops(), in which
52 the vectorizer applies a set of analyses on a given set of loops,
53 followed by the actual vectorization transformation for the loops that
54 had successfully passed the analysis phase.
56 Throughout this pass we make a distinction between two types of
57 data: scalars (which are represented by SSA_NAMES), and memory references
58 ("data-refs"). These two types of data require different handling both
59 during analysis and transformation. The types of data-refs that the
60 vectorizer currently supports are ARRAY_REFS whose base is an array DECL
61 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
62 accesses are required to have a simple (consecutive) access pattern.
66 The driver for the analysis phase is vect_analyze_loop_nest().
67 It applies a set of analyses, some of which rely on the scalar evolution
68 analyzer (scev) developed by Sebastian Pop.
70 During the analysis phase the vectorizer records some information
71 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
72 loop, as well as general information about the loop as a whole, which is
73 recorded in a "loop_vec_info" struct attached to each loop.
77 The loop transformation phase scans all the stmts in the loop, and
78 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
79 the loop that needs to be vectorized. It inserts the vector code sequence
80 just before the scalar stmt S, and records a pointer to the vector code
81 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
82 attached to S). This pointer will be used for the vectorization of following
83 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
84 otherwise, we rely on dead code elimination for removing it.
86 For example, say stmt S1 was vectorized into stmt VS1:
89 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
92 To vectorize stmt S2, the vectorizer first finds the stmt that defines
93 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
94 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
95 resulting sequence would be:
98 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
100 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
102 Operands that are not SSA_NAMEs, are data-refs that appear in
103 load/store operations (like 'x[i]' in S1), and are handled differently.
107 Currently the only target specific information that is used is the
108 size of the vector (in bytes) - "UNITS_PER_SIMD_WORD". Targets that can
109 support different sizes of vectors, for now will need to specify one value
110 for "UNITS_PER_SIMD_WORD". More flexibility will be added in the future.
112 Since we only vectorize operations whose vector form can be
113 expressed using existing tree codes, to verify that an operation is
114 supported, the vectorizer checks the relevant optab at the relevant
115 machine_mode (e.g, add_optab->handlers[(int) V8HImode].insn_code). If
116 the value found is CODE_FOR_nothing, then there's no target support, and
117 we can't vectorize the stmt.
119 For additional information on this project see:
120 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
125 #include "coretypes.h"
131 #include "basic-block.h"
132 #include "diagnostic.h"
133 #include "tree-flow.h"
134 #include "tree-dump.h"
137 #include "cfglayout.h"
143 #include "tree-chrec.h"
144 #include "tree-data-ref.h"
145 #include "tree-scalar-evolution.h"
147 #include "tree-vectorizer.h"
148 #include "tree-pass.h"
150 /*************************************************************************
151 Simple Loop Peeling Utilities
152 *************************************************************************/
153 static struct loop *slpeel_tree_duplicate_loop_to_edge_cfg
154 (struct loop *, struct loops *, edge);
155 static void slpeel_update_phis_for_duplicate_loop
156 (struct loop *, struct loop *, bool after);
157 static void slpeel_update_phi_nodes_for_guard1
158 (edge, struct loop *, bool, basic_block *, bitmap *);
159 static void slpeel_update_phi_nodes_for_guard2
160 (edge, struct loop *, bool, basic_block *);
161 static edge slpeel_add_loop_guard (basic_block, tree, basic_block, basic_block);
163 static void rename_use_op (use_operand_p);
164 static void rename_variables_in_bb (basic_block);
165 static void rename_variables_in_loop (struct loop *);
167 /*************************************************************************
168 General Vectorization Utilities
169 *************************************************************************/
170 static void vect_set_dump_settings (void);
172 /* vect_dump will be set to stderr or dump_file if exist. */
175 /* vect_verbosity_level set to an invalid value
176 to mark that it's uninitialized. */
177 enum verbosity_levels vect_verbosity_level = MAX_VERBOSITY_LEVEL;
179 /* Number of loops, at the beginning of vectorization. */
180 unsigned int vect_loops_num;
183 static LOC vect_loop_location;
185 /* Bitmap of virtual variables to be renamed. */
186 bitmap vect_vnames_to_rename;
188 /*************************************************************************
189 Simple Loop Peeling Utilities
191 Utilities to support loop peeling for vectorization purposes.
192 *************************************************************************/
195 /* Renames the use *OP_P. */
/* Replace the SSA_NAME used at *OP_P with its current definition, as
   recorded via set_current_def during loop duplication.  Uses that are
   not SSA_NAMEs are skipped, and a name with no current definition
   (something defined outside of the loop) is presumably left unchanged
   -- NOTE(review): the early-return statements are elided in this
   excerpt; confirm against the full source.  */
198 rename_use_op (use_operand_p op_p)
202 if (TREE_CODE (USE_FROM_PTR (op_p)) != SSA_NAME)
205 new_name = get_current_def (USE_FROM_PTR (op_p));
207 /* Something defined outside of the loop. */
211 /* An ordinary ssa name defined in the loop. */
213 SET_USE (op_p, new_name);
217 /* Renames the variables in basic block BB. */
/* Applies rename_use_op to every SSA use operand of every stmt in BB,
   and to the PHI arguments contributed by BB in successor blocks,
   filtered by a flow_bb_inside_loop_p test against BB's loop_father.  */
220 rename_variables_in_bb (basic_block bb)
223 block_stmt_iterator bsi;
229 struct loop *loop = bb->loop_father;
/* Rename all uses (and kills) in the statements of BB.  */
231 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
233 stmt = bsi_stmt (bsi);
234 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter,
235 (SSA_OP_ALL_USES | SSA_OP_ALL_KILLS))
236 rename_use_op (use_p);
/* Rename the PHI arguments coming from BB on successor edges; the
   loop-membership check decides which successors are processed --
   NOTE(review): the controlled statement after the check is elided
   in this excerpt.  */
239 FOR_EACH_EDGE (e, ei, bb->succs)
241 if (!flow_bb_inside_loop_p (loop, e->dest))
243 for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi))
244 rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e));
249 /* Renames variables in new generated LOOP. */
/* Walks every basic block of LOOP and renames its SSA uses via
   rename_variables_in_bb.  */
252 rename_variables_in_loop (struct loop *loop)
257 bbs = get_loop_body (loop);
259 for (i = 0; i < loop->num_nodes; i++)
260 rename_variables_in_bb (bbs[i]);
266 /* Update the PHI nodes of NEW_LOOP.
268 NEW_LOOP is a duplicate of ORIG_LOOP.
269 AFTER indicates whether NEW_LOOP executes before or after ORIG_LOOP:
270 AFTER is true if NEW_LOOP executes after ORIG_LOOP, and false if it
271 executes before it. */
274 slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
275 struct loop *new_loop, bool after)
278 tree phi_new, phi_orig;
280 edge orig_loop_latch = loop_latch_edge (orig_loop);
281 edge orig_entry_e = loop_preheader_edge (orig_loop);
282 edge new_loop_exit_e = single_exit (new_loop);
283 edge new_loop_entry_e = loop_preheader_edge (new_loop);
/* If NEW_LOOP runs after ORIG_LOOP, its entry value is what flowed
   around ORIG_LOOP's latch; otherwise it is ORIG_LOOP's entry value.  */
284 edge entry_arg_e = (after ? orig_loop_latch : orig_entry_e);
287 step 1. For each loop-header-phi:
288 Add the first phi argument for the phi in NEW_LOOP
289 (the one associated with the entry of NEW_LOOP)
291 step 2. For each loop-header-phi:
292 Add the second phi argument for the phi in NEW_LOOP
293 (the one associated with the latch of NEW_LOOP)
295 step 3. Update the phis in the successor block of NEW_LOOP.
297 case 1: NEW_LOOP was placed before ORIG_LOOP:
298 The successor block of NEW_LOOP is the header of ORIG_LOOP.
299 Updating the phis in the successor block can therefore be done
300 along with the scanning of the loop header phis, because the
301 header blocks of ORIG_LOOP and NEW_LOOP have exactly the same
302 phi nodes, organized in the same order.
304 case 2: NEW_LOOP was placed after ORIG_LOOP:
305 The successor block of NEW_LOOP is the original exit block of
306 ORIG_LOOP - the phis to be updated are the loop-closed-ssa phis.
307 We postpone updating these phis to a later stage (when
308 loop guards are added).
312 /* Scan the phis in the headers of the old and new loops
313 (they are organized in exactly the same order). */
315 for (phi_new = phi_nodes (new_loop->header),
316 phi_orig = phi_nodes (orig_loop->header);
318 phi_new = PHI_CHAIN (phi_new), phi_orig = PHI_CHAIN (phi_orig))
/* step 1: the entry argument of the new phi.  */
321 def = PHI_ARG_DEF_FROM_EDGE (phi_orig, entry_arg_e);
322 add_phi_arg (phi_new, def, new_loop_entry_e);
/* step 2: the latch argument of the new phi.  */
325 def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
326 if (TREE_CODE (def) != SSA_NAME)
329 new_ssa_name = get_current_def (def);
332 /* This only happens if there are no definitions
333 inside the loop. use the phi_result in this case. */
334 new_ssa_name = PHI_RESULT (phi_new);
337 /* An ordinary ssa name defined in the loop. */
338 add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop));
340 /* step 3 (case 1). */
343 gcc_assert (new_loop_exit_e == orig_entry_e);
344 SET_PHI_ARG_DEF (phi_orig,
345 new_loop_exit_e->dest_idx,
352 /* Update PHI nodes for a guard of the LOOP.
355 - LOOP, GUARD_EDGE: LOOP is a loop for which we added guard code that
356 controls whether LOOP is to be executed. GUARD_EDGE is the edge that
357 originates from the guard-bb, skips LOOP and reaches the (unique) exit
358 bb of LOOP. This loop-exit-bb is an empty bb with one successor.
359 We denote this bb NEW_MERGE_BB because before the guard code was added
360 it had a single predecessor (the LOOP header), and now it became a merge
361 point of two paths - the path that ends with the LOOP exit-edge, and
362 the path that ends with GUARD_EDGE.
363 - NEW_EXIT_BB: New basic block that is added by this function between LOOP
364 and NEW_MERGE_BB. It is used to place loop-closed-ssa-form exit-phis.
366 ===> The CFG before the guard-code was added:
369 if (exit_loop) goto update_bb
370 else goto LOOP_header_bb
373 ==> The CFG after the guard-code was added:
375 if (LOOP_guard_condition) goto new_merge_bb
376 else goto LOOP_header_bb
379 if (exit_loop_condition) goto new_merge_bb
380 else goto LOOP_header_bb
385 ==> The CFG after this function:
387 if (LOOP_guard_condition) goto new_merge_bb
388 else goto LOOP_header_bb
391 if (exit_loop_condition) goto new_exit_bb
392 else goto LOOP_header_bb
399 1. creates and updates the relevant phi nodes to account for the new
400 incoming edge (GUARD_EDGE) into NEW_MERGE_BB. This involves:
401 1.1. Create phi nodes at NEW_MERGE_BB.
402 1.2. Update the phi nodes at the successor of NEW_MERGE_BB (denoted
403 UPDATE_BB). UPDATE_BB was the exit-bb of LOOP before NEW_MERGE_BB
404 2. preserves loop-closed-ssa-form by creating the required phi nodes
405 at the exit of LOOP (i.e, in NEW_EXIT_BB).
407 There are two flavors to this function:
409 slpeel_update_phi_nodes_for_guard1:
410 Here the guard controls whether we enter or skip LOOP, where LOOP is a
411 prolog_loop (loop1 below), and the new phis created in NEW_MERGE_BB are
412 for variables that have phis in the loop header.
414 slpeel_update_phi_nodes_for_guard2:
415 Here the guard controls whether we enter or skip LOOP, where LOOP is an
416 epilog_loop (loop2 below), and the new phis created in NEW_MERGE_BB are
417 for variables that have phis in the loop exit.
419 I.E., the overall structure is:
422 guard1 (goto loop1/merge1_bb)
425 guard2 (goto merge1_bb/merge2_bb)
432 slpeel_update_phi_nodes_for_guard1 takes care of creating phis in
433 loop1_exit_bb and merge1_bb. These are entry phis (phis for the vars
434 that have phis in loop1->header).
436 slpeel_update_phi_nodes_for_guard2 takes care of creating phis in
437 loop2_exit_bb and merge2_bb. These are exit phis (phis for the vars
438 that have phis in next_bb). It also adds some of these phis to
441 slpeel_update_phi_nodes_for_guard1 is always called before
442 slpeel_update_phi_nodes_for_guard2. They are both needed in order
443 to create correct data-flow and loop-closed-ssa-form.
445 Generally slpeel_update_phi_nodes_for_guard1 creates phis for variables
446 that change between iterations of a loop (and therefore have a phi-node
447 at the loop entry), whereas slpeel_update_phi_nodes_for_guard2 creates
448 phis for variables that are used out of the loop (and therefore have
449 loop-closed exit phis). Some variables may be both updated between
450 iterations and used after the loop. This is why in loop1_exit_bb we
451 may need both entry_phis (created by slpeel_update_phi_nodes_for_guard1)
452 and exit phis (created by slpeel_update_phi_nodes_for_guard2).
454 - IS_NEW_LOOP: if IS_NEW_LOOP is true, then LOOP is a newly created copy of
455 an original loop. i.e., we have:
458 guard_bb (goto LOOP/new_merge)
464 If IS_NEW_LOOP is false, then LOOP is an original loop, in which case we
468 guard_bb (goto LOOP/new_merge)
474 The SSA names defined in the original loop have a current
475 reaching definition that records the corresponding new
476 ssa-name used in the new duplicated loop copy.
479 /* Function slpeel_update_phi_nodes_for_guard1
482 - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
483 - DEFS - a bitmap of ssa names to mark new names for which we recorded
486 In the context of the overall structure, we have:
489 guard1 (goto loop1/merge1_bb)
492 guard2 (goto merge1_bb/merge2_bb)
499 For each name updated between loop iterations (i.e - for each name that has
500 an entry (loop-header) phi in LOOP) we create a new phi in:
501 1. merge1_bb (to account for the edge from guard1)
502 2. loop1_exit_bb (an exit-phi to keep LOOP in loop-closed form)
506 slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
507 bool is_new_loop, basic_block *new_exit_bb,
510 tree orig_phi, new_phi;
511 tree update_phi, update_phi2;
512 tree guard_arg, loop_arg;
513 basic_block new_merge_bb = guard_edge->dest;
514 edge e = EDGE_SUCC (new_merge_bb, 0);
515 basic_block update_bb = e->dest;
516 basic_block orig_bb = loop->header;
518 tree current_new_name;
521 /* Create new bb between loop and new_merge_bb. */
522 *new_exit_bb = split_edge (single_exit (loop));
524 new_exit_e = EDGE_SUCC (*new_exit_bb, 0);
/* Walk the loop-header phis of LOOP in lockstep with the phis of
   UPDATE_BB (they are expected to be organized in the same order).  */
526 for (orig_phi = phi_nodes (orig_bb), update_phi = phi_nodes (update_bb);
527 orig_phi && update_phi;
528 orig_phi = PHI_CHAIN (orig_phi), update_phi = PHI_CHAIN (update_phi))
530 /* Virtual phi; Mark it for renaming. We actually want to call
531 mark_sym_for_renaming, but since all ssa renaming datastructures
532 are going to be freed before we get to call ssa_update, we just
533 record this name for now in a bitmap, and will mark it for
535 name = PHI_RESULT (orig_phi);
536 if (!is_gimple_reg (SSA_NAME_VAR (name)))
537 bitmap_set_bit (vect_vnames_to_rename, SSA_NAME_VERSION (name));
539 /** 1. Handle new-merge-point phis **/
541 /* 1.1. Generate new phi node in NEW_MERGE_BB: */
542 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
545 /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
546 of LOOP. Set the two phi args in NEW_PHI for these edges: */
547 loop_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, EDGE_SUCC (loop->latch, 0));
548 guard_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, loop_preheader_edge (loop));
550 add_phi_arg (new_phi, loop_arg, new_exit_e);
551 add_phi_arg (new_phi, guard_arg, guard_edge);
553 /* 1.3. Update phi in successor block. */
554 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == loop_arg
555 || PHI_ARG_DEF_FROM_EDGE (update_phi, e) == guard_arg);
556 SET_PHI_ARG_DEF (update_phi, e->dest_idx, PHI_RESULT (new_phi));
557 update_phi2 = new_phi;
560 /** 2. Handle loop-closed-ssa-form phis **/
562 /* 2.1. Generate new phi node in NEW_EXIT_BB: */
563 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
566 /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop. */
567 add_phi_arg (new_phi, loop_arg, single_exit (loop));
569 /* 2.3. Update phi in successor of NEW_EXIT_BB: */
570 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
571 SET_PHI_ARG_DEF (update_phi2, new_exit_e->dest_idx, PHI_RESULT (new_phi));
573 /* 2.4. Record the newly created name with set_current_def.
574 We want to find a name such that
575 name = get_current_def (orig_loop_name)
576 and to set its current definition as follows:
577 set_current_def (name, new_phi_name)
579 If LOOP is a new loop then loop_arg is already the name we're
580 looking for. If LOOP is the original loop, then loop_arg is
581 the orig_loop_name and the relevant name is recorded in its
582 current reaching definition. */
584 current_new_name = loop_arg;
587 current_new_name = get_current_def (loop_arg);
588 /* current_def is not available only if the variable does not
589 change inside the loop, in which case we also don't care
590 about recording a current_def for it because we won't be
591 trying to create loop-exit-phis for it. */
592 if (!current_new_name)
595 gcc_assert (get_current_def (current_new_name) == NULL_TREE);
597 set_current_def (current_new_name, PHI_RESULT (new_phi));
598 bitmap_set_bit (*defs, SSA_NAME_VERSION (current_new_name));
/* The phis were created in reverse order; restore source order.  */
601 set_phi_nodes (new_merge_bb, phi_reverse (phi_nodes (new_merge_bb)));
605 /* Function slpeel_update_phi_nodes_for_guard2
608 - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
610 In the context of the overall structure, we have:
613 guard1 (goto loop1/merge1_bb)
616 guard2 (goto merge1_bb/merge2_bb)
623 For each name used outside the loop (i.e - for each name that has an exit
624 phi in next_bb) we create a new phi in:
625 1. merge2_bb (to account for the edge from guard_bb)
626 2. loop2_exit_bb (an exit-phi to keep LOOP in loop-closed form)
627 3. guard2 bb (an exit phi to keep the preceding loop in loop-closed form),
628 if needed (if it wasn't handled by slpeel_update_phi_nodes_for_guard1).
632 slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
633 bool is_new_loop, basic_block *new_exit_bb)
635 tree orig_phi, new_phi;
636 tree update_phi, update_phi2;
637 tree guard_arg, loop_arg;
638 basic_block new_merge_bb = guard_edge->dest;
639 edge e = EDGE_SUCC (new_merge_bb, 0);
640 basic_block update_bb = e->dest;
642 tree orig_def, orig_def_new_name;
643 tree new_name, new_name2;
646 /* Create new bb between loop and new_merge_bb. */
647 *new_exit_bb = split_edge (single_exit (loop));
649 new_exit_e = EDGE_SUCC (*new_exit_bb, 0);
/* Walk the loop-closed exit phis of UPDATE_BB.  */
651 for (update_phi = phi_nodes (update_bb); update_phi;
652 update_phi = PHI_CHAIN (update_phi))
654 orig_phi = update_phi;
655 orig_def = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
656 /* This loop-closed-phi actually doesn't represent a use
657 out of the loop - the phi arg is a constant. */
658 if (TREE_CODE (orig_def) != SSA_NAME)
660 orig_def_new_name = get_current_def (orig_def);
663 /** 1. Handle new-merge-point phis **/
665 /* 1.1. Generate new phi node in NEW_MERGE_BB: */
666 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
669 /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
670 of LOOP. Set the two PHI args in NEW_PHI for these edges: */
672 new_name2 = NULL_TREE;
673 if (orig_def_new_name)
675 new_name = orig_def_new_name;
676 /* Some variables have both loop-entry-phis and loop-exit-phis.
677 Such variables were given yet newer names by phis placed in
678 guard_bb by slpeel_update_phi_nodes_for_guard1. I.e:
679 new_name2 = get_current_def (get_current_def (orig_name)). */
680 new_name2 = get_current_def (new_name);
/* Pick the guard argument: the original name, its current def, or
   the yet-newer name from guard1's phis, depending on which exist --
   NOTE(review): the controlling conditions are elided in this excerpt.  */
685 guard_arg = orig_def;
690 guard_arg = new_name;
694 guard_arg = new_name2;
696 add_phi_arg (new_phi, loop_arg, new_exit_e);
697 add_phi_arg (new_phi, guard_arg, guard_edge);
699 /* 1.3. Update phi in successor block. */
700 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == orig_def);
701 SET_PHI_ARG_DEF (update_phi, e->dest_idx, PHI_RESULT (new_phi));
702 update_phi2 = new_phi;
705 /** 2. Handle loop-closed-ssa-form phis **/
707 /* 2.1. Generate new phi node in NEW_EXIT_BB: */
708 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
711 /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop. */
712 add_phi_arg (new_phi, loop_arg, single_exit (loop));
714 /* 2.3. Update phi in successor of NEW_EXIT_BB: */
715 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
716 SET_PHI_ARG_DEF (update_phi2, new_exit_e->dest_idx, PHI_RESULT (new_phi));
719 /** 3. Handle loop-closed-ssa-form phis for first loop **/
721 /* 3.1. Find the relevant names that need an exit-phi in
722 GUARD_BB, i.e. names for which
723 slpeel_update_phi_nodes_for_guard1 had not already created a
724 phi node. This is the case for names that are used outside
725 the loop (and therefore need an exit phi) but are not updated
726 across loop iterations (and therefore don't have a
729 slpeel_update_phi_nodes_for_guard1 is responsible for
730 creating loop-exit phis in GUARD_BB for names that have a
731 loop-header-phi. When such a phi is created we also record
732 the new name in its current definition. If this new name
733 exists, then guard_arg was set to this new name (see 1.2
734 above). Therefore, if guard_arg is not this new name, this
735 is an indication that an exit-phi in GUARD_BB was not yet
736 created, so we take care of it here. */
737 if (guard_arg == new_name2)
741 /* 3.2. Generate new phi node in GUARD_BB: */
742 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
745 /* 3.3. GUARD_BB has one incoming edge: */
746 gcc_assert (EDGE_COUNT (guard_edge->src->preds) == 1);
747 add_phi_arg (new_phi, arg, EDGE_PRED (guard_edge->src, 0));
749 /* 3.4. Update phi in successor of GUARD_BB: */
750 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, guard_edge)
752 SET_PHI_ARG_DEF (update_phi2, guard_edge->dest_idx, PHI_RESULT (new_phi));
/* The phis were created in reverse order; restore source order.  */
755 set_phi_nodes (new_merge_bb, phi_reverse (phi_nodes (new_merge_bb)));
759 /* Make the LOOP iterate NITERS times. This is done by adding a new IV
760 that starts at zero, increases by one and its limit is NITERS.
762 Assumption: the exit-condition of LOOP is the last stmt in the loop. */
765 slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
767 tree indx_before_incr, indx_after_incr, cond_stmt, cond;
769 edge exit_edge = single_exit (loop);
770 block_stmt_iterator loop_cond_bsi;
771 block_stmt_iterator incr_bsi;
773 tree begin_label = tree_block_label (loop->latch);
774 tree exit_label = tree_block_label (single_exit (loop)->dest);
775 tree init = build_int_cst (TREE_TYPE (niters), 0);
776 tree step = build_int_cst (TREE_TYPE (niters), 1);
/* Locate the existing exit condition so the new test can be inserted
   in its place.  */
781 orig_cond = get_loop_exit_condition (loop);
782 gcc_assert (orig_cond);
783 loop_cond_bsi = bsi_for_stmt (orig_cond);
/* Create the counting IV {0, +, 1} at the standard increment position.  */
785 standard_iv_increment_position (loop, &incr_bsi, &insert_after);
786 create_iv (init, step, NULL_TREE, loop,
787 &incr_bsi, insert_after, &indx_before_incr, &indx_after_incr);
/* Build the replacement exit test, preserving the branch direction of
   the original exit edge: exit when the incremented index reaches
   NITERS.  */
789 if (exit_edge->flags & EDGE_TRUE_VALUE) /* 'then' edge exits the loop. */
791 cond = build2 (GE_EXPR, boolean_type_node, indx_after_incr, niters);
792 then_label = build1 (GOTO_EXPR, void_type_node, exit_label);
793 else_label = build1 (GOTO_EXPR, void_type_node, begin_label);
795 else /* 'then' edge loops back. */
797 cond = build2 (LT_EXPR, boolean_type_node, indx_after_incr, niters);
798 then_label = build1 (GOTO_EXPR, void_type_node, begin_label);
799 else_label = build1 (GOTO_EXPR, void_type_node, exit_label);
802 cond_stmt = build3 (COND_EXPR, TREE_TYPE (orig_cond), cond,
803 then_label, else_label);
804 bsi_insert_before (&loop_cond_bsi, cond_stmt, BSI_SAME_STMT);
806 /* Remove old loop exit test: */
807 bsi_remove (&loop_cond_bsi, true);
/* Dump the new exit condition when detailed dumping is requested.  */
809 loop_loc = find_loop_location (loop);
810 if (dump_file && (dump_flags & TDF_DETAILS))
812 if (loop_loc != UNKNOWN_LOC)
813 fprintf (dump_file, "\nloop at %s:%d: ",
814 LOC_FILE (loop_loc), LOC_LINE (loop_loc));
815 print_generic_expr (dump_file, cond_stmt, TDF_SLIM);
/* Record the now-known iteration count on the loop.  */
818 loop->nb_iterations = niters;
822 /* Given LOOP this function generates a new copy of it and puts it
823 on E which is either the entry or exit of LOOP. */
/* Returns the new loop copy on success; presumably NULL when E is
   neither the entry nor the exit edge or when the blocks cannot be
   copied -- NOTE(review): the failure returns are elided in this
   excerpt.  */
826 slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, struct loops *loops,
829 struct loop *new_loop;
830 basic_block *new_bbs, *bbs;
833 basic_block exit_dest;
/* E must be either the single exit or the preheader edge of LOOP.  */
837 at_exit = (e == single_exit (loop));
838 if (!at_exit && e != loop_preheader_edge (loop))
841 bbs = get_loop_body (loop);
843 /* Check whether duplication is possible. */
844 if (!can_copy_bbs_p (bbs, loop->num_nodes))
850 /* Generate new loop structure. */
851 new_loop = duplicate_loop (loops, loop, loop->outer);
/* Remember whether LOOP's header immediately dominated the exit
   destination, so dominators can be fixed up after the copy.  */
858 exit_dest = single_exit (loop)->dest;
859 was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
860 exit_dest) == loop->header ?
863 new_bbs = XNEWVEC (basic_block, loop->num_nodes);
865 exit = single_exit (loop);
866 copy_bbs (bbs, loop->num_nodes, new_bbs,
867 &exit, 1, &new_exit, NULL,
869 set_single_exit (new_loop, new_exit);
871 /* Duplicating phi args at exit bbs as coming
872 also from exit of duplicated loop. */
873 for (phi = phi_nodes (exit_dest); phi; phi = PHI_CHAIN (phi))
875 phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, single_exit (loop));
878 edge new_loop_exit_edge;
/* The exit edge of the copy is whichever header successor does not
   stay inside the copied loop (i.e. does not go to the latch).  */
880 if (EDGE_SUCC (new_loop->header, 0)->dest == new_loop->latch)
881 new_loop_exit_edge = EDGE_SUCC (new_loop->header, 1);
883 new_loop_exit_edge = EDGE_SUCC (new_loop->header, 0);
885 add_phi_arg (phi, phi_arg, new_loop_exit_edge);
889 if (at_exit) /* Add the loop copy at exit. */
891 redirect_edge_and_branch_force (e, new_loop->header);
892 set_immediate_dominator (CDI_DOMINATORS, new_loop->header, e->src);
894 set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_loop->header);
896 else /* Add the copy at entry. */
899 edge entry_e = loop_preheader_edge (loop);
900 basic_block preheader = entry_e->src;
902 if (!flow_bb_inside_loop_p (new_loop,
903 EDGE_SUCC (new_loop->header, 0)->dest))
904 new_exit_e = EDGE_SUCC (new_loop->header, 0);
906 new_exit_e = EDGE_SUCC (new_loop->header, 1);
/* Chain the copy in front of LOOP: its exit falls through to LOOP's
   header, and the old preheader now enters the copy.  */
908 redirect_edge_and_branch_force (new_exit_e, loop->header);
909 set_immediate_dominator (CDI_DOMINATORS, loop->header,
912 /* We have to add phi args to the loop->header here as coming
913 from new_exit_e edge. */
914 for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
916 phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, entry_e);
918 add_phi_arg (phi, phi_arg, new_exit_e);
921 redirect_edge_and_branch_force (entry_e, new_loop->header);
922 set_immediate_dominator (CDI_DOMINATORS, new_loop->header, preheader);
932 /* Given the condition statement COND, put it as the last statement
933 of GUARD_BB; EXIT_BB is the basic block to skip the loop;
934 Assumes that this is the single exit of the guarded loop.
935 Returns the skip edge. */
938 slpeel_add_loop_guard (basic_block guard_bb, tree cond, basic_block exit_bb,
941 block_stmt_iterator bsi;
943 tree cond_stmt, then_label, else_label;
/* The existing fallthrough successor of GUARD_BB becomes the 'enter
   the loop' (false) edge of the new conditional.  */
945 enter_e = EDGE_SUCC (guard_bb, 0);
946 enter_e->flags &= ~EDGE_FALLTHRU;
947 enter_e->flags |= EDGE_FALSE_VALUE;
948 bsi = bsi_last (guard_bb);
/* Build 'if (COND) goto EXIT_BB; else goto <loop>;' and append it to
   GUARD_BB.  */
950 then_label = build1 (GOTO_EXPR, void_type_node,
951 tree_block_label (exit_bb));
952 else_label = build1 (GOTO_EXPR, void_type_node,
953 tree_block_label (enter_e->dest));
954 cond_stmt = build3 (COND_EXPR, void_type_node, cond,
955 then_label, else_label);
956 bsi_insert_after (&bsi, cond_stmt, BSI_NEW_STMT);
957 /* Add new edge to connect guard block to the merge/loop-exit block. */
958 new_e = make_edge (guard_bb, exit_bb, EDGE_TRUE_VALUE);
959 set_immediate_dominator (CDI_DOMINATORS, exit_bb, dom_bb);
964 /* This function verifies that the following restrictions apply to LOOP:
966 (2) it consists of exactly 2 basic blocks - header, and an empty latch.
967 (3) it is single entry, single exit
968 (4) its exit condition is the last stmt in the header
969 (5) E is the entry/exit edge of LOOP.
973 slpeel_can_duplicate_loop_p (struct loop *loop, edge e)
975 edge exit_e = single_exit (loop);
976 edge entry_e = loop_preheader_edge (loop);
977 tree orig_cond = get_loop_exit_condition (loop);
978 block_stmt_iterator loop_exit_bsi = bsi_last (exit_e->src);
/* Refuse while an SSA update is pending -- NOTE(review): the return
   statements of this predicate are elided in this excerpt.  */
980 if (need_ssa_update_p ())
984 /* All loops have an outer scope; the only case loop->outer is NULL is for
985 the function itself. */
987 || loop->num_nodes != 2
988 || !empty_block_p (loop->latch)
989 || !single_exit (loop)
990 /* Verify that new loop exit condition can be trivially modified. */
991 || (!orig_cond || orig_cond != bsi_stmt (loop_exit_bsi))
992 || (e != exit_e && e != entry_e))
998 #ifdef ENABLE_CHECKING
/* Sanity-check (checking builds only) the CFG produced by peeling:
   FIRST_LOOP's exit block must branch either into SECOND_LOOP's
   preheader or around it.  */
1000 slpeel_verify_cfg_after_peeling (struct loop *first_loop,
1001 struct loop *second_loop)
1003 basic_block loop1_exit_bb = single_exit (first_loop)->dest;
1004 basic_block loop2_entry_bb = loop_preheader_edge (second_loop)->src;
1005 basic_block loop1_entry_bb = loop_preheader_edge (first_loop)->src;
1007 /* A guard that controls whether the second_loop is to be executed or skipped
1008 is placed in first_loop->exit. first_loop->exit therefore has two
1009 successors - one is the preheader of second_loop, and the other is a bb
1012 gcc_assert (EDGE_COUNT (loop1_exit_bb->succs) == 2);
1014 /* 1. Verify that one of the successors of first_loop->exit is the preheader
1017 /* The preheader of new_loop is expected to have two predecessors:
1018 first_loop->exit and the block that precedes first_loop. */
1020 gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
1021 && ((EDGE_PRED (loop2_entry_bb, 0)->src == loop1_exit_bb
1022 && EDGE_PRED (loop2_entry_bb, 1)->src == loop1_entry_bb)
1023 || (EDGE_PRED (loop2_entry_bb, 1)->src == loop1_exit_bb
1024 && EDGE_PRED (loop2_entry_bb, 0)->src == loop1_entry_bb)));
1026 /* Verify that the other successor of first_loop->exit is after the
1032 /* Function slpeel_tree_peel_loop_to_edge.
1034 Peel the first (last) iterations of LOOP into a new prolog (epilog) loop
1035 that is placed on the entry (exit) edge E of LOOP. After this transformation
1036 we have two loops one after the other - first-loop iterates FIRST_NITERS
1037 times, and second-loop iterates the remainder NITERS - FIRST_NITERS times.
1040 - LOOP: the loop to be peeled.
1041 - E: the exit or entry edge of LOOP.
1042 If it is the entry edge, we peel the first iterations of LOOP. In this
1043 case first-loop is LOOP, and second-loop is the newly created loop.
1044 If it is the exit edge, we peel the last iterations of LOOP. In this
1045 case, first-loop is the newly created loop, and second-loop is LOOP.
1046 - NITERS: the number of iterations that LOOP iterates.
1047 - FIRST_NITERS: the number of iterations that the first-loop should iterate.
1048 - UPDATE_FIRST_LOOP_COUNT: specifies whether this function is responsible
1049 for updating the loop bound of the first-loop to FIRST_NITERS. If it
1050 is false, the caller of this function may want to take care of this
1051 (this can be useful if we don't want new stmts added to first-loop).
1054 The function returns a pointer to the new loop-copy, or NULL if it failed
1055 to perform the transformation.
1057 The function generates two if-then-else guards: one before the first loop,
1058 and the other before the second loop:
1060 if (FIRST_NITERS == 0) then skip the first loop,
1061 and go directly to the second loop.
1062 The second guard is:
1063 if (FIRST_NITERS == NITERS) then skip the second loop.
1065 FORNOW only simple loops are supported (see slpeel_can_duplicate_loop_p).
1066 FORNOW the resulting code will not be in loop-closed-ssa form.
1070 slpeel_tree_peel_loop_to_edge (struct loop *loop, struct loops *loops,
1071 edge e, tree first_niters,
1072 tree niters, bool update_first_loop_count)
1074 struct loop *new_loop = NULL, *first_loop, *second_loop;
1078 basic_block bb_before_second_loop, bb_after_second_loop;
1079 basic_block bb_before_first_loop;
1080 basic_block bb_between_loops;
1081 basic_block new_exit_bb;
1082 edge exit_e = single_exit (loop);
1085 if (!slpeel_can_duplicate_loop_p (loop, e))
1088 /* We have to initialize cfg_hooks. Then, when calling
1089 cfg_hooks->split_edge, the function tree_split_edge
1090 is actually called and, when calling cfg_hooks->duplicate_block,
1091 the function tree_duplicate_bb is called. */
1092 tree_register_cfg_hooks ();
1095 /* 1. Generate a copy of LOOP and put it on E (E is the entry/exit of LOOP).
1096 Resulting CFG would be:
1109 if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, loops, e)))
1111 loop_loc = find_loop_location (loop);
1112 if (dump_file && (dump_flags & TDF_DETAILS))
1114 if (loop_loc != UNKNOWN_LOC)
1115 fprintf (dump_file, "\n%s:%d: note: ",
1116 LOC_FILE (loop_loc), LOC_LINE (loop_loc));
1117 fprintf (dump_file, "tree_duplicate_loop_to_edge_cfg failed.\n");
1124 /* NEW_LOOP was placed after LOOP. */
1126 second_loop = new_loop;
1130 /* NEW_LOOP was placed before LOOP. */
1131 first_loop = new_loop;
1135 definitions = ssa_names_to_replace ();
1136 slpeel_update_phis_for_duplicate_loop (loop, new_loop, e == exit_e);
1137 rename_variables_in_loop (new_loop);
1140 /* 2. Add the guard that controls whether the first loop is executed.
1141 Resulting CFG would be:
1143 bb_before_first_loop:
1144 if (FIRST_NITERS == 0) GOTO bb_before_second_loop
1151 bb_before_second_loop:
1160 bb_before_first_loop = split_edge (loop_preheader_edge (first_loop));
1161 bb_before_second_loop = split_edge (single_exit (first_loop));
1164 fold_build2 (LE_EXPR, boolean_type_node, first_niters,
1165 build_int_cst (TREE_TYPE (first_niters), 0));
1166 skip_e = slpeel_add_loop_guard (bb_before_first_loop, pre_condition,
1167 bb_before_second_loop, bb_before_first_loop);
1168 slpeel_update_phi_nodes_for_guard1 (skip_e, first_loop,
1169 first_loop == new_loop,
1170 &new_exit_bb, &definitions);
1173 /* 3. Add the guard that controls whether the second loop is executed.
1174 Resulting CFG would be:
1176 bb_before_first_loop:
1177 if (FIRST_NITERS == 0) GOTO bb_before_second_loop (skip first loop)
1185 if (FIRST_NITERS == NITERS) GOTO bb_after_second_loop (skip second loop)
1186 GOTO bb_before_second_loop
1188 bb_before_second_loop:
1194 bb_after_second_loop:
1199 bb_between_loops = new_exit_bb;
1200 bb_after_second_loop = split_edge (single_exit (second_loop));
1203 fold_build2 (EQ_EXPR, boolean_type_node, first_niters, niters);
1204 skip_e = slpeel_add_loop_guard (bb_between_loops, pre_condition,
1205 bb_after_second_loop, bb_before_first_loop);
1206 slpeel_update_phi_nodes_for_guard2 (skip_e, second_loop,
1207 second_loop == new_loop, &new_exit_bb);
1209 /* 4. Make first-loop iterate FIRST_NITERS times, if requested.
1211 if (update_first_loop_count)
1212 slpeel_make_loop_iterate_ntimes (first_loop, first_niters);
1214 BITMAP_FREE (definitions);
1215 delete_update_ssa ();
1220 /* Function vect_get_loop_location.
1222 Extract the location of the loop in the source code.
1223 If the loop is not well formed for vectorization, an estimated
1224 location is calculated.
1225 Return the loop location if succeed and NULL if not. */
1228 find_loop_location (struct loop *loop)
1230 tree node = NULL_TREE;
1232 block_stmt_iterator si;
1237 node = get_loop_exit_condition (loop);
1239 if (node && EXPR_P (node) && EXPR_HAS_LOCATION (node)
1240 && EXPR_FILENAME (node) && EXPR_LINENO (node))
1241 return EXPR_LOC (node);
1243 /* If we got here the loop is probably not "well formed",
1244 try to estimate the loop location */
1251 for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
1253 node = bsi_stmt (si);
1254 if (node && EXPR_P (node) && EXPR_HAS_LOCATION (node))
1255 return EXPR_LOC (node);
1262 /*************************************************************************
1263 Vectorization Debug Information.
1264 *************************************************************************/
1266 /* Function vect_set_verbosity_level.
1268 Called from toplev.c upon detection of the
1269 -ftree-vectorizer-verbose=N option. */
1272 vect_set_verbosity_level (const char *val)
1277 if (vl < MAX_VERBOSITY_LEVEL)
1278 vect_verbosity_level = vl;
1280 vect_verbosity_level = MAX_VERBOSITY_LEVEL - 1;
1284 /* Function vect_set_dump_settings.
1286 Fix the verbosity level of the vectorizer if the
1287 requested level was not set explicitly using the flag
1288 -ftree-vectorizer-verbose=N.
1289 Decide where to print the debugging information (dump_file/stderr).
1290 If the user defined the verbosity level, but there is no dump file,
1291 print to stderr, otherwise print to the dump file. */
1294 vect_set_dump_settings (void)
1296 vect_dump = dump_file;
1298 /* Check if the verbosity level was defined by the user: */
1299 if (vect_verbosity_level != MAX_VERBOSITY_LEVEL)
1301 /* If there is no dump file, print to stderr. */
1307 /* User didn't specify verbosity level: */
1308 if (dump_file && (dump_flags & TDF_DETAILS))
1309 vect_verbosity_level = REPORT_DETAILS;
1310 else if (dump_file && (dump_flags & TDF_STATS))
1311 vect_verbosity_level = REPORT_UNVECTORIZED_LOOPS;
1313 vect_verbosity_level = REPORT_NONE;
1315 gcc_assert (dump_file || vect_verbosity_level == REPORT_NONE);
1319 /* Function vect_print_dump_info.
1321 For vectorization debug dumps. */
1324 vect_print_dump_info (enum verbosity_levels vl)
1326 if (vl > vect_verbosity_level)
1329 if (!current_function_decl || !vect_dump)
1332 if (vect_loop_location == UNKNOWN_LOC)
1333 fprintf (vect_dump, "\n%s:%d: note: ",
1334 DECL_SOURCE_FILE (current_function_decl),
1335 DECL_SOURCE_LINE (current_function_decl));
1337 fprintf (vect_dump, "\n%s:%d: note: ",
1338 LOC_FILE (vect_loop_location), LOC_LINE (vect_loop_location));
1344 /*************************************************************************
1345 Vectorization Utilities.
1346 *************************************************************************/
1348 /* Function new_stmt_vec_info.
1350 Create and initialize a new stmt_vec_info struct for STMT. */
1353 new_stmt_vec_info (tree stmt, loop_vec_info loop_vinfo)
1356 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
1358 STMT_VINFO_TYPE (res) = undef_vec_info_type;
1359 STMT_VINFO_STMT (res) = stmt;
1360 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
1361 STMT_VINFO_RELEVANT (res) = 0;
1362 STMT_VINFO_LIVE_P (res) = false;
1363 STMT_VINFO_VECTYPE (res) = NULL;
1364 STMT_VINFO_VEC_STMT (res) = NULL;
1365 STMT_VINFO_IN_PATTERN_P (res) = false;
1366 STMT_VINFO_RELATED_STMT (res) = NULL;
1367 STMT_VINFO_DATA_REF (res) = NULL;
1368 if (TREE_CODE (stmt) == PHI_NODE)
1369 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
1371 STMT_VINFO_DEF_TYPE (res) = vect_loop_def;
1372 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
1378 /* Function new_loop_vec_info.
1380 Create and initialize a new loop_vec_info struct for LOOP, as well as
1381 stmt_vec_info structs for all the stmts in LOOP. */
1384 new_loop_vec_info (struct loop *loop)
1388 block_stmt_iterator si;
1391 res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
1393 bbs = get_loop_body (loop);
1395 /* Create stmt_info for all stmts in the loop. */
1396 for (i = 0; i < loop->num_nodes; i++)
1398 basic_block bb = bbs[i];
1401 for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
1403 stmt_ann_t ann = get_stmt_ann (phi);
1404 set_stmt_info (ann, new_stmt_vec_info (phi, res));
1407 for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
1409 tree stmt = bsi_stmt (si);
1412 ann = stmt_ann (stmt);
1413 set_stmt_info (ann, new_stmt_vec_info (stmt, res));
1417 LOOP_VINFO_LOOP (res) = loop;
1418 LOOP_VINFO_BBS (res) = bbs;
1419 LOOP_VINFO_EXIT_COND (res) = NULL;
1420 LOOP_VINFO_NITERS (res) = NULL;
1421 LOOP_VINFO_VECTORIZABLE_P (res) = 0;
1422 LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
1423 LOOP_VINFO_VECT_FACTOR (res) = 0;
1424 LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
1425 LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
1426 LOOP_VINFO_UNALIGNED_DR (res) = NULL;
1427 LOOP_VINFO_MAY_MISALIGN_STMTS (res)
1428 = VEC_alloc (tree, heap, PARAM_VALUE (PARAM_VECT_MAX_VERSION_CHECKS));
1434 /* Function destroy_loop_vec_info.
1436 Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
1437 stmts in the loop. */
1440 destroy_loop_vec_info (loop_vec_info loop_vinfo)
1445 block_stmt_iterator si;
1451 loop = LOOP_VINFO_LOOP (loop_vinfo);
1453 bbs = LOOP_VINFO_BBS (loop_vinfo);
1454 nbbs = loop->num_nodes;
1456 for (j = 0; j < nbbs; j++)
1458 basic_block bb = bbs[j];
1460 stmt_vec_info stmt_info;
1462 for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
1464 stmt_ann_t ann = stmt_ann (phi);
1466 stmt_info = vinfo_for_stmt (phi);
1468 set_stmt_info (ann, NULL);
1471 for (si = bsi_start (bb); !bsi_end_p (si); )
1473 tree stmt = bsi_stmt (si);
1474 stmt_ann_t ann = stmt_ann (stmt);
1475 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1479 /* Check if this is a "pattern stmt" (introduced by the
1480 vectorizer during the pattern recognition pass). */
1481 bool remove_stmt_p = false;
1482 tree orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
1485 stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
1487 && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
1488 remove_stmt_p = true;
1491 /* Free stmt_vec_info. */
1492 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
1494 set_stmt_info (ann, NULL);
1496 /* Remove dead "pattern stmts". */
1498 bsi_remove (&si, true);
1504 free (LOOP_VINFO_BBS (loop_vinfo));
1505 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
1506 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
1507 VEC_free (tree, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
1513 /* Function vect_can_force_dr_alignment_p.
1515 Returns whether the alignment of a DECL can be forced to be aligned
1516 on ALIGNMENT bit boundary. */
1519 vect_can_force_dr_alignment_p (tree decl, unsigned int alignment)
1521 if (TREE_CODE (decl) != VAR_DECL)
1524 if (DECL_EXTERNAL (decl))
1527 if (TREE_ASM_WRITTEN (decl))
1530 if (TREE_STATIC (decl))
1531 return (alignment <= MAX_OFILE_ALIGNMENT);
1533 /* This is not 100% correct. The absolute correct stack alignment
1534 is STACK_BOUNDARY. We're supposed to hope, but not assume, that
1535 PREFERRED_STACK_BOUNDARY is honored by all translation units.
1536 However, until someone implements forced stack alignment, SSE
1537 isn't really usable without this. */
1538 return (alignment <= PREFERRED_STACK_BOUNDARY);
1542 /* Function get_vectype_for_scalar_type.
1544 Returns the vector type corresponding to SCALAR_TYPE as supported
1548 get_vectype_for_scalar_type (tree scalar_type)
1550 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
1551 int nbytes = GET_MODE_SIZE (inner_mode);
1555 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD)
1558 /* FORNOW: Only a single vector size per target (UNITS_PER_SIMD_WORD)
1560 nunits = UNITS_PER_SIMD_WORD / nbytes;
1562 vectype = build_vector_type (scalar_type, nunits);
1563 if (vect_print_dump_info (REPORT_DETAILS))
1565 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
1566 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
1572 if (vect_print_dump_info (REPORT_DETAILS))
1574 fprintf (vect_dump, "vectype: ");
1575 print_generic_expr (vect_dump, vectype, TDF_SLIM);
1578 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1579 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
1581 if (vect_print_dump_info (REPORT_DETAILS))
1582 fprintf (vect_dump, "mode not supported by target.");
1590 /* Function vect_supportable_dr_alignment
1592 Return whether the data reference DR is supported with respect to its
1595 enum dr_alignment_support
1596 vect_supportable_dr_alignment (struct data_reference *dr)
1598 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
1599 enum machine_mode mode = (int) TYPE_MODE (vectype);
1601 if (aligned_access_p (dr))
1604 /* Possibly unaligned access. */
1606 if (DR_IS_READ (dr))
1608 if (vec_realign_load_optab->handlers[mode].insn_code != CODE_FOR_nothing
1609 && (!targetm.vectorize.builtin_mask_for_load
1610 || targetm.vectorize.builtin_mask_for_load ()))
1611 return dr_unaligned_software_pipeline;
1613 if (movmisalign_optab->handlers[mode].insn_code != CODE_FOR_nothing)
1614 /* Can't software pipeline the loads, but can at least do them. */
1615 return dr_unaligned_supported;
1619 return dr_unaligned_unsupported;
1623 /* Function vect_is_simple_use.
1626 LOOP - the loop that is being vectorized.
1627 OPERAND - operand of a stmt in LOOP.
1628 DEF - the defining stmt in case OPERAND is an SSA_NAME.
1630 Returns whether a stmt with OPERAND can be vectorized.
1631 Supportable operands are constants, loop invariants, and operands that are
1632 defined by the current iteration of the loop. Unsupportable operands are
1633 those that are defined by a previous iteration of the loop (as is the case
1634 in reduction/induction computations). */
1637 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo, tree *def_stmt,
1638 tree *def, enum vect_def_type *dt)
1641 stmt_vec_info stmt_vinfo;
1642 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1644 *def_stmt = NULL_TREE;
1647 if (vect_print_dump_info (REPORT_DETAILS))
1649 fprintf (vect_dump, "vect_is_simple_use: operand ");
1650 print_generic_expr (vect_dump, operand, TDF_SLIM);
1653 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
1655 *dt = vect_constant_def;
1659 if (TREE_CODE (operand) != SSA_NAME)
1661 if (vect_print_dump_info (REPORT_DETAILS))
1662 fprintf (vect_dump, "not ssa-name.");
1666 *def_stmt = SSA_NAME_DEF_STMT (operand);
1667 if (*def_stmt == NULL_TREE )
1669 if (vect_print_dump_info (REPORT_DETAILS))
1670 fprintf (vect_dump, "no def_stmt.");
1674 if (vect_print_dump_info (REPORT_DETAILS))
1676 fprintf (vect_dump, "def_stmt: ");
1677 print_generic_expr (vect_dump, *def_stmt, TDF_SLIM);
1680 /* empty stmt is expected only in case of a function argument.
1681 (Otherwise - we expect a phi_node or a modify_expr). */
1682 if (IS_EMPTY_STMT (*def_stmt))
1684 tree arg = TREE_OPERAND (*def_stmt, 0);
1685 if (TREE_CODE (arg) == INTEGER_CST || TREE_CODE (arg) == REAL_CST)
1688 *dt = vect_invariant_def;
1692 if (vect_print_dump_info (REPORT_DETAILS))
1693 fprintf (vect_dump, "Unexpected empty stmt.");
1697 bb = bb_for_stmt (*def_stmt);
1698 if (!flow_bb_inside_loop_p (loop, bb))
1699 *dt = vect_invariant_def;
1702 stmt_vinfo = vinfo_for_stmt (*def_stmt);
1703 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
1706 if (*dt == vect_unknown_def_type)
1708 if (vect_print_dump_info (REPORT_DETAILS))
1709 fprintf (vect_dump, "Unsupported pattern.");
1713 /* stmts inside the loop that have been identified as performing
1714 a reduction operation cannot have uses in the loop. */
1715 if (*dt == vect_reduction_def && TREE_CODE (*def_stmt) != PHI_NODE)
1717 if (vect_print_dump_info (REPORT_DETAILS))
1718 fprintf (vect_dump, "reduction used in loop.");
1722 if (vect_print_dump_info (REPORT_DETAILS))
1723 fprintf (vect_dump, "type of def: %d.",*dt);
1725 switch (TREE_CODE (*def_stmt))
1728 *def = PHI_RESULT (*def_stmt);
1729 gcc_assert (*dt == vect_induction_def || *dt == vect_reduction_def
1730 || *dt == vect_invariant_def);
1734 *def = TREE_OPERAND (*def_stmt, 0);
1735 gcc_assert (*dt == vect_loop_def || *dt == vect_invariant_def);
1739 if (vect_print_dump_info (REPORT_DETAILS))
1740 fprintf (vect_dump, "unsupported defining stmt: ");
1744 if (*dt == vect_induction_def)
1746 if (vect_print_dump_info (REPORT_DETAILS))
1747 fprintf (vect_dump, "induction not supported.");
1755 /* Function supportable_widening_operation
1757 Check whether an operation represented by the code CODE is a
1758 widening operation that is supported by the target platform in
1759 vector form (i.e., when operating on arguments of type VECTYPE).
1761 The two kinds of widening operations we currently support are
1762 NOP and WIDEN_MULT. This function checks if these operations
1763 are supported by the target platform either directly (via vector
1764 tree-codes), or via target builtins.
1767 - CODE1 and CODE2 are codes of vector operations to be used when
1768 vectorizing the operation, if available.
1769 - DECL1 and DECL2 are decls of target builtin functions to be used
1770 when vectorizing the operation, if available. In this case,
1771 CODE1 and CODE2 are CALL_EXPR. */
1774 supportable_widening_operation (enum tree_code code, tree stmt, tree vectype,
1775 tree *decl1, tree *decl2,
1776 enum tree_code *code1, enum tree_code *code2)
1778 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1780 enum machine_mode vec_mode;
1781 enum insn_code icode1, icode2;
1782 optab optab1, optab2;
1783 tree expr = TREE_OPERAND (stmt, 1);
1784 tree type = TREE_TYPE (expr);
1785 tree wide_vectype = get_vectype_for_scalar_type (type);
1786 enum tree_code c1, c2;
1788 /* The result of a vectorized widening operation usually requires two vectors
1789 (because the widened results do not fit int one vector). The generated
1790 vector results would normally be expected to be generated in the same
1791 order as in the original scalar computation. i.e. if 8 results are
1792 generated in each vector iteration, they are to be organized as follows:
1793 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
1795 However, in the special case that the result of the widening operation is
1796 used in a reduction copmutation only, the order doesn't matter (because
1797 when vectorizing a reduction we change the order of the computation).
1798 Some targets can take advatage of this and generate more efficient code.
1799 For example, targets like Altivec, that support widen_mult using a sequence
1800 of {mult_even,mult_odd} generate the following vectors:
1801 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8]. */
1803 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction)
1809 && code == WIDEN_MULT_EXPR
1810 && targetm.vectorize.builtin_mul_widen_even
1811 && targetm.vectorize.builtin_mul_widen_even (vectype)
1812 && targetm.vectorize.builtin_mul_widen_odd
1813 && targetm.vectorize.builtin_mul_widen_odd (vectype))
1815 if (vect_print_dump_info (REPORT_DETAILS))
1816 fprintf (vect_dump, "Unordered widening operation detected.");
1818 *code1 = *code2 = CALL_EXPR;
1819 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
1820 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
1826 case WIDEN_MULT_EXPR:
1827 if (BYTES_BIG_ENDIAN)
1829 c1 = VEC_WIDEN_MULT_HI_EXPR;
1830 c2 = VEC_WIDEN_MULT_LO_EXPR;
1834 c2 = VEC_WIDEN_MULT_HI_EXPR;
1835 c1 = VEC_WIDEN_MULT_LO_EXPR;
1840 if (BYTES_BIG_ENDIAN)
1842 c1 = VEC_UNPACK_HI_EXPR;
1843 c2 = VEC_UNPACK_LO_EXPR;
1847 c2 = VEC_UNPACK_HI_EXPR;
1848 c1 = VEC_UNPACK_LO_EXPR;
1858 optab1 = optab_for_tree_code (c1, vectype);
1859 optab2 = optab_for_tree_code (c2, vectype);
1861 if (!optab1 || !optab2)
1864 vec_mode = TYPE_MODE (vectype);
1865 if ((icode1 = optab1->handlers[(int) vec_mode].insn_code) == CODE_FOR_nothing
1866 || insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
1867 || (icode2 = optab2->handlers[(int) vec_mode].insn_code)
1869 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
1876 /* Function reduction_code_for_scalar_code
1879 CODE - tree_code of a reduction operations.
1882 REDUC_CODE - the corresponding tree-code to be used to reduce the
1883 vector of partial results into a single scalar result (which
1884 will also reside in a vector).
1886 Return TRUE if a corresponding REDUC_CODE was found, FALSE otherwise. */
1889 reduction_code_for_scalar_code (enum tree_code code,
1890 enum tree_code *reduc_code)
1895 *reduc_code = REDUC_MAX_EXPR;
1899 *reduc_code = REDUC_MIN_EXPR;
1903 *reduc_code = REDUC_PLUS_EXPR;
1912 /* Function vect_is_simple_reduction
1914 Detect a cross-iteration def-use cycle that represents a simple
1915 reduction computation. We look for the following pattern:
1920 a2 = operation (a3, a1)
1923 1. operation is commutative and associative and it is safe to
1924 change the order of the computation.
1925 2. no uses for a2 in the loop (a2 is used out of the loop)
1926 3. no uses of a1 in the loop besides the reduction operation.
1928 Condition 1 is tested here.
1929 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized. */
1932 vect_is_simple_reduction (struct loop *loop, tree phi)
1934 edge latch_e = loop_latch_edge (loop);
1935 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
1936 tree def_stmt, def1, def2;
1937 enum tree_code code;
1939 tree operation, op1, op2;
1942 if (TREE_CODE (loop_arg) != SSA_NAME)
1944 if (vect_print_dump_info (REPORT_DETAILS))
1946 fprintf (vect_dump, "reduction: not ssa_name: ");
1947 print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
1952 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
1955 if (vect_print_dump_info (REPORT_DETAILS))
1956 fprintf (vect_dump, "reduction: no def_stmt.");
1960 if (TREE_CODE (def_stmt) != MODIFY_EXPR)
1962 if (vect_print_dump_info (REPORT_DETAILS))
1964 print_generic_expr (vect_dump, def_stmt, TDF_SLIM);
1969 operation = TREE_OPERAND (def_stmt, 1);
1970 code = TREE_CODE (operation);
1971 if (!commutative_tree_code (code) || !associative_tree_code (code))
1973 if (vect_print_dump_info (REPORT_DETAILS))
1975 fprintf (vect_dump, "reduction: not commutative/associative: ");
1976 print_generic_expr (vect_dump, operation, TDF_SLIM);
1981 op_type = TREE_CODE_LENGTH (code);
1982 if (op_type != binary_op)
1984 if (vect_print_dump_info (REPORT_DETAILS))
1986 fprintf (vect_dump, "reduction: not binary operation: ");
1987 print_generic_expr (vect_dump, operation, TDF_SLIM);
1992 op1 = TREE_OPERAND (operation, 0);
1993 op2 = TREE_OPERAND (operation, 1);
1994 if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
1996 if (vect_print_dump_info (REPORT_DETAILS))
1998 fprintf (vect_dump, "reduction: uses not ssa_names: ");
1999 print_generic_expr (vect_dump, operation, TDF_SLIM);
2004 /* Check that it's ok to change the order of the computation. */
2005 type = TREE_TYPE (operation);
2006 if (TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op1))
2007 || TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op2)))
2009 if (vect_print_dump_info (REPORT_DETAILS))
2011 fprintf (vect_dump, "reduction: multiple types: operation type: ");
2012 print_generic_expr (vect_dump, type, TDF_SLIM);
2013 fprintf (vect_dump, ", operands types: ");
2014 print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
2015 fprintf (vect_dump, ",");
2016 print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
2021 /* CHECKME: check for !flag_finite_math_only too? */
2022 if (SCALAR_FLOAT_TYPE_P (type) && !flag_unsafe_math_optimizations)
2024 /* Changing the order of operations changes the semantics. */
2025 if (vect_print_dump_info (REPORT_DETAILS))
2027 fprintf (vect_dump, "reduction: unsafe fp math optimization: ");
2028 print_generic_expr (vect_dump, operation, TDF_SLIM);
2032 else if (INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type) && flag_trapv)
2034 /* Changing the order of operations changes the semantics. */
2035 if (vect_print_dump_info (REPORT_DETAILS))
2037 fprintf (vect_dump, "reduction: unsafe int math optimization: ");
2038 print_generic_expr (vect_dump, operation, TDF_SLIM);
2043 /* reduction is safe. we're dealing with one of the following:
2044 1) integer arithmetic and no trapv
2045 2) floating point arithmetic, and special flags permit this optimization.
2047 def1 = SSA_NAME_DEF_STMT (op1);
2048 def2 = SSA_NAME_DEF_STMT (op2);
2051 if (vect_print_dump_info (REPORT_DETAILS))
2053 fprintf (vect_dump, "reduction: no defs for operands: ");
2054 print_generic_expr (vect_dump, operation, TDF_SLIM);
2059 if (TREE_CODE (def1) == MODIFY_EXPR
2060 && flow_bb_inside_loop_p (loop, bb_for_stmt (def1))
2063 if (vect_print_dump_info (REPORT_DETAILS))
2065 fprintf (vect_dump, "detected reduction:");
2066 print_generic_expr (vect_dump, operation, TDF_SLIM);
2070 else if (TREE_CODE (def2) == MODIFY_EXPR
2071 && flow_bb_inside_loop_p (loop, bb_for_stmt (def2))
2074 /* Swap operands (just for simplicity - so that the rest of the code
2075 can assume that the reduction variable is always the last (second)
2077 if (vect_print_dump_info (REPORT_DETAILS))
2079 fprintf (vect_dump, "detected reduction: need to swap operands:");
2080 print_generic_expr (vect_dump, operation, TDF_SLIM);
2082 swap_tree_operands (def_stmt, &TREE_OPERAND (operation, 0),
2083 &TREE_OPERAND (operation, 1));
2088 if (vect_print_dump_info (REPORT_DETAILS))
2090 fprintf (vect_dump, "reduction: unknown pattern.");
2091 print_generic_expr (vect_dump, operation, TDF_SLIM);
2098 /* Function vect_is_simple_iv_evolution.
2100 FORNOW: A simple evolution of an induction variable in the loop is
2101 considered a polynomial evolution with constant step. */
2104 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
2110 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
2112 /* When there is no evolution in this loop, the evolution function
2114 if (evolution_part == NULL_TREE)
2117 /* When the evolution is a polynomial of degree >= 2
2118 the evolution function is not "simple". */
2119 if (tree_is_chrec (evolution_part))
2122 step_expr = evolution_part;
2123 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
2126 if (vect_print_dump_info (REPORT_DETAILS))
2128 fprintf (vect_dump, "step: ");
2129 print_generic_expr (vect_dump, step_expr, TDF_SLIM);
2130 fprintf (vect_dump, ", init: ");
2131 print_generic_expr (vect_dump, init_expr, TDF_SLIM);
2137 if (TREE_CODE (step_expr) != INTEGER_CST)
2139 if (vect_print_dump_info (REPORT_DETAILS))
2140 fprintf (vect_dump, "step unknown.");
2148 /* Function vectorize_loops.
2150 Entry Point to loop vectorization phase. */
2153 vectorize_loops (struct loops *loops)
2156 unsigned int num_vectorized_loops = 0;
2158 /* Fix the verbosity level if not defined explicitly by the user. */
2159 vect_set_dump_settings ();
2161 /* Allocate the bitmap that records which virtual variables that
2162 need to be renamed. */
2163 vect_vnames_to_rename = BITMAP_ALLOC (NULL);
2165 /* ----------- Analyze loops. ----------- */
2167 /* If some loop was duplicated, it gets bigger number
2168 than all previously defined loops. This fact allows us to run
2169 only over initial loops skipping newly generated ones. */
2170 vect_loops_num = loops->num;
2171 for (i = 1; i < vect_loops_num; i++)
2173 loop_vec_info loop_vinfo;
2174 struct loop *loop = loops->parray[i];
2179 vect_loop_location = find_loop_location (loop);
2180 loop_vinfo = vect_analyze_loop (loop);
2181 loop->aux = loop_vinfo;
2183 if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
2186 vect_transform_loop (loop_vinfo, loops);
2187 num_vectorized_loops++;
2189 vect_loop_location = UNKNOWN_LOC;
2191 if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
2192 fprintf (vect_dump, "vectorized %u loops in function.\n",
2193 num_vectorized_loops);
2195 /* ----------- Finalize. ----------- */
2197 BITMAP_FREE (vect_vnames_to_rename);
2199 for (i = 1; i < vect_loops_num; i++)
2201 struct loop *loop = loops->parray[i];
2202 loop_vec_info loop_vinfo;
2206 loop_vinfo = loop->aux;
2207 destroy_loop_vec_info (loop_vinfo);
2211 return num_vectorized_loops > 0 ? TODO_cleanup_cfg : 0;