/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "toplev.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);

  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      gimple pattern_stmt;

      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "already marked relevant/live.");
      return;
    }

  VEC_safe_push (gimple, heap, *worklist, stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT, in the loop that is represented by LOOP_VINFO, is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (other than the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form).  */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}
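
/* For instance, in a loop such as (arbitrary names):

         s = 0;
         for (i = 0; i < N; i++)
           {
             s += a[i];        <-- 's' is used after the loop: live.
             b[i] = x;         <-- alters memory (has a vdef): relevant.
           }
         ... = s;

   both statements in the body are caught by the checks above, whereas the
   increment of 'i' and the loop-exit test are not.  */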
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
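
/* To illustrate the distinction made above (arbitrary names):

         a[i_1] = x_2;         <-- the use of x_2 is a stored value, not an
                                   index, so x_2 has a non-indexing operand
                                   and its def-stmt stays relevant;
         y_3 = b[i_1];         <-- the only use of i_1 is for indexing 'b',
                                   so on account of this stmt the def of
                                   i_1 need not be vectorized.  */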
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
     which does not need to be directly vectorized, then the liveness/relevance
     of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "def_stmt is out of loop.");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}
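
/* A sketch of case 3 above (arbitrary names): in the nest

         for (i ...)              <-- outer-loop
           {
             d_4 = ...;           <-- def_stmt in the outer loop
             for (j ...)          <-- inner-loop
               ... = d_4;         <-- use in the inner loop
           }

   the relevance computed for the inner-loop use is translated to
   outer-loop terms before marking the def_stmt (case 3a); the mirror
   translation is applied when an inner-loop def is used by an
   outer-loop stmt (case 3b).  */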
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

static bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  VEC(gimple,heap) *worklist;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");

  worklist = VEC_alloc (gimple, heap, 64);

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: phi relevant? ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: stmt relevant? ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist */
  while (VEC_length (gimple, worklist) > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = VEC_pop (gimple, worklist);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "worklist: examine stmt: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines
         it (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
           live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
           relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a
         reduction variable; in this case we set the liveness/relevance
         as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
          case vect_reduction_def:
            switch (tmp_relevant)
              {
                case vect_unused_in_scope:
                  relevant = vect_used_by_reduction;
                  break;

                case vect_used_by_reduction:
                  if (gimple_code (stmt) == GIMPLE_PHI)
                    break;
                  /* fall through */

                default:
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "unsupported use of reduction.");

                  VEC_free (gimple, heap, worklist);
                  return false;
              }

            live_p = false;
            break;

          case vect_nested_cycle:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_in_outer_by_reduction
                && tmp_relevant != vect_used_in_outer)
              {
                if (vect_print_dump_info (REPORT_DETAILS))
                  fprintf (vect_dump, "unsupported use of nested cycle.");

                VEC_free (gimple, heap, worklist);
                return false;
              }

            live_p = false;
            break;

          case vect_double_reduction_def:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_by_reduction)
              {
                if (vect_print_dump_info (REPORT_DETAILS))
                  fprintf (vect_dump, "unsupported use of double reduction.");

                VEC_free (gimple, heap, worklist);
                return false;
              }

            live_p = false;
            break;

          default:
            break;
        }

      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
        {
          tree op = USE_FROM_PTR (use_p);
          if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
            {
              VEC_free (gimple, heap, worklist);
              return false;
            }
        }
    } /* while worklist */

  VEC_free (gimple, heap, worklist);
  return true;
}
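
/* For example, in a reduction such as (arbitrary names):

         for (i = 0; i < N; i++)
           sum += a[i] * b[i];

   the multiplication feeding the reduction is marked
   vect_used_by_reduction rather than vect_used_in_scope, recording that
   the order of its partial results need not be preserved.  */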
int
cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
  {
  case load_vec_info_type:
    return TARG_SCALAR_LOAD_COST;
  case store_vec_info_type:
    return TARG_SCALAR_STORE_COST;
  case op_vec_info_type:
  case condition_vec_info_type:
  case assignment_vec_info_type:
  case reduc_vec_info_type:
  case induc_vec_info_type:
  case type_promotion_vec_info_type:
  case type_demotion_vec_info_type:
  case type_conversion_vec_info_type:
  case call_vec_info_type:
    return TARG_SCALAR_STMT_COST;
  case undef_vec_info_type:
  default:
    gcc_unreachable ();
  }
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt, slp_tree slp_node)
{
  int i;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * TARG_VEC_STMT_COST;

  /* FORNOW: Assuming maximum 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        outside_cost += TARG_SCALAR_TO_VEC_COST;
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
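
/* As a worked example, assuming a target where TARG_VEC_STMT_COST and
   TARG_SCALAR_TO_VEC_COST are both 1: a stmt with ncopies == 2 and one
   constant or external operand gets inside_cost = 2 * 1 = 2 and
   outside_cost = 1, the latter for the single scalar-to-vector broadcast
   emitted before the loop.  */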
/* Function vect_cost_strided_group_size

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return DR_GROUP_SIZE (stmt_info);

  return 1;
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       enum vect_def_type dt, slp_tree slp_node)
{
  int group_size;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    outside_cost = TARG_SCALAR_TO_VEC_COST;

  /* Strided access?  */
  if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
    group_size = vect_cost_strided_group_size (stmt_info);
  /* Not a strided access.  */
  else
    group_size = 1;

  /* Is this an access in a group of stores, which provide strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2(group_size) * group_size
                    * TARG_VEC_STMT_COST;

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
                 group_size);
    }

  /* Costs of the stores.  */
  inside_cost += ncopies * TARG_VEC_STORE_COST;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
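
/* For example, a group of two interleaved stores (arbitrary names):

         for (i = 0; i < N; i++)
           {
             p[2*i]   = x;
             p[2*i+1] = y;
           }

   With group_size == 2 and ncopies == 1, the first store of the group is
   charged 1 * exact_log2 (2) * 2 == 2 interleave (high/low) stmts for the
   permutation, on top of the vector stores themselves.  */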
/* Function vect_model_load_cost

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
{
  int group_size;
  int alignment_support_scheme;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = DR_GROUP_FIRST_DR (stmt_info);
  if (first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr);

  /* Is this an access in a group of loads providing strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses an even and odd extract operation for each needed permute.  */
      inside_cost = ncopies * exact_log2(group_size) * group_size
                    * TARG_VEC_STMT_COST;

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
                 group_size);
    }

  /* The loads themselves.  */
  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        inside_cost += ncopies * TARG_VEC_LOAD_COST;

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: aligned.");

        break;
      }
    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned load.  */
        inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                   "hardware.");

        break;
      }
    case dr_explicit_realign:
      {
        inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           outside costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          inside_cost += TARG_VEC_STMT_COST;

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
                   "pipelined.");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide strided
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
          {
            outside_cost = 2*TARG_VEC_STMT_COST;
            if (targetm.vectorize.builtin_mask_for_load)
              outside_cost += TARG_VEC_STMT_COST;
          }

        inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);

        break;
      }

    default:
      gcc_unreachable ();
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
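
/* To summarize the dr_explicit_realign_optimized case with ncopies == 1:
   the loop body is charged one vector load plus one realignment stmt,
   while the address computation, the initial "priming" load, and (when the
   target provides builtin_mask_for_load) the mask computation are charged
   to the costs outside the loop.  */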
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at BSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
                  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  edge pe;
  tree new_temp;
  basic_block new_bb;

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
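
/* For example, when splatting the constant 3 for a V4SI vectype, the stmt
   created here looks like (the SSA name is made up by the compiler):

         vect_cst_.5 = { 3, 3, 3, 3 };

   placed in the loop preheader (or at GSI, when given), and the returned
   def vect_cst_.5 is then usable as a vector operand inside the loop.  */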
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will
   be used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector
   def needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree vec_inv;
  tree vec_cst;
  tree t = NULL_TREE;
  tree def;
  int i;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
      print_generic_expr (vect_dump, op, TDF_SLIM);
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
                                      &dt);
  gcc_assert (is_simple_use);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      if (def)
        {
          fprintf (vect_dump, "def = ");
          print_generic_expr (vect_dump, def, TDF_SLIM);
        }
      if (def_stmt)
        {
          fprintf (vect_dump, " def_stmt = ");
          print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
        }
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
        gcc_assert (vector_type);

        if (scalar_def)
          *scalar_def = op;

        /* Create 'vect_cst_ = {cst,cst,...,cst}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, op, t);
          }
        vec_cst = build_vector (vector_type, t);
        return vect_init_vector (stmt, vec_cst, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = def;

        /* Create 'vec_inv = {inv,inv,..,inv}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_inv.");

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, def, t);
          }

        /* FIXME: use build_constructor directly.  */
        vec_inv = build_constructor_from_list (vector_type, t);
        return vect_init_vector (stmt, vec_inv, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
        if (scalar_def)
          *scalar_def = NULL/* FIXME tuples: def_stmt*/;

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
        struct loop *loop;

        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
        loop = (gimple_bb (def_stmt))->loop_father;

        /* Get the def before the loop.  */
        op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
        return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
        vec_oprnd = PHI_RESULT (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT
   field of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of
   the vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:          STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0: vx.0 = memref0     VS1.1
                        VS1.1: vx.1 = memref1     VS1.2
                        VS1.2: vx.2 = memref2     VS1.3
                        VS1.3: vx.3 = memref3

   S2: z = x + ...      VSnew.0: vz0 = vx.0 + ... VSnew.1
                        VSnew.1: vz1 = vx.1 + ... VSnew.2
                        VSnew.2: vz2 = vx.2 + ... VSnew.3
                        VSnew.3: vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);

  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 VEC(tree,heap) **vec_oprnds0,
                                 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
   NULL.  */

static void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
        {
          *vec_oprnds1 = VEC_alloc (tree, heap, 1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
          VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
        }
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
                                                   bb_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
                                                        vectype_in);
}
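
/* vectorizable_call below handles loops such as (assuming the target
   advertises a vectorized version of sqrtf through
   builtin_vectorized_function):

         for (i = 0; i < N; i++)
           b[i] = sqrtf (a[i]);

   When the input and output vector types hold the same number of elements
   the call is vectorized one-to-one (modifier NONE); when the output vector
   holds twice as many elements (e.g. a double argument rounded to an int
   result), the call is a NARROW one and each vectorized call consumes two
   input vectors.  */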
/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than two arguments; we
     do not have interesting builtin functions to vectorize with
     more than two arguments.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 2)
    return false;

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument types differ.");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
                                 &def_stmt, &def, &dt[i], &opvectype))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument vector types differ.");
          return false;
        }
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");

      return false;
    }

  gcc_assert (!gimple_vuse (stmt));

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform operation.");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt, NULL);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs * 2);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt, NULL);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
              VEC_quick_push (tree, vargs, vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* Update the exception handling table with the vector stmt if necessary.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
    gimple_purge_dead_eh_edges (gimple_bb (stmt));

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  fold_convert (type, integer_zero_node));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple
vect_gen_widened_results_half (enum tree_code code,
                               tree decl,
                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
                               tree vec_dest, gimple_stmt_iterator *gsi,
                               gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
        vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
                                               vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
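
/* For a widening conversion such as (arbitrary names):

         short a[N];  int b[N];
         for (i = 0; i < N; i++)
           b[i] = (int) a[i];

   each vector of shorts yields two vectors of ints, so the helper above is
   invoked twice per input vector - once with the "low" code/decl and once
   with the "high" one (see vectorizable_conversion below).  */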
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
                         gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, j;
  tree rhs_type;
  tree integral_type;
  tree builtin_decl;
  enum { NARROW, NONE, WIDEN } modifier;
  int i;
  VEC(tree,heap) *vec_oprnds0 = NULL;
  tree vop0;
  VEC(tree,heap) *dummy = NULL;
  int dummy_int;

  /* Is STMT a vectorizable conversion?  */

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
    return false;

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);
  /* Check the operands of the operation.  */
  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }
  /* If op0 is an external or constant def use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  gcc_assert (vectype_in);

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  integral_type = INTEGRAL_TYPE_P (rhs_type) ? vectype_in : vectype_out;

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* FORNOW: SLP with multiple types is not supported.  The SLP analysis
     verifies this, so we can safely override NCOPIES with 1 here.  */
  if (slp_node)
    ncopies = 1;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  if ((modifier == NONE
       && !targetm.vectorize.builtin_conversion (code, integral_type))
      || (modifier == WIDEN
          && !supportable_widening_operation (code, stmt,
                                              vectype_out, vectype_in,
                                              &decl1, &decl2,
                                              &code1, &code2,
                                              &dummy_int, &dummy))
      || (modifier == NARROW
          && !supportable_narrowing_operation (code, vectype_out, vectype_in,
                                               &code1, &dummy_int, &dummy)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "conversion not supported by target.");
      return false;
    }

  if (modifier != NONE)
    {
      /* FORNOW: SLP not supported.  */
      if (STMT_SLP_TYPE (stmt_info))
        return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
      return true;
    }

  /** Transform.  **/
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform conversion.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  if (modifier == NONE && !slp_node)
    vec_oprnds0 = VEC_alloc (tree, heap, 1);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
          else
            vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

          builtin_decl =
            targetm.vectorize.builtin_conversion (code, integral_type);
          for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
            {
              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_call (builtin_decl, 1, vop0);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              if (slp_node)
                VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
                                new_stmt);
            }

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
          else
            vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);

          /* Generate first half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code1, decl1,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);

          /* Generate second half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code2, decl2,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (j == 0)
            {
              vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }

          /* Arguments are ready.  Create the new vector stmt.  */
          new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
                                                   vec_oprnd1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }

  if (vec_oprnds0)
    VEC_free (tree, heap, vec_oprnds0);

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
                         gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  VEC(tree,heap) *vec_oprnds = NULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  if (gimple_assign_single_p (stmt)
      || gimple_assign_rhs_code (stmt) == PAREN_EXPR)
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_assignment ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform assignment.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      if (j == 0)
        vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
        {
          new_stmt = gimple_build_assign (vec_dest, vop);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  VEC_free (tree, heap, vec_oprnds);
  return true;
}
1905 /* Function vectorizable_operation.
1907 Check if STMT performs a binary or unary operation that can be vectorized.
1908 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1909 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1910 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1913 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
1914 gimple *vec_stmt, slp_tree slp_node)
1918 tree op0, op1 = NULL;
1919 tree vec_oprnd1 = NULL_TREE;
1920 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1922 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1923 enum tree_code code;
1924 enum machine_mode vec_mode;
1929 enum machine_mode optab_op2_mode;
1932 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1933 gimple new_stmt = NULL;
1934 stmt_vec_info prev_stmt_info;
1940 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
1943 bool scalar_shift_arg = false;
1944 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1947 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1950 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1953 /* Is STMT a vectorizable binary/unary operation? */
1954 if (!is_gimple_assign (stmt))
1957 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1960 code = gimple_assign_rhs_code (stmt);
1962 /* For pointer addition, we should use the normal plus for
1963 the vector addition. */
1964 if (code == POINTER_PLUS_EXPR)
1967 /* Support only unary or binary operations. */
1968 op_type = TREE_CODE_LENGTH (code);
1969 if (op_type != unary_op && op_type != binary_op)
1971 if (vect_print_dump_info (REPORT_DETAILS))
1972 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
1976 scalar_dest = gimple_assign_lhs (stmt);
1977 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1979 op0 = gimple_assign_rhs1 (stmt);
1980 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
1981 &def_stmt, &def, &dt[0], &vectype))
1983 if (vect_print_dump_info (REPORT_DETAILS))
1984 fprintf (vect_dump, "use not simple.");
1987 /* If op0 is an external or constant def use a vector type with
1988 the same size as the output vector type. */
1990 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
1991 gcc_assert (vectype);
1993 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1994 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
1995 if (nunits_out != nunits_in)
1998 if (op_type == binary_op)
2000 op1 = gimple_assign_rhs2 (stmt);
2001 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2004 if (vect_print_dump_info (REPORT_DETAILS))
2005 fprintf (vect_dump, "use not simple.");
2011 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2015 /* Multiple types in SLP are handled by creating the appropriate number of
2016 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2021 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2023 gcc_assert (ncopies >= 1);
2025 /* If this is a shift/rotate, determine whether the shift amount is a vector,
2026 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2028 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2029 || code == RROTATE_EXPR)
2031 /* vector shifted by vector */
2032 if (dt[1] == vect_internal_def)
2034 optab = optab_for_tree_code (code, vectype, optab_vector);
2035 if (vect_print_dump_info (REPORT_DETAILS))
2036 fprintf (vect_dump, "vector/vector shift/rotate found.");
2039 /* See if the machine has a vector shifted by scalar insn and if not
2040 then see if it has a vector shifted by vector insn */
2041 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2043 optab = optab_for_tree_code (code, vectype, optab_scalar);
2045 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2046 != CODE_FOR_nothing))
2048 scalar_shift_arg = true;
2049 if (vect_print_dump_info (REPORT_DETAILS))
2050 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2054 optab = optab_for_tree_code (code, vectype, optab_vector);
2056 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2057 != CODE_FOR_nothing))
2059 if (vect_print_dump_info (REPORT_DETAILS))
2060 fprintf (vect_dump, "vector/vector shift/rotate found.");
2062 /* Unlike the other binary operators, shifts/rotates have
2063 the rhs being int, instead of the same type as the lhs,
2064 so make sure the scalar is the right type if we are
2065 dealing with vectors of short/char. */
2066 if (dt[1] == vect_constant_def)
2067 op1 = fold_convert (TREE_TYPE (vectype), op1);
2074 if (vect_print_dump_info (REPORT_DETAILS))
2075 fprintf (vect_dump, "operand mode requires invariant argument.");
2080 optab = optab_for_tree_code (code, vectype, optab_default);
  /* Supportable by target?  */
  if (!optab)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no optab.");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode)->insn_code;
  if (icode == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "op not supported by target.");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (vf < vect_min_worthwhile_factor (code)
              && !vec_stmt))
        return false;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "proceeding using word mode.");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not worthwhile without SIMD support.");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_operation ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform binary/unary operation.");
  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Allocate VECs for vector operands.  In case of SLP, vector operands are
     created in the previous stages of the recursion, so no allocation is
     needed, except for the case of shift with scalar shift argument.  In that
     case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
     be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
     In case of loop-based vectorization we allocate VECs of size 1.  We
     allocate VEC_OPRNDS1 only in case of binary operation.  */
  if (!slp_node)
    {
      vec_oprnds0 = VEC_alloc (tree, heap, 1);
      if (op_type == binary_op)
        vec_oprnds1 = VEC_alloc (tree, heap, 1);
    }
  else if (scalar_shift_arg)
    vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e -
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
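  /* A purely illustrative C sketch of the unrolled result above, written
     with GCC's generic vector extension (all names are made up for the
     example):

         typedef int v4si __attribute__ ((vector_size (16)));

         void
         add1 (v4si *dst, v4si *src)    -- VF=16, nunits=4
         {
           v4si v1 = {1, 1, 1, 1};
           dst[0] = src[0] + v1;        -- VS2_0
           dst[1] = src[1] + v1;        -- VS2_1
           dst[2] = src[2] + v1;        -- VS2_2
           dst[3] = src[3] + v1;        -- VS2_3
         }  */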
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (op_type == binary_op && scalar_shift_arg)
            {
              /* Vector shl and shr insn patterns can be defined with scalar
                 operand 2 (shift operand).  In this case, use constant or loop
                 invariant op1 directly, without extending it to vector mode
                 first.  */
              optab_op2_mode = insn_data[icode].operand[2].mode;
              if (!VECTOR_MODE_P (optab_op2_mode))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "operand 1 using scalar mode.");
                  vec_oprnd1 = op1;
                  VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
                  if (slp_node)
                    {
                      /* Store vec_oprnd1 for every vector stmt to be created
                         for SLP_NODE.  We check during the analysis that all
                         the shift arguments are the same.
                         TODO: Allow different constants for different vector
                         stmts generated for an SLP instance.  */
                      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                        VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
                    }
                }
            }

          /* vec_oprnd1 is available if operand 1 should be of a scalar-type
             (a special case for certain kind of vector shifts); otherwise,
             operand 1 should be of a vector type (the usual case).  */
          if (op_type == binary_op && !vec_oprnd1)
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node);
          else
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node);
        }
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
        {
          vop1 = ((op_type == binary_op)
                  ? VEC_index (tree, vec_oprnds1, i) : NULL);
          new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  VEC_free (tree, heap, vec_oprnds0);
  if (vec_oprnds1)
    VEC_free (tree, heap, vec_oprnds1);

  return true;
}
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
                          VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  VEC_quick_push (tree, *vec_oprnds, vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds, vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
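/* Purely illustrative note: for a two-step conversion the caller passes
   MULTI_STEP_CVT == 1 here, so the recursion runs twice and collects four
   defs, e.g.

       VEC_OPRNDS = { vx0, vx1, vx2, vx3 }

   where vx0 comes from vect_get_vec_def_for_operand and vx1..vx3 are
   successive copies from vect_get_vec_def_for_stmt_copy.  */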
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

static void
vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
                                       int multi_step_cvt, gimple stmt,
                                       VEC (tree, heap) *vec_dsts,
                                       gimple_stmt_iterator *gsi,
                                       slp_tree slp_node, enum tree_code code,
                                       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = VEC_pop (tree, vec_dsts);

  for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = VEC_index (tree, *vec_oprnds, i);
      vop1 = VEC_index (tree, *vec_oprnds, i + 1);
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
        /* Store the resulting vector for next recursive call.  */
        VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
      else
        {
          /* This is the last step of the conversion sequence.  Store the
             vectors in SLP_NODE or in vector info of the scalar statement
             (or in STMT_VINFO_RELATED_STMT chain).  */
          if (slp_node)
            VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
          else
            {
              if (!*prev_stmt_info)
                STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

              *prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
         previous level.  */
      VEC_truncate (tree, *vec_oprnds, (i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
                                             stmt, vec_dsts, gsi, slp_node,
                                             code, prev_stmt_info);
    }
}
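/* Purely illustrative sketch of a two-step demotion from int to char
   through short, with 128-bit vectors ("pack" stands for the target's
   narrowing operation, e.g. VEC_PACK_TRUNC_EXPR):

       t0 = pack (vint0, vint1);     -- 2 x 4 ints   -> 8 shorts
       t1 = pack (vint2, vint3);
       r  = pack (t0, t1);           -- 2 x 8 shorts -> 16 chars

   Each step halves the number of vectors, which is why a single result
   vector consumes vect_pow2 (multi_step_cvt) * 2 input vector defs.  */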
/* Function vectorizable_type_demotion

   Check if STMT performs a binary or unary operation that involves
   type demotion, and if it can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
                            gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  tree vectype_in;
  int multi_step_cvt = 0;
  VEC (tree, heap) *vec_oprnds0 = NULL;
  VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
  tree last_oprnd, intermediate_type;

  /* FORNOW: not supported by basic block SLP vectorization.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable type-demotion operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Check the operands of the operation.  */
  op0 = gimple_assign_rhs1 (stmt);
  if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
          && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
         || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
             && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
             && CONVERT_EXPR_CODE_P (code))))
    return false;

  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }
  /* If op0 is an external def use a vector type with the
     same size as the output vector type if possible.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    return false;

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in >= nunits_out)
    return false;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
                                        &code1, &multi_step_cvt, &interm_types))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_demotion ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
             ncopies);

  /* In case of multi-step demotion, we first generate demotion operations to
     the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate types (TYPES) received
     from supportable_narrowing_operation, and store them in the correct order
     for future use in vect_create_vectorized_demotion_stmts().  */
  if (multi_step_cvt)
    vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
  else
    vec_dsts = VEC_alloc (tree, heap, 1);

  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
  VEC_quick_push (tree, vec_dsts, vec_dest);

  if (multi_step_cvt)
    {
      for (i = VEC_length (tree, interm_types) - 1;
           VEC_iterate (tree, interm_types, i, intermediate_type); i--)
        {
          vec_dest = vect_create_destination_var (scalar_dest,
                                                  intermediate_type);
          VEC_quick_push (tree, vec_dsts, vec_dest);
        }
    }

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  */
  last_oprnd = op0;
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (slp_node)
        vect_get_slp_defs (slp_node, &vec_oprnds0, NULL);
      else
        {
          VEC_free (tree, heap, vec_oprnds0);
          vec_oprnds0 = VEC_alloc (tree, heap,
                        (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
          vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
                                    vect_pow2 (multi_step_cvt) - 1);
        }

      /* Arguments are ready.  Create the new vector stmts.  */
      tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
      vect_create_vectorized_demotion_stmts (&vec_oprnds0,
                                             multi_step_cvt, stmt,
                                             tmp_vec_dsts, gsi, slp_node,
                                             code1, &prev_stmt_info);
    }

  VEC_free (tree, heap, vec_oprnds0);
  VEC_free (tree, heap, vec_dsts);
  VEC_free (tree, heap, tmp_vec_dsts);
  VEC_free (tree, heap, interm_types);

  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
  return true;
}
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
                                        VEC (tree, heap) **vec_oprnds1,
                                        int multi_step_cvt, gimple stmt,
                                        VEC (tree, heap) *vec_dsts,
                                        gimple_stmt_iterator *gsi,
                                        slp_tree slp_node, enum tree_code code1,
                                        enum tree_code code2, tree decl1,
                                        tree decl2, int op_type,
                                        stmt_vec_info *prev_stmt_info)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
  gimple new_stmt1, new_stmt2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  VEC (tree, heap) *vec_tmp;

  vec_dest = VEC_pop (tree, vec_dsts);
  vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);

  for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
    {
      if (op_type == binary_op)
        vop1 = VEC_index (tree, *vec_oprnds1, i);
      else
        vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
        {
          new_tmp1 = gimple_call_lhs (new_stmt1);
          new_tmp2 = gimple_call_lhs (new_stmt2);
        }
      else
        {
          new_tmp1 = gimple_assign_lhs (new_stmt1);
          new_tmp2 = gimple_assign_lhs (new_stmt2);
        }

      if (multi_step_cvt)
        {
          /* Store the results for the recursive call.  */
          VEC_quick_push (tree, vec_tmp, new_tmp1);
          VEC_quick_push (tree, vec_tmp, new_tmp2);
        }
      else
        {
          /* Last step of promotion sequence - store the results.  */
          if (slp_node)
            {
              VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
              VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
            }
          else
            {
              if (!*prev_stmt_info)
                STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
              else
                STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;

              *prev_stmt_info = vinfo_for_stmt (new_stmt1);
              STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
              *prev_stmt_info = vinfo_for_stmt (new_stmt2);
            }
        }
    }

  if (multi_step_cvt)
    {
      /* For a multi-step promotion operation we call the function
         recursively for every stage.  We start from the input type,
         create promotion operations to the intermediate types, and then
         create promotions to the output type.  */
      *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
      VEC_free (tree, heap, vec_tmp);
      vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
                                              multi_step_cvt - 1, stmt,
                                              vec_dsts, gsi, slp_node, code1,
                                              code2, decl1, decl2, op_type,
                                              prev_stmt_info);
    }
}
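/* Purely illustrative sketch of a one-step promotion from short to int
   with 128-bit vectors ("unpack_lo/hi" stand for the target's widening
   operations, e.g. VEC_UNPACK_LO_EXPR / VEC_UNPACK_HI_EXPR):

       vlo = unpack_lo (vshort);     -- low  4 of 8 shorts -> 4 ints
       vhi = unpack_hi (vshort);     -- high 4 of 8 shorts -> 4 ints

   Every input vector thus yields two vector stmts (the "two halves"
   above); a multi-step promotion repeats this once per intermediate
   type.  */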
/* Function vectorizable_type_promotion

   Check if STMT performs a binary or unary operation that involves
   type promotion, and if it can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
                             gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd0 = NULL, vec_oprnd1 = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  int op_type;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  tree vectype_in;
  tree intermediate_type = NULL_TREE;
  int multi_step_cvt = 0;
  VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
  VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;

  /* FORNOW: not supported by basic block SLP vectorization.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable type-promotion operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != WIDEN_MULT_EXPR)
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Check the operands of the operation.  */
  op0 = gimple_assign_rhs1 (stmt);
  if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
          && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
         || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
             && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
             && CONVERT_EXPR_CODE_P (code))))
    return false;

  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    return false;

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in <= nunits_out)
    return false;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  op_type = TREE_CODE_LENGTH (code);
  if (op_type == binary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }
    }

  /* Supportable by target?  */
  if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
                                       &decl1, &decl2, &code1, &code2,
                                       &multi_step_cvt, &interm_types))
    return false;

  /* Binary widening operation can only be supported directly by the
     architecture.  */
  gcc_assert (!(multi_step_cvt && op_type == binary_op));

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_promotion ===");
      vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
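      /* Note: the cost uses 2*ncopies because each widening operation
         produces two half-width results (a high part and a low part) per
         input vector, so twice as many vector stmts are generated as for
         a regular operation.  */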
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
             ncopies);

  /* Handle def.  */
  /* In case of multi-step promotion, we first generate promotion operations
     to the intermediate types, and then from those types to the final one.
     We store vector destinations in VEC_DSTS in the correct order for
     recursive creation of promotion operations in
     vect_create_vectorized_promotion_stmts().  Vector destinations are
     created according to TYPES received from
     supportable_widening_operation().  */
  if (multi_step_cvt)
    vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
  else
    vec_dsts = VEC_alloc (tree, heap, 1);

  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
  VEC_quick_push (tree, vec_dsts, vec_dest);

  if (multi_step_cvt)
    {
      for (i = VEC_length (tree, interm_types) - 1;
           VEC_iterate (tree, interm_types, i, intermediate_type); i--)
        {
          vec_dest = vect_create_destination_var (scalar_dest,
                                                  intermediate_type);
          VEC_quick_push (tree, vec_dsts, vec_dest);
        }
    }

  if (!slp_node)
    {
      vec_oprnds0 = VEC_alloc (tree, heap,
                               (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
      if (op_type == binary_op)
        vec_oprnds1 = VEC_alloc (tree, heap, 1);
    }

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (slp_node)
            vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1);
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
              VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
              if (op_type == binary_op)
                {
                  vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
                  VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
                }
            }
        }
      else
        {
          vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
          VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
          if (op_type == binary_op)
            {
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
              VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
            }
        }

      /* Arguments are ready.  Create the new vector stmts.  */
      tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
      vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
                                              multi_step_cvt, stmt,
                                              tmp_vec_dsts,
                                              gsi, slp_node, code1, code2,
                                              decl1, decl2, op_type,
                                              &prev_stmt_info);
    }

  VEC_free (tree, heap, vec_dsts);
  VEC_free (tree, heap, tmp_vec_dsts);
  VEC_free (tree, heap, interm_types);
  VEC_free (tree, heap, vec_oprnds0);
  VEC_free (tree, heap, vec_oprnds1);

  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
  return true;
}
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  enum machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  gimple ptr_incr = NULL;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int j;
  gimple next_stmt, first_stmt = NULL;
  bool strided_store = false;
  unsigned int group_size, i;
  VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
  gimple new_stmt;
  bool inv_p;
  VEC(tree,heap) *vec_oprnds = NULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable store?  */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));
  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  /* The scalar rhs type needs to be trivially convertible to the vector
     component type.  This should always be the case.  */
  if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "??? operands of different types");
      return false;
    }

  vec_mode = TYPE_MODE (vectype);
  /* FORNOW.  In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      strided_store = true;
      first_stmt = DR_GROUP_FIRST_DR (stmt_info);
      if (!vect_strided_store_supported (vectype)
          && !PURE_SLP_STMT (stmt_info) && !slp)
        return false;

      if (first_stmt == stmt)
        {
          /* STMT is the leader of the group.  Check the operands of all the
             stmts of the group.  */
          next_stmt = DR_GROUP_NEXT_DR (stmt_info);
          while (next_stmt)
            {
              gcc_assert (gimple_assign_single_p (next_stmt));
              op = gimple_assign_rhs1 (next_stmt);
              if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
                                       &def, &dt))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "use not simple.");
                  return false;
                }
              next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
            }
        }
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      vect_model_store_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (strided_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));

      DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
         reach the last stmt in the group.  */
      if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
          < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
          && !slp)
        {
          *vec_stmt = NULL;
          return true;
        }

      if (slp)
        strided_store = false;

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      else
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform store. ncopies = %d", ncopies);

  dr_chain = VEC_alloc (tree, heap, group_size);
  oprnds = VEC_alloc (tree, heap, group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
  gcc_assert (alignment_support_scheme);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */
  /* In case of interleaving (non-unit strided access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
        VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.  */
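  /* Purely illustrative scalar example of an interleaved store with
     group_size == 2:

         for (i = 0; i < n; i++)
           {
             a[2*i]     = x[i];
             a[2*i + 1] = y[i];
           }

     Here vect_permute_store_chain interleaves a vector of x values with a
     vector of y values (via interleave high/low) so that consecutive
     memory locations receive x0 y0 x1 y1 ... just as the scalar loop
     does.  */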
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (slp)
            {
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_slp_defs (slp_node, &vec_oprnds, NULL);

              vec_oprnd = VEC_index (tree, vec_oprnds, 0);
            }
          else
            {
              /* For interleaved stores we collect vectorized defs for all the
                 stores in the group in DR_CHAIN and OPRNDS.  DR_CHAIN is then
                 used as an input to vect_permute_store_chain(), and OPRNDS as
                 an input to vect_get_vec_def_for_stmt_copy() for the next
                 copy.

                 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN
                 and OPRNDS are of size 1.  */
              next_stmt = first_stmt;
              for (i = 0; i < group_size; i++)
                {
                  /* Since gaps are not supported for interleaved stores,
                     GROUP_SIZE is the exact number of stmts in the chain.
                     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
                     there is no interleaving, GROUP_SIZE is 1, and only one
                     iteration of the loop will be executed.  */
                  gcc_assert (next_stmt
                              && gimple_assign_single_p (next_stmt));
                  op = gimple_assign_rhs1 (next_stmt);

                  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
                                                            NULL);
                  VEC_quick_push (tree, dr_chain, vec_oprnd);
                  VEC_quick_push (tree, oprnds, vec_oprnd);
                  next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
                }
            }

          /* We should have caught mismatched types earlier.  */
          gcc_assert (useless_type_conversion_p (vectype,
                                                 TREE_TYPE (vec_oprnd)));
          dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
                                                  &dummy, &ptr_incr, false,
                                                  &inv_p);
          gcc_assert (bb_vinfo || !inv_p);
        }
      else
        {
          /* For interleaved stores we created vectorized defs for all the
             defs stored in OPRNDS in the previous iteration (previous copy).
             DR_CHAIN is then used as an input to vect_permute_store_chain(),
             and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for
             the next copy.
             If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
          for (i = 0; i < group_size; i++)
            {
              op = VEC_index (tree, oprnds, i);
              vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
                                  &dt);
              vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
              VEC_replace (tree, dr_chain, i, vec_oprnd);
              VEC_replace (tree, oprnds, i, vec_oprnd);
            }
          dataref_ptr =
            bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
        }

      if (strided_store)
        {
          result_chain = VEC_alloc (tree, heap, group_size);
          /* Permute.  */
          if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
                                         &result_chain))
            return false;
        }

      next_stmt = first_stmt;
      for (i = 0; i < vec_num; i++)
        {
          if (i > 0)
            /* Bump the vector pointer.  */
            dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                           NULL_TREE);
          if (slp)
            vec_oprnd = VEC_index (tree, vec_oprnds, i);
          else if (strided_store)
            /* For strided stores vectorized defs are interleaved in
               vect_permute_store_chain().  */
            vec_oprnd = VEC_index (tree, result_chain, i);

          if (aligned_access_p (first_dr))
            data_ref = build_fold_indirect_ref (dataref_ptr);
          else
            {
              int mis = DR_MISALIGNMENT (first_dr);
              tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
              tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
              data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype,
                                 dataref_ptr, tmis);
            }

          /* If accesses through a pointer to vectype do not alias the original
             memory reference we have a problem.  This should never happen.  */
          gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
                      get_alias_set (gimple_assign_lhs (stmt))));

          /* Arguments are ready.  Create the new vector stmt.  */
          new_stmt = gimple_build_assign (data_ref, vec_oprnd);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (slp)
            continue;

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
          next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
          if (!next_stmt)
            break;
        }
    }

  VEC_free (tree, heap, dr_chain);
  VEC_free (tree, heap, oprnds);
  if (result_chain)
    VEC_free (tree, heap, result_chain);

  return true;
}
/* vectorizable_load.

   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                   slp_tree slp_node, slp_instance slp_node_instance)
{
  tree scalar_dest;
  tree vec_dest = NULL;
  tree data_ref = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  bool nested_in_vect_loop = false;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree new_temp;
  int mode;
  gimple new_stmt = NULL;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree dataref_ptr = NULL_TREE;
  gimple ptr_incr;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j, group_size;
  tree msq = NULL_TREE, lsq;
  tree offset = NULL_TREE;
  tree realignment_token = NULL_TREE;
  gimple phi = NULL;
  VEC(tree,heap) *dr_chain = NULL;
  bool strided_load = false;
  gimple first_stmt;
  tree scalar_type;
  bool inv_p;
  bool compute_in_loop = false;
  struct loop *at_loop;
  int vec_num;
  bool slp = (slp_node != NULL);
  bool slp_perm = false;
  enum tree_code code;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable load?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != ARRAY_REF
      && code != INDIRECT_REF
      && code != COMPONENT_REF
      && code != IMAGPART_EXPR
      && code != REALPART_EXPR)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  scalar_type = TREE_TYPE (DR_REF (dr));
  mode = (int) TYPE_MODE (vectype);

  /* FORNOW.  In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Aligned load, but unsupported type.");
      return false;
    }

  /* The vector component type needs to be trivially convertible to the
     scalar lhs.  This should always be the case.  */
  if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "??? operands of different types");
      return false;
    }

  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      strided_load = true;
      /* FORNOW */
      gcc_assert (! nested_in_vect_loop);

      /* Check if interleaving is supported.  */
      if (!vect_strided_load_supported (vectype)
          && !PURE_SLP_STMT (stmt_info) && !slp)
        return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      vect_model_load_cost (stmt_info, ncopies, NULL);
      return true;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform load.");

  /** Transform.  **/

  if (strided_load)
    {
      first_stmt = DR_GROUP_FIRST_DR (stmt_info);
      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          strided_load = false;
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
            slp_perm = true;
        }
      else
        vec_num = group_size;

      dr_chain = VEC_alloc (tree, heap, vec_num);
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
  gcc_assert (alignment_support_scheme);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to
3428 from one copy of the vector stmt to