/* SLP - Basic Block Vectorization
   Copyright (C) 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "tree-vectorizer.h"
/* Extract the location of the basic block in the source code.
   Return the basic block location if a statement with a location is
   found, and UNKNOWN_LOC if not.  */

LOC
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOC;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOC)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOC;
}
/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  if (!node)
    return;

  if (SLP_TREE_LEFT (node))
    vect_free_slp_tree (SLP_TREE_LEFT (node));

  if (SLP_TREE_RIGHT (node))
    vect_free_slp_tree (SLP_TREE_RIGHT (node));

  if (SLP_TREE_SCALAR_STMTS (node))
    VEC_free (gimple, heap, SLP_TREE_SCALAR_STMTS (node));

  if (SLP_TREE_VEC_STMTS (node))
    VEC_free (gimple, heap, SLP_TREE_VEC_STMTS (node));

  free (node);
}
/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (instance));
  VEC_free (slp_tree, heap, SLP_INSTANCE_LOADS (instance));
  free (instance);
}
/* Get the defs for the rhs of STMT (collect them in DEF_STMTS0/1), check that
   they are of a legal type and that they match the defs of the first stmt of
   the SLP group (stored in FIRST_STMT_...).  */

static bool
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             slp_tree slp_node, gimple stmt,
                             VEC (gimple, heap) **def_stmts0,
                             VEC (gimple, heap) **def_stmts1,
                             enum vect_def_type *first_stmt_dt0,
                             enum vect_def_type *first_stmt_dt1,
                             tree *first_stmt_def0_type,
                             tree *first_stmt_def1_type,
                             tree *first_stmt_const_oprnd,
                             int ncopies_for_cost,
                             bool *pattern0, bool *pattern1)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  stmt_vec_info stmt_info =
    vinfo_for_stmt (VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0));
  enum gimple_rhs_class rhs_class;
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  rhs_class = get_gimple_rhs_class (gimple_assign_rhs_code (stmt));
  number_of_oprnds = gimple_num_ops (stmt) - 1;  /* RHS only */

  for (i = 0; i < number_of_oprnds; i++)
    {
      oprnd = gimple_op (stmt, i + 1);

      if (!vect_is_simple_use (oprnd, loop_vinfo, bb_vinfo, &def_stmt, &def,
                               &dt[i])
          || (!def_stmt && dt[i] != vect_constant_def))
        {
          if (vect_print_dump_info (REPORT_SLP))
            {
              fprintf (vect_dump, "Build SLP failed: can't find def for ");
              print_generic_expr (vect_dump, oprnd, TDF_SLIM);
            }

          return false;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (loop && def_stmt && gimple_bb (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt)))
        {
          if (!*first_stmt_dt0)
            *pattern0 = true;
          else
            {
              if (i == 1 && !*first_stmt_dt1)
                *pattern1 = true;
              else if ((i == 0 && !*pattern0) || (i == 1 && !*pattern1))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    {
                      fprintf (vect_dump, "Build SLP failed: some of the stmts"
                               " are in a pattern, and others are not ");
                      print_generic_expr (vect_dump, oprnd, TDF_SLIM);
                    }

                  return false;
                }
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt[i] = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (*dt == vect_unknown_def_type)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "Unsupported pattern.");
              return false;
            }

          switch (gimple_code (def_stmt))
            {
              case GIMPLE_PHI:
                def = gimple_phi_result (def_stmt);
                break;

              case GIMPLE_ASSIGN:
                def = gimple_assign_lhs (def_stmt);
                break;

              default:
                if (vect_print_dump_info (REPORT_DETAILS))
                  fprintf (vect_dump, "unsupported defining stmt: ");
                return false;
            }
        }

      if (!*first_stmt_dt0)
        {
          /* op0 of the first stmt of the group - store its info.  */
          *first_stmt_dt0 = dt[i];
          if (def)
            *first_stmt_def0_type = TREE_TYPE (def);
          else
            *first_stmt_const_oprnd = oprnd;

          /* Analyze costs (for the first stmt of the group only).  */
          if (rhs_class != GIMPLE_SINGLE_RHS)
            /* Not memory operation (we don't call this function for loads).  */
            vect_model_simple_cost (stmt_info, ncopies_for_cost, dt, slp_node);
          else
            /* Store.  */
            vect_model_store_cost (stmt_info, ncopies_for_cost, dt[0], slp_node);
        }
      else
        {
          if (!*first_stmt_dt1 && i == 1)
            {
              /* op1 of the first stmt of the group - store its info.  */
              *first_stmt_dt1 = dt[i];
              if (def)
                *first_stmt_def1_type = TREE_TYPE (def);
              else
                {
                  /* We assume that the stmt contains only one constant
                     operand.  We fail otherwise, to be on the safe side.  */
                  if (*first_stmt_const_oprnd)
                    {
                      if (vect_print_dump_info (REPORT_SLP))
                        fprintf (vect_dump, "Build SLP failed: two constant "
                                            "oprnds in stmt");
                      return false;
                    }

                  *first_stmt_const_oprnd = oprnd;
                }
            }
          else
            {
              /* Not first stmt of the group, check that the def-stmt/s match
                 the def-stmt/s of the first stmt.  */
              if ((i == 0
                   && (*first_stmt_dt0 != dt[i]
                       || (*first_stmt_def0_type && def
                           && !types_compatible_p (*first_stmt_def0_type,
                                                   TREE_TYPE (def)))))
                  || (i == 1
                      && (*first_stmt_dt1 != dt[i]
                          || (*first_stmt_def1_type && def
                              && !types_compatible_p (*first_stmt_def1_type,
                                                      TREE_TYPE (def)))))
                  || (!def
                      && !types_compatible_p (TREE_TYPE (*first_stmt_const_oprnd),
                                              TREE_TYPE (oprnd))))
                {
                  if (vect_print_dump_info (REPORT_SLP))
                    fprintf (vect_dump, "Build SLP failed: different types ");

                  return false;
                }
            }
        }

      /* Check the types of the definitions.  */
      switch (dt[i])
        {
        case vect_constant_def:
        case vect_external_def:
          break;

        case vect_internal_def:
        case vect_reduction_def:
          if (i == 0)
            VEC_safe_push (gimple, heap, *def_stmts0, def_stmt);
          else
            VEC_safe_push (gimple, heap, *def_stmts1, def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (vect_print_dump_info (REPORT_SLP))
            {
              fprintf (vect_dump, "Build SLP failed: illegal type of def ");
              print_generic_expr (vect_dump, def, TDF_SLIM);
            }

          return false;
        }
    }

  return true;
}
/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation or are of unsupported types of operation.  Otherwise, return
   TRUE.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     int *inside_cost, int *outside_cost,
                     int ncopies_for_cost, unsigned int *max_nunits,
                     VEC (int, heap) **load_permutation,
                     VEC (slp_tree, heap) **loads,
                     unsigned int vectorization_factor)
{
  VEC (gimple, heap) *def_stmts0 = VEC_alloc (gimple, heap, group_size);
  VEC (gimple, heap) *def_stmts1 = VEC_alloc (gimple, heap, group_size);
  unsigned int i;
  VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (*node);
  gimple stmt = VEC_index (gimple, stmts, 0);
  enum vect_def_type first_stmt_dt0 = vect_uninitialized_def;
  enum vect_def_type first_stmt_dt1 = vect_uninitialized_def;
  enum tree_code first_stmt_code = ERROR_MARK, rhs_code = ERROR_MARK;
  tree first_stmt_def1_type = NULL_TREE, first_stmt_def0_type = NULL_TREE;
  tree lhs;
  bool stop_recursion = false, need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  unsigned int ncopies;
  optab optab;
  int icode;
  enum machine_mode optab_op2_mode;
  enum machine_mode vec_mode;
  tree first_stmt_const_oprnd = NULL_TREE;
  struct data_reference *first_dr;
  bool pattern0 = false, pattern1 = false;
  HOST_WIDE_INT dummy;
  bool permutation = false;
  unsigned int load_place;
  gimple first_load, prev_first_load = NULL;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (gimple, stmts, i, stmt)
    {
      if (vect_print_dump_info (REPORT_SLP))
        {
          fprintf (vect_dump, "Build SLP for ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (vect_print_dump_info (REPORT_SLP))
            {
              fprintf (vect_dump,
                       "Build SLP failed: unvectorizable statement ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (vect_print_dump_info (REPORT_SLP))
            {
              fprintf (vect_dump,
                       "Build SLP failed: not GIMPLE_ASSIGN nor GIMPLE_CALL");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (vect_print_dump_info (REPORT_SLP))
            {
              fprintf (vect_dump, "Build SLP failed: unsupported data-type ");
              print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
            }

          return false;
        }

      ncopies = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
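      /* For example, a vectorization factor of 8 with a 4-element vector
         type gives NCOPIES == 2, i.e., more than one vector type is
         involved ("multiple types").  */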
      if (ncopies != 1)
        {
          if (vect_print_dump_info (REPORT_SLP))
            fprintf (vect_dump, "SLP with multiple types ");

          /* FORNOW: multiple types are unsupported in BB SLP.  */
          if (bb_vinfo)
            return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);

      if (is_gimple_call (stmt))
        rhs_code = CALL_EXPR;
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (vect_print_dump_info (REPORT_SLP))
                        fprintf (vect_dump, "Build SLP failed: no optab.");
                      return false;
                    }

                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (vect_print_dump_info (REPORT_SLP))
                        fprintf (vect_dump, "Build SLP failed: "
                                            "op not supported by target.");
                      return false;
                    }

                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              && !(STMT_VINFO_STRIDED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (vect_print_dump_info (REPORT_SLP))
                {
                  fprintf (vect_dump,
                           "Build SLP failed: different operation in stmt ");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }

              return false;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (vect_print_dump_info (REPORT_SLP))
                {
                  fprintf (vect_dump,
                           "Build SLP failed: different shift arguments in ");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }

              return false;
            }
        }

      /* Strided store or load.  */
      if (STMT_VINFO_STRIDED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo, *node,
                                                stmt, &def_stmts0, &def_stmts1,
                                                &first_stmt_dt0,
                                                &first_stmt_dt1,
                                                &first_stmt_def0_type,
                                                &first_stmt_def1_type,
                                                &first_stmt_const_oprnd,
                                                ncopies_for_cost,
                                                &pattern0, &pattern1))
                return false;
            }
          else
            {
              /* Load.  */
              /* FORNOW: Check that there is no gap between the loads.  */
              if ((DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) == stmt
                   && DR_GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                  || (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) != stmt
                      && DR_GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (vect_print_dump_info (REPORT_SLP))
                    {
                      fprintf (vect_dump, "Build SLP failed: strided "
                                          "loads have gaps ");
                      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                    }

                  return false;
                }

              /* Check that the size of interleaved loads group is not
                 greater than the SLP group size.  */
              if (DR_GROUP_SIZE (vinfo_for_stmt (stmt)) > ncopies * group_size)
                {
                  if (vect_print_dump_info (REPORT_SLP))
                    {
                      fprintf (vect_dump, "Build SLP failed: the number of "
                                          "interleaved loads is greater than"
                                          " the SLP group size ");
                      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                    }

                  return false;
                }

              first_load = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  The only exception is complex
                     numbers.  */
                  if (prev_first_load != first_load
                      && rhs_code != REALPART_EXPR
                      && rhs_code != IMAGPART_EXPR)
                    {
                      if (vect_print_dump_info (REPORT_SLP))
                        {
                          fprintf (vect_dump, "Build SLP failed: different "
                                              "interleaving chains in one node ");
                          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                        }

                      return false;
                    }
                }
              else
                prev_first_load = first_load;

              if (first_load == stmt)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (vect_print_dump_info (REPORT_SLP))
                        {
                          fprintf (vect_dump, "Build SLP failed: unsupported "
                                              "unaligned load ");
                          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                        }

                      return false;
                    }

                  /* Analyze costs (for the first stmt in the group).  */
                  vect_model_load_cost (vinfo_for_stmt (stmt),
                                        ncopies_for_cost, *node);
                }

              /* Store the place of this load in the interleaving chain.  In
                 case that permutation is needed we later decide if a specific
                 permutation is supported.  */
              load_place = vect_get_place_in_interleaving_chain (stmt,
                                                                 first_load);
              if (load_place != i)
                permutation = true;

              VEC_safe_push (int, heap, *load_permutation, load_place);
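              /* E.g., a node whose two loads access a[1] and a[0] records
                 places {1, 0}: each place is the load's distance from
                 FIRST_LOAD in its interleaving chain, and a place that
                 differs from the load's index in the node means a
                 permutation is needed.  */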
              /* We stop the tree when we reach a group of loads.  */
              stop_recursion = true;
              continue;
            }
        } /* Strided access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not strided load.  */
              if (vect_print_dump_info (REPORT_SLP))
                {
                  fprintf (vect_dump, "Build SLP failed: not strided load ");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }

              /* FORNOW: Not strided loads are not supported.  */
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary)
            {
              if (vect_print_dump_info (REPORT_SLP))
                {
                  fprintf (vect_dump, "Build SLP failed: operation");
                  fprintf (vect_dump, " unsupported ");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }

              return false;
            }

          /* Find the def-stmts.  */
          if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo, *node, stmt,
                                            &def_stmts0, &def_stmts1,
                                            &first_stmt_dt0, &first_stmt_dt1,
                                            &first_stmt_def0_type,
                                            &first_stmt_def1_type,
                                            &first_stmt_const_oprnd,
                                            ncopies_for_cost,
                                            &pattern0, &pattern1))
            return false;
        }
    }

  /* Add the costs of the node to the overall instance costs.  */
  *inside_cost += SLP_TREE_INSIDE_OF_LOOP_COST (*node);
  *outside_cost += SLP_TREE_OUTSIDE_OF_LOOP_COST (*node);

  /* Strided loads were reached - stop the recursion.  */
  if (stop_recursion)
    {
      if (permutation)
        {
          VEC_safe_push (slp_tree, heap, *loads, *node);
          *inside_cost
            += targetm.vectorize.builtin_vectorization_cost (vec_perm, NULL, 0)
               * group_size;
        }
      else
        {
          /* We don't check here complex numbers chains, so we keep them in
             LOADS for further check in vect_supported_load_permutation_p.  */
          if (rhs_code == REALPART_EXPR || rhs_code == IMAGPART_EXPR)
            VEC_safe_push (slp_tree, heap, *loads, *node);
        }

      return true;
    }

  /* Create SLP_TREE nodes for the definition node/s.  */
  if (first_stmt_dt0 == vect_internal_def)
    {
      slp_tree left_node = XNEW (struct _slp_tree);
      SLP_TREE_SCALAR_STMTS (left_node) = def_stmts0;
      SLP_TREE_VEC_STMTS (left_node) = NULL;
      SLP_TREE_LEFT (left_node) = NULL;
      SLP_TREE_RIGHT (left_node) = NULL;
      SLP_TREE_OUTSIDE_OF_LOOP_COST (left_node) = 0;
      SLP_TREE_INSIDE_OF_LOOP_COST (left_node) = 0;
      if (!vect_build_slp_tree (loop_vinfo, bb_vinfo, &left_node, group_size,
                                inside_cost, outside_cost, ncopies_for_cost,
                                max_nunits, load_permutation, loads,
                                vectorization_factor))
        return false;

      SLP_TREE_LEFT (*node) = left_node;
    }

  if (first_stmt_dt1 == vect_internal_def)
    {
      slp_tree right_node = XNEW (struct _slp_tree);
      SLP_TREE_SCALAR_STMTS (right_node) = def_stmts1;
      SLP_TREE_VEC_STMTS (right_node) = NULL;
      SLP_TREE_LEFT (right_node) = NULL;
      SLP_TREE_RIGHT (right_node) = NULL;
      SLP_TREE_OUTSIDE_OF_LOOP_COST (right_node) = 0;
      SLP_TREE_INSIDE_OF_LOOP_COST (right_node) = 0;
      if (!vect_build_slp_tree (loop_vinfo, bb_vinfo, &right_node, group_size,
                                inside_cost, outside_cost, ncopies_for_cost,
                                max_nunits, load_permutation, loads,
                                vectorization_factor))
        return false;

      SLP_TREE_RIGHT (*node) = right_node;
    }

  return true;
}
/* Dump the SLP tree NODE.  */

static void
vect_print_slp_tree (slp_tree node)
{
  int i;
  gimple stmt;

  if (!node)
    return;

  fprintf (vect_dump, "node ");
  FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      fprintf (vect_dump, "\n\tstmt %d ", i);
      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
    }
  fprintf (vect_dump, "\n");

  vect_print_slp_tree (SLP_TREE_LEFT (node));
  vect_print_slp_tree (SLP_TREE_RIGHT (node));
}
/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */
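/* For example, vect_mark_slp_stmts (node, pure_slp, -1) marks every stmt in
   the tree as PURE_SLP, while a HYBRID call with J >= 0 re-marks only the
   stmt at index J in each node.  */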
static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  vect_mark_slp_stmts (SLP_TREE_LEFT (node), mark, j);
  vect_mark_slp_stmts (SLP_TREE_RIGHT (node), mark, j);
}
/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  vect_mark_slp_stmts_relevant (SLP_TREE_LEFT (node));
  vect_mark_slp_stmts_relevant (SLP_TREE_RIGHT (node));
}
/* Check if the permutation required by the SLP INSTANCE is supported.
   Reorganize the SLP nodes stored in SLP_INSTANCE_LOADS if needed.  */

static bool
vect_supported_slp_permutation_p (slp_instance instance)
{
  slp_tree node = VEC_index (slp_tree, SLP_INSTANCE_LOADS (instance), 0);
  gimple stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
  gimple first_load = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt));
  VEC (slp_tree, heap) *sorted_loads = NULL;
  int index;
  slp_tree *tmp_loads = NULL;
  int group_size = SLP_INSTANCE_GROUP_SIZE (instance), i, j;
  slp_tree load;

  /* FORNOW: The only supported loads permutation is loads from the same
     location in all the loads in the node, when the data-refs in
     nodes of LOADS constitute an interleaving chain.
     Sort the nodes according to the order of accesses in the chain.  */
  tmp_loads = (slp_tree *) xmalloc (sizeof (slp_tree) * group_size);
  for (i = 0, j = 0;
       VEC_iterate (int, SLP_INSTANCE_LOAD_PERMUTATION (instance), i, index)
       && VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (instance), j, load);
       i += group_size, j++)
    {
      gimple scalar_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (load), 0);
      /* Check that the loads are all in the same interleaving chain.  */
      if (DR_GROUP_FIRST_DR (vinfo_for_stmt (scalar_stmt)) != first_load)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "Build SLP failed: unsupported data "
                                  "permutation ");
              print_gimple_stmt (vect_dump, scalar_stmt, 0, TDF_SLIM);
            }

          free (tmp_loads);
          return false;
        }

      tmp_loads[index] = load;
    }

  sorted_loads = VEC_alloc (slp_tree, heap, group_size);
  for (i = 0; i < group_size; i++)
    VEC_safe_push (slp_tree, heap, sorted_loads, tmp_loads[i]);

  VEC_free (slp_tree, heap, SLP_INSTANCE_LOADS (instance));
  SLP_INSTANCE_LOADS (instance) = sorted_loads;
  free (tmp_loads);

  if (!vect_transform_slp_perm_load (stmt, NULL, NULL,
                                     SLP_INSTANCE_UNROLLING_FACTOR (instance),
                                     instance, true))
    return false;

  return true;
}
/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          VEC (int, heap) *permutation)
{
  gimple stmt;
  VEC (gimple, heap) *tmp_stmts;
  unsigned int index, i;

  if (!node)
    return;

  vect_slp_rearrange_stmts (SLP_TREE_LEFT (node), group_size, permutation);
  vect_slp_rearrange_stmts (SLP_TREE_RIGHT (node), group_size, permutation);

  gcc_assert (group_size == VEC_length (gimple, SLP_TREE_SCALAR_STMTS (node)));
  tmp_stmts = VEC_alloc (gimple, heap, group_size);

  for (i = 0; i < group_size; i++)
    VEC_safe_push (gimple, heap, tmp_stmts, NULL);

  FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      index = VEC_index (int, permutation, i);
      VEC_replace (gimple, tmp_stmts, index, stmt);
    }

  VEC_free (gimple, heap, SLP_TREE_SCALAR_STMTS (node));
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
/* Check if the required load permutation is supported.
   LOAD_PERMUTATION contains a list of indices of the loads.
   In SLP this permutation is relative to the order of strided stores that are
   the base of the SLP instance.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn, int group_size,
                                   VEC (int, heap) *load_permutation)
{
  int i = 0, j, prev = -1, next, k, number_of_groups;
  bool supported, bad_permutation = false;
  sbitmap load_index;
  slp_tree node, other_complex_node;
  gimple stmt, first = NULL, other_node_first;
  unsigned complex_numbers = 0;

  /* FORNOW: permutations are only supported in SLP.  */
  if (!slp_instn)
    return false;

  if (vect_print_dump_info (REPORT_SLP))
    {
      fprintf (vect_dump, "Load permutation ");
      FOR_EACH_VEC_ELT (int, load_permutation, i, next)
        fprintf (vect_dump, "%d ", next);
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     strided stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  FOR_EACH_VEC_ELT (slp_tree, SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      if (VEC_length (gimple, SLP_TREE_SCALAR_STMTS (node))
          != (unsigned) group_size)
        return false;

      stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
      if (is_gimple_assign (stmt)
          && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
              || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR))
        complex_numbers++;
    }

  /* Complex operands can be swapped as following:
      real_c = real_b + real_a;
      imag_c = imag_a + imag_b;
     i.e., we have {real_b, imag_a} and {real_a, imag_b} instead of
     {real_a, imag_a} and {real_b, imag_b}.  We check here that if interleaving
     chains are mixed, they match the above pattern.  */
  if (complex_numbers)
    {
      FOR_EACH_VEC_ELT (slp_tree, SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), j, stmt)
            {
              if (j == 0)
                first = stmt;
              else
                {
                  if (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) != first)
                    {
                      if (complex_numbers != 2)
                        return false;

                      if (i == 0)
                        k = 1;
                      else
                        k = 0;

                      other_complex_node = VEC_index (slp_tree,
                                            SLP_INSTANCE_LOADS (slp_instn), k);
                      other_node_first = VEC_index (gimple,
                                SLP_TREE_SCALAR_STMTS (other_complex_node), 0);

                      if (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt))
                          != other_node_first)
                        return false;
                    }
                }
            }
        }
    }

  /* We checked that this case is ok, so there is no need to proceed with
     permutation tests.  */
  if (complex_numbers == 2)
    {
      VEC_free (slp_tree, heap, SLP_INSTANCE_LOADS (slp_instn));
      VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (slp_instn));
      return true;
    }

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
  /* LOAD_PERMUTATION is a list of indices of all the loads of the SLP
     instance, not all the loads belong to the same node or interleaving
     group.  Hence, we need to divide them into groups according to
     GROUP_SIZE.  */
  number_of_groups = VEC_length (int, load_permutation) / group_size;

  /* Reduction (there are no data-refs in the root).  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
    {
      int first_group_load_index;

      /* Compare all the permutation sequences to the first one.  */
      for (i = 1; i < number_of_groups; i++)
        {
          k = 0;
          for (j = i * group_size; j < i * group_size + group_size; j++)
            {
              next = VEC_index (int, load_permutation, j);
              first_group_load_index = VEC_index (int, load_permutation, k);

              if (next != first_group_load_index)
                {
                  bad_permutation = true;
                  break;
                }

              k++;
            }

          if (bad_permutation)
            break;
        }

      if (!bad_permutation)
        {
          /* Check that the loads in the first sequence are different and there
             are no gaps between them.  */
          load_index = sbitmap_alloc (group_size);
          sbitmap_zero (load_index);
          for (k = 0; k < group_size; k++)
            {
              first_group_load_index = VEC_index (int, load_permutation, k);
              if (TEST_BIT (load_index, first_group_load_index))
                {
                  bad_permutation = true;
                  break;
                }

              SET_BIT (load_index, first_group_load_index);
            }

          if (!bad_permutation)
            for (k = 0; k < group_size; k++)
              if (!TEST_BIT (load_index, k))
                {
                  bad_permutation = true;
                  break;
                }

          sbitmap_free (load_index);
        }

      if (!bad_permutation)
        {
          /* This permutation is valid for reduction.  Since the order of the
             statements in the nodes is not important unless they are memory
             accesses, we can rearrange the statements in all the nodes
             according to the order of the loads.  */
          vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                    load_permutation);
          VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (slp_instn));
          return true;
        }
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
     well (unless it's reduction).  */
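  /* E.g., for GROUP_SIZE 2 the only permutation accepted here is
     {0, 0, 1, 1}: each run of GROUP_SIZE consecutive entries must repeat a
     single load index, and together the runs must cover all the indices
     0..GROUP_SIZE-1.  */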
  if (VEC_length (int, load_permutation)
      != (unsigned int) (group_size * group_size))
    return false;

  supported = true;
  load_index = sbitmap_alloc (group_size);
  sbitmap_zero (load_index);
  for (j = 0; j < group_size; j++)
    {
      for (i = j * group_size, k = 0;
           VEC_iterate (int, load_permutation, i, next) && k < group_size;
           i++, k++)
        {
          if (i != j * group_size && next != prev)
            {
              supported = false;
              break;
            }

          prev = next;
        }

      if (TEST_BIT (load_index, prev))
        {
          supported = false;
          break;
        }

      SET_BIT (load_index, prev);
    }

  for (j = 0; j < group_size; j++)
    if (!TEST_BIT (load_index, j))
      return false;

  sbitmap_free (load_index);

  if (supported && i == group_size * group_size
      && vect_supported_slp_permutation_p (slp_instn))
    return true;

  return false;
}
/* Find the first load in the loop that belongs to INSTANCE.
   When loads are in several SLP nodes, there can be a case in which the first
   load does not appear in the first SLP node to be transformed, causing
   incorrect order of statements.  Since we generate all the loads together,
   they must be inserted before the first load of the SLP instance and not
   before the first load of the first node of the instance.  */

static gimple
vect_find_first_load_in_slp_instance (slp_instance instance)
{
  int i, j;
  slp_tree load_node;
  gimple first_load = NULL, load;

  FOR_EACH_VEC_ELT (slp_tree, SLP_INSTANCE_LOADS (instance), i, load_node)
    FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (load_node), j, load)
      first_load = get_earlier_stmt (load, first_load);

  return first_load;
}
/* Find the last store in SLP INSTANCE.  */

static gimple
vect_find_last_store_in_slp_instance (slp_instance instance)
{
  int i;
  slp_tree node;
  gimple last_store = NULL, store;

  node = SLP_INSTANCE_TREE (instance);
  for (i = 0;
       VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, store);
       i++)
    last_store = get_later_stmt (store, last_store);

  return last_store;
}
/* Analyze an SLP instance starting from a group of strided stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt)
{
  slp_instance new_instance;
  slp_tree node = XNEW (struct _slp_tree);
  unsigned int group_size = DR_GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int inside_cost = 0, outside_cost = 0, ncopies_for_cost, i;
  unsigned int max_nunits = 0;
  VEC (int, heap) *load_permutation;
  VEC (slp_tree, heap) *loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));

  if (dr)
    {
      scalar_type = TREE_TYPE (DR_REF (dr));
      vectype = get_vectype_for_scalar_type (scalar_type);
      group_size = DR_GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = VEC_length (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo));
    }

  if (!vectype)
    {
      if (vect_print_dump_info (REPORT_SLP))
        {
          fprintf (vect_dump, "Build SLP failed: unsupported data-type ");
          print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    /* No multitypes in BB SLP.  */
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
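  /* For example, NUNITS 4 and GROUP_SIZE 6 give least_common_multiple (4, 6)
     == 12 and an unrolling factor of 2: two copies of the group fill exactly
     three vectors.  */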
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_SLP))
        fprintf (vect_dump, "Build SLP failed: unrolling required in basic"
                            " block SLP");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed strided stores.  */
  SLP_TREE_SCALAR_STMTS (node) = VEC_alloc (gimple, heap, group_size);
  next = stmt;
  if (dr)
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          VEC_safe_push (gimple, heap, SLP_TREE_SCALAR_STMTS (node), next);
          next = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
        }
    }
  else
    {
      /* Collect reduction statements.  */
      for (i = 0; VEC_iterate (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i,
                               next);
           i++)
        {
          VEC_safe_push (gimple, heap, SLP_TREE_SCALAR_STMTS (node), next);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "pushing reduction into node: ");
              print_gimple_stmt (vect_dump, next, 0, TDF_SLIM);
            }
        }
    }

  SLP_TREE_VEC_STMTS (node) = NULL;
  SLP_TREE_NUMBER_OF_VEC_STMTS (node) = 0;
  SLP_TREE_LEFT (node) = NULL;
  SLP_TREE_RIGHT (node) = NULL;
  SLP_TREE_OUTSIDE_OF_LOOP_COST (node) = 0;
  SLP_TREE_INSIDE_OF_LOOP_COST (node) = 0;

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  ncopies_for_cost = unrolling_factor * group_size / nunits;
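  /* E.g., with NUNITS 4, a group of 8 stmts needs 1 * 8 / 4 == 2 vector
     stmts, while a group of 2 stmts with unrolling factor 2 needs
     2 * 2 / 4 == 1.  */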
  load_permutation = VEC_alloc (int, heap, group_size * group_size);
  loads = VEC_alloc (slp_tree, heap, group_size);

  /* Build the tree for the SLP instance.  */
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &inside_cost, &outside_cost, ncopies_for_cost,
                           &max_nunits, &load_permutation, &loads,
                           vectorization_factor))
    {
      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      /* Calculate the unrolling factor based on the smallest type in the
         loop.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (new_instance) = outside_cost;
      SLP_INSTANCE_INSIDE_OF_LOOP_COST (new_instance) = inside_cost;
      SLP_INSTANCE_LOADS (new_instance) = loads;
      SLP_INSTANCE_FIRST_LOAD_STMT (new_instance) = NULL;
      SLP_INSTANCE_LOAD_PERMUTATION (new_instance) = load_permutation;
      if (VEC_length (slp_tree, loads))
        {
          if (!vect_supported_load_permutation_p (new_instance, group_size,
                                                  load_permutation))
            {
              if (vect_print_dump_info (REPORT_SLP))
                {
                  fprintf (vect_dump, "Build SLP failed: unsupported load "
                                      "permutation ");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }

              vect_free_slp_instance (new_instance);
              return false;
            }

          SLP_INSTANCE_FIRST_LOAD_STMT (new_instance)
            = vect_find_first_load_in_slp_instance (new_instance);
        }
      else
        VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (new_instance));

      if (loop_vinfo)
        VEC_safe_push (slp_instance, heap,
                       LOOP_VINFO_SLP_INSTANCES (loop_vinfo),
                       new_instance);
      else
        VEC_safe_push (slp_instance, heap, BB_VINFO_SLP_INSTANCES (bb_vinfo),
                       new_instance);

      if (vect_print_dump_info (REPORT_SLP))
        vect_print_slp_tree (node);

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  VEC_free (int, heap, load_permutation);
  VEC_free (slp_tree, heap, loads);

  return false;
}
/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  unsigned int i;
  VEC (gimple, heap) *strided_stores, *reductions = NULL;
  gimple store;
  bool ok = false;

  if (vect_print_dump_info (REPORT_SLP))
    fprintf (vect_dump, "=== vect_analyze_slp ===");

  if (loop_vinfo)
    {
      strided_stores = LOOP_VINFO_STRIDED_STORES (loop_vinfo);
      reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
    }
  else
    strided_stores = BB_VINFO_STRIDED_STORES (bb_vinfo);

  /* Find SLP sequences starting from groups of strided stores.  */
  FOR_EACH_VEC_ELT (gimple, strided_stores, i, store)
    if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, store))
      ok = true;

  if (bb_vinfo && !ok)
    {
      if (vect_print_dump_info (REPORT_SLP))
        fprintf (vect_dump, "Failed to SLP the basic block.");

      return false;
    }

  /* Find SLP sequences starting from groups of reductions.  */
  if (loop_vinfo && VEC_length (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo)) > 1
      && vect_analyze_slp_instance (loop_vinfo, bb_vinfo,
                                    VEC_index (gimple, reductions, 0)))
    ok = true;

  return true;
}
/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  VEC (slp_instance, heap) *slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (vect_print_dump_info (REPORT_SLP))
    fprintf (vect_dump, "=== vect_make_slp_decision ===");

  FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && vect_print_dump_info (REPORT_SLP))
    fprintf (vect_dump, "Decided to SLP %d instances. Unrolling factor %d",
             decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
}
/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */

static void
vect_detect_hybrid_slp_stmts (slp_tree node)
{
  int i;
  gimple stmt;
  imm_use_iterator imm_iter;
  gimple use_stmt;
  stmt_vec_info stmt_vinfo;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (PURE_SLP_STMT (vinfo_for_stmt (stmt))
        && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
        if ((stmt_vinfo = vinfo_for_stmt (use_stmt))
            && !STMT_SLP_TYPE (stmt_vinfo)
            && (STMT_VINFO_RELEVANT (stmt_vinfo)
                || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_vinfo)))
            && !(gimple_code (use_stmt) == GIMPLE_PHI
                 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (use_stmt))
                    == vect_reduction_def))
          vect_mark_slp_stmts (node, hybrid, i);

  vect_detect_hybrid_slp_stmts (SLP_TREE_LEFT (node));
  vect_detect_hybrid_slp_stmts (SLP_TREE_RIGHT (node));
}
/* Find stmts that must be both vectorized and SLPed.  */

void
vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
{
  unsigned int i;
  VEC (slp_instance, heap) *slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (vect_print_dump_info (REPORT_SLP))
    fprintf (vect_dump, "=== vect_detect_hybrid_slp ===");

  FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
    vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance));
}
/* Create and initialize a new bb_vec_info struct for BB, as well as
   stmt_vec_info structs for all the stmts in it.  */

static bb_vec_info
new_bb_vec_info (basic_block bb)
{
  bb_vec_info res = NULL;
  gimple_stmt_iterator gsi;

  res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
  BB_VINFO_BB (res) = bb;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      gimple_set_uid (stmt, 0);
      set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
    }

  BB_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  BB_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 2);

  bb->aux = res;
  return res;
}
/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
{
  basic_block bb;
  gimple_stmt_iterator si;

  if (!bb_vinfo)
    return;

  bb = BB_VINFO_BB (bb_vinfo);

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (stmt_info)
        /* Free stmt_vec_info.  */
        free_stmt_vec_info (stmt);
    }

  free_data_refs (BB_VINFO_DATAREFS (bb_vinfo));
  free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
  VEC_free (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo));
  VEC_free (slp_instance, heap, BB_VINFO_SLP_INSTANCES (bb_vinfo));
  free (bb_vinfo);
  bb->aux = NULL;
}
/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

static bool
vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
{
  bool dummy;
  int i;
  gimple stmt;

  if (!node)
    return true;

  if (!vect_slp_analyze_node_operations (bb_vinfo, SLP_TREE_LEFT (node))
      || !vect_slp_analyze_node_operations (bb_vinfo, SLP_TREE_RIGHT (node)))
    return false;

  FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);
      gcc_assert (PURE_SLP_STMT (stmt_info));

      if (!vect_analyze_stmt (stmt, &dummy, node))
        return false;
    }

  return true;
}
/* Analyze statements in SLP instances of the basic block.  Return TRUE if the
   operations are supported.  */

static bool
vect_slp_analyze_operations (bb_vec_info bb_vinfo)
{
  VEC (slp_instance, heap) *slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i;

  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); )
    {
      if (!vect_slp_analyze_node_operations (bb_vinfo,
                                             SLP_INSTANCE_TREE (instance)))
        {
          vect_free_slp_instance (instance);
          VEC_ordered_remove (slp_instance, slp_instances, i);
        }
      else
        i++;
    }

  if (!VEC_length (slp_instance, slp_instances))
    return false;

  return true;
}
/* Check if loads and stores are mixed in the basic block (in that
   case if we are not sure that the accesses differ, we can't vectorize the
   basic block).  Also return FALSE if there is a statement marked as not
   vectorizable.  */

static bool
vect_bb_vectorizable_with_dependencies (bb_vec_info bb_vinfo)
{
  basic_block bb = BB_VINFO_BB (bb_vinfo);
  gimple_stmt_iterator si;
  bool detected_store = false;
  gimple stmt;
  struct data_reference *dr;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);

      /* We can't allow not analyzed statements, since they may contain data
         references.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        return false;

      if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
        continue;

      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
      if (DR_IS_READ (dr) && detected_store)
        return false;

      if (!DR_IS_READ (dr))
        detected_store = true;
    }

  return true;
}
/* Check if vectorization of the basic block is profitable.  */

static bool
vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
{
  VEC (slp_instance, heap) *slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i;
  unsigned int vec_outside_cost = 0, vec_inside_cost = 0, scalar_cost = 0;
  unsigned int stmt_cost;
  gimple stmt;
  gimple_stmt_iterator si;
  basic_block bb = BB_VINFO_BB (bb_vinfo);
  stmt_vec_info stmt_info = NULL;
  tree dummy_type = NULL;
  int dummy = 0;

  /* Calculate vector costs.  */
  FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
    {
      vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
      vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
    }

  /* Calculate scalar cost.  */
  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      stmt_info = vinfo_for_stmt (stmt);

      if (!stmt_info || !STMT_VINFO_VECTORIZABLE (stmt_info)
          || !PURE_SLP_STMT (stmt_info))
        continue;

      if (STMT_VINFO_DATA_REF (stmt_info))
        {
          if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
            stmt_cost = targetm.vectorize.builtin_vectorization_cost
                          (scalar_load, dummy_type, dummy);
          else
            stmt_cost = targetm.vectorize.builtin_vectorization_cost
                          (scalar_store, dummy_type, dummy);
        }
      else
        stmt_cost = targetm.vectorize.builtin_vectorization_cost
                      (scalar_stmt, dummy_type, dummy);

      scalar_cost += stmt_cost;
    }

  if (vect_print_dump_info (REPORT_COST))
    {
      fprintf (vect_dump, "Cost model analysis: \n");
      fprintf (vect_dump, "  Vector inside of basic block cost: %d\n",
               vec_inside_cost);
      fprintf (vect_dump, "  Vector outside of basic block cost: %d\n",
               vec_outside_cost);
      fprintf (vect_dump, "  Scalar cost of basic block: %d", scalar_cost);
    }

  /* Vectorization is profitable if its cost is less than the cost of scalar
     version.  */
  if (vec_outside_cost + vec_inside_cost >= scalar_cost)
    return false;

  return true;
}
/* Check if the basic block can be vectorized.  */

bb_vec_info
vect_slp_analyze_bb (basic_block bb)
{
  bb_vec_info bb_vinfo;
  VEC (ddr_p, heap) *ddrs;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;
  int i, insns = 0;
  gimple_stmt_iterator gsi;
  int min_vf = 2;
  int max_vf = MAX_VECTORIZATION_FACTOR;
  bool data_dependence_in_bb = false;

  current_vector_size = 0;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===vect_slp_analyze_bb===\n");

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (!is_gimple_debug (stmt)
          && !gimple_nop_p (stmt)
          && gimple_code (stmt) != GIMPLE_LABEL)
        insns++;
    }

  if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: too many instructions in basic "
                            "block.\n");
      return NULL;
    }

  bb_vinfo = new_bb_vec_info (bb);
  if (!bb_vinfo)
    return NULL;

  if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unhandled data-ref in basic "
                            "block.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  ddrs = BB_VINFO_DDRS (bb_vinfo);
  if (!VEC_length (ddr_p, ddrs))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: not enough data-refs in basic "
                            "block.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_ref_dependences (NULL, bb_vinfo, &max_vf,
                                          &data_dependence_in_bb)
      || min_vf > max_vf
      || (data_dependence_in_bb
          && !vect_bb_vectorizable_with_dependencies (bb_vinfo)))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unhandled data dependence "
                            "in basic block.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: bad data alignment in basic "
                            "block.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unhandled data access in basic "
                            "block.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported alignment in basic "
                            "block.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Check the SLP opportunities in the basic block, analyze and build SLP
     trees.  */
  if (!vect_analyze_slp (NULL, bb_vinfo))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: failed to find SLP opportunities "
                            "in basic block.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);

  /* Mark all the statements that we want to vectorize as pure SLP and
     relevant.  */
  FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
    {
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
    }

  if (!vect_slp_analyze_operations (bb_vinfo))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: bad operation in basic block.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Cost model: check if the vectorization is worthwhile.  */
  if (flag_vect_cost_model
      && !vect_bb_vectorization_profitable_p (bb_vinfo))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization is not "
                            "profitable.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "Basic block will be vectorized using SLP\n");

  return bb_vinfo;
}
/* SLP costs are calculated according to SLP instance unrolling factor (i.e.,
   the number of created vector stmts depends on the unrolling factor).
   However, the actual number of vector stmts for every SLP node depends on
   VF which is set later in vect_analyze_operations ().  Hence, SLP costs
   should be updated.  In this function we assume that the inside costs
   calculated in vect_model_xxx_cost are linear in ncopies.  */
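/* For example, an instance with unrolling factor 2 had its inside cost
   computed for ncopies_for_cost based on 2; with VF == 8 that cost is
   multiplied by 8 / 2 == 4, the ratio between the number of vector stmts
   created for VF and the number assumed during the analysis.  */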
void
vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
{
  unsigned int i, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  VEC (slp_instance, heap) *slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (vect_print_dump_info (REPORT_SLP))
    fprintf (vect_dump, "=== vect_update_slp_costs_according_to_vf ===");

  FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
    /* We assume that costs are linear in ncopies.  */
    SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance) *= vf
      / SLP_INSTANCE_UNROLLING_FACTOR (instance);
}
/* For constant and loop invariant defs of SLP_NODE this function returns
   (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
   OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
   scalar stmts.  NUMBER_OF_VECTORS is the number of vector defs to create.
   REDUC_INDEX is the index of the reduction operand in the statements, unless
   it is -1.  */

static void
vect_get_constant_vectors (tree op, slp_tree slp_node,
                           VEC (tree, heap) **vec_oprnds,
                           unsigned int op_num, unsigned int number_of_vectors,
                           int reduc_index)
{
  VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple stmt = VEC_index (gimple, stmts, 0);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  int nunits;
  tree vec_cst;
  tree t = NULL_TREE;
  int j, number_of_places_left_in_vector;
  tree vector_type;
  tree vop;
  int group_size = VEC_length (gimple, stmts);
  unsigned int vec_num, i;
  int number_of_copies = 1;
  VEC (tree, heap) *voprnds = VEC_alloc (tree, heap, number_of_vectors);
  bool constant_p, is_store;
  tree neutral_op = NULL;
  enum tree_code code = gimple_assign_rhs_code (stmt);

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def)
    {
      if (reduc_index == -1)
        {
          VEC_free (tree, heap, *vec_oprnds);
          return;
        }

      op_num = reduc_index - 1;
      op = gimple_op (stmt, reduc_index);
      /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
         we need either neutral operands or the original operands.  See
         get_initial_def_for_reduction() for details.  */
      switch (code)
        {
          case WIDEN_SUM_EXPR:
          case DOT_PROD_EXPR:
          case PLUS_EXPR:
          case MINUS_EXPR:
          case BIT_IOR_EXPR:
          case BIT_XOR_EXPR:
            if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
              neutral_op = build_real (TREE_TYPE (op), dconst0);
            else
              neutral_op = build_int_cst (TREE_TYPE (op), 0);

            break;

          case MULT_EXPR:
            if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
              neutral_op = build_real (TREE_TYPE (op), dconst1);
            else
              neutral_op = build_int_cst (TREE_TYPE (op), 1);

            break;

          case BIT_AND_EXPR:
            neutral_op = build_int_cst (TREE_TYPE (op), -1);
            break;

          default:
            neutral_op = NULL;
        }
    }

  if (STMT_VINFO_DATA_REF (stmt_vinfo))
    {
      is_store = true;
      op = gimple_assign_rhs1 (stmt);
    }
  else
    is_store = false;

  if (CONSTANT_CLASS_P (op))
    constant_p = true;
  else
    constant_p = false;

  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
  gcc_assert (vector_type);
  nunits = TYPE_VECTOR_SUBPARTS (vector_type);

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
     containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */

  number_of_copies = least_common_multiple (nunits, group_size) / group_size;

  number_of_places_left_in_vector = nunits;
  for (j = 0; j < number_of_copies; j++)
    {
      for (i = group_size - 1; VEC_iterate (gimple, stmts, i, stmt); i--)
        {
          if (is_store)
            op = gimple_assign_rhs1 (stmt);
          else
            op = gimple_op (stmt, op_num + 1);

          if (reduc_index != -1)
            {
              struct loop *loop = (gimple_bb (stmt))->loop_father;
              gimple def_stmt = SSA_NAME_DEF_STMT (op);

              gcc_assert (loop);
              /* Get the def before the loop.  */
              op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
                                          loop_preheader_edge (loop));
              if (j != (number_of_copies - 1) && neutral_op)
                op = neutral_op;
            }

          /* Create 'vect_ = {op0,op1,...,opn}'.  */
          t = tree_cons (NULL_TREE, op, t);

          number_of_places_left_in_vector--;

          if (number_of_places_left_in_vector == 0)
            {
              number_of_places_left_in_vector = nunits;

              if (constant_p)
                vec_cst = build_vector (vector_type, t);
              else
                vec_cst = build_constructor_from_list (vector_type, t);
              VEC_quick_push (tree, voprnds,
                              vect_init_vector (stmt, vec_cst, vector_type,
                                                NULL));
              t = NULL_TREE;
            }
        }
    }

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = VEC_length (tree, voprnds);
  for (j = vec_num - 1; j >= 0; j--)
    {
      vop = VEC_index (tree, voprnds, j);
      VEC_quick_push (tree, *vec_oprnds, vop);
    }

  VEC_free (tree, heap, voprnds);

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
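  /* E.g., continuing the {s1, s2, s1, s2} example: if NUMBER_OF_VECTORS is 2,
     the loop below pushes that vector a second time, or pushes a neutral
     vector instead (e.g., {0, 0, 0, 0} for a summation) when a reduction
     provides NEUTRAL_OP.  */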
  while (number_of_vectors > VEC_length (tree, *vec_oprnds))
    {
      tree neutral_vec = NULL;

      if (neutral_op)
        {
          if (!neutral_vec)
            neutral_vec = build_vector_from_val (vector_type, neutral_op);

          VEC_quick_push (tree, *vec_oprnds, neutral_vec);
        }
      else
        {
          for (i = 0; VEC_iterate (tree, *vec_oprnds, i, vop) && i < vec_num;
               i++)
            VEC_quick_push (tree, *vec_oprnds, vop);
        }
    }
}
/* Get vectorized definitions from SLP_NODE that contains corresponding
   vectorized def-stmts.  */

static void
vect_get_slp_vect_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds)
{
  tree vec_oprnd;
  gimple vec_def_stmt;
  unsigned int i;

  gcc_assert (SLP_TREE_VEC_STMTS (slp_node));

  FOR_EACH_VEC_ELT (gimple, SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
    {
      gcc_assert (vec_def_stmt);
      vec_oprnd = gimple_get_lhs (vec_def_stmt);
      VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
    }
}
/* Get vectorized definitions for SLP_NODE.
   If the scalar definitions are loop invariants or constants, collect them and
   call vect_get_constant_vectors() to create vector stmts.
   Otherwise, the def-stmts must be already vectorized and the vectorized stmts
   must be stored in the LEFT/RIGHT node of SLP_NODE, and we call
   vect_get_slp_vect_defs() to retrieve them.
   If VEC_OPRNDS1 is NULL, don't get vector defs for the second operand (from
   the right node).  This is used when the second operand must remain scalar.  */

void
vect_get_slp_defs (tree op0, tree op1, slp_tree slp_node,
                   VEC (tree,heap) **vec_oprnds0,
                   VEC (tree,heap) **vec_oprnds1, int reduc_index)
{
  gimple first_stmt;
  enum tree_code code;
  int number_of_vects;
  HOST_WIDE_INT lhs_size_unit, rhs_size_unit;

  first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
  /* The number of vector defs is determined by the number of vector statements
     in the node from which we get those statements.  */
  if (SLP_TREE_LEFT (slp_node))
    number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_LEFT (slp_node));
  else
    {
      number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      /* Number of vector stmts was calculated according to LHS in
         vect_schedule_slp_instance(), fix it by replacing LHS with RHS, if
         necessary.  See vect_get_smallest_scalar_type () for details.  */
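      /* E.g., for a group of stmts "short_a = (short) int_b", LHS units are
         2 bytes and RHS units are 4 bytes, so NUMBER_OF_VECTS is scaled by
         4 / 2 == 2: vectors of ints hold half as many elements as vectors
         of shorts.  */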
      vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
                                     &rhs_size_unit);
      if (rhs_size_unit != lhs_size_unit)
        {
          number_of_vects *= rhs_size_unit;
          number_of_vects /= lhs_size_unit;
        }
    }

  /* Allocate memory for vectorized defs.  */
  *vec_oprnds0 = VEC_alloc (tree, heap, number_of_vects);

  /* SLP_NODE corresponds either to a group of stores or to a group of
     unary/binary operations.  We don't call this function for loads.
     For reduction defs we call vect_get_constant_vectors(), since we are
     looking for initial loop invariant values.  */
  if (SLP_TREE_LEFT (slp_node) && reduc_index == -1)
    /* The defs are already vectorized.  */
    vect_get_slp_vect_defs (SLP_TREE_LEFT (slp_node), vec_oprnds0);
  else
    /* Build vectors from scalar defs.  */
    vect_get_constant_vectors (op0, slp_node, vec_oprnds0, 0, number_of_vects,
                               reduc_index);

  if (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)))
    /* Since we don't call this function with loads, this is a group of
       stores.  */
    return;

  /* For reductions, we only need initial values.  */
  if (reduc_index != -1)
    return;

  code = gimple_assign_rhs_code (first_stmt);
  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS || !vec_oprnds1)
    return;

  /* The number of vector defs is determined by the number of vector statements
     in the node from which we get those statements.  */
  if (SLP_TREE_RIGHT (slp_node))
    number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_RIGHT (slp_node));
  else
    number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

  *vec_oprnds1 = VEC_alloc (tree, heap, number_of_vects);

  if (SLP_TREE_RIGHT (slp_node))
    /* The defs are already vectorized.  */
    vect_get_slp_vect_defs (SLP_TREE_RIGHT (slp_node), vec_oprnds1);
  else
    /* Build vectors from scalar defs.  */
    vect_get_constant_vectors (op1, slp_node, vec_oprnds1, 1, number_of_vects,
                               -1);
}
/* Create NCOPIES permutation statements using the mask MASK_BYTES (by
   building a vector of type MASK_TYPE from it) and two input vectors placed in
   DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy and
   shifting by STRIDE elements of DR_CHAIN for every copy.
   (STRIDE is the number of vectorized stmts for NODE divided by the number of
   copies).
   VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
   the created stmts must be inserted.  */
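/* E.g., with 8 vector stmts for NODE and NCOPIES 2, STRIDE is 4: the permute
   stmt of copy I is stored at index 4 * I + VECT_STMTS_COUNTER, and its two
   inputs are taken STRIDE slots further into DR_CHAIN on every copy.  */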
2153 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
2154 tree mask, int first_vec_indx, int second_vec_indx,
2155 gimple_stmt_iterator *gsi, slp_tree node,
2156 tree builtin_decl, tree vectype,
2157 VEC(tree,heap) *dr_chain,
2158 int ncopies, int vect_stmts_counter)
2161 gimple perm_stmt = NULL;
2162 stmt_vec_info next_stmt_info;
2164 tree first_vec, second_vec, data_ref;
2166 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
2168 /* Initialize the vect stmts of NODE to properly insert the generated
2170 for (i = VEC_length (gimple, SLP_TREE_VEC_STMTS (node));
2171 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
2172 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (node), NULL);
2174 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
2175 for (i = 0; i < ncopies; i++)
2177 first_vec = VEC_index (tree, dr_chain, first_vec_indx);
2178 second_vec = VEC_index (tree, dr_chain, second_vec_indx);
2180 /* Generate the permute statement. */
2181 perm_stmt = gimple_build_call (builtin_decl,
2182 3, first_vec, second_vec, mask);
2183 data_ref = make_ssa_name (perm_dest, perm_stmt);
2184 gimple_call_set_lhs (perm_stmt, data_ref);
2185 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
2187 /* Store the vector statement in NODE. */
2188 VEC_replace (gimple, SLP_TREE_VEC_STMTS (node),
2189 stride * i + vect_stmts_counter, perm_stmt);
2191 first_vec_indx += stride;
2192 second_vec_indx += stride;
2195 /* Mark the scalar stmt as vectorized. */
2196 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
2197 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
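/* Worked example (hypothetical numbers, for illustration only): with
   SLP_TREE_NUMBER_OF_VEC_STMTS (node) == 4 and NCOPIES == 2, STRIDE is 2.
   A call with FIRST_VEC_INDX == 0, SECOND_VEC_INDX == 1 and
   VECT_STMTS_COUNTER == 0 then emits

       copy 0: permute (dr_chain[0], dr_chain[1], mask) -> vec_stmts[0]
       copy 1: permute (dr_chain[2], dr_chain[3], mask) -> vec_stmts[2]

   i.e., each copy shifts both input indices by STRIDE and stores its
   result at slot STRIDE * i + VECT_STMTS_COUNTER.  */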
/* Given FIRST_MASK_ELEMENT - the mask element in element representation,
   return in CURRENT_MASK_ELEMENT its equivalent in target specific
   representation.  Check that the mask is valid and return FALSE if not.
   Return TRUE in NEED_NEXT_VECTOR if the permutation requires to move to
   the next vector, i.e., the current first vector is not needed.  */

static bool
vect_get_mask_element (gimple stmt, int first_mask_element, int m,
                       int mask_nunits, bool only_one_vec, int index,
                       int *mask, int *current_mask_element,
                       bool *need_next_vector, int *number_of_mask_fixes,
                       bool *mask_fixed, bool *needs_first_vector)
{
  int i;

  /* Convert to target specific representation.  */
  *current_mask_element = first_mask_element + m;
  /* Adjust the value in case it's a mask for the second and third
     vectors.  */
  *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);

  if (*current_mask_element < mask_nunits)
    *needs_first_vector = true;

  /* We have only one input vector to permute but the mask accesses values
     in the next vector as well.  */
  if (only_one_vec && *current_mask_element >= mask_nunits)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "permutation requires at least two vectors ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      return false;
    }

  /* The mask requires the next vector.  */
  if (*current_mask_element >= mask_nunits * 2)
    {
      if (*needs_first_vector || *mask_fixed)
        {
          /* We either need the first vector too or have already moved to
             the next vector.  In both cases, this permutation needs three
             vectors.  */
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "permutation requires at "
                                  "least three vectors ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          return false;
        }

      /* We move to the next vector, dropping the first one and working with
         the second and the third - we need to adjust the values of the mask
         elements accordingly.  */
      *current_mask_element -= mask_nunits * *number_of_mask_fixes;

      for (i = 0; i < index; i++)
        mask[i] -= mask_nunits * *number_of_mask_fixes;

      (*number_of_mask_fixes)++;
      *mask_fixed = true;
    }

  *need_next_vector = *mask_fixed;

  /* This was the last element of this mask.  Start a new one.  */
  if (index == mask_nunits - 1)
    {
      *number_of_mask_fixes = 1;
      *mask_fixed = false;
      *needs_first_vector = false;
    }

  return true;
}
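/* Trace of the example given below (with MASK_NUNITS == 4, so elements 0-3
   select from the first input vector and 4-7 from the second): the third
   mask for the a's starts as {6,9,9,9}.  Element 9 is >= MASK_NUNITS * 2,
   and no element collected so far needed the first vector, so the routine
   drops the first vector: the elements already collected and the current
   one are reduced by MASK_NUNITS, giving {2,5,5,5}, and NEED_NEXT_VECTOR
   tells the caller to shift its pair of input vectors by one.  */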
/* Generate vector permute statements from a list of loads in DR_CHAIN.
   If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
   permute statements for SLP_NODE_INSTANCE.  */

bool
vect_transform_slp_perm_load (gimple stmt, VEC (tree, heap) *dr_chain,
                              gimple_stmt_iterator *gsi, int vf,
                              slp_instance slp_node_instance, bool analyze_only)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree mask_element_type = NULL_TREE, mask_type;
  int i, j, k, m, scale, mask_nunits, nunits, vec_index = 0, scalar_index;
  slp_tree node;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info), builtin_decl;
  gimple next_scalar_stmt;
  int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
  int first_mask_element;
  int index, unroll_factor, *mask, current_mask_element, ncopies;
  bool only_one_vec = false, need_next_vector = false;
  int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
  int number_of_mask_fixes = 1;
  bool mask_fixed = false;
  bool needs_first_vector = false;

  if (!targetm.vectorize.builtin_vec_perm)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no builtin for vect permute for ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      return false;
    }

  builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
                                                     &mask_element_type);
  if (!builtin_decl || !mask_element_type)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no builtin for vect permute for ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      return false;
    }

  mask_type = get_vectype_for_scalar_type (mask_element_type);
  mask_nunits = TYPE_VECTOR_SUBPARTS (mask_type);
  mask = (int *) xmalloc (sizeof (int) * mask_nunits);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  scale = mask_nunits / nunits;
  unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);

  /* The number of vector stmts to generate based only on the
     SLP_NODE_INSTANCE unrolling factor.  */
  orig_vec_stmts_num = group_size
                       * SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance)
                       / nunits;
  if (orig_vec_stmts_num == 1)
    only_one_vec = true;

  /* The number of copies is determined by the final vectorization factor
     relative to the SLP_NODE_INSTANCE unrolling factor.  */
  ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
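  /* For illustration (hypothetical target parameters): with V4SI loads
     (NUNITS == 4) and a byte permute mask such as Altivec's
     (MASK_NUNITS == 16), SCALE is 16 / 4 == 4, so each element index is
     expanded below into four byte indices.  With GROUP_SIZE == 3 and an
     instance unrolling factor of 4, ORIG_VEC_STMTS_NUM is 3 * 4 / 4 == 3,
     so more than one input vector is involved; and with VF == 8 the
     permutation is generated NCOPIES == 8 / 4 == 2 times.  */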
  /* Generate permutation masks for every NODE.  The number of masks for
     each NODE is equal to GROUP_SIZE.
     E.g., we have a group of three nodes with three loads from the same
     location in each node, and the vector size is 4.  I.e., we have a
     a0b0c0a1b1c1... sequence and we need to create the following vectors:
     for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
     for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
     ...

     The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9} (in the
     target specific type, e.g., in bytes for Altivec).
     The last mask is illegal since we assume two operands for the permute
     operation, and the mask element values can't be outside that range.
     Hence, the last mask must be converted into {2,5,5,5}.
     For the first two permutations we need the first and the second input
     vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
     we need the second and the third vectors: {b1,c1,a2,b2} and
     {c2,a3,b3,c3}.  */

  FOR_EACH_VEC_ELT (slp_tree, SLP_INSTANCE_LOADS (slp_node_instance), i, node)
    {
      scalar_index = 0;
      index = 0;
      vect_stmts_counter = 0;
      vec_index = 0;
      first_vec_index = vec_index++;
      if (only_one_vec)
        second_vec_index = first_vec_index;
      else
        second_vec_index = vec_index++;

      for (j = 0; j < unroll_factor; j++)
        {
          for (k = 0; k < group_size; k++)
            {
              first_mask_element = (i + j * group_size) * scale;
              for (m = 0; m < scale; m++)
                {
                  if (!vect_get_mask_element (stmt, first_mask_element, m,
                                   mask_nunits, only_one_vec, index, mask,
                                   &current_mask_element, &need_next_vector,
                                   &number_of_mask_fixes, &mask_fixed,
                                   &needs_first_vector))
                    return false;

                  mask[index++] = current_mask_element;
                }

              if (index == mask_nunits)
                {
                  tree mask_vec = NULL;

                  while (--index >= 0)
                    {
                      tree t = build_int_cst (mask_element_type, mask[index]);
                      mask_vec = tree_cons (NULL, t, mask_vec);
                    }
                  mask_vec = build_vector (mask_type, mask_vec);
                  index = 0;

                  if (!targetm.vectorize.builtin_vec_perm_ok (vectype,
                                                              mask_vec))
                    {
                      if (vect_print_dump_info (REPORT_DETAILS))
                        {
                          fprintf (vect_dump, "unsupported vect permute ");
                          print_generic_expr (vect_dump, mask_vec, 0);
                        }
                      free (mask);
                      return false;
                    }

                  if (!analyze_only)
                    {
                      if (need_next_vector)
                        {
                          first_vec_index = second_vec_index;
                          second_vec_index = vec_index;
                        }

                      next_scalar_stmt = VEC_index (gimple,
                                SLP_TREE_SCALAR_STMTS (node), scalar_index++);

                      vect_create_mask_and_perm (stmt, next_scalar_stmt,
                               mask_vec, first_vec_index, second_vec_index,
                               gsi, node, builtin_decl, vectype, dr_chain,
                               ncopies, vect_stmts_counter++);
                    }
                }
            }
        }
    }

  free (mask);
  return true;
}
/* Vectorize SLP instance tree in postorder.  */

static bool
vect_schedule_slp_instance (slp_tree node, slp_instance instance,
                            unsigned int vectorization_factor)
{
  gimple stmt;
  bool strided_store, is_store;
  gimple_stmt_iterator si;
  stmt_vec_info stmt_info;
  unsigned int vec_stmts_size, nunits, group_size;
  tree vectype;
  int i;
  slp_tree loads_node;

  if (!node)
    return false;

  vect_schedule_slp_instance (SLP_TREE_LEFT (node), instance,
                              vectorization_factor);
  vect_schedule_slp_instance (SLP_TREE_RIGHT (node), instance,
                              vectorization_factor);

  stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
  stmt_info = vinfo_for_stmt (stmt);

  /* VECTYPE is the type of the destination.  */
  vectype = STMT_VINFO_VECTYPE (stmt_info);
  nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
  group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* For each SLP instance calculate the number of vector stmts to be
     created for the scalar stmts in each node of the SLP tree.  The number
     of vector elements in one vector iteration is the number of scalar
     elements in one scalar iteration (GROUP_SIZE) multiplied by VF divided
     by the vector size.  */
  vec_stmts_size = (vectorization_factor * group_size) / nunits;
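  /* For example (hypothetical numbers): with GROUP_SIZE == 2,
     VECTORIZATION_FACTOR == 4 and V4SI vectors (NUNITS == 4),
     4 * 2 / 4 == 2 vector stmts are created per node - the 2 * 4 scalar
     results of four unrolled iterations fill exactly two vectors.  */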
  /* In case of load permutation we have to allocate vectorized statements
     for all the nodes that participate in that permutation.  */
  if (SLP_INSTANCE_LOAD_PERMUTATION (instance))
    {
      FOR_EACH_VEC_ELT (slp_tree, SLP_INSTANCE_LOADS (instance), i, loads_node)
        {
          if (!SLP_TREE_VEC_STMTS (loads_node))
            {
              SLP_TREE_VEC_STMTS (loads_node) = VEC_alloc (gimple, heap,
                                                           vec_stmts_size);
              SLP_TREE_NUMBER_OF_VEC_STMTS (loads_node) = vec_stmts_size;
            }
        }
    }

  if (!SLP_TREE_VEC_STMTS (node))
    {
      SLP_TREE_VEC_STMTS (node) = VEC_alloc (gimple, heap, vec_stmts_size);
      SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "------>vectorizing SLP node starting from: ");
      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
    }

  /* Loads should be inserted before the first load.  */
  if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
      && STMT_VINFO_STRIDED_ACCESS (stmt_info)
      && !REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
    si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
  else
    si = gsi_for_stmt (stmt);

  /* Stores should be inserted just before the last store.  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
      && REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
    {
      gimple last_store = vect_find_last_store_in_slp_instance (instance);
      si = gsi_for_stmt (last_store);
    }

  is_store = vect_transform_stmt (stmt, &si, &strided_store, node, instance);
  return is_store;
}
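/* Illustration of the insertion points chosen above (hypothetical group of
   four stores): for a[0] = x0; ... a[3] = x3; the vector store is emitted
   just before the last scalar store, a[3] = x3, where all the scalar
   operands are already defined; symmetrically, the vector stmts of a
   strided load group are emitted before the first scalar load of the
   group, ahead of all its uses.  */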
/* Generate vector code for all SLP instances in the loop/basic block.  */

bool
vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;
  unsigned int i, vf;
  bool is_store = false;

  if (loop_vinfo)
    {
      slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    {
      slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
      vf = 1;
    }

  FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
    {
      /* Schedule the tree of INSTANCE.  */
      is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
                                             instance, vf);
      if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS)
          || vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "vectorizing stmts using SLP.");
    }

  FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
    {
      slp_tree root = SLP_INSTANCE_TREE (instance);
      gimple store;
      unsigned int j;
      gimple_stmt_iterator gsi;

      /* Remove the scalar stores that were replaced by vector stmts.  */
      for (j = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (root), j, store)
                  && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
        {
          if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
            break;

          /* Free the attached stmt_vec_info and remove the stmt.  */
          gsi = gsi_for_stmt (store);
          gsi_remove (&gsi, true);
          free_stmt_vec_info (store);
        }
    }

  return is_store;
}
/* Vectorize the basic block.  */

void
vect_slp_transform_bb (basic_block bb)
{
  bb_vec_info bb_vinfo = vec_info_for_bb (bb);
  gimple_stmt_iterator si;

  gcc_assert (bb_vinfo);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "SLPing BB\n");

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "------>SLPing statement: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);

      /* Schedule all the SLP instances when the first SLP stmt is
         reached.  */
      if (STMT_SLP_TYPE (stmt_info))
        {
          vect_schedule_slp (NULL, bb_vinfo);
          break;
        }
    }

  mark_sym_for_renaming (gimple_vop (cfun));
  /* The memory tags and pointers in vectorized statements need to
     have their SSA forms updated.  FIXME, why can't this be delayed
     until all the loops have been transformed?  */
  update_ssa (TODO_update_ssa);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "BASIC BLOCK VECTORIZED\n");

  destroy_bb_vec_info (bb_vinfo);
}