1 /* Analysis Utilities for Loop Vectorization.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Dorit Nuzman <dorit@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
29 #include "basic-block.h"
30 #include "gimple-pretty-print.h"
31 #include "tree-flow.h"
32 #include "tree-dump.h"
37 #include "tree-data-ref.h"
38 #include "tree-vectorizer.h"
40 #include "diagnostic-core.h"
42 /* Pattern recognition functions */
43 static gimple vect_recog_widen_sum_pattern (VEC (gimple, heap) **, tree *,
45 static gimple vect_recog_widen_mult_pattern (VEC (gimple, heap) **, tree *,
47 static gimple vect_recog_dot_prod_pattern (VEC (gimple, heap) **, tree *,
49 static gimple vect_recog_pow_pattern (VEC (gimple, heap) **, tree *, tree *);
/* Dispatch table of the pattern recognizers, indexed 0..NUM_PATTERNS-1.
   vect_pattern_recog iterates over this table in order, so the order here
   determines which pattern is tried first at each statement.  */
50 static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
51 vect_recog_widen_mult_pattern,
52 vect_recog_widen_sum_pattern,
53 vect_recog_dot_prod_pattern,
54 vect_recog_pow_pattern};
57 /* Function widened_name_p
59 Check whether NAME, an ssa-name used in USE_STMT,
60 is a result of a type-promotion, such that:
61 DEF_STMT: NAME = NOP (name0)
62 where the type of name0 (HALF_TYPE) is smaller than the type of NAME.
63 If CHECK_SIGN is TRUE, check that either both types are signed or both are
67 widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt,
72 loop_vec_info loop_vinfo;
73 stmt_vec_info stmt_vinfo;
74 tree type = TREE_TYPE (name);
76 enum vect_def_type dt;
/* The loop_vec_info is recovered from the statement that uses NAME.  */
79 stmt_vinfo = vinfo_for_stmt (use_stmt);
80 loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
82 if (!vect_is_simple_use (name, loop_vinfo, NULL, def_stmt, &def, &dt))
/* Only internal, external or constant defs can take part in a pattern;
   reduction/induction defs are rejected here.  */
85 if (dt != vect_internal_def
86 && dt != vect_external_def && dt != vect_constant_def)
/* DEF_STMT must be a conversion (NOP_EXPR) assignment for NAME to count
   as a widened name.  */
92 if (!is_gimple_assign (*def_stmt))
95 if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR)
98 oprnd0 = gimple_assign_rhs1 (*def_stmt);
/* HALF_TYPE is the (narrower) type of the conversion's source operand.
   Both types must be integral, signedness must agree when CHECK_SIGN,
   and NAME's type must be at least twice as wide as HALF_TYPE.  */
100 *half_type = TREE_TYPE (oprnd0);
101 if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type)
102 || ((TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type)) && check_sign)
103 || (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2)))
/* Finally, the narrow operand itself must be a simple use the
   vectorizer can analyze.  */
106 if (!vect_is_simple_use (oprnd0, loop_vinfo, NULL, &dummy_gimple, &dummy,
113 /* Helper to return a new temporary for pattern of TYPE for STMT. If STMT
114 is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var. */
117 vect_recog_temp_ssa_var (tree type, gimple stmt)
/* The "patt" prefix makes pattern temporaries recognizable in dumps.  */
119 tree var = create_tmp_var (type, "patt");
121 add_referenced_var (var);
122 var = make_ssa_name (var, stmt);
126 /* Function vect_recog_dot_prod_pattern
128 Try to find the following pattern:
134 sum_0 = phi <init, sum_1>
137 S3 x_T = (TYPE1) x_t;
138 S4 y_T = (TYPE1) y_t;
140 [S6 prod = (TYPE2) prod; #optional]
141 S7 sum_1 = prod + sum_0;
143 where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
144 same size of 'TYPE1' or bigger. This is a special case of a reduction
149 * STMTS: Contains a stmt from which the pattern search begins. In the
150 example, when this function is called with S7, the pattern {S3,S4,S5,S6,S7}
155 * TYPE_IN: The type of the input arguments to the pattern.
157 * TYPE_OUT: The type of the output of this pattern.
159 * Return value: A new stmt that will be used to replace the sequence of
160 stmts that constitute the pattern. In this case it will be:
161 WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>
163 Note: The dot-prod idiom is a widening reduction pattern that is
164 vectorized without preserving all the intermediate results. It
165 produces only N/2 (widened) results (by summing up pairs of
166 intermediate results) rather than all N results. Therefore, we
167 cannot allow this pattern when we want to get all the results and in
168 the correct order (as is the case when this computation is in an
169 inner-loop nested in an outer-loop that is being vectorized). */
172 vect_recog_dot_prod_pattern (VEC (gimple, heap) **stmts, tree *type_in,
175 gimple stmt, last_stmt = VEC_index (gimple, *stmts, 0);
177 tree oprnd00, oprnd01;
178 stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
179 tree type, half_type;
182 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
183 struct loop *loop = LOOP_VINFO_LOOP (loop_info);
186 if (!is_gimple_assign (last_stmt))
189 type = gimple_expr_type (last_stmt);
191 /* Look for the following pattern
195 DDPROD = (TYPE2) DPROD;
196 sum_1 = DDPROD + sum_0;
198 - DX is double the size of X
199 - DY is double the size of Y
200 - DX, DY, DPROD all have the same type
201 - sum is the same size of DPROD or bigger
202 - sum has been recognized as a reduction variable.
204 This is equivalent to:
205 DPROD = X w* Y; #widen mult
206 sum_1 = DPROD w+ sum_0; #widen summation
208 DPROD = X w* Y; #widen mult
209 sum_1 = DPROD + sum_0; #summation
212 /* Starting from LAST_STMT, follow the defs of its uses in search
213 of the above pattern. */
215 if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
218 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
220 /* Has been detected as widening-summation? */
/* Reuse the already-recognized WIDEN_SUM pattern stmt: its operands
   give us the product and the reduction accumulator directly.  */
222 stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
223 type = gimple_expr_type (stmt);
224 if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
226 oprnd0 = gimple_assign_rhs1 (stmt);
227 oprnd1 = gimple_assign_rhs2 (stmt);
228 half_type = TREE_TYPE (oprnd0);
/* Otherwise the summation itself must have been classified as a
   reduction by the vectorizer's earlier analysis.  */
234 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
236 oprnd0 = gimple_assign_rhs1 (last_stmt);
237 oprnd1 = gimple_assign_rhs2 (last_stmt);
238 if (!types_compatible_p (TREE_TYPE (oprnd0), type)
239 || !types_compatible_p (TREE_TYPE (oprnd1), type))
243 if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt, true))
246 oprnd0 = gimple_assign_rhs1 (stmt);
252 /* So far so good. Since last_stmt was detected as a (summation) reduction,
253 we know that oprnd1 is the reduction variable (defined by a loop-header
254 phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
255 Left to check that oprnd0 is defined by a (widen_)mult_expr */
257 prod_type = half_type;
258 stmt = SSA_NAME_DEF_STMT (oprnd0);
260 /* It could not be the dot_prod pattern if the stmt is outside the loop. */
261 if (!gimple_bb (stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
264 /* FORNOW. Can continue analyzing the def-use chain when this stmt in a phi
265 inside the loop (in case we are analyzing an outer-loop). */
266 if (!is_gimple_assign (stmt))
268 stmt_vinfo = vinfo_for_stmt (stmt);
269 gcc_assert (stmt_vinfo);
270 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
272 if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
274 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
276 /* Has been detected as a widening multiplication? */
278 stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
279 if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
281 stmt_vinfo = vinfo_for_stmt (stmt);
282 gcc_assert (stmt_vinfo);
283 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def);
284 oprnd00 = gimple_assign_rhs1 (stmt);
285 oprnd01 = gimple_assign_rhs2 (stmt);
/* Plain MULT_EXPR case: both multiplication operands must themselves be
   widened names with matching narrow types, and the product type must be
   exactly twice the narrow precision.  */
289 tree half_type0, half_type1;
293 oprnd0 = gimple_assign_rhs1 (stmt);
294 oprnd1 = gimple_assign_rhs2 (stmt);
295 if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type)
296 || !types_compatible_p (TREE_TYPE (oprnd1), prod_type))
298 if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt, true))
300 oprnd00 = gimple_assign_rhs1 (def_stmt);
301 if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt, true))
303 oprnd01 = gimple_assign_rhs1 (def_stmt);
304 if (!types_compatible_p (half_type0, half_type1))
306 if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
310 half_type = TREE_TYPE (oprnd00);
311 *type_in = half_type;
314 /* Pattern detected. Create a stmt to be used to replace the pattern: */
315 var = vect_recog_temp_ssa_var (type, NULL);
316 pattern_stmt = gimple_build_assign_with_ops3 (DOT_PROD_EXPR, var,
317 oprnd00, oprnd01, oprnd1);
319 if (vect_print_dump_info (REPORT_DETAILS))
321 fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: ");
322 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
325 /* We don't allow changing the order of the computation in the inner-loop
326 when doing outer-loop vectorization. */
327 gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));
333 /* Handle two cases of multiplication by a constant. The first one is when
334 the constant, CONST_OPRND, fits the type (HALF_TYPE) of the second
335 operand (OPRND). In that case, we can perform widen-mult from HALF_TYPE to
338 Otherwise, if the type of the result (TYPE) is at least 4 times bigger than
339 HALF_TYPE, and CONST_OPRND fits an intermediate type (2 times smaller than
340 TYPE), we can perform widen-mult from the intermediate type to TYPE and
341 replace a_T = (TYPE) a_t; with a_it = (interm_type) a_t; */
344 vect_handle_widen_mult_by_const (tree const_oprnd, tree *oprnd,
345 VEC (gimple, heap) **stmts, tree type,
346 tree *half_type, gimple def_stmt)
348 tree new_type, new_oprnd, tmp;
/* Case 1: the constant already fits HALF_TYPE, so the existing narrow
   operand can be used for the widen-mult unchanged.  */
351 if (int_fits_type_p (const_oprnd, *half_type))
353 /* CONST_OPRND is a constant of HALF_TYPE. */
354 *oprnd = gimple_assign_rhs1 (def_stmt)
/* Case 2 requires TYPE to be at least 4x HALF_TYPE and DEF_STMT to have
   vectorizer bookkeeping attached.  */
358 if (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 4)
359 || !vinfo_for_stmt (def_stmt))
362 /* TYPE is 4 times bigger than HALF_TYPE, try widen-mult for
363 a type 2 times bigger than HALF_TYPE. */
364 new_type = build_nonstandard_integer_type (TYPE_PRECISION (type) / 2,
365 TYPE_UNSIGNED (type));
366 if (!int_fits_type_p (const_oprnd, new_type))
369 /* Use NEW_TYPE for widen_mult. */
370 if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)))
/* A pattern stmt for DEF_STMT already exists (e.g. created while
   handling another use); reuse it only if it is exactly the
   NEW_TYPE conversion we would have created.  */
372 new_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
373 /* Check if the already created pattern stmt is what we need. */
374 if (!is_gimple_assign (new_stmt)
375 || gimple_assign_rhs_code (new_stmt) != NOP_EXPR
376 || TREE_TYPE (gimple_assign_lhs (new_stmt)) != new_type)
379 *oprnd = gimple_assign_lhs (new_stmt);
383 /* Create a_T = (NEW_TYPE) a_t; */
384 *oprnd = gimple_assign_rhs1 (def_stmt);
385 tmp = create_tmp_var (new_type, NULL);
386 add_referenced_var (tmp);
387 new_oprnd = make_ssa_name (tmp, NULL);
388 new_stmt = gimple_build_assign_with_ops (NOP_EXPR, new_oprnd, *oprnd,
390 SSA_NAME_DEF_STMT (new_oprnd) = new_stmt;
/* Record the new conversion as DEF_STMT's pattern stmt and remember
   DEF_STMT in STMTS so the caller marks it later.  */
391 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)) = new_stmt;
392 VEC_safe_push (gimple, heap, *stmts, def_stmt);
396 *half_type = new_type;
401 /* Function vect_recog_widen_mult_pattern
403 Try to find the following pattern:
406 TYPE a_T, b_T, prod_T;
412 S5 prod_T = a_T * b_T;
414 where type 'TYPE' is at least double the size of type 'type'.
416 Also detect unsigned cases:
418 unsigned type a_t, b_t;
419 unsigned TYPE u_prod_T;
420 TYPE a_T, b_T, prod_T;
426 S5 prod_T = a_T * b_T;
427 S6 u_prod_T = (unsigned TYPE) prod_T;
429 and multiplication by constants:
436 S5 prod_T = a_T * CONST;
438 A special case of multiplication by constants is when 'TYPE' is 4 times
439 bigger than 'type', but CONST fits an intermediate type 2 times smaller
440 than 'TYPE'. In that case we create an additional pattern stmt for S3
441 to create a variable of the intermediate type, and perform widen-mult
442 on the intermediate type as well:
446 TYPE a_T, prod_T, prod_T';
450 '--> a_it = (interm_type) a_t;
451 S5 prod_T = a_T * CONST;
452 '--> prod_T' = a_it w* CONST;
456 * STMTS: Contains a stmt from which the pattern search begins. In the
457 example, when this function is called with S5, the pattern {S3,S4,S5,(S6)}
458 is detected. In case of unsigned widen-mult, the original stmt (S5) is
459 replaced with S6 in STMTS. In case of multiplication by a constant
460 of an intermediate type (the last case above), STMTS also contains S3
461 (inserted before S5).
465 * TYPE_IN: The type of the input arguments to the pattern.
467 * TYPE_OUT: The type of the output of this pattern.
469 * Return value: A new stmt that will be used to replace the sequence of
470 stmts that constitute the pattern. In this case it will be:
471 WIDEN_MULT <a_t, b_t>
475 vect_recog_widen_mult_pattern (VEC (gimple, heap) **stmts,
476 tree *type_in, tree *type_out)
/* Note: unlike the other recognizers, the starting stmt is popped off
   STMTS here and pushed back (possibly replaced by S6) at the end.  */
478 gimple last_stmt = VEC_pop (gimple, *stmts);
479 gimple def_stmt0, def_stmt1;
481 tree type, half_type0, half_type1;
483 tree vectype, vectype_out = NULL_TREE;
486 enum tree_code dummy_code;
488 VEC (tree, heap) *dummy_vec;
491 if (!is_gimple_assign (last_stmt))
494 type = gimple_expr_type (last_stmt);
496 /* Starting from LAST_STMT, follow the defs of its uses in search
497 of the above pattern. */
499 if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
502 oprnd0 = gimple_assign_rhs1 (last_stmt);
503 oprnd1 = gimple_assign_rhs2 (last_stmt);
504 if (!types_compatible_p (TREE_TYPE (oprnd0), type)
505 || !types_compatible_p (TREE_TYPE (oprnd1), type))
508 /* Check argument 0. */
509 op0_ok = widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0, false);
510 /* Check argument 1. */
511 op1_ok = widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1, false);
513 /* In case of multiplication by a constant one of the operands may not match
514 the pattern, but not both. */
515 if (!op0_ok && !op1_ok)
518 if (op0_ok && op1_ok)
/* Both operands are widened names: use the narrow sources directly.  */
520 oprnd0 = gimple_assign_rhs1 (def_stmt0);
521 oprnd1 = gimple_assign_rhs1 (def_stmt1);
/* Only one operand matched: the other must be an integer constant that
   vect_handle_widen_mult_by_const can absorb into the pattern.  */
525 if (TREE_CODE (oprnd0) == INTEGER_CST
526 && TREE_CODE (half_type1) == INTEGER_TYPE
527 && vect_handle_widen_mult_by_const (oprnd0, &oprnd1, stmts, type,
528 &half_type1, def_stmt1))
529 half_type0 = half_type1;
535 if (TREE_CODE (oprnd1) == INTEGER_CST
536 && TREE_CODE (half_type0) == INTEGER_TYPE
537 && vect_handle_widen_mult_by_const (oprnd1, &oprnd0, stmts, type,
538 &half_type0, def_stmt0))
539 half_type1 = half_type0;
544 /* Handle unsigned case. Look for
545 S6 u_prod_T = (unsigned TYPE) prod_T;
546 Use unsigned TYPE as the type for WIDEN_MULT_EXPR. */
547 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (half_type0))
549 tree lhs = gimple_assign_lhs (last_stmt), use_lhs;
550 imm_use_iterator imm_iter;
553 gimple use_stmt = NULL;
556 if (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (half_type1))
/* The product must have exactly one non-debug use, and that use must
   be a conversion to the same-precision type of opposite signedness.  */
559 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
561 if (is_gimple_debug (USE_STMT (use_p)))
563 use_stmt = USE_STMT (use_p);
567 if (nuses != 1 || !is_gimple_assign (use_stmt)
568 || gimple_assign_rhs_code (use_stmt) != NOP_EXPR)
571 use_lhs = gimple_assign_lhs (use_stmt);
572 use_type = TREE_TYPE (use_lhs);
573 if (!INTEGRAL_TYPE_P (use_type)
574 || (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (use_type))
575 || (TYPE_PRECISION (type) != TYPE_PRECISION (use_type)))
/* From here on, S6 (the conversion) is the stmt the pattern replaces.  */
579 last_stmt = use_stmt;
582 if (!types_compatible_p (half_type0, half_type1))
585 /* Pattern detected. */
586 if (vect_print_dump_info (REPORT_DETAILS))
587 fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");
589 /* Check target support */
590 vectype = get_vectype_for_scalar_type (half_type0);
591 vectype_out = get_vectype_for_scalar_type (type);
594 || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt,
595 vectype_out, vectype,
596 &dummy, &dummy, &dummy_code,
597 &dummy_code, &dummy_int, &dummy_vec))
601 *type_out = vectype_out;
603 /* Pattern supported. Create a stmt to be used to replace the pattern: */
604 var = vect_recog_temp_ssa_var (type, NULL);
605 pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
607 SSA_NAME_DEF_STMT (var) = pattern_stmt;
609 if (vect_print_dump_info (REPORT_DETAILS))
610 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
/* Put the (possibly updated) last stmt of the pattern back on STMTS.  */
612 VEC_safe_push (gimple, heap, *stmts, last_stmt);
617 /* Function vect_recog_pow_pattern
619 Try to find the following pattern:
623 with POW being one of pow, powf, powi, powif and N being
628 * LAST_STMT: A stmt from which the pattern search begins.
632 * TYPE_IN: The type of the input arguments to the pattern.
634 * TYPE_OUT: The type of the output of this pattern.
636 * Return value: A new stmt that will be used to replace the sequence of
637 stmts that constitute the pattern. In this case it will be:
644 vect_recog_pow_pattern (VEC (gimple, heap) **stmts, tree *type_in,
647 gimple last_stmt = VEC_index (gimple, *stmts, 0);
648 tree fn, base, exp = NULL;
/* Only calls to known builtins with a used result are candidates.  */
652 if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
655 fn = gimple_call_fndecl (last_stmt);
656 if (fn == NULL_TREE || DECL_BUILT_IN_CLASS (fn) != BUILT_IN_NORMAL)
659 switch (DECL_FUNCTION_CODE (fn))
665 base = gimple_call_arg (last_stmt, 0);
666 exp = gimple_call_arg (last_stmt, 1);
/* The exponent must be a compile-time constant (real or integer).  */
667 if (TREE_CODE (exp) != REAL_CST
668 && TREE_CODE (exp) != INTEGER_CST)
676 /* We now have a pow or powi builtin function call with a constant
679 *type_out = NULL_TREE;
681 /* Catch squaring. */
/* pow (x, 2) / powi (x, 2) is rewritten as the plain multiplication
   x * x, which vectorizes trivially.  */
682 if ((host_integerp (exp, 0)
683 && tree_low_cst (exp, 0) == 2)
684 || (TREE_CODE (exp) == REAL_CST
685 && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
687 *type_in = TREE_TYPE (base);
689 var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
690 stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
691 SSA_NAME_DEF_STMT (var) = stmt;
695 /* Catch square root. */
/* pow (x, 0.5) becomes a call to the type-matching sqrt builtin,
   provided the target can vectorize that function.  */
696 if (TREE_CODE (exp) == REAL_CST
697 && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
699 tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
700 *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
703 gimple stmt = gimple_build_call (newfn, 1, base);
704 if (vectorizable_function (stmt, *type_in, *type_in)
707 var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
708 gimple_call_set_lhs (stmt, var);
718 /* Function vect_recog_widen_sum_pattern
720 Try to find the following pattern:
723 TYPE x_T, sum = init;
725 sum_0 = phi <init, sum_1>
728 S3 sum_1 = x_T + sum_0;
730 where type 'TYPE' is at least double the size of type 'type', i.e - we're
731 summing elements of type 'type' into an accumulator of type 'TYPE'. This is
732 a special case of a reduction computation.
736 * LAST_STMT: A stmt from which the pattern search begins. In the example,
737 when this function is called with S3, the pattern {S2,S3} will be detected.
741 * TYPE_IN: The type of the input arguments to the pattern.
743 * TYPE_OUT: The type of the output of this pattern.
745 * Return value: A new stmt that will be used to replace the sequence of
746 stmts that constitute the pattern. In this case it will be:
747 WIDEN_SUM <x_t, sum_0>
749 Note: The widening-sum idiom is a widening reduction pattern that is
750 vectorized without preserving all the intermediate results. It
751 produces only N/2 (widened) results (by summing up pairs of
752 intermediate results) rather than all N results. Therefore, we
753 cannot allow this pattern when we want to get all the results and in
754 the correct order (as is the case when this computation is in an
755 inner-loop nested in an outer-loop that is being vectorized). */
758 vect_recog_widen_sum_pattern (VEC (gimple, heap) **stmts, tree *type_in,
761 gimple stmt, last_stmt = VEC_index (gimple, *stmts, 0);
763 stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
764 tree type, half_type;
766 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
767 struct loop *loop = LOOP_VINFO_LOOP (loop_info);
770 if (!is_gimple_assign (last_stmt))
773 type = gimple_expr_type (last_stmt);
775 /* Look for the following pattern
778 In which DX is at least double the size of X, and sum_1 has been
779 recognized as a reduction variable.
782 /* Starting from LAST_STMT, follow the defs of its uses in search
783 of the above pattern. */
785 if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
/* Unlike dot-prod, the addition itself must already be classified as a
   reduction; there is no pre-recognized sub-pattern to reuse.  */
788 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
791 oprnd0 = gimple_assign_rhs1 (last_stmt);
792 oprnd1 = gimple_assign_rhs2 (last_stmt);
793 if (!types_compatible_p (TREE_TYPE (oprnd0), type)
794 || !types_compatible_p (TREE_TYPE (oprnd1), type))
797 /* So far so good. Since last_stmt was detected as a (summation) reduction,
798 we know that oprnd1 is the reduction variable (defined by a loop-header
799 phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
800 Left to check that oprnd0 is defined by a cast from type 'type' to type
803 if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt, true))
806 oprnd0 = gimple_assign_rhs1 (stmt);
807 *type_in = half_type;
810 /* Pattern detected. Create a stmt to be used to replace the pattern: */
811 var = vect_recog_temp_ssa_var (type, NULL);
812 pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
814 SSA_NAME_DEF_STMT (var) = pattern_stmt;
816 if (vect_print_dump_info (REPORT_DETAILS))
818 fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: ");
819 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
822 /* We don't allow changing the order of the computation in the inner-loop
823 when doing outer-loop vectorization. */
824 gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));
830 /* Function vect_pattern_recog_1
833 PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
835 STMT: A stmt from which the pattern search should start.
837 If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
838 expression that computes the same functionality and can be used to
839 replace the sequence of stmts that are involved in the pattern.
842 This function checks if the expression returned by PATTERN_RECOG_FUNC is
843 supported in vector form by the target. We use 'TYPE_IN' to obtain the
844 relevant vector type. If 'TYPE_IN' is already a vector type, then this
845 indicates that target support had already been checked by PATTERN_RECOG_FUNC.
846 If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC, we check that it fits
847 to the available target pattern.
849 This function also does some bookkeeping, as explained in the documentation
850 for vect_recog_pattern. */
853 vect_pattern_recog_1 (
854 gimple (* vect_recog_func) (VEC (gimple, heap) **, tree *, tree *),
855 gimple_stmt_iterator si)
857 gimple stmt = gsi_stmt (si), pattern_stmt;
858 stmt_vec_info stmt_info;
859 stmt_vec_info pattern_stmt_info;
860 loop_vec_info loop_vinfo;
861 tree pattern_vectype;
862 tree type_in, type_out;
/* STMTS_TO_REPLACE collects the stmt(s) the recognizer matched; the
   recognizer may append extra stmts (e.g. widen-mult by constant).  */
866 VEC (gimple, heap) *stmts_to_replace = VEC_alloc (gimple, heap, 1);
868 VEC_quick_push (gimple, stmts_to_replace, stmt);
869 pattern_stmt = (* vect_recog_func) (&stmts_to_replace, &type_in, &type_out);
/* The recognizer may have replaced the starting stmt (e.g. with the
   sign-conversion use in unsigned widen-mult); use the last entry.  */
873 stmt = VEC_last (gimple, stmts_to_replace);
874 stmt_info = vinfo_for_stmt (stmt);
875 loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
877 if (VECTOR_MODE_P (TYPE_MODE (type_in)))
879 /* No need to check target support (already checked by the pattern
880 recognition function). */
882 gcc_assert (VECTOR_MODE_P (TYPE_MODE (type_out)));
883 pattern_vectype = type_out ? type_out : type_in;
887 enum machine_mode vec_mode;
888 enum insn_code icode;
891 /* Check target support */
/* TYPE_IN/TYPE_OUT are scalar here; map them to vector types and probe
   the optab for an instruction implementing the pattern stmt's code.  */
892 type_in = get_vectype_for_scalar_type (type_in);
896 type_out = get_vectype_for_scalar_type (type_out);
901 pattern_vectype = type_out;
903 if (is_gimple_assign (pattern_stmt))
904 code = gimple_assign_rhs_code (pattern_stmt);
907 gcc_assert (is_gimple_call (pattern_stmt));
911 optab = optab_for_tree_code (code, type_in, optab_default);
912 vec_mode = TYPE_MODE (type_in);
914 || (icode = optab_handler (optab, vec_mode)) == CODE_FOR_nothing
915 || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out)))
919 /* Found a vectorizable pattern. */
920 if (vect_print_dump_info (REPORT_DETAILS))
922 fprintf (vect_dump, "pattern recognized: ");
923 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
926 /* Mark the stmts that are involved in the pattern. */
/* Give the (not-yet-inserted) pattern stmt its own stmt_vec_info and
   cross-link it with the original stmt via RELATED_STMT.  */
927 set_vinfo_for_stmt (pattern_stmt,
928 new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL));
929 gimple_set_bb (pattern_stmt, gimple_bb (stmt));
930 pattern_stmt_info = vinfo_for_stmt (pattern_stmt);
932 STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
933 STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info);
934 STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
935 STMT_VINFO_IN_PATTERN_P (stmt_info) = true;
936 STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt;
938 /* Patterns cannot be vectorized using SLP, because they change the order of
940 FOR_EACH_VEC_ELT (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i, next)
942 VEC_ordered_remove (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i);
944 /* In case of widen-mult by a constant, it is possible that an additional
945 pattern stmt is created and inserted in STMTS_TO_REPLACE. We create a
946 stmt_info for it, and mark the relevant statements. */
/* Iterate over all but the last entry: those are the extra stmts whose
   pattern stmts were pre-recorded by the recognizer.  */
947 for (i = 0; VEC_iterate (gimple, stmts_to_replace, i, stmt)
948 && (unsigned) i < (VEC_length (gimple, stmts_to_replace) - 1);
951 stmt_info = vinfo_for_stmt (stmt);
952 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
953 if (vect_print_dump_info (REPORT_DETAILS))
955 fprintf (vect_dump, "additional pattern stmt: ");
956 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
959 set_vinfo_for_stmt (pattern_stmt,
960 new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL));
961 gimple_set_bb (pattern_stmt, gimple_bb (stmt));
962 pattern_stmt_info = vinfo_for_stmt (pattern_stmt);
964 STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
965 STMT_VINFO_DEF_TYPE (pattern_stmt_info)
966 = STMT_VINFO_DEF_TYPE (stmt_info);
967 STMT_VINFO_VECTYPE (pattern_stmt_info) = STMT_VINFO_VECTYPE (stmt_info);
968 STMT_VINFO_IN_PATTERN_P (stmt_info) = true;
971 VEC_free (gimple, heap, stmts_to_replace);
975 /* Function vect_pattern_recog
978 LOOP_VINFO - a struct_loop_info of a loop in which we want to look for
981 Output - for each computation idiom that is detected we create a new stmt
982 that provides the same functionality and that can be vectorized. We
983 also record some information in the struct_stmt_info of the relevant
984 stmts, as explained below:
986 At the entry to this function we have the following stmts, with the
987 following initial value in the STMT_VINFO fields:
989 stmt in_pattern_p related_stmt vec_stmt
991 S2: a_2 = ..use(a_i).. - - -
992 S3: a_1 = ..use(a_2).. - - -
993 S4: a_0 = ..use(a_1).. - - -
994 S5: ... = ..use(a_0).. - - -
996 Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
997 represented by a single stmt. We then:
998 - create a new stmt S6 equivalent to the pattern (the stmt is not
999 inserted into the code)
1000 - fill in the STMT_VINFO fields as follows:
1002 in_pattern_p related_stmt vec_stmt
1003 S1: a_i = .... - - -
1004 S2: a_2 = ..use(a_i).. - - -
1005 S3: a_1 = ..use(a_2).. - - -
1006 S4: a_0 = ..use(a_1).. true S6 -
1007 '---> S6: a_new = .... - S4 -
1008 S5: ... = ..use(a_0).. - - -
1010 (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
1011 to each other through the RELATED_STMT field).
1013 S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
1014 of S4 because it will replace all its uses. Stmts {S1,S2,S3} will
1015 remain irrelevant unless used by stmts other than S4.
1017 If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
1018 (because they are marked as irrelevant). It will vectorize S6, and record
1019 a pointer to the new vector stmt VS6 from S6 (as usual).
1020 S4 will be skipped, and S5 will be vectorized as usual:
1022 in_pattern_p related_stmt vec_stmt
1023 S1: a_i = .... - - -
1024 S2: a_2 = ..use(a_i).. - - -
1025 S3: a_1 = ..use(a_2).. - - -
1026 > VS6: va_new = .... - - -
1027 S4: a_0 = ..use(a_1).. true S6 VS6
1028 '---> S6: a_new = .... - S4 VS6
1029 > VS5: ... = ..vuse(va_new).. - - -
1030 S5: ... = ..use(a_0).. - - -
1032 DCE could then get rid of {S1,S2,S3,S4,S5} (if their defs are not used
1033 elsewhere), and we'll end up with:
1036 VS5: ... = ..vuse(va_new)..
1038 In case of more than one pattern statements, e.g., widen-mult with
1042 S2 a_T = (TYPE) a_t;
1043 '--> S3: a_it = (interm_type) a_t;
1044 S4 prod_T = a_T * CONST;
1045 '--> S5: prod_T' = a_it w* CONST;
1047 there may be other users of a_T outside the pattern. In that case S2 will
1048 be marked as relevant (as well as S3), and both S2 and S3 will be analyzed
1049 and vectorized. The vector stmt VS2 will be recorded in S2, and VS3 will
1050 be recorded in S3. */
1053 vect_pattern_recog (loop_vec_info loop_vinfo)
1055 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1056 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1057 unsigned int nbbs = loop->num_nodes;
1058 gimple_stmt_iterator si;
1060 gimple (* vect_recog_func_ptr) (VEC (gimple, heap) **, tree *, tree *);
1062 if (vect_print_dump_info (REPORT_DETAILS))
1063 fprintf (vect_dump, "=== vect_pattern_recog ===");
1065 /* Scan through the loop stmts, applying the pattern recognition
1066 functions starting at each stmt visited: */
1067 for (i = 0; i < nbbs; i++)
1069 basic_block bb = bbs[i];
1070 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1072 /* Scan over all generic vect_recog_xxx_pattern functions. */
/* Every recognizer in the table is tried at every stmt; recognizers
   reject non-matching stmts themselves.  */
1073 for (j = 0; j < NUM_PATTERNS; j++)
1075 vect_recog_func_ptr = vect_vect_recog_func_ptrs[j];
1076 vect_pattern_recog_1 (vect_recog_func_ptr, si);