/* Data References Analysis and Manipulation Utilities for Vectorization.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "tm_p.h"
#include "target.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "diagnostic-core.h"

/* Need to include rtl.h, expr.h, etc. for optabs.  */
#include "expr.h"
#include "optabs.h"
/* Return true if load- or store-lanes optab OPTAB is implemented for
   COUNT vectors of type VECTYPE.  NAME is the name of OPTAB.  */
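/* For instance (an illustrative sketch, not part of the original sources):
   with VECTYPE V4SI (16 bytes) and COUNT 2, we look for a 32-byte integer
   mode to serve as the array mode and then check that the target provides
   the conversion optab for that mode pair -- on ARM NEON this corresponds
   to patterns such as vec_load_lanesoiv4si backing the vld2 instruction.  */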
static bool
vect_lanes_optab_supported_p (const char *name, convert_optab optab,
                              tree vectype, unsigned HOST_WIDE_INT count)
{
  enum machine_mode mode, array_mode;
  bool limit_p;

  mode = TYPE_MODE (vectype);
  limit_p = !targetm.array_mode_supported_p (mode, count);
  array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
                              MODE_INT, limit_p);

  if (array_mode == BLKmode)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]",
                 GET_MODE_NAME (mode), count);
      return false;
    }

  if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "cannot use %s<%s><%s>",
                 name, GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
      return false;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "can use %s<%s><%s>",
             name, GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));

  return true;
}
/* Return the smallest scalar part of STMT.
   This is used to determine the vectype of the stmt.  We generally set the
   vectype according to the type of the result (lhs).  For stmts whose
   result-type is different than the type of the arguments (e.g., demotion,
   promotion), vectype will be reset appropriately (later).  Note that we have
   to visit the smallest datatype in this function, because that determines
   the VF.  If the smallest datatype in the loop is present only as the rhs of
   a promotion operation, we would miss it.
   Such a case, where a variable of this datatype does not appear in the lhs
   anywhere in the loop, can only occur if it's an invariant: e.g.:
   'int_x = (int) short_inv', which we'd expect to have been optimized away by
   invariant motion.  However, we cannot rely on invariant motion to always
   take invariants out of the loop, and so in the case of promotion we also
   have to check the rhs.

   LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
   types.  */
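/* Illustrative example (not part of the original sources): in

     short_inv = ...;                   invariant that was not hoisted
     for (i = 0; i < N; i++)
       {
         int_x = (int) short_inv;
         a[i] = int_x;
       }

   the smallest scalar type is 'short' (2 bytes), although every lhs in the
   loop is 'int'; with 16-byte vectors the VF must therefore be 8.  */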
tree
vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
                               HOST_WIDE_INT *rhs_size_unit)
{
  tree scalar_type = gimple_expr_type (stmt);
  HOST_WIDE_INT lhs, rhs;

  lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));

  if (is_gimple_assign (stmt)
      && (gimple_assign_cast_p (stmt)
          || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
          || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
    {
      tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
      if (rhs < lhs)
        scalar_type = rhs_type;
    }

  *lhs_size_unit = lhs;
  *rhs_size_unit = rhs;
  return scalar_type;
}
/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */
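/* E.g. (illustrative, not part of the original sources): for an
   interleaving chain built from the stores to a[0], a[1] and a[2], the
   stmt storing to a[0] is FIRST_STMT and has place 0, the store to a[1]
   has place 1, and a stmt that is not linked into the chain yields -1.  */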
int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  while (next_stmt && next_stmt != stmt)
    {
      result++;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }

  if (next_stmt)
    return result;
  else
    return -1;
}
/* Function vect_insert_into_interleaving_chain.

   Insert DRA into the interleaving chain of DRB according to DRA's INIT.  */
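/* Illustrative sketch (not part of the original sources): the chain is
   kept sorted by DR_INIT, so inserting a data-ref with init 8 into a
   chain whose members have inits 0, 4 and 12 yields the order 0, 4, 8, 12,
   with the GROUP_NEXT_ELEMENT links of the neighboring stmts rewired
   around the new node.  */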
static void
vect_insert_into_interleaving_chain (struct data_reference *dra,
                                     struct data_reference *drb)
{
  gimple prev, next;
  tree next_init;
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));

  prev = GROUP_FIRST_ELEMENT (stmtinfo_b);
  next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
  while (next)
    {
      next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
      if (tree_int_cst_compare (next_init, DR_INIT (dra)) > 0)
        {
          /* Insert here.  */
          GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = DR_STMT (dra);
          GROUP_NEXT_ELEMENT (stmtinfo_a) = next;
          return;
        }
      prev = next;
      next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
    }

  /* We got to the end of the list.  Insert here.  */
  GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = DR_STMT (dra);
  GROUP_NEXT_ELEMENT (stmtinfo_a) = NULL;
}
/* Function vect_update_interleaving_chain.

   For two data-refs DRA and DRB that are a part of a chain of interleaved
   data accesses, update the interleaving chain.  DRB's INIT is smaller than
   DRA's.

   There are four possible cases:
   1. New stmts - both DRA and DRB are not a part of any chain:
      FIRST_DR = DRB
      NEXT(DRB) = DRA
   2. DRB is a part of a chain and DRA is not:
      no need to update FIRST_DR
      no need to insert DRB
      insert DRA according to init
   3. DRA is a part of a chain and DRB is not:
      if (init of FIRST_DR > init of DRB)
          FIRST_DR = DRB
          NEXT(FIRST_DR) = previous FIRST_DR
      else
          insert DRB according to its init
   4. both DRA and DRB are in some interleaving chains:
      choose the chain with the smallest init of FIRST_DR
      insert the nodes of the second chain into the first one
      (see the example below).  */
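/* An illustrative example of case 4 (not part of the original sources):
   given one chain with inits {4, 12} and another with inits {0, 8}, the
   chain whose FIRST_DR has the smaller init ({0, 8}) is kept, the nodes
   of the other chain are inserted into it one by one (resuming each scan
   from the previous insertion point), and the result is a single chain
   with inits {0, 4, 8, 12} whose first element is the data-ref with
   init 0.  */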
static void
vect_update_interleaving_chain (struct data_reference *drb,
                                struct data_reference *dra)
{
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  tree next_init, init_dra_chain, init_drb_chain;
  gimple first_a, first_b;
  tree node_init;
  gimple node, prev, next, first_stmt;

  /* 1. New stmts - both DRA and DRB are not a part of any chain.  */
  if (!GROUP_FIRST_ELEMENT (stmtinfo_a) && !GROUP_FIRST_ELEMENT (stmtinfo_b))
    {
      GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (drb);
      GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (drb);
      GROUP_NEXT_ELEMENT (stmtinfo_b) = DR_STMT (dra);
      return;
    }

  /* 2. DRB is a part of a chain and DRA is not.  */
  if (!GROUP_FIRST_ELEMENT (stmtinfo_a) && GROUP_FIRST_ELEMENT (stmtinfo_b))
    {
      GROUP_FIRST_ELEMENT (stmtinfo_a) = GROUP_FIRST_ELEMENT (stmtinfo_b);
      /* Insert DRA into the chain of DRB.  */
      vect_insert_into_interleaving_chain (dra, drb);
      return;
    }

  /* 3. DRA is a part of a chain and DRB is not.  */
  if (GROUP_FIRST_ELEMENT (stmtinfo_a) && !GROUP_FIRST_ELEMENT (stmtinfo_b))
    {
      gimple old_first_stmt = GROUP_FIRST_ELEMENT (stmtinfo_a);
      tree init_old = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (
                                                            old_first_stmt)));
      gimple tmp;

      if (tree_int_cst_compare (init_old, DR_INIT (drb)) > 0)
        {
          /* DRB's init is smaller than the init of the stmt previously marked
             as the first stmt of the interleaving chain of DRA.  Therefore, we
             update FIRST_STMT and put DRB in the head of the list.  */
          GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (drb);
          GROUP_NEXT_ELEMENT (stmtinfo_b) = old_first_stmt;

          /* Update all the stmts in the list to point to the new FIRST_STMT.  */
          tmp = old_first_stmt;
          while (tmp)
            {
              GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) = DR_STMT (drb);
              tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (tmp));
            }
        }
      else
        {
          /* Insert DRB in the list of DRA.  */
          vect_insert_into_interleaving_chain (drb, dra);
          GROUP_FIRST_ELEMENT (stmtinfo_b) = GROUP_FIRST_ELEMENT (stmtinfo_a);
        }
      return;
    }

  /* 4. both DRA and DRB are in some interleaving chains.  */
  first_a = GROUP_FIRST_ELEMENT (stmtinfo_a);
  first_b = GROUP_FIRST_ELEMENT (stmtinfo_b);
  if (first_a == first_b)
    return;
  init_dra_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_a)));
  init_drb_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_b)));

  if (tree_int_cst_compare (init_dra_chain, init_drb_chain) > 0)
    {
      /* Insert the nodes of DRA chain into the DRB chain.
         After inserting a node, continue from this node of the DRB chain
         (don't start from the beginning).  */
      node = GROUP_FIRST_ELEMENT (stmtinfo_a);
      prev = GROUP_FIRST_ELEMENT (stmtinfo_b);
      first_stmt = first_b;
    }
  else
    {
      /* Insert the nodes of DRB chain into the DRA chain.
         After inserting a node, continue from this node of the DRA chain
         (don't start from the beginning).  */
      node = GROUP_FIRST_ELEMENT (stmtinfo_b);
      prev = GROUP_FIRST_ELEMENT (stmtinfo_a);
      first_stmt = first_a;
    }

  while (node)
    {
      node_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (node)));
      next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
      while (next)
        {
          next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
          if (tree_int_cst_compare (next_init, node_init) > 0)
            {
              /* Insert here.  */
              GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = node;
              GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)) = next;
              prev = node;
              break;
            }
          prev = next;
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
        }
      if (!next)
        {
          /* We got to the end of the list.  Insert here.  */
          GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = node;
          GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)) = NULL;
          prev = node;
        }
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (node)) = first_stmt;
      node = GROUP_NEXT_ELEMENT (vinfo_for_stmt (node));
    }
}
/* Check dependence between DRA and DRB for basic block vectorization.
   If the accesses share same bases and offsets, we can compare their initial
   constant offsets to decide whether they differ or not.  In case of a
   read-write dependence we check that the load is before the store to ensure
   that vectorization will not change the order of the accesses.  */
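/* Illustrative example (not part of the original sources): in the basic
   block

     x = a[0];      load first
     a[0] = y;      store to the same location afterwards

   the two accesses overlap, but because the load precedes the store the
   order is preserved by basic block vectorization and the pair is
   acceptable; with the statements swapped this would be a load-after-store
   dependence and the block could not be vectorized.  */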
static bool
vect_drs_dependent_in_basic_block (struct data_reference *dra,
                                   struct data_reference *drb)
{
  HOST_WIDE_INT type_size_a, type_size_b, init_a, init_b;
  gimple earlier_stmt;

  /* We only call this function for pairs of loads and stores, but we verify
     it here.  */
  if (DR_IS_READ (dra) == DR_IS_READ (drb))
    {
      if (DR_IS_READ (dra))
        return false;
      else
        return true;
    }

  /* Check that the data-refs have same bases and offsets.  If not, we can't
     determine if they are dependent.  */
  if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
      || !dr_equal_offsets_p (dra, drb))
    return true;

  /* Check the types.  */
  type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
  type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));

  if (type_size_a != type_size_b
      || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
                              TREE_TYPE (DR_REF (drb))))
    return true;

  init_a = TREE_INT_CST_LOW (DR_INIT (dra));
  init_b = TREE_INT_CST_LOW (DR_INIT (drb));

  /* Two different locations - no dependence.  */
  if (init_a != init_b)
    return false;

  /* We have a read-write dependence.  Check that the load is before the
     store.  When we vectorize basic blocks, vector load can be only before
     corresponding scalar load, and vector store can be only after its
     corresponding scalar store.  So the order of the accesses is preserved
     in case the load is before the store.  */
  earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
  if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
    return false;

  return true;
}
/* Function vect_check_interleaving.

   Check if DRA and DRB are a part of interleaving.  In case they are, insert
   DRA and DRB in an interleaving chain.  */
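/* Illustrative example (not part of the original sources): the two stores
   in

     for (i = 0; i < N; i++)
       {
         a[2*i] = x;       init 0, step 8 for 4-byte elements
         a[2*i + 1] = y;   init 4, same base, offset and step
       }

   touch distinct, evenly spaced locations with a common step, so they are
   recorded as one interleaving chain of group size 2.  */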
static bool
vect_check_interleaving (struct data_reference *dra,
                         struct data_reference *drb)
{
  HOST_WIDE_INT type_size_a, type_size_b, diff_mod_size, step, init_a, init_b;

  /* Check that the data-refs have same first location (except init) and they
     are both either store or load (not load and store).  */
  if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
      || !dr_equal_offsets_p (dra, drb)
      || !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb))
      || DR_IS_READ (dra) != DR_IS_READ (drb))
    return false;

  /* Check:
     1. data-refs are of the same type
     2. their steps are equal
     3. the step (if greater than zero) is greater than the difference between
        data-refs' inits.  */
  type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
  type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));

  if (type_size_a != type_size_b
      || tree_int_cst_compare (DR_STEP (dra), DR_STEP (drb))
      || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
                              TREE_TYPE (DR_REF (drb))))
    return false;

  init_a = TREE_INT_CST_LOW (DR_INIT (dra));
  init_b = TREE_INT_CST_LOW (DR_INIT (drb));
  step = TREE_INT_CST_LOW (DR_STEP (dra));

  if (init_a > init_b)
    {
      /* If init_a == init_b + the size of the type * k, we have an
         interleaving, and DRB is accessed before DRA.  */
      diff_mod_size = (init_a - init_b) % type_size_a;

      if (step && (init_a - init_b) > step)
        return false;

      if (diff_mod_size == 0)
        {
          vect_update_interleaving_chain (drb, dra);
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            {
              fprintf (vect_dump, "Detected interleaving ");
              print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
              fprintf (vect_dump, " and ");
              print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
            }
          return true;
        }
    }
  else
    {
      /* If init_b == init_a + the size of the type * k, we have an
         interleaving, and DRA is accessed before DRB.  */
      diff_mod_size = (init_b - init_a) % type_size_a;

      if (step && (init_b - init_a) > step)
        return false;

      if (diff_mod_size == 0)
        {
          vect_update_interleaving_chain (dra, drb);
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            {
              fprintf (vect_dump, "Detected interleaving ");
              print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
              fprintf (vect_dump, " and ");
              print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
            }
          return true;
        }
    }

  return false;
}
/* Check if data references pointed by DR_I and DR_J are same or
   belong to same interleaving group.  Return FALSE if drs are
   different, otherwise return TRUE.  */
static bool
vect_same_range_drs (data_reference_p dr_i, data_reference_p dr_j)
{
  gimple stmt_i = DR_STMT (dr_i);
  gimple stmt_j = DR_STMT (dr_j);

  if (operand_equal_p (DR_REF (dr_i), DR_REF (dr_j), 0)
      || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i))
          && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j))
          && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i))
              == GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j)))))
    return true;
  else
    return false;
}
/* If address ranges represented by DDR_I and DDR_J are equal,
   return TRUE, otherwise return FALSE.  */
static bool
vect_vfa_range_equal (ddr_p ddr_i, ddr_p ddr_j)
{
  if ((vect_same_range_drs (DDR_A (ddr_i), DDR_A (ddr_j))
       && vect_same_range_drs (DDR_B (ddr_i), DDR_B (ddr_j)))
      || (vect_same_range_drs (DDR_A (ddr_i), DDR_B (ddr_j))
          && vect_same_range_drs (DDR_B (ddr_i), DDR_A (ddr_j))))
    return true;
  else
    return false;
}
/* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
   tested at run-time.  Return TRUE if DDR was successfully inserted.
   Return false if versioning is not supported.  */
static bool
vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
    return false;

  if (vect_print_dump_info (REPORT_DR_DETAILS))
    {
      fprintf (vect_dump, "mark for run-time aliasing test between ");
      print_generic_expr (vect_dump, DR_REF (DDR_A (ddr)), TDF_SLIM);
      fprintf (vect_dump, " and ");
      print_generic_expr (vect_dump, DR_REF (DDR_B (ddr)), TDF_SLIM);
    }

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (vect_print_dump_info (REPORT_DR_DETAILS))
        fprintf (vect_dump,
                 "versioning not supported when optimizing for size.");
      return false;
    }

  /* FORNOW: We don't support versioning with outer-loop vectorization.  */
  if (loop->inner)
    {
      if (vect_print_dump_info (REPORT_DR_DETAILS))
        fprintf (vect_dump, "versioning not yet supported for outer-loops.");
      return false;
    }

  VEC_safe_push (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), ddr);
  return true;
}
/* Function vect_analyze_data_ref_dependence.

   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB.  When versioning for alias can handle the
   dependence with a run-time check, return FALSE.  Adjust *MAX_VF according
   to the data dependence.  */
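/* Illustrative example (not part of the original sources): in

     for (i = 0; i < N; i++)
       a[i + 4] = a[i] + 1;

   the dependence distance is 4, so the loop can still be vectorized as
   long as the vectorization factor does not exceed 4; *MAX_VF is lowered
   to 4 instead of rejecting the loop outright.  */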
static bool
vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
                                  loop_vec_info loop_vinfo, int *max_vf)
{
  unsigned int i;
  struct loop *loop = NULL;
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  lambda_vector dist_v;
  unsigned int loop_depth;

  /* Don't bother to analyze statements marked as unvectorizable.  */
  if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
      || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
    return false;

  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    {
      /* Independent data accesses.  */
      vect_check_interleaving (dra, drb);
      return false;
    }

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if ((DR_IS_READ (dra) && DR_IS_READ (drb) && loop_vinfo) || dra == drb)
    return false;

  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      gimple earlier_stmt;

      if (loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            {
              fprintf (vect_dump, "versioning for alias required: "
                       "can't determine dependence between ");
              print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
              fprintf (vect_dump, " and ");
              print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
            }

          /* Add to list of ddrs that need to be tested at run-time.  */
          return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
        }

      /* When vectorizing a basic block unknown dependence can still mean
         strided access.  */
      if (vect_check_interleaving (dra, drb))
        return false;

      /* Read-read is OK (we need this check here, after checking for
         interleaving).  */
      if (DR_IS_READ (dra) && DR_IS_READ (drb))
        return false;

      if (vect_print_dump_info (REPORT_DR_DETAILS))
        {
          fprintf (vect_dump, "can't determine dependence between ");
          print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
          fprintf (vect_dump, " and ");
          print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
        }

      /* We do not vectorize basic blocks with write-write dependencies.  */
      if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
        return true;

      /* Check that it's not a load-after-store dependence.  */
      earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
        return true;

      return false;
    }

  /* Versioning for alias is not yet supported for basic block SLP, and
     dependence distance is inapplicable, hence, in case of known data
     dependence, basic block vectorization is impossible for now.  */
  if (!loop_vinfo)
    {
      if (dra != drb && vect_check_interleaving (dra, drb))
        return false;

      if (vect_print_dump_info (REPORT_DR_DETAILS))
        {
          fprintf (vect_dump, "determined dependence between ");
          print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
          fprintf (vect_dump, " and ");
          print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
        }

      /* Do not vectorize basic blocks with write-write dependences.  */
      if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
        return true;

      /* Check if this dependence is allowed in basic block vectorization.  */
      return vect_drs_dependent_in_basic_block (dra, drb);
    }

  /* Loop-based vectorization and known data dependence.  */
  if (DDR_NUM_DIST_VECTS (ddr) == 0)
    {
      if (vect_print_dump_info (REPORT_DR_DETAILS))
        {
          fprintf (vect_dump, "versioning for alias required: "
                   "bad dist vector for ");
          print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
          fprintf (vect_dump, " and ");
          print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
        }
      /* Add to list of ddrs that need to be tested at run-time.  */
      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
    }

  loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
  FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
    {
      int dist = dist_v[loop_depth];

      if (vect_print_dump_info (REPORT_DR_DETAILS))
        fprintf (vect_dump, "dependence distance = %d.", dist);

      if (dist == 0)
        {
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            {
              fprintf (vect_dump, "dependence distance == 0 between ");
              print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
              fprintf (vect_dump, " and ");
              print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
            }

          /* For interleaving, mark that there is a read-write dependency if
             necessary.  We check before that one of the data-refs is store.  */
          if (DR_IS_READ (dra))
            GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true;
          else
            {
              if (DR_IS_READ (drb))
                GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true;
            }

          continue;
        }

      if (dist > 0 && DDR_REVERSED_P (ddr))
        {
          /* If DDR_REVERSED_P the order of the data-refs in DDR was
             reversed (to make distance vector positive), and the actual
             distance is negative.  */
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            fprintf (vect_dump, "dependence distance negative.");
          continue;
        }

      if (abs (dist) >= 2
          && abs (dist) < *max_vf)
        {
          /* The dependence distance requires reduction of the maximal
             vectorization factor.  */
          *max_vf = abs (dist);
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            fprintf (vect_dump, "adjusting maximal vectorization factor to %i",
                     *max_vf);
        }

      if (abs (dist) >= *max_vf)
        {
          /* Dependence distance does not create dependence, as far as
             vectorization is concerned, in this case.  */
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            fprintf (vect_dump, "dependence distance >= VF.");
          continue;
        }

      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        {
          fprintf (vect_dump, "not vectorized, possible dependence "
                   "between data-refs ");
          print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
          fprintf (vect_dump, " and ");
          print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
        }

      return true;
    }

  return false;
}
/* Function vect_analyze_data_ref_dependences.

   Examine all the data references in the loop, and make sure there do not
   exist any data dependences between them.  Set *MAX_VF according to
   the maximum vectorization factor the data dependences allow.  */
bool
vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
                                   bb_vec_info bb_vinfo, int *max_vf)
{
  unsigned int i;
  VEC (ddr_p, heap) *ddrs = NULL;
  struct data_dependence_relation *ddr;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_dependences ===");

  if (loop_vinfo)
    ddrs = LOOP_VINFO_DDRS (loop_vinfo);
  else
    ddrs = BB_VINFO_DDRS (bb_vinfo);

  FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
    if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
      return false;

  return true;
}
/* Function vect_compute_data_ref_alignment

   Compute the misalignment of the data reference DR.

   Output:
   1. If during the misalignment computation it is found that the data
      reference cannot be vectorized then false is returned.
   2. DR_MISALIGNMENT (DR) is defined.

   FOR NOW: No analysis is actually performed.  Misalignment is calculated
   only for trivial cases.  TODO.  */
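/* Illustrative example (not part of the original sources): for a 16-byte
   vector type and a data-ref whose base is known to be 16-byte aligned
   with a constant init of 4 bytes, DR_MISALIGNMENT is set to 4; when
   nothing useful is known about the base, the misalignment stays -1
   (unknown).  */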
static bool
vect_compute_data_ref_alignment (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  tree ref = DR_REF (dr);
  tree vectype;
  tree base, base_addr;
  bool base_aligned;
  tree misalign;
  tree aligned_to, alignment;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vect_compute_data_ref_alignment:");

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Initialize misalignment to unknown.  */
  SET_DR_MISALIGNMENT (dr, -1);

  misalign = DR_INIT (dr);
  aligned_to = DR_ALIGNED_TO (dr);
  base_addr = DR_BASE_ADDRESS (dr);
  vectype = STMT_VINFO_VECTYPE (stmt_info);

  /* In case the dataref is in an inner-loop of the loop that is being
     vectorized (LOOP), we use the base and misalignment information
     relative to the outer-loop (LOOP).  This is ok only if the misalignment
     stays the same throughout the execution of the inner-loop, which is why
     we have to check that the stride of the dataref in the inner-loop evenly
     divides by the vector size.  */
  if (loop && nested_in_vect_loop_p (loop, stmt))
    {
      tree step = DR_STEP (dr);
      HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);

      if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
        {
          if (vect_print_dump_info (REPORT_ALIGNMENT))
            fprintf (vect_dump, "inner step divides the vector-size.");
          misalign = STMT_VINFO_DR_INIT (stmt_info);
          aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
          base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
        }
      else
        {
          if (vect_print_dump_info (REPORT_ALIGNMENT))
            fprintf (vect_dump, "inner step doesn't divide the vector-size.");
          misalign = NULL_TREE;
        }
    }

  base = build_fold_indirect_ref (base_addr);
  alignment = ssize_int (TYPE_ALIGN (vectype)/BITS_PER_UNIT);

  if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0)
      || !misalign)
    {
      if (vect_print_dump_info (REPORT_ALIGNMENT))
        {
          fprintf (vect_dump, "Unknown alignment for access: ");
          print_generic_expr (vect_dump, base, TDF_SLIM);
        }
      return true;
    }

  if ((DECL_P (base)
       && tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)),
                                alignment) >= 0)
      || (TREE_CODE (base_addr) == SSA_NAME
          && tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE (
                                                  TREE_TYPE (base_addr)))),
                                   alignment) >= 0)
      || (get_pointer_alignment (base_addr) >= TYPE_ALIGN (vectype)))
    base_aligned = true;
  else
    base_aligned = false;

  if (!base_aligned)
    {
      /* Do not change the alignment of global variables if
         flag_section_anchors is enabled.  */
      if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
          || (TREE_STATIC (base) && flag_section_anchors))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "can't force alignment of ref: ");
              print_generic_expr (vect_dump, ref, TDF_SLIM);
            }
          return true;
        }

      /* Force the alignment of the decl.
         NOTE: This is the only change to the code we make during
         the analysis phase, before deciding to vectorize the loop.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "force alignment of ");
          print_generic_expr (vect_dump, ref, TDF_SLIM);
        }

      DECL_ALIGN (base) = TYPE_ALIGN (vectype);
      DECL_USER_ALIGN (base) = 1;
    }

  /* At this point we assume that the base is aligned.  */
  gcc_assert (base_aligned
              || (TREE_CODE (base) == VAR_DECL
                  && DECL_ALIGN (base) >= TYPE_ALIGN (vectype)));

  /* If this is a backward running DR then first access in the larger
     vectype actually is N-1 elements before the address in the DR.
     Adjust misalign accordingly.  */
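  /* E.g. (illustrative, not part of the original sources): for a backward
     V4SI access with 4-byte elements, the vector load really starts
     3 elements (12 bytes) below the DR's address, so 3 * DR_STEP (a
     negative value) is added to the misalignment before it is taken
     modulo the vector alignment.  */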
  if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
    {
      tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
      /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
         otherwise we wouldn't be here.  */
      offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
      /* PLUS because DR_STEP was negative.  */
      misalign = size_binop (PLUS_EXPR, misalign, offset);
    }

  /* Modulo alignment.  */
  misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment);

  if (!host_integerp (misalign, 1))
    {
      /* Negative or overflowed misalignment value.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected misalign value");
      return false;
    }

  SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
      print_generic_expr (vect_dump, ref, TDF_SLIM);
    }

  return true;
}
/* Function vect_compute_data_refs_alignment

   Compute the misalignment of data references in the loop.
   Return FALSE if a data reference is found that cannot be vectorized.  */
static bool
vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
                                  bb_vec_info bb_vinfo)
{
  VEC (data_reference_p, heap) *datarefs;
  struct data_reference *dr;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
    if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
        && !vect_compute_data_ref_alignment (dr))
      {
        if (bb_vinfo)
          {
            /* Mark unsupported statement as unvectorizable.  */
            STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
            continue;
          }
        else
          return false;
      }

  return true;
}
/* Function vect_update_misalignment_for_peel

   DR - the data reference whose misalignment is to be adjusted.
   DR_PEEL - the data reference whose misalignment is being made
             zero in the vector loop by the peel.
   NPEEL - the number of iterations in the peel loop if the misalignment
           of DR_PEEL is known at compile time.  */
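/* Illustrative example (not part of the original sources): with 16-byte
   vectors, a DR of 4-byte elements currently misaligned by 8, and NPEEL 2,
   the peel advances the access by 2 * 4 = 8 bytes, so the new misalignment
   is (8 + 8) & 15 = 0; if either misalignment is unknown at compile time,
   DR's misalignment is simply reset to -1 (unknown).  */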
static void
vect_update_misalignment_for_peel (struct data_reference *dr,
                                   struct data_reference *dr_peel, int npeel)
{
  unsigned int i;
  VEC(dr_p,heap) *same_align_drs;
  struct data_reference *current_dr;
  int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
  int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
  stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
  stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));

  /* For interleaved data accesses the step in the loop must be multiplied by
     the size of the interleaving group.  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
  if (STMT_VINFO_STRIDED_ACCESS (peel_stmt_info))
    dr_peel_size *= GROUP_SIZE (peel_stmt_info);

  /* It can be assumed that the data refs with the same alignment as dr_peel
     are aligned in the vector loop.  */
  same_align_drs
    = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
  FOR_EACH_VEC_ELT (dr_p, same_align_drs, i, current_dr)
    {
      if (current_dr != dr)
        continue;
      gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
                  DR_MISALIGNMENT (dr_peel) / dr_peel_size);
      SET_DR_MISALIGNMENT (dr, 0);
      return;
    }

  if (known_alignment_for_access_p (dr)
      && known_alignment_for_access_p (dr_peel))
    {
      bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
      int misal = DR_MISALIGNMENT (dr);
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      misal += negative ? -npeel * dr_size : npeel * dr_size;
      misal &= GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
      SET_DR_MISALIGNMENT (dr, misal);
      return;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "Setting misalignment to -1.");
  SET_DR_MISALIGNMENT (dr, -1);
}
/* Function vect_verify_datarefs_alignment

   Return TRUE if all data references in the loop can be
   handled with respect to alignment.  */
bool
vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  VEC (data_reference_p, heap) *datarefs;
  struct data_reference *dr;
  enum dr_alignment_support supportable_dr_alignment;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
    {
      gimple stmt = DR_STMT (dr);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      /* For interleaving, only the alignment of the first access matters.
         Skip statements marked as not vectorizable.  */
      if ((STMT_VINFO_STRIDED_ACCESS (stmt_info)
           && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
          || !STMT_VINFO_VECTORIZABLE (stmt_info))
        continue;

      supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
      if (!supportable_dr_alignment)
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            {
              if (DR_IS_READ (dr))
                fprintf (vect_dump,
                         "not vectorized: unsupported unaligned load.");
              else
                fprintf (vect_dump,
                         "not vectorized: unsupported unaligned store.");

              print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
            }
          return false;
        }
      if (supportable_dr_alignment != dr_aligned
          && vect_print_dump_info (REPORT_ALIGNMENT))
        fprintf (vect_dump, "Vectorizing an unaligned access.");
    }
  return true;
}
/* Function vector_alignment_reachable_p

   Return true if vector alignment for DR is reachable by peeling
   a few loop iterations.  Return false otherwise.  */
static bool
vector_alignment_reachable_p (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      /* For interleaved access we peel only if number of iterations in
         the prolog loop ({VF - misalignment}), is a multiple of the
         number of the interleaved accesses.  */
      int elem_size, mis_in_elements;
      int nelements = TYPE_VECTOR_SUBPARTS (vectype);

      /* FORNOW: handle only known alignment.  */
      if (!known_alignment_for_access_p (dr))
        return false;

      elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
      mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;

      if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
        return false;
    }

  /* If misalignment is known at the compile time then allow peeling
     only if natural alignment is reachable through peeling.  */
  if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
    {
      HOST_WIDE_INT elmsize =
                int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
          fprintf (vect_dump, ". misalignment = %d. ", DR_MISALIGNMENT (dr));
        }
      if (DR_MISALIGNMENT (dr) % elmsize)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "data size does not divide the misalignment.\n");
          return false;
        }
    }

  if (!known_alignment_for_access_p (dr))
    {
      tree type = (TREE_TYPE (DR_REF (dr)));
      tree ba = DR_BASE_OBJECT (dr);
      bool is_packed = false;

      if (ba)
        is_packed = contains_packed_reference (ba);

      if (compare_tree_int (TYPE_SIZE (type), TYPE_ALIGN (type)) > 0)
        is_packed = true;

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unknown misalignment, is_packed = %d", is_packed);
      if (targetm.vectorize.vector_alignment_reachable (type, is_packed))
        return true;
      else
        return false;
    }

  return true;
}
/* Calculate the cost of the memory access represented by DR.  */
static void
vect_get_data_access_cost (struct data_reference *dr,
                           unsigned int *inside_cost,
                           unsigned int *outside_cost)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int ncopies = vf / nunits;
  bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);

  if (!supportable_dr_alignment)
    *inside_cost = VECT_MAX_COST;
  else
    {
      if (DR_IS_READ (dr))
        vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost);
      else
        vect_get_store_cost (dr, ncopies, inside_cost);
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_get_data_access_cost: inside_cost = %d, "
             "outside_cost = %d.", *inside_cost, *outside_cost);
}
static hashval_t
vect_peeling_hash (const void *elem)
{
  const struct _vect_peel_info *peel_info;

  peel_info = (const struct _vect_peel_info *) elem;
  return (hashval_t) peel_info->npeel;
}
static int
vect_peeling_hash_eq (const void *elem1, const void *elem2)
{
  const struct _vect_peel_info *a, *b;

  a = (const struct _vect_peel_info *) elem1;
  b = (const struct _vect_peel_info *) elem2;
  return (a->npeel == b->npeel);
}
/* Insert DR into peeling hash table with NPEEL as key.  */
static void
vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
                          int npeel)
{
  struct _vect_peel_info elem, *slot;
  void **new_slot;
  bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);

  elem.npeel = npeel;
  slot = (vect_peel_info) htab_find (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
                                     &elem);
  if (slot)
    slot->count++;
  else
    {
      slot = XNEW (struct _vect_peel_info);
      slot->npeel = npeel;
      slot->dr = dr;
      slot->count = 1;
      new_slot = htab_find_slot (LOOP_VINFO_PEELING_HTAB (loop_vinfo), slot,
                                 INSERT);
      *new_slot = slot;
    }

  if (!supportable_dr_alignment && !flag_vect_cost_model)
    slot->count += VECT_MAX_COST;
}
/* Traverse peeling hash table to find peeling option that aligns maximum
   number of data accesses.  */
static int
vect_peeling_hash_get_most_frequent (void **slot, void *data)
{
  vect_peel_info elem = (vect_peel_info) *slot;
  vect_peel_extended_info max = (vect_peel_extended_info) data;

  if (elem->count > max->peel_info.count
      || (elem->count == max->peel_info.count
          && max->peel_info.npeel > elem->npeel))
    {
      max->peel_info.npeel = elem->npeel;
      max->peel_info.count = elem->count;
      max->peel_info.dr = elem->dr;
    }

  return 1;
}
/* Traverse peeling hash table and calculate cost for each peeling option.
   Find the one with the lowest cost.  */
static int
vect_peeling_hash_get_lowest_cost (void **slot, void *data)
{
  vect_peel_info elem = (vect_peel_info) *slot;
  vect_peel_extended_info min = (vect_peel_extended_info) data;
  int save_misalignment, dummy;
  unsigned int inside_cost = 0, outside_cost = 0, i;
  gimple stmt = DR_STMT (elem->dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct data_reference *dr;

  FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
    {
      stmt = DR_STMT (dr);
      stmt_info = vinfo_for_stmt (stmt);
      /* For interleaving, only the alignment of the first access
         matters.  */
      if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
          && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
        continue;

      save_misalignment = DR_MISALIGNMENT (dr);
      vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
      vect_get_data_access_cost (dr, &inside_cost, &outside_cost);
      SET_DR_MISALIGNMENT (dr, save_misalignment);
    }

  outside_cost += vect_get_known_peeling_cost (loop_vinfo, elem->npeel, &dummy,
                    vect_get_single_scalar_iteraion_cost (loop_vinfo));

  if (inside_cost < min->inside_cost
      || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
    {
      min->inside_cost = inside_cost;
      min->outside_cost = outside_cost;
      min->peel_info.dr = elem->dr;
      min->peel_info.npeel = elem->npeel;
    }

  return 1;
}
/* Choose best peeling option by traversing peeling hash table and either
   choosing an option with the lowest cost (if cost model is enabled) or the
   option that aligns as many accesses as possible.  */
static struct data_reference *
vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
                                       unsigned int *npeel)
{
   struct _vect_peel_extended_info res;

   res.peel_info.dr = NULL;

   if (flag_vect_cost_model)
     {
       res.inside_cost = INT_MAX;
       res.outside_cost = INT_MAX;
       htab_traverse (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
                      vect_peeling_hash_get_lowest_cost, &res);
     }
   else
     {
       res.peel_info.count = 0;
       htab_traverse (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
                      vect_peeling_hash_get_most_frequent, &res);
     }

   *npeel = res.peel_info.npeel;
   return res.peel_info.dr;
}
/* Function vect_enhance_data_refs_alignment

   This pass will use loop versioning and loop peeling in order to enhance
   the alignment of data references in the loop.

   FOR NOW: we assume that whatever versioning/peeling takes place, only the
   original loop is to be vectorized.  Any other loops that are created by
   the transformations performed in this pass - are not supposed to be
   vectorized.  This restriction will be relaxed.

   This pass will require a cost model to guide it whether to apply peeling
   or versioning or a combination of the two.  For example, the scheme that
   Intel uses when given a loop with several memory accesses, is as follows:
   choose one memory access ('p') which alignment you want to force by doing
   peeling.  Then, either (1) generate a loop in which 'p' is aligned and all
   other accesses are not necessarily aligned, or (2) use loop versioning to
   generate one loop in which all accesses are aligned, and another loop in
   which only 'p' is necessarily aligned.

   ("Automatic Intra-Register Vectorization for the Intel Architecture",
   Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
   Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)

   Devising a cost model is the most critical aspect of this work.  It will
   guide us on which access to peel for, whether to use loop versioning, how
   many versions to create, etc.  The cost model will probably consist of
   generic considerations as well as target specific considerations (on
   powerpc for example, misaligned stores are more painful than misaligned
   loads).

   Here are the general steps involved in alignment enhancements:

     -- original loop, before alignment analysis:
        for (i=0; i<N; i++){
          x = q[i];                     # DR_MISALIGNMENT(q) = unknown
          p[i] = y;                     # DR_MISALIGNMENT(p) = unknown
        }

     -- After vect_compute_data_refs_alignment:
        for (i=0; i<N; i++){
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = unknown
        }

     -- Possibility 1: we do loop versioning:
     if (p is aligned) {
        for (i=0; i<N; i++){    # loop 1A
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = 0
        }
     }
     else {
        for (i=0; i<N; i++){    # loop 1B
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = unaligned
        }
     }

     -- Possibility 2: we do loop peeling:
     for (i = 0; i < 3; i++){   # (scalar loop, not to be vectorized).
        x = q[i];
        p[i] = y;
     }
     for (i = 3; i < N; i++){   # loop 2A
        x = q[i];                       # DR_MISALIGNMENT(q) = 0
        p[i] = y;                       # DR_MISALIGNMENT(p) = unknown
     }

     -- Possibility 3: combination of loop peeling and versioning:
     for (i = 0; i < 3; i++){   # (scalar loop, not to be vectorized).
        x = q[i];
        p[i] = y;
     }
     if (p is aligned) {
        for (i = 3; i<N; i++){  # loop 3A
          x = q[i];                     # DR_MISALIGNMENT(q) = 0
          p[i] = y;                     # DR_MISALIGNMENT(p) = 0
        }
     }
     else {
        for (i = 3; i<N; i++){  # loop 3B
          x = q[i];                     # DR_MISALIGNMENT(q) = 0
          p[i] = y;                     # DR_MISALIGNMENT(p) = unaligned
        }
     }

   These loops are later passed to loop_transform to be vectorized.  The
   vectorizer will use the alignment information to guide the transformation
   (whether to generate regular loads/stores, or with special handling for
   misalignment).  */
bool
vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
  VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  enum dr_alignment_support supportable_dr_alignment;
  struct data_reference *dr0 = NULL, *first_store = NULL;
  struct data_reference *dr;
  unsigned int i, j;
  bool do_peeling = false;
  bool do_versioning = false;
  bool stat;
  gimple stmt;
  stmt_vec_info stmt_info;
  int vect_versioning_for_alias_required;
  unsigned int npeel = 0;
  bool all_misalignments_unknown = true;
  unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned possible_npeel_number = 1;
  tree vectype;
  unsigned int nelements, mis, same_align_drs_max = 0;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_enhance_data_refs_alignment ===");
  /* While cost model enhancements are expected in the future, the high level
     view of the code at this time is as follows:

     A) If there is a misaligned access then see if peeling to align
        this access can make all data references satisfy
        vect_supportable_dr_alignment.  If so, update data structures
        as needed and return true.

     B) If peeling wasn't possible and there is a data reference with an
        unknown misalignment that does not satisfy
        vect_supportable_dr_alignment then see if loop versioning checks can
        be used to make all data references satisfy
        vect_supportable_dr_alignment.  If so, update data structures as
        needed and return true.

     C) If neither peeling nor versioning were successful then return false if
        any data reference does not satisfy vect_supportable_dr_alignment.

     D) Return true (all data references satisfy vect_supportable_dr_alignment).

     Note, Possibility 3 above (which is peeling and versioning together) is
     not being done at this time.  */
  /* (1) Peeling to force alignment.  */

  /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
     Considerations:
     + How many accesses will become aligned due to the peeling
     - How many accesses will become unaligned due to the peeling,
       and the cost of misaligned accesses.
     - The cost of peeling (the extra runtime checks, the increase
       in code size).  */
  FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
    {
      stmt = DR_STMT (dr);
      stmt_info = vinfo_for_stmt (stmt);

      if (!STMT_VINFO_RELEVANT (stmt_info))
        continue;

      /* For interleaving, only the alignment of the first access
         matters.  */
      if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
          && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
        continue;

      /* For invariant accesses there is nothing to enhance.  */
      if (integer_zerop (DR_STEP (dr)))
        continue;

      supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
      do_peeling = vector_alignment_reachable_p (dr);
      if (do_peeling)
        {
          if (known_alignment_for_access_p (dr))
            {
              unsigned int npeel_tmp;
              bool negative = tree_int_cst_compare (DR_STEP (dr),
                                                    size_zero_node) < 0;

              /* Save info about DR in the hash table.  */
              if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
                LOOP_VINFO_PEELING_HTAB (loop_vinfo) =
                          htab_create (1, vect_peeling_hash,
                                       vect_peeling_hash_eq, free);

              vectype = STMT_VINFO_VECTYPE (stmt_info);
              nelements = TYPE_VECTOR_SUBPARTS (vectype);
              mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
                                                TREE_TYPE (DR_REF (dr))));
              npeel_tmp = (negative
                           ? (mis - nelements) : (nelements - mis))
                          & (nelements - 1);

              /* For multiple types, it is possible that the bigger type access
                 will have more than one peeling option.  E.g., a loop with two
                 types: one of size (vector size / 4), and the other one of
                 size (vector size / 8).  The vectorization factor will be 8.
                 If both accesses are misaligned by 3, the first one needs one
                 scalar iteration to be aligned, and the second one needs 5.
                 But the first one will be aligned also by peeling 5 scalar
                 iterations, and in that case both accesses will be aligned.
                 Hence, except for the immediate peeling amount, we also want
                 to try to add full vector size, while we don't exceed
                 vectorization factor.
                 We do this automatically for cost model, since we calculate
                 cost for every peeling option.  */
              if (!flag_vect_cost_model)
                possible_npeel_number = vf / nelements;

              /* Handle the aligned case.  We may decide to align some other
                 access, making DR unaligned.  */
              if (DR_MISALIGNMENT (dr) == 0)
                {
                  npeel_tmp = 0;
                  if (!flag_vect_cost_model)
                    possible_npeel_number++;
                }

              for (j = 0; j < possible_npeel_number; j++)
                {
                  gcc_assert (npeel_tmp <= vf);
                  vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
                  npeel_tmp += nelements;
                }

              all_misalignments_unknown = false;
              /* Data-ref that was chosen for the case that all the
                 misalignments are unknown is not relevant anymore, since we
                 have a data-ref with known alignment.  */
              dr0 = NULL;
            }
          else
            {
              /* If we don't know all the misalignment values, we prefer
                 peeling for the data-ref that has the maximum number of
                 data-refs with the same alignment, unless the target prefers
                 to align stores over loads.  */
              if (all_misalignments_unknown)
                {
                  if (same_align_drs_max < VEC_length (dr_p,
                                     STMT_VINFO_SAME_ALIGN_REFS (stmt_info))
                      || !dr0)
                    {
                      same_align_drs_max = VEC_length (dr_p,
                                     STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
                      dr0 = dr;
                    }

                  if (!first_store && DR_IS_WRITE (dr))
                    first_store = dr;
                }

              /* If there are both known and unknown misaligned accesses in
                 the loop, we choose peeling amount according to the known
                 accesses.  */
              if (!supportable_dr_alignment)
                {
                  dr0 = dr;
                  if (!first_store && DR_IS_WRITE (dr))
                    first_store = dr;
                }
            }
        }
      else
        {
          if (!aligned_access_p (dr))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vector alignment may not be reachable");

              break;
            }
        }
    }
  vect_versioning_for_alias_required
    = LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo);

  /* Temporarily, if versioning for alias is required, we disable peeling
     until we support peeling and versioning.  Often peeling for alignment
     will require peeling for loop-bound, which in turn requires that we
     know how to adjust the loop ivs after the loop.  */
  if (vect_versioning_for_alias_required
      || !vect_can_advance_ivs_p (loop_vinfo)
      || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
    do_peeling = false;

  if (do_peeling && all_misalignments_unknown
      && vect_supportable_dr_alignment (dr0, false))
    {
      /* Check if the target requires to prefer stores over loads, i.e., if
         misaligned stores are more expensive than misaligned loads (taking
         drs with same alignment into account).  */
      if (first_store && DR_IS_READ (dr0))
        {
          unsigned int load_inside_cost = 0, load_outside_cost = 0;
          unsigned int store_inside_cost = 0, store_outside_cost = 0;
          unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
          unsigned int store_inside_penalty = 0, store_outside_penalty = 0;

          vect_get_data_access_cost (dr0, &load_inside_cost,
                                     &load_outside_cost);
          vect_get_data_access_cost (first_store, &store_inside_cost,
                                     &store_outside_cost);

          /* Calculate the penalty for leaving FIRST_STORE unaligned (by
             aligning the load DR0).  */
          load_inside_penalty = store_inside_cost;
          load_outside_penalty = store_outside_cost;
          for (i = 0; VEC_iterate (dr_p, STMT_VINFO_SAME_ALIGN_REFS
                                   (vinfo_for_stmt (DR_STMT (first_store))),
                                   i, dr);
               i++)
            if (DR_IS_READ (dr))
              {
                load_inside_penalty += load_inside_cost;
                load_outside_penalty += load_outside_cost;
              }
            else
              {
                load_inside_penalty += store_inside_cost;
                load_outside_penalty += store_outside_cost;
              }

          /* Calculate the penalty for leaving DR0 unaligned (by
             aligning the FIRST_STORE).  */
          store_inside_penalty = load_inside_cost;
          store_outside_penalty = load_outside_cost;
          for (i = 0; VEC_iterate (dr_p, STMT_VINFO_SAME_ALIGN_REFS
                                   (vinfo_for_stmt (DR_STMT (dr0))),
                                   i, dr);
               i++)
            if (DR_IS_READ (dr))
              {
                store_inside_penalty += load_inside_cost;
                store_outside_penalty += load_outside_cost;
              }
            else
              {
                store_inside_penalty += store_inside_cost;
                store_outside_penalty += store_outside_cost;
              }

          if (load_inside_penalty > store_inside_penalty
              || (load_inside_penalty == store_inside_penalty
                  && load_outside_penalty > store_outside_penalty))
            dr0 = first_store;
        }

      /* In case there are only loads with different unknown misalignments, use
         peeling only if it may help to align other accesses in the loop.  */
      if (!first_store && !VEC_length (dr_p, STMT_VINFO_SAME_ALIGN_REFS
                                            (vinfo_for_stmt (DR_STMT (dr0))))
          && vect_supportable_dr_alignment (dr0, false)
             != dr_unaligned_supported)
        do_peeling = false;
    }
  if (do_peeling && !dr0)
    {
      /* Peeling is possible, but there is no data access that is not supported
         unless aligned.  So we try to choose the best possible peeling.  */

      /* We should get here only if there are drs with known misalignment.  */
      gcc_assert (!all_misalignments_unknown);

      /* Choose the best peeling from the hash table.  */
      dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel);
      if (!dr0 || !npeel)
        do_peeling = false;
    }

  if (do_peeling)
    {
      stmt = DR_STMT (dr0);
      stmt_info = vinfo_for_stmt (stmt);
      vectype = STMT_VINFO_VECTYPE (stmt_info);
      nelements = TYPE_VECTOR_SUBPARTS (vectype);

      if (known_alignment_for_access_p (dr0))
        {
          bool negative = tree_int_cst_compare (DR_STEP (dr0),
                                                size_zero_node) < 0;
          if (!npeel)
            {
              /* Since it's known at compile time, compute the number of
                 iterations in the peeled loop (the peeling factor) for use in
                 updating DR_MISALIGNMENT values.  The peeling factor is the
                 vectorization factor minus the misalignment as an element
                 count.  */
              mis = DR_MISALIGNMENT (dr0);
              mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
              npeel = ((negative ? mis - nelements : nelements - mis)
                       & (nelements - 1));
            }

          /* For interleaved data access every iteration accesses all the
             members of the group, therefore we divide the number of iterations
             by the group size.  */
          stmt_info = vinfo_for_stmt (DR_STMT (dr0));
          if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
            npeel /= GROUP_SIZE (stmt_info);

          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "Try peeling by %d", npeel);
        }

      /* Ensure that all data refs can be vectorized after the peel.  */
      FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
        {
          int save_misalignment;

          if (dr == dr0)
            continue;

          stmt = DR_STMT (dr);
          stmt_info = vinfo_for_stmt (stmt);
          /* For interleaving, only the alignment of the first access
             matters.  */
          if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
              && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
            continue;

          save_misalignment = DR_MISALIGNMENT (dr);
          vect_update_misalignment_for_peel (dr, dr0, npeel);
          supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
          SET_DR_MISALIGNMENT (dr, save_misalignment);

          if (!supportable_dr_alignment)
            {
              do_peeling = false;
              break;
            }
        }

      if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
        {
          stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
          if (!stat)
            do_peeling = false;
          else
            return stat;
        }

      if (do_peeling)
        {
          /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
             If the misalignment of DR_i is identical to that of dr0 then set
             DR_MISALIGNMENT (DR_i) to zero.  If the misalignment of DR_i and
             dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
             by the peeling factor times the element size of DR_i (MOD the
             vectorization factor times the size).  Otherwise, the
             misalignment of DR_i must be set to unknown.  */
          FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
            if (dr != dr0)
              vect_update_misalignment_for_peel (dr, dr0, npeel);

          LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
          if (npeel)
            LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
          else
            LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = DR_MISALIGNMENT (dr0);
          SET_DR_MISALIGNMENT (dr0, 0);
          if (vect_print_dump_info (REPORT_ALIGNMENT))
            fprintf (vect_dump, "Alignment of access forced using peeling.");

          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "Peeling for alignment will be applied.");

          stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
          gcc_assert (stat);
          return stat;
        }
    }
  /* (2) Versioning to force alignment.  */

  /* Try versioning if:
     1) flag_tree_vect_loop_version is TRUE
     2) optimize loop for speed
     3) there is at least one unsupported misaligned data ref with an unknown
        misalignment, and
     4) all misaligned data refs with a known misalignment are supported, and
     5) the number of runtime alignment checks is within reason.  */

  do_versioning =
        flag_tree_vect_loop_version
        && optimize_loop_nest_for_speed_p (loop)
        && (!loop->inner); /* FORNOW */
  if (do_versioning)
    {
      FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
        {
          stmt = DR_STMT (dr);
          stmt_info = vinfo_for_stmt (stmt);

          /* For interleaving, only the alignment of the first access
             matters.  */
          if (aligned_access_p (dr)
              || (STMT_VINFO_STRIDED_ACCESS (stmt_info)
                  && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
            continue;

          supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);

          if (!supportable_dr_alignment)
            {
              gimple stmt;
              int mask;
              tree vectype;

              if (known_alignment_for_access_p (dr)
                  || VEC_length (gimple,
                                 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
                     >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
                {
                  do_versioning = false;
                  break;
                }

              stmt = DR_STMT (dr);
              vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
              gcc_assert (vectype);

              /* The rightmost bits of an aligned address must be zeros.
                 Construct the mask needed for this test.  For example,
                 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
                 mask must be 15 = 0xf.  */
              mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;

              /* FORNOW: use the same mask to test all potentially unaligned
                 references in the loop.  The vectorizer currently supports
                 a single vector size, see the reference to
                 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
                 vectorization factor is computed.  */
              gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
                          || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
              LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
              VEC_safe_push (gimple, heap,
                             LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo),
                             DR_STMT (dr));
            }
        }

      /* Versioning requires at least one misaligned data reference.  */
      if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
        do_versioning = false;
      else if (!do_versioning)
        VEC_truncate (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo), 0);
    }
1904 VEC(gimple,heap) *may_misalign_stmts
1905 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
1908 /* It can now be assumed that the data references in the statements
1909 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
1910 of the loop being vectorized. */
1911 FOR_EACH_VEC_ELT (gimple, may_misalign_stmts, i, stmt)
1913 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1914 dr = STMT_VINFO_DATA_REF (stmt_info);
1915 SET_DR_MISALIGNMENT (dr, 0);
1916 if (vect_print_dump_info (REPORT_ALIGNMENT))
1917 fprintf (vect_dump, "Alignment of access forced using versioning.");
1920 if (vect_print_dump_info (REPORT_DETAILS))
1921 fprintf (vect_dump, "Versioning for alignment will be applied.");
1923 /* Peeling and versioning can't be done together at this time. */
1924 gcc_assert (! (do_peeling && do_versioning));
1926 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1927 gcc_assert (stat);
1928 return stat;
1931 /* This point is reached if neither peeling nor versioning is being done. */
1932 gcc_assert (! (do_peeling || do_versioning));
1934 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1935 gcc_assert (stat);
1936 return stat;
1939 /* Function vect_find_same_alignment_drs.
1941 Update group and alignment relations according to the chosen
1942 vectorization factor. */
1945 vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
1946 loop_vec_info loop_vinfo)
1949 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1950 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1951 struct data_reference *dra = DDR_A (ddr);
1952 struct data_reference *drb = DDR_B (ddr);
1953 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
1954 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
1955 int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
1956 int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
1957 lambda_vector dist_v;
1958 unsigned int loop_depth;
1960 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1961 return;
1963 if (dra == drb)
1964 return;
1966 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1967 return;
1969 /* Loop-based vectorization and known data dependence. */
1970 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1971 return;
1973 /* Data-dependence analysis reports a distance vector of zero
1974 for data-references that overlap only in the first iteration
1975 but whose steps differ in sign (see PR45764).
1976 So as a sanity check require equal DR_STEP. */
1977 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
1978 return;
1980 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
1981 FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
1983 int dist = dist_v[loop_depth];
1985 if (vect_print_dump_info (REPORT_DR_DETAILS))
1986 fprintf (vect_dump, "dependence distance = %d.", dist);
1988 /* Same loop iteration. */
1989 if (dist == 0
1990 || (dist % vectorization_factor == 0 && dra_size == drb_size))
1992 /* Two references with distance zero have the same alignment. */
1993 VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a), drb);
1994 VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b), dra);
1995 if (vect_print_dump_info (REPORT_ALIGNMENT))
1996 fprintf (vect_dump, "accesses have the same alignment.");
1997 if (vect_print_dump_info (REPORT_DR_DETAILS))
1999 fprintf (vect_dump, "dependence distance modulo vf == 0 between ");
2000 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
2001 fprintf (vect_dump, " and ");
2002 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
2009 /* Function vect_analyze_data_refs_alignment
2011 Analyze the alignment of the data-references in the loop.
2012 Return FALSE if a data reference is found that cannot be vectorized. */
2015 vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
2016 bb_vec_info bb_vinfo)
2018 if (vect_print_dump_info (REPORT_DETAILS))
2019 fprintf (vect_dump, "=== vect_analyze_data_refs_alignment ===");
2021 /* Mark groups of data references with same alignment using
2022 data dependence information. */
2025 VEC (ddr_p, heap) *ddrs = LOOP_VINFO_DDRS (loop_vinfo);
2026 struct data_dependence_relation *ddr;
2029 FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
2030 vect_find_same_alignment_drs (ddr, loop_vinfo);
2033 if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
2035 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2037 "not vectorized: can't calculate alignment for data ref.");
2045 /* Analyze groups of strided accesses: check that DR belongs to a group of
2046 strided accesses of legal size, step, etc. Detect gaps, single element
2047 interleaving, and other special cases. Set strided access info.
2048 Collect groups of strided stores for further use in SLP analysis. */
2051 vect_analyze_group_access (struct data_reference *dr)
2053 tree step = DR_STEP (dr);
2054 tree scalar_type = TREE_TYPE (DR_REF (dr));
2055 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2056 gimple stmt = DR_STMT (dr);
2057 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2058 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2059 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2060 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2061 HOST_WIDE_INT stride, last_accessed_element = 1;
2062 bool slp_impossible = false;
2063 struct loop *loop = NULL;
2065 if (loop_vinfo)
2066 loop = LOOP_VINFO_LOOP (loop_vinfo);
2068 /* For interleaving, STRIDE is STEP counted in elements, i.e., the size of the
2069 interleaving group (including gaps). */
2070 stride = dr_step / type_size;
2072 /* A non-consecutive access is possible only if it is part of interleaving. */
2073 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
2075 /* Check whether this DR is part of interleaving, and is a single
2076 element of the group that is accessed in the loop. */
2078 /* Gaps are supported only for loads. STEP must be a multiple of the type
2079 size. The size of the group must be a power of 2. */
2080 if (DR_IS_READ (dr)
2081 && (dr_step % type_size) == 0
2082 && stride > 0
2083 && exact_log2 (stride) != -1)
2085 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
2086 GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
2087 if (vect_print_dump_info (REPORT_DR_DETAILS))
2089 fprintf (vect_dump, "Detected single element interleaving ");
2090 print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
2091 fprintf (vect_dump, " step ");
2092 print_generic_expr (vect_dump, step, TDF_SLIM);
2095 if (loop_vinfo)
2097 if (vect_print_dump_info (REPORT_DETAILS))
2098 fprintf (vect_dump, "Data access with gaps requires scalar "
2099 "epilogue loop");
2100 if (loop->inner)
2102 if (vect_print_dump_info (REPORT_DETAILS))
2103 fprintf (vect_dump, "Peeling for outer loop is not"
2104 " supported");
2105 return false;
2108 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2111 return true;
2114 if (vect_print_dump_info (REPORT_DETAILS))
2116 fprintf (vect_dump, "not consecutive access ");
2117 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2120 if (bb_vinfo)
2122 /* Mark the statement as unvectorizable. */
2123 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2124 return true;
2127 return false;
2130 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
2132 /* First stmt in the interleaving chain. Check the chain. */
2133 gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
2134 struct data_reference *data_ref = dr;
2135 unsigned int count = 1;
2137 tree prev_init = DR_INIT (data_ref);
2139 HOST_WIDE_INT diff, count_in_bytes, gaps = 0;
2143 /* Skip same data-refs. If two or more stmts share a data-ref
2144 (supported only for loads), we vectorize only the first stmt, and
2145 the rest get their vectorized loads from the first one. */
2147 if (!tree_int_cst_compare (DR_INIT (data_ref),
2148 DR_INIT (STMT_VINFO_DATA_REF (
2149 vinfo_for_stmt (next)))))
2151 if (DR_IS_WRITE (data_ref))
2153 if (vect_print_dump_info (REPORT_DETAILS))
2154 fprintf (vect_dump, "Two store stmts share the same dr.");
2155 return false;
2158 /* Check that there are no load-store dependencies for these loads,
2159 to prevent a load-store-load sequence to the same location. */
2160 if (GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next))
2161 || GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev)))
2163 if (vect_print_dump_info (REPORT_DETAILS))
2165 "READ_WRITE dependence in interleaving.");
2169 /* For loads, use the same data-ref load. */
2170 GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
2172 prev = next;
2173 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2174 continue;
2177 prev = next;
2179 /* Check that all the accesses have the same STEP. */
2180 next_step = DR_STEP (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
2181 if (tree_int_cst_compare (step, next_step))
2183 if (vect_print_dump_info (REPORT_DETAILS))
2184 fprintf (vect_dump, "not consecutive access in interleaving");
2185 return false;
2188 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
2189 /* Check that the distance between two accesses is equal to the type
2190 size. Otherwise, we have gaps. */
2191 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2192 - TREE_INT_CST_LOW (prev_init)) / type_size;
2193 if (diff != 1)
2195 /* FORNOW: SLP of accesses with gaps is not supported. */
2196 slp_impossible = true;
2197 if (DR_IS_WRITE (data_ref))
2199 if (vect_print_dump_info (REPORT_DETAILS))
2200 fprintf (vect_dump, "interleaved store with gaps");
2201 return false;
2204 gaps += diff - 1;
2207 last_accessed_element += diff;
2209 /* Store the gap from the previous member of the group. If there is no
2210 gap in the access, GROUP_GAP is always 1. */
2211 GROUP_GAP (vinfo_for_stmt (next)) = diff;
2213 prev_init = DR_INIT (data_ref);
2214 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2215 /* Count the number of data-refs in the chain. */
2216 count++;
2219 /* COUNT is the number of accesses found; we multiply it by the size of
2220 the type to get COUNT_IN_BYTES. */
2221 count_in_bytes = type_size * count;
2223 /* Check that the size of the interleaving (including gaps) is not
2224 greater than STEP. */
2225 if (dr_step && dr_step < count_in_bytes + gaps * type_size)
2227 if (vect_print_dump_info (REPORT_DETAILS))
2229 fprintf (vect_dump, "interleaving size is greater than step for ");
2230 print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
2232 return false;
2235 /* Check that the size of the interleaving is equal to STEP for stores,
2236 i.e., that there are no gaps. */
2237 if (dr_step && dr_step != count_in_bytes)
2239 if (DR_IS_READ (dr))
2241 slp_impossible = true;
2242 /* There is a gap after the last load in the group. This gap is a
2243 difference between the stride and the number of elements. When
2244 there is no gap, this difference should be 0. */
2245 GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count;
2247 else
2249 if (vect_print_dump_info (REPORT_DETAILS))
2250 fprintf (vect_dump, "interleaved store with gaps");
2251 return false;
2255 /* Check that STEP is a multiple of type size. */
2256 if (dr_step && (dr_step % type_size) != 0)
2258 if (vect_print_dump_info (REPORT_DETAILS))
2260 fprintf (vect_dump, "step is not a multiple of type size: step ");
2261 print_generic_expr (vect_dump, step, TDF_SLIM);
2262 fprintf (vect_dump, " size ");
2263 print_generic_expr (vect_dump, TYPE_SIZE_UNIT (scalar_type),
2264 TDF_SLIM);
2266 return false;
2269 if (stride == 0)
2270 stride = count;
2272 GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
2273 if (vect_print_dump_info (REPORT_DETAILS))
2274 fprintf (vect_dump, "Detected interleaving of size %d", (int)stride);
2276 /* SLP: create an SLP data structure for every interleaving group of
2277 stores for further analysis in vect_analyse_slp. */
2278 if (DR_IS_WRITE (dr) && !slp_impossible)
2280 if (loop_vinfo)
2281 VEC_safe_push (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo),
2282 stmt);
2283 if (bb_vinfo)
2284 VEC_safe_push (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo),
2285 stmt);
2288 /* There is a gap at the end of the group. */
2289 if (stride - last_accessed_element > 0 && loop_vinfo)
2291 if (vect_print_dump_info (REPORT_DETAILS))
2292 fprintf (vect_dump, "Data access with gaps requires scalar "
2293 "epilogue loop");
2294 if (loop->inner)
2296 if (vect_print_dump_info (REPORT_DETAILS))
2297 fprintf (vect_dump, "Peeling for outer loop is not supported");
2298 return false;
2301 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2304 return true;
2309 /* Analyze the access pattern of the data-reference DR.
2310 In case of non-consecutive accesses call vect_analyze_group_access() to
2311 analyze groups of strided accesses. */
2314 vect_analyze_data_ref_access (struct data_reference *dr)
2316 tree step = DR_STEP (dr);
2317 tree scalar_type = TREE_TYPE (DR_REF (dr));
2318 gimple stmt = DR_STMT (dr);
2319 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2320 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2321 struct loop *loop = NULL;
2322 HOST_WIDE_INT dr_step;
2324 if (loop_vinfo)
2325 loop = LOOP_VINFO_LOOP (loop_vinfo);
2327 if (loop_vinfo && !step)
2329 if (vect_print_dump_info (REPORT_DETAILS))
2330 fprintf (vect_dump, "bad data-ref access in loop");
2331 return false;
2334 /* Allow invariant loads in loops. */
2335 dr_step = TREE_INT_CST_LOW (step);
2336 if (loop_vinfo && dr_step == 0)
2338 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2339 return DR_IS_READ (dr);
2342 if (loop && nested_in_vect_loop_p (loop, stmt))
2344 /* Interleaved accesses are not yet supported within outer-loop
2345 vectorization for references in the inner-loop. */
2346 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2348 /* For the rest of the analysis we use the outer-loop step. */
2349 step = STMT_VINFO_DR_STEP (stmt_info);
2350 dr_step = TREE_INT_CST_LOW (step);
2352 if (dr_step == 0)
2354 if (vect_print_dump_info (REPORT_ALIGNMENT))
2355 fprintf (vect_dump, "zero step in outer loop.");
2356 if (DR_IS_READ (dr))
2357 return true;
2358 else
2359 return false;
2363 /* Consecutive? */
2364 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2365 || (dr_step < 0
2366 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2368 /* Mark that it is not interleaving. */
2369 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2370 return true;
2373 if (loop && nested_in_vect_loop_p (loop, stmt))
2375 if (vect_print_dump_info (REPORT_ALIGNMENT))
2376 fprintf (vect_dump, "strided access in outer loop.");
2377 return false;
2380 /* Non-consecutive access - check whether it is part of an interleaving group. */
2381 return vect_analyze_group_access (dr);
2385 /* Function vect_analyze_data_ref_accesses.
2387 Analyze the access pattern of all the data references in the loop.
2389 FORNOW: the only access pattern that is considered vectorizable is a
2390 simple step 1 (consecutive) access.
2392 FORNOW: handle only arrays and pointer accesses. */
2395 vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
2398 VEC (data_reference_p, heap) *datarefs;
2399 struct data_reference *dr;
2401 if (vect_print_dump_info (REPORT_DETAILS))
2402 fprintf (vect_dump, "=== vect_analyze_data_ref_accesses ===");
2404 if (loop_vinfo)
2405 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2406 else
2407 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2409 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
2410 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2411 && !vect_analyze_data_ref_access (dr))
2413 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2414 fprintf (vect_dump, "not vectorized: complicated access pattern.");
2416 if (bb_vinfo)
2418 /* Mark the statement as not vectorizable. */
2419 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2420 continue;
2422 else
2423 return false;
2426 return true;
2429 /* Function vect_prune_runtime_alias_test_list.
2431 Prune a list of ddrs to be tested at run-time by versioning for alias.
2432 Return FALSE if the resulting list of ddrs is longer than allowed by
2433 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
2436 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
2438 VEC (ddr_p, heap) * ddrs =
2439 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2442 if (vect_print_dump_info (REPORT_DETAILS))
2443 fprintf (vect_dump, "=== vect_prune_runtime_alias_test_list ===");
2445 for (i = 0; i < VEC_length (ddr_p, ddrs); )
2447 bool found = false;
2448 ddr_p ddr_i;
2450 ddr_i = VEC_index (ddr_p, ddrs, i);
2453 for (j = 0; j < i; j++)
2455 ddr_p ddr_j = VEC_index (ddr_p, ddrs, j);
2457 if (vect_vfa_range_equal (ddr_i, ddr_j))
2459 if (vect_print_dump_info (REPORT_DR_DETAILS))
2461 fprintf (vect_dump, "found equal ranges ");
2462 print_generic_expr (vect_dump, DR_REF (DDR_A (ddr_i)), TDF_SLIM);
2463 fprintf (vect_dump, ", ");
2464 print_generic_expr (vect_dump, DR_REF (DDR_B (ddr_i)), TDF_SLIM);
2465 fprintf (vect_dump, " and ");
2466 print_generic_expr (vect_dump, DR_REF (DDR_A (ddr_j)), TDF_SLIM);
2467 fprintf (vect_dump, ", ");
2468 print_generic_expr (vect_dump, DR_REF (DDR_B (ddr_j)), TDF_SLIM);
2470 found = true;
2471 break;
2475 if (found)
2477 VEC_ordered_remove (ddr_p, ddrs, i);
2478 continue;
2480 i++;
2483 if (VEC_length (ddr_p, ddrs) >
2484 (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
2486 if (vect_print_dump_info (REPORT_DR_DETAILS))
2489 "disable versioning for alias - max number of generated "
2490 "checks exceeded.");
2493 VEC_truncate (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), 0);
2494 return false;
2497 return true;
2501 /* Check whether a non-affine read in stmt is suitable for gather load
2502 and if so, return a builtin decl for that operation. */
2505 vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
2506 tree *offp, int *scalep)
2508 HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
2509 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2510 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2511 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2512 tree offtype = NULL_TREE;
2513 tree decl, base, off;
2514 enum machine_mode pmode;
2515 int punsignedp, pvolatilep;
2517 /* The gather builtins need an address of the form
2518 loop_invariant + vector * {1, 2, 4, 8}
2519 or
2520 loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
2521 Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
2522 of loop invariants/SSA_NAMEs defined in the loop, with casts,
2523 multiplications and additions in it. To get a vector, we need
2524 a single SSA_NAME that will be defined in the loop and will
2525 contain everything that is not loop invariant and that can be
2526 vectorized. The following code attempts to find such a preexisting
2527 SSA_NAME OFF and put the loop invariants into a tree BASE
2528 that can be gimplified before the loop. */
2529 base = get_inner_reference (DR_REF (dr), &pbitsize, &pbitpos, &off,
2530 &pmode, &punsignedp, &pvolatilep, false);
2531 gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
2533 if (TREE_CODE (base) == MEM_REF)
2535 if (!integer_zerop (TREE_OPERAND (base, 1)))
2537 if (off == NULL_TREE)
2539 double_int moff = mem_ref_offset (base);
2540 off = double_int_to_tree (sizetype, moff);
2542 else
2543 off = size_binop (PLUS_EXPR, off,
2544 fold_convert (sizetype, TREE_OPERAND (base, 1)));
2546 base = TREE_OPERAND (base, 0);
2548 else
2549 base = build_fold_addr_expr (base);
2551 if (off == NULL_TREE)
2552 off = size_zero_node;
2554 /* If BASE is not loop invariant, then either OFF is 0 (and we start
2555 with just the constant offset in the loop-invariant BASE, continuing
2556 with base as OFF), or we give up.
2557 We could handle that case by gimplifying the addition of base + off
2558 into some SSA_NAME and use that as off, but for now punt. */
2559 if (!expr_invariant_in_loop_p (loop, base))
2561 if (!integer_zerop (off))
2562 return NULL_TREE;
2563 off = base;
2564 base = size_int (pbitpos / BITS_PER_UNIT);
2566 /* Otherwise put base + constant offset into the loop invariant BASE
2567 and continue with OFF. */
2568 else
2570 base = fold_convert (sizetype, base);
2571 base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
2574 /* OFF at this point may be either an SSA_NAME or some tree expression
2575 from get_inner_reference. Try to peel off loop invariants from it
2576 into BASE as long as possible. */
2578 while (offtype == NULL_TREE)
2580 enum tree_code code;
2581 tree op0, op1, add = NULL_TREE;
2583 if (TREE_CODE (off) == SSA_NAME)
2585 gimple def_stmt = SSA_NAME_DEF_STMT (off);
2587 if (expr_invariant_in_loop_p (loop, off))
2588 return NULL_TREE;
2590 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
2591 break;
2593 op0 = gimple_assign_rhs1 (def_stmt);
2594 code = gimple_assign_rhs_code (def_stmt);
2595 op1 = gimple_assign_rhs2 (def_stmt);
2597 else
2599 if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
2600 break;
2601 code = TREE_CODE (off);
2602 extract_ops_from_tree (off, &code, &op0, &op1);
2604 switch (code)
2606 case POINTER_PLUS_EXPR:
2607 case PLUS_EXPR:
2608 if (expr_invariant_in_loop_p (loop, op0))
2610 add = op0;
2611 off = op1;
2612 do_add:
2613 add = fold_convert (sizetype, add);
2614 if (scale != 1)
2615 add = size_binop (MULT_EXPR, add, size_int (scale));
2616 base = size_binop (PLUS_EXPR, base, add);
2617 continue;
2619 if (expr_invariant_in_loop_p (loop, op1))
2621 add = op1;
2622 off = op0;
2623 goto do_add;
2625 break;
2626 case MINUS_EXPR:
2627 if (expr_invariant_in_loop_p (loop, op1))
2629 add = fold_convert (sizetype, op1);
2630 add = size_binop (MINUS_EXPR, size_zero_node, add);
2631 off = op0;
2632 goto do_add;
2634 break;
2635 case MULT_EXPR:
2636 if (scale == 1 && host_integerp (op1, 0))
2638 scale = tree_low_cst (op1, 0);
2639 off = op0;
2640 continue;
2642 break;
2643 case SSA_NAME:
2644 off = op0;
2645 continue;
2646 CASE_CONVERT:
2647 if (!POINTER_TYPE_P (TREE_TYPE (op0))
2648 && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2649 break;
2650 if (TYPE_PRECISION (TREE_TYPE (op0))
2651 == TYPE_PRECISION (TREE_TYPE (off)))
2653 off = op0;
2654 continue;
2656 if (TYPE_PRECISION (TREE_TYPE (op0))
2657 < TYPE_PRECISION (TREE_TYPE (off)))
2659 off = op0;
2660 offtype = TREE_TYPE (off);
2661 STRIP_NOPS (off);
2662 continue;
2664 break;
2665 default:
2666 break;
2671 /* If at the end OFF still isn't an SSA_NAME or isn't
2672 defined in the loop, punt. */
2673 if (TREE_CODE (off) != SSA_NAME
2674 || expr_invariant_in_loop_p (loop, off))
2675 return NULL_TREE;
2677 if (offtype == NULL_TREE)
2678 offtype = TREE_TYPE (off);
2680 decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
2681 offtype, scale);
2682 if (decl == NULL_TREE)
2683 return NULL_TREE;
2685 if (basep)
2686 *basep = base;
2687 if (offp)
2688 *offp = off;
2689 if (scalep)
2690 *scalep = scale;
2692 return decl;
2695 /* Function vect_analyze_data_refs.
2697 Find all the data references in the loop or basic block.
2699 The general structure of the analysis of data refs in the vectorizer is as
2700 follows:
2701 1- vect_analyze_data_refs(loop/bb): call
2702 compute_data_dependences_for_loop/bb to find and analyze all data-refs
2703 in the loop/bb and their dependences.
2704 2- vect_analyze_dependences(): apply dependence testing using ddrs.
2705 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
2706 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
2711 vect_analyze_data_refs (loop_vec_info loop_vinfo,
2712 bb_vec_info bb_vinfo,
2713 int *min_vf)
2715 struct loop *loop = NULL;
2716 basic_block bb = NULL;
2718 VEC (data_reference_p, heap) *datarefs;
2719 struct data_reference *dr;
2721 bool res, stop_bb_analysis = false;
2723 if (vect_print_dump_info (REPORT_DETAILS))
2724 fprintf (vect_dump, "=== vect_analyze_data_refs ===\n");
2726 if (loop_vinfo)
2728 loop = LOOP_VINFO_LOOP (loop_vinfo);
2729 res = compute_data_dependences_for_loop
2730 (loop, true,
2731 &LOOP_VINFO_LOOP_NEST (loop_vinfo),
2732 &LOOP_VINFO_DATAREFS (loop_vinfo),
2733 &LOOP_VINFO_DDRS (loop_vinfo));
2735 if (!res)
2737 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2738 fprintf (vect_dump, "not vectorized: loop contains function calls"
2739 " or data references that cannot be analyzed");
2743 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2745 else
2747 bb = BB_VINFO_BB (bb_vinfo);
2748 res = compute_data_dependences_for_bb (bb, true,
2749 &BB_VINFO_DATAREFS (bb_vinfo),
2750 &BB_VINFO_DDRS (bb_vinfo));
2752 if (!res)
2753 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2754 fprintf (vect_dump, "not vectorized: basic block contains function"
2755 " calls or data references that cannot be analyzed");
2759 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2762 /* Go through the data-refs, check that the analysis succeeded. Update
2763 pointer from stmt_vec_info struct to DR and vectype. */
2765 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
2767 gimple stmt;
2768 stmt_vec_info stmt_info;
2769 tree base, offset, init;
2770 bool gather = false;
2773 if (!dr || !DR_REF (dr))
2775 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2776 fprintf (vect_dump, "not vectorized: unhandled data-ref ");
2777 return false;
2781 stmt = DR_STMT (dr);
2782 stmt_info = vinfo_for_stmt (stmt);
2784 if (stop_bb_analysis)
2786 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2787 continue;
2790 /* Check that analysis of the data-ref succeeded. */
2791 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
2792 || !DR_STEP (dr))
2794 /* If the target supports vector gather loads, see if they can
2795 be used. */
2796 if (loop_vinfo
2797 && DR_IS_READ (dr)
2798 && !TREE_THIS_VOLATILE (DR_REF (dr))
2799 && targetm.vectorize.builtin_gather != NULL
2800 && !nested_in_vect_loop_p (loop, stmt))
2802 struct data_reference *newdr
2803 = create_data_ref (NULL, loop_containing_stmt (stmt),
2804 DR_REF (dr), stmt, true);
2805 gcc_assert (newdr != NULL && DR_REF (newdr));
2806 if (DR_BASE_ADDRESS (newdr)
2807 && DR_OFFSET (newdr)
2808 && DR_INIT (newdr)
2809 && DR_STEP (newdr)
2810 && integer_zerop (DR_STEP (newdr)))
2812 dr = newdr;
2813 gather = true;
2815 else
2816 free_data_ref (newdr);
2819 if (!gather)
2821 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2823 fprintf (vect_dump, "not vectorized: data ref analysis "
2824 "failed ");
2825 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2828 if (bb_vinfo)
2830 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2831 stop_bb_analysis = true;
2832 continue;
2835 return false;
2839 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
2841 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2842 fprintf (vect_dump, "not vectorized: base addr of dr is a "
2843 "constant");
2845 if (bb_vinfo)
2847 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2848 stop_bb_analysis = true;
2849 continue;
2852 return false;
2857 if (TREE_THIS_VOLATILE (DR_REF (dr)))
2859 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2861 fprintf (vect_dump, "not vectorized: volatile type ");
2862 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2865 if (bb_vinfo)
2867 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2868 stop_bb_analysis = true;
2869 continue;
2872 return false;
2875 base = unshare_expr (DR_BASE_ADDRESS (dr));
2876 offset = unshare_expr (DR_OFFSET (dr));
2877 init = unshare_expr (DR_INIT (dr));
2879 if (stmt_can_throw_internal (stmt))
2881 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2883 fprintf (vect_dump, "not vectorized: statement can throw an "
2884 "exception ");
2885 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2888 if (bb_vinfo)
2890 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2891 stop_bb_analysis = true;
2892 continue;
2895 return false;
2900 if (is_gimple_call (stmt))
2902 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2904 fprintf (vect_dump, "not vectorized: dr in a call ");
2905 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2908 if (bb_vinfo)
2910 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2911 stop_bb_analysis = true;
2912 continue;
2915 return false;
2920 /* Update DR field in stmt_vec_info struct. */
2922 /* If the dataref is in an inner-loop of the loop that is considered
2923 for vectorization, we also want to analyze the access relative to
2924 the outer-loop (DR contains information only relative to the
2925 inner-most enclosing loop). We do that by building a reference to the
2926 first location accessed by the inner-loop, and analyze it relative to
2927 the outer-loop. */
2928 if (loop && nested_in_vect_loop_p (loop, stmt))
2930 tree outer_step, outer_base, outer_init;
2931 HOST_WIDE_INT pbitsize, pbitpos;
2932 tree poffset;
2933 enum machine_mode pmode;
2934 int punsignedp, pvolatilep;
2935 affine_iv base_iv, offset_iv;
2936 tree dinit;
2938 /* Build a reference to the first location accessed by the
2939 inner-loop: *(BASE+INIT). (The first location is actually
2940 BASE+INIT+OFFSET, but we add OFFSET separately later). */
2941 tree inner_base = build_fold_indirect_ref
2942 (fold_build_pointer_plus (base, init));
2944 if (vect_print_dump_info (REPORT_DETAILS))
2946 fprintf (vect_dump, "analyze in outer-loop: ");
2947 print_generic_expr (vect_dump, inner_base, TDF_SLIM);
2950 outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
2951 &poffset, &pmode, &punsignedp, &pvolatilep, false);
2952 gcc_assert (outer_base != NULL_TREE);
2954 if (pbitpos % BITS_PER_UNIT != 0)
2956 if (vect_print_dump_info (REPORT_DETAILS))
2957 fprintf (vect_dump, "failed: bit offset alignment.\n");
2958 return false;
2961 outer_base = build_fold_addr_expr (outer_base);
2962 if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
2963 &base_iv, false))
2965 if (vect_print_dump_info (REPORT_DETAILS))
2966 fprintf (vect_dump, "failed: evolution of base is not affine.\n");
2967 return false;
2970 if (offset)
2972 if (poffset)
2973 poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
2974 poffset);
2975 else
2976 poffset = offset;
2979 if (!poffset)
2981 offset_iv.base = ssize_int (0);
2982 offset_iv.step = ssize_int (0);
2984 else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
2985 &offset_iv, false))
2987 if (vect_print_dump_info (REPORT_DETAILS))
2988 fprintf (vect_dump, "evolution of offset is not affine.\n");
2989 return false;
2992 outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
2993 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
2994 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
2995 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
2996 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
2998 outer_step = size_binop (PLUS_EXPR,
2999 fold_convert (ssizetype, base_iv.step),
3000 fold_convert (ssizetype, offset_iv.step));
3002 STMT_VINFO_DR_STEP (stmt_info) = outer_step;
3003 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
3004 STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
3005 STMT_VINFO_DR_INIT (stmt_info) = outer_init;
3006 STMT_VINFO_DR_OFFSET (stmt_info) =
3007 fold_convert (ssizetype, offset_iv.base);
3008 STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
3009 size_int (highest_pow2_factor (offset_iv.base));
3011 if (vect_print_dump_info (REPORT_DETAILS))
3013 fprintf (vect_dump, "\touter base_address: ");
3014 print_generic_expr (vect_dump, STMT_VINFO_DR_BASE_ADDRESS (stmt_info), TDF_SLIM);
3015 fprintf (vect_dump, "\n\touter offset from base address: ");
3016 print_generic_expr (vect_dump, STMT_VINFO_DR_OFFSET (stmt_info), TDF_SLIM);
3017 fprintf (vect_dump, "\n\touter constant offset from base address: ");
3018 print_generic_expr (vect_dump, STMT_VINFO_DR_INIT (stmt_info), TDF_SLIM);
3019 fprintf (vect_dump, "\n\touter step: ");
3020 print_generic_expr (vect_dump, STMT_VINFO_DR_STEP (stmt_info), TDF_SLIM);
3021 fprintf (vect_dump, "\n\touter aligned to: ");
3022 print_generic_expr (vect_dump, STMT_VINFO_DR_ALIGNED_TO (stmt_info), TDF_SLIM);
3026 if (STMT_VINFO_DATA_REF (stmt_info))
3028 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
3031 "not vectorized: more than one data ref in stmt: ");
3032 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
3035 if (bb_vinfo)
3037 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
3038 stop_bb_analysis = true;
3039 continue;
3042 return false;
3047 STMT_VINFO_DATA_REF (stmt_info) = dr;
3049 /* Set vectype for STMT. */
3050 scalar_type = TREE_TYPE (DR_REF (dr));
3051 STMT_VINFO_VECTYPE (stmt_info) =
3052 get_vectype_for_scalar_type (scalar_type);
3053 if (!STMT_VINFO_VECTYPE (stmt_info))
3055 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
3058 "not vectorized: no vectype for stmt: ");
3059 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
3060 fprintf (vect_dump, " scalar_type: ");
3061 print_generic_expr (vect_dump, scalar_type, TDF_DETAILS);
3064 if (bb_vinfo)
3066 /* Mark the statement as not vectorizable. */
3067 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
3068 stop_bb_analysis = true;
3069 continue;
3071 else
3073 if (gather)
3074 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3075 free_data_ref (dr);
3077 return false;
3080 /* Adjust the minimal vectorization factor according to the
3081 vector type. */
3082 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
3083 if (vf > *min_vf)
3084 *min_vf = vf;
3086 if (gather)
3088 unsigned int j, k, n;
3089 struct data_reference *olddr
3090 = VEC_index (data_reference_p, datarefs, i);
3091 VEC (ddr_p, heap) *ddrs = LOOP_VINFO_DDRS (loop_vinfo);
3092 struct data_dependence_relation *ddr, *newddr;
3093 bool bad = false;
3094 tree off;
3095 VEC (loop_p, heap) *nest = LOOP_VINFO_LOOP_NEST (loop_vinfo);
3097 if (!vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL)
3098 || get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE)
3100 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
3103 "not vectorized: not suitable for gather ");
3104 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
3109 n = VEC_length (data_reference_p, datarefs) - 1;
3110 for (j = 0, k = i - 1; j < i; j++)
3112 ddr = VEC_index (ddr_p, ddrs, k);
3113 gcc_assert (DDR_B (ddr) == olddr);
3114 newddr = initialize_data_dependence_relation (DDR_A (ddr), dr,
3115 nest);
3116 VEC_replace (ddr_p, ddrs, k, newddr);
3117 free_dependence_relation (ddr);
3118 if (!bad
3119 && DR_IS_WRITE (DDR_A (newddr))
3120 && DDR_ARE_DEPENDENT (newddr) != chrec_known)
3121 bad = true;
3122 k += --n;
3125 k++;
3126 n = k + VEC_length (data_reference_p, datarefs) - i - 1;
3127 for (; k < n; k++)
3129 ddr = VEC_index (ddr_p, ddrs, k);
3130 gcc_assert (DDR_A (ddr) == olddr);
3131 newddr = initialize_data_dependence_relation (dr, DDR_B (ddr),
3132 nest);
3133 VEC_replace (ddr_p, ddrs, k, newddr);
3134 free_dependence_relation (ddr);
3135 if (!bad
3136 && DR_IS_WRITE (DDR_B (newddr))
3137 && DDR_ARE_DEPENDENT (newddr) != chrec_known)
3138 bad = true;
3141 k = VEC_length (ddr_p, ddrs)
3142 - VEC_length (data_reference_p, datarefs) + i;
3143 ddr = VEC_index (ddr_p, ddrs, k);
3144 gcc_assert (DDR_A (ddr) == olddr && DDR_B (ddr) == olddr);
3145 newddr = initialize_data_dependence_relation (dr, dr, nest);
3146 VEC_replace (ddr_p, ddrs, k, newddr);
3147 free_dependence_relation (ddr);
3148 VEC_replace (data_reference_p, datarefs, i, dr);
3150 if (bad)
3152 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
3154 fprintf (vect_dump,
3155 "not vectorized: data dependence conflict"
3156 " prevents gather");
3157 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
3159 return false;
3162 STMT_VINFO_GATHER_P (stmt_info) = true;
3166 return true;
3170 /* Function vect_get_new_vect_var.
3172 Returns a name for a new variable. The current naming scheme appends the
3173 prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
3174 the name of vectorizer generated variables, and appends that to NAME if
3175 provided. */
3178 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
3185 case vect_simple_var:
3186 prefix = "vect_";
3187 break;
3188 case vect_scalar_var:
3189 prefix = "stmp_";
3190 break;
3191 case vect_pointer_var:
3192 prefix = "vect_p";
3193 break;
3194 default:
3195 gcc_unreachable ();
3198 if (name)
3200 char* tmp = concat (prefix, name, NULL);
3201 new_vect_var = create_tmp_var (type, tmp);
3202 free (tmp);
3204 else
3205 new_vect_var = create_tmp_var (type, prefix);
3207 /* Mark vector typed variable as a gimple register variable. */
3208 if (TREE_CODE (type) == VECTOR_TYPE)
3209 DECL_GIMPLE_REG_P (new_vect_var) = true;
3211 return new_vect_var;
3215 /* Function vect_create_addr_base_for_vector_ref.
3217 Create an expression that computes the address of the first memory location
3218 that will be accessed for a data reference.
3221 STMT: The statement containing the data reference.
3222 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
3223 OFFSET: Optional. If supplied, it is added to the initial address.
3224 LOOP: Specify relative to which loop-nest should the address be computed.
3225 For example, when the dataref is in an inner-loop nested in an
3226 outer-loop that is now being vectorized, LOOP can be either the
3227 outer-loop, or the inner-loop. The first memory location accessed
3228 by the following dataref ('in' points to short):
3230 for (i=0; i<N; i++)
3231 for (j=0; j<M; j++)
3232 s += in[i+j]
3234 is as follows:
3235 if LOOP=i_loop: &in (relative to i_loop)
3236 if LOOP=j_loop: &in+i*2B (relative to j_loop)
3239 1. Return an SSA_NAME whose value is the address of the memory location of
3240 the first vector of the data reference.
3241 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
3242 these statement(s) which define the returned SSA_NAME.
3244 FORNOW: We are only handling array accesses with step 1. */
3247 vect_create_addr_base_for_vector_ref (gimple stmt,
3248 gimple_seq *new_stmt_list,
3249 tree offset,
3250 struct loop *loop)
3252 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3253 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3254 tree data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
3255 tree base_name;
3256 tree data_ref_base_var;
3257 tree vec_stmt;
3258 tree addr_base, addr_expr;
3259 tree dest;
3260 gimple_seq seq = NULL;
3261 tree base_offset = unshare_expr (DR_OFFSET (dr));
3262 tree init = unshare_expr (DR_INIT (dr));
3263 tree vect_ptr_type;
3264 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
3265 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3266 tree base;
3268 if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
3270 struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
3272 gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
3274 data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3275 base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
3276 init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
3279 if (loop_vinfo)
3280 base_name = build_fold_indirect_ref (data_ref_base);
3281 else
3283 base_offset = ssize_int (0);
3284 init = ssize_int (0);
3285 base_name = build_fold_indirect_ref (unshare_expr (DR_REF (dr)));
3288 data_ref_base_var = create_tmp_var (TREE_TYPE (data_ref_base), "batmp");
3289 add_referenced_var (data_ref_base_var);
3290 data_ref_base = force_gimple_operand (data_ref_base, &seq, true,
3291 data_ref_base_var);
3292 gimple_seq_add_seq (new_stmt_list, seq);
3294 /* Create base_offset */
3295 base_offset = size_binop (PLUS_EXPR,
3296 fold_convert (sizetype, base_offset),
3297 fold_convert (sizetype, init));
3298 dest = create_tmp_var (sizetype, "base_off");
3299 add_referenced_var (dest);
3300 base_offset = force_gimple_operand (base_offset, &seq, true, dest);
3301 gimple_seq_add_seq (new_stmt_list, seq);
3303 if (offset)
3305 tree tmp = create_tmp_var (sizetype, "offset");
3307 add_referenced_var (tmp);
3308 offset = fold_build2 (MULT_EXPR, sizetype,
3309 fold_convert (sizetype, offset), step);
3310 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3311 base_offset, offset);
3312 base_offset = force_gimple_operand (base_offset, &seq, false, tmp);
3313 gimple_seq_add_seq (new_stmt_list, seq);
3316 /* base + base_offset */
3317 if (loop_vinfo)
3318 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
3319 else
3321 addr_base = build1 (ADDR_EXPR,
3322 build_pointer_type (TREE_TYPE (DR_REF (dr))),
3323 unshare_expr (DR_REF (dr)));
3326 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
3327 base = get_base_address (DR_REF (dr));
3328 if (base
3329 && TREE_CODE (base) == MEM_REF)
3330 vect_ptr_type
3331 = build_qualified_type (vect_ptr_type,
3332 TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base, 0))));
3334 vec_stmt = fold_convert (vect_ptr_type, addr_base);
3335 addr_expr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
3336 get_name (base_name));
3337 add_referenced_var (addr_expr);
3338 vec_stmt = force_gimple_operand (vec_stmt, &seq, false, addr_expr);
3339 gimple_seq_add_seq (new_stmt_list, seq);
3341 if (DR_PTR_INFO (dr)
3342 && TREE_CODE (vec_stmt) == SSA_NAME)
3344 duplicate_ssa_name_ptr_info (vec_stmt, DR_PTR_INFO (dr));
3345 if (offset)
3347 SSA_NAME_PTR_INFO (vec_stmt)->align = 1;
3348 SSA_NAME_PTR_INFO (vec_stmt)->misalign = 0;
3352 if (vect_print_dump_info (REPORT_DETAILS))
3354 fprintf (vect_dump, "created ");
3355 print_generic_expr (vect_dump, vec_stmt, TDF_SLIM);
3358 return vec_stmt;
3362 /* Function vect_create_data_ref_ptr.
3364 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
3365 location accessed in the loop by STMT, along with the def-use update
3366 chain to appropriately advance the pointer through the loop iterations.
3367 Also set aliasing information for the pointer. This pointer is used by
3368 the callers to this function to create a memory reference expression for
3369 vector load/store access.
3372 1. STMT: a stmt that references memory. Expected to be of the form
3373 GIMPLE_ASSIGN <name, data-ref> or
3374 GIMPLE_ASSIGN <data-ref, name>.
3375 2. AGGR_TYPE: the type of the reference, which should be either a vector
3376 or an array.
3377 3. AT_LOOP: the loop where the vector memref is to be created.
3378 4. OFFSET (optional): an offset to be added to the initial address accessed
3379 by the data-ref in STMT.
3380 5. BSI: location where the new stmts are to be placed if there is no loop
3381 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
3382 pointing to the initial address.
3385 1. Declare a new ptr to vector_type, and have it point to the base of the
3386 data reference (initial address accessed by the data reference).
3387 For example, for vector of type V8HI, the following code is generated:
3390 ap = (v8hi *)initial_address;
3392 if OFFSET is not supplied:
3393 initial_address = &a[init];
3394 if OFFSET is supplied:
3395 initial_address = &a[init + OFFSET];
3397 Return the initial_address in INITIAL_ADDRESS.
3399 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
3400 update the pointer in each iteration of the loop.
3402 Return the increment stmt that updates the pointer in PTR_INCR.
3404 3. Set INV_P to true if the access pattern of the data reference in the
3405 vectorized loop is invariant. Set it to false otherwise.
3407 4. Return the pointer. */
3410 vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
3411 tree offset, tree *initial_address,
3412 gimple_stmt_iterator *gsi, gimple *ptr_incr,
3413 bool only_init, bool *inv_p)
3416 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3417 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3418 struct loop *loop = NULL;
3419 bool nested_in_vect_loop = false;
3420 struct loop *containing_loop = NULL;
3425 gimple_seq new_stmt_list = NULL;
3429 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3431 gimple_stmt_iterator incr_gsi;
3434 tree indx_before_incr, indx_after_incr;
3437 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3440 gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
3441 || TREE_CODE (aggr_type) == VECTOR_TYPE);
3443 if (loop_vinfo)
3445 loop = LOOP_VINFO_LOOP (loop_vinfo);
3446 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3447 containing_loop = (gimple_bb (stmt))->loop_father;
3448 pe = loop_preheader_edge (loop);
3450 else
3452 gcc_assert (bb_vinfo);
3453 only_init = true;
3454 *inv_p = false;
3457 /* Check the step (evolution) of the load in LOOP, and record
3458 whether it's invariant. */
3459 if (nested_in_vect_loop)
3460 step = STMT_VINFO_DR_STEP (stmt_info);
3462 step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
3464 if (tree_int_cst_compare (step, size_zero_node) == 0)
3465 *inv_p = true;
3466 else
3467 *inv_p = false;
3468 negative = tree_int_cst_compare (step, size_zero_node) < 0;
3470 /* Create an expression for the first address accessed by this load
3471 in LOOP. */
3472 base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));
3474 if (vect_print_dump_info (REPORT_DETAILS))
3476 tree data_ref_base = base_name;
3477 fprintf (vect_dump, "create %s-pointer variable to type: ",
3478 tree_code_name[(int) TREE_CODE (aggr_type)]);
3479 print_generic_expr (vect_dump, aggr_type, TDF_SLIM);
3480 if (TREE_CODE (data_ref_base) == VAR_DECL
3481 || TREE_CODE (data_ref_base) == ARRAY_REF)
3482 fprintf (vect_dump, " vectorizing an array ref: ");
3483 else if (TREE_CODE (data_ref_base) == COMPONENT_REF)
3484 fprintf (vect_dump, " vectorizing a record based array ref: ");
3485 else if (TREE_CODE (data_ref_base) == SSA_NAME)
3486 fprintf (vect_dump, " vectorizing a pointer ref: ");
3487 print_generic_expr (vect_dump, base_name, TDF_SLIM);
3490 /* (1) Create the new aggregate-pointer variable. */
3491 aggr_ptr_type = build_pointer_type (aggr_type);
3492 base = get_base_address (DR_REF (dr));
3493 if (base
3494 && TREE_CODE (base) == MEM_REF)
3495 aggr_ptr_type
3496 = build_qualified_type (aggr_ptr_type,
3497 TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base, 0))));
3498 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var,
3499 get_name (base_name));
3501 /* Vector and array types inherit the alias set of their component
3502 type by default so we need to use a ref-all pointer if the data
3503 reference does not conflict with the created aggregated data
3504 reference because it is not addressable. */
3505 if (!alias_sets_conflict_p (get_deref_alias_set (aggr_ptr),
3506 get_alias_set (DR_REF (dr))))
3508 aggr_ptr_type
3509 = build_pointer_type_for_mode (aggr_type,
3510 TYPE_MODE (aggr_ptr_type), true);
3511 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var,
3512 get_name (base_name));
3515 /* Likewise for any of the data references in the stmt group. */
3516 else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
3518 gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
3519 do
3521 tree lhs = gimple_assign_lhs (orig_stmt);
3522 if (!alias_sets_conflict_p (get_deref_alias_set (aggr_ptr),
3523 get_alias_set (lhs)))
3526 = build_pointer_type_for_mode (aggr_type,
3527 TYPE_MODE (aggr_ptr_type), true);
3529 = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var,
3530 get_name (base_name));
3531 break;
3534 orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo_for_stmt (orig_stmt));
3536 while (orig_stmt);
3539 add_referenced_var (aggr_ptr);
3541 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
3542 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
3543 def-use update cycles for the pointer: one relative to the outer-loop
3544 (LOOP), which is what steps (3) and (4) below do. The other is relative
3545 to the inner-loop (which is the inner-most loop containing the dataref),
3546 and this is done by step (5) below.
3548 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
3549 inner-most loop, and so steps (3),(4) work the same, and step (5) is
3550 redundant. Steps (3),(4) create the following:
3553 LOOP: vp1 = phi(vp0,vp2)
3559 If there is an inner-loop nested in loop, then step (5) will also be
3560 applied, and an additional update in the inner-loop will be created:
3563 LOOP: vp1 = phi(vp0,vp2)
3565 inner: vp3 = phi(vp1,vp4)
3566 vp4 = vp3 + inner_step
3572 /* (2) Calculate the initial address of the aggregate-pointer, and set
3573 the aggregate-pointer to point to it before the loop. */
3575 /* Create: (&(base[init_val+offset]) in the loop preheader. */
3577 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
3578 offset, loop);
3580 if (new_stmt_list)
3582 if (pe)
3583 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
3584 gcc_assert (!new_bb);
3586 else
3587 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
3590 *initial_address = new_temp;
3592 /* Create: p = (aggr_type *) initial_base */
3593 if (TREE_CODE (new_temp) != SSA_NAME
3594 || !useless_type_conversion_p (aggr_ptr_type, TREE_TYPE (new_temp)))
3596 vec_stmt = gimple_build_assign (aggr_ptr,
3597 fold_convert (aggr_ptr_type, new_temp));
3598 aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt);
3599 /* Copy the points-to information if it exists. */
3600 if (DR_PTR_INFO (dr))
3601 duplicate_ssa_name_ptr_info (aggr_ptr_init, DR_PTR_INFO (dr));
3602 gimple_assign_set_lhs (vec_stmt, aggr_ptr_init);
3604 if (pe)
3605 new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
3606 gcc_assert (!new_bb);
3608 else
3609 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
3611 else
3612 aggr_ptr_init = new_temp;
3614 /* (3) Handle the updating of the aggregate-pointer inside the loop.
3615 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
3616 inner-loop nested in LOOP (during outer-loop vectorization). */
3618 /* No update in loop is required. */
3619 if (only_init && (!loop_vinfo || at_loop == loop))
3620 aptr = aggr_ptr_init;
3622 else
3623 /* The step of the aggregate pointer is the type size. */
3624 tree step = TYPE_SIZE_UNIT (aggr_type);
3625 /* One exception to the above is when the scalar step of the load in
3626 LOOP is zero. In this case the step here is also zero. */
3627 if (*inv_p)
3628 step = size_zero_node;
3629 else if (negative)
3630 step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
3632 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
3634 create_iv (aggr_ptr_init,
3635 fold_convert (aggr_ptr_type, step),
3636 aggr_ptr, loop, &incr_gsi, insert_after,
3637 &indx_before_incr, &indx_after_incr);
3638 incr = gsi_stmt (incr_gsi);
3639 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
3641 /* Copy the points-to information if it exists. */
3642 if (DR_PTR_INFO (dr))
3644 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
3645 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
3650 aptr = indx_before_incr;
3653 if (!nested_in_vect_loop || only_init)
3654 return aptr;
3657 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
3658 nested in LOOP, if one exists. */
3660 gcc_assert (nested_in_vect_loop);
3663 standard_iv_increment_position (containing_loop, &incr_gsi,
3664 &insert_after);
3665 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
3666 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
3667 &indx_after_incr);
3668 incr = gsi_stmt (incr_gsi);
3669 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
3671 /* Copy the points-to information if it exists. */
3672 if (DR_PTR_INFO (dr))
3674 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
3675 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
3680 return indx_before_incr;
3687 /* Function bump_vector_ptr
3689 Increment a pointer (to a vector type) by vector-size. If requested,
3690 i.e. if PTR-INCR is given, then also connect the new increment stmt
3691 to the existing def-use update-chain of the pointer, by modifying
3692 the PTR_INCR as illustrated below:
3694 The pointer def-use update-chain before this function:
3695 DATAREF_PTR = phi (p_0, p_2)
3697 PTR_INCR: p_2 = DATAREF_PTR + step
3699 The pointer def-use update-chain after this function:
3700 DATAREF_PTR = phi (p_0, p_2)
3702 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
3704 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
3707 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
3709 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
3710 the loop. The increment amount across iterations is expected
3711 to be vector_size.
3712 BSI - location where the new update stmt is to be placed.
3713 STMT - the original scalar memory-access stmt that is being vectorized.
3714 BUMP - optional. The offset by which to bump the pointer. If not given,
3715 the offset is assumed to be vector_size.
3717 Output: Return NEW_DATAREF_PTR as illustrated above.
3722 bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
3723 gimple stmt, tree bump)
3725 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3726 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3727 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3728 tree ptr_var = SSA_NAME_VAR (dataref_ptr);
3729 tree update = TYPE_SIZE_UNIT (vectype);
3730 gimple incr_stmt;
3731 ssa_op_iter iter;
3732 use_operand_p use_p;
3733 tree new_dataref_ptr;
3735 if (bump)
3736 update = bump;
3738 incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, ptr_var,
3739 dataref_ptr, update);
3740 new_dataref_ptr = make_ssa_name (ptr_var, incr_stmt);
3741 gimple_assign_set_lhs (incr_stmt, new_dataref_ptr);
3742 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
3744 /* Copy the points-to information if it exists. */
3745 if (DR_PTR_INFO (dr))
3747 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
3748 SSA_NAME_PTR_INFO (new_dataref_ptr)->align = 1;
3749 SSA_NAME_PTR_INFO (new_dataref_ptr)->misalign = 0;
3752 if (!ptr_incr)
3753 return new_dataref_ptr;
3755 /* Update the vector-pointer's cross-iteration increment. */
3756 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
3758 tree use = USE_FROM_PTR (use_p);
3760 if (use == dataref_ptr)
3761 SET_USE (use_p, new_dataref_ptr);
3762 else
3763 gcc_assert (tree_int_cst_compare (use, update) == 0);
3766 return new_dataref_ptr;
3770 /* Function vect_create_destination_var.
3772 Create a new temporary of type VECTYPE. */
3775 vect_create_destination_var (tree scalar_dest, tree vectype)
3777 tree vec_dest;
3778 const char *new_name;
3779 tree type;
3780 enum vect_var_kind kind;
3782 kind = vectype ? vect_simple_var : vect_scalar_var;
3783 type = vectype ? vectype : TREE_TYPE (scalar_dest);
3785 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
3787 new_name = get_name (scalar_dest);
3788 if (!new_name)
3789 new_name = "var_";
3790 vec_dest = vect_get_new_vect_var (type, kind, new_name);
3791 add_referenced_var (vec_dest);
3793 return vec_dest;
3796 /* Function vect_strided_store_supported.
3798 Returns TRUE if interleave high and interleave low permutations
3799 are supported, and FALSE otherwise. */
3802 vect_strided_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
3804 enum machine_mode mode = TYPE_MODE (vectype);
3806 /* vect_permute_store_chain requires the group size to be a power of two. */
3807 if (exact_log2 (count) == -1)
3809 if (vect_print_dump_info (REPORT_DETAILS))
3810 fprintf (vect_dump, "the size of the group of strided accesses"
3811 " is not a power of 2");
3815 /* Check that the permutation is supported. */
3816 if (VECTOR_MODE_P (mode))
3818 unsigned int i, nelt = GET_MODE_NUNITS (mode);
3819 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
3820 for (i = 0; i < nelt / 2; i++)
3822 sel[i * 2] = i;
3823 sel[i * 2 + 1] = i + nelt;
3825 if (can_vec_perm_p (mode, false, sel))
3827 for (i = 0; i < nelt; i++)
3828 sel[i] += nelt / 2;
3829 if (can_vec_perm_p (mode, false, sel))
3830 return true;
3834 if (vect_print_dump_info (REPORT_DETAILS))
3835 fprintf (vect_dump, "interleave op not supported by target.");
3836 return false;
3840 /* Return TRUE if vec_store_lanes is available for COUNT vectors of
3841 type VECTYPE. */
3844 vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
3846 return vect_lanes_optab_supported_p ("vec_store_lanes",
3847 vec_store_lanes_optab,
3848 vectype, count);
3852 /* Function vect_permute_store_chain.
3854 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
3855 a power of 2, generate interleave_high/low stmts to reorder the data
3856 correctly for the stores. Return the final references for stores in
3857 RESULT_CHAIN.
3859 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
3860 The input is 4 vectors each containing 8 elements. We assign a number to
3861 each element, the input sequence is:
3863 1st vec: 0 1 2 3 4 5 6 7
3864 2nd vec: 8 9 10 11 12 13 14 15
3865 3rd vec: 16 17 18 19 20 21 22 23
3866 4th vec: 24 25 26 27 28 29 30 31
3868 The output sequence should be:
3870 1st vec: 0 8 16 24 1 9 17 25
3871 2nd vec: 2 10 18 26 3 11 19 27
3872 3rd vec: 4 12 20 28 5 13 21 29
3873 4th vec: 6 14 22 30 7 15 23 31
3875 i.e., we interleave the contents of the four vectors in their order.
3877 We use interleave_high/low instructions to create such output. The input of
3878 each interleave_high/low operation is two vectors:
3879 1st vec 0 1 2 3
3880 2nd vec 4 5 6 7
3881 the even elements of the result vector are obtained left-to-right from the
3882 high/low elements of the first vector. The odd elements of the result are
3883 obtained left-to-right from the high/low elements of the second vector.
3884 The output of interleave_high will be: 0 4 1 5
3885 and of interleave_low: 2 6 3 7
3888 The permutation is done in log LENGTH stages. In each stage interleave_high
3889 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
3890 where the first argument is taken from the first half of DR_CHAIN and the
3891 second argument from its second half.
3894 I1: interleave_high (1st vec, 3rd vec)
3895 I2: interleave_low (1st vec, 3rd vec)
3896 I3: interleave_high (2nd vec, 4th vec)
3897 I4: interleave_low (2nd vec, 4th vec)
3899 The output for the first stage is:
3901 I1: 0 16 1 17 2 18 3 19
3902 I2: 4 20 5 21 6 22 7 23
3903 I3: 8 24 9 25 10 26 11 27
3904 I4: 12 28 13 29 14 30 15 31
3906 The output of the second stage, i.e. the final result is:
3908 I1: 0 8 16 24 1 9 17 25
3909 I2: 2 10 18 26 3 11 19 27
3910 I3: 4 12 20 28 5 13 21 29
3911 I4: 6 14 22 30 7 15 23 31. */
3914 vect_permute_store_chain (VEC(tree,heap) *dr_chain,
3915 unsigned int length,
3916 gimple stmt,
3917 gimple_stmt_iterator *gsi,
3918 VEC(tree,heap) **result_chain)
3920 tree perm_dest, vect1, vect2, high, low;
3922 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
3923 tree perm_mask_low, perm_mask_high;
3924 gimple perm_stmt;
3925 unsigned int i, n, j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
3926 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
3928 *result_chain = VEC_copy (tree, heap, dr_chain);
3930 for (i = 0, n = nelt / 2; i < n; i++)
3932 sel[i * 2] = i;
3933 sel[i * 2 + 1] = i + nelt;
3935 perm_mask_high = vect_gen_perm_mask (vectype, sel);
3936 gcc_assert (perm_mask_high != NULL);
3938 for (i = 0; i < nelt; i++)
3939 sel[i] += nelt / 2;
3940 perm_mask_low = vect_gen_perm_mask (vectype, sel);
3941 gcc_assert (perm_mask_low != NULL);
3943 for (i = 0, n = exact_log2 (length); i < n; i++)
3945 for (j = 0; j < length/2; j++)
3947 vect1 = VEC_index (tree, dr_chain, j);
3948 vect2 = VEC_index (tree, dr_chain, j+length/2);
3950 /* Create interleaving stmt:
3951 high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1, ...}> */
3952 perm_dest = create_tmp_var (vectype, "vect_inter_high");
3953 DECL_GIMPLE_REG_P (perm_dest) = 1;
3954 add_referenced_var (perm_dest);
3955 high = make_ssa_name (perm_dest, NULL);
3956 perm_stmt
3957 = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, high,
3958 vect1, vect2, perm_mask_high);
3959 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3960 VEC_replace (tree, *result_chain, 2*j, high);
3962 /* Create interleaving stmt:
3963 low = VEC_PERM_EXPR <vect1, vect2, {nelt/2, nelt*3/2, nelt/2+1,
3964 nelt*3/2+1, ...}> */
3965 perm_dest = create_tmp_var (vectype, "vect_inter_low");
3966 DECL_GIMPLE_REG_P (perm_dest) = 1;
3967 add_referenced_var (perm_dest);
3968 low = make_ssa_name (perm_dest, NULL);
3969 perm_stmt
3970 = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, low,
3971 vect1, vect2, perm_mask_low);
3972 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3973 VEC_replace (tree, *result_chain, 2*j+1, low);
3975 dr_chain = VEC_copy (tree, heap, *result_chain);
3979 /* Function vect_setup_realignment
3981 This function is called when vectorizing an unaligned load using
3982 the dr_explicit_realign[_optimized] scheme.
3983 This function generates the following code at the loop prolog:
3986 x msq_init = *(floor(p)); # prolog load
3987 realignment_token = call target_builtin;
3989 x msq = phi (msq_init, ---)
3991 The stmts marked with x are generated only for the case of
3992 dr_explicit_realign_optimized.
3994 The code above sets up a new (vector) pointer, pointing to the first
3995 location accessed by STMT, and a "floor-aligned" load using that pointer.
3996 It also generates code to compute the "realignment-token" (if the relevant
3997 target hook was defined), and creates a phi-node at the loop-header bb
3998 whose arguments are the result of the prolog-load (created by this
3999 function) and the result of a load that takes place in the loop (to be
4000 created by the caller to this function).
4002 For the case of dr_explicit_realign_optimized:
4003 The caller to this function uses the phi-result (msq) to create the
4004 realignment code inside the loop, and sets up the missing phi argument,
4007 msq = phi (msq_init, lsq)
4008 lsq = *(floor(p')); # load in loop
4009 result = realign_load (msq, lsq, realignment_token);
4011 For the case of dr_explicit_realign:
4013 msq = *(floor(p)); # load in loop
4015 lsq = *(floor(p')); # load in loop
4016 result = realign_load (msq, lsq, realignment_token);
4019 STMT - (scalar) load stmt to be vectorized. This load accesses
4020 a memory location that may be unaligned.
4021 BSI - place where new code is to be inserted.
4022 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
4023 is used.
4025 Output:
4026 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
4027 target hook, if defined.
4028 Return value - the result of the loop-header phi node. */
4031 vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
4032 tree *realignment_token,
4033 enum dr_alignment_support alignment_support_scheme,
4034 tree init_addr,
4035 struct loop **at_loop)
4037 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4038 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4039 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4040 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4041 struct loop *loop = NULL;
4043 tree scalar_dest = gimple_assign_lhs (stmt);
4050 tree msq_init = NULL_TREE;
4053 tree msq = NULL_TREE;
4054 gimple_seq stmts = NULL;
4056 bool compute_in_loop = false;
4057 bool nested_in_vect_loop = false;
4058 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
4059 struct loop *loop_for_initial_load = NULL;
4063 loop = LOOP_VINFO_LOOP (loop_vinfo);
4064 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4067 gcc_assert (alignment_support_scheme == dr_explicit_realign
4068 || alignment_support_scheme == dr_explicit_realign_optimized);
  /* We need to generate three things:
     1. the misalignment computation
     2. the extra vector load (for the optimized realignment scheme).
     3. the phi node for the two vectors from which the realignment is
        done (for the optimized realignment scheme).  */

  /* 1. Determine where to generate the misalignment computation.

     If INIT_ADDR is NULL_TREE, this indicates that the misalignment
     calculation will be generated by this function, outside the loop (in the
     preheader).  Otherwise, INIT_ADDR had already been computed for us by the
     caller, inside the loop.

     Background: If the misalignment remains fixed throughout the iterations
     of the loop, then both realignment schemes are applicable, and also the
     misalignment computation can be done outside LOOP.  This is because we
     are vectorizing LOOP, and so the memory accesses in LOOP advance in
     steps that are a multiple of VS (the Vector Size), and therefore the
     misalignment in different vectorized LOOP iterations is always the same.
     The problem arises only if the memory access is in an inner-loop nested
     inside LOOP, which is now being vectorized using outer-loop
     vectorization.  This is the only case when the misalignment of the
     memory access may not remain fixed throughout the iterations of the
     inner-loop (as explained in detail in vect_supportable_dr_alignment).
     In this case, not only is the optimized realignment scheme not
     applicable, but also the misalignment computation (and generation of the
     realignment token that is passed to REALIGN_LOAD) have to be done inside
     the loop.

     In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
     or not, which in turn determines if the misalignment is computed inside
     the inner-loop, or outside LOOP.  */
  if (init_addr != NULL_TREE || !loop_vinfo)
    {
      compute_in_loop = true;
      gcc_assert (alignment_support_scheme == dr_explicit_realign);
    }
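  /* When COMPUTE_IN_LOOP is set, only the unoptimized dr_explicit_realign
     scheme applies (asserted above): the optimized scheme hoists the token
     computation and the extra load into the preheader, which requires a
     loop-invariant misalignment.  */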
  /* 2. Determine where to generate the extra vector load.

     For the optimized realignment scheme, instead of generating two vector
     loads in each iteration, we generate a single extra vector load in the
     preheader of the loop, and in each iteration reuse the result of the
     vector load from the previous iteration.  In case the memory access is
     in an inner-loop nested inside LOOP, which is now being vectorized using
     outer-loop vectorization, we need to determine whether this initial
     vector load should be generated at the preheader of the inner-loop, or
     can be generated at the preheader of LOOP.  If the memory access has no
     evolution in LOOP, it can be generated in the preheader of LOOP.
     Otherwise, it has to be generated inside LOOP (in the preheader of the
     inner-loop).  */
  if (nested_in_vect_loop)
    {
      tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
      bool invariant_in_outerloop =
        (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
      loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
    }
  else
    loop_for_initial_load = loop;
  if (at_loop)
    *at_loop = loop_for_initial_load;

  if (loop_for_initial_load)
    pe = loop_preheader_edge (loop_for_initial_load);
  /* 3. For the case of the optimized realignment, create the first vector
        load at the loop preheader.  */

  if (alignment_support_scheme == dr_explicit_realign_optimized)
    {
      /* Create msq_init = *(floor(p1)) in the loop preheader.  */

      gcc_assert (!compute_in_loop);
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
                                      NULL_TREE, &init_addr, NULL, &inc,
                                      true, &inv_p);
      new_stmt = gimple_build_assign_with_ops
                   (BIT_AND_EXPR, NULL_TREE, ptr,
                    build_int_cst (TREE_TYPE (ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
      new_temp = make_ssa_name (SSA_NAME_VAR (ptr), new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
      gcc_assert (!new_bb);
      data_ref
        = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
                  build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
      new_stmt = gimple_build_assign (vec_dest, data_ref);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      mark_symbols_for_renaming (new_stmt);
      if (pe)
        {
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
        }
      else
        gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);

      msq_init = gimple_assign_lhs (new_stmt);
    }
  /* 4. Create realignment token using a target builtin, if available.
        It is done either inside the containing loop, or before LOOP (as
        determined above).  */

  if (targetm.vectorize.builtin_mask_for_load)
    {
      tree builtin_decl;

      /* Compute INIT_ADDR - the initial address accessed by this memref.  */
      if (!init_addr)
        {
          /* Generate the INIT_ADDR computation outside LOOP.  */
          init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
                                                            NULL_TREE, loop);
          if (loop)
            {
              pe = loop_preheader_edge (loop);
              new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
              gcc_assert (!new_bb);
            }
          else
            gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        }

      builtin_decl = targetm.vectorize.builtin_mask_for_load ();
      new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
      vec_dest =
        vect_create_destination_var (scalar_dest,
                                     gimple_call_return_type (new_stmt));
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);

      if (compute_in_loop)
        gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      else
        {
          /* Generate the misalignment computation outside LOOP.  */
          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
        }

      *realignment_token = gimple_call_lhs (new_stmt);

      /* The result of the CALL_EXPR to this builtin is determined from
         the value of the parameter and no global variables are touched
         which makes the builtin a "const" function.  Requiring the
         builtin to have the "const" attribute makes it unnecessary
         to call mark_call_clobbered.  */
      gcc_assert (TREE_READONLY (builtin_decl));
    }
  if (alignment_support_scheme == dr_explicit_realign)
    return msq;

  gcc_assert (!compute_in_loop);
  gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);

  /* 5. Create msq = phi <msq_init, lsq> in loop.  */

  pe = loop_preheader_edge (containing_loop);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  msq = make_ssa_name (vec_dest, NULL);
  phi_stmt = create_phi_node (msq, containing_loop->header);
  SSA_NAME_DEF_STMT (msq) = phi_stmt;
  add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);

  return msq;
}
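/* For illustration, on an Altivec-like target (where vec_realign_load_optab
   and builtin_mask_for_load are both available), a load from a pointer P
   that is not 16-byte aligned is emitted roughly as:
     prolog:  msq_init = *(P & -16);  rt = mask_for_load (P);
     loop:    msq = phi (msq_init, lsq);
              lsq = *(floor (P'));
              result = REALIGN_LOAD <msq, lsq, rt>;
   i.e. one aligned load plus one permute per iteration.  */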
/* Function vect_strided_load_supported.

   Returns TRUE if even and odd permutations are supported,
   and FALSE otherwise.  */

bool
vect_strided_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
  enum machine_mode mode = TYPE_MODE (vectype);

  /* vect_permute_load_chain requires the group size to be a power of two.  */
  if (exact_log2 (count) == -1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "the size of the group of strided accesses"
                 " is not a power of 2");
      return false;
    }

  /* Check that the permutation is supported.  */
  if (VECTOR_MODE_P (mode))
    {
      unsigned int i, nelt = GET_MODE_NUNITS (mode);
      unsigned char *sel = XALLOCAVEC (unsigned char, nelt);

      for (i = 0; i < nelt; i++)
        sel[i] = i * 2;
      if (can_vec_perm_p (mode, false, sel))
        {
          for (i = 0; i < nelt; i++)
            sel[i] = i * 2 + 1;
          if (can_vec_perm_p (mode, false, sel))
            return true;
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "extract even/odd not supported by target");
  return false;
}
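/* E.g. for V4SI (nelt == 4) the selectors checked above are {0, 2, 4, 6}
   (extract even) and {1, 3, 5, 7} (extract odd), indexing into the
   8-element concatenation of the two input vectors.  */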
/* Return TRUE if vec_load_lanes is available for COUNT vectors of
   type VECTYPE.  */

bool
vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
  return vect_lanes_optab_supported_p ("vec_load_lanes",
                                       vec_load_lanes_optab,
                                       vectype, count);
}
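/* On targets with single-instruction structure loads (e.g. NEON's
   vld2/vld3/vld4), a vec_load_lanes instance loads and de-interleaves a
   whole group at once, avoiding the extract-even/odd permutation network
   implemented by vect_permute_load_chain below.  */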
/* Function vect_permute_load_chain.

   Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
   a power of 2, generate extract_even/odd stmts to reorder the input data
   correctly.  Return the final references for loads in RESULT_CHAIN.

   E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
   The input is 4 vectors each containing 8 elements.  We assign a number to
   each element, the input sequence is:

   1st vec:   0  1  2  3  4  5  6  7
   2nd vec:   8  9 10 11 12 13 14 15
   3rd vec:  16 17 18 19 20 21 22 23
   4th vec:  24 25 26 27 28 29 30 31

   The output sequence should be:

   1st vec:  0 4  8 12 16 20 24 28
   2nd vec:  1 5  9 13 17 21 25 29
   3rd vec:  2 6 10 14 18 22 26 30
   4th vec:  3 7 11 15 19 23 27 31

   i.e., the first output vector should contain the first elements of each
   interleaving group, etc.

   We use extract_even/odd instructions to create such output.  The input of
   each extract_even/odd operation is two vectors
   1st vec    2nd vec
   0 1 2 3    4 5 6 7

   and the output is the vector of extracted even/odd elements.  The output
   of extract_even will be:  0 2 4 6
   and of extract_odd:       1 3 5 7

   The permutation is done in log LENGTH stages.  In each stage extract_even
   and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
   their order.  In our example,

   E1: extract_even (1st vec, 2nd vec)
   E2: extract_odd  (1st vec, 2nd vec)
   E3: extract_even (3rd vec, 4th vec)
   E4: extract_odd  (3rd vec, 4th vec)

   The output for the first stage will be:

   E1:  0  2  4  6  8 10 12 14
   E2:  1  3  5  7  9 11 13 15
   E3: 16 18 20 22 24 26 28 30
   E4: 17 19 21 23 25 27 29 31

   In order to proceed and create the correct sequence for the next stage (or
   for the correct output, if the second stage is the last one, as in our
   example), we first put the output of extract_even operation and then the
   output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
   The input for the second stage is:

   1st vec (E1):  0  2  4  6  8 10 12 14
   2nd vec (E3): 16 18 20 22 24 26 28 30
   3rd vec (E2):  1  3  5  7  9 11 13 15
   4th vec (E4): 17 19 21 23 25 27 29 31

   The output of the second stage:

   E1: 0 4  8 12 16 20 24 28
   E2: 2 6 10 14 18 22 26 30
   E3: 1 5  9 13 17 21 25 29
   E4: 3 7 11 15 19 23 27 31

   And RESULT_CHAIN after reordering:

   1st vec (E1): 0 4  8 12 16 20 24 28
   2nd vec (E3): 1 5  9 13 17 21 25 29
   3rd vec (E2): 2 6 10 14 18 22 26 30
   4th vec (E4): 3 7 11 15 19 23 27 31.  */
static void
vect_permute_load_chain (VEC(tree,heap) *dr_chain,
                         unsigned int length,
                         gimple stmt,
                         gimple_stmt_iterator *gsi,
                         VEC(tree,heap) **result_chain)
{
  tree perm_dest, data_ref, first_vect, second_vect;
  tree perm_mask_even, perm_mask_odd;
  gimple perm_stmt;
  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
  unsigned int i, j, log_length = exact_log2 (length);
  unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
  unsigned char *sel = XALLOCAVEC (unsigned char, nelt);

  *result_chain = VEC_copy (tree, heap, dr_chain);

  for (i = 0; i < nelt; ++i)
    sel[i] = i * 2;
  perm_mask_even = vect_gen_perm_mask (vectype, sel);
  gcc_assert (perm_mask_even != NULL);

  for (i = 0; i < nelt; ++i)
    sel[i] = i * 2 + 1;
  perm_mask_odd = vect_gen_perm_mask (vectype, sel);
  gcc_assert (perm_mask_odd != NULL);

  for (i = 0; i < log_length; i++)
    {
      for (j = 0; j < length; j += 2)
        {
          first_vect = VEC_index (tree, dr_chain, j);
          second_vect = VEC_index (tree, dr_chain, j+1);

          /* data_ref = permute_even (first_data_ref, second_data_ref);  */
          perm_dest = create_tmp_var (vectype, "vect_perm_even");
          DECL_GIMPLE_REG_P (perm_dest) = 1;
          add_referenced_var (perm_dest);

          perm_stmt = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, perm_dest,
                                                     first_vect, second_vect,
                                                     perm_mask_even);
          data_ref = make_ssa_name (perm_dest, perm_stmt);
          gimple_assign_set_lhs (perm_stmt, data_ref);
          vect_finish_stmt_generation (stmt, perm_stmt, gsi);
          mark_symbols_for_renaming (perm_stmt);

          VEC_replace (tree, *result_chain, j/2, data_ref);

          /* data_ref = permute_odd (first_data_ref, second_data_ref);  */
          perm_dest = create_tmp_var (vectype, "vect_perm_odd");
          DECL_GIMPLE_REG_P (perm_dest) = 1;
          add_referenced_var (perm_dest);

          perm_stmt = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, perm_dest,
                                                     first_vect, second_vect,
                                                     perm_mask_odd);
          data_ref = make_ssa_name (perm_dest, perm_stmt);
          gimple_assign_set_lhs (perm_stmt, data_ref);
          vect_finish_stmt_generation (stmt, perm_stmt, gsi);
          mark_symbols_for_renaming (perm_stmt);

          VEC_replace (tree, *result_chain, j/2+length/2, data_ref);
        }
      dr_chain = VEC_copy (tree, heap, *result_chain);
    }
}
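/* Note on the indexing above: within one stage, the even result of pair J
   is stored in slot J/2 and the odd result in slot J/2 + LENGTH/2, which
   produces exactly the E1,E3,E2,E4 reordering described in the function
   comment.  */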
/* Function vect_transform_strided_load.

   Given a chain of input interleaved data-refs (in DR_CHAIN), build
   statements to perform their permutation and ascribe the result vectorized
   statements to the scalar statements.  */

void
vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
                             gimple_stmt_iterator *gsi)
{
  VEC(tree,heap) *result_chain = NULL;

  /* DR_CHAIN contains input data-refs that are a part of the interleaving.
     RESULT_CHAIN is the output of vect_permute_load_chain, it contains
     permuted vectors, that are ready for vector computation.  */
  result_chain = VEC_alloc (tree, heap, size);
  vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
  vect_record_strided_load_vectors (stmt, result_chain);
  VEC_free (tree, heap, result_chain);
}
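/* This entry point is used by the load vectorization code (vectorizable_load
   in tree-vect-stmts.c) once the vector loads of a group have been created;
   it permutes them and records the results against the scalar stmts.  */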
/* RESULT_CHAIN contains the output of a group of strided loads that were
   generated as part of the vectorization of STMT.  Assign the statement
   for each vector to the associated scalar statement.  */

void
vect_record_strided_load_vectors (gimple stmt, VEC(tree,heap) *result_chain)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
  gimple next_stmt, new_stmt;
  unsigned int i, gap_count;
  tree tmp_data_ref;

  /* Put a permuted data-ref in the VECTORIZED_STMT field.
     Since we scan the chain starting from its first node, their order
     corresponds to the order of data-refs in RESULT_CHAIN.  */
  next_stmt = first_stmt;
  gap_count = 1;
  FOR_EACH_VEC_ELT (tree, result_chain, i, tmp_data_ref)
    {
      if (!next_stmt)
        break;

      /* Skip the gaps.  Loads created for the gaps will be removed by dead
         code elimination pass later.  No need to check for the first stmt in
         the group, since it always exists.
         GROUP_GAP is the number of steps in elements from the previous
         access (if there is no gap GROUP_GAP is 1).  We skip loads that
         correspond to the gaps.  */
      if (next_stmt != first_stmt
          && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
        {
          gap_count++;
          continue;
        }

      while (next_stmt)
        {
          new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
          /* We assume that if VEC_STMT is not NULL, this is a case of
             multiple copies, and we put the new vector statement in the
             first available RELATED_STMT field.  */
          if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
            STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
          else
            {
              if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
                {
                  gimple prev_stmt =
                    STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
                  gimple rel_stmt =
                    STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
                  while (rel_stmt)
                    {
                      prev_stmt = rel_stmt;
                      rel_stmt =
                        STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
                    }
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
                    new_stmt;
                }
            }

          next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
          gap_count = 1;
          /* If NEXT_STMT accesses the same DR as the previous statement,
             put the same TMP_DATA_REF as its vectorized statement; otherwise
             get the next data-ref from RESULT_CHAIN.  */
          if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
            break;
        }
    }
}
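/* The RELATED_STMT walk above appends NEW_STMT at the tail of the chain of
   vector copies hanging off the group member's VEC_STMT, so the K-th vector
   copy of a scalar stmt is found by following RELATED_STMT K times.  */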
/* Function vect_can_force_dr_alignment_p.

   Returns whether the alignment of a DECL can be forced to be aligned
   on ALIGNMENT bit boundary.  */

bool
vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
{
  if (TREE_CODE (decl) != VAR_DECL)
    return false;

  if (DECL_EXTERNAL (decl))
    return false;

  if (TREE_ASM_WRITTEN (decl))
    return false;

  if (TREE_STATIC (decl))
    return (alignment <= MAX_OFILE_ALIGNMENT);
  else
    return (alignment <= MAX_STACK_ALIGNMENT);
}
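/* Rationale for the checks above: an external decl may be defined elsewhere
   with smaller alignment, and a decl whose assembler code has already been
   written can no longer be realigned; otherwise the achievable bound is
   MAX_OFILE_ALIGNMENT for static storage and MAX_STACK_ALIGNMENT for
   automatic storage.  */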
/* Return whether the data reference DR is supported with respect to its
   alignment.
   If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
   if it is aligned, i.e., check if it is possible to vectorize it with
   different alignment.  */

enum dr_alignment_support
vect_supportable_dr_alignment (struct data_reference *dr,
                               bool check_aligned_accesses)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  enum machine_mode mode = TYPE_MODE (vectype);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  bool nested_in_vect_loop = false;

  if (aligned_access_p (dr) && !check_aligned_accesses)
    return dr_aligned;

  if (loop_vinfo)
    {
      vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
    }

  /* Possibly unaligned access.  */
  /* We can choose between using the implicit realignment scheme (generating
     a misaligned_move stmt) and the explicit realignment scheme (generating
     aligned loads with a REALIGN_LOAD).  There are two variants to the
     explicit realignment scheme: optimized, and unoptimized.
     We can optimize the realignment only if the step between consecutive
     vector loads is equal to the vector size.  Since the vector memory
     accesses advance in steps of VS (Vector Size) in the vectorized loop, it
     is guaranteed that the misalignment amount remains the same throughout
     the execution of the vectorized loop.  Therefore, we can create the
     "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
     at the loop preheader.

     However, in the case of outer-loop vectorization, when vectorizing a
     memory access in the inner-loop nested within the LOOP that is now being
     vectorized, while it is guaranteed that the misalignment of the
     vectorized memory access will remain the same in different outer-loop
     iterations, it is *not* guaranteed that it will remain the same
     throughout the execution of the inner-loop.  This is because the
     inner-loop advances with the original scalar step (and not in steps of
     VS).  If the inner-loop step happens to be a multiple of VS, then the
     misalignment remains fixed and we can use the optimized realignment
     scheme.  For example:

      for (i=0; i<N; i++)
        for (j=0; j<M; j++)
          s += a[i+j];

     When vectorizing the i-loop in the above example, the step between
     consecutive vector loads is 1, and so the misalignment does not remain
     fixed across the execution of the inner-loop, and the realignment cannot
     be optimized (as illustrated in the following pseudo vectorized loop):

      for (i=0; i<N; i+=4)
        for (j=0; j<M; j++){
          vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
                         // when j is {0,1,2,3,4,5,6,7,...} respectively.
                         // (assuming that we start from an aligned address).
          }

     We therefore have to use the unoptimized realignment scheme:

      for (i=0; i<N; i+=4)
        for (j=k; j<M; j+=4)
          vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
                         // that the misalignment of the initial address is
                         // 0).

     The loop can then be vectorized as follows:

      for (k=0; k<4; k++){
        rt = get_realignment_token (&vp[k]);
        for (i=0; i<N; i+=4){
          v1 = vp[i+k];
          for (j=k; j<M; j+=4){
            v2 = vp[i+j+VS-1];
            va = REALIGN_LOAD <v1,v2,rt>;
            vs += va;
            v1 = v2;
          }
        }
      } */
  if (DR_IS_READ (dr))
    {
      bool is_packed = false;
      tree type = (TREE_TYPE (DR_REF (dr)));

      if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
          && (!targetm.vectorize.builtin_mask_for_load
              || targetm.vectorize.builtin_mask_for_load ()))
        {
          tree vectype = STMT_VINFO_VECTYPE (stmt_info);
          if ((nested_in_vect_loop
               && (TREE_INT_CST_LOW (DR_STEP (dr))
                   != GET_MODE_SIZE (TYPE_MODE (vectype))))
              || !loop_vinfo)
            return dr_explicit_realign;
          else
            return dr_explicit_realign_optimized;
        }
      if (!known_alignment_for_access_p (dr))
        {
          tree ba = DR_BASE_OBJECT (dr);

          if (ba)
            is_packed = contains_packed_reference (ba);
        }

      if (targetm.vectorize.
            support_vector_misalignment (mode, type,
                                         DR_MISALIGNMENT (dr), is_packed))
        /* Can't software pipeline the loads, but can at least do them.  */
        return dr_unaligned_supported;
    }
  else
    {
      bool is_packed = false;
      tree type = (TREE_TYPE (DR_REF (dr)));

      if (!known_alignment_for_access_p (dr))
        {
          tree ba = DR_BASE_OBJECT (dr);

          if (ba)
            is_packed = contains_packed_reference (ba);
        }

      if (targetm.vectorize.
            support_vector_misalignment (mode, type,
                                         DR_MISALIGNMENT (dr), is_packed))
        return dr_unaligned_supported;
    }

  /* Unsupported.  */
  return dr_unaligned_unsupported;
}
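/* Decision summary: aligned accesses return dr_aligned at once; unaligned
   reads prefer the explicit realignment schemes when the target provides
   vec_realign_load (the optimized variant only when the misalignment is
   invariant in the vectorized loop); otherwise both reads and writes fall
   back to the target's misaligned-access support, and
   dr_unaligned_unsupported is the last resort.  */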